

def efetch(db, **keywords):
    """Fetches Entrez results which are returned as a handle.

    EFetch retrieves records in the requested format from a list of one or
    more UIs or from user's environment.

    See the online documentation for an explanation of the parameters:
    http://www.ncbi.nlm.nih.gov/entrez/query/static/efetch_help.html

    Return a handle to the results.

    Raises an IOError exception if there's a network error.

    Short example:

    >>> from Bio import Entrez
    >>> Entrez.email = "Your.Name.Here@example.org"
    >>> handle = Entrez.efetch(db="nucleotide", id="57240072", rettype="gb", retmode="text")
    >>> print(handle.readline().strip())
    LOCUS       AY851612                 892 bp    DNA     linear   PLN 10-APR-2007
    >>> handle.close()

    Warning: The NCBI changed the default retmode in Feb 2012, so many
    databases which previously returned text output now give XML.
    """
    cgi = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
    variables = {'db': db}
    variables.update(keywords)
    post = False
    try:
        ids = variables["id"]
    except KeyError:
        pass
    else:
        if isinstance(ids, list):
            ids = ",".join(ids)
            variables["id"] = ids
        if ids.count(",") >= 200:
            # NCBI prefers an HTTP POST instead of an HTTP GET if there are
            # more than about 200 IDs
            post = True
    return _open(cgi, variables, post)
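
A minimal usage sketch (the contact address and the second GI number are placeholders): a list passed as id is joined into a comma-separated string by the code above, and an explicit rettype/retmode avoids the XML default mentioned in the warning.

    from Bio import Entrez

    Entrez.email = "Your.Name.Here@example.org"   # always tell NCBI who you are
    # A list of IDs is joined with commas by efetch(); past roughly 200 IDs the
    # request is switched to an HTTP POST automatically.
    handle = Entrez.efetch(db="nucleotide", id=["57240072", "186972394"],
                           rettype="gb", retmode="text")
    print(handle.readline().strip())              # first LOCUS line of the batch
    handle.close()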
        


src/n/e/nesoni-0.117/test/test_analyse_samples.py   nesoni
 
        #handle=Entrez.efetch(db='nucleotide',id=acc,rettype='gb')
        handle=Entrez.efetch(db='nuccore',id=acc,rettype='gbwithparts')
        with open(work/(acc+'.gbk'),'wb') as f:
            f.write(handle.read())

src/n/e/nesoni-HEAD/test/test_analyse_samples.py   nesoni
 
        #handle=Entrez.efetch(db='nucleotide',id=acc,rettype='gb')
        handle=Entrez.efetch(db='nuccore',id=acc,rettype='gbwithparts')
        with open(work/(acc+'.gbk'),'wb') as f:
            f.write(handle.read())
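
Both nesoni excerpts write a full GenBank record to disk; a self-contained sketch of that pattern, assuming Python 3 text handles (the accession and output filename are placeholders):

    from Bio import Entrez

    Entrez.email = "Your.Name.Here@example.org"
    acc = "AY851612"                              # placeholder accession
    handle = Entrez.efetch(db="nuccore", id=acc, rettype="gbwithparts", retmode="text")
    with open(acc + ".gbk", "w") as f:            # text mode, since the handle yields str
        f.write(handle.read())
    handle.close()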

src/m/g/mgtaxa-HEAD/MGT/Entrez.py   mgtaxa
        for retstart in range(0,len(ids),self.batchSize):
            retstart = 0
            net_handle = Entrez.efetch(db=self.db,
                    rettype=self.rettype,
                    retstart=retstart,
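
The mgtaxa excerpt above walks the ID list in batches of self.batchSize; a hedged, self-contained sketch of that idea, slicing the list client-side rather than relying on a server-side retstart (IDs and batch size are placeholders):

    from Bio import Entrez

    Entrez.email = "Your.Name.Here@example.org"
    ids = ["57240072", "186972394"]               # placeholder ID list
    batch_size = 100
    chunks = []
    for start in range(0, len(ids), batch_size):
        handle = Entrez.efetch(db="nuccore", id=ids[start:start + batch_size],
                               rettype="gb", retmode="text")
        chunks.append(handle.read())              # raw GenBank text for this batch
        handle.close()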

src/b/i/biopython-HEAD/Scripts/query_pubmed.py   biopython
        end = min(count, start + batch_size)
        #print("Going to download record %i to %i" % (start+1, end))
        fetch_handle = Entrez.efetch(db="pubmed", rettype="medline",
                                     retmode="text",
                                     retstart=start, retmax=batch_size,

src/b/i/biopython-1.63/Scripts/query_pubmed.py   biopython
        end = min(count, start + batch_size)
        #print("Going to download record %i to %i" % (start+1, end))
        fetch_handle = Entrez.efetch(db="pubmed", rettype="medline",
                                     retmode="text",
                                     retstart=start, retmax=batch_size,
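
Both query_pubmed excerpts page through a result set held on the NCBI history server; a minimal sketch of that pattern, assuming an ESearch with usehistory="y" (the search term is a placeholder):

    from Bio import Entrez, Medline

    Entrez.email = "Your.Name.Here@example.org"
    search_handle = Entrez.esearch(db="pubmed", term="biopython", usehistory="y")
    search = Entrez.read(search_handle)
    search_handle.close()
    count = int(search["Count"])
    batch_size = 100
    records = []
    for start in range(0, count, batch_size):
        fetch_handle = Entrez.efetch(db="pubmed", rettype="medline", retmode="text",
                                     retstart=start, retmax=batch_size,
                                     webenv=search["WebEnv"],
                                     query_key=search["QueryKey"])
        records.extend(Medline.parse(fetch_handle))   # parsed Medline records
        fetch_handle.close()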

src/p/i/picobio-HEAD/fetch_viruses/fetch_viruses.py   picobio
def download(acc, name, filename):
    fetch_handle = Entrez.efetch("nuccore", rettype="gbwithparts", id=acc)
    #fetch_handle = TogoWS.entry("nuccore", acc)
    data = fetch_handle.read() # defaults to gb
    fetch_handle.close()
        end = min(count, start+batch_size)
        print("Getting accessions for record %i to %i" % (start+1, end))
        fetch_handle = Entrez.efetch(db="nucleotide", rettype="acc", retmode="xml",
                                     retstart=start, retmax=batch_size,
                                     webenv=webenv, query_key=query_key)
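
The picobio excerpt fetches only accession numbers (rettype="acc") for a history-server result set; a hedged sketch of the same idea, assuming the accessions come back one per line (the search term is a placeholder):

    from Bio import Entrez

    Entrez.email = "Your.Name.Here@example.org"
    handle = Entrez.esearch(db="nucleotide", term="Opuntia[orgn] AND rpl16", usehistory="y")
    search = Entrez.read(handle)
    handle.close()
    fetch_handle = Entrez.efetch(db="nucleotide", rettype="acc",
                                 retstart=0, retmax=100,
                                 webenv=search["WebEnv"], query_key=search["QueryKey"])
    accessions = fetch_handle.read().split()      # accession.version strings
    fetch_handle.close()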

src/i/v/ivy-phylo-20120228/ivy/genbank.py   ivy-phylo
    h.close()
    i = r['LinkSetDb'][0]['Link'][0]['Id']
    h = Entrez.efetch(db='taxonomy', id=i, retmode='xml')
    r = Entrez.read(h)[0]
    h.close()
        d = Entrez.read(h)
        h.close()
        h = Entrez.efetch(db="nucleotide", rettype="gb", retmax=len(v),
                          webenv=d["WebEnv"], query_key=d["QueryKey"])
        seqs = SeqIO.parse(h, "genbank")
        d = Entrez.read(h)
        h.close()
        h = Entrez.efetch(db="nucleotide", rettype="gb", retmax=len(v),
                          webenv=d["WebEnv"], query_key=d["QueryKey"])
        seqs = SeqIO.parse(h, "genbank")
def fetchseq(gi):
    global email
    assert email, "set email!"
    Entrez.email = email
    h = Entrez.efetch(db="nucleotide", id=str(gi), rettype="gb")
def fetchtax(taxid):
    global email
    assert email, "set email!"
    Entrez.email = email
    h = Entrez.efetch(db='taxonomy', id=taxid, retmode='xml')
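
fetchtax() above pulls a taxonomy record as XML, and an earlier excerpt splits its Lineage field; a minimal sketch of reading such a record (TaxID 9606 is just an illustrative value):

    from Bio import Entrez

    Entrez.email = "Your.Name.Here@example.org"
    handle = Entrez.efetch(db="taxonomy", id="9606", retmode="xml")
    record = Entrez.read(handle)[0]               # one dict per requested TaxID
    handle.close()
    print(record["ScientificName"])               # organism name
    print(record["Lineage"].split("; "))          # lineage split as in the excerpt above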

src/f/a/fastools-0.9.0/fastools/fastools.py   fastools
    try:
        if start:
            handle = Entrez.efetch(db="nuccore", rettype="fasta", id=acc,
                seq_start=start, seq_stop=stop, strand=orientation)
        else:
            handle = Entrez.efetch(db="nuccore", rettype="fasta", id=acc)

src/p/h/phyloGenerator-HEAD/phyloGenerator.py   phyloGenerator
    while finished <= maxCheck:
        try:
            handleDownload = Entrez.efetch(db="taxonomy", id=taxonID, retmode="xml")
            resultsDownload = Entrez.read(handleDownload)
            handleDownload.close()
        resultsSpName = Entrez.read(handleSpName)
        handleSpName.close()
        handleID = Entrez.efetch(db="taxonomy", id=resultsSpName['IdList'], retmode="xml")
        resultsSpID = Entrez.read(handleID)
        lineage = resultsSpID[0]["Lineage"].split("; ")
def eFetchSeqID(seqID, rettype='gb'):
    finished = 0
    while finished <= maxCheck:
        try:
            handle = Entrez.efetch(db="nucleotide", rettype=rettype, retmode="text", id=seqID)
def eFetchESearch(eSearchOutput, rettype='gb'):
    finished = 0
    while finished <= maxCheck:
        try:
            handle = Entrez.efetch(db="nucleotide", rettype=rettype, retmode="text", webenv=eSearchOutput['WebEnv'], query_key=eSearchOutput['QueryKey'])

src/p/y/PyCon2013_SNA-HEAD/src/pubmed/pubmed.py   PyCon2013_SNA
        chunks=[idlist[i:i+100] for i in range(0, len(idlist), 100)]
        for chunk in chunks:
            handle = Entrez.efetch(db="pubmed", id=chunk, rettype="medline", retmode="text")
            articles.extend(list(Medline.parse(handle)))
            print '#'
            #pbar.update(p.currval+len(chunk))
    else:
        handle=Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="text")
