
All samples: 11  |  Calls: 8  |  Derives: 0  |  Imports: 3

src/t/a/Taxonome-1.0/taxonome/services/grin.py
from taxonome.taxa.base import UncertainSpeciesError
from taxonome.taxa.collection import TaxaResource
from .utils import urlopen

# Imports implied by the excerpts below but not shown in them (assuming bs4 on Python 3):
import re
from urllib.parse import urlencode
from bs4 import BeautifulSoup

locnre = re.compile(r'Collected in: (.+?)<')

    # Excerpt from a search function: POST the query and inspect the result page.
    query = urlencode({"search": query}).encode('utf-8')
    page = BeautifulSoup(urlopen(TAX_SEARCH_URL, query))
    # Pages for single taxa have h2, a list of results has h1
    if page.h2:

def _read_acc_page(acc_url):
    acc_page = BeautifulSoup(urlopen(acc_url))
    detail_para = acc_page('p')[0]
    detail_para_s = str(detail_para)
    details = {}

    # Excerpt from an accession-listing function; each accession is reported as
    # (numeric id, PI number or equivalent, Accession name).
    if withUnavailable:
        listpage = urlopen(TAX_ACCb, "taxno=%s&rownum=0&sort=numb&unavail=off" % sp_id)
    else:
        listpage = urlopen(TAX_ACCb, "taxno=%s&rownum=0&sort=numb" % sp_id)
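
All three services import a shared urlopen wrapper from .utils rather than calling urllib directly. Below is a minimal sketch of what such a wrapper could look like, assuming it simply forwards to urllib.request.urlopen and accepts an optional POST body (str or bytes), as the call sites above suggest; the real Taxonome helper may add headers, retries or caching.

import urllib.request

def urlopen(url, data=None):
    """Fetch url, optionally POSTing data; returns a file-like HTTP response."""
    if isinstance(data, str):
        # urllib requires bytes for the request body
        data = data.encode('utf-8')
    req = urllib.request.Request(url, data=data)
    return urllib.request.urlopen(req)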

src/t/a/Taxonome-1.0/taxonome/services/tnrs.py
from taxonome.taxa.collection import build_matched_taxonset, combine_dicts
from taxonome.tracker import noop_tracker, prepare_tracker
from .utils import urlopen

# Imports implied by the excerpt below but not shown in it:
import io
import json
from urllib.parse import urlencode

def call_api(names, retrieve="best"):
    names = ",".join(names)
    query = urlencode(dict(names=names, retrieve=retrieve))
    # base_url (the TNRS endpoint) is defined elsewhere in the module, not in this excerpt.
    return json.load(io.TextIOWrapper(urlopen(base_url + query), 'utf-8'))
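
A possible way to invoke call_api, shown for illustration only: the plant names are arbitrary, and the structure of the decoded result is whatever JSON the TNRS web service returns.

matches = call_api(["Zea mays", "Solanum lycopersicum"], retrieve="best")
print(matches)   # a dict or list decoded from the service's JSON response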

src/t/a/Taxonome-1.0/taxonome/services/col.py
from taxonome.taxa.collection import TaxaResource
from taxonome.taxa.taxonshelf import TaxonShelf
from .utils import urlopen

# Imports implied by the excerpt below but not shown in it:
from urllib.parse import urlencode
from xml.etree import ElementTree

BASE_URL = "http://www.catalogueoflife.org/annual-checklist/2010/webservice?"

def _get_xml(name):
    "Get the XML for a given name."
    query = urlencode({'name': name, 'response': 'full'})
    return ElementTree.parse(urlopen(BASE_URL + query))
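
For illustration, _get_xml might be used as below; the taxon name is arbitrary, and since the Catalogue of Life response schema is not shown in the excerpt, only generic ElementTree inspection is used.

tree = _get_xml("Panthera leo")   # hypothetical query name
root = tree.getroot()
print(root.tag, root.attrib)      # inspect the top-level response element
for child in root:
    print(child.tag)              # list the first-level child elements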