

src/p/y/pyscholar-HEAD/packages/classes.py
from BeautifulSoup import BeautifulSoup
#import BSXPath.BSXPathEvaluator, BSXPath.XPathResult
from BSXPath import BSXPathEvaluator, XPathResult
#http://www.crummy.com/software/BeautifulSoup/documentation.html
 
    def detectWeb(self, doc, url):
        # Accept either a raw HTML string or an already-built evaluator.
        if type(doc) == type(""):
            document = BSXPathEvaluator(doc)
        else:
            document = doc
        # Skip export-download URLs and the ScienceDirect login page.
        if url.count("_ob=DownloadURL") != 0 or document.title == "ScienceDirect Login":
            return False
        # Presence of the "export article" icon indicates a recognisable article page.
        if document.evaluate('//*[contains(@src, "exportarticle_a.gif")]', document, None, XPathResult.ANY_TYPE, None):
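
A minimal, self-contained sketch of the same presence check, assuming only the BSXPath calls shown in these samples (BSXPathEvaluator and getItemList); the HTML string here is invented for illustration.

from BSXPath import BSXPathEvaluator

# Hypothetical page content; detectWeb() above receives the real document
# from its caller instead.
html = '<html><body><img src="/images/exportarticle_a.gif"/></body></html>'
document = BSXPathEvaluator(html)

# getItemList() returns a plain list of matching nodes, so an empty result
# is falsy and the check reads naturally.
if document.getItemList('//*[contains(@src, "exportarticle_a.gif")]'):
    print 'export icon found'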

src/p/y/pyscholar-HEAD/other-code/BSXPath_ver001e/TEST_BSXPath.py
import pdb
import urllib2
 
from BSXPath import BSXPathEvaluator, XPathResult
from BSXPath import ExtDict, typeof, toString
 
  if options.web:
    # Fetch the live directory listing and collect the linked test files.
    fp = urllib2.urlopen(url_data)
    dirdoc = BSXPathEvaluator(fp.read())
    files = map(lambda node: node.get('href'), dirdoc.getItemList('//li/a[@href!="../"]'))
  else:
    # Run against locally stored test data instead of the web.
    data = parseTestData(target, options.web)
    print '[%s]\n%s\n' % (name, data.comment)
    document = BSXPathEvaluator(data.html)
    # Resolve the context node for the XPath expressions under test.
    context = document.evaluate(data.contextExpr, document, None, XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, None).snapshotItem(0)
    tests = data.tests
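
A short sketch of the snapshot pattern used above, assuming the same BSXPath API surface demonstrated in these samples (evaluate() with ORDERED_NODE_SNAPSHOT_TYPE, snapshotItem() and getItemList()); the HTML and XPath here are invented for illustration.

from BSXPath import BSXPathEvaluator, XPathResult

html = '<ul><li><a href="a.html">A</a></li><li><a href="b.html">B</a></li></ul>'
document = BSXPathEvaluator(html)

# evaluate() follows the DOM XPath interface: expression, context node,
# namespace resolver, result type, reusable result object.
result = document.evaluate('//li/a', document, None,
                           XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, None)
print result.snapshotItem(0).get('href')

# The getItemList() shortcut returns every match as a list.
print map(lambda node: node.get('href'), document.getItemList('//li/a'))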