
All Samples(452)  |  Call(2)  |  Derive(0)  |  Import(450)
Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/

Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.

A well-formed XML/HTML document yields a well-formed data structure.
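The samples listed below mostly just import the module. As a quick illustration of the navigate/search/modify idioms described above, here is a minimal sketch against the legacy BeautifulSoup 3 API that these samples use; the HTML string and tag names are made up for illustration.

import BeautifulSoup

html = '<html><body><p class="intro">Hello</p><p>World</p></body></html>'
soup = BeautifulSoup.BeautifulSoup(html)   # parses even sloppy markup into a tree

# Navigate: tags are reachable as attributes of the tree.
print soup.body.p.string                   # u'Hello'

# Search: find/findAll take tag names and attribute filters.
intro = soup.find('p', {'class': 'intro'})
paragraphs = soup.findAll('p')

# Modify: tags can be edited in place and re-serialized.
intro['class'] = 'greeting'
print soup.prettify()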

src/a/n/antisocial-HEAD/ve/share/doc/ipython/examples/parallel/fetchparse.py   antisocial
from IPython.parallel import Client, error
import time
import BeautifulSoup # this isn't necessary, but it helps throw the dependency error earlier
 
def fetchAndParse(url, data=None):
    import urllib2
    import urlparse
    import BeautifulSoup

src/i/p/ipython-py3k-HEAD/docs/examples/newparallel/fetchparse.py   ipython-py3k
from IPython.parallel import Client, error
import time
import BeautifulSoup # this isn't necessary, but it helps throw the dependency error earlier
 
def fetchAndParse(url, data=None):
    import urllib.request, urllib.error, urllib.parse
    import urllib.parse
    import BeautifulSoup

src/p/y/pydanny-event-notes-HEAD/Pycon2009/scrape/scrape_material/examples/search/urllib2-user-agent/google_as_ie.py   pydanny-event-notes
import urllib2
import urllib
import BeautifulSoup
 
GOOGLE_BASE='http://google.com/search?q='
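The path suggests google_as_ie.py queries Google while presenting an Internet Explorer User-Agent. Purely as a hypothetical sketch of that pattern using the modules imported above (the User-Agent string, helper name, and query handling are assumptions, not the repository's code):

def search_google(query):
    # Build a request that claims to be IE so Google serves the plain HTML page,
    # then parse the result into a BeautifulSoup tree.
    url = GOOGLE_BASE + urllib.quote(query)
    req = urllib2.Request(url, headers={'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'})
    return BeautifulSoup.BeautifulSoup(urllib2.urlopen(req).read())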

src/i/p/ipython-2.0.0/examples/Parallel Computing/fetchparse.py   ipython
from IPython.parallel import Client, error
import time
import BeautifulSoup # this isn't necessary, but it helps throw the dependency error earlier
 
def fetchAndParse(url, data=None):
    import urllib2
    import urlparse
    import BeautifulSoup

src/i/p/ipython-HEAD/examples/Parallel Computing/fetchparse.py   ipython
from IPython.parallel import Client, error
import time
import BeautifulSoup # this isn't necessary, but it helps throw the dependency error earlier
 
def fetchAndParse(url, data=None):
    import urllib2
    import urlparse
    import BeautifulSoup

src/p/y/pydanny-event-notes-HEAD/Pycon2009/scrape/scrape_material/examples/tree-builders/beautifulsoup_yfinance.py   pydanny-event-notes
import BeautifulSoup
import urllib2
import re
 
def get_last_trade(ticker):

src/p/y/pydanny-event-notes-HEAD/Pycon2009/scrape/scrape_material/examples/tree-builders/beautifulsoup_parse.py   pydanny-event-notes
import BeautifulSoup
import urllib2
 
def make_tree():
    fd = urllib2.urlopen('http://mehfilindian.com/LunchMenuTakeOut.htm')
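The snippet above is cut off inside make_tree(). As a hypothetical completion (not the repository's actual code), the function would typically just hand the fetched HTML to BeautifulSoup and return the resulting tree:

def make_tree():
    # Fetch the page and let BeautifulSoup build a navigable tree,
    # even if the markup is not perfectly well-formed.
    fd = urllib2.urlopen('http://mehfilindian.com/LunchMenuTakeOut.htm')
    soup = BeautifulSoup.BeautifulSoup(fd.read())
    fd.close()
    return soup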

src/p/y/pydanny-event-notes-HEAD/Pycon2009/scrape/scrape_material/examples/search/yahoo.py   pydanny-event-notes
import urllib2
import urllib
import BeautifulSoup
 
YAHOO_BASE='http://search.yahoo.com/search?p='

src/p/y/pydanny-event-notes-HEAD/Pycon2009/scrape/scrape_material/examples/search/google.py   pydanny-event-notes
import urllib2
import urllib
import BeautifulSoup
 
GOOGLE_BASE='http://google.com/search?q='

src/p/y/pydanny-event-notes-HEAD/Pycon2009/scrape/scrape_material/examples/cepstral/just_post.py   pydanny-event-notes
import urllib2
import urllib
import BeautifulSoup
 
# The URL to this service
