

src/c/o/congress-HEAD/tasks/vote_info.py   congress
    # output JSON - so easy!
    utils.write(
        json.dumps(vote, sort_keys=True, indent=2, default=utils.format_datetime),
        output_for_vote(vote["vote_id"], "json"),
    )
 
    utils.write(
        xmloutput,
        output_for_vote(vote['vote_id'], "xml")
    )
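
Several of the congress samples pass default=utils.format_datetime so that json.dumps can serialize datetime values. A default hook of that kind is usually just a thin ISO 8601 converter; the following is a minimal sketch of the idea, not the project's actual helper:

    from datetime import date, datetime

    def format_datetime(obj):
        # Turn datetime/date objects into ISO 8601 strings for json.dumps;
        # reject anything else so genuine serialization bugs still raise.
        if isinstance(obj, (datetime, date)):
            return obj.isoformat()
        raise TypeError("Not JSON serializable: %r" % obj)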

src/p/e/petitions-HEAD/scripts/twitter.py   petitions
from urlparse import urlparse
from petitions import crawl
from utils import log, download, write, log_dir
from datetime import datetime
 
                    elif data["status"] == "active" or data["status"] == "answered":
                        scrapelog["signatures"][petition_path.split("/")[2]] = data["signatures"]
                        write(json.dumps(data, indent=2, sort_keys=True), petition_path.split("/")[2] + ".json")
 
    #write log
    scrapelog["query"] = args.query
    scrapelog["end"] = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    write(json.dumps(scrapelog, indent=2), "log-tw-" + scrapelog["begin"] + ".json", log_dir())
    log("Done. Found total %i petitions" % (len(scrapelog["signatures"])))
 

src/c/o/congress-HEAD/tasks/bill_info.py   congress
    for fmt in formats:
        if gpo_urls and fmt in gpo_urls:
            utils.write(utils.download(gpo_urls[fmt], bill_cache_for(bill_id, "bill." + fmt), {'binary': True}), output_for_bill(bill_id, fmt))
            logging.info("Saving %s format for %s" % (fmt, bill_id))
            status[fmt] = True
    # output JSON - so easy!
    utils.write(
        json.dumps(bill, sort_keys=True, indent=2, default=utils.format_datetime),
        output_for_bill(bill['bill_id'], "json")
    )
 
    utils.write(
        etree.tostring(root, pretty_print=True),
        output_for_bill(bill['bill_id'], "xml")
    )
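
The nested call above chains a download straight into a write: utils.download fetches the GPO file (caching it under bill_cache_for(bill_id, ...) and treating it as binary), and utils.write saves the returned body to the output path. Stripped of the project helpers, the pattern reduces to roughly this standard-library sketch (assuming Python 3; the real helpers also handle caching and options):

    from urllib.request import urlopen

    def mirror(url, destination):
        # Fetch the remote document and save it verbatim (binary-safe).
        data = urlopen(url).read()
        with open(destination, "wb") as f:
            f.write(data)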

src/p/e/petitions-HEAD/scripts/petitions.py   petitions
import scrapelib
from lxml.html import etree
from utils import log, download, write, log_dir
 
#initialize scraper and parser
            elif data["status"] == "active":
                scrapelog["signatures"][path.split("/")[2]] = data["signature_count"]
                write(json.dumps(data, indent=2, sort_keys=True), "scrape/petitions/" + data['id'] + ".json")
                hits += 1
                if mx != -1 and hits >= mx:
    #write log
    scrapelog["end"] = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    write(json.dumps(scrapelog, indent=2), "log-wh-" + scrapelog["begin"] + ".json", log_dir())
 
if __name__ == "__main__":

src/c/o/congress-HEAD/tasks/fdsys.py   congress
    # we need to fetch the file.
    if lastmod and not options.get("cached", False):
        utils.write(lastmod, lastmod_cache_file)
 
    try:
    # we need to fetch the files for this sitemap item.
    if lastmod and not options.get("cached", False):
        utils.write(lastmod, lastmod_cache_file)
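
In fdsys.py the sitemap's Last-Modified value is itself written to a small cache file right after a fetch; a later run can compare the stored value against the fresh one to decide whether anything needs re-downloading. Roughly, and with a hypothetical helper name rather than the task's actual control flow:

    import os

    def lastmod_changed(lastmod, lastmod_cache_file):
        # Compare the current Last-Modified value with the one recorded by
        # the previous run; a missing cache file counts as "changed".
        if not os.path.exists(lastmod_cache_file):
            return True
        with open(lastmod_cache_file) as f:
            return f.read().strip() != lastmod.strip()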
 
 

src/c/o/congress-HEAD/tasks/deepbills.py   congress
def write_bill_catoxml(bill_version_id, options):
    catoxml_filename = catoxml_filename_for(bill_version_id)
 
    utils.write(
        extract_xml_from_json(fetch_single_bill_json(bill_version_id)),

src/c/o/congress-HEAD/tasks/bills.py   congress
            fast_cache_path = utils.cache_dir() + "/" + bill_info.bill_cache_for(bill_id, "search_result.html")
            new_state = search_state[bill_id]
            utils.write(new_state, fast_cache_path)
 
 

src/c/o/congress-HEAD/tasks/upcoming_house_floor.py   congress
    output_file = "%s/upcoming_house_floor/%s.json" % (utils.data_dir(), for_the_week)
    output = json.dumps(house_floor, sort_keys=True, indent=2, default=utils.format_datetime)
    utils.write(output, output_file)
 
    logging.warn("\nFound %i bills for the week of %s, written to %s" % (len(house_floor['upcoming']), for_the_week, output_file))

src/c/o/congress-HEAD/tasks/amendment_info.py   congress
    # output JSON - so easy!
    utils.write(
        json.dumps(amdt, sort_keys=True, indent=2, default=utils.format_datetime),
        output_for_amdt(amdt['amendment_id'], "json")
    )
 
    utils.write(
        etree.tostring(root, pretty_print=True),
        output_for_amdt(amdt['amendment_id'], "xml")
    )

src/c/o/congress-legislators-HEAD/scripts/wikipedia_ids.py   congress-legislators
		# Query Wikipedia API and save to cache.
		matching_pages = get_matching_pages()
		utils.write(("\n".join(matching_pages)), page_list_cache_file)
 
	# Filter out things that aren't actually pages (User:, Talk:, etcetera, anything with a colon).
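
Taken together, the call sites show a consistent shape: write(content, destination) in the congress tasks, and write(content, filename, log_dir()) in the petitions scripts, where the third argument is an optional base directory. A helper compatible with those calls could be as small as the sketch below; this is an inference from the samples above, not the repositories' actual implementation:

    import os

    def write(content, destination, directory=None):
        # Resolve the optional base directory, create missing parent
        # folders, then dump the content to disk.
        path = os.path.join(directory, destination) if directory else destination
        parent = os.path.dirname(path)
        if parent and not os.path.exists(parent):
            os.makedirs(parent)
        with open(path, "w") as f:
            f.write(content)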
