
All samples (66)  |  Call (0)  |  Derive (66)  |  Import (0)

src/b/i/bigquery-appengine-datastore-import-sample-HEAD/main.py   bigquery-appengine-datastore-import-sample
class DatastoreMapperPipeline(base_handler.PipelineBase):
  def run(self, entity_type):
    output = yield mapreduce_pipeline.MapperPipeline(
      "Datastore Mapper %s" % entity_type,
      "main.datastore_map",
      # ... remaining MapperPipeline arguments elided in this sample ...
      )

class CloudStorageToBigQuery(base_handler.PipelineBase):
  def run(self, csv_output):
    credentials = AppAssertionCredentials(scope=SCOPE)
    http = credentials.authorize(httplib2.Http())
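The two classes above chain together: DatastoreMapperPipeline yields a child MapperPipeline, and the resulting file list is handed to CloudStorageToBigQuery, which builds an authorized BigQuery client. A minimal sketch of the pattern, assuming illustrative values for SCOPE and the input-reader spec (neither is taken verbatim from the sample):

import httplib2
from apiclient.discovery import build
from oauth2client.appengine import AppAssertionCredentials
from mapreduce import base_handler
from mapreduce import mapreduce_pipeline

SCOPE = "https://www.googleapis.com/auth/bigquery"  # assumed scope

class DatastoreToBigQuery(base_handler.PipelineBase):
  def run(self, entity_type):
    # Yielding a child pipeline schedules it; `output` is a future-like
    # handle to the mapper's output file list.
    output = yield mapreduce_pipeline.MapperPipeline(
        "Datastore Mapper %s" % entity_type,
        "main.datastore_map",
        "mapreduce.input_readers.DatastoreInputReader",  # assumed reader
        params={"entity_kind": entity_type})
    yield CloudStorageToBigQuery(output)

class CloudStorageToBigQuery(base_handler.PipelineBase):
  def run(self, csv_output):
    # On App Engine, the app's service account authorizes via
    # AppAssertionCredentials.
    credentials = AppAssertionCredentials(scope=SCOPE)
    http = credentials.authorize(httplib2.Http())
    bigquery = build("bigquery", "v2", http=http)
    # ... submit a load job referencing csv_output here ...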

src/8/0/8-bits-HEAD/appengine-mapreduce/python/src/mapreduce/shuffler.py   8-bits
class _SortChunksPipeline(base_handler.PipelineBase):
  """A pipeline to sort multiple key-value files.

  Args:
    job_name: root job name.
  """

class _CollectOutputFiles(base_handler.PipelineBase):
  """Collect output file names from _OutputFile entities for given jobs.

  Args:
    job_ids: list of job ids to load filenames.
  """

class _CleanupOutputFiles(base_handler.PipelineBase):
  """Cleanup _OutputFile entities for given job ids.

  Args:
    job_ids: list of job ids.
  """

class _ShardOutputs(base_handler.PipelineBase):
  """Takes a flat list of filenames, returns a list of lists, each with
  one member each.
  """

class _MergePipeline(base_handler.PipelineBase):
  """Pipeline to merge sorted chunks.

  This pipeline merges together individually sorted chunks of each shard.
  """
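The snippets here are cut off at the docstrings, but _ShardOutputs is fully specified by its summary line, so a faithful reconstruction (not the library's verbatim source) is short:

from mapreduce import base_handler

class _ShardOutputs(base_handler.PipelineBase):
  """Wraps each filename in a single-member list."""

  def run(self, filenames):
    # [a, b, c] -> [[a], [b], [c]], so each file can feed a downstream
    # stage as its own shard.
    return [[name] for name in filenames]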

src/e/v/EVE-Prosper-HEAD/Scraps/old_dbinit/mapreduce/shuffler.py   EVE-Prosper
class _SortChunksPipeline(base_handler.PipelineBase):
  """A pipeline to sort multiple key-value files.

  Args:
    job_name: root job name.
  """

class _CollectOutputFiles(base_handler.PipelineBase):
  """Collect output file names from _OutputFile entities for given jobs.

  Args:
    job_ids: list of job ids to load filenames.
  """

class _CleanupOutputFiles(base_handler.PipelineBase):
  """Cleanup _OutputFile entities for given job ids.

  Args:
    job_ids: list of job ids.
  """

class _ShardOutputs(base_handler.PipelineBase):
  """Takes a flat list of filenames, returns a list of lists, each with
  one member each.
  """

class _MergePipeline(base_handler.PipelineBase):
  """Pipeline to merge sorted chunks.

  This pipeline merges together individually sorted chunks of each shard.
  """

src/r/p/RPI-Directory-HEAD/appengine/mapreduce/shuffler.py   RPI-Directory
class _SortChunksPipeline(base_handler.PipelineBase):
  """A pipeline to sort multiple key-value files.

  Args:
    job_name: root job name.
  """

class _CollectOutputFiles(base_handler.PipelineBase):
  """Collect output file names from _OutputFile entities for given jobs.

  Args:
    job_ids: list of job ids to load filenames.
  """

class _CleanupOutputFiles(base_handler.PipelineBase):
  """Cleanup _OutputFile entities for given job ids.

  Args:
    job_ids: list of job ids.
  """

class _ShardOutputs(base_handler.PipelineBase):
  """Takes a flat list of filenames, returns a list of lists, each with
  one member each.
  """

class _MergePipeline(base_handler.PipelineBase):
  """Pipeline to merge sorted chunks.

  This pipeline merges together individually sorted chunks of each shard.
  """

src/m/o/MOL-HEAD/app/mapreduce/mapreduce_pipeline.py   MOL
class MapPipeline(base_handler.PipelineBase):
  """Runs the map stage of MapReduce.

  Iterates over input reader and outputs data into key/value format
  for shuffler consumption.
  """

class ReducePipeline(base_handler.PipelineBase):
  """Runs the reduce stage of MapReduce.

  Merge-reads input files and runs reducer function on them.
  """

class ShufflePipeline(base_handler.PipelineBase):
  """A pipeline to sort multiple key-value files.

  Args:
    filenames: list of file names to sort. Files have to be of records format
  """

class CleanupPipeline(base_handler.PipelineBase):
  """A pipeline to do a cleanup for mapreduce jobs.

  Args:
    temp_files: list of lists of temporary files generated by mapreduce
  """

class MapreducePipeline(base_handler.PipelineBase):
  """Pipeline to execute MapReduce jobs.

  Args:
    job_name: job name as string.
  """
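MapreducePipeline is the stage that ties the other four together: each yield schedules a child pipeline, and later children consume earlier children's outputs as futures. A condensed sketch with abbreviated argument lists, using the classes listed above (the import path of the pipeline package varies by bundle, so it is an assumption here):

from mapreduce.lib import pipeline  # assumed bundle location

class MapreducePipeline(base_handler.PipelineBase):
  def run(self, job_name, mapper_spec, reducer_spec,
          input_reader_spec, **kwargs):
    map_out = yield MapPipeline(job_name, mapper_spec,
                                input_reader_spec, **kwargs)
    shuffle_out = yield ShufflePipeline(job_name, map_out)
    reduce_out = yield ReducePipeline(job_name, reducer_spec,
                                      shuffle_out, **kwargs)
    with pipeline.After(reduce_out):
      # Intermediate map/shuffle files are deleted only once the
      # reducer has finished reading them.
      yield CleanupPipeline(map_out)
      yield CleanupPipeline(shuffle_out)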

src/d/a/Darwin-Core-Engine-HEAD/lib/appengine-mapreduce/mapreduce_pipeline.py   Darwin-Core-Engine
class MapPipeline(base_handler.PipelineBase):
  """Runs the map stage of MapReduce.

  Iterates over input reader and outputs data into key/value format
  for shuffler consumption.
  """

class ReducePipeline(base_handler.PipelineBase):
  """Runs the reduce stage of MapReduce.

  Merge-reads input files and runs reducer function on them.
  """

class ShufflePipeline(base_handler.PipelineBase):
  """A pipeline to sort multiple key-value files.

  Args:
    filenames: list of file names to sort. Files have to be of records format
  """

class CleanupPipeline(base_handler.PipelineBase):
  """A pipeline to do a cleanup for mapreduce jobs.

  Args:
    temp_files: list of lists of temporary files generated by mapreduce
  """

class MapreducePipeline(base_handler.PipelineBase):
  """Pipeline to execute MapReduce jobs.

  Args:
    job_name: job name as string.
  """

src/8/0/8-bits-HEAD/appengine-mapreduce/python/demo/main.py   8-bits
class WordCountPipeline(base_handler.PipelineBase):
  """A pipeline to run Word count demo.

  Args:
    blobkey: blobkey to process as string. Should be a zip archive with
  """

class IndexPipeline(base_handler.PipelineBase):
  """A pipeline to run Index demo.

  Args:
    blobkey: blobkey to process as string. Should be a zip archive with
  """

class PhrasesPipeline(base_handler.PipelineBase):
  """A pipeline to run Phrases demo.

  Args:
    blobkey: blobkey to process as string. Should be a zip archive with
  """

class StoreOutput(base_handler.PipelineBase):
  """A pipeline to store the result of the MapReduce job in the database.

  Args:
    mr_type: the type of mapreduce job run (e.g., WordCount, Index)
  """
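All three demo pipelines follow the same shape: wire a map function and a reduce function into MapreducePipeline, then hand its output to StoreOutput. A sketch of the word-count variant, where the reader/writer specs and shard count are plausible assumptions rather than lines copied from the demo:

class WordCountPipeline(base_handler.PipelineBase):
  def run(self, filekey, blobkey):
    output = yield mapreduce_pipeline.MapreducePipeline(
        "word_count",
        "main.word_count_map",     # mapper: emit (word, "") per word
        "main.word_count_reduce",  # reducer: emit word and its count
        "mapreduce.input_readers.BlobstoreZipInputReader",   # assumed
        "mapreduce.output_writers.BlobstoreOutputWriter",    # assumed
        mapper_params={"blob_key": blobkey},
        reducer_params={"mime_type": "text/plain"},
        shards=16)
    yield StoreOutput("WordCount", filekey, output)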

src/8/0/8-bits-HEAD/appengine-mapreduce/python/src/mapreduce/mapreduce_pipeline.py   8-bits
class MapPipeline(base_handler.PipelineBase):
  """Runs the map stage of MapReduce.

  Iterates over input reader and outputs data into key/value format
  for shuffler consumption.
  """

class ReducePipeline(base_handler.PipelineBase):
  """Runs the reduce stage of MapReduce.

  Merge-reads input files and runs reducer function on them.
  """

class MapreducePipeline(base_handler.PipelineBase):
  """Pipeline to execute MapReduce jobs.

  Args:
    job_name: job name as string.
  """

src/m/o/MOL-HEAD/app/mapreduce/shuffler.py   MOL
class _CollectOutputFiles(base_handler.PipelineBase):
  """Collect output file names from _OutputFile entities for a given job.

  Args:
    job_id: job id to load filenames.
  """

class SortPipeline(base_handler.PipelineBase):
  """A pipeline to sort multiple key-value files.

  Args:
    filenames: list of file names to sort. Files have to be of records format
  """
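This copy of _CollectOutputFiles takes a single job_id rather than a list of job ids. A hedged sketch of what it likely does, assuming _OutputFile is a db.Model whose key name is the filename and whose root ancestor key encodes the job id (both assumptions about the shuffler's bookkeeping):

class _CollectOutputFiles(base_handler.PipelineBase):
  def run(self, job_id):
    # Each _OutputFile entity's key name doubles as the filename.
    entities = _OutputFile.all().ancestor(_OutputFile.get_root_key(job_id))
    return [entity.key().name() for entity in entities]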

src/8/0/8-bits-HEAD/appengine-mapreduce/python/src/mapreduce/mapper_pipeline.py   8-bits
class MapperPipeline(base_handler.PipelineBase):
  """Pipeline wrapper for mapper job.

  Args:
    job_name: mapper job name as string
  """

class _CleanupPipeline(base_handler.PipelineBase):
  """A pipeline to do a cleanup for mapreduce jobs.

  Args:
    filename_or_list: list of files or file lists to delete.
  """
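_CleanupPipeline's docstring says it accepts "files or file lists", which suggests a recursive delete. A sketch of that pattern over the (since-deprecated) App Engine Files API that this library targeted:

from google.appengine.api import files
from mapreduce import base_handler

class _CleanupPipeline(base_handler.PipelineBase):
  def delete_file_or_list(self, filename_or_list):
    if isinstance(filename_or_list, list):
      for item in filename_or_list:
        self.delete_file_or_list(item)
    else:
      try:
        files.delete(filename_or_list)
      except Exception:
        pass  # best-effort cleanup; the file may already be gone

  def run(self, filename_or_list):
    self.delete_file_or_list(filename_or_list)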
