Did I find the right examples for you? Yes / No · Crawl my project · Python Jobs

All Samples(24)  |  Call(24)  |  Derive(0)  |  Import(0)
High level interface to parse GFF files into SeqRecords and SeqFeatures.
    

        def parse(gff_files, base_dict=None, limit_info=None, target_lines=None):
    """High level interface to parse GFF files into SeqRecords and SeqFeatures.
    """
    parser = GFFParser()
    for rec in parser.parse_in_parts(gff_files, base_dict, limit_info,
            target_lines):
        yield rec
        


src/e/d/edge-HEAD/example/parse.py   edge(Download)
def parse(fn):
    in_handle = open(fn)
    for rec in GFF.parse(in_handle):
        for feature in rec.features:
            print feature

src/b/c/bcbb-HEAD/biopython/glimmergff_to_proteins.py   bcbb(Download)
def glimmer_predictions(in_handle, ref_recs):
    """Yield SeqRecords carrying Glimmer predictions as SeqFeatures.

    The GFF input is consumed incrementally, 1000 lines at a time, and
    annotations are layered onto the reference records in *ref_recs*.
    """
    prediction_stream = GFF.parse(in_handle, target_lines=1000,
                                  base_dict=ref_recs)
    for record in prediction_stream:
        yield record

src/c/o/CONCOCT-HEAD/scripts/COG_table.py   CONCOCT(Download)
def read_gff_file(gfffile):
    """Parse *gfffile*, restricted to gene/mRNA/CDS feature types.

    NOTE(review): the loop body is not visible in this excerpt; presumably
    it populates ``featureid_locations`` -- confirm against the full source.
    """
    # Accumulator for feature-id -> location lookups (filled by the loop below).
    featureid_locations={}
    # Restrict the parser to these GFF feature types via ``limit_info``.
    limits=dict(gff_type=["gene","mRNA","CDS"])
    with open(gfffile) as in_handle:
        for rec in GFF.parse(in_handle, limit_info=limits):

src/b/c/bcbb-HEAD/gff/Scripts/gff/gff_to_genbank.py   bcbb(Download)
def main(gff_file, fasta_file):
    """Build a GenBank file from GFF annotations laid over FASTA sequences.

    The output name is the GFF file's name with its extension replaced
    by ``.gb``.
    """
    root = os.path.splitext(gff_file)[0]
    out_file = "%s.gb" % root
    seq_dict = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta", generic_dna))
    records = GFF.parse(gff_file, seq_dict)
    records = _fix_ncbi_id(records)
    records = _check_gff(records)
    SeqIO.write(records, out_file, "genbank")

src/j/c/jcvi-HEAD/formats/gff.py   jcvi(Download)
    out_file = pf + ".gb"
    fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta", generic_dna))
    gff_iter = GFF.parse(gff_file, fasta_input)
    SeqIO.write(gff_iter, out_file, "genbank")
 

src/e/d/edge-HEAD/src/edge/importer.py   edge(Download)
        connection.use_debug_cursor = False
 
        for rec in GFF.parse(in_handle):
            f = GFFFragmentImporter(rec).do_import()
            self.__genome.genome_fragment_set.create(fragment=f, inherited=False)

src/c/o/CONCOCT-HEAD/scripts/evaluation/gen_input_table_taxonomy.py   CONCOCT(Download)
    out_dict = {}
 
    for rec in GFF.parse(gfffile):
 
        # Add features if there are any

src/s/e/seqscripts-HEAD/gtf2fasta.py   seqscripts(Download)
def lookupSequences(files):
    """Collect records parsed from the GTF file named in *files*.

    NOTE(review): truncated in this excerpt -- the handle opened here is not
    visibly closed and ``records`` is not visibly filled; confirm against
    the full source.
    """
    # ``files`` maps 'gtf_file' to a path -- presumably built by the caller.
    gtf_file = open(files['gtf_file'])
    records = []
    for rec in GFF.parse(gtf_file):
        chrom = rec.id

src/b/c/bcbb-HEAD/gff/Tests/test_GFFSeqIOFeatureAdder.py   bcbb(Download)
                gff_id = ['I']
                )
        rec_dict = SeqIO.to_dict(GFF.parse(self._test_gff_file,
            limit_info=cds_limit_info))
        test_rec = rec_dict['I']
                      ('Coding_transcript', 'CDS')]
        limit_info = dict(gff_source_type = rnai_types + gene_types)
        for rec in GFF.parse(gff_file, seq_dict, limit_info=limit_info):
            pass
 
    def t_unknown_seq(self):
        """Prepare unknown base sequences with the correct length.
        """
        rec_dict = SeqIO.to_dict(GFF.parse(self._test_gff_file))
        assert len(rec_dict["I"].seq) == 12766937
        tfile = os.path.join(self._test_dir, "mouse_extra_comma.gff3")
        in_handle = open(tfile)
        for rec in GFF.parse(in_handle):
            pass
        in_handle.close()
                gff_source_type = [('snoRNA', 'exon')]
                )
        rec_dict = SeqIO.to_dict(GFF.parse(self._ensembl_file,
            limit_info=limit_info))
        work_rec = rec_dict['I']