Did I find the right examples for you? yes no      Crawl my project      Python Jobs

All Samples(58)  |  Call(29)  |  Derive(0)  |  Import(29)
Deduce the encoding of a source file from magic comment.

It does this in the same way as the `Python interpreter`__

.. __: https://docs.python.org/3/reference/lexical_analysis.html#encoding-declarations

The ``fp`` argument should be a seekable file object.

(From Jeff Dairiki)

        def parse_encoding(fp):
    """Deduce the encoding of a source file from magic comment.

    It does this in the same way as the `Python interpreter`__

    .. __: http://docs.python.org/ref/encodings.html

    The ``fp`` argument should be a seekable file object.

    (From Jeff Dairiki)
    """
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            line1 = line1[len(codecs.BOM_UTF8):]

        m = PYTHON_MAGIC_COMMENT_re.match(line1)
        if not m:
            try:
                import parser
                parser.suite(line1.decode('latin-1'))
            except (ImportError, SyntaxError):
                # Either it's a real syntax error, in which case the source is
                # not valid python source, or line2 is a continuation of line1,
                # in which case we don't want to scan line2 for a magic
                # comment.
                pass
            else:
                line2 = fp.readline()
                m = PYTHON_MAGIC_COMMENT_re.match(line2)

        if has_bom:
            if m:
                raise SyntaxError(
                    "python refuses to compile code with both a UTF8 "
                    "byte-order-mark and a magic encoding comment")
            return 'utf_8'
        elif m:
            return m.group(1).decode('latin-1')
        else:
            return None
    finally:
        fp.seek(pos)
        


src/h/u/hue-HEAD/desktop/core/ext-py/Babel-0.9.6/babel/messages/extract.py   hue(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/h/o/hortonworks-sandbox-HEAD/desktop/core/ext-py/Babel-0.9.6/babel/messages/extract.py   hortonworks-sandbox(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/p/l/plexnet-HEAD/third_party/python/babel/messages/extract.py   plexnet(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath, set
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/f/i/FireCheckIn-HEAD/kay/lib/babel/messages/extract.py   FireCheckIn(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath, set
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/k/a/kay-HEAD/kay/lib/babel/messages/extract.py   kay(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath, set
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/k/i/kitsune-HEAD/vendor/packages/Babel/babel/messages/extract.py   kitsune(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/m/i/microblog-HEAD/flask/lib/python2.7/site-packages/babel/messages/extract.py   microblog(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from babel._compat import PY2, text_type
from textwrap import dedent
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    if PY2:

src/s/o/socorro-HEAD/webapp-django/vendor/lib/python/babel/messages/extract.py   socorro(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/f/j/fjord-HEAD/vendor/packages/Babel/babel/messages/extract.py   fjord(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from babel._compat import PY2, text_type
from textwrap import dedent
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    if PY2:

src/g/a/gae-boilerplate-HEAD/bp_includes/external/babel/messages/extract.py   gae-boilerplate(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

  1 | 2 | 3  Next