Did I find the right examples for you? yes no      Crawl my project      Python Jobs

All Samples(58)  |  Call(29)  |  Derive(0)  |  Import(29)
Deduce the encoding of a source file from its magic comment.

It does this in the same way as the `Python interpreter`__

.. __: http://docs.python.org/ref/encodings.html

The ``fp`` argument should be a seekable file object.

(From Jeff Dairiki)

        def parse_encoding(fp):
    """Deduce the encoding of a source file from magic comment.

    It does this in the same way as the `Python interpreter`__

    .. __: http://docs.python.org/ref/encodings.html

    The ``fp`` argument should be a seekable file object.

    (From Jeff Dairiki)
    """
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            line1 = line1[len(codecs.BOM_UTF8):]

        m = PYTHON_MAGIC_COMMENT_re.match(line1)
        if not m:
            try:
                import parser
                parser.suite(line1.decode('latin-1'))
            except (ImportError, SyntaxError):
                # Either it's a real syntax error, in which case the source is
                # not valid python source, or line2 is a continuation of line1,
                # in which case we don't want to scan line2 for a magic
                # comment.
                pass
            else:
                line2 = fp.readline()
                m = PYTHON_MAGIC_COMMENT_re.match(line2)

        if has_bom:
            if m:
                raise SyntaxError(
                    "python refuses to compile code with both a UTF8 "
                    "byte-order-mark and a magic encoding comment")
            return 'utf_8'
        elif m:
            return m.group(1).decode('latin-1')
        else:
            return None
    finally:
        fp.seek(pos)
        


src/b/a/babel-HEAD/babel/messages/extract.py   babel(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath, set
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/s/t/stackgeek-gaeb-HEAD/lib/externals/babel/messages/extract.py   stackgeek-gaeb(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/b/o/bomfu-HEAD/boilerplate/external/babel/messages/extract.py   bomfu(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/a/l/algae-HEAD/gae/libs/babel/messages/extract.py   algae(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/l/i/livepythonconsole-app-engine-HEAD/boilerplate/external/babel/messages/extract.py   livepythonconsole-app-engine(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/g/l/glow-HEAD/vendor/lib/python/babel/messages/extract.py   glow(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/w/e/WebPutty-HEAD/libs/babel/messages/extract.py   WebPutty(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/m/e/me-HEAD/libs/babel/messages/extract.py   me(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

src/z/e/ZenPacks.zenoss.OpenStack-HEAD/src/Babel-1.3/babel/messages/extract.py   ZenPacks.zenoss.OpenStack(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from babel._compat import PY2, text_type
from textwrap import dedent
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    if PY2:

src/w/e/webapp-improved-HEAD/lib/babel/babel/messages/extract.py   webapp-improved(Download)
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
 
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
 
    comment_tag = None
 
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
 
    tokens = generate_tokens(fileobj.readline)

Previous  1 | 2 | 3  Next