"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
import builtins
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import re
import sys
from token import *

cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
    '(':   LPAR,
    ')':   RPAR,
    '[':   LSQB,
    ']':   RSQB,
    ':':   COLON,
    ',':   COMMA,
    ';':   SEMI,
    '+':   PLUS,
    '-':   MINUS,
    '*':   STAR,
    '/':   SLASH,
    '|':   VBAR,
    '&':   AMPER,
    '<':   LESS,
    '>':   GREATER,
    '=':   EQUAL,
    '.':   DOT,
    '%':   PERCENT,
    '{':   LBRACE,
    '}':   RBRACE,
    '==':  EQEQUAL,
    '!=':  NOTEQUAL,
    '<=':  LESSEQUAL,
    '>=':  GREATEREQUAL,
    '~':   TILDE,
    '^':   CIRCUMFLEX,
    '<<':  LEFTSHIFT,
    '>>':  RIGHTSHIFT,
    '**':  DOUBLESTAR,
    '+=':  PLUSEQUAL,
    '-=':  MINEQUAL,
    '*=':  STAREQUAL,
    '/=':  SLASHEQUAL,
    '%=':  PERCENTEQUAL,
    '&=':  AMPEREQUAL,
    '|=':  VBAREQUAL,
    '^=':  CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//':  DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '@':   AT
}

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type
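
# A minimal usage sketch (hypothetical helper, not used elsewhere in this
# module): exact_type refines a generic OP token into its specific operator
# type via EXACT_TOKEN_TYPES, while non-OP tokens report their ordinary type.
def _demo_exact_type():
    tok = TokenInfo(OP, '+=', (1, 2), (1, 4), 'x += 1\n')
    assert tok.type == OP
    assert tok.exact_type == PLUSEQUAL
    return tok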

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

def _compile(expr):
    return re.compile(expr, re.UNICODE)
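
# A minimal sketch (hypothetical helper) of how the pseudo-token pattern is
# used: match() is applied at successive positions and group 1 spans the token
# text after any leading whitespace, mirroring the scanning loop in _tokenize
# further below.
def _demo_pseudomatch(line="x = 1  # comment\n"):
    pos, found = 0, []
    pseudoprog = _compile(PseudoToken)
    while pos < len(line):
        match = pseudoprog.match(line, pos)
        if not match or match.start(1) == match.end(1):
            break
        found.append(line[match.start(1):match.end(1)])
        pos = match.end(1)
    return found    # ['x', '=', '1', '# comment', '\n']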

endpats = {"'": Single, '"': Double,
           "'''": Single3, '"""': Double3,
           "r'''": Single3, 'r"""': Double3,
           "b'''": Single3, 'b"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "B'''": Single3, 'B"""': Double3,
180
           "br'''": Single3, 'br"""': Double3,
181 182 183
           "bR'''": Single3, 'bR"""': Double3,
           "Br'''": Single3, 'Br"""': Double3,
           "BR'''": Single3, 'BR"""': Double3,
184 185 186 187
           "rb'''": Single3, 'rb"""': Double3,
           "Rb'''": Single3, 'Rb"""': Double3,
           "rB'''": Single3, 'rB"""': Double3,
           "RB'''": Single3, 'RB"""': Double3,
188 189 190 191 192
           "u'''": Single3, 'u"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "U'''": Single3, 'U"""': Double3,
           'r': None, 'R': None, 'b': None, 'B': None,
           'u': None, 'U': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',
          "rb'''", 'rb"""', "rB'''", 'rB"""',
          "Rb'''", 'Rb"""', "RB'''", 'RB"""',
          "u'''", 'u"""', "U'''", 'U"""',
          ):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"' ,
          "rb'", 'rb"', "rB'", 'rB"',
          "Rb'", 'Rb"', "RB'", 'RB"' ,
          "u'", 'u"', "U'", 'U"',
          ):
    single_quoted[t] = t

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
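
# A minimal round-trip sketch (hypothetical helper) for the limited,
# two-element form documented above: tokenize a small source, keep only the
# (type, string) pairs, untokenize them, and re-tokenize to the same pairs.
def _demo_untokenize_roundtrip():
    from io import BytesIO
    source = b"x = 3 * (4 + 5)\n"
    t1 = [tok[:2] for tok in tokenize(BytesIO(source).readline)]
    newcode = untokenize(t1)
    t2 = [tok[:2] for tok in tokenize(BytesIO(newcode).readline)]
    assert t1 == t2
    return newcode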


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in PEP 263.  If both a BOM and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, a SyntaxError will also be raised.  Note that if a UTF-8
    BOM is found, 'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                        encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
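
# A minimal sketch (hypothetical helper): detect_encoding() on an in-memory
# source with a PEP 263 coding cookie.  It reads at most two lines and hands
# them back, still as bytes.
def _demo_detect_encoding():
    from io import BytesIO
    source = b"# -*- coding: iso-8859-1 -*-\nnom = 'caf\xe9'\n"
    encoding, lines = detect_encoding(BytesIO(source).readline)
    assert encoding == "iso-8859-1"
    assert lines == [b"# -*- coding: iso-8859-1 -*-\n"]
    return encoding, lines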


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = builtins.open(filename, 'rb')
    encoding, lines = detect_encoding(buffer.readline)
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, line_buffering=True)
    text.mode = 'r'
    return text
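
# A minimal usage sketch (hypothetical helper, hypothetical path): this
# module's open() returns a text stream decoded with whatever encoding the
# file itself declares.
def _demo_open(path="example.py"):
    with open(path) as f:     # the tokenize.open() defined just above
        return f.read()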


def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
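
# A minimal usage sketch (hypothetical helper): feed tokenize() a bytes
# readline, here from an in-memory BytesIO, and iterate over the TokenInfo
# 5-tuples it yields.  The first token is always ENCODING.
def _demo_tokenize():
    from io import BytesIO
    source = b"def add(a, b):\n    return a + b\n"
    toks = list(tokenize(BytesIO(source).readline))
    assert toks[0].type == ENCODING and toks[0].string == "utf-8"
    for tok in toks:
        print("%-10s %r" % (tok_name[tok.exact_type], tok.string))
    return toks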


def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:             # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                  # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    return _tokenize(readline, None)
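
# A minimal sketch (hypothetical helper) of the string-based compatibility
# API: generate_tokens() takes a readline returning str rather than bytes and
# does not emit an ENCODING token.
def _demo_generate_tokens():
    from io import StringIO
    return [tok[:2] for tok in generate_tokens(StringIO("x = 1\n").readline)]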

def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with builtins.open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()
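
# Typical command-line invocations (a sketch based on the argparse setup in
# main() above; example.py is a hypothetical path):
#
#     python -m tokenize example.py        # tokenize a file
#     python -m tokenize -e example.py     # report exact operator token names
#     python -m tokenize < example.py      # tokenize standard input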