Current Path: > > > lib64 > python3.6
System: Linux premium107.web-hosting.com 4.18.0-553.44.1.lve.el8.x86_64 #1 SMP Thu Mar 13 14:29:12 UTC 2025 x86_64
Software: Apache | Server IP: 198.54.126.246 | Your IP: 216.73.216.181
Domains: 1034 | Permission: [ 0755 ]
| Name | Type | Size | Last Modified | Actions |
|---|---|---|---|---|
| __pycache__ | Directory | - | - | |
| asyncio | Directory | - | - | |
| collections | Directory | - | - | |
| concurrent | Directory | - | - | |
| config-3.6m-x86_64-linux-gnu | Directory | - | - | |
| ctypes | Directory | - | - | |
| curses | Directory | - | - | |
| dbm | Directory | - | - | |
| distutils | Directory | - | - | |
| email | Directory | - | - | |
| encodings | Directory | - | - | |
| ensurepip | Directory | - | - | |
| html | Directory | - | - | |
| http | Directory | - | - | |
| importlib | Directory | - | - | |
| json | Directory | - | - | |
| lib-dynload | Directory | - | - | |
| lib2to3 | Directory | - | - | |
| logging | Directory | - | - | |
| multiprocessing | Directory | - | - | |
| pydoc_data | Directory | - | - | |
| site-packages | Directory | - | - | |
| sqlite3 | Directory | - | - | |
| test | Directory | - | - | |
| unittest | Directory | - | - | |
| urllib | Directory | - | - | |
| venv | Directory | - | - | |
| wsgiref | Directory | - | - | |
| xml | Directory | - | - | |
| xmlrpc | Directory | - | - | |
| __future__.py | File | 4841 bytes | December 23 2018 21:37:14. | |
| __phello__.foo.py | File | 64 bytes | December 23 2018 21:37:14. | |
| _bootlocale.py | File | 1301 bytes | December 23 2018 21:37:14. | |
| _collections_abc.py | File | 26392 bytes | December 23 2018 21:37:14. | |
| _compat_pickle.py | File | 8749 bytes | December 23 2018 21:37:14. | |
| _compression.py | File | 5340 bytes | December 23 2018 21:37:14. | |
| _dummy_thread.py | File | 5118 bytes | December 23 2018 21:37:14. | |
| _markupbase.py | File | 14598 bytes | December 23 2018 21:37:14. | |
| _osx_support.py | File | 19138 bytes | December 23 2018 21:37:14. | |
| _pydecimal.py | File | 230228 bytes | December 23 2018 21:37:14. | |
| _pyio.py | File | 88097 bytes | December 23 2018 21:37:14. | |
| _sitebuiltins.py | File | 3115 bytes | December 23 2018 21:37:14. | |
| _strptime.py | File | 24747 bytes | December 23 2018 21:37:14. | |
| _sysconfigdata_dm_linux_x86_64-linux-gnu.py | File | 30191 bytes | February 05 2026 20:00:59. | |
| _sysconfigdata_m_linux_x86_64-linux-gnu.py | File | 30367 bytes | February 05 2026 20:04:42. | |
| _threading_local.py | File | 7214 bytes | December 23 2018 21:37:14. | |
| _weakrefset.py | File | 5705 bytes | December 23 2018 21:37:14. | |
| abc.py | File | 8727 bytes | December 23 2018 21:37:14. | |
| aifc.py | File | 32454 bytes | December 23 2018 21:37:14. | |
| antigravity.py | File | 477 bytes | December 23 2018 21:37:14. | |
| argparse.py | File | 90372 bytes | December 23 2018 21:37:14. | |
| ast.py | File | 12166 bytes | December 23 2018 21:37:14. | |
| asynchat.py | File | 11328 bytes | December 23 2018 21:37:14. | |
| asyncore.py | File | 20159 bytes | December 23 2018 21:37:14. | |
| base64.py | File | 20388 bytes | December 23 2018 21:37:14. | |
| bdb.py | File | 23556 bytes | December 23 2018 21:37:14. | |
| binhex.py | File | 13954 bytes | December 23 2018 21:37:14. | |
| bisect.py | File | 2595 bytes | December 23 2018 21:37:14. | |
| bz2.py | File | 12478 bytes | December 23 2018 21:37:14. | |
| cProfile.py | File | 5380 bytes | December 23 2018 21:37:14. | |
| calendar.py | File | 23213 bytes | December 23 2018 21:37:14. | |
| cgi.py | File | 37219 bytes | February 05 2026 20:00:12. | |
| cgitb.py | File | 12018 bytes | December 23 2018 21:37:14. | |
| chunk.py | File | 5425 bytes | December 23 2018 21:37:14. | |
| cmd.py | File | 14860 bytes | December 23 2018 21:37:14. | |
| code.py | File | 10614 bytes | December 23 2018 21:37:14. | |
| codecs.py | File | 36276 bytes | December 23 2018 21:37:14. | |
| codeop.py | File | 5994 bytes | December 23 2018 21:37:14. | |
| colorsys.py | File | 4064 bytes | December 23 2018 21:37:14. | |
| compileall.py | File | 12125 bytes | December 23 2018 21:37:14. | |
| configparser.py | File | 53592 bytes | December 23 2018 21:37:14. | |
| contextlib.py | File | 13162 bytes | December 23 2018 21:37:14. | |
| copy.py | File | 8815 bytes | December 23 2018 21:37:14. | |
| copyreg.py | File | 7007 bytes | December 23 2018 21:37:14. | |
| crypt.py | File | 1864 bytes | December 23 2018 21:37:14. | |
| csv.py | File | 16180 bytes | December 23 2018 21:37:14. | |
| datetime.py | File | 82034 bytes | December 23 2018 21:37:14. | |
| decimal.py | File | 320 bytes | December 23 2018 21:37:14. | |
| difflib.py | File | 84377 bytes | December 23 2018 21:37:14. | |
| dis.py | File | 18132 bytes | December 23 2018 21:37:14. | |
| doctest.py | File | 104391 bytes | December 23 2018 21:37:14. | |
| dummy_threading.py | File | 2815 bytes | December 23 2018 21:37:14. | |
| enum.py | File | 33606 bytes | December 23 2018 21:37:14. | |
| filecmp.py | File | 9830 bytes | December 23 2018 21:37:14. | |
| fileinput.py | File | 14471 bytes | December 23 2018 21:37:14. | |
| fnmatch.py | File | 3166 bytes | December 23 2018 21:37:14. | |
| formatter.py | File | 15143 bytes | December 23 2018 21:37:14. | |
| fractions.py | File | 23639 bytes | December 23 2018 21:37:14. | |
| ftplib.py | File | 35617 bytes | February 05 2026 20:00:12. | |
| functools.py | File | 31346 bytes | December 23 2018 21:37:14. | |
| genericpath.py | File | 5028 bytes | February 05 2026 20:00:12. | |
| getopt.py | File | 7489 bytes | December 23 2018 21:37:14. | |
| getpass.py | File | 5994 bytes | December 23 2018 21:37:14. | |
| gettext.py | File | 21530 bytes | December 23 2018 21:37:14. | |
| glob.py | File | 5638 bytes | December 23 2018 21:37:14. | |
| gzip.py | File | 20334 bytes | December 23 2018 21:37:14. | |
| hashlib.py | File | 8799 bytes | February 05 2026 20:00:12. | |
| heapq.py | File | 22929 bytes | December 23 2018 21:37:14. | |
| hmac.py | File | 6381 bytes | February 05 2026 20:00:12. | |
| imaplib.py | File | 53464 bytes | February 05 2026 20:00:12. | |
| imghdr.py | File | 3795 bytes | December 23 2018 21:37:14. | |
| imp.py | File | 10669 bytes | December 23 2018 21:37:14. | |
| inspect.py | File | 116958 bytes | December 23 2018 21:37:14. | |
| io.py | File | 3517 bytes | December 23 2018 21:37:14. | |
| ipaddress.py | File | 77818 bytes | February 05 2026 20:00:12. | |
| keyword.py | File | 2219 bytes | December 23 2018 21:37:14. | |
| linecache.py | File | 5312 bytes | December 23 2018 21:37:14. | |
| locale.py | File | 77300 bytes | December 23 2018 21:37:14. | |
| lzma.py | File | 12983 bytes | December 23 2018 21:37:14. | |
| macpath.py | File | 5971 bytes | December 23 2018 21:37:14. | |
| macurl2path.py | File | 2732 bytes | December 23 2018 21:37:14. | |
| mailbox.py | File | 78624 bytes | December 23 2018 21:37:14. | |
| mailcap.py | File | 9067 bytes | February 05 2026 20:00:12. | |
| mimetypes.py | File | 21042 bytes | December 23 2018 21:37:14. | |
| modulefinder.py | File | 23027 bytes | December 23 2018 21:37:14. | |
| netrc.py | File | 5684 bytes | December 23 2018 21:37:14. | |
| nntplib.py | File | 43078 bytes | December 23 2018 21:37:14. | |
| ntpath.py | File | 23094 bytes | December 23 2018 21:37:14. | |
| nturl2path.py | File | 2444 bytes | December 23 2018 21:37:14. | |
| numbers.py | File | 10243 bytes | December 23 2018 21:37:14. | |
| opcode.py | File | 5822 bytes | December 23 2018 21:37:14. | |
| operator.py | File | 10863 bytes | December 23 2018 21:37:14. | |
| optparse.py | File | 60371 bytes | December 23 2018 21:37:14. | |
| os.py | File | 37526 bytes | December 23 2018 21:37:14. | |
| pathlib.py | File | 46238 bytes | February 05 2026 20:00:12. | |
| pdb.py | File | 61320 bytes | December 23 2018 21:37:14. | |
| pickle.py | File | 55691 bytes | December 23 2018 21:37:14. | |
| pickletools.py | File | 91775 bytes | December 23 2018 21:37:14. | |
| pipes.py | File | 8916 bytes | December 23 2018 21:37:14. | |
| pkgutil.py | File | 21315 bytes | December 23 2018 21:37:14. | |
| platform.py | File | 47214 bytes | February 05 2026 20:00:12. | |
| plistlib.py | File | 32291 bytes | February 05 2026 20:00:12. | |
| poplib.py | File | 15087 bytes | February 05 2026 20:00:12. | |
| posixpath.py | File | 16324 bytes | February 05 2026 20:00:12. | |
| pprint.py | File | 20860 bytes | December 23 2018 21:37:14. | |
| profile.py | File | 22029 bytes | December 23 2018 21:37:14. | |
| pstats.py | File | 26564 bytes | December 23 2018 21:37:14. | |
| pty.py | File | 4763 bytes | December 23 2018 21:37:14. | |
| py_compile.py | File | 7181 bytes | December 23 2018 21:37:14. | |
| pyclbr.py | File | 13558 bytes | December 23 2018 21:37:14. | |
| pydoc.py | File | 103501 bytes | February 05 2026 20:05:20. | |
| queue.py | File | 8780 bytes | December 23 2018 21:37:14. | |
| quopri.py | File | 7262 bytes | December 23 2018 21:37:14. | |
| random.py | File | 27442 bytes | December 23 2018 21:37:14. | |
| re.py | File | 15552 bytes | December 23 2018 21:37:14. | |
| reprlib.py | File | 5336 bytes | December 23 2018 21:37:14. | |
| rlcompleter.py | File | 7097 bytes | December 23 2018 21:37:14. | |
| runpy.py | File | 11959 bytes | December 23 2018 21:37:14. | |
| sched.py | File | 6511 bytes | December 23 2018 21:37:14. | |
| secrets.py | File | 2038 bytes | December 23 2018 21:37:14. | |
| selectors.py | File | 19438 bytes | December 23 2018 21:37:14. | |
| shelve.py | File | 8515 bytes | December 23 2018 21:37:14. | |
| shlex.py | File | 12956 bytes | December 23 2018 21:37:14. | |
| shutil.py | File | 40829 bytes | February 05 2026 20:00:12. | |
| signal.py | File | 2123 bytes | December 23 2018 21:37:14. | |
| site.py | File | 21268 bytes | February 05 2026 20:00:12. | |
| smtpd.py | File | 34719 bytes | December 23 2018 21:37:14. | |
| smtplib.py | File | 44218 bytes | December 23 2018 21:37:14. | |
| sndhdr.py | File | 7088 bytes | December 23 2018 21:37:14. | |
| socket.py | File | 27443 bytes | December 23 2018 21:37:14. | |
| socketserver.py | File | 27010 bytes | December 23 2018 21:37:14. | |
| sre_compile.py | File | 19338 bytes | December 23 2018 21:37:14. | |
| sre_constants.py | File | 6821 bytes | December 23 2018 21:37:14. | |
| sre_parse.py | File | 36536 bytes | December 23 2018 21:37:14. | |
| ssl.py | File | 44509 bytes | February 05 2026 20:00:12. | |
| stat.py | File | 5038 bytes | December 23 2018 21:37:14. | |
| statistics.py | File | 20673 bytes | December 23 2018 21:37:14. | |
| string.py | File | 11795 bytes | December 23 2018 21:37:14. | |
| stringprep.py | File | 12917 bytes | December 23 2018 21:37:14. | |
| struct.py | File | 257 bytes | December 23 2018 21:37:14. | |
| subprocess.py | File | 62339 bytes | December 23 2018 21:37:14. | |
| sunau.py | File | 18095 bytes | December 23 2018 21:37:14. | |
| symbol.py | File | 2119 bytes | December 23 2018 21:37:14. | |
| symtable.py | File | 7277 bytes | December 23 2018 21:37:14. | |
| sysconfig.py | File | 24876 bytes | February 05 2026 20:05:18. | |
| tabnanny.py | File | 11411 bytes | December 23 2018 21:37:14. | |
| tarfile.py | File | 111635 bytes | February 05 2026 20:00:12. | |
| telnetlib.py | File | 23136 bytes | December 23 2018 21:37:14. | |
| tempfile.py | File | 28066 bytes | February 05 2026 20:00:12. | |
| textwrap.py | File | 19558 bytes | December 23 2018 21:37:14. | |
| this.py | File | 1003 bytes | December 23 2018 21:37:14. | |
| threading.py | File | 50136 bytes | February 05 2026 20:00:12. | |
| timeit.py | File | 13342 bytes | December 23 2018 21:37:14. | |
| token.py | File | 3075 bytes | December 23 2018 21:37:14. | |
| tokenize.py | File | 29496 bytes | December 23 2018 21:37:14. | |
| trace.py | File | 28733 bytes | December 23 2018 21:37:14. | |
| traceback.py | File | 23458 bytes | December 23 2018 21:37:14. | |
| tracemalloc.py | File | 16658 bytes | December 23 2018 21:37:14. | |
| tty.py | File | 879 bytes | December 23 2018 21:37:14. | |
| types.py | File | 8870 bytes | December 23 2018 21:37:14. | |
| typing.py | File | 80274 bytes | December 23 2018 21:37:14. | |
| uu.py | File | 6763 bytes | December 23 2018 21:37:14. | |
| uuid.py | File | 24020 bytes | February 05 2026 20:00:12. | |
| warnings.py | File | 18488 bytes | December 23 2018 21:37:14. | |
| wave.py | File | 17709 bytes | December 23 2018 21:37:14. | |
| weakref.py | File | 20466 bytes | December 23 2018 21:37:14. | |
| webbrowser.py | File | 21767 bytes | December 23 2018 21:37:14. | |
| xdrlib.py | File | 5913 bytes | December 23 2018 21:37:14. | |
| zipapp.py | File | 7157 bytes | December 23 2018 21:37:14. | |
| zipfile.py | File | 79924 bytes | February 05 2026 20:00:12. |
"""Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import itertools as _itertools
import re
import sys
from token import *
cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
"NL", "untokenize", "ENCODING", "TokenInfo"]
del token
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
'(': LPAR,
')': RPAR,
'[': LSQB,
']': RSQB,
':': COLON,
',': COMMA,
';': SEMI,
'+': PLUS,
'-': MINUS,
'*': STAR,
'/': SLASH,
'|': VBAR,
'&': AMPER,
'<': LESS,
'>': GREATER,
'=': EQUAL,
'.': DOT,
'%': PERCENT,
'{': LBRACE,
'}': RBRACE,
'==': EQEQUAL,
'!=': NOTEQUAL,
'<=': LESSEQUAL,
'>=': GREATEREQUAL,
'~': TILDE,
'^': CIRCUMFLEX,
'<<': LEFTSHIFT,
'>>': RIGHTSHIFT,
'**': DOUBLESTAR,
'+=': PLUSEQUAL,
'-=': MINEQUAL,
'*=': STAREQUAL,
'/=': SLASHEQUAL,
'%=': PERCENTEQUAL,
'&=': AMPEREQUAL,
'|=': VBAREQUAL,
'^=': CIRCUMFLEXEQUAL,
'<<=': LEFTSHIFTEQUAL,
'>>=': RIGHTSHIFTEQUAL,
'**=': DOUBLESTAREQUAL,
'//': DOUBLESLASH,
'//=': DOUBLESLASHEQUAL,
'@': AT,
'@=': ATEQUAL,
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
def __repr__(self):
annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
self._replace(type=annotated_type))
@property
def exact_type(self):
if self.type == OP and self.string in EXACT_TOKEN_TYPES:
return EXACT_TOKEN_TYPES[self.string]
else:
return self.type
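# Editorial example (not part of the original module; the helper name
# _example_exact_type is hypothetical): a minimal sketch of how exact_type
# refines a generic OP token into its specific operator code from the
# EXACT_TOKEN_TYPES table above.
def _example_exact_type():
    tok = TokenInfo(OP, '+=', (1, 2), (1, 4), 'x += 1\n')
    # tok.type stays the generic OP code; exact_type resolves to PLUSEQUAL.
    return tok_name[tok.type], tok_name[tok.exact_type]  # ('OP', 'PLUSEQUAL')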
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
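# Editorial example (hypothetical helper, not in the original module):
# a quick check that the assembled Number pattern accepts the literal
# forms built above, including PEP 515 underscore separators.
def _example_number_regex():
    pat = re.compile(Number)
    # Every sample below should match in full.
    samples = ('0x_FF', '0b10_10', '0o777', '42', '3.14e-2', '10_000j')
    return all(pat.fullmatch(s) for s in samples)  # True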
# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
    # The valid string prefixes. Only the lower-case versions are listed
    # here, without permutations ('fr' is included, but not 'rf'); the
    # case and order permutations are generated below.
_valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
# if we add binary f-strings, add: ['fb', 'fbr']
result = set([''])
for prefix in _valid_string_prefixes:
for t in _itertools.permutations(prefix):
# create a list with upper and lower versions of each
# character
for u in _itertools.product(*[(c, c.upper()) for c in t]):
result.add(''.join(u))
return result
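# Editorial example (hypothetical helper, not in the original module):
# the set built above contains every case/order permutation of each
# prefix, plus the empty string for unprefixed literals.
def _example_string_prefixes():
    prefixes = _all_string_prefixes()
    return '' in prefixes and {'rb', 'bR', 'Rb', 'fr', 'RF'} <= prefixes  # True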
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because alternation in Python regular expressions is first-match rather
# than longest-match, be sure to put the longest operators first (e.g.,
# if = came before ==, == would get recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
r"//=?", r"->",
r"[+\-*/%&@|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
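# Editorial example (hypothetical helper, not in the original module):
# PseudoToken is the pattern the main loop in _tokenize() matches
# repeatedly; group 1 is the token text and span(1) skips the leading
# whitespace, which is how the sketch below walks a line.
def _example_pseudo_scan(line='x = 1\n'):
    prog = _compile(PseudoToken)
    pos, toks = 0, []
    while pos < len(line):
        m = prog.match(line, pos)
        if not m or m.start(1) == m.end(1):
            break  # no token, or a zero-width match such as \Z
        toks.append(m.group(1))
        pos = m.end(1)
    return toks  # ['x', '=', '1', '\n']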
# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string. _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
endpats[_prefix + "'"] = Single
endpats[_prefix + '"'] = Double
endpats[_prefix + "'''"] = Single3
endpats[_prefix + '"""'] = Double3
# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
for u in (t + '"', t + "'"):
single_quoted.add(u)
for u in (t + '"""', t + "'''"):
triple_quoted.add(u)
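# Editorial example (hypothetical helper, not in the original module):
# the opening-quote sets built above are what _tokenize() consults to
# decide whether a token starts a single-line or triple-quoted string.
def _example_quote_sets():
    return ("rb'" in single_quoted,
            'f"""' in triple_quoted,
            endpats['"""'] == Double3)  # (True, True, True)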
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
self.encoding = None
def add_whitespace(self, start):
row, col = start
if row < self.prev_row or row == self.prev_row and col < self.prev_col:
raise ValueError("start ({},{}) precedes previous end ({},{})"
.format(row, col, self.prev_row, self.prev_col))
row_offset = row - self.prev_row
if row_offset:
self.tokens.append("\\\n" * row_offset)
self.prev_col = 0
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
it = iter(iterable)
indents = []
startline = False
for t in it:
if len(t) == 2:
self.compat(t, it)
break
tok_type, token, start, end, line = t
if tok_type == ENCODING:
self.encoding = token
continue
if tok_type == ENDMARKER:
break
if tok_type == INDENT:
indents.append(token)
continue
elif tok_type == DEDENT:
indents.pop()
self.prev_row, self.prev_col = end
continue
elif tok_type in (NEWLINE, NL):
startline = True
elif startline and indents:
indent = indents[-1]
if start[1] >= len(indent):
self.tokens.append(indent)
self.prev_col = len(indent)
startline = False
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
indents = []
toks_append = self.tokens.append
startline = token[0] in (NEWLINE, NL)
prevstring = False
for tok in chain([token], iterable):
toknum, tokval = tok[:2]
if toknum == ENCODING:
self.encoding = tokval
continue
if toknum in (NAME, NUMBER, ASYNC, AWAIT):
tokval += ' '
# Insert a space between two consecutive strings
if toknum == STRING:
if prevstring:
tokval = ' ' + tokval
prevstring = True
else:
prevstring = False
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
def untokenize(iterable):
"""Transform tokens back into Python source code.
It returns a bytes object, encoded using the ENCODING
token, which is the first token sequence output by tokenize.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited input:
# Output bytes will tokenize back to the input
t1 = [tok[:2] for tok in tokenize(f.readline)]
newcode = untokenize(t1)
readline = BytesIO(newcode).readline
t2 = [tok[:2] for tok in tokenize(readline)]
assert t1 == t2
"""
ut = Untokenizer()
out = ut.untokenize(iterable)
if ut.encoding is not None:
out = out.encode(ut.encoding)
return out
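# Editorial example (hypothetical helper, not in the original module):
# demonstrates the limited-input round trip promised in the docstring,
# feeding untokenize() only (type, string) pairs so it falls back to
# the compat() path of Untokenizer.
def _example_untokenize_roundtrip():
    from io import BytesIO
    source = b'x = 1\nprint(x)\n'
    t1 = [tok[:2] for tok in tokenize(BytesIO(source).readline)]
    newcode = untokenize(t1)
    t2 = [tok[:2] for tok in tokenize(BytesIO(newcode).readline)]
    return t1 == t2  # True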
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
try:
filename = readline.__self__.name
except AttributeError:
filename = None
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
# Decode as UTF-8. Either the line is an encoding declaration,
# in which case it should be pure ASCII, or it must be UTF-8
# per default encoding.
line_string = line.decode('utf-8')
except UnicodeDecodeError:
msg = "invalid or missing encoding declaration"
if filename is not None:
msg = '{} for {!r}'.format(msg, filename)
raise SyntaxError(msg)
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
if filename is None:
msg = "unknown encoding: " + encoding
else:
msg = "unknown encoding for {!r}: {}".format(filename,
encoding)
raise SyntaxError(msg)
if bom_found:
if encoding != 'utf-8':
# This behaviour mimics the Python interpreter
if filename is None:
msg = 'encoding problem: utf-8'
else:
msg = 'encoding problem for {!r}: utf-8'.format(filename)
raise SyntaxError(msg)
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
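# Editorial example (hypothetical helper, not in the original module):
# detect_encoding() reads at most two lines; with a PEP 263 cookie on
# the first line it returns the declared name and the consumed bytes.
def _example_detect_encoding():
    from io import BytesIO
    buf = BytesIO(b'# -*- coding: iso-8859-1 -*-\nx = 1\n')
    encoding, consumed = detect_encoding(buf.readline)
    return encoding, consumed  # ('iso-8859-1', [b'# -*- coding: iso-8859-1 -*-\n'])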
def open(filename):
    """Open a file in read-only mode using the encoding detected by
    detect_encoding().
    """
buffer = _builtin_open(filename, 'rb')
try:
encoding, lines = detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
return text
except:
buffer.close()
raise
def tokenize(readline):
"""
The tokenize() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as bytes. Alternatively, readline
can be a callable function terminating with StopIteration:
readline = open(myfile, 'rb').__next__ # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
The first token sequence will always be an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
# This import is here to avoid problems when the itertools module is not
# built yet and tokenize is imported.
from itertools import chain, repeat
encoding, consumed = detect_encoding(readline)
rl_gen = iter(readline, b"")
empty = repeat(b"")
return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
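# Editorial example (hypothetical helper, not in the original module):
# shows the shape of the stream: an ENCODING token first, then ordinary
# tokens, each a TokenInfo 5-tuple as described in the docstring above.
def _example_tokenize():
    from io import BytesIO
    toks = list(tokenize(BytesIO(b'x = 1\n').readline))
    names = [tok_name[t.type] for t in toks]
    return names  # ['ENCODING', 'NAME', 'OP', 'NUMBER', 'NEWLINE', 'ENDMARKER']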
def _tokenize(readline, encoding):
lnum = parenlev = continued = 0
numchars = '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
# 'stashed' and 'async_*' are used for async/await parsing
stashed = None
async_def = False
async_def_indent = 0
async_def_nl = False
if encoding is not None:
if encoding == "utf-8-sig":
# BOM will already have been stripped.
encoding = "utf-8"
yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
last_line = b''
line = b''
while True: # loop over lines in stream
try:
# We capture the value of the line variable here because
# readline uses the empty string '' to signal end of input,
# hence `line` itself will always be overwritten at the end
# of this loop.
last_line = line
line = readline()
except StopIteration:
line = b''
if encoding is not None:
line = line.decode(encoding)
lnum += 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield TokenInfo(STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield TokenInfo(ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ':
column += 1
elif line[pos] == '\t':
column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f':
column = 0
else:
break
pos += 1
if pos == max:
break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield TokenInfo(COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield TokenInfo(NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
                else:
                    # line[pos] is '\r' or '\n' here, so this branch always
                    # yields an NL token.
                    yield TokenInfo(NL, line[pos:],
                               (lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
if async_def and async_def_indent >= indents[-1]:
async_def = False
async_def_nl = False
async_def_indent = 0
yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
if async_def and async_def_nl and async_def_indent >= indents[-1]:
async_def = False
async_def_nl = False
async_def_indent = 0
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = _compile(PseudoToken).match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
if start == end:
continue
token, initial = line[start:end], line[start]
if (initial in numchars or # ordinary number
(initial == '.' and token != '.' and token != '...')):
yield TokenInfo(NUMBER, token, spos, epos, line)
elif initial in '\r\n':
if stashed:
yield stashed
stashed = None
if parenlev > 0:
yield TokenInfo(NL, token, spos, epos, line)
else:
yield TokenInfo(NEWLINE, token, spos, epos, line)
if async_def:
async_def_nl = True
elif initial == '#':
assert not token.endswith("\n")
if stashed:
yield stashed
stashed = None
yield TokenInfo(COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = _compile(endpats[token])
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield TokenInfo(STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
# Check up to the first 3 chars of the token to see if
# they're in the single_quoted set. If so, they start
# a string.
# We're using the first 3, because we're looking for
# "rb'" (for example) at the start of the token. If
# we switch to longer prefixes, this needs to be
# adjusted.
# Note that initial == token[:1].
# Also note that single quote checking must come after
# triple quote checking (above).
elif (initial in single_quoted or
token[:2] in single_quoted or
token[:3] in single_quoted):
if token[-1] == '\n': # continued string
strstart = (lnum, start)
# Again, using the first 3 chars of the
# token. This is looking for the matching end
# regex for the correct type of quote
# character. So it's really looking for
# endpats["'"] or endpats['"'], by trying to
# skip string prefix characters, if any.
endprog = _compile(endpats.get(initial) or
endpats.get(token[1]) or
endpats.get(token[2]))
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield TokenInfo(STRING, token, spos, epos, line)
elif initial.isidentifier(): # ordinary name
if token in ('async', 'await'):
if async_def:
yield TokenInfo(
ASYNC if token == 'async' else AWAIT,
token, spos, epos, line)
continue
tok = TokenInfo(NAME, token, spos, epos, line)
if token == 'async' and not stashed:
stashed = tok
continue
if token == 'def':
if (stashed
and stashed.type == NAME
and stashed.string == 'async'):
async_def = True
async_def_indent = indents[-1]
yield TokenInfo(ASYNC, stashed.string,
stashed.start, stashed.end,
stashed.line)
stashed = None
if stashed:
yield stashed
stashed = None
yield tok
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{':
parenlev += 1
elif initial in ')]}':
parenlev -= 1
if stashed:
yield stashed
stashed = None
yield TokenInfo(OP, token, spos, epos, line)
else:
yield TokenInfo(ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos += 1
if stashed:
yield stashed
stashed = None
# Add an implicit NEWLINE if the input doesn't end in one
if last_line and last_line[-1] not in '\r\n':
yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
for indent in indents[1:]: # pop remaining indent levels
yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards-compatible API for all the places in the
# standard library that expect to be able to use tokenize with strings
def generate_tokens(readline):
return _tokenize(readline, None)
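# Editorial example (hypothetical helper, not in the original module):
# generate_tokens() is the str-based variant: readline yields text, not
# bytes, and no ENCODING token is emitted because the encoding is None.
def _example_generate_tokens():
    from io import StringIO
    toks = list(generate_tokens(StringIO('x = 1\n').readline))
    return [tok_name[t.type] for t in toks]  # no 'ENCODING' entry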
def main():
import argparse
# Helper error handling routines
def perror(message):
print(message, file=sys.stderr)
def error(message, filename=None, location=None):
if location:
args = (filename,) + location + (message,)
perror("%s:%d:%d: error: %s" % args)
elif filename:
perror("%s: error: %s" % (filename, message))
else:
perror("error: %s" % message)
sys.exit(1)
# Parse the arguments and options
parser = argparse.ArgumentParser(prog='python -m tokenize')
parser.add_argument(dest='filename', nargs='?',
metavar='filename.py',
help='the file to tokenize; defaults to stdin')
parser.add_argument('-e', '--exact', dest='exact', action='store_true',
help='display token names using the exact type')
args = parser.parse_args()
try:
# Tokenize the input
if args.filename:
filename = args.filename
with _builtin_open(filename, 'rb') as f:
tokens = list(tokenize(f.readline))
else:
filename = "<stdin>"
tokens = _tokenize(sys.stdin.readline, None)
# Output the tokenization
for token in tokens:
token_type = token.type
if args.exact:
token_type = token.exact_type
token_range = "%d,%d-%d,%d:" % (token.start + token.end)
print("%-20s%-15s%-15r" %
(token_range, tok_name[token_type], token.string))
except IndentationError as err:
line, column = err.args[1][1:3]
error(err.args[0], filename, (line, column))
except TokenError as err:
line, column = err.args[1]
error(err.args[0], filename, (line, column))
except SyntaxError as err:
error(err, filename)
except OSError as err:
error(err)
except KeyboardInterrupt:
print("interrupted\n")
except Exception as err:
perror("unexpected error: %s" % err)
raise
if __name__ == "__main__":
main()
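# Editorial usage note (not part of the original module; the filename is
# hypothetical): per the argparse setup in main() above, the module doubles
# as a command-line tokenizer:
#
#     python -m tokenize somefile.py        # token names by generic type
#     python -m tokenize -e somefile.py     # token names by exact_type
#
# With no filename argument it tokenizes standard input.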