second commit

This commit is contained in:
2024-12-27 22:31:23 +09:00
parent 2353324570
commit 10a0f110ca
8819 changed files with 1307198 additions and 28 deletions

View File

@ -0,0 +1,97 @@
import sys

from lib2to3 import refactor

# The stage 1 fixers are "safe": they only turn Python 2 code into more
# modern Python 2 code.  They should be uncontroversial for any project that
# is happy to drop support for Py2.5 and below, and applying them first keeps
# the patch produced by the real porting stage small.
lib2to3_fix_names_stage1 = set([
    'lib2to3.fixes.fix_apply',
    'lib2to3.fixes.fix_except',
    'lib2to3.fixes.fix_exec',
    'lib2to3.fixes.fix_exitfunc',
    'lib2to3.fixes.fix_funcattrs',
    'lib2to3.fixes.fix_has_key',
    'lib2to3.fixes.fix_idioms',
    # 'lib2to3.fixes.fix_import',    # makes any implicit relative imports
    #                                # explicit (use together with
    #                                # ``from __future__ import absolute_import``)
    'lib2to3.fixes.fix_intern',
    'lib2to3.fixes.fix_isinstance',
    'lib2to3.fixes.fix_methodattrs',
    'lib2to3.fixes.fix_ne',
    # 'lib2to3.fixes.fix_next',      # would replace ``next`` method names
    #                                # with ``__next__``
    'lib2to3.fixes.fix_numliterals',    # turns 1L into 1, 0755 into 0o755
    'lib2to3.fixes.fix_paren',
    # 'lib2to3.fixes.fix_print',     # see the libfuturize fixer that also
    #                                # adds ``from __future__ import print_function``
    # 'lib2to3.fixes.fix_raise',     # uses incompatible with_traceback() method on exceptions
    'lib2to3.fixes.fix_reduce',      # reduce is available in functools on Py2.6/Py2.7
    'lib2to3.fixes.fix_renames',     # sys.maxint -> sys.maxsize
    # 'lib2to3.fixes.fix_set_literal',   # this is unnecessary and breaks Py2.6 support
    'lib2to3.fixes.fix_repr',
    'lib2to3.fixes.fix_standarderror',
    'lib2to3.fixes.fix_sys_exc',
    'lib2to3.fixes.fix_throw',
    'lib2to3.fixes.fix_tuple_params',
    'lib2to3.fixes.fix_types',
    'lib2to3.fixes.fix_ws_comma',    # can perhaps decrease readability: see issue #58
    'lib2to3.fixes.fix_xreadlines',
])

# The fixers below introduce a dependency on the ``future`` package in order
# to keep supporting Python 2:
lib2to3_fix_names_stage2 = set([
    # 'lib2to3.fixes.fix_buffer',    # perhaps not safe. Test this.
    # 'lib2to3.fixes.fix_callable',  # not needed in Py3.2+
    'lib2to3.fixes.fix_dict',        # TODO: add support for utils.viewitems() etc. and move to stage2
    # 'lib2to3.fixes.fix_execfile',  # some problems: see issue #37.
    #                                # We use a custom fixer instead (see below)
    # 'lib2to3.fixes.fix_future',    # we don't want to remove __future__ imports
    'lib2to3.fixes.fix_getcwdu',
    # 'lib2to3.fixes.fix_imports',   # called by libfuturize.fixes.fix_future_standard_library
    # 'lib2to3.fixes.fix_imports2',  # we don't handle this yet (dbm)
    # 'lib2to3.fixes.fix_input',     # called conditionally by libfuturize.fixes.fix_input
    'lib2to3.fixes.fix_itertools',
    'lib2to3.fixes.fix_itertools_imports',
    'lib2to3.fixes.fix_filter',
    'lib2to3.fixes.fix_long',
    'lib2to3.fixes.fix_map',
    # 'lib2to3.fixes.fix_metaclass', # causes SyntaxError in Py2! Use the one from ``six`` instead
    'lib2to3.fixes.fix_next',
    'lib2to3.fixes.fix_nonzero',     # TODO: cause this to import ``object`` and/or add a decorator for mapping __bool__ to __nonzero__
    'lib2to3.fixes.fix_operator',    # we will need support for this by e.g. extending the Py2 operator module to provide those functions in Py3
    'lib2to3.fixes.fix_raw_input',
    # 'lib2to3.fixes.fix_unicode',   # strips off the u'' prefix, which removes a potentially helpful source of information for disambiguating unicode/byte strings
    # 'lib2to3.fixes.fix_urllib',    # included in libfuturize.fix_future_standard_library_urllib
    # 'lib2to3.fixes.fix_xrange',    # custom one because of a bug with Py3.3's lib2to3
    'lib2to3.fixes.fix_zip',
])

libfuturize_fix_names_stage1 = set([
    'libfuturize.fixes.fix_absolute_import',
    'libfuturize.fixes.fix_next_call',    # obj.next() -> next(obj). Unlike
    #                                     # lib2to3.fixes.fix_next, doesn't
    #                                     # change the ``next`` method to
    #                                     # ``__next__``.
    'libfuturize.fixes.fix_print_with_import',
    'libfuturize.fixes.fix_raise',
    # 'libfuturize.fixes.fix_order___future__imports',    # TODO: consolidate to a single line to simplify testing
])

libfuturize_fix_names_stage2 = set([
    'libfuturize.fixes.fix_basestring',
    # 'libfuturize.fixes.fix_add__future__imports_except_unicode_literals',    # just in case
    'libfuturize.fixes.fix_cmp',
    'libfuturize.fixes.fix_division_safe',
    'libfuturize.fixes.fix_execfile',
    'libfuturize.fixes.fix_future_builtins',
    'libfuturize.fixes.fix_future_standard_library',
    'libfuturize.fixes.fix_future_standard_library_urllib',
    'libfuturize.fixes.fix_input',
    'libfuturize.fixes.fix_metaclass',
    'libpasteurize.fixes.fix_newstyle',
    'libfuturize.fixes.fix_object',
    # 'libfuturize.fixes.fix_order___future__imports',    # TODO: consolidate to a single line to simplify testing
    'libfuturize.fixes.fix_unicode_keep_u',
    # 'libfuturize.fixes.fix_unicode_literals_import',
    'libfuturize.fixes.fix_xrange_with_import',    # custom one because of a bug with Py3.3's lib2to3
])

View File

@ -0,0 +1,102 @@
"""Fix UserDict.
Incomplete!
TODO: base this on fix_urllib perhaps?
"""
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, attr_chain
from lib2to3.fixes.fix_imports import alternates, build_pattern, FixImports
MAPPING = {'UserDict': 'collections',
}
# def alternates(members):
# return "(" + "|".join(map(repr, members)) + ")"
#
#
# def build_pattern(mapping=MAPPING):
# mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
# bare_names = alternates(mapping.keys())
#
# yield """name_import=import_name< 'import' ((%s) |
# multiple_imports=dotted_as_names< any* (%s) any* >) >
# """ % (mod_list, mod_list)
# yield """import_from< 'from' (%s) 'import' ['(']
# ( any | import_as_name< any 'as' any > |
# import_as_names< any* >) [')'] >
# """ % mod_list
# yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
# multiple_imports=dotted_as_names<
# any* dotted_as_name< (%s) 'as' any > any* >) >
# """ % (mod_list, mod_list)
#
# # Find usages of module members in code e.g. thread.foo(bar)
# yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
# class FixUserDict(fixer_base.BaseFix):
class FixUserdict(FixImports):
    """Rename ``UserDict`` imports/usages according to MAPPING.

    NOTE(review): logic is copied from lib2to3's FixImports; the module
    docstring marks this fixer "Incomplete!", so treat it as unfinished.
    """

    BM_compatible = True
    keep_line_order = True
    # This is overridden in fix_imports2.
    mapping = MAPPING

    # We want to run this fixer late, so fix_import doesn't try to make stdlib
    # renames into relative imports.
    run_order = 6

    def build_pattern(self):
        # Join the alternative patterns generated for each MAPPING entry.
        return "|".join(build_pattern(self.mapping))

    def compile_pattern(self):
        # We override this, so MAPPING can be programmatically altered and the
        # changes will be reflected in PATTERN.
        self.PATTERN = self.build_pattern()
        super(FixImports, self).compile_pattern()

    # Don't match the node if it's within another match.
    def match(self, node):
        match = super(FixImports, self).match
        results = match(node)
        if results:
            # Module usage could be in the trailer of an attribute lookup, so we
            # might have nested matches when "bare_with_attr" is present.
            if "bare_with_attr" not in results and \
                    any(match(obj) for obj in attr_chain(node, "parent")):
                return False
            return results
        return False

    def start_tree(self, tree, filename):
        super(FixImports, self).start_tree(tree, filename)
        # Maps old bare module names to their replacements for usage rewrites.
        self.replace = {}

    def transform(self, node, results):
        import_mod = results.get("module_name")
        if import_mod:
            mod_name = import_mod.value
            # NOTE(review): ``unicode`` is Py2-only; presumably this fixer was
            # only ever run under Python 2 — confirm before reusing on Py3.
            new_name = unicode(self.mapping[mod_name])
            import_mod.replace(Name(new_name, prefix=import_mod.prefix))
            if "name_import" in results:
                # If it's not a "from x import x, y" or "import x as y" import,
                # marked its usage to be replaced.
                self.replace[mod_name] = new_name
            if "multiple_imports" in results:
                # This is a nasty hack to fix multiple imports on a line (e.g.,
                # "import StringIO, urlparse"). The problem is that I can't
                # figure out an easy way to make a pattern recognize the keys of
                # MAPPING randomly sprinkled in an import statement.
                results = self.match(node)
                if results:
                    self.transform(node, results)
        else:
            # Replace usage of the module.
            bare_name = results["bare_with_attr"][0]
            new_name = self.replace.get(bare_name.value)
            if new_name:
                bare_name.replace(Name(new_name, prefix=bare_name.prefix))

View File

@ -0,0 +1,91 @@
"""
Fixer for import statements, with a __future__ import line.
Based on lib2to3/fixes/fix_import.py, but extended slightly so it also
supports Cython modules.
If spam is being imported from the local directory, this import:
from spam import eggs
becomes:
from __future__ import absolute_import
from .spam import eggs
and this import:
import spam
becomes:
from __future__ import absolute_import
from . import spam
"""
from os.path import dirname, join, exists, sep
from lib2to3.fixes.fix_import import FixImport
from lib2to3.fixer_util import FromImport, syms
from lib2to3.fixes.fix_import import traverse_imports
from libfuturize.fixer_util import future_import
class FixAbsoluteImport(FixImport):
    """Make implicit relative imports explicit and add
    ``from __future__ import absolute_import`` to any module so changed.

    Extends lib2to3's FixImport so that Cython modules (.pyx) are also
    recognised as local.
    """

    run_order = 9

    def transform(self, node, results):
        """
        Copied from FixImport.transform(), but with this line added in
        any modules that had implicit relative imports changed:
            from __future__ import absolute_import"
        """
        if self.skip:
            return
        imp = results['imp']

        if node.type == syms.import_from:
            # Some imps are top-level (eg: 'import ham')
            # some are first level (eg: 'import ham.eggs')
            # some are third level (eg: 'import ham.eggs as spam')
            # Hence, the loop
            while not hasattr(imp, 'value'):
                imp = imp.children[0]
            if self.probably_a_local_import(imp.value):
                imp.value = u"." + imp.value
                imp.changed()
                future_import(u"absolute_import", node)
        else:
            have_local = False
            have_absolute = False
            for mod_name in traverse_imports(imp):
                if self.probably_a_local_import(mod_name):
                    have_local = True
                else:
                    have_absolute = True
            if have_absolute:
                if have_local:
                    # We won't handle both sibling and absolute imports in the
                    # same statement at the moment.
                    self.warning(node, "absolute and local imports together")
                return

            new = FromImport(u".", [imp])
            new.prefix = node.prefix
            future_import(u"absolute_import", node)
            return new

    def probably_a_local_import(self, imp_name):
        """
        Like the corresponding method in the base class, but this also
        supports Cython modules.
        """
        if imp_name.startswith(u"."):
            # Already-relative imports need no rewriting here.
            return False
        # Only the first dotted component matters for locating the module.
        imp_name = imp_name.split(u".", 1)[0]
        base_path = dirname(self.filename)
        base_path = join(base_path, imp_name)
        # If there is no __init__.py next to the file its not in a package
        # so can't be a relative import.
        if not exists(join(dirname(base_path), "__init__.py")):
            return False
        # ``.pyx`` is the Cython extension added relative to the base class.
        for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd", ".pyx"]:
            if exists(base_path + ext):
                return True
        return False

View File

@ -0,0 +1,26 @@
"""
Fixer for adding:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
This is "stage 1": hopefully uncontroversial changes.
Stage 2 adds ``unicode_literals``.
"""
from lib2to3 import fixer_base
from libfuturize.fixer_util import future_import
class FixAddFutureImportsExceptUnicodeLiterals(fixer_base.BaseFix):
    """Add ``absolute_import``, ``division`` and ``print_function``
    __future__ imports to the file (stage 1; ``unicode_literals`` is
    deliberately left to stage 2)."""

    BM_compatible = True
    PATTERN = "file_input"

    run_order = 9

    def transform(self, node, results):
        # Reverse order: presumably each future_import() call inserts its line
        # at the top of the file, so the last call ends up first — confirm in
        # libfuturize.fixer_util.future_import.
        future_import(u"absolute_import", node)
        future_import(u"division", node)
        future_import(u"print_function", node)

View File

@ -0,0 +1,17 @@
"""
Fixer that adds ``from past.builtins import basestring`` if there is a
reference to ``basestring``
"""
from lib2to3 import fixer_base
from libfuturize.fixer_util import touch_import_top
class FixBasestring(fixer_base.BaseFix):
    """Add ``from past.builtins import basestring`` whenever the bare name
    ``basestring`` is referenced anywhere in the file."""

    BM_compatible = True

    # Match any occurrence of the name 'basestring'.
    PATTERN = "'basestring'"

    def transform(self, node, results):
        touch_import_top(u'past.builtins', 'basestring', node)

View File

@ -0,0 +1,24 @@
"""Optional fixer that changes all unprefixed string literals "..." to b"...".
br'abcd' is a SyntaxError on Python 2 but valid on Python 3.
ur'abcd' is a SyntaxError on Python 3 but valid on Python 2.
"""
from __future__ import unicode_literals
import re
from lib2to3.pgen2 import token
from lib2to3 import fixer_base
_literal_re = re.compile(r"[^bBuUrR]?[\'\"]")
class FixBytes(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING"
def transform(self, node, results):
if node.type == token.STRING:
if _literal_re.match(node.value):
new = node.clone()
new.value = u'b' + new.value
return new

View File

@ -0,0 +1,33 @@
# coding: utf-8
"""
Fixer for the cmp() function on Py2, which was removed in Py3.
Adds this import line::
from past.builtins import cmp
if cmp() is called in the code.
"""
from __future__ import unicode_literals
from lib2to3 import fixer_base
from libfuturize.fixer_util import touch_import_top
# Pattern fragment that binds a bare reference to the name ``cmp``.
expression = "name='cmp'"


class FixCmp(fixer_base.BaseFix):
    """If cmp() is called anywhere, add ``from past.builtins import cmp``."""

    BM_compatible = True
    run_order = 9

    PATTERN = """
              power<
                 ({0}) trailer< '(' args=[any] ')' >
              rest=any* >
              """.format(expression)

    def transform(self, node, results):
        # ``name`` is the Leaf matched for 'cmp'; import that same name from
        # past.builtins rather than hard-coding the string.
        name = results["name"]
        touch_import_top(u'past.builtins', name.value, node)

View File

@ -0,0 +1,12 @@
"""
UNFINISHED
For the ``future`` package.
Adds this import line:
from __future__ import division
at the top so the code runs identically on Py3 and Py2.6/2.7
"""
from libpasteurize.fixes.fix_division import FixDivision

View File

@ -0,0 +1,109 @@
"""
For the ``future`` package.
Adds this import line:
from __future__ import division
at the top and changes any old-style divisions to be calls to
past.utils.old_div so the code runs as before on Py2.6/2.7 and has the same
behaviour on Py3.
If "from __future__ import division" is already in effect, this fixer does
nothing.
"""
import re
from lib2to3.fixer_util import Leaf, Node, Comma
from lib2to3 import fixer_base
from libfuturize.fixer_util import (token, future_import, touch_import_top,
wrap_in_fn_call)
def match_division(node):
u"""
__future__.division redefines the meaning of a single slash for division,
so we match that and only that.
"""
slash = token.SLASH
return node.type == slash and not node.next_sibling.type == slash and \
not node.prev_sibling.type == slash
const_re = re.compile('^[0-9]*[.][0-9]*$')
def is_floaty(node):
return _is_floaty(node.prev_sibling) or _is_floaty(node.next_sibling)
def _is_floaty(expr):
if isinstance(expr, list):
expr = expr[0]
if isinstance(expr, Leaf):
# If it's a leaf, let's see if it's a numeric constant containing a '.'
return const_re.match(expr.value)
elif isinstance(expr, Node):
# If the expression is a node, let's see if it's a direct cast to float
if isinstance(expr.children[0], Leaf):
return expr.children[0].value == u'float'
return False
class FixDivisionSafe(fixer_base.BaseFix):
    """Rewrite each true-division '/' in a term as an ``old_div(a, b)`` call,
    unless ``from __future__ import division`` is already in effect."""

    # BM_compatible = True
    run_order = 4    # this seems to be ignored?
    _accept_type = token.SLASH

    PATTERN = """
    term<(not('/') any)+ '/' ((not('/') any))>
    """

    def start_tree(self, tree, name):
        """
        Skip this fixer if "__future__.division" is already imported.
        """
        super(FixDivisionSafe, self).start_tree(tree, name)
        self.skip = "division" in tree.future_features

    def match(self, node):
        u"""
        Since the tree needs to be fixed once and only once if and only if it
        matches, we can start discarding matches after the first.
        """
        if node.type == self.syms.term:
            matched = False
            skip = False
            children = []
            for child in node.children:
                if skip:
                    # This operand was already consumed as the divisor of the
                    # old_div() call built on the previous iteration.
                    skip = False
                    continue
                if match_division(child) and not is_floaty(child):
                    matched = True

                    # Strip any leading space for the first number:
                    children[0].prefix = u''

                    # Fold everything collected so far plus the divisor into a
                    # single old_div(...) call node.
                    children = [wrap_in_fn_call("old_div",
                                                children + [Comma(), child.next_sibling.clone()],
                                                prefix=node.prefix)]
                    skip = True
                else:
                    children.append(child.clone())
            if matched:
                # In Python 2.6, `Node` does not have the fixers_applied attribute
                # https://github.com/python/cpython/blob/8493c0cd66cfc181ac1517268a74f077e9998701/Lib/lib2to3/pytree.py#L235
                if hasattr(Node, "fixers_applied"):
                    return Node(node.type, children,
                                fixers_applied=node.fixers_applied)
                else:
                    return Node(node.type, children)
        return False

    def transform(self, node, results):
        if self.skip:
            return
        future_import(u"division", node)
        touch_import_top(u'past.utils', u'old_div', node)
        return results

View File

@ -0,0 +1,37 @@
# coding: utf-8
"""
Fixer for the execfile() function on Py2, which was removed in Py3.
The Lib/lib2to3/fixes/fix_execfile.py module has some problems: see
python-future issue #37. This fixer merely imports execfile() from
past.builtins and leaves the code alone.
Adds this import line::
from past.builtins import execfile
for the function execfile() that was removed from Py3.
"""
from __future__ import unicode_literals
from lib2to3 import fixer_base
from libfuturize.fixer_util import touch_import_top
# Pattern fragment that binds a bare reference to the name ``execfile``.
expression = "name='execfile'"


class FixExecfile(fixer_base.BaseFix):
    """If execfile() is called, add ``from past.builtins import execfile``
    and otherwise leave the call site alone (see python-future issue #37)."""

    BM_compatible = True
    run_order = 9

    PATTERN = """
              power<
                 ({0}) trailer< '(' args=[any] ')' >
              rest=any* >
              """.format(expression)

    def transform(self, node, results):
        # Import the matched name itself from past.builtins.
        name = results["name"]
        touch_import_top(u'past.builtins', name.value, node)

View File

@ -0,0 +1,59 @@
"""
For the ``future`` package.
Adds this import line::
from builtins import XYZ
for each of the functions XYZ that is used in the module.
Adds these imports after any other imports (in an initial block of them).
"""
from __future__ import unicode_literals
from lib2to3 import fixer_base
from lib2to3.pygram import python_symbols as syms
from lib2to3.fixer_util import Name, Call, in_special_context
from libfuturize.fixer_util import touch_import_top
# All builtins are:
# from future.builtins.iterators import (filter, map, zip)
# from future.builtins.misc import (ascii, chr, hex, input, isinstance, oct, open, round, super)
# from future.types import (bytes, dict, int, range, str)
# We don't need isinstance any more.
# Builtins whose call sites should trigger ``from builtins import <name>``.
replaced_builtin_fns = '''filter map zip
                       ascii chr hex input next oct
                       bytes range str raw_input'''.split()
# This includes raw_input as a workaround for the
# lib2to3 fixer for raw_input on Py3 (only), allowing
# the correct import to be included. (Py3 seems to run
# the fixers the wrong way around, perhaps ignoring the
# run_order class attribute below ...)

# One "name='...'" alternative per replaced builtin.
expression = '|'.join(["name='{0}'".format(name) for name in replaced_builtin_fns])


class FixFutureBuiltins(fixer_base.BaseFix):
    """Add ``from builtins import XYZ`` for each replaced builtin XYZ that is
    called in the module."""

    BM_compatible = True
    run_order = 7

    # Currently we only match uses as a function. This doesn't match e.g.:
    #     if isinstance(s, str):
    #     ...
    PATTERN = """
              power<
                 ({0}) trailer< '(' [arglist=any] ')' >
              rest=any* >
              |
              power<
                  'map' trailer< '(' [arglist=any] ')' >
              >
              """.format(expression)

    def transform(self, node, results):
        # NOTE(review): the second PATTERN alternative binds no 'name' group,
        # so this lookup would raise KeyError if that branch ever matched on
        # its own; presumably the first alternative (which includes
        # name='map') always wins — confirm against lib2to3 alternation order.
        name = results["name"]
        touch_import_top(u'builtins', name.value, node)
        # name.replace(Name(u"input", prefix=name.prefix))

View File

@ -0,0 +1,24 @@
"""
For the ``future`` package.
Changes any imports needed to reflect the standard library reorganization.
Also adds these import lines:
from future import standard_library
standard_library.install_aliases()
after any __future__ imports but before any other imports.
"""
from lib2to3.fixes.fix_imports import FixImports
from libfuturize.fixer_util import touch_import_top
class FixFutureStandardLibrary(FixImports):
    """Run lib2to3's stdlib import renaming, then ensure the file imports
    ``standard_library`` from ``future`` (per the module docstring, so the
    renamed modules also resolve on Py2)."""

    run_order = 8

    def transform(self, node, results):
        result = super(FixFutureStandardLibrary, self).transform(node, results)
        # TODO: add a blank line between any __future__ imports and this?
        touch_import_top(u'future', u'standard_library', node)
        return result

View File

@ -0,0 +1,28 @@
"""
For the ``future`` package.
A special fixer that ensures that these lines have been added::
from future import standard_library
standard_library.install_hooks()
even if the only module imported was ``urllib``, in which case the regular fixer
wouldn't have added these lines.
"""
from lib2to3.fixes.fix_urllib import FixUrllib
from libfuturize.fixer_util import touch_import_top, find_root
class FixFutureStandardLibraryUrllib(FixUrllib):    # not a subclass of FixImports
    """Like lib2to3's urllib fixer, but also adds the ``future``
    standard_library import even when only ``urllib`` was imported (which the
    regular FixImports-based fixer would miss)."""

    run_order = 8

    def transform(self, node, results):
        # transform_member() in lib2to3/fixes/fix_urllib.py breaks node so find_root(node)
        # no longer works after the super() call below. So we find the root first:
        root = find_root(node)
        result = super(FixFutureStandardLibraryUrllib, self).transform(node, results)
        # TODO: add a blank line between any __future__ imports and this?
        touch_import_top(u'future', u'standard_library', root)
        return result

View File

@ -0,0 +1,32 @@
"""
Fixer for input.
Does a check for `from builtins import input` before running the lib2to3 fixer.
The fixer will not run when the input is already present.
this:
a = input()
becomes:
from builtins import input
a = eval(input())
and this:
from builtins import input
a = input()
becomes (no change):
from builtins import input
a = input()
"""
import lib2to3.fixes.fix_input
from lib2to3.fixer_util import does_tree_import
class FixInput(lib2to3.fixes.fix_input.FixInput):
    """Variant of lib2to3's fix_input that is a no-op when the file already
    has ``from builtins import input`` (input() then already behaves as on
    Py3, so it must not be wrapped in eval())."""

    def transform(self, node, results):
        if does_tree_import('builtins', 'input', node):
            # builtins.input already imported: leave input() calls untouched.
            return

        return super(FixInput, self).transform(node, results)

View File

@ -0,0 +1,262 @@
# coding: utf-8
"""Fixer for __metaclass__ = X -> (future.utils.with_metaclass(X)) methods.
The various forms of classdef (inherits nothing, inherits once, inherits
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# This is a derived work of Lib/lib2to3/fixes/fix_metaclass.py under the
# copyright of the Python Software Foundation, licensed under the Python
# Software Foundation License 2.
#
# Copyright notice:
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013 Python Software Foundation. All rights reserved.
#
# Full license text: http://docs.python.org/3.4/license.html
# Author: Jack Diederich, Daniel Neuhäuser
# Local imports
from lib2to3 import fixer_base
from lib2to3.pygram import token
from lib2to3.fixer_util import Name, syms, Node, Leaf, touch_import, Call, \
String, Comma, parenthesize
def has_metaclass(parent):
    """ we have to check the cls_node without changing it.
        There are two possibilities:
           1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
           2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
    """
    for child in parent.children:
        if child.type == syms.suite:
            # Multi-line class: the statements live inside the suite.
            return has_metaclass(child)
        if child.type != syms.simple_stmt or not child.children:
            continue
        expr_node = child.children[0]
        if expr_node.type != syms.expr_stmt or not expr_node.children:
            continue
        lhs = expr_node.children[0]
        if isinstance(lhs, Leaf) and lhs.value == '__metaclass__':
            return True
    return False
def fixup_parse_tree(cls_node):
    """ one-line classes don't get a suite in the parse tree so we add
        one to normalize the tree

    Mutates *cls_node* in place and returns None.  Raises ValueError when the
    classdef has neither a suite nor a ':' (i.e. is not a valid class node).
    """
    for node in cls_node.children:
        if node.type == syms.suite:
            # already in the preferred format, do nothing
            return

    # !%@#! one-liners have no suite node, we have to fake one up
    for i, node in enumerate(cls_node.children):
        if node.type == token.COLON:
            break
    else:
        raise ValueError("No class suite and no ':'!")

    # move everything after the ':' into a fresh suite node
    suite = Node(syms.suite, [])
    while cls_node.children[i+1:]:
        move_node = cls_node.children[i+1]
        suite.append_child(move_node.clone())
        move_node.remove()
    cls_node.append_child(suite)
    # (removed dead trailing assignment ``node = suite`` — the local was
    # never read afterwards)
def fixup_simple_stmt(parent, i, stmt_node):
    """ if there is a semi-colon all the parts count as part of the same
        simple_stmt.  We just want the __metaclass__ part so we move
        everything after the semi-colon into its own simple_stmt node
    """
    for semi_ind, node in enumerate(stmt_node.children):
        if node.type == token.SEMI: # *sigh*
            break
    else:
        # No semicolon: nothing to split off.
        return

    node.remove() # kill the semicolon
    new_expr = Node(syms.expr_stmt, [])
    new_stmt = Node(syms.simple_stmt, [new_expr])
    # Move everything that followed the semicolon into the new statement.
    while stmt_node.children[semi_ind:]:
        move_node = stmt_node.children[semi_ind]
        new_expr.append_child(move_node.clone())
        move_node.remove()
    parent.insert_child(i, new_stmt)
    # Carry the original leading whitespace over to the new first leaf so the
    # emitted source keeps its formatting.
    new_leaf1 = new_stmt.children[0].children[0]
    old_leaf1 = stmt_node.children[0].children[0]
    new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
    """Drop *node*'s last child if it is a NEWLINE token (in-place no-op
    otherwise)."""
    if not node.children:
        return
    last = node.children[-1]
    if last.type == token.NEWLINE:
        last.remove()
def find_metas(cls_node):
    """Yield (suite, index, simple_stmt) for each ``__metaclass__ = ...``
    assignment found directly inside *cls_node*'s suite, splitting and
    trimming each matched statement as a side effect."""
    # find the suite node (Mmm, sweet nodes)
    for node in cls_node.children:
        if node.type == syms.suite:
            break
    else:
        raise ValueError("No class suite!")

    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
    for i, simple_node in list(enumerate(node.children)):
        if simple_node.type == syms.simple_stmt and simple_node.children:
            expr_node = simple_node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                # Check if the expr_node is a simple assignment.
                left_node = expr_node.children[0]
                if isinstance(left_node, Leaf) and \
                        left_node.value == u'__metaclass__':
                    # We found an assignment to __metaclass__.
                    fixup_simple_stmt(node, i, simple_node)
                    remove_trailing_newline(simple_node)
                    yield (node, i, simple_node)
def fixup_indent(suite):
    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
        Otherwise we get in trouble when removing __metaclass__ at suite start
    """
    # Work on a reversed copy so .pop() walks the children left-to-right.
    kids = suite.children[::-1]
    # find the first indent
    while kids:
        node = kids.pop()
        if node.type == token.INDENT:
            break

    # find the first Leaf
    while kids:
        node = kids.pop()
        if isinstance(node, Leaf) and node.type != token.DEDENT:
            if node.prefix:
                # The INDENT token already carries the indentation; an extra
                # prefix here would double it once __metaclass__ is removed.
                node.prefix = u''
            return
        else:
            # Descend into non-leaf children, preserving traversal order.
            kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
    """Rewrite ``__metaclass__ = X`` class bodies into
    ``class C(with_metaclass(X, ...))`` using future.utils.with_metaclass."""

    BM_compatible = True

    PATTERN = """
    classdef<any*>
    """

    def transform(self, node, results):
        if not has_metaclass(node):
            return
        fixup_parse_tree(node)

        # find metaclasses, keep the last one
        last_metaclass = None
        for suite, i, stmt in find_metas(node):
            last_metaclass = stmt
            stmt.remove()

        text_type = node.children[0].type # always Leaf(nnn, 'class')

        # figure out what kind of classdef we have
        if len(node.children) == 7:
            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
            #                 0        1       2    3        4    5    6
            if node.children[3].type == syms.arglist:
                arglist = node.children[3]
            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
            else:
                # Single base class: wrap it in an arglist for uniformity.
                parent = node.children[3].clone()
                arglist = Node(syms.arglist, [parent])
                node.set_child(3, arglist)
        elif len(node.children) == 6:
            # Node(classdef, ['class', 'name', '(',  ')', ':', suite])
            #                 0        1       2     3    4    5
            arglist = Node(syms.arglist, [])
            node.insert_child(3, arglist)
        elif len(node.children) == 4:
            # Node(classdef, ['class', 'name', ':', suite])
            #                 0        1       2    3
            arglist = Node(syms.arglist, [])
            node.insert_child(2, Leaf(token.RPAR, u')'))
            node.insert_child(2, arglist)
            node.insert_child(2, Leaf(token.LPAR, u'('))
        else:
            raise ValueError("Unexpected class definition")

        # now stick the metaclass in the arglist
        meta_txt = last_metaclass.children[0].children[0]
        meta_txt.value = 'metaclass'
        orig_meta_prefix = meta_txt.prefix

        # Was: touch_import(None, u'future.utils', node)
        touch_import(u'future.utils', u'with_metaclass', node)

        # The metaclass expression is the RHS of the (split-off) assignment.
        metaclass = last_metaclass.children[0].children[2].clone()
        metaclass.prefix = u''

        arguments = [metaclass]

        if arglist.children:
            if len(arglist.children) == 1:
                base = arglist.children[0].clone()
                base.prefix = u' '
            else:
                # Unfortunately six.with_metaclass() only allows one base
                # class, so we have to dynamically generate a base class if
                # there is more than one.
                bases = parenthesize(arglist.clone())
                bases.prefix = u' '
                base = Call(Name('type'), [
                    String("'NewBase'"),
                    Comma(),
                    bases,
                    Comma(),
                    Node(
                        syms.atom,
                        [Leaf(token.LBRACE, u'{'), Leaf(token.RBRACE, u'}')],
                        prefix=u' '
                    )
                ], prefix=u' ')
            arguments.extend([Comma(), base])

        arglist.replace(Call(
            Name(u'with_metaclass', prefix=arglist.prefix),
            arguments
        ))

        fixup_indent(suite)

        # check for empty suite
        if not suite.children:
            # one-liner that was just __metaclass_
            suite.remove()
            pass_leaf = Leaf(text_type, u'pass')
            pass_leaf.prefix = orig_meta_prefix
            node.append_child(pass_leaf)
            node.append_child(Leaf(token.NEWLINE, u'\n'))

        elif len(suite.children) > 1 and \
                (suite.children[-2].type == token.INDENT and
                 suite.children[-1].type == token.DEDENT):
            # there was only one line in the class body and it was __metaclass__
            pass_leaf = Leaf(text_type, u'pass')
            suite.insert_child(-1, pass_leaf)
            suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))

View File

@ -0,0 +1,104 @@
"""
Based on fix_next.py by Collin Winter.
Replaces it.next() -> next(it), per PEP 3114.
Unlike fix_next.py, this fixer doesn't replace the name of a next method with __next__,
which would break Python 2 compatibility without further help from fixers in
stage 2.
"""
# Local imports
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as syms
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, Call, find_binding
# Warning emitted when a global binding of 'next' may shadow the builtin.
bind_warning = "Calls to builtin next() possibly shadowed by global binding"


class FixNextCall(fixer_base.BaseFix):
    """Replace ``obj.next()`` with ``next(obj)`` (PEP 3114) without renaming
    ``next`` methods to ``__next__`` (that is left to stage 2 fixers)."""

    BM_compatible = True
    PATTERN = """
    power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
    |
    power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
    |
    global=global_stmt< 'global' any* 'next' any* >
    """

    order = "pre" # Pre-order tree traversal

    def start_tree(self, tree, filename):
        super(FixNextCall, self).start_tree(tree, filename)

        # If the file binds the name 'next' itself, rewriting calls to the
        # builtin would change meaning — warn and suppress the rewrite.
        n = find_binding('next', tree)
        if n:
            self.warning(n, bind_warning)
            self.shadowed_next = True
        else:
            self.shadowed_next = False

    def transform(self, node, results):
        assert results

        base = results.get("base")
        attr = results.get("attr")
        # NOTE(review): PATTERN binds no 'name' group, so this is always None
        # and the ``elif name:`` branch below is dead — looks like a leftover
        # from the upstream fix_next this was derived from; confirm.
        name = results.get("name")

        if base:
            if self.shadowed_next:
                # Omit this:
                # attr.replace(Name("__next__", prefix=attr.prefix))
                pass
            else:
                base = [n.clone() for n in base]
                base[0].prefix = ""
                node.replace(Call(Name("next", prefix=node.prefix), base))
        elif name:
            # Omit this:
            # n = Name("__next__", prefix=name.prefix)
            # name.replace(n)
            pass
        elif attr:
            # We don't do this transformation if we're assigning to "x.next".
            # Unfortunately, it doesn't seem possible to do this in PATTERN,
            # so it's being done here.
            if is_assign_target(node):
                head = results["head"]
                if "".join([str(n) for n in head]).strip() == '__builtin__':
                    self.warning(node, bind_warning)
                return
            # Omit this:
            # attr.replace(Name("__next__"))
        elif "global" in results:
            self.warning(node, bind_warning)
            self.shadowed_next = True
### The following functions help test if node is part of an assignment
### target.

def is_assign_target(node):
    """True when *node* appears before the '=' of its enclosing expr_stmt,
    i.e. it is (part of) an assignment target."""
    assign = find_assign(node)
    if assign is None:
        return False

    for child in assign.children:
        # Everything up to the first '=' is a target; stop once we pass it.
        if child.type == token.EQUAL:
            return False
        if is_subtree(child, node):
            return True
    return False


def find_assign(node):
    """Walk upwards to the nearest enclosing expr_stmt; None if there is none
    before hitting a simple_stmt or the tree root."""
    current = node
    while current is not None:
        if current.type == syms.expr_stmt:
            return current
        if current.type == syms.simple_stmt:
            return None
        current = current.parent
    return None


def is_subtree(root, node):
    """True when *node* equals *root* or occurs anywhere beneath it."""
    if root == node:
        return True
    return any(is_subtree(child, node) for child in root.children)

View File

@ -0,0 +1,17 @@
"""
Fixer that adds ``from builtins import object`` if there is a line
like this:
class Foo(object):
"""
from lib2to3 import fixer_base
from libfuturize.fixer_util import touch_import_top
class FixObject(fixer_base.BaseFix):
    """Add ``from builtins import object`` when the file contains a class
    that explicitly inherits from ``object`` (``class Foo(object):``)."""

    # Match a classdef whose single base is the bare name 'object'.
    PATTERN = u"classdef< 'class' NAME '(' name='object' ')' colon=':' any >"

    def transform(self, node, results):
        touch_import_top(u'builtins', 'object', node)

View File

@ -0,0 +1,39 @@
"""
For the ``future`` package.
Adds this import line:
from past.builtins import str as oldstr
at the top and wraps any unadorned string literals 'abc' or explicit byte-string
literals b'abc' in oldstr() calls so the code has the same behaviour on Py3 as
on Py2.6/2.7.
"""
from __future__ import unicode_literals
import re
from lib2to3 import fixer_base
from lib2to3.pgen2 import token
from lib2to3.fixer_util import syms
from libfuturize.fixer_util import (future_import, touch_import_top,
wrap_in_fn_call)
# Matches string literals with no u/U/r/R prefix; note 'b' is deliberately
# NOT excluded, so byte-string literals are wrapped too (see module docstring).
_literal_re = re.compile(r"[^uUrR]?[\'\"]")


class FixOldstrWrap(fixer_base.BaseFix):
    """Wrap unadorned and b'' string literals in ``oldstr(...)`` so they keep
    Py2 ``str`` semantics on Py3."""

    BM_compatible = True
    PATTERN = "STRING"

    def transform(self, node, results):
        if node.type == token.STRING:
            # NOTE(review): the import is touched for EVERY string literal,
            # even ones the regex below then skips — presumably harmless but
            # possibly unintended; confirm.
            touch_import_top(u'past.types', u'oldstr', node)
            if _literal_re.match(node.value):
                new = node.clone()
                # Strip any leading space or comments:
                # TODO: check: do we really want to do this?
                new.prefix = u''
                new.value = u'b' + new.value
                wrapped = wrap_in_fn_call("oldstr", [new], prefix=node.prefix)
                return wrapped

View File

@ -0,0 +1,36 @@
"""
UNFINISHED
Fixer for turning multiple lines like these:
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
into a single line like this:
from __future__ import (absolute_import, division, print_function)
This helps with testing of ``futurize``.
"""
from lib2to3 import fixer_base
from libfuturize.fixer_util import future_import
class FixOrderFutureImports(fixer_base.BaseFix):
    # Matches the whole module; the merging (once written) would operate on
    # the file_input node so it can see every __future__ line at once.
    BM_compatible = True
    PATTERN = "file_input"
    run_order = 10
    # def match(self, node):
    #     """
    #     Match only once per file
    #     """
    #     if hasattr(node, 'type') and node.type == syms.file_input:
    #         return True
    #     return False
    def transform(self, node, results):
        """Collapse multiple __future__ import lines into one (UNFINISHED).

        Currently a deliberate no-op: returning None tells the fixer
        framework that no change was made.
        """
        # TODO # write me
        pass

View File

@ -0,0 +1,104 @@
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for print.
Change:
"print" into "print()"
"print ..." into "print(...)"
"print(...)" not changed
"print ... ," into "print(..., end=' ')"
"print >>x, ..." into "print(..., file=x)"
No changes are applied if print_function is imported from __future__
"""
# Local imports
from lib2to3 import patcomp, pytree, fixer_base
from lib2to3.pgen2 import token
from lib2to3.fixer_util import Name, Call, Comma, String
# from libmodernize import add_future
# Matches a single expression that is already wrapped in parentheses, so the
# fixer can avoid producing doubled parens like ``print((x))``.
parend_expr = patcomp.compile_pattern(
              """atom< '(' [arith_expr|atom|power|term|STRING|NAME] ')' >"""
              )
class FixPrint(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
              simple_stmt< any* bare='print' any* > | print_stmt
              """
    def transform(self, node, results):
        """Rewrite a Py2 print statement as a ``print(...)`` call.

        Returns the replacement node, or None when a bare ``print`` was
        replaced in place or an already-parenthesised form is left alone.
        """
        assert results
        bare_print = results.get("bare")
        if bare_print:
            # Special-case print all by itself.
            bare_print.replace(Call(Name(u"print"), [],
                               prefix=bare_print.prefix))
            # The "from __future__ import print_function"" declaration is added
            # by the fix_print_with_import fixer, so we skip it here.
            # add_future(node, u'print_function')
            return
        assert node.children[0] == Name(u"print")
        args = node.children[1:]
        if len(args) == 1 and parend_expr.match(args[0]):
            # We don't want to keep sticking parens around an
            # already-parenthesised expression.
            return
        # NOTE(review): ``file`` intentionally mirrors the print() keyword
        # name but shadows the ``file`` builtin inside this method.
        sep = end = file = None
        if args and args[-1] == Comma():
            # A trailing comma in Py2 suppresses the newline -> end=' '.
            args = args[:-1]
            end = " "
            # try to determine if the string ends in a non-space whitespace character, in which
            # case there should be no space at the end of the conversion
            string_leaves = [leaf for leaf in args[-1].leaves() if leaf.type == token.STRING]
            if (
                # NOTE(review): only a lowercase single-letter 'r' prefix is
                # recognised here; R"..." / bR"..." raw strings would be
                # mis-detected as ending in a real escape -- confirm intended.
                string_leaves
                and string_leaves[-1].value[0] != "r"  # "raw" string
                and string_leaves[-1].value[-3:-1] in (r"\t", r"\n", r"\r")
            ):
                end = ""
        if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, u">>"):
            # ``print >>f, ...`` form: second child is the file expression.
            assert len(args) >= 2
            file = args[1].clone()
            args = args[3:]  # Strip a possible comma after the file expression
        # Now synthesize a print(args, sep=..., end=..., file=...) node.
        l_args = [arg.clone() for arg in args]
        if l_args:
            l_args[0].prefix = u""
        if sep is not None or end is not None or file is not None:
            if sep is not None:
                self.add_kwarg(l_args, u"sep", String(repr(sep)))
            if end is not None:
                self.add_kwarg(l_args, u"end", String(repr(end)))
            if file is not None:
                self.add_kwarg(l_args, u"file", file)
        n_stmt = Call(Name(u"print"), l_args)
        n_stmt.prefix = node.prefix
        # Note that there are corner cases where adding this future-import is
        # incorrect, for example when the file also has a 'print ()' statement
        # that was intended to print "()".
        # add_future(node, u'print_function')
        return n_stmt
    def add_kwarg(self, l_nodes, s_kwd, n_expr):
        """Append ``s_kwd=n_expr`` (with a separating comma) to l_nodes in place."""
        # XXX All this prefix-setting may lose comments (though rarely)
        n_expr.prefix = u""
        n_argument = pytree.Node(self.syms.argument,
                                 (Name(s_kwd),
                                  pytree.Leaf(token.EQUAL, u"="),
                                  n_expr))
        if l_nodes:
            l_nodes.append(Comma())
            n_argument.prefix = u" "
        l_nodes.append(n_argument)

View File

@ -0,0 +1,22 @@
"""
For the ``future`` package.
Turns any print statements into functions and adds this import line:
from __future__ import print_function
at the top to retain compatibility with Python 2.6+.
"""
from libfuturize.fixes.fix_print import FixPrint
from libfuturize.fixer_util import future_import
class FixPrintWithImport(FixPrint):
    """Print fixer that also inserts ``from __future__ import print_function``."""
    run_order = 7

    def transform(self, node, results):
        # Add the __future__ import first. (Otherwise any shebang or encoding
        # comment line attached as a prefix to the print statement will be
        # copied twice and appear twice.)
        future_import(u'print_function', node)
        return super(FixPrintWithImport, self).transform(node, results)

View File

@ -0,0 +1,107 @@
"""Fixer for 'raise E, V'
From Armin Ronacher's ``python-modernize``.
raise -> raise
raise E -> raise E
raise E, 5 -> raise E(5)
raise E, 5, T -> raise E(5).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)
raise (((E, E'), E''), E'''), 5 -> raise E(5)
raise "foo", V, T -> warns about string exceptions
raise E, (V1, V2) -> raise E(V1, V2)
raise E, (V1, V2), T -> raise E(V1, V2).with_traceback(T)
CAVEATS:
1) "raise E, V, T" cannot be translated safely in general. If V
is not a tuple or a (number, string, None) literal, then:
raise E, V, T -> from future.utils import raise_
raise_(E, V, T)
"""
# Author: Collin Winter, Armin Ronacher, Mark Huang
# Local imports
from lib2to3 import pytree, fixer_base
from lib2to3.pgen2 import token
from lib2to3.fixer_util import Name, Call, is_tuple, Comma, Attr, ArgList
from libfuturize.fixer_util import touch_import_top
class FixRaise(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
    """
    def transform(self, node, results):
        """Convert ``raise E[, V[, T]]`` into a Py3-compatible raise.

        When V's runtime type cannot be determined statically, emit a call
        to ``future.utils.raise_`` instead (see module docstring CAVEATS).
        """
        syms = self.syms
        exc = results["exc"].clone()
        if exc.type == token.STRING:
            # String exceptions were removed in Py2.6/Py3; refuse to convert.
            msg = "Python 3 does not support string exceptions"
            self.cannot_convert(node, msg)
            return
        # Python 2 supports
        #  raise ((((E1, E2), E3), E4), E5), V
        # as a synonym for
        #  raise E1, V
        # Since Python 3 will not support this, we recurse down any tuple
        # literals, always taking the first element.
        if is_tuple(exc):
            while is_tuple(exc):
                # exc.children[1:-1] is the unparenthesized tuple
                # exc.children[1].children[0] is the first element of the tuple
                exc = exc.children[1].children[0].clone()
            exc.prefix = u" "
        if "tb" in results:
            tb = results["tb"].clone()
        else:
            tb = None
        if "val" in results:
            val = results["val"].clone()
            if is_tuple(val):
                # Assume that exc is a subclass of Exception and call exc(*val).
                args = [c.clone() for c in val.children[1:-1]]
                exc = Call(exc, args)
            elif val.type in (token.NUMBER, token.STRING):
                # Handle numeric and string literals specially, e.g.
                # "raise Exception, 5" -> "raise Exception(5)".
                val.prefix = u""
                exc = Call(exc, [val])
            elif val.type == token.NAME and val.value == u"None":
                # Handle None specially, e.g.
                # "raise Exception, None" -> "raise Exception".
                pass
            else:
                # val is some other expression. If val evaluates to an instance
                # of exc, it should just be raised. If val evaluates to None,
                # a default instance of exc should be raised (as above). If val
                # evaluates to a tuple, exc(*val) should be called (as
                # above). Otherwise, exc(val) should be called. We can only
                # tell what to do at runtime, so defer to future.utils.raise_(),
                # which handles all of these cases.
                touch_import_top(u"future.utils", u"raise_", node)
                exc.prefix = u""
                args = [exc, Comma(), val]
                if tb is not None:
                    args += [Comma(), tb]
                return Call(Name(u"raise_"), args, prefix=node.prefix)
        if tb is not None:
            # Attach the traceback via exc.with_traceback(tb).
            tb.prefix = ""
            exc_list = Attr(exc, Name('with_traceback')) + [ArgList([tb])]
        else:
            exc_list = [exc]
        return pytree.Node(syms.raise_stmt,
                           [Name(u"raise")] + exc_list,
                           prefix=node.prefix)

View File

@ -0,0 +1,26 @@
"""
Fixer for removing any of these lines:
from __future__ import with_statement
from __future__ import nested_scopes
from __future__ import generators
The reason is that __future__ imports like these are required to be the first
line of code (after docstrings) on Python 2.6+, which can get in the way.
These imports are always enabled in Python 2.6+, which is the minimum sane
version to target for Py2/3 compatibility.
"""
from lib2to3 import fixer_base
from libfuturize.fixer_util import remove_future_import
class FixRemoveOldFutureImports(fixer_base.BaseFix):
    """Drop __future__ imports that are unconditionally on from Py2.6 up."""
    BM_compatible = True
    PATTERN = "file_input"
    run_order = 1

    def transform(self, node, results):
        # Each of these features is always enabled on the supported Python
        # versions, so their import lines only get in the way.
        for obsolete_feature in (u"with_statement",
                                 u"nested_scopes",
                                 u"generators"):
            remove_future_import(obsolete_feature, node)

View File

@ -0,0 +1,24 @@
"""Fixer that changes unicode to str and unichr to chr, but -- unlike the
lib2to3 fix_unicode.py fixer, does not change u"..." into "...".
The reason is that Py3.3+ supports the u"..." string prefix, and, if
present, the prefix may provide useful information for disambiguating
between byte strings and unicode strings, which is often the hardest part
of the porting task.
"""
from lib2to3.pgen2 import token
from lib2to3 import fixer_base
_mapping = {u"unichr" : u"chr", u"unicode" : u"str"}
class FixUnicodeKeepU(fixer_base.BaseFix):
    """Rename ``unicode`` -> ``str`` and ``unichr`` -> ``chr`` (names only)."""
    BM_compatible = True
    PATTERN = "'unicode' | 'unichr'"

    def transform(self, node, results):
        if node.type != token.NAME:
            return None
        replacement = node.clone()
        replacement.value = _mapping[node.value]
        return replacement

View File

@ -0,0 +1,18 @@
"""
Adds this import:
from __future__ import unicode_literals
"""
from lib2to3 import fixer_base
from libfuturize.fixer_util import future_import
class FixUnicodeLiteralsImport(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "file_input"
run_order = 9
def transform(self, node, results):
future_import(u"unicode_literals", node)

View File

@ -0,0 +1,20 @@
"""
For the ``future`` package.
Turns any xrange calls into range calls and adds this import line:
from builtins import range
at the top.
"""
from lib2to3.fixes.fix_xrange import FixXrange
from libfuturize.fixer_util import touch_import_top
class FixXrangeWithImport(FixXrange):
    """xrange fixer that also adds ``from builtins import range`` at the top."""

    def transform(self, node, results):
        # Delegate the actual xrange -> range rewrite to the lib2to3 fixer,
        # then make sure the compatibility import is present.
        transformed = super(FixXrangeWithImport, self).transform(node, results)
        touch_import_top('builtins', 'range', node)
        return transformed