" % (ensure_new_type(self.faultCode),
+ ensure_new_type(self.faultString))
+
+# --------------------------------------------------------------------
+# Special values
+
+##
+# Backwards compatibility
+
# Backwards-compatible aliases: XML-RPC booleans map directly onto Python's bool.
boolean = Boolean = bool
+
+##
+# Wrapper for XML-RPC DateTime values. This converts a time value to
+# the format used by XML-RPC.
+#
+# The value can be given as a datetime object, as a string in the
+# format "yyyymmddThh:mm:ss", as a 9-item time tuple (as returned by
+# time.localtime()), or an integer value (as returned by time.time()).
+# The wrapper uses time.localtime() to convert an integer to a time
+# tuple.
+#
+# @param value The time, given as a datetime object, an ISO 8601 string,
+# a time tuple, or an integer time value.
+
+
+### For Python-Future:
+def _iso8601_format(value):
+ return "%04d%02d%02dT%02d:%02d:%02d" % (
+ value.year, value.month, value.day,
+ value.hour, value.minute, value.second)
+###
+# Issue #13305: different format codes across platforms
+# _day0 = datetime(1, 1, 1)
+# if _day0.strftime('%Y') == '0001': # Mac OS X
+# def _iso8601_format(value):
+# return value.strftime("%Y%m%dT%H:%M:%S")
+# elif _day0.strftime('%4Y') == '0001': # Linux
+# def _iso8601_format(value):
+# return value.strftime("%4Y%m%dT%H:%M:%S")
+# else:
+# def _iso8601_format(value):
+# return value.strftime("%Y%m%dT%H:%M:%S").zfill(17)
+# del _day0
+
+
+def _strftime(value):
+ if isinstance(value, datetime):
+ return _iso8601_format(value)
+
+ if not isinstance(value, (tuple, time.struct_time)):
+ if value == 0:
+ value = time.time()
+ value = time.localtime(value)
+
+ return "%04d%02d%02dT%02d:%02d:%02d" % value[:6]
+
class DateTime(object):
    """DateTime wrapper for an ISO 8601 string or time tuple or
    localtime integer value to generate 'dateTime.iso8601' XML-RPC
    value.
    """

    def __init__(self, value=0):
        # Strings are assumed to already be "yyyymmddThh:mm:ss"; anything
        # else (datetime, time tuple, epoch number) is converted.
        if isinstance(value, str):
            self.value = value
        else:
            self.value = _strftime(value)

    def make_comparable(self, other):
        """Reduce (self, other) to a pair of directly comparable values.

        Raises TypeError if *other* is not a DateTime, datetime, string,
        or an object with a timetuple() method.
        """
        if isinstance(other, DateTime):
            s = self.value
            o = other.value
        elif isinstance(other, datetime):
            s = self.value
            o = _iso8601_format(other)
        elif isinstance(other, str):
            s = self.value
            o = other
        elif hasattr(other, "timetuple"):
            s = self.timetuple()
            o = other.timetuple()
        else:
            otype = (hasattr(other, "__class__")
                     and other.__class__.__name__
                     or type(other))
            raise TypeError("Can't compare %s and %s" %
                            (self.__class__.__name__, otype))
        return s, o

    def __lt__(self, other):
        s, o = self.make_comparable(other)
        return s < o

    def __le__(self, other):
        s, o = self.make_comparable(other)
        return s <= o

    def __gt__(self, other):
        s, o = self.make_comparable(other)
        return s > o

    def __ge__(self, other):
        s, o = self.make_comparable(other)
        return s >= o

    def __eq__(self, other):
        s, o = self.make_comparable(other)
        return s == o

    def __ne__(self, other):
        s, o = self.make_comparable(other)
        return s != o

    def timetuple(self):
        """Return the wrapped value as a time.struct_time."""
        return time.strptime(self.value, "%Y%m%dT%H:%M:%S")

    ##
    # Get date/time value.
    #
    # @return Date/time value, as an ISO 8601 string.

    def __str__(self):
        return self.value

    def __repr__(self):
        # Restored format string (the angle-bracket text had been stripped
        # from this copy); matches stdlib xmlrpc.client.
        return "<DateTime %r at %x>" % (ensure_new_type(self.value), id(self))

    def decode(self, data):
        """Set the value from XML element text (whitespace is trimmed)."""
        self.value = str(data).strip()

    def encode(self, out):
        """Write the value to *out* as a dateTime.iso8601 value element."""
        # Restored element tags (stripped from this copy).
        out.write("<value><dateTime.iso8601>")
        out.write(self.value)
        out.write("</dateTime.iso8601></value>\n")
+
def _datetime(data):
    """Decode XML element contents into a DateTime wrapper."""
    wrapper = DateTime()
    wrapper.decode(data)
    return wrapper
+
+def _datetime_type(data):
+ return datetime.strptime(data, "%Y%m%dT%H:%M:%S")
+
+##
+# Wrapper for binary data. This can be used to transport any kind
+# of binary data over XML-RPC, using BASE64 encoding.
+#
+# @param data An 8-bit string containing arbitrary data.
+
class Binary(object):
    """Wrapper for binary data, transported over XML-RPC as BASE64 text."""

    def __init__(self, data=None):
        """Wrap *data* (bytes or bytearray, copied); None means empty."""
        if data is None:
            data = b""
        else:
            if not isinstance(data, (bytes, bytearray)):
                raise TypeError("expected bytes or bytearray, not %s" %
                                data.__class__.__name__)
            data = bytes(data)  # Make a copy of the bytes!
        self.data = data

    ##
    # Get buffer contents.
    #
    # @return Buffer contents, as an 8-bit string.

    def __str__(self):
        # latin-1 maps every byte to the code point of the same value.
        return str(self.data, "latin-1")  # XXX encoding?!

    def __eq__(self, other):
        if isinstance(other, Binary):
            other = other.data
        return self.data == other

    def __ne__(self, other):
        if isinstance(other, Binary):
            other = other.data
        return self.data != other

    def decode(self, data):
        """Replace the payload by BASE64-decoding *data* (bytes)."""
        self.data = base64.decodebytes(data)

    def encode(self, out):
        """Write the payload to *out* as a base64 XML-RPC value element."""
        # Restored element tags (stripped from this copy); matches stdlib
        # xmlrpc.client.
        out.write("<value><base64>\n")
        encoded = base64.encodebytes(self.data)
        out.write(encoded.decode('ascii'))
        out.write("</base64></value>\n")
+
def _binary(data):
    """Decode XML element contents into a Binary wrapper."""
    wrapper = Binary()
    wrapper.decode(data)
    return wrapper
+
# Types that marshal themselves through their own encode() method.
WRAPPERS = (DateTime, Binary)
+
+# --------------------------------------------------------------------
+# XML parsers
+
class ExpatParser(object):
    # Thin adapter that routes expat events straight into an
    # Unmarshaller-style target (start/end/data/xml handlers).
    def __init__(self, target):
        parser = expat.ParserCreate(None, None)
        self._parser = parser
        self._target = target
        parser.StartElementHandler = target.start
        parser.EndElementHandler = target.end
        parser.CharacterDataHandler = target.data
        target.xml(None, None)

    def feed(self, data):
        """Push a chunk of XML text into the parser."""
        self._parser.Parse(data, 0)

    def close(self):
        """Signal end of input and break the parser/target reference cycle."""
        self._parser.Parse("", 1)  # end of data
        del self._target, self._parser
+
+# --------------------------------------------------------------------
+# XML-RPC marshalling and unmarshalling code
+
+##
+# XML-RPC marshaller.
+#
+# @param encoding Default encoding for 8-bit strings. The default
+# value is None (interpreted as UTF-8).
+# @see dumps
+
class Marshaller(object):
    """Generate an XML-RPC params chunk from a Python data structure.

    Create a Marshaller instance for each set of parameters, and use
    the "dumps" method to convert your data (represented as a tuple)
    to an XML-RPC params chunk. To write a fault response, pass a
    Fault instance instead. You may prefer to use the "dumps" module
    function for this purpose.
    """

    # by the way, if you don't understand what's going on in here,
    # that's perfectly ok.
    #
    # NOTE: the XML element literals below were restored from the stdlib
    # xmlrpc.client implementation; they had been stripped from this copy.

    def __init__(self, encoding=None, allow_none=False):
        self.memo = {}          # ids of containers on the current dump path
        self.data = None
        self.encoding = encoding
        self.allow_none = allow_none

    dispatch = {}

    def dumps(self, values):
        """Marshal a tuple of parameters, or a Fault, into XML text."""
        out = []
        write = out.append
        dump = self.__dump
        if isinstance(values, Fault):
            # fault instance
            write("<fault>\n")
            dump({'faultCode': values.faultCode,
                  'faultString': values.faultString},
                 write)
            write("</fault>\n")
        else:
            # parameter block
            # FIXME: the xml-rpc specification allows us to leave out
            # the entire <params> block if there are no parameters.
            # however, changing this may break older code (including
            # old versions of xmlrpclib.py), so this is better left as
            # is for now. See @XMLRPC3 for more information. /F
            write("<params>\n")
            for v in values:
                write("<param>\n")
                dump(v, write)
                write("</param>\n")
            write("</params>\n")
        result = "".join(out)
        return str(result)

    def __dump(self, value, write):
        # Dispatch on the concrete (future-normalized) type of the value.
        try:
            f = self.dispatch[type(ensure_new_type(value))]
        except KeyError:
            # check if this object can be marshalled as a structure
            if not hasattr(value, '__dict__'):
                raise TypeError("cannot marshal %s objects" % type(value))
            # check if this class is a sub-class of a basic type,
            # because we don't know how to marshal these types
            # (e.g. a string sub-class)
            for type_ in type(value).__mro__:
                if type_ in self.dispatch.keys():
                    raise TypeError("cannot marshal %s objects" % type(value))
            # XXX(twouters): using "_arbitrary_instance" as key as a quick-fix
            # for the p3yk merge, this should probably be fixed more neatly.
            f = self.dispatch["_arbitrary_instance"]
        f(self, value, write)

    def dump_nil(self, value, write):
        if not self.allow_none:
            raise TypeError("cannot marshal None unless allow_none is enabled")
        write("<value><nil/></value>")
    dispatch[type(None)] = dump_nil

    def dump_bool(self, value, write):
        write("<value><boolean>")
        write(value and "1" or "0")
        write("</boolean></value>\n")
    dispatch[bool] = dump_bool

    def dump_long(self, value, write):
        # XML-RPC ints are 32-bit; refuse anything outside that range.
        if value > MAXINT or value < MININT:
            raise OverflowError("long int exceeds XML-RPC limits")
        write("<value><int>")
        write(str(int(value)))
        write("</int></value>\n")
    dispatch[int] = dump_long

    # backward compatible
    dump_int = dump_long

    def dump_double(self, value, write):
        write("<value><double>")
        write(repr(ensure_new_type(value)))
        write("</double></value>\n")
    dispatch[float] = dump_double

    def dump_unicode(self, value, write, escape=escape):
        write("<value><string>")
        write(escape(value))
        write("</string></value>\n")
    dispatch[str] = dump_unicode

    def dump_bytes(self, value, write):
        write("<value><base64>\n")
        encoded = base64.encodebytes(value)
        write(encoded.decode('ascii'))
        write("</base64></value>\n")
    dispatch[bytes] = dump_bytes
    dispatch[bytearray] = dump_bytes

    def dump_array(self, value, write):
        # memo guards against marshalling self-referential sequences.
        i = id(value)
        if i in self.memo:
            raise TypeError("cannot marshal recursive sequences")
        self.memo[i] = None
        dump = self.__dump
        write("<value><array><data>\n")
        for v in value:
            dump(v, write)
        write("</data></array></value>\n")
        del self.memo[i]
    dispatch[tuple] = dump_array
    dispatch[list] = dump_array

    def dump_struct(self, value, write, escape=escape):
        i = id(value)
        if i in self.memo:
            raise TypeError("cannot marshal recursive dictionaries")
        self.memo[i] = None
        dump = self.__dump
        write("<value><struct>\n")
        for k, v in value.items():
            write("<member>\n")
            if not isinstance(k, str):
                raise TypeError("dictionary key must be string")
            write("<name>%s</name>\n" % escape(k))
            dump(v, write)
            write("</member>\n")
        write("</struct></value>\n")
        del self.memo[i]
    dispatch[dict] = dump_struct

    def dump_datetime(self, value, write):
        write("<value><dateTime.iso8601>")
        write(_strftime(value))
        write("</dateTime.iso8601></value>\n")
    dispatch[datetime] = dump_datetime

    def dump_instance(self, value, write):
        # check for special wrappers
        if value.__class__ in WRAPPERS:
            self.write = write
            value.encode(self)
            del self.write
        else:
            # store instance attributes as a struct (really?)
            self.dump_struct(value.__dict__, write)
    dispatch[DateTime] = dump_instance
    dispatch[Binary] = dump_instance
    # XXX(twouters): using "_arbitrary_instance" as key as a quick-fix
    # for the p3yk merge, this should probably be fixed more neatly.
    dispatch["_arbitrary_instance"] = dump_instance
+
+##
+# XML-RPC unmarshaller.
+#
+# @see loads
+
class Unmarshaller(object):
    """Unmarshal an XML-RPC response, based on incoming XML event
    messages (start, data, end). Call close() to get the resulting
    data structure.

    Note that this reader is fairly tolerant, and gladly accepts bogus
    XML-RPC data without complaining (but not bogus XML).
    """

    # and again, if you don't understand what's going on in here,
    # that's perfectly ok.

    def __init__(self, use_datetime=False, use_builtin_types=False):
        self._type = None          # "params", "fault" or "methodName" once seen
        self._stack = []           # decoded values; arrays/structs fold in place
        self._marks = []           # stack offsets where open array/struct began
        self._data = []            # character-data chunks for current element
        self._methodname = None
        self._encoding = "utf-8"
        self.append = self._stack.append
        self._use_datetime = use_builtin_types or use_datetime
        self._use_bytes = use_builtin_types

    def close(self):
        # return response tuple and target method
        if self._type is None or self._marks:
            raise ResponseError()
        if self._type == "fault":
            raise Fault(**self._stack[0])
        return tuple(self._stack)

    def getmethodname(self):
        # Only set when the packet was a methodCall (see end_methodName).
        return self._methodname

    #
    # event handlers

    def xml(self, encoding, standalone):
        self._encoding = encoding
        # FIXME: assert standalone == 1 ???

    def start(self, tag, attrs):
        # prepare to handle this element
        if tag == "array" or tag == "struct":
            self._marks.append(len(self._stack))
        self._data = []
        self._value = (tag == "value")

    def data(self, text):
        # expat may deliver text in several chunks; collect them all
        self._data.append(text)

    def end(self, tag):
        # call the appropriate end tag handler
        try:
            f = self.dispatch[tag]
        except KeyError:
            pass # unknown tag ?
        else:
            return f(self, "".join(self._data))

    #
    # accelerator support

    def end_dispatch(self, tag, data):
        # dispatch data
        try:
            f = self.dispatch[tag]
        except KeyError:
            pass # unknown tag ?
        else:
            return f(self, data)

    #
    # element decoders

    dispatch = {}

    def end_nil (self, data):
        self.append(None)
        self._value = 0
    dispatch["nil"] = end_nil

    def end_boolean(self, data):
        if data == "0":
            self.append(False)
        elif data == "1":
            self.append(True)
        else:
            raise TypeError("bad boolean value")
        self._value = 0
    dispatch["boolean"] = end_boolean

    def end_int(self, data):
        self.append(int(data))
        self._value = 0
    dispatch["i4"] = end_int
    dispatch["i8"] = end_int
    dispatch["int"] = end_int

    def end_double(self, data):
        self.append(float(data))
        self._value = 0
    dispatch["double"] = end_double

    def end_string(self, data):
        # NOTE(review): expat hands back native str on Python 3, which has no
        # .decode(); presumably `data` is the future backport's string type or
        # _encoding is falsy in that code path -- confirm upstream.
        if self._encoding:
            data = data.decode(self._encoding)
        self.append(data)
        self._value = 0
    dispatch["string"] = end_string
    dispatch["name"] = end_string # struct keys are always strings

    def end_array(self, data):
        mark = self._marks.pop()
        # map arrays to Python lists
        self._stack[mark:] = [self._stack[mark:]]
        self._value = 0
    dispatch["array"] = end_array

    def end_struct(self, data):
        mark = self._marks.pop()
        # map structs to Python dictionaries
        dict = {}
        items = self._stack[mark:]
        # items alternate name, value, name, value, ...
        for i in range(0, len(items), 2):
            dict[items[i]] = items[i+1]
        self._stack[mark:] = [dict]
        self._value = 0
    dispatch["struct"] = end_struct

    def end_base64(self, data):
        value = Binary()
        value.decode(data.encode("ascii"))
        # use_builtin_types unwraps to plain bytes
        if self._use_bytes:
            value = value.data
        self.append(value)
        self._value = 0
    dispatch["base64"] = end_base64

    def end_dateTime(self, data):
        value = DateTime()
        value.decode(data)
        # use_datetime replaces the wrapper with datetime.datetime
        if self._use_datetime:
            value = _datetime_type(data)
        self.append(value)
    dispatch["dateTime.iso8601"] = end_dateTime

    def end_value(self, data):
        # if we stumble upon a value element with no internal
        # elements, treat it as a string element
        if self._value:
            self.end_string(data)
    dispatch["value"] = end_value

    def end_params(self, data):
        self._type = "params"
    dispatch["params"] = end_params

    def end_fault(self, data):
        self._type = "fault"
    dispatch["fault"] = end_fault

    def end_methodName(self, data):
        # NOTE(review): same str/.decode concern as end_string above.
        if self._encoding:
            data = data.decode(self._encoding)
        self._methodname = data
        self._type = "methodName" # no params
    dispatch["methodName"] = end_methodName
+
+## Multicall support
+#
+
+class _MultiCallMethod(object):
+ # some lesser magic to store calls made to a MultiCall object
+ # for batch execution
+ def __init__(self, call_list, name):
+ self.__call_list = call_list
+ self.__name = name
+ def __getattr__(self, name):
+ return _MultiCallMethod(self.__call_list, "%s.%s" % (self.__name, name))
+ def __call__(self, *args):
+ self.__call_list.append((self.__name, args))
+
class MultiCallIterator(object):
    """Iterates over the results of a multicall. Exceptions are
    raised in response to xmlrpc faults."""

    def __init__(self, results):
        self.results = results

    def __getitem__(self, i):
        """Return the i-th result; raise Fault for a fault entry."""
        item = self.results[i]
        if isinstance(item, dict):
            # Fix: the original tested isinstance(type(item), dict), which is
            # never true (type objects are not dict instances), so fault
            # structs wrongly fell through to ValueError.
            raise Fault(item['faultCode'], item['faultString'])
        elif type(item) == type([]):
            # successful results arrive as single-element lists
            return item[0]
        else:
            raise ValueError("unexpected type in multicall result")
+
class MultiCall(object):
    """server -> an object used to boxcar method calls

    server should be a ServerProxy object.

    Methods can be added to the MultiCall using normal
    method call syntax e.g.:

    multicall = MultiCall(server_proxy)
    multicall.add(2,3)
    multicall.get_address("Guido")

    To execute the multicall, call the MultiCall object e.g.:

    add_result, address = multicall()
    """

    def __init__(self, server):
        self.__server = server
        self.__call_list = []   # queued (dotted_name, args) pairs

    def __repr__(self):
        # Restored format string (the angle-bracket text had been stripped
        # from this copy); matches stdlib xmlrpc.client.
        return "<MultiCall at %x>" % id(self)

    __str__ = __repr__

    def __getattr__(self, name):
        return _MultiCallMethod(self.__call_list, name)

    def __call__(self):
        """Execute the queued calls via system.multicall on the server."""
        marshalled_list = []
        for name, args in self.__call_list:
            marshalled_list.append({'methodName' : name, 'params' : args})

        return MultiCallIterator(self.__server.system.multicall(marshalled_list))
+
+# --------------------------------------------------------------------
+# convenience functions
+
# Optional accelerator hooks; None means "use the pure-Python implementations".
FastMarshaller = FastParser = FastUnmarshaller = None
+
+##
+# Create a parser object, and connect it to an unmarshalling instance.
+# This function picks the fastest available XML parser.
+#
+# return A (parser, unmarshaller) tuple.
+
def getparser(use_datetime=False, use_builtin_types=False):
    """getparser() -> parser, unmarshaller

    Create an instance of the fastest available parser, and attach it
    to an unmarshalling object.  Return both objects.
    """
    if FastParser and FastUnmarshaller:
        # Pick the decoder hooks matching the requested output types.
        if use_builtin_types:
            mkdatetime = _datetime_type
            mkbytes = base64.decodebytes
        elif use_datetime:
            mkdatetime = _datetime_type
            mkbytes = _binary
        else:
            mkdatetime = _datetime
            mkbytes = _binary
        target = FastUnmarshaller(True, False, mkbytes, mkdatetime, Fault)
        return FastParser(target), target

    # Pure-Python fallback path.
    target = Unmarshaller(use_datetime=use_datetime,
                          use_builtin_types=use_builtin_types)
    parser = FastParser(target) if FastParser else ExpatParser(target)
    return parser, target
+
+##
+# Convert a Python tuple or a Fault instance to an XML-RPC packet.
+#
+# @def dumps(params, **options)
+# @param params A tuple or Fault instance.
+# @keyparam methodname If given, create a methodCall request for
+# this method name.
+# @keyparam methodresponse If given, create a methodResponse packet.
+# If used with a tuple, the tuple must be a singleton (that is,
+# it must contain exactly one element).
+# @keyparam encoding The packet encoding.
+# @return A string containing marshalled data.
+
def dumps(params, methodname=None, methodresponse=None, encoding=None,
          allow_none=False):
    """data [,options] -> marshalled data

    Convert an argument tuple or a Fault instance to an XML-RPC
    request (or response, if the methodresponse option is used).

    In addition to the data object, the following options can be given
    as keyword arguments:

        methodname: the method name for a methodCall packet

        methodresponse: true to create a methodResponse packet.
        If this option is used with a tuple, the tuple must be
        a singleton (i.e. it can contain only one element).

        encoding: the packet encoding (default is UTF-8)

    All byte strings in the data structure are assumed to use the
    packet encoding.  Unicode strings are automatically converted,
    where necessary.
    """

    assert isinstance(params, (tuple, Fault)), "argument must be tuple or Fault instance"
    if isinstance(params, Fault):
        methodresponse = 1
    elif methodresponse and isinstance(params, tuple):
        assert len(params) == 1, "response tuple must be a singleton"

    if not encoding:
        encoding = "utf-8"

    if FastMarshaller:
        m = FastMarshaller(encoding)
    else:
        m = Marshaller(encoding, allow_none)

    data = m.dumps(params)

    # Restored XML declaration and wrapper literals (stripped from this
    # copy); they match the stdlib xmlrpc.client implementation.
    if encoding != "utf-8":
        xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding)
    else:
        xmlheader = "<?xml version='1.0'?>\n" # utf-8 is default

    # standard XML-RPC wrappings
    if methodname:
        # a method call
        if not isinstance(methodname, str):
            methodname = methodname.encode(encoding)
        data = (
            xmlheader,
            "<methodCall>\n"
            "<methodName>", methodname, "</methodName>\n",
            data,
            "</methodCall>\n"
            )
    elif methodresponse:
        # a method response, or a fault structure
        data = (
            xmlheader,
            "<methodResponse>\n",
            data,
            "</methodResponse>\n"
            )
    else:
        return data # return as is
    return str("").join(data)
+
+##
+# Convert an XML-RPC packet to a Python object. If the XML-RPC packet
+# represents a fault condition, this function raises a Fault exception.
+#
+# @param data An XML-RPC packet, given as an 8-bit string.
+# @return A tuple containing the unpacked data, and the method name
+# (None if not present).
+# @see Fault
+
def loads(data, use_datetime=False, use_builtin_types=False):
    """data -> unmarshalled data, method name

    Convert an XML-RPC packet to unmarshalled data plus a method
    name (None if not present).

    If the XML-RPC packet represents a fault condition, this function
    raises a Fault exception.
    """
    parser, unmarshaller = getparser(use_datetime=use_datetime,
                                     use_builtin_types=use_builtin_types)
    parser.feed(data)
    parser.close()
    return unmarshaller.close(), unmarshaller.getmethodname()
+
+##
+# Encode a string using the gzip content encoding such as specified by the
+# Content-Encoding: gzip
+# in the HTTP header, as described in RFC 1952
+#
+# @param data the unencoded data
+# @return the encoded data
+
def gzip_encode(data):
    """data -> gzip encoded data

    Encode data using the gzip content encoding as described in RFC 1952
    """
    if not gzip:
        raise NotImplementedError
    f = BytesIO()
    # 'with' guarantees the GzipFile is flushed/closed even if write fails
    # (the original leaked it on error).
    with gzip.GzipFile(mode="wb", fileobj=f, compresslevel=1) as gzf:
        gzf.write(data)
    encoded = f.getvalue()
    f.close()
    return encoded
+
+##
+# Decode a string using the gzip content encoding such as specified by the
+# Content-Encoding: gzip
+# in the HTTP header, as described in RFC 1952
+#
+# @param data The encoded data
+# @return the unencoded data
+# @raises ValueError if data is not correctly coded.
+
def gzip_decode(data):
    """gzip encoded data -> unencoded data

    Decode data using the gzip content encoding as described in RFC 1952

    Raises ValueError if data is not correctly gzip-coded.
    """
    if not gzip:
        raise NotImplementedError
    f = BytesIO(data)
    gzf = gzip.GzipFile(mode="rb", fileobj=f)
    try:
        decoded = gzf.read()
    except IOError:
        raise ValueError("invalid data")
    finally:
        # The original skipped both close() calls when read() raised.
        gzf.close()
        f.close()
    return decoded
+
+##
+# Return a decoded file-like object for the gzip encoding
+# as described in RFC 1952.
+#
+# @param response A stream supporting a read() method
+# @return a file-like object that the decoded data can be read() from
+
class GzipDecodedResponse(gzip.GzipFile if gzip else object):
    """a file-like object to decode a response encoded with the gzip
    method, as described in RFC 1952.
    """
    def __init__(self, response):
        # The raw response object lacks the tell()/read() semantics that
        # GzipFile needs, so buffer the whole payload in memory first.
        if not gzip:
            raise NotImplementedError
        buffered = BytesIO(response.read())
        self.io = buffered
        gzip.GzipFile.__init__(self, mode="rb", fileobj=buffered)

    def close(self):
        """Close the decoder, then the underlying in-memory buffer."""
        gzip.GzipFile.close(self)
        self.io.close()
+
+
+# --------------------------------------------------------------------
+# request dispatcher
+
+class _Method(object):
+ # some magic to bind an XML-RPC method to an RPC server.
+ # supports "nested" methods (e.g. examples.getStateName)
+ def __init__(self, send, name):
+ self.__send = send
+ self.__name = name
+ def __getattr__(self, name):
+ return _Method(self.__send, "%s.%s" % (self.__name, name))
+ def __call__(self, *args):
+ return self.__send(self.__name, args)
+
+##
+# Standard transport class for XML-RPC over HTTP.
+#
+# You can create custom transports by subclassing this method, and
+# overriding selected methods.
+
class Transport(object):
    """Handles an HTTP transaction to an XML-RPC server."""

    # client identifier (may be overridden)
    user_agent = "Python-xmlrpc/%s" % __version__

    #if true, we'll request gzip encoding
    accept_gzip_encoding = True

    # if positive, encode request using gzip if it exceeds this threshold
    # note that many server will get confused, so only use it if you know
    # that they can decode such a request
    encode_threshold = None #None = don't encode

    def __init__(self, use_datetime=False, use_builtin_types=False):
        self._use_datetime = use_datetime
        self._use_builtin_types = use_builtin_types
        self._connection = (None, None)   # (host descriptor, HTTPConnection)
        self._extra_headers = []          # e.g. Basic-auth header from get_host_info

    ##
    # Send a complete request, and parse the response.
    # Retry request if a cached connection has disconnected.
    #
    # @param host Target host.
    # @param handler Target RPC handler.
    # @param request_body XML-RPC request body.
    # @param verbose Debugging flag.
    # @return Parsed response.

    def request(self, host, handler, request_body, verbose=False):
        """Send the request, retrying once if a kept-alive connection died."""
        #retry request once if cached connection has gone cold
        for i in (0, 1):
            try:
                return self.single_request(host, handler, request_body, verbose)
            except socket.error as e:
                # only retry on connection-reset style errors, and only once
                if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE):
                    raise
            except http_client.BadStatusLine: #close after we sent request
                if i:
                    raise

    def single_request(self, host, handler, request_body, verbose=False):
        """Issue one XML-RPC request; raise ProtocolError on non-200 status."""
        # issue XML-RPC request
        try:
            http_conn = self.send_request(host, handler, request_body, verbose)
            resp = http_conn.getresponse()
            if resp.status == 200:
                self.verbose = verbose
                return self.parse_response(resp)

        except Fault:
            raise
        except Exception:
            #All unexpected errors leave connection in
            # a strange state, so we clear it.
            self.close()
            raise

        #We got an error response.
        #Discard any response data and raise exception
        if resp.getheader("content-length", ""):
            resp.read()
        raise ProtocolError(
            host + handler,
            resp.status, resp.reason,
            dict(resp.getheaders())
            )


    ##
    # Create parser.
    #
    # @return A 2-tuple containing a parser and a unmarshaller.

    def getparser(self):
        # get parser and unmarshaller
        return getparser(use_datetime=self._use_datetime,
                         use_builtin_types=self._use_builtin_types)

    ##
    # Get authorization info from host parameter
    # Host may be a string, or a (host, x509-dict) tuple; if a string,
    # it is checked for a "user:pw@host" format, and a "Basic
    # Authentication" header is added if appropriate.
    #
    # @param host Host descriptor (URL or (URL, x509 info) tuple).
    # @return A 3-tuple containing (actual host, extra headers,
    #     x509 info).  The header and x509 fields may be None.

    def get_host_info(self, host):

        x509 = {}
        if isinstance(host, tuple):
            host, x509 = host

        auth, host = urllib_parse.splituser(host)

        if auth:
            # embed "user:pw@" credentials as a Basic Authentication header
            auth = urllib_parse.unquote_to_bytes(auth)
            auth = base64.encodebytes(auth).decode("utf-8")
            auth = "".join(auth.split()) # get rid of whitespace
            extra_headers = [
                ("Authorization", "Basic " + auth)
                ]
        else:
            extra_headers = []

        return host, extra_headers, x509

    ##
    # Connect to server.
    #
    # @param host Target host.
    # @return An HTTPConnection object

    def make_connection(self, host):
        #return an existing connection if possible. This allows
        #HTTP/1.1 keep-alive.
        if self._connection and host == self._connection[0]:
            return self._connection[1]
        # create a HTTP connection object from a host descriptor
        chost, self._extra_headers, x509 = self.get_host_info(host)
        self._connection = host, http_client.HTTPConnection(chost)
        return self._connection[1]

    ##
    # Clear any cached connection object.
    # Used in the event of socket errors.
    #
    def close(self):
        if self._connection[1]:
            self._connection[1].close()
            self._connection = (None, None)

    ##
    # Send HTTP request.
    #
    # @param host Host descriptor (URL or (URL, x509 info) tuple).
    # @param handler Target RPC handler (a path relative to host)
    # @param request_body The XML-RPC request body
    # @param debug Enable debugging if debug is true.
    # @return An HTTPConnection.

    def send_request(self, host, handler, request_body, debug):
        connection = self.make_connection(host)
        headers = self._extra_headers[:]
        if debug:
            connection.set_debuglevel(1)
        if self.accept_gzip_encoding and gzip:
            connection.putrequest("POST", handler, skip_accept_encoding=True)
            headers.append(("Accept-Encoding", "gzip"))
        else:
            connection.putrequest("POST", handler)
        headers.append(("Content-Type", "text/xml"))
        headers.append(("User-Agent", self.user_agent))
        self.send_headers(connection, headers)
        self.send_content(connection, request_body)
        return connection

    ##
    # Send request headers.
    # This function provides a useful hook for subclassing
    #
    # @param connection httpConnection.
    # @param headers list of key,value pairs for HTTP headers

    def send_headers(self, connection, headers):
        for key, val in headers:
            connection.putheader(key, val)

    ##
    # Send request body.
    # This function provides a useful hook for subclassing
    #
    # @param connection httpConnection.
    # @param request_body XML-RPC request body.

    def send_content(self, connection, request_body):
        #optionally encode the request
        if (self.encode_threshold is not None and
            self.encode_threshold < len(request_body) and
            gzip):
            connection.putheader("Content-Encoding", "gzip")
            request_body = gzip_encode(request_body)

        connection.putheader("Content-Length", str(len(request_body)))
        # endheaders() sends the body along with the final header block
        connection.endheaders(request_body)

    ##
    # Parse response.
    #
    # @param file Stream.
    # @return Response tuple and target method.

    def parse_response(self, response):
        """Read the (possibly gzip-encoded) response and unmarshal it."""
        # read response data from httpresponse, and parse it
        # Check for new http response object, otherwise it is a file object.
        if hasattr(response, 'getheader'):
            if response.getheader("Content-Encoding", "") == "gzip":
                stream = GzipDecodedResponse(response)
            else:
                stream = response
        else:
            stream = response

        p, u = self.getparser()

        while 1:
            data = stream.read(1024)
            if not data:
                break
            if self.verbose:
                print("body:", repr(data))
            p.feed(data)

        if stream is not response:
            stream.close()
        p.close()

        return u.close()
+
+##
+# Standard transport class for XML-RPC over HTTPS.
+
class SafeTransport(Transport):
    """Handles an HTTPS transaction to an XML-RPC server."""

    # FIXME: mostly untested

    def make_connection(self, host):
        """Return a cached or newly created HTTPSConnection for *host*."""
        if self._connection and host == self._connection[0]:
            return self._connection[1]

        if not hasattr(http_client, "HTTPSConnection"):
            raise NotImplementedError(
            "your version of http.client doesn't support HTTPS")
        # create a HTTPS connection object from a host descriptor
        # host may be a string, or a (host, x509-dict) tuple
        chost, self._extra_headers, x509 = self.get_host_info(host)
        self._connection = host, http_client.HTTPSConnection(chost,
            None, **(x509 or {}))
        return self._connection[1]
+
+##
+# Standard server proxy. This class establishes a virtual connection
+# to an XML-RPC server.
+#
+# This class is available as ServerProxy and Server. New code should
+# use ServerProxy, to avoid confusion.
+#
+# @def ServerProxy(uri, **options)
+# @param uri The connection point on the server.
+# @keyparam transport A transport factory, compatible with the
+# standard transport class.
+# @keyparam encoding The default encoding used for 8-bit strings
+# (default is UTF-8).
+# @keyparam verbose Use a true value to enable debugging output.
+# (printed to standard output).
+# @see Transport
+
class ServerProxy(object):
    """uri [,options] -> a logical connection to an XML-RPC server

    uri is the connection point on the server, given as
    scheme://host/target.

    The standard implementation always supports the "http" scheme.  If
    SSL socket support is available (Python 2.0), it also supports
    "https".

    If the target part and the slash preceding it are both omitted,
    "/RPC2" is assumed.

    The following options can be given as keyword arguments:

        transport: a transport factory
        encoding: the request encoding (default is UTF-8)

    All 8-bit strings passed to the server proxy are assumed to use
    the given encoding.
    """

    def __init__(self, uri, transport=None, encoding=None, verbose=False,
                 allow_none=False, use_datetime=False, use_builtin_types=False):
        # establish a "logical" server connection

        # get the url
        type, uri = urllib_parse.splittype(uri)
        if type not in ("http", "https"):
            raise IOError("unsupported XML-RPC protocol")
        self.__host, self.__handler = urllib_parse.splithost(uri)
        if not self.__handler:
            self.__handler = "/RPC2"

        if transport is None:
            # https gets the certificate-aware transport
            if type == "https":
                handler = SafeTransport
            else:
                handler = Transport
            transport = handler(use_datetime=use_datetime,
                                use_builtin_types=use_builtin_types)
        self.__transport = transport

        self.__encoding = encoding or 'utf-8'
        self.__verbose = verbose
        self.__allow_none = allow_none

    def __close(self):
        self.__transport.close()

    def __request(self, methodname, params):
        # call a method on the remote server

        request = dumps(params, methodname, encoding=self.__encoding,
                        allow_none=self.__allow_none).encode(self.__encoding)

        response = self.__transport.request(
            self.__host,
            self.__handler,
            request,
            verbose=self.__verbose
            )

        # unwrap single-element response tuples for caller convenience
        if len(response) == 1:
            response = response[0]

        return response

    def __repr__(self):
        # Restored format string (the angle-bracket text had been stripped
        # from this copy); matches stdlib xmlrpc.client.
        return (
            "<ServerProxy for %s%s>" %
            (self.__host, self.__handler)
            )

    __str__ = __repr__

    def __getattr__(self, name):
        # magic method dispatcher
        return _Method(self.__request, name)

    # note: to call a remote object with an non-standard name, use
    # result getattr(server, "strange-python-name")(args)

    def __call__(self, attr):
        """A workaround to get special attributes on the ServerProxy
        without interfering with the magic __getattr__
        """
        if attr == "close":
            return self.__close
        elif attr == "transport":
            return self.__transport
        raise AttributeError("Attribute %r not found" % (attr,))
+
+# compatibility
+
# Legacy alias kept for code written against the old xmlrpclib name.
Server = ServerProxy
+
+# --------------------------------------------------------------------
+# test code
+
+if __name__ == "__main__":
+
+ # simple test program (from the XML-RPC specification)
+
+ # local server, available from Lib/xmlrpc/server.py
+ server = ServerProxy("http://localhost:8000")
+
+ try:
+ print(server.currentTime.getCurrentTime())
+ except Error as v:
+ print("ERROR", v)
+
+ multi = MultiCall(server)
+ multi.getData()
+ multi.pow(2,9)
+ multi.add(1,2)
+ try:
+ for response in multi():
+ print(response)
+ except Error as v:
+ print("ERROR", v)
diff --git a/src/clyphx/vendor/future/future/backports/xmlrpc/server.py b/src/clyphx/vendor/future/future/backports/xmlrpc/server.py
new file mode 100644
index 0000000..28072bf
--- /dev/null
+++ b/src/clyphx/vendor/future/future/backports/xmlrpc/server.py
@@ -0,0 +1,999 @@
+r"""
+Ported using Python-Future from the Python 3.3 standard library.
+
+XML-RPC Servers.
+
+This module can be used to create simple XML-RPC servers
+by creating a server and either installing functions, a
+class instance, or by extending the SimpleXMLRPCServer
+class.
+
+It can also be used to handle XML-RPC requests in a CGI
+environment using CGIXMLRPCRequestHandler.
+
+The Doc* classes can be used to create XML-RPC servers that
+serve pydoc-style documentation in response to HTTP
+GET requests. This documentation is dynamically generated
+based on the functions and methods registered with the
+server.
+
+A list of possible usage patterns follows:
+
+1. Install functions:
+
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_function(pow)
+server.register_function(lambda x,y: x+y, 'add')
+server.serve_forever()
+
+2. Install an instance:
+
+class MyFuncs:
+ def __init__(self):
+ # make all of the sys functions available through sys.func_name
+ import sys
+ self.sys = sys
+ def _listMethods(self):
+ # implement this method so that system.listMethods
+ # knows to advertise the sys methods
+ return list_public_methods(self) + \
+ ['sys.' + method for method in list_public_methods(self.sys)]
+ def pow(self, x, y): return pow(x, y)
+ def add(self, x, y) : return x + y
+
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_introspection_functions()
+server.register_instance(MyFuncs())
+server.serve_forever()
+
+3. Install an instance with custom dispatch method:
+
+class Math:
+ def _listMethods(self):
+ # this method must be present for system.listMethods
+ # to work
+ return ['add', 'pow']
+ def _methodHelp(self, method):
+ # this method must be present for system.methodHelp
+ # to work
+ if method == 'add':
+ return "add(2,3) => 5"
+ elif method == 'pow':
+ return "pow(x, y[, z]) => number"
+ else:
+ # By convention, return empty
+ # string if no help is available
+ return ""
+ def _dispatch(self, method, params):
+ if method == 'pow':
+ return pow(*params)
+ elif method == 'add':
+ return params[0] + params[1]
+ else:
+ raise ValueError('bad method')
+
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_introspection_functions()
+server.register_instance(Math())
+server.serve_forever()
+
+4. Subclass SimpleXMLRPCServer:
+
+class MathServer(SimpleXMLRPCServer):
+ def _dispatch(self, method, params):
+ try:
+ # We are forcing the 'export_' prefix on methods that are
+ # callable through XML-RPC to prevent potential security
+ # problems
+ func = getattr(self, 'export_' + method)
+ except AttributeError:
+ raise Exception('method "%s" is not supported' % method)
+ else:
+ return func(*params)
+
+ def export_add(self, x, y):
+ return x + y
+
+server = MathServer(("localhost", 8000))
+server.serve_forever()
+
+5. CGI script:
+
+server = CGIXMLRPCRequestHandler()
+server.register_function(pow)
+server.handle_request()
+"""
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+from future.builtins import int, str
+
+# Written by Brian Quinlan (brian@sweetapp.com).
+# Based on code written by Fredrik Lundh.
+
+from future.backports.xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode
+from future.backports.http.server import BaseHTTPRequestHandler
+import future.backports.http.server as http_server
+from future.backports import socketserver
+import sys
+import os
+import re
+import pydoc
+import inspect
+import traceback
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
+
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Resolves a dotted attribute name to an object. Raises
    an AttributeError if any attribute in the chain starts with a '_'.

    If the optional allow_dotted_names argument is false, dots are not
    supported and this function operates similar to getattr(obj, attr).
    """
    parts = attr.split('.') if allow_dotted_names else [attr]

    for part in parts:
        # Refuse to traverse underscore-prefixed names: this is the
        # security barrier that keeps XML-RPC clients out of private
        # attributes and module internals.
        if part.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % part
                )
        obj = getattr(obj, part)
    return obj
+
def list_public_methods(obj):
    """Returns a list of attribute strings, found in the specified
    object, which represent callable attributes"""
    public = []
    for name in dir(obj):
        # Skip private/dunder names; keep only callable attributes.
        if name.startswith('_'):
            continue
        if callable(getattr(obj, name)):
            public.append(name)
    return public
+
class SimpleXMLRPCDispatcher(object):
    """Mix-in class that dispatches XML-RPC requests.

    This class is used to register XML-RPC method handlers
    and then to dispatch them. This class doesn't need to be
    instanced directly when used by SimpleXMLRPCServer but it
    can be instanced when used by the MultiPathXMLRPCServer
    """

    def __init__(self, allow_none=False, encoding=None,
                 use_builtin_types=False):
        # name -> callable for explicitly registered functions
        self.funcs = {}
        # optional fallback object searched when no function matches
        self.instance = None
        self.allow_none = allow_none
        self.encoding = encoding or 'utf-8'
        # forwarded to loads() in _marshaled_dispatch
        self.use_builtin_types = use_builtin_types

    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.

        Only one instance can be installed at a time.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.

        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.

        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.

        *** SECURITY WARNING: ***

        Enabling the allow_dotted_names options allows intruders
        to access your module's global variables and may allow
        intruders to execute arbitrary code on your machine. Only
        use this option on a secure, closed network.

        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names

    def register_function(self, function, name=None):
        """Registers a function to respond to XML-RPC requests.

        The optional name argument can be used to set a Unicode name
        for the function.
        """
        self.funcs[name if name is not None else function.__name__] = function

    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.

        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs['system.listMethods'] = self.system_listMethods
        self.funcs['system.methodSignature'] = self.system_methodSignature
        self.funcs['system.methodHelp'] = self.system_methodHelp

    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.

        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs['system.multicall'] = self.system_multicall

    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        """Dispatches an XML-RPC method from marshalled (XML) data.

        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the preferred means
        of changing method dispatch behavior.
        """
        try:
            params, method = loads(data, use_builtin_types=self.use_builtin_types)

            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = dumps(response, methodresponse=1,
                             allow_none=self.allow_none,
                             encoding=self.encoding)
        except Fault as fault:
            response = dumps(fault, allow_none=self.allow_none,
                             encoding=self.encoding)
        except:
            # Deliberately broad: ANY other failure is reported to the
            # client as a generic Fault(1) rather than killing the server.
            exc_type, exc_value, exc_tb = sys.exc_info()
            response = dumps(
                Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none,
                )

        return response.encode(self.encoding)

    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']

        Returns a list of the methods supported by the server."""
        methods = set(self.funcs.keys())
        if self.instance is not None:
            # Instance can implement _listMethods to return a list of
            # methods.
            if hasattr(self.instance, '_listMethods'):
                methods |= set(self.instance._listMethods())
            # If the instance has a _dispatch method then we don't have
            # enough information to provide a list of methods.
            elif not hasattr(self.instance, '_dispatch'):
                methods |= set(list_public_methods(self.instance))
        return sorted(methods)

    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]

        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.

        This server does NOT support system.methodSignature."""
        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
        return 'signatures not supported'

    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"

        Returns a string containing documentation for the specified method."""
        method = None
        if method_name in self.funcs:
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method.
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # If the instance has a _dispatch method then we don't have
            # enough information to provide help.
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                        self.instance,
                        method_name,
                        self.allow_dotted_names
                        )
                except AttributeError:
                    pass

        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        return pydoc.getdoc(method)

    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]

        Allows the caller to package multiple XML-RPC calls into a single
        request.

        See http://www.xmlrpc.com/discuss/msgReader$1208
        """
        results = []
        for call in call_list:
            name = call['methodName']
            args = call['params']

            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                results.append([self._dispatch(name, args)])
            except Fault as fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                # Same deliberate catch-all as _marshaled_dispatch.
                exc_type, exc_value, exc_tb = sys.exc_info()
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (exc_type, exc_value)}
                    )
        return results

    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.

        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.

        Methods beginning with an '_' are considered private and will
        not be called.
        """
        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                # call instance method directly
                try:
                    func = resolve_dotted_attribute(
                        self.instance,
                        method,
                        self.allow_dotted_names
                        )
                except AttributeError:
                    pass

        if func is not None:
            return func(*params)
        raise Exception('method "%s" is not supported' % method)
+
class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """

    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')

    #if not None, encode responses larger than this, if possible
    encode_threshold = 1400 #a common MTU

    #Override form StreamRequestHandler: full buffering of output
    #and no Nagle.
    wbufsize = -1
    disable_nagle_algorithm = True

    # a re to match a gzip Accept-Encoding
    aepattern = re.compile(r"""
                            \s* ([^\s;]+) \s*            #content-coding
                            (;\s* q \s*=\s* ([0-9\.]+))? #q
                            """, re.VERBOSE | re.IGNORECASE)

    def accept_encodings(self):
        # Parse the Accept-Encoding header into a {coding: q-value} dict;
        # a coding with no explicit q defaults to 1.0.
        r = {}
        ae = self.headers.get("Accept-Encoding", "")
        for e in ae.split(","):
            match = self.aepattern.match(e)
            if match:
                v = match.group(3)
                v = float(v) if v else 1.0
                r[match.group(1)] = v
        return r

    def is_rpc_path_valid(self):
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True

    def do_POST(self):
        """Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """

        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                chunk = self.rfile.read(chunk_size)
                if not chunk:
                    # Client closed early; process what we have.
                    break
                L.append(chunk)
                size_remaining -= len(L[-1])
            data = b''.join(L)

            data = self.decode_request_content(data)
            if data is None:
                return #response has been sent

            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                data, getattr(self, '_dispatch', None), self.path
            )
        except Exception as e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)

            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                trace = traceback.format_exc()
                trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII')
                self.send_header("X-traceback", trace)

            self.send_header("Content-length", "0")
            self.end_headers()
        else:
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            # Optionally gzip large responses when the client accepts it.
            if self.encode_threshold is not None:
                if len(response) > self.encode_threshold:
                    q = self.accept_encodings().get("gzip", 0)
                    if q:
                        try:
                            response = gzip_encode(response)
                            self.send_header("Content-Encoding", "gzip")
                        except NotImplementedError:
                            pass
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

    def decode_request_content(self, data):
        #support gzip encoding of request
        # Returns the decoded body, or None after having sent an error
        # response itself (the caller checks for None and bails out).
        encoding = self.headers.get("content-encoding", "identity").lower()
        if encoding == "identity":
            return data
        if encoding == "gzip":
            try:
                return gzip_decode(data)
            except NotImplementedError:
                self.send_response(501, "encoding %r not supported" % encoding)
            except ValueError:
                self.send_response(400, "error decoding gzip content")
        else:
            self.send_response(501, "encoding %r not supported" % encoding)
        self.send_header("Content-length", "0")
        self.end_headers()

    def report_404 (self):
        # Report a 404 error
        self.send_response(404)
        response = b'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)

    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""

        if self.server.logRequests:
            BaseHTTPRequestHandler.log_request(self, code, size)
+
class SimpleXMLRPCServer(socketserver.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.

    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inherited
    from SimpleXMLRPCDispatcher to change this behavior.
    """

    allow_reuse_address = True

    # Warning: this is for debugging purposes only! Never set this to True in
    # production code, as will be sending out sensitive information (exception
    # and stack trace details) when exceptions are raised inside
    # SimpleXMLRPCRequestHandler.do_POST
    _send_traceback_header = False

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True, use_builtin_types=False):
        # Consulted by SimpleXMLRPCRequestHandler.log_request.
        self.logRequests = logRequests

        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
        socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)

        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
+
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
    """Multipath XML-RPC Server
    This specialization of SimpleXMLRPCServer allows the user to create
    multiple Dispatcher instances and assign them to different
    HTTP request paths. This makes it possible to run two or more
    'virtual XML-RPC servers' at the same port.
    Make sure that the requestHandler accepts the paths in question.
    """
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True, use_builtin_types=False):

        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
                                    encoding, bind_and_activate, use_builtin_types)
        # path -> SimpleXMLRPCDispatcher; populated via add_dispatcher.
        self.dispatchers = {}
        self.allow_none = allow_none
        self.encoding = encoding or 'utf-8'

    def add_dispatcher(self, path, dispatcher):
        self.dispatchers[path] = dispatcher
        return dispatcher

    def get_dispatcher(self, path):
        return self.dispatchers[path]

    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        # Route the request to the dispatcher registered for this path.
        try:
            response = self.dispatchers[path]._marshaled_dispatch(
                data, dispatch_method, path)
        except:
            # report low level exception back to server
            # (each dispatcher should have handled their own
            # exceptions)
            exc_type, exc_value = sys.exc_info()[:2]
            response = dumps(
                Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none)
            response = response.encode(self.encoding)
        return response
+
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""

    def __init__(self, allow_none=False, encoding=None, use_builtin_types=False):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)

    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""

        response = self._marshaled_dispatch(request_text)

        # CGI protocol: headers (text) on stdout, then the raw response
        # bytes on the underlying binary buffer.
        print('Content-Type: text/xml')
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()

    def handle_get(self):
        """Handle a single HTTP GET request.

        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """

        code = 400
        message, explain = BaseHTTPRequestHandler.responses[code]

        response = http_server.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        response = response.encode('utf-8')
        print('Status: %d %s' % (code, message))
        print('Content-Type: %s' % http_server.DEFAULT_ERROR_CONTENT_TYPE)
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()

    def handle_request(self, request_text=None):
        """Handle a single XML-RPC request passed through a CGI post method.

        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """

        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (ValueError, TypeError):
                # Missing or malformed CONTENT_LENGTH: read to EOF.
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)

            self.handle_xmlrpc(request_text)
+
+
+# -----------------------------------------------------------------------------
+# Self documenting XML-RPC Server.
+
class ServerHTMLDoc(pydoc.HTMLDoc):
    """Class used to generate pydoc HTML document for a server.

    NOTE(review): the vendored copy of this class had lost the literal
    HTML text of most of its format strings (e.g. ``'%s' % (url, url)``,
    which raises TypeError). They are restored below from the upstream
    CPython 3.3 ``Lib/xmlrpc/server.py`` source.
    """

    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0

        # XXX Note that this regular expression does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        while 1:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            # Escaped plain text between the previous match and this one.
            results.append(escape(text[here:start]))

            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)

    def docroutine(self, object, name, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""

        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''

        title = '<a name="%s"><strong>%s</strong></a>' % (
            self.escape(anchor), self.escape(name))

        # NOTE(review): inspect.formatargspec was removed in Python 3.11;
        # this vendored code targets older interpreters — confirm the
        # supported runtime before relying on these branches.
        if inspect.ismethod(object):
            args = inspect.getfullargspec(object)
            # exclude the argument bound to the instance, it will be
            # confusing to the non-Python user
            argspec = inspect.formatargspec (
                    args.args[1:],
                    args.varargs,
                    args.varkw,
                    args.defaults,
                    annotations=args.annotations,
                    formatvalue=self.formatvalue
                )
        elif inspect.isfunction(object):
            args = inspect.getfullargspec(object)
            argspec = inspect.formatargspec(
                args.args, args.varargs, args.varkw, args.defaults,
                annotations=args.annotations,
                formatvalue=self.formatvalue)
        else:
            argspec = '(...)'

        if isinstance(object, tuple):
            # (argstring, docstring) pair supplied by
            # _get_method_argstring/_methodHelp instead of a callable.
            argspec = object[0] or argspec
            docstring = object[1] or ""
        else:
            docstring = pydoc.getdoc(object)

        decl = title + argspec + (note and self.grey(
               '<font face="helvetica, arial">%s</font>' % note))

        doc = self.markup(
            docstring, self.preformat, funcs, classes, methods)
        doc = doc and '<dd><tt>%s</tt></dd>' % doc
        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)

    def docserver(self, server_name, package_documentation, methods):
        """Produce HTML documentation for an XML-RPC server."""

        fdict = {}
        for key, value in methods.items():
            fdict[key] = '#-' + key
            fdict[value] = fdict[key]

        server_name = self.escape(server_name)
        head = '<big><big><strong>%s</strong></big></big>' % server_name
        result = self.heading(head, '#ffffff', '#7799ee')

        doc = self.markup(package_documentation, self.preformat, fdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc

        contents = []
        method_items = sorted(methods.items())
        for key, value in method_items:
            contents.append(self.docroutine(value, key, funcs=fdict))
        result = result + self.bigsection(
            'Methods', '#ffffff', '#eeaa77', ''.join(contents))

        return result
+
class XMLRPCDocGenerator(object):
    """Generates documentation for an XML-RPC server.

    This class is designed as mix-in and should not
    be constructed directly.
    """

    def __init__(self):
        # Defaults used by generate_html_documentation until the
        # set_server_* setters override them.
        self.server_name = 'XML-RPC Server Documentation'
        self.server_documentation = (
            "This server exports the following methods through the XML-RPC "
            "protocol.")
        self.server_title = 'XML-RPC Server Documentation'

    def set_server_title(self, server_title):
        """Set the HTML title of the generated server documentation"""
        self.server_title = server_title

    def set_server_name(self, server_name):
        """Set the name of the generated HTML server documentation"""
        self.server_name = server_name

    def set_server_documentation(self, server_documentation):
        """Set the documentation string for the entire server."""
        self.server_documentation = server_documentation

    def generate_html_documentation(self):
        """generate_html_documentation() => html documentation for the server

        Generates HTML documentation for the server using introspection for
        installed functions and instances that do not implement the
        _dispatch method. Alternatively, instances can choose to implement
        the _get_method_argstring(method_name) method to provide the
        argument string used in the documentation and the
        _methodHelp(method_name) method to provide the help text used
        in the documentation."""
        methods = {}

        for method_name in self.system_listMethods():
            if method_name in self.funcs:
                method = self.funcs[method_name]
            elif self.instance is not None:
                # The instance may describe itself via the optional
                # _get_method_argstring/_methodHelp hooks.
                info = [None, None]  # argspec, documentation
                if hasattr(self.instance, '_get_method_argstring'):
                    info[0] = self.instance._get_method_argstring(method_name)
                if hasattr(self.instance, '_methodHelp'):
                    info[1] = self.instance._methodHelp(method_name)

                info = tuple(info)
                if info != (None, None):
                    method = info
                elif not hasattr(self.instance, '_dispatch'):
                    # Fall back to introspecting the attribute itself.
                    try:
                        method = resolve_dotted_attribute(
                            self.instance,
                            method_name
                            )
                    except AttributeError:
                        method = info
                else:
                    method = info
            else:
                assert 0, "Could not find method in self.functions and no "\
                          "instance installed"

            methods[method_name] = method

        documenter = ServerHTMLDoc()
        documentation = documenter.docserver(
            self.server_name,
            self.server_documentation,
            methods
            )

        return documenter.page(self.server_title, documentation)
+
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC and documentation request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.

    Handles all HTTP GET requests and interprets them as requests
    for documentation.
    """

    def do_GET(self):
        """Handles the HTTP GET request.

        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        # The server mixes in XMLRPCDocGenerator, which provides
        # generate_html_documentation().
        response = self.server.generate_html_documentation().encode('utf-8')
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
+
class DocXMLRPCServer( SimpleXMLRPCServer,
                       XMLRPCDocGenerator):
    """XML-RPC and HTML documentation server.

    Adds the ability to serve server documentation to the capabilities
    of SimpleXMLRPCServer.
    """

    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True, use_builtin_types=False):
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate,
                                    use_builtin_types)
        # Initializes the server_name/server_title/server_documentation
        # defaults used by generate_html_documentation().
        XMLRPCDocGenerator.__init__(self)
+
class DocCGIXMLRPCRequestHandler(   CGIXMLRPCRequestHandler,
                                    XMLRPCDocGenerator):
    """Handler for XML-RPC data and documentation requests passed through
    CGI"""

    def handle_get(self):
        """Handles the HTTP GET request.

        Interpret all HTTP GET requests as requests for server
        documentation.
        """

        response = self.generate_html_documentation().encode('utf-8')

        # CGI protocol: text headers on stdout, HTML body as raw bytes.
        print('Content-Type: text/html')
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()

    def __init__(self):
        CGIXMLRPCRequestHandler.__init__(self)
        XMLRPCDocGenerator.__init__(self)
+
+
if __name__ == '__main__':
    import datetime

    class ExampleService:
        def getData(self):
            return '42'

        # Nested class: reachable over XML-RPC as currentTime.getCurrentTime
        # because the instance is registered with allow_dotted_names=True.
        class currentTime:
            @staticmethod
            def getCurrentTime():
                return datetime.datetime.now()

    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x,y: x+y, 'add')
    server.register_instance(ExampleService(), allow_dotted_names=True)
    server.register_multicall_functions()
    print('Serving XML-RPC on localhost port 8000')
    print('It is advisable to run this example server within a secure, closed network.')
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("\nKeyboard interrupt received, exiting.")
        server.server_close()
        sys.exit(0)
diff --git a/src/clyphx/vendor/future/future/builtins/__init__.py b/src/clyphx/vendor/future/future/builtins/__init__.py
new file mode 100644
index 0000000..8bc1649
--- /dev/null
+++ b/src/clyphx/vendor/future/future/builtins/__init__.py
@@ -0,0 +1,51 @@
+"""
+A module that brings in equivalents of the new and modified Python 3
+builtins into Py2. Has no effect on Py3.
+
+See the docs `here <https://python-future.org/what-else.html>`_
+(``docs/what-else.rst``) for more information.
+
+"""
+
+from future.builtins.iterators import (filter, map, zip)
+# The isinstance import is no longer needed. We provide it only for
+# backward-compatibility with future v0.8.2. It will be removed in future v1.0.
+from future.builtins.misc import (ascii, chr, hex, input, isinstance, next,
+ oct, open, pow, round, super, max, min)
+from future.utils import PY3
+
# On Py3, re-export the real builtins unchanged; on Py2, substitute the
# backported new* types so code sees Py3 semantics.
if PY3:
    import builtins
    bytes = builtins.bytes
    dict = builtins.dict
    int = builtins.int
    list = builtins.list
    object = builtins.object
    range = builtins.range
    str = builtins.str
    __all__ = []
else:
    from future.types import (newbytes as bytes,
                              newdict as dict,
                              newint as int,
                              newlist as list,
                              newobject as object,
                              newrange as range,
                              newstr as str)
from future import utils


if not utils.PY3:
    # We only import names that shadow the builtins on Py2. No other namespace
    # pollution on Py2.

    # Only shadow builtins on Py2; no new names
    __all__ = ['filter', 'map', 'zip',
               'ascii', 'chr', 'hex', 'input', 'next', 'oct', 'open', 'pow',
               'round', 'super',
               'bytes', 'dict', 'int', 'list', 'object', 'range', 'str', 'max', 'min'
               ]

else:
    # No namespace pollution on Py3
    __all__ = []
diff --git a/src/clyphx/vendor/future/future/builtins/disabled.py b/src/clyphx/vendor/future/future/builtins/disabled.py
new file mode 100644
index 0000000..f6d6ea9
--- /dev/null
+++ b/src/clyphx/vendor/future/future/builtins/disabled.py
@@ -0,0 +1,66 @@
+"""
+This disables builtin functions (and one exception class) which are
+removed from Python 3.3.
+
+This module is designed to be used like this::
+
+ from future.builtins.disabled import *
+
+This disables the following obsolete Py2 builtin functions::
+
+ apply, cmp, coerce, execfile, file, input, long,
+ raw_input, reduce, reload, unicode, xrange
+
+We don't hack __builtin__, which is very fragile because it contaminates
+imported modules too. Instead, we just create new functions with
+the same names as the obsolete builtins from Python 2 which raise
+NameError exceptions when called.
+
+Note that both ``input()`` and ``raw_input()`` are among the disabled
+functions (in this module). Although ``input()`` exists as a builtin in
+Python 3, the Python 2 ``input()`` builtin is unsafe to use because it
+can lead to shell injection. Therefore we shadow it by default upon ``from
+future.builtins.disabled import *``, in case someone forgets to import our
+replacement ``input()`` somehow and expects Python 3 semantics.
+
+See the ``future.builtins.misc`` module for a working version of
+``input`` with Python 3 semantics.
+
+(Note that callable() is not among the functions disabled; this was
+reintroduced into Python 3.2.)
+
+This exception class is also disabled:
+
+ StandardError
+
+"""
+
+from __future__ import division, absolute_import, print_function
+
+from future import utils
+
+
+OBSOLETE_BUILTINS = ['apply', 'chr', 'cmp', 'coerce', 'execfile', 'file',
+ 'input', 'long', 'raw_input', 'reduce', 'reload',
+ 'unicode', 'xrange', 'StandardError']
+
+
+def disabled_function(name):
+ '''
+ Returns a function that cannot be called
+ '''
+ def disabled(*args, **kwargs):
+ '''
+ A function disabled by the ``future`` module. This function is
+ no longer a builtin in Python 3.
+ '''
+ raise NameError('obsolete Python 2 builtin {0} is disabled'.format(name))
+ return disabled
+
+
+if not utils.PY3:
+ for fname in OBSOLETE_BUILTINS:
+ locals()[fname] = disabled_function(fname)
+ __all__ = OBSOLETE_BUILTINS
+else:
+ __all__ = []
diff --git a/src/clyphx/vendor/future/future/builtins/iterators.py b/src/clyphx/vendor/future/future/builtins/iterators.py
new file mode 100644
index 0000000..dff651e
--- /dev/null
+++ b/src/clyphx/vendor/future/future/builtins/iterators.py
@@ -0,0 +1,52 @@
+"""
+This module is designed to be used as follows::
+
+ from future.builtins.iterators import *
+
+And then, for example::
+
+ for i in range(10**15):
+ pass
+
+ for (a, b) in zip(range(10**15), range(-10**15, 0)):
+ pass
+
+Note that this is standard Python 3 code, plus some imports that do
+nothing on Python 3.
+
+The iterators this brings in are::
+
+- ``range``
+- ``filter``
+- ``map``
+- ``zip``
+
+On Python 2, ``range`` is a pure-Python backport of Python 3's ``range``
+iterator with slicing support. The other iterators (``filter``, ``map``,
+``zip``) are from the ``itertools`` module on Python 2. On Python 3 these
+are available in the module namespace but not exported for * imports via
+__all__ (zero namespace pollution).
+
+Note that these are also available in the standard library
+``future_builtins`` module on Python 2 -- but not Python 3, so using
+the standard library version is not portable, nor anywhere near complete.
+"""
+
+from __future__ import division, absolute_import, print_function
+
+import itertools
+from future import utils
+
+if not utils.PY3:
+ filter = itertools.ifilter
+ map = itertools.imap
+ from future.types import newrange as range
+ zip = itertools.izip
+ __all__ = ['filter', 'map', 'range', 'zip']
+else:
+ import builtins
+ filter = builtins.filter
+ map = builtins.map
+ range = builtins.range
+ zip = builtins.zip
+ __all__ = []
diff --git a/src/clyphx/vendor/future/future/builtins/misc.py b/src/clyphx/vendor/future/future/builtins/misc.py
new file mode 100644
index 0000000..f86ce5f
--- /dev/null
+++ b/src/clyphx/vendor/future/future/builtins/misc.py
@@ -0,0 +1,135 @@
+"""
+A module that brings in equivalents of various modified Python 3 builtins
+into Py2. Has no effect on Py3.
+
+The builtin functions are:
+
+- ``ascii`` (from Py2's future_builtins module)
+- ``hex`` (from Py2's future_builtins module)
+- ``oct`` (from Py2's future_builtins module)
+- ``chr`` (equivalent to ``unichr`` on Py2)
+- ``input`` (equivalent to ``raw_input`` on Py2)
+- ``next`` (calls ``__next__`` if it exists, else ``next`` method)
+- ``open`` (equivalent to io.open on Py2)
+- ``super`` (backport of Py3's magic zero-argument super() function)
+- ``round`` (new "Banker's Rounding" behaviour from Py3)
+- ``max`` (new default option from Py3.4)
+- ``min`` (new default option from Py3.4)
+
+``isinstance`` is also currently exported for backwards compatibility
+with v0.8.2, although this has been deprecated since v0.9.
+
+
+input()
+-------
+Like the new ``input()`` function from Python 3 (without eval()), except
+that it returns bytes. Equivalent to Python 2's ``raw_input()``.
+
+Warning: By default, importing this module *removes* the old Python 2
+input() function entirely from ``__builtin__`` for safety. This is
+because forgetting to import the new ``input`` from ``future`` might
+otherwise lead to a security vulnerability (shell injection) on Python 2.
+
+To restore it, you can retrieve it yourself from
+``__builtin__._old_input``.
+
+Fortunately, ``input()`` seems to be seldom used in the wild in Python
+2...
+
+"""
+
+from future import utils
+
+
+if utils.PY2:
+ from io import open
+ from future_builtins import ascii, oct, hex
+ from __builtin__ import unichr as chr, pow as _builtin_pow
+ import __builtin__
+
+ # Only for backward compatibility with future v0.8.2:
+ isinstance = __builtin__.isinstance
+
+ # Warning: Python 2's input() is unsafe and MUST not be able to be used
+ # accidentally by someone who expects Python 3 semantics but forgets
+ # to import it on Python 2. Versions of ``future`` prior to 0.11
+ # deleted it from __builtin__. Now we keep in __builtin__ but shadow
+ # the name like all others. Just be sure to import ``input``.
+
+ input = raw_input
+
+ from future.builtins.newnext import newnext as next
+ from future.builtins.newround import newround as round
+ from future.builtins.newsuper import newsuper as super
+ from future.builtins.new_min_max import newmax as max
+ from future.builtins.new_min_max import newmin as min
+ from future.types.newint import newint
+
+ _SENTINEL = object()
+
+ def pow(x, y, z=_SENTINEL):
+ """
+ pow(x, y[, z]) -> number
+
+ With two arguments, equivalent to x**y. With three arguments,
+ equivalent to (x**y) % z, but may be more efficient (e.g. for ints).
+ """
+ # Handle newints
+ if isinstance(x, newint):
+ x = long(x)
+ if isinstance(y, newint):
+ y = long(y)
+ if isinstance(z, newint):
+ z = long(z)
+
+ try:
+ if z == _SENTINEL:
+ return _builtin_pow(x, y)
+ else:
+ return _builtin_pow(x, y, z)
+ except ValueError:
+ if z == _SENTINEL:
+ return _builtin_pow(x+0j, y)
+ else:
+ return _builtin_pow(x+0j, y, z)
+
+
+ # ``future`` doesn't support Py3.0/3.1. If we ever did, we'd add this:
+ # callable = __builtin__.callable
+
+ __all__ = ['ascii', 'chr', 'hex', 'input', 'isinstance', 'next', 'oct',
+ 'open', 'pow', 'round', 'super', 'max', 'min']
+
+else:
+ import builtins
+ ascii = builtins.ascii
+ chr = builtins.chr
+ hex = builtins.hex
+ input = builtins.input
+ next = builtins.next
+ # Only for backward compatibility with future v0.8.2:
+ isinstance = builtins.isinstance
+ oct = builtins.oct
+ open = builtins.open
+ pow = builtins.pow
+ round = builtins.round
+ super = builtins.super
+ if utils.PY34_PLUS:
+ max = builtins.max
+ min = builtins.min
+ __all__ = []
+ else:
+ from future.builtins.new_min_max import newmax as max
+ from future.builtins.new_min_max import newmin as min
+ __all__ = ['min', 'max']
+
+ # The callable() function was removed from Py3.0 and 3.1 and
+ # reintroduced into Py3.2+. ``future`` doesn't support Py3.0/3.1. If we ever
+ # did, we'd add this:
+ # try:
+ # callable = builtins.callable
+ # except AttributeError:
+ # # Definition from Pandas
+ # def callable(obj):
+ # return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+ # __all__.append('callable')
diff --git a/src/clyphx/vendor/future/future/builtins/new_min_max.py b/src/clyphx/vendor/future/future/builtins/new_min_max.py
new file mode 100644
index 0000000..6f0c2a8
--- /dev/null
+++ b/src/clyphx/vendor/future/future/builtins/new_min_max.py
@@ -0,0 +1,59 @@
+import itertools
+
+from future import utils
+if utils.PY2:
+ from __builtin__ import max as _builtin_max, min as _builtin_min
+else:
+ from builtins import max as _builtin_max, min as _builtin_min
+
+_SENTINEL = object()
+
+
+def newmin(*args, **kwargs):
+ return new_min_max(_builtin_min, *args, **kwargs)
+
+
+def newmax(*args, **kwargs):
+ return new_min_max(_builtin_max, *args, **kwargs)
+
+
+def new_min_max(_builtin_func, *args, **kwargs):
+ """
+ To support the argument "default" introduced in python 3.4 for min and max
+ :param _builtin_func: builtin min or builtin max
+ :param args:
+ :param kwargs:
+ :return: returns the min or max based on the arguments passed
+ """
+
+ for key, _ in kwargs.items():
+ if key not in set(['key', 'default']):
+ raise TypeError('Illegal argument %s', key)
+
+ if len(args) == 0:
+ raise TypeError
+
+ if len(args) != 1 and kwargs.get('default', _SENTINEL) is not _SENTINEL:
+ raise TypeError
+
+ if len(args) == 1:
+ iterator = iter(args[0])
+ try:
+ first = next(iterator)
+ except StopIteration:
+ if kwargs.get('default', _SENTINEL) is not _SENTINEL:
+ return kwargs.get('default')
+ else:
+ raise ValueError('{}() arg is an empty sequence'.format(_builtin_func.__name__))
+ else:
+ iterator = itertools.chain([first], iterator)
+ if kwargs.get('key') is not None:
+ return _builtin_func(iterator, key=kwargs.get('key'))
+ else:
+ return _builtin_func(iterator)
+
+ if len(args) > 1:
+ if kwargs.get('key') is not None:
+ return _builtin_func(args, key=kwargs.get('key'))
+ else:
+ return _builtin_func(args)
diff --git a/src/clyphx/vendor/future/future/builtins/newnext.py b/src/clyphx/vendor/future/future/builtins/newnext.py
new file mode 100644
index 0000000..097638a
--- /dev/null
+++ b/src/clyphx/vendor/future/future/builtins/newnext.py
@@ -0,0 +1,70 @@
+'''
+This module provides a newnext() function in Python 2 that mimics the
+behaviour of ``next()`` in Python 3, falling back to Python 2's behaviour for
+compatibility if this fails.
+
+``newnext(iterator)`` calls the iterator's ``__next__()`` method if it exists. If this
+doesn't exist, it falls back to calling a ``next()`` method.
+
+For example:
+
+ >>> class Odds(object):
+ ... def __init__(self, start=1):
+ ... self.value = start - 2
+ ... def __next__(self): # note the Py3 interface
+ ... self.value += 2
+ ... return self.value
+ ... def __iter__(self):
+ ... return self
+ ...
+ >>> iterator = Odds()
+ >>> next(iterator)
+ 1
+ >>> next(iterator)
+ 3
+
+If you are defining your own custom iterator class as above, it is preferable
+to explicitly decorate the class with the @implements_iterator decorator from
+``future.utils`` as follows:
+
+ >>> @implements_iterator
+ ... class Odds(object):
+ ... # etc
+ ... pass
+
+This next() function is primarily for consuming iterators defined in Python 3
+code elsewhere that we would like to run on Python 2 or 3.
+'''
+
+_builtin_next = next
+
+_SENTINEL = object()
+
+def newnext(iterator, default=_SENTINEL):
+ """
+ next(iterator[, default])
+
+ Return the next item from the iterator. If default is given and the iterator
+ is exhausted, it is returned instead of raising StopIteration.
+ """
+
+ # args = []
+ # if default is not _SENTINEL:
+ # args.append(default)
+ try:
+ try:
+ return iterator.__next__()
+ except AttributeError:
+ try:
+ return iterator.next()
+ except AttributeError:
+ raise TypeError("'{0}' object is not an iterator".format(
+ iterator.__class__.__name__))
+ except StopIteration as e:
+ if default is _SENTINEL:
+ raise e
+ else:
+ return default
+
+
+__all__ = ['newnext']
diff --git a/src/clyphx/vendor/future/future/builtins/newround.py b/src/clyphx/vendor/future/future/builtins/newround.py
new file mode 100644
index 0000000..394a2c6
--- /dev/null
+++ b/src/clyphx/vendor/future/future/builtins/newround.py
@@ -0,0 +1,102 @@
+"""
+``python-future``: pure Python implementation of Python 3 round().
+"""
+
+from future.utils import PYPY, PY26, bind_method
+
+# Use the decimal module for simplicity of implementation (and
+# hopefully correctness).
+from decimal import Decimal, ROUND_HALF_EVEN
+
+
+def newround(number, ndigits=None):
+ """
+ See Python 3 documentation: uses Banker's Rounding.
+
+ Delegates to the __round__ method if for some reason this exists.
+
+ If not, rounds a number to a given precision in decimal digits (default
+ 0 digits). This returns an int when called with one argument,
+ otherwise the same type as the number. ndigits may be negative.
+
+ See the test_round method in future/tests/test_builtins.py for
+ examples.
+ """
+ return_int = False
+ if ndigits is None:
+ return_int = True
+ ndigits = 0
+ if hasattr(number, '__round__'):
+ return number.__round__(ndigits)
+
+ if ndigits < 0:
+ raise NotImplementedError('negative ndigits not supported yet')
+ exponent = Decimal('10') ** (-ndigits)
+
+ if PYPY:
+ # Work around issue #24: round() breaks on PyPy with NumPy's types
+ if 'numpy' in repr(type(number)):
+ number = float(number)
+
+ if isinstance(number, Decimal):
+ d = number
+ else:
+ if not PY26:
+ d = Decimal.from_float(number).quantize(exponent,
+ rounding=ROUND_HALF_EVEN)
+ else:
+ d = from_float_26(number).quantize(exponent, rounding=ROUND_HALF_EVEN)
+
+ if return_int:
+ return int(d)
+ else:
+ return float(d)
+
+
+### From Python 2.7's decimal.py. Only needed to support Py2.6:
+
+def from_float_26(f):
+ """Converts a float to a decimal number, exactly.
+
+ Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
+ Since 0.1 is not exactly representable in binary floating point, the
+ value is stored as the nearest representable value which is
+ 0x1.999999999999ap-4. The exact equivalent of the value in decimal
+ is 0.1000000000000000055511151231257827021181583404541015625.
+
+ >>> Decimal.from_float(0.1)
+ Decimal('0.1000000000000000055511151231257827021181583404541015625')
+ >>> Decimal.from_float(float('nan'))
+ Decimal('NaN')
+ >>> Decimal.from_float(float('inf'))
+ Decimal('Infinity')
+ >>> Decimal.from_float(-float('inf'))
+ Decimal('-Infinity')
+ >>> Decimal.from_float(-0.0)
+ Decimal('-0')
+
+ """
+ import math as _math
+ from decimal import _dec_from_triple # only available on Py2.6 and Py2.7 (not 3.3)
+
+ if isinstance(f, (int, long)): # handle integer inputs
+ return Decimal(f)
+ if _math.isinf(f) or _math.isnan(f): # raises TypeError if not a float
+ return Decimal(repr(f))
+ if _math.copysign(1.0, f) == 1.0:
+ sign = 0
+ else:
+ sign = 1
+ n, d = abs(f).as_integer_ratio()
+ # int.bit_length() method doesn't exist on Py2.6:
+ def bit_length(d):
+ if d != 0:
+ return len(bin(abs(d))) - 2
+ else:
+ return 0
+ k = bit_length(d) - 1
+ result = _dec_from_triple(sign, str(n*5**k), -k)
+ return result
+
+
+__all__ = ['newround']
diff --git a/src/clyphx/vendor/future/future/builtins/newsuper.py b/src/clyphx/vendor/future/future/builtins/newsuper.py
new file mode 100644
index 0000000..993aec6
--- /dev/null
+++ b/src/clyphx/vendor/future/future/builtins/newsuper.py
@@ -0,0 +1,114 @@
+'''
+This module provides a newsuper() function in Python 2 that mimics the
+behaviour of super() in Python 3. It is designed to be used as follows:
+
+ from __future__ import division, absolute_import, print_function
+ from future.builtins import super
+
+And then, for example:
+
+ class VerboseList(list):
+ def append(self, item):
+ print('Adding an item')
+ super().append(item) # new simpler super() function
+
+Importing this module on Python 3 has no effect.
+
+This is based on (i.e. almost identical to) Ryan Kelly's magicsuper
+module here:
+
+ https://github.com/rfk/magicsuper.git
+
+Excerpts from Ryan's docstring:
+
+ "Of course, you can still explicitly pass in the arguments if you want
+ to do something strange. Sometimes you really do want that, e.g. to
+ skip over some classes in the method resolution order.
+
+ "How does it work? By inspecting the calling frame to determine the
+ function object being executed and the object on which it's being
+ called, and then walking the object's __mro__ chain to find out where
+ that function was defined. Yuck, but it seems to work..."
+'''
+
+from __future__ import absolute_import
+import sys
+from types import FunctionType
+
+from future.utils import PY3, PY26
+
+
+_builtin_super = super
+
+_SENTINEL = object()
+
+def newsuper(typ=_SENTINEL, type_or_obj=_SENTINEL, framedepth=1):
+ '''Like builtin super(), but capable of magic.
+
+ This acts just like the builtin super() function, but if called
+ without any arguments it attempts to infer them at runtime.
+ '''
+ # Infer the correct call if used without arguments.
+ if typ is _SENTINEL:
+ # We'll need to do some frame hacking.
+ f = sys._getframe(framedepth)
+
+ try:
+ # Get the function's first positional argument.
+ type_or_obj = f.f_locals[f.f_code.co_varnames[0]]
+ except (IndexError, KeyError,):
+ raise RuntimeError('super() used in a function with no args')
+
+ try:
+ # Get the MRO so we can crawl it.
+ mro = type_or_obj.__mro__
+ except (AttributeError, RuntimeError): # see issue #160
+ try:
+ mro = type_or_obj.__class__.__mro__
+ except AttributeError:
+ raise RuntimeError('super() used with a non-newstyle class: {!r} {}'.format(type_or_obj, dir(type_or_obj)))
+
+ # A ``for...else`` block? Yes! It's odd, but useful.
+ # If unfamiliar with for...else, see:
+ #
+ # http://psung.blogspot.com/2007/12/for-else-in-python.html
+ for typ in mro:
+ # Find the class that owns the currently-executing method.
+ for meth in typ.__dict__.values():
+ # Drill down through any wrappers to the underlying func.
+ # This handles e.g. classmethod() and staticmethod().
+ try:
+ while not isinstance(meth,FunctionType):
+ if isinstance(meth, property):
+ # Calling __get__ on the property will invoke
+ # user code which might throw exceptions or have
+ # side effects
+ meth = meth.fget
+ else:
+ try:
+ meth = meth.__func__
+ except AttributeError:
+ meth = meth.__get__(type_or_obj, typ)
+ except (AttributeError, TypeError):
+ continue
+ if meth.func_code is f.f_code:
+ break # Aha! Found you.
+ else:
+ continue # Not found! Move onto the next class in MRO.
+ break # Found! Break out of the search loop.
+ else:
+ raise RuntimeError('super() called outside a method')
+
+ # Dispatch to builtin super().
+ if type_or_obj is not _SENTINEL:
+ return _builtin_super(typ, type_or_obj)
+ return _builtin_super(typ)
+
+
+def superm(*args, **kwds):
+ f = sys._getframe(1)
+ nm = f.f_code.co_name
+ return getattr(newsuper(framedepth=2),nm)(*args, **kwds)
+
+
+__all__ = ['newsuper']
diff --git a/src/clyphx/vendor/future/future/moves/__init__.py b/src/clyphx/vendor/future/future/moves/__init__.py
new file mode 100644
index 0000000..0cd60d3
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/__init__.py
@@ -0,0 +1,8 @@
+# future.moves package
+from __future__ import absolute_import
+import sys
+__future_module__ = True
+from future.standard_library import import_top_level_modules
+
+if sys.version_info[0] >= 3:
+ import_top_level_modules()
diff --git a/src/clyphx/vendor/future/future/moves/_dummy_thread.py b/src/clyphx/vendor/future/future/moves/_dummy_thread.py
new file mode 100644
index 0000000..688d249
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/_dummy_thread.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from _dummy_thread import *
+else:
+ __future_module__ = True
+ from dummy_thread import *
diff --git a/src/clyphx/vendor/future/future/moves/_markupbase.py b/src/clyphx/vendor/future/future/moves/_markupbase.py
new file mode 100644
index 0000000..f9fb4bb
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/_markupbase.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from _markupbase import *
+else:
+ __future_module__ = True
+ from markupbase import *
diff --git a/src/clyphx/vendor/future/future/moves/_thread.py b/src/clyphx/vendor/future/future/moves/_thread.py
new file mode 100644
index 0000000..c68018b
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/_thread.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from _thread import *
+else:
+ __future_module__ = True
+ from thread import *
diff --git a/src/clyphx/vendor/future/future/moves/builtins.py b/src/clyphx/vendor/future/future/moves/builtins.py
new file mode 100644
index 0000000..e4b6221
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/builtins.py
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from builtins import *
+else:
+ __future_module__ = True
+ from __builtin__ import *
+ # Overwrite any old definitions with the equivalent future.builtins ones:
+ from future.builtins import *
diff --git a/src/clyphx/vendor/future/future/moves/collections.py b/src/clyphx/vendor/future/future/moves/collections.py
new file mode 100644
index 0000000..664ee6a
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/collections.py
@@ -0,0 +1,18 @@
+from __future__ import absolute_import
+import sys
+
+from future.utils import PY2, PY26
+__future_module__ = True
+
+from collections import *
+
+if PY2:
+ from UserDict import UserDict
+ from UserList import UserList
+ from UserString import UserString
+
+if PY26:
+ from future.backports.misc import OrderedDict, Counter
+
+if sys.version_info < (3, 3):
+ from future.backports.misc import ChainMap, _count_elements
diff --git a/src/clyphx/vendor/future/future/moves/configparser.py b/src/clyphx/vendor/future/future/moves/configparser.py
new file mode 100644
index 0000000..33d9cf9
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/configparser.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+
+from future.utils import PY2
+
+if PY2:
+ from ConfigParser import *
+else:
+ from configparser import *
diff --git a/src/clyphx/vendor/future/future/moves/copyreg.py b/src/clyphx/vendor/future/future/moves/copyreg.py
new file mode 100644
index 0000000..9d08cdc
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/copyreg.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ import copyreg, sys
+ # A "*" import uses Python 3's copyreg.__all__ which does not include
+ # all public names in the API surface for copyreg, this avoids that
+ # problem by just making our module _be_ a reference to the actual module.
+ sys.modules['future.moves.copyreg'] = copyreg
+else:
+ __future_module__ = True
+ from copy_reg import *
diff --git a/src/clyphx/vendor/future/future/moves/dbm/__init__.py b/src/clyphx/vendor/future/future/moves/dbm/__init__.py
new file mode 100644
index 0000000..626b406
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/dbm/__init__.py
@@ -0,0 +1,20 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from dbm import *
+else:
+ __future_module__ = True
+ from whichdb import *
+ from anydbm import *
+
+# Py3.3's dbm/__init__.py imports ndbm but doesn't expose it via __all__.
+# In case some (badly written) code depends on dbm.ndbm after import dbm,
+# we simulate this:
+if PY3:
+ from dbm import ndbm
+else:
+ try:
+ from future.moves.dbm import ndbm
+ except ImportError:
+ ndbm = None
diff --git a/src/clyphx/vendor/future/future/moves/dbm/dumb.py b/src/clyphx/vendor/future/future/moves/dbm/dumb.py
new file mode 100644
index 0000000..528383f
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/dbm/dumb.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from dbm.dumb import *
+else:
+ __future_module__ = True
+ from dumbdbm import *
diff --git a/src/clyphx/vendor/future/future/moves/dbm/gnu.py b/src/clyphx/vendor/future/future/moves/dbm/gnu.py
new file mode 100644
index 0000000..68ccf67
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/dbm/gnu.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from dbm.gnu import *
+else:
+ __future_module__ = True
+ from gdbm import *
diff --git a/src/clyphx/vendor/future/future/moves/dbm/ndbm.py b/src/clyphx/vendor/future/future/moves/dbm/ndbm.py
new file mode 100644
index 0000000..8c6fff8
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/dbm/ndbm.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from dbm.ndbm import *
+else:
+ __future_module__ = True
+ from dbm import *
diff --git a/src/clyphx/vendor/future/future/moves/html/__init__.py b/src/clyphx/vendor/future/future/moves/html/__init__.py
new file mode 100644
index 0000000..22ed6e7
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/html/__init__.py
@@ -0,0 +1,31 @@
+from __future__ import absolute_import
+from future.utils import PY3
+__future_module__ = True
+
+if PY3:
+ from html import *
+else:
+ # cgi.escape isn't good enough for the single Py3.3 html test to pass.
+ # Define it inline here instead. From the Py3.4 stdlib. Note that the
+ # html.escape() function from the Py3.3 stdlib is not suitable for use on
+ # Py2.x.
+ """
+ General functions for HTML manipulation.
+ """
+
+ def escape(s, quote=True):
+ """
+ Replace special characters "&", "<" and ">" to HTML-safe sequences.
+ If the optional flag quote is true (the default), the quotation mark
+ characters, both double quote (") and single quote (') characters are also
+ translated.
+ """
+ s = s.replace("&", "&amp;") # Must be done first!
+ s = s.replace("<", "&lt;")
+ s = s.replace(">", "&gt;")
+ if quote:
+ s = s.replace('"', "&quot;")
+ s = s.replace('\'', "&#x27;")
+ return s
+
+ __all__ = ['escape']
diff --git a/src/clyphx/vendor/future/future/moves/html/entities.py b/src/clyphx/vendor/future/future/moves/html/entities.py
new file mode 100644
index 0000000..56a8860
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/html/entities.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from html.entities import *
+else:
+ __future_module__ = True
+ from htmlentitydefs import *
diff --git a/src/clyphx/vendor/future/future/moves/html/parser.py b/src/clyphx/vendor/future/future/moves/html/parser.py
new file mode 100644
index 0000000..a6115b5
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/html/parser.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+__future_module__ = True
+
+if PY3:
+ from html.parser import *
+else:
+ from HTMLParser import *
diff --git a/src/clyphx/vendor/future/future/moves/http/__init__.py b/src/clyphx/vendor/future/future/moves/http/__init__.py
new file mode 100644
index 0000000..917b3d7
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/http/__init__.py
@@ -0,0 +1,4 @@
+from future.utils import PY3
+
+if not PY3:
+ __future_module__ = True
diff --git a/src/clyphx/vendor/future/future/moves/http/client.py b/src/clyphx/vendor/future/future/moves/http/client.py
new file mode 100644
index 0000000..55f9c9c
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/http/client.py
@@ -0,0 +1,8 @@
+from future.utils import PY3
+
+if PY3:
+ from http.client import *
+else:
+ from httplib import *
+ from httplib import HTTPMessage
+ __future_module__ = True
diff --git a/src/clyphx/vendor/future/future/moves/http/cookiejar.py b/src/clyphx/vendor/future/future/moves/http/cookiejar.py
new file mode 100644
index 0000000..ea00df7
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/http/cookiejar.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from http.cookiejar import *
+else:
+ __future_module__ = True
+ from cookielib import *
diff --git a/src/clyphx/vendor/future/future/moves/http/cookies.py b/src/clyphx/vendor/future/future/moves/http/cookies.py
new file mode 100644
index 0000000..1b74fe2
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/http/cookies.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from http.cookies import *
+else:
+ __future_module__ = True
+ from Cookie import *
+ from Cookie import Morsel # left out of __all__ on Py2.7!
diff --git a/src/clyphx/vendor/future/future/moves/http/server.py b/src/clyphx/vendor/future/future/moves/http/server.py
new file mode 100644
index 0000000..4e75cc1
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/http/server.py
@@ -0,0 +1,20 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from http.server import *
+else:
+ __future_module__ = True
+ from BaseHTTPServer import *
+ from CGIHTTPServer import *
+ from SimpleHTTPServer import *
+ try:
+ from CGIHTTPServer import _url_collapse_path # needed for a test
+ except ImportError:
+ try:
+ # Python 2.7.0 to 2.7.3
+ from CGIHTTPServer import (
+ _url_collapse_path_split as _url_collapse_path)
+ except ImportError:
+ # Doesn't exist on Python 2.6.x. Ignore it.
+ pass
diff --git a/src/clyphx/vendor/future/future/moves/itertools.py b/src/clyphx/vendor/future/future/moves/itertools.py
new file mode 100644
index 0000000..e5eb20d
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/itertools.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+
+from itertools import *
+try:
+ zip_longest = izip_longest
+ filterfalse = ifilterfalse
+except NameError:
+ pass
diff --git a/src/clyphx/vendor/future/future/moves/pickle.py b/src/clyphx/vendor/future/future/moves/pickle.py
new file mode 100644
index 0000000..c53d693
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/pickle.py
@@ -0,0 +1,11 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from pickle import *
+else:
+ __future_module__ = True
+ try:
+ from cPickle import *
+ except ImportError:
+ from pickle import *
diff --git a/src/clyphx/vendor/future/future/moves/queue.py b/src/clyphx/vendor/future/future/moves/queue.py
new file mode 100644
index 0000000..1cb1437
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/queue.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from queue import *
+else:
+ __future_module__ = True
+ from Queue import *
diff --git a/src/clyphx/vendor/future/future/moves/reprlib.py b/src/clyphx/vendor/future/future/moves/reprlib.py
new file mode 100644
index 0000000..a313a13
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/reprlib.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from reprlib import *
+else:
+ __future_module__ = True
+ from repr import *
diff --git a/src/clyphx/vendor/future/future/moves/socketserver.py b/src/clyphx/vendor/future/future/moves/socketserver.py
new file mode 100644
index 0000000..062e084
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/socketserver.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from socketserver import *
+else:
+ __future_module__ = True
+ from SocketServer import *
diff --git a/src/clyphx/vendor/future/future/moves/subprocess.py b/src/clyphx/vendor/future/future/moves/subprocess.py
new file mode 100644
index 0000000..43ffd2a
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/subprocess.py
@@ -0,0 +1,11 @@
+from __future__ import absolute_import
+from future.utils import PY2, PY26
+
+from subprocess import *
+
+if PY2:
+ __future_module__ = True
+ from commands import getoutput, getstatusoutput
+
+if PY26:
+ from future.backports.misc import check_output
diff --git a/src/clyphx/vendor/future/future/moves/sys.py b/src/clyphx/vendor/future/future/moves/sys.py
new file mode 100644
index 0000000..1293bcb
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/sys.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+
+from future.utils import PY2
+
+from sys import *
+
+if PY2:
+ from __builtin__ import intern
diff --git a/src/clyphx/vendor/future/future/moves/test/__init__.py b/src/clyphx/vendor/future/future/moves/test/__init__.py
new file mode 100644
index 0000000..5cf428b
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/test/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if not PY3:
+ __future_module__ = True
diff --git a/src/clyphx/vendor/future/future/moves/test/support.py b/src/clyphx/vendor/future/future/moves/test/support.py
new file mode 100644
index 0000000..e9aa0f4
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/test/support.py
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+from future.standard_library import suspend_hooks
+from future.utils import PY3
+
+if PY3:
+ from test.support import *
+else:
+ __future_module__ = True
+ with suspend_hooks():
+ from test.test_support import *
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/__init__.py b/src/clyphx/vendor/future/future/moves/tkinter/__init__.py
new file mode 100644
index 0000000..e408296
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/__init__.py
@@ -0,0 +1,27 @@
+from __future__ import absolute_import
+from future.utils import PY3
+__future_module__ = True
+
+if not PY3:
+ from Tkinter import *
+ from Tkinter import (_cnfmerge, _default_root, _flatten,
+ _support_default_root, _test,
+ _tkinter, _setit)
+
+ try: # >= 2.7.4
+ from Tkinter import (_join)
+ except ImportError:
+ pass
+
+ try: # >= 2.7.4
+ from Tkinter import (_stringify)
+ except ImportError:
+ pass
+
+ try: # >= 2.7.9
+ from Tkinter import (_splitdict)
+ except ImportError:
+ pass
+
+else:
+ from tkinter import *
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/colorchooser.py b/src/clyphx/vendor/future/future/moves/tkinter/colorchooser.py
new file mode 100644
index 0000000..6dde6e8
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/colorchooser.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.colorchooser import *
+else:
+ try:
+ from tkColorChooser import *
+ except ImportError:
+ raise ImportError('The tkColorChooser module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/commondialog.py b/src/clyphx/vendor/future/future/moves/tkinter/commondialog.py
new file mode 100644
index 0000000..eb7ae8d
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/commondialog.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.commondialog import *
+else:
+ try:
+ from tkCommonDialog import *
+ except ImportError:
+ raise ImportError('The tkCommonDialog module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/constants.py b/src/clyphx/vendor/future/future/moves/tkinter/constants.py
new file mode 100644
index 0000000..ffe0981
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/constants.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.constants import *
+else:
+ try:
+ from Tkconstants import *
+ except ImportError:
+ raise ImportError('The Tkconstants module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/dialog.py b/src/clyphx/vendor/future/future/moves/tkinter/dialog.py
new file mode 100644
index 0000000..113370c
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/dialog.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.dialog import *
+else:
+ try:
+ from Dialog import *
+ except ImportError:
+ raise ImportError('The Dialog module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/dnd.py b/src/clyphx/vendor/future/future/moves/tkinter/dnd.py
new file mode 100644
index 0000000..1ab4379
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/dnd.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.dnd import *
+else:
+ try:
+ from Tkdnd import *
+ except ImportError:
+ raise ImportError('The Tkdnd module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/filedialog.py b/src/clyphx/vendor/future/future/moves/tkinter/filedialog.py
new file mode 100644
index 0000000..973923e
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/filedialog.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.filedialog import *
+else:
+ try:
+ from FileDialog import *
+ except ImportError:
+ raise ImportError('The FileDialog module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/font.py b/src/clyphx/vendor/future/future/moves/tkinter/font.py
new file mode 100644
index 0000000..628f399
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/font.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.font import *
+else:
+ try:
+ from tkFont import *
+ except ImportError:
+ raise ImportError('The tkFont module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/messagebox.py b/src/clyphx/vendor/future/future/moves/tkinter/messagebox.py
new file mode 100644
index 0000000..b43d870
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/messagebox.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.messagebox import *
+else:
+ try:
+ from tkMessageBox import *
+ except ImportError:
+ raise ImportError('The tkMessageBox module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/scrolledtext.py b/src/clyphx/vendor/future/future/moves/tkinter/scrolledtext.py
new file mode 100644
index 0000000..1c69db6
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/scrolledtext.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.scrolledtext import *
+else:
+ try:
+ from ScrolledText import *
+ except ImportError:
+ raise ImportError('The ScrolledText module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/simpledialog.py b/src/clyphx/vendor/future/future/moves/tkinter/simpledialog.py
new file mode 100644
index 0000000..dba93fb
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/simpledialog.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.simpledialog import *
+else:
+ try:
+ from SimpleDialog import *
+ except ImportError:
+ raise ImportError('The SimpleDialog module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/tix.py b/src/clyphx/vendor/future/future/moves/tkinter/tix.py
new file mode 100644
index 0000000..8d1718a
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/tix.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.tix import *
+else:
+ try:
+ from Tix import *
+ except ImportError:
+ raise ImportError('The Tix module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/tkinter/ttk.py b/src/clyphx/vendor/future/future/moves/tkinter/ttk.py
new file mode 100644
index 0000000..081c1b4
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/tkinter/ttk.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+ from tkinter.ttk import *
+else:
+ try:
+ from ttk import *
+ except ImportError:
+ raise ImportError('The ttk module is missing. Does your Py2 '
+ 'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/future/moves/urllib/__init__.py b/src/clyphx/vendor/future/future/moves/urllib/__init__.py
new file mode 100644
index 0000000..5cf428b
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/urllib/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if not PY3:
+ __future_module__ = True
diff --git a/src/clyphx/vendor/future/future/moves/urllib/error.py b/src/clyphx/vendor/future/future/moves/urllib/error.py
new file mode 100644
index 0000000..7d8ada7
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/urllib/error.py
@@ -0,0 +1,16 @@
+from __future__ import absolute_import
+from future.standard_library import suspend_hooks
+
+from future.utils import PY3
+
+if PY3:
+ from urllib.error import *
+else:
+ __future_module__ = True
+
+ # We use this method to get at the original Py2 urllib before any renaming magic
+ # ContentTooShortError = sys.py2_modules['urllib'].ContentTooShortError
+
+ with suspend_hooks():
+ from urllib import ContentTooShortError
+ from urllib2 import URLError, HTTPError
diff --git a/src/clyphx/vendor/future/future/moves/urllib/parse.py b/src/clyphx/vendor/future/future/moves/urllib/parse.py
new file mode 100644
index 0000000..9074b81
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/urllib/parse.py
@@ -0,0 +1,28 @@
+from __future__ import absolute_import
+from future.standard_library import suspend_hooks
+
+from future.utils import PY3
+
+if PY3:
+ from urllib.parse import *
+else:
+ __future_module__ = True
+ from urlparse import (ParseResult, SplitResult, parse_qs, parse_qsl,
+ urldefrag, urljoin, urlparse, urlsplit,
+ urlunparse, urlunsplit)
+
+ # we use this method to get at the original py2 urllib before any renaming
+ # quote = sys.py2_modules['urllib'].quote
+ # quote_plus = sys.py2_modules['urllib'].quote_plus
+ # unquote = sys.py2_modules['urllib'].unquote
+ # unquote_plus = sys.py2_modules['urllib'].unquote_plus
+ # urlencode = sys.py2_modules['urllib'].urlencode
+ # splitquery = sys.py2_modules['urllib'].splitquery
+
+ with suspend_hooks():
+ from urllib import (quote,
+ quote_plus,
+ unquote,
+ unquote_plus,
+ urlencode,
+ splitquery)
diff --git a/src/clyphx/vendor/future/future/moves/urllib/request.py b/src/clyphx/vendor/future/future/moves/urllib/request.py
new file mode 100644
index 0000000..972aa4a
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/urllib/request.py
@@ -0,0 +1,94 @@
+from __future__ import absolute_import
+
+from future.standard_library import suspend_hooks
+from future.utils import PY3
+
+if PY3:
+ from urllib.request import *
+ # These aren't in __all__:
+ from urllib.request import (getproxies,
+ pathname2url,
+ proxy_bypass,
+ quote,
+ request_host,
+ thishost,
+ unquote,
+ url2pathname,
+ urlcleanup,
+ urljoin,
+ urlopen,
+ urlparse,
+ urlretrieve,
+ urlsplit,
+ urlunparse)
+
+ from urllib.parse import (splitattr,
+ splithost,
+ splitpasswd,
+ splitport,
+ splitquery,
+ splittag,
+ splittype,
+ splituser,
+ splitvalue,
+ to_bytes,
+ unwrap)
+else:
+ __future_module__ = True
+ with suspend_hooks():
+ from urllib import *
+ from urllib2 import *
+ from urlparse import *
+
+ # Rename:
+ from urllib import toBytes # missing from __all__ on Py2.6
+ to_bytes = toBytes
+
+ # from urllib import (pathname2url,
+ # url2pathname,
+ # getproxies,
+ # urlretrieve,
+ # urlcleanup,
+ # URLopener,
+ # FancyURLopener,
+ # proxy_bypass)
+
+ # from urllib2 import (
+ # AbstractBasicAuthHandler,
+ # AbstractDigestAuthHandler,
+ # BaseHandler,
+ # CacheFTPHandler,
+ # FileHandler,
+ # FTPHandler,
+ # HTTPBasicAuthHandler,
+ # HTTPCookieProcessor,
+ # HTTPDefaultErrorHandler,
+ # HTTPDigestAuthHandler,
+ # HTTPErrorProcessor,
+ # HTTPHandler,
+ # HTTPPasswordMgr,
+ # HTTPPasswordMgrWithDefaultRealm,
+ # HTTPRedirectHandler,
+ # HTTPSHandler,
+ # URLError,
+ # build_opener,
+ # install_opener,
+ # OpenerDirector,
+ # ProxyBasicAuthHandler,
+ # ProxyDigestAuthHandler,
+ # ProxyHandler,
+ # Request,
+ # UnknownHandler,
+ # urlopen,
+ # )
+
+ # from urlparse import (
+ # urldefrag,
+ # urljoin,
+ # urlparse,
+ # urlunparse,
+ # urlsplit,
+ # urlunsplit,
+ # parse_qs,
+ # parse_qsl,
+ # )
diff --git a/src/clyphx/vendor/future/future/moves/urllib/response.py b/src/clyphx/vendor/future/future/moves/urllib/response.py
new file mode 100644
index 0000000..a287ae2
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/urllib/response.py
@@ -0,0 +1,12 @@
+from future import standard_library
+from future.utils import PY3
+
+if PY3:
+ from urllib.response import *
+else:
+ __future_module__ = True
+ with standard_library.suspend_hooks():
+ from urllib import (addbase,
+ addclosehook,
+ addinfo,
+ addinfourl)
diff --git a/src/clyphx/vendor/future/future/moves/urllib/robotparser.py b/src/clyphx/vendor/future/future/moves/urllib/robotparser.py
new file mode 100644
index 0000000..0dc8f57
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/urllib/robotparser.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from urllib.robotparser import *
+else:
+ __future_module__ = True
+ from robotparser import *
diff --git a/src/clyphx/vendor/future/future/moves/winreg.py b/src/clyphx/vendor/future/future/moves/winreg.py
new file mode 100644
index 0000000..c8b1475
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/winreg.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from winreg import *
+else:
+ __future_module__ = True
+ from _winreg import *
diff --git a/src/clyphx/vendor/future/future/moves/xmlrpc/__init__.py b/src/clyphx/vendor/future/future/moves/xmlrpc/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/clyphx/vendor/future/future/moves/xmlrpc/client.py b/src/clyphx/vendor/future/future/moves/xmlrpc/client.py
new file mode 100644
index 0000000..4708cf8
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/xmlrpc/client.py
@@ -0,0 +1,7 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from xmlrpc.client import *
+else:
+ from xmlrpclib import *
diff --git a/src/clyphx/vendor/future/future/moves/xmlrpc/server.py b/src/clyphx/vendor/future/future/moves/xmlrpc/server.py
new file mode 100644
index 0000000..1a8af34
--- /dev/null
+++ b/src/clyphx/vendor/future/future/moves/xmlrpc/server.py
@@ -0,0 +1,7 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from xmlrpc.server import *
+else:
+ from xmlrpclib import *
diff --git a/src/clyphx/vendor/future/future/standard_library/__init__.py b/src/clyphx/vendor/future/future/standard_library/__init__.py
new file mode 100644
index 0000000..cff02f9
--- /dev/null
+++ b/src/clyphx/vendor/future/future/standard_library/__init__.py
@@ -0,0 +1,815 @@
+"""
+Python 3 reorganized the standard library (PEP 3108). This module exposes
+several standard library modules to Python 2 under their new Python 3
+names.
+
+It is designed to be used as follows::
+
+ from future import standard_library
+ standard_library.install_aliases()
+
+And then these normal Py3 imports work on both Py3 and Py2::
+
+ import builtins
+ import copyreg
+ import queue
+ import reprlib
+ import socketserver
+ import winreg # on Windows only
+ import test.support
+ import html, html.parser, html.entities
+ import http, http.client, http.server
+ import http.cookies, http.cookiejar
+ import urllib.parse, urllib.request, urllib.response, urllib.error, urllib.robotparser
+ import xmlrpc.client, xmlrpc.server
+
+ import _thread
+ import _dummy_thread
+ import _markupbase
+
+ from itertools import filterfalse, zip_longest
+ from sys import intern
+ from collections import UserDict, UserList, UserString
+ from collections import OrderedDict, Counter, ChainMap # even on Py2.6
+ from subprocess import getoutput, getstatusoutput
+ from subprocess import check_output # even on Py2.6
+
+(The renamed modules and functions are still available under their old
+names on Python 2.)
+
+This is a cleaner alternative to this idiom (see
+http://docs.pythonsprints.com/python3_porting/py-porting.html)::
+
+ try:
+ import queue
+ except ImportError:
+ import Queue as queue
+
+
+Limitations
+-----------
+We don't currently support these modules, but would like to::
+
+ import dbm
+ import dbm.dumb
+ import dbm.gnu
+ import collections.abc # on Py33
+ import pickle # should (optionally) bring in cPickle on Python 2
+
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import sys
+import logging
+import imp
+import contextlib
+import types
+import copy
+import os
+
+# Make a dedicated logger; leave the root logger to be configured
+# by the application.
+flog = logging.getLogger('future_stdlib')
+_formatter = logging.Formatter(logging.BASIC_FORMAT)
+_handler = logging.StreamHandler()
+_handler.setFormatter(_formatter)
+flog.addHandler(_handler)
+flog.setLevel(logging.WARN)
+
+from future.utils import PY2, PY3
+
+# The modules that are defined under the same names on Py3 but with
+# different contents in a significant way (e.g. submodules) are:
+# pickle (fast one)
+# dbm
+# urllib
+# test
+# email
+
+REPLACED_MODULES = set(['test', 'urllib', 'pickle', 'dbm']) # add email and dbm when we support it
+
+# The following module names are not present in Python 2.x, so they cause no
+# potential clashes between the old and new names:
+# http
+# html
+# tkinter
+# xmlrpc
+# Keys: Py2 / real module names
+# Values: Py3 / simulated module names
+RENAMES = {
+ # 'cStringIO': 'io', # there's a new io module in Python 2.6
+ # that provides StringIO and BytesIO
+ # 'StringIO': 'io', # ditto
+ # 'cPickle': 'pickle',
+ '__builtin__': 'builtins',
+ 'copy_reg': 'copyreg',
+ 'Queue': 'queue',
+ 'future.moves.socketserver': 'socketserver',
+ 'ConfigParser': 'configparser',
+ 'repr': 'reprlib',
+ # 'FileDialog': 'tkinter.filedialog',
+ # 'tkFileDialog': 'tkinter.filedialog',
+ # 'SimpleDialog': 'tkinter.simpledialog',
+ # 'tkSimpleDialog': 'tkinter.simpledialog',
+ # 'tkColorChooser': 'tkinter.colorchooser',
+ # 'tkCommonDialog': 'tkinter.commondialog',
+ # 'Dialog': 'tkinter.dialog',
+ # 'Tkdnd': 'tkinter.dnd',
+ # 'tkFont': 'tkinter.font',
+ # 'tkMessageBox': 'tkinter.messagebox',
+ # 'ScrolledText': 'tkinter.scrolledtext',
+ # 'Tkconstants': 'tkinter.constants',
+ # 'Tix': 'tkinter.tix',
+ # 'ttk': 'tkinter.ttk',
+ # 'Tkinter': 'tkinter',
+ '_winreg': 'winreg',
+ 'thread': '_thread',
+ 'dummy_thread': '_dummy_thread',
+ # 'anydbm': 'dbm', # causes infinite import loop
+ # 'whichdb': 'dbm', # causes infinite import loop
+ # anydbm and whichdb are handled by fix_imports2
+ # 'dbhash': 'dbm.bsd',
+ # 'dumbdbm': 'dbm.dumb',
+ # 'dbm': 'dbm.ndbm',
+ # 'gdbm': 'dbm.gnu',
+ 'future.moves.xmlrpc': 'xmlrpc',
+ # 'future.backports.email': 'email', # for use by urllib
+ # 'DocXMLRPCServer': 'xmlrpc.server',
+ # 'SimpleXMLRPCServer': 'xmlrpc.server',
+ # 'httplib': 'http.client',
+ # 'htmlentitydefs' : 'html.entities',
+ # 'HTMLParser' : 'html.parser',
+ # 'Cookie': 'http.cookies',
+ # 'cookielib': 'http.cookiejar',
+ # 'BaseHTTPServer': 'http.server',
+ # 'SimpleHTTPServer': 'http.server',
+ # 'CGIHTTPServer': 'http.server',
+ # 'future.backports.test': 'test', # primarily for renaming test_support to support
+ # 'commands': 'subprocess',
+ # 'urlparse' : 'urllib.parse',
+ # 'robotparser' : 'urllib.robotparser',
+ # 'abc': 'collections.abc', # for Py33
+ # 'future.utils.six.moves.html': 'html',
+ # 'future.utils.six.moves.http': 'http',
+ 'future.moves.html': 'html',
+ 'future.moves.http': 'http',
+ # 'future.backports.urllib': 'urllib',
+ # 'future.utils.six.moves.urllib': 'urllib',
+ 'future.moves._markupbase': '_markupbase',
+ }
+
+
+# It is complicated and apparently brittle to mess around with the
+# ``sys.modules`` cache in order to support "import urllib" meaning two
+# different things (Py2.7 urllib and backported Py3.3-like urllib) in different
+# contexts. So we require explicit imports for these modules.
+assert len(set(RENAMES.values()) & set(REPLACED_MODULES)) == 0
+
+
+# Harmless renames that we can insert.
+# These modules need names from elsewhere being added to them:
+# subprocess: should provide getoutput and other fns from commands
+# module but these fns are missing: getstatus, mk2arg,
+# mkarg
+# re: needs an ASCII constant that works compatibly with Py3
+
+# etc: see lib2to3/fixes/fix_imports.py
+
+# (New module name, new object name, old module name, old object name)
+MOVES = [('collections', 'UserList', 'UserList', 'UserList'),
+ ('collections', 'UserDict', 'UserDict', 'UserDict'),
+ ('collections', 'UserString','UserString', 'UserString'),
+ ('collections', 'ChainMap', 'future.backports.misc', 'ChainMap'),
+ ('itertools', 'filterfalse','itertools', 'ifilterfalse'),
+ ('itertools', 'zip_longest','itertools', 'izip_longest'),
+ ('sys', 'intern','__builtin__', 'intern'),
+ # The re module has no ASCII flag in Py2, but this is the default.
+ # Set re.ASCII to a zero constant. stat.ST_MODE just happens to be one
+ # (and it exists on Py2.6+).
+ ('re', 'ASCII','stat', 'ST_MODE'),
+ ('base64', 'encodebytes','base64', 'encodestring'),
+ ('base64', 'decodebytes','base64', 'decodestring'),
+ ('subprocess', 'getoutput', 'commands', 'getoutput'),
+ ('subprocess', 'getstatusoutput', 'commands', 'getstatusoutput'),
+ ('subprocess', 'check_output', 'future.backports.misc', 'check_output'),
+ ('math', 'ceil', 'future.backports.misc', 'ceil'),
+ ('collections', 'OrderedDict', 'future.backports.misc', 'OrderedDict'),
+ ('collections', 'Counter', 'future.backports.misc', 'Counter'),
+ ('collections', 'ChainMap', 'future.backports.misc', 'ChainMap'),
+ ('itertools', 'count', 'future.backports.misc', 'count'),
+ ('reprlib', 'recursive_repr', 'future.backports.misc', 'recursive_repr'),
+ ('functools', 'cmp_to_key', 'future.backports.misc', 'cmp_to_key'),
+
+# This is no use, since "import urllib.request" etc. still fails:
+# ('urllib', 'error', 'future.moves.urllib', 'error'),
+# ('urllib', 'parse', 'future.moves.urllib', 'parse'),
+# ('urllib', 'request', 'future.moves.urllib', 'request'),
+# ('urllib', 'response', 'future.moves.urllib', 'response'),
+# ('urllib', 'robotparser', 'future.moves.urllib', 'robotparser'),
+ ]
+
+
+# A minimal example of an import hook:
+# class WarnOnImport(object):
+# def __init__(self, *args):
+# self.module_names = args
+#
+# def find_module(self, fullname, path=None):
+# if fullname in self.module_names:
+# self.path = path
+# return self
+# return None
+#
+# def load_module(self, name):
+# if name in sys.modules:
+# return sys.modules[name]
+# module_info = imp.find_module(name, self.path)
+# module = imp.load_module(name, *module_info)
+# sys.modules[name] = module
+# flog.warning("Imported deprecated module %s", name)
+# return module
+
+
+class RenameImport(object):
+ """
+ A class for import hooks mapping Py3 module names etc. to the Py2 equivalents.
+ """
+ # Different RenameImport classes are created when importing this module from
+ # different source files. This causes isinstance(hook, RenameImport) checks
+ # to produce inconsistent results. We add this RENAMER attribute here so
+ # remove_hooks() and install_hooks() can find instances of these classes
+ # easily:
+ RENAMER = True
+
+ def __init__(self, old_to_new):
+ '''
+ Pass in a dictionary-like object mapping from old names to new
+ names. E.g. {'ConfigParser': 'configparser', 'cPickle': 'pickle'}
+ '''
+ self.old_to_new = old_to_new
+ both = set(old_to_new.keys()) & set(old_to_new.values())
+ assert (len(both) == 0 and
+ len(set(old_to_new.values())) == len(old_to_new.values())), \
+ 'Ambiguity in renaming (handler not implemented)'
+ self.new_to_old = dict((new, old) for (old, new) in old_to_new.items())
+
+ def find_module(self, fullname, path=None):
+ # Handles hierarchical importing: package.module.module2
+ new_base_names = set([s.split('.')[0] for s in self.new_to_old])
+ # Before v0.12: Was: if fullname in set(self.old_to_new) | new_base_names:
+ if fullname in new_base_names:
+ return self
+ return None
+
+ def load_module(self, name):
+ path = None
+ if name in sys.modules:
+ return sys.modules[name]
+ elif name in self.new_to_old:
+ # New name. Look up the corresponding old (Py2) name:
+ oldname = self.new_to_old[name]
+ module = self._find_and_load_module(oldname)
+ # module.__future_module__ = True
+ else:
+ module = self._find_and_load_module(name)
+ # In any case, make it available under the requested (Py3) name
+ sys.modules[name] = module
+ return module
+
+ def _find_and_load_module(self, name, path=None):
+ """
+ Finds and loads it. But if there's a . in the name, handles it
+ properly.
+ """
+ bits = name.split('.')
+ while len(bits) > 1:
+ # Treat the first bit as a package
+ packagename = bits.pop(0)
+ package = self._find_and_load_module(packagename, path)
+ try:
+ path = package.__path__
+ except AttributeError:
+ # This could be e.g. moves.
+ flog.debug('Package {0} has no __path__.'.format(package))
+ if name in sys.modules:
+ return sys.modules[name]
+ flog.debug('What to do here?')
+
+ name = bits[0]
+ module_info = imp.find_module(name, path)
+ return imp.load_module(name, *module_info)
+
+
+class hooks(object):
+ """
+ Acts as a context manager. Saves the state of sys.modules and restores it
+ after the 'with' block.
+
+ Use like this:
+
+ >>> from future import standard_library
+ >>> with standard_library.hooks():
+ ... import http.client
+ >>> import requests
+
+ For this to work, http.client will be scrubbed from sys.modules after the
+ 'with' block. That way the modules imported in the 'with' block will
+ continue to be accessible in the current namespace but not from any
+ imported modules (like requests).
+ """
+ def __enter__(self):
+ # flog.debug('Entering hooks context manager')
+ self.old_sys_modules = copy.copy(sys.modules)
+ self.hooks_were_installed = detect_hooks()
+ # self.scrubbed = scrub_py2_sys_modules()
+ install_hooks()
+ return self
+
+ def __exit__(self, *args):
+ # flog.debug('Exiting hooks context manager')
+ # restore_sys_modules(self.scrubbed)
+ if not self.hooks_were_installed:
+ remove_hooks()
+ # scrub_future_sys_modules()
+
+# Sanity check for is_py2_stdlib_module(): We aren't replacing any
+# builtin modules names:
+if PY2:
+ assert len(set(RENAMES.values()) & set(sys.builtin_module_names)) == 0
+
+
+def is_py2_stdlib_module(m):
+ """
+ Tries to infer whether the module m is from the Python 2 standard library.
+ This may not be reliable on all systems.
+ """
+ if PY3:
+ return False
+ if not 'stdlib_path' in is_py2_stdlib_module.__dict__:
+ stdlib_files = [contextlib.__file__, os.__file__, copy.__file__]
+ stdlib_paths = [os.path.split(f)[0] for f in stdlib_files]
+ if not len(set(stdlib_paths)) == 1:
+ # This seems to happen on travis-ci.org. Very strange. We'll try to
+ # ignore it.
+ flog.warn('Multiple locations found for the Python standard '
+ 'library: %s' % stdlib_paths)
+ # Choose the first one arbitrarily
+ is_py2_stdlib_module.stdlib_path = stdlib_paths[0]
+
+ if m.__name__ in sys.builtin_module_names:
+ return True
+
+ if hasattr(m, '__file__'):
+ modpath = os.path.split(m.__file__)
+ if (modpath[0].startswith(is_py2_stdlib_module.stdlib_path) and
+ 'site-packages' not in modpath[0]):
+ return True
+
+ return False
+
+
+def scrub_py2_sys_modules():
+ """
+ Removes any Python 2 standard library modules from ``sys.modules`` that
+ would interfere with Py3-style imports using import hooks. Examples are
+ modules with the same names (like urllib or email).
+
+ (Note that currently import hooks are disabled for modules like these
+ with ambiguous names anyway ...)
+ """
+ if PY3:
+ return {}
+ scrubbed = {}
+ for modulename in REPLACED_MODULES & set(RENAMES.keys()):
+ if not modulename in sys.modules:
+ continue
+
+ module = sys.modules[modulename]
+
+ if is_py2_stdlib_module(module):
+ flog.debug('Deleting (Py2) {} from sys.modules'.format(modulename))
+ scrubbed[modulename] = sys.modules[modulename]
+ del sys.modules[modulename]
+ return scrubbed
+
+
+def scrub_future_sys_modules():
+ """
+ Deprecated.
+ """
+ return {}
+
+class suspend_hooks(object):
+ """
+ Acts as a context manager. Use like this:
+
+ >>> from future import standard_library
+ >>> standard_library.install_hooks()
+ >>> import http.client
+ >>> # ...
+ >>> with standard_library.suspend_hooks():
+ >>> import requests # incompatible with ``future``'s standard library hooks
+
+ If the hooks were disabled before the context, they are not installed when
+ the context is left.
+ """
+ def __enter__(self):
+ self.hooks_were_installed = detect_hooks()
+ remove_hooks()
+ # self.scrubbed = scrub_future_sys_modules()
+ return self
+
+ def __exit__(self, *args):
+ if self.hooks_were_installed:
+ install_hooks()
+ # restore_sys_modules(self.scrubbed)
+
+
+def restore_sys_modules(scrubbed):
+ """
+ Add any previously scrubbed modules back to the sys.modules cache,
+ but only if it's safe to do so.
+ """
+ clash = set(sys.modules) & set(scrubbed)
+ if len(clash) != 0:
+ # If several, choose one arbitrarily to raise an exception about
+ first = list(clash)[0]
+ raise ImportError('future module {} clashes with Py2 module'
+ .format(first))
+ sys.modules.update(scrubbed)
+
+
+def install_aliases():
+ """
+ Monkey-patches the standard library in Py2.6/7 to provide
+ aliases for better Py3 compatibility.
+ """
+ if PY3:
+ return
+ # if hasattr(install_aliases, 'run_already'):
+ # return
+ for (newmodname, newobjname, oldmodname, oldobjname) in MOVES:
+ __import__(newmodname)
+ # We look up the module in sys.modules because __import__ just returns the
+ # top-level package:
+ newmod = sys.modules[newmodname]
+ # newmod.__future_module__ = True
+
+ __import__(oldmodname)
+ oldmod = sys.modules[oldmodname]
+
+ obj = getattr(oldmod, oldobjname)
+ setattr(newmod, newobjname, obj)
+
+ # Hack for urllib so it appears to have the same structure on Py2 as on Py3
+ import urllib
+ from future.backports.urllib import request
+ from future.backports.urllib import response
+ from future.backports.urllib import parse
+ from future.backports.urllib import error
+ from future.backports.urllib import robotparser
+ urllib.request = request
+ urllib.response = response
+ urllib.parse = parse
+ urllib.error = error
+ urllib.robotparser = robotparser
+ sys.modules['urllib.request'] = request
+ sys.modules['urllib.response'] = response
+ sys.modules['urllib.parse'] = parse
+ sys.modules['urllib.error'] = error
+ sys.modules['urllib.robotparser'] = robotparser
+
+ # Patch the test module so it appears to have the same structure on Py2 as on Py3
+ try:
+ import test
+ except ImportError:
+ pass
+ try:
+ from future.moves.test import support
+ except ImportError:
+ pass
+ else:
+ test.support = support
+ sys.modules['test.support'] = support
+
+ # Patch the dbm module so it appears to have the same structure on Py2 as on Py3
+ try:
+ import dbm
+ except ImportError:
+ pass
+ else:
+ from future.moves.dbm import dumb
+ dbm.dumb = dumb
+ sys.modules['dbm.dumb'] = dumb
+ try:
+ from future.moves.dbm import gnu
+ except ImportError:
+ pass
+ else:
+ dbm.gnu = gnu
+ sys.modules['dbm.gnu'] = gnu
+ try:
+ from future.moves.dbm import ndbm
+ except ImportError:
+ pass
+ else:
+ dbm.ndbm = ndbm
+ sys.modules['dbm.ndbm'] = ndbm
+
+ # install_aliases.run_already = True
+
+
+def install_hooks():
+    """
+    This function installs the future.standard_library import hook into
+    sys.meta_path.
+    """
+    # No-op on Py3: the standard library already has the Py3 module layout.
+    if PY3:
+        return
+
+    # Aliases (e.g. urllib.request) must exist before the hook is useful:
+    install_aliases()
+
+    flog.debug('sys.meta_path was: {0}'.format(sys.meta_path))
+    flog.debug('Installing hooks ...')
+
+    # Add it unless it's there already
+    newhook = RenameImport(RENAMES)
+    if not detect_hooks():
+        sys.meta_path.append(newhook)
+    flog.debug('sys.meta_path is now: {0}'.format(sys.meta_path))
+
+
+def enable_hooks():
+    """
+    Deprecated. Use install_hooks() instead. This will be removed by
+    ``future`` v1.0.
+    """
+    # Thin backward-compatibility alias; delegates unconditionally.
+    install_hooks()
+
+
+def remove_hooks(scrub_sys_modules=False):
+    """
+    This function removes the import hook from sys.meta_path.
+
+    :param scrub_sys_modules: if True, also drop any future-provided
+        entries from sys.modules (via scrub_future_sys_modules()).
+    """
+    # No-op on Py3: install_hooks() never adds anything there.
+    if PY3:
+        return
+    flog.debug('Uninstalling hooks ...')
+    # Loop backwards, so deleting items keeps the ordering:
+    for i, hook in list(enumerate(sys.meta_path))[::-1]:
+        # RenameImport instances are recognised by their RENAMER marker attribute.
+        if hasattr(hook, 'RENAMER'):
+            del sys.meta_path[i]
+
+    # Explicit is better than implicit. In the future the interface should
+    # probably change so that scrubbing the import hooks requires a separate
+    # function call. Left as is for now for backward compatibility with
+    # v0.11.x.
+    if scrub_sys_modules:
+        scrub_future_sys_modules()
+
+
+def disable_hooks():
+    """
+    Deprecated. Use remove_hooks() instead. This will be removed by
+    ``future`` v1.0.
+    """
+    # Thin backward-compatibility alias; delegates unconditionally.
+    remove_hooks()
+
+
+def detect_hooks():
+    """
+    Returns True if the import hooks are installed, False if not.
+    """
+    flog.debug('Detecting hooks ...')
+    # A hook is "ours" iff it carries the RENAMER marker attribute.
+    present = any([hasattr(hook, 'RENAMER') for hook in sys.meta_path])
+    if present:
+        flog.debug('Detected.')
+    else:
+        flog.debug('Not detected.')
+    return present
+
+
+# As of v0.12, this no longer happens implicitly:
+# if not PY3:
+#     install_hooks()
+
+
+# Cache of the original Py2 stdlib modules, populated by cache_py2_modules().
+if not hasattr(sys, 'py2_modules'):
+    sys.py2_modules = {}
+
+def cache_py2_modules():
+    """
+    Currently this function is unneeded, as we are not attempting to provide import hooks
+    for modules with ambiguous names: email, urllib, pickle.
+    """
+    # Populate only once, and only while no hooks are installed, so the
+    # genuine Py2 modules are captured rather than future's replacements.
+    if len(sys.py2_modules) != 0:
+        return
+    assert not detect_hooks()
+    import urllib
+    sys.py2_modules['urllib'] = urllib
+
+    import email
+    sys.py2_modules['email'] = email
+
+    import pickle
+    sys.py2_modules['pickle'] = pickle
+
+    # Not all Python installations have test module. (Anaconda doesn't, for example.)
+    # try:
+    #     import test
+    # except ImportError:
+    #     sys.py2_modules['test'] = None
+    # sys.py2_modules['test'] = test
+
+    # import dbm
+    # sys.py2_modules['dbm'] = dbm
+
+
+def import_(module_name, backport=False):
+    """
+    Pass a (potentially dotted) module name of a Python 3 standard library
+    module. This function imports the module compatibly on Py2 and Py3 and
+    returns the top-level module.
+
+    Example use:
+    >>> http = import_('http.client')
+    >>> http = import_('http.server')
+    >>> urllib = import_('urllib.request')
+
+    Then:
+    >>> conn = http.client.HTTPConnection(...)
+    >>> response = urllib.request.urlopen('http://mywebsite.com')
+    >>> # etc.
+
+    Use as follows:
+    >>> package_name = import_(module_name)
+
+    On Py3, equivalent to this:
+
+    >>> import module_name
+
+    On Py2, equivalent to this if backport=False:
+
+    >>> from future.moves import module_name
+
+    or to this if backport=True:
+
+    >>> from future.backports import module_name
+
+    except that it also handles dotted module names such as ``http.client``
+    The effect then is like this:
+
+    >>> from future.backports import module
+    >>> from future.backports.module import submodule
+    >>> module.submodule = submodule
+
+    Note that this would be a SyntaxError in Python:
+
+    >>> from future.backports import http.client
+
+    """
+    # Python 2.6 doesn't have importlib in the stdlib, so it requires
+    # the backported ``importlib`` package from PyPI as a dependency to use
+    # this function:
+    import importlib
+
+    if PY3:
+        return __import__(module_name)
+    else:
+        # client.blah = blah
+        # Then http.client = client
+        # etc.
+        if backport:
+            prefix = 'future.backports'
+        else:
+            prefix = 'future.moves'
+        parts = prefix.split('.') + module_name.split('.')
+
+        # Import every dotted prefix, then stitch each submodule onto its
+        # parent so attribute access (e.g. http.client) works as on Py3:
+        modules = []
+        for i, part in enumerate(parts):
+            sofar = '.'.join(parts[:i+1])
+            modules.append(importlib.import_module(sofar))
+        for i, part in reversed(list(enumerate(parts))):
+            if i == 0:
+                break
+            setattr(modules[i-1], part, modules[i])
+
+        # Return the next-most top-level module after future.backports / future.moves:
+        # (index 2 because the prefix always contributes exactly two parts)
+        return modules[2]
+
+
+def from_import(module_name, *symbol_names, **kwargs):
+ """
+ Example use:
+ >>> HTTPConnection = from_import('http.client', 'HTTPConnection')
+ >>> HTTPServer = from_import('http.server', 'HTTPServer')
+ >>> urlopen, urlparse = from_import('urllib.request', 'urlopen', 'urlparse')
+
+ Equivalent to this on Py3:
+
+ >>> from module_name import symbol_names[0], symbol_names[1], ...
+
+ and this on Py2:
+
+ >>> from future.moves.module_name import symbol_names[0], ...
+
+ or:
+
+ >>> from future.backports.module_name import symbol_names[0], ...
+
+ except that it also handles dotted module names such as ``http.client``.
+ """
+
+ if PY3:
+ return __import__(module_name)
+ else:
+ if 'backport' in kwargs and bool(kwargs['backport']):
+ prefix = 'future.backports'
+ else:
+ prefix = 'future.moves'
+ parts = prefix.split('.') + module_name.split('.')
+ module = importlib.import_module(prefix + '.' + module_name)
+ output = [getattr(module, name) for name in symbol_names]
+ if len(output) == 1:
+ return output[0]
+ else:
+ return output
+
+
+class exclude_local_folder_imports(object):
+    """
+    A context-manager that prevents standard library modules like configparser
+    from being imported from the local python-future source folder on Py3.
+
+    (This was needed prior to v0.16.0 because the presence of a configparser
+    folder would otherwise have prevented setuptools from running on Py3. Maybe
+    it's not needed any more?)
+    """
+    def __init__(self, *args):
+        # args: one or more top-level (undotted) module names to shield.
+        assert len(args) > 0
+        self.module_names = args
+        # Disallow dotted module names like http.client:
+        if any(['.' in m for m in self.module_names]):
+            raise NotImplementedError('Dotted module names are not supported')
+
+    def __enter__(self):
+        # Snapshot sys.path / sys.modules so __exit__ can restore them.
+        self.old_sys_path = copy.copy(sys.path)
+        self.old_sys_modules = copy.copy(sys.modules)
+        # Only Py3 needs shielding from the local source folder:
+        if sys.version_info[0] < 3:
+            return
+        # The presence of all these indicates we've found our source folder,
+        # because `builtins` won't have been installed in site-packages by setup.py:
+        FUTURE_SOURCE_SUBFOLDERS = ['future', 'past', 'libfuturize', 'libpasteurize', 'builtins']
+
+        # Look for the future source folder:
+        for folder in self.old_sys_path:
+            if all([os.path.exists(os.path.join(folder, subfolder))
+                    for subfolder in FUTURE_SOURCE_SUBFOLDERS]):
+                # Found it. Remove it.
+                sys.path.remove(folder)
+
+        # Ensure we import the system module:
+        for m in self.module_names:
+            # Delete the module and any submodules from sys.modules:
+            # for key in list(sys.modules):
+            #     if key == m or key.startswith(m + '.'):
+            #         try:
+            #             del sys.modules[key]
+            #         except KeyError:
+            #             pass
+            try:
+                module = __import__(m, level=0)
+            except ImportError:
+                # There's a problem importing the system module. E.g. the
+                # winreg module is not available except on Windows.
+                pass
+
+    def __exit__(self, *args):
+        # Restore sys.path and sys.modules:
+        sys.path = self.old_sys_path
+        for m in set(self.old_sys_modules.keys()) - set(sys.modules.keys()):
+            sys.modules[m] = self.old_sys_modules[m]
+
+# Py3-style top-level stdlib names that python-future provides on Py2.
+TOP_LEVEL_MODULES = ['builtins',
+                     'copyreg',
+                     'html',
+                     'http',
+                     'queue',
+                     'reprlib',
+                     'socketserver',
+                     'test',
+                     'tkinter',
+                     'winreg',
+                     'xmlrpc',
+                     '_dummy_thread',
+                     '_markupbase',
+                     '_thread',
+                     ]
+
+def import_top_level_modules():
+    """
+    Pre-import every module in TOP_LEVEL_MODULES, shielded from any local
+    python-future source folder; missing modules are silently skipped.
+    """
+    with exclude_local_folder_imports(*TOP_LEVEL_MODULES):
+        for m in TOP_LEVEL_MODULES:
+            try:
+                __import__(m)
+            except ImportError:   # e.g. winreg
+                pass
diff --git a/src/clyphx/vendor/future/future/tests/__init__.py b/src/clyphx/vendor/future/future/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/clyphx/vendor/future/future/tests/base.py b/src/clyphx/vendor/future/future/tests/base.py
new file mode 100644
index 0000000..4ef437b
--- /dev/null
+++ b/src/clyphx/vendor/future/future/tests/base.py
@@ -0,0 +1,539 @@
+from __future__ import print_function, absolute_import
+import os
+import tempfile
+import unittest
+import sys
+import re
+import warnings
+import io
+from textwrap import dedent
+
+from future.utils import bind_method, PY26, PY3, PY2, PY27
+from future.moves.subprocess import check_output, STDOUT, CalledProcessError
+
+if PY26:
+ import unittest2 as unittest
+
+
+def reformat_code(code):
+    """
+    Strip a single leading newline (if present) and dedent *code*.
+
+    Lets test code blocks be written as indented triple-quoted strings
+    starting on the line after the opening quotes.
+    """
+    if code.startswith('\n'):
+        code = code[1:]
+    return dedent(code)
+
+
+def order_future_lines(code):
+    """
+    Returns the code block with any ``__future__`` import lines sorted, and
+    then any ``future`` import lines sorted, then any ``builtins`` import lines
+    sorted.
+
+    This only sorts the lines within the expected blocks.
+
+    See test_order_future_lines() for an example.
+    """
+
+    # We need .splitlines(keepends=True), which doesn't exist on Py2,
+    # so we use this instead:
+    lines = code.split('\n')
+
+    uufuture_line_numbers = [i for i, line in enumerate(lines)
+                             if line.startswith('from __future__ import ')]
+
+    future_line_numbers = [i for i, line in enumerate(lines)
+                           if line.startswith('from future')
+                           or line.startswith('from past')]
+
+    builtins_line_numbers = [i for i, line in enumerate(lines)
+                             if line.startswith('from builtins')]
+
+    assert code.lstrip() == code, ('internal usage error: '
+                                   'dedent the code before calling order_future_lines()')
+
+    # max/min with a sensible identity for empty lists, so the ordering
+    # assertions below hold trivially when a group is absent:
+    def mymax(numbers):
+        return max(numbers) if len(numbers) > 0 else 0
+
+    def mymin(numbers):
+        return min(numbers) if len(numbers) > 0 else float('inf')
+
+    assert mymax(uufuture_line_numbers) <= mymin(future_line_numbers), \
+            'the __future__ and future imports are out of order'
+
+    # assert mymax(future_line_numbers) <= mymin(builtins_line_numbers), \
+    #         'the future and builtins imports are out of order'
+
+    # Map each group's original line numbers onto its sorted line texts:
+    uul = sorted([lines[i] for i in uufuture_line_numbers])
+    sorted_uufuture_lines = dict(zip(uufuture_line_numbers, uul))
+
+    fl = sorted([lines[i] for i in future_line_numbers])
+    sorted_future_lines = dict(zip(future_line_numbers, fl))
+
+    bl = sorted([lines[i] for i in builtins_line_numbers])
+    sorted_builtins_lines = dict(zip(builtins_line_numbers, bl))
+
+    # Replace the old unsorted "from __future__ import ..." lines with the
+    # new sorted ones:
+    new_lines = []
+    for i in range(len(lines)):
+        if i in uufuture_line_numbers:
+            new_lines.append(sorted_uufuture_lines[i])
+        elif i in future_line_numbers:
+            new_lines.append(sorted_future_lines[i])
+        elif i in builtins_line_numbers:
+            new_lines.append(sorted_builtins_lines[i])
+        else:
+            new_lines.append(lines[i])
+    return '\n'.join(new_lines)
+
+
+class VerboseCalledProcessError(CalledProcessError):
+    """
+    Like CalledProcessError, but it displays more information (message and
+    script output) for diagnosing test failures etc.
+    """
+    def __init__(self, msg, returncode, cmd, output=None):
+        # msg: extra diagnostic text; the rest mirrors CalledProcessError.
+        # NOTE: deliberately does not call super().__init__ — attributes are
+        # set directly so the Py2.6-compatible signature stays uniform.
+        self.msg = msg
+        self.returncode = returncode
+        self.cmd = cmd
+        self.output = output
+
+    def __str__(self):
+        return ("Command '%s' failed with exit status %d\nMessage: %s\nOutput: %s"
+                % (self.cmd, self.returncode, self.msg, self.output))
+
+class FuturizeError(VerboseCalledProcessError):
+    """Raised when a ``futurize`` subprocess exits with a nonzero status."""
+    pass
+
+class PasteurizeError(VerboseCalledProcessError):
+    """Raised when a ``pasteurize`` subprocess exits with a nonzero status."""
+    pass
+
+
+class CodeHandler(unittest.TestCase):
+    """
+    Handy mixin for test classes for writing / reading / futurizing /
+    running .py files in the test suite.
+    """
+    def setUp(self):
+        """
+        The outputs from the various futurize stages should have the
+        following headers:
+        """
+        # After stage1:
+        # TODO: use this form after implementing a fixer to consolidate
+        # __future__ imports into a single line:
+        # self.headers1 = """
+        # from __future__ import absolute_import, division, print_function
+        # """
+        self.headers1 = reformat_code("""
+        from __future__ import absolute_import
+        from __future__ import division
+        from __future__ import print_function
+        """)
+
+        # After stage2 --all-imports:
+        # TODO: use this form after implementing a fixer to consolidate
+        # __future__ imports into a single line:
+        # self.headers2 = """
+        # from __future__ import (absolute_import, division,
+        #                         print_function, unicode_literals)
+        # from future import standard_library
+        # from future.builtins import *
+        # """
+        self.headers2 = reformat_code("""
+        from __future__ import absolute_import
+        from __future__ import division
+        from __future__ import print_function
+        from __future__ import unicode_literals
+        from future import standard_library
+        standard_library.install_aliases()
+        from builtins import *
+        """)
+        self.interpreters = [sys.executable]
+        self.tempdir = tempfile.mkdtemp() + os.path.sep
+        # Run child scripts with the current working dir on PYTHONPATH so
+        # they can import the local ``future`` package:
+        pypath = os.getenv('PYTHONPATH')
+        if pypath:
+            self.env = {'PYTHONPATH': os.getcwd() + os.pathsep + pypath}
+        else:
+            self.env = {'PYTHONPATH': os.getcwd()}
+
+    def convert(self, code, stages=(1, 2), all_imports=False, from3=False,
+                reformat=True, run=True, conservative=False):
+        """
+        Converts the code block using ``futurize`` and returns the
+        resulting code.
+
+        Passing stages=[1] or stages=[2] passes the flag ``--stage1`` or
+        ``stage2`` to ``futurize``. Passing both stages runs ``futurize``
+        with both stages by default.
+
+        If from3 is False, runs ``futurize``, converting from Python 2 to
+        both 2 and 3. If from3 is True, runs ``pasteurize`` to convert
+        from Python 3 to both 2 and 3.
+
+        Optionally reformats the code block first using the reformat() function.
+
+        If run is True, runs the resulting code under all Python
+        interpreters in self.interpreters.
+        """
+        if reformat:
+            code = reformat_code(code)
+        # Round-trip via a temp file: write, futurize in place, read back.
+        self._write_test_script(code)
+        self._futurize_test_script(stages=stages, all_imports=all_imports,
+                                   from3=from3, conservative=conservative)
+        output = self._read_test_script()
+        if run:
+            for interpreter in self.interpreters:
+                _ = self._run_test_script(interpreter=interpreter)
+        return output
+
+    def compare(self, output, expected, ignore_imports=True):
+        """
+        Compares whether the code blocks are equal. If not, raises an
+        exception so the test fails. Ignores any trailing whitespace like
+        blank lines.
+
+        If ignore_imports is True, passes the code blocks into the
+        strip_future_imports method.
+
+        If one code block is a unicode string and the other a
+        byte-string, it assumes the byte-string is encoded as utf-8.
+        """
+        if ignore_imports:
+            output = self.strip_future_imports(output)
+            expected = self.strip_future_imports(expected)
+        # Normalise mixed bytes/unicode comparisons to unicode via utf-8:
+        if isinstance(output, bytes) and not isinstance(expected, bytes):
+            output = output.decode('utf-8')
+        if isinstance(expected, bytes) and not isinstance(output, bytes):
+            expected = expected.decode('utf-8')
+        self.assertEqual(order_future_lines(output.rstrip()),
+                         expected.rstrip())
+
+    def strip_future_imports(self, code):
+        """
+        Strips any of these import lines:
+
+            from __future__ import <anything>
+            from future <anything>
+            from future.<anything>
+            from builtins <anything>
+
+        or any line containing:
+            install_hooks()
+        or:
+            install_aliases()
+
+        Limitation: doesn't handle imports split across multiple lines like
+        this:
+
+            from __future__ import (absolute_import, division, print_function,
+                                    unicode_literals)
+        """
+        output = []
+        # We need .splitlines(keepends=True), which doesn't exist on Py2,
+        # so we use this instead:
+        for line in code.split('\n'):
+            if not (line.startswith('from __future__ import ')
+                    or line.startswith('from future ')
+                    or line.startswith('from builtins ')
+                    or 'install_hooks()' in line
+                    or 'install_aliases()' in line
+                    # but don't match "from future_builtins" :)
+                    or line.startswith('from future.')):
+                output.append(line)
+        return '\n'.join(output)
+
+    def convert_check(self, before, expected, stages=(1, 2), all_imports=False,
+                      ignore_imports=True, from3=False, run=True,
+                      conservative=False):
+        """
+        Convenience method that calls convert() and compare().
+
+        Reformats the code blocks automatically using the reformat_code()
+        function.
+
+        If all_imports is passed, we add the appropriate import headers
+        for the stage(s) selected to the ``expected`` code-block, so they
+        needn't appear repeatedly in the test code.
+
+        If ignore_imports is True, ignores the presence of any lines
+        beginning:
+
+            from __future__ import ...
+            from future import ...
+
+        for the purpose of the comparison.
+        """
+        output = self.convert(before, stages=stages, all_imports=all_imports,
+                              from3=from3, run=run, conservative=conservative)
+        if all_imports:
+            headers = self.headers2 if 2 in stages else self.headers1
+        else:
+            headers = ''
+
+        reformatted = reformat_code(expected)
+        # Don't prepend headers the expected block already contains:
+        if headers in reformatted:
+            headers = ''
+
+        self.compare(output, headers + reformatted,
+                     ignore_imports=ignore_imports)
+
+    def unchanged(self, code, **kwargs):
+        """
+        Convenience method to ensure the code is unchanged by the
+        futurize process.
+        """
+        self.convert_check(code, code, **kwargs)
+
+    def _write_test_script(self, code, filename='mytestscript.py'):
+        """
+        Dedents the given code (a multiline string) and writes it out to
+        a file in a temporary folder like /tmp/tmpUDCn7x/mytestscript.py.
+        """
+        if isinstance(code, bytes):
+            code = code.decode('utf-8')
+        # Be explicit about encoding the temp file as UTF-8 (issue #63):
+        with io.open(self.tempdir + filename, 'wt', encoding='utf-8') as f:
+            f.write(dedent(code))
+
+    def _read_test_script(self, filename='mytestscript.py'):
+        # Read the (possibly futurized) script back as a unicode string.
+        with io.open(self.tempdir + filename, 'rt', encoding='utf-8') as f:
+            newsource = f.read()
+        return newsource
+
+    def _futurize_test_script(self, filename='mytestscript.py', stages=(1, 2),
+                              all_imports=False, from3=False,
+                              conservative=False):
+        """
+        Run futurize (or pasteurize if from3=True) in-place on the test
+        script, raising FuturizeError / PasteurizeError with full output
+        on failure.
+        """
+        params = []
+        stages = list(stages)
+        if all_imports:
+            params.append('--all-imports')
+        if from3:
+            script = 'pasteurize.py'
+        else:
+            script = 'futurize.py'
+            if stages == [1]:
+                params.append('--stage1')
+            elif stages == [2]:
+                params.append('--stage2')
+            else:
+                assert stages == [1, 2]
+                if conservative:
+                    params.append('--conservative')
+                # No extra params needed
+
+        # Absolute file path:
+        fn = self.tempdir + filename
+        call_args = [sys.executable, script] + params + ['-w', fn]
+        try:
+            output = check_output(call_args, stderr=STDOUT, env=self.env)
+        except CalledProcessError as e:
+            # Include the command, env, and full file contents in the error
+            # to make test failures diagnosable from the traceback alone:
+            with open(fn) as f:
+                msg = (
+                    'Error running the command %s\n'
+                    '%s\n'
+                    'Contents of file %s:\n'
+                    '\n'
+                    '%s') % (
+                        ' '.join(call_args),
+                        'env=%s' % self.env,
+                        fn,
+                        '----\n%s\n----' % f.read(),
+                    )
+            ErrorClass = (FuturizeError if 'futurize' in script else PasteurizeError)
+
+            if not hasattr(e, 'output'):
+                # The attribute CalledProcessError.output doesn't exist on Py2.6
+                e.output = None
+            raise ErrorClass(msg, e.returncode, e.cmd, output=e.output)
+        return output
+
+    def _run_test_script(self, filename='mytestscript.py',
+                         interpreter=sys.executable):
+        """
+        Execute the test script under *interpreter*, raising
+        VerboseCalledProcessError with full output on failure.
+        """
+        # Absolute file path:
+        fn = self.tempdir + filename
+        try:
+            output = check_output([interpreter, fn],
+                                  env=self.env, stderr=STDOUT)
+        except CalledProcessError as e:
+            with open(fn) as f:
+                msg = (
+                    'Error running the command %s\n'
+                    '%s\n'
+                    'Contents of file %s:\n'
+                    '\n'
+                    '%s') % (
+                        ' '.join([interpreter, fn]),
+                        'env=%s' % self.env,
+                        fn,
+                        '----\n%s\n----' % f.read(),
+                    )
+            if not hasattr(e, 'output'):
+                # The attribute CalledProcessError.output doesn't exist on Py2.6
+                e.output = None
+            raise VerboseCalledProcessError(msg, e.returncode, e.cmd, output=e.output)
+        return output
+
+
+# Decorator to skip some tests on Python 2.6 (PY26 comes from future.utils):
+skip26 = unittest.skipIf(PY26, "this test is known to fail on Py2.6")
+
+
+def expectedFailurePY3(func):
+    """Mark *func* as an expected failure only when running on Python 3."""
+    if not PY3:
+        return func
+    return unittest.expectedFailure(func)
+
+def expectedFailurePY26(func):
+    """Mark *func* as an expected failure only when running on Python 2.6."""
+    if not PY26:
+        return func
+    return unittest.expectedFailure(func)
+
+
+def expectedFailurePY27(func):
+    """Mark *func* as an expected failure only when running on Python 2.7."""
+    if not PY27:
+        return func
+    return unittest.expectedFailure(func)
+
+
+def expectedFailurePY2(func):
+    """Mark *func* as an expected failure only when running on Python 2."""
+    if not PY2:
+        return func
+    return unittest.expectedFailure(func)
+
+
+# Renamed in Py3.3: provide the new name as an alias of the old one where missing.
+if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
+    unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
+
+# From Py3.3:
+def assertRegex(self, text, expected_regex, msg=None):
+    """Fail the test unless the text matches the regular expression."""
+    # NOTE(review): `unicode` only exists on Py2. This helper is bound only
+    # when TestCase lacks assertRegex (Python < 3.2), but it would raise
+    # NameError if that path were ever taken on Py3.0/3.1 — confirm targets.
+    if isinstance(expected_regex, (str, unicode)):
+        assert expected_regex, "expected_regex must not be empty."
+        expected_regex = re.compile(expected_regex)
+    if not expected_regex.search(text):
+        msg = msg or "Regex didn't match"
+        msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
+        raise self.failureException(msg)
+
+# Attach the backported assertRegex to TestCase where it's missing (< Py3.2):
+if not hasattr(unittest.TestCase, 'assertRegex'):
+    bind_method(unittest.TestCase, 'assertRegex', assertRegex)
+
+class _AssertRaisesBaseContext(object):
+    """Shared base for assertRaises/assertWarns context managers (from Py3.3)."""
+
+    def __init__(self, expected, test_case, callable_obj=None,
+                 expected_regex=None):
+        # expected: the exception/warning class to look for.
+        self.expected = expected
+        self.test_case = test_case
+        # Remember a printable name of the callable for failure messages:
+        if callable_obj is not None:
+            try:
+                self.obj_name = callable_obj.__name__
+            except AttributeError:
+                self.obj_name = str(callable_obj)
+        else:
+            self.obj_name = None
+        if isinstance(expected_regex, (bytes, str)):
+            expected_regex = re.compile(expected_regex)
+        self.expected_regex = expected_regex
+        self.msg = None
+
+    def _raiseFailure(self, standardMsg):
+        # Merge any user-supplied msg with the standard one and fail the test.
+        msg = self.test_case._formatMessage(self.msg, standardMsg)
+        raise self.test_case.failureException(msg)
+
+    def handle(self, name, callable_obj, args, kwargs):
+        """
+        If callable_obj is None, assertRaises/Warns is being used as a
+        context manager, so check for a 'msg' kwarg and return self.
+        If callable_obj is not None, call it passing args and kwargs.
+        """
+        if callable_obj is None:
+            self.msg = kwargs.pop('msg', None)
+            return self
+        with self:
+            callable_obj(*args, **kwargs)
+
+class _AssertWarnsContext(_AssertRaisesBaseContext):
+    """A context manager used to implement TestCase.assertWarns* methods."""
+
+    def __enter__(self):
+        # The __warningregistry__'s need to be in a pristine state for tests
+        # to work properly.
+        for v in sys.modules.values():
+            if getattr(v, '__warningregistry__', None):
+                v.__warningregistry__ = {}
+        self.warnings_manager = warnings.catch_warnings(record=True)
+        self.warnings = self.warnings_manager.__enter__()
+        warnings.simplefilter("always", self.expected)
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.warnings_manager.__exit__(exc_type, exc_value, tb)
+        if exc_type is not None:
+            # let unexpected exceptions pass through
+            return
+        try:
+            exc_name = self.expected.__name__
+        except AttributeError:
+            exc_name = str(self.expected)
+        first_matching = None
+        # Scan the recorded warnings for one of the expected class that also
+        # matches expected_regex (if given):
+        for m in self.warnings:
+            w = m.message
+            if not isinstance(w, self.expected):
+                continue
+            if first_matching is None:
+                first_matching = w
+            if (self.expected_regex is not None and
+                not self.expected_regex.search(str(w))):
+                continue
+            # store warning for later retrieval
+            self.warning = w
+            self.filename = m.filename
+            self.lineno = m.lineno
+            return
+        # Now we simply try to choose a helpful failure message
+        if first_matching is not None:
+            self._raiseFailure('"{}" does not match "{}"'.format(
+                self.expected_regex.pattern, str(first_matching)))
+        if self.obj_name:
+            self._raiseFailure("{} not triggered by {}".format(exc_name,
+                                                               self.obj_name))
+        else:
+            self._raiseFailure("{} not triggered".format(exc_name))
+
+
+def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
+    """Fail unless a warning of class warnClass is triggered
+       by callable_obj when invoked with arguments args and keyword
+       arguments kwargs.  If a different type of warning is
+       triggered, it will not be handled: depending on the other
+       warning filtering rules in effect, it might be silenced, printed
+       out, or raised as an exception.
+
+       If called with callable_obj omitted or None, will return a
+       context object used like this::
+
+            with self.assertWarns(SomeWarning):
+                do_something()
+
+       An optional keyword argument 'msg' can be provided when assertWarns
+       is used as a context object.
+
+       The context manager keeps a reference to the first matching
+       warning as the 'warning' attribute; similarly, the 'filename'
+       and 'lineno' attributes give you information about the line
+       of Python code from which the warning was triggered.
+       This allows you to inspect the warning after the assertion::
+
+           with self.assertWarns(SomeWarning) as cm:
+               do_something()
+           the_warning = cm.warning
+           self.assertEqual(the_warning.some_attribute, 147)
+    """
+    # Delegate to the context object; handle() dispatches between the
+    # context-manager and direct-call styles.
+    context = _AssertWarnsContext(expected_warning, self, callable_obj)
+    return context.handle('assertWarns', callable_obj, args, kwargs)
+
+# Attach the backported assertWarns to TestCase where it's missing (< Py3.2):
+if not hasattr(unittest.TestCase, 'assertWarns'):
+    bind_method(unittest.TestCase, 'assertWarns', assertWarns)
diff --git a/src/clyphx/vendor/future/future/types/__init__.py b/src/clyphx/vendor/future/future/types/__init__.py
new file mode 100644
index 0000000..0625077
--- /dev/null
+++ b/src/clyphx/vendor/future/future/types/__init__.py
@@ -0,0 +1,257 @@
+"""
+This module contains backports of the data types that were significantly changed
+in the transition from Python 2 to Python 3.
+
+- an implementation of Python 3's bytes object (pure Python subclass of
+ Python 2's builtin 8-bit str type)
+- an implementation of Python 3's str object (pure Python subclass of
+ Python 2's builtin unicode type)
+- a backport of the range iterator from Py3 with slicing support
+
+It is used as follows::
+
+ from __future__ import division, absolute_import, print_function
+ from builtins import bytes, dict, int, range, str
+
+to bring in the new semantics for these functions from Python 3. And
+then, for example::
+
+ b = bytes(b'ABCD')
+ assert list(b) == [65, 66, 67, 68]
+ assert repr(b) == "b'ABCD'"
+ assert [65, 66] in b
+
+ # These raise TypeErrors:
+ # b + u'EFGH'
+ # b.split(u'B')
+ # bytes(b',').join([u'Fred', u'Bill'])
+
+
+ s = str(u'ABCD')
+
+ # These raise TypeErrors:
+ # s.join([b'Fred', b'Bill'])
+ # s.startswith(b'A')
+ # b'B' in s
+ # s.find(b'A')
+ # s.replace(u'A', b'a')
+
+ # This raises an AttributeError:
+ # s.decode('utf-8')
+
+ assert repr(s) == 'ABCD' # consistent repr with Py3 (no u prefix)
+
+
+ for i in range(10**11)[:10]:
+ pass
+
+and::
+
+ class VerboseList(list):
+ def append(self, item):
+ print('Adding an item')
+ super().append(item) # new simpler super() function
+
+For more information:
+---------------------
+
+- future.types.newbytes
+- future.types.newdict
+- future.types.newint
+- future.types.newobject
+- future.types.newrange
+- future.types.newstr
+
+
+Notes
+=====
+
+range()
+-------
+``range`` is a custom class that backports the slicing behaviour from
+Python 3 (based on the ``xrange`` module by Dan Crosta). See the
+``newrange`` module docstring for more details.
+
+
+super()
+-------
+``super()`` is based on Ryan Kelly's ``magicsuper`` module. See the
+``newsuper`` module docstring for more details.
+
+
+round()
+-------
+Python 3 modifies the behaviour of ``round()`` to use "Banker's Rounding".
+See http://stackoverflow.com/a/10825998. See the ``newround`` module
+docstring for more details.
+
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import functools
+from numbers import Integral
+
+from future import utils
+
+
+# Some utility functions to enforce strict type-separation of unicode str and
+# bytes:
+def disallow_types(argnums, disallowed_types):
+    """
+    A decorator that raises a TypeError if any of the given numbered
+    arguments is of the corresponding given type (e.g. bytes or unicode
+    string).
+
+    For example:
+
+        @disallow_types([0, 1], [unicode, bytes])
+        def f(a, b):
+            pass
+
+    raises a TypeError when f is called if a unicode object is passed as
+    `a` or a bytes object is passed as `b`.
+
+    This also skips over keyword arguments, so
+
+        @disallow_types([0, 1], [unicode, bytes])
+        def g(a, b=None):
+            pass
+
+    doesn't raise an exception if g is called with only one argument a,
+    e.g.:
+
+        g(b'Byte string')
+
+    Example use:
+
+    >>> class newbytes(object):
+    ...     @disallow_types([1], [unicode])
+    ...     def __add__(self, other):
+    ...          pass
+
+    >>> newbytes('1234') + u'1234'      #doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+      ...
+    TypeError: can't concat 'bytes' to (unicode) str
+    """
+
+    def decorator(function):
+
+        @functools.wraps(function)
+        def wrapper(*args, **kwargs):
+            # These imports are just for this decorator, and are defined here
+            # to prevent circular imports:
+            from .newbytes import newbytes
+            from .newint import newint
+            from .newstr import newstr
+
+            errmsg = "argument can't be {0}"
+            for (argnum, mytype) in zip(argnums, disallowed_types):
+                # Handle the case where the type is passed as a string like 'newbytes'.
+                # NOTE(review): locals() here can only resolve the three names
+                # imported above ('newbytes', 'newint', 'newstr'); any other
+                # string would raise KeyError.
+                if isinstance(mytype, str) or isinstance(mytype, bytes):
+                    mytype = locals()[mytype]
+
+                # Only restrict kw args only if they are passed:
+                if len(args) <= argnum:
+                    break
+
+                # Here we use type() rather than isinstance() because
+                # __instancecheck__ is being overridden. E.g.
+                # isinstance(b'abc', newbytes) is True on Py2.
+                if type(args[argnum]) == mytype:
+                    raise TypeError(errmsg.format(mytype))
+
+            return function(*args, **kwargs)
+        return wrapper
+    return decorator
+
+
+def no(mytype, argnums=(1,)):
+    """
+    A shortcut for the disallow_types decorator that disallows only one type
+    (in any position in argnums).
+
+    Example use:
+
+    >>> class newstr(object):
+    ...     @no('bytes')
+    ...     def __add__(self, other):
+    ...          pass
+
+    >>> newstr(u'1234') + b'1234'     #doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+      ...
+    TypeError: argument can't be bytes
+
+    The object can also be passed directly, but passing the string helps
+    to prevent circular import problems.
+    """
+    # Accept a single int as well as a tuple of argument positions:
+    if isinstance(argnums, Integral):
+        argnums = (argnums,)
+    disallowed_types = [mytype] * len(argnums)
+    return disallow_types(argnums, disallowed_types)
+
+
+def issubset(list1, list2):
+    """
+    Return True if list1 occurs as a contiguous subsequence of list2.
+    (O(n*m) scan; inputs here are short byte-value lists.)
+
+    Examples:
+
+    >>> issubset([], [65, 66, 67])
+    True
+    >>> issubset([65], [65, 66, 67])
+    True
+    >>> issubset([65, 66], [65, 66, 67])
+    True
+    >>> issubset([65, 67], [65, 66, 67])
+    False
+    """
+    n = len(list1)
+    for startpos in range(len(list2) - n + 1):
+        if list2[startpos:startpos+n] == list1:
+            return True
+    return False
+
+
+if utils.PY3:
+    # On Py3 the builtins already have the desired semantics, so the new*
+    # types are just the builtins themselves.
+    import builtins
+    bytes = builtins.bytes
+    dict = builtins.dict
+    int = builtins.int
+    list = builtins.list
+    object = builtins.object
+    range = builtins.range
+    str = builtins.str
+
+    # The identity mapping
+    newtypes = {bytes: bytes,
+                dict: dict,
+                int: int,
+                list: list,
+                object: object,
+                range: range,
+                str: str}
+
+    __all__ = ['newtypes']
+
+else:
+
+    from .newbytes import newbytes
+    from .newdict import newdict
+    from .newint import newint
+    from .newlist import newlist
+    from .newrange import newrange
+    from .newobject import newobject
+    from .newstr import newstr
+
+    # Map each Py2 builtin to its backported Py3-style replacement.
+    # Note: Py2 ``str`` is a byte string, hence str -> newbytes.
+    newtypes = {bytes: newbytes,
+                dict: newdict,
+                int: newint,
+                long: newint,
+                list: newlist,
+                object: newobject,
+                range: newrange,
+                str: newbytes,
+                unicode: newstr}
+
+    __all__ = ['newbytes', 'newdict', 'newint', 'newlist', 'newrange', 'newstr', 'newtypes']
diff --git a/src/clyphx/vendor/future/future/types/newbytes.py b/src/clyphx/vendor/future/future/types/newbytes.py
new file mode 100644
index 0000000..c9d584a
--- /dev/null
+++ b/src/clyphx/vendor/future/future/types/newbytes.py
@@ -0,0 +1,460 @@
+"""
+Pure-Python implementation of a Python 3-like bytes object for Python 2.
+
+Why do this? Without it, the Python 2 bytes object is a very, very
+different beast to the Python 3 bytes object.
+"""
+
+from numbers import Integral
+import string
+import copy
+
+from future.utils import istext, isbytes, PY2, PY3, with_metaclass
+from future.types import no, issubset
+from future.types.newobject import newobject
+
+if PY2:
+ from collections import Iterable
+else:
+ from collections.abc import Iterable
+
+
+_builtin_bytes = bytes
+
+if PY3:
+ # We'll probably never use newstr on Py3 anyway...
+ unicode = str
+
+
class BaseNewBytes(type):
    """
    Metaclass for ``newbytes``: makes ``isinstance(x, newbytes)`` succeed
    for any native bytes instance (Py2 ``str`` / Py3 ``bytes``), so the
    backport can masquerade as the built-in type.
    """
    def __instancecheck__(cls, instance):
        if cls == newbytes:
            # Checks against the backport class itself accept native bytes.
            return isinstance(instance, _builtin_bytes)
        else:
            # Subclasses of newbytes keep ordinary instance-check semantics.
            return issubclass(instance.__class__, cls)
+
+
+def _newchr(x):
+ if isinstance(x, str): # this happens on pypy
+ return x.encode('ascii')
+ else:
+ return chr(x)
+
+
+class newbytes(with_metaclass(BaseNewBytes, _builtin_bytes)):
+ """
+ A backport of the Python 3 bytes object to Py2
+ """
+ def __new__(cls, *args, **kwargs):
+ """
+ From the Py3 bytes docstring:
+
+ bytes(iterable_of_ints) -> bytes
+ bytes(string, encoding[, errors]) -> bytes
+ bytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer
+ bytes(int) -> bytes object of size given by the parameter initialized with null bytes
+ bytes() -> empty bytes object
+
+ Construct an immutable array of bytes from:
+ - an iterable yielding integers in range(256)
+ - a text string encoded using the specified encoding
+ - any object implementing the buffer API.
+ - an integer
+ """
+
+ encoding = None
+ errors = None
+
+ if len(args) == 0:
+ return super(newbytes, cls).__new__(cls)
+ elif len(args) >= 2:
+ args = list(args)
+ if len(args) == 3:
+ errors = args.pop()
+ encoding=args.pop()
+ # Was: elif isinstance(args[0], newbytes):
+ # We use type() instead of the above because we're redefining
+ # this to be True for all unicode string subclasses. Warning:
+ # This may render newstr un-subclassable.
+ if type(args[0]) == newbytes:
+ # Special-case: for consistency with Py3.3, we return the same object
+ # (with the same id) if a newbytes object is passed into the
+ # newbytes constructor.
+ return args[0]
+ elif isinstance(args[0], _builtin_bytes):
+ value = args[0]
+ elif isinstance(args[0], unicode):
+ try:
+ if 'encoding' in kwargs:
+ assert encoding is None
+ encoding = kwargs['encoding']
+ if 'errors' in kwargs:
+ assert errors is None
+ errors = kwargs['errors']
+ except AssertionError:
+ raise TypeError('Argument given by name and position')
+ if encoding is None:
+ raise TypeError('unicode string argument without an encoding')
+ ###
+ # Was: value = args[0].encode(**kwargs)
+ # Python 2.6 string encode() method doesn't take kwargs:
+ # Use this instead:
+ newargs = [encoding]
+ if errors is not None:
+ newargs.append(errors)
+ value = args[0].encode(*newargs)
+ ###
+ elif hasattr(args[0], '__bytes__'):
+ value = args[0].__bytes__()
+ elif isinstance(args[0], Iterable):
+ if len(args[0]) == 0:
+ # This could be an empty list or tuple. Return b'' as on Py3.
+ value = b''
+ else:
+ # Was: elif len(args[0])>0 and isinstance(args[0][0], Integral):
+ # # It's a list of integers
+ # But then we can't index into e.g. frozensets. Try to proceed
+ # anyway.
+ try:
+ value = bytearray([_newchr(x) for x in args[0]])
+ except:
+ raise ValueError('bytes must be in range(0, 256)')
+ elif isinstance(args[0], Integral):
+ if args[0] < 0:
+ raise ValueError('negative count')
+ value = b'\x00' * args[0]
+ else:
+ value = args[0]
+ if type(value) == newbytes:
+ # Above we use type(...) rather than isinstance(...) because the
+ # newbytes metaclass overrides __instancecheck__.
+ # oldbytes(value) gives the wrong thing on Py2: the same
+ # result as str(value) on Py3, e.g. "b'abc'". (Issue #193).
+ # So we handle this case separately:
+ return copy.copy(value)
+ else:
+ return super(newbytes, cls).__new__(cls, value)
+
+ def __repr__(self):
+ return 'b' + super(newbytes, self).__repr__()
+
+ def __str__(self):
+ return 'b' + "'{0}'".format(super(newbytes, self).__str__())
+
+ def __getitem__(self, y):
+ value = super(newbytes, self).__getitem__(y)
+ if isinstance(y, Integral):
+ return ord(value)
+ else:
+ return newbytes(value)
+
+ def __getslice__(self, *args):
+ return self.__getitem__(slice(*args))
+
+ def __contains__(self, key):
+ if isinstance(key, int):
+ newbyteskey = newbytes([key])
+ # Don't use isinstance() here because we only want to catch
+ # newbytes, not Python 2 str:
+ elif type(key) == newbytes:
+ newbyteskey = key
+ else:
+ newbyteskey = newbytes(key)
+ return issubset(list(newbyteskey), list(self))
+
+ @no(unicode)
+ def __add__(self, other):
+ return newbytes(super(newbytes, self).__add__(other))
+
+ @no(unicode)
+ def __radd__(self, left):
+ return newbytes(left) + self
+
+ @no(unicode)
+ def __mul__(self, other):
+ return newbytes(super(newbytes, self).__mul__(other))
+
+ @no(unicode)
+ def __rmul__(self, other):
+ return newbytes(super(newbytes, self).__rmul__(other))
+
+ def __mod__(self, vals):
+ if isinstance(vals, newbytes):
+ vals = _builtin_bytes.__str__(vals)
+
+ elif isinstance(vals, tuple):
+ newvals = []
+ for v in vals:
+ if isinstance(v, newbytes):
+ v = _builtin_bytes.__str__(v)
+ newvals.append(v)
+ vals = tuple(newvals)
+
+ elif (hasattr(vals.__class__, '__getitem__') and
+ hasattr(vals.__class__, 'iteritems')):
+ for k, v in vals.iteritems():
+ if isinstance(v, newbytes):
+ vals[k] = _builtin_bytes.__str__(v)
+
+ return _builtin_bytes.__mod__(self, vals)
+
+ def __imod__(self, other):
+ return self.__mod__(other)
+
+ def join(self, iterable_of_bytes):
+ errmsg = 'sequence item {0}: expected bytes, {1} found'
+ if isbytes(iterable_of_bytes) or istext(iterable_of_bytes):
+ raise TypeError(errmsg.format(0, type(iterable_of_bytes)))
+ for i, item in enumerate(iterable_of_bytes):
+ if istext(item):
+ raise TypeError(errmsg.format(i, type(item)))
+ return newbytes(super(newbytes, self).join(iterable_of_bytes))
+
+ @classmethod
+ def fromhex(cls, string):
+ # Only on Py2:
+ return cls(string.replace(' ', '').decode('hex'))
+
+ @no(unicode)
+ def find(self, sub, *args):
+ return super(newbytes, self).find(sub, *args)
+
+ @no(unicode)
+ def rfind(self, sub, *args):
+ return super(newbytes, self).rfind(sub, *args)
+
+ @no(unicode, (1, 2))
+ def replace(self, old, new, *args):
+ return newbytes(super(newbytes, self).replace(old, new, *args))
+
+ def encode(self, *args):
+ raise AttributeError("encode method has been disabled in newbytes")
+
+ def decode(self, encoding='utf-8', errors='strict'):
+ """
+ Returns a newstr (i.e. unicode subclass)
+
+ Decode B using the codec registered for encoding. Default encoding
+ is 'utf-8'. errors may be given to set a different error
+ handling scheme. Default is 'strict' meaning that encoding errors raise
+ a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
+ as well as any other name registered with codecs.register_error that is
+ able to handle UnicodeDecodeErrors.
+ """
+ # Py2 str.encode() takes encoding and errors as optional parameter,
+ # not keyword arguments as in Python 3 str.
+
+ from future.types.newstr import newstr
+
+ if errors == 'surrogateescape':
+ from future.utils.surrogateescape import register_surrogateescape
+ register_surrogateescape()
+
+ return newstr(super(newbytes, self).decode(encoding, errors))
+
+ # This is currently broken:
+ # # We implement surrogateescape error handling here in addition rather
+ # # than relying on the custom error handler from
+ # # future.utils.surrogateescape to be registered globally, even though
+ # # that is fine in the case of decoding. (But not encoding: see the
+ # # comments in newstr.encode()``.)
+ #
+ # if errors == 'surrogateescape':
+ # # Decode char by char
+ # mybytes = []
+ # for code in self:
+ # # Code is an int
+ # if 0x80 <= code <= 0xFF:
+ # b = 0xDC00 + code
+ # elif code <= 0x7F:
+ # b = _unichr(c).decode(encoding=encoding)
+ # else:
+ # # # It may be a bad byte
+ # # FIXME: What to do in this case? See the Py3 docs / tests.
+ # # # Try swallowing it.
+ # # continue
+ # # print("RAISE!")
+ # raise NotASurrogateError
+ # mybytes.append(b)
+ # return newbytes(mybytes)
+ # return newbytes(super(newstr, self).decode(encoding, errors))
+
+ @no(unicode)
+ def startswith(self, prefix, *args):
+ return super(newbytes, self).startswith(prefix, *args)
+
+ @no(unicode)
+ def endswith(self, prefix, *args):
+ return super(newbytes, self).endswith(prefix, *args)
+
+ @no(unicode)
+ def split(self, sep=None, maxsplit=-1):
+ # Py2 str.split() takes maxsplit as an optional parameter, not as a
+ # keyword argument as in Python 3 bytes.
+ parts = super(newbytes, self).split(sep, maxsplit)
+ return [newbytes(part) for part in parts]
+
+ def splitlines(self, keepends=False):
+ """
+ B.splitlines([keepends]) -> list of lines
+
+ Return a list of the lines in B, breaking at line boundaries.
+ Line breaks are not included in the resulting list unless keepends
+ is given and true.
+ """
+ # Py2 str.splitlines() takes keepends as an optional parameter,
+ # not as a keyword argument as in Python 3 bytes.
+ parts = super(newbytes, self).splitlines(keepends)
+ return [newbytes(part) for part in parts]
+
+ @no(unicode)
+ def rsplit(self, sep=None, maxsplit=-1):
+ # Py2 str.rsplit() takes maxsplit as an optional parameter, not as a
+ # keyword argument as in Python 3 bytes.
+ parts = super(newbytes, self).rsplit(sep, maxsplit)
+ return [newbytes(part) for part in parts]
+
+ @no(unicode)
+ def partition(self, sep):
+ parts = super(newbytes, self).partition(sep)
+ return tuple(newbytes(part) for part in parts)
+
+ @no(unicode)
+ def rpartition(self, sep):
+ parts = super(newbytes, self).rpartition(sep)
+ return tuple(newbytes(part) for part in parts)
+
+ @no(unicode, (1,))
+ def rindex(self, sub, *args):
+ '''
+ S.rindex(sub [,start [,end]]) -> int
+
+ Like S.rfind() but raise ValueError when the substring is not found.
+ '''
+ pos = self.rfind(sub, *args)
+ if pos == -1:
+ raise ValueError('substring not found')
+
+ @no(unicode)
+ def index(self, sub, *args):
+ '''
+ Returns index of sub in bytes.
+ Raises ValueError if byte is not in bytes and TypeError if can't
+ be converted bytes or its length is not 1.
+ '''
+ if isinstance(sub, int):
+ if len(args) == 0:
+ start, end = 0, len(self)
+ elif len(args) == 1:
+ start = args[0]
+ elif len(args) == 2:
+ start, end = args
+ else:
+ raise TypeError('takes at most 3 arguments')
+ return list(self)[start:end].index(sub)
+ if not isinstance(sub, bytes):
+ try:
+ sub = self.__class__(sub)
+ except (TypeError, ValueError):
+ raise TypeError("can't convert sub to bytes")
+ try:
+ return super(newbytes, self).index(sub, *args)
+ except ValueError:
+ raise ValueError('substring not found')
+
+ def __eq__(self, other):
+ if isinstance(other, (_builtin_bytes, bytearray)):
+ return super(newbytes, self).__eq__(other)
+ else:
+ return False
+
+ def __ne__(self, other):
+ if isinstance(other, _builtin_bytes):
+ return super(newbytes, self).__ne__(other)
+ else:
+ return True
+
+ unorderable_err = 'unorderable types: bytes() and {0}'
+
+ def __lt__(self, other):
+ if isinstance(other, _builtin_bytes):
+ return super(newbytes, self).__lt__(other)
+ raise TypeError(self.unorderable_err.format(type(other)))
+
+ def __le__(self, other):
+ if isinstance(other, _builtin_bytes):
+ return super(newbytes, self).__le__(other)
+ raise TypeError(self.unorderable_err.format(type(other)))
+
+ def __gt__(self, other):
+ if isinstance(other, _builtin_bytes):
+ return super(newbytes, self).__gt__(other)
+ raise TypeError(self.unorderable_err.format(type(other)))
+
+ def __ge__(self, other):
+ if isinstance(other, _builtin_bytes):
+ return super(newbytes, self).__ge__(other)
+ raise TypeError(self.unorderable_err.format(type(other)))
+
+ def __native__(self):
+ # We can't just feed a newbytes object into str(), because
+ # newbytes.__str__() returns e.g. "b'blah'", consistent with Py3 bytes.
+ return super(newbytes, self).__str__()
+
+ def __getattribute__(self, name):
+ """
+ A trick to cause the ``hasattr`` builtin-fn to return False for
+ the 'encode' method on Py2.
+ """
+ if name in ['encode', u'encode']:
+ raise AttributeError("encode method has been disabled in newbytes")
+ return super(newbytes, self).__getattribute__(name)
+
+ @no(unicode)
+ def rstrip(self, bytes_to_strip=None):
+ """
+ Strip trailing bytes contained in the argument.
+ If the argument is omitted, strip trailing ASCII whitespace.
+ """
+ return newbytes(super(newbytes, self).rstrip(bytes_to_strip))
+
+ @no(unicode)
+ def strip(self, bytes_to_strip=None):
+ """
+ Strip leading and trailing bytes contained in the argument.
+ If the argument is omitted, strip trailing ASCII whitespace.
+ """
+ return newbytes(super(newbytes, self).strip(bytes_to_strip))
+
+ def lower(self):
+ """
+ b.lower() -> copy of b
+
+ Return a copy of b with all ASCII characters converted to lowercase.
+ """
+ return newbytes(super(newbytes, self).lower())
+
+ @no(unicode)
+ def upper(self):
+ """
+ b.upper() -> copy of b
+
+ Return a copy of b with all ASCII characters converted to uppercase.
+ """
+ return newbytes(super(newbytes, self).upper())
+
+ @classmethod
+ @no(unicode)
+ def maketrans(cls, frm, to):
+ """
+ B.maketrans(frm, to) -> translation table
+
+ Return a translation table (a bytes object of length 256) suitable
+ for use in the bytes or bytearray translate method where each byte
+ in frm is mapped to the byte at the same position in to.
+ The bytes objects frm and to must be of the same length.
+ """
+ return newbytes(string.maketrans(frm, to))
+
+
+__all__ = ['newbytes']
diff --git a/src/clyphx/vendor/future/future/types/newdict.py b/src/clyphx/vendor/future/future/types/newdict.py
new file mode 100644
index 0000000..bc9b54d
--- /dev/null
+++ b/src/clyphx/vendor/future/future/types/newdict.py
@@ -0,0 +1,125 @@
+"""
+A dict subclass for Python 2 that behaves like Python 3's dict
+
+Example use:
+
+>>> from builtins import dict
+>>> d1 = dict() # instead of {} for an empty dict
+>>> d2 = dict(key1='value1', key2='value2')
+
+The keys, values and items methods now return iterators on Python 2.x
+(with set-like behaviour on Python 2.7).
+
+>>> for d in (d1, d2):
+... assert not isinstance(d.keys(), list)
+... assert not isinstance(d.values(), list)
+... assert not isinstance(d.items(), list)
+"""
+
+import sys
+
+from future.utils import with_metaclass
+from future.types.newobject import newobject
+
+
+_builtin_dict = dict
+ver = sys.version_info[:2]
+
+
class BaseNewDict(type):
    """
    Metaclass for ``newdict``: makes ``isinstance(x, newdict)`` succeed for
    any native dict instance, so the backport can masquerade as dict.
    """
    def __instancecheck__(cls, instance):
        if cls == newdict:
            # Checks against the backport class itself accept native dicts.
            return isinstance(instance, _builtin_dict)
        else:
            # Subclasses keep ordinary instance-check semantics.
            return issubclass(instance.__class__, cls)
+
+
class newdict(with_metaclass(BaseNewDict, _builtin_dict)):
    """
    A backport of the Python 3 dict object to Py2.

    ``items``/``keys``/``values`` are rebound after the class definition
    (below) to the view/iterator variants appropriate to the running
    Python 2 minor version.
    """

    def __new__(cls, *args, **kwargs):
        """
        dict() -> new empty dictionary
        dict(mapping) -> new dictionary initialized from a mapping object's
            (key, value) pairs
        dict(iterable) -> new dictionary initialized as if via:
            d = {}
            for k, v in iterable:
                d[k] = v
        dict(**kwargs) -> new dictionary initialized with the name=value pairs
            in the keyword argument list.  For example:  dict(one=1, two=2)
        """
        # NOTE: keyword arguments are handled by dict.__init__, not here.
        if len(args) == 0:
            return super(newdict, cls).__new__(cls)
        # The original special-cased ``type(args[0]) == newdict`` but both
        # branches were identical, so the dead branch (and a large block of
        # commented-out items/keys/values code) has been removed.
        return super(newdict, cls).__new__(cls, args[0])

    def __native__(self):
        """
        Hook for the future.utils.native() function
        """
        return dict(self)
+
+
if ver == (2, 7):
    # set-like objects providing a view on D's items, keys, and values
    # (matches Py3 dict view semantics as closely as Py2.7 allows)
    newdict.items = newdict.viewitems
    newdict.keys = newdict.viewkeys
    newdict.values = newdict.viewvalues

elif ver == (2, 6):
    # iterators over D's items, keys, and values
    # (Py2.6 has no view objects, so plain iterators are the closest match)
    newdict.items = newdict.iteritems
    newdict.keys = newdict.iterkeys
    newdict.values = newdict.itervalues
+
+
+__all__ = ['newdict']
diff --git a/src/clyphx/vendor/future/future/types/newint.py b/src/clyphx/vendor/future/future/types/newint.py
new file mode 100644
index 0000000..748dba9
--- /dev/null
+++ b/src/clyphx/vendor/future/future/types/newint.py
@@ -0,0 +1,381 @@
+"""
+Backport of Python 3's int, based on Py2's long.
+
+They are very similar. The most notable difference is:
+
+- representation: trailing L in Python 2 removed in Python 3
+"""
+from __future__ import division
+
+import struct
+
+from future.types.newbytes import newbytes
+from future.types.newobject import newobject
+from future.utils import PY3, isint, istext, isbytes, with_metaclass, native
+
+
+if PY3:
+ long = int
+ from collections.abc import Iterable
+else:
+ from collections import Iterable
+
+
+class BaseNewInt(type):
+ def __instancecheck__(cls, instance):
+ if cls == newint:
+ # Special case for Py2 short or long int
+ return isinstance(instance, (int, long))
+ else:
+ return issubclass(instance.__class__, cls)
+
+
+class newint(with_metaclass(BaseNewInt, long)):
+ """
+ A backport of the Python 3 int object to Py2
+ """
+ def __new__(cls, x=0, base=10):
+ """
+ From the Py3 int docstring:
+
+ | int(x=0) -> integer
+ | int(x, base=10) -> integer
+ |
+ | Convert a number or string to an integer, or return 0 if no
+ | arguments are given. If x is a number, return x.__int__(). For
+ | floating point numbers, this truncates towards zero.
+ |
+ | If x is not a number or if base is given, then x must be a string,
+ | bytes, or bytearray instance representing an integer literal in the
+ | given base. The literal can be preceded by '+' or '-' and be
+ | surrounded by whitespace. The base defaults to 10. Valid bases are
+ | 0 and 2-36. Base 0 means to interpret the base from the string as an
+ | integer literal.
+ | >>> int('0b100', base=0)
+ | 4
+
+ """
+ try:
+ val = x.__int__()
+ except AttributeError:
+ val = x
+ else:
+ if not isint(val):
+ raise TypeError('__int__ returned non-int ({0})'.format(
+ type(val)))
+
+ if base != 10:
+ # Explicit base
+ if not (istext(val) or isbytes(val) or isinstance(val, bytearray)):
+ raise TypeError(
+ "int() can't convert non-string with explicit base")
+ try:
+ return super(newint, cls).__new__(cls, val, base)
+ except TypeError:
+ return super(newint, cls).__new__(cls, newbytes(val), base)
+ # After here, base is 10
+ try:
+ return super(newint, cls).__new__(cls, val)
+ except TypeError:
+ # Py2 long doesn't handle bytearray input with an explicit base, so
+ # handle this here.
+ # Py3: int(bytearray(b'10'), 2) == 2
+ # Py2: int(bytearray(b'10'), 2) == 2 raises TypeError
+ # Py2: long(bytearray(b'10'), 2) == 2 raises TypeError
+ try:
+ return super(newint, cls).__new__(cls, newbytes(val))
+ except:
+ raise TypeError("newint argument must be a string or a number,"
+ "not '{0}'".format(type(val)))
+
+ def __repr__(self):
+ """
+ Without the L suffix
+ """
+ value = super(newint, self).__repr__()
+ assert value[-1] == 'L'
+ return value[:-1]
+
+ def __add__(self, other):
+ value = super(newint, self).__add__(other)
+ if value is NotImplemented:
+ return long(self) + other
+ return newint(value)
+
+ def __radd__(self, other):
+ value = super(newint, self).__radd__(other)
+ if value is NotImplemented:
+ return other + long(self)
+ return newint(value)
+
+ def __sub__(self, other):
+ value = super(newint, self).__sub__(other)
+ if value is NotImplemented:
+ return long(self) - other
+ return newint(value)
+
+ def __rsub__(self, other):
+ value = super(newint, self).__rsub__(other)
+ if value is NotImplemented:
+ return other - long(self)
+ return newint(value)
+
+ def __mul__(self, other):
+ value = super(newint, self).__mul__(other)
+ if isint(value):
+ return newint(value)
+ elif value is NotImplemented:
+ return long(self) * other
+ return value
+
+ def __rmul__(self, other):
+ value = super(newint, self).__rmul__(other)
+ if isint(value):
+ return newint(value)
+ elif value is NotImplemented:
+ return other * long(self)
+ return value
+
+ def __div__(self, other):
+ # We override this rather than e.g. relying on object.__div__ or
+ # long.__div__ because we want to wrap the value in a newint()
+ # call if other is another int
+ value = long(self) / other
+ if isinstance(other, (int, long)):
+ return newint(value)
+ else:
+ return value
+
+ def __rdiv__(self, other):
+ value = other / long(self)
+ if isinstance(other, (int, long)):
+ return newint(value)
+ else:
+ return value
+
+ def __idiv__(self, other):
+ # long has no __idiv__ method. Use __itruediv__ and cast back to
+ # newint:
+ value = self.__itruediv__(other)
+ if isinstance(other, (int, long)):
+ return newint(value)
+ else:
+ return value
+
+ def __truediv__(self, other):
+ value = super(newint, self).__truediv__(other)
+ if value is NotImplemented:
+ value = long(self) / other
+ return value
+
+ def __rtruediv__(self, other):
+ return super(newint, self).__rtruediv__(other)
+
+ def __itruediv__(self, other):
+ # long has no __itruediv__ method
+ mylong = long(self)
+ mylong /= other
+ return mylong
+
+ def __floordiv__(self, other):
+ return newint(super(newint, self).__floordiv__(other))
+
+ def __rfloordiv__(self, other):
+ return newint(super(newint, self).__rfloordiv__(other))
+
+ def __ifloordiv__(self, other):
+ # long has no __ifloordiv__ method
+ mylong = long(self)
+ mylong //= other
+ return newint(mylong)
+
+ def __mod__(self, other):
+ value = super(newint, self).__mod__(other)
+ if value is NotImplemented:
+ return long(self) % other
+ return newint(value)
+
+ def __rmod__(self, other):
+ value = super(newint, self).__rmod__(other)
+ if value is NotImplemented:
+ return other % long(self)
+ return newint(value)
+
+ def __divmod__(self, other):
+ value = super(newint, self).__divmod__(other)
+ if value is NotImplemented:
+ mylong = long(self)
+ return (mylong // other, mylong % other)
+ return (newint(value[0]), newint(value[1]))
+
+ def __rdivmod__(self, other):
+ value = super(newint, self).__rdivmod__(other)
+ if value is NotImplemented:
+ mylong = long(self)
+ return (other // mylong, other % mylong)
+ return (newint(value[0]), newint(value[1]))
+
+ def __pow__(self, other):
+ value = super(newint, self).__pow__(other)
+ if value is NotImplemented:
+ return long(self) ** other
+ return newint(value)
+
+ def __rpow__(self, other):
+ value = super(newint, self).__rpow__(other)
+ if value is NotImplemented:
+ return other ** long(self)
+ return newint(value)
+
+ def __lshift__(self, other):
+ if not isint(other):
+ raise TypeError(
+ "unsupported operand type(s) for <<: '%s' and '%s'" %
+ (type(self).__name__, type(other).__name__))
+ return newint(super(newint, self).__lshift__(other))
+
+ def __rshift__(self, other):
+ if not isint(other):
+ raise TypeError(
+ "unsupported operand type(s) for >>: '%s' and '%s'" %
+ (type(self).__name__, type(other).__name__))
+ return newint(super(newint, self).__rshift__(other))
+
+ def __and__(self, other):
+ if not isint(other):
+ raise TypeError(
+ "unsupported operand type(s) for &: '%s' and '%s'" %
+ (type(self).__name__, type(other).__name__))
+ return newint(super(newint, self).__and__(other))
+
+ def __or__(self, other):
+ if not isint(other):
+ raise TypeError(
+ "unsupported operand type(s) for |: '%s' and '%s'" %
+ (type(self).__name__, type(other).__name__))
+ return newint(super(newint, self).__or__(other))
+
+ def __xor__(self, other):
+ if not isint(other):
+ raise TypeError(
+ "unsupported operand type(s) for ^: '%s' and '%s'" %
+ (type(self).__name__, type(other).__name__))
+ return newint(super(newint, self).__xor__(other))
+
+ def __neg__(self):
+ return newint(super(newint, self).__neg__())
+
+ def __pos__(self):
+ return newint(super(newint, self).__pos__())
+
+ def __abs__(self):
+ return newint(super(newint, self).__abs__())
+
+ def __invert__(self):
+ return newint(super(newint, self).__invert__())
+
+ def __int__(self):
+ return self
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def __bool__(self):
+ """
+ So subclasses can override this, Py3-style
+ """
+ return super(newint, self).__nonzero__()
+
+ def __native__(self):
+ return long(self)
+
+ def to_bytes(self, length, byteorder='big', signed=False):
+ """
+ Return an array of bytes representing an integer.
+
+ The integer is represented using length bytes. An OverflowError is
+ raised if the integer is not representable with the given number of
+ bytes.
+
+ The byteorder argument determines the byte order used to represent the
+ integer. If byteorder is 'big', the most significant byte is at the
+ beginning of the byte array. If byteorder is 'little', the most
+ significant byte is at the end of the byte array. To request the native
+ byte order of the host system, use `sys.byteorder' as the byte order value.
+
+ The signed keyword-only argument determines whether two's complement is
+ used to represent the integer. If signed is False and a negative integer
+ is given, an OverflowError is raised.
+ """
+ if length < 0:
+ raise ValueError("length argument must be non-negative")
+ if length == 0 and self == 0:
+ return newbytes()
+ if signed and self < 0:
+ bits = length * 8
+ num = (2**bits) + self
+ if num <= 0:
+ raise OverflowError("int too smal to convert")
+ else:
+ if self < 0:
+ raise OverflowError("can't convert negative int to unsigned")
+ num = self
+ if byteorder not in ('little', 'big'):
+ raise ValueError("byteorder must be either 'little' or 'big'")
+ h = b'%x' % num
+ s = newbytes((b'0'*(len(h) % 2) + h).zfill(length*2).decode('hex'))
+ if signed:
+ high_set = s[0] & 0x80
+ if self > 0 and high_set:
+ raise OverflowError("int too big to convert")
+ if self < 0 and not high_set:
+ raise OverflowError("int too small to convert")
+ if len(s) > length:
+ raise OverflowError("int too big to convert")
+ return s if byteorder == 'big' else s[::-1]
+
+ @classmethod
+ def from_bytes(cls, mybytes, byteorder='big', signed=False):
+ """
+ Return the integer represented by the given array of bytes.
+
+ The mybytes argument must either support the buffer protocol or be an
+ iterable object producing bytes. Bytes and bytearray are examples of
+ built-in objects that support the buffer protocol.
+
+ The byteorder argument determines the byte order used to represent the
+ integer. If byteorder is 'big', the most significant byte is at the
+ beginning of the byte array. If byteorder is 'little', the most
+ significant byte is at the end of the byte array. To request the native
+ byte order of the host system, use `sys.byteorder' as the byte order value.
+
+ The signed keyword-only argument indicates whether two's complement is
+ used to represent the integer.
+ """
+ if byteorder not in ('little', 'big'):
+ raise ValueError("byteorder must be either 'little' or 'big'")
+ if isinstance(mybytes, unicode):
+ raise TypeError("cannot convert unicode objects to bytes")
+ # mybytes can also be passed as a sequence of integers on Py3.
+ # Test for this:
+ elif isinstance(mybytes, Iterable):
+ mybytes = newbytes(mybytes)
+ b = mybytes if byteorder == 'big' else mybytes[::-1]
+ if len(b) == 0:
+ b = b'\x00'
+ # The encode() method has been disabled by newbytes, but Py2's
+ # str has it:
+ num = int(native(b).encode('hex'), 16)
+ if signed and (b[0] & 0x80):
+ num = num - (2 ** (len(b)*8))
+ return cls(num)
+
+
# def _twos_comp(val, bits):
#     """compute the 2's compliment of int value val"""
#     if( (val&(1<<(bits-1))) != 0 ):
#         val = val - (1<<bits)
#     return val


__all__ = ['newint']
diff --git a/src/clyphx/vendor/future/future/types/newlist.py b/src/clyphx/vendor/future/future/types/newlist.py
new file mode 100644
--- /dev/null
+++ b/src/clyphx/vendor/future/future/types/newlist.py
"""
A list subclass for Python 2 that behaves like Python 3's list.

Example use:

>>> from builtins import list
+>>> l1 = list() # instead of {} for an empty list
+>>> l1.append('hello')
+>>> l2 = l1.copy()
+
+"""
+
+import sys
+import copy
+
+from future.utils import with_metaclass
+from future.types.newobject import newobject
+
+
+_builtin_list = list
+ver = sys.version_info[:2]
+
+
+class BaseNewList(type):
+ def __instancecheck__(cls, instance):
+ if cls == newlist:
+ return isinstance(instance, _builtin_list)
+ else:
+ return issubclass(instance.__class__, cls)
+
+
class newlist(with_metaclass(BaseNewList, _builtin_list)):
    """
    A backport of the Python 3 list object to Py2
    """

    def copy(self):
        """
        L.copy() -> list -- a shallow copy of L
        """
        return copy.copy(self)

    def clear(self):
        """L.clear() -> None -- remove all items from L"""
        # Single O(n) slice deletion instead of the original pop()-per-item
        # loop (which shifted no elements only because it popped the tail,
        # but still made n Python-level calls).
        del self[:]

    def __new__(cls, *args, **kwargs):
        """
        list() -> new empty list
        list(iterable) -> new list initialized from iterable's items
        """
        if len(args) == 0:
            return super(newlist, cls).__new__(cls)
        # The original special-cased ``type(args[0]) == newlist`` but both
        # branches assigned the same value; collapsed here.
        return super(newlist, cls).__new__(cls, args[0])

    def __add__(self, value):
        return newlist(super(newlist, self).__add__(value))

    def __radd__(self, left):
        " left + self "
        try:
            return newlist(left) + self
        except Exception:
            # Best-effort: if ``left`` cannot be turned into a list, defer
            # to the other operand.  (Was a bare ``except:``, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            return NotImplemented

    def __getitem__(self, y):
        """
        x.__getitem__(y) <==> x[y]

        Warning: a bug in Python 2.x prevents indexing via a slice from
        returning a newlist object.
        """
        if isinstance(y, slice):
            return newlist(super(newlist, self).__getitem__(y))
        else:
            return super(newlist, self).__getitem__(y)

    def __native__(self):
        """
        Hook for the future.utils.native() function
        """
        return list(self)

    def __nonzero__(self):
        # Py2 truthiness hook, mirroring Py3 __bool__ semantics for lists.
        return len(self) > 0
+
+
+__all__ = ['newlist']
diff --git a/src/clyphx/vendor/future/future/types/newmemoryview.py b/src/clyphx/vendor/future/future/types/newmemoryview.py
new file mode 100644
index 0000000..09f804d
--- /dev/null
+++ b/src/clyphx/vendor/future/future/types/newmemoryview.py
@@ -0,0 +1,29 @@
+"""
+A pretty lame implementation of a memoryview object for Python 2.6.
+"""
+from numbers import Integral
+import string
+
+from future.utils import istext, isbytes, PY2, with_metaclass
+from future.types import no, issubset
+
+if PY2:
+ from collections import Iterable
+else:
+ from collections.abc import Iterable
+
+# class BaseNewBytes(type):
+# def __instancecheck__(cls, instance):
+# return isinstance(instance, _builtin_bytes)
+
+
class newmemoryview(object):  # with_metaclass(BaseNewBytes, _builtin_bytes)):
    """
    A pretty lame backport of the Python 2.7 and Python 3.x
    memoryview object to Py2.6.

    It wraps the given object without copying; the wrapped object is
    available as the ``obj`` attribute.
    """
    def __init__(self, obj):
        # BUG FIX: the original did ``return obj`` here.  __init__ must
        # return None, so every instantiation raised
        # ``TypeError: __init__() should return None``.  Store the wrapped
        # object instead.
        self.obj = obj
+
+
+__all__ = ['newmemoryview']
diff --git a/src/clyphx/vendor/future/future/types/newobject.py b/src/clyphx/vendor/future/future/types/newobject.py
new file mode 100644
index 0000000..31b84fc
--- /dev/null
+++ b/src/clyphx/vendor/future/future/types/newobject.py
@@ -0,0 +1,117 @@
+"""
+An object subclass for Python 2 that gives new-style classes written in the
+style of Python 3 (with ``__next__`` and unicode-returning ``__str__`` methods)
+the appropriate Python 2-style ``next`` and ``__unicode__`` methods for compatibility.
+
+Example use::
+
+ from builtins import object
+
+ my_unicode_str = u'Unicode string: \u5b54\u5b50'
+
+ class A(object):
+ def __str__(self):
+ return my_unicode_str
+
+ a = A()
+ print(str(a))
+
+ # On Python 2, these relations hold:
+    assert unicode(a) == my_unicode_str
+    assert str(a) == my_unicode_str.encode('utf-8')
+
+
+Another example::
+
+ from builtins import object
+
+ class Upper(object):
+ def __init__(self, iterable):
+ self._iter = iter(iterable)
+ def __next__(self): # note the Py3 interface
+ return next(self._iter).upper()
+ def __iter__(self):
+ return self
+
+ assert list(Upper('hello')) == list('HELLO')
+
+"""
+
+
+class newobject(object):
+ """
+ A magical object class that provides Python 2 compatibility methods::
+ next
+ __unicode__
+ __nonzero__
+
+ Subclasses of this class can merely define the Python 3 methods (__next__,
+ __str__, and __bool__).
+ """
+ def next(self):
+ if hasattr(self, '__next__'):
+ return type(self).__next__(self)
+ raise TypeError('newobject is not an iterator')
+
+ def __unicode__(self):
+ # All subclasses of the builtin object should have __str__ defined.
+ # Note that old-style classes do not have __str__ defined.
+ if hasattr(self, '__str__'):
+ s = type(self).__str__(self)
+ else:
+ s = str(self)
+ if isinstance(s, unicode):
+ return s
+ else:
+ return s.decode('utf-8')
+
+ def __nonzero__(self):
+ if hasattr(self, '__bool__'):
+ return type(self).__bool__(self)
+ if hasattr(self, '__len__'):
+ return type(self).__len__(self)
+ # object has no __nonzero__ method
+ return True
+
+ # Are these ever needed?
+ # def __div__(self):
+ # return self.__truediv__()
+
+ # def __idiv__(self, other):
+ # return self.__itruediv__(other)
+
+ def __long__(self):
+ if not hasattr(self, '__int__'):
+ return NotImplemented
+ return self.__int__() # not type(self).__int__(self)
+
+ # def __new__(cls, *args, **kwargs):
+ # """
+ # dict() -> new empty dictionary
+ # dict(mapping) -> new dictionary initialized from a mapping object's
+ # (key, value) pairs
+ # dict(iterable) -> new dictionary initialized as if via:
+ # d = {}
+ # for k, v in iterable:
+ # d[k] = v
+ # dict(**kwargs) -> new dictionary initialized with the name=value pairs
+ # in the keyword argument list. For example: dict(one=1, two=2)
+ # """
+
+ # if len(args) == 0:
+ # return super(newdict, cls).__new__(cls)
+ # elif type(args[0]) == newdict:
+ # return args[0]
+ # else:
+ # value = args[0]
+ # return super(newdict, cls).__new__(cls, value)
+
+ def __native__(self):
+ """
+ Hook for the future.utils.native() function
+ """
+ return object(self)
+
+ __slots__ = []
+
+__all__ = ['newobject']
diff --git a/src/clyphx/vendor/future/future/types/newopen.py b/src/clyphx/vendor/future/future/types/newopen.py
new file mode 100644
index 0000000..b75d45a
--- /dev/null
+++ b/src/clyphx/vendor/future/future/types/newopen.py
@@ -0,0 +1,32 @@
+"""
+A substitute for the Python 3 open() function.
+
+Note that io.open() is more complete but maybe slower. Even so, the
+completeness may be a better default. TODO: compare these
+"""
+
+_builtin_open = open
+
+class newopen(object):
+ """Wrapper providing key part of Python 3 open() interface.
+
+ From IPython's py3compat.py module. License: BSD.
+ """
+ def __init__(self, fname, mode="r", encoding="utf-8"):
+ self.f = _builtin_open(fname, mode)
+ self.enc = encoding
+
+ def write(self, s):
+ return self.f.write(s.encode(self.enc))
+
+ def read(self, size=-1):
+ return self.f.read(size).decode(self.enc)
+
+ def close(self):
+ return self.f.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, etype, value, traceback):
+ self.f.close()
diff --git a/src/clyphx/vendor/future/future/types/newrange.py b/src/clyphx/vendor/future/future/types/newrange.py
new file mode 100644
index 0000000..eda01a5
--- /dev/null
+++ b/src/clyphx/vendor/future/future/types/newrange.py
@@ -0,0 +1,170 @@
+"""
+Nearly identical to xrange.py, by Dan Crosta, from
+
+ https://github.com/dcrosta/xrange.git
+
+This is included here in the ``future`` package rather than pointed to as
+a dependency because there is no package for ``xrange`` on PyPI. It is
+also tweaked to appear like a regular Python 3 ``range`` object rather
+than a Python 2 xrange.
+
+From Dan Crosta's README:
+
+ "A pure-Python implementation of Python 2.7's xrange built-in, with
+ some features backported from the Python 3.x range built-in (which
+ replaced xrange) in that version."
+
+ Read more at
+ https://late.am/post/2012/06/18/what-the-heck-is-an-xrange
+"""
+from __future__ import absolute_import
+
+from future.utils import PY2
+
+if PY2:
+ from collections import Sequence, Iterator
+else:
+ from collections.abc import Sequence, Iterator
+from itertools import islice
+
+from future.backports.misc import count # with step parameter on Py2.6
+# For backward compatibility with python-future versions < 0.14.4:
+_count = count
+
+
+class newrange(Sequence):
+ """
+    Pure-Python backport of Python 3's range object. See the CPython
+    documentation for details:
+    https://docs.python.org/3/library/stdtypes.html#range
+ """
+
+ def __init__(self, *args):
+ if len(args) == 1:
+ start, stop, step = 0, args[0], 1
+ elif len(args) == 2:
+ start, stop, step = args[0], args[1], 1
+ elif len(args) == 3:
+ start, stop, step = args
+ else:
+ raise TypeError('range() requires 1-3 int arguments')
+
+ try:
+ start, stop, step = int(start), int(stop), int(step)
+ except ValueError:
+ raise TypeError('an integer is required')
+
+ if step == 0:
+ raise ValueError('range() arg 3 must not be zero')
+ elif step < 0:
+ stop = min(stop, start)
+ else:
+ stop = max(stop, start)
+
+ self._start = start
+ self._stop = stop
+ self._step = step
+ self._len = (stop - start) // step + bool((stop - start) % step)
+
+ @property
+ def start(self):
+ return self._start
+
+ @property
+ def stop(self):
+ return self._stop
+
+ @property
+ def step(self):
+ return self._step
+
+ def __repr__(self):
+ if self._step == 1:
+ return 'range(%d, %d)' % (self._start, self._stop)
+ return 'range(%d, %d, %d)' % (self._start, self._stop, self._step)
+
+    def __eq__(self, other):
+        return (isinstance(other, newrange) and
+                (self._len == 0 == other._len or
+                 (self._start, self._step, self._len) ==
+                 (other._start, other._step, other._len)))  # was self._len: length was never compared
+
+ def __len__(self):
+ return self._len
+
+ def index(self, value):
+ """Return the 0-based position of integer `value` in
+ the sequence this range represents."""
+ try:
+ diff = value - self._start
+ except TypeError:
+ raise ValueError('%r is not in range' % value)
+ quotient, remainder = divmod(diff, self._step)
+ if remainder == 0 and 0 <= quotient < self._len:
+ return abs(quotient)
+ raise ValueError('%r is not in range' % value)
+
+ def count(self, value):
+ """Return the number of ocurrences of integer `value`
+ in the sequence this range represents."""
+ # a value can occur exactly zero or one times
+ return int(value in self)
+
+ def __contains__(self, value):
+ """Return ``True`` if the integer `value` occurs in
+ the sequence this range represents."""
+ try:
+ self.index(value)
+ return True
+ except ValueError:
+ return False
+
+ def __reversed__(self):
+ return iter(self[::-1])
+
+ def __getitem__(self, index):
+ """Return the element at position ``index`` in the sequence
+ this range represents, or raise :class:`IndexError` if the
+ position is out of range."""
+ if isinstance(index, slice):
+ return self.__getitem_slice(index)
+ if index < 0:
+ # negative indexes access from the end
+ index = self._len + index
+ if index < 0 or index >= self._len:
+ raise IndexError('range object index out of range')
+ return self._start + index * self._step
+
+ def __getitem_slice(self, slce):
+ """Return a range which represents the requested slce
+ of the sequence represented by this range.
+ """
+ scaled_indices = (self._step * n for n in slce.indices(self._len))
+ start_offset, stop_offset, new_step = scaled_indices
+ return newrange(self._start + start_offset,
+ self._start + stop_offset,
+ new_step)
+
+ def __iter__(self):
+ """Return an iterator which enumerates the elements of the
+ sequence this range represents."""
+ return range_iterator(self)
+
+
+class range_iterator(Iterator):
+ """An iterator for a :class:`range`.
+ """
+ def __init__(self, range_):
+ self._stepper = islice(count(range_.start, range_.step), len(range_))
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self._stepper)
+
+ def next(self):
+ return next(self._stepper)
+
+
+__all__ = ['newrange']
diff --git a/src/clyphx/vendor/future/future/types/newstr.py b/src/clyphx/vendor/future/future/types/newstr.py
new file mode 100644
index 0000000..8ca191f
--- /dev/null
+++ b/src/clyphx/vendor/future/future/types/newstr.py
@@ -0,0 +1,426 @@
+"""
+This module redefines ``str`` on Python 2.x to be a subclass of the Py2
+``unicode`` type that behaves like the Python 3.x ``str``.
+
+The main differences between ``newstr`` and Python 2.x's ``unicode`` type are
+the stricter type-checking and absence of a `u''` prefix in the representation.
+
+It is designed to be used together with the ``unicode_literals`` import
+as follows:
+
+ >>> from __future__ import unicode_literals
+ >>> from builtins import str, isinstance
+
+On Python 3.x and normally on Python 2.x, these expressions hold
+
+ >>> str('blah') is 'blah'
+ True
+ >>> isinstance('blah', str)
+ True
+
+However, on Python 2.x, with this import:
+
+ >>> from __future__ import unicode_literals
+
+the same expressions are False:
+
+ >>> str('blah') is 'blah'
+ False
+ >>> isinstance('blah', str)
+ False
+
+This module is designed to be imported together with ``unicode_literals`` on
+Python 2 to bring the meaning of ``str`` back into alignment with unprefixed
+string literals (i.e. ``unicode`` subclasses).
+
+Note that ``str()`` (and ``print()``) would then normally call the
+``__unicode__`` method on objects in Python 2. To define string
+representations of your objects portably across Py3 and Py2, use the
+:func:`python_2_unicode_compatible` decorator in :mod:`future.utils`.
+
+"""
+
+from numbers import Number
+
+from future.utils import PY3, istext, with_metaclass, isnewbytes
+from future.types import no, issubset
+from future.types.newobject import newobject
+
+
+if PY3:
+ # We'll probably never use newstr on Py3 anyway...
+ unicode = str
+ from collections.abc import Iterable
+else:
+ from collections import Iterable
+
+
+class BaseNewStr(type):
+ def __instancecheck__(cls, instance):
+ if cls == newstr:
+ return isinstance(instance, unicode)
+ else:
+ return issubclass(instance.__class__, cls)
+
+
+class newstr(with_metaclass(BaseNewStr, unicode)):
+ """
+ A backport of the Python 3 str object to Py2
+ """
+ no_convert_msg = "Can't convert '{0}' object to str implicitly"
+
+ def __new__(cls, *args, **kwargs):
+ """
+ From the Py3 str docstring:
+
+ str(object='') -> str
+ str(bytes_or_buffer[, encoding[, errors]]) -> str
+
+ Create a new string object from the given object. If encoding or
+ errors is specified, then the object must expose a data buffer
+ that will be decoded using the given encoding and error handler.
+ Otherwise, returns the result of object.__str__() (if defined)
+ or repr(object).
+ encoding defaults to sys.getdefaultencoding().
+ errors defaults to 'strict'.
+
+ """
+ if len(args) == 0:
+ return super(newstr, cls).__new__(cls)
+ # Special case: If someone requests str(str(u'abc')), return the same
+ # object (same id) for consistency with Py3.3. This is not true for
+ # other objects like list or dict.
+ elif type(args[0]) == newstr and cls == newstr:
+ return args[0]
+ elif isinstance(args[0], unicode):
+ value = args[0]
+ elif isinstance(args[0], bytes): # i.e. Py2 bytes or newbytes
+ if 'encoding' in kwargs or len(args) > 1:
+ value = args[0].decode(*args[1:], **kwargs)
+ else:
+ value = args[0].__str__()
+ else:
+ value = args[0]
+ return super(newstr, cls).__new__(cls, value)
+
+ def __repr__(self):
+ """
+ Without the u prefix
+ """
+
+ value = super(newstr, self).__repr__()
+ # assert value[0] == u'u'
+ return value[1:]
+
+ def __getitem__(self, y):
+ """
+ Warning: Python <= 2.7.6 has a bug that causes this method never to be called
+ when y is a slice object. Therefore the type of newstr()[:2] is wrong
+ (unicode instead of newstr).
+ """
+ return newstr(super(newstr, self).__getitem__(y))
+
+ def __contains__(self, key):
+ errmsg = "'in ' requires string as left operand, not {0}"
+ # Don't use isinstance() here because we only want to catch
+ # newstr, not Python 2 unicode:
+ if type(key) == newstr:
+ newkey = key
+ elif isinstance(key, unicode) or isinstance(key, bytes) and not isnewbytes(key):
+ newkey = newstr(key)
+ else:
+ raise TypeError(errmsg.format(type(key)))
+ return issubset(list(newkey), list(self))
+
+ @no('newbytes')
+ def __add__(self, other):
+ return newstr(super(newstr, self).__add__(other))
+
+    @no('newbytes')
+    def __radd__(self, left):
+        """Return ``left + self`` (reflected add), coercing ``left`` to newstr."""
+        try:
+            return newstr(left) + self
+        except Exception:  # bare `except:` also caught SystemExit/KeyboardInterrupt
+            return NotImplemented
+
+ def __mul__(self, other):
+ return newstr(super(newstr, self).__mul__(other))
+
+ def __rmul__(self, other):
+ return newstr(super(newstr, self).__rmul__(other))
+
+ def join(self, iterable):
+ errmsg = 'sequence item {0}: expected unicode string, found bytes'
+ for i, item in enumerate(iterable):
+ # Here we use type() rather than isinstance() because
+ # __instancecheck__ is being overridden. E.g.
+ # isinstance(b'abc', newbytes) is True on Py2.
+ if isnewbytes(item):
+ raise TypeError(errmsg.format(i))
+ # Support use as a staticmethod: str.join('-', ['a', 'b'])
+ if type(self) == newstr:
+ return newstr(super(newstr, self).join(iterable))
+ else:
+ return newstr(super(newstr, newstr(self)).join(iterable))
+
+ @no('newbytes')
+ def find(self, sub, *args):
+ return super(newstr, self).find(sub, *args)
+
+ @no('newbytes')
+ def rfind(self, sub, *args):
+ return super(newstr, self).rfind(sub, *args)
+
+ @no('newbytes', (1, 2))
+ def replace(self, old, new, *args):
+ return newstr(super(newstr, self).replace(old, new, *args))
+
+ def decode(self, *args):
+ raise AttributeError("decode method has been disabled in newstr")
+
+ def encode(self, encoding='utf-8', errors='strict'):
+ """
+ Returns bytes
+
+ Encode S using the codec registered for encoding. Default encoding
+ is 'utf-8'. errors may be given to set a different error
+ handling scheme. Default is 'strict' meaning that encoding errors raise
+ a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
+ 'xmlcharrefreplace' as well as any other name registered with
+ codecs.register_error that can handle UnicodeEncodeErrors.
+ """
+ from future.types.newbytes import newbytes
+ # Py2 unicode.encode() takes encoding and errors as optional parameter,
+ # not keyword arguments as in Python 3 str.
+
+ # For the surrogateescape error handling mechanism, the
+ # codecs.register_error() function seems to be inadequate for an
+ # implementation of it when encoding. (Decoding seems fine, however.)
+ # For example, in the case of
+ # u'\udcc3'.encode('ascii', 'surrogateescape_handler')
+ # after registering the ``surrogateescape_handler`` function in
+ # future.utils.surrogateescape, both Python 2.x and 3.x raise an
+ # exception anyway after the function is called because the unicode
+ # string it has to return isn't encodable strictly as ASCII.
+
+ if errors == 'surrogateescape':
+ if encoding == 'utf-16':
+ # Known to fail here. See test_encoding_works_normally()
+ raise NotImplementedError('FIXME: surrogateescape handling is '
+ 'not yet implemented properly')
+ # Encode char by char, building up list of byte-strings
+ mybytes = []
+ for c in self:
+ code = ord(c)
+ if 0xD800 <= code <= 0xDCFF:
+ mybytes.append(newbytes([code - 0xDC00]))
+ else:
+ mybytes.append(c.encode(encoding=encoding))
+ return newbytes(b'').join(mybytes)
+ return newbytes(super(newstr, self).encode(encoding, errors))
+
+ @no('newbytes', 1)
+ def startswith(self, prefix, *args):
+ if isinstance(prefix, Iterable):
+ for thing in prefix:
+ if isnewbytes(thing):
+ raise TypeError(self.no_convert_msg.format(type(thing)))
+ return super(newstr, self).startswith(prefix, *args)
+
+ @no('newbytes', 1)
+ def endswith(self, prefix, *args):
+ # Note we need the decorator above as well as the isnewbytes()
+ # check because prefix can be either a bytes object or e.g. a
+ # tuple of possible prefixes. (If it's a bytes object, each item
+ # in it is an int.)
+ if isinstance(prefix, Iterable):
+ for thing in prefix:
+ if isnewbytes(thing):
+ raise TypeError(self.no_convert_msg.format(type(thing)))
+ return super(newstr, self).endswith(prefix, *args)
+
+ @no('newbytes', 1)
+ def split(self, sep=None, maxsplit=-1):
+ # Py2 unicode.split() takes maxsplit as an optional parameter,
+ # not as a keyword argument as in Python 3 str.
+ parts = super(newstr, self).split(sep, maxsplit)
+ return [newstr(part) for part in parts]
+
+ @no('newbytes', 1)
+ def rsplit(self, sep=None, maxsplit=-1):
+ # Py2 unicode.rsplit() takes maxsplit as an optional parameter,
+ # not as a keyword argument as in Python 3 str.
+ parts = super(newstr, self).rsplit(sep, maxsplit)
+ return [newstr(part) for part in parts]
+
+ @no('newbytes', 1)
+ def partition(self, sep):
+ parts = super(newstr, self).partition(sep)
+ return tuple(newstr(part) for part in parts)
+
+ @no('newbytes', 1)
+ def rpartition(self, sep):
+ parts = super(newstr, self).rpartition(sep)
+ return tuple(newstr(part) for part in parts)
+
+ @no('newbytes', 1)
+ def index(self, sub, *args):
+ """
+ Like newstr.find() but raise ValueError when the substring is not
+ found.
+ """
+ pos = self.find(sub, *args)
+ if pos == -1:
+ raise ValueError('substring not found')
+ return pos
+
+ def splitlines(self, keepends=False):
+ """
+ S.splitlines(keepends=False) -> list of strings
+
+ Return a list of the lines in S, breaking at line boundaries.
+ Line breaks are not included in the resulting list unless keepends
+ is given and true.
+ """
+ # Py2 unicode.splitlines() takes keepends as an optional parameter,
+ # not as a keyword argument as in Python 3 str.
+ parts = super(newstr, self).splitlines(keepends)
+ return [newstr(part) for part in parts]
+
+ def __eq__(self, other):
+ if (isinstance(other, unicode) or
+ isinstance(other, bytes) and not isnewbytes(other)):
+ return super(newstr, self).__eq__(other)
+ else:
+ return NotImplemented
+
+ def __hash__(self):
+ if (isinstance(self, unicode) or
+ isinstance(self, bytes) and not isnewbytes(self)):
+ return super(newstr, self).__hash__()
+ else:
+ raise NotImplementedError()
+
+ def __ne__(self, other):
+ if (isinstance(other, unicode) or
+ isinstance(other, bytes) and not isnewbytes(other)):
+ return super(newstr, self).__ne__(other)
+ else:
+ return True
+
+ unorderable_err = 'unorderable types: str() and {0}'
+
+ def __lt__(self, other):
+ if (isinstance(other, unicode) or
+ isinstance(other, bytes) and not isnewbytes(other)):
+ return super(newstr, self).__lt__(other)
+ raise TypeError(self.unorderable_err.format(type(other)))
+
+ def __le__(self, other):
+ if (isinstance(other, unicode) or
+ isinstance(other, bytes) and not isnewbytes(other)):
+ return super(newstr, self).__le__(other)
+ raise TypeError(self.unorderable_err.format(type(other)))
+
+ def __gt__(self, other):
+ if (isinstance(other, unicode) or
+ isinstance(other, bytes) and not isnewbytes(other)):
+ return super(newstr, self).__gt__(other)
+ raise TypeError(self.unorderable_err.format(type(other)))
+
+ def __ge__(self, other):
+ if (isinstance(other, unicode) or
+ isinstance(other, bytes) and not isnewbytes(other)):
+ return super(newstr, self).__ge__(other)
+ raise TypeError(self.unorderable_err.format(type(other)))
+
+ def __getattribute__(self, name):
+ """
+ A trick to cause the ``hasattr`` builtin-fn to return False for
+ the 'decode' method on Py2.
+ """
+ if name in ['decode', u'decode']:
+ raise AttributeError("decode method has been disabled in newstr")
+ return super(newstr, self).__getattribute__(name)
+
+ def __native__(self):
+ """
+ A hook for the future.utils.native() function.
+ """
+ return unicode(self)
+
+    @staticmethod
+    def maketrans(x, y=None, z=None):
+        """
+        Return a translation table usable for str.translate().
+
+        If there is only one argument, it must be a dictionary mapping Unicode
+        ordinals (integers) or characters to Unicode ordinals, strings or None.
+        Character keys will be then converted to ordinals.
+        If there are two arguments, they must be strings of equal length, and
+        in the resulting dictionary, each character in x will be mapped to the
+        character at the same position in y. If there is a third argument, it
+        must be a string, whose characters will be mapped to None in the result.
+        """
+
+        if y is None:
+            assert z is None
+            if not isinstance(x, dict):
+                raise TypeError('if you give only one argument to maketrans it must be a dict')
+            result = {}
+            for (key, value) in x.items():
+                if len(key) > 1:
+                    raise ValueError('keys in translate table must be strings or integers')
+                result[ord(key)] = value
+        else:
+            if not (isinstance(x, unicode) and isinstance(y, unicode)):  # precedence fix: both must be unicode
+                raise TypeError('x and y must be unicode strings')
+            if not len(x) == len(y):
+                raise ValueError('the first two maketrans arguments must have equal length')
+            result = {}
+            for (xi, yi) in zip(x, y):
+                if len(xi) > 1:
+                    raise ValueError('keys in translate table must be strings or integers')
+                result[ord(xi)] = ord(yi)
+
+        if z is not None:
+            for char in z:
+                result[ord(char)] = None
+        return result
+
+ def translate(self, table):
+ """
+ S.translate(table) -> str
+
+ Return a copy of the string S, where all characters have been mapped
+ through the given translation table, which must be a mapping of
+ Unicode ordinals to Unicode ordinals, strings, or None.
+ Unmapped characters are left untouched. Characters mapped to None
+ are deleted.
+ """
+ l = []
+ for c in self:
+ if ord(c) in table:
+ val = table[ord(c)]
+ if val is None:
+ continue
+ elif isinstance(val, unicode):
+ l.append(val)
+ else:
+ l.append(chr(val))
+ else:
+ l.append(c)
+ return ''.join(l)
+
+ def isprintable(self):
+ raise NotImplementedError('fixme')
+
+ def isidentifier(self):
+ raise NotImplementedError('fixme')
+
+ def format_map(self):
+ raise NotImplementedError('fixme')
+
+
+__all__ = ['newstr']
diff --git a/src/clyphx/vendor/future/future/utils/__init__.py b/src/clyphx/vendor/future/future/utils/__init__.py
new file mode 100644
index 0000000..46bd96d
--- /dev/null
+++ b/src/clyphx/vendor/future/future/utils/__init__.py
@@ -0,0 +1,767 @@
+"""
+A selection of cross-compatible functions for Python 2 and 3.
+
+This module exports useful functions for 2/3 compatible code:
+
+ * bind_method: binds functions to classes
+ * ``native_str_to_bytes`` and ``bytes_to_native_str``
+ * ``native_str``: always equal to the native platform string object (because
+ this may be shadowed by imports from future.builtins)
+ * lists: lrange(), lmap(), lzip(), lfilter()
+ * iterable method compatibility:
+ - iteritems, iterkeys, itervalues
+ - viewitems, viewkeys, viewvalues
+
+ These use the original method if available, otherwise they use items,
+ keys, values.
+
+ * types:
+
+ * text_type: unicode in Python 2, str in Python 3
+ * string_types: basestring in Python 2, str in Python 3
+ * binary_type: str in Python 2, bytes in Python 3
+ * integer_types: (int, long) in Python 2, int in Python 3
+ * class_types: (type, types.ClassType) in Python 2, type in Python 3
+
+ * bchr(c):
+ Take an integer and make a 1-character byte string
+ * bord(c)
+ Take the result of indexing on a byte string and make an integer
+ * tobytes(s)
+ Take a text string, a byte string, or a sequence of characters taken
+ from a byte string, and make a byte string.
+
+ * raise_from()
+ * raise_with_traceback()
+
+This module also defines these decorators:
+
+ * ``python_2_unicode_compatible``
+ * ``with_metaclass``
+ * ``implements_iterator``
+
+Some of the functions in this module come from the following sources:
+
+ * Jinja2 (BSD licensed: see
+ https://github.com/mitsuhiko/jinja2/blob/master/LICENSE)
+ * Pandas compatibility module pandas.compat
+ * six.py by Benjamin Peterson
+ * Django
+"""
+
+import types
+import sys
+import numbers
+import functools
+import copy
+import inspect
+
+
+PY3 = sys.version_info[0] >= 3
+PY34_PLUS = sys.version_info[0:2] >= (3, 4)
+PY35_PLUS = sys.version_info[0:2] >= (3, 5)
+PY36_PLUS = sys.version_info[0:2] >= (3, 6)
+PY2 = sys.version_info[0] == 2
+PY26 = sys.version_info[0:2] == (2, 6)
+PY27 = sys.version_info[0:2] == (2, 7)
+PYPY = hasattr(sys, 'pypy_translation_info')
+
+
+def python_2_unicode_compatible(cls):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python
+ 2. Under Python 3, this decorator is a no-op.
+
+ To support Python 2 and 3 with a single code base, define a __str__
+ method returning unicode text and apply this decorator to the class, like
+ this::
+
+ >>> from future.utils import python_2_unicode_compatible
+
+ >>> @python_2_unicode_compatible
+ ... class MyClass(object):
+ ... def __str__(self):
+ ... return u'Unicode string: \u5b54\u5b50'
+
+ >>> a = MyClass()
+
+ Then, after this import:
+
+ >>> from future.builtins import str
+
+ the following is ``True`` on both Python 3 and 2::
+
+ >>> str(a) == a.encode('utf-8').decode('utf-8')
+ True
+
+ and, on a Unicode-enabled terminal with the right fonts, these both print the
+ Chinese characters for Confucius::
+
+ >>> print(a)
+ >>> print(str(a))
+
+ The implementation comes from django.utils.encoding.
+ """
+ if not PY3:
+ cls.__unicode__ = cls.__str__
+ cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return cls
+
+
+def with_metaclass(meta, *bases):
+ """
+ Function from jinja2/_compat.py. License: BSD.
+
+ Use it like this::
+
+ class BaseForm(object):
+ pass
+
+ class FormType(type):
+ pass
+
+ class Form(with_metaclass(FormType, BaseForm)):
+ pass
+
+ This requires a bit of explanation: the basic idea is to make a
+ dummy metaclass for one level of class instantiation that replaces
+ itself with the actual metaclass. Because of internal type checks
+ we also need to make sure that we downgrade the custom metaclass
+ for one level to something closer to type (that's why __call__ and
+ __init__ comes back from type etc.).
+
+ This has the advantage over six.with_metaclass of not introducing
+ dummy classes into the final MRO.
+ """
+ class metaclass(meta):
+ __call__ = type.__call__
+ __init__ = type.__init__
+ def __new__(cls, name, this_bases, d):
+ if this_bases is None:
+ return type.__new__(cls, name, (), d)
+ return meta(name, bases, d)
+ return metaclass('temporary_class', None, {})
+
+
+# Definitions from pandas.compat and six.py follow:
+if PY3:
+ def bchr(s):
+ return bytes([s])
+ def bstr(s):
+ if isinstance(s, str):
+ return bytes(s, 'latin-1')
+ else:
+ return bytes(s)
+ def bord(s):
+ return s
+
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+else:
+ # Python 2
+ def bchr(s):
+ return chr(s)
+ def bstr(s):
+ return str(s)
+ def bord(s):
+ return ord(s)
+
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+###
+
+if PY3:
+ def tobytes(s):
+ if isinstance(s, bytes):
+ return s
+ else:
+ if isinstance(s, str):
+ return s.encode('latin-1')
+ else:
+ return bytes(s)
+else:
+ # Python 2
+ def tobytes(s):
+ if isinstance(s, unicode):
+ return s.encode('latin-1')
+ else:
+ return ''.join(s)
+
+tobytes.__doc__ = """
+ Encodes to latin-1 (where the first 256 chars are the same as
+ ASCII.)
+ """
+
+if PY3:
+ def native_str_to_bytes(s, encoding='utf-8'):
+ return s.encode(encoding)
+
+ def bytes_to_native_str(b, encoding='utf-8'):
+ return b.decode(encoding)
+
+ def text_to_native_str(t, encoding=None):
+ return t
+else:
+ # Python 2
+ def native_str_to_bytes(s, encoding=None):
+ from future.types import newbytes # to avoid a circular import
+ return newbytes(s)
+
+ def bytes_to_native_str(b, encoding=None):
+ return native(b)
+
+ def text_to_native_str(t, encoding='ascii'):
+ """
+ Use this to create a Py2 native string when "from __future__ import
+ unicode_literals" is in effect.
+ """
+ return unicode(t).encode(encoding)
+
+native_str_to_bytes.__doc__ = """
+ On Py3, returns an encoded string.
+ On Py2, returns a newbytes type, ignoring the ``encoding`` argument.
+ """
+
+if PY3:
+ # list-producing versions of the major Python iterating functions
+ def lrange(*args, **kwargs):
+ return list(range(*args, **kwargs))
+
+ def lzip(*args, **kwargs):
+ return list(zip(*args, **kwargs))
+
+ def lmap(*args, **kwargs):
+ return list(map(*args, **kwargs))
+
+ def lfilter(*args, **kwargs):
+ return list(filter(*args, **kwargs))
+else:
+ import __builtin__
+ # Python 2-builtin ranges produce lists
+ lrange = __builtin__.range
+ lzip = __builtin__.zip
+ lmap = __builtin__.map
+ lfilter = __builtin__.filter
+
+
+def isidentifier(s, dotted=False):
+ '''
+ A function equivalent to the str.isidentifier method on Py3
+ '''
+ if dotted:
+ return all(isidentifier(a) for a in s.split('.'))
+ if PY3:
+ return s.isidentifier()
+ else:
+ import re
+ _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
+ return bool(_name_re.match(s))
+
+
+def viewitems(obj, **kwargs):
+ """
+ Function for iterating over dictionary items with the same set-like
+ behaviour on Py2.7 as on Py3.
+
+ Passes kwargs to method."""
+ func = getattr(obj, "viewitems", None)
+ if not func:
+ func = obj.items
+ return func(**kwargs)
+
+
+def viewkeys(obj, **kwargs):
+ """
+ Function for iterating over dictionary keys with the same set-like
+ behaviour on Py2.7 as on Py3.
+
+ Passes kwargs to method."""
+ func = getattr(obj, "viewkeys", None)
+ if not func:
+ func = obj.keys
+ return func(**kwargs)
+
+
+def viewvalues(obj, **kwargs):
+ """
+ Function for iterating over dictionary values with the same set-like
+ behaviour on Py2.7 as on Py3.
+
+ Passes kwargs to method."""
+ func = getattr(obj, "viewvalues", None)
+ if not func:
+ func = obj.values
+ return func(**kwargs)
+
+
+def iteritems(obj, **kwargs):
+ """Use this only if compatibility with Python versions before 2.7 is
+ required. Otherwise, prefer viewitems().
+ """
+ func = getattr(obj, "iteritems", None)
+ if not func:
+ func = obj.items
+ return func(**kwargs)
+
+
+def iterkeys(obj, **kwargs):
+ """Use this only if compatibility with Python versions before 2.7 is
+ required. Otherwise, prefer viewkeys().
+ """
+ func = getattr(obj, "iterkeys", None)
+ if not func:
+ func = obj.keys
+ return func(**kwargs)
+
+
+def itervalues(obj, **kwargs):
+ """Use this only if compatibility with Python versions before 2.7 is
+ required. Otherwise, prefer viewvalues().
+ """
+ func = getattr(obj, "itervalues", None)
+ if not func:
+ func = obj.values
+ return func(**kwargs)
+
+
+def bind_method(cls, name, func):
+ """Bind a method to class, python 2 and python 3 compatible.
+
+ Parameters
+ ----------
+
+ cls : type
+ class to receive bound method
+ name : basestring
+ name of method on class instance
+ func : function
+ function to be bound as method
+
+ Returns
+ -------
+ None
+ """
+ # only python 2 has an issue with bound/unbound methods
+ if not PY3:
+ setattr(cls, name, types.MethodType(func, None, cls))
+ else:
+ setattr(cls, name, func)
+
+
+def getexception():
+ return sys.exc_info()[1]
+
+
+def _get_caller_globals_and_locals():
+ """
+ Returns the globals and locals of the calling frame.
+
+ Is there an alternative to frame hacking here?
+ """
+ caller_frame = inspect.stack()[2]
+ myglobals = caller_frame[0].f_globals
+ mylocals = caller_frame[0].f_locals
+ return myglobals, mylocals
+
+
+def _repr_strip(mystring):
+    """
+    Returns the string without any initial or final quotes.
+    """
+    r = repr(mystring)
+    # Strip the surrounding single quotes that repr() adds to strings;
+    # any other repr is returned unchanged.
+    if r.startswith("'") and r.endswith("'"):
+        return r[1:-1]
+    else:
+        return r
+
+
+if PY3:
+    def raise_from(exc, cause):
+        """
+        Equivalent to:
+
+            raise EXCEPTION from CAUSE
+
+        on Python 3. (See PEP 3134).
+        """
+        myglobals, mylocals = _get_caller_globals_and_locals()
+
+        # We pass the exception and cause along with other globals
+        # when we exec():
+        myglobals = myglobals.copy()
+        myglobals['__python_future_raise_from_exc'] = exc
+        myglobals['__python_future_raise_from_cause'] = cause
+        # 'raise ... from ...' is a SyntaxError on Py2, so this module can
+        # only mention it inside an exec'd string.
+        execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause"
+        exec(execstr, myglobals, mylocals)
+
+    def raise_(tp, value=None, tb=None):
+        """
+        A function that matches the Python 2.x ``raise`` statement. This
+        allows re-raising exceptions with the cls value and traceback on
+        Python 2 and 3.
+        """
+        if isinstance(tp, BaseException):
+            # If the first object is an instance, the type of the exception
+            # is the class of the instance, the instance itself is the value,
+            # and the second object must be None.
+            if value is not None:
+                raise TypeError("instance exception may not have a separate value")
+            exc = tp
+        elif isinstance(tp, type) and not issubclass(tp, BaseException):
+            # If the first object is a class, it becomes the type of the
+            # exception.
+            raise TypeError("class must derive from BaseException, not %s" % tp.__name__)
+        else:
+            # The second object is used to determine the exception value: If it
+            # is an instance of the class, the instance becomes the exception
+            # value. If the second object is a tuple, it is used as the argument
+            # list for the class constructor; if it is None, an empty argument
+            # list is used, and any other object is treated as a single argument
+            # to the constructor. The instance so created by calling the
+            # constructor is used as the exception value.
+            if isinstance(value, tp):
+                exc = value
+            elif isinstance(value, tuple):
+                exc = tp(*value)
+            elif value is None:
+                exc = tp()
+            else:
+                exc = tp(value)
+
+        # Attach the given traceback only when it differs, then re-raise.
+        if exc.__traceback__ is not tb:
+            raise exc.with_traceback(tb)
+        raise exc
+
+    def raise_with_traceback(exc, traceback=Ellipsis):
+        # Ellipsis is the "not given" sentinel; default to the traceback of
+        # the exception currently being handled.
+        if traceback == Ellipsis:
+            _, _, traceback = sys.exc_info()
+        raise exc.with_traceback(traceback)
+
+else:
+    def raise_from(exc, cause):
+        """
+        Equivalent to:
+
+            raise EXCEPTION from CAUSE
+
+        on Python 3. (See PEP 3134).
+        """
+        # Is either arg an exception class (e.g. IndexError) rather than
+        # instance (e.g. IndexError('my message here')? If so, pass the
+        # name of the class undisturbed through to "raise ... from ...".
+        if isinstance(exc, type) and issubclass(exc, Exception):
+            e = exc()
+            # exc = exc.__name__
+            # execstr = "e = " + _repr_strip(exc) + "()"
+            # myglobals, mylocals = _get_caller_globals_and_locals()
+            # exec(execstr, myglobals, mylocals)
+        else:
+            e = exc
+        e.__suppress_context__ = False
+        # Emulate Py3's __cause__ / __context__ / __suppress_context__
+        # bookkeeping on the Py2 exception object.
+        if isinstance(cause, type) and issubclass(cause, Exception):
+            e.__cause__ = cause()
+            e.__cause__.__traceback__ = sys.exc_info()[2]
+            e.__suppress_context__ = True
+        elif cause is None:
+            e.__cause__ = None
+            e.__suppress_context__ = True
+        elif isinstance(cause, BaseException):
+            e.__cause__ = cause
+            object.__setattr__(e.__cause__, '__traceback__', sys.exc_info()[2])
+            e.__suppress_context__ = True
+        else:
+            raise TypeError("exception causes must derive from BaseException")
+        e.__context__ = sys.exc_info()[1]
+        raise e
+
+    # Py2's three-argument raise is a SyntaxError on Py3, so these two
+    # helpers must be defined via exec().
+    exec('''
+def raise_(tp, value=None, tb=None):
+    raise tp, value, tb
+
+def raise_with_traceback(exc, traceback=Ellipsis):
+    if traceback == Ellipsis:
+        _, _, traceback = sys.exc_info()
+    raise exc, None, traceback
+'''.strip())
+
+
+raise_with_traceback.__doc__ = (
+"""Raise exception with existing traceback.
+If traceback is not passed, uses sys.exc_info() to get traceback."""
+)
+
+
+# Deprecated alias for backward compatibility with ``future`` versions < 0.11:
+reraise = raise_
+
+
+def implements_iterator(cls):
+    '''
+    From jinja2/_compat.py. License: BSD.
+
+    Use as a decorator like this::
+
+        @implements_iterator
+        class UppercasingIterator(object):
+            def __init__(self, iterable):
+                self._iter = iter(iterable)
+            def __iter__(self):
+                return self
+            def __next__(self):
+                return next(self._iter).upper()
+
+    '''
+    if PY3:
+        return cls
+    else:
+        # On Py2 the iterator protocol calls .next(); alias __next__ to it
+        # and drop __next__ so only the native protocol method remains.
+        cls.next = cls.__next__
+        del cls.__next__
+        return cls
+
+# NOTE(review): these branches look inverted (PY3 -> x.next, Py2 -> x.__next__)
+# but match upstream python-future -- confirm before changing.
+if PY3:
+    get_next = lambda x: x.next
+else:
+    get_next = lambda x: x.__next__
+
+
+def encode_filename(filename):
+    # On Py2, encode unicode filenames to UTF-8 bytes; Py3 values and
+    # already-bytes Py2 values pass through unchanged.
+    if PY3:
+        return filename
+    else:
+        if isinstance(filename, unicode):
+            return filename.encode('utf-8')
+        return filename
+
+
+def is_new_style(cls):
+    """
+    Python 2.7 has both new-style and old-style classes. Old-style classes can
+    be pesky in some circumstances, such as when using inheritance. Use this
+    function to test for whether a class is new-style. (Python 3 only has
+    new-style classes.)
+    """
+    # New-style classes expose __dict__ (or __slots__) on the class itself.
+    return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
+                                          or hasattr(cls, '__slots__'))
+
+# The native platform string and bytes types. Useful because ``str`` and
+# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
+native_str = str
+native_bytes = bytes
+
+
+def istext(obj):
+    """
+    Deprecated. Use::
+        >>> isinstance(obj, str)
+    after this import:
+        >>> from future.builtins import str
+    """
+    # type(u'') is unicode on Py2 and str on Py3.
+    return isinstance(obj, type(u''))
+
+
+def isbytes(obj):
+    """
+    Deprecated. Use::
+        >>> isinstance(obj, bytes)
+    after this import:
+        >>> from future.builtins import bytes
+    """
+    # type(b'') is str on Py2 and bytes on Py3.
+    return isinstance(obj, type(b''))
+
+
+def isnewbytes(obj):
+    """
+    Equivalent to the result of ``type(obj) == type(newbytes)``
+    in other words, it is REALLY a newbytes instance, not a Py2 native str
+    object?
+
+    Note that this does not cover subclasses of newbytes, and it is not
+    equivalent to isinstance(obj, newbytes)
+    """
+    # Compared by type name to avoid importing future.types here.
+    return type(obj).__name__ == 'newbytes'
+
+
+def isint(obj):
+    """
+    Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or
+    ``long``.
+
+    Instead of using this function, you can use:
+
+        >>> from future.builtins import int
+        >>> isinstance(obj, int)
+
+    The following idiom is equivalent:
+
+        >>> from numbers import Integral
+        >>> isinstance(obj, Integral)
+    """
+
+    return isinstance(obj, numbers.Integral)
+
+
+def native(obj):
+    """
+    On Py3, this is a no-op: native(obj) -> obj
+
+    On Py2, returns the corresponding native Py2 types that are
+    superclasses for backported objects from Py3:
+
+    >>> from builtins import str, bytes, int
+
+    >>> native(str(u'ABC'))
+    u'ABC'
+    >>> type(native(str(u'ABC')))
+    unicode
+
+    >>> native(bytes(b'ABC'))
+    b'ABC'
+    >>> type(native(bytes(b'ABC')))
+    bytes
+
+    >>> native(int(10**20))
+    100000000000000000000L
+    >>> type(native(int(10**20)))
+    long
+
+    Existing native types on Py2 will be returned unchanged:
+
+    >>> type(native(u'ABC'))
+    unicode
+    """
+    # future's backported types define __native__(); anything without that
+    # hook is assumed to be a native type already.
+    if hasattr(obj, '__native__'):
+        return obj.__native__()
+    else:
+        return obj
+
+
+# Implementation of exec_ is from ``six``:
+if PY3:
+    import builtins
+    exec_ = getattr(builtins, "exec")
+else:
+    def exec_(code, globs=None, locs=None):
+        """Execute code in a namespace."""
+        if globs is None:
+            # Default to the caller's globals/locals via frame introspection.
+            frame = sys._getframe(1)
+            globs = frame.f_globals
+            if locs is None:
+                locs = frame.f_locals
+            del frame
+        elif locs is None:
+            locs = globs
+        # 'exec ... in ...' is a statement on Py2; wrapping it in a string
+        # keeps this file importable on Py3.
+        exec("""exec code in globs, locs""")
+
+
+# Defined here for backward compatibility:
+def old_div(a, b):
+    """
+    DEPRECATED: import ``old_div`` from ``past.utils`` instead.
+
+    Equivalent to ``a / b`` on Python 2 without ``from __future__ import
+    division``.
+
+    TODO: generalize this to other objects (like arrays etc.)
+    """
+    # Py2 classic division: floor division when both operands are integral,
+    # true division otherwise.
+    if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
+        return a // b
+    else:
+        return a / b
+
+
+def as_native_str(encoding='utf-8'):
+    '''
+    A decorator to turn a function or method call that returns text, i.e.
+    unicode, into one that returns a native platform str.
+
+    Use it as a decorator like this::
+
+        from __future__ import unicode_literals
+
+        class MyClass(object):
+            @as_native_str(encoding='ascii')
+            def __repr__(self):
+                return next(self._iter).upper()
+    '''
+    if PY3:
+        # Py3's native str is already text: no wrapping needed.
+        return lambda f: f
+    else:
+        def encoder(f):
+            @functools.wraps(f)
+            def wrapper(*args, **kwargs):
+                # Encode the unicode result to a native Py2 str (bytes).
+                return f(*args, **kwargs).encode(encoding=encoding)
+            return wrapper
+        return encoder
+
+# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
+# PEP 496:
+try:
+    dict.iteritems
+except AttributeError:
+    # Python 3
+    def listvalues(d):
+        # d.values() is a view on Py3; materialise it as a list.
+        return list(d.values())
+    def listitems(d):
+        return list(d.items())
+else:
+    # Python 2
+    def listvalues(d):
+        # Py2's d.values() already returns a new list.
+        return d.values()
+    def listitems(d):
+        return d.items()
+
+if PY3:
+    def ensure_new_type(obj):
+        # Py3 types are already the "new" types; nothing to convert.
+        return obj
+else:
+    def ensure_new_type(obj):
+        from future.types.newbytes import newbytes
+        from future.types.newstr import newstr
+        from future.types.newint import newint
+        from future.types.newdict import newdict
+
+        native_type = type(native(obj))
+
+        # Upcast only if the type is already a native (non-future) type
+        if issubclass(native_type, type(obj)):
+            # Upcast
+            if native_type == str:  # i.e. Py2 8-bit str
+                return newbytes(obj)
+            elif native_type == unicode:
+                return newstr(obj)
+            elif native_type == int:
+                return newint(obj)
+            elif native_type == long:
+                return newint(obj)
+            elif native_type == dict:
+                return newdict(obj)
+            else:
+                # No future equivalent (e.g. float, list): leave unchanged.
+                return obj
+        else:
+            # Already a new type
+            assert type(obj) in [newbytes, newstr]
+            return obj
+
+
+__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
+ 'as_native_str', 'binary_type', 'bind_method', 'bord', 'bstr',
+ 'bytes_to_native_str', 'class_types', 'encode_filename',
+ 'ensure_new_type', 'exec_', 'get_next', 'getexception',
+ 'implements_iterator', 'integer_types', 'is_new_style', 'isbytes',
+ 'isidentifier', 'isint', 'isnewbytes', 'istext', 'iteritems',
+ 'iterkeys', 'itervalues', 'lfilter', 'listitems', 'listvalues',
+ 'lmap', 'lrange', 'lzip', 'native', 'native_bytes', 'native_str',
+ 'native_str_to_bytes', 'old_div',
+ 'python_2_unicode_compatible', 'raise_',
+ 'raise_with_traceback', 'reraise', 'string_types',
+ 'text_to_native_str', 'text_type', 'tobytes', 'viewitems',
+ 'viewkeys', 'viewvalues', 'with_metaclass'
+ ]
diff --git a/src/clyphx/vendor/future/future/utils/surrogateescape.py b/src/clyphx/vendor/future/future/utils/surrogateescape.py
new file mode 100644
index 0000000..0dcc9fa
--- /dev/null
+++ b/src/clyphx/vendor/future/future/utils/surrogateescape.py
@@ -0,0 +1,198 @@
+"""
+This is Victor Stinner's pure-Python implementation of PEP 383: the "surrogateescape" error
+handler of Python 3.
+
+Source: misc/python/surrogateescape.py in https://bitbucket.org/haypo/misc
+"""
+
+# This code is released under the Python license and the BSD 2-clause license
+
+import codecs
+import sys
+
+from future import utils
+
+
+FS_ERRORS = 'surrogateescape'
+
+# # -- Python 2/3 compatibility -------------------------------------
+# FS_ERRORS = 'my_surrogateescape'
+
+def u(text):
+    # On Py2, interpret escape sequences in the byte literal to build unicode;
+    # on Py3 the literal is already text.
+    if utils.PY3:
+        return text
+    else:
+        return text.decode('unicode_escape')
+
+def b(data):
+    # On Py3, turn a native-str literal into bytes; Py2 strs are bytes already.
+    if utils.PY3:
+        return data.encode('latin1')
+    else:
+        return data
+
+# Portable helpers: code point -> text char, and byte value -> length-1 bytes.
+if utils.PY3:
+    _unichr = chr
+    bytes_chr = lambda code: bytes((code,))
+else:
+    _unichr = unichr
+    bytes_chr = chr
+
+def surrogateescape_handler(exc):
+    """
+    Pure Python implementation of the PEP 383: the "surrogateescape" error
+    handler of Python 3. Undecodable bytes will be replaced by a Unicode
+    character U+DCxx on decoding, and these are translated into the
+    original bytes on encoding.
+    """
+    # The slice of the input the codec could not decode/encode.
+    mystring = exc.object[exc.start:exc.end]
+
+    try:
+        if isinstance(exc, UnicodeDecodeError):
+            # mystring is a byte-string in this case
+            decoded = replace_surrogate_decode(mystring)
+        elif isinstance(exc, UnicodeEncodeError):
+            # In the case of u'\udcc3'.encode('ascii',
+            # 'this_surrogateescape_handler'), both Python 2.x and 3.x raise an
+            # exception anyway after this function is called, even though I think
+            # it's doing what it should. It seems that the strict encoder is called
+            # to encode the unicode string that this function returns ...
+            decoded = replace_surrogate_encode(mystring)
+        else:
+            raise exc
+    except NotASurrogateError:
+        # Not data we handle: propagate the original codec error.
+        raise exc
+    # codecs error-handler protocol: (replacement, position to resume at).
+    return (decoded, exc.end)
+
+
+class NotASurrogateError(Exception):
+    # Internal signal: the problem data is not in the surrogate range we handle.
+    pass
+
+
+def replace_surrogate_encode(mystring):
+    """
+    Returns a (unicode) string, not the more logical bytes, because the codecs
+    register_error functionality expects this.
+    """
+    decoded = []
+    for ch in mystring:
+        # if utils.PY3:
+        #     code = ch
+        # else:
+        code = ord(ch)
+
+        # The following magic comes from Py3.3's Python/codecs.c file:
+        if not 0xD800 <= code <= 0xDCFF:
+            # Not a surrogate. Fail with the original exception.
+            raise NotASurrogateError
+        # mybytes = [0xe0 | (code >> 12),
+        #            0x80 | ((code >> 6) & 0x3f),
+        #            0x80 | (code & 0x3f)]
+        # Is this a good idea?
+        # NOTE(review): the two branches below are identical (both map the
+        # surrogate back to code - 0xDC00); matches upstream python-future --
+        # confirm before simplifying.
+        if 0xDC00 <= code <= 0xDC7F:
+            decoded.append(_unichr(code - 0xDC00))
+        elif code <= 0xDCFF:
+            decoded.append(_unichr(code - 0xDC00))
+        else:
+            raise NotASurrogateError
+    return str().join(decoded)
+
+
+def replace_surrogate_decode(mybytes):
+    """
+    Returns a (unicode) string
+    """
+    decoded = []
+    for ch in mybytes:
+        # We may be parsing newbytes (in which case ch is an int) or a native
+        # str on Py2
+        if isinstance(ch, int):
+            code = ch
+        else:
+            code = ord(ch)
+        if 0x80 <= code <= 0xFF:
+            # Undecodable byte: escape it into the low surrogate range
+            # U+DC80..U+DCFF (PEP 383).
+            decoded.append(_unichr(0xDC00 + code))
+        elif code <= 0x7F:
+            # Plain ASCII passes through unchanged.
+            decoded.append(_unichr(code))
+        else:
+            # # It may be a bad byte
+            # # Try swallowing it.
+            # continue
+            # print("RAISE!")
+            raise NotASurrogateError
+    return str().join(decoded)
+
+
+def encodefilename(fn):
+    """Encode unicode filename *fn* to bytes using FS_ENCODING with
+    surrogateescape semantics (emulated for Py2's ascii/utf-8 codecs)."""
+    if FS_ENCODING == 'ascii':
+        # ASCII encoder of Python 2 expects that the error handler returns a
+        # Unicode string encodable to ASCII, whereas our surrogateescape error
+        # handler has to return bytes in 0x80-0xFF range.
+        encoded = []
+        for index, ch in enumerate(fn):
+            code = ord(ch)
+            if code < 128:
+                ch = bytes_chr(code)
+            elif 0xDC80 <= code <= 0xDCFF:
+                # Lone low surrogate: restore the original raw byte.
+                ch = bytes_chr(code - 0xDC00)
+            else:
+                raise UnicodeEncodeError(FS_ENCODING,
+                    fn, index, index+1,
+                    'ordinal not in range(128)')
+            encoded.append(ch)
+        return bytes().join(encoded)
+    elif FS_ENCODING == 'utf-8':
+        # UTF-8 encoder of Python 2 encodes surrogates, so U+DC80-U+DCFF
+        # doesn't go through our error handler
+        encoded = []
+        for index, ch in enumerate(fn):
+            code = ord(ch)
+            if 0xD800 <= code <= 0xDFFF:
+                if 0xDC80 <= code <= 0xDCFF:
+                    # Escaped byte: emit it raw.
+                    ch = bytes_chr(code - 0xDC00)
+                    encoded.append(ch)
+                else:
+                    raise UnicodeEncodeError(
+                        FS_ENCODING,
+                        fn, index, index+1, 'surrogates not allowed')
+            else:
+                ch_utf8 = ch.encode('utf-8')
+                encoded.append(ch_utf8)
+        return bytes().join(encoded)
+    else:
+        # Any other codec: rely on the registered surrogateescape handler.
+        return fn.encode(FS_ENCODING, FS_ERRORS)
+
+def decodefilename(fn):
+    # Decode bytes to unicode, escaping undecodable bytes as surrogates.
+    return fn.decode(FS_ENCODING, FS_ERRORS)
+
+FS_ENCODING = 'ascii'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]')
+# FS_ENCODING = 'cp932'; fn = b('[abc\x81\x00]'); encoded = u('[abc\udc81\x00]')
+# FS_ENCODING = 'UTF-8'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]')
+
+
+# normalize the filesystem encoding name.
+# For example, we expect "utf-8", not "UTF8".
+FS_ENCODING = codecs.lookup(FS_ENCODING).name
+
+
+def register_surrogateescape():
+    """
+    Registers the surrogateescape error handler on Python 2 (only)
+    """
+    if utils.PY3:
+        # Py3 ships a native 'surrogateescape' handler; nothing to do.
+        return
+    try:
+        codecs.lookup_error(FS_ERRORS)
+    except LookupError:
+        # Not registered yet: install the pure-Python handler above.
+        codecs.register_error(FS_ERRORS, surrogateescape_handler)
+
+
+if __name__ == '__main__':
+ pass
+ # # Tests:
+ # register_surrogateescape()
+
+ # b = decodefilename(fn)
+ # assert b == encoded, "%r != %r" % (b, encoded)
+ # c = encodefilename(b)
+ # assert c == fn, '%r != %r' % (c, fn)
+ # # print("ok")
diff --git a/src/clyphx/vendor/future/html/__init__.py b/src/clyphx/vendor/future/html/__init__.py
new file mode 100644
index 0000000..e957e74
--- /dev/null
+++ b/src/clyphx/vendor/future/html/__init__.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+import sys
+
+if sys.version_info[0] < 3:
+ from future.moves.html import *
+else:
+ raise ImportError('This package should not be accessible on Python 3. '
+ 'Either you are trying to run from the python-future src folder '
+ 'or your installation of python-future is corrupted.')
diff --git a/src/clyphx/vendor/future/html/entities.py b/src/clyphx/vendor/future/html/entities.py
new file mode 100644
index 0000000..211649e
--- /dev/null
+++ b/src/clyphx/vendor/future/html/entities.py
@@ -0,0 +1,7 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+ from html.entities import *
+else:
+ from future.moves.html.entities import *
diff --git a/src/clyphx/vendor/future/html/parser.py b/src/clyphx/vendor/future/html/parser.py
new file mode 100644
index 0000000..e394887
--- /dev/null
+++ b/src/clyphx/vendor/future/html/parser.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+import sys
+__future_module__ = True
+
+if sys.version_info[0] >= 3:
+ raise ImportError('Cannot import module from python-future source folder')
+else:
+ from future.moves.html.parser import *
diff --git a/src/clyphx/vendor/future/http/__init__.py b/src/clyphx/vendor/future/http/__init__.py
new file mode 100644
index 0000000..e4f853e
--- /dev/null
+++ b/src/clyphx/vendor/future/http/__init__.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+import sys
+
+if sys.version_info[0] < 3:
+ pass
+else:
+ raise ImportError('This package should not be accessible on Python 3. '
+ 'Either you are trying to run from the python-future src folder '
+ 'or your installation of python-future is corrupted.')
diff --git a/src/clyphx/vendor/future/http/client.py b/src/clyphx/vendor/future/http/client.py
new file mode 100644
index 0000000..a6a3100
--- /dev/null
+++ b/src/clyphx/vendor/future/http/client.py
@@ -0,0 +1,90 @@
+from __future__ import absolute_import
+import sys
+
+assert sys.version_info[0] < 3
+
+from httplib import *
+from httplib import HTTPMessage
+
+# These constants aren't included in __all__ in httplib.py:
+
+from httplib import (HTTP_PORT,
+ HTTPS_PORT,
+
+ CONTINUE,
+ SWITCHING_PROTOCOLS,
+ PROCESSING,
+
+ OK,
+ CREATED,
+ ACCEPTED,
+ NON_AUTHORITATIVE_INFORMATION,
+ NO_CONTENT,
+ RESET_CONTENT,
+ PARTIAL_CONTENT,
+ MULTI_STATUS,
+ IM_USED,
+
+ MULTIPLE_CHOICES,
+ MOVED_PERMANENTLY,
+ FOUND,
+ SEE_OTHER,
+ NOT_MODIFIED,
+ USE_PROXY,
+ TEMPORARY_REDIRECT,
+
+ BAD_REQUEST,
+ UNAUTHORIZED,
+ PAYMENT_REQUIRED,
+ FORBIDDEN,
+ NOT_FOUND,
+ METHOD_NOT_ALLOWED,
+ NOT_ACCEPTABLE,
+ PROXY_AUTHENTICATION_REQUIRED,
+ REQUEST_TIMEOUT,
+ CONFLICT,
+ GONE,
+ LENGTH_REQUIRED,
+ PRECONDITION_FAILED,
+ REQUEST_ENTITY_TOO_LARGE,
+ REQUEST_URI_TOO_LONG,
+ UNSUPPORTED_MEDIA_TYPE,
+ REQUESTED_RANGE_NOT_SATISFIABLE,
+ EXPECTATION_FAILED,
+ UNPROCESSABLE_ENTITY,
+ LOCKED,
+ FAILED_DEPENDENCY,
+ UPGRADE_REQUIRED,
+
+ INTERNAL_SERVER_ERROR,
+ NOT_IMPLEMENTED,
+ BAD_GATEWAY,
+ SERVICE_UNAVAILABLE,
+ GATEWAY_TIMEOUT,
+ HTTP_VERSION_NOT_SUPPORTED,
+ INSUFFICIENT_STORAGE,
+ NOT_EXTENDED,
+
+ MAXAMOUNT,
+ )
+
+# These are not available on Python 2.6.x:
+try:
+ from httplib import LineTooLong, LineAndFileWrapper
+except ImportError:
+ pass
+
+# These may not be available on all versions of Python 2.6.x or 2.7.x
+try:
+ from httplib import (
+ _CS_IDLE,
+ _CS_REQ_STARTED,
+ _CS_REQ_SENT,
+ _MAXLINE,
+ _MAXHEADERS,
+ _is_legal_header_name,
+ _is_illegal_header_value,
+ _METHODS_EXPECTING_BODY
+ )
+except ImportError:
+ pass
diff --git a/src/clyphx/vendor/future/http/cookiejar.py b/src/clyphx/vendor/future/http/cookiejar.py
new file mode 100644
index 0000000..d847b2b
--- /dev/null
+++ b/src/clyphx/vendor/future/http/cookiejar.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import
+import sys
+
+assert sys.version_info[0] < 3
+
+from cookielib import *
diff --git a/src/clyphx/vendor/future/http/cookies.py b/src/clyphx/vendor/future/http/cookies.py
new file mode 100644
index 0000000..eb2a823
--- /dev/null
+++ b/src/clyphx/vendor/future/http/cookies.py
@@ -0,0 +1,7 @@
+from __future__ import absolute_import
+import sys
+
+assert sys.version_info[0] < 3
+
+from Cookie import *
+from Cookie import Morsel # left out of __all__ on Py2.7!
diff --git a/src/clyphx/vendor/future/http/server.py b/src/clyphx/vendor/future/http/server.py
new file mode 100644
index 0000000..2971055
--- /dev/null
+++ b/src/clyphx/vendor/future/http/server.py
@@ -0,0 +1,18 @@
+from __future__ import absolute_import
+import sys
+
+assert sys.version_info[0] < 3
+
+from BaseHTTPServer import *
+from CGIHTTPServer import *
+from SimpleHTTPServer import *
+try:
+ from CGIHTTPServer import _url_collapse_path # needed for a test
+except ImportError:
+ try:
+ # Python 2.7.0 to 2.7.3
+ from CGIHTTPServer import (
+ _url_collapse_path_split as _url_collapse_path)
+ except ImportError:
+ # Doesn't exist on Python 2.6.x. Ignore it.
+ pass
diff --git a/src/clyphx/vendor/future/libfuturize/__init__.py b/src/clyphx/vendor/future/libfuturize/__init__.py
new file mode 100644
index 0000000..4cb1cbc
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/__init__.py
@@ -0,0 +1 @@
+# empty to make this a package
diff --git a/src/clyphx/vendor/future/libfuturize/fixer_util.py b/src/clyphx/vendor/future/libfuturize/fixer_util.py
new file mode 100644
index 0000000..48e4689
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixer_util.py
@@ -0,0 +1,520 @@
+"""
+Utility functions from 2to3, 3to2 and python-modernize (and some home-grown
+ones).
+
+Licences:
+2to3: PSF License v2
+3to2: Apache Software License (from 3to2/setup.py)
+python-modernize licence: BSD (from python-modernize/LICENSE)
+"""
+
+from lib2to3.fixer_util import (FromImport, Newline, is_import,
+ find_root, does_tree_import, Comma)
+from lib2to3.pytree import Leaf, Node
+from lib2to3.pygram import python_symbols as syms, python_grammar
+from lib2to3.pygram import token
+from lib2to3.fixer_util import (Node, Call, Name, syms, Comma, Number)
+import re
+
+
+def canonical_fix_name(fix, avail_fixes):
+    """
+    Returns the canonical fully-qualified module name for *fix*, resolved
+    against the available fixer names in *avail_fixes*.
+
+    Examples:
+    >>> canonical_fix_name('fix_wrap_text_literals')
+    'libfuturize.fixes.fix_wrap_text_literals'
+    >>> canonical_fix_name('wrap_text_literals')
+    'libfuturize.fixes.fix_wrap_text_literals'
+    >>> canonical_fix_name('wrap_te')
+    ValueError("unknown fixer name")
+    >>> canonical_fix_name('wrap')
+    ValueError("ambiguous fixer name")
+    """
+    if ".fix_" in fix:
+        # Already fully qualified.
+        return fix
+    else:
+        if fix.startswith('fix_'):
+            fix = fix[4:]
+        # Infer the full module name for the fixer.
+        # First ensure that no names clash (e.g.
+        # lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah):
+        found = [f for f in avail_fixes
+                 if f.endswith('fix_{0}'.format(fix))]
+        if len(found) > 1:
+            raise ValueError("Ambiguous fixer name. Choose a fully qualified "
+                             "module name instead from these:\n" +
+                             "\n".join(" " + myf for myf in found))
+        elif len(found) == 0:
+            raise ValueError("Unknown fixer. Use --list-fixes or -l for a list.")
+        return found[0]
+
+
+
+## These functions are from 3to2 by Joe Amenta:
+
+def Star(prefix=None):
+    # Leaf for the '*' token (e.g. *args).
+    return Leaf(token.STAR, u'*', prefix=prefix)
+
+def DoubleStar(prefix=None):
+    # Leaf for the '**' token (e.g. **kwargs).
+    return Leaf(token.DOUBLESTAR, u'**', prefix=prefix)
+
+def Minus(prefix=None):
+    # Leaf for the '-' token.
+    return Leaf(token.MINUS, u'-', prefix=prefix)
+
+def commatize(leafs):
+    """
+    Accepts/turns: (Name, Name, ..., Name, Name)
+    Returns/into: (Name, Comma, Name, Comma, ..., Name, Comma, Name)
+    """
+    new_leafs = []
+    for leaf in leafs:
+        new_leafs.append(leaf)
+        new_leafs.append(Comma())
+    # Drop the trailing comma appended after the final leaf.
+    del new_leafs[-1]
+    return new_leafs
+
+def indentation(node):
+    """
+    Returns the indentation for this node
+    Iff a node is in a suite, then it has indentation.
+    """
+    # Climb until the parent is a suite (or we run out of parents).
+    while node.parent is not None and node.parent.type != syms.suite:
+        node = node.parent
+    if node.parent is None:
+        return u""
+    # The first three children of a suite are NEWLINE, INDENT, (some other node)
+    # INDENT.value contains the indentation for this suite
+    # anything after (some other node) has the indentation as its prefix.
+    if node.type == token.INDENT:
+        return node.value
+    elif node.prev_sibling is not None and node.prev_sibling.type == token.INDENT:
+        return node.prev_sibling.value
+    elif node.prev_sibling is None:
+        return u""
+    else:
+        return node.prefix
+
+def indentation_step(node):
+    """
+    Dirty little trick to get the difference between each indentation level
+    Implemented by finding the shortest indentation string
+    (technically, the "least" of all of the indentation strings, but
+    tabs and spaces mixed won't get this far, so those are synonymous.)
+    """
+    r = find_root(node)
+    # Collect all indentations into one set.
+    all_indents = set(i.value for i in r.pre_order() if i.type == token.INDENT)
+    if not all_indents:
+        # nothing is indented anywhere, so we get to pick what we want
+        # NOTE(review): the inline comment says four spaces but the literal
+        # here is a single space; possibly collapsed whitespace -- verify
+        # against upstream.
+        return u" "  # four spaces is a popular convention
+    else:
+        return min(all_indents)
+
+def suitify(parent):
+    """
+    Turn the stuff after the first colon in parent's children
+    into a suite, if it wasn't already
+    """
+    for node in parent.children:
+        if node.type == syms.suite:
+            # already in the preferred format, do nothing
+            return
+
+    # One-liners have no suite node, we have to fake one up
+    for i, node in enumerate(parent.children):
+        if node.type == token.COLON:
+            break
+    else:
+        raise ValueError(u"No class suite and no ':'!")
+    # Move everything into a suite node
+    suite = Node(syms.suite, [Newline(), Leaf(token.INDENT, indentation(node) + indentation_step(node))])
+    one_node = parent.children[i+1]
+    one_node.remove()
+    one_node.prefix = u''
+    suite.append_child(one_node)
+    parent.append_child(suite)
+
+def NameImport(package, as_name=None, prefix=None):
+    """
+    Accepts a package (Name node), name to import it as (string), and
+    optional prefix and returns a node:
+    import <package> [as <as_name>]
+    """
+    if prefix is None:
+        prefix = u""
+    children = [Name(u"import", prefix=prefix), package]
+    if as_name is not None:
+        children.extend([Name(u"as", prefix=u" "),
+                         Name(as_name, prefix=u" ")])
+    return Node(syms.import_name, children)
+
+# Grammar node types for compound statements and import statements.
+_compound_stmts = (syms.if_stmt, syms.while_stmt, syms.for_stmt, syms.try_stmt, syms.with_stmt)
+_import_stmts = (syms.import_name, syms.import_from)
+
+def import_binding_scope(node):
+    """
+    Generator yields all nodes for which a node (an import_stmt) has scope
+    The purpose of this is for a call to _find() on each of them
+    """
+    # import_name / import_from are small_stmts
+    assert node.type in _import_stmts
+    test = node.next_sibling
+    # A small_stmt can only be followed by a SEMI or a NEWLINE.
+    while test.type == token.SEMI:
+        nxt = test.next_sibling
+        # A SEMI can only be followed by a small_stmt or a NEWLINE
+        if nxt.type == token.NEWLINE:
+            break
+        else:
+            yield nxt
+        # A small_stmt can only be followed by either a SEMI or a NEWLINE
+        test = nxt.next_sibling
+    # Covered all subsequent small_stmts after the import_stmt
+    # Now to cover all subsequent stmts after the parent simple_stmt
+    parent = node.parent
+    assert parent.type == syms.simple_stmt
+    test = parent.next_sibling
+    while test is not None:
+        # Yes, this will yield NEWLINE and DEDENT. Deal with it.
+        yield test
+        test = test.next_sibling
+
+    context = parent.parent
+    # Recursively yield nodes following imports inside of a if/while/for/try/with statement
+    if context.type in _compound_stmts:
+        # import is in a one-liner
+        c = context
+        while c.next_sibling is not None:
+            yield c.next_sibling
+            c = c.next_sibling
+        context = context.parent
+
+    # Can't chain one-liners on one line, so that takes care of that.
+
+    p = context.parent
+    if p is None:
+        return
+
+    # in a multi-line suite
+
+    while p.type in _compound_stmts:
+
+        if context.type == syms.suite:
+            yield context
+
+        context = context.next_sibling
+
+        if context is None:
+            # Exhausted this level: move up to the enclosing statement.
+            context = p.parent
+            p = context.parent
+            if p is None:
+                break
+
+def ImportAsName(name, as_name, prefix=None):
+    """Return an ``import_as_name`` node: ``<name> as <as_name>``."""
+    new_name = Name(name)
+    new_as = Name(u"as", prefix=u" ")
+    new_as_name = Name(as_name, prefix=u" ")
+    new_node = Node(syms.import_as_name, [new_name, new_as, new_as_name])
+    if prefix is not None:
+        new_node.prefix = prefix
+    return new_node
+
+
+def is_docstring(node):
+    """
+    Returns True if the node appears to be a docstring
+    """
+    # A docstring statement is a simple_stmt whose first child is a STRING leaf.
+    return (node.type == syms.simple_stmt and
+            len(node.children) > 0 and node.children[0].type == token.STRING)
+
+
+def future_import(feature, node):
+    """
+    Add ``from __future__ import <feature>`` to the tree containing *node*,
+    unless that feature is already imported.
+    """
+    root = find_root(node)
+
+    if does_tree_import(u"__future__", feature, node):
+        return
+
+    # Look for a shebang or encoding line
+    shebang_encoding_idx = None
+
+    for idx, node in enumerate(root.children):
+        # Is it a shebang or encoding line?
+        if is_shebang_comment(node) or is_encoding_comment(node):
+            shebang_encoding_idx = idx
+        if is_docstring(node):
+            # skip over docstring
+            continue
+        names = check_future_import(node)
+        if not names:
+            # not a future statement; need to insert before this
+            break
+        if feature in names:
+            # already imported
+            return
+
+    import_ = FromImport(u'__future__', [Leaf(token.NAME, feature, prefix=" ")])
+    if shebang_encoding_idx == 0 and idx == 0:
+        # If this __future__ import would go on the first line,
+        # detach the shebang / encoding prefix from the current first line.
+        # and attach it to our new __future__ import node.
+        import_.prefix = root.children[0].prefix
+        root.children[0].prefix = u''
+    # End the __future__ import line with a newline and add a blank line
+    # afterwards:
+    children = [import_ , Newline()]
+    root.insert_child(idx, Node(syms.simple_stmt, children))
+
+
+def future_import2(feature, node):
+    """
+    An alternative to future_import() which might not work ...
+    """
+    root = find_root(node)
+
+    if does_tree_import(u"__future__", feature, node):
+        return
+
+    insert_pos = 0
+    for idx, node in enumerate(root.children):
+        # Insert just after the module docstring, if one exists.
+        if node.type == syms.simple_stmt and node.children and \
+           node.children[0].type == token.STRING:
+            insert_pos = idx + 1
+            break
+
+    for thing_after in root.children[insert_pos:]:
+        if thing_after.type == token.NEWLINE:
+            insert_pos += 1
+            continue
+
+        # Steal the next statement's prefix (comments/whitespace) so the
+        # inserted import takes its place cleanly.
+        prefix = thing_after.prefix
+        thing_after.prefix = u""
+        break
+    else:
+        prefix = u""
+
+    import_ = FromImport(u"__future__", [Leaf(token.NAME, feature, prefix=u" ")])
+
+    children = [import_, Newline()]
+    root.insert_child(insert_pos, Node(syms.simple_stmt, children, prefix=prefix))
+
+def parse_args(arglist, scheme):
+    u"""
+    Parse a list of arguments into a dict
+    """
+    arglist = [i for i in arglist if i.type != token.COMMA]
+
+    # Start every slot named in the scheme at None.
+    ret_mapping = dict([(k, None) for k in scheme])
+
+    for i, arg in enumerate(arglist):
+        if arg.type == syms.argument and arg.children[1].type == token.EQUAL:
+            # argument < NAME '=' any >
+            slot = arg.children[0].value
+            ret_mapping[slot] = arg.children[2]
+        else:
+            # Positional argument: slot name comes from its position.
+            slot = scheme[i]
+            ret_mapping[slot] = arg
+
+    return ret_mapping
+
+
+# def is_import_from(node):
+#     """Returns true if the node is a statement "from ... import ..."
+#     """
+#     return node.type == syms.import_from
+
+
+def is_import_stmt(node):
+    # True for a simple_stmt whose first child is an import statement.
+    return (node.type == syms.simple_stmt and node.children and
+            is_import(node.children[0]))
+
+
+def touch_import_top(package, name_to_import, node):
+    """Works like `does_tree_import` but adds an import statement at the
+    top if it was not imported (but below any __future__ imports) and below any
+    comments such as shebang lines).
+
+    Based on lib2to3.fixer_util.touch_import()
+
+    Calling this multiple times adds the imports in reverse order.
+
+    Also adds "standard_library.install_aliases()" after "from future import
+    standard_library". This should probably be factored into another function.
+    """
+
+    root = find_root(node)
+
+    if does_tree_import(package, name_to_import, root):
+        return
+
+    # Ideally, we would look for whether futurize --all-imports has been run,
+    # as indicated by the presence of ``from builtins import (ascii, ...,
+    # zip)`` -- and, if it has, we wouldn't import the name again.
+
+    # Look for __future__ imports and insert below them
+    found = False
+    for name in ['absolute_import', 'division', 'print_function',
+                 'unicode_literals']:
+        if does_tree_import('__future__', name, root):
+            found = True
+            break
+    if found:
+        # At least one __future__ import. We want to loop until we've seen them
+        # all.
+        start, end = None, None
+        for idx, node in enumerate(root.children):
+            if check_future_import(node):
+                start = idx
+                # Start looping
+                idx2 = start
+                while node:
+                    node = node.next_sibling
+                    idx2 += 1
+                    if not check_future_import(node):
+                        end = idx2
+                        break
+                break
+        assert start is not None
+        assert end is not None
+        # Insert immediately after the run of __future__ imports.
+        insert_pos = end
+    else:
+        # No __future__ imports.
+        # We look for a docstring and insert the new node below that. If no docstring
+        # exists, just insert the node at the top.
+        for idx, node in enumerate(root.children):
+            if node.type != syms.simple_stmt:
+                break
+            if not is_docstring(node):
+                # This is the usual case.
+                break
+        insert_pos = idx
+
+    if package is None:
+        # Bare "import <name>" statement.
+        import_ = Node(syms.import_name, [
+            Leaf(token.NAME, u"import"),
+            Leaf(token.NAME, name_to_import, prefix=u" ")
+        ])
+    else:
+        import_ = FromImport(package, [Leaf(token.NAME, name_to_import, prefix=u" ")])
+    if name_to_import == u'standard_library':
+        # Add:
+        #     standard_library.install_aliases()
+        # after:
+        #     from future import standard_library
+        install_hooks = Node(syms.simple_stmt,
+                             [Node(syms.power,
+                                   [Leaf(token.NAME, u'standard_library'),
+                                    Node(syms.trailer, [Leaf(token.DOT, u'.'),
+                                                        Leaf(token.NAME, u'install_aliases')]),
+                                    Node(syms.trailer, [Leaf(token.LPAR, u'('),
+                                                        Leaf(token.RPAR, u')')])
+                                    ])
+                              ]
+                             )
+        children_hooks = [install_hooks, Newline()]
+    else:
+        children_hooks = []
+
+    # FromImport(package, [Leaf(token.NAME, name_to_import, prefix=u" ")])
+
+    children_import = [import_, Newline()]
+    old_prefix = root.children[insert_pos].prefix
+    root.children[insert_pos].prefix = u''
+    root.insert_child(insert_pos, Node(syms.simple_stmt, children_import, prefix=old_prefix))
+    if len(children_hooks) > 0:
+        root.insert_child(insert_pos + 1, Node(syms.simple_stmt, children_hooks))
+
+
+## The following functions are from python-modernize by Armin Ronacher:
+# (a little edited).
+
+def check_future_import(node):
+ """If this is a future import, return set of symbols that are imported,
+ else return None."""
+ # node should be the import statement here
+ savenode = node
+ if not (node.type == syms.simple_stmt and node.children):
+ return set()
+ node = node.children[0]
+ # now node is the import_from node
+ if not (node.type == syms.import_from and
+ # node.type == token.NAME and # seems to break it
+ hasattr(node.children[1], 'value') and
+ node.children[1].value == u'__future__'):
+ return set()
+ if node.children[3].type == token.LPAR:
+ node = node.children[4]
+ else:
+ node = node.children[3]
+ # now node is the import_as_name[s]
+ # print(python_grammar.number2symbol[node.type]) # breaks sometimes
+ if node.type == syms.import_as_names:
+ result = set()
+ for n in node.children:
+ if n.type == token.NAME:
+ result.add(n.value)
+ elif n.type == syms.import_as_name:
+ n = n.children[0]
+ assert n.type == token.NAME
+ result.add(n.value)
+ return result
+ elif node.type == syms.import_as_name:
+ node = node.children[0]
+ assert node.type == token.NAME
+ return set([node.value])
+ elif node.type == token.NAME:
+ return set([node.value])
+ else:
+ # TODO: handle brackets like this:
+ # from __future__ import (absolute_import, division)
+ assert False, "strange import: %s" % savenode
+
+
+SHEBANG_REGEX = r'^#!.*python'
+ENCODING_REGEX = r"^#.*coding[:=]\s*([-\w.]+)"
+
+
+def is_shebang_comment(node):
+ """
+ Comments are prefixes for Leaf nodes. Returns whether the given node has a
+    prefix that looks like a shebang line:
+
+ #!/usr/bin/env python
+ #!/usr/bin/python3
+ """
+ return bool(re.match(SHEBANG_REGEX, node.prefix))
+
+
+def is_encoding_comment(node):
+ """
+ Comments are prefixes for Leaf nodes. Returns whether the given node has a
+ prefix that looks like an encoding line:
+
+ # coding: utf-8
+ # encoding: utf-8
+ # -*- coding: -*-
+ # vim: set fileencoding= :
+ """
+ return bool(re.match(ENCODING_REGEX, node.prefix))
+
+
+def wrap_in_fn_call(fn_name, args, prefix=None):
+ """
+ Example:
+ >>> wrap_in_fn_call("oldstr", (arg,))
+ oldstr(arg)
+
+ >>> wrap_in_fn_call("olddiv", (arg1, arg2))
+ olddiv(arg1, arg2)
+
+ >>> wrap_in_fn_call("olddiv", [arg1, comma, arg2, comma, arg3])
+ olddiv(arg1, arg2, arg3)
+ """
+ assert len(args) > 0
+ if len(args) == 2:
+ expr1, expr2 = args
+ newargs = [expr1, Comma(), expr2]
+ else:
+ newargs = args
+ return Call(Name(fn_name), newargs, prefix=prefix)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/__init__.py b/src/clyphx/vendor/future/libfuturize/fixes/__init__.py
new file mode 100644
index 0000000..0b56250
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/__init__.py
@@ -0,0 +1,97 @@
+import sys
+from lib2to3 import refactor
+
+# The following fixers are "safe": they convert Python 2 code to more
+# modern Python 2 code. They should be uncontroversial to apply to most
+# projects that are happy to drop support for Py2.5 and below. Applying
+# them first will reduce the size of the patch set for the real porting.
+lib2to3_fix_names_stage1 = set([
+ 'lib2to3.fixes.fix_apply',
+ 'lib2to3.fixes.fix_except',
+ 'lib2to3.fixes.fix_exec',
+ 'lib2to3.fixes.fix_exitfunc',
+ 'lib2to3.fixes.fix_funcattrs',
+ 'lib2to3.fixes.fix_has_key',
+ 'lib2to3.fixes.fix_idioms',
+    # 'lib2to3.fixes.fix_import', # makes any implicit relative imports explicit. (Use with ``from __future__ import absolute_import``)
+ 'lib2to3.fixes.fix_intern',
+ 'lib2to3.fixes.fix_isinstance',
+ 'lib2to3.fixes.fix_methodattrs',
+ 'lib2to3.fixes.fix_ne',
+ # 'lib2to3.fixes.fix_next', # would replace ``next`` method names
+ # with ``__next__``.
+ 'lib2to3.fixes.fix_numliterals', # turns 1L into 1, 0755 into 0o755
+ 'lib2to3.fixes.fix_paren',
+ # 'lib2to3.fixes.fix_print', # see the libfuturize fixer that also
+ # adds ``from __future__ import print_function``
+ # 'lib2to3.fixes.fix_raise', # uses incompatible with_traceback() method on exceptions
+ 'lib2to3.fixes.fix_reduce', # reduce is available in functools on Py2.6/Py2.7
+ 'lib2to3.fixes.fix_renames', # sys.maxint -> sys.maxsize
+ # 'lib2to3.fixes.fix_set_literal', # this is unnecessary and breaks Py2.6 support
+ 'lib2to3.fixes.fix_repr',
+ 'lib2to3.fixes.fix_standarderror',
+ 'lib2to3.fixes.fix_sys_exc',
+ 'lib2to3.fixes.fix_throw',
+ 'lib2to3.fixes.fix_tuple_params',
+ 'lib2to3.fixes.fix_types',
+ 'lib2to3.fixes.fix_ws_comma', # can perhaps decrease readability: see issue #58
+ 'lib2to3.fixes.fix_xreadlines',
+])
+
+# The following fixers add a dependency on the ``future`` package in order to
+# support Python 2:
+lib2to3_fix_names_stage2 = set([
+ # 'lib2to3.fixes.fix_buffer', # perhaps not safe. Test this.
+ # 'lib2to3.fixes.fix_callable', # not needed in Py3.2+
+ 'lib2to3.fixes.fix_dict', # TODO: add support for utils.viewitems() etc. and move to stage2
+ # 'lib2to3.fixes.fix_execfile', # some problems: see issue #37.
+ # We use a custom fixer instead (see below)
+ # 'lib2to3.fixes.fix_future', # we don't want to remove __future__ imports
+ 'lib2to3.fixes.fix_getcwdu',
+ # 'lib2to3.fixes.fix_imports', # called by libfuturize.fixes.fix_future_standard_library
+ # 'lib2to3.fixes.fix_imports2', # we don't handle this yet (dbm)
+ # 'lib2to3.fixes.fix_input', # Called conditionally by libfuturize.fixes.fix_input
+ 'lib2to3.fixes.fix_itertools',
+ 'lib2to3.fixes.fix_itertools_imports',
+ 'lib2to3.fixes.fix_filter',
+ 'lib2to3.fixes.fix_long',
+ 'lib2to3.fixes.fix_map',
+ # 'lib2to3.fixes.fix_metaclass', # causes SyntaxError in Py2! Use the one from ``six`` instead
+ 'lib2to3.fixes.fix_next',
+ 'lib2to3.fixes.fix_nonzero', # TODO: cause this to import ``object`` and/or add a decorator for mapping __bool__ to __nonzero__
+ 'lib2to3.fixes.fix_operator', # we will need support for this by e.g. extending the Py2 operator module to provide those functions in Py3
+ 'lib2to3.fixes.fix_raw_input',
+ # 'lib2to3.fixes.fix_unicode', # strips off the u'' prefix, which removes a potentially helpful source of information for disambiguating unicode/byte strings
+ # 'lib2to3.fixes.fix_urllib', # included in libfuturize.fix_future_standard_library_urllib
+ # 'lib2to3.fixes.fix_xrange', # custom one because of a bug with Py3.3's lib2to3
+ 'lib2to3.fixes.fix_zip',
+])
+
+libfuturize_fix_names_stage1 = set([
+ 'libfuturize.fixes.fix_absolute_import',
+ 'libfuturize.fixes.fix_next_call', # obj.next() -> next(obj). Unlike
+ # lib2to3.fixes.fix_next, doesn't change
+ # the ``next`` method to ``__next__``.
+ 'libfuturize.fixes.fix_print_with_import',
+ 'libfuturize.fixes.fix_raise',
+ # 'libfuturize.fixes.fix_order___future__imports', # TODO: consolidate to a single line to simplify testing
+])
+
+libfuturize_fix_names_stage2 = set([
+ 'libfuturize.fixes.fix_basestring',
+ # 'libfuturize.fixes.fix_add__future__imports_except_unicode_literals', # just in case
+ 'libfuturize.fixes.fix_cmp',
+ 'libfuturize.fixes.fix_division_safe',
+ 'libfuturize.fixes.fix_execfile',
+ 'libfuturize.fixes.fix_future_builtins',
+ 'libfuturize.fixes.fix_future_standard_library',
+ 'libfuturize.fixes.fix_future_standard_library_urllib',
+ 'libfuturize.fixes.fix_input',
+ 'libfuturize.fixes.fix_metaclass',
+ 'libpasteurize.fixes.fix_newstyle',
+ 'libfuturize.fixes.fix_object',
+ # 'libfuturize.fixes.fix_order___future__imports', # TODO: consolidate to a single line to simplify testing
+ 'libfuturize.fixes.fix_unicode_keep_u',
+ # 'libfuturize.fixes.fix_unicode_literals_import',
+ 'libfuturize.fixes.fix_xrange_with_import', # custom one because of a bug with Py3.3's lib2to3
+])
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_UserDict.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_UserDict.py
new file mode 100644
index 0000000..cb0cfac
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_UserDict.py
@@ -0,0 +1,102 @@
+"""Fix UserDict.
+
+Incomplete!
+
+TODO: base this on fix_urllib perhaps?
+"""
+
+
+# Local imports
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Name, attr_chain
+from lib2to3.fixes.fix_imports import alternates, build_pattern, FixImports
+
+MAPPING = {'UserDict': 'collections',
+}
+
+# def alternates(members):
+# return "(" + "|".join(map(repr, members)) + ")"
+#
+#
+# def build_pattern(mapping=MAPPING):
+# mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
+# bare_names = alternates(mapping.keys())
+#
+# yield """name_import=import_name< 'import' ((%s) |
+# multiple_imports=dotted_as_names< any* (%s) any* >) >
+# """ % (mod_list, mod_list)
+# yield """import_from< 'from' (%s) 'import' ['(']
+# ( any | import_as_name< any 'as' any > |
+# import_as_names< any* >) [')'] >
+# """ % mod_list
+# yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
+# multiple_imports=dotted_as_names<
+# any* dotted_as_name< (%s) 'as' any > any* >) >
+# """ % (mod_list, mod_list)
+#
+# # Find usages of module members in code e.g. thread.foo(bar)
+# yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
+
+
+# class FixUserDict(fixer_base.BaseFix):
+class FixUserdict(FixImports):
+
+ BM_compatible = True
+ keep_line_order = True
+ # This is overridden in fix_imports2.
+ mapping = MAPPING
+
+ # We want to run this fixer late, so fix_import doesn't try to make stdlib
+ # renames into relative imports.
+ run_order = 6
+
+ def build_pattern(self):
+ return "|".join(build_pattern(self.mapping))
+
+ def compile_pattern(self):
+        # We override this, so MAPPING can be programmatically altered and the
+ # changes will be reflected in PATTERN.
+ self.PATTERN = self.build_pattern()
+ super(FixImports, self).compile_pattern()
+
+ # Don't match the node if it's within another match.
+ def match(self, node):
+ match = super(FixImports, self).match
+ results = match(node)
+ if results:
+ # Module usage could be in the trailer of an attribute lookup, so we
+ # might have nested matches when "bare_with_attr" is present.
+ if "bare_with_attr" not in results and \
+ any(match(obj) for obj in attr_chain(node, "parent")):
+ return False
+ return results
+ return False
+
+ def start_tree(self, tree, filename):
+ super(FixImports, self).start_tree(tree, filename)
+ self.replace = {}
+
+ def transform(self, node, results):
+ import_mod = results.get("module_name")
+ if import_mod:
+ mod_name = import_mod.value
+ new_name = unicode(self.mapping[mod_name])
+ import_mod.replace(Name(new_name, prefix=import_mod.prefix))
+ if "name_import" in results:
+ # If it's not a "from x import x, y" or "import x as y" import,
+            # mark its usage to be replaced.
+ self.replace[mod_name] = new_name
+ if "multiple_imports" in results:
+ # This is a nasty hack to fix multiple imports on a line (e.g.,
+ # "import StringIO, urlparse"). The problem is that I can't
+ # figure out an easy way to make a pattern recognize the keys of
+ # MAPPING randomly sprinkled in an import statement.
+ results = self.match(node)
+ if results:
+ self.transform(node, results)
+ else:
+ # Replace usage of the module.
+ bare_name = results["bare_with_attr"][0]
+ new_name = self.replace.get(bare_name.value)
+ if new_name:
+ bare_name.replace(Name(new_name, prefix=bare_name.prefix))
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_absolute_import.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_absolute_import.py
new file mode 100644
index 0000000..eab9c52
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_absolute_import.py
@@ -0,0 +1,91 @@
+"""
+Fixer for import statements, with a __future__ import line.
+
+Based on lib2to3/fixes/fix_import.py, but extended slightly so it also
+supports Cython modules.
+
+If spam is being imported from the local directory, this import:
+ from spam import eggs
+becomes:
+ from __future__ import absolute_import
+ from .spam import eggs
+
+and this import:
+ import spam
+becomes:
+ from __future__ import absolute_import
+ from . import spam
+"""
+
+from os.path import dirname, join, exists, sep
+from lib2to3.fixes.fix_import import FixImport
+from lib2to3.fixer_util import FromImport, syms
+from lib2to3.fixes.fix_import import traverse_imports
+
+from libfuturize.fixer_util import future_import
+
+
+class FixAbsoluteImport(FixImport):
+ run_order = 9
+
+ def transform(self, node, results):
+ """
+ Copied from FixImport.transform(), but with this line added in
+ any modules that had implicit relative imports changed:
+
+ from __future__ import absolute_import"
+ """
+ if self.skip:
+ return
+ imp = results['imp']
+
+ if node.type == syms.import_from:
+ # Some imps are top-level (eg: 'import ham')
+ # some are first level (eg: 'import ham.eggs')
+ # some are third level (eg: 'import ham.eggs as spam')
+ # Hence, the loop
+ while not hasattr(imp, 'value'):
+ imp = imp.children[0]
+ if self.probably_a_local_import(imp.value):
+ imp.value = u"." + imp.value
+ imp.changed()
+ future_import(u"absolute_import", node)
+ else:
+ have_local = False
+ have_absolute = False
+ for mod_name in traverse_imports(imp):
+ if self.probably_a_local_import(mod_name):
+ have_local = True
+ else:
+ have_absolute = True
+ if have_absolute:
+ if have_local:
+ # We won't handle both sibling and absolute imports in the
+ # same statement at the moment.
+ self.warning(node, "absolute and local imports together")
+ return
+
+ new = FromImport(u".", [imp])
+ new.prefix = node.prefix
+ future_import(u"absolute_import", node)
+ return new
+
+ def probably_a_local_import(self, imp_name):
+ """
+ Like the corresponding method in the base class, but this also
+ supports Cython modules.
+ """
+ if imp_name.startswith(u"."):
+ # Relative imports are certainly not local imports.
+ return False
+ imp_name = imp_name.split(u".", 1)[0]
+ base_path = dirname(self.filename)
+ base_path = join(base_path, imp_name)
+        # If there is no __init__.py next to the file it's not in a package
+ # so can't be a relative import.
+ if not exists(join(dirname(base_path), "__init__.py")):
+ return False
+ for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd", ".pyx"]:
+ if exists(base_path + ext):
+ return True
+ return False
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
new file mode 100644
index 0000000..37d7fee
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
@@ -0,0 +1,26 @@
+"""
+Fixer for adding:
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+This is "stage 1": hopefully uncontroversial changes.
+
+Stage 2 adds ``unicode_literals``.
+"""
+
+from lib2to3 import fixer_base
+from libfuturize.fixer_util import future_import
+
+class FixAddFutureImportsExceptUnicodeLiterals(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "file_input"
+
+ run_order = 9
+
+ def transform(self, node, results):
+ # Reverse order:
+ future_import(u"absolute_import", node)
+ future_import(u"division", node)
+ future_import(u"print_function", node)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_basestring.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_basestring.py
new file mode 100644
index 0000000..5676d08
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_basestring.py
@@ -0,0 +1,17 @@
+"""
+Fixer that adds ``from past.builtins import basestring`` if there is a
+reference to ``basestring``
+"""
+
+from lib2to3 import fixer_base
+
+from libfuturize.fixer_util import touch_import_top
+
+
+class FixBasestring(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = "'basestring'"
+
+ def transform(self, node, results):
+ touch_import_top(u'past.builtins', 'basestring', node)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_bytes.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_bytes.py
new file mode 100644
index 0000000..4202122
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_bytes.py
@@ -0,0 +1,24 @@
+"""Optional fixer that changes all unprefixed string literals "..." to b"...".
+
+br'abcd' is a SyntaxError on Python 2 but valid on Python 3.
+ur'abcd' is a SyntaxError on Python 3 but valid on Python 2.
+
+"""
+from __future__ import unicode_literals
+
+import re
+from lib2to3.pgen2 import token
+from lib2to3 import fixer_base
+
+_literal_re = re.compile(r"[^bBuUrR]?[\'\"]")
+
+class FixBytes(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "STRING"
+
+ def transform(self, node, results):
+ if node.type == token.STRING:
+ if _literal_re.match(node.value):
+ new = node.clone()
+ new.value = u'b' + new.value
+ return new
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_cmp.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_cmp.py
new file mode 100644
index 0000000..762eb4b
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_cmp.py
@@ -0,0 +1,33 @@
+# coding: utf-8
+"""
+Fixer for the cmp() function on Py2, which was removed in Py3.
+
+Adds this import line::
+
+ from past.builtins import cmp
+
+if cmp() is called in the code.
+"""
+
+from __future__ import unicode_literals
+from lib2to3 import fixer_base
+
+from libfuturize.fixer_util import touch_import_top
+
+
+expression = "name='cmp'"
+
+
+class FixCmp(fixer_base.BaseFix):
+ BM_compatible = True
+ run_order = 9
+
+ PATTERN = """
+ power<
+ ({0}) trailer< '(' args=[any] ')' >
+ rest=any* >
+ """.format(expression)
+
+ def transform(self, node, results):
+ name = results["name"]
+ touch_import_top(u'past.builtins', name.value, node)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_division.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_division.py
new file mode 100644
index 0000000..6975a52
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_division.py
@@ -0,0 +1,12 @@
+"""
+UNFINISHED
+For the ``future`` package.
+
+Adds this import line:
+
+ from __future__ import division
+
+at the top so the code runs identically on Py3 and Py2.6/2.7
+"""
+
+from libpasteurize.fixes.fix_division import FixDivision
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_division_safe.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_division_safe.py
new file mode 100644
index 0000000..3d5909c
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_division_safe.py
@@ -0,0 +1,104 @@
+"""
+For the ``future`` package.
+
+Adds this import line:
+
+ from __future__ import division
+
+at the top and changes any old-style divisions to be calls to
+past.utils.old_div so the code runs as before on Py2.6/2.7 and has the same
+behaviour on Py3.
+
+If "from __future__ import division" is already in effect, this fixer does
+nothing.
+"""
+
+import re
+from lib2to3.fixer_util import Leaf, Node, Comma
+from lib2to3 import fixer_base
+from libfuturize.fixer_util import (token, future_import, touch_import_top,
+ wrap_in_fn_call)
+
+
+def match_division(node):
+ u"""
+ __future__.division redefines the meaning of a single slash for division,
+ so we match that and only that.
+ """
+ slash = token.SLASH
+ return node.type == slash and not node.next_sibling.type == slash and \
+ not node.prev_sibling.type == slash
+
+const_re = re.compile('^[0-9]*[.][0-9]*$')
+
+def is_floaty(node):
+ return _is_floaty(node.prev_sibling) or _is_floaty(node.next_sibling)
+
+
+def _is_floaty(expr):
+ if isinstance(expr, list):
+ expr = expr[0]
+
+ if isinstance(expr, Leaf):
+ # If it's a leaf, let's see if it's a numeric constant containing a '.'
+ return const_re.match(expr.value)
+ elif isinstance(expr, Node):
+ # If the expression is a node, let's see if it's a direct cast to float
+ if isinstance(expr.children[0], Leaf):
+ return expr.children[0].value == u'float'
+ return False
+
+
+class FixDivisionSafe(fixer_base.BaseFix):
+ # BM_compatible = True
+ run_order = 4 # this seems to be ignored?
+
+ _accept_type = token.SLASH
+
+ PATTERN = """
+ term<(not('/') any)+ '/' ((not('/') any))>
+ """
+
+ def start_tree(self, tree, name):
+ """
+ Skip this fixer if "__future__.division" is already imported.
+ """
+ super(FixDivisionSafe, self).start_tree(tree, name)
+ self.skip = "division" in tree.future_features
+
+ def match(self, node):
+ u"""
+ Since the tree needs to be fixed once and only once if and only if it
+ matches, we can start discarding matches after the first.
+ """
+ if node.type == self.syms.term:
+ matched = False
+ skip = False
+ children = []
+ for child in node.children:
+ if skip:
+ skip = False
+ continue
+ if match_division(child) and not is_floaty(child):
+ matched = True
+
+ # Strip any leading space for the first number:
+ children[0].prefix = u''
+
+ children = [wrap_in_fn_call("old_div",
+ children + [Comma(), child.next_sibling.clone()],
+ prefix=node.prefix)]
+ skip = True
+ else:
+ children.append(child.clone())
+ if matched:
+ return Node(node.type, children, fixers_applied=node.fixers_applied)
+
+ return False
+
+ def transform(self, node, results):
+ if self.skip:
+ return
+ future_import(u"division", node)
+ touch_import_top(u'past.utils', u'old_div', node)
+ return results
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_execfile.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_execfile.py
new file mode 100644
index 0000000..cfe9d8d
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_execfile.py
@@ -0,0 +1,37 @@
+# coding: utf-8
+"""
+Fixer for the execfile() function on Py2, which was removed in Py3.
+
+The Lib/lib2to3/fixes/fix_execfile.py module has some problems: see
+python-future issue #37. This fixer merely imports execfile() from
+past.builtins and leaves the code alone.
+
+Adds this import line::
+
+ from past.builtins import execfile
+
+for the function execfile() that was removed from Py3.
+"""
+
+from __future__ import unicode_literals
+from lib2to3 import fixer_base
+
+from libfuturize.fixer_util import touch_import_top
+
+
+expression = "name='execfile'"
+
+
+class FixExecfile(fixer_base.BaseFix):
+ BM_compatible = True
+ run_order = 9
+
+ PATTERN = """
+ power<
+ ({0}) trailer< '(' args=[any] ')' >
+ rest=any* >
+ """.format(expression)
+
+ def transform(self, node, results):
+ name = results["name"]
+ touch_import_top(u'past.builtins', name.value, node)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_future_builtins.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_future_builtins.py
new file mode 100644
index 0000000..eea6c6a
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_future_builtins.py
@@ -0,0 +1,59 @@
+"""
+For the ``future`` package.
+
+Adds this import line::
+
+ from builtins import XYZ
+
+for each of the functions XYZ that is used in the module.
+
+Adds these imports after any other imports (in an initial block of them).
+"""
+
+from __future__ import unicode_literals
+
+from lib2to3 import fixer_base
+from lib2to3.pygram import python_symbols as syms
+from lib2to3.fixer_util import Name, Call, in_special_context
+
+from libfuturize.fixer_util import touch_import_top
+
+# All builtins are:
+# from future.builtins.iterators import (filter, map, zip)
+# from future.builtins.misc import (ascii, chr, hex, input, isinstance, oct, open, round, super)
+# from future.types import (bytes, dict, int, range, str)
+# We don't need isinstance any more.
+
+replaced_builtin_fns = '''filter map zip
+ ascii chr hex input next oct
+ bytes range str raw_input'''.split()
+ # This includes raw_input as a workaround for the
+ # lib2to3 fixer for raw_input on Py3 (only), allowing
+ # the correct import to be included. (Py3 seems to run
+ # the fixers the wrong way around, perhaps ignoring the
+ # run_order class attribute below ...)
+
+expression = '|'.join(["name='{0}'".format(name) for name in replaced_builtin_fns])
+
+
+class FixFutureBuiltins(fixer_base.BaseFix):
+ BM_compatible = True
+ run_order = 7
+
+ # Currently we only match uses as a function. This doesn't match e.g.:
+ # if isinstance(s, str):
+ # ...
+ PATTERN = """
+ power<
+ ({0}) trailer< '(' [arglist=any] ')' >
+ rest=any* >
+ |
+ power<
+ 'map' trailer< '(' [arglist=any] ')' >
+ >
+ """.format(expression)
+
+ def transform(self, node, results):
+ name = results["name"]
+ touch_import_top(u'builtins', name.value, node)
+ # name.replace(Name(u"input", prefix=name.prefix))
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_future_standard_library.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_future_standard_library.py
new file mode 100644
index 0000000..a1c3f3d
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_future_standard_library.py
@@ -0,0 +1,24 @@
+"""
+For the ``future`` package.
+
+Changes any imports needed to reflect the standard library reorganization.
+Also adds these import lines:
+
+ from future import standard_library
+ standard_library.install_aliases()
+
+after any __future__ imports but before any other imports.
+"""
+
+from lib2to3.fixes.fix_imports import FixImports
+from libfuturize.fixer_util import touch_import_top
+
+
+class FixFutureStandardLibrary(FixImports):
+ run_order = 8
+
+ def transform(self, node, results):
+ result = super(FixFutureStandardLibrary, self).transform(node, results)
+ # TODO: add a blank line between any __future__ imports and this?
+ touch_import_top(u'future', u'standard_library', node)
+ return result
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_future_standard_library_urllib.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_future_standard_library_urllib.py
new file mode 100644
index 0000000..cf67388
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_future_standard_library_urllib.py
@@ -0,0 +1,28 @@
+"""
+For the ``future`` package.
+
+A special fixer that ensures that these lines have been added::
+
+ from future import standard_library
+    standard_library.install_aliases()
+
+even if the only module imported was ``urllib``, in which case the regular fixer
+wouldn't have added these lines.
+
+"""
+
+from lib2to3.fixes.fix_urllib import FixUrllib
+from libfuturize.fixer_util import touch_import_top, find_root
+
+
+class FixFutureStandardLibraryUrllib(FixUrllib): # not a subclass of FixImports
+ run_order = 8
+
+ def transform(self, node, results):
+ # transform_member() in lib2to3/fixes/fix_urllib.py breaks node so find_root(node)
+ # no longer works after the super() call below. So we find the root first:
+ root = find_root(node)
+ result = super(FixFutureStandardLibraryUrllib, self).transform(node, results)
+ # TODO: add a blank line between any __future__ imports and this?
+ touch_import_top(u'future', u'standard_library', root)
+ return result
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_input.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_input.py
new file mode 100644
index 0000000..8a43882
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_input.py
@@ -0,0 +1,32 @@
+"""
+Fixer for input.
+
+Does a check for `from builtins import input` before running the lib2to3 fixer.
+The fixer will not run when the input is already present.
+
+
+this:
+ a = input()
+becomes:
+ from builtins import input
+ a = eval(input())
+
+and this:
+ from builtins import input
+ a = input()
+becomes (no change):
+ from builtins import input
+ a = input()
+"""
+
+import lib2to3.fixes.fix_input
+from lib2to3.fixer_util import does_tree_import
+
+
+class FixInput(lib2to3.fixes.fix_input.FixInput):
+ def transform(self, node, results):
+
+ if does_tree_import('builtins', 'input', node):
+ return
+
+ return super(FixInput, self).transform(node, results)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_metaclass.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_metaclass.py
new file mode 100644
index 0000000..2ac41c9
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_metaclass.py
@@ -0,0 +1,262 @@
+# coding: utf-8
+"""Fixer for __metaclass__ = X -> (future.utils.with_metaclass(X)) methods.
+
+ The various forms of classdef (inherits nothing, inherits once, inherits
+ many) don't parse the same in the CST so we look at ALL classes for
+ a __metaclass__ and if we find one normalize the inherits to all be
+ an arglist.
+
+ For one-liner classes ('class X: pass') there is no indent/dedent so
+ we normalize those into having a suite.
+
+ Moving the __metaclass__ into the classdef can also cause the class
+ body to be empty so there is some special casing for that as well.
+
+ This fixer also tries very hard to keep original indenting and spacing
+ in all those corner cases.
+"""
+# This is a derived work of Lib/lib2to3/fixes/fix_metaclass.py under the
+# copyright of the Python Software Foundation, licensed under the Python
+# Software Foundation License 2.
+#
+# Copyright notice:
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013 Python Software Foundation. All rights reserved.
+#
+# Full license text: http://docs.python.org/3.4/license.html
+
+# Author: Jack Diederich, Daniel Neuhäuser
+
+# Local imports
+from lib2to3 import fixer_base
+from lib2to3.pygram import token
+from lib2to3.fixer_util import Name, syms, Node, Leaf, touch_import, Call, \
+ String, Comma, parenthesize
+
+
+def has_metaclass(parent):
+ """ we have to check the cls_node without changing it.
+ There are two possibilities:
+ 1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
+ 2) clsdef => simple_stmt => expr_stmt => Leaf('__meta')
+ """
+ for node in parent.children:
+ if node.type == syms.suite:
+ return has_metaclass(node)
+ elif node.type == syms.simple_stmt and node.children:
+ expr_node = node.children[0]
+ if expr_node.type == syms.expr_stmt and expr_node.children:
+ left_side = expr_node.children[0]
+ if isinstance(left_side, Leaf) and \
+ left_side.value == '__metaclass__':
+ return True
+ return False
+
+
+def fixup_parse_tree(cls_node):
+ """ one-line classes don't get a suite in the parse tree so we add
+ one to normalize the tree
+ """
+ for node in cls_node.children:
+ if node.type == syms.suite:
+ # already in the preferred format, do nothing
+ return
+
+ # !%@#! oneliners have no suite node, we have to fake one up
+ for i, node in enumerate(cls_node.children):
+ if node.type == token.COLON:
+ break
+ else:
+ raise ValueError("No class suite and no ':'!")
+
+ # move everything into a suite node
+ suite = Node(syms.suite, [])
+ while cls_node.children[i+1:]:
+ move_node = cls_node.children[i+1]
+ suite.append_child(move_node.clone())
+ move_node.remove()
+ cls_node.append_child(suite)
+ node = suite
+
+
+def fixup_simple_stmt(parent, i, stmt_node):
+ """ if there is a semi-colon all the parts count as part of the same
+ simple_stmt. We just want the __metaclass__ part so we move
+ everything after the semi-colon into its own simple_stmt node
+ """
+ for semi_ind, node in enumerate(stmt_node.children):
+ if node.type == token.SEMI: # *sigh*
+ break
+ else:
+ return
+
+ node.remove() # kill the semicolon
+ new_expr = Node(syms.expr_stmt, [])
+ new_stmt = Node(syms.simple_stmt, [new_expr])
+ while stmt_node.children[semi_ind:]:
+ move_node = stmt_node.children[semi_ind]
+ new_expr.append_child(move_node.clone())
+ move_node.remove()
+ parent.insert_child(i, new_stmt)
+ new_leaf1 = new_stmt.children[0].children[0]
+ old_leaf1 = stmt_node.children[0].children[0]
+ new_leaf1.prefix = old_leaf1.prefix
+
+
+def remove_trailing_newline(node):
+ if node.children and node.children[-1].type == token.NEWLINE:
+ node.children[-1].remove()
+
+
+def find_metas(cls_node):
+ # find the suite node (Mmm, sweet nodes)
+ for node in cls_node.children:
+ if node.type == syms.suite:
+ break
+ else:
+ raise ValueError("No class suite!")
+
+ # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
+ for i, simple_node in list(enumerate(node.children)):
+ if simple_node.type == syms.simple_stmt and simple_node.children:
+ expr_node = simple_node.children[0]
+ if expr_node.type == syms.expr_stmt and expr_node.children:
+ # Check if the expr_node is a simple assignment.
+ left_node = expr_node.children[0]
+ if isinstance(left_node, Leaf) and \
+ left_node.value == u'__metaclass__':
+ # We found an assignment to __metaclass__.
+ fixup_simple_stmt(node, i, simple_node)
+ remove_trailing_newline(simple_node)
+ yield (node, i, simple_node)
+
+
+def fixup_indent(suite):
+ """ If an INDENT is followed by a thing with a prefix then nuke the prefix
+ Otherwise we get in trouble when removing __metaclass__ at suite start
+ """
+ kids = suite.children[::-1]
+ # find the first indent
+ while kids:
+ node = kids.pop()
+ if node.type == token.INDENT:
+ break
+
+ # find the first Leaf
+ while kids:
+ node = kids.pop()
+ if isinstance(node, Leaf) and node.type != token.DEDENT:
+ if node.prefix:
+ node.prefix = u''
+ return
+ else:
+ kids.extend(node.children[::-1])
+
+
+class FixMetaclass(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ classdef
+ """
+
+ def transform(self, node, results):
+ if not has_metaclass(node):
+ return
+
+ fixup_parse_tree(node)
+
+ # find metaclasses, keep the last one
+ last_metaclass = None
+ for suite, i, stmt in find_metas(node):
+ last_metaclass = stmt
+ stmt.remove()
+
+ text_type = node.children[0].type # always Leaf(nnn, 'class')
+
+ # figure out what kind of classdef we have
+ if len(node.children) == 7:
+ # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
+ # 0 1 2 3 4 5 6
+ if node.children[3].type == syms.arglist:
+ arglist = node.children[3]
+ # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
+ else:
+ parent = node.children[3].clone()
+ arglist = Node(syms.arglist, [parent])
+ node.set_child(3, arglist)
+ elif len(node.children) == 6:
+ # Node(classdef, ['class', 'name', '(', ')', ':', suite])
+ # 0 1 2 3 4 5
+ arglist = Node(syms.arglist, [])
+ node.insert_child(3, arglist)
+ elif len(node.children) == 4:
+ # Node(classdef, ['class', 'name', ':', suite])
+ # 0 1 2 3
+ arglist = Node(syms.arglist, [])
+ node.insert_child(2, Leaf(token.RPAR, u')'))
+ node.insert_child(2, arglist)
+ node.insert_child(2, Leaf(token.LPAR, u'('))
+ else:
+ raise ValueError("Unexpected class definition")
+
+ # now stick the metaclass in the arglist
+ meta_txt = last_metaclass.children[0].children[0]
+ meta_txt.value = 'metaclass'
+ orig_meta_prefix = meta_txt.prefix
+
+ # Was: touch_import(None, u'future.utils', node)
+ touch_import(u'future.utils', u'with_metaclass', node)
+
+ metaclass = last_metaclass.children[0].children[2].clone()
+ metaclass.prefix = u''
+
+ arguments = [metaclass]
+
+ if arglist.children:
+ if len(arglist.children) == 1:
+ base = arglist.children[0].clone()
+ base.prefix = u' '
+ else:
+ # Unfortunately six.with_metaclass() only allows one base
+ # class, so we have to dynamically generate a base class if
+ # there is more than one.
+ bases = parenthesize(arglist.clone())
+ bases.prefix = u' '
+ base = Call(Name('type'), [
+ String("'NewBase'"),
+ Comma(),
+ bases,
+ Comma(),
+ Node(
+ syms.atom,
+ [Leaf(token.LBRACE, u'{'), Leaf(token.RBRACE, u'}')],
+ prefix=u' '
+ )
+ ], prefix=u' ')
+ arguments.extend([Comma(), base])
+
+ arglist.replace(Call(
+ Name(u'with_metaclass', prefix=arglist.prefix),
+ arguments
+ ))
+
+ fixup_indent(suite)
+
+ # check for empty suite
+ if not suite.children:
+ # one-liner that was just __metaclass__
+ suite.remove()
+ pass_leaf = Leaf(text_type, u'pass')
+ pass_leaf.prefix = orig_meta_prefix
+ node.append_child(pass_leaf)
+ node.append_child(Leaf(token.NEWLINE, u'\n'))
+
+ elif len(suite.children) > 1 and \
+ (suite.children[-2].type == token.INDENT and
+ suite.children[-1].type == token.DEDENT):
+ # there was only one line in the class body and it was __metaclass__
+ pass_leaf = Leaf(text_type, u'pass')
+ suite.insert_child(-1, pass_leaf)
+ suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_next_call.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_next_call.py
new file mode 100644
index 0000000..282f185
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_next_call.py
@@ -0,0 +1,104 @@
+"""
+Based on fix_next.py by Collin Winter.
+
+Replaces it.next() -> next(it), per PEP 3114.
+
+Unlike fix_next.py, this fixer doesn't replace the name of a next method with __next__,
+which would break Python 2 compatibility without further help from fixers in
+stage 2.
+"""
+
+# Local imports
+from lib2to3.pgen2 import token
+from lib2to3.pygram import python_symbols as syms
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Name, Call, find_binding
+
+bind_warning = "Calls to builtin next() possibly shadowed by global binding"
+
+
+class FixNextCall(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
+ |
+ power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
+ |
+ global=global_stmt< 'global' any* 'next' any* >
+ """
+
+ order = "pre" # Pre-order tree traversal
+
+ def start_tree(self, tree, filename):
+ super(FixNextCall, self).start_tree(tree, filename)
+
+ n = find_binding('next', tree)
+ if n:
+ self.warning(n, bind_warning)
+ self.shadowed_next = True
+ else:
+ self.shadowed_next = False
+
+ def transform(self, node, results):
+ assert results
+
+ base = results.get("base")
+ attr = results.get("attr")
+ name = results.get("name")
+
+ if base:
+ if self.shadowed_next:
+ # Omit this:
+ # attr.replace(Name("__next__", prefix=attr.prefix))
+ pass
+ else:
+ base = [n.clone() for n in base]
+ base[0].prefix = ""
+ node.replace(Call(Name("next", prefix=node.prefix), base))
+ elif name:
+ # Omit this:
+ # n = Name("__next__", prefix=name.prefix)
+ # name.replace(n)
+ pass
+ elif attr:
+ # We don't do this transformation if we're assigning to "x.next".
+ # Unfortunately, it doesn't seem possible to do this in PATTERN,
+ # so it's being done here.
+ if is_assign_target(node):
+ head = results["head"]
+ if "".join([str(n) for n in head]).strip() == '__builtin__':
+ self.warning(node, bind_warning)
+ return
+ # Omit this:
+ # attr.replace(Name("__next__"))
+ elif "global" in results:
+ self.warning(node, bind_warning)
+ self.shadowed_next = True
+
+
+### The following functions help test if node is part of an assignment
+### target.
+
+def is_assign_target(node):
+ assign = find_assign(node)
+ if assign is None:
+ return False
+
+ for child in assign.children:
+ if child.type == token.EQUAL:
+ return False
+ elif is_subtree(child, node):
+ return True
+ return False
+
+def find_assign(node):
+ if node.type == syms.expr_stmt:
+ return node
+ if node.type == syms.simple_stmt or node.parent is None:
+ return None
+ return find_assign(node.parent)
+
+def is_subtree(root, node):
+ if root == node:
+ return True
+ return any(is_subtree(c, node) for c in root.children)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_object.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_object.py
new file mode 100644
index 0000000..accf2c5
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_object.py
@@ -0,0 +1,17 @@
+"""
+Fixer that adds ``from builtins import object`` if there is a line
+like this:
+ class Foo(object):
+"""
+
+from lib2to3 import fixer_base
+
+from libfuturize.fixer_util import touch_import_top
+
+
+class FixObject(fixer_base.BaseFix):
+
+ PATTERN = u"classdef< 'class' NAME '(' name='object' ')' colon=':' any >"
+
+ def transform(self, node, results):
+ touch_import_top(u'builtins', 'object', node)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_oldstr_wrap.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_oldstr_wrap.py
new file mode 100644
index 0000000..ad58771
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_oldstr_wrap.py
@@ -0,0 +1,39 @@
+"""
+For the ``future`` package.
+
+Adds this import line:
+
+ from past.builtins import str as oldstr
+
+at the top and wraps any unadorned string literals 'abc' or explicit byte-string
+literals b'abc' in oldstr() calls so the code has the same behaviour on Py3 as
+on Py2.6/2.7.
+"""
+
+from __future__ import unicode_literals
+import re
+from lib2to3 import fixer_base
+from lib2to3.pgen2 import token
+from lib2to3.fixer_util import syms
+from libfuturize.fixer_util import (future_import, touch_import_top,
+ wrap_in_fn_call)
+
+
+_literal_re = re.compile(r"[^uUrR]?[\'\"]")
+
+
+class FixOldstrWrap(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "STRING"
+
+ def transform(self, node, results):
+ if node.type == token.STRING:
+ touch_import_top(u'past.types', u'oldstr', node)
+ if _literal_re.match(node.value):
+ new = node.clone()
+ # Strip any leading space or comments:
+ # TODO: check: do we really want to do this?
+ new.prefix = u''
+ new.value = u'b' + new.value
+ wrapped = wrap_in_fn_call("oldstr", [new], prefix=node.prefix)
+ return wrapped
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_order___future__imports.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_order___future__imports.py
new file mode 100644
index 0000000..00d7ef6
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_order___future__imports.py
@@ -0,0 +1,36 @@
+"""
+UNFINISHED
+
+Fixer for turning multiple lines like these:
+
+ from __future__ import division
+ from __future__ import absolute_import
+ from __future__ import print_function
+
+into a single line like this:
+
+ from __future__ import (absolute_import, division, print_function)
+
+This helps with testing of ``futurize``.
+"""
+
+from lib2to3 import fixer_base
+from libfuturize.fixer_util import future_import
+
+class FixOrderFutureImports(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "file_input"
+
+ run_order = 10
+
+ # def match(self, node):
+ # """
+ # Match only once per file
+ # """
+ # if hasattr(node, 'type') and node.type == syms.file_input:
+ # return True
+ # return False
+
+ def transform(self, node, results):
+ # TODO # write me
+ pass
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_print.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_print.py
new file mode 100644
index 0000000..247b91b
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_print.py
@@ -0,0 +1,94 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for print.
+
+Change:
+ "print" into "print()"
+ "print ..." into "print(...)"
+ "print(...)" not changed
+ "print ... ," into "print(..., end=' ')"
+ "print >>x, ..." into "print(..., file=x)"
+
+No changes are applied if print_function is imported from __future__
+
+"""
+
+# Local imports
+from lib2to3 import patcomp, pytree, fixer_base
+from lib2to3.pgen2 import token
+from lib2to3.fixer_util import Name, Call, Comma, String
+# from libmodernize import add_future
+
+parend_expr = patcomp.compile_pattern(
+ """atom< '(' [arith_expr|atom|power|term|STRING|NAME] ')' >"""
+ )
+
+
+class FixPrint(fixer_base.BaseFix):
+
+ BM_compatible = True
+
+ PATTERN = """
+ simple_stmt< any* bare='print' any* > | print_stmt
+ """
+
+ def transform(self, node, results):
+ assert results
+
+ bare_print = results.get("bare")
+
+ if bare_print:
+ # Special-case print all by itself.
+ bare_print.replace(Call(Name(u"print"), [],
+ prefix=bare_print.prefix))
+ # The "from __future__ import print_function" declaration is added
+ # by the fix_print_with_import fixer, so we skip it here.
+ # add_future(node, u'print_function')
+ return
+ assert node.children[0] == Name(u"print")
+ args = node.children[1:]
+ if len(args) == 1 and parend_expr.match(args[0]):
+ # We don't want to keep sticking parens around an
+ # already-parenthesised expression.
+ return
+
+ sep = end = file = None
+ if args and args[-1] == Comma():
+ args = args[:-1]
+ end = " "
+ if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, u">>"):
+ assert len(args) >= 2
+ file = args[1].clone()
+ args = args[3:] # Strip a possible comma after the file expression
+ # Now synthesize a print(args, sep=..., end=..., file=...) node.
+ l_args = [arg.clone() for arg in args]
+ if l_args:
+ l_args[0].prefix = u""
+ if sep is not None or end is not None or file is not None:
+ if sep is not None:
+ self.add_kwarg(l_args, u"sep", String(repr(sep)))
+ if end is not None:
+ self.add_kwarg(l_args, u"end", String(repr(end)))
+ if file is not None:
+ self.add_kwarg(l_args, u"file", file)
+ n_stmt = Call(Name(u"print"), l_args)
+ n_stmt.prefix = node.prefix
+
+ # Note that there are corner cases where adding this future-import is
+ # incorrect, for example when the file also has a 'print ()' statement
+ # that was intended to print "()".
+ # add_future(node, u'print_function')
+ return n_stmt
+
+ def add_kwarg(self, l_nodes, s_kwd, n_expr):
+ # XXX All this prefix-setting may lose comments (though rarely)
+ n_expr.prefix = u""
+ n_argument = pytree.Node(self.syms.argument,
+ (Name(s_kwd),
+ pytree.Leaf(token.EQUAL, u"="),
+ n_expr))
+ if l_nodes:
+ l_nodes.append(Comma())
+ n_argument.prefix = u" "
+ l_nodes.append(n_argument)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_print_with_import.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_print_with_import.py
new file mode 100644
index 0000000..3449046
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_print_with_import.py
@@ -0,0 +1,22 @@
+"""
+For the ``future`` package.
+
+Turns any print statements into functions and adds this import line:
+
+ from __future__ import print_function
+
+at the top to retain compatibility with Python 2.6+.
+"""
+
+from libfuturize.fixes.fix_print import FixPrint
+from libfuturize.fixer_util import future_import
+
+class FixPrintWithImport(FixPrint):
+ run_order = 7
+ def transform(self, node, results):
+ # Add the __future__ import first. (Otherwise any shebang or encoding
+ # comment line attached as a prefix to the print statement will be
+ # copied twice and appear twice.)
+ future_import(u'print_function', node)
+ n_stmt = super(FixPrintWithImport, self).transform(node, results)
+ return n_stmt
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_raise.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_raise.py
new file mode 100644
index 0000000..f751841
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_raise.py
@@ -0,0 +1,107 @@
+"""Fixer for 'raise E, V'
+
+From Armin Ronacher's ``python-modernize``.
+
+raise -> raise
+raise E -> raise E
+raise E, 5 -> raise E(5)
+raise E, 5, T -> raise E(5).with_traceback(T)
+raise E, None, T -> raise E.with_traceback(T)
+
+raise (((E, E'), E''), E'''), 5 -> raise E(5)
+raise "foo", V, T -> warns about string exceptions
+
+raise E, (V1, V2) -> raise E(V1, V2)
+raise E, (V1, V2), T -> raise E(V1, V2).with_traceback(T)
+
+
+CAVEATS:
+1) "raise E, V, T" cannot be translated safely in general. If V
+ is not a tuple or a (number, string, None) literal, then:
+
+ raise E, V, T -> from future.utils import raise_
+ raise_(E, V, T)
+"""
+# Author: Collin Winter, Armin Ronacher, Mark Huang
+
+# Local imports
+from lib2to3 import pytree, fixer_base
+from lib2to3.pgen2 import token
+from lib2to3.fixer_util import Name, Call, is_tuple, Comma, Attr, ArgList
+
+from libfuturize.fixer_util import touch_import_top
+
+
+class FixRaise(fixer_base.BaseFix):
+
+ BM_compatible = True
+ PATTERN = """
+ raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
+ """
+
+ def transform(self, node, results):
+ syms = self.syms
+
+ exc = results["exc"].clone()
+ if exc.type == token.STRING:
+ msg = "Python 3 does not support string exceptions"
+ self.cannot_convert(node, msg)
+ return
+
+ # Python 2 supports
+ # raise ((((E1, E2), E3), E4), E5), V
+ # as a synonym for
+ # raise E1, V
+ # Since Python 3 will not support this, we recurse down any tuple
+ # literals, always taking the first element.
+ if is_tuple(exc):
+ while is_tuple(exc):
+ # exc.children[1:-1] is the unparenthesized tuple
+ # exc.children[1].children[0] is the first element of the tuple
+ exc = exc.children[1].children[0].clone()
+ exc.prefix = u" "
+
+ if "tb" in results:
+ tb = results["tb"].clone()
+ else:
+ tb = None
+
+ if "val" in results:
+ val = results["val"].clone()
+ if is_tuple(val):
+ # Assume that exc is a subclass of Exception and call exc(*val).
+ args = [c.clone() for c in val.children[1:-1]]
+ exc = Call(exc, args)
+ elif val.type in (token.NUMBER, token.STRING):
+ # Handle numeric and string literals specially, e.g.
+ # "raise Exception, 5" -> "raise Exception(5)".
+ val.prefix = u""
+ exc = Call(exc, [val])
+ elif val.type == token.NAME and val.value == u"None":
+ # Handle None specially, e.g.
+ # "raise Exception, None" -> "raise Exception".
+ pass
+ else:
+ # val is some other expression. If val evaluates to an instance
+ # of exc, it should just be raised. If val evaluates to None,
+ # a default instance of exc should be raised (as above). If val
+ # evaluates to a tuple, exc(*val) should be called (as
+ # above). Otherwise, exc(val) should be called. We can only
+ # tell what to do at runtime, so defer to future.utils.raise_(),
+ # which handles all of these cases.
+ touch_import_top(u"future.utils", u"raise_", node)
+ exc.prefix = u""
+ args = [exc, Comma(), val]
+ if tb is not None:
+ args += [Comma(), tb]
+ return Call(Name(u"raise_"), args)
+
+ if tb is not None:
+ tb.prefix = ""
+ exc_list = Attr(exc, Name('with_traceback')) + [ArgList([tb])]
+ else:
+ exc_list = [exc]
+
+ return pytree.Node(syms.raise_stmt,
+ [Name(u"raise")] + exc_list,
+ prefix=node.prefix)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_remove_old__future__imports.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_remove_old__future__imports.py
new file mode 100644
index 0000000..9336f75
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_remove_old__future__imports.py
@@ -0,0 +1,26 @@
+"""
+Fixer for removing any of these lines:
+
+ from __future__ import with_statement
+ from __future__ import nested_scopes
+ from __future__ import generators
+
+The reason is that __future__ imports like these are required to be the first
+line of code (after docstrings) on Python 2.6+, which can get in the way.
+
+These imports are always enabled in Python 2.6+, which is the minimum sane
+version to target for Py2/3 compatibility.
+"""
+
+from lib2to3 import fixer_base
+from libfuturize.fixer_util import remove_future_import
+
+class FixRemoveOldFutureImports(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "file_input"
+ run_order = 1
+
+ def transform(self, node, results):
+ remove_future_import(u"with_statement", node)
+ remove_future_import(u"nested_scopes", node)
+ remove_future_import(u"generators", node)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_unicode_keep_u.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_unicode_keep_u.py
new file mode 100644
index 0000000..2e9a4e4
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_unicode_keep_u.py
@@ -0,0 +1,24 @@
+"""Fixer that changes unicode to str and unichr to chr, but -- unlike the
+lib2to3 fix_unicode.py fixer, does not change u"..." into "...".
+
+The reason is that Py3.3+ supports the u"..." string prefix, and, if
+present, the prefix may provide useful information for disambiguating
+between byte strings and unicode strings, which is often the hardest part
+of the porting task.
+
+"""
+
+from lib2to3.pgen2 import token
+from lib2to3 import fixer_base
+
+_mapping = {u"unichr" : u"chr", u"unicode" : u"str"}
+
+class FixUnicodeKeepU(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "'unicode' | 'unichr'"
+
+ def transform(self, node, results):
+ if node.type == token.NAME:
+ new = node.clone()
+ new.value = _mapping[node.value]
+ return new
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_unicode_literals_import.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_unicode_literals_import.py
new file mode 100644
index 0000000..51c5062
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_unicode_literals_import.py
@@ -0,0 +1,18 @@
+"""
+Adds this import:
+
+ from __future__ import unicode_literals
+
+"""
+
+from lib2to3 import fixer_base
+from libfuturize.fixer_util import future_import
+
+class FixUnicodeLiteralsImport(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "file_input"
+
+ run_order = 9
+
+ def transform(self, node, results):
+ future_import(u"unicode_literals", node)
diff --git a/src/clyphx/vendor/future/libfuturize/fixes/fix_xrange_with_import.py b/src/clyphx/vendor/future/libfuturize/fixes/fix_xrange_with_import.py
new file mode 100644
index 0000000..c910f81
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/fixes/fix_xrange_with_import.py
@@ -0,0 +1,20 @@
+"""
+For the ``future`` package.
+
+Turns any xrange calls into range calls and adds this import line:
+
+ from builtins import range
+
+at the top.
+"""
+
+from lib2to3.fixes.fix_xrange import FixXrange
+
+from libfuturize.fixer_util import touch_import_top
+
+
+class FixXrangeWithImport(FixXrange):
+ def transform(self, node, results):
+ result = super(FixXrangeWithImport, self).transform(node, results)
+ touch_import_top('builtins', 'range', node)
+ return result
diff --git a/src/clyphx/vendor/future/libfuturize/main.py b/src/clyphx/vendor/future/libfuturize/main.py
new file mode 100644
index 0000000..634c2f2
--- /dev/null
+++ b/src/clyphx/vendor/future/libfuturize/main.py
@@ -0,0 +1,322 @@
+"""
+futurize: automatic conversion to clean 2/3 code using ``python-future``
+========================================================================
+
+Like Armin Ronacher's modernize.py, ``futurize`` attempts to produce clean
+standard Python 3 code that runs on both Py2 and Py3.
+
+One pass
+--------
+
+Use it like this on Python 2 code:
+
+ $ futurize --verbose mypython2script.py
+
+This will attempt to port the code to standard Py3 code that also
+provides Py2 compatibility with the help of the right imports from
+``future``.
+
+To write changes to the files, use the -w flag.
+
+Two stages
+----------
+
+The ``futurize`` script can also be called in two separate stages. First:
+
+ $ futurize --stage1 mypython2script.py
+
+This produces more modern Python 2 code that is not yet compatible with Python
+3. The tests should still run and the diff should be uncontroversial to apply to
+most Python projects that are willing to drop support for Python 2.5 and lower.
+
+After this, the recommended approach is to explicitly mark all strings that must
+be byte-strings with a b'' prefix and all text (unicode) strings with a u''
+prefix, and then invoke the second stage of Python 2 to 2/3 conversion with::
+
+ $ futurize --stage2 mypython2script.py
+
+Stage 2 adds a dependency on ``future``. It converts most remaining Python
+2-specific code to Python 3 code and adds appropriate imports from ``future``
+to restore Py2 support.
+
+The command above leaves all unadorned string literals as native strings
+(byte-strings on Py2, unicode strings on Py3). If instead you would like all
+unadorned string literals to be promoted to unicode, you can also pass this
+flag:
+
+ $ futurize --stage2 --unicode-literals mypython2script.py
+
+This adds the declaration ``from __future__ import unicode_literals`` to the
+top of each file, which implicitly declares all unadorned string literals to be
+unicode strings (``unicode`` on Py2).
+
+All imports
+-----------
+
+The --all-imports option forces adding all ``__future__`` imports,
+``builtins`` imports, and standard library aliases, even if they don't
+seem necessary for the current state of each module. (This can simplify
+testing, and can reduce the need to think about Py2 compatibility when editing
+the code further.)
+
+"""
+
+from __future__ import (absolute_import, print_function, unicode_literals)
+import future.utils
+from future import __version__
+
+import sys
+import logging
+import optparse
+import os
+
+from lib2to3.main import warn, StdoutRefactoringTool
+from lib2to3 import refactor
+
+from libfuturize.fixes import (lib2to3_fix_names_stage1,
+ lib2to3_fix_names_stage2,
+ libfuturize_fix_names_stage1,
+ libfuturize_fix_names_stage2)
+
+fixer_pkg = 'libfuturize.fixes'
+
+
+def main(args=None):
+ """Main program.
+
+    Args:
+        args: optional; a list of command line arguments. If omitted,
+            sys.argv[1:] is used. (The fixer package is fixed to the
+            module-level ``fixer_pkg``, 'libfuturize.fixes'.)
+
+ Returns a suggested exit status (0, 1, 2).
+ """
+
+ # Set up option parser
+ parser = optparse.OptionParser(usage="futurize [options] file|dir ...")
+ parser.add_option("-V", "--version", action="store_true",
+ help="Report the version number of futurize")
+ parser.add_option("-a", "--all-imports", action="store_true",
+ help="Add all __future__ and future imports to each module")
+ parser.add_option("-1", "--stage1", action="store_true",
+ help="Modernize Python 2 code only; no compatibility with Python 3 (or dependency on ``future``)")
+ parser.add_option("-2", "--stage2", action="store_true",
+ help="Take modernized (stage1) code and add a dependency on ``future`` to provide Py3 compatibility.")
+ parser.add_option("-0", "--both-stages", action="store_true",
+ help="Apply both stages 1 and 2")
+ parser.add_option("-u", "--unicode-literals", action="store_true",
+ help="Add ``from __future__ import unicode_literals`` to implicitly convert all unadorned string literals '' into unicode strings")
+ parser.add_option("-f", "--fix", action="append", default=[],
+ help="Each FIX specifies a transformation; default: all.\nEither use '-f division -f metaclass' etc. or use the fully-qualified module name: '-f lib2to3.fixes.fix_types -f libfuturize.fixes.fix_unicode_keep_u'")
+ parser.add_option("-j", "--processes", action="store", default=1,
+ type="int", help="Run 2to3 concurrently")
+ parser.add_option("-x", "--nofix", action="append", default=[],
+ help="Prevent a fixer from being run.")
+ parser.add_option("-l", "--list-fixes", action="store_true",
+ help="List available transformations")
+ parser.add_option("-p", "--print-function", action="store_true",
+ help="Modify the grammar so that print() is a function")
+ parser.add_option("-v", "--verbose", action="store_true",
+ help="More verbose logging")
+ parser.add_option("--no-diffs", action="store_true",
+ help="Don't show diffs of the refactoring")
+ parser.add_option("-w", "--write", action="store_true",
+ help="Write back modified files")
+ parser.add_option("-n", "--nobackups", action="store_true", default=False,
+ help="Don't write backups for modified files.")
+ parser.add_option("-o", "--output-dir", action="store", type="str",
+ default="", help="Put output files in this directory "
+ "instead of overwriting the input files. Requires -n. "
+ "For Python >= 2.7 only.")
+ parser.add_option("-W", "--write-unchanged-files", action="store_true",
+ help="Also write files even if no changes were required"
+ " (useful with --output-dir); implies -w.")
+ parser.add_option("--add-suffix", action="store", type="str", default="",
+ help="Append this string to all output filenames."
+ " Requires -n if non-empty. For Python >= 2.7 only."
+ "ex: --add-suffix='3' will generate .py3 files.")
+
+ # Parse command line arguments
+ flags = {}
+ refactor_stdin = False
+ options, args = parser.parse_args(args)
+
+ if options.write_unchanged_files:
+ flags["write_unchanged_files"] = True
+ if not options.write:
+ warn("--write-unchanged-files/-W implies -w.")
+ options.write = True
+ # If we allowed these, the original files would be renamed to backup names
+ # but not replaced.
+ if options.output_dir and not options.nobackups:
+ parser.error("Can't use --output-dir/-o without -n.")
+ if options.add_suffix and not options.nobackups:
+ parser.error("Can't use --add-suffix without -n.")
+
+ if not options.write and options.no_diffs:
+ warn("not writing files and not printing diffs; that's not very useful")
+ if not options.write and options.nobackups:
+ parser.error("Can't use -n without -w")
+ if "-" in args:
+ refactor_stdin = True
+ if options.write:
+ print("Can't write to stdin.", file=sys.stderr)
+ return 2
+ # Is this ever necessary?
+ if options.print_function:
+ flags["print_function"] = True
+
+ # Set up logging handler
+ level = logging.DEBUG if options.verbose else logging.INFO
+ logging.basicConfig(format='%(name)s: %(message)s', level=level)
+ logger = logging.getLogger('libfuturize.main')
+
+ if options.stage1 or options.stage2:
+ assert options.both_stages is None
+ options.both_stages = False
+ else:
+ options.both_stages = True
+
+ avail_fixes = set()
+
+ if options.stage1 or options.both_stages:
+ avail_fixes.update(lib2to3_fix_names_stage1)
+ avail_fixes.update(libfuturize_fix_names_stage1)
+ if options.stage2 or options.both_stages:
+ avail_fixes.update(lib2to3_fix_names_stage2)
+ avail_fixes.update(libfuturize_fix_names_stage2)
+
+ if options.unicode_literals:
+ avail_fixes.add('libfuturize.fixes.fix_unicode_literals_import')
+
+ if options.version:
+ print(__version__)
+ return 0
+ if options.list_fixes:
+ print("Available transformations for the -f/--fix option:")
+ # for fixname in sorted(refactor.get_all_fix_names(fixer_pkg)):
+ for fixname in sorted(avail_fixes):
+ print(fixname)
+ if not args:
+ return 0
+ if not args:
+ print("At least one file or directory argument required.",
+ file=sys.stderr)
+ print("Use --help to show usage.", file=sys.stderr)
+ return 2
+
+ unwanted_fixes = set()
+ for fix in options.nofix:
+ if ".fix_" in fix:
+ unwanted_fixes.add(fix)
+ else:
+ # Infer the full module name for the fixer.
+ # First ensure that no names clash (e.g.
+ # lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah):
+ found = [f for f in avail_fixes
+ if f.endswith('fix_{0}'.format(fix))]
+ if len(found) > 1:
+ print("Ambiguous fixer name. Choose a fully qualified "
+ "module name instead from these:\n" +
+ "\n".join(" " + myf for myf in found),
+ file=sys.stderr)
+ return 2
+ elif len(found) == 0:
+ print("Unknown fixer. Use --list-fixes or -l for a list.",
+ file=sys.stderr)
+ return 2
+ unwanted_fixes.add(found[0])
+
+ extra_fixes = set()
+ if options.all_imports:
+ if options.stage1:
+ prefix = 'libfuturize.fixes.'
+ extra_fixes.add(prefix +
+ 'fix_add__future__imports_except_unicode_literals')
+ else:
+ # In case the user hasn't run stage1 for some reason:
+ prefix = 'libpasteurize.fixes.'
+ extra_fixes.add(prefix + 'fix_add_all__future__imports')
+ extra_fixes.add(prefix + 'fix_add_future_standard_library_import')
+ extra_fixes.add(prefix + 'fix_add_all_future_builtins')
+ explicit = set()
+ if options.fix:
+ all_present = False
+ for fix in options.fix:
+ if fix == 'all':
+ all_present = True
+ else:
+ if ".fix_" in fix:
+ explicit.add(fix)
+ else:
+ # Infer the full module name for the fixer.
+ # First ensure that no names clash (e.g.
+ # lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah):
+ found = [f for f in avail_fixes
+ if f.endswith('fix_{0}'.format(fix))]
+ if len(found) > 1:
+ print("Ambiguous fixer name. Choose a fully qualified "
+ "module name instead from these:\n" +
+ "\n".join(" " + myf for myf in found),
+ file=sys.stderr)
+ return 2
+ elif len(found) == 0:
+ print("Unknown fixer. Use --list-fixes or -l for a list.",
+ file=sys.stderr)
+ return 2
+ explicit.add(found[0])
+ if len(explicit & unwanted_fixes) > 0:
+ print("Conflicting usage: the following fixers have been "
+ "simultaneously requested and disallowed:\n" +
+ "\n".join(" " + myf for myf in (explicit & unwanted_fixes)),
+ file=sys.stderr)
+ return 2
+ requested = avail_fixes.union(explicit) if all_present else explicit
+ else:
+ requested = avail_fixes.union(explicit)
+ fixer_names = (requested | extra_fixes) - unwanted_fixes
+
+ input_base_dir = os.path.commonprefix(args)
+ if (input_base_dir and not input_base_dir.endswith(os.sep)
+ and not os.path.isdir(input_base_dir)):
+ # One or more similar names were passed, their directory is the base.
+ # os.path.commonprefix() is ignorant of path elements, this corrects
+ # for that weird API.
+ input_base_dir = os.path.dirname(input_base_dir)
+ if options.output_dir:
+ input_base_dir = input_base_dir.rstrip(os.sep)
+ logger.info('Output in %r will mirror the input directory %r layout.',
+ options.output_dir, input_base_dir)
+
+ # Initialize the refactoring tool
+ if future.utils.PY26:
+ extra_kwargs = {}
+ else:
+ extra_kwargs = {
+ 'append_suffix': options.add_suffix,
+ 'output_dir': options.output_dir,
+ 'input_base_dir': input_base_dir,
+ }
+
+ rt = StdoutRefactoringTool(
+ sorted(fixer_names), flags, sorted(explicit),
+ options.nobackups, not options.no_diffs,
+ **extra_kwargs)
+
+ # Refactor all files and directories passed as arguments
+ if not rt.errors:
+ if refactor_stdin:
+ rt.refactor_stdin()
+ else:
+ try:
+ rt.refactor(args, options.write, None,
+ options.processes)
+ except refactor.MultiprocessingUnsupported:
+ assert options.processes > 1
+ print("Sorry, -j isn't " \
+ "supported on this platform.", file=sys.stderr)
+ return 1
+ rt.summarize()
+
+ # Return error status (0 if rt.errors is zero)
+ return int(bool(rt.errors))
diff --git a/src/clyphx/vendor/future/libpasteurize/__init__.py b/src/clyphx/vendor/future/libpasteurize/__init__.py
new file mode 100644
index 0000000..4cb1cbc
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/__init__.py
@@ -0,0 +1 @@
+# empty to make this a package
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/__init__.py b/src/clyphx/vendor/future/libpasteurize/fixes/__init__.py
new file mode 100644
index 0000000..905aec4
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/__init__.py
@@ -0,0 +1,54 @@
+import sys
+from lib2to3 import refactor
+
+# The original set of these fixes comes from lib3to2 (https://bitbucket.org/amentajo/lib3to2):
+fix_names = set([
+ 'libpasteurize.fixes.fix_add_all__future__imports', # from __future__ import absolute_import etc. on separate lines
+ 'libpasteurize.fixes.fix_add_future_standard_library_import', # we force adding this import for now, even if it doesn't seem necessary to the fix_future_standard_library fixer, for ease of testing
+ # 'libfuturize.fixes.fix_order___future__imports', # consolidates to a single line to simplify testing -- UNFINISHED
+ 'libpasteurize.fixes.fix_future_builtins', # adds "from future.builtins import *"
+ 'libfuturize.fixes.fix_future_standard_library', # adds "from future import standard_library"
+
+ 'libpasteurize.fixes.fix_annotations',
+ # 'libpasteurize.fixes.fix_bitlength', # ints have this in Py2.7
+ # 'libpasteurize.fixes.fix_bool', # need a decorator or Mixin
+ # 'libpasteurize.fixes.fix_bytes', # leave bytes as bytes
+ # 'libpasteurize.fixes.fix_classdecorator', # available in
+ # Py2.6+
+ # 'libpasteurize.fixes.fix_collections', hmmm ...
+ # 'libpasteurize.fixes.fix_dctsetcomp', # avail in Py27
+ 'libpasteurize.fixes.fix_division', # yes
+ # 'libpasteurize.fixes.fix_except', # avail in Py2.6+
+ # 'libpasteurize.fixes.fix_features', # ?
+ 'libpasteurize.fixes.fix_fullargspec',
+ # 'libpasteurize.fixes.fix_funcattrs',
+ 'libpasteurize.fixes.fix_getcwd',
+ 'libpasteurize.fixes.fix_imports', # adds "from future import standard_library"
+ 'libpasteurize.fixes.fix_imports2',
+ # 'libpasteurize.fixes.fix_input',
+ # 'libpasteurize.fixes.fix_int',
+ # 'libpasteurize.fixes.fix_intern',
+ # 'libpasteurize.fixes.fix_itertools',
+ 'libpasteurize.fixes.fix_kwargs', # yes, we want this
+ # 'libpasteurize.fixes.fix_memoryview',
+ # 'libpasteurize.fixes.fix_metaclass', # write a custom handler for
+ # this
+ # 'libpasteurize.fixes.fix_methodattrs', # __func__ and __self__ seem to be defined on Py2.7 already
+ 'libpasteurize.fixes.fix_newstyle', # yes, we want this: explicit inheritance from object. Without new-style classes in Py2, super() will break etc.
+ # 'libpasteurize.fixes.fix_next', # use a decorator for this
+ # 'libpasteurize.fixes.fix_numliterals', # prob not
+ # 'libpasteurize.fixes.fix_open', # huh?
+ # 'libpasteurize.fixes.fix_print', # no way
+ 'libpasteurize.fixes.fix_printfunction', # adds __future__ import print_function
+ # 'libpasteurize.fixes.fix_raise_', # TODO: get this working!
+
+ # 'libpasteurize.fixes.fix_range', # nope
+ # 'libpasteurize.fixes.fix_reduce',
+ # 'libpasteurize.fixes.fix_setliteral',
+ # 'libpasteurize.fixes.fix_str',
+ # 'libpasteurize.fixes.fix_super', # maybe, if our magic super() isn't robust enough
+ 'libpasteurize.fixes.fix_throw', # yes, if Py3 supports it
+ # 'libpasteurize.fixes.fix_unittest',
+ 'libpasteurize.fixes.fix_unpacking', # yes, this is useful
+ # 'libpasteurize.fixes.fix_with' # way out of date
+ ])
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/feature_base.py b/src/clyphx/vendor/future/libpasteurize/fixes/feature_base.py
new file mode 100644
index 0000000..c36d9a9
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/feature_base.py
@@ -0,0 +1,57 @@
+u"""
+Base classes for features that are backwards-incompatible.
+
+Usage:
+features = Features()
+features.add(Feature("py3k_feature", "power< 'py3k' any* >", "2.7"))
+PATTERN = features.PATTERN
+"""
+
+pattern_unformatted = u"%s=%s" # name=pattern, for dict lookups
+message_unformatted = u"""
+%s is only supported in Python %s and above."""
+
+class Feature(object):
+ u"""
+ A feature has a name, a pattern, and a minimum version of Python 2.x
+ required to use the feature (or 3.x if there is no backwards-compatible
+ version of 2.x)
+ """
+ def __init__(self, name, PATTERN, version):
+ self.name = name
+ self._pattern = PATTERN
+ self.version = version
+
+ def message_text(self):
+ u"""
+ Format the above text with the name and minimum version required.
+ """
+ return message_unformatted % (self.name, self.version)
+
+class Features(set):
+ u"""
+ A set of features that generates a pattern for the features it contains.
+ This set will act like a mapping in that we map names to patterns.
+ """
+ mapping = {}
+
+ def update_mapping(self):
+ u"""
+ Called every time we care about the mapping of names to features.
+ """
+ self.mapping = dict([(f.name, f) for f in iter(self)])
+
+ @property
+ def PATTERN(self):
+ u"""
+ Uses the mapping of names to features to return a PATTERN suitable
+ for using the lib2to3 patcomp.
+ """
+ self.update_mapping()
+ return u" |\n".join([pattern_unformatted % (f.name, f._pattern) for f in iter(self)])
+
+ def __getitem__(self, key):
+ u"""
+ Implement a simple mapping to get patterns from names.
+ """
+ return self.mapping[key]
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_add_all__future__imports.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_add_all__future__imports.py
new file mode 100644
index 0000000..a151f9f
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_add_all__future__imports.py
@@ -0,0 +1,24 @@
+"""
+Fixer for adding:
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ from __future__ import unicode_literals
+
+This is done when converting from Py3 to both Py3/Py2.
+"""
+
+from lib2to3 import fixer_base
+from libfuturize.fixer_util import future_import
+
+class FixAddAllFutureImports(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "file_input"
+ run_order = 1
+
+ def transform(self, node, results):
+ future_import(u"absolute_import", node)
+ future_import(u"division", node)
+ future_import(u"print_function", node)
+ future_import(u"unicode_literals", node)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_add_all_future_builtins.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_add_all_future_builtins.py
new file mode 100644
index 0000000..22911ba
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_add_all_future_builtins.py
@@ -0,0 +1,37 @@
+"""
+For the ``future`` package.
+
+Adds this import line::
+
+ from builtins import (ascii, bytes, chr, dict, filter, hex, input,
+ int, list, map, next, object, oct, open, pow,
+ range, round, str, super, zip)
+
+to a module, irrespective of whether each definition is used.
+
+Adds these imports after any other imports (in an initial block of them).
+"""
+
+from __future__ import unicode_literals
+
+from lib2to3 import fixer_base
+
+from libfuturize.fixer_util import touch_import_top
+
+
+class FixAddAllFutureBuiltins(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "file_input"
+ run_order = 1
+
+ def transform(self, node, results):
+ # import_str = """(ascii, bytes, chr, dict, filter, hex, input,
+ # int, list, map, next, object, oct, open, pow,
+ # range, round, str, super, zip)"""
+ touch_import_top(u'builtins', '*', node)
+
+ # builtins = """ascii bytes chr dict filter hex input
+ # int list map next object oct open pow
+ # range round str super zip"""
+ # for builtin in sorted(builtins.split(), reverse=True):
+ # touch_import_top(u'builtins', builtin, node)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_add_future_standard_library_import.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_add_future_standard_library_import.py
new file mode 100644
index 0000000..0778406
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_add_future_standard_library_import.py
@@ -0,0 +1,23 @@
+"""
+For the ``future`` package.
+
+Adds this import line:
+
+ from future import standard_library
+
+after any __future__ imports but before any other imports. Doesn't actually
+change the imports to Py3 style.
+"""
+
+from lib2to3 import fixer_base
+from libfuturize.fixer_util import touch_import_top
+
+class FixAddFutureStandardLibraryImport(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "file_input"
+ run_order = 8
+
+ def transform(self, node, results):
+ # TODO: add a blank line between any __future__ imports and this?
+ touch_import_top(u'future', u'standard_library', node)
+ # TODO: also add standard_library.install_hooks()
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_annotations.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_annotations.py
new file mode 100644
index 0000000..884b674
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_annotations.py
@@ -0,0 +1,48 @@
+u"""
+Fixer to remove function annotations
+"""
+
+from lib2to3 import fixer_base
+from lib2to3.pgen2 import token
+from lib2to3.fixer_util import syms
+
+warning_text = u"Removing function annotations completely."
+
+def param_without_annotations(node):
+ return node.children[0]
+
+class FixAnnotations(fixer_base.BaseFix):
+
+ warned = False
+
+ def warn_once(self, node, reason):
+ if not self.warned:
+ self.warned = True
+ self.warning(node, reason=reason)
+
+ PATTERN = u"""
+ funcdef< 'def' any parameters< '(' [params=any] ')' > ['->' ret=any] ':' any* >
+ """
+
+ def transform(self, node, results):
+ u"""
+ This just strips annotations from the funcdef completely.
+ """
+ params = results.get(u"params")
+ ret = results.get(u"ret")
+ if ret is not None:
+ assert ret.prev_sibling.type == token.RARROW, u"Invalid return annotation"
+ self.warn_once(node, reason=warning_text)
+ ret.prev_sibling.remove()
+ ret.remove()
+ if params is None: return
+ if params.type == syms.typedargslist:
+ # more than one param in a typedargslist
+ for param in params.children:
+ if param.type == syms.tname:
+ self.warn_once(node, reason=warning_text)
+ param.replace(param_without_annotations(param))
+ elif params.type == syms.tname:
+ # one param
+ self.warn_once(node, reason=warning_text)
+ params.replace(param_without_annotations(params))
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_division.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_division.py
new file mode 100644
index 0000000..6a04871
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_division.py
@@ -0,0 +1,28 @@
+u"""
+Fixer for division: from __future__ import division if needed
+"""
+
+from lib2to3 import fixer_base
+from libfuturize.fixer_util import token, future_import
+
+def match_division(node):
+ u"""
+ __future__.division redefines the meaning of a single slash for division,
+ so we match that and only that.
+ """
+ slash = token.SLASH
+ return node.type == slash and not node.next_sibling.type == slash and \
+ not node.prev_sibling.type == slash
+
+class FixDivision(fixer_base.BaseFix):
+ run_order = 4 # this seems to be ignored?
+
+ def match(self, node):
+ u"""
+ Since the tree needs to be fixed once and only once if and only if it
+ matches, then we can start discarding matches after we make the first.
+ """
+ return match_division(node)
+
+ def transform(self, node, results):
+ future_import(u"division", node)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_features.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_features.py
new file mode 100644
index 0000000..52630f9
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_features.py
@@ -0,0 +1,86 @@
+u"""
+Warn about features that are not present in Python 2.5, giving a message that
+points to the earliest version of Python 2.x (or 3.x, if none) that supports it
+"""
+
+from .feature_base import Feature, Features
+from lib2to3 import fixer_base
+
+FEATURES = [
+ #(FeatureName,
+ # FeaturePattern,
+ # FeatureMinVersion,
+ #),
+ (u"memoryview",
+ u"power < 'memoryview' trailer < '(' any* ')' > any* >",
+ u"2.7",
+ ),
+ (u"numbers",
+ u"""import_from< 'from' 'numbers' 'import' any* > |
+ import_name< 'import' ('numbers' dotted_as_names< any* 'numbers' any* >) >""",
+ u"2.6",
+ ),
+ (u"abc",
+ u"""import_name< 'import' ('abc' dotted_as_names< any* 'abc' any* >) > |
+ import_from< 'from' 'abc' 'import' any* >""",
+ u"2.6",
+ ),
+ (u"io",
+ u"""import_name< 'import' ('io' dotted_as_names< any* 'io' any* >) > |
+ import_from< 'from' 'io' 'import' any* >""",
+ u"2.6",
+ ),
+ (u"bin",
+ u"power< 'bin' trailer< '(' any* ')' > any* >",
+ u"2.6",
+ ),
+ (u"formatting",
+ u"power< any trailer< '.' 'format' > trailer< '(' any* ')' > >",
+ u"2.6",
+ ),
+ (u"nonlocal",
+ u"global_stmt< 'nonlocal' any* >",
+ u"3.0",
+ ),
+ (u"with_traceback",
+ u"trailer< '.' 'with_traceback' >",
+ u"3.0",
+ ),
+]
+
+class FixFeatures(fixer_base.BaseFix):
+
+ run_order = 9 # Wait until all other fixers have run to check for these
+
+ # To avoid spamming, we only want to warn for each feature once.
+ features_warned = set()
+
+ # Build features from the list above
+ features = Features([Feature(name, pattern, version) for \
+ name, pattern, version in FEATURES])
+
+ PATTERN = features.PATTERN
+
+ def match(self, node):
+ to_ret = super(FixFeatures, self).match(node)
+ # We want the mapping only to tell us the node's specific information.
+ try:
+ del to_ret[u'node']
+ except Exception:
+ # We want it to delete the 'node' from the results
+ # if it's there, so we don't care if it fails for normal reasons.
+ pass
+ return to_ret
+
+ def transform(self, node, results):
+ for feature_name in results:
+ if feature_name in self.features_warned:
+ continue
+ else:
+ curr_feature = self.features[feature_name]
+ if curr_feature.version >= u"3":
+ fail = self.cannot_convert
+ else:
+ fail = self.warning
+ fail(node, reason=curr_feature.message_text())
+ self.features_warned.add(feature_name)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_fullargspec.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_fullargspec.py
new file mode 100644
index 0000000..4bd37e1
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_fullargspec.py
@@ -0,0 +1,16 @@
+u"""
+Fixer for getfullargspec -> getargspec
+"""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Name
+
+warn_msg = u"some of the values returned by getfullargspec are not valid in Python 2 and have no equivalent."
+
+class FixFullargspec(fixer_base.BaseFix):
+
+ PATTERN = u"'getfullargspec'"
+
+ def transform(self, node, results):
+ self.warning(node, warn_msg)
+ return Name(u"getargspec", prefix=node.prefix)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_future_builtins.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_future_builtins.py
new file mode 100644
index 0000000..6849679
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_future_builtins.py
@@ -0,0 +1,46 @@
+"""
+Adds this import line:
+
+ from builtins import XYZ
+
+for each of the functions XYZ that is used in the module.
+"""
+
+from __future__ import unicode_literals
+
+from lib2to3 import fixer_base
+from lib2to3.pygram import python_symbols as syms
+from lib2to3.fixer_util import Name, Call, in_special_context
+
+from libfuturize.fixer_util import touch_import_top
+
+# All builtins are:
+# from future.builtins.iterators import (filter, map, zip)
+# from future.builtins.misc import (ascii, chr, hex, input, isinstance, oct, open, round, super)
+# from future.types import (bytes, dict, int, range, str)
+# We don't need isinstance any more.
+
+replaced_builtins = '''filter map zip
+ ascii chr hex input next oct open round super
+ bytes dict int range str'''.split()
+
+expression = '|'.join(["name='{0}'".format(name) for name in replaced_builtins])
+
+
+class FixFutureBuiltins(fixer_base.BaseFix):
+ BM_compatible = True
+ run_order = 9
+
+ # Currently we only match uses as a function. This doesn't match e.g.:
+ # if isinstance(s, str):
+ # ...
+ PATTERN = """
+ power<
+ ({0}) trailer< '(' args=[any] ')' >
+ rest=any* >
+ """.format(expression)
+
+ def transform(self, node, results):
+ name = results["name"]
+ touch_import_top(u'builtins', name.value, node)
+ # name.replace(Name(u"input", prefix=name.prefix))
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_getcwd.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_getcwd.py
new file mode 100644
index 0000000..9b7f002
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_getcwd.py
@@ -0,0 +1,26 @@
+u"""
+Fixer for os.getcwd() -> os.getcwdu().
+Also warns about "from os import getcwd", suggesting the above form.
+"""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Name
+
+class FixGetcwd(fixer_base.BaseFix):
+
+ PATTERN = u"""
+ power< 'os' trailer< dot='.' name='getcwd' > any* >
+ |
+ import_from< 'from' 'os' 'import' bad='getcwd' >
+ """
+
+ def transform(self, node, results):
+ if u"name" in results:
+ name = results[u"name"]
+ name.replace(Name(u"getcwdu", prefix=name.prefix))
+ elif u"bad" in results:
+ # Can't convert to getcwdu and then expect to catch every use.
+ self.cannot_convert(node, u"import os, use os.getcwd() instead.")
+ return
+ else:
+ raise ValueError(u"For some reason, the pattern matcher failed.")
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_imports.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_imports.py
new file mode 100644
index 0000000..2d6718f
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_imports.py
@@ -0,0 +1,112 @@
+u"""
+Fixer for standard library imports renamed in Python 3
+"""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Name, is_probably_builtin, Newline, does_tree_import
+from lib2to3.pygram import python_symbols as syms
+from lib2to3.pgen2 import token
+from lib2to3.pytree import Node, Leaf
+
+from libfuturize.fixer_util import touch_import_top
+# from ..fixer_util import NameImport
+
+# used in simple_mapping_to_pattern()
+MAPPING = {u"reprlib": u"repr",
+ u"winreg": u"_winreg",
+ u"configparser": u"ConfigParser",
+ u"copyreg": u"copy_reg",
+ u"queue": u"Queue",
+ u"socketserver": u"SocketServer",
+ u"_markupbase": u"markupbase",
+ u"test.support": u"test.test_support",
+ u"dbm.bsd": u"dbhash",
+ u"dbm.ndbm": u"dbm",
+ u"dbm.dumb": u"dumbdbm",
+ u"dbm.gnu": u"gdbm",
+ u"html.parser": u"HTMLParser",
+ u"html.entities": u"htmlentitydefs",
+ u"http.client": u"httplib",
+ u"http.cookies": u"Cookie",
+ u"http.cookiejar": u"cookielib",
+# "tkinter": "Tkinter",
+ u"tkinter.dialog": u"Dialog",
+ u"tkinter._fix": u"FixTk",
+ u"tkinter.scrolledtext": u"ScrolledText",
+ u"tkinter.tix": u"Tix",
+ u"tkinter.constants": u"Tkconstants",
+ u"tkinter.dnd": u"Tkdnd",
+ u"tkinter.__init__": u"Tkinter",
+ u"tkinter.colorchooser": u"tkColorChooser",
+ u"tkinter.commondialog": u"tkCommonDialog",
+ u"tkinter.font": u"tkFont",
+ u"tkinter.ttk": u"ttk",
+ u"tkinter.messagebox": u"tkMessageBox",
+ u"tkinter.turtle": u"turtle",
+ u"urllib.robotparser": u"robotparser",
+ u"xmlrpc.client": u"xmlrpclib",
+ u"builtins": u"__builtin__",
+}
+
+# generic strings to help build patterns
+# these variables mean (with http.client.HTTPConnection as an example):
+# name = http
+# attr = client
+# used = HTTPConnection
+# fmt_name is a formatted subpattern (simple_name_match or dotted_name_match)
+
+# helps match 'queue', as in 'from queue import ...'
+simple_name_match = u"name='%s'"
+# helps match 'client', to be used if client has been imported from http
+subname_match = u"attr='%s'"
+# helps match 'http.client', as in 'import urllib.request'
+dotted_name_match = u"dotted_name=dotted_name< %s '.' %s >"
+# helps match 'queue', as in 'queue.Queue(...)'
+power_onename_match = u"%s"
+# helps match 'http.client', as in 'http.client.HTTPConnection(...)'
+power_twoname_match = u"power< %s trailer< '.' %s > any* >"
+# helps match 'client.HTTPConnection', if 'client' has been imported from http
+power_subname_match = u"power< %s any* >"
+# helps match 'from http.client import HTTPConnection'
+from_import_match = u"from_import=import_from< 'from' %s 'import' imported=any >"
+# helps match 'from http import client'
+from_import_submod_match = u"from_import_submod=import_from< 'from' %s 'import' (%s | import_as_name< %s 'as' renamed=any > | import_as_names< any* (%s | import_as_name< %s 'as' renamed=any >) any* > ) >"
+# helps match 'import urllib.request'
+name_import_match = u"name_import=import_name< 'import' %s > | name_import=import_name< 'import' dotted_as_name< %s 'as' renamed=any > >"
+# helps match 'import http.client, winreg'
+multiple_name_import_match = u"name_import=import_name< 'import' dotted_as_names< names=any* > >"
+
+def all_patterns(name):
+ u"""
+ Accepts a string and returns a pattern of possible patterns involving that name
+ Called by simple_mapping_to_pattern for each name in the mapping it receives.
+ """
+
+ # i_ denotes an import-like node
+ # u_ denotes a node that appears to be a usage of the name
+ if u'.' in name:
+ name, attr = name.split(u'.', 1)
+ simple_name = simple_name_match % (name)
+ simple_attr = subname_match % (attr)
+ dotted_name = dotted_name_match % (simple_name, simple_attr)
+ i_from = from_import_match % (dotted_name)
+ i_from_submod = from_import_submod_match % (simple_name, simple_attr, simple_attr, simple_attr, simple_attr)
+ i_name = name_import_match % (dotted_name, dotted_name)
+ u_name = power_twoname_match % (simple_name, simple_attr)
+ u_subname = power_subname_match % (simple_attr)
+ return u' | \n'.join((i_name, i_from, i_from_submod, u_name, u_subname))
+ else:
+ simple_name = simple_name_match % (name)
+ i_name = name_import_match % (simple_name, simple_name)
+ i_from = from_import_match % (simple_name)
+ u_name = power_onename_match % (simple_name)
+ return u' | \n'.join((i_name, i_from, u_name))
+
+
+class FixImports(fixer_base.BaseFix):
+
+ PATTERN = u' | \n'.join([all_patterns(name) for name in MAPPING])
+ PATTERN = u' | \n'.join((PATTERN, multiple_name_import_match))
+
+ def transform(self, node, results):
+ touch_import_top(u'future', u'standard_library', node)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_imports2.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_imports2.py
new file mode 100644
index 0000000..70444e9
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_imports2.py
@@ -0,0 +1,174 @@
+u"""
+Fixer for complicated imports
+"""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Name, String, FromImport, Newline, Comma
+from libfuturize.fixer_util import touch_import_top
+
+
+# Tkinter constants re-exported by the Tk dialog modules; prepended to the
+# FileDialog / SimpleDialog name tuples in PY2MODULES below.
+TK_BASE_NAMES = (u'ACTIVE', u'ALL', u'ANCHOR', u'ARC',u'BASELINE', u'BEVEL', u'BOTH',
+                 u'BOTTOM', u'BROWSE', u'BUTT', u'CASCADE', u'CENTER', u'CHAR',
+                 u'CHECKBUTTON', u'CHORD', u'COMMAND', u'CURRENT', u'DISABLED',
+                 u'DOTBOX', u'E', u'END', u'EW', u'EXCEPTION', u'EXTENDED', u'FALSE',
+                 u'FIRST', u'FLAT', u'GROOVE', u'HIDDEN', u'HORIZONTAL', u'INSERT',
+                 u'INSIDE', u'LAST', u'LEFT', u'MITER', u'MOVETO', u'MULTIPLE', u'N',
+                 u'NE', u'NO', u'NONE', u'NORMAL', u'NS', u'NSEW', u'NUMERIC', u'NW',
+                 u'OFF', u'ON', u'OUTSIDE', u'PAGES', u'PIESLICE', u'PROJECTING',
+                 u'RADIOBUTTON', u'RAISED', u'READABLE', u'RIDGE', u'RIGHT',
+                 u'ROUND', u'S', u'SCROLL', u'SE', u'SEL', u'SEL_FIRST', u'SEL_LAST',
+                 u'SEPARATOR', u'SINGLE', u'SOLID', u'SUNKEN', u'SW', u'StringTypes',
+                 u'TOP', u'TRUE', u'TclVersion', u'TkVersion', u'UNDERLINE',
+                 u'UNITS', u'VERTICAL', u'W', u'WORD', u'WRITABLE', u'X', u'Y', u'YES',
+                 u'wantobjects')
+
+# Maps each Python 2 module name to the public names it provided.
+# NOTE(review): passed to build_import_pattern() as mapping2, which does
+# not currently use it -- confirm whether it is consumed elsewhere.
+PY2MODULES = {
+              u'urllib2' : (
+                  u'AbstractBasicAuthHandler', u'AbstractDigestAuthHandler',
+                  u'AbstractHTTPHandler', u'BaseHandler', u'CacheFTPHandler',
+                  u'FTPHandler', u'FileHandler', u'HTTPBasicAuthHandler',
+                  u'HTTPCookieProcessor', u'HTTPDefaultErrorHandler',
+                  u'HTTPDigestAuthHandler', u'HTTPError', u'HTTPErrorProcessor',
+                  u'HTTPHandler', u'HTTPPasswordMgr',
+                  u'HTTPPasswordMgrWithDefaultRealm', u'HTTPRedirectHandler',
+                  u'HTTPSHandler', u'OpenerDirector', u'ProxyBasicAuthHandler',
+                  u'ProxyDigestAuthHandler', u'ProxyHandler', u'Request',
+                  u'StringIO', u'URLError', u'UnknownHandler', u'addinfourl',
+                  u'build_opener', u'install_opener', u'parse_http_list',
+                  u'parse_keqv_list', u'randombytes', u'request_host', u'urlopen'),
+              u'urllib' : (
+                  u'ContentTooShortError', u'FancyURLopener',u'URLopener',
+                  u'basejoin', u'ftperrors', u'getproxies',
+                  u'getproxies_environment', u'localhost', u'pathname2url',
+                  u'quote', u'quote_plus', u'splitattr', u'splithost',
+                  u'splitnport', u'splitpasswd', u'splitport', u'splitquery',
+                  u'splittag', u'splittype', u'splituser', u'splitvalue',
+                  u'thishost', u'unquote', u'unquote_plus', u'unwrap',
+                  u'url2pathname', u'urlcleanup', u'urlencode', u'urlopen',
+                  u'urlretrieve',),
+              u'urlparse' : (
+                  u'parse_qs', u'parse_qsl', u'urldefrag', u'urljoin',
+                  u'urlparse', u'urlsplit', u'urlunparse', u'urlunsplit'),
+              u'dbm' : (
+                  u'ndbm', u'gnu', u'dumb'),
+              u'anydbm' : (
+                  u'error', u'open'),
+              u'whichdb' : (
+                  u'whichdb',),
+              u'BaseHTTPServer' : (
+                  u'BaseHTTPRequestHandler', u'HTTPServer'),
+              u'CGIHTTPServer' : (
+                  u'CGIHTTPRequestHandler',),
+              u'SimpleHTTPServer' : (
+                  u'SimpleHTTPRequestHandler',),
+              u'FileDialog' : TK_BASE_NAMES + (
+                  u'FileDialog', u'LoadFileDialog', u'SaveFileDialog',
+                  u'dialogstates', u'test'),
+              u'tkFileDialog' : (
+                  u'Directory', u'Open', u'SaveAs', u'_Dialog', u'askdirectory',
+                  u'askopenfile', u'askopenfilename', u'askopenfilenames',
+                  u'askopenfiles', u'asksaveasfile', u'asksaveasfilename'),
+              u'SimpleDialog' : TK_BASE_NAMES + (
+                  u'SimpleDialog',),
+              u'tkSimpleDialog' : TK_BASE_NAMES + (
+                  u'askfloat', u'askinteger', u'askstring', u'Dialog'),
+              u'SimpleXMLRPCServer' : (
+                  u'CGIXMLRPCRequestHandler', u'SimpleXMLRPCDispatcher',
+                  u'SimpleXMLRPCRequestHandler', u'SimpleXMLRPCServer',
+                  u'list_public_methods', u'remove_duplicates',
+                  u'resolve_dotted_attribute'),
+              u'DocXMLRPCServer' : (
+                  u'DocCGIXMLRPCRequestHandler', u'DocXMLRPCRequestHandler',
+                  u'DocXMLRPCServer', u'ServerHTMLDoc',u'XMLRPCDocGenerator'),
+              }
+
+# Maps each Python 3 module ('package.module') to the Python 2 modules
+# whose names it absorbed.  Keys always contain exactly one dot; modules
+# addressed via their package use '.__init__'.
+MAPPING = { u'urllib.request' :
+                (u'urllib2', u'urllib'),
+            u'urllib.error' :
+                (u'urllib2', u'urllib'),
+            u'urllib.parse' :
+                (u'urllib2', u'urllib', u'urlparse'),
+            u'dbm.__init__' :
+                (u'anydbm', u'whichdb'),
+            u'http.server' :
+                (u'CGIHTTPServer', u'SimpleHTTPServer', u'BaseHTTPServer'),
+            u'tkinter.filedialog' :
+                (u'tkFileDialog', u'FileDialog'),
+            u'tkinter.simpledialog' :
+                (u'tkSimpleDialog', u'SimpleDialog'),
+            u'xmlrpc.server' :
+                (u'DocXMLRPCServer', u'SimpleXMLRPCServer'),
+            }
+
+# lib2to3 pattern-template fragments; the %s slots are filled in by
+# build_import_pattern() and all_modules_subpattern() below.
+# helps match 'http', as in 'from http.server import ...'
+simple_name = u"name='%s'"
+# helps match 'server', as in 'from http.server import ...'
+simple_attr = u"attr='%s'"
+# helps match 'HTTPServer', as in 'from http.server import HTTPServer'
+simple_using = u"using='%s'"
+# helps match 'urllib.request', as in 'import urllib.request'
+dotted_name = u"dotted_name=dotted_name< %s '.' %s >"
+# helps match 'http.server', as in 'http.server.HTTPServer(...)'
+power_twoname = u"pow=power< %s trailer< '.' %s > trailer< '.' using=any > any* >"
+# helps match 'dbm.whichdb', as in 'dbm.whichdb(...)'
+power_onename = u"pow=power< %s trailer< '.' using=any > any* >"
+# helps match 'from http.server import HTTPServer'
+# also helps match 'from http.server import HTTPServer, SimpleHTTPRequestHandler'
+# also helps match 'from http.server import *'
+from_import = u"from_import=import_from< 'from' %s 'import' (import_as_name< using=any 'as' renamed=any> | in_list=import_as_names< using=any* > | using='*' | using=NAME) >"
+# helps match 'import urllib.request'
+name_import = u"name_import=import_name< 'import' (%s | in_list=dotted_as_names< imp_list=any* >) >"
+
+#############
+# WON'T FIX #
+#############
+
+# Renamed-import forms.  They are still matched (so the fixer fires and
+# adds the compatibility import) but are otherwise left as written.
+# helps match 'import urllib.request as name'
+name_import_rename = u"name_import_rename=dotted_as_name< %s 'as' renamed=any >"
+# helps match 'from http import server'
+from_import_rename = u"from_import_rename=import_from< 'from' %s 'import' (%s | import_as_name< %s 'as' renamed=any > | in_list=import_as_names< any* (%s | import_as_name< %s 'as' renamed=any >) any* >) >"
+
+
+def all_modules_subpattern():
+ u"""
+ Builds a pattern for all toplevel names
+ (urllib, http, etc)
+ """
+ names_dot_attrs = [mod.split(u".") for mod in MAPPING]
+ ret = u"( " + u" | ".join([dotted_name % (simple_name % (mod[0]),
+ simple_attr % (mod[1])) for mod in names_dot_attrs])
+ ret += u" | "
+ ret += u" | ".join([simple_name % (mod[0]) for mod in names_dot_attrs if mod[1] == u"__init__"]) + u" )"
+ return ret
+
+
+def build_import_pattern(mapping1, mapping2):
+ u"""
+ mapping1: A dict mapping py3k modules to all possible py2k replacements
+ mapping2: A dict mapping py2k modules to the things they do
+ This builds a HUGE pattern to match all ways that things can be imported
+ """
+ # py3k: urllib.request, py2k: ('urllib2', 'urllib')
+ yield from_import % (all_modules_subpattern())
+ for py3k, py2k in mapping1.items():
+ name, attr = py3k.split(u'.')
+ s_name = simple_name % (name)
+ s_attr = simple_attr % (attr)
+ d_name = dotted_name % (s_name, s_attr)
+ yield name_import % (d_name)
+ yield power_twoname % (s_name, s_attr)
+ if attr == u'__init__':
+ yield name_import % (s_name)
+ yield power_onename % (s_name)
+ yield name_import_rename % (d_name)
+ yield from_import_rename % (s_name, s_attr, s_attr, s_attr, s_attr)
+
+
+class FixImports2(fixer_base.BaseFix):
+    u"""Fixer for complicated import forms of the modules in MAPPING."""
+
+    # Run late so simpler import fixers have already done their work.
+    run_order = 4
+
+    PATTERN = u" | \n".join(build_import_pattern(MAPPING, PY2MODULES))
+
+    def transform(self, node, results):
+        # Matching only signals that the compatibility import is needed;
+        # the import statement itself is left untouched.
+        touch_import_top(u'future', u'standard_library', node)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_kwargs.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_kwargs.py
new file mode 100644
index 0000000..290f991
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_kwargs.py
@@ -0,0 +1,147 @@
+u"""
+Fixer for Python 3 function parameter syntax
+This fixer is rather sensitive to incorrect py3k syntax.
+"""
+
+# Note: "relevant" parameters are parameters following the first STAR in the list.
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import token, String, Newline, Comma, Name
+from libfuturize.fixer_util import indentation, suitify, DoubleStar
+
+# Code templates inserted at the top of the transformed function body:
+# _assign_template pops one keyword-only arg out of the kwargs dict;
+# _if_template/_else_template wrap that when the arg has a default value.
+_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
+_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
+_else_template = u"else: %(name)s = %(default)s"
+_kwargs_default_name = u"_3to2kwargs"
+
+def gen_params(raw_params):
+    u"""
+    Generator that yields tuples of (name, default_value) for each parameter in the list
+    If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
+    """
+    # The list must start with a STAR and have something after the comma
+    # that follows it, otherwise there is nothing to yield.
+    assert raw_params[0].type == token.STAR and len(raw_params) > 2
+    curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
+    max_idx = len(raw_params)
+    while curr_idx < max_idx:
+        curr_item = raw_params[curr_idx]
+        prev_item = curr_item.prev_sibling
+        if curr_item.type != token.NAME:
+            # Skip commas, '=' signs and default-value nodes.
+            curr_idx += 1
+            continue
+        if prev_item is not None and prev_item.type == token.DOUBLESTAR:
+            # Reached the **kwargs parameter: no keyword-only params follow.
+            break
+        name = curr_item.value
+        nxt = curr_item.next_sibling
+        if nxt is not None and nxt.type == token.EQUAL:
+            # 'name = default': yield the default node and skip past it.
+            default_value = nxt.next_sibling
+            curr_idx += 2
+        else:
+            default_value = None
+        yield (name, default_value)
+        curr_idx += 1
+
+def remove_params(raw_params, kwargs_default=_kwargs_default_name):
+    u"""
+    Removes all keyword-only args from the params list and a bare star, if any.
+    Does not add the kwargs dict if needed.
+    Returns True if more action is needed, False if not
+    (more action is needed if no kwargs dict exists)
+    """
+    assert raw_params[0].type == token.STAR
+    if raw_params[1].type == token.COMMA:
+        # Bare star ('*,'): remove the star and its trailing comma.
+        raw_params[0].remove()
+        raw_params[1].remove()
+        kw_params = raw_params[2:]
+    else:
+        # '*args' form: keep the vararg; skip '*', its NAME and the comma.
+        kw_params = raw_params[3:]
+    for param in kw_params:
+        if param.type != token.DOUBLESTAR:
+            param.remove()
+        else:
+            # Hit '**': an explicit kwargs dict already exists.
+            return False
+    else:
+        # for/else: the loop ran to completion, so no '**kwargs' was found
+        # and the caller must add one.
+        return True
+
+def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
+    u"""
+    Returns string with the name of the kwargs dict if the params after the first star need fixing
+    Otherwise returns empty string
+    """
+    found_kwargs = False
+    needs_fix = False
+
+    for t in raw_params[2:]:
+        if t.type == token.COMMA:
+            # Commas are irrelevant at this stage.
+            continue
+        elif t.type == token.NAME and not found_kwargs:
+            # Keyword-only argument: definitely need to fix.
+            needs_fix = True
+        elif t.type == token.NAME and found_kwargs:
+            # Return 'foobar' of **foobar, if needed.
+            return t.value if needs_fix else u''
+        elif t.type == token.DOUBLESTAR:
+            # Found the '**' of **foobar.
+            found_kwargs = True
+        else:
+            # Any other token ends the scan: assume there is no **foobar
+            # and return a synthetic name, if needed.
+            # NOTE(review): a default value ('=' token) before '**kwargs'
+            # takes this branch and returns kwargs_default even though a
+            # real kwargs dict may follow -- verify against upstream 3to2.
+            return kwargs_default if needs_fix else u''
+    # Falls off the end (implicitly returning None, which is falsy) when
+    # the list ends without hitting any return branch above.
+
+class FixKwargs(fixer_base.BaseFix):
+    u"""Move Py3 keyword-only arguments into a kwargs dict that is
+    unpacked at the top of the function body."""
+
+    run_order = 7 # Run after function annotations are removed
+
+    PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
+
+    def transform(self, node, results):
+        params_rawlist = results[u"params"]
+        for i, item in enumerate(params_rawlist):
+            if item.type == token.STAR:
+                # Keep only the params from the first star onwards.
+                params_rawlist = params_rawlist[i:]
+                break
+        else:
+            # No star at all: nothing keyword-only to fix.
+            return
+        # params is guaranteed to be a list starting with *.
+        # if fixing is needed, there will be at least 3 items in this list:
+        # [STAR, COMMA, NAME] is the minimum that we need to worry about.
+        new_kwargs = needs_fixing(params_rawlist)
+        # new_kwargs is the name of the kwargs dictionary.
+        if not new_kwargs:
+            return
+        suitify(node)
+
+        # At this point, params_rawlist is guaranteed to be a list
+        # beginning with a star that includes at least one keyword-only param
+        # e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
+        # [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
+
+        # Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
+        # Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
+        # We need to insert our new stuff before the first_stmt and change the
+        # first_stmt's prefix.
+
+        suite = node.children[4]
+        first_stmt = suite.children[2]
+        ident = indentation(first_stmt)
+
+        # Each iteration inserts at index 2, so the statements end up in
+        # reverse-of-insertion (i.e. declaration) order above first_stmt.
+        for name, default_value in gen_params(params_rawlist):
+            if default_value is None:
+                suite.insert_child(2, Newline())
+                suite.insert_child(2, String(_assign_template %{u'name':name, u'kwargs':new_kwargs}, prefix=ident))
+            else:
+                suite.insert_child(2, Newline())
+                suite.insert_child(2, String(_else_template %{u'name':name, u'default':default_value}, prefix=ident))
+                suite.insert_child(2, Newline())
+                suite.insert_child(2, String(_if_template %{u'assign':_assign_template %{u'name':name, u'kwargs':new_kwargs}, u'name':name, u'kwargs':new_kwargs}, prefix=ident))
+        first_stmt.prefix = ident
+        suite.children[2].prefix = u""
+
+        # Now, we need to fix up the list of params.
+
+        must_add_kwargs = remove_params(params_rawlist)
+        if must_add_kwargs:
+            arglist = results[u'arglist']
+            if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
+                arglist.append_child(Comma())
+            arglist.append_child(DoubleStar(prefix=u" "))
+            arglist.append_child(Name(new_kwargs))
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_memoryview.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_memoryview.py
new file mode 100644
index 0000000..a20f6f3
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_memoryview.py
@@ -0,0 +1,21 @@
+u"""
+Fixer for memoryview(s) -> buffer(s).
+Explicit because some memoryview methods are invalid on buffer objects.
+"""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Name
+
+
+class FixMemoryview(fixer_base.BaseFix):
+
+ explicit = True # User must specify that they want this.
+
+ PATTERN = u"""
+ power< name='memoryview' trailer< '(' [any] ')' >
+ rest=any* >
+ """
+
+ def transform(self, node, results):
+ name = results[u"name"]
+ name.replace(Name(u"buffer", prefix=name.prefix))
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_metaclass.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_metaclass.py
new file mode 100644
index 0000000..52dd1d1
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_metaclass.py
@@ -0,0 +1,78 @@
+u"""
+Fixer for (metaclass=X) -> __metaclass__ = X
+Some semantics (see PEP 3115) may be altered in the translation."""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Name, syms, Node, Leaf, Newline, find_root
+from lib2to3.pygram import token
+from libfuturize.fixer_util import indentation, suitify
+# from ..fixer_util import Name, syms, Node, Leaf, Newline, find_root, indentation, suitify
+
+def has_metaclass(parent):
+    u"""Return the nodes of a 'metaclass=X' keyword argument in a classdef,
+    or None when there is no metaclass.
+
+    For a single-argument class this is the list [argument_node, name, '=', X];
+    inside an arglist it is the tuple (preceding_comma, name, '=', X).
+    """
+    results = None
+    for node in parent.children:
+        kids = node.children
+        if node.type == syms.argument:
+            # 'class X(metaclass=Y)': the keyword is the only argument.
+            if kids[0] == Leaf(token.NAME, u"metaclass") and \
+               kids[1] == Leaf(token.EQUAL, u"=") and \
+               kids[2]:
+                #Hack to avoid "class X(=):" with this case.
+                results = [node] + kids
+                break
+        elif node.type == syms.arglist:
+            # Argument list... loop through it looking for:
+            # Node(*, [*, Leaf(token.NAME, u"metaclass"), Leaf(token.EQUAL, u"="), Leaf(*, *)]
+            for child in node.children:
+                if results: break
+                if child.type == token.COMMA:
+                    #Store the last comma, which precedes the metaclass
+                    comma = child
+                elif type(child) == Node:
+                    meta = equal = name = None
+                    for arg in child.children:
+                        if arg == Leaf(token.NAME, u"metaclass"):
+                            #We have the (metaclass) part
+                            meta = arg
+                        elif meta and arg == Leaf(token.EQUAL, u"="):
+                            #We have the (metaclass=) part
+                            equal = arg
+                        elif meta and equal:
+                            #Here we go, we have (metaclass=X)
+                            name = arg
+                            # NOTE(review): 'comma' is unbound if no COMMA
+                            # child preceded this argument -- confirm an
+                            # arglist always yields one first.
+                            results = (comma, meta, equal, name)
+                            break
+    return results
+
+
+class FixMetaclass(fixer_base.BaseFix):
+    u"""Convert a 'metaclass=X' class keyword into a Py2-style
+    '__metaclass__ = X' assignment at the top of the class body."""
+
+    PATTERN = u"""
+    classdef
+    """
+
+    def transform(self, node, results):
+        meta_results = has_metaclass(node)
+        if not meta_results: return
+        # Remove the matched nodes ('metaclass', '=', X and the comma or
+        # enclosing argument) from the class header.
+        for meta in meta_results:
+            meta.remove()
+        target = Leaf(token.NAME, u"__metaclass__")
+        equal = Leaf(token.EQUAL, u"=", prefix=u" ")
+        # meta is the last item in what was returned by has_metaclass(): name
+        name = meta
+        name.prefix = u" "
+        stmt_node = Node(syms.atom, [target, equal, name])
+
+        suitify(node)
+        for item in node.children:
+            if item.type == syms.suite:
+                for stmt in item.children:
+                    if stmt.type == token.INDENT:
+                        # Insert, in reverse order, the statement, a newline,
+                        # and an indent right after the first indented line
+                        loc = item.children.index(stmt) + 1
+                        # Keep consistent indentation form
+                        ident = Leaf(token.INDENT, stmt.value)
+                        item.insert_child(loc, ident)
+                        item.insert_child(loc, Newline())
+                        item.insert_child(loc, stmt_node)
+                        break
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_newstyle.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_newstyle.py
new file mode 100644
index 0000000..cc6b3ad
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_newstyle.py
@@ -0,0 +1,33 @@
+u"""
+Fixer for "class Foo: ..." -> "class Foo(object): ..."
+"""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import LParen, RParen, Name
+
+from libfuturize.fixer_util import touch_import_top
+
+
+def insert_object(node, idx):
+ node.insert_child(idx, RParen())
+ node.insert_child(idx, Name(u"object"))
+ node.insert_child(idx, LParen())
+
+class FixNewstyle(fixer_base.BaseFix):
+    u"""Make every class explicitly inherit from object."""
+
+    # Match:
+    # class Blah:
+    # and:
+    # class Blah():
+
+    PATTERN = u"classdef< 'class' NAME ['(' ')'] colon=':' any >"
+
+    def transform(self, node, results):
+        colon = results[u"colon"]
+        idx = node.children.index(colon)
+        # Drop an existing empty '()' pair before inserting '(object)'.
+        if (node.children[idx-2].value == '(' and
+            node.children[idx-1].value == ')'):
+            del node.children[idx-2:idx]
+            idx -= 2
+        insert_object(node, idx)
+        # On Py2, 'object' must come from the builtins backport.
+        touch_import_top(u'builtins', 'object', node)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_next.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_next.py
new file mode 100644
index 0000000..9ecb6c0
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_next.py
@@ -0,0 +1,43 @@
+u"""
+Fixer for:
+it.__next__() -> it.next().
+next(it) -> it.next().
+"""
+
+from lib2to3.pgen2 import token
+from lib2to3.pygram import python_symbols as syms
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Name, Call, find_binding, Attr
+
+bind_warning = u"Calls to builtin next() possibly shadowed by global binding"
+
+
+class FixNext(fixer_base.BaseFix):
+
+ PATTERN = u"""
+ power< base=any+ trailer< '.' attr='__next__' > any* >
+ |
+ power< head='next' trailer< '(' arg=any ')' > any* >
+ |
+ classdef< 'class' base=any+ ':'
+ suite< any*
+ funcdef< 'def'
+ attr='__next__'
+ parameters< '(' NAME ')' > any+ >
+ any* > >
+ """
+
+ def transform(self, node, results):
+ assert results
+
+ base = results.get(u"base")
+ attr = results.get(u"attr")
+ head = results.get(u"head")
+ arg_ = results.get(u"arg")
+ if arg_:
+ arg = arg_.clone()
+ head.replace(Attr(Name(unicode(arg),prefix=head.prefix),
+ Name(u"next")))
+ arg_.remove()
+ elif base:
+ attr.replace(Name(u"next", prefix=attr.prefix))
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_printfunction.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_printfunction.py
new file mode 100644
index 0000000..a2a6e08
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_printfunction.py
@@ -0,0 +1,17 @@
+u"""
+Fixer for print: from __future__ import print_function.
+"""
+
+from lib2to3 import fixer_base
+from libfuturize.fixer_util import future_import
+
+class FixPrintfunction(fixer_base.BaseFix):
+    u"""Add 'from __future__ import print_function' wherever print() is used."""
+
+    # explicit = True
+
+    PATTERN = u"""
+              power< 'print' trailer < '(' any* ')' > any* >
+              """
+
+    def transform(self, node, results):
+        # The call itself is left untouched; only the __future__ import
+        # is inserted at the top of the module.
+        future_import(u"print_function", node)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_raise.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_raise.py
new file mode 100644
index 0000000..9c9c192
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_raise.py
@@ -0,0 +1,25 @@
+u"""Fixer for 'raise E(V).with_traceback(T)' -> 'raise E, V, T'"""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Comma, Node, Leaf, token, syms
+
+class FixRaise(fixer_base.BaseFix):
+    u"""Rewrite 'raise E(V).with_traceback(T)' into 'raise E, V, T'."""
+
+    PATTERN = u"""
+    raise_stmt< 'raise' (power< name=any [trailer< '(' val=any* ')' >]
+    [trailer< '.' 'with_traceback' > trailer< '(' trc=any ')' >] > | any) ['from' chain=any] >"""
+
+    def transform(self, node, results):
+        name, val, trc = (results.get(u"name"), results.get(u"val"), results.get(u"trc"))
+        chain = results.get(u"chain")
+        if chain is not None:
+            # 'raise ... from X' has no Py2 form: warn and drop the clause
+            # (prev_sibling is the 'from' keyword itself).
+            self.warning(node, u"explicit exception chaining is not supported in Python 2")
+            chain.prev_sibling.remove()
+            chain.remove()
+        if trc is not None:
+            # An empty constructor-argument list means the value is None.
+            val = val[0] if val else Leaf(token.NAME, u"None")
+            val.prefix = trc.prefix = u" "
+            # Build the three-argument Py2 raise statement.
+            kids = [Leaf(token.NAME, u"raise"), name.clone(), Comma(),
+                    val.clone(), Comma(), trc.clone()]
+            raise_stmt = Node(syms.raise_stmt, kids)
+            node.replace(raise_stmt)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_raise_.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_raise_.py
new file mode 100644
index 0000000..0f020c4
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_raise_.py
@@ -0,0 +1,35 @@
+u"""Fixer for
+ raise E(V).with_traceback(T)
+ to:
+ from future.utils import raise_
+ ...
+ raise_(E, V, T)
+
+TODO: FIXME!!
+
+"""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Comma, Node, Leaf, token, syms
+
+class FixRaise(fixer_base.BaseFix):
+    u"""Unfinished fixer intended to rewrite 'raise E(V).with_traceback(T)'
+    into a future.utils.raise_(E, V, T) call; see the module docstring."""
+
+    PATTERN = u"""
+    raise_stmt< 'raise' (power< name=any [trailer< '(' val=any* ')' >]
+    [trailer< '.' 'with_traceback' > trailer< '(' trc=any ')' >] > | any) ['from' chain=any] >"""
+
+    def transform(self, node, results):
+        # Deliberate guard: this bare name raises NameError on first use
+        # because the fixer is unfinished (module docstring: "TODO:
+        # FIXME!!").  The code below still mirrors fix_raise.py and does
+        # NOT yet emit a raise_() call.
+        FIXME
+        name, val, trc = (results.get(u"name"), results.get(u"val"), results.get(u"trc"))
+        chain = results.get(u"chain")
+        if chain is not None:
+            self.warning(node, u"explicit exception chaining is not supported in Python 2")
+            chain.prev_sibling.remove()
+            chain.remove()
+        if trc is not None:
+            val = val[0] if val else Leaf(token.NAME, u"None")
+            val.prefix = trc.prefix = u" "
+            kids = [Leaf(token.NAME, u"raise"), name.clone(), Comma(),
+                    val.clone(), Comma(), trc.clone()]
+            raise_stmt = Node(syms.raise_stmt, kids)
+            node.replace(raise_stmt)
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_throw.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_throw.py
new file mode 100644
index 0000000..c0feed1
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_throw.py
@@ -0,0 +1,23 @@
+u"""Fixer for 'g.throw(E(V).with_traceback(T))' -> 'g.throw(E, V, T)'"""
+
+from lib2to3 import fixer_base
+from lib2to3.pytree import Node, Leaf
+from lib2to3.pgen2 import token
+from lib2to3.fixer_util import Comma
+
+class FixThrow(fixer_base.BaseFix):
+    u"""Rewrite g.throw(E(V).with_traceback(T)) to g.throw(E, V, T)."""
+
+    PATTERN = u"""
+    power< any trailer< '.' 'throw' >
+    trailer< '(' args=power< exc=any trailer< '(' val=any* ')' >
+    trailer< '.' 'with_traceback' > trailer< '(' trc=any ')' > > ')' > >
+    """
+
+    def transform(self, node, results):
+        syms = self.syms
+        exc, val, trc = (results[u"exc"], results[u"val"], results[u"trc"])
+        # An empty constructor-argument list means the value is None.
+        val = val[0] if val else Leaf(token.NAME, u"None")
+        val.prefix = trc.prefix = u" "
+        kids = [exc.clone(), Comma(), val.clone(), Comma(), trc.clone()]
+        args = results[u"args"]
+        # NOTE(review): assigning .children directly bypasses lib2to3's
+        # parent/changed bookkeeping -- confirm this is intentional.
+        args.children = kids
diff --git a/src/clyphx/vendor/future/libpasteurize/fixes/fix_unpacking.py b/src/clyphx/vendor/future/libpasteurize/fixes/fix_unpacking.py
new file mode 100644
index 0000000..c2d3207
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/fixes/fix_unpacking.py
@@ -0,0 +1,120 @@
+u"""
+Fixer for:
+(a,)* *b (,c)* [,] = s
+for (a,)* *b (,c)* [,] in d: ...
+"""
+
+from lib2to3 import fixer_base
+from itertools import count
+from lib2to3.fixer_util import (Assign, Comma, Call, Newline, Name,
+ Number, token, syms, Node, Leaf)
+from libfuturize.fixer_util import indentation, suitify, commatize
+# from libfuturize.fixer_util import Assign, Comma, Call, Newline, Name, Number, indentation, suitify, commatize, token, syms, Node, Leaf
+
+def assignment_source(num_pre, num_post, LISTNAME, ITERNAME):
+ u"""
+ Accepts num_pre and num_post, which are counts of values
+ before and after the starg (not including the starg)
+ Returns a source fit for Assign() from fixer_util
+ """
+ children = []
+ pre = unicode(num_pre)
+ post = unicode(num_post)
+ # This code builds the assignment source from lib2to3 tree primitives.
+ # It's not very readable, but it seems like the most correct way to do it.
+ if num_pre > 0:
+ pre_part = Node(syms.power, [Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Leaf(token.COLON, u":"), Number(pre)]), Leaf(token.RSQB, u"]")])])
+ children.append(pre_part)
+ children.append(Leaf(token.PLUS, u"+", prefix=u" "))
+ main_part = Node(syms.power, [Leaf(token.LSQB, u"[", prefix=u" "), Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Number(pre) if num_pre > 0 else Leaf(1, u""), Leaf(token.COLON, u":"), Node(syms.factor, [Leaf(token.MINUS, u"-"), Number(post)]) if num_post > 0 else Leaf(1, u"")]), Leaf(token.RSQB, u"]"), Leaf(token.RSQB, u"]")])])
+ children.append(main_part)
+ if num_post > 0:
+ children.append(Leaf(token.PLUS, u"+", prefix=u" "))
+ post_part = Node(syms.power, [Name(LISTNAME, prefix=u" "), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Node(syms.factor, [Leaf(token.MINUS, u"-"), Number(post)]), Leaf(token.COLON, u":")]), Leaf(token.RSQB, u"]")])])
+ children.append(post_part)
+ source = Node(syms.arith_expr, children)
+ return source
+
+class FixUnpacking(fixer_base.BaseFix):
+    u"""Rewrite extended iterable unpacking (starred targets) into
+    Py2-safe slicing over a temporary list; see transform() for examples."""
+
+    PATTERN = u"""
+    expl=expr_stmt< testlist_star_expr<
+    pre=(any ',')*
+        star_expr< '*' name=NAME >
+        post=(',' any)* [','] > '=' source=any > |
+    impl=for_stmt< 'for' lst=exprlist<
+    pre=(any ',')*
+        star_expr< '*' name=NAME >
+        post=(',' any)* [','] > 'in' it=any ':' suite=any>"""
+
+    def fix_explicit_context(self, node, results):
+        pre, name, post, source = (results.get(n) for n in (u"pre", u"name", u"post", u"source"))
+        # Keep only the NAME leaves; the matched groups also contain commas.
+        pre = [n.clone() for n in pre if n.type == token.NAME]
+        name.prefix = u" "
+        post = [n.clone() for n in post if n.type == token.NAME]
+        target = [n.clone() for n in commatize(pre + [name.clone()] + post)]
+        # to make the special-case fix for "*z, = ..." correct with the least
+        # amount of modification, make the left-side into a guaranteed tuple
+        target.append(Comma())
+        source.prefix = u""
+        # _3to2list = list(<source>)
+        setup_line = Assign(Name(self.LISTNAME), Call(Name(u"list"), [source.clone()]))
+        # <targets>, = <slices of _3to2list>
+        power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME))
+        return setup_line, power_line
+
+    def fix_implicit_context(self, node, results):
+        u"""
+        Only example of the implicit context is
+        a for loop, so only fix that.
+        """
+        pre, name, post, it = (results.get(n) for n in (u"pre", u"name", u"post", u"it"))
+        pre = [n.clone() for n in pre if n.type == token.NAME]
+        name.prefix = u" "
+        post = [n.clone() for n in post if n.type == token.NAME]
+        target = [n.clone() for n in commatize(pre + [name.clone()] + post)]
+        # to make the special-case fix for "*z, = ..." correct with the least
+        # amount of modification, make the left-side into a guaranteed tuple
+        target.append(Comma())
+        source = it.clone()
+        source.prefix = u""
+        # _3to2list = list(_3to2iter); the renamed loop variable feeds it.
+        setup_line = Assign(Name(self.LISTNAME), Call(Name(u"list"), [Name(self.ITERNAME)]))
+        power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME))
+        return setup_line, power_line
+
+    def transform(self, node, results):
+        u"""
+        a,b,c,d,e,f,*g,h,i = range(100) changes to
+        _3to2list = list(range(100))
+        a,b,c,d,e,f,g,h,i, = _3to2list[:6] + [_3to2list[6:-2]] + _3to2list[-2:]
+
+        and
+
+        for a,b,*c,d,e in iter_of_iters: do_stuff changes to
+        for _3to2iter in iter_of_iters:
+            _3to2list = list(_3to2iter)
+            a,b,c,d,e, = _3to2list[:2] + [_3to2list[2:-2]] + _3to2list[-2:]
+            do_stuff
+        """
+        self.LISTNAME = self.new_name(u"_3to2list")
+        self.ITERNAME = self.new_name(u"_3to2iter")
+        expl, impl = results.get(u"expl"), results.get(u"impl")
+        if expl is not None:
+            setup_line, power_line = self.fix_explicit_context(node, results)
+            setup_line.prefix = expl.prefix
+            power_line.prefix = indentation(expl.parent)
+            setup_line.append_child(Newline())
+            parent = node.parent
+            # node.remove() returns the index the node occupied, so the
+            # replacement lines can be inserted at the same position.
+            i = node.remove()
+            parent.insert_child(i, power_line)
+            parent.insert_child(i, setup_line)
+        elif impl is not None:
+            setup_line, power_line = self.fix_implicit_context(node, results)
+            suitify(node)
+            suite = [k for k in node.children if k.type == syms.suite][0]
+            setup_line.prefix = u""
+            # children[1] is the INDENT leaf; its value is the indent string.
+            power_line.prefix = suite.children[1].value
+            suite.children[2].prefix = indentation(suite.children[2])
+            suite.insert_child(2, Newline())
+            suite.insert_child(2, power_line)
+            suite.insert_child(2, Newline())
+            suite.insert_child(2, setup_line)
+            # Rename the loop target to the temporary iterator variable.
+            results.get(u"lst").replace(Name(self.ITERNAME, prefix=u" "))
diff --git a/src/clyphx/vendor/future/libpasteurize/main.py b/src/clyphx/vendor/future/libpasteurize/main.py
new file mode 100644
index 0000000..4179174
--- /dev/null
+++ b/src/clyphx/vendor/future/libpasteurize/main.py
@@ -0,0 +1,204 @@
+"""
+pasteurize: automatic conversion of Python 3 code to clean 2/3 code
+===================================================================
+
+``pasteurize`` attempts to convert existing Python 3 code into source-compatible
+Python 2 and 3 code.
+
+Use it like this on Python 3 code:
+
+ $ pasteurize --verbose mypython3script.py
+
+This removes any Py3-only syntax (e.g. new metaclasses) and adds these
+import lines:
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ from __future__ import unicode_literals
+ from future import standard_library
+ standard_library.install_hooks()
+ from builtins import *
+
+To write changes to the files, use the -w flag.
+
+It also adds any other wrappers needed for Py2/3 compatibility.
+
+Note that separate stages are not available (or needed) when converting from
+Python 3 with ``pasteurize`` as they are when converting from Python 2 with
+``futurize``.
+
+The --all-imports option forces adding all ``__future__`` imports,
+``builtins`` imports, and standard library aliases, even if they don't
+seem necessary for the current state of each module. (This can simplify
+testing, and can reduce the need to think about Py2 compatibility when editing
+the code further.)
+
+"""
+
+from __future__ import (absolute_import, print_function, unicode_literals)
+
+import sys
+import logging
+import optparse
+from lib2to3.main import main, warn, StdoutRefactoringTool
+from lib2to3 import refactor
+
+from future import __version__
+from libpasteurize.fixes import fix_names
+
+
def main(args=None):
    """Main program for ``pasteurize``.

    Parses the command line (``args`` defaults to ``sys.argv[1:]``) and
    refactors the given files/directories from Python 3 code into
    Python 2/3-compatible code.

    Returns a suggested exit status: 0 (success), 1 (refactoring errors),
    2 (usage error).
    """
    # Set up option parser
    parser = optparse.OptionParser(usage="pasteurize [options] file|dir ...")
    parser.add_option("-V", "--version", action="store_true",
                      help="Report the version number of pasteurize")
    parser.add_option("-a", "--all-imports", action="store_true",
                      help="Adds all __future__ and future imports to each module")
    parser.add_option("-f", "--fix", action="append", default=[],
                      help="Each FIX specifies a transformation; default: all")
    parser.add_option("-j", "--processes", action="store", default=1,
                      type="int", help="Run 2to3 concurrently")
    parser.add_option("-x", "--nofix", action="append", default=[],
                      help="Prevent a fixer from being run.")
    parser.add_option("-l", "--list-fixes", action="store_true",
                      help="List available transformations")
    # parser.add_option("-p", "--print-function", action="store_true",
    #                   help="Modify the grammar so that print() is a function")
    parser.add_option("-v", "--verbose", action="store_true",
                      help="More verbose logging")
    parser.add_option("--no-diffs", action="store_true",
                      help="Don't show diffs of the refactoring")
    parser.add_option("-w", "--write", action="store_true",
                      help="Write back modified files")
    parser.add_option("-n", "--nobackups", action="store_true", default=False,
                      help="Don't write backups for modified files.")

    # Parse command line arguments
    refactor_stdin = False
    flags = {}
    options, args = parser.parse_args(args)
    fixer_pkg = 'libpasteurize.fixes'
    avail_fixes = fix_names
    # pasteurize input is Python 3, so print is always a function.
    flags["print_function"] = True

    if not options.write and options.no_diffs:
        warn("not writing files and not printing diffs; that's not very useful")
    if not options.write and options.nobackups:
        parser.error("Can't use -n without -w")
    if options.version:
        print(__version__)
        return 0
    if options.list_fixes:
        print("Available transformations for the -f/--fix option:")
        for fixname in sorted(avail_fixes):
            print(fixname)
        # Listing fixes with no files given is a complete, successful run.
        if not args:
            return 0
    if not args:
        print("At least one file or directory argument required.",
              file=sys.stderr)
        print("Use --help to show usage.", file=sys.stderr)
        return 2
    if "-" in args:
        refactor_stdin = True
        if options.write:
            print("Can't write to stdin.", file=sys.stderr)
            return 2

    # Set up logging handler
    level = logging.DEBUG if options.verbose else logging.INFO
    logging.basicConfig(format='%(name)s: %(message)s', level=level)

    # Resolve the fixers the user asked us NOT to run (-x/--nofix).
    unwanted_fixes = set()
    for fix in options.nofix:
        if ".fix_" in fix:
            unwanted_fixes.add(fix)
        else:
            # Infer the full module name for the fixer.
            # First ensure that no names clash (e.g.
            # lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah):
            found = [f for f in avail_fixes
                     if f.endswith('fix_{0}'.format(fix))]
            if len(found) > 1:
                print("Ambiguous fixer name. Choose a fully qualified "
                      "module name instead from these:\n" +
                      "\n".join(" " + myf for myf in found),
                      file=sys.stderr)
                return 2
            elif len(found) == 0:
                print("Unknown fixer. Use --list-fixes or -l for a list.",
                      file=sys.stderr)
                return 2
            unwanted_fixes.add(found[0])

    extra_fixes = set()
    if options.all_imports:
        prefix = 'libpasteurize.fixes.'
        extra_fixes.add(prefix + 'fix_add_all__future__imports')
        extra_fixes.add(prefix + 'fix_add_future_standard_library_import')
        extra_fixes.add(prefix + 'fix_add_all_future_builtins')

    # Resolve any explicitly requested fixers (-f/--fix).
    explicit = set()
    if options.fix:
        all_present = False
        for fix in options.fix:
            if fix == 'all':
                all_present = True
            else:
                if ".fix_" in fix:
                    explicit.add(fix)
                else:
                    # Infer the full module name for the fixer.
                    # First ensure that no names clash (e.g.
                    # lib2to3.fixes.fix_blah and libpasteurize.fixes.fix_blah):
                    found = [f for f in avail_fixes
                             if f.endswith('fix_{0}'.format(fix))]
                    if len(found) > 1:
                        print("Ambiguous fixer name. Choose a fully qualified "
                              "module name instead from these:\n" +
                              "\n".join(" " + myf for myf in found),
                              file=sys.stderr)
                        return 2
                    elif len(found) == 0:
                        print("Unknown fixer. Use --list-fixes or -l for a list.",
                              file=sys.stderr)
                        return 2
                    explicit.add(found[0])
        if len(explicit & unwanted_fixes) > 0:
            print("Conflicting usage: the following fixers have been "
                  "simultaneously requested and disallowed:\n" +
                  "\n".join(" " + myf for myf in (explicit & unwanted_fixes)),
                  file=sys.stderr)
            return 2
        requested = avail_fixes.union(explicit) if all_present else explicit
    else:
        requested = avail_fixes.union(explicit)

    # Bug fix: parenthesise the union before subtracting.  '-' binds more
    # tightly than '|', so the previous expression
    #     requested | extra_fixes - unwanted_fixes
    # removed unwanted fixers only from extra_fixes, leaving a disallowed
    # fixer that was part of `requested` still enabled.
    fixer_names = (requested | extra_fixes) - unwanted_fixes

    # Initialize the refactoring tool
    rt = StdoutRefactoringTool(sorted(fixer_names), flags, set(),
                               options.nobackups, not options.no_diffs)

    # Refactor all files and directories passed as arguments
    if not rt.errors:
        if refactor_stdin:
            rt.refactor_stdin()
        else:
            try:
                rt.refactor(args, options.write, None,
                            options.processes)
            except refactor.MultiprocessingUnsupported:
                assert options.processes > 1
                print("Sorry, -j isn't " \
                      "supported on this platform.", file=sys.stderr)
                return 1
    rt.summarize()

    # Return error status (0 if rt.errors is zero)
    return int(bool(rt.errors))
diff --git a/src/clyphx/vendor/future/past/__init__.py b/src/clyphx/vendor/future/past/__init__.py
new file mode 100644
index 0000000..1471303
--- /dev/null
+++ b/src/clyphx/vendor/future/past/__init__.py
@@ -0,0 +1,90 @@
+# coding=utf-8
+"""
+past: compatibility with Python 2 from Python 3
+===============================================
+
+``past`` is a package to aid with Python 2/3 compatibility. Whereas ``future``
+contains backports of Python 3 constructs to Python 2, ``past`` provides
+implementations of some Python 2 constructs in Python 3 and tools to import and
+run Python 2 code in Python 3. It is intended to be used sparingly, as a way of
+running old Python 2 code from Python 3 until the code is ported properly.
+
+Potential uses for libraries:
+
+- as a step in porting a Python 2 codebase to Python 3 (e.g. with the ``futurize`` script)
+- to provide Python 3 support for previously Python 2-only libraries with the
+ same APIs as on Python 2 -- particularly with regard to 8-bit strings (the
+ ``past.builtins.str`` type).
+- to aid in providing minimal-effort Python 3 support for applications using
+ libraries that do not yet wish to upgrade their code properly to Python 3, or
+ wish to upgrade it gradually to Python 3 style.
+
+
+Here are some code examples that run identically on Python 3 and 2::
+
+ >>> from past.builtins import str as oldstr
+
+ >>> philosopher = oldstr(u'\u5b54\u5b50'.encode('utf-8'))
+ >>> # This now behaves like a Py2 byte-string on both Py2 and Py3.
+ >>> # For example, indexing returns a Python 2-like string object, not
+ >>> # an integer:
+ >>> philosopher[0]
+ '\xe5'
+    >>> type(philosopher[0])
+    <past.types.oldstr>
+
+
+ >>> # List-producing versions of range, reduce, map, filter
+ >>> from past.builtins import range, reduce
+ >>> range(10)
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ >>> reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
+ 15
+
+ >>> # Other functions removed in Python 3 are resurrected ...
+ >>> from past.builtins import execfile
+ >>> execfile('myfile.py')
+
+ >>> from past.builtins import raw_input
+ >>> name = raw_input('What is your name? ')
+ What is your name? [cursor]
+
+ >>> from past.builtins import reload
+ >>> reload(mymodule) # equivalent to imp.reload(mymodule) in Python 3
+
+ >>> from past.builtins import xrange
+ >>> for i in xrange(10):
+ ... pass
+
+
+It also provides import hooks so you can import and use Python 2 modules like
+this::
+
+ $ python3
+
+ >>> from past.translation import autotranslate
+    >>> autotranslate('mypy2module')
+ >>> import mypy2module
+
+until the authors of the Python 2 modules have upgraded their code. Then, for
+example::
+
+ >>> mypy2module.func_taking_py2_string(oldstr(b'abcd'))
+
+
+Credits
+-------
+
+:Author: Ed Schofield, Jordan M. Adler, et al
+:Sponsor: Python Charmers Pty Ltd, Australia: http://pythoncharmers.com
+
+
+Licensing
+---------
+Copyright 2013-2019 Python Charmers Pty Ltd, Australia.
+The software is distributed under an MIT licence. See LICENSE.txt.
+"""
+
+from future import __version__, __copyright__, __license__
+
+__title__ = 'past'
+__author__ = 'Ed Schofield'
diff --git a/src/clyphx/vendor/future/past/builtins/__init__.py b/src/clyphx/vendor/future/past/builtins/__init__.py
new file mode 100644
index 0000000..1b19e37
--- /dev/null
+++ b/src/clyphx/vendor/future/past/builtins/__init__.py
@@ -0,0 +1,72 @@
+"""
+A resurrection of some old functions from Python 2 for use in Python 3. These
+should be used sparingly, to help with porting efforts, since code using them
+is no longer standard Python 3 code.
+
+This module provides the following:
+
+1. Implementations of these builtin functions which have no equivalent on Py3:
+
+- apply
+- chr
+- cmp
+- execfile
+
+2. Aliases:
+
+- intern <- sys.intern
+- raw_input <- input
+- reduce <- functools.reduce
+- reload <- imp.reload
+- unichr <- chr
+- unicode <- str
+- xrange <- range
+
+3. List-producing versions of the corresponding Python 3 iterator-producing functions:
+
+- filter
+- map
+- range
+- zip
+
+4. Forward-ported Py2 types:
+
+- basestring
+- dict
+- str
+- long
+- unicode
+
+"""
+
+from future.utils import PY3
+from past.builtins.noniterators import (filter, map, range, reduce, zip)
+# from past.builtins.misc import (ascii, hex, input, oct, open)
+if PY3:
+ from past.types import (basestring,
+ olddict as dict,
+ oldstr as str,
+ long,
+ unicode)
+else:
+ from __builtin__ import (basestring, dict, str, long, unicode)
+
+from past.builtins.misc import (apply, chr, cmp, execfile, intern, oct,
+ raw_input, reload, unichr, unicode, xrange)
+from past import utils
+
+
if utils.PY3:
    # We only import names that shadow the builtins on Py3. No other namespace
    # pollution on Py3.

    # Only shadow builtins on Py3; no new names
    # NOTE(review): 'oct' is imported from past.builtins.misc above but is
    # not exported here -- confirm that omission is intentional.
    __all__ = ['filter', 'map', 'range', 'reduce', 'zip',
               'basestring', 'dict', 'str', 'long', 'unicode',
               'apply', 'chr', 'cmp', 'execfile', 'intern', 'raw_input',
               'reload', 'unichr', 'xrange'
               ]

else:
    # No namespace pollution on Py2
    __all__ = []
diff --git a/src/clyphx/vendor/future/past/builtins/misc.py b/src/clyphx/vendor/future/past/builtins/misc.py
new file mode 100644
index 0000000..ba50aa9
--- /dev/null
+++ b/src/clyphx/vendor/future/past/builtins/misc.py
@@ -0,0 +1,94 @@
+from __future__ import unicode_literals
+
+import inspect
+
+from future.utils import PY2, PY3, exec_
+
+if PY2:
+ from collections import Mapping
+else:
+ from collections.abc import Mapping
+
+if PY3:
+ import builtins
+ from collections.abc import Mapping
+
def apply(f, *args, **kw):
    """Reimplementation of Python 2's apply(): call *f* with the supplied
    positional and keyword arguments and return the result.
    """
    result = f(*args, **kw)
    return result
+
+ from past.builtins import str as oldstr
+
def chr(i):
    """
    Return a byte-string of one character with ordinal i; 0 <= i <= 255.

    (Emulates Python 2's chr(), which returned a byte-string; the result is
    wrapped in past's oldstr type.  bytes((i,)) raises ValueError outside
    the 0-255 range, so the former "0 <= i <= 256" claim was off by one.)
    """
    return oldstr(bytes((i,)))
+
def cmp(x, y):
    """
    cmp(x, y) -> integer

    Return negative if x < y, zero if x == y, positive if x > y.
    """
    # Docstring repaired: the comparison operators had been garbled to
    # "Return negative if xy." (angle brackets stripped as markup).
    # Standard Py3 idiom for the removed builtin cmp().
    return (x > y) - (x < y)
+
+ from sys import intern
+
def oct(number):
    """oct(number) -> string

    Return the octal representation of an integer, in Python 2 style
    (leading '0' rather than '0o').
    """
    # Bug fix: the old implementation ('0' + builtins.oct(number)[2:])
    # returned '00' for 0 and garbage such as '0o10' for negative input
    # (slicing off '-0' instead of '0o').  Py2's oct() gave '0' and '-010'.
    if number == 0:
        return '0'
    if number < 0:
        return '-0' + builtins.oct(number)[3:]
    return '0' + builtins.oct(number)[2:]
+
+ raw_input = input
+ from imp import reload
+ unicode = str
+ unichr = chr
+ xrange = range
+else:
+ import __builtin__
+ from collections import Mapping
+ apply = __builtin__.apply
+ chr = __builtin__.chr
+ cmp = __builtin__.cmp
+ execfile = __builtin__.execfile
+ intern = __builtin__.intern
+ oct = __builtin__.oct
+ raw_input = __builtin__.raw_input
+ reload = __builtin__.reload
+ unicode = __builtin__.unicode
+ unichr = __builtin__.unichr
+ xrange = __builtin__.xrange
+
+
+if PY3:
def execfile(filename, myglobals=None, mylocals=None):
    """
    Read and execute a Python script from a file in the given namespaces.
    The globals and locals are dictionaries, defaulting to the current
    globals and locals. If only globals is given, locals defaults to it.
    """
    if myglobals is None:
        # There seems to be no alternative to frame hacking here.
        # stack()[1] is the caller's frame; we borrow its namespaces to
        # mimic Py2 execfile()'s default behaviour.
        caller_frame = inspect.stack()[1]
        myglobals = caller_frame[0].f_globals
        mylocals = caller_frame[0].f_locals
    elif mylocals is None:
        # Only if myglobals is given do we set mylocals to it.
        mylocals = myglobals
    if not isinstance(myglobals, Mapping):
        raise TypeError('globals must be a mapping')
    if not isinstance(mylocals, Mapping):
        raise TypeError('locals must be a mapping')
    # Read as bytes so compile() can honour any PEP 263 coding cookie.
    with open(filename, "rb") as fin:
        source = fin.read()
    code = compile(source, filename, "exec")
    # exec_ is future.utils' Py2/Py3-compatible exec wrapper.
    exec_(code, myglobals, mylocals)
+
+
+if PY3:
+ __all__ = ['apply', 'chr', 'cmp', 'execfile', 'intern', 'raw_input',
+ 'reload', 'unichr', 'unicode', 'xrange']
+else:
+ __all__ = []
diff --git a/src/clyphx/vendor/future/past/builtins/noniterators.py b/src/clyphx/vendor/future/past/builtins/noniterators.py
new file mode 100644
index 0000000..183ffff
--- /dev/null
+++ b/src/clyphx/vendor/future/past/builtins/noniterators.py
@@ -0,0 +1,272 @@
+"""
+This module is designed to be used as follows::
+
+ from past.builtins.noniterators import filter, map, range, reduce, zip
+
+And then, for example::
+
+ assert isinstance(range(5), list)
+
+The list-producing functions this brings in are::
+
+- ``filter``
+- ``map``
+- ``range``
+- ``reduce``
+- ``zip``
+
+"""
+
+from __future__ import division, absolute_import, print_function
+
+from itertools import chain, starmap
+import itertools # since zip_longest doesn't exist on Py2
+from past.types import basestring
+from past.utils import PY3
+
+
def flatmap(f, items):
    """Apply *f* to every item and chain the resulting iterables into a
    single flat iterator.
    """
    mapped = map(f, items)
    return chain.from_iterable(mapped)
+
+
+if PY3:
+ import builtins
+
+ # list-producing versions of the major Python iterating functions
def oldfilter(*args):
    """
    filter(function or None, sequence) -> list, tuple, or string

    Return those items of sequence for which function(item) is true.
    If function is None, return the items that are true. If sequence
    is a tuple or string, return the same type, else return a list.
    """
    seq = args[1]
    seqtype = type(seq)
    filtered = builtins.filter(*args)
    if isinstance(seq, basestring):
        # Rebuild a string of the same type as the input.
        return seqtype().join(filtered)
    if isinstance(seq, (tuple, list)):
        return seqtype(filtered)
    # Fall back to list. Is this the right thing to do?
    return list(filtered)
+
+ # This is surprisingly difficult to get right. For example, the
+ # solutions here fail with the test cases in the docstring below:
+ # http://stackoverflow.com/questions/8072755/
def oldmap(func, *iterables):
    """
    map(function, sequence[, sequence, ...]) -> list

    Return a list of the results of applying the function to the
    items of the argument sequence(s). If more than one sequence is
    given, the function is called with an argument list consisting of
    the corresponding item of each sequence, substituting None for
    missing values when not all sequences have the same length. If
    the function is None, return a list of the items of the sequence
    (or a list of tuples if more than one sequence).

    Test cases:
    >>> oldmap(None, 'hello world')
    ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd']

    >>> oldmap(None, range(4))
    [0, 1, 2, 3]

    More test cases are in test_past.test_builtins.
    """
    rows = list(itertools.zip_longest(*iterables))
    if not rows:
        return []
    if func is None:
        result = rows
    else:
        result = list(starmap(func, rows))

    # If every element is a length-1 sequence, a single iterable was
    # passed: unwrap the 1-tuples so a flat list is returned.
    try:
        if max(len(item) for item in result) == 1:
            return list(chain.from_iterable(result))
        # return list(flatmap(func, result))
    except TypeError:
        # Simple objects like ints have no len(); nothing to unwrap.
        pass
    return result
+
+ ############################
+ ### For reference, the source code for Py2.7 map function:
+ # static PyObject *
+ # builtin_map(PyObject *self, PyObject *args)
+ # {
+ # typedef struct {
+ # PyObject *it; /* the iterator object */
+ # int saw_StopIteration; /* bool: did the iterator end? */
+ # } sequence;
+ #
+ # PyObject *func, *result;
+ # sequence *seqs = NULL, *sqp;
+ # Py_ssize_t n, len;
+ # register int i, j;
+ #
+ # n = PyTuple_Size(args);
+ # if (n < 2) {
+ # PyErr_SetString(PyExc_TypeError,
+ # "map() requires at least two args");
+ # return NULL;
+ # }
+ #
+ # func = PyTuple_GetItem(args, 0);
+ # n--;
+ #
+ # if (func == Py_None) {
+ # if (PyErr_WarnPy3k("map(None, ...) not supported in 3.x; "
+ # "use list(...)", 1) < 0)
+ # return NULL;
+ # if (n == 1) {
+ # /* map(None, S) is the same as list(S). */
+ # return PySequence_List(PyTuple_GetItem(args, 1));
+ # }
+ # }
+ #
+ # /* Get space for sequence descriptors. Must NULL out the iterator
+ # * pointers so that jumping to Fail_2 later doesn't see trash.
+ # */
+ # if ((seqs = PyMem_NEW(sequence, n)) == NULL) {
+ # PyErr_NoMemory();
+ # return NULL;
+ # }
+ # for (i = 0; i < n; ++i) {
+ # seqs[i].it = (PyObject*)NULL;
+ # seqs[i].saw_StopIteration = 0;
+ # }
+ #
+ # /* Do a first pass to obtain iterators for the arguments, and set len
+ # * to the largest of their lengths.
+ # */
+ # len = 0;
+ # for (i = 0, sqp = seqs; i < n; ++i, ++sqp) {
+ # PyObject *curseq;
+ # Py_ssize_t curlen;
+ #
+ # /* Get iterator. */
+ # curseq = PyTuple_GetItem(args, i+1);
+ # sqp->it = PyObject_GetIter(curseq);
+ # if (sqp->it == NULL) {
+ # static char errmsg[] =
+ # "argument %d to map() must support iteration";
+ # char errbuf[sizeof(errmsg) + 25];
+ # PyOS_snprintf(errbuf, sizeof(errbuf), errmsg, i+2);
+ # PyErr_SetString(PyExc_TypeError, errbuf);
+ # goto Fail_2;
+ # }
+ #
+ # /* Update len. */
+ # curlen = _PyObject_LengthHint(curseq, 8);
+ # if (curlen > len)
+ # len = curlen;
+ # }
+ #
+ # /* Get space for the result list. */
+ # if ((result = (PyObject *) PyList_New(len)) == NULL)
+ # goto Fail_2;
+ #
+ # /* Iterate over the sequences until all have stopped. */
+ # for (i = 0; ; ++i) {
+ # PyObject *alist, *item=NULL, *value;
+ # int numactive = 0;
+ #
+ # if (func == Py_None && n == 1)
+ # alist = NULL;
+ # else if ((alist = PyTuple_New(n)) == NULL)
+ # goto Fail_1;
+ #
+ # for (j = 0, sqp = seqs; j < n; ++j, ++sqp) {
+ # if (sqp->saw_StopIteration) {
+ # Py_INCREF(Py_None);
+ # item = Py_None;
+ # }
+ # else {
+ # item = PyIter_Next(sqp->it);
+ # if (item)
+ # ++numactive;
+ # else {
+ # if (PyErr_Occurred()) {
+ # Py_XDECREF(alist);
+ # goto Fail_1;
+ # }
+ # Py_INCREF(Py_None);
+ # item = Py_None;
+ # sqp->saw_StopIteration = 1;
+ # }
+ # }
+ # if (alist)
+ # PyTuple_SET_ITEM(alist, j, item);
+ # else
+ # break;
+ # }
+ #
+ # if (!alist)
+ # alist = item;
+ #
+ # if (numactive == 0) {
+ # Py_DECREF(alist);
+ # break;
+ # }
+ #
+ # if (func == Py_None)
+ # value = alist;
+ # else {
+ # value = PyEval_CallObject(func, alist);
+ # Py_DECREF(alist);
+ # if (value == NULL)
+ # goto Fail_1;
+ # }
+ # if (i >= len) {
+ # int status = PyList_Append(result, value);
+ # Py_DECREF(value);
+ # if (status < 0)
+ # goto Fail_1;
+ # }
+ # else if (PyList_SetItem(result, i, value) < 0)
+ # goto Fail_1;
+ # }
+ #
+ # if (i < len && PyList_SetSlice(result, i, len, NULL) < 0)
+ # goto Fail_1;
+ #
+ # goto Succeed;
+ #
+ # Fail_1:
+ # Py_DECREF(result);
+ # Fail_2:
+ # result = NULL;
+ # Succeed:
+ # assert(seqs);
+ # for (i = 0; i < n; ++i)
+ # Py_XDECREF(seqs[i].it);
+ # PyMem_DEL(seqs);
+ # return result;
+ # }
+
def oldrange(*args, **kwargs):
    """List-producing version of range(), matching Python 2 semantics."""
    return [value for value in builtins.range(*args, **kwargs)]
+
def oldzip(*args, **kwargs):
    """List-producing version of zip(), matching Python 2 semantics."""
    return [pair for pair in builtins.zip(*args, **kwargs)]
+
+ filter = oldfilter
+ map = oldmap
+ range = oldrange
+ from functools import reduce
+ zip = oldzip
+ __all__ = ['filter', 'map', 'range', 'reduce', 'zip']
+
+else:
+ import __builtin__
+ # Python 2-builtin ranges produce lists
+ filter = __builtin__.filter
+ map = __builtin__.map
+ range = __builtin__.range
+ reduce = __builtin__.reduce
+ zip = __builtin__.zip
+ __all__ = []
diff --git a/src/clyphx/vendor/future/past/translation/__init__.py b/src/clyphx/vendor/future/past/translation/__init__.py
new file mode 100644
index 0000000..7c67886
--- /dev/null
+++ b/src/clyphx/vendor/future/past/translation/__init__.py
@@ -0,0 +1,485 @@
+# -*- coding: utf-8 -*-
+"""
+past.translation
+==================
+
+The ``past.translation`` package provides an import hook for Python 3 which
+transparently runs ``futurize`` fixers over Python 2 code on import to convert
+print statements into functions, etc.
+
+It is intended to assist users in migrating to Python 3.x even if some
+dependencies still only support Python 2.x.
+
+Usage
+-----
+
+Once your Py2 package is installed in the usual module search path, the import
+hook is invoked as follows:
+
+ >>> from past.translation import autotranslate
+ >>> autotranslate('mypackagename')
+
+Or:
+
+ >>> autotranslate(['mypackage1', 'mypackage2'])
+
+You can unregister the hook using::
+
+ >>> from past.translation import remove_hooks
+ >>> remove_hooks()
+
+Author: Ed Schofield.
+Inspired by and based on ``uprefix`` by Vinay M. Sajip.
+"""
+
+import imp
+import logging
+import marshal
+import os
+import sys
+import copy
+from lib2to3.pgen2.parse import ParseError
+from lib2to3.refactor import RefactoringTool
+
+from libfuturize import fixes
+
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+myfixes = (list(fixes.libfuturize_fix_names_stage1) +
+ list(fixes.lib2to3_fix_names_stage1) +
+ list(fixes.libfuturize_fix_names_stage2) +
+ list(fixes.lib2to3_fix_names_stage2))
+
+
+# We detect whether the code is Py2 or Py3 by applying certain lib2to3 fixers
+# to it. If the diff is empty, it's Python 3 code.
+
+py2_detect_fixers = [
+# From stage 1:
+ 'lib2to3.fixes.fix_apply',
+ # 'lib2to3.fixes.fix_dict', # TODO: add support for utils.viewitems() etc. and move to stage2
+ 'lib2to3.fixes.fix_except',
+ 'lib2to3.fixes.fix_execfile',
+ 'lib2to3.fixes.fix_exitfunc',
+ 'lib2to3.fixes.fix_funcattrs',
+ 'lib2to3.fixes.fix_filter',
+ 'lib2to3.fixes.fix_has_key',
+ 'lib2to3.fixes.fix_idioms',
+ 'lib2to3.fixes.fix_import', # makes any implicit relative imports explicit. (Use with ``from __future__ import absolute_import)
+ 'lib2to3.fixes.fix_intern',
+ 'lib2to3.fixes.fix_isinstance',
+ 'lib2to3.fixes.fix_methodattrs',
+ 'lib2to3.fixes.fix_ne',
+ 'lib2to3.fixes.fix_numliterals', # turns 1L into 1, 0755 into 0o755
+ 'lib2to3.fixes.fix_paren',
+ 'lib2to3.fixes.fix_print',
+ 'lib2to3.fixes.fix_raise', # uses incompatible with_traceback() method on exceptions
+ 'lib2to3.fixes.fix_renames',
+ 'lib2to3.fixes.fix_reduce',
+ # 'lib2to3.fixes.fix_set_literal', # this is unnecessary and breaks Py2.6 support
+ 'lib2to3.fixes.fix_repr',
+ 'lib2to3.fixes.fix_standarderror',
+ 'lib2to3.fixes.fix_sys_exc',
+ 'lib2to3.fixes.fix_throw',
+ 'lib2to3.fixes.fix_tuple_params',
+ 'lib2to3.fixes.fix_types',
+ 'lib2to3.fixes.fix_ws_comma',
+ 'lib2to3.fixes.fix_xreadlines',
+
+# From stage 2:
+ 'lib2to3.fixes.fix_basestring',
+ # 'lib2to3.fixes.fix_buffer', # perhaps not safe. Test this.
+ # 'lib2to3.fixes.fix_callable', # not needed in Py3.2+
+ # 'lib2to3.fixes.fix_dict', # TODO: add support for utils.viewitems() etc.
+ 'lib2to3.fixes.fix_exec',
+ # 'lib2to3.fixes.fix_future', # we don't want to remove __future__ imports
+ 'lib2to3.fixes.fix_getcwdu',
+ # 'lib2to3.fixes.fix_imports', # called by libfuturize.fixes.fix_future_standard_library
+ # 'lib2to3.fixes.fix_imports2', # we don't handle this yet (dbm)
+ # 'lib2to3.fixes.fix_input',
+ # 'lib2to3.fixes.fix_itertools',
+ # 'lib2to3.fixes.fix_itertools_imports',
+ 'lib2to3.fixes.fix_long',
+ # 'lib2to3.fixes.fix_map',
+ # 'lib2to3.fixes.fix_metaclass', # causes SyntaxError in Py2! Use the one from ``six`` instead
+ 'lib2to3.fixes.fix_next',
+ 'lib2to3.fixes.fix_nonzero', # TODO: add a decorator for mapping __bool__ to __nonzero__
+ # 'lib2to3.fixes.fix_operator', # we will need support for this by e.g. extending the Py2 operator module to provide those functions in Py3
+ 'lib2to3.fixes.fix_raw_input',
+ # 'lib2to3.fixes.fix_unicode', # strips off the u'' prefix, which removes a potentially helpful source of information for disambiguating unicode/byte strings
+ # 'lib2to3.fixes.fix_urllib',
+ 'lib2to3.fixes.fix_xrange',
+ # 'lib2to3.fixes.fix_zip',
+]
+
+
class RTs:
    """
    A namespace holding the lib2to3 refactoring tools, created lazily so
    that importing this module stays fast (see issue #117).

    Two grammars exist -- with and without the print statement -- hence two
    refactoring-tool instances of each flavour.
    """
    _rt = None
    _rtp = None
    _rt_py2_detect = None
    _rtp_py2_detect = None

    @staticmethod
    def setup():
        """
        Call this before using the refactoring tools to create them on demand
        if needed.
        """
        if RTs._rt is None or RTs._rtp is None:
            RTs._rt = RefactoringTool(myfixes)
            RTs._rtp = RefactoringTool(myfixes, {'print_function': True})

    @staticmethod
    def setup_detect_python2():
        """
        Call this before using the refactoring tools to create them on demand
        if needed.
        """
        if RTs._rt_py2_detect is None or RTs._rtp_py2_detect is None:
            RTs._rt_py2_detect = RefactoringTool(py2_detect_fixers)
            RTs._rtp_py2_detect = RefactoringTool(
                py2_detect_fixers, {'print_function': True})
+
+
+# We need to find a prefix for the standard library, as we don't want to
+# process any files there (they will already be Python 3).
+#
+# The following method is used by Sanjay Vinip in uprefix. This fails for
+# ``conda`` environments:
+# # In a non-pythonv virtualenv, sys.real_prefix points to the installed Python.
+# # In a pythonv venv, sys.base_prefix points to the installed Python.
+# # Outside a virtual environment, sys.prefix points to the installed Python.
+
+# if hasattr(sys, 'real_prefix'):
+# _syslibprefix = sys.real_prefix
+# else:
+# _syslibprefix = getattr(sys, 'base_prefix', sys.prefix)
+
+# Instead, we use the portion of the path common to both the stdlib modules
+# ``math`` and ``urllib``.
+
def splitall(path):
    """
    Split *path* into a list of all of its components.
    (Recipe from the Python Cookbook.)
    """
    components = []
    while True:
        head, tail = os.path.split(path)
        if head == path:
            # Sentinel for absolute paths (e.g. '/'): split() is a no-op.
            components.insert(0, head)
            return components
        if tail == path:
            # Sentinel for relative paths: only one component left.
            components.insert(0, tail)
            return components
        path = head
        components.insert(0, tail)
+
+
def common_substring(s1, s2):
    """
    Returns the longest common path prefix of the two paths, starting from
    the left.  Returns '' when the paths share no leading component.
    """
    chunks = []
    path1 = splitall(s1)
    path2 = splitall(s2)
    for (dir1, dir2) in zip(path1, path2):
        if dir1 != dir2:
            break
        chunks.append(dir1)
    # Bug fix: os.path.join(*[]) raises TypeError, so guard the case where
    # the two paths have no component in common.
    if not chunks:
        return ''
    return os.path.join(*chunks)
+
+# _stdlibprefix = common_substring(math.__file__, urllib.__file__)
+
+
def detect_python2(source, pathname):
    """
    Returns a bool indicating whether we think the code is Py2.

    Heuristic: run the Py2-detection lib2to3 fixers over *source*; if any
    of them would change it, conclude it is Python 2 code.
    """
    RTs.setup_detect_python2()
    try:
        tree = RTs._rt_py2_detect.refactor_string(source, pathname)
    except ParseError as e:
        if e.msg != 'bad input' or e.value != '=':
            raise
        # Bug fix: retry with the *detection* tool that uses the
        # print-function grammar.  This previously used RTs._rtp, which
        # setup_detect_python2() does not initialise (it could still be
        # None here, raising AttributeError).
        tree = RTs._rtp_py2_detect.refactor_string(source, pathname)

    if source != str(tree)[:-1]:  # remove added newline
        # The above fixers made changes, so we conclude it's Python 2 code
        logger.debug('Detected Python 2 code: {0}'.format(pathname))
        return True
    else:
        logger.debug('Detected Python 3 code: {0}'.format(pathname))
        return False
+
+
+class Py2Fixer(object):
+ """
+ An import hook class that uses lib2to3 for source-to-source translation of
+ Py2 code to Py3.
+ """
+
+ # See the comments on :class:future.standard_library.RenameImport.
+ # We add this attribute here so remove_hooks() and install_hooks() can
+ # unambiguously detect whether the import hook is installed:
+ PY2FIXER = True
+
+ def __init__(self):
+ self.found = None
+ self.base_exclude_paths = ['future', 'past']
+ self.exclude_paths = copy.copy(self.base_exclude_paths)
+ self.include_paths = []
+
+ def include(self, paths):
+ """
+ Pass in a sequence of module names such as 'plotrique.plotting' that,
+ if present at the leftmost side of the full package name, would
+ specify the module to be transformed from Py2 to Py3.
+ """
+ self.include_paths += paths
+
+ def exclude(self, paths):
+ """
+ Pass in a sequence of strings such as 'mymodule' that, if
+ present at the leftmost side of the full package name, would cause
+ the module not to undergo any source transformation.
+ """
+ self.exclude_paths += paths
+
+ def find_module(self, fullname, path=None):
+ logger.debug('Running find_module: {0}...'.format(fullname))
+ if '.' in fullname:
+ parent, child = fullname.rsplit('.', 1)
+ if path is None:
+ loader = self.find_module(parent, path)
+ mod = loader.load_module(parent)
+ path = mod.__path__
+ fullname = child
+
+ # Perhaps we should try using the new importlib functionality in Python
+ # 3.3: something like this?
+ # thing = importlib.machinery.PathFinder.find_module(fullname, path)
+ try:
+ self.found = imp.find_module(fullname, path)
+ except Exception as e:
+ logger.debug('Py2Fixer could not find {0}')
+ logger.debug('Exception was: {0})'.format(fullname, e))
+ return None
+ self.kind = self.found[-1][-1]
+ if self.kind == imp.PKG_DIRECTORY:
+ self.pathname = os.path.join(self.found[1], '__init__.py')
+ elif self.kind == imp.PY_SOURCE:
+ self.pathname = self.found[1]
+ return self
+
+ def transform(self, source):
+ # This implementation uses lib2to3,
+ # you can override and use something else
+ # if that's better for you
+
+ # lib2to3 likes a newline at the end
+ RTs.setup()
+ source += '\n'
+ try:
+ tree = RTs._rt.refactor_string(source, self.pathname)
+ except ParseError as e:
+ if e.msg != 'bad input' or e.value != '=':
+ raise
+ tree = RTs._rtp.refactor_string(source, self.pathname)
+ # could optimise a bit for only doing str(tree) if
+ # getattr(tree, 'was_changed', False) returns True
+ return str(tree)[:-1] # remove added newline
+
    def load_module(self, fullname):
        """PEP 302 loader: import ``fullname``, auto-translating Python 2
        source to Python 3 via ``transform()`` when the module is opted in
        through include_paths and not a compiled/extension/builtin/frozen
        module.  Translated bytecode is cached next to the normal cache.
        """
        logger.debug('Running load_module for {0}...'.format(fullname))
        if fullname in sys.modules:
            mod = sys.modules[fullname]
        else:
            # Never attempt to translate already-compiled, extension,
            # builtin or frozen modules.
            if self.kind in (imp.PY_COMPILED, imp.C_EXTENSION, imp.C_BUILTIN,
                             imp.PY_FROZEN):
                convert = False
            # elif (self.pathname.startswith(_stdlibprefix)
            #       and 'site-packages' not in self.pathname):
            #     # We assume it's a stdlib package in this case. Is this too brittle?
            #     # Please file a bug report at https://github.com/PythonCharmers/python-future
            #     # if so.
            #     convert = False
            # in theory, other paths could be configured to be excluded here too
            elif any([fullname.startswith(path) for path in self.exclude_paths]):
                convert = False
            elif any([fullname.startswith(path) for path in self.include_paths]):
                convert = True
            else:
                # Default is NOT to translate: only include_paths opt in.
                convert = False
            if not convert:
                logger.debug('Excluded {0} from translation'.format(fullname))
                mod = imp.load_module(fullname, *self.found)
            else:
                logger.debug('Autoconverting {0} ...'.format(fullname))
                mod = imp.new_module(fullname)
                sys.modules[fullname] = mod

                # required by PEP 302
                mod.__file__ = self.pathname
                mod.__name__ = fullname
                mod.__loader__ = self

                # This:
                #     mod.__package__ = '.'.join(fullname.split('.')[:-1])
                # seems to result in "SystemError: Parent module '' not loaded,
                # cannot perform relative import" for a package's __init__.py
                # file. We use the approach below. Another option to try is the
                # minimal load_module pattern from the PEP 302 text instead.

                # Is the test in the next line more or less robust than the
                # following one? Presumably less ...
                # ispkg = self.pathname.endswith('__init__.py')

                if self.kind == imp.PKG_DIRECTORY:
                    mod.__path__ = [ os.path.dirname(self.pathname) ]
                    mod.__package__ = fullname
                else:
                    #else, regular module
                    mod.__path__ = []
                    mod.__package__ = fullname.rpartition('.')[0]

                try:
                    cachename = imp.cache_from_source(self.pathname)
                    if not os.path.exists(cachename):
                        update_cache = True
                    else:
                        # Cache is stale if the source is newer than it.
                        sourcetime = os.stat(self.pathname).st_mtime
                        cachetime = os.stat(cachename).st_mtime
                        update_cache = cachetime < sourcetime
                    # # Force update_cache to work around a problem with it being treated as Py3 code???
                    # update_cache = True
                    if not update_cache:
                        with open(cachename, 'rb') as f:
                            data = f.read()
                            try:
                                code = marshal.loads(data)
                            except Exception:
                                # pyc could be corrupt. Regenerate it
                                update_cache = True
                    if update_cache:
                        if self.found[0]:
                            source = self.found[0].read()
                        elif self.kind == imp.PKG_DIRECTORY:
                            with open(self.pathname) as f:
                                source = f.read()

                        if detect_python2(source, self.pathname):
                            source = self.transform(source)

                        code = compile(source, self.pathname, 'exec')

                        # Best-effort: write the compiled code back to the
                        # bytecode cache for next time.
                        dirname = os.path.dirname(cachename)
                        try:
                            if not os.path.exists(dirname):
                                os.makedirs(dirname)
                            with open(cachename, 'wb') as f:
                                data = marshal.dumps(code)
                                f.write(data)
                        except Exception:   # could be write-protected
                            pass
                    exec(code, mod.__dict__)
                except Exception as e:
                    # must remove module from sys.modules
                    del sys.modules[fullname]
                    raise # keep it simple
        if self.found[0]:
            self.found[0].close()
        return mod
+
+_hook = Py2Fixer()
+
+
def install_hooks(include_paths=(), exclude_paths=()):
    """Install the Py2 auto-translation import hook on sys.meta_path.

    ``include_paths`` / ``exclude_paths`` are module-name prefixes; a bare
    string is accepted as shorthand for a one-element tuple.  The hook is
    only activated on Python 3 interpreters.
    """
    if isinstance(include_paths, str):
        include_paths = (include_paths,)
    if isinstance(exclude_paths, str):
        exclude_paths = (exclude_paths,)
    assert len(include_paths) + len(exclude_paths) > 0, 'Pass at least one argument'
    _hook.include(include_paths)
    _hook.exclude(exclude_paths)
    # _hook.debug = debug
    # Insert at the front of sys.meta_path so this finder wins; this could
    # be made a parameter.  We could also return the hook once there are
    # ways of configuring it.
    if sys.version_info[0] >= 3 and _hook not in sys.meta_path:
        sys.meta_path.insert(0, _hook)
+
+
def remove_hooks():
    """Uninstall the translation import hook if it is currently installed."""
    try:
        sys.meta_path.remove(_hook)
    except ValueError:
        # Hook was not installed; nothing to do.
        pass
+
+
def detect_hooks():
    """
    Returns True if the import hooks are installed, False if not.
    """
    installed = _hook in sys.meta_path
    return installed
+
+
class hooks(object):
    """
    Context manager that enables the Py2 translation import hooks.

    Use like this:

    >>> from past import translation
    >>> with translation.hooks():
    ...     import mypy2module
    >>> import requests    # py2/3 compatible anyway
    >>> # etc.
    """
    def __enter__(self):
        # Remember the prior state so __exit__ can restore it.
        self.hooks_were_installed = detect_hooks()
        install_hooks()
        return self

    def __exit__(self, *exc_info):
        if not self.hooks_were_installed:
            remove_hooks()
+
+
class suspend_hooks(object):
    """
    Context manager that temporarily disables the translation import hooks.

    Use like this:

    >>> from past import translation
    >>> translation.install_hooks()
    >>> import http.client
    >>> # ...
    >>> with translation.suspend_hooks():
    >>>     import requests     # or others that support Py2/3

    If the hooks were disabled before the context, they are not installed
    when the context is left.
    """
    def __enter__(self):
        # Remember the prior state so __exit__ can restore it.
        self.hooks_were_installed = detect_hooks()
        remove_hooks()
        return self

    def __exit__(self, *exc_info):
        if self.hooks_were_installed:
            install_hooks()
+
+
# alias: ``autotranslate`` is the public, more descriptive name for
# install_hooks().
autotranslate = install_hooks
diff --git a/src/clyphx/vendor/future/past/types/__init__.py b/src/clyphx/vendor/future/past/types/__init__.py
new file mode 100644
index 0000000..91dd270
--- /dev/null
+++ b/src/clyphx/vendor/future/past/types/__init__.py
@@ -0,0 +1,29 @@
+"""
+Forward-ports of types from Python 2 for use with Python 3:
+
+- ``basestring``: equivalent to ``(str, bytes)`` in ``isinstance`` checks
+- ``dict``: with list-producing .keys() etc. methods
+- ``str``: bytes-like, but iterating over them doesn't product integers
+- ``long``: alias of Py3 int with ``L`` suffix in the ``repr``
+- ``unicode``: alias of Py3 str with ``u`` prefix in the ``repr``
+
+"""
+
+from past import utils
+
+if utils.PY2:
+ import __builtin__
+ basestring = __builtin__.basestring
+ dict = __builtin__.dict
+ str = __builtin__.str
+ long = __builtin__.long
+ unicode = __builtin__.unicode
+ __all__ = []
+else:
+ from .basestring import basestring
+ from .olddict import olddict
+ from .oldstr import oldstr
+ long = int
+ unicode = str
+ # from .unicode import unicode
+ __all__ = ['basestring', 'olddict', 'oldstr', 'long', 'unicode']
diff --git a/src/clyphx/vendor/future/past/types/basestring.py b/src/clyphx/vendor/future/past/types/basestring.py
new file mode 100644
index 0000000..1cab22f
--- /dev/null
+++ b/src/clyphx/vendor/future/past/types/basestring.py
@@ -0,0 +1,39 @@
+"""
+An implementation of the basestring type for Python 3
+
+Example use:
+
+>>> s = b'abc'
+>>> assert isinstance(s, basestring)
+>>> from past.types import str as oldstr
+>>> s2 = oldstr(b'abc')
+>>> assert isinstance(s2, basestring)
+
+"""
+
+import sys
+
+from past.utils import with_metaclass, PY2
+
if PY2:
    # Under Py2, rebind ``str`` to the text type so the (bytes, str) check
    # in BaseBaseString covers both binary and text strings.
    str = unicode

# (major, minor) version tuple of the running interpreter.
ver = sys.version_info[:2]
+
+
class BaseBaseString(type):
    """Metaclass for ``basestring``: makes isinstance(x, basestring) true
    for any ``bytes`` or ``str`` instance."""

    def __instancecheck__(cls, instance):
        return isinstance(instance, (bytes, str))

    def __subclasshook__(cls, thing):
        # Bug fix: this was ``raise NotImplemented``, which itself raises
        # TypeError at call time because NotImplemented is not an exception.
        # Returning NotImplemented is the documented convention for
        # __subclasshook__, deferring to the default subclass machinery.
        return NotImplemented
+
+
class basestring(with_metaclass(BaseBaseString)):
    """
    A minimal backport of the Python 2 basestring type to Py3.

    Both ``bytes`` and ``str`` instances pass ``isinstance`` checks against
    this class (see BaseBaseString.__instancecheck__ above).
    """
+
+
+__all__ = ['basestring']
diff --git a/src/clyphx/vendor/future/past/types/olddict.py b/src/clyphx/vendor/future/past/types/olddict.py
new file mode 100644
index 0000000..f4f92a2
--- /dev/null
+++ b/src/clyphx/vendor/future/past/types/olddict.py
@@ -0,0 +1,96 @@
+"""
+A dict subclass for Python 3 that behaves like Python 2's dict
+
+Example use:
+
+>>> from past.builtins import dict
+>>> d1 = dict() # instead of {} for an empty dict
+>>> d2 = dict(key1='value1', key2='value2')
+
+The keys, values and items methods now return lists on Python 3.x and there are
+methods for iterkeys, itervalues, iteritems, and viewkeys etc.
+
+>>> for d in (d1, d2):
+... assert isinstance(d.keys(), list)
+... assert isinstance(d.values(), list)
+... assert isinstance(d.items(), list)
+"""
+
+import sys
+
+from past.utils import with_metaclass
+
+
# Keep a handle on the real builtin dict before any shadowing.
_builtin_dict = dict
# (major, minor) version tuple of the running interpreter.
ver = sys.version_info[:2]


class BaseOldDict(type):
    """Metaclass making isinstance(x, olddict) true for any builtin dict."""

    def __instancecheck__(cls, instance):
        return isinstance(instance, _builtin_dict)
+
+
class olddict(with_metaclass(BaseOldDict, _builtin_dict)):
    """
    A dict subclass for Python 3 that behaves like Python 2's dict:
    keys()/values()/items() return lists, and the Py2 iter*/view* and
    has_key() methods are provided.
    """
    # On Py3 the builtin methods already return iterable views, so they can
    # serve directly as the Py2-style iter*/view* methods.
    iterkeys = _builtin_dict.keys
    viewkeys = _builtin_dict.keys

    def keys(self):
        """D.keys() -> list of D's keys (Py2 semantics)."""
        return list(super(olddict, self).keys())

    itervalues = _builtin_dict.values
    viewvalues = _builtin_dict.values

    def values(self):
        """D.values() -> list of D's values (Py2 semantics)."""
        return list(super(olddict, self).values())

    iteritems = _builtin_dict.items
    viewitems = _builtin_dict.items

    def items(self):
        """D.items() -> list of D's (key, value) pairs (Py2 semantics)."""
        return list(super(olddict, self).items())

    def has_key(self, k):
        """
        D.has_key(k) -> True if D has a key k, else False
        """
        return k in self

    def __native__(self):
        """
        Hook for the past.utils.native() function
        """
        # Bug fix: this previously returned super(oldbytes, self), but
        # ``oldbytes`` is not defined anywhere in this module, so calling
        # __native__ always raised NameError.  Return a plain builtin dict
        # copy, the native Py3 equivalent.
        return _builtin_dict(self)
+
+
+__all__ = ['olddict']
diff --git a/src/clyphx/vendor/future/past/types/oldstr.py b/src/clyphx/vendor/future/past/types/oldstr.py
new file mode 100644
index 0000000..a477d88
--- /dev/null
+++ b/src/clyphx/vendor/future/past/types/oldstr.py
@@ -0,0 +1,135 @@
+"""
+Pure-Python implementation of a Python 2-like str object for Python 3.
+"""
+
+from numbers import Integral
+
+from past.utils import PY2, with_metaclass
+
+if PY2:
+ from collections import Iterable
+else:
+ from collections.abc import Iterable
+
# Keep a handle on the real builtin bytes before any shadowing.
_builtin_bytes = bytes


class BaseOldStr(type):
    """Metaclass making isinstance(x, oldstr) true for any bytes object."""

    def __instancecheck__(cls, instance):
        return isinstance(instance, _builtin_bytes)
+
+
def unescape(s):
    """
    Interpret escape sequences in *s* and return the resulting string.

    Example:
    >>> s = unescape(r'abc\\def')   # i.e. 'abc\\\\def'
    >>> print(s)
    'abc\def'
    >>> s2 = unescape('abc\\ndef')
    >>> len(s2)
    8
    >>> print(s2)
    abc
    def
    """
    encoded = s.encode()
    return encoded.decode('unicode_escape')
+
+
class oldstr(with_metaclass(BaseOldStr, _builtin_bytes)):
    """
    A forward port of the Python 2 8-bit string object to Py3.
    """
    # Python 2 strings have no __iter__ method; accessing the attribute
    # raises AttributeError so code probing for it behaves as on Py2.
    @property
    def __iter__(self):
        raise AttributeError

    def __dir__(self):
        # Hide __iter__ from dir() for the same reason.
        return [thing for thing in dir(_builtin_bytes) if thing != '__iter__']

    def __repr__(self):
        """Return the Py2-style repr, i.e. without the leading ``b``."""
        s = super(oldstr, self).__repr__()  # e.g. b'abc'
        return s[1:]

    def __str__(self):
        s = super(oldstr, self).__str__()  # e.g. "b'abc'" or "b'abc\\ndef'"
        # TODO: fix this: bytes.__repr__ switches to double quotes when the
        # content contains a single quote, which breaks this assertion.
        assert s[:2] == "b'" and s[-1] == "'"
        return unescape(s[2:-1])  # e.g. 'abc' or 'abc\ndef'

    def __getitem__(self, y):
        # Py2 str indexing returns a 1-char string, never an integer.
        if isinstance(y, Integral):
            return super(oldstr, self).__getitem__(slice(y, y+1))
        else:
            return super(oldstr, self).__getitem__(y)

    def __getslice__(self, *args):
        return self.__getitem__(slice(*args))

    def __contains__(self, key):
        # Integers are never "in" a Py2-style string.
        if isinstance(key, int):
            return False
        # Bug fix: there was no return statement for the non-int case, so
        # substring tests (e.g. b'a' in oldstr(b'abc')) always yielded
        # None, which is falsy.  Delegate to the bytes containment test.
        return super(oldstr, self).__contains__(key)

    def __native__(self):
        """Hook for past.utils.native(): return the underlying bytes."""
        return bytes(self)
+
+
+__all__ = ['oldstr']
diff --git a/src/clyphx/vendor/future/past/utils/__init__.py b/src/clyphx/vendor/future/past/utils/__init__.py
new file mode 100644
index 0000000..f6b2642
--- /dev/null
+++ b/src/clyphx/vendor/future/past/utils/__init__.py
@@ -0,0 +1,97 @@
+"""
+Various non-built-in utility functions and definitions for Py2
+compatibility in Py3.
+
+For example:
+
+ >>> # The old_div() function behaves like Python 2's / operator
+ >>> # without "from __future__ import division"
+ >>> from past.utils import old_div
+ >>> old_div(3, 2) # like 3/2 in Py2
+ 0
+ >>> old_div(3, 2.0) # like 3/2.0 in Py2
+ 1.5
+"""
+
+import sys
+import numbers
+
# True on Python 3.x interpreters.
PY3 = sys.version_info[0] >= 3
# True on Python 2.x interpreters.
PY2 = sys.version_info[0] == 2
# True when running under PyPy.
PYPY = hasattr(sys, 'pypy_translation_info')
+
+
def with_metaclass(meta, *bases):
    """
    Function from jinja2/_compat.py. License: BSD.

    Use it like this::

        class BaseForm(object):
            pass

        class FormType(type):
            pass

        class Form(with_metaclass(FormType, BaseForm)):
            pass

    This requires a bit of explanation: the basic idea is to make a
    dummy metaclass for one level of class instantiation that replaces
    itself with the actual metaclass. Because of internal type checks
    we also need to make sure that we downgrade the custom metaclass
    for one level to something closer to type (that's why __call__ and
    __init__ comes back from type etc.).

    This has the advantage over six.with_metaclass of not introducing
    dummy classes into the final MRO.
    """
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            # this_bases is None only for the temporary_class created
            # below; the real subclass creation goes through meta(...).
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
+
+
def native(obj):
    """
    On Py2, this is a no-op: native(obj) -> obj

    On Py3, returns the corresponding native Py3 types that are
    superclasses for forward-ported objects from Py2:

    >>> from past.builtins import str, dict

    >>> native(str(b'ABC'))  # Output on Py3 follows. On Py2, output is 'ABC'
    b'ABC'
    >>> type(native(str(b'ABC')))
    bytes

    Existing native types on Py3 will be returned unchanged:

    >>> type(native(b'ABC'))
    bytes
    """
    converter = getattr(obj, '__native__', None)
    if converter is None:
        return obj
    return converter()
+
+
# An alias for future.utils.old_div():
def old_div(a, b):
    """
    Equivalent to ``a / b`` on Python 2 without ``from __future__ import
    division``.

    TODO: generalize this to other objects (like arrays etc.)
    """
    both_integral = (isinstance(a, numbers.Integral)
                     and isinstance(b, numbers.Integral))
    # Integer / integer floors on Py2; everything else is true division.
    return a // b if both_integral else a / b
+
+__all__ = ['PY3', 'PY2', 'PYPY', 'with_metaclass', 'native', 'old_div']
diff --git a/src/clyphx/vendor/future/queue/__init__.py b/src/clyphx/vendor/future/queue/__init__.py
new file mode 100644
index 0000000..22bd296
--- /dev/null
+++ b/src/clyphx/vendor/future/queue/__init__.py
@@ -0,0 +1,10 @@
from __future__ import absolute_import
import sys
__future_module__ = True

# Py2-only shim: expose the Py3 ``queue`` name backed by Py2's Queue module.
if sys.version_info[0] < 3:
    from Queue import *
else:
    raise ImportError('This package should not be accessible on Python 3. '
        'Either you are trying to run from the python-future src folder '
        'or your installation of python-future is corrupted.')
diff --git a/src/clyphx/vendor/future/reprlib/__init__.py b/src/clyphx/vendor/future/reprlib/__init__.py
new file mode 100644
index 0000000..6ccf9c0
--- /dev/null
+++ b/src/clyphx/vendor/future/reprlib/__init__.py
@@ -0,0 +1,9 @@
from __future__ import absolute_import
import sys

# Py2-only shim: expose the Py3 ``reprlib`` name backed by Py2's repr module.
if sys.version_info[0] < 3:
    from repr import *
else:
    raise ImportError('This package should not be accessible on Python 3. '
        'Either you are trying to run from the python-future src folder '
        'or your installation of python-future is corrupted.')
diff --git a/src/clyphx/vendor/future/socketserver/__init__.py b/src/clyphx/vendor/future/socketserver/__init__.py
new file mode 100644
index 0000000..c5b8c9c
--- /dev/null
+++ b/src/clyphx/vendor/future/socketserver/__init__.py
@@ -0,0 +1,9 @@
from __future__ import absolute_import
import sys

# Py2-only shim: expose the Py3 ``socketserver`` name backed by SocketServer.
if sys.version_info[0] < 3:
    from SocketServer import *
else:
    raise ImportError('This package should not be accessible on Python 3. '
        'Either you are trying to run from the python-future src folder '
        'or your installation of python-future is corrupted.')
diff --git a/src/clyphx/vendor/future/tkinter/__init__.py b/src/clyphx/vendor/future/tkinter/__init__.py
new file mode 100644
index 0000000..bb730c3
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/__init__.py
@@ -0,0 +1,28 @@
from __future__ import absolute_import
import sys

# Py2-only shim: expose the Py3 ``tkinter`` name backed by Py2's Tkinter.
if sys.version_info[0] < 3:
    from Tkinter import *
    # Private Tkinter helpers needed by the tkinter submodule shims.
    from Tkinter import (_cnfmerge, _default_root, _flatten,
                         _support_default_root, _test,
                         _tkinter, _setit)

    try: # >= 2.7.4
        from Tkinter import (_join)
    except ImportError:
        pass

    try: # >= 2.7.4
        from Tkinter import (_stringify)
    except ImportError:
        pass

    try: # >= 2.7.9
        from Tkinter import (_splitdict)
    except ImportError:
        pass

else:
    raise ImportError('This package should not be accessible on Python 3. '
        'Either you are trying to run from the python-future src folder '
        'or your installation of python-future is corrupted.')
diff --git a/src/clyphx/vendor/future/tkinter/colorchooser.py b/src/clyphx/vendor/future/tkinter/colorchooser.py
new file mode 100644
index 0000000..6dde6e8
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/colorchooser.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.colorchooser, backed by tkColorChooser on Py2.
if PY3:
    from tkinter.colorchooser import *
else:
    try:
        from tkColorChooser import *
    except ImportError:
        raise ImportError('The tkColorChooser module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/commondialog.py b/src/clyphx/vendor/future/tkinter/commondialog.py
new file mode 100644
index 0000000..eb7ae8d
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/commondialog.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.commondialog, backed by tkCommonDialog on Py2.
if PY3:
    from tkinter.commondialog import *
else:
    try:
        from tkCommonDialog import *
    except ImportError:
        raise ImportError('The tkCommonDialog module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/constants.py b/src/clyphx/vendor/future/tkinter/constants.py
new file mode 100644
index 0000000..ffe0981
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/constants.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.constants, backed by Tkconstants on Py2.
if PY3:
    from tkinter.constants import *
else:
    try:
        from Tkconstants import *
    except ImportError:
        raise ImportError('The Tkconstants module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/dialog.py b/src/clyphx/vendor/future/tkinter/dialog.py
new file mode 100644
index 0000000..113370c
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/dialog.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.dialog, backed by Dialog on Py2.
if PY3:
    from tkinter.dialog import *
else:
    try:
        from Dialog import *
    except ImportError:
        raise ImportError('The Dialog module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/dnd.py b/src/clyphx/vendor/future/tkinter/dnd.py
new file mode 100644
index 0000000..1ab4379
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/dnd.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.dnd, backed by Tkdnd on Py2.
if PY3:
    from tkinter.dnd import *
else:
    try:
        from Tkdnd import *
    except ImportError:
        raise ImportError('The Tkdnd module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/filedialog.py b/src/clyphx/vendor/future/tkinter/filedialog.py
new file mode 100644
index 0000000..93a1538
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/filedialog.py
@@ -0,0 +1,17 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.filedialog, merging Py2's FileDialog and
# tkFileDialog namespaces (Py3's module combines both).
if PY3:
    from tkinter.filedialog import *
else:
    try:
        from FileDialog import *
    except ImportError:
        raise ImportError('The FileDialog module is missing. Does your Py2 '
                          'installation include tkinter?')
    try:
        from tkFileDialog import *
    except ImportError:
        raise ImportError('The tkFileDialog module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/font.py b/src/clyphx/vendor/future/tkinter/font.py
new file mode 100644
index 0000000..628f399
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/font.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.font, backed by tkFont on Py2.
if PY3:
    from tkinter.font import *
else:
    try:
        from tkFont import *
    except ImportError:
        raise ImportError('The tkFont module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/messagebox.py b/src/clyphx/vendor/future/tkinter/messagebox.py
new file mode 100644
index 0000000..b43d870
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/messagebox.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.messagebox, backed by tkMessageBox on Py2.
if PY3:
    from tkinter.messagebox import *
else:
    try:
        from tkMessageBox import *
    except ImportError:
        raise ImportError('The tkMessageBox module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/scrolledtext.py b/src/clyphx/vendor/future/tkinter/scrolledtext.py
new file mode 100644
index 0000000..1c69db6
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/scrolledtext.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.scrolledtext, backed by ScrolledText on Py2.
if PY3:
    from tkinter.scrolledtext import *
else:
    try:
        from ScrolledText import *
    except ImportError:
        raise ImportError('The ScrolledText module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/simpledialog.py b/src/clyphx/vendor/future/tkinter/simpledialog.py
new file mode 100644
index 0000000..dba93fb
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/simpledialog.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.simpledialog, backed by SimpleDialog on Py2.
if PY3:
    from tkinter.simpledialog import *
else:
    try:
        from SimpleDialog import *
    except ImportError:
        raise ImportError('The SimpleDialog module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/tix.py b/src/clyphx/vendor/future/tkinter/tix.py
new file mode 100644
index 0000000..8d1718a
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/tix.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.tix, backed by Tix on Py2.
if PY3:
    from tkinter.tix import *
else:
    try:
        from Tix import *
    except ImportError:
        raise ImportError('The Tix module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/tkinter/ttk.py b/src/clyphx/vendor/future/tkinter/ttk.py
new file mode 100644
index 0000000..081c1b4
--- /dev/null
+++ b/src/clyphx/vendor/future/tkinter/ttk.py
@@ -0,0 +1,12 @@
from __future__ import absolute_import

from future.utils import PY3

# Py2/3 bridge: tkinter.ttk, backed by the standalone ttk module on Py2.
if PY3:
    from tkinter.ttk import *
else:
    try:
        from ttk import *
    except ImportError:
        raise ImportError('The ttk module is missing. Does your Py2 '
                          'installation include tkinter?')
diff --git a/src/clyphx/vendor/future/winreg/__init__.py b/src/clyphx/vendor/future/winreg/__init__.py
new file mode 100644
index 0000000..97243bb
--- /dev/null
+++ b/src/clyphx/vendor/future/winreg/__init__.py
@@ -0,0 +1,10 @@
from __future__ import absolute_import
import sys
__future_module__ = True

# Py2-only shim: expose the Py3 ``winreg`` name backed by Py2's _winreg.
if sys.version_info[0] < 3:
    from _winreg import *
else:
    raise ImportError('This package should not be accessible on Python 3. '
        'Either you are trying to run from the python-future src folder '
        'or your installation of python-future is corrupted.')
diff --git a/src/clyphx/vendor/future/xmlrpc/__init__.py b/src/clyphx/vendor/future/xmlrpc/__init__.py
new file mode 100644
index 0000000..e4f853e
--- /dev/null
+++ b/src/clyphx/vendor/future/xmlrpc/__init__.py
@@ -0,0 +1,9 @@
from __future__ import absolute_import
import sys

# Py2-only namespace package for xmlrpc.client / xmlrpc.server shims.
if sys.version_info[0] < 3:
    pass
else:
    raise ImportError('This package should not be accessible on Python 3. '
        'Either you are trying to run from the python-future src folder '
        'or your installation of python-future is corrupted.')
diff --git a/src/clyphx/vendor/future/xmlrpc/client.py b/src/clyphx/vendor/future/xmlrpc/client.py
new file mode 100644
index 0000000..a8d0827
--- /dev/null
+++ b/src/clyphx/vendor/future/xmlrpc/client.py
@@ -0,0 +1,5 @@
from __future__ import absolute_import
import sys

# Py2-only shim: expose xmlrpc.client backed by Py2's xmlrpclib.
assert sys.version_info[0] < 3
from xmlrpclib import *
diff --git a/src/clyphx/vendor/future/xmlrpc/server.py b/src/clyphx/vendor/future/xmlrpc/server.py
new file mode 100644
index 0000000..a8d0827
--- /dev/null
+++ b/src/clyphx/vendor/future/xmlrpc/server.py
@@ -0,0 +1,5 @@
from __future__ import absolute_import
import sys

# Py2-only shim: expose xmlrpc.server backed by Py2's xmlrpclib.
assert sys.version_info[0] < 3
from xmlrpclib import *
diff --git a/src/clyphx/vendor/typing.py b/src/clyphx/vendor/typing.py
new file mode 100644
index 0000000..dd16d9a
--- /dev/null
+++ b/src/clyphx/vendor/typing.py
@@ -0,0 +1,2550 @@
+from __future__ import absolute_import, unicode_literals
+
+import abc
+from abc import abstractmethod, abstractproperty
+import collections
+import functools
+import re as stdlib_re # Avoid confusion with the re we export.
+import sys
+import types
+import copy
+try:
+ import collections.abc as collections_abc
+except ImportError:
+ import collections as collections_abc # Fallback for PY3.2.
+
+
+# Please keep __all__ alphabetized within each category.
+# Public API of this Python-2 backport of the ``typing`` module.
+__all__ = [
+    # Super-special typing primitives.
+    'Any',
+    'Callable',
+    'ClassVar',
+    'Final',
+    'Generic',
+    'Literal',
+    'Optional',
+    'Protocol',
+    'Tuple',
+    'Type',
+    'TypeVar',
+    'Union',
+
+    # ABCs (from collections.abc).
+    'AbstractSet',  # collections.abc.Set.
+    'GenericMeta',  # subclass of abc.ABCMeta and a metaclass
+                    # for 'Generic' and ABCs below.
+    'ByteString',
+    'Container',
+    'ContextManager',
+    'Hashable',
+    'ItemsView',
+    'Iterable',
+    'Iterator',
+    'KeysView',
+    'Mapping',
+    'MappingView',
+    'MutableMapping',
+    'MutableSequence',
+    'MutableSet',
+    'Sequence',
+    'Sized',
+    'ValuesView',
+
+    # Structural checks, a.k.a. protocols.
+    'Reversible',
+    'SupportsAbs',
+    'SupportsComplex',
+    'SupportsFloat',
+    'SupportsIndex',
+    'SupportsInt',
+
+    # Concrete collection types.
+    'Counter',
+    'Deque',
+    'Dict',
+    'DefaultDict',
+    'List',
+    'Set',
+    'FrozenSet',
+    'NamedTuple',  # Not really a type.
+    'TypedDict',  # Not really a type.
+    'Generator',
+
+    # One-off things.
+    'AnyStr',
+    'cast',
+    'final',
+    'get_type_hints',
+    'NewType',
+    'no_type_check',
+    'no_type_check_decorator',
+    'NoReturn',
+    'overload',
+    'runtime_checkable',
+    'Text',
+    'TYPE_CHECKING',
+]
+
+# The pseudo-submodules 're' and 'io' are part of the public
+# namespace, but excluded from __all__ because they might stomp on
+# legitimate imports of those modules.
+
+
+def _qualname(x):
+    # ``__qualname__`` exists only on Python 3.3+; on older versions fall
+    # back to the plain ``__name__``.
+    if sys.version_info[:2] >= (3, 3):
+        return x.__qualname__
+    else:
+        # Fall back to just name.
+        return x.__name__
+
+
+def _trim_name(nm):
+    # Drop the leading underscore from internal class names for display
+    # purposes (e.g. '_Union' -> 'Union'), except for the whitelisted
+    # internal helpers which keep their real names in reprs.
+    whitelist = ('_TypeAlias', '_ForwardRef', '_TypingBase', '_FinalTypingBase')
+    if nm.startswith('_') and nm not in whitelist:
+        nm = nm[1:]
+    return nm
+
+
+class TypingMeta(type):
+    """Metaclass for most types defined in typing module
+    (not a part of public API).
+
+    This also defines a dummy constructor (all the work for most typing
+    constructs is done in __new__) and a nicer repr().
+    """
+
+    _is_protocol = False
+
+    def __new__(cls, name, bases, namespace):
+        # str(name): on Python 2, type() requires a native str, not unicode.
+        return super(TypingMeta, cls).__new__(cls, str(name), bases, namespace)
+
+    @classmethod
+    def assert_no_subclassing(cls, bases):
+        # Used by the per-construct metaclasses to forbid subclassing of
+        # special typing objects such as Any, Union, ClassVar, etc.
+        for base in bases:
+            if isinstance(base, cls):
+                raise TypeError("Cannot subclass %s" %
+                                (', '.join(map(_type_repr, bases)) or '()'))
+
+    def __init__(self, *args, **kwds):
+        # Dummy constructor; all real work happens in __new__.
+        pass
+
+    def _eval_type(self, globalns, localns):
+        """Override this in subclasses to interpret forward references.
+
+        For example, List['C'] is internally stored as
+        List[_ForwardRef('C')], which should evaluate to List[C],
+        where C is an object found in globalns or localns (searching
+        localns first, of course).
+        """
+        return self
+
+    def _get_type_vars(self, tvars):
+        # Default: no type variables to collect.
+        pass
+
+    def __repr__(self):
+        # E.g. 'typing.Union' rather than the default type repr.
+        qname = _trim_name(_qualname(self))
+        return '%s.%s' % (self.__module__, qname)
+
+
+class _TypingBase(object):
+    """Internal indicator of special typing constructs."""
+    # Python-2 style metaclass declaration.
+    __metaclass__ = TypingMeta
+    __slots__ = ('__weakref__',)
+
+    def __init__(self, *args, **kwds):
+        pass
+
+    def __new__(cls, *args, **kwds):
+        """Constructor.
+
+        This only exists to give a better error message in case
+        someone tries to subclass a special typing object (not a good idea).
+        """
+        # A (name, bases, namespace) triple means we are being invoked as a
+        # metaclass, i.e. someone wrote ``class X(Any): ...``.
+        if (len(args) == 3 and
+                isinstance(args[0], str) and
+                isinstance(args[1], tuple)):
+            # Close enough.
+            raise TypeError("Cannot subclass %r" % cls)
+        return super(_TypingBase, cls).__new__(cls)
+
+    # Things that are not classes also need these.
+    def _eval_type(self, globalns, localns):
+        # No forward references by default.
+        return self
+
+    def _get_type_vars(self, tvars):
+        # No type variables by default.
+        pass
+
+    def __repr__(self):
+        cls = type(self)
+        qname = _trim_name(_qualname(cls))
+        return '%s.%s' % (cls.__module__, qname)
+
+    def __call__(self, *args, **kwds):
+        # Special typing constructs are not instantiable/callable.
+        raise TypeError("Cannot instantiate %r" % type(self))
+
+
+class _FinalTypingBase(_TypingBase):
+    """Internal mix-in class to prevent instantiation.
+
+    Prevents instantiation unless _root=True is given in class call.
+    It is used to create pseudo-singleton instances Any, Union, Optional, etc.
+    """
+
+    __slots__ = ()
+
+    def __new__(cls, *args, **kwds):
+        self = super(_FinalTypingBase, cls).__new__(cls, *args, **kwds)
+        # Only the module itself passes _root=True, when creating the
+        # singletons below (Any, Union, Optional, ...).
+        if '_root' in kwds and kwds['_root'] is True:
+            return self
+        raise TypeError("Cannot instantiate %r" % cls)
+
+    def __reduce__(self):
+        # Pickle as the public (trimmed) name, so unpickling resolves to the
+        # module-level singleton.
+        return _trim_name(type(self).__name__)
+
+
+class _ForwardRef(_TypingBase):
+    """Internal wrapper to hold a forward reference."""
+
+    __slots__ = ('__forward_arg__', '__forward_code__',
+                 '__forward_evaluated__', '__forward_value__')
+
+    def __init__(self, arg):
+        super(_ForwardRef, self).__init__(arg)
+        # ``basestring``: Python 2 common base of str and unicode.
+        if not isinstance(arg, basestring):
+            raise TypeError('Forward reference must be a string -- got %r' % (arg,))
+        try:
+            # Pre-compile so a malformed reference fails here, not at
+            # evaluation time.
+            code = compile(arg, '', 'eval')
+        except SyntaxError:
+            raise SyntaxError('Forward reference must be an expression -- got %r' %
+                              (arg,))
+        self.__forward_arg__ = arg
+        self.__forward_code__ = code
+        self.__forward_evaluated__ = False
+        self.__forward_value__ = None
+
+    def _eval_type(self, globalns, localns):
+        # Evaluate the reference and cache the result; re-evaluate when
+        # called with distinct global/local namespaces.
+        if not self.__forward_evaluated__ or localns is not globalns:
+            if globalns is None and localns is None:
+                globalns = localns = {}
+            elif globalns is None:
+                globalns = localns
+            elif localns is None:
+                localns = globalns
+            self.__forward_value__ = _type_check(
+                eval(self.__forward_code__, globalns, localns),
+                "Forward references must evaluate to types.")
+            self.__forward_evaluated__ = True
+        return self.__forward_value__
+
+    def __eq__(self, other):
+        if not isinstance(other, _ForwardRef):
+            return NotImplemented
+        # Compare both the literal text and the (possibly cached) value.
+        return (self.__forward_arg__ == other.__forward_arg__ and
+                self.__forward_value__ == other.__forward_value__)
+
+    def __hash__(self):
+        return hash((self.__forward_arg__, self.__forward_value__))
+
+    def __instancecheck__(self, obj):
+        raise TypeError("Forward references cannot be used with isinstance().")
+
+    def __subclasscheck__(self, cls):
+        raise TypeError("Forward references cannot be used with issubclass().")
+
+    def __repr__(self):
+        return '_ForwardRef(%r)' % (self.__forward_arg__,)
+
+
+class _TypeAlias(_TypingBase):
+    """Internal helper class for defining generic variants of concrete types.
+
+    Note that this is not a type; let's call it a pseudo-type.  It cannot
+    be used in instance and subclass checks in parameterized form, i.e.
+    ``isinstance(42, Match[str])`` raises ``TypeError`` instead of returning
+    ``False``.
+    """
+
+    __slots__ = ('name', 'type_var', 'impl_type', 'type_checker')
+
+    def __init__(self, name, type_var, impl_type, type_checker):
+        """Initializer.
+
+        Args:
+            name: The name, e.g. 'Pattern'.
+            type_var: The type parameter, e.g. AnyStr, or the
+                specific type, e.g. str.
+            impl_type: The implementation type.
+            type_checker: Function that takes an impl_type instance.
+                and returns a value that should be a type_var instance.
+        """
+        assert isinstance(name, basestring), repr(name)
+        assert isinstance(impl_type, type), repr(impl_type)
+        assert not isinstance(impl_type, TypingMeta), repr(impl_type)
+        assert isinstance(type_var, (type, _TypingBase)), repr(type_var)
+        self.name = name
+        self.type_var = type_var
+        self.impl_type = impl_type
+        self.type_checker = type_checker
+
+    def __repr__(self):
+        return "%s[%s]" % (self.name, _type_repr(self.type_var))
+
+    def __getitem__(self, parameter):
+        # Once type_var is a concrete type (not a TypeVar), the alias is
+        # already parameterized and cannot be subscripted again.
+        if not isinstance(self.type_var, TypeVar):
+            raise TypeError("%s cannot be further parameterized." % self)
+        if self.type_var.__constraints__ and isinstance(parameter, type):
+            # Constrained TypeVar (e.g. AnyStr): the substitution must be a
+            # subclass of one of the constraints.
+            if not issubclass(parameter, self.type_var.__constraints__):
+                raise TypeError("%s is not a valid substitution for %s." %
+                                (parameter, self.type_var))
+        if isinstance(parameter, TypeVar) and parameter is not self.type_var:
+            raise TypeError("%s cannot be re-parameterized." % self)
+        # Produce a new alias with the type variable substituted.
+        return self.__class__(self.name, parameter,
+                              self.impl_type, self.type_checker)
+
+    def __eq__(self, other):
+        if not isinstance(other, _TypeAlias):
+            return NotImplemented
+        return self.name == other.name and self.type_var == other.type_var
+
+    def __hash__(self):
+        return hash((self.name, self.type_var))
+
+    def __instancecheck__(self, obj):
+        # Only the unparameterized form supports isinstance(); it simply
+        # delegates to the implementation type.
+        if not isinstance(self.type_var, TypeVar):
+            raise TypeError("Parameterized type aliases cannot be used "
+                            "with isinstance().")
+        return isinstance(obj, self.impl_type)
+
+    def __subclasscheck__(self, cls):
+        if not isinstance(self.type_var, TypeVar):
+            raise TypeError("Parameterized type aliases cannot be used "
+                            "with issubclass().")
+        return issubclass(cls, self.impl_type)
+
+
+def _get_type_vars(types, tvars):
+    # Accumulate (in order, deduplicated by the callee) the free type
+    # variables of each typing construct in ``types`` into the list ``tvars``.
+    for t in types:
+        if isinstance(t, TypingMeta) or isinstance(t, _TypingBase):
+            t._get_type_vars(tvars)
+
+
+def _type_vars(types):
+    # Return the free type variables of ``types`` as a tuple.
+    tvars = []
+    _get_type_vars(types, tvars)
+    return tuple(tvars)
+
+
+def _eval_type(t, globalns, localns):
+    # Resolve forward references in ``t``; non-typing objects pass through.
+    if isinstance(t, TypingMeta) or isinstance(t, _TypingBase):
+        return t._eval_type(globalns, localns)
+    return t
+
+
+def _type_check(arg, msg):
+    """Check that the argument is a type, and return it (internal helper).
+
+    As a special case, accept None and return type(None) instead.
+    Also, _TypeAlias instances (e.g. Match, Pattern) are acceptable.
+
+    The msg argument is a human-readable error message, e.g.
+
+        "Union[arg, ...]: arg should be a type."
+
+    We append the repr() of the actual value (truncated to 100 chars).
+    """
+    if arg is None:
+        # ``None`` in an annotation means ``type(None)``.
+        return type(None)
+    if isinstance(arg, basestring):
+        # A string annotation is a forward reference.
+        arg = _ForwardRef(arg)
+    # ClassVar[...] is never valid as a nested type argument; other values
+    # must be a type, a typing construct, or at least callable.
+    if (
+        isinstance(arg, _TypingBase) and type(arg).__name__ == '_ClassVar' or
+        not isinstance(arg, (type, _TypingBase)) and not callable(arg)
+    ):
+        raise TypeError(msg + " Got %.100r." % (arg,))
+    # Bare Union etc. are not valid as type arguments
+    if (
+        type(arg).__name__ in ('_Union', '_Optional') and
+        not getattr(arg, '__origin__', None) or
+        isinstance(arg, TypingMeta) and arg._gorg in (Generic, Protocol)
+    ):
+        raise TypeError("Plain %s is not valid as type argument" % arg)
+    return arg
+
+
+def _type_repr(obj):
+    """Return the repr() of an object, special-casing types (internal helper).
+
+    If obj is a type, we return a shorter version than the default
+    type.__repr__, based on the module and qualified name, which is
+    typically enough to uniquely identify a type.  For everything
+    else, we fall back on repr(obj).
+    """
+    if isinstance(obj, type) and not isinstance(obj, TypingMeta):
+        # '__builtin__' is the Python 2 name of the builtins module; elide it.
+        if obj.__module__ == '__builtin__':
+            return _qualname(obj)
+        return '%s.%s' % (obj.__module__, _qualname(obj))
+    if obj is Ellipsis:
+        return '...'
+    if isinstance(obj, types.FunctionType):
+        return obj.__name__
+    return repr(obj)
+
+
+class ClassVarMeta(TypingMeta):
+    """Metaclass for _ClassVar; forbids subclassing of ClassVar."""
+
+    def __new__(cls, name, bases, namespace):
+        cls.assert_no_subclassing(bases)
+        self = super(ClassVarMeta, cls).__new__(cls, name, bases, namespace)
+        return self
+
+
+class _ClassVar(_FinalTypingBase):
+    """Special type construct to mark class variables.
+
+    An annotation wrapped in ClassVar indicates that a given
+    attribute is intended to be used as a class variable and
+    should not be set on instances of that class.  Usage::
+
+        class Starship:
+            stats = {}  # type: ClassVar[Dict[str, int]]  # class variable
+            damage = 10  # type: int                      # instance variable
+
+    ClassVar accepts only types and cannot be further subscribed.
+
+    Note that ClassVar is not a class itself, and should not
+    be used with isinstance() or issubclass().
+    """
+
+    __metaclass__ = ClassVarMeta
+    __slots__ = ('__type__',)
+
+    def __init__(self, tp=None, _root=False):
+        # __type__ is None for the bare ClassVar singleton, and the wrapped
+        # type once subscripted.
+        self.__type__ = tp
+
+    def __getitem__(self, item):
+        cls = type(self)
+        if self.__type__ is None:
+            # cls.__name__[1:] trims the leading underscore for the message.
+            return cls(_type_check(item,
+                       '{} accepts only types.'.format(cls.__name__[1:])),
+                       _root=True)
+        raise TypeError('{} cannot be further subscripted'
+                        .format(cls.__name__[1:]))
+
+    def _eval_type(self, globalns, localns):
+        # Resolve forward references inside the wrapped type.
+        return type(self)(_eval_type(self.__type__, globalns, localns),
+                          _root=True)
+
+    def __repr__(self):
+        r = super(_ClassVar, self).__repr__()
+        if self.__type__ is not None:
+            r += '[{}]'.format(_type_repr(self.__type__))
+        return r
+
+    def __hash__(self):
+        return hash((type(self).__name__, self.__type__))
+
+    def __eq__(self, other):
+        if not isinstance(other, _ClassVar):
+            return NotImplemented
+        if self.__type__ is not None:
+            return self.__type__ == other.__type__
+        # Bare ClassVar compares by identity (it is a singleton).
+        return self is other
+
+
+# The public, pseudo-singleton instance.
+ClassVar = _ClassVar(_root=True)
+
+
+class _FinalMeta(TypingMeta):
+    """Metaclass for _Final; forbids subclassing of Final."""
+
+    def __new__(cls, name, bases, namespace):
+        cls.assert_no_subclassing(bases)
+        self = super(_FinalMeta, cls).__new__(cls, name, bases, namespace)
+        return self
+
+
+class _Final(_FinalTypingBase):
+    """A special typing construct to indicate that a name
+    cannot be re-assigned or overridden in a subclass.
+    For example:
+
+        MAX_SIZE: Final = 9000
+        MAX_SIZE += 1  # Error reported by type checker
+
+        class Connection:
+            TIMEOUT: Final[int] = 10
+        class FastConnector(Connection):
+            TIMEOUT = 1  # Error reported by type checker
+
+    There is no runtime checking of these properties.
+    """
+
+    __metaclass__ = _FinalMeta
+    __slots__ = ('__type__',)
+
+    def __init__(self, tp=None, **kwds):
+        # __type__ is None for the bare Final singleton, and the wrapped
+        # type once subscripted.
+        self.__type__ = tp
+
+    def __getitem__(self, item):
+        cls = type(self)
+        if self.__type__ is None:
+            return cls(_type_check(item,
+                       '{} accepts only single type.'.format(cls.__name__[1:])),
+                       _root=True)
+        raise TypeError('{} cannot be further subscripted'
+                        .format(cls.__name__[1:]))
+
+    def _eval_type(self, globalns, localns):
+        # Resolve forward references; reuse self when nothing changed.
+        new_tp = _eval_type(self.__type__, globalns, localns)
+        if new_tp == self.__type__:
+            return self
+        return type(self)(new_tp, _root=True)
+
+    def __repr__(self):
+        r = super(_Final, self).__repr__()
+        if self.__type__ is not None:
+            r += '[{}]'.format(_type_repr(self.__type__))
+        return r
+
+    def __hash__(self):
+        return hash((type(self).__name__, self.__type__))
+
+    def __eq__(self, other):
+        if not isinstance(other, _Final):
+            return NotImplemented
+        if self.__type__ is not None:
+            return self.__type__ == other.__type__
+        # Bare Final compares by identity (it is a singleton).
+        return self is other
+
+
+# The public, pseudo-singleton instance.
+Final = _Final(_root=True)
+
+
+def final(f):
+    """This decorator can be used to indicate to type checkers that
+    the decorated method cannot be overridden, and decorated class
+    cannot be subclassed.  For example:
+
+        class Base:
+            @final
+            def done(self) -> None:
+                ...
+        class Sub(Base):
+            def done(self) -> None:  # Error reported by type checker
+                ...
+        @final
+        class Leaf:
+            ...
+        class Other(Leaf):  # Error reported by type checker
+            ...
+
+    There is no runtime checking of these properties.
+    """
+    # Identity decorator: the marker is only meaningful to static checkers.
+    return f
+
+
+class _LiteralMeta(TypingMeta):
+    """Metaclass for _Literal; forbids subclassing of Literal."""
+
+    def __new__(cls, name, bases, namespace):
+        cls.assert_no_subclassing(bases)
+        self = super(_LiteralMeta, cls).__new__(cls, name, bases, namespace)
+        return self
+
+
+class _Literal(_FinalTypingBase):
+    """A type that can be used to indicate to type checkers that the
+    corresponding value has a value literally equivalent to the
+    provided parameter.  For example:
+
+        var: Literal[4] = 4
+
+    The type checker understands that 'var' is literally equal to the
+    value 4 and no other value.
+
+    Literal[...] cannot be subclassed.  There is no runtime checking
+    verifying that the parameter is actually a value instead of a type.
+    """
+
+    __metaclass__ = _LiteralMeta
+    __slots__ = ('__values__',)
+
+    def __init__(self, values=None, **kwds):
+        # __values__ is None for the bare Literal singleton, and a tuple of
+        # the literal values once subscripted.
+        self.__values__ = values
+
+    def __getitem__(self, item):
+        cls = type(self)
+        if self.__values__ is None:
+            # Normalize Literal[x] to the tuple form Literal[(x,)].
+            if not isinstance(item, tuple):
+                item = (item,)
+            return cls(values=item,
+                       _root=True)
+        raise TypeError('{} cannot be further subscripted'
+                        .format(cls.__name__[1:]))
+
+    def _eval_type(self, globalns, localns):
+        # Literal values are not forward references; nothing to resolve.
+        return self
+
+    def __repr__(self):
+        r = super(_Literal, self).__repr__()
+        if self.__values__ is not None:
+            r += '[{}]'.format(', '.join(map(_type_repr, self.__values__)))
+        return r
+
+    def __hash__(self):
+        return hash((type(self).__name__, self.__values__))
+
+    def __eq__(self, other):
+        if not isinstance(other, _Literal):
+            return NotImplemented
+        if self.__values__ is not None:
+            return self.__values__ == other.__values__
+        # Bare Literal compares by identity (it is a singleton).
+        return self is other
+
+
+# The public, pseudo-singleton instance.
+Literal = _Literal(_root=True)
+
+
+class AnyMeta(TypingMeta):
+    """Metaclass for Any; forbids subclassing of Any."""
+
+    def __new__(cls, name, bases, namespace):
+        cls.assert_no_subclassing(bases)
+        self = super(AnyMeta, cls).__new__(cls, name, bases, namespace)
+        return self
+
+
+class _Any(_FinalTypingBase):
+    """Special type indicating an unconstrained type.
+
+    - Any is compatible with every type.
+    - Any assumed to have all methods.
+    - All values assumed to be instances of Any.
+
+    Note that all the above statements are true from the point of view of
+    static type checkers.  At runtime, Any should not be used with instance
+    or class checks.
+    """
+    __metaclass__ = AnyMeta
+    __slots__ = ()
+
+    def __instancecheck__(self, obj):
+        raise TypeError("Any cannot be used with isinstance().")
+
+    def __subclasscheck__(self, cls):
+        raise TypeError("Any cannot be used with issubclass().")
+
+
+# The public, pseudo-singleton instance.
+Any = _Any(_root=True)
+
+
+class NoReturnMeta(TypingMeta):
+    """Metaclass for NoReturn; forbids subclassing of NoReturn."""
+
+    def __new__(cls, name, bases, namespace):
+        cls.assert_no_subclassing(bases)
+        self = super(NoReturnMeta, cls).__new__(cls, name, bases, namespace)
+        return self
+
+
+class _NoReturn(_FinalTypingBase):
+    """Special type indicating functions that never return.
+    Example::
+
+        from typing import NoReturn
+
+        def stop() -> NoReturn:
+            raise Exception('no way')
+
+    This type is invalid in other positions, e.g., ``List[NoReturn]``
+    will fail in static type checkers.
+    """
+    __metaclass__ = NoReturnMeta
+    __slots__ = ()
+
+    def __instancecheck__(self, obj):
+        raise TypeError("NoReturn cannot be used with isinstance().")
+
+    def __subclasscheck__(self, cls):
+        raise TypeError("NoReturn cannot be used with issubclass().")
+
+
+# The public, pseudo-singleton instance.
+NoReturn = _NoReturn(_root=True)
+
+
+class TypeVarMeta(TypingMeta):
+    # Metaclass for TypeVar; forbids subclassing of TypeVar instances.
+    def __new__(cls, name, bases, namespace):
+        cls.assert_no_subclassing(bases)
+        return super(TypeVarMeta, cls).__new__(cls, name, bases, namespace)
+
+
+class TypeVar(_TypingBase):
+    """Type variable.
+
+    Usage::
+
+        T = TypeVar('T')  # Can be anything
+        A = TypeVar('A', str, bytes)  # Must be str or bytes
+
+    Type variables exist primarily for the benefit of static type
+    checkers.  They serve as the parameters for generic types as well
+    as for generic function definitions.  See class Generic for more
+    information on generic types.  Generic functions work as follows:
+
+        def repeat(x: T, n: int) -> List[T]:
+            '''Return a list containing n references to x.'''
+            return [x]*n
+
+        def longest(x: A, y: A) -> A:
+            '''Return the longest of two strings.'''
+            return x if len(x) >= len(y) else y
+
+    The latter example's signature is essentially the overloading
+    of (str, str) -> str and (bytes, bytes) -> bytes.  Also note
+    that if the arguments are instances of some subclass of str,
+    the return type is still plain str.
+
+    At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError.
+
+    Type variables defined with covariant=True or contravariant=True
+    can be used do declare covariant or contravariant generic types.
+    See PEP 484 for more details.  By default generic types are invariant
+    in all type variables.
+
+    Type variables can be introspected.  e.g.:
+
+        T.__name__ == 'T'
+        T.__constraints__ == ()
+        T.__covariant__ == False
+        T.__contravariant__ = False
+        A.__constraints__ == (str, bytes)
+    """
+
+    __metaclass__ = TypeVarMeta
+    __slots__ = ('__name__', '__bound__', '__constraints__',
+                 '__covariant__', '__contravariant__')
+
+    def __init__(self, name, *constraints, **kwargs):
+        super(TypeVar, self).__init__(name, *constraints, **kwargs)
+        # Keyword-only options, extracted by hand (Python 2 has no
+        # keyword-only argument syntax).
+        bound = kwargs.get('bound', None)
+        covariant = kwargs.get('covariant', False)
+        contravariant = kwargs.get('contravariant', False)
+        self.__name__ = name
+        if covariant and contravariant:
+            raise ValueError("Bivariant types are not supported.")
+        self.__covariant__ = bool(covariant)
+        self.__contravariant__ = bool(contravariant)
+        # ``bound`` and explicit constraints are mutually exclusive, and a
+        # constraint list needs at least two entries to be meaningful.
+        if constraints and bound is not None:
+            raise TypeError("Constraints cannot be combined with bound=...")
+        if constraints and len(constraints) == 1:
+            raise TypeError("A single constraint is not allowed")
+        msg = "TypeVar(name, constraint, ...): constraints must be types."
+        self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
+        if bound:
+            self.__bound__ = _type_check(bound, "Bound must be a type.")
+        else:
+            self.__bound__ = None
+
+    def _get_type_vars(self, tvars):
+        # A TypeVar is itself a free type variable; record it once.
+        if self not in tvars:
+            tvars.append(self)
+
+    def __repr__(self):
+        # Prefix encodes variance: '+' covariant, '-' contravariant,
+        # '~' invariant.
+        if self.__covariant__:
+            prefix = '+'
+        elif self.__contravariant__:
+            prefix = '-'
+        else:
+            prefix = '~'
+        return prefix + self.__name__
+
+    def __instancecheck__(self, instance):
+        raise TypeError("Type variables cannot be used with isinstance().")
+
+    def __subclasscheck__(self, cls):
+        raise TypeError("Type variables cannot be used with issubclass().")
+
+
+# Some unconstrained type variables.  These are used by the container types.
+# (These are not for export.)
+T = TypeVar('T')  # Any type.
+KT = TypeVar('KT')  # Key type.
+VT = TypeVar('VT')  # Value type.
+T_co = TypeVar('T_co', covariant=True)  # Any type covariant containers.
+V_co = TypeVar('V_co', covariant=True)  # Any type covariant containers.
+VT_co = TypeVar('VT_co', covariant=True)  # Value type covariant containers.
+T_contra = TypeVar('T_contra', contravariant=True)  # Ditto contravariant.
+
+# A useful type variable with constraints.  This represents string types.
+# (This one *is* for export!)
+# ``unicode`` is the Python 2 text type, mirroring Py3's (bytes, str).
+AnyStr = TypeVar('AnyStr', bytes, unicode)
+
+
+def _replace_arg(arg, tvars, args):
+    """An internal helper function: replace arg if it is a type variable
+    found in tvars with corresponding substitution from args or
+    with corresponding substitution sub-tree if arg is a generic type.
+    """
+
+    if tvars is None:
+        tvars = []
+    # Generic types delegate to their own substitution tree.
+    if hasattr(arg, '_subs_tree') and isinstance(arg, (GenericMeta, _TypingBase)):
+        return arg._subs_tree(tvars, args)
+    # Type variables are replaced positionally: tvars[i] -> args[i].
+    if isinstance(arg, TypeVar):
+        for i, tvar in enumerate(tvars):
+            if arg == tvar:
+                return args[i]
+    # Anything else passes through unchanged.
+    return arg
+
+
+# Special typing constructs Union, Optional, Generic, Callable and Tuple
+# use three special attributes for internal bookkeeping of generic types:
+# * __parameters__ is a tuple of unique free type parameters of a generic
+# type, for example, Dict[T, T].__parameters__ == (T,);
+# * __origin__ keeps a reference to a type that was subscripted,
+# e.g., Union[T, int].__origin__ == Union;
+# * __args__ is a tuple of all arguments used in subscripting,
+# e.g., Dict[T, int].__args__ == (T, int).
+
+
+def _subs_tree(cls, tvars=None, args=None):
+    """An internal helper function: calculate substitution tree
+    for generic cls after replacing its type parameters with
+    substitutions in tvars -> args (if any).
+    Repeat the same following __origin__'s.
+
+    Return a list of arguments with all possible substitutions
+    performed.  Arguments that are generic classes themselves are represented
+    as tuples (so that no new classes are created by this function).
+    For example: _subs_tree(List[Tuple[int, T]][str]) == [(Tuple, int, str)]
+    """
+
+    if cls.__origin__ is None:
+        # Unsubscripted generic: nothing to substitute.
+        return cls
+    # Make of chain of origins (i.e. cls -> cls.__origin__)
+    current = cls.__origin__
+    orig_chain = []
+    while current.__origin__ is not None:
+        orig_chain.append(current)
+        current = current.__origin__
+    # Replace type variables in __args__ if asked ...
+    tree_args = []
+    for arg in cls.__args__:
+        tree_args.append(_replace_arg(arg, tvars, args))
+    # ... then continue replacing down the origin chain.
+    for ocls in orig_chain:
+        new_tree_args = []
+        for arg in ocls.__args__:
+            new_tree_args.append(_replace_arg(arg, ocls.__parameters__, tree_args))
+        tree_args = new_tree_args
+    return tree_args
+
+
+def _remove_dups_flatten(parameters):
+    """An internal helper for Union creation and substitution: flatten Union's
+    among parameters, then remove duplicates and strict subclasses.
+    """
+
+    # Flatten out Union[Union[...], ...].
+    params = []
+    for p in parameters:
+        if isinstance(p, _Union) and p.__origin__ is Union:
+            params.extend(p.__args__)
+        elif isinstance(p, tuple) and len(p) > 0 and p[0] is Union:
+            # Subs-tree tuple form of a Union (see _subs_tree).
+            params.extend(p[1:])
+        else:
+            params.append(p)
+    # Weed out strict duplicates, preserving the first of each occurrence.
+    all_params = set(params)
+    if len(all_params) < len(params):
+        new_params = []
+        for t in params:
+            if t in all_params:
+                new_params.append(t)
+                all_params.remove(t)
+        params = new_params
+        assert not all_params, all_params
+    # Weed out subclasses.
+    # E.g. Union[int, Employee, Manager] == Union[int, Employee].
+    # If object is present it will be sole survivor among proper classes.
+    # Never discard type variables.
+    # (In particular, Union[str, AnyStr] != AnyStr.)
+    all_params = set(params)
+    for t1 in params:
+        if not isinstance(t1, type):
+            continue
+        if any(isinstance(t2, type) and issubclass(t1, t2)
+               for t2 in all_params - {t1}
+               if not (isinstance(t2, GenericMeta) and
+                       t2.__origin__ is not None)):
+            all_params.remove(t1)
+    # Preserve the original ordering of the survivors.
+    return tuple(t for t in params if t in all_params)
+
+
+def _check_generic(cls, parameters):
+    # Check correct count for parameters of a generic cls (internal helper).
+    if not cls.__parameters__:
+        raise TypeError("%s is not a generic class" % repr(cls))
+    alen = len(parameters)
+    elen = len(cls.__parameters__)
+    if alen != elen:
+        raise TypeError("Too %s parameters for %s; actual %s, expected %s" %
+                        ("many" if alen > elen else "few", repr(cls), alen, elen))
+
+
+# Cache-clear callbacks, one per @_tp_cache-decorated function; invoked by
+# test/maintenance code to reset all subscription caches.
+_cleanups = []
+
+
+def _tp_cache(func):
+    """Cache __getitem__ of generic types with a fallback for unhashable
+    arguments (simplified hand-rolled LRU-less cache, bounded at maxsize)."""
+    maxsize = 128
+    cache = {}
+    _cleanups.append(cache.clear)
+
+    @functools.wraps(func)
+    def inner(*args):
+        key = args
+        try:
+            return cache[key]
+        except TypeError:
+            # Assume it's an unhashable argument.
+            return func(*args)
+        except KeyError:
+            value = func(*args)
+            if len(cache) >= maxsize:
+                # If the cache grows too much, just start over.
+                cache.clear()
+            cache[key] = value
+            return value
+
+    return inner
+
+
+class UnionMeta(TypingMeta):
+    """Metaclass for Union; forbids subclassing of Union."""
+
+    def __new__(cls, name, bases, namespace):
+        cls.assert_no_subclassing(bases)
+        return super(UnionMeta, cls).__new__(cls, name, bases, namespace)
+
+
+class _Union(_FinalTypingBase):
+    """Union type; Union[X, Y] means either X or Y.
+
+    To define a union, use e.g. Union[int, str].  Details:
+
+    - The arguments must be types and there must be at least one.
+
+    - None as an argument is a special case and is replaced by
+      type(None).
+
+    - Unions of unions are flattened, e.g.::
+
+        Union[Union[int, str], float] == Union[int, str, float]
+
+    - Unions of a single argument vanish, e.g.::
+
+        Union[int] == int  # The constructor actually returns int
+
+    - Redundant arguments are skipped, e.g.::
+
+        Union[int, str, int] == Union[int, str]
+
+    - When comparing unions, the argument order is ignored, e.g.::
+
+        Union[int, str] == Union[str, int]
+
+    - When two arguments have a subclass relationship, the least
+      derived argument is kept, e.g.::
+
+        class Employee: pass
+        class Manager(Employee): pass
+        Union[int, Employee, Manager] == Union[int, Employee]
+        Union[Manager, int, Employee] == Union[int, Employee]
+        Union[Employee, Manager] == Employee
+
+    - Similar for object::
+
+        Union[int, object] == object
+
+    - You cannot subclass or instantiate a union.
+
+    - You can use Optional[X] as a shorthand for Union[X, None].
+    """
+
+    __metaclass__ = UnionMeta
+    __slots__ = ('__parameters__', '__args__', '__origin__', '__tree_hash__')
+
+    def __new__(cls, parameters=None, origin=None, *args, **kwds):
+        self = super(_Union, cls).__new__(cls, parameters, origin, *args, **kwds)
+        if origin is None:
+            # The bare, unsubscripted Union singleton.
+            self.__parameters__ = None
+            self.__args__ = None
+            self.__origin__ = None
+            self.__tree_hash__ = hash(frozenset(('Union',)))
+            return self
+        if not isinstance(parameters, tuple):
+            raise TypeError("Expected parameters=<tuple>")
+        if origin is Union:
+            parameters = _remove_dups_flatten(parameters)
+            # It's not a union if there's only one type left.
+            if len(parameters) == 1:
+                return parameters[0]
+        self.__parameters__ = _type_vars(parameters)
+        self.__args__ = parameters
+        self.__origin__ = origin
+        # Pre-calculate the __hash__ on instantiation.
+        # This improves speed for complex substitutions.
+        subs_tree = self._subs_tree()
+        if isinstance(subs_tree, tuple):
+            self.__tree_hash__ = hash(frozenset(subs_tree))
+        else:
+            self.__tree_hash__ = hash(subs_tree)
+        return self
+
+    def _eval_type(self, globalns, localns):
+        # Resolve forward references in all arguments and the origin.
+        if self.__args__ is None:
+            return self
+        ev_args = tuple(_eval_type(t, globalns, localns) for t in self.__args__)
+        ev_origin = _eval_type(self.__origin__, globalns, localns)
+        if ev_args == self.__args__ and ev_origin == self.__origin__:
+            # Everything is already evaluated.
+            return self
+        return self.__class__(ev_args, ev_origin, _root=True)
+
+    def _get_type_vars(self, tvars):
+        if self.__origin__ and self.__parameters__:
+            _get_type_vars(self.__parameters__, tvars)
+
+    def __repr__(self):
+        if self.__origin__ is None:
+            return super(_Union, self).__repr__()
+        tree = self._subs_tree()
+        if not isinstance(tree, tuple):
+            # Fully substituted down to a single type.
+            return repr(tree)
+        return tree[0]._tree_repr(tree)
+
+    def _tree_repr(self, tree):
+        # Render a subs-tree tuple (see _subs_tree) as 'Union[...]'.
+        arg_list = []
+        for arg in tree[1:]:
+            if not isinstance(arg, tuple):
+                arg_list.append(_type_repr(arg))
+            else:
+                arg_list.append(arg[0]._tree_repr(arg))
+        return super(_Union, self).__repr__() + '[%s]' % ', '.join(arg_list)
+
+    @_tp_cache
+    def __getitem__(self, parameters):
+        if parameters == ():
+            raise TypeError("Cannot take a Union of no types.")
+        if not isinstance(parameters, tuple):
+            parameters = (parameters,)
+        if self.__origin__ is None:
+            msg = "Union[arg, ...]: each arg must be a type."
+        else:
+            msg = "Parameters to generic types must be types."
+        parameters = tuple(_type_check(p, msg) for p in parameters)
+        if self is not Union:
+            # Re-subscripting an already-parameterized union: arity must match.
+            _check_generic(self, parameters)
+        return self.__class__(parameters, origin=self, _root=True)
+
+    def _subs_tree(self, tvars=None, args=None):
+        if self is Union:
+            return Union  # Nothing to substitute
+        tree_args = _subs_tree(self, tvars, args)
+        tree_args = _remove_dups_flatten(tree_args)
+        if len(tree_args) == 1:
+            return tree_args[0]  # Union of a single type is that type
+        return (Union,) + tree_args
+
+    def __eq__(self, other):
+        # Equality goes through the precomputed tree hash / subs tree so that
+        # argument order is ignored (see class docstring).
+        if isinstance(other, _Union):
+            return self.__tree_hash__ == other.__tree_hash__
+        elif self is not Union:
+            return self._subs_tree() == other
+        else:
+            return self is other
+
+    def __hash__(self):
+        return self.__tree_hash__
+
+    def __instancecheck__(self, obj):
+        raise TypeError("Unions cannot be used with isinstance().")
+
+    def __subclasscheck__(self, cls):
+        raise TypeError("Unions cannot be used with issubclass().")
+
+
+# The public, pseudo-singleton instance.
+Union = _Union(_root=True)
+
+
+class OptionalMeta(TypingMeta):
+    """Metaclass for Optional; forbids subclassing of Optional."""
+
+    def __new__(cls, name, bases, namespace):
+        cls.assert_no_subclassing(bases)
+        return super(OptionalMeta, cls).__new__(cls, name, bases, namespace)
+
+
+class _Optional(_FinalTypingBase):
+    """Optional type.
+
+    Optional[X] is equivalent to Union[X, None].
+    """
+
+    __metaclass__ = OptionalMeta
+    __slots__ = ()
+
+    @_tp_cache
+    def __getitem__(self, arg):
+        # Pure sugar: Optional[X] simply builds Union[X, type(None)].
+        arg = _type_check(arg, "Optional[t] requires a single type.")
+        return Union[arg, type(None)]
+
+
+# The public, pseudo-singleton instance.
+Optional = _Optional(_root=True)
+
+
+def _next_in_mro(cls):
+    """Helper for Generic.__new__.
+
+    Returns the class after the last occurrence of Generic or
+    Generic[...] in cls.__mro__.
+    """
+    next_in_mro = object
+    # Look for the last occurrence of Generic or Generic[...].
+    # (_gorg is the unsubscripted "generic origin" of a GenericMeta class.)
+    for i, c in enumerate(cls.__mro__[:-1]):
+        if isinstance(c, GenericMeta) and c._gorg is Generic:
+            next_in_mro = cls.__mro__[i + 1]
+    return next_in_mro
+
+
def _make_subclasshook(cls):
    """Construct a __subclasshook__ callable that incorporates
    the associated __extra__ class in subclass checks performed
    against cls.
    """
    if isinstance(cls.__extra__, abc.ABCMeta):
        # The logic mirrors that of ABCMeta.__subclasscheck__.
        # Registered classes need not be checked here because
        # cls and its extra share the same _abc_registry.
        def __extrahook__(cls, subclass):
            res = cls.__extra__.__subclasshook__(subclass)
            if res is not NotImplemented:
                return res
            if cls.__extra__ in getattr(subclass, '__mro__', ()):
                return True
            for scls in cls.__extra__.__subclasses__():
                # Skip parameterized generics: issubclass() on them
                # would raise TypeError.
                if isinstance(scls, GenericMeta):
                    continue
                if issubclass(subclass, scls):
                    return True
            return NotImplemented
    else:
        # For non-ABC extras we'll just call issubclass().
        def __extrahook__(cls, subclass):
            if cls.__extra__ and issubclass(subclass, cls.__extra__):
                return True
            return NotImplemented
    return classmethod(__extrahook__)
+
+
class GenericMeta(TypingMeta, abc.ABCMeta):
    """Metaclass for generic types.

    This is a metaclass for typing.Generic and generic ABCs defined in
    typing module. User defined subclasses of GenericMeta can override
    __new__ and invoke super().__new__. Note that GenericMeta.__new__
    has strict rules on what is allowed in its bases argument:
    * plain Generic is disallowed in bases;
    * Generic[...] should appear in bases at most once;
    * if Generic[...] is present, then it should list all type variables
      that appear in other bases.
    In addition, type of all generic bases is erased, e.g., C[int] is
    stripped to plain C.
    """

    def __new__(cls, name, bases, namespace,
                tvars=None, args=None, origin=None, extra=None, orig_bases=None):
        """Create a new generic class. GenericMeta.__new__ accepts
        keyword arguments that are used for internal bookkeeping, therefore
        an override should pass unused keyword arguments to super().
        """
        if tvars is not None:
            # Called from __getitem__() below.
            assert origin is not None
            assert all(isinstance(t, TypeVar) for t in tvars), tvars
        else:
            # Called from class statement.
            assert tvars is None, tvars
            assert args is None, args
            assert origin is None, origin

            # Get the full set of tvars from the bases.
            tvars = _type_vars(bases)
            # Look for Generic[T1, ..., Tn].
            # If found, tvars must be a subset of it.
            # If not found, tvars is it.
            # Also check for and reject plain Generic,
            # and reject multiple Generic[...].
            gvars = None
            for base in bases:
                if base is Generic:
                    raise TypeError("Cannot inherit from plain Generic")
                if (isinstance(base, GenericMeta) and
                        base.__origin__ in (Generic, Protocol)):
                    if gvars is not None:
                        raise TypeError(
                            "Cannot inherit from Generic[...] or"
                            " Protocol[...] multiple times.")
                    gvars = base.__parameters__
            if gvars is None:
                gvars = tvars
            else:
                tvarset = set(tvars)
                gvarset = set(gvars)
                if not tvarset <= gvarset:
                    raise TypeError(
                        "Some type variables (%s) "
                        "are not listed in %s[%s]" %
                        (", ".join(str(t) for t in tvars if t not in gvarset),
                         "Generic" if any(b.__origin__ is Generic
                                          for b in bases) else "Protocol",
                         ", ".join(str(g) for g in gvars)))
                tvars = gvars

        initial_bases = bases
        if extra is None:
            extra = namespace.get('__extra__')
        if extra is not None and type(extra) is abc.ABCMeta and extra not in bases:
            bases = (extra,) + bases
        bases = tuple(b._gorg if isinstance(b, GenericMeta) else b for b in bases)

        # remove bare Generic from bases if there are other generic bases
        if any(isinstance(b, GenericMeta) and b is not Generic for b in bases):
            bases = tuple(b for b in bases if b is not Generic)
        namespace.update({'__origin__': origin, '__extra__': extra})
        self = super(GenericMeta, cls).__new__(cls, name, bases, namespace)
        super(GenericMeta, self).__setattr__('_gorg',
                                             self if not origin else origin._gorg)

        self.__parameters__ = tvars
        # Be prepared that GenericMeta will be subclassed by TupleMeta
        # and CallableMeta, those two allow ..., (), or [] in __args__.
        self.__args__ = tuple(Ellipsis if a is _TypingEllipsis else
                              () if a is _TypingEmpty else
                              a for a in args) if args else None
        # Speed hack (https://github.com/python/typing/issues/196).
        self.__next_in_mro__ = _next_in_mro(self)
        # Preserve base classes on subclassing (__bases__ are type erased now).
        if orig_bases is None:
            self.__orig_bases__ = initial_bases

        # This allows unparameterized generic collections to be used
        # with issubclass() and isinstance() in the same way as their
        # collections.abc counterparts (e.g., isinstance([], Iterable)).
        if (
            '__subclasshook__' not in namespace and extra or
            # allow overriding
            getattr(self.__subclasshook__, '__name__', '') == '__extrahook__'
        ):
            self.__subclasshook__ = _make_subclasshook(self)

        if origin and hasattr(origin, '__qualname__'):  # Fix for Python 3.2.
            self.__qualname__ = origin.__qualname__
        self.__tree_hash__ = (hash(self._subs_tree()) if origin else
                              super(GenericMeta, self).__hash__())
        return self

    def __init__(self, *args, **kwargs):
        super(GenericMeta, self).__init__(*args, **kwargs)
        # Share the ABC registry/cache with __extra__ (or the origin) so
        # that registrations on either side are visible to both.
        if isinstance(self.__extra__, abc.ABCMeta):
            self._abc_registry = self.__extra__._abc_registry
            self._abc_cache = self.__extra__._abc_cache
        elif self.__origin__ is not None:
            self._abc_registry = self.__origin__._abc_registry
            self._abc_cache = self.__origin__._abc_cache

    # _abc_negative_cache and _abc_negative_cache_version
    # realised as descriptors, since GenClass[t1, t2, ...] always
    # share subclass info with GenClass.
    # This is an important memory optimization.
    @property
    def _abc_negative_cache(self):
        if isinstance(self.__extra__, abc.ABCMeta):
            return self.__extra__._abc_negative_cache
        return self._gorg._abc_generic_negative_cache

    @_abc_negative_cache.setter
    def _abc_negative_cache(self, value):
        # Only the original (unsubscripted) class owns the cache.
        if self.__origin__ is None:
            if isinstance(self.__extra__, abc.ABCMeta):
                self.__extra__._abc_negative_cache = value
            else:
                self._abc_generic_negative_cache = value

    @property
    def _abc_negative_cache_version(self):
        if isinstance(self.__extra__, abc.ABCMeta):
            return self.__extra__._abc_negative_cache_version
        return self._gorg._abc_generic_negative_cache_version

    @_abc_negative_cache_version.setter
    def _abc_negative_cache_version(self, value):
        # Only the original (unsubscripted) class owns the cache version.
        if self.__origin__ is None:
            if isinstance(self.__extra__, abc.ABCMeta):
                self.__extra__._abc_negative_cache_version = value
            else:
                self._abc_generic_negative_cache_version = value

    def _get_type_vars(self, tvars):
        if self.__origin__ and self.__parameters__:
            _get_type_vars(self.__parameters__, tvars)

    def _eval_type(self, globalns, localns):
        ev_origin = (self.__origin__._eval_type(globalns, localns)
                     if self.__origin__ else None)
        ev_args = tuple(_eval_type(a, globalns, localns) for a
                        in self.__args__) if self.__args__ else None
        if ev_origin == self.__origin__ and ev_args == self.__args__:
            return self
        return self.__class__(self.__name__,
                              self.__bases__,
                              dict(self.__dict__),
                              tvars=_type_vars(ev_args) if ev_args else None,
                              args=ev_args,
                              origin=ev_origin,
                              extra=self.__extra__,
                              orig_bases=self.__orig_bases__)

    def __repr__(self):
        if self.__origin__ is None:
            return super(GenericMeta, self).__repr__()
        return self._tree_repr(self._subs_tree())

    def _tree_repr(self, tree):
        arg_list = []
        for arg in tree[1:]:
            if arg == ():
                arg_list.append('()')
            elif not isinstance(arg, tuple):
                arg_list.append(_type_repr(arg))
            else:
                arg_list.append(arg[0]._tree_repr(arg))
        return super(GenericMeta, self).__repr__() + '[%s]' % ', '.join(arg_list)

    def _subs_tree(self, tvars=None, args=None):
        if self.__origin__ is None:
            return self
        tree_args = _subs_tree(self, tvars, args)
        return (self._gorg,) + tuple(tree_args)

    def __eq__(self, other):
        if not isinstance(other, GenericMeta):
            return NotImplemented
        if self.__origin__ is None or other.__origin__ is None:
            # Unsubscripted classes compare by identity.
            return self is other
        return self.__tree_hash__ == other.__tree_hash__

    def __hash__(self):
        return self.__tree_hash__

    @_tp_cache
    def __getitem__(self, params):
        if not isinstance(params, tuple):
            params = (params,)
        if not params and self._gorg is not Tuple:
            raise TypeError(
                "Parameter list to %s[...] cannot be empty" % _qualname(self))
        msg = "Parameters to generic types must be types."
        params = tuple(_type_check(p, msg) for p in params)
        if self in (Generic, Protocol):
            # Generic can only be subscripted with unique type variables.
            if not all(isinstance(p, TypeVar) for p in params):
                raise TypeError(
                    "Parameters to %s[...] must all be type variables" % self.__name__)
            if len(set(params)) != len(params):
                raise TypeError(
                    "Parameters to %s[...] must all be unique" % self.__name__)
            tvars = params
            args = params
        elif self in (Tuple, Callable):
            tvars = _type_vars(params)
            args = params
        elif self.__origin__ in (Generic, Protocol):
            # Can't subscript Generic[...] or Protocol[...].
            raise TypeError("Cannot subscript already-subscripted %s" %
                            repr(self))
        else:
            # Subscripting a regular Generic subclass.
            _check_generic(self, params)
            tvars = _type_vars(params)
            args = params

        prepend = (self,) if self.__origin__ is None else ()
        return self.__class__(self.__name__,
                              prepend + self.__bases__,
                              dict(self.__dict__),
                              tvars=tvars,
                              args=args,
                              origin=self,
                              extra=self.__extra__,
                              orig_bases=self.__orig_bases__)

    def __subclasscheck__(self, cls):
        if self.__origin__ is not None:
            # These should only be modules within the standard library.
            # singledispatch is an exception, because it's a Python 2 backport
            # of functools.singledispatch.
            whitelist = ['abc', 'functools', 'singledispatch']
            if (sys._getframe(1).f_globals['__name__'] in whitelist or
                    # The second frame is needed for the case where we came
                    # from _ProtocolMeta.__subclasscheck__.
                    sys._getframe(2).f_globals['__name__'] in whitelist):
                return False
            raise TypeError("Parameterized generics cannot be used with class "
                            "or instance checks")
        if self is Generic:
            raise TypeError("Class %r cannot be used with class "
                            "or instance checks" % self)
        return super(GenericMeta, self).__subclasscheck__(cls)

    def __instancecheck__(self, instance):
        # Since we extend ABC.__subclasscheck__ and
        # ABC.__instancecheck__ inlines the cache checking done by the
        # latter, we must extend __instancecheck__ too. For simplicity
        # we just skip the cache check -- instance checks for generic
        # classes are supposed to be rare anyways.
        if hasattr(instance, "__class__"):
            return issubclass(instance.__class__, self)
        return False

    def __setattr__(self, attr, value):
        # We consider all the subscripted generics as proxies for original class
        if (
            attr.startswith('__') and attr.endswith('__') or
            attr.startswith('_abc_')
        ):
            super(GenericMeta, self).__setattr__(attr, value)
        else:
            # Everything else is redirected onto the original class.
            super(GenericMeta, self._gorg).__setattr__(attr, value)
+
+
def _copy_generic(self):
    """Hack to work around https://bugs.python.org/issue11480 on Python 2"""
    # Rebuild the class through GenericMeta.__new__ with the same
    # bookkeeping arguments instead of letting copy.copy() attempt a
    # generic (and failing) class copy.
    return self.__class__(self.__name__, self.__bases__, dict(self.__dict__),
                          self.__parameters__, self.__args__, self.__origin__,
                          self.__extra__, self.__orig_bases__)
+
+
# Route copy.copy() of GenericMeta classes through _copy_generic.
copy._copy_dispatch[GenericMeta] = _copy_generic
+
+
# Prevent checks for Generic to crash when defining Generic.
# (GenericMeta.__new__ consults the name Generic, which is not bound
# until the class statement below completes.)
Generic = None
+
+
+def _generic_new(base_cls, cls, *args, **kwds):
+ # Assure type is erased on instantiation,
+ # but attempt to store it in __orig_class__
+ if cls.__origin__ is None:
+ if (base_cls.__new__ is object.__new__ and
+ cls.__init__ is not object.__init__):
+ return base_cls.__new__(cls)
+ else:
+ return base_cls.__new__(cls, *args, **kwds)
+ else:
+ origin = cls._gorg
+ if (base_cls.__new__ is object.__new__ and
+ cls.__init__ is not object.__init__):
+ obj = base_cls.__new__(origin)
+ else:
+ obj = base_cls.__new__(origin, *args, **kwds)
+ try:
+ obj.__orig_class__ = cls
+ except AttributeError:
+ pass
+ obj.__init__(*args, **kwds)
+ return obj
+
+
class Generic(object):
    """Abstract base class for generic types.

    A generic type is typically declared by inheriting from
    this class parameterized with one or more type variables.
    For example, a generic mapping type might be defined as::

      class Mapping(Generic[KT, VT]):
          def __getitem__(self, key: KT) -> VT:
              ...
          # Etc.

    This class can then be used as follows::

      def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
          try:
              return mapping[key]
          except KeyError:
              return default
    """

    __metaclass__ = GenericMeta
    __slots__ = ()

    def __new__(cls, *args, **kwds):
        if cls._gorg is Generic:
            raise TypeError("Type Generic cannot be instantiated; "
                            "it can be used only as a base class")
        # Instantiate with type parameters erased (see _generic_new).
        return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
+
+
class _TypingEmpty(object):
    """Internal placeholder for () or []. Used by TupleMeta and CallableMeta
    to allow empty list/tuple in specific places, without allowing them
    to sneak in where prohibited.
    """
+
+
class _TypingEllipsis(object):
    """Internal placeholder for ... (ellipsis)."""
+
+
class TupleMeta(GenericMeta):
    """Metaclass for Tuple (internal)."""

    @_tp_cache
    def __getitem__(self, parameters):
        if self.__origin__ is not None or self._gorg is not Tuple:
            # Normal generic rules apply if this is not the first subscription
            # or a subscription of a subclass.
            return super(TupleMeta, self).__getitem__(parameters)
        if parameters == ():
            # Tuple[()] denotes the empty tuple type.
            return super(TupleMeta, self).__getitem__((_TypingEmpty,))
        if not isinstance(parameters, tuple):
            parameters = (parameters,)
        if len(parameters) == 2 and parameters[1] is Ellipsis:
            # Tuple[t, ...]: variable-length homogeneous tuple.
            msg = "Tuple[t, ...]: t must be a type."
            p = _type_check(parameters[0], msg)
            return super(TupleMeta, self).__getitem__((p, _TypingEllipsis))
        msg = "Tuple[t0, t1, ...]: each t must be a type."
        parameters = tuple(_type_check(p, msg) for p in parameters)
        return super(TupleMeta, self).__getitem__(parameters)

    def __instancecheck__(self, obj):
        # Only the bare Tuple supports isinstance(); it degrades to tuple.
        if self.__args__ is None:
            return isinstance(obj, tuple)
        raise TypeError("Parameterized Tuple cannot be used "
                        "with isinstance().")

    def __subclasscheck__(self, cls):
        # Only the bare Tuple supports issubclass(); it degrades to tuple.
        if self.__args__ is None:
            return issubclass(cls, tuple)
        raise TypeError("Parameterized Tuple cannot be used "
                        "with issubclass().")
+
+
# Route copy.copy() of TupleMeta classes through _copy_generic too.
copy._copy_dispatch[TupleMeta] = _copy_generic
+
+
class Tuple(tuple):
    """Tuple type; Tuple[X, Y] is the cross-product type of X and Y.

    Example: Tuple[T1, T2] is a tuple of two elements corresponding
    to type variables T1 and T2. Tuple[int, float, str] is a tuple
    of an int, a float and a string.

    To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
    """

    __metaclass__ = TupleMeta
    __extra__ = tuple
    __slots__ = ()

    def __new__(cls, *args, **kwds):
        # The annotation form is not a usable constructor.
        if cls._gorg is Tuple:
            raise TypeError("Type Tuple cannot be instantiated; "
                            "use tuple() instead")
        return _generic_new(tuple, cls, *args, **kwds)
+
+
class CallableMeta(GenericMeta):
    """Metaclass for Callable (internal)."""

    def __repr__(self):
        if self.__origin__ is None:
            return super(CallableMeta, self).__repr__()
        return self._tree_repr(self._subs_tree())

    def _tree_repr(self, tree):
        if self._gorg is not Callable:
            return super(CallableMeta, self)._tree_repr(tree)
        # For actual Callable (not its subclass) we override
        # super(CallableMeta, self)._tree_repr() for nice formatting.
        arg_list = []
        for arg in tree[1:]:
            if not isinstance(arg, tuple):
                arg_list.append(_type_repr(arg))
            else:
                arg_list.append(arg[0]._tree_repr(arg))
        if arg_list[0] == '...':
            return repr(tree[0]) + '[..., %s]' % arg_list[1]
        return (repr(tree[0]) +
                '[[%s], %s]' % (', '.join(arg_list[:-1]), arg_list[-1]))

    def __getitem__(self, parameters):
        """A thin wrapper around __getitem_inner__ to provide the latter
        with hashable arguments to improve speed.
        """

        if self.__origin__ is not None or self._gorg is not Callable:
            return super(CallableMeta, self).__getitem__(parameters)
        if not isinstance(parameters, tuple) or len(parameters) != 2:
            raise TypeError("Callable must be used as "
                            "Callable[[arg, ...], result].")
        args, result = parameters
        if args is Ellipsis:
            parameters = (Ellipsis, result)
        else:
            if not isinstance(args, list):
                raise TypeError("Callable[args, result]: args must be a list."
                                " Got %.100r." % (args,))
            # The argument list becomes a tuple so that the whole key is
            # hashable and cacheable by @_tp_cache below.
            parameters = (tuple(args), result)
        return self.__getitem_inner__(parameters)

    @_tp_cache
    def __getitem_inner__(self, parameters):
        args, result = parameters
        msg = "Callable[args, result]: result must be a type."
        result = _type_check(result, msg)
        if args is Ellipsis:
            return super(CallableMeta, self).__getitem__((_TypingEllipsis, result))
        msg = "Callable[[arg, ...], result]: each arg must be a type."
        args = tuple(_type_check(arg, msg) for arg in args)
        parameters = args + (result,)
        return super(CallableMeta, self).__getitem__(parameters)
+
+
# Route copy.copy() of CallableMeta classes through _copy_generic too.
copy._copy_dispatch[CallableMeta] = _copy_generic
+
+
class Callable(object):
    """Callable type; Callable[[int], str] is a function of (int) -> str.

    The subscription syntax must always be used with exactly two
    values: the argument list and the return type. The argument list
    must be a list of types or ellipsis; the return type must be a single type.

    There is no syntax to indicate optional or keyword arguments,
    such function types are rarely used as callback types.
    """

    __metaclass__ = CallableMeta
    __extra__ = collections_abc.Callable
    __slots__ = ()

    def __new__(cls, *args, **kwds):
        # The annotation form itself is abstract and not instantiable.
        if cls._gorg is Callable:
            raise TypeError("Type Callable cannot be instantiated; "
                            "use a non-abstract subclass instead")
        return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
+
+
def cast(typ, val):
    """Return *val* unchanged, asserting to a type checker it is *typ*.

    No runtime check is performed -- deliberately, so that cast() stays
    as fast as possible.
    """
    return val
+
+
+def _get_defaults(func):
+ """Internal helper to extract the default arguments, by name."""
+ code = func.__code__
+ pos_count = code.co_argcount
+ arg_names = code.co_varnames
+ arg_names = arg_names[:pos_count]
+ defaults = func.__defaults__ or ()
+ kwdefaults = func.__kwdefaults__
+ res = dict(kwdefaults) if kwdefaults else {}
+ pos_offset = pos_count - len(defaults)
+ for name, value in zip(arg_names[pos_offset:], defaults):
+ assert name not in res
+ res[name] = value
+ return res
+
+
def get_type_hints(obj, globalns=None, localns=None):
    """Python 2 stub: type hints are unsupported, so always return None."""
    # Kept for API compatibility with the Python 3 typing module.
    return None
+
+
def no_type_check(arg):
    """Decorator to indicate that annotations are not type hints.

    The argument must be a class or function; if it is a class, it
    applies recursively to all methods and classes defined in that class
    (but not to methods defined in its superclasses or subclasses).

    This mutates the function(s) or class(es) in place.
    """
    if isinstance(arg, type):
        # Skip attributes that compare equal to the class itself or one
        # of its bases; mark functions and recurse into nested classes.
        skip = arg.__bases__ + (arg,)
        members = [val for val in arg.__dict__.values() if val not in skip]
        for member in members:
            if isinstance(member, types.FunctionType):
                member.__no_type_check__ = True
            if isinstance(member, type):
                no_type_check(member)
    try:
        arg.__no_type_check__ = True
    except TypeError:  # built-in classes
        pass
    return arg
+
+
def no_type_check_decorator(decorator):
    """Decorator to give another decorator the @no_type_check effect.

    This wraps the decorator with something that wraps the decorated
    function in @no_type_check.
    """

    @functools.wraps(decorator)
    def wrapped_decorator(*args, **kwds):
        # Apply the wrapped decorator first, then strip type hints.
        return no_type_check(decorator(*args, **kwds))

    return wrapped_decorator
+
+
+def _overload_dummy(*args, **kwds):
+ """Helper for @overload to raise when called."""
+ raise NotImplementedError(
+ "You should not call an overloaded function. "
+ "A series of @overload-decorated functions "
+ "outside a stub module should always be followed "
+ "by an implementation that is not @overload-ed.")
+
+
def overload(func):
    """Decorator for overloaded functions/methods.

    In a stub file, place two or more stub definitions for the same
    function in a row, each decorated with @overload. For example:

      @overload
      def utf8(value: None) -> None: ...
      @overload
      def utf8(value: bytes) -> bytes: ...
      @overload
      def utf8(value: str) -> bytes: ...

    In a non-stub file (i.e. a regular .py file), do the same but
    follow it with an implementation. The implementation should *not*
    be decorated with @overload. For example:

      @overload
      def utf8(value: None) -> None: ...
      @overload
      def utf8(value: bytes) -> bytes: ...
      @overload
      def utf8(value: str) -> bytes: ...
      def utf8(value):
          # implementation goes here
    """
    # All overload stubs are replaced by one shared dummy that raises
    # NotImplementedError if called.
    return _overload_dummy
+
+
# Names of _abcoll classes that protocol classes are allowed to inherit
# from (in addition to other protocols); see _ProtocolMeta.__init__.
_PROTO_WHITELIST = ['Callable', 'Iterable', 'Iterator',
                    'Hashable', 'Sized', 'Container', 'Collection',
                    'Reversible', 'ContextManager']
+
+
class _ProtocolMeta(GenericMeta):
    """Internal metaclass for Protocol.

    This exists so Protocol classes can be generic without deriving
    from Generic.
    """
    def __init__(cls, *args, **kwargs):
        super(_ProtocolMeta, cls).__init__(*args, **kwargs)
        if not cls.__dict__.get('_is_protocol', None):
            # A class is a protocol if any base is Protocol itself or a
            # parameterization of it.
            cls._is_protocol = any(b is Protocol or
                                   isinstance(b, _ProtocolMeta) and
                                   b.__origin__ is Protocol
                                   for b in cls.__bases__)
        if cls._is_protocol:
            for base in cls.__mro__[1:]:
                if not (base in (object, Generic) or
                        base.__module__ == '_abcoll' and
                        base.__name__ in _PROTO_WHITELIST or
                        isinstance(base, TypingMeta) and base._is_protocol or
                        isinstance(base, GenericMeta) and base.__origin__ is Generic):
                    raise TypeError('Protocols can only inherit from other protocols,'
                                    ' got %r' % base)
            # isinstance() support with data (non-method) members is
            # limited; remember whether the protocol has only methods.
            cls._callable_members_only = all(callable(getattr(cls, attr))
                                             for attr in cls._get_protocol_attrs())

            def _no_init(self, *args, **kwargs):
                if type(self)._is_protocol:
                    raise TypeError('Protocols cannot be instantiated')
            cls.__init__ = _no_init

        def _proto_hook(cls, other):
            if not cls.__dict__.get('_is_protocol', None):
                return NotImplemented
            if not isinstance(other, type):
                # Similar error as for issubclass(1, int)
                # (also not a chance for old-style classes)
                raise TypeError('issubclass() arg 1 must be a new-style class')
            for attr in cls._get_protocol_attrs():
                for base in other.__mro__:
                    if attr in base.__dict__:
                        if base.__dict__[attr] is None:
                            return NotImplemented
                        break
                else:
                    return NotImplemented
            return True
        if '__subclasshook__' not in cls.__dict__:
            cls.__subclasshook__ = classmethod(_proto_hook)

    def __instancecheck__(self, instance):
        # We need this method for situations where attributes are assigned in __init__
        if isinstance(instance, type):
            # This looks like a fundamental limitation of Python 2.
            # It cannot support runtime protocol metaclasses, On Python 2 classes
            # cannot be correctly inspected as instances of protocols.
            return False
        if ((not getattr(self, '_is_protocol', False) or
                self._callable_members_only) and
                issubclass(instance.__class__, self)):
            return True
        if self._is_protocol:
            # Structural check: the instance must expose every protocol
            # attribute (data members may not be None).
            if all(hasattr(instance, attr) and
                   (not callable(getattr(self, attr)) or
                    getattr(instance, attr) is not None)
                   for attr in self._get_protocol_attrs()):
                return True
        return super(GenericMeta, self).__instancecheck__(instance)

    def __subclasscheck__(self, cls):
        if (self.__dict__.get('_is_protocol', None) and
                not self.__dict__.get('_is_runtime_protocol', None)):
            if (sys._getframe(1).f_globals['__name__'] in ['abc', 'functools'] or
                    # This is needed because we remove subclasses from unions on Python 2.
                    sys._getframe(2).f_globals['__name__'] == 'typing'):
                return False
            raise TypeError("Instance and class checks can only be used with"
                            " @runtime_checkable protocols")
        if (self.__dict__.get('_is_runtime_protocol', None) and
                not self._callable_members_only):
            if sys._getframe(1).f_globals['__name__'] in ['abc', 'functools']:
                return super(GenericMeta, self).__subclasscheck__(cls)
            raise TypeError("Protocols with non-method members"
                            " don't support issubclass()")
        return super(_ProtocolMeta, self).__subclasscheck__(cls)

    def _get_protocol_attrs(self):
        # Collect the names that define the protocol's structural
        # contract, filtering out typing/ABC bookkeeping attributes.
        attrs = set()
        for base in self.__mro__[:-1]:  # without object
            if base.__name__ in ('Protocol', 'Generic'):
                continue
            annotations = getattr(base, '__annotations__', {})
            for attr in list(base.__dict__.keys()) + list(annotations.keys()):
                if (not attr.startswith('_abc_') and attr not in (
                        '__abstractmethods__', '__annotations__', '__weakref__',
                        '_is_protocol', '_is_runtime_protocol', '__dict__',
                        '__args__', '__slots__', '_get_protocol_attrs',
                        '__next_in_mro__', '__parameters__', '__origin__',
                        '__orig_bases__', '__extra__', '__tree_hash__',
                        '__doc__', '__subclasshook__', '__init__', '__new__',
                        '__module__', '_MutableMapping__marker',
                        '__metaclass__', '_gorg', '_callable_members_only')):
                    attrs.add(attr)
        return attrs
+
+
class Protocol(object):
    """Base class for protocol classes. Protocol classes are defined as::

      class Proto(Protocol):
          def meth(self):
              # type: () -> int
              pass

    Such classes are primarily used with static type checkers that recognize
    structural subtyping (static duck-typing), for example::

      class C:
          def meth(self):
              # type: () -> int
              return 0

      def func(x):
          # type: (Proto) -> int
          return x.meth()

      func(C())  # Passes static type check

    See PEP 544 for details. Protocol classes decorated with
    @typing.runtime_checkable act as simple-minded runtime protocols that
    check only the presence of given attributes, ignoring their type
    signatures.

    Protocol classes can be generic, they are defined as::

      class GenProto(Protocol[T]):
          def meth(self):
              # type: () -> T
              pass
    """

    __metaclass__ = _ProtocolMeta
    __slots__ = ()
    # Marks this class (and, via _ProtocolMeta, its protocol subclasses).
    _is_protocol = True

    def __new__(cls, *args, **kwds):
        if cls._gorg is Protocol:
            raise TypeError("Type Protocol cannot be instantiated; "
                            "it can be used only as a base class")
        return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
+
+
def runtime_checkable(cls):
    """Mark a protocol class as a runtime protocol, so that it
    can be used with isinstance() and issubclass(). Raise TypeError
    if applied to a non-protocol class.

    This allows a simple-minded structural check very similar to the
    one-offs in collections.abc such as Hashable.
    """
    is_protocol_class = isinstance(cls, _ProtocolMeta) and cls._is_protocol
    if not is_protocol_class:
        raise TypeError('@runtime_checkable can be only applied to protocol classes,'
                        ' got %r' % cls)
    # _ProtocolMeta.__subclasscheck__ consults this flag.
    cls._is_runtime_protocol = True
    return cls
+
+
+# Various ABCs mimicking those in collections.abc.
+# A few are simply re-exported for completeness.
+
Hashable = collections_abc.Hashable  # Not generic.


# Generic counterpart of collections_abc.Iterable; runtime checks are
# delegated to __extra__ via GenericMeta's subclass hook.
class Iterable(Generic[T_co]):
    __slots__ = ()
    __extra__ = collections_abc.Iterable


class Iterator(Iterable[T_co]):
    __slots__ = ()
    __extra__ = collections_abc.Iterator
+
+
# Structural "Supports*" protocols. They are runtime-checkable, so
# isinstance() merely tests for the presence of the special method.
@runtime_checkable
class SupportsInt(Protocol):
    __slots__ = ()

    @abstractmethod
    def __int__(self):
        pass


@runtime_checkable
class SupportsFloat(Protocol):
    __slots__ = ()

    @abstractmethod
    def __float__(self):
        pass


@runtime_checkable
class SupportsComplex(Protocol):
    __slots__ = ()

    @abstractmethod
    def __complex__(self):
        pass


@runtime_checkable
class SupportsIndex(Protocol):
    __slots__ = ()

    @abstractmethod
    def __index__(self):
        pass


@runtime_checkable
class SupportsAbs(Protocol[T_co]):
    __slots__ = ()

    @abstractmethod
    def __abs__(self):
        pass
+
+
if hasattr(collections_abc, 'Reversible'):
    class Reversible(Iterable[T_co]):
        __slots__ = ()
        __extra__ = collections_abc.Reversible
else:
    # Older stdlib without collections_abc.Reversible: fall back to a
    # structural protocol that only requires __reversed__.
    @runtime_checkable
    class Reversible(Protocol[T_co]):
        __slots__ = ()

        @abstractmethod
        def __reversed__(self):
            pass


Sized = collections_abc.Sized  # Not generic.


class Container(Generic[T_co]):
    __slots__ = ()
    __extra__ = collections_abc.Container
+
+
+# Callable was defined earlier.
+
+
class AbstractSet(Sized, Iterable[T_co], Container[T_co]):
    __slots__ = ()
    __extra__ = collections_abc.Set


class MutableSet(AbstractSet[T]):
    __slots__ = ()
    __extra__ = collections_abc.MutableSet


# NOTE: It is only covariant in the value type.
class Mapping(Sized, Iterable[KT], Container[KT], Generic[KT, VT_co]):
    __slots__ = ()
    __extra__ = collections_abc.Mapping


class MutableMapping(Mapping[KT, VT]):
    __slots__ = ()
    __extra__ = collections_abc.MutableMapping


if hasattr(collections_abc, 'Reversible'):
    class Sequence(Sized, Reversible[T_co], Container[T_co]):
        __slots__ = ()
        __extra__ = collections_abc.Sequence
else:
    # Without collections_abc.Reversible, Sequence falls back to Iterable.
    class Sequence(Sized, Iterable[T_co], Container[T_co]):
        __slots__ = ()
        __extra__ = collections_abc.Sequence
+
+
class MutableSequence(Sequence[T]):
    __slots__ = ()
    __extra__ = collections_abc.MutableSequence


class ByteString(Sequence[int]):
    pass


# On Python 2, both str and bytearray count as byte strings.
ByteString.register(str)
ByteString.register(bytearray)


class List(list, MutableSequence[T]):
    __slots__ = ()
    __extra__ = list

    def __new__(cls, *args, **kwds):
        # The annotation form is not a usable constructor.
        if cls._gorg is List:
            raise TypeError("Type List cannot be instantiated; "
                            "use list() instead")
        return _generic_new(list, cls, *args, **kwds)
+
+
class Deque(collections.deque, MutableSequence[T]):
    __slots__ = ()
    __extra__ = collections.deque

    def __new__(cls, *args, **kwds):
        # Unlike List, the bare Deque simply constructs a plain deque.
        if cls._gorg is Deque:
            return collections.deque(*args, **kwds)
        return _generic_new(collections.deque, cls, *args, **kwds)


class Set(set, MutableSet[T]):
    __slots__ = ()
    __extra__ = set

    def __new__(cls, *args, **kwds):
        # The annotation form is not a usable constructor.
        if cls._gorg is Set:
            raise TypeError("Type Set cannot be instantiated; "
                            "use set() instead")
        return _generic_new(set, cls, *args, **kwds)


class FrozenSet(frozenset, AbstractSet[T_co]):
    __slots__ = ()
    __extra__ = frozenset

    def __new__(cls, *args, **kwds):
        # The annotation form is not a usable constructor.
        if cls._gorg is FrozenSet:
            raise TypeError("Type FrozenSet cannot be instantiated; "
                            "use frozenset() instead")
        return _generic_new(frozenset, cls, *args, **kwds)
+
+
# Generic versions of the dict view ABCs.
class MappingView(Sized, Iterable[T_co]):
    __slots__ = ()
    __extra__ = collections_abc.MappingView


class KeysView(MappingView[KT], AbstractSet[KT]):
    __slots__ = ()
    __extra__ = collections_abc.KeysView


class ItemsView(MappingView[Tuple[KT, VT_co]],
                AbstractSet[Tuple[KT, VT_co]],
                Generic[KT, VT_co]):
    __slots__ = ()
    __extra__ = collections_abc.ItemsView


class ValuesView(MappingView[VT_co]):
    __slots__ = ()
    __extra__ = collections_abc.ValuesView
+
+
class ContextManager(Generic[T_co]):
    __slots__ = ()

    def __enter__(self):
        return self

    @abc.abstractmethod
    def __exit__(self, exc_type, exc_value, traceback):
        return None

    @classmethod
    def __subclasshook__(cls, C):
        # Structural check: any class whose MRO defines both __enter__
        # and __exit__ counts as a context manager.
        if cls is ContextManager:
            # In Python 3.6+, it is possible to set a method to None to
            # explicitly indicate that the class does not implement an ABC
            # (https://bugs.python.org/issue25958), but we do not support
            # that pattern here because this fallback class is only used
            # in Python 3.5 and earlier.
            if (any("__enter__" in B.__dict__ for B in C.__mro__) and
                    any("__exit__" in B.__dict__ for B in C.__mro__)):
                return True
        return NotImplemented
+
+
class Dict(dict, MutableMapping[KT, VT]):
    __slots__ = ()
    __extra__ = dict

    def __new__(cls, *args, **kwds):
        # The annotation form is not a usable constructor.
        if cls._gorg is Dict:
            raise TypeError("Type Dict cannot be instantiated; "
                            "use dict() instead")
        return _generic_new(dict, cls, *args, **kwds)


class DefaultDict(collections.defaultdict, MutableMapping[KT, VT]):
    __slots__ = ()
    __extra__ = collections.defaultdict

    def __new__(cls, *args, **kwds):
        # Unlike Dict, the bare DefaultDict constructs a plain defaultdict.
        if cls._gorg is DefaultDict:
            return collections.defaultdict(*args, **kwds)
        return _generic_new(collections.defaultdict, cls, *args, **kwds)


class Counter(collections.Counter, Dict[T, int]):
    __slots__ = ()
    __extra__ = collections.Counter

    def __new__(cls, *args, **kwds):
        # The bare Counter constructs a plain collections.Counter.
        if cls._gorg is Counter:
            return collections.Counter(*args, **kwds)
        return _generic_new(collections.Counter, cls, *args, **kwds)
+
+
+# Determine what base class to use for Generator.
+if hasattr(collections_abc, 'Generator'):
+ # Sufficiently recent versions of 3.5 have a Generator ABC.
+ _G_base = collections_abc.Generator
+else:
+ # Fall back on the exact type.
+ _G_base = types.GeneratorType
+
+
+class Generator(Iterator[T_co], Generic[T_co, T_contra, V_co]):
+ __slots__ = ()
+ __extra__ = _G_base
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is Generator:
+ raise TypeError("Type Generator cannot be instantiated; "
+ "create a subclass instead")
+ return _generic_new(_G_base, cls, *args, **kwds)
+
+
+# Internal type variable used for Type[].
+CT_co = TypeVar('CT_co', covariant=True, bound=type)
+
+
+# This is not a real generic class. Don't use outside annotations.
+class Type(Generic[CT_co]):
+ """A special construct usable to annotate class objects.
+
+ For example, suppose we have the following classes::
+
+ class User: ... # Abstract base for User classes
+ class BasicUser(User): ...
+ class ProUser(User): ...
+ class TeamUser(User): ...
+
+ And a function that takes a class argument that's a subclass of
+ User and returns an instance of the corresponding class::
+
+ U = TypeVar('U', bound=User)
+ def new_user(user_class: Type[U]) -> U:
+ user = user_class()
+ # (Here we could write the user object to a database)
+ return user
+
+ joe = new_user(BasicUser)
+
+ At this point the type checker knows that joe has type BasicUser.
+ """
+ __slots__ = ()
+ __extra__ = type
+
+
+def NamedTuple(typename, fields):
+ """Typed version of namedtuple.
+
+ Usage::
+
+ Employee = typing.NamedTuple('Employee', [('name', str), ('id', int)])
+
+ This is equivalent to::
+
+ Employee = collections.namedtuple('Employee', ['name', 'id'])
+
+ The resulting class has one extra attribute: _field_types,
+ giving a dict mapping field names to types. (The field names
+ are in the _fields attribute, which is part of the namedtuple
+ API.)
+ """
+ fields = [(n, t) for n, t in fields]
+ cls = collections.namedtuple(typename, [n for n, t in fields])
+ cls._field_types = dict(fields)
+ # Set the module to the caller's module (otherwise it'd be 'typing').
+ try:
+ cls.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ pass
+ return cls
+
+
+def _check_fails(cls, other):
+ try:
+ if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools', 'typing']:
+ # Typed dicts are only for static structural subtyping.
+ raise TypeError('TypedDict does not support instance and class checks')
+ except (AttributeError, ValueError):
+ pass
+ return False
+
+
+def _dict_new(cls, *args, **kwargs):
+ return dict(*args, **kwargs)
+
+
+def _typeddict_new(cls, _typename, _fields=None, **kwargs):
+ total = kwargs.pop('total', True)
+ if _fields is None:
+ _fields = kwargs
+ elif kwargs:
+ raise TypeError("TypedDict takes either a dict or keyword arguments,"
+ " but not both")
+
+ ns = {'__annotations__': dict(_fields), '__total__': total}
+ try:
+ # Setting correct module is necessary to make typed dict classes pickleable.
+ ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ pass
+
+ return _TypedDictMeta(_typename, (), ns)
+
+
+class _TypedDictMeta(type):
+ def __new__(cls, name, bases, ns, total=True):
+ # Create new typed dict class object.
+ # This method is called directly when TypedDict is subclassed,
+ # or via _typeddict_new when TypedDict is instantiated. This way
+ # TypedDict supports all three syntaxes described in its docstring.
+ # Subclasses and instances of TypedDict return actual dictionaries
+ # via _dict_new.
+ ns['__new__'] = _typeddict_new if name == b'TypedDict' else _dict_new
+ tp_dict = super(_TypedDictMeta, cls).__new__(cls, name, (dict,), ns)
+
+ anns = ns.get('__annotations__', {})
+ msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
+ anns = {n: _type_check(tp, msg) for n, tp in anns.items()}
+ for base in bases:
+ anns.update(base.__dict__.get('__annotations__', {}))
+ tp_dict.__annotations__ = anns
+ if not hasattr(tp_dict, '__total__'):
+ tp_dict.__total__ = total
+ return tp_dict
+
+ __instancecheck__ = __subclasscheck__ = _check_fails
+
+
+TypedDict = _TypedDictMeta(b'TypedDict', (dict,), {})
+TypedDict.__module__ = __name__
+TypedDict.__doc__ = \
+ """A simple typed name space. At runtime it is equivalent to a plain dict.
+
+ TypedDict creates a dictionary type that expects all of its
+ instances to have a certain set of keys, with each key
+ associated with a value of a consistent type. This expectation
+ is not checked at runtime but is only enforced by type checkers.
+ Usage::
+
+ Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
+
+ a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
+ b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
+
+ assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
+
+ The type info could be accessed via Point2D.__annotations__. TypedDict
+ supports an additional equivalent form::
+
+ Point2D = TypedDict('Point2D', x=int, y=int, label=str)
+ """
+
+
+def NewType(name, tp):
+ """NewType creates simple unique types with almost zero
+ runtime overhead. NewType(name, tp) is considered a subtype of tp
+ by static type checkers. At runtime, NewType(name, tp) returns
+ a dummy function that simply returns its argument. Usage::
+
+ UserId = NewType('UserId', int)
+
+ def name_by_id(user_id):
+ # type: (UserId) -> str
+ ...
+
+ UserId('user') # Fails type check
+
+ name_by_id(42) # Fails type check
+ name_by_id(UserId(42)) # OK
+
+ num = UserId(5) + 1 # type: int
+ """
+
+ def new_type(x):
+ return x
+
+ # Some versions of Python 2 complain because of making all strings unicode
+ new_type.__name__ = str(name)
+ new_type.__supertype__ = tp
+ return new_type
+
+
+# Python-version-specific alias (Python 2: unicode; Python 3: str)
+Text = unicode
+
+
+# Constant that's True when type checking, but False here.
+TYPE_CHECKING = False
+
+
+class IO(Generic[AnyStr]):
+ """Generic base class for TextIO and BinaryIO.
+
+ This is an abstract, generic version of the return of open().
+
+ NOTE: This does not distinguish between the different possible
+ classes (text vs. binary, read vs. write vs. read/write,
+ append-only, unbuffered). The TextIO and BinaryIO subclasses
+ below capture the distinctions between text vs. binary, which is
+ pervasive in the interface; however we currently do not offer a
+ way to track the other distinctions in the type system.
+ """
+
+ __slots__ = ()
+
+ @abstractproperty
+ def mode(self):
+ pass
+
+ @abstractproperty
+ def name(self):
+ pass
+
+ @abstractmethod
+ def close(self):
+ pass
+
+ @abstractproperty
+ def closed(self):
+ pass
+
+ @abstractmethod
+ def fileno(self):
+ pass
+
+ @abstractmethod
+ def flush(self):
+ pass
+
+ @abstractmethod
+ def isatty(self):
+ pass
+
+ @abstractmethod
+ def read(self, n=-1):
+ pass
+
+ @abstractmethod
+ def readable(self):
+ pass
+
+ @abstractmethod
+ def readline(self, limit=-1):
+ pass
+
+ @abstractmethod
+ def readlines(self, hint=-1):
+ pass
+
+ @abstractmethod
+ def seek(self, offset, whence=0):
+ pass
+
+ @abstractmethod
+ def seekable(self):
+ pass
+
+ @abstractmethod
+ def tell(self):
+ pass
+
+ @abstractmethod
+ def truncate(self, size=None):
+ pass
+
+ @abstractmethod
+ def writable(self):
+ pass
+
+ @abstractmethod
+ def write(self, s):
+ pass
+
+ @abstractmethod
+ def writelines(self, lines):
+ pass
+
+ @abstractmethod
+ def __enter__(self):
+ pass
+
+ @abstractmethod
+ def __exit__(self, type, value, traceback):
+ pass
+
+
+class BinaryIO(IO[bytes]):
+ """Typed version of the return of open() in binary mode."""
+
+ __slots__ = ()
+
+ @abstractmethod
+ def write(self, s):
+ pass
+
+ @abstractmethod
+ def __enter__(self):
+ pass
+
+
+class TextIO(IO[unicode]):
+ """Typed version of the return of open() in text mode."""
+
+ __slots__ = ()
+
+ @abstractproperty
+ def buffer(self):
+ pass
+
+ @abstractproperty
+ def encoding(self):
+ pass
+
+ @abstractproperty
+ def errors(self):
+ pass
+
+ @abstractproperty
+ def line_buffering(self):
+ pass
+
+ @abstractproperty
+ def newlines(self):
+ pass
+
+ @abstractmethod
+ def __enter__(self):
+ pass
+
+
+class io(object):
+ """Wrapper namespace for IO generic classes."""
+
+ __all__ = ['IO', 'TextIO', 'BinaryIO']
+ IO = IO
+ TextIO = TextIO
+ BinaryIO = BinaryIO
+
+
+io.__name__ = __name__ + b'.io'
+sys.modules[io.__name__] = io
+
+
+Pattern = _TypeAlias('Pattern', AnyStr, type(stdlib_re.compile('')),
+ lambda p: p.pattern)
+Match = _TypeAlias('Match', AnyStr, type(stdlib_re.match('', '')),
+ lambda m: m.re.pattern)
+
+
+class re(object):
+ """Wrapper namespace for re type aliases."""
+
+ __all__ = ['Pattern', 'Match']
+ Pattern = Pattern
+ Match = Match
+
+
+re.__name__ = __name__ + b'.re'
+sys.modules[re.__name__] = re
diff --git a/src/clyphx/xtriggers.py b/src/clyphx/xtriggers.py
index 27242fe..88a39fe 100644
--- a/src/clyphx/xtriggers.py
+++ b/src/clyphx/xtriggers.py
@@ -13,14 +13,20 @@
#
# You should have received a copy of the GNU Lesser General Public License
# along with ClyphX. If not, see <https://www.gnu.org/licenses/>.
+
from __future__ import absolute_import, unicode_literals
from builtins import super, dict, range
-
+from typing import TYPE_CHECKING
from functools import partial
import logging
-import Live
-from .core import XComponent
-from .action_list import ActionList
+
+from .core.xcomponent import XComponent
+from .core.legacy import ActionList
+from .core.live import forward_midi_cc, forward_midi_note
+
+if TYPE_CHECKING:
+ from typing import Any, Iterable, Sequence, Dict, Text, List, Tuple
+ from .core.live import Clip, Track
log = logging.getLogger(__name__)
@@ -30,14 +36,15 @@ class XTrigger(XComponent):
class XControlComponent(XTrigger):
- '''Control component for ClyphX.
+ '''A control on a MIDI controller.
'''
__module__ = __name__
def __init__(self, parent):
+ # type: (Any) -> None
super().__init__(parent)
- self._control_list = dict()
- self._xt_scripts = []
+ self._control_list = dict() # type: Dict[Tuple[int, int], Dict[Text, Any]]
+ self._xt_scripts = [] # type: List[Any]
def disconnect(self):
self._control_list = dict()
@@ -45,8 +52,8 @@ def disconnect(self):
super().disconnect()
def connect_script_instances(self, instantiated_scripts):
+ # type: (Iterable[Any]) -> None
'''Try to connect to ClyphX_XT instances.'''
- ClyphX_XT = None
for i in range(5):
try:
if i == 0:
@@ -59,6 +66,8 @@ def connect_script_instances(self, instantiated_scripts):
from ClyphX_XTD.ClyphX_XT import ClyphX_XT
elif i == 4:
from ClyphX_XTE.ClyphX_XT import ClyphX_XT
+ else:
+ continue
except ImportError:
pass
else:
@@ -69,6 +78,7 @@ def connect_script_instances(self, instantiated_scripts):
break
def assign_new_actions(self, string):
+ # type: (Text) -> None
'''Assign new actions to controls via xclips.'''
if self._xt_scripts:
for x in self._xt_scripts:
@@ -91,6 +101,7 @@ def assign_new_actions(self, string):
break
def receive_midi(self, bytes):
+ # type: (Sequence[int]) -> None
'''Receive user-defined midi messages.'''
if self._control_list:
ctrl_data = None
@@ -110,18 +121,30 @@ def receive_midi(self, bytes):
self._parent.handle_action_list_trigger(self.song().view.selected_track,
ctrl_data['name'])
+ def read_user_settings(self, settings, midi_map_handle):
+ # TODO
+ for name, data in settings.items():
+ msg_type, chnl, val, actions = map(str.strip, data.split(',', 3))
+ try:
+ status_byte = {'note': 144, 'cc': 176}[msg_type]
+ except KeyError:
+ log.error("MSG TYPE of user controls has to be 'NOTE' or 'CC'")
+ continue
+
+
def get_user_control_settings(self, data, midi_map_handle):
+ # type: (Iterable[Text], int) -> None
'''Receives control data from user settings file and builds
control dictionary.
'''
self._control_list = dict()
- for d in data:
+ for _d in data:
status_byte = None
channel = None
ctrl_num = None
on_action = None
off_action = None
- d = d.split('=')
+ d = _d.split('=')
ctrl_name = d[0].strip()
new_ctrl_data = d[1].split(',')
try:
@@ -148,23 +171,22 @@ def get_user_control_settings(self, data, midi_map_handle):
off_action = off_action,
name = ActionList(on_action),
)
- if status_byte == 144:
- fn = Live.MidiMap.forward_midi_note
- else:
- fn = Live.MidiMap.forward_midi_cc
+ fn = forward_midi_note if status_byte == 144 else forward_midi_cc
fn(self._parent._c_instance.handle(), midi_map_handle, channel, ctrl_num)
def rebuild_control_map(self, midi_map_handle):
+ # type: (int) -> None
'''Called from main when build_midi_map is called.'''
+ log.info('XControlComponent.rebuild_control_map')
for key in self._control_list.keys():
if key[0] >= 176:
# forwards a CC msg to the receive_midi method
- Live.MidiMap.forward_midi_cc(
+ forward_midi_cc(
self._parent._c_instance.handle(), midi_map_handle, key[0] - 176, key[1]
)
else:
# forwards a NOTE msg to the receive_midi method
- Live.MidiMap.forward_midi_note(
+ forward_midi_note(
self._parent._c_instance.handle(), midi_map_handle, key[0] - 144, key[1]
)
@@ -176,6 +198,7 @@ class XTrackComponent(XTrigger):
__module__ = __name__
def __init__(self, parent, track):
+ # type: (Any, Track) -> None
super().__init__(parent)
self._track = track
self._clip = None
@@ -183,7 +206,7 @@ def __init__(self, parent, track):
self._track.add_playing_slot_index_listener(self.play_slot_index_changed)
self._register_timer_callback(self.on_timer)
self._last_slot_index = -1
- self._triggered_clips = []
+ self._triggered_clips = [] # type: List[Clip]
self._triggered_lseq_clip = None
def disconnect(self):
@@ -219,6 +242,7 @@ def play_slot_index_changed(self):
self._clip.add_loop_jump_listener(self.on_loop_jump)
def get_xclip(self, slot_index):
+ # type: (int) -> Clip
'''Get the xclip associated with slot_index or None.'''
clip = None
if self._track and 0 <= slot_index < len(self._track.clip_slots):
@@ -265,14 +289,15 @@ class XCueComponent(XTrigger):
__module__ = __name__
def __init__(self, parent):
+ # type: (Any) -> None
super().__init__(parent)
self.song().add_current_song_time_listener(self.arrange_time_changed)
self.song().add_is_playing_listener(self.arrange_time_changed)
self.song().add_cue_points_listener(self.cue_points_changed)
- self._x_points = dict()
+ self._x_points = dict() # type: Dict[Text, Any]
self._x_point_time_to_watch_for = -1
self._last_arrange_position = -1
- self._sorted_times = []
+ self._sorted_times = [] # type: List[Any]
self.cue_points_changed()
def disconnect(self):
@@ -295,7 +320,7 @@ def cue_points_changed(self):
cp.add_time_listener(self.cue_points_changed)
if not cp.name_has_listener(self.cue_points_changed):
cp.add_name_listener(self.cue_points_changed)
- name = self._parent.get_name(cp.name)
+ name = cp.name.upper()
if len(name) > 2 and name[0] == '[' and name.count('[') == 1 and name.count(']') == 1:
cue_name = name.replace(name[name.index('['):name.index(']')+1].strip(), '')
self._x_points[cp.time] = cp
diff --git a/stubs/clyphx/actions/clip.pyi b/stubs/clyphx/actions/clip.pyi
deleted file mode 100644
index 7413422..0000000
--- a/stubs/clyphx/actions/clip.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-from ..core import XComponent
-
-class XClipActions(XComponent):
- # TODO
-
- def get_note_name_from_string(self, string: str) -> str:
- ...
-
- def string_to_note(self, string: str) -> int:
- ...
-
- # TODO
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..386eba6
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,16 @@
+from __future__ import absolute_import, unicode_literals
+import sys
+import os
+
+import pytest
+
+HERE = os.path.dirname(os.path.realpath(__file__))
+CODE = os.path.realpath(os.path.join(HERE, '..', 'src'))
+
+sys.path.insert(0, str(CODE))
+
+
+@pytest.fixture(scope='session')
+def user_settings():
+ here = os.path.dirname(os.path.realpath(__file__))
+ return os.path.join(here, 'fixtures', 'UserSettings.txt')
diff --git a/tests/fixtures/UserSettings.txt b/tests/fixtures/UserSettings.txt
new file mode 100644
index 0000000..546cff5
--- /dev/null
+++ b/tests/fixtures/UserSettings.txt
@@ -0,0 +1,290 @@
+# This file is part of ClyphX.
+#
+# ClyphX is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# ClyphX is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+# more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with ClyphX. If not, see <https://www.gnu.org/licenses/>.
+
+
+
+***************************** [SETTINGS NOTES] **************************
+
+
+# Please DO NOT change any of the spacing in this file.
+
+# Please DO NOT change the name of this file or its file extension. When done
+# making your changes to the settings below, just save the file.
+
+# After saving this file, you will need to restart Live for your changes
+# to take effect.
+
+# For Windows 7/Vista users, depending on how your privileges are set up, you
+# may not be able to save changes you make to this file. You may receive an
+# error such as Access Denied when trying to save. If this occurs, you will
+# need to drag this file onto your desktop, then make your changes and save.
+# When done, drag the file back into the ClyphX folder.
+
+
+
+***************************** [SNAPSHOT SETTINGS] **************************
+
+
+INCLUDE_NESTED_DEVICES_IN_SNAPSHOTS = On
+# Setting:
+# Off or On
+
+# Description:
+# Determines whether or not nested Devices (Devices inside of Racks) will be
+# included in Snapshots. This setting only applies if you're using Live 8.2.2
+# or later.
+
+
+
+SNAPSHOT_PARAMETER_LIMIT = 500
+# Setting:
+# Any whole number
+
+# Description:
+# Determines the number of parameters that Snapshots will be allowed to store.
+# If the limit is exceeded, you'll receive an error message.
+
+# Note:
+# Please use caution when adjusting this setting. Recalling Snapshots that have
+# stored 1000 or more parameters can cause delays and momentary freezing of
+# Live's GUI.
+
+
+
+***************************** [EXTRA PREFS] **************************
+
+
+PROCESS_XCLIPS_IF_TRACK_MUTED = True
+# Setting:
+# True or False
+
+# Description:
+# Determines whether or not X-Clips on a Muted Track will be processed.
+
+
+
+STARTUP_ACTIONS = Off
+# Setting:
+# Off or Action(s) to perform on set load.
+
+# Description:
+# Performs an Action List when a set is loaded.
+
+
+
+NAVIGATION_HIGHLIGHT = On
+# Setting:
+# On or Off
+
+# Description:
+# Displays a highlight around the selected Clip.
+
+
+
+EXCLUSIVE_ARM_ON_SELECT = Off
+# Setting:
+# On or Off
+
+# Description:
+# Upon selecting Tracks, if the selected Track can be armed, it will be armed
+# and any other armed Tracks will be disarmed.
+
+# Note:
+# This function may produce undesirable results if Select On Launch is on
+# in your Live preferences.
+
+
+
+EXCLUSIVE_SHOW_GROUP_ON_SELECT = Off
+# Setting:
+# On or Off
+
+# Description:
+# Upon selecting Tracks, if the selected Track is a Group Track, it will be
+# unfolded and any other Group Tracks will be folded.
+
+# Note:
+# This function may produce undesirable results if Select On Launch is on
+# in your Live preferences.
+
+
+
+CLIP_RECORD_LENGTH_SET_BY_GLOBAL_QUANTIZATION = Off
+# Setting:
+# On or Off
+
+# Description:
+# This changes the behavior of launching the selected Clip Slot so that
+# (under the Conditions listed below) you can easily record a new Clip with a
+# length defined by the Global Quantization value. This will do nothing if the
+# Conditions below aren't met.
+
+# Conditions:
+# - Selected Track is armed
+# - Selected Clip Slot has no Clip on it
+# - Global Quantization is not set to None
+
+
+
+DEFAULT_INSERTED_MIDI_CLIP_LENGTH = 0
+# Setting:
+# 0 (for Off) or 2 - 16 (for number of bars to use)
+
+# Description:
+# Upon inserting a blank MIDI Clip onto the selected Clip Slot, the Clip's
+# length will be set to the length (in bars) specified in the setting above.
+
+# Note:
+# This will not change the default zoom setting of the Clip, so you'll only see
+# the Clip's first bar. You'll need to zoom out to see the rest of the Clip.
+
+
+
+***************************** [CSLINKER] **************************
+
+
+# CsLinker allows you to link the grid selectors (colored borders around clips)
+# of two Control Surfaces either horizontally or vertically.
+
+# The Control Surface script names to use are as shown in Live's Control Surface
+# chooser. If a Control Surface's name has a space in it (like MXT Live), you
+# should use an underscore in place of the space (like MXT_Live).
+
+# Note, Push and Push2 cannot be used for matched linking. Additionally,
+# horizontal linking with Push2 may produce undesirable results due to Push2's
+# inclusion of Chain mixer settings along side normal Track mixer settings.
+
+# You can also omit 'CSLINKER_' in props names, e.g.: MATCHED_LINK
+
+
+CSLINKER_MATCHED_LINK = False
+# Setting:
+# True for matched link or False for horizontal/vertical link.
+
+# Description:
+# Determines whether the two Control Surfaces should have a matched link meaning
+# that they will lay on top of each other. This setting overrides
+# CSLINKER_HORIZONTAL_LINK and CSLINKER_MULTI_AXIS_LINK.
+
+
+
+CSLINKER_HORIZONTAL_LINK = True
+# Setting:
+# True for horizontal link or False for vertical link.
+
+# Description:
+# Determines whether the two Control Surfaces should be horizontal or vertically
+# linked.
+
+
+
+CSLINKER_MULTI_AXIS_LINK = False
+# Setting:
+# True for multi-axis link.
+
+# Determines whether movement should be synchronized in all directions (vertical
+# and horizontal) or purely on a single axis determined by the
+# CS_HORIZONTAL_LINK setting.
+
+
+
+CSLINKER_SCRIPT_1_NAME = None
+# Setting:
+# None (to turn linking off) or the name of a Control Surface.
+
+# Description:
+# The first Control Surface script that should be linked.
+
+
+
+CSLINKER_SCRIPT_2_NAME = None
+# Setting:
+# None (to turn linking off) or the name of a Control Surface.
+
+# Description:
+# The second Control Surface script that should be linked.
+
+
+
+******************************* [USER CONTROLS] *******************************
+
+
+# Below, you can specify a list of MIDI Controls to use as X-Controls.
+
+# The entry format is:
+# CONTROL_NAME = MSG_TYPE, MIDI_CHANNEL, NOTE_OR_CC_NUM, ON_ACTION_LIST
+
+# CONTROL_NAME = A unique one-word name (Identifier) for the control.
+# See [IDENTIFIER NOTE] below.
+# MSG_TYPE = The word Note or CC.
+# MIDI_CHANNEL = The MIDI Channel number in the range of 1 - 16
+# NOTE_OR_CC = The Note or CC number in the range of 0 - 127.
+# ON_ACTION_LIST = The Action List to perform when the control sends an on
+# message.
+
+# Example: MY_BTN1 = NOTE, 1, 10, 1/MUTE ; 2/MUTE
+MY_BTN1 = NOTE, 1, 10, 1/MUTE ; 2/MUTE
+
+# You can optionally specify an Action List to perform when the control sends
+# an off message. To do this, place a comma after the On Action List and then
+# specify the Off Action List.
+
+# Example: MY_BTN2 = CC, 16, 117, 1/MUTE ; 2/MUTE, 3/PLAY >
+MY_BTN2 = CC, 16, 117, 1/MUTE ; 2/MUTE, 3/PLAY >
+
+# To perform the same Action List for the On Action List and Off Action List,
+# just specify an asterisk for the Off Action List.
+
+# Example: MY_BTN3 = NOTE, 5, 0, 1/MUTE, *
+MY_BTN3 = NOTE, 5, 0, 1/MUTE, *
+
+# Below is an example list that has been commented out (the # at the beginning
+# of a line makes the line a comment). Your list should be formatted in the same
+# way except without the # at the beginning of each line.
+
+
+btn_1 = note, 1, 0, mute , *
+btn_2 = note, 1, 1, solo
+btn_3 = cc, 9, 2, arm
+btn_4 = cc, 9, 3, mon
+#>>>>>>>>DELETE THIS ENTIRE LINE AND START YOUR LIST HERE<<<<<<<<#
+
+
+******************************* [USER VARIABLES] *******************************
+
+
+# Below, you can specify a list of Variables to use in your Action Lists.
+
+# The entry format is: VARIABLE_NAME = VALUE
+
+# VARIABLE_NAME = A unique one-word name (Identifier) for the variable.
+# See [IDENTIFIER NOTE] below.
+# VALUE = Any value or word or combination of words. See the User Variables
+# section of the manual for more info on this.
+
+# The Variables listed below are just examples and can be removed.
+
+
+ex_var1 = 10
+ex_var2 = mute
+
+
+******************************* [IDENTIFIER NOTE] ******************************
+
+
+# Identifiers and Variable names should not contain characters other than
+# letters, numbers and underscores.
+
+# Also, Variable names and their values are not case-sensitive.
diff --git a/tests/test_parsing.py b/tests/test_parsing.py
new file mode 100644
index 0000000..226245f
--- /dev/null
+++ b/tests/test_parsing.py
@@ -0,0 +1,79 @@
+from __future__ import absolute_import, unicode_literals
+
+
+# region USER SETTINGS TEST
+
+RESULT = {
+ 'snapshot_settings': {
+ 'include_nested_devices_in_snapshots': True,
+ 'snapshot_parameter_limit': 500
+ },
+ 'extra_prefs': {
+ 'process_xclips_if_track_muted': True,
+ 'startup_actions': False,
+ 'navigation_highlight': True,
+ 'exclusive_arm_on_select': False,
+ 'exclusive_show_group_on_select': False,
+ 'clip_record_length_set_by_global_quantization': False,
+ 'default_inserted_midi_clip_length': 0
+ },
+ 'cslinker': {
+ 'cslinker_matched_link': False,
+ 'cslinker_horizontal_link': True,
+ 'cslinker_multi_axis_link': False,
+ 'cslinker_script_1_name': None,
+ 'cslinker_script_2_name': None
+ },
+ 'user_controls': {},
+ 'user_variables': {
+ 'ex_var1': '10',
+ 'ex_var2': 'mute'
+ },
+ 'identifier_note': {}
+}
+
+
+def test_user_settings(user_settings):
+ from clyphx.core import UserSettings
+ cfg = UserSettings(user_settings)
+
+ assert cfg.prefs == cfg.extra_prefs == RESULT['extra_prefs']
+ assert cfg.controls == cfg.user_controls == RESULT['user_controls']
+ assert cfg.snapshots == cfg.snapshots_settings == RESULT['snapshot_settings']
+ assert cfg.cs_linker == cfg.cslinker == RESULT['cslinker']
+ assert cfg.vars == cfg.user_variables == RESULT['user_variables']
+ assert cfg.identifier_note == RESULT['identifier_note']
+
+# endregion
+
+# region COMMAND PARSER TEST
+
+def test_tracks():
+ from clyphx.core.parse import Parser
+
+ parse = Parser()
+
+ for (source, target) in [
+ (
+ '[] 1, 3, 5, "My Track", A-MST/MUTE',
+ {'start': [['1', '3', '5', '"My Track"', 'A-MST']]},
+ ),
+ (
+ '[] >-5/CLIP(SEL-7) WARP',
+ {'start': [['>-5']]},
+ ),
+ (
+ '[] DUMMY : "My Track"/DEV(ALL) OFF',
+ {'stop': [['"My Track"']]},
+ ),
+ (
+ '[IDENT] REC ON ; 1-2/ARM : UNARM ; 3-4/REC OFF',
+ {'start': [None, ['1-2']], 'stop': [None, ['3-4']]},
+ ),
+ ]:
+ res = parse(source)
+ for step in ('start', 'stop'):
+ for i, value in enumerate(target.get(step, [])):
+ assert getattr(res, step)[i].tracks == value
+
+# endregion
diff --git a/tools/parse_log.py b/tools/parse_log.py
deleted file mode 100644
index 63aa33c..0000000
--- a/tools/parse_log.py
+++ /dev/null
@@ -1,139 +0,0 @@
-#! /usr/bin/env python3
-
-from datetime import datetime
-from ast import literal_eval
-from platform import system
-from pathlib import Path
-import logging
-import os
-import re
-
-
-log = logging.getLogger(__name__)
-
-LINE = re.compile(r'(?P<tag>\S*?)\=(?P<value>.*)')
-DTFMT = '%Y-%m-%dT%H:%M:%S.%f'
-
-
-class Environment:
- def __init__(self):
- if system() == 'Windows':
- self.mac = False
- elif system() == 'Darwin':
- self.mac = True
- else:
- raise NotImplementedError
-
- @property
- def user_folder(self):
- if not self.mac:
- return Path(os.environ['APPDATA']) / 'Ableton'
- else:
- raise NotImplementedError
-
- @property
- def app_folder(self):
- '''Returns Ableton Live folder.'''
- if self.mac:
- raise NotImplementedError
- else:
- if not getattr(self, '_app_folder', None):
- import winreg
-
- reg = winreg.ConnectRegistry(None, winreg.HKEY_CLASSES_ROOT)
- value = winreg.QueryValue(reg, r'ableton\Shell\open\command')
- path, _ = value.rsplit(' ', 1)
- self._app_folder = Path(path).parents[1]
- return self._app_folder
-
- @property
- def reports(self):
- path = self.user_folder / 'Live Reports/Usage'
- return list(path.glob('*.log'))
-
- @property
- def resources(self):
- folder = 'Contents/App-Resources' if self.mac else 'Resources'
- return self.app_folder / folder
-
-
-def parse_report(content):
- info = dict()
- logs = list()
-
- for line in content.splitlines():
- data = LINE.search(line).groupdict()
- try:
- dt = datetime.strptime(data['tag'], DTFMT)
- logger, *msg = data['value'].split(' ', 1)
- msg = literal_eval(msg[0]) if msg else None
- logs.append((dt, logger, msg))
-
- except ValueError:
- info.update({data['tag']: data['value']})
-
- return {'info': info, 'logs': logs}
-
-
-def get_last_report():
- env = Environment()
- report = env.reports()[-1]
-
- with open(report) as logfile:
- return parse_report(logfile.read())
-
-
-TIMESTAMP = r'(?P<timestamp>[T0-9\-\:\.]+)'
-LEVEL = r'(?P<level>[a-z]+)'
-LOG_LINE = re.compile(r'^{}:\s{}:\s(?P<message>.*)$'.format(TIMESTAMP, LEVEL), re.M)
-START_LOG = re.compile(r'^{}: info: Started: Live .*$'.format(TIMESTAMP))
-
-
-class SessionLog:
- def __init__(self, start):
- self.start = start
- self.logs = list()
-
- def append(self, log):
- self.logs.append(log)
-
-
-class LogFile:
- def __init__(self):
- self.sessions = list()
-
- def parse(self, path):
- raise NotImplementedError
-
- session = None
-
- for line in open(path):
- line = LOG_LINE.match(line).groupdict()
- if line['message'].startswith('Started: Live '):
- if session:
- self.sessions.append(session)
- session = SessionLog(start=line['timestamp'])
- if not session:
- # lines without previous start log
- continue
- session.append(line)
-
-
-def rotate_logfile():
- env = Environment()
- NUM = re.compile(r'Log.(\d*).txt')
-
- for path in env.user_folder.glob('*/**'):
- if path.name == 'Preferences':
- last = 0
- for logfile in path.glob('Log*.txt'):
- num = NUM.match(logfile.name)
- if num:
- last = max(last, int(num.group(1)))
- dest = path / f'Log.{last+1}.txt'
- try:
- path.joinpath('Log.txt').rename(dest)
- except PermissionError:
- log.error('Cannot rotate log. Is Live running?')
- else:
- log.info('Log rotated. Last logs moved to: %s', dest)
diff --git a/tools/vscode.py b/tools/vscode.py
deleted file mode 100644
index 2634212..0000000
--- a/tools/vscode.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#! /usr/bin/env python3
-from pathlib import Path
-from shutil import which
-import logging
-import json
-import os
-try:
- from .parse_log import Environment
-except ImportError:
- from parse_log import Environment
-
-log = logging.getLogger(__name__)
-log.setLevel(logging.INFO)
-
-BASEDIR = Path(__file__).parents[1]
-TEMPLATE = BASEDIR / '.vscode/.settings.json'
-
-
-class Settings:
- def __init__(self):
- self._config = json.loads(TEMPLATE.read_text())
- self.env = Environment()
- self.set_python_path()
- self.set_lib_paths()
- self.set_formatter()
- self.set_linter()
-
- @property
- def config(self):
- cfg = {k: v for k, v in sorted(self._config.items()) if k[0] != '['}
- cfg.update({k: dict(sorted(v.items())) for k, v in self._config.items() if k[0] == '['})
- return cfg
-
- def set_python_path(self):
- path = BASEDIR / '.bin/python.exe'
-
- if path.is_file():
- path = '${workspaceFolder}/.bin'
- else:
- path = which('python27')
- if not path:
- log.error('python2.7 executable not found')
- return
- path = Path(path).parent.as_posix()
-
- log.warning('Python 2.7 runtime found in %s', path)
- self._config['python.pythonPath'] = path
-
- def set_lib_paths(self):
- key = 'osx' if self.env.mac else 'windows'
-
- paths = [
- '${workspaceFolder}/src/clyphx',
- self.env.resources / 'Python/lib',
- *list(self.env.resources.joinpath('Python/site-packages').iterdir()),
- self.env.resources / 'Python/abl.live',
- self.env.resources / 'MIDI Remote Scripts',
- ]
-
- self._config.update({
- 'python.autoComplete.extraPaths': list(map(str, paths)),
- 'python.analysis.extraPaths': list(map(str, paths)),
- 'terminal.integrated.env.{}'.format(key): {
- 'PATH': os.pathsep.join(['${workspaceFolder}/.bin', '${env:PATH}']),
- 'PYTHONPATH': os.pathsep.join(map(str, paths)),
- }
- })
-
- env = BASEDIR / '.env'
- env.write_text('PYTHONPATH={}'.format(os.pathsep.join(map(str, paths))))
- log.warning('.env file saved in %s', env)
-
- def set_linter(self):
- '''Add linting (flake8) configuration to VSCode.
-
- Specific settings are managed in setup.cfg.
- '''
- path = which('flake8')
- if path:
- path = Path(path).as_posix()
- self._config.update({
- 'python.linting.flake8Enabled': True,
- 'python.linting.flake8Path': path,
- })
- log.warning('Linter found in %s', path)
- else:
- log.error('flake8 executable not found')
-
- def set_formatter(self):
- '''Add formatting (brunette) configuration to VSCode.
-
- Specific settings are managed in setup.cfg.
- '''
- path = which('brunette')
- if path:
- path = Path(path).as_posix()
- self._config.update({
- 'python.formatting.provider': 'black',
- 'python.formatting.blackArgs': [],
- 'python.linting.flake8Path': path,
- })
- log.warning('Formatter found in %s', path)
- else:
- log.error('brunette executable not found')
-
- @property
- def json(self):
- return json.dumps(self.config,
- ensure_ascii=False,
- sort_keys=False,
- indent=2)
- def write(self):
- filepath = TEMPLATE.with_name('settings.json')
- filepath.write_text(self.json)
- log.warning('Configuration saved in %s', filepath)
-
-
-if __name__ == '__main__':
- conf = Settings()
- conf.write()
diff --git a/tools/win.ps1 b/tools/win.ps1
index 0314020..2dc0330 100644
Binary files a/tools/win.ps1 and b/tools/win.ps1 differ