author | Brett Cannon <bcannon@gmail.com> | 2008-05-05 20:21:38 (GMT)
---|---|---
committer | Brett Cannon <bcannon@gmail.com> | 2008-05-05 20:21:38 (GMT)
commit | 4b964f9c904744b7d7d88054e54a2e4ca8aeb395 (patch)
tree | c24ba51492fc3fc714c54f2e671f21172d063908
parent | 5f2e0e5ccb988cdf65137034b33ee57198cc23b9 (diff)
Add the 'json' package. Code taken from simplejson 1.9 and contributed by Bob
Ippolito.
Closes issue #2750.
34 files changed, 2886 insertions, 1 deletion
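For orientation before the diff, a minimal round-trip with the new module (a sketch, not part of the patch; the unicode reprs assume Python 2.6)::

   >>> import json
   >>> json.dumps({'spam': 1, 'eggs': [True, None]}, sort_keys=True)
   '{"eggs": [true, null], "spam": 1}'
   >>> json.loads('{"spam": 1}')
   {u'spam': 1}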
diff --git a/Doc/library/json.rst b/Doc/library/json.rst
new file mode 100644
index 0000000..d438a7a
--- /dev/null
+++ b/Doc/library/json.rst
@@ -0,0 +1,431 @@
+:mod:`json` --- JSON encoder and decoder
+========================================
+
+.. module:: json
+   :synopsis: Encode and decode the JSON format.
+.. moduleauthor:: Bob Ippolito <bob@redivi.com>
+.. sectionauthor:: Bob Ippolito <bob@redivi.com>
+.. versionadded:: 2.6
+
+JSON (JavaScript Object Notation) <http://json.org> is a subset of JavaScript
+syntax (ECMA-262 3rd edition) used as a lightweight data interchange format.
+
+:mod:`json` exposes an API familiar to users of the standard library marshal
+and pickle modules.
+
+Encoding basic Python object hierarchies::
+
+   >>> import json
+   >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
+   '["foo", {"bar": ["baz", null, 1.0, 2]}]'
+   >>> print json.dumps("\"foo\bar")
+   "\"foo\bar"
+   >>> print json.dumps(u'\u1234')
+   "\u1234"
+   >>> print json.dumps('\\')
+   "\\"
+   >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
+   {"a": 0, "b": 0, "c": 0}
+   >>> from StringIO import StringIO
+   >>> io = StringIO()
+   >>> json.dump(['streaming API'], io)
+   >>> io.getvalue()
+   '["streaming API"]'
+
+Compact encoding::
+
+   >>> import json
+   >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
+   '[1,2,3,{"4":5,"6":7}]'
+
+Pretty printing::
+
+   >>> import json
+   >>> print json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
+   {
+       "4": 5,
+       "6": 7
+   }
+
+Decoding JSON::
+
+   >>> import json
+   >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
+   [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
+   >>> json.loads('"\\"foo\\bar"')
+   u'"foo\x08ar'
+   >>> from StringIO import StringIO
+   >>> io = StringIO('["streaming API"]')
+   >>> json.load(io)
+   [u'streaming API']
+
+Specializing JSON object decoding::
+
+   >>> import json
+   >>> def as_complex(dct):
+   ...     if '__complex__' in dct:
+   ...         return complex(dct['real'], dct['imag'])
+   ...     return dct
+   ...
+   >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
+   ...     object_hook=as_complex)
+   (1+2j)
+   >>> import decimal
+   >>> json.loads('1.1', parse_float=decimal.Decimal)
+   Decimal('1.1')
+
+Extending JSONEncoder::
+
+   >>> import json
+   >>> class ComplexEncoder(json.JSONEncoder):
+   ...     def default(self, obj):
+   ...         if isinstance(obj, complex):
+   ...             return [obj.real, obj.imag]
+   ...         return json.JSONEncoder.default(self, obj)
+   ...
+   >>> json.dumps(2 + 1j, cls=ComplexEncoder)
+   '[2.0, 1.0]'
+   >>> ComplexEncoder().encode(2 + 1j)
+   '[2.0, 1.0]'
+   >>> list(ComplexEncoder().iterencode(2 + 1j))
+   ['[', '2.0', ', ', '1.0', ']']
+
+
+.. highlight:: none
+
+Using json.tool from the shell to validate and pretty-print::
+
+   $ echo '{"json":"obj"}' | python -mjson.tool
+   {
+       "json": "obj"
+   }
+   $ echo '{ 1.2:3.4}' | python -mjson.tool
+   Expecting property name: line 1 column 2 (char 2)
+
+.. highlight:: python
+
+.. note::
+
+   Note that the JSON produced by this module's default settings is a subset
+   of YAML, so it may be used as a serializer for that as well.
+
+
+Basic Usage
+-----------
+
+.. function:: dump(obj, fp[, skipkeys[, ensure_ascii[, check_circular[, allow_nan[, cls[, indent[, separators[, encoding[, default[, **kw]]]]]]]]]])
+
+   Serialize *obj* as a JSON formatted stream to *fp* (a
+   ``.write()``-supporting file-like object).
+
+   If *skipkeys* is ``True`` (it is ``False`` by default), then ``dict`` keys
+   that are not basic types (``str``, ``unicode``, ``int``, ``long``,
+   ``float``, ``bool``, ``None``) will be skipped instead of raising a
+   :exc:`TypeError`.
+
+   If *ensure_ascii* is ``False`` (it is ``True`` by default), then some
+   chunks written to *fp* may be ``unicode`` instances, subject to normal
+   Python ``str`` to ``unicode`` coercion rules. Unless ``fp.write()``
+   explicitly understands ``unicode`` (as in ``codecs.getwriter()``) this is
+   likely to cause an error.
+
+   If *check_circular* is ``False``, then the circular reference check for
+   container types will be skipped and a circular reference will result in an
+   :exc:`OverflowError` (or worse).
+
+   If *allow_nan* is ``False``, then it will be a :exc:`ValueError` to
+   serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
+   strict compliance with the JSON specification, instead of using the
+   JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+   If *indent* is a non-negative integer, then JSON array elements and object
+   members will be pretty-printed with that indent level. An indent level of
+   0 will only insert newlines. ``None`` is the most compact representation.
+
+   If *separators* is an ``(item_separator, dict_separator)`` tuple then it
+   will be used instead of the default ``(', ', ': ')`` separators. ``(',',
+   ':')`` is the most compact JSON representation.
+
+   *encoding* is the character encoding for ``str`` instances, default is
+   UTF-8.
+
+   *default(obj)* is a function that should return a serializable version of
+   *obj* or raise :exc:`TypeError`. The default simply raises
+   :exc:`TypeError`.
+
+   To use a custom :class:`JSONEncoder` subclass (e.g. one that overrides the
+   ``.default()`` method to serialize additional types), specify it with the
+   *cls* kwarg.
+
+
+.. function:: dumps(obj[, skipkeys[, ensure_ascii[, check_circular[, allow_nan[, cls[, indent[, separators[, encoding[, default[, **kw]]]]]]]]]])
+
+   Serialize *obj* to a JSON formatted ``str``.
+
+   If *skipkeys* is ``True`` (it is ``False`` by default), then ``dict`` keys
+   that are not basic types (``str``, ``unicode``, ``int``, ``long``,
+   ``float``, ``bool``, ``None``) will be skipped instead of raising a
+   :exc:`TypeError`.
+
+   If *ensure_ascii* is ``False``, then the return value will be a
+   ``unicode`` instance subject to normal Python ``str`` to ``unicode``
+   coercion rules instead of being escaped to an ASCII ``str``.
+
+   If *check_circular* is ``False``, then the circular reference check for
+   container types will be skipped and a circular reference will result in an
+   :exc:`OverflowError` (or worse).
+
+   If *allow_nan* is ``False``, then it will be a :exc:`ValueError` to
+   serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
+   strict compliance with the JSON specification, instead of using the
+   JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+   If *indent* is a non-negative integer, then JSON array elements and object
+   members will be pretty-printed with that indent level. An indent level of
+   0 will only insert newlines. ``None`` is the most compact representation.
+
+   If *separators* is an ``(item_separator, dict_separator)`` tuple then it
+   will be used instead of the default ``(', ', ': ')`` separators. ``(',',
+   ':')`` is the most compact JSON representation.
+
+   *encoding* is the character encoding for ``str`` instances, default is
+   UTF-8.
+
+   *default(obj)* is a function that should return a serializable version of
+   *obj* or raise :exc:`TypeError`. The default simply raises
+   :exc:`TypeError`.
+
+   To use a custom :class:`JSONEncoder` subclass (e.g. one that overrides the
+   ``.default()`` method to serialize additional types), specify it with the
+   *cls* kwarg.
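The flags described above are easiest to see side by side. A small sketch, not part of the patch, with output as produced by this implementation under Python 2.6 (the error message comes from ``floatstr`` in ``encoder.py`` below)::

   >>> import json
   >>> # A tuple is not a basic key type, so skipkeys drops it:
   >>> json.dumps({'ok': 1, (1, 2): 'tuple key'}, skipkeys=True)
   '{"ok": 1}'
   >>> json.dumps([1, 2, {'a': 3}], separators=(',', ':'))
   '[1,2,{"a":3}]'
   >>> json.dumps(float('inf'))
   'Infinity'
   >>> json.dumps(float('inf'), allow_nan=False)
   Traceback (most recent call last):
     ...
   ValueError: Out of range float values are not JSON compliant: inf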
+
+
+.. function:: loads(s[, encoding[, cls[, object_hook[, parse_float[, parse_int[, parse_constant[, **kw]]]]]]])
+
+   Deserialize *s* (a ``str`` or ``unicode`` instance containing a JSON
+   document) to a Python object.
+
+   If *s* is a ``str`` instance and is encoded with an ASCII based encoding
+   other than UTF-8 (e.g. latin-1), then an appropriate *encoding* name must
+   be specified. Encodings that are not ASCII based (such as UCS-2) are not
+   allowed and should be decoded to ``unicode`` first.
+
+   *object_hook* is an optional function that will be called with the result
+   of any object literal decode (a ``dict``). The return value of
+   *object_hook* will be used instead of the ``dict``. This feature can be
+   used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+   *parse_float*, if specified, will be called with the string of every JSON
+   float to be decoded. By default, this is equivalent to
+   ``float(num_str)``. This can be used to use another datatype or parser for
+   JSON floats (e.g. ``decimal.Decimal``).
+
+   *parse_int*, if specified, will be called with the string of every JSON
+   int to be decoded. By default, this is equivalent to ``int(num_str)``.
+   This can be used to use another datatype or parser for JSON integers
+   (e.g. ``float``).
+
+   *parse_constant*, if specified, will be called with one of the following
+   strings: ``-Infinity``, ``Infinity``, ``NaN``, ``null``, ``true``,
+   ``false``. This can be used to raise an exception if invalid JSON numbers
+   are encountered.
+
+   To use a custom :class:`JSONDecoder` subclass, specify it with the *cls*
+   kwarg. Additional keyword arguments will be passed to the constructor of
+   the class.
+
+
+.. function:: load(fp[, encoding[, cls[, object_hook[, parse_float[, parse_int[, parse_constant[, **kw]]]]]]])
+
+   Deserialize *fp* (a ``.read()``-supporting file-like object containing a
+   JSON document) to a Python object.
+
+   If the contents of *fp* are encoded with an ASCII based encoding other
+   than UTF-8 (e.g. latin-1), then an appropriate *encoding* name must be
+   specified. Encodings that are not ASCII based (such as UCS-2) are not
+   allowed, and should be wrapped with ``codecs.getreader(encoding)(fp)``, or
+   simply decoded to a ``unicode`` object and passed to :func:`loads`.
+
+   *object_hook* is an optional function that will be called with the result
+   of any object literal decode (a ``dict``). The return value of
+   *object_hook* will be used instead of the ``dict``. This feature can be
+   used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+   To use a custom :class:`JSONDecoder` subclass, specify it with the *cls*
+   kwarg. Additional keyword arguments will be passed to the constructor of
+   the class.
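The ``codecs.getreader`` wrapping above is easy to get backwards: the reader class is obtained from the encoding, then applied to the file object. A minimal sketch, not part of the patch, with invented sample data::

   import codecs
   import json
   from StringIO import StringIO

   # A JSON document stored as UTF-16 bytes. UTF-16 is not ASCII based,
   # so the stream must be decoded to unicode before json sees it.
   raw = StringIO(u'["caf\u00e9"]'.encode('utf-16'))
   data = json.load(codecs.getreader('utf-16')(raw))
   assert data == [u'caf\xe9']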
+
+
+Encoders and decoders
+---------------------
+
+.. class:: JSONDecoder([encoding[, object_hook[, parse_float[, parse_int[, parse_constant[, strict]]]]]])
+
+   Simple JSON decoder.
+
+   Performs the following translations in decoding by default:
+
+   +---------------+-------------------+
+   | JSON          | Python            |
+   +===============+===================+
+   | object        | dict              |
+   +---------------+-------------------+
+   | array         | list              |
+   +---------------+-------------------+
+   | string        | unicode           |
+   +---------------+-------------------+
+   | number (int)  | int, long         |
+   +---------------+-------------------+
+   | number (real) | float             |
+   +---------------+-------------------+
+   | true          | True              |
+   +---------------+-------------------+
+   | false         | False             |
+   +---------------+-------------------+
+   | null          | None              |
+   +---------------+-------------------+
+
+   It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as their
+   corresponding ``float`` values, which is outside the JSON spec.
+
+   *encoding* determines the encoding used to interpret any ``str`` objects
+   decoded by this instance (UTF-8 by default). It has no effect when
+   decoding ``unicode`` objects.
+
+   Note that currently only encodings that are a superset of ASCII work;
+   strings of other encodings should be passed in as ``unicode``.
+
+   *object_hook*, if specified, will be called with the result of every JSON
+   object decoded and its return value will be used in place of the given
+   ``dict``. This can be used to provide custom deserializations (e.g. to
+   support JSON-RPC class hinting).
+
+   *parse_float*, if specified, will be called with the string of every JSON
+   float to be decoded. By default, this is equivalent to
+   ``float(num_str)``. This can be used to use another datatype or parser for
+   JSON floats (e.g. ``decimal.Decimal``).
+
+   *parse_int*, if specified, will be called with the string of every JSON
+   int to be decoded. By default, this is equivalent to ``int(num_str)``.
+   This can be used to use another datatype or parser for JSON integers
+   (e.g. ``float``).
+
+   *parse_constant*, if specified, will be called with one of the following
+   strings: ``-Infinity``, ``Infinity``, ``NaN``, ``null``, ``true``,
+   ``false``. This can be used to raise an exception if invalid JSON numbers
+   are encountered.
+
+
+   .. method:: decode(s)
+
+      Return the Python representation of *s* (a ``str`` or ``unicode``
+      instance containing a JSON document).
+
+   .. method:: raw_decode(s)
+
+      Decode a JSON document from *s* (a ``str`` or ``unicode`` beginning
+      with a JSON document) and return a 2-tuple of the Python representation
+      and the index in *s* where the document ended.
+
+      This can be used to decode a JSON document from a string that may have
+      extraneous data at the end.
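A quick sketch of the ``raw_decode`` behavior just described (not part of the patch; the second element of the result is the index where parsing stopped)::

   >>> import json
   >>> json.JSONDecoder().raw_decode('{"a": 1} trailing garbage')
   ({u'a': 1}, 8)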
+
+
+.. class:: JSONEncoder([skipkeys[, ensure_ascii[, check_circular[, allow_nan[, sort_keys[, indent[, separators[, encoding[, default]]]]]]]]])
+
+   Extensible JSON <http://json.org> encoder for Python data structures.
+
+   Supports the following objects and types by default:
+
+   +-------------------+---------------+
+   | Python            | JSON          |
+   +===================+===============+
+   | dict              | object        |
+   +-------------------+---------------+
+   | list, tuple       | array         |
+   +-------------------+---------------+
+   | str, unicode      | string        |
+   +-------------------+---------------+
+   | int, long, float  | number        |
+   +-------------------+---------------+
+   | True              | true          |
+   +-------------------+---------------+
+   | False             | false         |
+   +-------------------+---------------+
+   | None              | null          |
+   +-------------------+---------------+
+
+   To extend this to recognize other objects, subclass and implement a
+   ``.default()`` method that returns a serializable object for ``o`` if
+   possible; otherwise it should call the superclass implementation (to raise
+   :exc:`TypeError`).
+
+   If *skipkeys* is ``False`` (the default), then it is a :exc:`TypeError` to
+   attempt encoding of keys that are not str, int, long, float or None. If
+   *skipkeys* is ``True``, such items are simply skipped.
+
+   If *ensure_ascii* is ``True``, the output is guaranteed to be ``str``
+   objects with all incoming unicode characters escaped. If *ensure_ascii*
+   is ``False``, the output will be a ``unicode`` object.
+
+   If *check_circular* is ``True`` (the default), then lists, dicts, and
+   custom encoded objects will be checked for circular references during
+   encoding to prevent an infinite recursion (which would cause an
+   :exc:`OverflowError`). Otherwise, no such check takes place.
+
+   If *allow_nan* is ``True`` (the default), then ``NaN``, ``Infinity``, and
+   ``-Infinity`` will be encoded as such. This behavior is not JSON
+   specification compliant, but is consistent with most JavaScript based
+   encoders and decoders. Otherwise, it will be a :exc:`ValueError` to encode
+   such floats.
+
+   If *sort_keys* is ``True`` (it is ``False`` by default), then the output
+   of dictionaries will be sorted by key; this is useful for regression tests
+   to ensure that JSON serializations can be compared on a day-to-day basis.
+
+   If *indent* is a non-negative integer (it is ``None`` by default), then
+   JSON array elements and object members will be pretty-printed with that
+   indent level. An indent level of 0 will only insert newlines. ``None`` is
+   the most compact representation.
+
+   If specified, *separators* should be an ``(item_separator, key_separator)``
+   tuple. The default is ``(', ', ': ')``. To get the most compact JSON
+   representation, you should specify ``(',', ':')`` to eliminate whitespace.
+
+   If specified, *default* is a function that gets called for objects that
+   can't otherwise be serialized. It should return a JSON encodable version
+   of the object or raise a :exc:`TypeError`.
+
+   If *encoding* is not ``None``, then all input strings will be transformed
+   into unicode using that encoding prior to JSON-encoding. The default is
+   UTF-8.
+
+
+   .. method:: default(o)
+
+      Implement this method in a subclass such that it returns a serializable
+      object for *o*, or calls the base implementation (to raise a
+      :exc:`TypeError`).
+
+      For example, to support arbitrary iterators, you could implement
+      default like this::
+
+         def default(self, o):
+             try:
+                 iterable = iter(o)
+             except TypeError:
+                 pass
+             else:
+                 return list(iterable)
+             return JSONEncoder.default(self, o)
+
+
+   .. method:: encode(o)
+
+      Return a JSON string representation of a Python data structure, *o*.
+      For example::
+
+         >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
+         '{"foo": ["bar", "baz"]}'
+
+
+   .. method:: iterencode(o)
+
+      Encode the given object, *o*, and yield each string representation as
+      available.
+
+      For example::
+
+         for chunk in JSONEncoder().iterencode(bigobject):
+             mysocket.write(chunk)
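Beyond the iterator example in the docs above, a common use of a ``JSONEncoder`` subclass is serializing types such as dates. A sketch, not part of the patch (the class name is made up for illustration)::

   >>> import json, datetime
   >>> class DateTimeEncoder(json.JSONEncoder):
   ...     def default(self, obj):
   ...         # Represent datetimes as ISO 8601 strings; defer the rest.
   ...         if isinstance(obj, datetime.datetime):
   ...             return obj.isoformat()
   ...         return json.JSONEncoder.default(self, obj)
   ...
   >>> json.dumps({'when': datetime.datetime(2008, 5, 5, 20, 21, 38)},
   ...            cls=DateTimeEncoder)
   '{"when": "2008-05-05T20:21:38"}'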
diff --git a/Doc/library/netdata.rst b/Doc/library/netdata.rst
index 1bf9b6d..9d4aa34 100644
--- a/Doc/library/netdata.rst
+++ b/Doc/library/netdata.rst
@@ -12,6 +12,7 @@ on the Internet.
 .. toctree::
 
    email.rst
+   json.rst
    mailcap.rst
    mailbox.rst
    mhlib.rst
diff --git a/Doc/whatsnew/2.6.rst b/Doc/whatsnew/2.6.rst
index 03bd432..17493e7 100644
--- a/Doc/whatsnew/2.6.rst
+++ b/Doc/whatsnew/2.6.rst
@@ -1210,6 +1210,7 @@ Mullender that was in Python's :file:`Demo/classes/` directory for a long time.
 This implementation was significantly updated by Jeffrey Yasskin.
 
+
 Other Language Changes
 ======================
@@ -2146,6 +2147,31 @@ complete list of changes, or look through the CVS logs for all the details.
 .. ======================================================================
 .. whole new modules get described in subsections here
 
+The :mod:`json` module
+----------------------
+
+The new :mod:`json` module supports the encoding and decoding of Python types
+in JSON (JavaScript Object Notation). JSON is a lightweight interchange
+format often used in web applications. For more information about JSON, see
+http://www.json.org.
+
+:mod:`json` comes with support for decoding and encoding most built-in Python
+types. The following example encodes and decodes a dictionary::
+
+   >>> import json
+   >>> data = {"spam" : "foo", "parrot" : 42}
+   >>> in_json = json.dumps(data) # Encode the data
+   >>> in_json
+   '{"parrot": 42, "spam": "foo"}'
+   >>> json.loads(in_json) # Decode into a Python object
+   {u'parrot': 42, u'spam': u'foo'}
+
+It is also possible to write your own decoders and encoders to support more
+types. Pretty-printing of the JSON strings is also supported.
+
+:mod:`json` (originally called simplejson) was written by Bob Ippolito.
+
+
 Improved SSL Support
 --------------------------------------------------
diff --git a/Lib/json/__init__.py b/Lib/json/__init__.py
new file mode 100644
index 0000000..56116f4
--- /dev/null
+++ b/Lib/json/__init__.py
@@ -0,0 +1,318 @@
+r"""A simple, fast, extensible JSON encoder and decoder
+
+JSON (JavaScript Object Notation) <http://json.org> is a subset of
+JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
+interchange format.
+
+json exposes an API familiar to users of the standard library
+marshal and pickle modules.
+ +Encoding basic Python object hierarchies:: + + >>> import json + >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) + '["foo", {"bar": ["baz", null, 1.0, 2]}]' + >>> print json.dumps("\"foo\bar") + "\"foo\bar" + >>> print json.dumps(u'\u1234') + "\u1234" + >>> print json.dumps('\\') + "\\" + >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True) + {"a": 0, "b": 0, "c": 0} + >>> from StringIO import StringIO + >>> io = StringIO() + >>> json.dump(['streaming API'], io) + >>> io.getvalue() + '["streaming API"]' + +Compact encoding:: + + >>> import json + >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':')) + '[1,2,3,{"4":5,"6":7}]' + +Pretty printing (using repr() because of extraneous whitespace in the output):: + + >>> import json + >>> print repr(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)) + '{\n "4": 5, \n "6": 7\n}' + +Decoding JSON:: + + >>> import json + >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') + [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] + >>> json.loads('"\\"foo\\bar"') + u'"foo\x08ar' + >>> from StringIO import StringIO + >>> io = StringIO('["streaming API"]') + >>> json.load(io) + [u'streaming API'] + +Specializing JSON object decoding:: + + >>> import json + >>> def as_complex(dct): + ... if '__complex__' in dct: + ... return complex(dct['real'], dct['imag']) + ... return dct + ... + >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', + ... object_hook=as_complex) + (1+2j) + >>> import decimal + >>> json.loads('1.1', parse_float=decimal.Decimal) + Decimal('1.1') + +Extending JSONEncoder:: + + >>> import json + >>> class ComplexEncoder(json.JSONEncoder): + ... def default(self, obj): + ... if isinstance(obj, complex): + ... return [obj.real, obj.imag] + ... return json.JSONEncoder.default(self, obj) + ... + >>> dumps(2 + 1j, cls=ComplexEncoder) + '[2.0, 1.0]' + >>> ComplexEncoder().encode(2 + 1j) + '[2.0, 1.0]' + >>> list(ComplexEncoder().iterencode(2 + 1j)) + ['[', '2.0', ', ', '1.0', ']'] + + +Using json.tool from the shell to validate and +pretty-print:: + + $ echo '{"json":"obj"}' | python -mjson.tool + { + "json": "obj" + } + $ echo '{ 1.2:3.4}' | python -mjson.tool + Expecting property name: line 1 column 2 (char 2) + +Note that the JSON produced by this module's default settings +is a subset of YAML, so it may be used as a serializer for that as well. + +""" + +__version__ = '1.9' +__all__ = [ + 'dump', 'dumps', 'load', 'loads', + 'JSONDecoder', 'JSONEncoder', +] + +__author__ = 'Bob Ippolito <bob@redivi.com>' + +from .decoder import JSONDecoder +from .encoder import JSONEncoder + +_default_encoder = JSONEncoder( + skipkeys=False, + ensure_ascii=True, + check_circular=True, + allow_nan=True, + indent=None, + separators=None, + encoding='utf-8', + default=None, +) + +def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + encoding='utf-8', default=None, **kw): + """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a + ``.write()``-supporting file-like object). + + If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp`` + may be ``unicode`` instances, subject to normal Python ``str`` to + ``unicode`` coercion rules. 
Unless ``fp.write()`` explicitly + understands ``unicode`` (as in ``codecs.getwriter()``) this is likely + to cause an error. + + If ``check_circular`` is ``False``, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to + serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) + in strict compliance of the JSON specification, instead of using the + JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). + + If ``indent`` is a non-negative integer, then JSON array elements and object + members will be pretty-printed with that indent level. An indent level + of 0 will only insert newlines. ``None`` is the most compact representation. + + If ``separators`` is an ``(item_separator, dict_separator)`` tuple + then it will be used instead of the default ``(', ', ': ')`` separators. + ``(',', ':')`` is the most compact JSON representation. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + ``default(obj)`` is a function that should return a serializable version + of obj or raise TypeError. The default simply raises TypeError. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. + + """ + # cached encoder + if (skipkeys is False and ensure_ascii is True and + check_circular is True and allow_nan is True and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and default is None and not kw): + iterable = _default_encoder.iterencode(obj) + else: + if cls is None: + cls = JSONEncoder + iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, encoding=encoding, + default=default, **kw).iterencode(obj) + # could accelerate with writelines in some versions of Python, at + # a debuggability cost + for chunk in iterable: + fp.write(chunk) + + +def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + encoding='utf-8', default=None, **kw): + """Serialize ``obj`` to a JSON formatted ``str``. + + If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If ``ensure_ascii`` is ``False``, then the return value will be a + ``unicode`` instance subject to normal Python ``str`` to ``unicode`` + coercion rules instead of being escaped to an ASCII ``str``. + + If ``check_circular`` is ``False``, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to + serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in + strict compliance of the JSON specification, instead of using the + JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). + + If ``indent`` is a non-negative integer, then JSON array elements and + object members will be pretty-printed with that indent level. An indent + level of 0 will only insert newlines. ``None`` is the most compact + representation. 
+ + If ``separators`` is an ``(item_separator, dict_separator)`` tuple + then it will be used instead of the default ``(', ', ': ')`` separators. + ``(',', ':')`` is the most compact JSON representation. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + ``default(obj)`` is a function that should return a serializable version + of obj or raise TypeError. The default simply raises TypeError. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. + + """ + # cached encoder + if (skipkeys is False and ensure_ascii is True and + check_circular is True and allow_nan is True and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and default is None and not kw): + return _default_encoder.encode(obj) + if cls is None: + cls = JSONEncoder + return cls( + skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, encoding=encoding, default=default, + **kw).encode(obj) + + +_default_decoder = JSONDecoder(encoding=None, object_hook=None) + + +def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, **kw): + """Deserialize ``fp`` (a ``.read()``-supporting file-like object + containing a JSON document) to a Python object. + + If the contents of ``fp`` is encoded with an ASCII based encoding other + than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must + be specified. Encodings that are not ASCII based (such as UCS-2) are + not allowed, and should be wrapped with + ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode`` + object and passed to ``loads()`` + + ``object_hook`` is an optional function that will be called with the + result of any object literal decode (a ``dict``). The return value of + ``object_hook`` will be used instead of the ``dict``. This feature + can be used to implement custom decoders (e.g. JSON-RPC class hinting). + + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg. + + """ + return loads(fp.read(), + encoding=encoding, cls=cls, object_hook=object_hook, + parse_float=parse_float, parse_int=parse_int, + parse_constant=parse_constant, **kw) + + +def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, **kw): + """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON + document) to a Python object. + + If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding + other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name + must be specified. Encodings that are not ASCII based (such as UCS-2) + are not allowed and should be decoded to ``unicode`` first. + + ``object_hook`` is an optional function that will be called with the + result of any object literal decode (a ``dict``). The return value of + ``object_hook`` will be used instead of the ``dict``. This feature + can be used to implement custom decoders (e.g. JSON-RPC class hinting). + + ``parse_float``, if specified, will be called with the string + of every JSON float to be decoded. By default this is equivalent to + float(num_str). This can be used to use another datatype or parser + for JSON floats (e.g. decimal.Decimal). + + ``parse_int``, if specified, will be called with the string + of every JSON int to be decoded. 
By default this is equivalent to + int(num_str). This can be used to use another datatype or parser + for JSON integers (e.g. float). + + ``parse_constant``, if specified, will be called with one of the + following strings: -Infinity, Infinity, NaN, null, true, false. + This can be used to raise an exception if invalid JSON numbers + are encountered. + + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg. + + """ + if (cls is None and encoding is None and object_hook is None and + parse_int is None and parse_float is None and + parse_constant is None and not kw): + return _default_decoder.decode(s) + if cls is None: + cls = JSONDecoder + if object_hook is not None: + kw['object_hook'] = object_hook + if parse_float is not None: + kw['parse_float'] = parse_float + if parse_int is not None: + kw['parse_int'] = parse_int + if parse_constant is not None: + kw['parse_constant'] = parse_constant + return cls(encoding=encoding, **kw).decode(s) diff --git a/Lib/json/decoder.py b/Lib/json/decoder.py new file mode 100644 index 0000000..acaee25 --- /dev/null +++ b/Lib/json/decoder.py @@ -0,0 +1,349 @@ +"""Implementation of JSONDecoder +""" + +import re +import sys + +from json.scanner import Scanner, pattern +try: + from _json import scanstring as c_scanstring +except ImportError: + c_scanstring = None + +__all__ = ['JSONDecoder'] + +FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL + + +def _floatconstants(): + import struct + import sys + _BYTES = '7FF80000000000007FF0000000000000'.decode('hex') + if sys.byteorder != 'big': + _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1] + nan, inf = struct.unpack('dd', _BYTES) + return nan, inf, -inf + +NaN, PosInf, NegInf = _floatconstants() + + +def linecol(doc, pos): + lineno = doc.count('\n', 0, pos) + 1 + if lineno == 1: + colno = pos + else: + colno = pos - doc.rindex('\n', 0, pos) + return lineno, colno + + +def errmsg(msg, doc, pos, end=None): + lineno, colno = linecol(doc, pos) + if end is None: + fmt = '{0}: line {1} column {2} (char {3})' + return fmt.format(msg, lineno, colno, pos) + endlineno, endcolno = linecol(doc, end) + fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})' + return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end) + + +_CONSTANTS = { + '-Infinity': NegInf, + 'Infinity': PosInf, + 'NaN': NaN, + 'true': True, + 'false': False, + 'null': None, +} + + +def JSONConstant(match, context, c=_CONSTANTS): + s = match.group(0) + fn = getattr(context, 'parse_constant', None) + if fn is None: + rval = c[s] + else: + rval = fn(s) + return rval, None +pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant) + + +def JSONNumber(match, context): + match = JSONNumber.regex.match(match.string, *match.span()) + integer, frac, exp = match.groups() + if frac or exp: + fn = getattr(context, 'parse_float', None) or float + res = fn(integer + (frac or '') + (exp or '')) + else: + fn = getattr(context, 'parse_int', None) or int + res = fn(integer) + return res, None +pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber) + + +STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS) +BACKSLASH = { + '"': u'"', '\\': u'\\', '/': u'/', + 'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t', +} + +DEFAULT_ENCODING = "utf-8" + + +def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match): + if encoding is None: + encoding = DEFAULT_ENCODING + chunks = [] + _append = chunks.append + begin = end - 1 + while 1: + chunk = _m(s, end) + if chunk is None: + raise 
ValueError( + errmsg("Unterminated string starting at", s, begin)) + end = chunk.end() + content, terminator = chunk.groups() + if content: + if not isinstance(content, unicode): + content = unicode(content, encoding) + _append(content) + if terminator == '"': + break + elif terminator != '\\': + if strict: + msg = "Invalid control character {0!r} at".format(terminator) + raise ValueError(errmsg(msg, s, end)) + else: + _append(terminator) + continue + try: + esc = s[end] + except IndexError: + raise ValueError( + errmsg("Unterminated string starting at", s, begin)) + if esc != 'u': + try: + m = _b[esc] + except KeyError: + msg = "Invalid \\escape: {0!r}".format(esc) + raise ValueError(errmsg(msg, s, end)) + end += 1 + else: + esc = s[end + 1:end + 5] + next_end = end + 5 + msg = "Invalid \\uXXXX escape" + try: + if len(esc) != 4: + raise ValueError + uni = int(esc, 16) + if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535: + msg = "Invalid \\uXXXX\\uXXXX surrogate pair" + if not s[end + 5:end + 7] == '\\u': + raise ValueError + esc2 = s[end + 7:end + 11] + if len(esc2) != 4: + raise ValueError + uni2 = int(esc2, 16) + uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00)) + next_end += 6 + m = unichr(uni) + except ValueError: + raise ValueError(errmsg(msg, s, end)) + end = next_end + _append(m) + return u''.join(chunks), end + + +# Use speedup +if c_scanstring is not None: + scanstring = c_scanstring +else: + scanstring = py_scanstring + +def JSONString(match, context): + encoding = getattr(context, 'encoding', None) + strict = getattr(context, 'strict', True) + return scanstring(match.string, match.end(), encoding, strict) +pattern(r'"')(JSONString) + + +WHITESPACE = re.compile(r'\s*', FLAGS) + + +def JSONObject(match, context, _w=WHITESPACE.match): + pairs = {} + s = match.string + end = _w(s, match.end()).end() + nextchar = s[end:end + 1] + # Trivial empty object + if nextchar == '}': + return pairs, end + 1 + if nextchar != '"': + raise ValueError(errmsg("Expecting property name", s, end)) + end += 1 + encoding = getattr(context, 'encoding', None) + strict = getattr(context, 'strict', True) + iterscan = JSONScanner.iterscan + while True: + key, end = scanstring(s, end, encoding, strict) + end = _w(s, end).end() + if s[end:end + 1] != ':': + raise ValueError(errmsg("Expecting : delimiter", s, end)) + end = _w(s, end + 1).end() + try: + value, end = iterscan(s, idx=end, context=context).next() + except StopIteration: + raise ValueError(errmsg("Expecting object", s, end)) + pairs[key] = value + end = _w(s, end).end() + nextchar = s[end:end + 1] + end += 1 + if nextchar == '}': + break + if nextchar != ',': + raise ValueError(errmsg("Expecting , delimiter", s, end - 1)) + end = _w(s, end).end() + nextchar = s[end:end + 1] + end += 1 + if nextchar != '"': + raise ValueError(errmsg("Expecting property name", s, end - 1)) + object_hook = getattr(context, 'object_hook', None) + if object_hook is not None: + pairs = object_hook(pairs) + return pairs, end +pattern(r'{')(JSONObject) + + +def JSONArray(match, context, _w=WHITESPACE.match): + values = [] + s = match.string + end = _w(s, match.end()).end() + # Look-ahead for trivial empty array + nextchar = s[end:end + 1] + if nextchar == ']': + return values, end + 1 + iterscan = JSONScanner.iterscan + while True: + try: + value, end = iterscan(s, idx=end, context=context).next() + except StopIteration: + raise ValueError(errmsg("Expecting object", s, end)) + values.append(value) + end = _w(s, end).end() + nextchar = s[end:end + 1] + end += 1 
+ if nextchar == ']': + break + if nextchar != ',': + raise ValueError(errmsg("Expecting , delimiter", s, end)) + end = _w(s, end).end() + return values, end +pattern(r'\[')(JSONArray) + + +ANYTHING = [ + JSONObject, + JSONArray, + JSONString, + JSONConstant, + JSONNumber, +] + +JSONScanner = Scanner(ANYTHING) + + +class JSONDecoder(object): + """Simple JSON <http://json.org> decoder + + Performs the following translations in decoding by default: + + +---------------+-------------------+ + | JSON | Python | + +===============+===================+ + | object | dict | + +---------------+-------------------+ + | array | list | + +---------------+-------------------+ + | string | unicode | + +---------------+-------------------+ + | number (int) | int, long | + +---------------+-------------------+ + | number (real) | float | + +---------------+-------------------+ + | true | True | + +---------------+-------------------+ + | false | False | + +---------------+-------------------+ + | null | None | + +---------------+-------------------+ + + It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as + their corresponding ``float`` values, which is outside the JSON spec. + """ + + _scanner = Scanner(ANYTHING) + __all__ = ['__init__', 'decode', 'raw_decode'] + + def __init__(self, encoding=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, strict=True): + """``encoding`` determines the encoding used to interpret any ``str`` + objects decoded by this instance (utf-8 by default). It has no + effect when decoding ``unicode`` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as ``unicode``. + + ``object_hook``, if specified, will be called with the result of + every JSON object decoded and its return value will be used in + place of the given ``dict``. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + ``parse_float``, if specified, will be called with the string + of every JSON float to be decoded. By default this is equivalent to + float(num_str). This can be used to use another datatype or parser + for JSON floats (e.g. decimal.Decimal). + + ``parse_int``, if specified, will be called with the string + of every JSON int to be decoded. By default this is equivalent to + int(num_str). This can be used to use another datatype or parser + for JSON integers (e.g. float). + + ``parse_constant``, if specified, will be called with one of the + following strings: -Infinity, Infinity, NaN, null, true, false. + This can be used to raise an exception if invalid JSON numbers + are encountered. + + """ + self.encoding = encoding + self.object_hook = object_hook + self.parse_float = parse_float + self.parse_int = parse_int + self.parse_constant = parse_constant + self.strict = strict + + def decode(self, s, _w=WHITESPACE.match): + """ + Return the Python representation of ``s`` (a ``str`` or ``unicode`` + instance containing a JSON document) + + """ + obj, end = self.raw_decode(s, idx=_w(s, 0).end()) + end = _w(s, end).end() + if end != len(s): + raise ValueError(errmsg("Extra data", s, end, len(s))) + return obj + + def raw_decode(self, s, **kw): + """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning + with a JSON document) and return a 2-tuple of the Python + representation and the index in ``s`` where the document ended. + + This can be used to decode a JSON document from a string that may + have extraneous data at the end. 
+ + """ + kw.setdefault('context', self) + try: + obj, end = self._scanner.iterscan(s, **kw).next() + except StopIteration: + raise ValueError("No JSON object could be decoded") + return obj, end diff --git a/Lib/json/encoder.py b/Lib/json/encoder.py new file mode 100644 index 0000000..d8d4770 --- /dev/null +++ b/Lib/json/encoder.py @@ -0,0 +1,384 @@ +"""Implementation of JSONEncoder +""" + +import re + +try: + from _json import encode_basestring_ascii as c_encode_basestring_ascii +except ImportError: + c_encode_basestring_ascii = None + +__all__ = ['JSONEncoder'] + +ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') +ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') +HAS_UTF8 = re.compile(r'[\x80-\xff]') +ESCAPE_DCT = { + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) + +# Assume this produces an infinity on all machines (probably not guaranteed) +INFINITY = float('1e66666') +FLOAT_REPR = repr + +def floatstr(o, allow_nan=True): + # Check for specials. Note that this type of test is processor- and/or + # platform-specific, so do tests which don't depend on the internals. + + if o != o: + text = 'NaN' + elif o == INFINITY: + text = 'Infinity' + elif o == -INFINITY: + text = '-Infinity' + else: + return FLOAT_REPR(o) + + if not allow_nan: + msg = "Out of range float values are not JSON compliant: " + repr(o) + raise ValueError(msg) + + return text + + +def encode_basestring(s): + """Return a JSON representation of a Python string + + """ + def replace(match): + return ESCAPE_DCT[match.group(0)] + return '"' + ESCAPE.sub(replace, s) + '"' + + +def py_encode_basestring_ascii(s): + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + return '\\u{0:04x}'.format(n) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 0xdc00 | (n & 0x3ff) + return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + + +if c_encode_basestring_ascii is not None: + encode_basestring_ascii = c_encode_basestring_ascii +else: + encode_basestring_ascii = py_encode_basestring_ascii + + +class JSONEncoder(object): + """Extensible JSON <http://json.org> encoder for Python data structures. + + Supports the following objects and types by default: + + +-------------------+---------------+ + | Python | JSON | + +===================+===============+ + | dict | object | + +-------------------+---------------+ + | list, tuple | array | + +-------------------+---------------+ + | str, unicode | string | + +-------------------+---------------+ + | int, long, float | number | + +-------------------+---------------+ + | True | true | + +-------------------+---------------+ + | False | false | + +-------------------+---------------+ + | None | null | + +-------------------+---------------+ + + To extend this to recognize other objects, subclass and implement a + ``.default()`` method with another method that returns a serializable + object for ``o`` if possible, otherwise it should call the superclass + implementation (to raise ``TypeError``). 
+ + """ + __all__ = ['__init__', 'default', 'encode', 'iterencode'] + item_separator = ', ' + key_separator = ': ' + def __init__(self, skipkeys=False, ensure_ascii=True, + check_circular=True, allow_nan=True, sort_keys=False, + indent=None, separators=None, encoding='utf-8', default=None): + """Constructor for JSONEncoder, with sensible defaults. + + If skipkeys is False, then it is a TypeError to attempt + encoding of keys that are not str, int, long, float or None. If + skipkeys is True, such items are simply skipped. + + If ensure_ascii is True, the output is guaranteed to be str + objects with all incoming unicode characters escaped. If + ensure_ascii is false, the output will be unicode object. + + If check_circular is True, then lists, dicts, and custom encoded + objects will be checked for circular references during encoding to + prevent an infinite recursion (which would cause an OverflowError). + Otherwise, no such check takes place. + + If allow_nan is True, then NaN, Infinity, and -Infinity will be + encoded as such. This behavior is not JSON specification compliant, + but is consistent with most JavaScript based encoders and decoders. + Otherwise, it will be a ValueError to encode such floats. + + If sort_keys is True, then the output of dictionaries will be + sorted by key; this is useful for regression tests to ensure + that JSON serializations can be compared on a day-to-day basis. + + If indent is a non-negative integer, then JSON array + elements and object members will be pretty-printed with that + indent level. An indent level of 0 will only insert newlines. + None is the most compact representation. + + If specified, separators should be a (item_separator, key_separator) + tuple. The default is (', ', ': '). To get the most compact JSON + representation you should specify (',', ':') to eliminate whitespace. + + If specified, default is a function that gets called for objects + that can't otherwise be serialized. It should return a JSON encodable + version of the object or raise a ``TypeError``. + + If encoding is not None, then all input strings will be + transformed into unicode using that encoding prior to JSON-encoding. + The default is UTF-8. 
+ + """ + self.skipkeys = skipkeys + self.ensure_ascii = ensure_ascii + self.check_circular = check_circular + self.allow_nan = allow_nan + self.sort_keys = sort_keys + self.indent = indent + self.current_indent_level = 0 + if separators is not None: + self.item_separator, self.key_separator = separators + if default is not None: + self.default = default + self.encoding = encoding + + def _newline_indent(self): + return '\n' + (' ' * (self.indent * self.current_indent_level)) + + def _iterencode_list(self, lst, markers=None): + if not lst: + yield '[]' + return + if markers is not None: + markerid = id(lst) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = lst + yield '[' + if self.indent is not None: + self.current_indent_level += 1 + newline_indent = self._newline_indent() + separator = self.item_separator + newline_indent + yield newline_indent + else: + newline_indent = None + separator = self.item_separator + first = True + for value in lst: + if first: + first = False + else: + yield separator + for chunk in self._iterencode(value, markers): + yield chunk + if newline_indent is not None: + self.current_indent_level -= 1 + yield self._newline_indent() + yield ']' + if markers is not None: + del markers[markerid] + + def _iterencode_dict(self, dct, markers=None): + if not dct: + yield '{}' + return + if markers is not None: + markerid = id(dct) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = dct + yield '{' + key_separator = self.key_separator + if self.indent is not None: + self.current_indent_level += 1 + newline_indent = self._newline_indent() + item_separator = self.item_separator + newline_indent + yield newline_indent + else: + newline_indent = None + item_separator = self.item_separator + first = True + if self.ensure_ascii: + encoder = encode_basestring_ascii + else: + encoder = encode_basestring + allow_nan = self.allow_nan + if self.sort_keys: + keys = dct.keys() + keys.sort() + items = [(k, dct[k]) for k in keys] + else: + items = dct.iteritems() + _encoding = self.encoding + _do_decode = (_encoding is not None + and not (_encoding == 'utf-8')) + for key, value in items: + if isinstance(key, str): + if _do_decode: + key = key.decode(_encoding) + elif isinstance(key, basestring): + pass + # JavaScript is weakly typed for these, so it makes sense to + # also allow them. Many encoders seem to do something like this. 
+ elif isinstance(key, float): + key = floatstr(key, allow_nan) + elif isinstance(key, (int, long)): + key = str(key) + elif key is True: + key = 'true' + elif key is False: + key = 'false' + elif key is None: + key = 'null' + elif self.skipkeys: + continue + else: + raise TypeError("key {0!r} is not a string".format(key)) + if first: + first = False + else: + yield item_separator + yield encoder(key) + yield key_separator + for chunk in self._iterencode(value, markers): + yield chunk + if newline_indent is not None: + self.current_indent_level -= 1 + yield self._newline_indent() + yield '}' + if markers is not None: + del markers[markerid] + + def _iterencode(self, o, markers=None): + if isinstance(o, basestring): + if self.ensure_ascii: + encoder = encode_basestring_ascii + else: + encoder = encode_basestring + _encoding = self.encoding + if (_encoding is not None and isinstance(o, str) + and not (_encoding == 'utf-8')): + o = o.decode(_encoding) + yield encoder(o) + elif o is None: + yield 'null' + elif o is True: + yield 'true' + elif o is False: + yield 'false' + elif isinstance(o, (int, long)): + yield str(o) + elif isinstance(o, float): + yield floatstr(o, self.allow_nan) + elif isinstance(o, (list, tuple)): + for chunk in self._iterencode_list(o, markers): + yield chunk + elif isinstance(o, dict): + for chunk in self._iterencode_dict(o, markers): + yield chunk + else: + if markers is not None: + markerid = id(o) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = o + for chunk in self._iterencode_default(o, markers): + yield chunk + if markers is not None: + del markers[markerid] + + def _iterencode_default(self, o, markers=None): + newobj = self.default(o) + return self._iterencode(newobj, markers) + + def default(self, o): + """Implement this method in a subclass such that it returns a serializable + object for ``o``, or calls the base implementation (to raise a + ``TypeError``). + + For example, to support arbitrary iterators, you could implement + default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + return JSONEncoder.default(self, o) + + """ + raise TypeError(repr(o) + " is not JSON serializable") + + def encode(self, o): + """Return a JSON string representation of a Python data structure. + + >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) + '{"foo": ["bar", "baz"]}' + + """ + # This is for extremely simple cases and benchmarks. + if isinstance(o, basestring): + if isinstance(o, str): + _encoding = self.encoding + if (_encoding is not None + and not (_encoding == 'utf-8')): + o = o.decode(_encoding) + if self.ensure_ascii: + return encode_basestring_ascii(o) + else: + return encode_basestring(o) + # This doesn't pass the iterator directly to ''.join() because the + # exceptions aren't as detailed. The list call should be roughly + # equivalent to the PySequence_Fast that ''.join() would do. + chunks = list(self.iterencode(o)) + return ''.join(chunks) + + def iterencode(self, o): + """Encode the given object and yield each string representation as + available. 
+ + For example:: + + for chunk in JSONEncoder().iterencode(bigobject): + mysocket.write(chunk) + + """ + if self.check_circular: + markers = {} + else: + markers = None + return self._iterencode(o, markers) diff --git a/Lib/json/scanner.py b/Lib/json/scanner.py new file mode 100644 index 0000000..4b065ab --- /dev/null +++ b/Lib/json/scanner.py @@ -0,0 +1,69 @@ +"""Iterator based sre token scanner + +""" + +import re +import sre_parse +import sre_compile +import sre_constants + +from re import VERBOSE, MULTILINE, DOTALL +from sre_constants import BRANCH, SUBPATTERN + +__all__ = ['Scanner', 'pattern'] + +FLAGS = (VERBOSE | MULTILINE | DOTALL) + +class Scanner(object): + def __init__(self, lexicon, flags=FLAGS): + self.actions = [None] + # Combine phrases into a compound pattern + s = sre_parse.Pattern() + s.flags = flags + p = [] + for idx, token in enumerate(lexicon): + phrase = token.pattern + try: + subpattern = sre_parse.SubPattern(s, + [(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))]) + except sre_constants.error: + raise + p.append(subpattern) + self.actions.append(token) + + s.groups = len(p) + 1 # NOTE(guido): Added to make SRE validation work + p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) + self.scanner = sre_compile.compile(p) + + def iterscan(self, string, idx=0, context=None): + """Yield match, end_idx for each match + + """ + match = self.scanner.scanner(string, idx).match + actions = self.actions + lastend = idx + end = len(string) + while True: + m = match() + if m is None: + break + matchbegin, matchend = m.span() + if lastend == matchend: + break + action = actions[m.lastindex] + if action is not None: + rval, next_pos = action(m, context) + if next_pos is not None and next_pos != matchend: + # "fast forward" the scanner + matchend = next_pos + match = self.scanner.scanner(string, matchend).match + yield rval, matchend + lastend = matchend + + +def pattern(pattern, flags=FLAGS): + def decorator(fn): + fn.pattern = pattern + fn.regex = re.compile(pattern, flags) + return fn + return decorator diff --git a/Lib/json/tests/__init__.py b/Lib/json/tests/__init__.py new file mode 100644 index 0000000..a0797ea --- /dev/null +++ b/Lib/json/tests/__init__.py @@ -0,0 +1,37 @@ +import os +import sys +import unittest +import doctest + +here = os.path.dirname(__file__) + +def test_suite(): + suite = additional_tests() + loader = unittest.TestLoader() + for fn in os.listdir(here): + if fn.startswith("test") and fn.endswith(".py"): + modname = "json.tests." 
+ fn[:-3] + __import__(modname) + module = sys.modules[modname] + suite.addTests(loader.loadTestsFromModule(module)) + return suite + +def additional_tests(): + import json + import json.encoder + import json.decoder + suite = unittest.TestSuite() + for mod in (json, json.encoder, json.decoder): + suite.addTest(doctest.DocTestSuite(mod)) + return suite + +def main(): + suite = test_suite() + runner = unittest.TextTestRunner() + runner.run(suite) + +if __name__ == '__main__': + import os + import sys + sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) + main() diff --git a/Lib/json/tests/test_decode.py b/Lib/json/tests/test_decode.py new file mode 100644 index 0000000..609f622 --- /dev/null +++ b/Lib/json/tests/test_decode.py @@ -0,0 +1,15 @@ +import decimal +from unittest import TestCase + +import json + +class TestDecode(TestCase): + def test_decimal(self): + rval = json.loads('1.1', parse_float=decimal.Decimal) + self.assert_(isinstance(rval, decimal.Decimal)) + self.assertEquals(rval, decimal.Decimal('1.1')) + + def test_float(self): + rval = json.loads('1', parse_int=float) + self.assert_(isinstance(rval, float)) + self.assertEquals(rval, 1.0) diff --git a/Lib/json/tests/test_default.py b/Lib/json/tests/test_default.py new file mode 100644 index 0000000..49f05ad --- /dev/null +++ b/Lib/json/tests/test_default.py @@ -0,0 +1,9 @@ +from unittest import TestCase + +import json + +class TestDefault(TestCase): + def test_default(self): + self.assertEquals( + json.dumps(type, default=repr), + json.dumps(repr(type))) diff --git a/Lib/json/tests/test_dump.py b/Lib/json/tests/test_dump.py new file mode 100644 index 0000000..d288c0d --- /dev/null +++ b/Lib/json/tests/test_dump.py @@ -0,0 +1,13 @@ +from unittest import TestCase +from cStringIO import StringIO + +import json + +class TestDump(TestCase): + def test_dump(self): + sio = StringIO() + json.dump({}, sio) + self.assertEquals(sio.getvalue(), '{}') + + def test_dumps(self): + self.assertEquals(json.dumps({}), '{}') diff --git a/Lib/json/tests/test_encode_basestring_ascii.py b/Lib/json/tests/test_encode_basestring_ascii.py new file mode 100644 index 0000000..352423e --- /dev/null +++ b/Lib/json/tests/test_encode_basestring_ascii.py @@ -0,0 +1,35 @@ +from unittest import TestCase + +import json.encoder + +CASES = [ + (u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'), + (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'), + (u'controls', '"controls"'), + (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'), + (u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'), + (u' s p a c e d ', '" s p a c e d "'), + (u'\U0001d120', '"\\ud834\\udd20"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + ('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + ('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + (u"`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'), + (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'), + (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'), +] + +class TestEncodeBaseStringAscii(TestCase): + def test_py_encode_basestring_ascii(self): + 
self._test_encode_basestring_ascii(json.encoder.py_encode_basestring_ascii) + + def test_c_encode_basestring_ascii(self): + self._test_encode_basestring_ascii(json.encoder.c_encode_basestring_ascii) + + def _test_encode_basestring_ascii(self, encode_basestring_ascii): + fname = encode_basestring_ascii.__name__ + for input_string, expect in CASES: + result = encode_basestring_ascii(input_string) + self.assertEquals(result, expect) diff --git a/Lib/json/tests/test_fail.py b/Lib/json/tests/test_fail.py new file mode 100644 index 0000000..ee31bfa --- /dev/null +++ b/Lib/json/tests/test_fail.py @@ -0,0 +1,76 @@ +from unittest import TestCase + +import json + +# Fri Dec 30 18:57:26 2005 +JSONDOCS = [ + # http://json.org/JSON_checker/test/fail1.json + '"A JSON payload should be an object or array, not a string."', + # http://json.org/JSON_checker/test/fail2.json + '["Unclosed array"', + # http://json.org/JSON_checker/test/fail3.json + '{unquoted_key: "keys must be quoted}', + # http://json.org/JSON_checker/test/fail4.json + '["extra comma",]', + # http://json.org/JSON_checker/test/fail5.json + '["double extra comma",,]', + # http://json.org/JSON_checker/test/fail6.json + '[ , "<-- missing value"]', + # http://json.org/JSON_checker/test/fail7.json + '["Comma after the close"],', + # http://json.org/JSON_checker/test/fail8.json + '["Extra close"]]', + # http://json.org/JSON_checker/test/fail9.json + '{"Extra comma": true,}', + # http://json.org/JSON_checker/test/fail10.json + '{"Extra value after close": true} "misplaced quoted value"', + # http://json.org/JSON_checker/test/fail11.json + '{"Illegal expression": 1 + 2}', + # http://json.org/JSON_checker/test/fail12.json + '{"Illegal invocation": alert()}', + # http://json.org/JSON_checker/test/fail13.json + '{"Numbers cannot have leading zeroes": 013}', + # http://json.org/JSON_checker/test/fail14.json + '{"Numbers cannot be hex": 0x14}', + # http://json.org/JSON_checker/test/fail15.json + '["Illegal backslash escape: \\x15"]', + # http://json.org/JSON_checker/test/fail16.json + '["Illegal backslash escape: \\\'"]', + # http://json.org/JSON_checker/test/fail17.json + '["Illegal backslash escape: \\017"]', + # http://json.org/JSON_checker/test/fail18.json + '[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', + # http://json.org/JSON_checker/test/fail19.json + '{"Missing colon" null}', + # http://json.org/JSON_checker/test/fail20.json + '{"Double colon":: null}', + # http://json.org/JSON_checker/test/fail21.json + '{"Comma instead of colon", null}', + # http://json.org/JSON_checker/test/fail22.json + '["Colon instead of comma": false]', + # http://json.org/JSON_checker/test/fail23.json + '["Bad value", truth]', + # http://json.org/JSON_checker/test/fail24.json + "['single quote']", + # http://code.google.com/p/simplejson/issues/detail?id=3 + u'["A\u001FZ control characters in string"]', +] + +SKIPS = { + 1: "why not have a string payload?", + 18: "spec doesn't specify any nesting limitations", +} + +class TestFail(TestCase): + def test_failures(self): + for idx, doc in enumerate(JSONDOCS): + idx = idx + 1 + if idx in SKIPS: + json.loads(doc) + continue + try: + json.loads(doc) + except ValueError: + pass + else: + self.fail("Expected failure for fail%d.json: %r" % (idx, doc)) diff --git a/Lib/json/tests/test_float.py b/Lib/json/tests/test_float.py new file mode 100644 index 0000000..9df6d1ee --- /dev/null +++ b/Lib/json/tests/test_float.py @@ -0,0 +1,9 @@ +import math +from unittest import TestCase + +import json + +class TestFloat(TestCase): + def 
test_floats(self):
+        for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100]:
+            self.assertEquals(float(json.dumps(num)), num)
diff --git a/Lib/json/tests/test_indent.py b/Lib/json/tests/test_indent.py
new file mode 100644
index 0000000..6055162
--- /dev/null
+++ b/Lib/json/tests/test_indent.py
@@ -0,0 +1,41 @@
+from unittest import TestCase
+
+import json
+import textwrap
+
+class TestIndent(TestCase):
+    def test_indent(self):
+        h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
+             {'nifty': 87}, {'field': 'yes', 'morefield': False} ]
+
+        expect = textwrap.dedent("""\
+        [
+          [
+            "blorpie"
+          ],
+          [
+            "whoops"
+          ],
+          [],
+          "d-shtaeou",
+          "d-nthiouh",
+          "i-vhbjkhnth",
+          {
+            "nifty": 87
+          },
+          {
+            "field": "yes",
+            "morefield": false
+          }
+        ]""")
+
+
+        d1 = json.dumps(h)
+        d2 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
+
+        h1 = json.loads(d1)
+        h2 = json.loads(d2)
+
+        self.assertEquals(h1, h)
+        self.assertEquals(h2, h)
+        self.assertEquals(d2, expect)
diff --git a/Lib/json/tests/test_pass1.py b/Lib/json/tests/test_pass1.py
new file mode 100644
index 0000000..216e252
--- /dev/null
+++ b/Lib/json/tests/test_pass1.py
@@ -0,0 +1,76 @@
+from unittest import TestCase
+
+import json
+
+# from http://json.org/JSON_checker/test/pass1.json
+JSON = r'''
+[
+    "JSON Test Pattern pass1",
+    {"object with 1 member":["array with 1 element"]},
+    {},
+    [],
+    -42,
+    true,
+    false,
+    null,
+    {
+        "integer": 1234567890,
+        "real": -9876.543210,
+        "e": 0.123456789e-12,
+        "E": 1.234567890E+34,
+        "": 23456789012E666,
+        "zero": 0,
+        "one": 1,
+        "space": " ",
+        "quote": "\"",
+        "backslash": "\\",
+        "controls": "\b\f\n\r\t",
+        "slash": "/ & \/",
+        "alpha": "abcdefghijklmnopqrstuvwyz",
+        "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
+        "digit": "0123456789",
+        "special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
+        "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
+        "true": true,
+        "false": false,
+        "null": null,
+        "array":[ ],
+        "object":{ },
+        "address": "50 St. James Street",
+        "url": "http://www.JSON.org/",
+        "comment": "// /* <!-- --",
+        "# -- --> */": " ",
+        " s p a c e d " :[1,2 , 3
+
+,
+
+4 , 5 , 6 ,7 ],
+        "compact": [1,2,3,4,5,6,7],
+        "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
+        "quotes": "&#34; \u0022 %22 0x22 034 &#34;",
+        "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
+: "A key can be any string"
+    },
+    0.5 ,98.6
+,
+99.44
+,
+
+1066
+
+
+,"rosebud"]
+'''
+
+class TestPass1(TestCase):
+    def test_parse(self):
+        # test in/out equivalence and parsing
+        res = json.loads(JSON)
+        out = json.dumps(res)
+        self.assertEquals(res, json.loads(out))
+        try:
+            json.dumps(res, allow_nan=False)
+        except ValueError:
+            pass
+        else:
+            self.fail("23456789012E666 should be out of range")
diff --git a/Lib/json/tests/test_pass2.py b/Lib/json/tests/test_pass2.py
new file mode 100644
index 0000000..80d8433
--- /dev/null
+++ b/Lib/json/tests/test_pass2.py
@@ -0,0 +1,14 @@
+from unittest import TestCase
+import json
+
+# from http://json.org/JSON_checker/test/pass2.json
+JSON = r'''
+[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]
+'''
+
+class TestPass2(TestCase):
+    def test_parse(self):
+        # test in/out equivalence and parsing
+        res = json.loads(JSON)
+        out = json.dumps(res)
+        self.assertEquals(res, json.loads(out))
diff --git a/Lib/json/tests/test_pass3.py b/Lib/json/tests/test_pass3.py
new file mode 100644
index 0000000..77d87b2
--- /dev/null
+++ b/Lib/json/tests/test_pass3.py
@@ -0,0 +1,20 @@
+from unittest import TestCase
+
+import json
+
+# from http://json.org/JSON_checker/test/pass3.json
+JSON = r'''
+{
+    "JSON Test Pattern pass3": {
+        "The outermost value": "must be an object or array.",
+        "In this test": "It is an object."
+    }
+}
+'''
+
+class TestPass3(TestCase):
+    def test_parse(self):
+        # test in/out equivalence and parsing
+        res = json.loads(JSON)
+        out = json.dumps(res)
+        self.assertEquals(res, json.loads(out))
diff --git a/Lib/json/tests/test_recursion.py b/Lib/json/tests/test_recursion.py
new file mode 100644
index 0000000..b82a373
--- /dev/null
+++ b/Lib/json/tests/test_recursion.py
@@ -0,0 +1,65 @@
+from unittest import TestCase
+
+import json
+
+class JSONTestObject:
+    pass
+
+class RecursiveJSONEncoder(json.JSONEncoder):
+    recurse = False
+    def default(self, o):
+        if o is JSONTestObject:
+            if self.recurse:
+                return [JSONTestObject]
+            else:
+                return 'JSONTestObject'
+        return json.JSONEncoder.default(self, o)
+
+class TestRecursion(TestCase):
+    def test_listrecursion(self):
+        x = []
+        x.append(x)
+        try:
+            json.dumps(x)
+        except ValueError:
+            pass
+        else:
+            self.fail("didn't raise ValueError on list recursion")
+        x = []
+        y = [x]
+        x.append(y)
+        try:
+            json.dumps(x)
+        except ValueError:
+            pass
+        else:
+            self.fail("didn't raise ValueError on alternating list recursion")
+        y = []
+        x = [y, y]
+        # ensure that the marker is cleared
+        json.dumps(x)
+
+    def test_dictrecursion(self):
+        x = {}
+        x["test"] = x
+        try:
+            json.dumps(x)
+        except ValueError:
+            pass
+        else:
+            self.fail("didn't raise ValueError on dict recursion")
+        x = {}
+        y = {"a": x, "b": x}
+        # ensure that the marker is cleared
+        json.dumps(y)
+
+    def test_defaultrecursion(self):
+        enc = RecursiveJSONEncoder()
+        self.assertEquals(enc.encode(JSONTestObject), '"JSONTestObject"')
+        enc.recurse = True
+        try:
+            enc.encode(JSONTestObject)
+        except ValueError:
+            pass
+        else:
+            self.fail("didn't raise ValueError on default recursion")
diff --git a/Lib/json/tests/test_scanstring.py b/Lib/json/tests/test_scanstring.py
new file mode 100644
index 0000000..87051bb
--- /dev/null
+++ b/Lib/json/tests/test_scanstring.py
@@ -0,0 +1,102 @@
+import sys
+import decimal
+from unittest import TestCase
+
+import json.decoder
+
+class TestScanString(TestCase):
+    def test_py_scanstring(self):
+        self._test_scanstring(json.decoder.py_scanstring)
+
+    def test_c_scanstring(self):
+        self._test_scanstring(json.decoder.c_scanstring)
+
+    
def _test_scanstring(self, scanstring): + self.assertEquals( + scanstring('"z\\ud834\\udd20x"', 1, None, True), + (u'z\U0001d120x', 16)) + + if sys.maxunicode == 65535: + self.assertEquals( + scanstring(u'"z\U0001d120x"', 1, None, True), + (u'z\U0001d120x', 6)) + else: + self.assertEquals( + scanstring(u'"z\U0001d120x"', 1, None, True), + (u'z\U0001d120x', 5)) + + self.assertEquals( + scanstring('"\\u007b"', 1, None, True), + (u'{', 8)) + + self.assertEquals( + scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True), + (u'A JSON payload should be an object or array, not a string.', 60)) + + self.assertEquals( + scanstring('["Unclosed array"', 2, None, True), + (u'Unclosed array', 17)) + + self.assertEquals( + scanstring('["extra comma",]', 2, None, True), + (u'extra comma', 14)) + + self.assertEquals( + scanstring('["double extra comma",,]', 2, None, True), + (u'double extra comma', 21)) + + self.assertEquals( + scanstring('["Comma after the close"],', 2, None, True), + (u'Comma after the close', 24)) + + self.assertEquals( + scanstring('["Extra close"]]', 2, None, True), + (u'Extra close', 14)) + + self.assertEquals( + scanstring('{"Extra comma": true,}', 2, None, True), + (u'Extra comma', 14)) + + self.assertEquals( + scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True), + (u'Extra value after close', 26)) + + self.assertEquals( + scanstring('{"Illegal expression": 1 + 2}', 2, None, True), + (u'Illegal expression', 21)) + + self.assertEquals( + scanstring('{"Illegal invocation": alert()}', 2, None, True), + (u'Illegal invocation', 21)) + + self.assertEquals( + scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True), + (u'Numbers cannot have leading zeroes', 37)) + + self.assertEquals( + scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True), + (u'Numbers cannot be hex', 24)) + + self.assertEquals( + scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True), + (u'Too deep', 30)) + + self.assertEquals( + scanstring('{"Missing colon" null}', 2, None, True), + (u'Missing colon', 16)) + + self.assertEquals( + scanstring('{"Double colon":: null}', 2, None, True), + (u'Double colon', 15)) + + self.assertEquals( + scanstring('{"Comma instead of colon", null}', 2, None, True), + (u'Comma instead of colon', 25)) + + self.assertEquals( + scanstring('["Colon instead of comma": false]', 2, None, True), + (u'Colon instead of comma', 25)) + + self.assertEquals( + scanstring('["Bad value", truth]', 2, None, True), + (u'Bad value', 12)) diff --git a/Lib/json/tests/test_separators.py b/Lib/json/tests/test_separators.py new file mode 100644 index 0000000..32db341 --- /dev/null +++ b/Lib/json/tests/test_separators.py @@ -0,0 +1,42 @@ +import textwrap +from unittest import TestCase + +import json + + +class TestSeparators(TestCase): + def test_separators(self): + h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth', + {'nifty': 87}, {'field': 'yes', 'morefield': False} ] + + expect = textwrap.dedent("""\ + [ + [ + "blorpie" + ] , + [ + "whoops" + ] , + [] , + "d-shtaeou" , + "d-nthiouh" , + "i-vhbjkhnth" , + { + "nifty" : 87 + } , + { + "field" : "yes" , + "morefield" : false + } + ]""") + + + d1 = json.dumps(h) + d2 = json.dumps(h, indent=2, sort_keys=True, separators=(' ,', ' : ')) + + h1 = json.loads(d1) + h2 = json.loads(d2) + + self.assertEquals(h1, h) + self.assertEquals(h2, h) + self.assertEquals(d2, expect) diff --git a/Lib/json/tests/test_speedups.py 
b/Lib/json/tests/test_speedups.py new file mode 100644 index 0000000..8ff9a38 --- /dev/null +++ b/Lib/json/tests/test_speedups.py @@ -0,0 +1,15 @@ +import decimal +from unittest import TestCase + +from json import decoder +from json import encoder + +class TestSpeedups(TestCase): + def test_scanstring(self): + self.assertEquals(decoder.scanstring.__module__, "_json") + self.assert_(decoder.scanstring is decoder.c_scanstring) + + def test_encode_basestring_ascii(self): + self.assertEquals(encoder.encode_basestring_ascii.__module__, "_json") + self.assert_(encoder.encode_basestring_ascii is + encoder.c_encode_basestring_ascii) diff --git a/Lib/json/tests/test_unicode.py b/Lib/json/tests/test_unicode.py new file mode 100644 index 0000000..3ac4541 --- /dev/null +++ b/Lib/json/tests/test_unicode.py @@ -0,0 +1,55 @@ +from unittest import TestCase + +import json + +class TestUnicode(TestCase): + def test_encoding1(self): + encoder = json.JSONEncoder(encoding='utf-8') + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + s = u.encode('utf-8') + ju = encoder.encode(u) + js = encoder.encode(s) + self.assertEquals(ju, js) + + def test_encoding2(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + s = u.encode('utf-8') + ju = json.dumps(u, encoding='utf-8') + js = json.dumps(s, encoding='utf-8') + self.assertEquals(ju, js) + + def test_encoding3(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps(u) + self.assertEquals(j, '"\\u03b1\\u03a9"') + + def test_encoding4(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps([u]) + self.assertEquals(j, '["\\u03b1\\u03a9"]') + + def test_encoding5(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps(u, ensure_ascii=False) + self.assertEquals(j, u'"{0}"'.format(u)) + + def test_encoding6(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps([u], ensure_ascii=False) + self.assertEquals(j, u'["{0}"]'.format(u)) + + def test_big_unicode_encode(self): + u = u'\U0001d120' + self.assertEquals(json.dumps(u), '"\\ud834\\udd20"') + self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"') + + def test_big_unicode_decode(self): + u = u'z\U0001d120x' + self.assertEquals(json.loads('"' + u + '"'), u) + self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u) + + def test_unicode_decode(self): + for i in range(0, 0xd7ff): + u = unichr(i) + js = '"\\u{0:04x}"'.format(i) + self.assertEquals(json.loads(js), u) diff --git a/Lib/json/tool.py b/Lib/json/tool.py new file mode 100644 index 0000000..cbd258c --- /dev/null +++ b/Lib/json/tool.py @@ -0,0 +1,36 @@ +r"""Command-line tool to validate and pretty-print JSON + +Usage:: + + $ echo '{"json":"obj"}' | python -mjson.tool + { + "json": "obj" + } + $ echo '{ 1.2:3.4}' | python -mjson.tool + Expecting property name: line 1 column 2 (char 2) +""" +import sys +import json + +def main(): + if len(sys.argv) == 1: + infile = sys.stdin + outfile = sys.stdout + elif len(sys.argv) == 2: + infile = open(sys.argv[1], 'rb') + outfile = sys.stdout + elif len(sys.argv) == 3: + infile = open(sys.argv[1], 'rb') + outfile = open(sys.argv[2], 'wb') + else: + raise SystemExit("{0} [infile [outfile]]".format(sys.argv[0])) + try: + obj = json.load(infile) + except ValueError, e: + raise SystemExit(e) + json.dump(obj, outfile, sort_keys=True, indent=4) + outfile.write('\n') + + +if __name__ == '__main__': + main() diff --git a/Lib/test/test_json.py 
b/Lib/test/test_json.py new file mode 100644 index 0000000..7b8f3de --- /dev/null +++ b/Lib/test/test_json.py @@ -0,0 +1,17 @@ +"""Tests for json. + +The tests for json are defined in the json.tests package; +the test_suite() function there returns a test suite that's ready to +be run. +""" + +import json.tests +import test.test_support + + +def test_main(): + test.test_support.run_unittest(json.tests.test_suite()) + + +if __name__ == "__main__": + test_main() @@ -49,6 +49,9 @@ Extension Modules Library ------- +- Issue #2750: Add the 'json' package. Based on simplejson 1.9 and contributed + by Bob Ippolito. + - Issue #1734346: Support Unicode file names for zipfiles. - Issue #2581: distutils: Vista UAC/elevation support for bdist_wininst diff --git a/Modules/_json.c b/Modules/_json.c new file mode 100644 index 0000000..deee1e3 --- /dev/null +++ b/Modules/_json.c @@ -0,0 +1,609 @@ +#include "Python.h" + +#define DEFAULT_ENCODING "utf-8" +#define S_CHAR(c) (c >= ' ' && c <= '~' && c != '\\' && c != '"') +#define MIN_EXPANSION 6 + +#ifdef Py_UNICODE_WIDE +#define MAX_EXPANSION (2 * MIN_EXPANSION) +#else +#define MAX_EXPANSION MIN_EXPANSION +#endif + +static Py_ssize_t +ascii_escape_char(Py_UNICODE c, char *output, Py_ssize_t chars) +{ + Py_UNICODE x; + output[chars++] = '\\'; + switch (c) { + case '\\': output[chars++] = (char)c; break; + case '"': output[chars++] = (char)c; break; + case '\b': output[chars++] = 'b'; break; + case '\f': output[chars++] = 'f'; break; + case '\n': output[chars++] = 'n'; break; + case '\r': output[chars++] = 'r'; break; + case '\t': output[chars++] = 't'; break; + default: +#ifdef Py_UNICODE_WIDE + if (c >= 0x10000) { + /* UTF-16 surrogate pair */ + Py_UNICODE v = c - 0x10000; + c = 0xd800 | ((v >> 10) & 0x3ff); + output[chars++] = 'u'; + x = (c & 0xf000) >> 12; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x0f00) >> 8; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x00f0) >> 4; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x000f); + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + c = 0xdc00 | (v & 0x3ff); + output[chars++] = '\\'; + } +#endif + output[chars++] = 'u'; + x = (c & 0xf000) >> 12; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x0f00) >> 8; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x00f0) >> 4; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x000f); + output[chars++] = (x < 10) ? 
'0' + x : 'a' + (x - 10); + } + return chars; +} + +static PyObject * +ascii_escape_unicode(PyObject *pystr) +{ + Py_ssize_t i; + Py_ssize_t input_chars; + Py_ssize_t output_size; + Py_ssize_t chars; + PyObject *rval; + char *output; + Py_UNICODE *input_unicode; + + input_chars = PyUnicode_GET_SIZE(pystr); + input_unicode = PyUnicode_AS_UNICODE(pystr); + /* One char input can be up to 6 chars output, estimate 4 of these */ + output_size = 2 + (MIN_EXPANSION * 4) + input_chars; + rval = PyString_FromStringAndSize(NULL, output_size); + if (rval == NULL) { + return NULL; + } + output = PyString_AS_STRING(rval); + chars = 0; + output[chars++] = '"'; + for (i = 0; i < input_chars; i++) { + Py_UNICODE c = input_unicode[i]; + if (S_CHAR(c)) { + output[chars++] = (char)c; + } + else { + chars = ascii_escape_char(c, output, chars); + } + if (output_size - chars < (1 + MAX_EXPANSION)) { + /* There's more than four, so let's resize by a lot */ + output_size *= 2; + /* This is an upper bound */ + if (output_size > 2 + (input_chars * MAX_EXPANSION)) { + output_size = 2 + (input_chars * MAX_EXPANSION); + } + if (_PyString_Resize(&rval, output_size) == -1) { + return NULL; + } + output = PyString_AS_STRING(rval); + } + } + output[chars++] = '"'; + if (_PyString_Resize(&rval, chars) == -1) { + return NULL; + } + return rval; +} + +static PyObject * +ascii_escape_str(PyObject *pystr) +{ + Py_ssize_t i; + Py_ssize_t input_chars; + Py_ssize_t output_size; + Py_ssize_t chars; + PyObject *rval; + char *output; + char *input_str; + + input_chars = PyString_GET_SIZE(pystr); + input_str = PyString_AS_STRING(pystr); + /* One char input can be up to 6 chars output, estimate 4 of these */ + output_size = 2 + (MIN_EXPANSION * 4) + input_chars; + rval = PyString_FromStringAndSize(NULL, output_size); + if (rval == NULL) { + return NULL; + } + output = PyString_AS_STRING(rval); + chars = 0; + output[chars++] = '"'; + for (i = 0; i < input_chars; i++) { + Py_UNICODE c = (Py_UNICODE)input_str[i]; + if (S_CHAR(c)) { + output[chars++] = (char)c; + } + else if (c > 0x7F) { + /* We hit a non-ASCII character, bail to unicode mode */ + PyObject *uni; + Py_DECREF(rval); + uni = PyUnicode_DecodeUTF8(input_str, input_chars, "strict"); + if (uni == NULL) { + return NULL; + } + rval = ascii_escape_unicode(uni); + Py_DECREF(uni); + return rval; + } + else { + chars = ascii_escape_char(c, output, chars); + } + /* An ASCII char can't possibly expand to a surrogate! 
*/ + if (output_size - chars < (1 + MIN_EXPANSION)) { + /* There's more than four, so let's resize by a lot */ + output_size *= 2; + if (output_size > 2 + (input_chars * MIN_EXPANSION)) { + output_size = 2 + (input_chars * MIN_EXPANSION); + } + if (_PyString_Resize(&rval, output_size) == -1) { + return NULL; + } + output = PyString_AS_STRING(rval); + } + } + output[chars++] = '"'; + if (_PyString_Resize(&rval, chars) == -1) { + return NULL; + } + return rval; +} + +void +raise_errmsg(char *msg, PyObject *s, Py_ssize_t end) +{ + static PyObject *errmsg_fn = NULL; + PyObject *pymsg; + if (errmsg_fn == NULL) { + PyObject *decoder = PyImport_ImportModule("json.decoder"); + if (decoder == NULL) + return; + errmsg_fn = PyObject_GetAttrString(decoder, "errmsg"); + if (errmsg_fn == NULL) + return; + Py_XDECREF(decoder); + } + pymsg = PyObject_CallFunction(errmsg_fn, "(zOn)", msg, s, end); + PyErr_SetObject(PyExc_ValueError, pymsg); + Py_DECREF(pymsg); +/* + +def linecol(doc, pos): + lineno = doc.count('\n', 0, pos) + 1 + if lineno == 1: + colno = pos + else: + colno = pos - doc.rindex('\n', 0, pos) + return lineno, colno + +def errmsg(msg, doc, pos, end=None): + lineno, colno = linecol(doc, pos) + if end is None: + return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos) + endlineno, endcolno = linecol(doc, end) + return '%s: line %d column %d - line %d column %d (char %d - %d)' % ( + msg, lineno, colno, endlineno, endcolno, pos, end) + +*/ +} + +static PyObject * +join_list_unicode(PyObject *lst) +{ + static PyObject *ustr = NULL; + static PyObject *joinstr = NULL; + if (ustr == NULL) { + Py_UNICODE c = 0; + ustr = PyUnicode_FromUnicode(&c, 0); + } + if (joinstr == NULL) { + joinstr = PyString_FromString("join"); + } + if (joinstr == NULL || ustr == NULL) { + return NULL; + } + return PyObject_CallMethodObjArgs(ustr, joinstr, lst, NULL); +} + +static PyObject * +scanstring_str(PyObject *pystr, Py_ssize_t end, char *encoding, int strict) +{ + PyObject *rval; + Py_ssize_t len = PyString_GET_SIZE(pystr); + Py_ssize_t begin = end - 1; + Py_ssize_t next = begin; + char *buf = PyString_AS_STRING(pystr); + PyObject *chunks = PyList_New(0); + if (chunks == NULL) { + goto bail; + } + while (1) { + /* Find the end of the string or the next escape */ + Py_UNICODE c = 0; + PyObject *chunk = NULL; + for (next = end; next < len; next++) { + c = buf[next]; + if (c == '"' || c == '\\') { + break; + } + else if (strict && c <= 0x1f) { + raise_errmsg("Invalid control character at", pystr, begin); + goto bail; + } + } + if (!(c == '"' || c == '\\')) { + raise_errmsg("Unterminated string starting at", pystr, begin); + goto bail; + } + /* Pick up this chunk if it's not zero length */ + if (next != end) { + PyObject *strchunk = PyBuffer_FromMemory(&buf[end], next - end); + if (strchunk == NULL) { + goto bail; + } + chunk = PyUnicode_FromEncodedObject(strchunk, encoding, NULL); + Py_DECREF(strchunk); + if (chunk == NULL) { + goto bail; + } + if (PyList_Append(chunks, chunk)) { + goto bail; + } + Py_DECREF(chunk); + } + next++; + if (c == '"') { + end = next; + break; + } + if (next == len) { + raise_errmsg("Unterminated string starting at", pystr, begin); + goto bail; + } + c = buf[next]; + if (c != 'u') { + /* Non-unicode backslash escapes */ + end = next + 1; + switch (c) { + case '"': break; + case '\\': break; + case '/': break; + case 'b': c = '\b'; break; + case 'f': c = '\f'; break; + case 'n': c = '\n'; break; + case 'r': c = '\r'; break; + case 't': c = '\t'; break; + default: c = 0; + } + if (c == 
0) { + raise_errmsg("Invalid \\escape", pystr, end - 2); + goto bail; + } + } + else { + c = 0; + next++; + end = next + 4; + if (end >= len) { + raise_errmsg("Invalid \\uXXXX escape", pystr, next - 1); + goto bail; + } + /* Decode 4 hex digits */ + for (; next < end; next++) { + Py_ssize_t shl = (end - next - 1) << 2; + Py_UNICODE digit = buf[next]; + switch (digit) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + c |= (digit - '0') << shl; break; + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + c |= (digit - 'a' + 10) << shl; break; + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + c |= (digit - 'A' + 10) << shl; break; + default: + raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5); + goto bail; + } + } +#ifdef Py_UNICODE_WIDE + /* Surrogate pair */ + if (c >= 0xd800 && c <= 0xdbff) { + Py_UNICODE c2 = 0; + if (end + 6 >= len) { + raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr, + end - 5); + } + if (buf[next++] != '\\' || buf[next++] != 'u') { + raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr, + end - 5); + } + end += 6; + /* Decode 4 hex digits */ + for (; next < end; next++) { + Py_ssize_t shl = (end - next - 1) << 2; + Py_UNICODE digit = buf[next]; + switch (digit) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + c2 |= (digit - '0') << shl; break; + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + c2 |= (digit - 'a' + 10) << shl; break; + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + c2 |= (digit - 'A' + 10) << shl; break; + default: + raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5); + goto bail; + } + } + c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00)); + } +#endif + } + chunk = PyUnicode_FromUnicode(&c, 1); + if (chunk == NULL) { + goto bail; + } + if (PyList_Append(chunks, chunk)) { + goto bail; + } + Py_DECREF(chunk); + } + + rval = join_list_unicode(chunks); + if (rval == NULL) { + goto bail; + } + Py_DECREF(chunks); + chunks = NULL; + return Py_BuildValue("(Nn)", rval, end); +bail: + Py_XDECREF(chunks); + return NULL; +} + + +static PyObject * +scanstring_unicode(PyObject *pystr, Py_ssize_t end, int strict) +{ + PyObject *rval; + Py_ssize_t len = PyUnicode_GET_SIZE(pystr); + Py_ssize_t begin = end - 1; + Py_ssize_t next = begin; + const Py_UNICODE *buf = PyUnicode_AS_UNICODE(pystr); + PyObject *chunks = PyList_New(0); + if (chunks == NULL) { + goto bail; + } + while (1) { + /* Find the end of the string or the next escape */ + Py_UNICODE c = 0; + PyObject *chunk = NULL; + for (next = end; next < len; next++) { + c = buf[next]; + if (c == '"' || c == '\\') { + break; + } + else if (strict && c <= 0x1f) { + raise_errmsg("Invalid control character at", pystr, begin); + goto bail; + } + } + if (!(c == '"' || c == '\\')) { + raise_errmsg("Unterminated string starting at", pystr, begin); + goto bail; + } + /* Pick up this chunk if it's not zero length */ + if (next != end) { + chunk = PyUnicode_FromUnicode(&buf[end], next - end); + if (chunk == NULL) { + goto bail; + } + if (PyList_Append(chunks, chunk)) { + goto bail; + } + Py_DECREF(chunk); + } + next++; + if (c == '"') { + end = next; + break; + } + if (next == len) { + raise_errmsg("Unterminated string starting at", pystr, begin); + goto bail; + } + c = buf[next]; + if (c != 'u') { + /* Non-unicode backslash escapes */ + end = next + 1; + switch (c) { + case '"': break; + case '\\': break; + case '/': break; 
+ case 'b': c = '\b'; break; + case 'f': c = '\f'; break; + case 'n': c = '\n'; break; + case 'r': c = '\r'; break; + case 't': c = '\t'; break; + default: c = 0; + } + if (c == 0) { + raise_errmsg("Invalid \\escape", pystr, end - 2); + goto bail; + } + } + else { + c = 0; + next++; + end = next + 4; + if (end >= len) { + raise_errmsg("Invalid \\uXXXX escape", pystr, next - 1); + goto bail; + } + /* Decode 4 hex digits */ + for (; next < end; next++) { + Py_ssize_t shl = (end - next - 1) << 2; + Py_UNICODE digit = buf[next]; + switch (digit) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + c |= (digit - '0') << shl; break; + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + c |= (digit - 'a' + 10) << shl; break; + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + c |= (digit - 'A' + 10) << shl; break; + default: + raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5); + goto bail; + } + } +#ifdef Py_UNICODE_WIDE + /* Surrogate pair */ + if (c >= 0xd800 && c <= 0xdbff) { + Py_UNICODE c2 = 0; + if (end + 6 >= len) { + raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr, + end - 5); + } + if (buf[next++] != '\\' || buf[next++] != 'u') { + raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr, + end - 5); + } + end += 6; + /* Decode 4 hex digits */ + for (; next < end; next++) { + Py_ssize_t shl = (end - next - 1) << 2; + Py_UNICODE digit = buf[next]; + switch (digit) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + c2 |= (digit - '0') << shl; break; + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + c2 |= (digit - 'a' + 10) << shl; break; + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + c2 |= (digit - 'A' + 10) << shl; break; + default: + raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5); + goto bail; + } + } + c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00)); + } +#endif + } + chunk = PyUnicode_FromUnicode(&c, 1); + if (chunk == NULL) { + goto bail; + } + if (PyList_Append(chunks, chunk)) { + goto bail; + } + Py_DECREF(chunk); + } + + rval = join_list_unicode(chunks); + if (rval == NULL) { + goto bail; + } + Py_DECREF(chunks); + chunks = NULL; + return Py_BuildValue("(Nn)", rval, end); +bail: + Py_XDECREF(chunks); + return NULL; +} + +PyDoc_STRVAR(pydoc_scanstring, +"scanstring(basestring, end, encoding) -> (str, end)\n"); + +static PyObject * +py_scanstring(PyObject* self, PyObject *args) +{ + PyObject *pystr; + Py_ssize_t end; + char *encoding = NULL; + int strict = 0; + if (!PyArg_ParseTuple(args, "On|zi:scanstring", &pystr, &end, &encoding, &strict)) { + return NULL; + } + if (encoding == NULL) { + encoding = DEFAULT_ENCODING; + } + if (PyString_Check(pystr)) { + return scanstring_str(pystr, end, encoding, strict); + } + else if (PyUnicode_Check(pystr)) { + return scanstring_unicode(pystr, end, strict); + } + else { + PyErr_Format(PyExc_TypeError, + "first argument must be a string or unicode, not %.80s", + Py_TYPE(pystr)->tp_name); + return NULL; + } +} + +PyDoc_STRVAR(pydoc_encode_basestring_ascii, +"encode_basestring_ascii(basestring) -> str\n"); + +static PyObject * +py_encode_basestring_ascii(PyObject* self, PyObject *pystr) +{ + /* METH_O */ + if (PyString_Check(pystr)) { + return ascii_escape_str(pystr); + } + else if (PyUnicode_Check(pystr)) { + return ascii_escape_unicode(pystr); + } + else { + PyErr_Format(PyExc_TypeError, + "first argument must be a string or unicode, not 
%.80s", + Py_TYPE(pystr)->tp_name); + return NULL; + } +} + +static PyMethodDef json_methods[] = { + {"encode_basestring_ascii", (PyCFunction)py_encode_basestring_ascii, + METH_O, pydoc_encode_basestring_ascii}, + {"scanstring", (PyCFunction)py_scanstring, METH_VARARGS, + pydoc_scanstring}, + {NULL, NULL, 0, NULL} +}; + +PyDoc_STRVAR(module_doc, +"json speedups\n"); + +void +init_json(void) +{ + PyObject *m; + m = Py_InitModule3("_json", json_methods, module_doc); +} diff --git a/PC/VC6/pythoncore.dsp b/PC/VC6/pythoncore.dsp index 3d80857..803a225 100644 --- a/PC/VC6/pythoncore.dsp +++ b/PC/VC6/pythoncore.dsp @@ -149,6 +149,10 @@ SOURCE=..\..\Modules\_hotshot.c # End Source File
# Begin Source File
+SOURCE=..\..\Modules\_json.c
+# End Source File
+# Begin Source File
+
SOURCE=..\..\Modules\_localemodule.c
# End Source File
# Begin Source File
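This and the following project-file hunks compile :file:`Modules/_json.c` into
pythoncore on Windows, making the C accelerators available to the :mod:`json`
package. A minimal smoke test of a finished build is sketched below; it checks
essentially what Lib/json/tests/test_speedups.py above asserts, and the helper
name ``speedups_enabled`` is made up for illustration, not part of this patch::

    # Sketch: check that json bound the _json accelerators rather than the
    # pure-Python py_* fallbacks (json.decoder and json.encoder fall back
    # automatically when the extension module fails to import).
    import json.decoder
    import json.encoder

    def speedups_enabled():
        return (json.decoder.scanstring is json.decoder.c_scanstring and
                json.encoder.encode_basestring_ascii is
                json.encoder.c_encode_basestring_ascii)

    print speedups_enabled()
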
diff --git a/PC/VS7.1/pythoncore.vcproj b/PC/VS7.1/pythoncore.vcproj
index 4d7943b..ce736f3 100644
--- a/PC/VS7.1/pythoncore.vcproj
+++ b/PC/VS7.1/pythoncore.vcproj
@@ -380,6 +380,9 @@
 	RelativePath="..\..\Modules\_hotshot.c">
 </File>
 <File
+	RelativePath="..\..\Modules\_json.c">
+</File>
+<File
 	RelativePath="..\..\Modules\_localemodule.c">
 </File>
 <File
diff --git a/PC/VS8.0/pythoncore.vcproj b/PC/VS8.0/pythoncore.vcproj
index 624cf4d..45deec7 100644
--- a/PC/VS8.0/pythoncore.vcproj
+++ b/PC/VS8.0/pythoncore.vcproj
@@ -1003,6 +1003,10 @@
 	>
</File>
<File
+ RelativePath="..\..\Modules\_json.c"
+ >
+ </File>
+ <File
RelativePath="..\..\Modules\_localemodule.c"
>
</File>
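Adding the file to the project files is only half of the Windows wiring:
pythoncore links ``_json`` in statically, so it must also be registered in
PC/config.c (next hunk), which pairs the module name with its ``init_json``
entry point in ``_PyImport_Inittab``. A sketch of what that means at runtime,
assuming a Windows build with this patch applied::

    # Sketch: statically linked extensions registered in _PyImport_Inittab
    # are reported as built-in modules rather than loaded from a .pyd.
    import sys
    print '_json' in sys.builtin_module_names   # True on such a build
    import _json
    # The two accelerators exported by Modules/_json.c:
    print _json.scanstring, _json.encode_basestring_ascii
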
diff --git a/PC/config.c b/PC/config.c
index 0d19caf..f128382 100644
--- a/PC/config.c
+++ b/PC/config.c
@@ -54,6 +54,7 @@ extern void init_struct(void);
 extern void initdatetime(void);
 extern void init_fileio(void);
 extern void init_functools(void);
+extern void init_json(void);
 
 extern void initzlib(void);
 extern void init_multibytecodec(void);
@@ -132,6 +133,7 @@ struct _inittab _PyImport_Inittab[] = {
 	{"datetime", initdatetime},
 	{"_fileio", init_fileio},
 	{"_functools", init_functools},
+	{"_json", init_json},
 
 	{"xxsubtype", initxxsubtype},
 	{"zipimport", initzipimport},
diff --git a/PCbuild/pcbuild.sln b/PCbuild/pcbuild.sln
index d534d7e..61090b4 100644
--- a/PCbuild/pcbuild.sln
+++ b/PCbuild/pcbuild.sln
@@ -29,7 +29,6 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "make_buildinfo", "make_buil
 EndProject
 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{553EC33E-9816-4996-A660-5D6186A0B0B3}"
 	ProjectSection(SolutionItems) = preProject
-		..\Modules\getbuildinfo.c = ..\Modules\getbuildinfo.c
 		readme.txt = readme.txt
 	EndProjectSection
 EndProject
diff --git a/PCbuild/pythoncore.vcproj b/PCbuild/pythoncore.vcproj
index d8c1cdf..583a32b 100644
--- a/PCbuild/pythoncore.vcproj
+++ b/PCbuild/pythoncore.vcproj
@@ -1007,6 +1007,10 @@
 	>
 </File>
 <File
+	RelativePath="..\Modules\_json.c"
+	>
+</File>
+<File
 	RelativePath="..\Modules\_localemodule.c"
 	>
 </File>
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -438,6 +438,8 @@ class PyBuildExt(build_ext):
         exts.append( Extension("_fileio", ["_fileio.c"]) )
         # _functools
         exts.append( Extension("_functools", ["_functoolsmodule.c"]) )
+        # _json speedups
+        exts.append( Extension("_json", ["_json.c"]) )
         # Python C API test module
         exts.append( Extension('_testcapi', ['_testcapimodule.c']) )
         # profilers (_lsprof is for cProfile.py)
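On other platforms the setup.py hunk above builds ``_json`` as an ordinary
shared extension instead, so both build paths end at the same module. Because
the package keeps the pure-Python implementations around next to the C ones,
the two can be checked against each other directly; a closing sketch, with
inputs and expected values copied from test_scanstring.py and
test_encode_basestring_ascii.py above::

    # Sketch: the py_* fallbacks and the _json accelerators are meant to be
    # drop-in equivalents for the same inputs.
    from json import decoder, encoder

    s = '"z\\ud834\\udd20x"'
    assert (decoder.py_scanstring(s, 1, None, True) ==
            decoder.c_scanstring(s, 1, None, True) ==
            (u'z\U0001d120x', 16))
    assert (encoder.py_encode_basestring_ascii(u'\u03b1\u03a9') ==
            encoder.c_encode_basestring_ascii(u'\u03b1\u03a9') ==
            '"\\u03b1\\u03a9"')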