Coverage for pandalone.pandata : 87%

#! python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
"""
A :dfn:`pandas-model` is a tree of strings, numbers, sequences, dicts,
pandas instances and resolvable URI-references, implemented by :class:`Pandel`.
"""
try:
    from unittest.mock import MagicMock
except ImportError:
    from mock import MagicMock  # @UnusedImport
try:
    from urllib.parse import urljoin
except ImportError:
    from urlparse import urljoin
    (?P<name>[^[<]*?)           # column-name
    \s*
    (?P<units>                  # start parenthesized-units optional-group
        \[                      # units enclosed in []
            [^\]]*
        \]
        |
        <                       # units enclosed in <>
            [^>]*
        >
    )?                          # end parenthesized-units
    \s*$''', re.X)
"""An item-descriptor with units, i.e. used as a table-column header."""
""" Parses *name-units* pairs (i.e. used as a table-column header).
:return: a United(name, units) named-tuple, or `None` if bad syntax; note that ``name=''`` but ``units=None`` when missing.
Examples::
>>> parse_value_with_units('value [units]')
United(name='value', units='units')

>>> parse_value_with_units('foo bar <bar/krow>')
United(name='foo bar', units='bar/krow')

>>> parse_value_with_units('no units')
United(name='no units', units=None)

>>> parse_value_with_units('')
United(name='', units=None)
But notice::
>>> assert parse_value_with_units('ok but [bad units') is None
>>> parse_value_with_units('<only units>')
United(name='', units='only units')

>>> parse_value_with_units(None)
Traceback (most recent call last):
TypeError: expected string or buffer
"""
""" Customization functions for traversing, I/O, and converting self-or-descendant branch (sub)model values. """ """
:param list inp: the `args-list` to :meth:`Pandel._read_branch()`
:param out:
    The args to :meth:`Pandel._write_branch()`, which may be specified as any of the following:

    * an `args-list`, that will apply for all model data-types (lists, dicts & pandas),
    * a map of ``type`` --> ``args-list``, where the ``None`` key is the *catch-all* case,
    * a function returning the `args-list` for some branch-value,
      with signature: ``def get_write_branch_args(branch)``.
:param conv:
    The conversion-functions (:dfn:`convertors`) for the model's various data-types.
    The convertors have signature ``def convert(branch)``, and they may be specified either as:

    * a map of ``(from_type, to_type)`` --> ``conversion_func()``, where the ``None`` key is
      the *catch-all* case, or
    * a "master-switch" function returning the appropriate convertor depending on the
      requested conversion.  The master-function's signature is
      ``def get_convertor(from_branch, to_branch)``.
The convertors required by :class:`Pandel` include at least the following (check the code for more):
* DataFrame  <--> dict
* Series     <--> dict
* ndarray    <--> list
"""
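For instance, a convertor-map covering the minimum conversions above might look like this (an illustrative sketch; only the ``None`` catch-all convention is taken from the description)::

    import numpy as np
    import pandas as pd

    conv = {
        (dict, pd.DataFrame): pd.DataFrame,                   # the constructor
        (pd.DataFrame, dict): lambda df: df.to_dict('list'),
        (dict, pd.Series):    pd.Series,                      # the constructor
        (pd.Series, dict):    lambda sr: sr.to_dict(),
        (np.ndarray, list):   lambda arr: arr.tolist(),
        (list, np.ndarray):   np.asarray,
        None:                 lambda branch: branch,          # catch-all: pass through
    }

Such a map would then be supplied as the ``conv`` field of a :class:`ModelOperations` instance.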
pass
pass
# Workaround https://github.com/Julian/jsonschema/issues/178
""" A customized :class:`Draft4Validator` suporting instance-trees with pandas and numpy objects, natively.
Any pandas or numpy instance (for example ``obj``) is treated as follows:
+----------------------------+-----------------------------------------+
| Python Type                | JSON Equivalence                        |
+============================+=========================================+
| :class:`pandas.DataFrame`  | as ``object`` *json-type*, with         |
|                            | ``obj.columns`` as *keys*, and          |
|                            | ``obj[col].values`` as *values*         |
+----------------------------+-----------------------------------------+
| :class:`pandas.Series`     | as ``object`` *json-type*, with         |
|                            | ``obj.index`` as *keys*, and            |
|                            | ``obj.values`` as *values*              |
+----------------------------+-----------------------------------------+
| :class:`np.ndarray`,       | as ``array`` *json-type*                |
| :class:`list`,             |                                         |
| :class:`tuple`             |                                         |
+----------------------------+-----------------------------------------+
Note that the value of each DataFrame column is an ``ndarray`` instance.
The simplest validation of an object or a pandas-instance looks like this:
>>> import pandas as pd
>>> schema = {
...     'type': 'object',
... }
>>> pv = PandelVisitor(schema)
>>> pv.validate({'foo': 'bar'})
>>> pv.validate(pd.Series({'foo': 1}))
>>> pv.validate([1,2])                          ## A sequence is invalid here.
Traceback (most recent call last):
...
jsonschema.exceptions.ValidationError: [1, 2] is not of type 'object'
<BLANKLINE>
Failed validating 'type' in schema:
    {'type': 'object'}
<BLANKLINE>
On instance:
    [1, 2]
Or demanding specific properties with ``required`` and no ``additionalProperties``:
>>> schema = {
...     'type': 'object',
...     'required': ['foo'],
...     'additionalProperties': False,
...     'properties': {
...         'foo': {}
...     }
... }
>>> pv = PandelVisitor(schema)
>>> pv.validate(pd.Series({'foo': 1}))
>>> pv.validate(pd.Series({'foo': 1, 'bar': 2}))        ## Additional 'bar' is present!
Traceback (most recent call last):
...
jsonschema.exceptions.ValidationError: Additional properties are not allowed ('bar' was unexpected)
<BLANKLINE>
Failed validating 'additionalProperties' in schema:
    {'additionalProperties': False,
     'properties': {'foo': {}},
     'required': ['foo'],
     'type': 'object'}
<BLANKLINE>
On instance:
    bar    2
    foo    1
    dtype: int64
>>> pv.validate(pd.Series({}))                          ## Required 'foo' missing!
Traceback (most recent call last):
...
jsonschema.exceptions.ValidationError: 'foo' is a required property
<BLANKLINE>
Failed validating 'required' in schema:
    {'additionalProperties': False,
     'properties': {'foo': {}},
     'required': ['foo'],
     'type': 'object'}
<BLANKLINE>
On instance:
    Series([], dtype: float64)
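A DataFrame validates in the same spirit, as an ``object`` whose columns are array-valued properties (a hedged example based on the type-mapping table above):

>>> import numpy as np
>>> schema = {
...     'type': 'object',
...     'properties': {
...         'V': {'type': 'array'},
...         'N': {'type': 'array'},
...     },
... }
>>> pv = PandelVisitor(schema)
>>> pv.validate(pd.DataFrame(np.ones((3, 2)), columns=list('VN')))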
"""
schema, types, resolver, format_checker)
# type(np.nan) == builtins.float! FIXME, are numpy-numbers --> json-types OK??
"number": (numbers.Number, np.number),
"integer": (int, np.integer),
"boolean": (bool, np.bool_),  # , np.bool8),
"array": (list, tuple, np.ndarray),
"object": (dict, pd.DataFrame, pd.Series)
})
# Setup Draft4/3 validation
#
# Meta-validate schema with original validators (and not self)
# because this class inherits an empty (schema/rules) validator.
# Falls back to 'Draft4' if no `$schema` exists.
'items': PandelVisitor._rule_items,
'additionalProperties': PandelVisitor._rule_additionalProperties,
'additionalItems': PandelVisitor._rule_additionalItems,
})
'properties': PandelVisitor._rule_properties_draft3,
})
else:
'properties': PandelVisitor._rule_properties_draft4,
'required': PandelVisitor._rule_required_draft4,
})
# Cannot use ``validator_class.check_schema()`` because
# we need to relay my args to ``validator_class.__init__()``.
# Even better, use myself, since I'm faster (kind of...).
raise SchemaError.create_from(error)
##################################
############ Visiting ###########
##################################
return ((k, v.values) for k, v in instance.iteritems())
return instance.iteritems()
else:
# set details if not already set by the called fn
validator=k,
validator_value=v,
instance=instance,
schema=_schema,
)
##################################
############# Rules ##############
##################################
self._get_iprop(instance, prop),
subschema,
path=prop,
schema_path=prop,
):
self._get_iprop(instance, prop),
subschema,
path=prop,
schema_path=prop,
):
validator="required",
validator_value=subschema["required"],
instance=instance,
schema=schema,
)
else:
item, subschema, path=index, schema_path=index,
):
yield error
return
not patterns or not re.search(patterns, iprop):
"Additional properties are not allowed (%s %s unexpected)" % jsonschema._utils.extras_msg(extras))
not self.is_type(instance, "array") or
self.is_type(schema.get("items", {}), "object")
):
return
"Additional items are not allowed (%s %s unexpected)" % jsonschema._utils.extras_msg( instance[len(schema.get("items", [])):]) )
""" Builds, validates and stores a *pandas-model*, a mergeable stack of JSON-schema abiding trees of strings and numbers, assembled with
* sequences,
* dictionaries,
* :class:`pandas.DataFrame`,
* :class:`pandas.Series`, and
* URI-references to other model-trees.
.. _pandel-overview:
**Overview**
The **making of a model** involves, among other things, schema-validating, reading :dfn:`subtree-branches` from URIs, cloning, converting and merging multiple :dfn:`sub-models` into a single :dfn:`unified-model` tree, without side-effecting the given input. All this happens in 4+1 steps::
                   .................... Model Construction ....................
      ------------ :  _______    ___________                                  :
     / top_model /==>|Resolve|->|PreValidate|-+                               :
     -----------'  : |___0___|  |_____1_____| |                               :
      ------------ :  _______    ___________  |   _____    ________    ______ :   --------
     / base-model/==>|Resolve|->|PreValidate|-+->|Merge|->|Validate|->|Curate|==>/ model /
     -----------'  : |___0___|  |_____1_____|    |_ 2__|  |___3____|  |__4+__|:  -------'
                   ............................................................
All steps are executed "lazily", using generators (with :keyword:`yield`). Before proceeding to the next step, the previous one must have completed successfully. That way, any ad-hoc code in building-step 5 (*curation*), for instance, will not suffer a horrible death due to badly-formed data.
[TODO] The **storing of a model** simply involves distributing model parts into different files and/or formats, again without side-effecting the unified-model.
.. _pandel-building-model:
**Building model**
Here is a detailed description of each building-step:
1. :meth:`_resolve` and substitute any `json-references <http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03>`_ present in the submodels with content-fragments fetched from the referred URIs. The submodels are **cloned** first, to avoid side-effecting them.
Although by default a combination of *JSON* and *CSV* files is expected, this can be customized, either by the content in the json-ref, within the model (see below), or as :ref:`explained <pandel-customization>` below.
The **extended json-refs syntax** supported provides for passing arguments into :meth:`_read_branch()` and :meth:`_write_branch()` methods. The syntax is easier to explain by showing what the default :attr:`_global_cntxt` corresponds to, for a ``DataFrame``::
{ "$ref": "http://example.com/example.json#/foo/bar", "$inp": ["AUTO"], "$out": ["CSV", "encoding=UTF-8"] }
And here is what is required to read from, and (later) store into, a local HDF5 file with a predefined name::
{ "$ref": "file://./filename.hdf5", "$inp": ["AUTO"], "$out": ["HDF5"] }
.. Warning:: Step NOT IMPLEMENTED YET!
2. Loosely :meth:`_prevalidate` each sub-model separately with :term:`json-schema`, where any pandas-instances (DataFrames and Series) are left as is. It is the duty of the developer to ensure that the prevalidation-schema is *loose enough* that it allows for various submodel-forms, prior to merging, to pass.
3. Recursively **clone** and :meth:`_merge` sub-models into a single unified-model tree. Branches from sub-models higher in the stack override the respective ones from the sub-models below, recursively. Different object types need to be **converted** appropriately (i.e. merging a ``dict`` with a ``DataFrame`` results in a ``DataFrame``, so the dictionary has to be converted into a dataframe).
The required **conversions** into pandas classes can be customized as :ref:`explained <pandel-customization>` below. Series and DataFrames cannot merge together, and Sequences do not merge with any other object-type (themselves included); they just "overwrite".
The default convertor-functions defined both for submodels and models are listed in the following table:
============ ========== =========================================
From:        To:        Method:
============ ========== =========================================
dict         DataFrame  ``pd.DataFrame`` (the constructor)
DataFrame    dict       ``lambda df: df.to_dict('list')``
dict         Series     ``pd.Series`` (the constructor)
Series       dict       ``lambda sr: sr.to_dict()``
============ ========== =========================================
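As a quick sanity-check of these defaults (a reader-side snippet, not part of the class)::

    import pandas as pd

    d = {'a': [1, 2], 'b': [3, 4]}
    assert pd.DataFrame(d).to_dict('list') == d     # dict --> DataFrame --> dict round-trips

    s = {'x': 1, 'y': 2}
    assert pd.Series(s).to_dict() == s              # dict --> Series --> dict round-trips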
4. Strictly json-:meth:`_validate` the unified-model (i.e. enforcing ``required`` schema-rules).
The required **conversions** from pandas classes can be customized as :ref:`explained <pandel-customization>` below.
The default convertor-functions are the same as above.
5. (Optionally) Apply the :meth:`_curate` functions on the model to enforce dependencies and/or any ad-hoc generation-rules among the data. You can think of bash-like expansion patterns, like ``${/some/path:=$HOME}``, or expressions like ``%len(../other/path)``.
.. _pandel-storing:
**Storing model**
When storing model-parts, if unspecified, the filenames to write into will be deduced from the jsonpointer-path of the ``$out``'s parent, by substituting "strange" chars with underscores (``_``).
.. Warning:: Functionality NOT IMPLEMENTED YET!
.. _pandel-customization:
**Customization**
Some operations within steps (namely *conversion* and *IO*) can be customized by the following means (from lower to higher precedence):
a. The global-default :class:`ModelOperations` instance on the :attr:`_global_cntxt`, applied on both submodels and unified-model.
For example to channel the whole reading/writing of models through `HDF5 <http://pandas.pydata.org/pandas-docs/stable/io.html#io-hdf5>`_ data-format, it would suffice to modify the :attr:`_global_cntxt` like that::
pm = FooPandelModel()                        ## some concrete model-maker
io_args = ["HDF5"]
pm.mod_global_operations(inp=io_args, out=io_args)
b. [TODO] Extra-properties on the json-schema applied on both submodels and unified-model for the specific path defined. The supported properties are the non-functional properties of :class:`ModelOperations`.
c. Specific-properties regarding *IO* operations within each submodel - see the *resolve* building-step, above.
d. Context-maps of ``json_paths`` --> :class:`ModelOperations` instances, installed by :meth:`add_submodel()` and :attr:`unified_contexts` on the model-maker. They apply to the self-or-descendant subtree of each model.
The `json_path` is a string obeying a simplified :term:`json-pointer` syntax (no char-normalizations yet), e.g. ``/some/foo/1/pointer``. An empty string (``''``) matches the whole model.
When multiple convertors match for a model-value, the convertor selected is the most specific one (the one with the longest matching path-prefix). For instance, on the model::
[ { "foo": { "bar": 0 } } ]
all of the following would match the ``0`` value:
- the global-default :attr:`_global_cntxt`,
- ``/``, and
- ``/0/foo``
but only the last's context-props will be applied.
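A naive sketch of that longest-prefix selection (names and the character-prefix match are illustrative assumptions; a real implementation would compare pointer segments)::

    def select_context(path, contexts, default_ops):
        ## `contexts` maps json-paths --> ModelOperations instances;
        ## pick the entry whose path is the longest prefix of `path`.
        best, best_len = default_ops, -1
        for cpath, ops in contexts.items():
            if path.startswith(cpath) and len(cpath) > best_len:
                best, best_len = ops, len(cpath)
        return best

    ## With contexts {'': root_ops, '/0/foo': foo_ops}, the value at
    ## '/0/foo/bar' selects `foo_ops`, the most specific match.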
.. _Attributes:
**Attributes**
.. Attribute:: model
The model-tree that will receive the merged submodels after :meth:`build()` has been invoked. Depending on the submodels, the top-value can be any of the supported model data-types.
.. Attribute:: _submodel_tuples
The stack of (``submodel``, ``path_ops``) tuples. The list's 1st element is the :dfn:`base-model`, the last one, the :dfn:`top-model`. Use the :meth:`add_submodel()` to build this list.
.. Attribute:: _global_cntxt
A :class:`ModelOperations` instance acting as the global-default context for the unified-model and all submodels. Use :meth:`mod_global_operations()` to modify it.
.. Attribute:: _curate_funcs
The sequence of *curate* functions to be executed as the final step by :meth:`_curate()`. They are "normal" functions (not generators) with signature::
def curate_func(model_maker):
    pass      ## i.e. modify ``model_maker.model``.
It is better to specify this list of functions at construction time.
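For example, a hypothetical curate function that enforces a default (assuming a dict-like unified model) could be::

    def ensure_default_c(model_maker):
        ## Give 'c' a default when the merged model omits it.
        model_maker.model.setdefault('c', 0)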
.. Attribute:: _errored
An internal boolean flag that becomes ``True`` if any build-step has failed, to halt proceeding to the next one. It is ``None`` if build has not started yet.
.. _pandel-examples:
**Examples**
The basic usage requires subclassing your own model-maker, just so that a *json-schema* is provided for both validation-steps, 2 & 4:
>>> from collections import OrderedDict as od ## Json is better with stable keys-order
>>> class MyModel(Pandel):
...     def _get_json_schema(self, is_prevalidation):
...         return {                                                ## Define the json-schema.
...             '$schema': 'http://json-schema.org/draft-04/schema#',
...             'required': [] if is_prevalidation else ['a', 'b'], ## Prevalidation is more loose.
...             'properties': {
...                 'a': {'type': 'string'},
...                 'b': {'type': 'number'},
...                 'c': {'type': 'number'},
...             }
...         }
Then you can instantiate it and add your submodels:
>>> mm = MyModel()
>>> mm.add_submodel(od(a='foo', b=1))                   ## submodel-1 (base)
>>> mm.add_submodel(pd.Series(od(a='bar', c=2)))        ## submodel-2 (top-model)
You then have to build the final unified-model (any validation errors would be reported at this point):
>>> mdl = mm.build()
Note that you can also access the unified-model in the :attr:`model` attribute. You can now interrogate it:
>>> mdl['a'] == 'bar'                       ## Value overridden by top-model
True
>>> mdl['b'] == 1                           ## Value left intact from base-model
True
>>> mdl['c'] == 2                           ## New value from top-model
True
Let's try to build with invalid submodels:
>>> mm = MyModel()
>>> mm.add_submodel({'a': 1})               ## According to the schema, this should have been a string,
>>> mm.add_submodel({'b': 'string'})        ## and this one, a number.
>>> sorted(mm.build_iter(), key=lambda ex: ex.message)  ## Fetch a list with all validation errors. # doctest: +NORMALIZE_WHITESPACE
[<ValidationError: "'string' is not of type 'number'">,
 <ValidationError: "1 is not of type 'string'">,
 <ValidationError: 'Gave-up building model after step 1.prevalidate (out of 4).'>]
>>> mdl = mm.model
>>> mdl is None                             ## No model constructed, failed before merging.
True
And let's try to build with valid submodels but an invalid merged one:
>>> mm = MyModel()
>>> mm.add_submodel({'a': 'a str'})
>>> mm.add_submodel({'c': 1})
>>> sorted(mm.build_iter(), key=lambda ex: ex.message)  # doctest: +NORMALIZE_WHITESPACE
[<ValidationError: "'b' is a required property">,
 <ValidationError: 'Gave-up building model after step 3.validate (out of 4).'>]
"""
"""
:param sequence curate_funcs: See :attr:`_curate_funcs`.
"""
"""
Since it is the fall-back for *conversion* and *IO* operations, it must exist and have all its props well-defined for the class to work correctly.
:param ModelOperations operations:
    Replaces values of the installed context with non-empty values from this one.
:param cntxt_kwargs:
    Replaces the keyworded-values on the existing `operations`.
    See :class:`ModelOperations` for supported keywords.
"""
if operations:
    assert isinstance(operations, ModelOperations), (type(operations), operations)
    self._global_cntxt = operations
self._global_cntxt = self._global_cntxt._replace(**cntxt_kwargs)
def unified_contexts(self):
    """
    A map of ``json_paths`` --> :class:`ModelOperations` instances acting on the unified-model.
    """
    return self._unified_contexts
def unified_contexts(self, path_ops):
    assert isinstance(path_ops, Mapping), (type(path_ops), path_ops)
    self._unified_contexts = path_ops
""" Finds which context to use while visiting model-nodes, by enforcing the precedance-rules described in the :ref:`Customizations <pandel-customization>`.
:param str path: the branch's jsonpointer-path
:param branch:   the actual branch's node
:return:         the selected :class:`ModelOperations`
"""
pass
""" Reads model-branches during *resolve* step. """ pass # TODO: impl read_branch()
""" Writes model-branches during *distribute* step. """ pass # TODO: impl write_branch()
""" :return: a json schema, more loose when `prevalidation` for each case :rtype: dictionary """ # TODO: Make it a factory o pass
properties = schema.get("properties", {})
patterns = "|".join(schema.get("patternProperties", {}))
extras = set()
for prop in instance:
    if prop not in properties:
        if patterns and re.search(patterns, prop):
            continue
        extras.add(prop)
if validator.is_type(aP, "object"):
    for extra in extras:
        for error in validator.descend(instance[extra], aP, path=extra):
            yield error
elif not aP and extras:
    error = "Additional properties are not allowed (%s %s unexpected)"
    yield ValidationError(error % jsonschema._utils.extras_msg(extras))
if (validator.is_type(instance, "object") or
        validator.is_type(instance, "DataFrame") or
        validator.is_type(instance, "Series")):
    for prop in required:
        if prop not in instance:
            yield ValidationError("%r is a required property" % prop)
{"ndarray": np.ndarray, "DataFrame": pd.DataFrame, 'Series': pd.Series})
"""' Recursively merge b into a, cloning both. """
# a.update(b) # DOES NOT append extra keys!
a[key], b_val, '%s/%s' % (path, key))
else:
(isinstance(b, Sequence) and not isinstance(b, basestring)):
else:
    val = a
self._clone_and_merge_submodels(item, None, '%s[%i]' % (path, i)))
raise ValidationError("Cannot merge Nones at path(%s)!" % path)
else:
"Step-1" if False: yield
"Step-1"
"Step-2" if False: yield # Just mark method as generator.
"Step-3"
"Step-4: Invokes any curate-functions found in :attr:`_curate_funcs`." if False: yield # To be overriden, just mark method as generator. curfunc(self)
""" Pushes on top a submodel, along with its context-map.
:param model:
    the model-tree (sequence, mapping, pandas-types)
:param dict path_ops:
    A map of ``json_paths`` --> :class:`ModelOperations` instances acting on the unified-model.
    The `path_ops` may often be empty.
**Examples**
To change the default DataFrame --> dictionary convertor for a submodel, use the following:
>>> mdl = {'foo': 'bar'}
>>> submdl = ModelOperations(mdl, conv={(pd.DataFrame, dict): lambda df: df.to_dict('record')})
"""
assert isinstance(path_ops, Mapping), (type(path_ops), path_ops)
""" Iteratively build model, yielding any problems as :class:`ValidationError` instances.
For debugging, the unified model at :attr:`model` may contain intermediate results at any time, even if construction has failed. Check the :attr:`_errored` flag if necessary. """
(self._prevalidate, 'prevalidate'),
(self._merge, 'merge'),
(self._validate, 'validate'),
(self._curate, 'curate'),
]
self._errored = True
yield ex
self._errored = True
nex = ValidationError(
    'Model step-%i(%s) failed due to: %s' % (i, step_name, ex))
nex.cause = ex
yield nex
""" Attempts to build the model by exhausting :meth:`build_iter()`, or raises its 1st error.
Use this method when you do not want to waste time getting the full list of errors. """
resolve_jsonpointer(self.model, path, **kws)
""" Generates the ``jsonpath`` parts according to jsonpointer spec.
:param str jsonpath: a jsonpath to resolve within document :return: The parts of the path as generator), without converting any step to int, and None if None.
:author: Julian Berman, ankostis
Examples::
>>> list(iter_jsonpointer_parts('/a/b'))
['a', 'b']

>>> list(iter_jsonpointer_parts('/a//b'))
['a', '', 'b']

>>> list(iter_jsonpointer_parts('/'))
['']

>>> list(iter_jsonpointer_parts(''))
[]
But paths are strings beginning (NOT-IMPLEMENTED: but not ending) with a slash('/')::
>>> list(iter_jsonpointer_parts(None))
Traceback (most recent call last):
AttributeError: 'NoneType' object has no attribute 'split'
>>> list(iter_jsonpointer_parts('a'))
Traceback (most recent call last):
jsonschema.exceptions.RefResolutionError: Jsonpointer-path(a) must start with '/'!
#>>> list(iter_jsonpointer_parts('/a/'))
#Traceback (most recent call last):
#jsonschema.exceptions.RefResolutionError: Jsonpointer-path(a) must NOT ends with '/'!
"""
# if jsonpath.endswith('/'):
#     msg = "Jsonpointer-path({}) must NOT finish with '/'!"
#     raise RefResolutionError(msg.format(jsonpath))
""" Like :func:`iter_jsonpointer_parts()` but accepting also non-absolute paths.
The 1st step of absolute-paths is always ''.
Examples::
>>> list(iter_jsonpointer_parts_relaxed('a'))
['a']
>>> list(iter_jsonpointer_parts_relaxed('a/'))
['a', '']
>>> list(iter_jsonpointer_parts_relaxed('a/b'))
['a', 'b']

>>> list(iter_jsonpointer_parts_relaxed('/a'))
['', 'a']
>>> list(iter_jsonpointer_parts_relaxed('/a/'))
['', 'a', '']

>>> list(iter_jsonpointer_parts_relaxed('/'))
['', '']

>>> list(iter_jsonpointer_parts_relaxed(''))
['']
"""
""" Resolve a ``jsonpointer`` within the referenced ``doc``.
:param doc:              the referent document
:param str jsonpointer:  a jsonpointer to resolve within the document
:return:                 the resolved doc-item, or raises :class:`RefResolutionError`
:raises: RefResolutionError (if the jsonpointer path cannot be resolved)
Examples:
>>> dt = {
...     'pi': 3.14,
...     'foo': 'bar',
...     'df': pd.DataFrame(np.ones((3, 2)), columns=list('VN')),
...     'sub': {
...         'sr': pd.Series({'abc': 'def'}),
...     },
... }
>>> resolve_jsonpointer(dt, '/pi', default=_scream)
3.14
>>> resolve_jsonpointer(dt, '/pi/BAD')
Traceback (most recent call last):
jsonschema.exceptions.RefResolutionError: Unresolvable JSON pointer('/pi/BAD')@(BAD)
>>> resolve_jsonpointer(dt, '/pi/BAD', 'Hi!')
'Hi!'
:author: Julian Berman, ankostis
"""
# Array indexes should be turned into integers
except ValueError:
    pass
"Unresolvable JSON pointer(%r)@(%s)" % (jsonpointer, part))
else:
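Only fragments of the resolution loop survive above; a self-contained sketch reproducing the documented behaviour (assumptions throughout, including the ``_scream`` sentinel) might be::

    from collections.abc import Sequence
    from jsonschema import RefResolutionError

    _scream = object()      # sentinel: "raise instead of returning a default"

    def resolve_jsonpointer_sketch(doc, jsonpointer, default=_scream):
        for part in jsonpointer.split('/')[1:]:
            if isinstance(doc, Sequence) and not isinstance(doc, str):
                # Array indexes should be turned into integers.
                try:
                    part = int(part)
                except ValueError:
                    pass
            try:
                doc = doc[part]
            except (LookupError, TypeError):
                if default is _scream:
                    raise RefResolutionError(
                        "Unresolvable JSON pointer(%r)@(%s)" % (jsonpointer, part))
                return default
        return doc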
""" Resolve a ``jsonpointer`` within the referenced ``doc``.
:param doc:              the referent document
:param str jsonpointer:  a jsonpointer to the node to modify
:raises: RefResolutionError (if jsonpointer is empty, missing, or has invalid content)
"""
# Will scream if used on 1st iteration.
#
# Array indexes should be turned into integers
#
else:
    "Expected numeric index(%s) for sequence at (%r)[%i]" % (part, jsonpointer, i))
else:
    "Index(%s) out of bounds(%i) of (%r)[%i]" % (part, doclen, jsonpointer, i))
else:
# Build branch with value-leaf.
#

# Attach new-branch.
# Inserting last sequence-element raises IndexError("list assignment index
# out of range")
# except (IndexError, TypeError) as ex:
#     if isinstance(ex, IndexError) or 'list indices must be integers' in str(ex):
#         raise RefResolutionError("Incompatible content of JSON pointer(%r)@(%s)" % (jsonpointer, part))
#     else:
#         doc = {}
#         parent_doc[parent_part] = doc
#         doc[part] = value
# Totally quick and dirty, TODO: Use json-validator to build all json-paths.
for obj in objlist:
    _visit(obj, path, paths)
else:
"""Denotes non-existent json-schema attribute in :class:`JSchema`."""
""" Facilitates the construction of json-schema-v4 nodes on :class:`PStep` code.
It does just rudimentary args-name check. Further validations should apply using a proper json-schema validator.
:param type: if omitted, derived as 'object' if it has children :param kws: for all the rest see http://json-schema.org/latest/json-schema-validation.html
"""
return {k: v for k, v in vars(self).items() if v is not _NONE}
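For illustration, such a helper might be used roughly like this (the attribute handling shown is an assumption, not the documented API)::

    js = JSchema(type='number', title='foo', minimum=0)
    ## Unset keywords stay at the `_NONE` marker and are dropped when the
    ## node is rendered into a plain dict, e.g.:
    ##     {'type': 'number', 'title': 'foo', 'minimum': 0}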
""" Json coders/decoders capable for (almost) all python objects, by pickling them.
Example::
>>> import json
>>> obj_list = [
...     3.14,
...     {
...         'aa': pd.DataFrame([]),
...         2: np.array([]),
...         33: {'foo': 'bar'},
...     },
...     pd.DataFrame(np.random.randn(10, 2)),
...     ('b', pd.Series({})),
... ]
>>> for o in obj_list + [obj_list]:
...     s = json.dumps(o, cls=JSONCodec.Encoder)
...     oo = json.loads(s, cls=JSONCodec.Decoder)
...     assert trees_equal(o, oo)
...
.. seealso:: For pickle-limitations: https://docs.python.org/3.4/library/pickle.html#pickle-picklable
"""
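Only fragments of the codec appear below; a self-contained sketch of one pickle-based approach (illustrative names, not the module's actual encoder/decoder) could be::

    import base64
    import json
    import pickle

    class PickleCodecSketch:
        """Hypothetical codec showing the pickle-inside-json idea."""
        _obj_key = '$pickle'

        class Encoder(json.JSONEncoder):
            def default(self, o):
                ## Anything json cannot encode natively gets pickled and base64-wrapped.
                blob = base64.b64encode(pickle.dumps(o)).decode('ascii')
                return {PickleCodecSketch._obj_key: blob}

        class Decoder(json.JSONDecoder):
            def __init__(self, **kws):
                super().__init__(object_hook=self._unpickle, **kws)

            @staticmethod
            def _unpickle(dct):
                blob = dct.get(PickleCodecSketch._obj_key)
                if blob is not None:
                    return pickle.loads(base64.b64decode(blob))
                return dct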
JSONCodec._ver_key: JSONCodec._ver}
# file_ver = o[JSONCodec._ver_key]
# if file_ver != JSONCodec._ver:
#     msg = 'Unsopported json-encoded version(%s != %s)!'
#     raise ValueError(msg % (file_ver, JSONCodec._ver))
encoding='utf8'))
if __name__ == '__main__':  # pragma: no cover
    raise NotImplementedError