Example no. 1
0
def sync(input_file, output_file, **kw):
    """
    Synchronise and re-sample data-sets defined in INPUT_FILE and writes shifts
    and synchronized data into the OUTPUT_FILE.

    INPUT_FILE: Data-sets input file (format: .xlsx, .json).

    OUTPUT_FILE: output file (format: .xlsx, .json).
    """
    # Collapse the one-element label tuples coming from the CLI into scalars.
    for label in ('x_label', 'y_label'):
        kw[label] = sh.bypass(*kw[label])
    # Keep only options that were actually provided (truthy values), then
    # attach the i/o file paths and run the processing model.
    options = {key: value for key, value in kw.items() if value}
    options['input_fpath'] = input_file
    options['output_fpath'] = output_file
    return _process(options)
Example no. 2
0
def load_interpolation_methods(methods_fpath, interpolation_method='linear'):
    """
    Load the per-variable interpolation method of each data-set.

    :param methods_fpath:
        File path (`.json`) of interpolation methods.

        It is like `{"<set-name>": {"<var-name>": "<interp>", ...}, ...}`.
    :type methods_fpath: str

    :param interpolation_method:
        Default interpolation method.
    :type interpolation_method: str

    :return:
        Interpolation methods for each variable of each data-set.

        It is like `{"<set-name>": {"<var-name>": "<interp>", ...}, ...}`.
    :rtype: collections.defaultdict
    """
    import json
    from syncing.model.interp import METHODS
    from syncing.model import define_interpolation_methods

    # Start from the defaults, then override with the entries in the file.
    methods = define_interpolation_methods(interpolation_method)
    with open(methods_fpath) as f:
        raw = json.load(f)
    # Flatten the nested mapping into (key-path, method-name) pairs; the
    # first key is the data-set name, the rest identify the variable.
    for keys, name in sh.stack_nested_keys(raw):
        methods[keys[0]][sh.bypass(*keys[1:])] = METHODS[name]
    return methods
Example no. 3
0
def read_json(input_fpath, data_names=None):
    """
    Read raw data-sets from a json file.

    :param input_fpath:
        Input file path.
    :type input_fpath: str

    :param data_names:
        Data names to filter out the data sets to synchronize.
    :type data_names: list

    :return:
        Raw data-sets.
    :rtype: dict[str, dict[str, numpy.array]]
    """
    import json
    import numpy as np

    with open(input_fpath) as fp:
        raw = json.load(fp)
    data = {}
    # Flatten the nested json into (key-path, values) pairs; keys[0] is the
    # data-set name, the remaining keys identify the variable.
    for keys, values in sh.stack_nested_keys(raw):
        set_name = keys[0]
        if data_names and set_name not in data_names:
            continue  # Skip data-sets the caller did not ask for.
        variables = sh.get_nested_dicts(data, set_name)
        variables[sh.bypass(*keys[1:])] = np.array(values)
    return data
Example no. 4
0
def sync(input_file, output_file, **kw):
    """
    Synchronise and re-sample data-sets defined in INPUT_FILE and writes shifts
    and synchronized data into the OUTPUT_FILE.

    INPUT_FILE: Data-sets input file (format: .xlsx, .json).

    OUTPUT_FILE: output file (format: .xlsx, .json).

    DATA_NAMES: to filter out the data sets to synchronize.
    """
    # Collapse the one-element label tuples coming from the CLI into scalars.
    kw.update(
        x_label=sh.bypass(*kw['x_label']),
        y_label=sh.bypass(*kw['y_label'])
    )
    # Drop options that were left empty (falsy), add the i/o paths, and run.
    options = {key: value for key, value in kw.items() if value}
    options['input_fpath'] = input_file
    options['output_fpath'] = output_file
    return _process(options)
Example no. 5
0
def read_json(input_fpath):
    """
    Read raw data-sets from a json file.

    :param input_fpath:
        Input file path.
    :type input_fpath: str

    :return:
        Raw data-sets.
    :rtype: dict[str, dict[str, numpy.array]]
    """
    import json
    import numpy as np

    with open(input_fpath) as fp:
        raw = json.load(fp)
    data = {}
    # keys[0] is the data-set name; the remaining keys identify the variable.
    for keys, values in sh.stack_nested_keys(raw):
        variables = sh.get_nested_dicts(data, keys[0])
        variables[sh.bypass(*keys[1:])] = np.array(values)
    return data
Example no. 6
0
 def wrapper(*args, **kwargs):
     # Decorator body: ``func``, ``n_out``, ``parse_ranges`` and
     # ``RangeValueError`` are closed over from the enclosing decorator
     # scope (not visible in this chunk).
     try:
         # Normalise/validate the range arguments, then delegate.
         args, kwargs = parse_ranges(*args, **kwargs)
         return func(*args, **kwargs)
     except RangeValueError:
         # Invalid range: instead of raising, emit one sh.NONE per expected
         # output so a schedula dispatcher treats the results as missing.
         return sh.bypass(*((sh.NONE, ) * n_out))
Example no. 7
0
    def setUp(self):
        """
        Build a nested dispatcher model, publish it as a web API and collect,
        for every rule that maps to an executed function node, the
        ``(rule, inputs, expected results)`` triples for two dispatches
        (default inputs and ``{'A': 1}``).
        """
        ss_dsp = sh.Dispatcher(name='ss_dsp')

        fun = lambda a: (a + 1, 5, a - 1)
        dom = lambda kw: True
        ss_dsp.add_function(function=fun,
                            inputs=['a'],
                            outputs=['b', 'd', 'c'],
                            input_domain=dom,
                            weight=1)

        sdspfunc = sh.SubDispatchFunction(ss_dsp, 'SubDispatchFunction', ['a'],
                                          ['b', 'c'])

        sdsppipe = sh.SubDispatchPipe(ss_dsp, 'SubDispatchPipe', ['a'],
                                      ['b', 'c'])

        sdsp = sh.SubDispatch(ss_dsp, ['b', 'c'], output_type='list')

        s_dsp = sh.Dispatcher(name='s_dsp')
        s_dsp.add_function(None, sdspfunc, ['a'], ['b', 'c'])
        s_dsp.add_function(None, sdsppipe, ['a'], ['g'])
        s_dsp.add_function('SubDispatch', sdsp, ['d'], ['e', 'f'])

        dsp = sh.Dispatcher(name='model')
        dsp.add_data('A', default_value=0)
        dsp.add_data('D', default_value={'a': 3})

        dsp.add_dispatcher(dsp=s_dsp,
                           inputs={
                               'A': 'a',
                               'D': 'd'
                           },
                           outputs={
                               'b': 'B',
                               'c': 'C',
                               'e': 'E',
                               'f': 'F',
                               'g': 'G'
                           },
                           inp_weight={'A': 3})
        self.dsp = dsp
        self.sol = sol = dsp.dispatch()
        sites = set()
        webmap = dsp.web(node_data=('+set_value', ), run=True, sites=sites)
        self.site = sites.pop()
        self.url = '%s/' % self.site.url
        rules = webmap.rules()

        def collect_io(solution):
            # For each URL rule, gather the corresponding function node's
            # input values and recorded results; skip rules that do not map
            # to an executed function node with results.
            io = []
            for rule in rules.values():
                n = rule.split('/')[1:]
                if not n:
                    continue

                s, k = solution.get_node(*n, node_attr='sol')
                k = k[-1]
                try:
                    v = s.workflow.node[k]
                except KeyError:
                    continue
                if 'results' not in v:
                    continue
                inputs = s._wf_pred[k]  # Map of the function's arguments.
                inputs = sh.bypass(
                    *[inputs[i]['value'] for i in s.nodes[k]['inputs']])
                io.append((rule, inputs, v['results']))
            return io

        # The same collection ran twice in copy-pasted form before; share it.
        self.io = collect_io(sol)
        self.sol1 = sol = dsp.dispatch({'A': 1})
        self.io1 = collect_io(sol)
Example no. 8
0
 def test_bypass(self):
     """bypass returns several arguments as a tuple and one argument as-is."""
     cases = (
         (('a', 'b', 'c'), ('a', 'b', 'c')),
         (('a',), 'a'),
     )
     for args, expected in cases:
         self.assertEqual(sh.bypass(*args), expected)
Example no. 9
0
 def test_bypass(self):
     """bypass packs multiple arguments into a tuple; a lone one passes through."""
     self.assertEqual(sh.bypass('a'), 'a')
     self.assertEqual(sh.bypass(*'abc'), ('a', 'b', 'c'))