def variable(variable_name_or_alias):
    """Flask view: return a JSON description of a variable.

    Unknown names are resolved via aliases (redirecting to the canonical
    name) or rejected with a 404. None-valued entries are dropped from the
    JSON payload.
    """
    definitions = state.variables_definitions.definition_by_variable_name
    definition = definitions.get(variable_name_or_alias)
    if definition is None:
        # Maybe an alias: redirect to the canonical variable name.
        matches = [
            candidate
            for candidate in definitions.values()
            if candidate.get('alias') == variable_name_or_alias
            ]
        if not matches:
            raise NotFound('The variable {!r} is not defined.'.format(variable_name_or_alias))
        return redirect(url_for('variable', variable_name_or_alias=matches[0]['name']))
    dependencies = state.dependencies_by_formula_name.get(definition['name'])
    if dependencies is not None:
        dependencies = sorted(dependencies)
    # Formulas that depend on this variable; None when there are none.
    reverse_dependencies = sorted(valfilter(
        lambda deps: definition['name'] in deps,
        state.dependencies_by_formula_name,
        ).keys()) or None
    payload = {
        'variable_definition': definition,
        'variable_dependencies': dependencies,
        'variable_reverse_dependencies': reverse_dependencies,
        }
    return jsonify(valfilter(lambda value: value is not None, payload))
Пример #2
0
def flatten_args(args):
    ''' flatten arguments into a hash table of key-value pairs

    args itself is a hash obtained by util.arguments(). Some item values
    are dictionaries or DDs, others are not. Dict values are merged, and
    non-dict items are added as a single dict of arguments, so that
    make_key can be called on the result.
    '''
    flattened = {}
    # Merge every dict-valued item first (later dicts win on key clashes,
    # matching toolz.merge semantics).
    for value in args.values():
        if isinstance(value, dict):
            flattened.update(value)
    # Then layer the scalar (non-dict) items on top under their own keys.
    flattened.update(
        (key, value) for key, value in args.items() if not isinstance(value, dict)
    )
    return flattened
Пример #3
0
def test_curried_namespace():
    """Check ``toolz.curried`` against what auto-currying toolz would yield."""
    exceptions = import_module('toolz.curried.exceptions')
    namespace = {}

    def should_curry(func):
        # curry instances and non-callables pass through unchanged.
        if not callable(func) or isinstance(func, toolz.curry):
            return False
        nargs = toolz.functoolz.num_required_args(func)
        if nargs is None or nargs > 1:
            return True
        return nargs == 1 and toolz.functoolz.has_keywords(func)

    def curry_namespace(ns):
        # Curry eligible public names; dunder names are skipped.
        return dict(
            (name, toolz.curry(f) if should_curry(f) else f)
            for name, f in ns.items() if '__' not in name
        )

    from_toolz = curry_namespace(vars(toolz))
    from_exceptions = curry_namespace(vars(exceptions))
    namespace.update(toolz.merge(from_toolz, from_exceptions))

    # Only compare callables on both sides.
    namespace = toolz.valfilter(callable, namespace)
    curried_namespace = toolz.valfilter(callable, toolz.curried.__dict__)

    if namespace != curried_namespace:
        missing = set(namespace) - set(curried_namespace)
        if missing:
            raise AssertionError(
                'There are missing functions in toolz.curried:\n    %s'
                % '    \n'.join(sorted(missing)))
        extra = set(curried_namespace) - set(namespace)
        if extra:
            raise AssertionError(
                'There are extra functions in toolz.curried:\n    %s'
                % '    \n'.join(sorted(extra)))
        # Same names, different objects: explain what each should have been.
        unequal = toolz.merge_with(list, namespace, curried_namespace)
        unequal = toolz.valfilter(lambda pair: pair[0] != pair[1], unequal)
        messages = []
        for name, (expected_func, actual_func) in sorted(unequal.items()):
            if name in from_exceptions:
                messages.append(
                    '%s should come from toolz.curried.exceptions' % name)
            elif should_curry(getattr(toolz, name)):
                messages.append('%s should be curried from toolz' % name)
            else:
                messages.append(
                    '%s should come from toolz and NOT be curried' % name)
        raise AssertionError('\n'.join(messages))
Пример #4
0
def clear_cache(scope=None):
    """
    Clear all cached data.

    Parameters
    ----------
    scope : {None, 'step', 'iteration', 'forever'}, optional
        Clear cached values with a given scope.
        By default all cached values are removed.

    """
    if not scope:
        # No scope given: wipe every cache wholesale.
        _TABLE_CACHE.clear()
        _COLUMN_CACHE.clear()
        _INJECTABLE_CACHE.clear()
        for m in _MEMOIZED.values():
            m.value.clear_cached()
        logger.debug('simulation cache cleared')
    else:
        # Remove only the entries whose recorded scope matches.
        for d in (_TABLE_CACHE, _COLUMN_CACHE, _INJECTABLE_CACHE):
            items = toolz.valfilter(lambda x: x.scope == scope, d)
            for k in items:
                del d[k]
        # NOTE(review): toolz.filter is assumed to behave like the builtin
        # filter (it was a py2/3 compat re-export in older toolz) -- confirm
        # against the toolz version this project pins.
        for m in toolz.filter(lambda x: x.scope == scope, _MEMOIZED.values()):
            m.value.clear_cached()
        logger.debug('cleared cached values with scope {!r}'.format(scope))
Пример #5
0
def assemble_ast(tag:str, idsclasses: Mapping[str, str], attrs: Mapping[str, str], body: list):
  """
  Small helper function for the template_2_ast function that assembles the appropriate ast element
  given the tag name, a dictionary of ids/classes from the tag name, further attrs, and a list of children or the body.
  For most components, there won't be any children.

  :param tag: element or component name; a leading capital letter marks a component
  :param idsclasses: 'id'/'class' values parsed out of the tag shorthand
  :param attrs: additional attributes; 'style' may be a dict of CSS property -> value
  :param body: list of child nodes (often empty)
  :return: dict AST node -- {'name', 'props', 'children'} for a component,
           {'tag', 'attrs', 'body'} for a plain element
  """
  iscomponent = re.match(r'^[A-Z]', tag)
  # BUG FIX: work on a copy -- the parameter is typed as a read-only Mapping
  # but was previously mutated in place, clobbering the caller's dict.
  attrs = dict(attrs)
  attrs['id'] = (attrs.get('id', '') + ' ' + idsclasses.get('id', '')).strip()
  attrs['class'] = (attrs.get('class', '') + ' ' + idsclasses.get('class', '')).strip()
  # remove the empty attributes to avoid clutter and save bytes.
  attrs = {key: val for key, val in attrs.items()
           if not (isinstance(val, str) and val.strip() == '')}
  # special handling for the "style" attribute, since that can be a dictionary
  attrs = {key: (' '.join('{}: {};'.format(prop, setting) for prop, setting in val.items())
                 if isinstance(val, dict) else val)
           for key, val in attrs.items()}

  if iscomponent:
    return {'name': tag, 'props': attrs, 'children': body}
  else:
    return {'tag': tag, 'attrs': attrs, 'body': body}
Пример #6
0
 def _sniff_dialect(self, path):
     """Infer the CSV dialect of *path* and merge it with stored kwargs.

     Returns a dict of recognized dialect terms with non-None values;
     explicit (alias-normalized) kwargs override the sniffed dialect.
     """
     kwargs = self._kwargs
     dialect = sniff_dialect(path, self._sniff_nbytes,
                             encoding=self.encoding)
     # keymap(alias, ...) normalizes user-supplied kwarg names; listing the
     # sniffed dialect first lets explicit kwargs take precedence in merge.
     kwargs = merge(dialect, keymap(alias, kwargs))
     # Keep only recognized dialect terms that have a concrete value.
     return valfilter(lambda x: x is not None,
                      dict((d, kwargs[d])
                           for d in dialect_terms if d in kwargs))
Пример #7
0
def test_curried_namespace():
    """Ensure ``toolz.curried`` matches an automatically curried ``toolz``.

    Rebuilds the curried namespace from scratch and diffs it against the
    real ``toolz.curried`` module, reporting missing, extra, and
    wrongly-sourced names.
    """
    exceptions = import_module('toolz.curried.exceptions')
    namespace = {}

    def should_curry(func):
        # Only plain callables that require more than one argument (or one
        # argument plus keywords) benefit from currying; objects that are
        # already curry instances are left untouched.
        if not callable(func) or isinstance(func, toolz.curry):
            return False
        nargs = toolz.functoolz.num_required_args(func)
        if nargs is None or nargs > 1:
            return True
        return nargs == 1 and toolz.functoolz.has_keywords(func)


    def curry_namespace(ns):
        # Curry eligible public names; dunder/private names are skipped.
        return dict(
            (name, toolz.curry(f) if should_curry(f) else f)
            for name, f in ns.items() if '__' not in name
        )

    from_toolz = curry_namespace(vars(toolz))
    from_exceptions = curry_namespace(vars(exceptions))
    namespace.update(toolz.merge(from_toolz, from_exceptions))

    # Compare only the callables on both sides.
    namespace = toolz.valfilter(callable, namespace)
    curried_namespace = toolz.valfilter(callable, toolz.curried.__dict__)

    if namespace != curried_namespace:
        missing = set(namespace) - set(curried_namespace)
        if missing:
            raise AssertionError('There are missing functions in toolz.curried:\n    %s'
                                 % '    \n'.join(sorted(missing)))
        extra = set(curried_namespace) - set(namespace)
        if extra:
            raise AssertionError('There are extra functions in toolz.curried:\n    %s'
                                 % '    \n'.join(sorted(extra)))
        # Same names but different objects: explain what each should be.
        unequal = toolz.merge_with(list, namespace, curried_namespace)
        unequal = toolz.valfilter(lambda x: x[0] != x[1], unequal)
        messages = []
        for name, (orig_func, auto_func) in sorted(unequal.items()):
            if name in from_exceptions:
                messages.append('%s should come from toolz.curried.exceptions' % name)
            elif should_curry(getattr(toolz, name)):
                messages.append('%s should be curried from toolz' % name)
            else:
                messages.append('%s should come from toolz and NOT be curried' % name)
        raise AssertionError('\n'.join(messages))
Пример #8
0
 def issues(repo: 'repo',
            labels: t.Optional[str] = None,
            state: t.Optional[str] = None):
     """Build a request for the issues of *repo*, optionally filtered."""
     query = {
         'labels': labels,
         'state': state,
     }
     # Parameters the caller left unset are dropped from the query string.
     return snug.Request(
         f'repos/{repo.owner}/{repo.name}/issues',
         params=valfilter(notnone, query),
     )
Пример #9
0
def _read_csv(path, schema, **kwargs):
    """Read a CSV into a DataFrame using dtypes derived from ``schema``.

    datetime64[ns] columns cannot be requested through ``dtype`` directly,
    so they are routed to ``parse_dates`` instead.
    """
    dtypes = dict(schema.to_pandas())

    # Split the datetime columns out of the dtype mapping.
    dates = [name for name, dtype in dtypes.items() if dtype == 'datetime64[ns]']
    for name in dates:
        del dtypes[name]

    return pd.read_csv(
        str(path), dtype=dtypes, parse_dates=dates, encoding='utf-8', **kwargs
    )
Пример #10
0
def required(cls: Union[Type[Schema], Type[Field]],
             name: Optional[str] = None,
             **kwargs) -> Field:
    """Instantiate *cls* as a field, wiring *name* into load/dump keys.

    When *name* is given it is passed as both 'load_from' and 'dump_to';
    None values are filtered out so the field's defaults are preserved.
    Extra keyword arguments are forwarded to the field constructor.
    """
    if issubclass(cls, Schema):
        # A Schema class cannot be used as a field directly; wrap in Nested.
        cls = partial(Nested, cls)
    return cls(**merge(
        kwargs, valfilter(not_none, {
            'load_from': name,
            'dump_to': name
        })))
Пример #11
0
def solve_mapping(rules: dict, mapped: dict) -> dict:
    """Resolve column-to-field assignments by constraint propagation.

    Repeatedly finds entries of ``mapped`` whose candidate list holds exactly
    one field, records that assignment, and removes the assigned field from
    every other candidate list -- until as many fields are assigned as there
    are rules.

    :param rules: rule definitions; only the number of keys is used here
    :param mapped: column name -> list of candidate field values
    :return: defaultdict mapping field value -> list of column names
    :raises ValueError: if a pass finds no single-candidate entry (the
        previous implementation looped forever on such unsolvable input)
    """
    columns_to_fields: dict = defaultdict(list)
    num_keys = len(rules)
    while len(columns_to_fields) != num_keys:
        # Snapshot of fully determined entries (exactly one candidate left).
        only_one = {col: fields for col, fields in mapped.items() if len(fields) == 1}
        if not only_one:
            raise ValueError('mapping cannot be solved: no unique candidate left')
        for col, fields in only_one.items():
            solved = fields[0]
            columns_to_fields[solved].append(col)
            # Eliminate the solved field from every remaining candidate list.
            mapped = {c: [f for f in fs if f != solved] for c, fs in mapped.items()}
    return columns_to_fields
Пример #12
0
Файл: csv.py Проект: gwulfs/odo
 def __init__(self, path, has_header=None, encoding='utf-8',
              sniff_nbytes=10000, **kwargs):
     """Describe a CSV resource, sniffing header and dialect from the file.

     Extra keyword arguments (alias-normalized) override sniffed dialect
     settings; only recognized, truthy dialect terms are kept.
     """
     self.path = path
     if has_header is None:
         # Header unknown: assume one for a not-yet-existing file,
         # otherwise sniff the first bytes of the file.
         self.has_header = (not os.path.exists(path) or
                            infer_header(path, sniff_nbytes))
     else:
         self.has_header = has_header
     self.encoding = encoding if encoding is not None else 'utf-8'
     # Sniffed dialect first so explicit kwargs take precedence in merge.
     kwargs = merge(sniff_dialect(path, sniff_nbytes, encoding=encoding),
                    keymap(alias, kwargs))
     self.dialect = valfilter(bool,
                              dict((d, kwargs[d])
                                   for d in dialect_terms if d in kwargs))
Пример #13
0
def issues(filter: t.Optional[str] = None,
           state: t.Optional[Issue.State] = None,
           labels: t.Optional[str] = None,
           sort: t.Optional[Issue.Sort] = None,
           since: t.Optional[datetime] = None):
    """a selection of assigned issues"""
    query = {
        'filter': filter,
        'state': state,
        'labels': labels,
        'sort': sort,
        'since': since,
    }
    # Unset (None) parameters are omitted from the query string.
    return snug.Request('issues', params=valfilter(notnone, query))
Пример #14
0
 def duplicates(self):
     """Return a mediaStruct holding only entries with more than one item."""
     dup_entries = toolz.valfilter(lambda entries: len(entries) > 1, self.media)
     return mediaStruct(dup_entries)
Пример #15
0
def run_algorithm(start,
                  end,
                  initialize,
                  capital_base,
                  handle_data=None,
                  before_trading_start=None,
                  analyze=None,
                  data_frequency='daily',
                  data=None,
                  bundle=None,
                  bundle_timestamp=None,
                  default_extension=True,
                  extensions=(),
                  strict_extensions=True,
                  environ=os.environ):
    """Run a trading algorithm.

    Parameters
    ----------
    start : datetime
        The start date of the backtest.
    end : datetime
        The end date of the backtest.
    initialize : callable[context -> None]
        The initialize function to use for the algorithm. This is called once
        at the very beginning of the backtest and should be used to set up
        any state needed by the algorithm.
    capital_base : float
        The starting capital for the backtest.
    handle_data : callable[(context, BarData) -> None], optional
        The handle_data function to use for the algorithm. This is called
        every minute when ``data_frequency == 'minute'`` or every day
        when ``data_frequency == 'daily'``.
    before_trading_start : callable[(context, BarData) -> None], optional
        The before_trading_start function for the algorithm. This is called
        once before each trading day (after initialize on the first day).
    analyze : callable[(context, pd.DataFrame) -> None], optional
        The analyze function to use for the algorithm. This function is called
        once at the end of the backtest and is passed the context and the
        performance data.
    data_frequency : {'daily', 'minute'}, optional
        The data frequency to run the algorithm at.
    data : pd.DataFrame, pd.Panel, or DataPortal, optional
        The ohlcv data to run the backtest with.
        This argument is mutually exclusive with:
        ``bundle``
        ``bundle_timestamp``
    bundle : str, optional
        The name of the data bundle to use to load the data to run the backtest
        with. This defaults to 'quantopian-quandl'.
        This argument is mutually exclusive with ``data``.
    bundle_timestamp : datetime, optional
        The datetime to lookup the bundle data for. This defaults to the
        current time.
        This argument is mutually exclusive with ``data``.
    default_extension : bool, optional
        Should the default zipline extension be loaded. This is found at
        ``$ZIPLINE_ROOT/extension.py``
    extensions : iterable[str], optional
        The names of any other extensions to load. Each element may either be
        a dotted module path like ``a.b.c`` or a path to a python file ending
        in ``.py`` like ``a/b/c.py``.
    strict_extensions : bool, optional
        Should the run fail if any extensions fail to load. If this is false,
        a warning will be raised instead.
    environ : mapping[str -> str], optional
        The os environment to use. Many extensions use this to get parameters.
        This defaults to ``os.environ``.

    Returns
    -------
    perf : pd.DataFrame
        The daily performance of the algorithm.

    See Also
    --------
    zipline.data.bundles.bundles : The available data bundles.
    """
    load_extensions(default_extension, extensions, strict_extensions, environ)

    non_none_data = valfilter(bool, {
        'data': data,
        'bundle': bundle,
    })
    if not non_none_data:
        # if neither data nor bundle are passed use 'quantopian-quandl'
        bundle = 'quantopian-quandl'
    # BUG FIX: this must be `elif`, not `if` -- with a plain `if`, the
    # defaulted-bundle path above always fell through to this check
    # (len(non_none_data) == 0 != 1) and raised, making the documented
    # default unreachable.
    elif len(non_none_data) != 1:
        raise ValueError(
            'must specify one of `data`, `data_portal`, or `bundle`,'
            ' got: %r' % non_none_data,
        )

    if 'bundle' not in non_none_data and bundle_timestamp is not None:
        raise ValueError(
            'cannot specify `bundle_timestamp` without passing `bundle`',
        )

    return _run(
        handle_data=handle_data,
        initialize=initialize,
        before_trading_start=before_trading_start,
        analyze=analyze,
        algofile=None,
        algotext=None,
        defines=(),
        data_frequency=data_frequency,
        capital_base=capital_base,
        data=data,
        bundle=bundle,
        bundle_timestamp=bundle_timestamp,
        start=start,
        end=end,
        output=os.devnull,
        print_algo=False,
        local_namespace=False,
        environ=environ,
    )
Пример #16
0
# Map SQLAlchemy column types onto datashape/numpy-style type names.
# (Removed a duplicate sa.types.BIGINT entry -- it appeared twice with the
# same value, which was dead code since the second key silently won.)
revtypes.update({
    sa.types.DATETIME: 'datetime',
    sa.types.TIMESTAMP: 'datetime',
    sa.types.FLOAT: 'float64',
    sa.types.DATE: 'date',
    sa.types.BIGINT: 'int64',
    sa.types.INTEGER: 'int',
    sa.types.NUMERIC: 'float64',  # TODO: extend datashape to decimal
    sa.types.NullType: 'string',
    sa.types.Float: 'float64',
})

# interval types are special cased in discover_typeengine so remove them from
# revtypes
revtypes = valfilter(lambda x: not isinstance(x, sa.types.Interval), revtypes)


# SI-prefix exponent -> time unit suffix used when building datetime units.
units_of_power = {
    0: 's',
    3: 'ms',
    6: 'us',
    9: 'ns'
}

# these aren't loaded by sqlalchemy by default
sa.dialects.registry.load('oracle')
sa.dialects.registry.load('postgresql')


def batch(sel, chunksize=10000):
def build_variable(variable_name_or_alias, saisie_variable_value_by_name):
    """Build an OrderedDict describing a variable for the API/UI.

    :param variable_name_or_alias: a variable name, or an alias that is
        redirected to the canonical name.
    :param saisie_variable_value_by_name: input values by variable name, used
        to render the evaluated formula source (missing names default to 0).
    :return: OrderedDict with the definition plus optional 'dependencies',
        'reverse_dependencies', formula-source HTML and git URL entries, or
        None when the name is neither defined nor a known constant.
    :raises RequestRedirect: when an alias is given, redirecting to the
        canonical variable URL (query args preserved).
    """
    from .application import app  # Import here to avoid cyclic modules import.

    variable_definition = state.variables_definitions.definition_by_variable_name.get(variable_name_or_alias)
    if variable_definition is None:
        # Not a variable name: look for a definition whose alias matches and
        # redirect to the canonical name.
        alias_variables_definitions = list(filter(
            lambda definition: definition.get('alias') == variable_name_or_alias,
            state.variables_definitions.definition_by_variable_name.values(),
            ))
        if alias_variables_definitions:
            raise RequestRedirect(url_for(
                'variable',
                variable_name_or_alias=alias_variables_definitions[0]['name'],
                **request.args
                ))
    if variable_definition is None:
        # Neither a variable nor an alias: fall back to constants.
        if variable_name_or_alias not in state.constants:
            return None
        variable_definition = dict(name=variable_name_or_alias)

    variable_dependencies = sorted(state.dependencies_by_formula_name.get(variable_name_or_alias) or [])
    # Reverse dependencies: formulas whose dependency set contains this name.
    variable_reverse_dependencies = sorted(valfilter(
        lambda val: variable_name_or_alias in val,
        state.dependencies_by_formula_name,
        ).keys())

    variable = OrderedDict()
    variable.update(variable_definition)
    if variable_dependencies:
        variable['dependencies'] = variable_dependencies
    if variable_reverse_dependencies:
        variable['reverse_dependencies'] = variable_reverse_dependencies

    if variable_definition.get('type') == 'variable_calculee' and (
            'formula_linecol' in variable_definition or 'pour_formula_linecol' in variable_definition
            ):
        # Extract the formula's source lines from the original source file.
        ((startline, _), (endline, _)) = variable_definition.get('formula_linecol') or \
            variable_definition['pour_formula_linecol']
        source_file_path = os.path.join(app.config['M_SOURCE_FILES_DIR_PATH'], variable_definition['source_file_name'])
        with open(source_file_path) as source_file:
            formula_source = ''.join(source_file.readlines()[startline - 1:endline]).strip()
        formula_source_html = formula_source
        # Longest names first so that shorter names that are substrings of
        # longer ones are not replaced inside them.
        sorted_variable_dependencies = sorted(variable_dependencies, key = lambda value: -len(value))
        for dependency_name in sorted_variable_dependencies:
            dependency_description = state.variables_definitions.get_description(dependency_name)
            if dependency_description is not None:
                formula_source_html = formula_source_html.replace(
                    dependency_name,
                    u'<abbr title="{}">{}</abbr>'.format(dependency_description, dependency_name),
                    )
        variable['formula_source_html'] = formula_source_html
        evaluated_formula_source_html = formula_source
        # Second pass: substitute each dependency with its input value,
        # right-justified to keep the original source alignment.
        for dependency_name in sorted_variable_dependencies:
            dependency_value = saisie_variable_value_by_name.get(dependency_name, 0)
            evaluated_formula_source_html = evaluated_formula_source_html.replace(
                dependency_name,
                str(dependency_value).rjust(len(dependency_name)),
                )
        variable['evaluated_formula_source_html'] = evaluated_formula_source_html
        variable['source_file_git_url'] = '{}{}#L{}-{}'.format(
            app.config['SOURCE_FILE_GIT_BASE_URL'],
            variable_definition['source_file_name'],
            startline,
            endline,
            )

    return variable