Example #1
def prob5():
    factors = {}
    for i in range(1,21):
        prime_factors = utils.prime_factorization(i)
        utils.dict_merge(factors, prime_factors)
    product = utils.prime_defactorization(factors)
    return product
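(Here prob5 appears to be Project Euler problem 5: merging the prime factorizations of 1..20 and re-multiplying yields their least common multiple.)

Every example on this page leans on a dict_merge helper, and each project ships its own variant. As a point of reference only, a minimal sketch of the most common shape, a recursive in-place merge, might look like the following; the two-argument signature and the mutate-and-return behavior are assumptions, and several examples below clearly use other variants (extra arguments, list concatenation):

def dict_merge(base, overlay):
    """Recursively merge `overlay` into `base` in place and return `base`.

    Nested dicts are merged key by key; any other value in `overlay`
    replaces the corresponding value in `base`.
    """
    for key, value in overlay.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            dict_merge(base[key], value)
        else:
            base[key] = value
    return base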
Example #2
    def _load(self):
        f_path_tmpl = ROOT_PATH + "config_template.yml"
        f_path_user = ROOT_PATH + "config.yml"

        # Create config.yml if it doesn't exist
        try:
            with open(f_path_user, 'x') as f_user:
                pass
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        # Parse files "config_template.yml" and "config.yml"
        with open(f_path_tmpl, "r") as f_tmpl, open(f_path_user, "r") as f_user:
            # Load template config and override with user values
            conf = yaml.safe_load(f_tmpl)
            dict_merge(conf, yaml.safe_load(f_user) or {})

            conf_final = conf.copy()

            # Add path to filenames
            conf_final['db']['uri'] = 'sqlite:///' + ROOT_PATH + conf_final['db']['name']

            return conf_final
Example #3
def merge_root(root):

    flattened = flatten_root(root)
    result = {}
    for f in reversed(flattened):
        dict_merge(result, f)

    return result
Example #4
def write_extra_vars(cluster):
    """ Creates the `tmp/extra_vars.yml` file.
        Extra extra vars are computed as a merge between extra_vars defined 
        in the cluster api (highest priority) and a set of default extra vars 
        (lowest priority). """

    # NB. only the settings used in the test guide should be defined as defaults here;
    # values for default extra vars should be mirrored from 'hack/ansible/group_vars/all/main.yml'
    default_extra_vars = {
        'kubernetes': {
            'vip': {
                'fqdn': 'k8s.example.com',
                'ip': '10.10.10.3'
            },
            'cni': {
                'weavenet': {
                    'manifestUrl':
                    quoted(
                        "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
                    )
                },
                'flannel': {
                    'manifestUrl':
                    'https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml'
                },
                'calico': {
                    'manifestUrl':
                    'https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/kubeadm/1.7/calico.yaml'
                }
            }
        },
        'kubeadm': {
            'binary': '/usr/bin/kubeadm',
            'token': 'abcdef.0123456789abcdef'
        }
    }

    utils.dict_merge(default_extra_vars, cluster.extra_vars)
    cluster.extra_vars = default_extra_vars

    # writes the `tmp/extra_vars.yml` file
    if not os.path.exists(vagrant_utils.tmp_folder):
        os.makedirs(vagrant_utils.tmp_folder)

    extra_vars_file = os.path.join(vagrant_utils.tmp_folder, 'extra_vars.yml')

    yaml.add_representer(quoted, quoted_presenter)
    yaml.add_representer(unicode, unicode_presenter)
    with open(extra_vars_file, 'w') as outfile:
        yaml.dump(cluster.extra_vars, outfile, default_flow_style=False)
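The merge direction matters here: the defaults act as the base and the cluster's extra_vars are merged on top, so a user-supplied value wins while untouched defaults survive. A hypothetical trace (the override value below is invented), assuming utils.dict_merge(base, overlay) lets the overlay win:

# cluster.extra_vars = {'kubernetes': {'vip': {'ip': '192.168.0.2'}}}
# after write_extra_vars(cluster):
#   cluster.extra_vars['kubernetes']['vip']
#   -> {'fqdn': 'k8s.example.com', 'ip': '192.168.0.2'}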
Example #5
    def __init__(self, config):
        default_config = {
            'train_size': 10000,
            'validation_size': 10000,
            'test_size': 10000,
            'bias': 0.3,
            'debug': True,
            'n_jobs': 1,
            'log_to_stdout': False
        }

        merged_config = dict_merge(default_config, self.load_config())
        merged_config = dict_merge(merged_config, config)
        self.__dict__.update(**merged_config)
Example #6
def generate_scenarios(historical_series, goal_series=None, other_series=None, layout={}):
    s = historical_series.interpolate()
    forecast = generate_forecast_series(s, 2035)
    data = [
        go.Scatter(
            x=s.index, y=s, connectgaps=True, name='Mitattu',
            line=dict(color='grey'),
        ),
        go.Scatter(
            x=list(forecast.index), y=list(forecast), name='Nykytrendi', mode='lines',
            line=dict(color='blue', dash='dash'), opacity=0.5
        )
    ]
    if goal_series is not None:
        cs = s.combine_first(goal_series)
        cs = cs.reindex(range(cs.index.min(), 2035+1))
        cs = cs.interpolate(method='pchip')
        cs = cs.loc[s.index.max():]
        data.append(go.Scatter(
            x=cs.index, y=cs, name='Goal', mode='lines',
            line=dict(color='green', dash='dash')
        ))
        forecast = cs

    if other_series is not None:
        os = other_series
        os = os.loc[(os.index >= s.index.min()) & (os.index <= forecast.index.max())]
        data.append(go.Scatter(
            x=os.index, y=os, yaxis='y2'
        ))

    d = {
        "xaxis": dict(title='Vuosi', fixedrange=True),
        "yaxis": dict(fixedrange=True)
    }
    if other_series is not None:
        d['yaxis2'] = dict(
            title='Y2',
            fixedrange=True,
            overlaying='y',
            side='right'
        )

    dict_merge(d, layout)
    fig = go.Figure(data=data, layout=d)

    combined = pd.concat([s, forecast], axis='index')
    combined = combined[~combined.index.duplicated(keep='first')]
    
    return fig, combined
Example #7
    def load_config(self):
        """ Returns the final logan config, result of the merging of default and user config

            It always return an empty Dict object not None!

            Returns:
                Dict derived from the logan configuration file
        """

        # Try to retrieve config from cache
        config = self.get_config_from_cache() or {}

        if not config:

            default_config = self.load_default_config()
            user_config = self.load_user_config()

            # Override the default config with the user one
            config = dict_merge(default_config, user_config) or {}

            # Save the config to the cache
            cached = self.add_to_cache(config)

            if not cached:
                print "Failed to cache config file"

        return config
Example #8
def build_exposures(name):
    for date in years:
        yield dict_merge({
            'name': name,
            'date': date
        }, {i_id: build_exp() for i_id in index_ids})
Example #9
def lock_summary():
    try:
        utils.dump_app_settings(utils.dict_merge(utils.get_app_settings(), dict(summary_viewable=False)))
        return redirect('/index-builder/summary')
    except Exception as ex:
        logger.info(ex)
        return jsonify(dict(error=str(ex), traceback=str(traceback.format_exc())))
Example #10
def calculate_preset(key, opts={}):
    m = get_base_presets().get(key)
    if m:
        extends = mklist(m.get('extends', []))
        extends_dicts = map(calculate_preset, extends) + [m, opts]
        return dict_merge({}, *extends_dicts)
    else:
        return {}
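The recursion lets one preset extend others, with later dicts taking precedence: inherited values first, then the preset's own entries, then the caller's opts. A hypothetical trace (the preset table below is invented), assuming dict_merge({}, *dicts) merges left to right into a fresh dict:

# get_base_presets() -> {'base': {'timeout': 30, 'retries': 3},
#                        'fast': {'extends': 'base', 'timeout': 5}}
# calculate_preset('fast', {'retries': 1})
#   merges {} <- calculate_preset('base') <- m <- opts
#   -> {'timeout': 5, 'retries': 1, 'extends': 'base'}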
Example #11
def get_factor_data(factor_id):
    return dict_merge(
        dict(sectors=sectors.get(factor_id, {}),
             scores=factor_scores.get(factor_id, {}),
             score_defs=factor_score_defs.get(factor_id, {}),
             ret_summary=load_ret_summary(factor_scores.get(factor_id, {})),
             returns=load_returns(factor_scores.get(factor_id, {}))),
        load_top_bottom())
Example #12
def anomaliesSummary(request, deviceID, anomalyID=None):
    data = defaultdict(list)

    data_pre = request.data
    data_pre['deviceID'] = deviceID

    serializer = EASSerializer(data=data_pre)
    serializer.is_valid(raise_exception=True)
    kwargs = serializer.data

    from_size = kwargs.pop('from_size')
    size = kwargs.pop('size')
    must_list = kwargs.pop('must_list')
    must_not_list = [{"terms": {"scores": [0]}}]
    esorm = AnomaliesScoresORM()
    datetime_list = get_datetime_list(esorm, deviceID, must_not_list, must_list, **kwargs)
    if anomalyID:
        must_list += [{"term": {"anomalyID.keyword": anomalyID}}]
    else:
        anomalyID = "_all"

    anomalies = get_anomaly(anomalyID)

    must_display = [{"terms": {"anomalyID.keyword": list(anomalies.keys())}}] if anomalyID == "_all" else []

    query = {
        "size": size,
        "_source": ["userId", "username", "timestamp", "scores", "summary", "anomalyID"],
        "from": from_size,
        "sort": [{"timestamp": {"order": "desc"}}, {"scores": {"order": "desc"}}],
        "query": {
            "bool": {
                "must": must_list + must_display,
                "filter": [{"terms": {"timestamp": datetime_list}}, {"term": {"deviceID.keyword": deviceID}}],
                "must_not": must_not_list
            }
        }
    }
    res = esorm.search(False, query=query)
    docs = [i['_source'] for i in res['hits']['hits']]

    if anomalyID == "_all":
        for i in anomalies:
            anomaly = anomalies[i]
            anomaly['summary'] = anomaly['forensics']['summary']
            del anomaly['forensics']
    else:
        anomaly = anomalies
        anomaly['summary'] = anomaly['forensics']['summary']
        del anomaly['forensics']

    for doc in docs:
        anomaly = anomalies[doc["anomalyID"]] if anomalyID == "_all" else anomalies
        data[anomalyID].append(dict_merge(anomaly, doc, True))

    data["hits"] = res["hits"]["total"]
    return Response(data)
Example #13
    def __init__(self, **kwargs):
        if 'base_algorithm' in kwargs:
            algo = kwargs.pop('base_algorithm')
            self.base_algorithm = algo
        else:
            self.base_algorithm = None

        self.parameters = {}
        if kwargs:
            self.parameters = utils.dict_merge(self.parameters, kwargs)
Example #14
def unlock_factor_settings():
    try:
        user = utils.get_str_arg(request, 'user')
        factor_settings = utils.get_factor_settings(user)
        factor_settings['locked'] = False
        utils.dump_factor_settings(user, factor_settings)
        return jsonify(utils.dict_merge(utils.get_user_counts(), dict(success=True)))
    except Exception as ex:
        logger.info(ex)
        return jsonify(dict(error=str(ex), traceback=str(traceback.format_exc())))
Example #15
def load_user_results(factors, indices, factor_settings):
    results = dict(
        settings={
            k: dict_merge(dict(label=factors[k]['label']), v)
            for k, v in factor_settings.items()
        })
    sectors = list(
        load_weighted_values(factors,
                             indices['sectors'].set_index(['date', 'name']),
                             factor_settings))
    if len(sectors):
        sectors = pd.concat(sectors, axis=1).sum(axis=1)
        sectors.name = 'val'
        sectors = sectors.reset_index(level='date')
        results['sectors'] = {
            k: g.to_dict(orient='records')
            for k, g in sectors.groupby(level='name')
        }

    barra = list(
        load_weighted_values(factors,
                             indices['barra'].set_index(['date', 'name']),
                             factor_settings))
    if len(barra):
        barra = pd.concat(barra, axis=1).sum(axis=1)
        barra.name = 'val'
        barra = barra.reset_index(level='date')
        results['barra'] = {
            k: g.to_dict(orient='records')
            for k, g in barra.groupby(level='name')
        }

    returns = {}
    excess_returns = list(
        load_weighted_values(factors, indices['returns']['excess'],
                             factor_settings))
    if len(excess_returns):
        excess_returns = pd.concat(excess_returns, axis=1).sum(axis=1)
        excess_returns.name = 'val'
        excess_returns.index.name = 'date'
        returns['excess'] = excess_returns.reset_index().to_dict(
            orient='records')

    annualized_returns = list(
        load_weighted_values(factors, indices['returns']['annualized'],
                             factor_settings))
    if len(annualized_returns):
        annualized_returns = pd.concat(annualized_returns, axis=1).sum(axis=1)
        annualized_returns.name = 'val'
        annualized_returns.index.name = 'date'
        returns['annualized'] = annualized_returns.reset_index().to_dict(
            orient='records')

    results['returns'] = returns
    return results
Example #16
    def set_params(self, **params):
        if 'base_algorithm' in params:
            algo = params.pop('base_algorithm')
            self.base_algorithm = algo
        if not self.parameters:
            self.parameters = {}
        if params:
            self.parameters = utils.dict_merge(self.parameters, params)
        if self.base_algorithm and self.parameters:
            self._update_algorithm()
        return self
Example #17
    def calculate_freck_config(self, freck_configs, freck_meta, develop=False):
        """Merges the default vars from the 'default_freck_config' method with the (potentially overlayed) vars that come from the user config.

        User vars will have precedence over the default_vars. This method should not be overwritten by a Freck.

        Args:
            freck_configs (list): the user-provided config vars
            freck_meta (dict): freck specific meta information
            develop (bool): development-mode, outputs debug information for when developing frecks

        Returns:
            dict: the merged config vars
        """

        freck_vars = {}
        for config in freck_configs:
            dict_merge(freck_vars, config)

        freck_config = copy.deepcopy(FRECK_DEFAULT_CONFIG)
        dict_merge(freck_config, copy.deepcopy(self.default_freck_config()))
        dict_merge(freck_config, copy.deepcopy(freck_vars))

        if develop:
            click.echo("===============================================")
            click.echo("Calculated config after merging:")
            click.echo(pprint.pformat(freck_config))
            click.echo("-----------")

        return freck_config
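The precedence described in the docstring plays out as three in-place merges, lowest priority first, so if every layer defines the same key, the user's value is the one that survives in the returned freck_config:

# layering, lowest to highest priority:
#   FRECK_DEFAULT_CONFIG  <-  self.default_freck_config()  <-  freck_vars (user configs, merged in order)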
Example #18
def lock_factor_settings():
    try:
        user = session.get('username')
        total_weight = sum(map(lambda x: x.get('weight', 0), session.get('factor_settings', {}).get('factors', {}).values()))
        if total_weight < 100:
            flash("Your total weights are less than 100!")
            return redirect(request.referrer)
        session['factor_settings'] = utils.dict_merge(session['factor_settings'], dict(locked=True))
        utils.dump_factor_settings(user, session['factor_settings'])
        return redirect(request.referrer)
    except Exception as ex:
        logger.info(ex)
        return jsonify(dict(error=str(ex), traceback=str(traceback.format_exc())))
Example #19
def flatten_root(root, add_leaf_dicts=False):

    root_copy = copy.deepcopy(root)
    result = []
    for item in root_copy:

        if LEAF_DICT in item.keys():
            leaf_dict = item.pop(LEAF_DICT)
        else:
            leaf_dict = {}

        result_dict = {}
        for var, value_dicts in item.iteritems():
            result_dict[var] = {}
            for value_dict in value_dicts:
                dict_merge(result_dict[var], value_dict)

        if add_leaf_dicts:
            result_dict[LEAF_DICT] = leaf_dict
        result.append(result_dict)

    return result
Example #20
File: main.py Project: ahuff44/go-bot
    def get(self, url, params={}, headers={}):
        # 1) Prepare parameters
        if params:
            url = "{}?{}".format(url, urlencode(params))
        headers = utils.dict_merge(
            self.basic_headers(),
            headers,
        )

        self.log("GET:\n\turl={}\n\theaders={}".format(repr(url), repr(headers)))

        # 2) Send request
        response = requests.get(url, headers=headers)

        # 3) Read response
        return utils.Either.from_response(response)
Example #21
    def get_metadata(self, filename):
        "Extracts the metadata from the given local file (might be symlink, or non canonical)"
        #absolute local path (local is within container)
        localpath = os.path.abspath(filename)
        #localpath to real path (skipping links)
        finalpath = self._get_final_path(localpath)
        #the complete realpath on the host (not resolved in case of symlink)
        realpath = self._to_realpath(localpath)

        self.logger.debug("localpath: %s, finalpath: %s, realpath: %s",
                          localpath, finalpath, realpath)

        meta = utils.dict_merge({}, self.default,
                                self.__extract_from_filename(realpath))
        #we should read the real file, which might be something completely different, as the target of a symlink
        #basically we get the metadata from the link and the data from the target
        with Dataset(finalpath, 'r') as f:
            meta['global'] = {}
            for g_att in f.ncattrs():
                meta['global'][str(g_att)] = getattr(f, g_att)
            meta['variables'] = {}
            for var in f.variables:
                meta['variables'][var] = self.__extract_variable(
                    f.variables[var])
            meta['dimensions'] = {}
            for dim in f.dimensions:
                meta['dimensions'][dim] = self.__extract_dimension(
                    f.dimensions[dim])

        #the id will be removed when publishing and used as such
        meta[NetCDFFileHandler.EXTRA]['_id'] = self.__get_id(meta)
        if self.json_dump_dir is not None:
            meta_json = json.dumps(meta, indent=2, cls=SetEncoder)
            json_file = self._get_json_dump_location(realpath)
            try:
                self.logger.debug("Dumping json file to: %s", json_file)
                with open(json_file, 'w') as f:
                    f.write(meta_json)
            except Exception as e:
                #we try to write in localpath and report the error in realpath... that is sadly intentional
                #as the localpath is the internal representation of the realpath, which is the only thing the user
                #will ever see.
                self.logger.error('Could not write file %s: %s', json_file, e)
        return meta
Example #22
    def _load_service_config(self, app):
        # Get the service from k8s to attach the domain correctly
        svc = self._scheduler._get_service(app, app).json()
        # Get minimum structure going if it is missing on the service
        if 'metadata' not in svc or 'annotations' not in svc['metadata']:
            default = {'metadata': {'annotations': {}}}
            svc = dict_merge(svc, default)

        # Check if any config has been set
        if 'deis.io/routerConfig' not in svc['metadata']['annotations']:
            config = {}
        else:
            config = json.loads(svc['metadata']['annotations']['deis.io/routerConfig'])

        # See if domains are available
        if 'domains' not in config:
            config['domains'] = []

        return svc, config
Example #23
    def compile_vals(self, results):
        """
        Compile a single `vals` data value or structure given the list
        of `results` from running `export_records` and/or
        `delete_records` multiple times. Return the resulting `vals`
        value or structure, or None if there is no result.

        The method as implemented here assumes `results` is a list of
        dictionaries and attempts to merge them in a way that makes
        sense. Arrays are combined, and nested dicts are recursively
        merged.

        Override this to provide custom behavior for merging specific
        return values or data structures.
        """
        vals = {}
        for item in results:
            if isinstance(item, dict):
                vals = dict_merge(vals, item)
        return vals or None
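The docstring implies a dict_merge variant that combines arrays and recursively merges nested dicts (unlike the plain replace-on-conflict sketch near the top of this page). A hypothetical trace with invented data, assuming that behavior:

# results = [{'deleted': [1], 'counts': {'a': 1}},
#            {'deleted': [2], 'counts': {'b': 2}}]
# compile_vals(results)
#   -> {'deleted': [1, 2], 'counts': {'a': 1, 'b': 2}}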
Example #24
def find_summary_data():
    try:
        factors = get_factors()
        archive = utils.get_str_arg(request, 'archive')
        current_user = session.get('username')
        is_admin = current_user == 'admin'
        summary = {}
        for factor_id, factor in factors.items():
            summary[factor_id] = dict(
                label=factor['label'],
                selections=dict(HI=[], LO=[]),
                avg=dict(HI=0, LO=0),
                ethical_wt=dict(HI=0, LO=0),
                reason_avg=dict(HI={}, LO={}),
            )

        users = list(utils.get_all_user_factors(archive=archive))
        for user, factor_settings in users:
            for factor_id, inputs in factor_settings.items():
                summary[factor_id]['selections'][inputs['strength']].append(utils.dict_merge(inputs, dict(user=user)))
        total_users = len(users)
        for factor in summary.values():
            for strength, selections in factor['selections'].items():
                factor['avg'][strength] = (sum(map(itemgetter('weight'), selections)) * 1.0) / total_users
                reason_avg = defaultdict(int)
                total_reason_users = len(selections)
                for s in selections:
                    for r_id in s['reasons']:
                        reason_avg[r_id] += 1
                factor['reason_avg'][strength] = {
                    r_id: ((total * 1.0) / total_reason_users) * 100
                    for r_id, total in reason_avg.items()
                }

        return jsonify(dict(
            data=summary,
            archives=utils.find_available_archives() if is_admin else []
        ))
    except Exception as ex:
        logger.info(ex)
        return jsonify(dict(error=str(ex), traceback=str(traceback.format_exc())))
Example #25
def collapse_vals(vals):
    new_vals = {}
    for v in vals:
        new_vals = dict_merge(new_vals, v)
    return new_vals
Example #26
    def process_leafs(self):

        frecks = []
        for freck_nr, leaf in enumerate(self.leafs):
            if FRECK_META_KEY not in leaf.keys():
                continue
            freck_name = leaf[FRECK_META_KEY][FRECK_NAME_KEY]

            runner, processed = self.freck_plugins[freck_name].process_leaf(
                copy.deepcopy(leaf), self.supported_runners, self.debug_freck)

            if not processed:
                log.debug("No frecks created for freck_name '{}'.".format(
                    freck_name))
                continue

            if self.debug_freck:
                click.echo("Processed leaf '{}'.".format(freck_name))
                click.echo("---")
                click.echo("Input:")
                click.echo(pprint.pformat(leaf))
                click.echo("---")
                click.echo("Result:")
                click.echo(pprint.pformat(processed))
                click.echo("===============================================")

            if isinstance(processed, dict):
                processed = [processed]

            # apply result on top of original configuration
            temp = []
            for p in processed:
                t = copy.deepcopy(leaf[FRECK_META_KEY])
                dict_merge(t, p)
                temp.append(t)

            processed = temp

            new_run = False
            for prep in processed:

                prep[FRECK_RUNNER_KEY] = runner
                prep[FRECK_INDEX_KEY] = freck_nr
                prep.setdefault(FRECK_ITEM_NAME_KEY,
                                "{}".format(prep[FRECK_NAME_KEY]))
                prep.setdefault(FRECK_NEW_RUN_AFTER_THIS_KEY, False)
                if FRECK_PRIORITY_KEY not in prep.keys():
                    prep[FRECK_PRIORITY_KEY] = FRECK_DEFAULT_PRIORITY + (
                        freck_nr * 1000)
                check_schema(prep, FRECKLES_POST_PREPROCESS_SCHEMA)

                if prep[FRECK_NEW_RUN_AFTER_THIS_KEY]:
                    new_run = True

            frecks.extend(processed)

            if new_run:
                frecks = self.sort_frecks(frecks)
                run_frecks = []

                for f in frecks:
                    run_frecks.append(f)
                    if f[FRECK_NEW_RUN_AFTER_THIS_KEY]:
                        yield self.sort_frecks(run_frecks)
                        run_frecks = []

                frecks = run_frecks

        yield self.sort_frecks(frecks)
Example #27
def collapse_vals(vals):
    new_vals = {}
    for v in vals:
        if isinstance(v, dict):
            new_vals = dict_merge(new_vals, v)
    return new_vals
Example #28
def load_factors(path):
    logger.info('caching factors...')
    factor_ids = range(1, 14)
    build_factor = lambda i: {
        'id': 'factor_{}'.format(i),
        'label': 'Factor {}'.format(i),
        'description': 'Description of "Factor {}"'.format(i),
        'index_name': 'index_{}'.format(i),
        'rating': random.randint(1, 2400) / 100.0
    }
    factors = pd.DataFrame(map(build_factor, factor_ids))
    factors = factors.to_dict(orient='records')
    factors = {r['id']: r for r in factors}

    sectors = {
        'factor_{}'.format(i): dict_merge(
            {
                str(sector): pct * 10000
                for sector, pct in zip(range(10, 61, 5), build_percentages(11))
            }, dict(Total=10000))
        for i in factor_ids
    }

    possible_cols = [3, 5, 6]
    possible_scores = [20, 25, 40, 50, 60, 75, 80]

    def build_scores():
        cols = possible_cols[random.randint(0, 2)]
        if cols == 3:
            return {
                str(score): pct
                for score, pct in zip([0, 50, 100], build_percentages(3))
            }
        random.shuffle(possible_scores)
        return {
            str(score): pct
            for score, pct in zip([0, 100] + possible_scores[2:cols - 1],
                                  build_percentages(cols))
        }

    factor_scores = {'factor_{}'.format(i): build_scores() for i in factor_ids}
    factor_score_defs = {
        factor_id: {score: 'Rating of {}'.format(score)
                    for score in scores}
        for factor_id, scores in factor_scores.items()
    }

    securities = map(lambda i: 'Company {}'.format(i), range(1, 11))

    def load_top_bottom():
        random.shuffle(securities)
        return dict(
            top={rank: sec
                 for rank, sec in enumerate(securities[:5])},
            bottom={rank: sec
                    for rank, sec in enumerate(securities[5:])})

    def load_ret_summary(factor_scores):
        build_mean = lambda: random.randint(-100, 100) / 100.0
        build_std = lambda: random.randint(-500, 500) / 10000.0
        build_ir = lambda: random.randint(-2500, 2500) / 10000.0
        return [{
            'name': score,
            'sa_Mean': build_mean(),
            'total_Mean': build_mean(),
            'sa_STD': build_std(),
            'total_STD': build_std(),
            'sa_IR': build_ir(),
            'total_IR': build_ir(),
        } for score in sorted(map(int, factor_scores.keys()))]

    def load_returns(factor_scores):
        build_ret = lambda: random.randint(-1000, 1000) / 1000.0
        ret_data = {}
        suffix = 'totret_mtd_usd_mean_cumulative'
        ret_data[suffix] = {
            score: [{
                'date': pd.Timestamp('20091201'),
                'val': 0
            }]
            for score in factor_scores
        }
        suffix = 'totret_mtd_usd_sect_adj_mean_cumulative'
        ret_data[suffix] = {
            score: [{
                'date': pd.Timestamp('20091201'),
                'val': 0
            }]
            for score in factor_scores
        }

        curr_cum_rets = {
            suffix: {score: 0
                     for score in factor_scores}
            for suffix in ret_data
        }

        for p in pd.period_range('20100101', '20131231', freq='M'):
            date = pd.Timestamp(p.start_time).date()
            for score in factor_scores:
                for suffix in ret_data:
                    curr_cum_rets[suffix][score] += build_ret()
                    ret_data[suffix][score].append({
                        'date': date,
                        'val': curr_cum_rets[suffix][score]
                    })
        return ret_data

    def get_factor_data(factor_id):
        return dict_merge(
            dict(sectors=sectors.get(factor_id, {}),
                 scores=factor_scores.get(factor_id, {}),
                 score_defs=factor_score_defs.get(factor_id, {}),
                 ret_summary=load_ret_summary(factor_scores.get(factor_id,
                                                                {})),
                 returns=load_returns(factor_scores.get(factor_id, {}))),
            load_top_bottom())

    factors = {
        k: dict_merge(v, get_factor_data(k))
        for k, v in factors.items()
    }
    logger.info('cached {} factors'.format(len(factors)))

    return factors
Example #29
def get_anomaly_forensics(deviceID=None,
                          ID=None,
                          temp=None,
                          xrs=None,
                          user=None,
                          pageSize=None,
                          timestamp=None):
    try:
        data = {}
        es_orm = AnomaliesScoresORM()
        start = simple_datetime(timestamp, str, True)
        doc_id = get_doc_id(start, deviceID, ID, user)
        res = es_orm.get_obj_or_404(doc_id=doc_id)

        if res.get("scores", 1) != 0:
            if temp is None:
                if xrs == 'eas':
                    temp = get_anomaly(ID)
                elif xrs == 'ers':
                    temp = get_ers_models(deviceID)['params'][ID]
                else:
                    raise Exception

            data[ID] = temp
            data[ID]['scores'] = res.get('scores', -1)
            from_ = (pageSize - 1) * 5

            # ensure these exist even when the 'logs' branch below is taken,
            # otherwise the later `if index and query != {}` raises NameError
            index, query = None, {}
            if res['details'].get('logs'):
                size = pageSize * 5
                log_size = len(res["details"]["logs"])
                ids = res['details']['logs'][from_:size]
                res['details']['logs'] = get_logs_with_ids(ids)
                res['details']['size'] = log_size
            else:
                index = res['details'].pop('index', None)
                index_list = res['details'].pop('index_list', None)
                query = res['details'].pop('query', {})
                index = index_list or index

            if index and query != {}:
                size = 5
                res['details']['logs'], res['details'][
                    'size'] = get_logs_with_query(index, query, from_, size)

                # relies on short-circuit evaluation; if the anomalyID has no graphs this lookup raises
                if ('display' in temp and xrs == 'eas'
                        and 'agg_query' in temp['forensics']['graphs']['template'][0]):
                    aggs_querys = {}
                    for graph in temp['forensics']['graphs']['template']:
                        aggs_querys.update(graph['agg_query'])

                    _query = json.loads(query)
                    _query['aggs'] = aggs_querys

                    graphs_values = ElasticsearchORM(index).search(
                        query=_query)['aggregations']
                    remove_buckets(graphs_values)
                    res['graphs'] = graphs_values

                if ID in [
                        "23787c99-4b94-4514-a38e-f753b8f47e57",
                        "c91dd8fa-af7f-11e9-a5a5-144f8a006a90"
                ]:
                    for i in res['details']['logs']:
                        if "geoip" in i:
                            if i['geoip']['country_name'] in [
                                    "Taiwan", "Hong Kong", "Macao"
                            ]:
                                i['geoip']['country_name'] = "China " + i[
                                    'geoip']['country_name']

            dct = data[ID]['forensics']
            data[ID]['forensics'] = dict_merge(dct, res)

        # added by wendong, compatible with version 3.3
        if config.UCSS_VERSION == 3.3:
            for k, v in data.items():
                graphs = v["forensics"]["graphs"]
            _graphs = copy.deepcopy(graphs)
            for i in _graphs["template"]:
                if i["type"] == 1:
                    graphs["template"].remove(i)
                    continue
                elif i["type"] == 2:
                    graphs["histCnt"] = get_histCnt(graphs["histCnt"],
                                                    timestamp)
                elif i["type"] == 3:
                    graphs["timeseries"] = [
                        item["key_as_string"] for item in graphs["timeseries"]
                    ]

        return data

    except APIDataNotFound:
        logger.debug(
            "{}ScoresORM 404_id:{} start:{} deviceID ID:{} userId".format(
                xrs.upper(), doc_id, timestamp, ID))
        return {}

    except:
        logger.exception("{} {} {} {} {}\n".format(timestamp, deviceID, ID,
                                                   user, pageSize))
        return {}
Example #30
#!/usr/bin/python
#coding: utf8
#Author: chenyunyun<*****@*****.**>

from utils import dict_merge, list_to_dict

RESPONSE_CALLBACK = dict_merge(
    list_to_dict(
        'set',
        lambda r: bool(int(r[0]))
    ),
    list_to_dict(
        'get',
        lambda r: r[0]
    ),
)
class Response(object):
    def __init__(self, command_name, code, body=None):
        self.command_name = command_name
        self.code = code
        self.body = body

    @property
    def ok(self):
        return self.code == 'ok'

    @property
    def not_found(self):
        return self.code == 'not_found'

    @property
Example #31
    def frklize_config(self,
                       root,
                       configs,
                       meta_dict_parent,
                       root_base_dict,
                       level,
                       add_level=False):

        for c in configs:

            meta_dict = copy.deepcopy(meta_dict_parent)

            try:
                config_template = get_config(c, self.verify_ssl)
                try:
                    temp_flattened = merge_root(root)
                    # make sure at least empty dicts exist for all possible keys, otherwise template rendering might fail when
                    # trying something like {{ vars.key1 | default("DefaultValue") }}
                    for key in self.all_keys:
                        if key not in temp_flattened.keys():
                            temp_flattened[key] = {}
                    # set env unconditionally, not only when a key was missing
                    temp_flattened["env"] = os.environ
                    rtemplate = Environment(
                        loader=BaseLoader()).from_string(config_template)
                    config_string = rtemplate.render(**temp_flattened)
                    c = yaml.load(config_string)
                except Exception, e:
                    raise FrecklesRunError(
                        "Error parsing/rendering config file: {}".format(e),
                        None)

            except:
                # means this is not a 'url' config
                pass

            # if none of the known keys are used in the config,
            # we assume it's a 'default_key'-dict
            base_dict = c

            if isinstance(base_dict, basestring):
                base_dict = {
                    self.default_leaf_key: {
                        self.default_leaf_default_key: base_dict
                    }
                }

            if not any(x in base_dict.keys() for x in self.all_keys):
                if len(base_dict.keys()) != 1:
                    raise Exception(
                        "If not using the full config format, leaf nodes are only allowed to have one key: {}"
                        .format(base_dict))

                key = base_dict.keys()[0]
                if not isinstance(base_dict[key], dict):
                    base_dict[key] = {DEFAULT_FRKL_KEY_MARKER: base_dict[key]}

                if any(x in base_dict[key].keys() for x in self.all_keys):
                    temp_base_dict = base_dict[key]
                    dict_merge(
                        temp_base_dict, {
                            self.default_leaf_key: {
                                self.default_leaf_default_key: key
                            }
                        })
                    base_dict = temp_base_dict
                else:
                    temp_base_dict = {
                        self.default_leaf_key: {
                            self.default_leaf_default_key: key
                        }
                    }
                    dict_merge(
                        temp_base_dict,
                        {self.default_leaf_value_dict_key: base_dict[key]})
                    base_dict = temp_base_dict

            stem = base_dict.pop(self.stem_key, NO_STEM_INDICATOR)

            # we want to take along all the 'base' non-stem variables
            if level == 0:
                dict_merge(root_base_dict, base_dict)

            temp = {}
            dict_merge(temp, root_base_dict)
            dict_merge(temp, base_dict)
            base_dict = temp

            for key in base_dict.keys():
                if key not in self.all_keys:
                    raise Exception("Key '{}' not allowed (in {})".format(
                        key, base_dict))

                if add_level:
                    base_dict[key][FRKL_META_LEVEL_KEY] = level

                meta_dict.setdefault(key, []).append(base_dict[key])

            if not stem:
                continue
            elif stem == NO_STEM_INDICATOR:
                leaf = copy.deepcopy(meta_dict)
                leaf[LEAF_DICT] = base_dict
                root.append(leaf)
            elif isinstance(
                    stem, (list, tuple)) and not isinstance(stem, basestring):
                self.frklize_config(root, stem, meta_dict, {}, level + 1)
            else:
                raise Exception("Value of {} must be list (is: '{}')".format(
                    self.stem_key, type(stem)))
Example #32
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import logging
import sys
import threading
import io
import os
import caffe
import tempfile
import yaml

import utils
from network import Network

# Load configuration file
config = {"server": {"host": "localhost", "port": 8000, "mode": "cpu"}}
config_file = sys.argv[1]
with open(config_file) as f:
    utils.dict_merge(config, yaml.safe_load(f))

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')

if config["server"]["mode"] == "gpu":
    caffe.set_mode_gpu()

# Load networks
nets = {}
for name, ncfg in config["networks"].items():
    logging.info("Loading network %s" % name)
    nets[name] = Network(ncfg)


class Handler(BaseHTTPRequestHandler):