Example #1
    def resolve(msg):

        request = msg.get_request()
        results = msg.get_results()
        slots = []

        for k, v in results.items():
            for m in v['adm']:
                slot = {
                    'did': k,
                    'adm': m,
                }
                slots.append(slot)

        slots.sort(key=lambda x: x['adm']['price'], reverse=True)
        count = request['adunit']['param']['count']
        slots = slots[:count]

        floor = request['adunit']['floor']
        slots = list(filter(lambda x: x['adm']['price'] >= floor, slots))

        players = set(results.keys())
        winners = set([i['did'] for i in slots])
        losers = players - winners

        result = {
            'adm': [dicttoolz.keyfilter(lambda x: x not in ['price'], i['adm']) for i in slots],
            'is_test': 0,
        }

        msg.set_decision(winners, losers)
        msg.set_response(result)
Example #2
    def _map_event_to_dict(_include, sql_event):
        """Map event to a dictionary to be sent as an API response.

        In this implementation, the goal is to restructure event data as if it
        were returned by elasticsearch. This restructuring is needed because
        the API used elasticsearch as its backend in the past, and the client
        implementation still expects data in the same shape that elasticsearch
        would return.

        :param _include:
            Projection used to get records from database
        :type _include: list(str)
        :param sql_event: Event data returned when SQL query was executed
        :type sql_event: :class:`sqlalchemy.util._collections.result`
        :returns: Event as it would have been returned by elasticsearch
        :rtype: dict(str)

        """
        event = {
            attr: getattr(sql_event, attr)
            for attr in sql_event.keys()
        }
        event['@timestamp'] = event['timestamp']
        del event['reported_timestamp']

        event['message'] = {
            'text': event['message']
        }

        if 'node_instance_id' in event:
            del event['node_instance_id']

        context_fields = [
            'deployment_id',
            'execution_id',
            'workflow_id',
            'operation',
            'node_id',
            'node_name',
        ]
        event['context'] = {
            field: event[field]
            for field in context_fields
        }
        for field in context_fields:
            del event[field]

        if event['type'] == 'cloudify_event':
            event['message']['arguments'] = None
            del event['logger']
            del event['level']
        elif event['type'] == 'cloudify_log':
            del event['event_type']

        # Keep only keys passed in the _include request argument
        # TBD: Do the projection at the database level
        if _include is not None:
            event = dicttoolz.keyfilter(lambda key: key in _include, event)

        return event
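The projection at the end of the function above is just a keyfilter over the requested field names. A minimal, standalone sketch of that step (the helper name and sample data are illustrative, not part of the original module):

from toolz import dicttoolz

def project(event, _include):
    # Keep only the keys listed in _include; pass the event through unchanged
    # when no projection was requested.
    if _include is None:
        return event
    return dicttoolz.keyfilter(lambda key: key in _include, event)

# project({'@timestamp': 't0', 'message': {'text': 'hi'}, 'type': 'cloudify_log'},
#         ['@timestamp', 'message'])
# -> {'@timestamp': 't0', 'message': {'text': 'hi'}}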
Example #3
 def is_satisfied(node: Node) -> bool:
     f, args, _ = node
     required = keyfilter(lambda x: x != 'return', args)  # rettype = args['return']
     get_ret = lambda x: x[1]['return']  # type: Callable[[Node], type]
     acc_rets = list(map(get_ret, acc))
     satisfied = all(t in acc_rets for t in required.values())
     return satisfied
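A plain-dict sketch of the parameter/return split above, assuming the node's argument map has the shape of a function's __annotations__ dict (sample types are illustrative):

from toolz.dicttoolz import keyfilter

args = {'x': int, 'y': str, 'return': bool}
required = keyfilter(lambda k: k != 'return', args)  # drop the return-type entry
# required == {'x': int, 'y': str}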
Example #4
def add_to_cloud_metrics(ttl, region, group_metrics, num_tenants, config,
                         log=None, _print=False):
    """
    Add total number of desired, actual and pending servers of a region
    to Cloud metrics.

    :param str region: which region's metric is collected
    :param group_metrics: List of :obj:`GroupMetric`
    :param int num_tenants: total number of tenants
    :param dict config: Config json dict containing convergence tenants info
    :param log: Optional logger
    :param bool _print: Should it print activity on stdout? Useful when running
        as a script

    :return: `Effect` with None
    """
    epoch = yield Effect(Func(time.time))
    metric_part = {'collectionTime': int(epoch * 1000),
                   'ttlInSeconds': ttl}

    tenanted_metrics, total = calc_total(group_metrics)
    if log is not None:
        log.msg(
            'total desired: {td}, total actual: {ta}, total pending: {tp}',
            td=total.desired, ta=total.actual, tp=total.pending)
    if _print:
        print('total desired: {}, total actual: {}, total pending: {}'.format(
            total.desired, total.actual, total.pending))

    metrics = [('desired', total.desired), ('actual', total.actual),
               ('pending', total.pending), ('tenants', num_tenants),
               ('groups', len(group_metrics))]
    for tenant_id, metric in sorted(tenanted_metrics.items()):
        metrics.append(("{}.desired".format(tenant_id), metric.desired))
        metrics.append(("{}.actual".format(tenant_id), metric.actual))
        metrics.append(("{}.pending".format(tenant_id), metric.pending))

    # convergence tenants desired and actual
    conv_tenants = keyfilter(
        partial(tenant_is_enabled,
                get_config_value=lambda k: get_in([k], config)),
        tenanted_metrics)
    conv_desired = sum(m.desired for m in conv_tenants.itervalues())
    conv_actual = sum(m.actual for m in conv_tenants.itervalues())
    metrics.extend(
        [("conv_desired", conv_desired), ("conv_actual", conv_actual),
         ("conv_divergence", conv_desired - conv_actual)])

    data = [merge(metric_part,
                  {'metricValue': value,
                   'metricName': '{}.{}'.format(region, metric)})
            for metric, value in metrics]
    yield service_request(ServiceType.CLOUD_METRICS_INGEST,
                          'POST', 'ingest', data=data, log=log)
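The conv_tenants selection above pairs keyfilter with functools.partial so the predicate receives each tenant id as its first argument. A self-contained sketch with a stand-in predicate (tenant_is_enabled here is illustrative, not the original implementation):

from functools import partial
from toolz.dicttoolz import keyfilter

def tenant_is_enabled(tenant_id, get_config_value):
    # stand-in predicate: a tenant is "enabled" if it is listed in the config
    return tenant_id in (get_config_value('convergence-tenants') or [])

config = {'convergence-tenants': ['t1', 't3']}
tenanted_metrics = {'t1': 10, 't2': 20, 't3': 30}
conv_tenants = keyfilter(
    partial(tenant_is_enabled, get_config_value=lambda k: config.get(k)),
    tenanted_metrics)
# conv_tenants == {'t1': 10, 't3': 30}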
Example #5
def get_todays_scaling_groups(convergence_tids, fname):
    """
    Get scaling groups from tenants that are enabled up to today
    """
    groups = yield Effect(GetAllGroups())
    non_conv_tenants = set(groups.keys()) - set(convergence_tids)
    last_tenants_len, last_date = yield get_last_info(fname)
    now = yield Effect(Func(datetime.utcnow))
    tenants, last_tenants_len, last_date = get_todays_tenants(
        non_conv_tenants, now, last_tenants_len, last_date)
    yield update_last_info(fname, last_tenants_len, last_date)
    yield do_return(
        keyfilter(lambda t: t in set(tenants + convergence_tids), groups))
Example #6
 def test_success(self):
     """
     Returns today's scaling groups based on the number of tenants fetched
     since last time. Updates the current fetch in the file
     """
     seq = [
         (GetAllGroups(), const(self.groups)),
         (ReadFileLines("file"), const(["2", "0.0"])),
         (Func(datetime.utcnow), const(datetime(1970, 1, 2))),
         (WriteFileLines("file", [7, 86400.0]), noop)
     ]
     r = perform_sequence(seq, get_todays_scaling_groups(["t1"], "file"))
     self.assertEqual(
         r,
         keyfilter(lambda k: k in ["t{}".format(i) for i in range(1, 9)],
                   self.groups))
Example #7
 def ruleHitsToSeverityCountMap(self, rule_hit_map):
     """
     In a rule -> hitlist mapping, count the total number of hits above or at a given severity
     level and return a cumulative dictionary
     """
     # Create a map severity -> count
     severity_counts = collections.defaultdict(int)
     for rule, hits in rule_hit_map.items():
         severity_counts[rule.severity] += len(hits)
     # Create string severity -> count map
     above_severity = lambda sev: sum(keyfilter(lambda k: k >= sev, severity_counts).values())
     return {"hits": above_severity(Severity.standard),
             "warnings": above_severity(Severity.warning),
             "errors": above_severity(Severity.dangerous),
             "infos": above_severity(Severity.info),
             "notices": above_severity(Severity.notice)}
Example #8
 def test_error_writing(self):
     """
     Logs and ignores error writing to the file
     """
     seq = [
         (GetAllGroups(), const(self.groups)),
         (ReadFileLines("file"), const(["2", "0.0"])),
         (Func(datetime.utcnow), const(datetime(1970, 1, 2))),
         (WriteFileLines("file", [7, 86400.0]),
          lambda i: raise_(IOError("bad"))),
         (LogErr(mock.ANY, "error updating number of tenants", {}), noop)
     ]
     r = perform_sequence(seq, get_todays_scaling_groups(["t1"], "file"))
     self.assertEqual(
         r,
         keyfilter(lambda k: k in ["t{}".format(i) for i in range(1, 9)],
                   self.groups))
Example #9
 def test_no_last_info(self):
     """
     Returns the first 5 non-convergence tenants if the last info could not
     be fetched from the file
     """
     seq = [
         (GetAllGroups(), const(self.groups)),
         (ReadFileLines("file"), lambda i: raise_(IOError("e"))),
         (LogErr(mock.ANY, "error reading previous number of tenants", {}),
          noop),
         (Func(datetime.utcnow), const(datetime(1970, 1, 2))),
         (WriteFileLines("file", [5, 86400.0]), noop)
     ]
     r = perform_sequence(seq, get_todays_scaling_groups(["t1"], "file"))
     self.assertEqual(
         r,
         keyfilter(lambda k: k in ["t{}".format(i) for i in range(1, 7)],
                   self.groups))
Example #10
 def __call__(self, event_dict):
     """
     Process event and push it to Cloud feeds
     """
     if not event_dict.get("cloud_feed", False):
         return
     # Do further logging without cloud_feed to avoid coming back here
     # in infinite recursion
     log_keys = keyfilter(lambda k: k not in ("message", "cloud_feed"), event_dict)
     log = self.log.bind(system="otter.cloud_feed", cf_msg=event_dict["message"][0], event_data=log_keys)
     try:
         eff = self.add_event(event_dict, self.tenant_id, self.region, log)
     except UnsuitableMessage as me:
         log.err(None, "cf-unsuitable-message", unsuitable_message=me.unsuitable_message)
     else:
         return perform(self.get_disp(self.reactor, self.authenticator, log, self.service_configs), eff).addErrback(
             log.err, "cf-add-failure"
         )
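The log_keys step above strips the keys that must not be re-logged before binding the rest as structured data. A standalone sketch with sample event data (field names are illustrative):

from toolz.dicttoolz import keyfilter

event_dict = {'message': ('convergence done',), 'cloud_feed': True,
              'tenant_id': 't1', 'region': 'ord'}
log_keys = keyfilter(lambda k: k not in ('message', 'cloud_feed'), event_dict)
# log_keys == {'tenant_id': 't1', 'region': 'ord'}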
Example #11
def split_execute_convergence(event, max_length=event_max_length):
    """
    Try to split execute-convergence event out into multiple events if there
    are too many CLB nodes, too many servers, or too many steps.

    The problem is mainly the servers, since they take up the most space.

    Experimentally determined that logs probably get cut off at around 75k
    characters, so we limit it to 50k.

    :param dict event: The 'execute-convergence' type event dictionary to split
    :param int max_length: The maximum length of the entire JSON-formatted
        dictionary.

    :return: `list` of `tuple` of (`dict`, `str`).  The `dict`s in the tuples
        represent the split-up event dicts, and the `str` is the format string
        for each.  If the event does not need to be split, the list will only
        have one tuple.
    """
    message = "Executing convergence"
    if _json_len(event) <= max_length:
        return [(event, message)]

    events = [(event, message)]
    large_things = sorted(('servers', 'lb_nodes'),
                          key=compose(_json_len, event.get),
                          reverse=True)

    # simplified event which serves as a base for the split out events
    base_event = keyfilter(
        lambda k: k not in ('desired', 'servers', 'lb_nodes', 'steps'),
        event)

    for thing in large_things:
        split_up_events = split(
            assoc(base_event, thing), event[thing], max_length,
            _json_len)
        events.extend([(e, message) for e in split_up_events])
        del event[thing]
        if _json_len(event) <= max_length:
            break

    return events
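The base_event step above drops the potentially large keys with keyfilter and then re-attaches one of them per split-out event with assoc. A small sketch of those two calls (sample keys are illustrative):

from toolz.dicttoolz import assoc, keyfilter

event = {'servers': ['s1', 's2'], 'lb_nodes': [], 'steps': [],
         'desired': 2, 'run_id': 'abc'}
base_event = keyfilter(
    lambda k: k not in ('desired', 'servers', 'lb_nodes', 'steps'), event)
# base_event == {'run_id': 'abc'}
chunk = assoc(base_event, 'servers', ['s1'])
# chunk == {'run_id': 'abc', 'servers': ['s1']}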
Example #12
    def call_element_func(elval, cdepth, base_coords):
        context_kwds = dtz.keyfilter(lambda k: k in XLocation._fields,
                                     lasso._asdict())
        context_kwds['base_coords'] = base_coords
        context = XLocation(**context_kwds)
        try:
            proced, res_lasso = element_func(ranger, lasso, context, elval,
                                             *args, **kwds)
        except Exception as ex:
            msg_args = (elval, context, ex)
            raise ValueError("Value(%r) at %s: \n    %s" % msg_args)

        if proced:
            if not isinstance(res_lasso, Lasso):
                res_lasso = lasso._replace(values=res_lasso)

            for call_spec in sub_call_specs:
                res_lasso = ranger.make_call(res_lasso, *call_spec)
            elval = res_lasso and res_lasso.values

        return proced, elval
Example #13
    def _map_event_to_dict(_include, sql_event):
        """Map event to a dictionary to be sent as an API response.

        In this implementation, the goal is to return a flat structure as
        opposed to the nested one that was returned by Elasticsearch in the
        past (see v1 implementation for more information).

        :param _include:
            Projection used to get records from database
        :type _include: list(str)
        :param sql_event: Event data returned when SQL query was executed
        :type sql_event: :class:`sqlalchemy.util._collections.result`
        :returns: Event as it would have been returned by elasticsearch
        :rtype: dict(str)

        """
        event = {
            attr: getattr(sql_event, attr)
            for attr in sql_event.keys()
        }

        for unused_field in Events.UNUSED_FIELDS:
            if unused_field in event:
                del event[unused_field]

        if event['type'] == 'cloudify_event':
            del event['logger']
            del event['level']
        elif event['type'] == 'cloudify_log':
            del event['event_type']

        # Keep only keys passed in the _include request argument
        # TBD: Do the projection at the database level
        if _include is not None:
            event = dicttoolz.keyfilter(lambda key: key in _include, event)

        return event
Example #14
    def _yield_configs_and_defaults(self, config, search_terms, merged: bool,
                                    ciphered: bool):
        verbose = self.verbose
        get_classes = (self._classes_inc_parents
                       if verbose else self._classes_with_config_traits)
        all_classes = list(get_classes(self.all_app_configurables()))

        ## Merging needs to visit all hierarchy.
        own_traits = not (verbose or merged)

        search_map = prepare_search_map(all_classes, own_traits)

        if ciphered:
            from . import crypto

            def ciphered_filter(mapval):
                _, trait = mapval
                if isinstance(trait, crypto.Cipher):
                    return mapval

            search_map = dtz.valfilter(ciphered_filter, search_map)

        if search_terms:
            matcher = prepare_matcher(search_terms, self.regex)
            search_map = dtz.keyfilter(matcher, search_map)

        items = search_map.items()
        if self.sort:
            items = sorted(items)  # Sort by class-name (traits always sorted).

        classes_configured = {}
        for key, (cls, trait) in items:
            if self.list:
                yield key
                continue
            if not trait:
                ## Not --verbose and class not owning traits.
                continue

            clsname, trtname = key.split('.')

            ## Print own traits only, even when "merge" visits all.
            #
            sup = super(cls, cls)
            if not verbose and getattr(sup, trtname, None) is trait:
                continue

            ## Instantiate classes once, to merge values.
            #
            obj = classes_configured.get(cls)
            if obj is None:
                try:
                    ## Exceptional rule for Project-zygote.
                    #  TODO: delete when project rule is gone.
                    #
                    if cls.__name__ == 'Project':
                        cls.new_instance('test', None, config)
                    else:
                        obj = cls(config=config)
                except Exception as ex:
                    self.log.warning(
                        "Falied initializing class '%s' due to: %r", clsname,
                        ex)

                    ## Assign config-values as dummy-object's attributes.
                    #  Note: no merging of values now!
                    #
                    class C:
                        pass

                    obj = C()
                    obj.__dict__ = dict(config[clsname])
                classes_configured[cls] = obj

                ## Print 1 class-line for all its traits.
                #
                base_classes = ', '.join(p.__name__ for p in cls.__bases__)
                yield '%s(%s)' % (clsname, base_classes)

            if merged:
                try:
                    val = getattr(obj, trtname, '??')
                except trt.TraitError as ex:
                    self.log.warning("Cannot merge '%s' due to: %r", trtname,
                                     ex)
                    val = "<invalid due to: %s>" % ex
            else:
                val = repr(trait.default())
            yield '  +--%s = %s' % (trtname, val)
Example #15
def test_keyfilter():
    assert keyfilter(iseven, {1: 2, 2: 3}) == {2: 3}
Example #16
def add_to_cloud_metrics(ttl,
                         region,
                         group_metrics,
                         num_tenants,
                         config,
                         log=None,
                         _print=False):
    """
    Add total number of desired, actual and pending servers of a region
    to Cloud metrics.

    :param str region: which region's metric is collected
    :param group_metrics: List of :obj:`GroupMetric`
    :param int num_tenants: total number of tenants
    :param dict config: Config json dict containing convergence tenants info
    :param log: Optional logger
    :param bool _print: Should it print activity on stdout? Useful when running
        as a script

    :return: `Effect` with None
    """
    epoch = yield Effect(Func(time.time))
    metric_part = {'collectionTime': int(epoch * 1000), 'ttlInSeconds': ttl}

    tenanted_metrics, total = calc_total(group_metrics)
    if log is not None:
        log.msg('total desired: {td}, total actual: {ta}, total pending: {tp}',
                td=total.desired,
                ta=total.actual,
                tp=total.pending)
    if _print:
        print('total desired: {}, total actual: {}, total pending: {}'.format(
            total.desired, total.actual, total.pending))

    metrics = [('desired', total.desired), ('actual', total.actual),
               ('pending', total.pending), ('tenants', num_tenants),
               ('groups', len(group_metrics))]
    for tenant_id, metric in sorted(tenanted_metrics.items()):
        metrics.append(("{}.desired".format(tenant_id), metric.desired))
        metrics.append(("{}.actual".format(tenant_id), metric.actual))
        metrics.append(("{}.pending".format(tenant_id), metric.pending))

    # convergence tenants desired and actual
    conv_tenants = keyfilter(
        partial(tenant_is_enabled,
                get_config_value=lambda k: get_in([k], config)),
        tenanted_metrics)
    conv_desired = sum(m.desired for m in conv_tenants.itervalues())
    conv_actual = sum(m.actual for m in conv_tenants.itervalues())
    metrics.extend([("conv_desired", conv_desired),
                    ("conv_actual", conv_actual),
                    ("conv_divergence", conv_desired - conv_actual)])

    data = [
        merge(metric_part, {
            'metricValue': value,
            'metricName': '{}.{}'.format(region, metric)
        }) for metric, value in metrics
    ]
    yield service_request(ServiceType.CLOUD_METRICS_INGEST,
                          'POST',
                          'ingest',
                          data=data,
                          log=log)
Example #17
 def keyfilter(self, f: Callable[[A], bool]) -> 'Map[A, B]':
     return Map(dicttoolz.keyfilter(f, self))
Example #18
def pick(whitelist, d):
    return keyfilter(lambda k: k in whitelist, d)
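Possible usage of the pick helper above (sample data for illustration):

whitelisted = pick(['a', 'c'], {'a': 1, 'b': 2, 'c': 3})
# whitelisted == {'a': 1, 'c': 3}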
Example #19
def test_keyfilter():
    assert keyfilter(iseven, {1: 2, 2: 3}) == {2: 3}
Example #20
 def test_keyfilter(self):
     D, kw = self.D, self.kw
     assert keyfilter(iseven, D({1: 2, 2: 3}), **kw) == D({2: 3})
Example #21
 def keyfilter(self, f: Callable[[A], bool]) -> 'Map[A, B]':
     return Map(dicttoolz.keyfilter(f, self))
Example #22
def _remove_timestamp_from_plan(item):
    notstamp = lambda k: k != 'timestamp'
    item = dtz.keyfilter(notstamp, item)
    item['flag'] = dtz.keyfilter(notstamp, item['flag'])

    return item
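Worked example of the helper above on a plain dict (sample data for illustration): the 'timestamp' key is removed both at the top level and inside the nested 'flag' mapping.

# _remove_timestamp_from_plan(
#     {'timestamp': 1, 'value': 2, 'flag': {'timestamp': 3, 'state': 'ok'}})
# -> {'value': 2, 'flag': {'state': 'ok'}}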
Example #23
from itertools import filterfalse
from toolz.dicttoolz import keyfilter, valfilter, itemfilter


def is_even(x):
    return x % 2 == 0


def both_are_even(x):
    k, v = x
    return is_even(k) and is_even(v)


print(list(filterfalse(is_even, range(10))))
# [1, 3, 5, 7, 9]

print(list(keyfilter(is_even, {1: 2, 2: 3, 3: 4, 4: 5, 5: 6})))
# [2, 4]  (keyfilter returns a dict; list() over a dict yields its keys)

print(list(valfilter(is_even, {1: 2, 2: 3, 3: 4, 4: 5, 5: 6})))
# [1, 3, 5]

print(list(itemfilter(both_are_even, {1: 5, 2: 4, 3: 3, 4: 2, 5: 1})))
# [2, 4]
Example #24
def enrich_dict_immutable(dic):
    is_positive = lambda v: v >= 0
    filtered = dicttoolz.keyfilter(is_positive, dic)
    return dicttoolz.itemmap(_even_bar, filtered)
Example #25
 def test_keyfilter(self):
     D, kw = self.D, self.kw
     assert keyfilter(iseven, D({1: 2, 2: 3}), **kw) == D({2: 3})
Example #26
def choose_kwargs(from_, which):
    return keyfilter(lambda x: x in which, valfilter(lambda x: x is not None, from_))
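Possible usage of choose_kwargs (sample data for illustration): None-valued entries are dropped first by valfilter, then only the keys named in which survive the keyfilter.

# choose_kwargs({'a': 1, 'b': None, 'c': 3}, ['a', 'b'])
# -> {'a': 1}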