Example 1
    def add_ranking(self, larger, smaller):
        """ Decide that 'larger' is greater than 'smaller' """
        key = pset([larger, smaller])

        slf = self

        if slf.cmp_items(smaller, larger) > 0:
            raise ValueError(f"{smaller} is already greater than {larger}")

        if key not in slf._remaining_pairs:
            return slf

        # Set larger as being greater than smaller
        slf = slf.set(_remaining_pairs=slf._remaining_pairs.remove(key))
        if larger not in slf._greater_than:
            slf = slf.set(_greater_than=slf._greater_than.set(larger, pset()))

        slf = slf.set(
            _greater_than=slf._greater_than.set(
                larger, slf._greater_than[larger].add(smaller)),
            _all_items=slf._all_items.add(larger).add(smaller),
        )

        # Set larger as being greater than everything which smaller is greater than
        for item in slf.everying_less_than(larger):
            slf = slf.add_ranking(larger, item)

        return slf
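Two pset properties carry this method: psets are hashable, so pset([larger, smaller]) works as an unordered key, and every update returns a new value, so each step rebinds slf instead of mutating it. A minimal sketch of the same update pattern, with a hypothetical Rankings record standing in for the real class:

from pyrsistent import PClass, PSet, field, pset

class Rankings(PClass):
    # hypothetical stand-in for the record updated above
    greater_than = field(type=PSet, initial=pset())

r1 = Rankings()
r2 = r1.set(greater_than=r1.greater_than.add(('a', 'b')))
assert r1.greater_than == pset()          # the original record is untouched
assert ('a', 'b') in r2.greater_than
assert pset(['a', 'b']) == pset(['b', 'a'])  # element order is irrelevant to the key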
Example 2
 def test_record_recently_converged(self):
     """
     After converging, the group is added to ``recently_converged`` -- but
     *before* being removed from ``currently_converging``, to avoid race
     conditions.
     """
     currently = Reference(pset())
     recently = Reference(pmap())
     remove_from_currently = match_func(pset([self.group_id]), pset([]))
     sequence = [
         (ReadReference(currently), lambda i: pset()),
         add_to_currently(currently, self.group_id),
         (('ec', self.tenant_id, self.group_id, 3600),
          lambda i: (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE)),
         (Func(time.time), lambda i: 100),
         add_to_recently(recently, self.group_id, 100),
         (ModifyReference(currently, remove_from_currently), noop),
         (DeleteNode(path='/groups/divergent/tenant-id_g1',
                     version=self.version), noop),
         (Log('mark-clean-success', {}), noop)
     ]
     eff = converge_one_group(
         currently, recently, self.tenant_id, self.group_id, self.version,
         3600, execute_convergence=self._execute_convergence)
     perform_sequence(sequence, eff)
Example 3
def read_schedule_json(obj):
    # reconstruct schedule information from json
    agents = pvector(obj['agents'])
    costs = pmap(obj['costs'])
    times = pset(map(as_timerange,obj['times']))
    forward = pmap({a: pmap({as_timerange(t): int(t['mid'])
                             for t in obj['meetings'][a] if t['mid'] != -1})
                    for a in agents})

    mids = pset([mid for ts in forward.values() for mid in ts.values()])

    # remove the mid 0, which marks an empty meeting (for unavailable times)
    if 0 in mids:
      mids = mids.remove(0)

    # update meetings and their requirements
    requirements = pmap({int(mid): pmap({r['type']: read_jsonable_requirement(r)
                                        for r in rs.values()})
                         for mid,rs in obj['requirements'].iteritems()})

    schedule = Schedule(agents=agents,times=times,forward=forward,
                        requirements=requirements,costs=costs)

    new_unsatisfied = schedule.unsatisfied
    for mid,rs in schedule.unsatisfied.iteritems():
      for rtype in rs:
        r = schedule.requirements[mid][rtype]
        if r.satisfied(schedule):
          new_unsatisfied = _mark_satisfied(new_unsatisfied,r)
        elif not r.satisfiable(schedule):
          raise RequirementException(r)
    schedule.unsatisfied = new_unsatisfied

    return schedule
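Where no custom reshaping is needed, pyrsistent's freeze does this JSON-to-persistent conversion recursively (dicts to pmaps, lists to pvectors, sets to psets). A small sketch, independent of the schedule code above:

from pyrsistent import freeze, pmap, pvector

obj = {'agents': ['ann', 'bob'], 'costs': {'ann': 1}}
assert freeze(obj) == pmap({'agents': pvector(['ann', 'bob']),
                            'costs': pmap({'ann': 1})})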
Example 4
 def test_bulk_remove_from_rcv3(self):
     """Logs :obj:`BulkRemoveFromRCv3`."""
     adds = pbag([
         BulkRemoveFromRCv3(lb_node_pairs=pset([
             ('lb1', 'node1'), ('lb1', 'node2'),
             ('lb2', 'node2'), ('lb2', 'node3'),
             ('lb3', 'node4')])),
         BulkRemoveFromRCv3(lb_node_pairs=pset([
             ('lba', 'nodea'), ('lba', 'nodeb'),
             ('lb1', 'nodea')]))
     ])
     self.assert_logs(adds, [
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lb1', 'servers': ['node1', 'node2', 'nodea'],
                     'cloud_feed': True}),
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lb2', 'servers': ['node2', 'node3'],
                     'cloud_feed': True}),
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lb3', 'servers': ['node4'],
                     'cloud_feed': True}),
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lba', 'servers': ['nodea', 'nodeb'],
                     'cloud_feed': True})
     ])
Example 5
def test_is_disjoint():
    s1 = pset([1, 2, 3])
    s2 = pset([3, 4, 5])
    s3 = pset([4, 5])

    assert not s1.isdisjoint(s2)
    assert s1.isdisjoint(s3)
Example 6
def add_server_to_lb(server, description, load_balancer):
    """
    Add a server to a load balancing entity as described by `description`.

    :ivar server: The server to be added
    :type server: :class:`NovaServer`

    :ivar description: The description of the load balancer and how to add
        the server to it.
    :type description: :class:`ILBDescription` provider
    """
    if isinstance(description, CLBDescription):
        if server.servicenet_address:
            if load_balancer is None:
                return fail_convergence(
                    CLBHealthInfoNotFound(description.lb_id))
            if load_balancer.health_monitor:
                description = assoc_obj(description,
                                        condition=CLBNodeCondition.DRAINING)
            return AddNodesToCLB(
                lb_id=description.lb_id,
                address_configs=pset(
                    [(server.servicenet_address, description)]))
    elif isinstance(description, RCv3Description):
        return BulkAddToRCv3(lb_node_pairs=pset(
            [(description.lb_id, server.id)]))
Example 7
 def wrap_deps(self, dep_vars, expr):
     depend = pset()
     fullpath = (self.path[0], tuple(self.path[1]), expr.calc)
     for name in dep_vars:
         name = self.resolve(name)
         v = self.scope.get_expr(name)
         if type(v) != Expr:
             self.scope[name] = Expr(cache=self.scope[name],
                                     path=name,
                                     rdepend=pset([fullpath]),
                                     scope=self.scope)
         elif fullpath not in v.rdepend:
             # Break circularity
             logger.debug("Adding rdepend %s to %s" % (fullpath, name))
             self.scope[name] = v.set(rdepend=v.rdepend.add(fullpath))
             assert (self.scope.get_expr(name).rdepend == v.rdepend.add(
                 fullpath))
         depend.add(name)
     return Expr(expr=expr,
                 cache=self.cache,
                 path=self.path,
                 depend=depend,
                 dep_vars=dep_vars,
                 rdepend=self.rdepend,
                 scope=self.scope)
Example 8
 def test_add_nodes_to_clbs(self):
     """Logs :obj:`AddNodesToCLB`."""
     adds = pbag([
         AddNodesToCLB(lb_id='lbid1',
                       address_configs=pset([('10.0.0.1',
                                              _clbd('lbid1', 1234))])),
         AddNodesToCLB(lb_id='lbid1',
                       address_configs=pset([('10.0.0.2',
                                              _clbd('lbid1', 1235))])),
         AddNodesToCLB(lb_id='lbid2',
                       address_configs=pset([('10.0.0.1',
                                              _clbd('lbid2', 4321))]))
     ])
     self.assert_logs(adds, [
         Log('convergence-add-clb-nodes',
             fields={
                 'lb_id': 'lbid1',
                 'addresses': ['10.0.0.1:1234', '10.0.0.2:1235'],
                 'cloud_feed': True
             }),
         Log('convergence-add-clb-nodes',
             fields={
                 'lb_id': 'lbid2',
                 'addresses': ['10.0.0.1:4321'],
                 'cloud_feed': True
             })
     ])
Example 9
def char(c):
    if isinstance(c, bytes):
        return char(pset(c))
    if isinstance(c, int):
        return char(pset((c, )))
    assert isinstance(c, PSet)
    return Characters(c)
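A note on the first two branches: in Python 3, iterating bytes yields ints, so both normalize to the same pset of code points:

from pyrsistent import pset

assert pset(b'ab') == pset((97, 98))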
Example 10
  def __init__(self,agents=pvector([]),times=pset([]),forward=pmap({}),
               costs=pmap({}),requirements=pmap({}),backward=None,
               unsatisfied=None):
    self.cache = {}

    #### schedule bounds
    self.agents = agents  # vector of valid agents
    self.times = times  # set of valid times

    #### the schedule itself
    self.forward = forward  # agents -> times -> meeting ids

    # mids -> meeting (time, agents)
    if backward is None: self.backward = _backward_from_forward(self.forward)
    else: self.backward = backward

    #### schedule constraints
    self.requirements = requirements  # mids -> requirement type -> requirement

    # mids -> requirement type
    if unsatisfied is None:
      self.unsatisfied = pmap({mid: pset(self.requirements[mid].keys())
                               for mid in self.requirements.keys()})
    else: self.unsatisfied = unsatisfied

    self.costs = costs  # map from agents to meeting time costs functions
Example 11
 def _extract_ids(t):
     if isinstance(t, TestSuite):
         result = pset()
         for sub_tests in t:
             result = result | _extract_ids(sub_tests)
         return result
     else:
         return pset([t.id()])
Example 12
 def reflexive_transitive_closure(self):
     closure = self.R | (pset(map(lambda m: (m, m), self.M)))
     while True:
         closure_until_now = closure | pset(
             (x, w) for x, y in closure for q, w in closure if q == y)
         if closure_until_now == closure: break
         closure = closure_until_now
     return Relation(M=self.M, R=closure_until_now)
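The same fixed-point loop works on bare psets; a standalone sketch on a toy relation:

from pyrsistent import pset

M = pset([1, 2, 3])
R = pset([(1, 2), (2, 3)])
closure = R | pset((m, m) for m in M)   # seed with the reflexive pairs
while True:
    step = closure | pset((x, w) for x, y in closure
                          for q, w in closure if q == y)
    if step == closure:
        break
    closure = step
assert (1, 3) in closure and (3, 3) in closure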
Example 13
def test_aug_assign_union():
    "Test left |= right"

    left = pset([1, 2, 3])
    right = pset([3, 2, 4])

    actual = union(left, right)

    assert actual == pset([1, 2, 3, 4])
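Because psets are immutable, left |= right rebinds the name to a freshly built union; any other reference to the original set is unaffected:

from pyrsistent import pset

left = pset([1, 2, 3])
alias = left
left |= pset([3, 2, 4])
assert left == pset([1, 2, 3, 4])
assert alias == pset([1, 2, 3])   # the original set is unchanged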
Example 14
class ApplicationInitTests(make_with_init_tests(
    record_type=Application,
    kwargs=dict(
        name=u'site-example.com', image=DockerImage.from_string(u"image"),
        ports=pset(), volume=None, environment=pmap({}),
        links=pset(), restart_policy=RestartAlways(),
    ),
    expected_defaults={'links': pset(), 'restart_policy': RestartNever()},
)):
    """
Example 15
    def list_directory(self, path):
        if self.is_file(path=path):
            raise exceptions.NotADirectory(path)
        elif path not in self._tree:
            raise exceptions.FileNotFound(path)

        # FIXME: Inefficient
        return pset(child.basename() for child in self._tree[path]) | pset(
            subdirectory.basename() for subdirectory in self._tree
            if subdirectory.parent() == path and subdirectory != path)
Example 16
def test_is_immutable():
    s1 = pset([1])
    s2 = s1.add(2)

    assert s1 == pset([1])
    assert s2 == pset([1, 2])

    s3 = s2.remove(1)
    assert s2 == pset([1, 2])
    assert s3 == pset([2])
Example 17
 def test_retries_uppercase(self):
     """
     If bulk adding is called with upper case LB ID and it only returns
     "lb node pair is already member" error with 409 then other pairs
     are retried
     """
     lbs = self.lbs[:]
     lbs[0] = lbs[0].upper()
     pairs = pset(zip(lbs, self.nodes))
     retried_data = r._sorted_data(pairs - pset([(lbs[0], self.nodes[0])]))
     errors = {"errors": [node_already_member(lbs[0], self.nodes[0])]}
     self._check_retries(pairs, self.data, retried_data, errors)
Example 18
 def _test_rcv3_step(self, step_class):
     steps = [
         step_class(lb_node_pairs=pset([("l1", "s1"), ("l1", "s2")])),
         step_class(lb_node_pairs=pset([("l2", "s1")])),
         step_class(lb_node_pairs=pset([("l1", "s3"), ("l2", "s3")]))
     ]
     self.assertEqual(
         optimize_steps(steps),
         pbag([
             step_class(lb_node_pairs=pset([("l1", "s1"), (
                 "l1", "s2"), ("l2", "s1"), ("l1", "s3"), ("l2", "s3")]))
         ]))
Example 19
    def _pods_to_routes(self, old, pods):
        """
        Extract the addressing information from some pods.

        :param old: The old routing information.  Used to log route changes.

        :param list[v1.Pod] pods: Some pods from which routing information can
            be extracted.

        :return: A mapping of the new routing information deriving solely from
            ``pods``.
        """
        def _introducer_tub(pod):
            return pod.metadata.annotations[
                u"leastauthority.com/introducer-tub-id"]

        def _storage_tub(pod):
            return pod.metadata.annotations[
                u"leastauthority.com/storage-tub-id"]

        def _introducer_port_number(pod):
            return int(
                pod.metadata.
                annotations[u"leastauthority.com/introducer-port-number"])

        def _storage_port_number(pod):
            return int(pod.metadata.
                       annotations[u"leastauthority.com/storage-port-number"])

        def _introducer_address(pod):
            return (pod.status.podIP, _introducer_port_number(pod))

        def _storage_address(pod):
            return (pod.status.podIP, _storage_port_number(pod))

        with start_action(action_type=u"router-update:set-pods",
                          count=len(pods)):
            new = pmap([(_introducer_tub(pod), (pod, _introducer_address(pod)))
                        for pod in pods] + [(_storage_tub(pod),
                                             (pod, _storage_address(pod)))
                                            for pod in pods])

            adding = pset(new.keys()) - pset(old.keys())
            removing = pset(old.keys()) - pset(new.keys())

            for tub_id in adding:
                Message.log(event_type=u"router-update:add",
                            pod=new[tub_id][0].metadata.name)
            for tub_id in removing:
                Message.log(event_type=u"router-update:remove",
                            pod=old[tub_id][0].metadata.name)

            return new
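The adding/removing computation is a reusable idiom: lift the key views of two mappings into psets and take differences. In isolation:

from pyrsistent import pmap, pset

old = pmap({'a': 1, 'b': 2})
new = pmap({'b': 2, 'c': 3})
assert pset(new.keys()) - pset(old.keys()) == pset(['c'])   # adding
assert pset(old.keys()) - pset(new.keys()) == pset(['a'])   # removing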
Example 20
 def get_pairs_data(lbr1, noder1, lbr2, noder2):
     new_pairs = pset(
         [(self.lbs[0], self.nodes[1]),  # test same server pairs
          (self.lbs[2], self.nodes[0]),  # test same lb pairs
          (lbr1, noder1), (lbr2, noder2)])
     # existing pairs with upper case LB
     lbs = self.lbs[:]
     lbs[0] = lbs[0].upper()
     existing_pairs = pset(zip(lbs, self.nodes))
     pairs = existing_pairs | new_pairs
     # The data will still be self.pairs since lbs[0] will be normalized
     return pairs, r._sorted_data(self.pairs | new_pairs)
Example 21
def remove_node_from_lb(node):
    """
    Remove a node from the load balancing entity.

    :ivar node: The node to be removed.
    :type node: :class:`ILBNode` provider
    """
    if isinstance(node, CLBNode):
        return RemoveNodesFromCLB(lb_id=node.description.lb_id,
                                  node_ids=pset([node.node_id]))
    elif isinstance(node, RCv3Node):
        return BulkRemoveFromRCv3(lb_node_pairs=pset(
            [(node.description.lb_id, node.cloud_server_id)]))
Example 22
def _check_bulk_delete(attempted_pairs, result):
    """
    Checks if the RCv3 bulk delete command was successful.
    """
    response, body = result

    if response.code == 204:  # All done!
        return body

    errors = []
    non_members = pset()
    for error in body["errors"]:
        match = _SERVER_NOT_A_MEMBER_PATTERN.match(error)
        if match is not None:
            pair = match.groupdict()
            non_members = non_members.add(
                (normalize_lb_id(pair["lb_id"]), pair["server_id"]))
            continue

        match = _LB_INACTIVE_PATTERN.match(error)
        if match is not None:
            errors.append(LBInactive(match.group("lb_id")))
            continue

        match = _LB_DOESNT_EXIST_PATTERN.match(error)
        if match is not None:
            del_lb_id = normalize_lb_id(match.group("lb_id"))
            # consider all pairs with this LB to be removed
            removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
                       if lb_id == del_lb_id]
            non_members |= pset(removed)
            continue

        match = _SERVER_DOES_NOT_EXIST.match(error)
        if match is not None:
            del_server_id = match.group("server_id")
            # consider all pairs with this server to be removed
            removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
                       if node_id == del_server_id]
            non_members |= pset(removed)
        else:
            raise UnknownBulkResponse(body)

    if errors:
        raise BulkErrors(errors)
    elif non_members:
        to_retry = pset(attempted_pairs) - non_members
        return bulk_delete(to_retry) if to_retry else None
    else:
        raise UnknownBulkResponse(body)
Example 23
def test_supports_set_operations():
    s1 = pset([1, 2, 3])
    s2 = pset([3, 4, 5])

    assert s1 | s2 == s(1, 2, 3, 4, 5)
    assert s1.union(s2) == s1 | s2

    assert s1 & s2 == s(3)
    assert s1.intersection(s2) == s1 & s2

    assert s1 - s2 == s(1, 2)
    assert s1.difference(s2) == s1 - s2

    assert s1 ^ s2 == s(1, 2, 4, 5)
    assert s1.symmetric_difference(s2) == s1 ^ s2
Example 24
 def remove_deps(self):
     # Should only do this when value is moved or removed in the document
     fullpath = (self.path[0], tuple(self.path[1]), self.expr.calc)
     for name in self.dep_vars:
         name = self.resolve(name)
         v = self.scope.get_expr(name)
         if type(v) == Expr:
             self.scope[name] = v.set(rdepend=v.rdepend.remove(fullpath))
     return Expr(expr=None,
                 cache=self.cache,
                 path=self.path,
                 depend=pset(),
                 dep_vars=pset(),
                 rdepend=self.rdepend,
                 scope=self.scope)
Example 25
    def test_ignore_disappearing_divergent_flag(self):
        """
        When the divergent flag disappears just as we're starting to converge,
        the group does not get converged and None is returned as its result.

        This happens when a concurrent convergence iteration is just finishing
        up.
        """
        eff = self._converge_all_groups(['00_g1'])

        def get_bound_sequence(tid, gid):
            # since this GetStat is going to return None, no more effects will
            # be run. This is the crux of what we're testing.
            znode = '/groups/divergent/{}_{}'.format(tid, gid)
            return [
                (GetStat(path=znode), noop),
                (Log('converge-divergent-flag-disappeared',
                     fields={'znode': znode}),
                 noop)]

        sequence = [
            (ReadReference(ref=self.currently_converging), lambda i: pset()),
            (Log('converge-all-groups',
                 dict(group_infos=[self.group_infos[0]],
                      currently_converging=[])),
             noop),
            (ReadReference(ref=self.recently_converged), lambda i: pmap()),
            (Func(time.time), lambda i: 100),
            parallel_sequence([
                [(BoundFields(mock.ANY, fields={'tenant_id': '00',
                                                'scaling_group_id': 'g1'}),
                  nested_sequence(get_bound_sequence('00', 'g1')))],
             ]),
        ]
        self.assertEqual(perform_sequence(sequence, eff), [None])
Example 26
 def valid_updates(self,schedule):
   meeting = Meeting(mid=self.mid,agents=pvector([]),time=None)
   times = schedule.times - pset([t for a in self.agents
                                  for t in schedule.forward[a].keys()])
   if len(times):
     return [schedule.add_meeting(meeting.set(time=t),self.agents,self)
             for t in times]
Example 27
    def add(self, unit_name, image_name, ports=frozenset(), environment=None,
            volumes=frozenset(), mem_limit=None, cpu_shares=None,
            restart_policy=RestartNever(), command_line=None):
        if unit_name in self._units:
            return fail(AlreadyExists(unit_name))
        for port in ports:
            if port.external_port in self._used_ports:
                raise AddressInUse(address=(b"0.0.0.0", port.external_port))

        all_ports = set(range(2 ** 15, 2 ** 16))
        assigned_ports = []
        for port in ports:
            if port.external_port == 0:
                available_ports = pset(all_ports) - self._used_ports
                assigned = next(iter(available_ports))
                port = port.set(external_port=assigned)
            assigned_ports.append(port)
            self._used_ports = self._used_ports.add(port.external_port)

        self._units[unit_name] = Unit(
            name=unit_name,
            container_name=unit_name,
            container_image=image_name,
            ports=frozenset(assigned_ports),
            environment=environment,
            volumes=frozenset(volumes),
            activation_state=u'active',
            mem_limit=mem_limit,
            cpu_shares=cpu_shares,
            restart_policy=restart_policy,
            command_line=command_line,
        )
        return succeed(None)
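Rebinding self._used_ports once per port builds an intermediate set on every iteration; for larger batches, PSet's evolver collects the additions and produces a single persistent result. A sketch of that alternative:

from pyrsistent import pset

used = pset([80, 443])
e = used.evolver()
for port in (8080, 8443):
    e.add(port)
assert e.persistent() == pset([80, 443, 8080, 8443])
assert used == pset([80, 443])    # the source set is unaffected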
Example 28
    def mark_all_providers_default(self):
        state = self

        new_default_names = pyrs.pset(state.providers_by_name.keys())
        state = state.set(default_provider_names=new_default_names)

        return state.touch()
Example 29
 def test_all_retries(self):
     """
     If bulk_delete returns "server not a member", lb or server deleted
     for all attempted pairs then there is no retry and returns None
     """
     errors = {
         "errors": [
             server_not_member(self.lbs[0].upper(), self.nodes[0]),
             "Cloud Server {} does not exist".format(self.nodes[1]),
             "Load Balancer Pool {} does not exist".format(
                 self.lbs[2].upper())
         ]
     }
     pairs = pset([
         (self.lbs[0], self.nodes[1]),  # test same server pairs
         (self.lbs[2], self.nodes[0])   # test same lb pairs
     ])
     pairs = self.pairs | pairs
     data = r._sorted_data(pairs)
     seq = [
         (self.svc_req_intent(data),
          const(stub_json_response(errors, 409))),
         (log_intent(
             "request-rcv3-bulk", errors, req_body=("jsonified", data)),
          noop)
     ]
     self.assertIsNone(perform_sequence(seq, r.bulk_delete(pairs)))
Example 30
def wrap_col_intention(columns):
    if isinstance(columns, Intention):
        return lambda row: columns.evaluate(row.columns).current_set
    elif hasattr(columns, 'current_set'):
        return lambda row: columns.current_set
    else:
        return lambda row: pset(columns)
Example 31
 def test_multiple_errors(self):
     """
     If bulk add returns 409 then multiple errors returned are collected and
     raised as a single `BulkErrors`
     """
     errors = {
         "errors": [
             lb_inactive(self.lbs[0]),
             "Load Balancer Pool {} does not exist".format(self.lbs[1]),
             "Cloud Server {} is unprocessable".format(self.nodes[2])
         ]
     }
     seq = [(self.svc_req_intent(self.data),
             const(stub_json_response(errors, 409))),
            (log_intent("request-rcv3-bulk",
                        errors,
                        req_body=("jsonified", self.data)), noop)]
     with self.assertRaises(r.BulkErrors) as ec:
         perform_sequence(seq, r.bulk_add(self.pairs))
     self.assertEqual(
         ec.exception.errors,
         pset([
             r.LBInactive(self.lbs[0]),
             r.NoSuchLBError(self.lbs[1]),
             r.ServerUnprocessableError(self.nodes[2])
         ]))
Example 32
def run_set_performance():
    """
    == PyPy ==
    Big set from list: 0.0152490139008
    Big pset from list: 1.62447595596
    Random access set: 0.0192308425903
    Random access pset: 2.18643188477

    === CPython ===
    Big set from list: 0.0202131271362
    Big pset from list: 2.87654399872
    Random access set: 0.0950989723206
    Random access pset: 11.2261350155
    """
    l = [x for x in range(100000)]

    before = time.time()
    s1 = set(l)
    print("Big set from list: " + str(time.time() - before))

    before = time.time()
    s2 = pset(l, pre_size=2*len(l))
    print("Big pset from list: " + str(time.time() - before))

    before = time.time()
    random_access(s1)
    print("Random access set: " + str(time.time() - before))

    before = time.time()
    random_access(s2)
    print("Random access pset: " + str(time.time() - before))
Example 33
def _lbs_from_metadata(metadata):
    """
    Get the desired load balancer descriptions based on the metadata.

    :return: ``dict`` of `ILBDescription` providers
    """
    lbs = get_service_metadata('autoscale', metadata).get('lb', {})
    desired_lbs = []

    for lb_id, v in lbs.get('CloudLoadBalancer', {}).iteritems():
        # if malformed, skip the whole key
        try:
            configs = json.loads(v)
            if isinstance(configs, list):
                desired_lbs.extend([
                    CLBDescription(lb_id=lb_id, port=c['port'])
                    for c in configs
                ])
        except (ValueError, KeyError, TypeError):
            pass

    desired_lbs.extend([
        RCv3Description(lb_id=lb_id) for lb_id in lbs.get('RackConnectV3', {})
    ])

    return pset(desired_lbs)
Example 34
def parse_file(excel_file):
  excel = pd.ExcelFile(excel_file)

  df = excel.parse('Schedule',index_col=0)
  df.columns = clean_up(df.columns)
  times,agents = parse_schedule(df)

  df = excel.parse('Meetings',index_col=None)
  df.columns = clean_up(df.columns)
  del df['area']
  df.name = clean_up(df.name)
  meetings = parse_student_meetings(df,3)

  offset = meetings[-1].mid+1
  df = excel.parse('Lab Meetings',index_col=None)
  df.columns = clean_up(df.columns)
  meetings += parse_lab_meetings(df,offset=offset)

  df = excel.parse('Schedule Preferences')
  df.columns = clean_up(df.columns)
  costs = parse_costs(df)

  final_meetings = {}
  for requirement in meetings:
    old = final_meetings.get(requirement.mid,pset())
    final_meetings[requirement.mid] = old.add(requirement)

  return Schedule(list(agents),pmap(),pmap(times),costs,
                  pmap(final_meetings),pmap())
Example 35
 def test_dont_filter_out_non_recently_converged(self):
     """
     If a group was converged in the past but not recently, it will be
     cleaned from the ``recently_converged`` map, and it will be converged.
     """
     # g1: converged a while ago; divergent -> removed and converged
     # g2: converged recently; not divergent -> not converged
     # g3: converged a while ago; not divergent -> removed and not converged
     eff = self._converge_all_groups(['00_g1'])
     sequence = [
         (ReadReference(ref=self.currently_converging), lambda i: pset([])),
         (Log('converge-all-groups',
              dict(group_infos=[self.group_infos[0]],
                   currently_converging=[])),
          noop),
         (ReadReference(ref=self.recently_converged),
          lambda i: pmap({'g1': 4, 'g2': 10, 'g3': 0})),
         (Func(time.time), lambda i: 20),
         (ModifyReference(self.recently_converged,
                          match_func("literally anything",
                                     pmap({'g2': 10}))),
          noop),
         parallel_sequence([[self._expect_group_converged('00', 'g1')]])
     ]
     self.assertEqual(perform_sequence(sequence, eff), ['converged g1!'])
Example 36
 def get_pairs_data(lbr1, noder1, lbr2, noder2):
     pairs = pset(
         [(self.lbs[0], self.nodes[1]),  # test same server pairs
          (self.lbs[2], self.nodes[0]),  # test same lb pairs
          (lbr1, noder1), (lbr2, noder2)])
     pairs |= self.pairs
     return pairs, r._sorted_data(pairs)
Example 37
def decode(obj):
    if isinstance(obj, ExtType):
        if obj.code == TYPE_PSET:
            unpacked_data = unpackb(obj.data, use_list=False, encoding='utf-8')
            return pset(decode(item) for item in unpacked_data)
        if obj.code == TYPE_PLIST:
            unpacked_data = unpackb(obj.data, use_list=False, encoding='utf-8')
            return plist(decode(item) for item in unpacked_data)
        if obj.code == TYPE_PBAG:
            unpacked_data = unpackb(obj.data, use_list=False, encoding='utf-8')
            return pbag(decode(item) for item in unpacked_data)
        if obj.code == TYPE_FUNC:
            module_name, func_name = unpackb(obj.data,
                                             use_list=False,
                                             encoding='utf-8')

            return getattr(sys.modules[module_name], func_name)
        module_name, class_name, *data = unpackb(obj.data,
                                                 use_list=False,
                                                 encoding='utf-8')
        cls = getattr(sys.modules[module_name], class_name)
        if obj.code == TYPE_MBOX:
            return cls.decode(data)
        return cls(*(decode(item) for item in data))
    if isinstance(obj, tuple):
        return pvector(decode(item) for item in obj)
    if isinstance(obj, dict):
        new_dict = dict()
        for key in obj.keys():
            new_dict[decode(key)] = decode(obj[key])
        return pmap(new_dict)
    return obj
Example 38
 def dispatcher(self, operation, resp):
     return SequenceDispatcher([
         (TenantScope(mock.ANY, "tid"),
          nested_sequence([
              ((operation, pset([("lb_id", "server_id")])), lambda i: resp)
          ]))
     ])
Example 39
File: actor.py, Project: tlvu/mochi
def decode(obj):
    if isinstance(obj, ExtType):
        if obj.code == TYPE_PSET:
            unpacked_data = unpackb(obj.data,
                                    use_list=False,
                                    encoding='utf-8')
            return pset(decode(item) for item in unpacked_data)
        if obj.code == TYPE_PLIST:
            unpacked_data = unpackb(obj.data,
                                    use_list=False,
                                    encoding='utf-8')
            return plist(decode(item) for item in unpacked_data)
        if obj.code == TYPE_PBAG:
            unpacked_data = unpackb(obj.data,
                                    use_list=False,
                                    encoding='utf-8')
            return pbag(decode(item) for item in unpacked_data)
        module_name, class_name, *data = unpackb(obj.data,
                                                 use_list=False,
                                                 encoding='utf-8')
        cls = getattr(sys.modules[module_name],
                      class_name)
        return cls(*(decode(item) for item in data))
    if isinstance(obj, tuple):
        return pvector(decode(item) for item in obj)
    if isinstance(obj, dict):
        new_dict = dict()
        for key in obj.keys():
            new_dict[decode(key)] = decode(obj[key])
        return pmap(new_dict)
    return obj
Example 40
class Relation(PClass):

    M = field(type=PSet, mandatory=True)
    R = field(type=PSet, initial=pset())

    def __iter__(self):
        return self.R.__iter__()

    def __next__(self):
        return self.R.__next__()

    def contains(self, x, y):
        return (x, y) in self.R

    def add(self, x, y):
        return Relation(M=self.M, R=self.R | pset([(x, y)]))

    def remove(self, x, y):
        return Relation(M=self.M, R=self.R - pset([(x, y)]))

    def union(self, R2):
        return Relation(M=self.M, R=self.R | R2.R)

    def intersection(self, R2):
        return Relation(M=self.M, R=self.R & R2.R)

    def subtraction(self, R2):
        return Relation(M=self.M, R=self.R - R2.R)

    def inverse(self):
        return Relation(M=self.M, R=pset(map(lambda t: t[::-1], self.R)))

    def composition(self, R2):
        return Relation(M=self.M,
                        R=pset((t1[0], t2[1]) for t1 in self.R for t2 in R2.R
                               if t1[1] == t2[0]))

    def is_reflexive(self):
        return pset() == pset(
            filter(lambda m: True
                   if not self.contains(m, m) else False, self.M))

    def is_symmetric(self):
        return pset() == pset(
            filter(lambda t: True
                   if not self.contains(t[1], t[0]) else False, self.R))

    def is_transitive(self):
        return pset() == pset(
            (t1, t2) for t1 in self.R for t2 in self.R
            if t1[1] == t2[0] and not self.contains(t1[0], t2[1]))

    def reflexive_transitive_closure(self):
        closure = self.R | (pset(map(lambda m: (m, m), self.M)))
        while True:
            closure_until_now = closure | pset(
                (x, w) for x, y in closure for q, w in closure if q == y)
            if closure_until_now == closure: break
            closure = closure_until_now
        return Relation(M=self.M, R=closure_until_now)
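A usage sketch of the class above on a small carrier set, given the definitions as written:

from pyrsistent import pset

rel = Relation(M=pset([1, 2, 3])).add(1, 2).add(2, 3)
assert rel.contains(1, 2) and not rel.contains(2, 1)
closed = rel.reflexive_transitive_closure()
assert closed.contains(1, 3) and closed.contains(2, 2)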
Example 41
 def test_reactivate_group_on_success_with_no_steps(self):
     """
     When the group started in ERROR state, and convergence succeeds, the
     group is put back into ACTIVE, even if there were no steps to execute.
     """
     self.manifest['state'].status = ScalingGroupStatus.ERROR
     for serv in self.servers:
         serv.desired_lbs = pset()
     sequence = [
         parallel_sequence([]),
         (Log(msg='execute-convergence', fields=mock.ANY), noop),
         (Log(msg='execute-convergence-results', fields=mock.ANY), noop),
         (UpdateGroupStatus(scaling_group=self.group,
                            status=ScalingGroupStatus.ACTIVE),
          noop),
         (Log('group-status-active',
              dict(cloud_feed=True, status='ACTIVE')),
          noop),
         (UpdateServersCache(
             "tenant-id", "group-id", self.now,
             [thaw(self.servers[0].json.set("_is_as_active", True)),
              thaw(self.servers[1].json.set("_is_as_active", True))]),
          noop)
     ]
     self.state_active = {
         'a': {'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]},
         'b': {'id': 'b', 'links': [{'href': 'link2', 'rel': 'self'}]}
     }
     self.cache[0]["_is_as_active"] = True
     self.cache[1]["_is_as_active"] = True
     self.assertEqual(
         perform_sequence(self.get_seq() + sequence, self._invoke()),
         (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example 42
 def matches_at_position(self, string, position, stack=pset()):
     for match in self.rule.matches_at_position(string, position, stack):
         (yield Node(string,
                     position,
                     match.length,
                     rule=self,
                     children=(match, )))
Example 43
 def matches_at_position(self, string, position, stack=pset()):
     if (self, position) in stack:
         # Prevent infinite recursion for zero-length matches.
         return
     stack = stack.add((self, position))
     for match in self.referent.matches_at_position(string, position, stack=stack):
         yield Node(string, position, match.length, rule=self, children=(match, ))
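The stack=pset() default in these signatures sidesteps Python's mutable-default-argument pitfall: the default object is shared across calls, but stack.add(...) returns a new set, so the shared default is never modified. An illustrative (hypothetical) helper:

from pyrsistent import pset

def visit(node, seen=pset()):
    if node in seen:
        return 'cycle'
    seen = seen.add(node)   # new set; the shared default stays empty
    return 'ok'

assert visit('a') == 'ok'
assert visit('a') == 'ok'   # the default was not polluted by the first call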
Example 44
 def test_no_steps(self):
     """
     If state of world matches desired, no steps are executed, but the
     `active` servers are still updated, and SUCCESS is the return value.
     """
     for serv in self.servers:
         serv.desired_lbs = pset()
     sequence = [
         parallel_sequence([]),
         (Log('execute-convergence', mock.ANY), noop),
         (Log('execute-convergence-results',
              {'results': [], 'worst_status': 'SUCCESS'}), noop),
         (UpdateServersCache(
             "tenant-id", "group-id", self.now,
             [thaw(self.servers[0].json.set('_is_as_active', True)),
              thaw(self.servers[1].json.set("_is_as_active", True))]),
          noop)
     ]
     self.state_active = {
         'a': {'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]},
         'b': {'id': 'b', 'links': [{'href': 'link2', 'rel': 'self'}]}
     }
     self.cache[0]["_is_as_active"] = True
     self.cache[1]["_is_as_active"] = True
     self.assertEqual(
         perform_sequence(self.get_seq() + sequence, self._invoke()),
         (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example 45
 def all_parses(self, string):
     """
     Returns an iterator of all parse trees of the string for the given rule.
     """
     for match in self.matches_at_position(string, 0, stack=pset()):
         if match.length == len(string):
             yield match
Example 46
def parse_schedule(schedule):
  times = {}
  for col in schedule.columns:
    times[col] = pset(map(parse_time,
                          clean_up(schedule[schedule[col] != 1].index.values)))

  return (times, schedule.columns)
Example 47
    def test_no_lbs(self):
        """
        When no loadBalancers are specified, the returned
        DesiredServerGroupState has an empty mapping for desired_lbs. If no
        draining_timeout is provided, returned DesiredServerGroupState has
        draining_timeout as 0.0
        """
        server_config = {'name': 'test', 'flavorRef': 'f'}
        lc = {'args': {'server': server_config}}

        expected_server_config = {
            'server': {
                'name': 'test',
                'flavorRef': 'f',
                'metadata': {
                    'rax:auto_scaling_group_id': 'uuid',
                    'rax:autoscale:group:id': 'uuid'}}}
        state = get_desired_server_group_state('uuid', lc, 2)
        self.assertEqual(
            state,
            DesiredServerGroupState(
                server_config=expected_server_config,
                capacity=2,
                desired_lbs=pset(),
                draining_timeout=0.0))
        self.assert_server_config_hashable(state)