Example #1
 def test_record_recently_converged(self):
     """
     After converging, the group is added to ``recently_converged`` -- but
     *before* being removed from ``currently_converging``, to avoid race
     conditions.
     """
     currently = Reference(pset())
     recently = Reference(pmap())
     remove_from_currently = match_func(pset([self.group_id]), pset([]))
     sequence = [
         (ReadReference(currently), lambda i: pset()),
         add_to_currently(currently, self.group_id),
         (('ec', self.tenant_id, self.group_id, 3600),
          lambda i: (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE)),
         (Func(time.time), lambda i: 100),
         add_to_recently(recently, self.group_id, 100),
         (ModifyReference(currently, remove_from_currently), noop),
         (DeleteNode(path='/groups/divergent/tenant-id_g1',
                     version=self.version), noop),
         (Log('mark-clean-success', {}), noop)
     ]
     eff = converge_one_group(
         currently, recently, self.tenant_id, self.group_id, self.version,
         3600, execute_convergence=self._execute_convergence)
     perform_sequence(sequence, eff)
Example #2
 def test_bulk_remove_from_rcv3(self):
     """Logs :obj:`BulkRemoveFromRCv3`."""
     adds = pbag([
         BulkRemoveFromRCv3(lb_node_pairs=pset([
             ('lb1', 'node1'), ('lb1', 'node2'),
             ('lb2', 'node2'), ('lb2', 'node3'),
             ('lb3', 'node4')])),
         BulkRemoveFromRCv3(lb_node_pairs=pset([
             ('lba', 'nodea'), ('lba', 'nodeb'),
             ('lb1', 'nodea')]))
     ])
     self.assert_logs(adds, [
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lb1', 'servers': ['node1', 'node2', 'nodea'],
                     'cloud_feed': True}),
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lb2', 'servers': ['node2', 'node3'],
                     'cloud_feed': True}),
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lb3', 'servers': ['node4'],
                     'cloud_feed': True}),
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lba', 'servers': ['nodea', 'nodeb'],
                     'cloud_feed': True})
     ])
Example #3
def test_is_disjoint():
    s1 = pset([1, 2, 3])
    s2 = pset([3, 4, 5])
    s3 = pset([4, 5])

    assert not s1.isdisjoint(s2)
    assert s1.isdisjoint(s3)
Example #4
def add_server_to_lb(server, description, load_balancer):
    """
    Add a server to a load balancing entity as described by `description`.

    :param server: The server to be added.
    :type server: :class:`NovaServer`

    :param description: The description of the load balancer and how to add
        the server to it.
    :type description: :class:`ILBDescription` provider

    :param load_balancer: The load balancer the server is being added to, or
        ``None`` if it could not be found.
    """
    if isinstance(description, CLBDescription):
        if server.servicenet_address:
            if load_balancer is None:
                return fail_convergence(
                    CLBHealthInfoNotFound(description.lb_id))
            if load_balancer.health_monitor:
                description = assoc_obj(description,
                                        condition=CLBNodeCondition.DRAINING)
            return AddNodesToCLB(
                lb_id=description.lb_id,
                address_configs=pset(
                    [(server.servicenet_address, description)]))
    elif isinstance(description, RCv3Description):
        return BulkAddToRCv3(lb_node_pairs=pset(
            [(description.lb_id, server.id)]))
Example #5
  def __init__(self,agents=pvector([]),times=pset([]),forward=pmap({}),
               costs=pmap({}),requirements=pmap({}),backward=None,
               unsatisfied=None):
    self.cache = {}

    #### schedule bounds
    self.agents = agents  # vector of valid agents
    self.times = times  # set of valid times

    #### the schedule itself
    self.forward = forward  # agents -> times -> meeting ids

    # mids -> meeting (time, agents)
    if backward is None:
      self.backward = _backward_from_forward(self.forward)
    else:
      self.backward = backward

    #### schedule constraints
    self.requirements = requirements  # mids -> requirement type -> requirement

    # mids -> requirement type
    if unsatisfied is None:
      self.unsatisfied = pmap({mid: pset(self.requirements[mid].keys())
                               for mid in self.requirements.keys()})
    else:
      self.unsatisfied = unsatisfied

    self.costs = costs  # map from agents to meeting time costs functions
Example #6
def read_schedule_json(obj):
    # reconstruct schedule information from json
    agents = pvector(obj['agents'])
    costs = pmap(obj['costs'])
    times = pset(map(as_timerange,obj['times']))
    forward = pmap({a: pmap({as_timerange(t): int(t['mid'])
                             for t in obj['meetings'][a] if t['mid'] != -1})
                    for a in agents})

    mids = pset([mid for ts in forward.values() for mid in ts.values()])

    # remove the mid 0, which marks an empty meeting (for unavailable times)
    if 0 in mids:
      mids = mids.remove(0)

    # update meetings and their requirements
    requirements = pmap({int(mid): pmap({r['type']: read_jsonable_requirement(r)
                                        for r in rs.values()})
                         for mid,rs in obj['requirements'].iteritems()})

    schedule = Schedule(agents=agents,times=times,forward=forward,
                        requirements=requirements,costs=costs)

    new_unsatisfied = schedule.unsatisfied
    for mid,rs in schedule.unsatisfied.iteritems():
      for rtype in rs:
        r = schedule.requirements[mid][rtype]
        if r.satisfied(schedule):
          new_unsatisfied = _mark_satisfied(new_unsatisfied,r)
        elif not r.satisfiable(schedule):
          raise RequirementException(r)
    schedule.unsatisfied = new_unsatisfied

    return schedule
Example #7
 def _extract_ids(t):
     if isinstance(t, TestSuite):
         result = pset()
         for sub_tests in t:
             result = result | _extract_ids(sub_tests)
         return result
     else:
         return pset([t.id()])
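The loop in Example #7 folds subsuite results together with ``|``. The same
accumulation can be written as a fold; a minimal equivalent sketch
(``union_all`` is an illustrative name, not from the example):

    from functools import reduce
    from pyrsistent import pset

    def union_all(sets):
        # Start from the empty pset and union each subset in,
        # exactly like _extract_ids' explicit loop.
        return reduce(lambda acc, nxt: acc | nxt, sets, pset())

    assert union_all([pset([1]), pset([2, 3])]) == pset([1, 2, 3])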
Example #8
def test_is_immutable():
    s1 = pset([1])
    s2 = s1.add(2)

    assert s1 == pset([1])
    assert s2 == pset([1, 2])

    s3 = s2.remove(1)
    assert s2 == pset([1, 2])
    assert s3 == pset([2])
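Example #8 shows the core persistence contract: ``add`` and ``remove`` return
new psets and never touch the original. When many updates are applied in a
loop, pyrsistent's evolver API batches them against a mutable working copy
before freezing the result; a small sketch (``add_many`` is an illustrative
name, not part of the library):

    from pyrsistent import pset

    def add_many(base, items):
        # The evolver mutates a private working copy; persistent()
        # freezes it back into an ordinary pset.
        e = base.evolver()
        for item in items:
            e.add(item)
        return e.persistent()

    s1 = pset([1])
    s2 = add_many(s1, range(2, 6))
    assert s1 == pset([1])            # original unchanged
    assert s2 == pset([1, 2, 3, 4, 5])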
Example #9
 def get_pairs_data(lbr1, noder1, lbr2, noder2):
     new_pairs = pset(
         [(self.lbs[0], self.nodes[1]),  # test same server pairs
          (self.lbs[2], self.nodes[0]),  # test same lb pairs
          (lbr1, noder1), (lbr2, noder2)])
     # existing pairs with upper case LB
     lbs = self.lbs[:]
     lbs[0] = lbs[0].upper()
     existing_pairs = pset(zip(lbs, self.nodes))
     pairs = existing_pairs | new_pairs
     # The data will still be self.pairs since lbs[0] will be normalized
     return pairs, r._sorted_data(self.pairs | new_pairs)
Example #10
def remove_node_from_lb(node):
    """
    Remove a node from the load balancing entity.

    :param node: The node to be removed.
    :type node: :class:`ILBNode` provider
    """
    if isinstance(node, CLBNode):
        return RemoveNodesFromCLB(lb_id=node.description.lb_id,
                                  node_ids=pset([node.node_id]))
    elif isinstance(node, RCv3Node):
        return BulkRemoveFromRCv3(lb_node_pairs=pset(
            [(node.description.lb_id, node.cloud_server_id)]))
Example #11
def _check_bulk_delete(attempted_pairs, result):
    """
    Checks if the RCv3 bulk delete command was successful.
    """
    response, body = result

    if response.code == 204:  # All done!
        return body

    errors = []
    non_members = pset()
    for error in body["errors"]:
        match = _SERVER_NOT_A_MEMBER_PATTERN.match(error)
        if match is not None:
            pair = match.groupdict()
            non_members = non_members.add(
                (normalize_lb_id(pair["lb_id"]), pair["server_id"]))
            continue

        match = _LB_INACTIVE_PATTERN.match(error)
        if match is not None:
            errors.append(LBInactive(match.group("lb_id")))
            continue

        match = _LB_DOESNT_EXIST_PATTERN.match(error)
        if match is not None:
            del_lb_id = normalize_lb_id(match.group("lb_id"))
            # consider all pairs with this LB to be removed
            removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
                       if lb_id == del_lb_id]
            non_members |= pset(removed)
            continue

        match = _SERVER_DOES_NOT_EXIST.match(error)
        if match is not None:
            del_server_id = match.group("server_id")
            # consider all pairs with this server to be removed
            removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
                       if node_id == del_server_id]
            non_members |= pset(removed)
        else:
            raise UnknownBulkResponse(body)

    if errors:
        raise BulkErrors(errors)
    elif non_members:
        to_retry = pset(attempted_pairs) - non_members
        return bulk_delete(to_retry) if to_retry else None
    else:
        raise UnknownBulkResponse(body)
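The retry logic at the end of _check_bulk_delete is plain pset algebra: pairs
confirmed gone are accumulated into ``non_members`` with ``|=``, then
subtracted from the attempted pairs, so only pairs the API neither removed
nor rejected get retried. Reduced to its essentials:

    from pyrsistent import pset

    attempted = pset([('lb1', 'n1'), ('lb1', 'n2'), ('lb2', 'n1')])
    non_members = pset([('lb1', 'n1')]) | pset([('lb2', 'n1')])
    to_retry = attempted - non_members
    assert to_retry == pset([('lb1', 'n2')])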
Example #12
def test_supports_set_operations():
    s1 = pset([1, 2, 3])
    s2 = pset([3, 4, 5])

    assert s1 | s2 == s(1, 2, 3, 4, 5)
    assert s1.union(s2) == s1 | s2

    assert s1 & s2 == s(3)
    assert s1.intersection(s2) == s1 & s2

    assert s1 - s2 == s(1, 2)
    assert s1.difference(s2) == s1 - s2

    assert s1 ^ s2 == s(1, 2, 4, 5)
    assert s1.symmetric_difference(s2) == s1 ^ s2
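The ``s(...)`` in the assertions is pyrsistent's varargs convenience
constructor: ``s(1, 2, 3)`` builds the same value as ``pset([1, 2, 3])``, and
because psets compare by contents the two spellings are interchangeable:

    from pyrsistent import pset, s

    assert s(1, 2, 3) == pset([1, 2, 3])
    assert s() == pset()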
Example #13
 def test_reactivate_group_on_success_with_no_steps(self):
     """
     When the group started in ERROR state, and convergence succeeds, the
     group is put back into ACTIVE, even if there were no steps to execute.
     """
     self.manifest['state'].status = ScalingGroupStatus.ERROR
     for serv in self.servers:
         serv.desired_lbs = pset()
     sequence = [
         parallel_sequence([]),
         (Log(msg='execute-convergence', fields=mock.ANY), noop),
         (Log(msg='execute-convergence-results', fields=mock.ANY), noop),
         (UpdateGroupStatus(scaling_group=self.group,
                            status=ScalingGroupStatus.ACTIVE),
          noop),
         (Log('group-status-active',
              dict(cloud_feed=True, status='ACTIVE')),
          noop),
         (UpdateServersCache(
             "tenant-id", "group-id", self.now,
             [thaw(self.servers[0].json.set("_is_as_active", True)),
              thaw(self.servers[1].json.set("_is_as_active", True))]),
          noop)
     ]
     self.state_active = {
         'a': {'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]},
         'b': {'id': 'b', 'links': [{'href': 'link2', 'rel': 'self'}]}
     }
     self.cache[0]["_is_as_active"] = True
     self.cache[1]["_is_as_active"] = True
     self.assertEqual(
         perform_sequence(self.get_seq() + sequence, self._invoke()),
         (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example #14
 def test_multiple_errors(self):
     """
     If bulk add returns a 409, the multiple errors returned are collected
     and raised as a single `BulkErrors`.
     """
     errors = {
         "errors": [
             lb_inactive(self.lbs[0]),
             "Load Balancer Pool {} does not exist".format(self.lbs[1]),
             "Cloud Server {} is unprocessable".format(self.nodes[2])
         ]
     }
     seq = [
         (self.svc_req_intent(self.data),
          const(stub_json_response(errors, 409))),
         (log_intent(
             "request-rcv3-bulk", errors,
             req_body=("jsonified", self.data)),
          noop)
     ]
     with self.assertRaises(r.BulkErrors) as ec:
         perform_sequence(seq, r.bulk_add(self.pairs))
     self.assertEqual(
         ec.exception.errors,
         pset([r.LBInactive(self.lbs[0]),
               r.NoSuchLBError(self.lbs[1]),
               r.ServerUnprocessableError(self.nodes[2])])
     )
Example #15
 def test_all_retries(self):
     """
     If bulk_delete reports "server not a member", or that the LB or server
     does not exist, for all attempted pairs, then there is no retry and
     ``None`` is returned.
     """
     errors = {
         "errors": [
             server_not_member(self.lbs[0].upper(), self.nodes[0]),
             "Cloud Server {} does not exist".format(self.nodes[1]),
             "Load Balancer Pool {} does not exist".format(
                 self.lbs[2].upper())
         ]
     }
     pairs = pset([
         (self.lbs[0], self.nodes[1]),  # test same server pairs
         (self.lbs[2], self.nodes[0])   # test same lb pairs
     ])
     pairs = self.pairs | pairs
     data = r._sorted_data(pairs)
     seq = [
         (self.svc_req_intent(data),
          const(stub_json_response(errors, 409))),
         (log_intent(
             "request-rcv3-bulk", errors, req_body=("jsonified", data)),
          noop)
     ]
     self.assertIsNone(perform_sequence(seq, r.bulk_delete(pairs)))
Example #16
 def test_dont_filter_out_non_recently_converged(self):
     """
     If a group was converged in the past but not recently, it will be
     cleaned from the ``recently_converged`` map, and it will be converged.
     """
     # g1: converged a while ago; divergent -> removed and converged
     # g2: converged recently; not divergent -> not converged
     # g3: converged a while ago; not divergent -> removed and not converged
     eff = self._converge_all_groups(['00_g1'])
     sequence = [
         (ReadReference(ref=self.currently_converging), lambda i: pset([])),
         (Log('converge-all-groups',
              dict(group_infos=[self.group_infos[0]],
                   currently_converging=[])),
          noop),
         (ReadReference(ref=self.recently_converged),
          lambda i: pmap({'g1': 4, 'g2': 10, 'g3': 0})),
         (Func(time.time), lambda i: 20),
         (ModifyReference(self.recently_converged,
                          match_func("literally anything",
                                     pmap({'g2': 10}))),
          noop),
         parallel_sequence([[self._expect_group_converged('00', 'g1')]])
     ]
     self.assertEqual(perform_sequence(sequence, eff), ['converged g1!'])
Example #17
 def get_pairs_data(lbr1, noder1, lbr2, noder2):
     pairs = pset(
         [(self.lbs[0], self.nodes[1]),  # test same server pairs
          (self.lbs[2], self.nodes[0]),  # test same lb pairs
          (lbr1, noder1), (lbr2, noder2)])
     pairs |= self.pairs
     return pairs, r._sorted_data(pairs)
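Note that ``pairs |= self.pairs`` in Example #17 does not mutate anything:
``|=`` on a persistent set computes a new union and rebinds the local name,
leaving the shared set untouched, which is why building on ``self.pairs`` is
safe here. A quick demonstration:

    from pyrsistent import pset

    shared = pset([1, 2])
    local = pset([3])
    local |= shared        # rebinds `local`; `shared` is untouched
    assert shared == pset([1, 2])
    assert local == pset([1, 2, 3])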
Example #18
def run_set_performance():
    """
    === PyPy ===
    Big set from list: 0.0152490139008
    Big pset from list: 1.62447595596
    Random access set: 0.0192308425903
    Random access pset: 2.18643188477

    === CPython ===
    Big set from list: 0.0202131271362
    Big pset from list: 2.87654399872
    Random access set: 0.0950989723206
    Random access pset: 11.2261350155
    """
    l = [x for x in range(100000)]

    before = time.time()
    s1 = set(l)
    print("Big set from list: " + str(time.time() - before))

    before = time.time()
    s2 = pset(l, pre_size=2*len(l))
    print("Big pset from list: " + str(time.time() - before))

    before = time.time()
    random_access(s1)
    print("Random access set: " + str(time.time() - before))

    before = time.time()
    random_access(s2)
    print("Random access pset: " + str(time.time() - before))
Example #19
 def all_parses(self, string):
     """
     Returns an iterator of all parse trees of the string for the given rule.
     """
     for match in self.matches_at_position(string, 0, stack=pset()):
         if match.length == len(string):
             yield match
Example #20
def parse_file(excel_file):
  excel = pd.ExcelFile(excel_file)

  df = excel.parse('Schedule',index_col=0)
  df.columns = clean_up(df.columns)
  times,agents = parse_schedule(df)

  df = excel.parse('Meetings',index_col=None)
  df.columns = clean_up(df.columns)
  del df['area']
  df.name = clean_up(df.name)
  meetings = parse_student_meetings(df,3)

  offset = meetings[-1].mid+1
  df = excel.parse('Lab Meetings',index_col=None)
  df.columns = clean_up(df.columns)
  meetings += parse_lab_meetings(df,offset=offset)

  df = excel.parse('Schedule Preferences')
  df.columns = clean_up(df.columns)
  costs = parse_costs(df)

  final_meetings = {}
  for requirement in meetings:
    old = final_meetings.get(requirement.mid,pset())
    final_meetings[requirement.mid] = old.add(requirement)

  return Schedule(list(agents),pmap(),pmap(times),costs,
                  pmap(final_meetings),pmap())
Example #21
def parse_schedule(schedule):
  times = {}
  for col in schedule.columns:
    times[col] = pset(map(parse_time,
                          clean_up(schedule[schedule[col] != 1].index.values)))

  return (times, schedule.columns)
Example #22
 def test_no_steps(self):
     """
     If the state of the world matches the desired state, no steps are
     executed, but the `active` servers are still updated, and SUCCESS is
     the return value.
     """
     for serv in self.servers:
         serv.desired_lbs = pset()
     sequence = [
         parallel_sequence([]),
         (Log('execute-convergence', mock.ANY), noop),
         (Log('execute-convergence-results',
              {'results': [], 'worst_status': 'SUCCESS'}), noop),
         (UpdateServersCache(
             "tenant-id", "group-id", self.now,
             [thaw(self.servers[0].json.set('_is_as_active', True)),
              thaw(self.servers[1].json.set("_is_as_active", True))]),
          noop)
     ]
     self.state_active = {
         'a': {'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]},
         'b': {'id': 'b', 'links': [{'href': 'link2', 'rel': 'self'}]}
     }
     self.cache[0]["_is_as_active"] = True
     self.cache[1]["_is_as_active"] = True
     self.assertEqual(
         perform_sequence(self.get_seq() + sequence, self._invoke()),
         (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example #23
def _lbs_from_metadata(metadata):
    """
    Get the desired load balancer descriptions based on the metadata.

    :return: ``pset`` of `ILBDescription` providers
    """
    lbs = get_service_metadata('autoscale', metadata).get('lb', {})
    desired_lbs = []

    for lb_id, v in lbs.get('CloudLoadBalancer', {}).iteritems():
        # if malformed, skip the whole key
        try:
            configs = json.loads(v)
            if isinstance(configs, list):
                desired_lbs.extend([
                    CLBDescription(lb_id=lb_id, port=c['port'])
                    for c in configs])
        except (ValueError, KeyError, TypeError):
            pass

    desired_lbs.extend([
        RCv3Description(lb_id=lb_id) for lb_id in lbs.get('RackConnectV3', {})
    ])

    return pset(desired_lbs)
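Returning ``pset(desired_lbs)`` rather than the accumulator list means
duplicate descriptions collapse and the result compares by value regardless
of the order the metadata was walked in:

    from pyrsistent import pset

    assert pset(['lb1', 'lb1', 'lb2']) == pset(['lb2', 'lb1'])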
Example #24
 def valid_updates(self,schedule):
   meeting = Meeting(mid=self.mid,agents=pvector([]),time=None)
   times = schedule.times - pset([t for a in self.agents
                                  for t in schedule.forward[a].keys()])
   if len(times):
     return [schedule.add_meeting(meeting.set(time=t),self.agents,self)
             for t in times]
Example #25
    def test_no_lbs(self):
        """
        When no loadBalancers are specified, the returned
        DesiredServerGroupState has an empty set for desired_lbs. If no
        draining_timeout is provided, the returned DesiredServerGroupState
        has a draining_timeout of 0.0.
        """
        server_config = {'name': 'test', 'flavorRef': 'f'}
        lc = {'args': {'server': server_config}}

        expected_server_config = {
            'server': {
                'name': 'test',
                'flavorRef': 'f',
                'metadata': {
                    'rax:auto_scaling_group_id': 'uuid',
                    'rax:autoscale:group:id': 'uuid'}}}
        state = get_desired_server_group_state('uuid', lc, 2)
        self.assertEqual(
            state,
            DesiredServerGroupState(
                server_config=expected_server_config,
                capacity=2,
                desired_lbs=pset(),
                draining_timeout=0.0))
        self.assert_server_config_hashable(state)
Example #26
    def test_ignore_disappearing_divergent_flag(self):
        """
        When the divergent flag disappears just as we're starting to converge,
        the group does not get converged and None is returned as its result.

        This happens when a concurrent convergence iteration is just finishing
        up.
        """
        eff = self._converge_all_groups(['00_g1'])

        def get_bound_sequence(tid, gid):
            # since this GetStat is going to return None, no more effects will
            # be run. This is the crux of what we're testing.
            znode = '/groups/divergent/{}_{}'.format(tid, gid)
            return [
                (GetStat(path=znode), noop),
                (Log('converge-divergent-flag-disappeared',
                     fields={'znode': znode}),
                 noop)]

        sequence = [
            (ReadReference(ref=self.currently_converging), lambda i: pset()),
            (Log('converge-all-groups',
                 dict(group_infos=[self.group_infos[0]],
                      currently_converging=[])),
             noop),
            (ReadReference(ref=self.recently_converged), lambda i: pmap()),
            (Func(time.time), lambda i: 100),
            parallel_sequence([
                [(BoundFields(mock.ANY, fields={'tenant_id': '00',
                                                'scaling_group_id': 'g1'}),
                  nested_sequence(get_bound_sequence('00', 'g1')))],
             ]),
        ]
        self.assertEqual(perform_sequence(sequence, eff), [None])
Example #27
 def matches_at_position(self, string, position, stack=pset()):
     if (self, position) in stack:
         # Prevent infinite recursion for zero-length matches.
         return
     stack = stack.add((self, position))
     for match in self.referent.matches_at_position(string, position, stack=stack):
         yield Node(string, position, match.length, rule=self, children=(match, ))
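Examples #19 and #27 use a pset as the recursion "stack" for cycle
detection: ``stack.add((self, position))`` returns a new set, so each branch
of the parse sees only its own ancestors and nothing has to be popped on the
way back out. The same guard pattern, stripped down to a toy graph walk
(``graph`` and ``paths`` are illustrative, not from the parser):

    from pyrsistent import pset

    graph = {'a': ['b', 'c'], 'b': ['a'], 'c': []}

    def paths(node, seen=pset()):
        # Each recursive call extends a private copy of `seen`;
        # sibling branches are unaffected and no cleanup is needed.
        if node in seen:
            return                     # cycle: stop this branch
        seen = seen.add(node)
        yield node
        for nxt in graph[node]:
            yield from paths(nxt, seen)

    assert list(paths('a')) == ['a', 'b', 'c']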
Example #28
def decode(obj):
    if isinstance(obj, ExtType):
        if obj.code == TYPE_PSET:
            unpacked_data = unpackb(obj.data,
                                    use_list=False,
                                    encoding='utf-8')
            return pset(decode(item) for item in unpacked_data)
        if obj.code == TYPE_PLIST:
            unpacked_data = unpackb(obj.data,
                                    use_list=False,
                                    encoding='utf-8')
            return plist(decode(item) for item in unpacked_data)
        if obj.code == TYPE_PBAG:
            unpacked_data = unpackb(obj.data,
                                    use_list=False,
                                    encoding='utf-8')
            return pbag(decode(item) for item in unpacked_data)
        module_name, class_name, *data = unpackb(obj.data,
                                                 use_list=False,
                                                 encoding='utf-8')
        cls = getattr(sys.modules[module_name],
                      class_name)
        return cls(*(decode(item) for item in data))
    if isinstance(obj, tuple):
        return pvector(decode(item) for item in obj)
    if isinstance(obj, dict):
        new_dict = dict()
        for key in obj.keys():
            new_dict[decode(key)] = decode(obj[key])
        return pmap(new_dict)
    return obj
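Example #28's ``decode`` is the receiving half of a msgpack scheme: ExtType
payloads tagged with ``TYPE_PSET``/``TYPE_PLIST``/``TYPE_PBAG`` are unpacked
and rebuilt as persistent collections. A minimal sketch of what the sending
half for a pset might look like; the ext code value and the helper name are
assumptions inferred from the decoder, not taken from the project:

    from msgpack import ExtType, packb
    from pyrsistent import PSet

    TYPE_PSET = 123  # assumed placeholder; must match decode()'s code

    def encode_ext(obj):
        # Pack the pset's elements as a tuple, then tag the bytes with
        # the TYPE_PSET ext code so decode()'s pset branch rebuilds it.
        if isinstance(obj, PSet):
            return ExtType(TYPE_PSET, packb(tuple(obj)))
        return obj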
Example #29
    def add(self, unit_name, image_name, ports=frozenset(), environment=None,
            volumes=frozenset(), mem_limit=None, cpu_shares=None,
            restart_policy=RestartNever(), command_line=None):
        if unit_name in self._units:
            return fail(AlreadyExists(unit_name))
        for port in ports:
            if port.external_port in self._used_ports:
                raise AddressInUse(address=(b"0.0.0.0", port.external_port))

        all_ports = set(range(2 ** 15, 2 ** 16))
        assigned_ports = []
        for port in ports:
            if port.external_port == 0:
                available_ports = pset(all_ports) - self._used_ports
                assigned = next(iter(available_ports))
                port = port.set(external_port=assigned)
            assigned_ports.append(port)
            self._used_ports = self._used_ports.add(port.external_port)

        self._units[unit_name] = Unit(
            name=unit_name,
            container_name=unit_name,
            container_image=image_name,
            ports=frozenset(assigned_ports),
            environment=environment,
            volumes=frozenset(volumes),
            activation_state=u'active',
            mem_limit=mem_limit,
            cpu_shares=cpu_shares,
            restart_policy=restart_policy,
            command_line=command_line,
        )
        return succeed(None)
Example #30
 def dispatcher(self, operation, resp):
     return SequenceDispatcher([
         (TenantScope(mock.ANY, "tid"),
          nested_sequence([
              ((operation, pset([("lb_id", "server_id")])), lambda i: resp)
          ]))
     ])