def data_from_arg(arg):
    """Extract data from a constructor arg and make immutable.

    Parameters
    ----------
    arg : dict, pmap, int, float, str
        Data payload as passed to one of the constructors. If dict or
        pmap, it is used as the data payload directly; any other value is
        presumed to be a simple payload of {'value': arg}.

    Returns
    -------
    pyrsistent.pmap
        Immutable dict-like object

    Raises
    ------
    EventException
        Raised on bad arg input.
    """
    if isinstance(arg, dict):
        return freeze(arg)
    elif is_pmap(arg):
        return copy.copy(arg)
    elif isinstance(arg, (int, float, str)):
        return freeze({'value': arg})
    else:
        raise EventException(
            'Could not interpret data from {a}'.format(a=arg))
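# A minimal usage sketch for data_from_arg above. Only pyrsistent is
# required; the expected values follow from freeze() recursively
# converting dicts to pmaps.
from pyrsistent import pmap

payload = data_from_arg({'value': 42, 'nested': {'a': 1}})
assert payload == pmap({'value': 42, 'nested': pmap({'a': 1})})

# Scalar arguments are wrapped into a {'value': ...} payload.
assert data_from_arg(3.14) == pmap({'value': 3.14})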
def filter_records(root, head, update, filters=()):
    """Apply the filters to the records."""
    root, head, update = freeze(root), freeze(head), freeze(update)
    for filter_ in filters:
        root, head, update = filter_(root, head, update)
    return thaw(root), thaw(head), thaw(update)
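# A hypothetical filter for filter_records above: each filter receives
# and returns the frozen (root, head, update) triple, so filters compose
# without sharing mutable state. drop_private_head_keys is illustrative
# only.
def drop_private_head_keys(root, head, update):
    # Strip a '_private' key from the head record, if present.
    if '_private' in head:
        head = head.remove('_private')
    return root, head, update

root, head, update = filter_records(
    {'title': 'old'},
    {'title': 'new', '_private': True},
    {'title': 'newest'},
    filters=(drop_private_head_keys,),
)
assert head == {'title': 'new'}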
def test_success(self):
    """
    The data is returned as a tuple of ([NovaServer], [CLBNode/RCv3Node]).
    """
    clb_nodes = [CLBNode(node_id="node1", address="ip1",
                         description=CLBDescription(lb_id="lb1", port=80))]
    rcv3_nodes = [RCv3Node(node_id="node2", cloud_server_id="a",
                           description=RCv3Description(lb_id="lb2"))]
    eff = get_all_convergence_data(
        "tid", "gid", self.now,
        get_scaling_group_servers=_constant_as_eff(
            ("tid", "gid", self.now), self.servers),
        get_clb_contents=_constant_as_eff((), clb_nodes),
        get_rcv3_contents=_constant_as_eff((), rcv3_nodes),
    )
    expected_servers = [
        server(
            "a",
            ServerState.ACTIVE,
            servicenet_address="10.0.0.1",
            links=freeze([{"href": "link1", "rel": "self"}]),
            json=freeze(self.servers[0]),
        ),
        server(
            "b",
            ServerState.ACTIVE,
            created=1,
            servicenet_address="10.0.0.2",
            links=freeze([{"href": "link2", "rel": "self"}]),
            json=freeze(self.servers[1]),
        ),
    ]
    self.assertEqual(resolve_stubs(eff),
                     (expected_servers, clb_nodes + rcv3_nodes))
def test_without_image_id(self):
    """
    Create a server whose image is missing in various ways (for the
    BFV case).
    """
    for image in ({}, {'id': None}, ""):
        self.servers[0]['image'] = image
        self.assertEqual(
            NovaServer.from_server_details_json(self.servers[0]),
            NovaServer(id='a',
                       state=ServerState.ACTIVE,
                       image_id=None,
                       flavor_id='valid_flavor',
                       created=self.createds[0],
                       servicenet_address='',
                       links=freeze(self.servers[0]['links']),
                       json=freeze(self.servers[0])))
    del self.servers[0]['image']
    self.assertEqual(
        NovaServer.from_server_details_json(self.servers[0]),
        NovaServer(id='a',
                   state=ServerState.ACTIVE,
                   image_id=None,
                   flavor_id='valid_flavor',
                   created=self.createds[0],
                   servicenet_address='',
                   links=freeze(self.servers[0]['links']),
                   json=freeze(self.servers[0])))
def test_without_image_id(self):
    """
    Create a server whose image is missing in various ways (for the
    BFV case).
    """
    for image in ({}, {"id": None}):
        self.servers[0]["image"] = image
        self.assertEqual(
            NovaServer.from_server_details_json(self.servers[0]),
            NovaServer(
                id="a",
                state=ServerState.ACTIVE,
                image_id=None,
                flavor_id="valid_flavor",
                created=self.createds[0][1],
                servicenet_address="",
                links=freeze(self.links[0]),
                json=freeze(self.servers[0]),
            ),
        )
    del self.servers[0]["image"]
    self.assertEqual(
        NovaServer.from_server_details_json(self.servers[0]),
        NovaServer(
            id="a",
            state=ServerState.ACTIVE,
            image_id=None,
            flavor_id="valid_flavor",
            created=self.createds[0][1],
            servicenet_address="",
            links=freeze(self.links[0]),
            json=freeze(self.servers[0]),
        ),
    )
def test_middleware_will_be_called_on_first_user_dispatch(self):
    combined_reducer = combine_reducer({
        "n": normal_reducer,
        "a": reducer_a
    })
    store = create_store(combined_reducer,
                         enhancer=apply_middleware(logging_middleware))
    static_action = StaticAction(type="AppendAction", payload="900")
    store.dispatch(static_action)

    self.assertEqual(len(logger), 1)
    self.assertNotEqual(logger[0]["old_state"], store.state)
    self.assertEqual(
        logger[0]["old_state"],
        freeze({
            "a": pmap({"static": True}),
            "n": pmap({"my_type": "normal"}),
        }))
    self.assertEqual(logger[0]["new_state"], store.state)
    self.assertEqual(
        store.state,
        freeze({
            "a": {
                "static": True,
                "action": (static_action.type, static_action.payload)
            },
            "n": pmap({"my_type": "normal"}),
        }))
def keep_only_update_source_in_field(field, root, head, update):
    """Remove elements from root and head where ``source`` matches the update.

    This is useful if the update needs to overwrite all elements with the
    same source.

    .. note::
        If the update doesn't contain exactly one source in ``field``, the
        records are returned with no modifications.

    Args:
        field(str): the field to filter out.
        root(dict): the root record, whose ``field`` will be cleaned.
        head(dict): the head record, whose ``field`` will be cleaned.
        update(dict): the update record, from which the ``source`` is read.

    Returns:
        tuple: ``(root, head, update)`` with some elements filtered out from
        ``root`` and ``head``.
    """
    update_sources = set(get_value(update, '.'.join([field, 'source']), []))
    if len(update_sources) != 1:
        return root, head, update
    source = update_sources.pop()
    root = freeze(root)
    head = freeze(head)
    if field in root:
        root = root.set(field,
                        remove_elements_with_source(source, root[field]))
    if field in head:
        head = head.set(field,
                        remove_elements_with_source(source, head[field]))
    return thaw(root), thaw(head), update
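# A usage sketch for keep_only_update_source_in_field above. The records
# below are made up, and get_value / remove_elements_with_source are
# assumed to come from the surrounding module, as referenced in the
# function body.
root = {'authors': [{'full_name': 'Smith, J.', 'source': 'arxiv'}]}
head = {'authors': [{'full_name': 'Smith, John', 'source': 'arxiv'},
                    {'full_name': 'Doe, J.', 'source': 'publisher'}]}
update = {'authors': [{'full_name': 'Smith, John A.', 'source': 'arxiv'}]}

root, head, update = keep_only_update_source_in_field(
    'authors', root, head, update)
# All 'arxiv' elements are gone from root and head, so the update can
# overwrite them; the 'publisher' element in head is preserved.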
def setUp(self):
    self.tenant_id = 'tenant-id'
    self.group_id = 'group-id'
    self.state = GroupState(self.tenant_id, self.group_id, 'group-name',
                            {}, {}, None, {}, False,
                            ScalingGroupStatus.ACTIVE, desired=2)
    self.group = mock_group(self.state, self.tenant_id, self.group_id)
    self.lc = {'args': {'server': {'name': 'foo'}, 'loadBalancers': []}}
    self.desired_lbs = s(CLBDescription(lb_id='23', port=80))
    self.servers = (
        server('a', ServerState.ACTIVE, servicenet_address='10.0.0.1',
               desired_lbs=self.desired_lbs,
               links=freeze([{'href': 'link1', 'rel': 'self'}])),
        server('b', ServerState.ACTIVE, servicenet_address='10.0.0.2',
               desired_lbs=self.desired_lbs,
               links=freeze([{'href': 'link2', 'rel': 'self'}]))
    )
    self.state_active = {}
    self.cache = [thaw(self.servers[0].json), thaw(self.servers[1].json)]
    self.gsgi = GetScalingGroupInfo(tenant_id='tenant-id',
                                    group_id='group-id')
    self.manifest = {  # Many details elided!
        'state': self.state,
        'launchConfiguration': self.lc,
    }
    self.gsgi_result = (self.group, self.manifest)
    self.now = datetime(1970, 1, 1)
def from_server_details_json(cls, server_json):
    """
    Create a :obj:`NovaServer` instance from a server details JSON
    dictionary, although without any 'server' or 'servers' initial
    resource key.

    See http://docs.rackspace.com/servers/api/v2/cs-devguide/content/
    Get_Server_Details-d1e2623.html

    :return: :obj:`NovaServer` instance
    """
    try:
        server_state = ServerState.lookupByName(server_json['status'])
    except ValueError:
        server_state = ServerState.UNKNOWN_TO_OTTER
    if server_json.get("OS-EXT-STS:task_state", "") == "deleting":
        server_state = ServerState.DELETED
    metadata = server_json.get('metadata', {})
    return cls(
        id=server_json['id'],
        state=server_state,
        created=timestamp_to_epoch(server_json['created']),
        image_id=server_json.get('image', {}).get('id'),
        flavor_id=server_json['flavor']['id'],
        links=freeze(server_json['links']),
        desired_lbs=_lbs_from_metadata(metadata),
        servicenet_address=_servicenet_address(server_json),
        json=freeze(server_json))
def from_server_details_json(cls, server_json):
    """
    Create a :obj:`NovaServer` instance from a server details JSON
    dictionary, although without any 'server' or 'servers' initial
    resource key.

    See http://docs.rackspace.com/servers/api/v2/cs-devguide/content/
    Get_Server_Details-d1e2623.html

    :return: :obj:`NovaServer` instance
    """
    try:
        server_state = ServerState.lookupByName(server_json['status'])
    except ValueError:
        server_state = ServerState.UNKNOWN_TO_OTTER
    if server_json.get("OS-EXT-STS:task_state", "") == "deleting":
        server_state = ServerState.DELETED
    metadata = server_json.get('metadata', {})
    return cls(id=server_json['id'],
               state=server_state,
               created=timestamp_to_epoch(server_json['created']),
               image_id=get_in(["image", "id"], server_json),
               flavor_id=server_json['flavor']['id'],
               links=freeze(server_json['links']),
               desired_lbs=_lbs_from_metadata(metadata),
               servicenet_address=_servicenet_address(server_json),
               json=freeze(server_json))
def test_success(self):
    """
    The data is returned as a dict of
    {'servers': [NovaServer], 'lb_nodes': [CLBNode/RCv3Node]}.
    """
    clb_nodes = [CLBNode(node_id='node1', address='ip1',
                         description=CLBDescription(lb_id='lb1', port=80))]
    rcv3_nodes = [RCv3Node(node_id='node2', cloud_server_id='a',
                           description=RCv3Description(lb_id='lb2'))]
    eff = get_all_launch_server_data(
        'tid', 'gid', self.now,
        get_scaling_group_servers=_constant_as_eff(
            ('tid', 'gid', self.now), self.servers),
        get_clb_contents=_constant_as_eff((), clb_nodes),
        get_rcv3_contents=_constant_as_eff((), rcv3_nodes))
    expected_servers = [
        server('a', ServerState.ACTIVE, servicenet_address='10.0.0.1',
               links=freeze([{'href': 'link1', 'rel': 'self'}]),
               json=freeze(self.servers[0])),
        server('b', ServerState.ACTIVE, created=1,
               servicenet_address='10.0.0.2',
               links=freeze([{'href': 'link2', 'rel': 'self'}]),
               json=freeze(self.servers[1]))
    ]
    self.assertEqual(resolve_stubs(eff),
                     {'servers': expected_servers,
                      'lb_nodes': clb_nodes + rcv3_nodes})
def set_in(mapping, keys, new_value):
    """
    Take the old dictionary and traverse it via the list of keys. The
    returned dictionary will be the same as the old dictionary, but with
    the resultant value set as ``new_value``.

    Note that if more than one key is passed and any of the keys (except
    for the last) do not already exist, this raises KeyError or IndexError.

    Note that the new value does not need to be a pyrsistent data
    structure - this function will freeze everything first.

    :param dict mapping: The dictionary to change values for.
    :param iterable keys: An ordered collection of keys
    :param new_value: The value to set the keys to
    :return: A copy of the old dictionary as a PMap, with the new value.
    """
    if len(keys) < 1:
        raise ValueError("Must provide one or more keys")

    if isinstance(mapping, dict):
        mapping = freeze(mapping)

    if len(keys) == 1:
        return mapping.set(keys[0], freeze(new_value))
    else:
        child = mapping.get(keys[0], pmap())
        return mapping.set(keys[0], set_in(child, keys[1:], new_value))
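# A quick demonstration of set_in above using only pyrsistent: freeze()
# converts the plain-dict input, and the result is a PMap with the nested
# value replaced while the original dict stays untouched.
from pyrsistent import freeze

old = {'a': {'b': 1}}
new = set_in(old, ['a', 'b'], 2)
assert new == freeze({'a': {'b': 2}})
assert old == {'a': {'b': 1}}  # the input is not mutated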
def test_store_can_handle_combined_reducers_with_singledispatch(self):
    combined = combine_reducer([reducer_a, normal_reducer])
    expected_state = freeze({
        "normal_reducer": {
            "my_type": "normal"
        },
        "reducer_a": {
            "static": True
        }
    })
    store = create_store(combined)
    self.assertEqual(expected_state, store.state)

    dyn_action = DynamicAction()
    expected_state = freeze({
        "normal_reducer": {
            "my_type": "normal"
        },
        "reducer_a": {
            "static": True,
            "dynamic": "dynamo"
        }
    })
    actual_state = store.dispatch(dyn_action)
    self.assertEqual(expected_state, actual_state)
async def prefetch_globals(self):
    self.studip_settings = await self.get_studip_json("studip/settings")

    self.studip_course_type = {}
    for key, value in self.studip_settings["SEM_TYPE"].items():
        value = value.set("id", int(key))
        self.studip_course_type[int(key)] = value
        self.studip_course_type[str(key)] = value
        self.studip_course_type[str(value["name"])] = value
    self.studip_course_type = freeze(self.studip_course_type)

    self.studip_course_class = {}
    for key, value in self.studip_settings["SEM_CLASS"].items():
        value = value.set("id", int(key))
        self.studip_course_class[int(key)] = value
        self.studip_course_class[str(key)] = value
        self.studip_course_class[str(value["name"])] = value
    self.studip_course_class = freeze(self.studip_course_class)

    self.studip_file_tou = {}
    async for tou in studip_iter(self.get_studip_json,
                                 "studip/content_terms_of_use_list"):
        # id is a str like UNDEF_LICENSE
        self.studip_file_tou[tou["id"]] = tou
    self.studip_file_tou = freeze(self.studip_file_tou)

    self.studip_folder_type = await self.get_studip_json(
        "studip/file_system/folder_types")

    self.studip_semester = {}
    async for sem in studip_iter(self.get_studip_json, "semesters"):
        self.studip_semester[self.extract_id(sem)] = sem
    self.studip_semester = freeze(self.studip_semester)
def _to_sample(person, images):
    # Random images needed for representation interpolation (3.5)
    x1 = _get_random_image()
    x2 = _get_random_image()
    return m(id=person["id_class"] - 1,
             images=freeze(list(images)),
             x1=freeze(x1),
             x2=freeze(x2))
def json_deserializer(dct):
    for k, v in dct.items():
        if k == "uuid":
            try:
                dct[k] = uuid.UUID(hex=v)
            except ValueError:
                dct[k] = freeze(v)
        else:
            dct[k] = freeze(v)
    return dct
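# json_deserializer above is meant as an object_hook for json.loads: every
# parsed JSON object gets its values frozen, and "uuid" fields are
# upgraded to uuid.UUID instances when they parse as valid hex.
import json
import uuid

doc = json.loads(
    '{"uuid": "12345678123456781234567812345678", "tags": ["a"]}',
    object_hook=json_deserializer)
assert isinstance(doc['uuid'], uuid.UUID)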
def test_expand_path():
    assert expand_path(freeze({'foo': 1, 'bar': 2}), [], [any]) == [
        ['bar'],
        ['foo'],
    ]
    assert expand_path(freeze({'foo': {'bar': {'baz': 1}}}), [],
                       [any, any, any]) == [
        ['foo', 'bar', 'baz']
    ]
    assert expand_path(freeze({'foo': {'bar': {'baz': 1, 'qux': 4}}}), [],
                       ['foo', any, any]) == [
        ['foo', 'bar', 'baz'],
        ['foo', 'bar', 'qux']
    ]
class Settings(NamedTuple):
    SOURCE_DATABASE_URL: str
    TARGET_DATABASE_URL: str
    QUERY_MODIFIERS: dict = freeze({})
    IGNORE_TABLES: List[str] = freeze([])
    EXTEND_RELATIONS: List[Mapping[str, str]] = freeze([])
    IGNORE_RELATIONS: List[Mapping[str, str]] = freeze([])
    OUTPUT_DIRECTORY: Optional[str] = None

    @classmethod
    def load(cls, path):
        import commentjson
        with open(path) as f:
            data = commentjson.load(f)
        return cls(**freeze(data))

    @classmethod
    @lru_cache()
    def empty(cls):
        return cls(
            SOURCE_DATABASE_URL="",
            TARGET_DATABASE_URL="",
            IGNORE_TABLES=freeze(["example1", "migrations"]),
            EXTEND_RELATIONS=freeze([{
                "pk": "product.id",
                "fk": "product_ownership.product_id"
            }]),
            IGNORE_RELATIONS=freeze([{
                "pk": "product.id",
                "fk": "client.favorite_product_id"
            }]),
            QUERY_MODIFIERS=freeze({
                "_default": {
                    "conditions": [],
                    "limit": 300
                },
                "users": {
                    "conditions": ["email ilike '*****@*****.**'"]
                },
            }),
        )

    @property
    @lru_cache()
    def json(self):
        return json.dumps(
            {k: thaw(v) for k, v in self._asdict().items()},
            indent=4,
            sort_keys=True,
        )
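# A small usage sketch for Settings above: empty() builds a frozen example
# configuration (cached by lru_cache), and the json property thaws every
# field back into plain data for serialization.
s = Settings.empty()
assert "migrations" in s.IGNORE_TABLES
print(s.json)  # pretty-printed, thawed copy of all settings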
def test_with_servicenet(self):
    """
    Create a server that has a servicenet IP in it.
    """
    self.assertEqual(
        NovaServer.from_server_details_json(self.servers[1]),
        NovaServer(id='b',
                   state=ServerState.BUILD,
                   image_id='valid_image',
                   flavor_id='valid_flavor',
                   created=self.createds[1],
                   servicenet_address='10.0.0.1',
                   links=freeze(self.servers[1]['links']),
                   json=freeze(self.servers[1])))
def test_without_address(self):
    """
    Handles server json that does not have "addresses" in it.
    """
    self.assertEqual(
        NovaServer.from_server_details_json(self.servers[0]),
        NovaServer(id='a',
                   state=ServerState.ACTIVE,
                   image_id='valid_image',
                   flavor_id='valid_flavor',
                   created=self.createds[0],
                   servicenet_address='',
                   links=freeze(self.servers[0]['links']),
                   json=freeze(self.servers[0])))
def test_broken_predicate():
    broken_predicates = [
        lambda: None,
        lambda a, b, c: None,
        lambda a, b, c, d=None: None,
        lambda *args: None,
        lambda **kwargs: None,
    ]
    for pred in broken_predicates:
        try:
            freeze({}).transform([pred], None)
            assert False
        except ValueError as e:
            assert str(e) == "callable in transform path must take 1 or 2 arguments"
def test_without_private(self):
    """
    Creates a server that does not have a private/servicenet IP in it.
    """
    self.servers[0]['addresses'] = {'public': 'p'}
    self.assertEqual(
        NovaServer.from_server_details_json(self.servers[0]),
        NovaServer(id='a',
                   state=ServerState.ACTIVE,
                   image_id='valid_image',
                   flavor_id='valid_flavor',
                   created=self.createds[0],
                   servicenet_address='',
                   links=freeze(self.servers[0]['links']),
                   json=freeze(self.servers[0])))
def load(cls, path):
    import commentjson
    with open(path) as f:
        data = commentjson.load(f)
    return cls(**freeze(data))
def test_remove_pclass():
    class MyClass(PClass):
        a = field()
        b = field()

    m = freeze({'foo': MyClass(a=1, b=2)})
    assert m.transform(['foo', 'b'], discard) == {'foo': MyClass(a=1)}
def latest(**kwargs):
    [latest] = filter_records(
        records=url_records(
            url_mangle(URL_LATEST_TEMPLATE, kwargs),
            UbuntuBase,
        ),
        search_record=filter_for_pclass(
            pclass_type=UbuntuBase,
            pclass_kwargs=dict(
                (k, kwargs[k])
                for k in ("ubuntu_name", "ubuntu_variant", "release_cycle")
            )
        )
    )
    return filter_records(
        records=url_records(
            url_mangle(URL_DETAILS_TEMPLATE, kwargs),
            UbuntuDetail,
        ),
        search_record=filter_for_pclass(
            pclass_type=UbuntuDetail,
            pclass_kwargs=freeze(kwargs).set(
                "release_date",
                latest.release_date,
            )
        )
    )
def __init__(self) -> None:
    if SEED is not None:
        np.random.seed(SEED)

    def reflect(u: np.ndarray, w: np.ndarray, a: float) -> np.ndarray:
        return u - 2 * np.broadcast_to(w, u.shape) * (
            np.reshape(np.dot(u, w) - a, (len(u), 1)))

    control_points = np.random.rand(BOARD_SIZE, 2) - 0.5
    reflect_control_points = partial(reflect, control_points)
    down_reflect = reflect_control_points(np.array([0, 1]), -0.5)
    up_reflect = reflect_control_points(np.array([0, 1]), 0.5)
    left_reflect = reflect_control_points(np.array([1, 0]), -0.5)
    right_reflect = reflect_control_points(np.array([1, 0]), 0.5)
    extended_points = np.concatenate(
        (control_points, up_reflect, down_reflect, left_reflect,
         right_reflect))
    voronoi = sp.spatial.Voronoi(extended_points)
    self.cycles = freeze(
        np.array(voronoi.regions)[
            voronoi.point_region[:voronoi.npoints // 5]])
    edges = edges_from_cycles(self.cycles)
    verts = verts_from_edges(edges)
    (self.points, self.blue_base, self.red_base, self.blue_base_cs,
     self.red_base_cs) = self.make_border(voronoi.vertices, edges)
    self.xs = verts | edges | self.blue_base | self.red_base
def cstruct(self) -> dict:
    """Return example workflow cstruct with required data."""
    cstruct = freeze({
        'initial_state': 'draft',
        'states': {
            'draft': {
                'acm': {
                    'principals': ['moderator'],
                    'permissions': [['view', 'Deny']]
                }
            },
            'announced': {
                'acl': []
            }
        },
        'transitions': {
            'to_announced': {
                'from_state': 'draft',
                'to_state': 'announced',
                'permission': 'do_transition',
                'callback': None,
            }
        },
    })
    return cstruct
def _nevow_request_to_request_map(req):
    """
    Convert a Nevow request object into an immutable request map.
    """
    headers = req.requestHeaders
    content_type, character_encoding = _get_content_type(headers)
    iri = URL.from_text(req.uri.decode('utf-8')).to_iri()
    host = _get_first_header(headers, b'host').decode('utf-8')
    scheme = u'https' if req.isSecure() else u'http'
    if u':' in host:
        host, port = host.split(u':', 1)
        port = int(port)
    else:
        port = {u'https': 443, u'http': 80}.get(scheme)
    return m(
        body=req.content,
        content_type=content_type,
        content_length=_get_first_header(headers, b'content-length'),
        character_encoding=character_encoding,
        headers=freeze(dict(headers.getAllRawHeaders())),
        remote_addr=req.getClientIP(),
        request_method=req.method,
        server_name=host,
        server_port=port,
        scheme=scheme,
        # ssl_client_cert=XXX,
        uri=iri,
        # query_string
        path_info=url_path(iri),
        protocol=getattr(req, 'clientproto', None))
def _add_workflow(self, registry, name):
    from . import add_workflow
    cstruct = freeze({
        'initial_state': 'draft',
        'states': {
            'draft': {
                'acm': {
                    'principals': ['moderator'],
                    'permissions': [['view', 'Deny']]
                }
            },
            'announced': {
                'acl': []
            },
            'participate': {
                'acl': []
            }
        },
        'transitions': {
            'to_announced': {
                'from_state': 'draft',
                'to_state': 'announced',
                'permission': 'do_transition',
                'callback': None,
            },
            'to_participate': {
                'from_state': 'announced',
                'to_state': 'participate',
                'permission': 'do_transition',
                'callback': None,
            }
        },
    })
    add_workflow(registry, cstruct, name)
def group_across(
    rr: list[dict[str, dict]], across_key: str
) -> dict[Index, set[Element]]:
    groups: dict[Index, set[Element]] = defaultdict(set)
    for i, r in enumerate(rr):
        tags = r.pop("tags")
        assert isinstance(tags, dict)
        across_value = str(tags.pop(across_key))
        index = pmap(
            {key: value for key, value in tags.items()
             if isinstance(value, str)}
        )
        data: Mapping[tuple[str, str], Any] = pmap(
            {
                (field_name, attribute_name): freeze(attribute_value)
                for field_name, field_dict in r.items()
                for attribute_name, attribute_value in field_dict.items()
            }
        )
        element = Element(i, across_value, data)
        groups[index].add(element)
    return groups
def prepare_launch_config(scaling_group_uuid, launch_config):
    """
    Prepare a launch_config for the specified scaling group.

    This is responsible for returning a copy of the launch config that
    has metadata and unique server names added.

    :param scaling_group_uuid: The UUID of the scaling group this server
        is getting launched for.
    :param dict launch_config: The complete launch_config args we want to
        build servers from.

    :return dict: The prepared launch config.
    """
    launch_config = freeze(launch_config)
    lb_descriptions = json_to_LBConfigs(launch_config.get('loadBalancers', []))
    launch_config = prepare_server_launch_config(
        scaling_group_uuid, launch_config, lb_descriptions)
    suffix = generate_server_name()
    launch_config = set_server_name(launch_config, suffix)
    return thaw(launch_config)
def test_ptest():
    m = freeze({'a': 2, 'runners': {1: {'a': 44}, 2: {'b': 44}, 3: {'c': 66}}})
    # set() returns a new pmap; m itself is unchanged.
    m2 = m['runners'][2].set('b', 100)

    # An evolver supports mutation-style updates without changing the
    # original pmap.
    e = m.evolver()
    e['a'] = 222
    e['runners'][1]

    m3 = 3
def generate(
    cls, event_type, data=None, id=None, stream_id=None,
    timestamp=None, version=None
):
    """
    Generate an Event.

    `id` *must* be a UUID; it defaults to a random uuid4() if not
    supplied.

    Arguments:
        event_type -- String representing the event type
        data -- PMap of command data
        id -- Event id; ideally a UUID, will default to uuid4()
        stream_id -- Stream id; ideally a UUID, optional
        timestamp -- Datetime representing when the event happened,
                     defaults to datetime.utcnow()
        version -- Event version within its stream
    """
    if not id:
        id = uuid4()
    return cls(**{
        'id': str(id),
        'type': event_type,
        'data': freeze(data) or pmap(),
        'stream_id': stream_id or '',
        'timestamp': timestamp or datetime.utcnow(),
        'version': version or 0,
    })
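# A hypothetical call to generate() above, assuming Event is the enclosing
# class and its constructor accepts these keyword arguments. The payload
# dict is frozen into a pmap before construction.
event = Event.generate(
    'user_registered',
    data={'email': 'user@example.com'},
    stream_id=str(uuid4()),
)
assert event.data == pmap({'email': 'user@example.com'})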
def sample_dag(
    cls,
    source_database: Database,
    relations: RelationDAG,
    query_modifiers: Any,
    directory: Path,
) -> List["TableSample"]:
    "Samples a database according to its relation graph"
    # Change current working dir to our output directory
    # This way we avoid TableSample instances containing that information
    os.chdir(directory)
    # Prepare the sqlite db
    relations.key_schema.drop_all(cls.key_database.engine)
    relations.key_schema.create_all(cls.key_database.engine)
    samples = []
    for table in relations.topologically_sorted:
        sample = cls(
            table,
            conditions=freeze(query_modifiers[table]["conditions"]),
            limit=query_modifiers[table]["limit"],
        ).sample(source_database)
        samples.append(sample.table)
    return samples
def __init__(
    self,
    schema,
    sources=None,
    derivations=None,
    initial_config=None,
    skip_load_on_init=False,
):
    # Very bad things happen if schema is modified
    schema = freeze(schema)
    # ensure we have a valid JSON Schema
    _validate_schema(schema)
    self._schema = schema

    DefaultSettingValidator = _extend_with_default(Draft4Validator)
    self._config = initial_config or {}
    # update self._config with default values from the schema
    # since this uses setdefault, it shouldn't override initial_config
    # Uses thawed copy of schema because jsonschema wants a regular dict
    DefaultSettingValidator(thaw(schema)).validate(self._config)

    self._validator = Draft4Validator(self._schema)

    if sources is None:
        self._sources = [EnvironmentConfigLoader()]
    else:
        self._sources = sources

    self._derivations = derivations

    if not skip_load_on_init:
        self.update_config()
def _deserialize_meta(cstruct: dict, name: str) -> PMap:
    schema = create_workflow_meta_schema(cstruct)
    try:
        appstruct = schema.deserialize(cstruct)
    except Invalid as err:
        msg = 'Error adding workflow with name {0}: {1}'
        raise ConfigurationError(msg.format(name, str(err.asdict())))
    return freeze(appstruct)
def test_key_value_predicate():
    m = freeze({
        'foo': 1,
        'bar': 2,
    })
    assert m.transform([
        lambda k, v: (k, v) == ('foo', 1),
    ], lambda v: v * 3) == {"foo": 3, "bar": 2}
def test_with_servicenet(self):
    """
    Create a server that has a servicenet IP in it.
    """
    self.assertEqual(
        NovaServer.from_server_details_json(self.servers[1]),
        NovaServer(
            id="b",
            state=ServerState.BUILD,
            image_id="valid_image",
            flavor_id="valid_flavor",
            created=self.createds[1][1],
            servicenet_address="10.0.0.1",
            links=freeze(self.links[1]),
            json=freeze(self.servers[1]),
        ),
    )
def test_deleting_server(self):
    """
    A server whose "OS-EXT-STS:task_state" is "deleting" is considered
    DELETED.
    """
    self.servers[0]["OS-EXT-STS:task_state"] = "deleting"
    self.assertEqual(
        NovaServer.from_server_details_json(self.servers[0]),
        NovaServer(id='a',
                   state=ServerState.DELETED,
                   image_id='valid_image',
                   flavor_id='valid_flavor',
                   created=self.createds[0],
                   desired_lbs=pset(),
                   servicenet_address='',
                   links=freeze(self.servers[0]['links']),
                   json=freeze(self.servers[0])))
def set_route_mapping(self, route_mapping):
    """
    Record a new route mapping.

    :param route_mapping: A new value for the private ``_route_mapping``
        attribute.
    """
    self._route_mapping = freeze(route_mapping)
def test_create_servers(self):
    """Logs :obj:`CreateServer`."""
    cfg = {'configgy': 'configged', 'nested': {'a': 'b'}}
    cfg2 = {'configgy': 'configged', 'nested': {'a': 'c'}}
    creates = pbag([
        CreateServer(server_config=freeze(cfg)),
        CreateServer(server_config=freeze(cfg)),
        CreateServer(server_config=freeze(cfg2))
    ])
    self.assert_logs(creates, [
        Log('convergence-create-servers',
            fields={'num_servers': 2, 'server_config': cfg,
                    'cloud_feed': True}),
        Log('convergence-create-servers',
            fields={'num_servers': 1, 'server_config': cfg2,
                    'cloud_feed': True})
    ])
def ui_state_reduce(state, action):
    action_type = action['type']
    if action_type == ActionType.LOADED_NOTE:
        return state.update({
            'current_note_id': action['id'],
            'is_editing_title': False,
            'is_editing_text': False,
        })
    elif action_type == ActionType.CREATE_NOTE:
        return state.update({
            'current_note_id': action['id'],
            'is_editing_title': True,
            'is_editing_text': False,
        })
    elif action_type == ActionType.FOCUS_FILTER_TERM_ENTRY:
        if action['has_focus']:
            return state.update({
                'filter_term_entry_focus': True,
                'is_editing_title': False,
                'is_editing_text': False,
            })
        else:
            return state.update({
                'filter_term_entry_focus': False,
            })
    elif action_type == ActionType.CHANGE_FILTER_TERM:
        return state.update({
            'filter_term': action['filter_term'],
        })
    elif action_type == ActionType.TOGGLE_EDIT_NOTE_TEXT:
        return state.update({
            'is_editing_text': not state['is_editing_text'],
            'is_editing_title': False})
    elif action_type == ActionType.TOGGLE_EDIT_NOTE_TITLE:
        return state.update({
            'is_editing_title': not state['is_editing_title'],
            'is_editing_text': False})
    elif action_type == ActionType.FINISH_EDIT_NOTE_TITLE:
        return state.set('is_editing_title', False)
    elif action_type == ActionType.FINISH_EDIT_NOTE_TEXT:
        return state.set('is_editing_text', False)
    elif action_type == ActionType.MOVE_PANED_POSITION:
        return state.set('paned_position', action['position'])
    elif action_type == ActionType.LOADED_UI_STATE:
        return state.update(freeze(action['ui_state']))
    else:
        return state
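# A usage sketch for ui_state_reduce above, assuming ActionType comes from
# the surrounding module. Because the state is a pmap, a dispatch returns
# a new state and leaves the previous one untouched.
state = freeze({'is_editing_text': False, 'is_editing_title': True})
new_state = ui_state_reduce(
    state, {'type': ActionType.TOGGLE_EDIT_NOTE_TEXT})
assert new_state['is_editing_text'] is True
assert state['is_editing_text'] is False  # old state untouched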
def test_without_private(self):
    """
    Creates a server that does not have a private/servicenet IP in it.
    """
    self.servers[0]["addresses"] = {"public": "p"}
    self.assertEqual(
        NovaServer.from_server_details_json(self.servers[0]),
        NovaServer(
            id="a",
            state=ServerState.ACTIVE,
            image_id="valid_image",
            flavor_id="valid_flavor",
            created=self.createds[0][1],
            servicenet_address="",
            links=freeze(self.links[0]),
            json=freeze(self.servers[0]),
        ),
    )
def packer_amis(self):
    """
    :return: A ``dict`` of ``{aws_region: ami_id}`` found in the
        ``artifacts``.
    """
    for artifact in self.artifacts:
        if artifact['type'] == 'amazon-ebs':
            return _unserialize_packer_dict(artifact["id"])
    return freeze({})
def test_event_same(self):
    """test Event.same() static method."""
    ev1 = copy.copy(self.canned_event)
    ev2 = copy.copy(self.canned_event)
    self.assertTrue(Event.same(ev1, ev2))

    # make a new one with same data but new timestamp.
    ev3 = Event(freeze(dict(time=self.aware_ts, data=ev1.data())))
    self.assertFalse(Event.same(ev1, ev3))
def setUp(self, cluster):
    """
    Deploy PostgreSQL to a node.
    """
    self.cluster = cluster
    self.node_1, self.node_2 = cluster.nodes

    postgres_deployment = {
        u"version": 1,
        u"nodes": {
            self.node_1.address: [POSTGRES_APPLICATION_NAME],
            self.node_2.address: [],
        },
    }

    self.postgres_deployment_moved = {
        u"version": 1,
        u"nodes": {
            self.node_1.address: [],
            self.node_2.address: [POSTGRES_APPLICATION_NAME],
        },
    }

    self.postgres_application = {
        u"version": 1,
        u"applications": {
            POSTGRES_APPLICATION_NAME: {
                u"image": POSTGRES_IMAGE,
                u"ports": [{
                    u"internal": POSTGRES_INTERNAL_PORT,
                    u"external": POSTGRES_EXTERNAL_PORT,
                }],
                u"volume": {
                    u"dataset_id":
                        POSTGRES_APPLICATION.volume.dataset.dataset_id,
                    # The location within the container where the data
                    # volume will be mounted; see:
                    # https://github.com/docker-library/postgres/blob/
                    # docker/Dockerfile.template
                    u"mountpoint": POSTGRES_VOLUME_MOUNTPOINT,
                    u"maximum_size":
                        "%d" % (REALISTIC_BLOCKDEVICE_SIZE,),
                },
            },
        },
    }

    self.postgres_application_different_port = thaw(freeze(
        self.postgres_application).transform(
            [u"applications", POSTGRES_APPLICATION_NAME, u"ports", 0,
             u"external"],
            POSTGRES_EXTERNAL_PORT + 1))

    cluster.flocker_deploy(self, postgres_deployment,
                           self.postgres_application)
def test_lbs_from_metadata_ignores_unsupported_lb_types(self):
    """
    Creating from server json ignores unsupported LB types.
    """
    self.servers[0]['metadata'] = {
        "rax:autoscale:lb:1": '[{"port":80},{"port":90}]',
        "rax:autoscale:lb:RackConnect:{0}".format(uuid4()): None,
        "rax:autoscale:lb:Neutron:456": None
    }
    self.assertEqual(
        NovaServer.from_server_details_json(self.servers[0]),
        NovaServer(id='a',
                   state=ServerState.ACTIVE,
                   image_id='valid_image',
                   flavor_id='valid_flavor',
                   created=self.createds[0],
                   desired_lbs=pset(),
                   servicenet_address='',
                   links=freeze(self.servers[0]['links']),
                   json=freeze(self.servers[0])))
def import_resources(root: IResource, registry: Registry, filename: str):
    """Import resources from a JSON file."""
    request = _create_request(root, registry)
    resources_info = _load_resources_info(filename)
    for resource_info in resources_info:
        expected_path = _get_expected_path(resource_info)
        if _resource_exists(expected_path, root):
            logger.info("Skipping {}.".format(expected_path))
        else:
            logger.info("Creating {}".format(expected_path))
            _create_resource(freeze(resource_info), request, registry, root)
def import_resources(root: IResource, registry: Registry, filename: str):
    """Import resources from a JSON file with dummy `god` user."""
    request = create_fake_god_request(registry)
    resources_info = _load_info(filename)
    for resource_info in resources_info:
        expected_path = _get_expected_path(resource_info)
        if _resource_exists(expected_path, root):
            logger.info('Skipping {}'.format(expected_path))
        else:
            logger.info('Creating {}'.format(expected_path))
            _create_resource(freeze(resource_info), request, registry, root)