Example #1
def make_mesos_container_info(task_config: MesosTaskConfig) -> addict.Dict:
    container_info = addict.Dict(
        type=task_config.containerizer,
        volumes=thaw(task_config.volumes),
    )
    port_mappings = [
        addict.Dict(host_port=task_config.ports[0]['begin'],
                    container_port=8888)
    ]
    if container_info.type == 'DOCKER':
        container_info.docker = addict.Dict(
            image=task_config.image,
            network='BRIDGE',
            port_mappings=port_mappings,
            parameters=thaw(task_config.docker_parameters),
            force_pull_image=(not task_config.use_cached_image),
        )
    elif container_info.type == 'MESOS':
        container_info.network_infos = addict.Dict(port_mappings=port_mappings)
        # For this to work, image_providers needs to be set to 'docker' on mesos agents (as opposed
        # to 'appc' or 'oci'; we're still running docker images, we're just
        # using the UCR to do it).
        if 'image' in task_config:
            container_info.mesos.image = addict.Dict(
                type='DOCKER',  # not 'APPC' or 'OCI'
                docker=addict.Dict(name=task_config.image),
                cached=task_config.use_cached_image,
            )
    return container_info
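
A minimal sketch of the ``thaw`` call above, assuming ``task_config.volumes`` is a pyrsistent ``PVector`` of ``PMap``s (the field names are illustrative): ``thaw`` recursively converts it into the plain lists and dicts the Mesos API payload needs.

from pyrsistent import freeze, thaw

# hypothetical volume spec standing in for task_config.volumes
volumes = freeze([{'container_path': '/mnt', 'host_path': '/data', 'mode': 'RO'}])

plain = thaw(volumes)  # PVector of PMaps -> list of dicts, recursively
assert plain == [{'container_path': '/mnt', 'host_path': '/data', 'mode': 'RO'}]
assert type(plain) is list and type(plain[0]) is dict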
Example #2
def keep_only_update_source_in_field(field, root, head, update):
    """Remove elements from root and head where ``source`` matches the update.

    This is useful if the update needs to overwrite all elements with the same
    source.

    .. note::
        If the update doesn't contain exactly one source in ``field``, the
        records are returned with no modifications.

    Args:
        field(str): the field to filter out.
        root(dict): the root record, whose ``field`` will be cleaned.
        head(dict): the head record, whose ``field`` will be cleaned.
        update(dict): the update record, from which the ``source`` is read.

    Returns:
        tuple: ``(root, head, update)`` with some elements filtered out from
            ``root`` and ``head``.
    """
    update_sources = set(get_value(update, '.'.join([field, 'source']), []))
    if len(update_sources) != 1:
        return root, head, update
    source = update_sources.pop()

    root = freeze(root)
    head = freeze(head)
    if field in root:
        root = root.set(field,
                        remove_elements_with_source(source, root[field]))
    if field in head:
        head = head.set(field,
                        remove_elements_with_source(source, head[field]))

    return thaw(root), thaw(head), update
Example #3
    def test_reactivate_group_on_success_after_steps(self):
        """
        When the group started in ERROR state, and convergence succeeds, the
        group is put back into ACTIVE.
        """
        self.manifest['state'].status = ScalingGroupStatus.ERROR

        def plan(*args, **kwargs):
            return pbag([TestStep(Effect("step"))])

        sequence = [
            parallel_sequence([]),
            (Log(msg='execute-convergence', fields=mock.ANY), noop),
            parallel_sequence([
                [("step", lambda i: (StepResult.SUCCESS, []))]
            ]),
            (Log(msg='execute-convergence-results', fields=mock.ANY), noop),
            (UpdateGroupStatus(scaling_group=self.group,
                               status=ScalingGroupStatus.ACTIVE),
             noop),
            (Log('group-status-active',
                 dict(cloud_feed=True, status='ACTIVE')),
             noop),
            (UpdateServersCache(
                "tenant-id", "group-id", self.now,
                [thaw(self.servers[0].json.set('_is_as_active', True)),
                 thaw(self.servers[1].json.set('_is_as_active', True))]),
             noop),
        ]
        self.assertEqual(
            perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
            (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example #4
def filter_records(root, head, update, filters=()):
    """Apply the filters to the records."""
    root, head, update = freeze(root), freeze(head), freeze(update)
    for filter_ in filters:
        root, head, update = filter_(root, head, update)

    return thaw(root), thaw(head), thaw(update)
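
A small usage sketch of the filter contract ``filter_records`` expects, with ``filter_records`` from above in scope: each filter is a callable taking and returning the frozen ``(root, head, update)`` triple. ``drop_field`` is a toy stand-in for filters such as ``keep_only_update_source_in_field`` (Example #2), bound with ``functools.partial``.

from functools import partial

def drop_field(field, root, head, update):
    # toy filter: strip ``field`` from the (frozen) head record
    return root, head.remove(field), update

root, head, update = {'title': 'a'}, {'title': 'b', 'note': 'x'}, {'title': 'c'}
root, head, update = filter_records(
    root, head, update, filters=(partial(drop_field, 'note'),))
assert head == {'title': 'b'}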
Example #5
 def test_reactivate_group_on_success_with_no_steps(self):
     """
     When the group started in ERROR state, and convergence succeeds, the
     group is put back into ACTIVE, even if there were no steps to execute.
     """
     self.manifest['state'].status = ScalingGroupStatus.ERROR
     for serv in self.servers:
         serv.desired_lbs = pset()
     sequence = [
         parallel_sequence([]),
         (Log(msg='execute-convergence', fields=mock.ANY), noop),
         (Log(msg='execute-convergence-results', fields=mock.ANY), noop),
         (UpdateGroupStatus(scaling_group=self.group,
                            status=ScalingGroupStatus.ACTIVE),
          noop),
         (Log('group-status-active',
              dict(cloud_feed=True, status='ACTIVE')),
          noop),
         (UpdateServersCache(
             "tenant-id", "group-id", self.now,
             [thaw(self.servers[0].json.set("_is_as_active", True)),
              thaw(self.servers[1].json.set("_is_as_active", True))]),
          noop)
     ]
     self.state_active = {
         'a': {'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]},
         'b': {'id': 'b', 'links': [{'href': 'link2', 'rel': 'self'}]}
     }
     self.cache[0]["_is_as_active"] = True
     self.cache[1]["_is_as_active"] = True
     self.assertEqual(
         perform_sequence(self.get_seq() + sequence, self._invoke()),
         (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example #6
 def setUp(self):
     self.tenant_id = 'tenant-id'
     self.group_id = 'group-id'
     self.state = GroupState(self.tenant_id, self.group_id, 'group-name',
                             {}, {}, None, {}, False,
                             ScalingGroupStatus.ACTIVE, desired=2)
     self.group = mock_group(self.state, self.tenant_id, self.group_id)
     self.lc = {'args': {'server': {'name': 'foo'}, 'loadBalancers': []}}
     self.desired_lbs = s(CLBDescription(lb_id='23', port=80))
     self.servers = (
         server('a', ServerState.ACTIVE, servicenet_address='10.0.0.1',
                desired_lbs=self.desired_lbs,
                links=freeze([{'href': 'link1', 'rel': 'self'}])),
         server('b', ServerState.ACTIVE, servicenet_address='10.0.0.2',
                desired_lbs=self.desired_lbs,
                links=freeze([{'href': 'link2', 'rel': 'self'}]))
     )
     self.state_active = {}
     self.cache = [thaw(self.servers[0].json), thaw(self.servers[1].json)]
     self.gsgi = GetScalingGroupInfo(tenant_id='tenant-id',
                                     group_id='group-id')
     self.manifest = {  # Many details elided!
         'state': self.state,
         'launchConfiguration': self.lc,
     }
     self.gsgi_result = (self.group, self.manifest)
     self.now = datetime(1970, 1, 1)
Example #7
 def test_no_steps(self):
     """
     If state of world matches desired, no steps are executed, but the
     `active` servers are still updated, and SUCCESS is the return value.
     """
     for serv in self.servers:
         serv.desired_lbs = pset()
     sequence = [
         parallel_sequence([]),
         (Log('execute-convergence', mock.ANY), noop),
         (Log('execute-convergence-results',
              {'results': [], 'worst_status': 'SUCCESS'}), noop),
         (UpdateServersCache(
             "tenant-id", "group-id", self.now,
             [thaw(self.servers[0].json.set('_is_as_active', True)),
              thaw(self.servers[1].json.set("_is_as_active", True))]),
          noop)
     ]
     self.state_active = {
         'a': {'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]},
         'b': {'id': 'b', 'links': [{'href': 'link2', 'rel': 'self'}]}
     }
     self.cache[0]["_is_as_active"] = True
     self.cache[1]["_is_as_active"] = True
     self.assertEqual(
         perform_sequence(self.get_seq() + sequence, self._invoke()),
         (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example #8
 def test_agent_yml(self):
     """
     ``task_configure_flocker_agent`` writes a ``/etc/flocker/agent.yml``
     file which contains the backend configuration passed to it.
     """
     control_address = BASIC_AGENT_YML["control-service"]["hostname"]
     expected_pool = u"some-test-pool"
     expected_backend_configuration = dict(pool=expected_pool)
     commands = task_configure_flocker_agent(
         control_node=control_address,
         dataset_backend=backend_loader.get(
             BASIC_AGENT_YML["dataset"]["backend"]),
         dataset_backend_configuration=expected_backend_configuration,
         logging_config=thaw(BASIC_AGENT_YML["logging"]),
     )
     [put_agent_yml] = list(effect.intent
                            for effect in commands.intent.effects
                            if isinstance(effect.intent, Put))
     # Seems like transform should be usable here but I don't know how.
     expected_agent_config = BASIC_AGENT_YML.set(
         "dataset",
         BASIC_AGENT_YML["dataset"].update(expected_backend_configuration))
     self.assertEqual(
         put(
             content=yaml.safe_dump(thaw(expected_agent_config)),
             path=THE_AGENT_YML_PATH,
             log_content_filter=_remove_dataset_fields,
         ).intent,
         put_agent_yml,
     )
Example #9
 def test_agent_yml(self):
     """
     ``task_configure_flocker_agent`` writes a ``/etc/flocker/agent.yml``
     file which contains the backend configuration passed to it.
     """
     control_address = BASIC_AGENT_YML["control-service"]["hostname"]
     expected_pool = u"some-test-pool"
     expected_backend_configuration = dict(pool=expected_pool)
     commands = task_configure_flocker_agent(
         control_node=control_address,
         dataset_backend=backend_loader.get(
             BASIC_AGENT_YML["dataset"]["backend"]
         ),
         dataset_backend_configuration=expected_backend_configuration,
         logging_config=thaw(BASIC_AGENT_YML["logging"]),
     )
     [put_agent_yml] = list(
         effect.intent
         for effect in
         commands.intent.effects
         if isinstance(effect.intent, Put)
     )
     # Seems like transform should be usable here but I don't know how.
     expected_agent_config = BASIC_AGENT_YML.set(
         "dataset",
         BASIC_AGENT_YML["dataset"].update(expected_backend_configuration)
     )
     self.assertEqual(
         put(
             content=yaml.safe_dump(thaw(expected_agent_config)),
             path=THE_AGENT_YML_PATH,
             log_content_filter=_remove_dataset_fields,
         ).intent,
         put_agent_yml,
     )
Example #10
    async def getxattr(self):
        import json
        from pyrsistent import thaw

        xattrs = {}
        xattrs["known-tokens"] = json.dumps(self.known_tokens)

        if self.is_folder:
            # list_contents is not cached, so we don't know here whether that information is available
            # see studip_fuse.launcher.aioimpl.asyncio.alru_realpath.CachingRealPath for an implementation
            pass
        else:
            try:
                download = await self.open_file()
                if download.is_loading:
                    xattrs["contents-status"] = "pending"
                    xattrs["contents-exception"] = (
                        "InvalidStateError: operation is not complete yet")
                elif download.is_completed:
                    xattrs["contents-status"] = "available"
                    xattrs["contents-exception"] = ""
                elif download.exception():
                    xattrs["contents-status"] = "failed"
                    xattrs["contents-exception"] = download.exception()
                else:
                    xattrs["contents-status"] = "unknown"
                    xattrs["contents-exception"] = (
                        "InvalidStateError: operation was not started yet")
            except FuseOSError as e:
                xattrs["contents-status"] = "unavailable"
                xattrs["contents-exception"] = e
        if isinstance(xattrs.get("contents-exception", None), BaseException):
            exc = xattrs["contents-exception"]
            xattrs["contents-exception"] = "%s: %s" % (type(exc).__name__, exc)

        url = "/studip/dispatch.php/"
        if self._file:
            url += "file/details/%s?cid=%s" % (self._file["id"],
                                               self._course["course_id"])
        elif self._folder:
            url += "course/files/index/%s?cid=%s" % (self._folder["id"],
                                                     self._course["course_id"])
        elif self._course:
            url += "course/files?cid=%s" % (self._course["course_id"])
        elif self._semester:
            url += "my_courses/set_semester?sem_select=%s" % (
                self._semester["id"])
        else:
            url += "my_courses"
        xattrs["url"] = self.session.studip_url(url)

        xattrs["json"] = json.dumps({
            "semester": thaw(self._semester),
            "course": thaw(self._course),
            "folder": thaw(self._folder),
            "file": thaw(self._file)
        })
        return xattrs
Example #11
    def test_linking(self, cluster):
        """
        A link from an origin container to a destination container allows the
        origin container to establish connections to the destination container
        when the containers are running on different machines, using an
        address obtained from ``<ALIAS>_PORT_<PORT>_TCP_{ADDR,PORT}``-style
        environment variables set in the origin container's environment.
        """
        _, destination_port = find_free_port()
        _, origin_port = find_free_port()

        [destination, origin] = cluster.nodes

        busybox = pmap({
            u"image": u"busybox",
        })

        destination_container = busybox.update({
            u"name": random_name(self),
            u"node_uuid": destination.uuid,
            u"ports": [{u"internal": 8080, u"external": destination_port}],
            u"command_line": BUSYBOX_HTTP,
        })
        self.addCleanup(
            cluster.remove_container, destination_container[u"name"]
        )

        origin_container = busybox.update({
            u"name": random_name(self),
            u"node_uuid": origin.uuid,
            u"links": [{u"alias": "DEST", u"local_port": 80,
                        u"remote_port": destination_port}],
            u"ports": [{u"internal": 9000, u"external": origin_port}],
            u"command_line": [
                u"sh", u"-c", u"""\
echo -n '#!/bin/sh
nc $DEST_PORT_80_TCP_ADDR $DEST_PORT_80_TCP_PORT
' > /tmp/script.sh;
chmod +x /tmp/script.sh;
nc -ll -p 9000 -e /tmp/script.sh
                """]})
        self.addCleanup(
            cluster.remove_container, origin_container[u"name"]
        )
        running = gatherResults([
            cluster.create_container(thaw(destination_container)),
            cluster.create_container(thaw(origin_container)),
            # Wait for the link target container to be accepting connections.
            verify_socket(destination.public_address, destination_port),
            # Wait for the link source container to be accepting connections.
            verify_socket(origin.public_address, origin_port),
        ])

        running.addCallback(
            lambda _: self.assert_busybox_http(
                origin.public_address, origin_port))
        return running
Example #12
    def test_success(self):
        """
        Executes the plan and returns SUCCESS when that's the most severe
        result.
        """
        dgs = get_desired_group_state(self.group_id, self.lc, 2)
        deleted = server(
            'c', ServerState.DELETED, servicenet_address='10.0.0.3',
            desired_lbs=self.desired_lbs,
            links=freeze([{'href': 'link3', 'rel': 'self'}]))
        self.servers += (deleted,)

        steps = [
            TestStep(
                Effect(
                    {'dgs': dgs,
                     'servers': self.servers,
                     'lb_nodes': (),
                     'now': 0})
                .on(lambda _: (StepResult.SUCCESS, [])))]

        def plan(dgs, servers, lb_nodes, now, build_timeout):
            self.assertEqual(build_timeout, 3600)
            return steps

        sequence = [
            parallel_sequence([]),
            (Log('execute-convergence',
                 dict(servers=self.servers, lb_nodes=(), steps=steps,
                      now=self.now, desired=dgs)), noop),
            parallel_sequence([
                [({'dgs': dgs, 'servers': self.servers,
                   'lb_nodes': (), 'now': 0},
                  noop)]
            ]),
            (Log('execute-convergence-results',
                 {'results': [{'step': steps[0],
                               'result': StepResult.SUCCESS,
                               'reasons': []}],
                  'worst_status': 'SUCCESS'}), noop),
            # Note that servers arg is non-deleted servers
            (UpdateServersCache(
                "tenant-id", "group-id", self.now,
                [thaw(self.servers[0].json.set("_is_as_active", True)),
                 thaw(self.servers[1].json.set("_is_as_active", True))]),
             noop)
        ]

        # all the servers updated in cache in beginning
        self.cache.append(thaw(deleted.json))

        self.assertEqual(
            perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
            (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example #13
    def perform_packer_configure(self, dispatcher, intent):
        """
        Copy the prototype configuration files and provisioning scripts to a
        temporary location and modify one of the configurations with the values
        found in ``intent``.
        """
        temporary_configuration_directory = self.working_directory.child(
            'packer_configuration')
        temporary_configuration_directory.makedirs()
        intent.configuration_directory.copyTo(
            temporary_configuration_directory)

        template_name = (u"template_{distribution}_{template}.json".format(
            distribution=intent.distribution,
            template=intent.template,
        ))
        template_path = temporary_configuration_directory.child(template_name)

        with template_path.open('r') as infile:
            configuration = json.load(infile)

        configuration['builders'][0]['region'] = intent.build_region
        configuration['builders'][0]['source_ami'] = intent.source_ami_map[
            intent.build_region]
        configuration['builders'][0]['ami_regions'] = thaw(
            intent.publish_regions)
        output_template_path = template_path.temporarySibling()
        with output_template_path.open('w') as outfile:
            _json_dump(configuration, outfile)
        # XXX temporarySibling sets alwaysCreate = True for some reason.
        output_template_path.alwaysCreate = False
        return output_template_path
Example #14
def publish_installer_images_effects(options):
    # Create configuration directory
    configuration_path = yield Effect(
        intent=PackerConfigure(
            build_region=options["build_region"],
            publish_regions=options["regions"],
            template=options["template"],
            distribution=options["distribution"],
            source_ami=options["source_ami"],
        )
    )
    # Build the Docker images
    ami_map = yield Effect(
        intent=PackerBuild(
            configuration_path=configuration_path,
        )
    )
    # Publish the regional AMI map to S3
    yield Effect(
        intent=WriteToS3(
            content=json.dumps(thaw(ami_map), encoding="utf-8"),
            target_bucket=options['target_bucket'],
            target_key=options["template"],
        )
    )
Example #15
    def collapse(self, field_spec_list, name, reducer, append=False):
        """
        Collapses this event's columns, represented by ``field_spec_list``,
        into a single column. The collapsing itself is done with the reducer
        function. Optionally the collapsed column can be appended to the
        existing columns, or replace them (the default).

        Parameters
        ----------
        field_spec_list : list
            List of columns to collapse. If you need to retrieve deep
            nested values, that ['can.be', 'done.with', 'this.notation'].
        name : str
            Name of new column with collapsed data.
        reducer : function
            Reducer function applied to the list of collapsed values.
        append : bool, optional
            Set True to add new column to existing data dict, False to create
            a new Event with just the collapsed data.

        Returns
        -------
        Event
            New event object.
        """
        data = thaw(self.data()) if append else dict()
        vals = list()

        for i in field_spec_list:
            vals.append(self.get(i))

        data[name] = reducer(vals)

        return self.set_data(data)
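
A hypothetical usage sketch, assuming the pypond-style ``Event(timestamp_ms, data)`` constructor this method appears to belong to: ``sum`` serves as the reducer, and ``append=True`` keeps the original columns alongside the new one.

e = Event(1429673400000, {'in': 2, 'out': 3})
collapsed = e.collapse(['in', 'out'], 'total', sum, append=True)
assert collapsed.get('total') == 5  # reducer applied to [2, 3]
assert collapsed.get('in') == 2     # originals kept because append=True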
Example #17
    def to_point(self, cols=None):
        """
        Returns a flat array starting with the timestamp, followed by the values.

        Can be given an optional list of columns so the returned list will
        have the values in order. Primarily for the TimeSeries wire format.

        Parameters
        ----------
        cols : list, optional
            List of data columns to order the data points in so the
            TimeSeries wire format lines up correctly. If not specified,
            the points will be in whatever order dict.values() decides
            to return them in.

        Returns
        -------
        list
            Epoch ms followed by points.
        """
        points = [self.timerange().to_json()]

        data = thaw(self.data())

        if isinstance(cols, list):
            points += [data.get(x, None) for x in cols]
        else:
            points += [x for x in list(data.values())]

        return points
Example #19
 def test_sequence(self):
     """
     The function generates a packer configuration file, runs packer
     build and uploads the AMI ids to a given S3 bucket.
     """
     options = default_options()
     configuration_path = self.make_temporary_directory()
     ami_map = PACKER_OUTPUT_US_ALL.output
     perform_sequence(
         seq=[
             (
                 PackerConfigure(
                     build_region=options["build_region"],
                     publish_regions=options["regions"],
                     source_ami=options["source_ami"],
                     template=options["template"],
                     distribution=options["distribution"],
                 ),
                 lambda intent: configuration_path,
             ),
             (PackerBuild(configuration_path=configuration_path), lambda intent: ami_map),
             (
                 WriteToS3(
                     content=json.dumps(thaw(ami_map), encoding="utf-8"),
                     target_bucket=options["target_bucket"],
                     target_key=options["template"],
                 ),
                 lambda intent: None,
             ),
         ],
         eff=publish_installer_images_effects(options=options),
     )
Example #20
 def json(self):
     return json.dumps(
         {k: thaw(v)
          for k, v in self._asdict().items()},
         indent=4,
         sort_keys=True,
     )
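
A self-contained sketch of the same serialization idiom, assuming a ``namedtuple``-style record whose fields hold pyrsistent values; ``thaw`` makes each field JSON-serializable before ``json.dumps`` runs.

import json
from collections import namedtuple
from pyrsistent import freeze, thaw

Record = namedtuple('Record', ['tags', 'limits'])
rec = Record(tags=freeze(['a', 'b']), limits=freeze({'cpu': 2}))

# PMap/PVector are not JSON-serializable directly; the thawed copies are
out = json.dumps({k: thaw(v) for k, v in rec._asdict().items()},
                 indent=4, sort_keys=True)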
Example #21
    def test_sequence(self):
        """
        The function generates a packer configuration file, runs packer
        build and uploads the AMI ids to a given S3 bucket.
        """
        options = PublishInstallerImagesOptions()
        options.parseOptions(
            [b'--source-ami-map', b'{"us-west-1": "ami-1234"}']
        )

        configuration_path = self.make_temporary_directory()
        ami_map = PACKER_OUTPUT_US_ALL.output
        perform_sequence(
            seq=[
                (PackerConfigure(
                    build_region=options["build_region"],
                    publish_regions=options["regions"],
                    source_ami_map=options["source-ami-map"],
                    template=options["template"],
                ), lambda intent: configuration_path),
                (PackerBuild(
                    configuration_path=configuration_path,
                ), lambda intent: ami_map),
                (StandardOut(
                    content=json.dumps(
                        thaw(ami_map),
                        encoding='utf-8',
                    ) + b"\n",
                ), lambda intent: None),
            ],
            eff=publish_installer_images_effects(options=options)
        )
Example #22
    def summarize(oo: list[Categorical | None]) -> Any | None:
        counter: Counter = Counter()

        for o in oo:
            if isinstance(o, Categorical):
                counter.update(o.counter)
            else:
                counter.update({o: 1})

        values = list(counter.keys())

        count_list: list[dict[str, Any]] = list()

        seen: set[Hashable] = set()
        for a in values:
            if a in seen:
                continue

            merge = set(b for b in values if check_almost_equal(a, b)) - seen

            count = sum(counter[b] for b in merge)
            count_list.append(dict(value=thaw(a), count=count))

            seen.update(merge)

        if len(count_list) == 1:
            (d,) = count_list
            return d["value"]

        return count_list
Example #23
def test_thaw_can_handle_subclasses_of_persistent_base_types():
    class R(PRecord):
        x = field()

    result = thaw(R(x=1))
    assert result == {'x': 1}
    assert type(result) is dict
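
For context, a sketch of the round-trip behavior this test relies on: ``freeze`` and ``thaw`` convert recursively, and the thawed result is an ordinary mutable structure.

from pyrsistent import freeze, thaw

frozen = freeze({'servers': [{'id': 'a'}], 'count': 1})
plain = thaw(frozen)
assert plain == {'servers': [{'id': 'a'}], 'count': 1}
plain['servers'].append({'id': 'b'})  # plain copies are safe to mutate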
Example #24
def _insert_to_list(item, objects_list, position=None):
    """Inserts value into list at proper position (as close to requested position as possible but not before it).

    If no position provided element will be added at the end of the list.
    Args:
        item: Value to insert into objects_list with `ORDER_KEY` key
        objects_list(list): List where value should be inserted
        position(int): If set then use this position instead `ORDER_KEY` key

    Returns(tuple): (position on which it was placed, merged objects_list).
    """
    item = thaw(item)
    if not position and ORDER_KEY in item:
        position = item[ORDER_KEY]
    if position is not None:
        for idx, element in enumerate(objects_list):
            if isinstance(element, Iterable) and ORDER_KEY in element:
                if element[ORDER_KEY] > position:
                    objects_list.insert(idx, item)
                    return idx, objects_list
            elif idx > position:
                objects_list.insert(idx, item)
                return idx, objects_list
    objects_list.append(item)
    return len(objects_list) - 1, objects_list
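
A hypothetical usage sketch with ``_insert_to_list`` from above in scope, assuming the module-level ``ORDER_KEY`` constant (not shown here) is the string ``'order'``:

from pyrsistent import freeze

objects_list = [{'order': 0, 'v': 'a'}, {'order': 2, 'v': 'c'}]
item = freeze({'order': 1, 'v': 'b'})  # thawed internally before insertion

idx, merged = _insert_to_list(item, objects_list)
assert idx == 1
assert [o['v'] for o in merged] == ['a', 'b', 'c']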
Example #25
    def _freeze(self, action=None):
        """
        Freeze this message for logging, registering it with C{action}.

        @param action: The L{Action} which is the context for this message. If
            C{None}, the L{Action} will be deduced from the current call
            stack.

        @return: A L{PMap} with added C{timestamp}, C{task_uuid}, and
            C{task_level} entries.
        """
        if action is None:
            action = currentAction()
        if action is None:
            task_uuid = unicode(uuid4())
            task_level = [1]
        else:
            task_uuid = action._identification['task_uuid']
            task_level = thaw(action._nextTaskLevel().level)
        timestamp = self._timestamp()
        return self._contents.update({
            'timestamp': timestamp,
            'task_uuid': task_uuid,
            'task_level': task_level,
        })
Example #26
def main():
    from pprint import pprint
    from pyrsistent import thaw
    import sys
    with open(sys.argv[1]) as f:
        tasks = to_tasks(Message.new(x) for x in parse_json_stream(f))
        pprint(thaw(tasks))
Example #27
 def _event_to_item(self, e):
     raw = thaw(e)
     if type(raw) is dict:
         resp = {}
         for k, v in raw.items():
             if type(v) is str:
                 resp[k] = {'S': v}
             elif type(v) is bool:
                 resp[k] = {'BOOL': v}
             elif isinstance(v, (int, float)):
                 resp[k] = {'N': str(v)}
             elif type(v) is dict:
                 resp[k] = self._event_to_item(v)
             elif type(v) is list:
                 if len(v) > 0:
                     vals = []
                     for i in v:
                         vals.append(self._event_to_item(i))
                     resp[k] = {'L': vals}
         return {'M': resp}
     elif type(raw) is str:
         return {'S': raw}
     elif type(raw) in [int, float]:
         return {'N': str(raw)}
     else:
         print("Missed converting key %s type %s" % (raw, type(raw)))
Example #28
    def _is_valid_linear_event(self, event):
        """
        Check to see if an event has good values when doing
        linear fill, since we need to keep a completely intact
        event for the values.

        While we are inspecting the data payload, make a note if
        any of the paths are pointing at a list; that will
        trigger the list-filling code later.
        """

        valid = True

        field_path = self._field_path_to_array(self._field_spec[0])

        val = nested_get(thaw(event.data()), field_path)

        # this is pointing at a path that does not exist; issue a warning
        # but call the event valid so it will be emitted. can't fill what
        # isn't there.
        if val == 'bad_path':
            self._warn('path does not exist: {0}'.format(field_path),
                       ProcessorWarning)
            return valid

        # a tracked field path is not valid so this is
        # not a valid linear event. also, if it is not a numeric
        # value, mark it as invalid and let _interpolate_event_list()
        # complain about/skip it.
        if not is_valid(val) or not isinstance(val, numbers.Number):
            valid = False

        return valid
Example #29
    def _freeze(self, action=None):
        """
        Freeze this message for logging, registering it with C{action}.

        @param action: The L{Action} which is the context for this message. If
            C{None}, the L{Action} will be deduced from the current call
            stack.

        @return: A L{PMap} with added C{timestamp}, C{task_uuid}, and
            C{task_level} entries.
        """
        if action is None:
            action = current_action()
        if action is None:
            task_uuid = unicode(uuid4())
            task_level = [1]
        else:
            task_uuid = action._identification[TASK_UUID_FIELD]
            task_level = thaw(action._nextTaskLevel().level)
        timestamp = self._timestamp()
        new_values = {
            TIMESTAMP_FIELD: timestamp,
            TASK_UUID_FIELD: task_uuid,
            TASK_LEVEL_FIELD: task_level
        }
        if "action_type" not in self._contents and ("message_type"
                                                    not in self._contents):
            new_values["message_type"] = ""
        return self._contents.update(new_values)
Example #30
    def __init__(
        self,
        schema,
        sources=None,
        derivations=None,
        initial_config=None,
        skip_load_on_init=False,
    ):
        # Very bad things happen if schema is modified
        schema = freeze(schema)
        # ensure we have a valid JSON Schema
        _validate_schema(schema)
        self._schema = schema

        DefaultSettingValidator = _extend_with_default(Draft4Validator)
        self._config = initial_config or {}
        # update self._config with default values from the schema
        # since this uses setdefault, it shouldn't override initial_config
        # Uses thawed copy of schema because jsonschema wants a regular dict
        DefaultSettingValidator(thaw(schema)).validate(self._config)

        self._validator = Draft4Validator(self._schema)

        if sources is None:
            self._sources = [EnvironmentConfigLoader()]
        else:
            self._sources = sources

        self._derivations = derivations

        if not skip_load_on_init:
            self.update_config()
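
A minimal sketch of why the schema is thawed before validation: as the comment above notes, jsonschema wants a regular dict, while the frozen schema is a ``PMap``.

from jsonschema import Draft4Validator
from pyrsistent import freeze, thaw

schema = freeze({
    'type': 'object',
    'properties': {'port': {'type': 'integer'}},
})
Draft4Validator(thaw(schema)).validate({'port': 1234})  # passes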
Example #31
def prepare_launch_config(scaling_group_uuid, launch_config):
    """
    Prepare a launch_config for the specified scaling_group.

    This is responsible for returning a copy of the launch config that
    has metadata and unique server names added.

    :param IScalingGroup scaling_group: The scaling group this server is
        getting launched for.
    :param dict launch_config: The complete launch_config args we want to build
        servers from.

    :return dict: The prepared launch config.
    """
    launch_config = freeze(launch_config)

    lb_descriptions = json_to_LBConfigs(launch_config.get('loadBalancers', []))

    launch_config = prepare_server_launch_config(scaling_group_uuid,
                                                 launch_config,
                                                 lb_descriptions)

    suffix = generate_server_name()
    launch_config = set_server_name(launch_config, suffix)

    return thaw(launch_config)
Example #32
    def _is_valid_linear_event(self, event):
        """
        Check to see if an event has good values when doing
        linear fill, since we need to keep a completely intact
        event for the values.

        While we are inspecting the data payload, make a note if
        any of the paths are pointing at a list; that will
        trigger the list-filling code later.
        """

        valid = True

        field_path = self._field_path_to_array(self._field_spec[0])

        val = nested_get(thaw(event.data()), field_path)

        # this is pointing at a path that does not exist; issue a warning
        # but call the event valid so it will be emitted. can't fill what
        # isn't there.
        if val == 'bad_path':
            self._warn('path does not exist: {0}'.format(field_path), ProcessorWarning)
            return valid

        # a tracked field path is not valid so this is
        # not a valid linear event. also, if it is not a numeric
        # value, mark it as invalid and let _interpolate_event_list()
        # complain about/skip it.
        if not is_valid(val) or not isinstance(val, numbers.Number):
            valid = False

        return valid
Example #33
def make_mesos_resources(
    task_config: MesosTaskConfig,
    role: str,
) -> List[addict.Dict]:
    return [
        addict.Dict(
            name='cpus',
            type='SCALAR',
            role=role,
            scalar=addict.Dict(value=task_config.cpus),
        ),
        addict.Dict(name='mem',
                    type='SCALAR',
                    role=role,
                    scalar=addict.Dict(value=task_config.mem)),
        addict.Dict(name='disk',
                    type='SCALAR',
                    role=role,
                    scalar=addict.Dict(value=task_config.disk)),
        addict.Dict(name='gpus',
                    type='SCALAR',
                    role=role,
                    scalar=addict.Dict(value=task_config.gpus)),
        addict.Dict(
            name='ports',
            type='RANGES',
            role=role,
            ranges=addict.Dict(range=thaw(task_config.ports)),
        ),
    ]
Example #34
def update_material(root, head, update):
    if "erratum" in get_value(thaw(update), 'dois.material'):
        return root, head, update
    for field in FIELDS_WITH_MATERIAL_KEY:
        update = update.transform(
            [field, ny], lambda element: element.set("material", "erratum"))
    return root, head, update
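
For reference, a sketch of the ``transform``/``ny`` idiom used above: ``ny`` matches every index at that level, so the callable runs on each element of the field. ``publication_info`` is a hypothetical stand-in for a member of ``FIELDS_WITH_MATERIAL_KEY``.

from pyrsistent import freeze, ny

update = freeze({'publication_info': [{'journal': 'X'}, {'journal': 'Y'}]})
update = update.transform(
    ['publication_info', ny],
    lambda element: element.set('material', 'erratum'))
assert all(e['material'] == 'erratum' for e in update['publication_info'])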
Example #35
    def to_point(self, cols=None):
        """
        Returns a flat array starting with the timestamp, followed by the values.
        Doesn't include the groupByKey (key).

        Can be given an optional list of columns so the returned list will
        have the values in order. Primarily for the TimeSeries wire format.

        Parameters
        ----------
        cols : list, optional
            List of columns to order the points in so the TimeSeries
            wire format is rendered correctly.

        Returns
        -------
        list
            Epoch ms followed by points.
        """
        points = [self.index_as_string()]

        data = thaw(self.data())

        if isinstance(cols, list):
            points += [data.get(x, None) for x in cols]
        else:
            points += [x for x in list(data.values())]

        return points
Example #36
    def test_application_volumes(self):
        """
        ``datasets_from_deployment`` returns dataset dictionaries for the
        volumes attached to applications on all nodes.
        """
        expected_hostname = u"node1.example.com"
        expected_dataset = Dataset(dataset_id=u"jalkjlk")
        volume = AttachedVolume(manifestation=Manifestation(
            dataset=expected_dataset, primary=True),
                                mountpoint=FilePath(b"/blah"))

        node = Node(
            hostname=expected_hostname,
            applications=frozenset({
                Application(name=u'mysql-clusterhq', image=object()),
                Application(name=u'site-clusterhq.com',
                            image=object(),
                            volume=volume)
            }),
        )

        deployment = Deployment(nodes=frozenset([node]))
        expected = dict(dataset_id=expected_dataset.dataset_id,
                        primary=expected_hostname,
                        metadata=thaw(expected_dataset.metadata))
        self.assertEqual([expected],
                         list(datasets_from_deployment(deployment)))
Example #37
    def to_dict(self) -> dict:
        """Convert the experiment to a dictionary.

        Returns:
            A dict with all data from the sacred data model.
        """
        return thaw(self._data)
Example #39
    def test_primary_and_replica_manifestations(self):
        """
        ``datasets_from_deployment`` does not return replica manifestations
        on other nodes.
        """

        expected_hostname = u"node1.example.com"
        expected_dataset = Dataset(dataset_id=u"jalkjlk")
        volume = AttachedVolume(manifestation=Manifestation(
            dataset=expected_dataset, primary=True),
                                mountpoint=FilePath(b"/blah"))

        node1 = Node(
            hostname=expected_hostname,
            applications=frozenset({
                Application(name=u'mysql-clusterhq', image=object()),
                Application(name=u'site-clusterhq.com',
                            image=object(),
                            volume=volume)
            }),
        )
        expected_manifestation = Manifestation(dataset=expected_dataset,
                                               primary=False)
        node2 = Node(hostname=u"node2.example.com",
                     applications=frozenset(),
                     other_manifestations=frozenset([expected_manifestation]))

        deployment = Deployment(nodes=frozenset([node1, node2]))
        expected = dict(dataset_id=expected_dataset.dataset_id,
                        primary=expected_hostname,
                        metadata=thaw(expected_dataset.metadata))
        self.assertEqual([expected],
                         list(datasets_from_deployment(deployment)))
Example #40
def update_servers_cache(group, now, servers, lb_nodes, lbs,
                         include_deleted=True):
    """
    Updates the cache, adding servers, with a flag on each one indicating
    whether autoscale is active on it. All arguments after ``now`` are
    resources specific to the ``launch_server`` config that are used by that
    planner. Here we only check whether servers are in their desired LBs,
    since that is needed by the REST API, and ignore ``lbs``.

    :param group: scaling group
    :param list servers: list of NovaServer objects
    :param list lb_nodes: list of CLBNode objects
    :param dict lbs: load balancer objects keyed on ID (currently ignored)
    :param include_deleted: Include deleted servers in cache. Defaults to True.
    """
    server_dicts = []
    for server in servers:
        sd = thaw(server.json)
        if is_autoscale_active(server, lb_nodes):
            sd["_is_as_active"] = True
        if server.state != ServerState.DELETED or include_deleted:
            server_dicts.append(sd)

    return Effect(
        UpdateServersCache(group.tenant_id, group.uuid, now, server_dicts))
Example #42
def prepare_launch_config(scaling_group_uuid, launch_config):
    """
    Prepare a launch_config for the specified scaling_group.

    This is responsible for returning a copy of the launch config that
    has metadata and unique server names added.

    :param IScalingGroup scaling_group: The scaling group this server is
        getting launched for.
    :param dict launch_config: The complete launch_config args we want to build
        servers from.

    :return dict: The prepared launch config.
    """
    launch_config = freeze(launch_config)

    lb_descriptions = json_to_LBConfigs(launch_config.get('loadBalancers', []))

    launch_config = prepare_server_launch_config(
        scaling_group_uuid, launch_config, lb_descriptions)

    suffix = generate_server_name()
    launch_config = set_server_name(launch_config, suffix)

    return thaw(launch_config)
Example #43
 def test_sequence(self):
     """
     The function generates a packer configuration file, runs packer
     build and uploads the AMI ids to a given S3 bucket.
     """
     options = default_options()
     configuration_path = self.make_temporary_directory()
     ami_map = PACKER_OUTPUT_US_ALL.output
     perform_sequence(seq=[
         (PackerConfigure(
             build_region=options["build_region"],
             publish_regions=options["regions"],
             source_ami=options["source_ami"],
             template=options["template"],
             distribution=options["distribution"],
         ), lambda intent: configuration_path),
         (PackerBuild(configuration_path=configuration_path),
          lambda intent: ami_map),
         (WriteToS3(
             content=json.dumps(
                 thaw(ami_map),
                 encoding='utf-8',
             ),
             target_bucket=options["target_bucket"],
             target_key=options["template"],
         ), lambda intent: None),
     ],
                      eff=publish_installer_images_effects(options=options))
Example #44
    def test_sequence(self):
        """
        The function generates a packer configuration file, runs packer
        build and uploads the AMI ids to a given S3 bucket.
        """
        options = PublishInstallerImagesOptions()
        options.parseOptions(
            [b'--source-ami-map', b'{"us-west-1": "ami-1234"}'])

        configuration_path = self.make_temporary_directory()
        ami_map = PACKER_OUTPUT_US_ALL.output
        perform_sequence(seq=[
            (PackerConfigure(
                build_region=options["build_region"],
                publish_regions=options["regions"],
                source_ami_map=options["source-ami-map"],
                template=options["template"],
                distribution=options["distribution"],
            ), lambda intent: configuration_path),
            (PackerBuild(configuration_path=configuration_path),
             lambda intent: ami_map),
            (StandardOut(content=json.dumps(
                thaw(ami_map),
                encoding='utf-8',
            ) + b"\n", ), lambda intent: None),
        ],
                         eff=publish_installer_images_effects(options=options))
Example #45
    def query_modifiers(self) -> Mapping[Table, Mapping[str, Any]]:
        """
        QUERY_MODIFIERS key.
        Scenographer will take this key into account while sampling.
        This method will return the equivalent Table instances
        (bound to source_database).
        """
        mods = self.options.QUERY_MODIFIERS
        default_limit = dig(thaw(mods), "_default", "limit") or 30
        default_conditions = dig(thaw(mods), "_default",
                                 "conditions") or list()

        non_specified_entrypoints = operator.sub(
            set([t.name for t in self.relation_dag.entrypoints]),
            set(mods.keys()),
        )
        if non_specified_entrypoints:
            logger.warning(
                "Entrypoints are advised to be added as query modifiers. "
                "They define what the final sample will look like")
            logger.warning(
                "These entrypoints are not specified: {}",
                non_specified_entrypoints,
            )

        modifiers = {}
        for table in self.relation_dag.tables:
            if table.name not in mods:
                limit = default_limit
                conditions = default_conditions

            else:
                limit, conditions = (
                    mods[table.name].get("limit"),
                    mods[table.name].get("conditions"),
                )
                if not limit and not conditions:
                    logger.warning("QUERY_MODIFIER for {} malformed.",
                                   table.name)
                    continue

                limit = limit or default_limit
                conditions = conditions or default_conditions

            modifiers[table] = {"limit": limit, "conditions": conditions}

        return modifiers
Example #46
def test_config_with_reporters_success():
    config = minimal_valid_config.transform(
        ['reporters'],
        lambda reporters: reporters.append(valid_reporter_config))

    parsed = validate_and_parse_config(thaw(config))
    assert bool(parsed)
    assert parsed == config
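
A minimal sketch of the ``transform``-then-``thaw`` idiom these config tests use, with a placeholder reporter entry:

from pyrsistent import freeze, thaw

minimal = freeze({'reporters': []})
config = minimal.transform(
    ['reporters'], lambda reporters: reporters.append({'type': 'stdout'}))
assert thaw(config) == {'reporters': [{'type': 'stdout'}]}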
Example #47
def test_config_with_reporters_fail():
    for key in ['type', 'command']:
        invalid_reporter = valid_reporter_config.remove(key)
        config = minimal_valid_config.transform(
            ['reporters'],
            lambda reporters: reporters.append(invalid_reporter))
        with pytest.raises(SystemExit):
            validate_and_parse_config(thaw(config))
Example #48
  def _tojson_helper(self):
    def setup_time(time,scheduled):
      result = time.JSONable()
      result['mid'] = scheduled
      return result

    result = {'agents': thaw(self.agents),
              'times': thaw(self.times),
              'requirements': thaw({mid: {r.type: r for r in rs.values()}
                                    for mid,rs in
                                    self.requirements.iteritems()}),
              'unsatisfied': thaw(self.unsatisfied),
              'costs': thaw(self.costs),
              'meetings': {a: [setup_time(t,ts.get(t,default=-1))
                               for t in self.times]
                           for a,ts in self.forward.iteritems()}}
    return result
Example #49
    def event_list_as_list(self):
        """return a python list of the event list.

        Returns
        -------
        list
            Thawed version of internal immutable data structure.
        """
        return thaw(self.event_list())
Example #50
    def stringify(self):
        """Produce a json string of the internal data.

        Returns
        -------
        str
            String representation of this object's internal data.
        """
        return json.dumps(thaw(self.data()))
Example #51
    def setUp(self, cluster):
        """
        Deploy PostgreSQL to a node.
        """
        self.cluster = cluster

        self.node_1, self.node_2 = cluster.nodes

        postgres_deployment = {
            u"version": 1,
            u"nodes": {
                self.node_1.address: [POSTGRES_APPLICATION_NAME],
                self.node_2.address: [],
            },
        }

        self.postgres_deployment_moved = {
            u"version": 1,
            u"nodes": {
                self.node_1.address: [],
                self.node_2.address: [POSTGRES_APPLICATION_NAME],
            },
        }

        self.postgres_application = {
            u"version": 1,
            u"applications": {
                POSTGRES_APPLICATION_NAME: {
                    u"image": POSTGRES_IMAGE,
                    u"ports": [{
                        u"internal": POSTGRES_INTERNAL_PORT,
                        u"external": POSTGRES_EXTERNAL_PORT,
                    }],
                    u"volume": {
                        u"dataset_id":
                            POSTGRES_APPLICATION.volume.dataset.dataset_id,
                        # The location within the container where the data
                        # volume will be mounted; see:
                        # https://github.com/docker-library/postgres/blob/
                        # docker/Dockerfile.template
                        u"mountpoint": POSTGRES_VOLUME_MOUNTPOINT,
                        u"maximum_size":
                            "%d" % (REALISTIC_BLOCKDEVICE_SIZE,),
                    },
                },
            },
        }

        self.postgres_application_different_port = thaw(freeze(
            self.postgres_application).transform(
                [u"applications", POSTGRES_APPLICATION_NAME, u"ports", 0,
                 u"external"], POSTGRES_EXTERNAL_PORT + 1))

        cluster.flocker_deploy(self, postgres_deployment,
                               self.postgres_application)
Example #52
 def check_service(self, database, config, subscriptions, k8s_state, aws):
     expected = new_service(config.kubernetes_namespace, self.kube_model)
     actual = k8s_state.services.item_by_name(expected.metadata.name)
     # Don't actually care about the status.  That belongs to the server
     # anyway.
     tweaked = actual.set(u"status", None)
     assert_that(
         tweaked,
         GoodEquals(expected),
     )
     Message.log(check_service=thaw(expected))
Example #53
    def as_effect(self):
        """Produce an :obj:`Effect` to update a stack."""
        stack_config = dissoc(thaw(self.stack_config), 'stack_name')
        eff = update_stack(stack_name=self.stack.name, stack_id=self.stack.id,
                           stack_args=stack_config)

        def report_success(result):
            retry_msg = 'Waiting for stack to update'
            return ((StepResult.RETRY, [ErrorReason.String(retry_msg)])
                    if self.retry else (StepResult.SUCCESS, []))

        return eff.on(success=report_success)
Example #54
 def report(self, result):
     result = self.common.set("result", result)
     context = start_action(system="reporter:post")
     with context.context():
         posting = DeferredContext(
             treq.post(
                 self.location.encode("ascii"),
                 json.dumps(thaw(result)),
                 timeout=30,
             )
         )
         return posting.addActionFinish()
Example #55
    def to_json(self):
        """
        Returns the collection as json object.

        This behaves like json.loads(s) in that it produces the
        actual vanilla data structure, not a JSON string.

        Returns
        -------
        list
            A thawed list of Event objects.
        """
        return thaw(self._event_list)
Example #56
    def test_event_merge(self):
        """Test Event.merge()/merge_events()"""
        # same timestamp, different keys

        # good ones, same ts, different payloads
        pay1 = dict(foo='bar', baz='quux')
        ev1 = Event(self.aware_ts, pay1)

        pay2 = dict(foo2='bar', baz2='quux')
        ev2 = Event(self.aware_ts, pay2)

        merged = Event.merge([ev1, ev2])
        self.assertEqual(set(thaw(merged[0].data())), set(dict(pay1, **pay2)))
Example #57
 def test_update_cluster_state(self):
     """
     ``NonManifestDatasets.update_cluster_state`` returns a new
     ``DeploymentState`` instance with its ``nonmanifest_datasets`` field
     replaced with the value of the ``NonManifestDatasets.datasets`` field.
     """
     dataset = Dataset(dataset_id=unicode(uuid4()))
     datasets = {dataset.dataset_id: dataset}
     nonmanifest = NonManifestDatasets(datasets=datasets)
     deployment = DeploymentState()
     updated = nonmanifest.update_cluster_state(deployment)
     self.assertEqual(
         datasets, thaw(updated.nonmanifest_datasets)
     )
Example #58
  def default(self, obj):
    if isinstance(obj,set):
      return sorted(list(obj))
    elif isinstance(obj,TimeRange):
      return obj.JSONable()
    elif isinstance(obj,AllOfRequirement):
      obj = obj.JSONable()
      return obj
    elif isinstance(obj,OneOfRequirement):
      obj = obj.JSONable()
      return obj
    elif isinstance(obj,PRecord):
      return thaw(obj)

    return json.JSONEncoder.default(self, obj)
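
A runnable sketch of the encoder pattern above, reduced to the ``PRecord`` branch: ``json.dumps`` calls ``default`` for objects it cannot serialize natively, and ``thaw`` turns the record into a plain dict (compare Example #23).

import json
from pyrsistent import PRecord, field, thaw

class R(PRecord):
    x = field()

class PEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, PRecord):
            return thaw(obj)  # PRecord -> plain dict
        return json.JSONEncoder.default(self, obj)

print(json.dumps({'rec': R(x=1)}, cls=PEncoder))  # {"rec": {"x": 1}}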