Example #1
def _retry_exception(f, steps=(0.1,) * 10, sleep=sleep):
    """
    Retry a function if it raises an exception.

    :return: Whatever the function returns.
    """
    steps = iter(steps)

    while True:
        try:
            Message.new(
                message_type=(u"flocker:provision:libcloud:retry_exception:trying"), function=fullyQualifiedName(f)
            ).write()
            return f()
        except:
            # Try to get the next sleep time from the steps iterator.  Do it
            # without raising an exception (StopIteration) to preserve the
            # current exception context.
            for step in steps:
                write_traceback()
                sleep(step)
                break
            else:
                # Didn't hit the break, so didn't iterate at all, so we're out
                # of retry steps.  Fail now.
                raise
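A minimal usage sketch (the `flaky` helper and the no-op sleep are hypothetical, added for illustration): retry a callable that fails twice before succeeding.

attempts = []

def flaky():
    # Hypothetical function: fails on the first two calls, then succeeds.
    attempts.append(1)
    if len(attempts) < 3:
        raise RuntimeError("transient failure")
    return "ok"

# A no-op sleep keeps the sketch fast; three steps allow up to three tries.
result = _retry_exception(flaky, steps=(0.0,) * 3, sleep=lambda _: None)
assert result == "ok"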
Example #2
def g(recurse):
    with start_action(action_type="a-recurse={}".format(recurse)):
        Message.log(message_type="m-recurse={}".format(recurse))
        if recurse:
            set(g(False))
        else:
            yield
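A sketch of driving this generator under Eliot's test helper (the test class and assertion are illustrative, not from the source); `capture_logging` installs a `MemoryLogger` for the test and passes it in for inspection.

from unittest import TestCase

from eliot.testing import capture_logging


class GeneratorLoggingTests(TestCase):
    @capture_logging(None)
    def test_nested_actions(self, logger):
        # Drain the generator so the nested action and messages all run.
        list(g(True))
        # logger.messages is a list of dicts, one per logged message.
        self.assertTrue(any(
            m.get("message_type") == "m-recurse=True"
            for m in logger.messages))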
Example #3
        def make_node(node):
            public_address = _filter_ipv4(node.public_ips)[0]
            if isinstance(public_address, unicode):
                public_address = public_address.encode("ascii")

            if self._use_private_addresses:
                private_address = _filter_ipv4(node.private_ips)[0]
            else:
                private_address = None

            if isinstance(private_address, unicode):
                private_address = private_address.encode("ascii")

            Message.log(
                message_type="flocker:provision:libcloud:node_created",
                name=node.name,
                id=node.id,
                public_address=public_address,
                private_address=private_address,
            )
            return LibcloudNode(
                provisioner=self,
                node=node, address=public_address,
                private_address=private_address,
                distribution=distribution)
Example #4
    def _get(self, blockdevice_id):
        for volume in self.list_volumes():
            if volume.blockdevice_id == blockdevice_id:
                return volume
        Message.new(Error="Could Not Find Volume "
                    + str(blockdevice_id)).write(_logger)
        raise UnknownVolume(blockdevice_id)
    def list_volumes(self):
        """
        Return ``BlockDeviceVolume`` instances for all the files in the
        ``unattached`` directory and all per-host directories.

        See ``IBlockDeviceAPI.list_volumes`` for parameter and return type
        documentation.
        """
        volumes = []
        try:
            # Query for volume folder by name VOL_FLOCKER
            # and get list of volumes. The array may have
            # other volumes not owned by Flocker
            vol_folder = self.mgmt.request(XtremIOMgmt.VOLUME_FOLDERS,
                                           name=XtremIOMgmt.BASE_PATH + str(self._cluster_id))['content']
            # Get the number of volumes
            Message.new(NoOfVolumesFound=vol_folder['num-of-vols']).write(_logger)
            if int(vol_folder['num-of-vols']) > 0:
                for vol in vol_folder['direct-list']:
                    # Message.new(VolumeName=vol[1]).write(_logger)
                    volume = self._get_vol_details(vol[1])
                    volumes.append(volume)
                    # Message.new(volume=volume).write(_logger)
        except Exception as exe:
            pass
            # Message.new(Error=exe).write(_logger)

        return volumes
    def _build_config(self, ignored):
        """
        Build a Flocker deployment configuration for the given cluster
        and parameters.
        The configuration consists of identically configured applications
        (containers) uniformly spread over all cluster nodes.

        :return dict: containing the json we need to send to compose to
            create the datasets and containers we want.
        """
        Message.log(action="Building config")
        application_root = {}
        applications = {}
        application_root["version"] = 1
        application_root["applications"] = applications
        for node in self.nodes:
            for i in range(self.per_node):
                name = "app_%s_%d" % (node.public_address, i)
                applications[name] = deepcopy(self.application_template)

        deployment_root = {}
        nodes = {}
        deployment_root["nodes"] = nodes
        deployment_root["version"] = 1
        for node in self.nodes:
            addr = "%s" % node.public_address
            nodes[addr] = []
            for i in range(self.per_node):
                name = "app_%s_%d" % (node.public_address, i)
                nodes[addr].append(name)

        return {"applications": application_root,
                "deployment": deployment_root}
Example #7
def openssl_verify(cafile, certificatefile, **kwargs):
    """
    Use OpenSSL CLI to verify a certificate was signed by a given certificate
    authority.

    :param str cafile: The name of the certificate authority file.
    :param str certificatefile: The name of the certificate file to be checked
        against the supplied authority.
    :return: A ``bool`` that is True if the certificate was verified,
        otherwise False if verification failed or an error occurred.
    """
    command = [b"openssl", b"verify", b"-CAfile", cafile, certificatefile]
    try:
        result = run_process(command, **kwargs)
        return result.output.strip() == b"{}: OK".format(certificatefile)
    except CalledProcessError as e:
        result = run_process([
            "openssl", "x509", "-text", "-in", cafile], **kwargs)
        cafile_info = result.output
        result = run_process([
            "openssl", "x509", "-text", "-in", certificatefile], **kwargs)
        certificate_info = result.output
        error = str(e)
        error = error + "\n" + cafile_info + "\n" + certificate_info
        Message.new(
            message_type="flocker.ca.functional:openssl_verify_error",
            error=error).write(Logger())
        return False
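A hedged usage sketch (the file names are hypothetical and must exist on disk):

verified = openssl_verify(b"cluster.crt", b"node.crt")
if not verified:
    print("node.crt was not signed by cluster.crt")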
Example #8
    def create_volume(self, dataset_id, size):
        """
        Create a block device using the ICinderVolumeManager.
        The cluster_id and dataset_id are stored as metadata on the volume.

        See:

        http://docs.rackspace.com/cbs/api/v1.0/cbs-devguide/content/POST_createVolume_v1__tenant_id__volumes_volumes.html
        """
        metadata = {
            CLUSTER_ID_LABEL: unicode(self.cluster_id),
            DATASET_ID_LABEL: unicode(dataset_id),
        }
        action_type = u"blockdevice:cinder:create_volume"
        with start_action(action_type=action_type):
            requested_volume = self.cinder_volume_manager.create(
                size=Byte(size).to_GB().value,
                metadata=metadata,
            )
            Message.new(blockdevice_id=requested_volume.id).write()
            created_volume = wait_for_volume(
                volume_manager=self.cinder_volume_manager,
                expected_volume=requested_volume,
            )
        return _blockdevicevolume_from_cinder_volume(
            cinder_volume=created_volume,
        )
    def create_network(self, name, nwtype):
        self.authenticate_user()
        try:
            networkid = self.network_obj.query_by_name(name)
            if networkid:
                Message.new(Debug="Network Already Exists").write(_logger)
            else:
                self.network_obj.create(name, nwtype)

            # Adding host ports to the network.
            f = open('/etc/iscsi/initiatorname.iscsi', 'r')
            for line in f:
                if line[0] != '#':
                    current_line = line.split('=')
                    host_port = current_line[1]
                    if "\n" in host_port:
                        host_port = host_port.split('\n')[0]
                    tz = self.network_obj.show(name)
                    if "endpoints" in tz:
                        endpoints = tz['endpoints']
                        if host_port not in endpoints:
                            self.network_obj.add_endpoint(name,
                                                          endpoint=host_port)
                    break
        except utils.SOSError as e:
            if e.err_code == utils.SOSError.HTTP_ERR:
                raise utils.SOSError(
                    utils.SOSError.HTTP_ERR,
                    "coprhd create network HTTP_ERR" + e.err_text)
            elif e.err_code == utils.SOSError.SOS_FAILURE_ERR:
                raise utils.SOSError(
                    utils.SOSError.SOS_FAILURE_ERR,
                    "coprhd create network failed" + e.err_text)
            else:
                Message.new(Debug="coprhd create network failed").write(_logger)
Example #10
    def _get_device_path_api(self, volume):
        """
        Return the device path reported by the Cinder API.

        :param volume: The Cinder ``Volume`` which is attached.
        :returns: ``FilePath`` of the device created by the virtio_blk
            driver.
        """
        if volume.attachments:
            attachment = volume.attachments[0]
            if len(volume.attachments) > 1:
                # As far as we know you can not have more than one attachment,
                # but, perhaps we're wrong and there should be a test for the
                # multiple attachment case.  FLOC-1854.
                # Log a message if this ever happens.
                Message.new(
                    message_type=(
                        u'flocker:node:agents:blockdevice:openstack:'
                        u'get_device_path:'
                        u'unexpected_multiple_attachments'
                    ),
                    volume_id=unicode(volume.id),
                    attachment_devices=u','.join(
                        unicode(a['device']) for a in volume.attachments
                    ),
                ).write()
        else:
            raise UnattachedVolume(volume.id)

        return FilePath(attachment['device'])
    def add_initiators(self, sync, hostlabel, protocol, portwwn, initname):
        self.authenticate_user()
        portwwn = None
        try:
            f = open('/etc/iscsi/initiatorname.iscsi', 'r')
            for line in f:
                if line[0] != '#':
                    s1 = line.split('=')
                    portwwn = str(s1[1])
                    if "\n" in portwwn:
                        portwwn = portwwn.split('\n')[0]
                    break
            initname = portwwn
            initiatorwwn = None
            self.hostinitiator_obj.create(sync, hostlabel, protocol,
                                          initiatorwwn, portwwn)
        except utils.SOSError as e:
            if e.err_code == utils.SOSError.HTTP_ERR:
                if e.err_text.find('same Initiator Port already exists') != -1:
                    Message.new(
                        Debug="coprhd add initiators already added").write(_logger)
                else:
                    raise utils.SOSError(
                        utils.SOSError.HTTP_ERR,
                        "coprhd add initiators HTTP_ERR" + e.err_text)
            elif e.err_code == utils.SOSError.SOS_FAILURE_ERR:
                raise utils.SOSError(
                    utils.SOSError.SOS_FAILURE_ERR,
                    "coprhd add initiators failed" + e.err_text)
            else:
                Message.new(Debug="coprhd add initiators failed").write(_logger)
Example #12
    def get_device_path(self, blockdevice_id):
        Message.new(operation=u'get_device_path',
                    blockdevice_id=blockdevice_id).write()
        lun_name = self._get_lun_name_from_blockdevice_id(blockdevice_id)
        lun = self._client.get_lun_by_name(lun_name)
        if lun == {}:
            raise UnknownVolume(blockdevice_id)

        alu = lun['lun_id']

        rc, out = self._client.get_storage_group(self._group)
        if rc != 0:
            raise Exception(rc, out)
        lunmap = self._client.parse_sg_content(out)['lunmap']
        try:
            # The LUN has already been added to this storage group, perhaps
            # by a previous attempt to attach in which the OS device did not
            # appear.
            hlu = lunmap[alu]
        except KeyError:
            raise UnattachedVolume(blockdevice_id)
        hlu_bus_path = _hlu_bus_paths(hlu)[0]

        # XXX This will only operate on one of the resulting device paths.
        # /sys/class/scsi_disk/x:x:x:HLU/device/block/sdvb for example.
        device_path = _device_paths_for_hlu_bus_path(hlu_bus_path)[0]

        if not _device_path_is_usable(device_path):
            raise UnattachedVolume(blockdevice_id)
        Message.new(operation=u'get_device_path_output',
                    blockdevice_id=blockdevice_id,
                    device_path=device_path.path).write()
        return device_path
Example #13
    def create_volume(self, dataset_id, size):
        """
        Create a block device using the ICinderVolumeManager.
        The cluster_id and dataset_id are stored as metadata on the volume.

        See:

        http://docs.rackspace.com/cbs/api/v1.0/cbs-devguide/content/POST_createVolume_v1__tenant_id__volumes_volumes.html
        """
        metadata = {
            CLUSTER_ID_LABEL: unicode(self.cluster_id),
            DATASET_ID_LABEL: unicode(dataset_id),
        }
        action_type = u"blockdevice:cinder:create_volume"
        with start_action(action_type=action_type):
            # There could be a difference between the user-requested and
            # Cinder-created volume sizes due to several reasons:
            # 1) Rounding when converting the user-supplied 'size' to GiB.
            # 2) Cinder-specific size constraints.
            # XXX: Address the size mismatch (see
            # https://clusterhq.atlassian.net/browse/FLOC-1874).
            requested_volume = self.cinder_volume_manager.create(
                size=Byte(size).to_GiB().value,
                metadata=metadata,
            )
            Message.new(blockdevice_id=requested_volume.id).write()
            created_volume = wait_for_volume(
                volume_manager=self.cinder_volume_manager,
                expected_volume=requested_volume,
            )
        return _blockdevicevolume_from_cinder_volume(
            cinder_volume=created_volume,
        )
Example #14
def detach_destroy_volumes(api):
    """
    Detach and destroy all volumes known to this API.
    If we failed to detach a volume for any reason,
    sleep for 1 second and retry until we hit CLEANUP_RETRY_LIMIT.
    This is to facilitate best effort cleanup of volume
    environment after each test run, so that future runs
    are not impacted.
    """
    volumes = api.list_volumes()
    retry = 0
    action_type = u"agent:blockdevice:cleanup:details"
    with start_action(action_type=action_type):
        while retry < CLEANUP_RETRY_LIMIT and len(volumes) > 0:
            for volume in volumes:
                try:
                    if volume.attached_to is not None:
                        api.detach_volume(volume.blockdevice_id)
                    api.destroy_volume(volume.blockdevice_id)
                except:
                    write_traceback(_logger)

            time.sleep(1.0)
            volumes = api.list_volumes()
            retry += 1

        if len(volumes) > 0:
            Message.new(u"agent:blockdevice:failedcleanup:volumes",
                        volumes=volumes).write()
Example #15
    def create_volume(self, dataset_id, size):
        """
        Create a block device using the ICinderVolumeManager.
        The cluster_id and dataset_id are stored as metadata on the volume.

        See:

        http://docs.rackspace.com/cbs/api/v1.0/cbs-devguide/content/POST_createVolume_v1__tenant_id__volumes_volumes.html
        """
        metadata = {
            CLUSTER_ID_LABEL: unicode(self.cluster_id),
            DATASET_ID_LABEL: unicode(dataset_id),
        }
        requested_volume = self.cinder_volume_manager.create(
            size=int(Byte(size).to_GiB().value),
            metadata=metadata,
        )
        Message.new(message_type=CINDER_CREATE,
                    blockdevice_id=requested_volume.id).write()
        created_volume = wait_for_volume_state(
            volume_manager=self.cinder_volume_manager,
            expected_volume=requested_volume,
            desired_state=u'available',
            transient_states=(u'creating',),
        )
        return _blockdevicevolume_from_cinder_volume(
            cinder_volume=created_volume,
        )
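The size conversion above relies on bitmath; a quick illustration (values chosen for illustration) of the rounding concern noted in Example #13:

from bitmath import Byte

# An exact multiple converts cleanly: 100 GiB expressed in bytes.
assert int(Byte(107374182400).to_GiB().value) == 100
# A size that is one byte larger is silently truncated by int().
assert int(Byte(107374182401).to_GiB().value) == 100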
Example #16
    def get_device_path(self, blockdevice_id):
        """
        Return the device path that has been allocated to the block device on
        the host to which it is currently attached.

        :param unicode blockdevice_id: The unique identifier for the block
            device.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises UnattachedVolume: If the supplied ``blockdevice_id`` is
            not attached to a host.
        :returns: A ``FilePath`` for the device.
        """
        # raises UnknownVolume
        volume = self._get(blockdevice_id)

        # raises UnattachedVolume
        if volume.attached_to is None:
            Message.new(Error="Could get Device Path "
                        + str(blockdevice_id)
                        + "is not attached").write(_logger)
            raise UnattachedVolume(blockdevice_id)

        # Check the "actual volume" for attachment
        sio_volume = self._client.get_volume_by_id(
            str(blockdevice_id))
        sdcs = self._client.get_sdc_for_volume(sio_volume)
        if len(sdcs) == 0:
            Message.new(Error="Could get Device Path "
                        + str(blockdevice_id)
                        + "is not attached").write(_logger)
            raise UnattachedVolume(blockdevice_id)

        # return the real path of the device
        return self._get_dev_from_blockdeviceid(volume.blockdevice_id)
def log_totals(result):
    Message.log(
        message_type="flocker.benchmark.container_setup:finish",
        container_count=self.container_count,
        error_count=self.error_count,
    )
    return result
Example #18
def create_cloudformation_stack(template_url, parameters, aws_config):
    """
    Create a CloudFormation stack.

    :param unicode template_url: Cloudformation template URL on S3.
    :param dict parameters: The parameters required by the template.
    :param dict aws_config: environment variables to be merged with the current
        process environment before running the ``aws`` sub-command.

    :returns: A ``Deferred`` which fires when the stack has been created.
    """
    # Request stack creation.
    stack_name = CLOUDFORMATION_STACK_NAME + str(int(time.time()))
    output = aws_output(
        ['cloudformation', 'create-stack',
         '--disable-rollback',
         '--parameters', json.dumps(parameters),
         '--stack-name', stack_name,
         '--template-url', template_url],
        aws_config
    )
    output = json.loads(output)
    stack_id = output['StackId']
    Message.new(cloudformation_stack_id=stack_id).write()
    return wait_for_stack_status(stack_id, 'CREATE_COMPLETE', aws_config)
def log_progress():
    Message.log(
        message_type="flocker.benchmark.container_setup:progress",
        container_count=self.container_count,
        error_count=self.error_count,
        total_containers=total,
    )
Example #20
    def wait_for_volume(cls, blockdevice_id, time_limit=60):
        """
        Wait for the device corresponding to ``blockdevice_id`` to appear.

        :param unicode blockdevice_id: The identifier of the volume whose
            device to wait for.
        :param int time_limit: The maximum time, in seconds, to wait for
            the device to appear.
        :raises Exception: If the device does not appear within
            ``time_limit``.
        :returns: ``None`` once the device exists.
        """
        start_time = time.time()
        while True:
            exists = cls._dev_exists_from_blockdeviceid(
                blockdevice_id)
            if exists:
                return

            elapsed_time = time.time() - start_time
            if elapsed_time < time_limit:
                time.sleep(0.1)
            else:
                Message.new(Error="Could Find Device for Volume "
                            + "Timeout on: "
                            + str(blockdevice_id)).write(_logger)
                raise Exception(
                    'Timed out while waiting for volume. '
                    'Expected Volume: {!r}, '
                    'Elapsed Time: {!r}, '
                    'Time Limit: {!r}.'.format(
                        blockdevice_id, elapsed_time, time_limit
                    )
                )
    def return_multipath_device(self, blockdevice_id):
        """

        :param blockdevice_id:
        :return: DeviveAbsPath - Multipath device path
        """
        lunid = self.data.get_lun_map(blockdevice_id)
        try:
            # Query multipath for the device name.
            output = check_output([b"multipath -v2 -ll"], shell=True)
            # Sample output of `multipath -v2 -ll`:
            #3514f0c5461400172 dm-5 XtremIO ,XtremApp
            #size=1.0M features='0' hwhandler='0' wp=rw
            #`-+- policy='queue-length 0' prio=0 status=active
            # |- 7:0:0:2 sdg 8:96 active faulty running
            # `- 3:0:0:2 sdf 8:80 active faulty running

            # Parse the above output for the device name under /dev/mapper
            for row in output.split('\n'):
                if re.search(r'XtremApp', row, re.I):
                    deviceName = row.split(' ')[0]
                if re.search(r'\d:\d:\d:' + str(lunid), row, re.I):
                    deviceAbsPath = EMCXtremIOBlockDeviceAPI.DEFAULT_MULTIPATH_DEVICE_PATH + deviceName
                    if os.path.exists(deviceAbsPath):
                        output = check_output([b"mkfs.ext3 " + deviceAbsPath], shell=True)
                        return deviceAbsPath
        except Exception as ex:
            Message.new(value="Exception when querying for multipath device").write(_logger)
            raise UnknownVolume(blockdevice_id)
Example #22
    def list_volumes(self):
        """
        Return all volumes that belong to this Flocker cluster.
        """
        try:
            ebs_volumes = self.connection.get_all_volumes()
            message_type = BOTO_LOG_RESULT + u':listed_volumes'
            Message.new(
                message_type=message_type,
                volume_ids=list(volume.id for volume in ebs_volumes),
            ).write()
        except EC2ResponseError as e:
            # Work around some internal race-condition in EBS by retrying,
            # since this error makes no sense:
            if e.code == NOT_FOUND:
                return self.list_volumes()
            else:
                raise

        volumes = []
        for ebs_volume in ebs_volumes:
            if _is_cluster_volume(self.cluster_id, ebs_volume):
                volumes.append(
                    _blockdevicevolume_from_ebs_volume(ebs_volume)
                )
        message_type = BOTO_LOG_RESULT + u':listed_cluster_volumes'
        Message.new(
            message_type=message_type,
            volume_ids=list(volume.blockdevice_id for volume in volumes),
        ).write()
        return volumes
def _pytest_collection_modifyitems(task_arguments, worker, items, invenio_rule,
                                   option, batch_recids_, all_recids_):
    """Report allowed recids and jsonpaths to master and await start.

    :type items: list
    """
    _ensure_only_one_test_function_exists_in_check(items)
    item = items[0]

    # TODO: There could be a flag that declares whether there are side-effects
    # in the DB, instead of checking for use of record_fetching_fixturenames

    # Inform the worker about allowed paths and recids
    if _test_func_uses_record_related_fixtures(item):
        worker.allowed_paths, worker.allowed_recids = \
            _get_restrictions_from_check_class(item, task_arguments,
                                               batch_recids_, all_recids_)
    else:
        worker.allowed_paths, worker.allowed_recids = None, None

    # Check if there are conflicts with other workers
    worker.status = StatusWorker.ready
    with worker.lock():
        blockers = _worker_conflicts_with_currently_running(worker)
        worker.retry_after_ids = {bl.uuid for bl in blockers}
        if blockers:
            Message.log(message_type='detected conflicting workers', value=str(blockers))
            del items[:]
        else:
            option.invenio_reporters = load_reporters(invenio_rule, option.invenio_execution)
            worker.status = StatusWorker.running
Example #24
    def resize_volume(self, blockdevice_id, size):
        """
        Resize an unattached ``blockdevice_id``.

        This changes the amount of storage available.  It does not change the
        data on the volume (including the filesystem).

        :param unicode blockdevice_id: The unique identifier for the block
            device being detached.
        :param int size: The required size, in bytes, of the volume.

        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.

        :returns: ``None``
        """
        # raises UnknownVolume
        volume = self._get(blockdevice_id)

        # raises AlreadyAttachedVolume, do we want this?
        # It says only an unattached volume; if the volume is attached,
        # do we detach, resize, and then reattach? Or should we just
        # assume that all callers of this function know that the volume
        # is detached already?
        if volume.attached_to is not None:
            Message.new(Error="Cannot Resize Volume "
                        + str(blockdevice_id)
                        + " is attached").write(_logger)
            raise AlreadyAttachedVolume(blockdevice_id)

        sio_volume = self._client.get_volume_by_id(str(blockdevice_id))

        size_in_gb = int(Byte(size).to_GiB().value)
        self._client.resize_volume(sio_volume, size_in_gb)
Example #25
def run_process(command, *args, **kwargs):
    """
    Run a child process, capturing its stdout and stderr.

    :param list command: An argument list to use to launch the child process.

    :raise CalledProcessError: If the child process has a non-zero exit status.

    :return: A ``_ProcessResult`` instance describing the result of the child
         process.
    """
    kwargs["stdout"] = PIPE
    kwargs["stderr"] = STDOUT
    action = start_action(
        action_type="run_process", command=command, args=args, kwargs=kwargs)
    with action:
        process = Popen(command, *args, **kwargs)
        output = process.stdout.read()
        status = process.wait()
        result = _ProcessResult(command=command, output=output, status=status)
        # TODO: We should be using a specific logging type for this.
        Message.new(
            command=result.command,
            output=result.output,
            status=result.status,
        ).write()
        if result.status:
            raise _CalledProcessError(
                returncode=status, cmd=command, output=output,
            )
    return result
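A hedged usage sketch of the helper above; it assumes `_CalledProcessError` subclasses the standard `CalledProcessError` so the latter can be caught.

from subprocess import CalledProcessError

try:
    result = run_process([b"ls", b"/tmp"])
    print(result.output)
except CalledProcessError as e:
    # run_process attaches the combined stdout/stderr to the exception.
    print("command failed with status %d" % (e.returncode,))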
Example #26
    def stop(self):
        """
        Stop the scenario from being maintained by stopping all the
        loops that may be executing.

        :return Deferred[Optional[Dict[unicode, Any]]]: Scenario metrics.
        """
        self.is_started = False
        if self.monitor_loop.running:
            self.monitor_loop.stop()

        if self.loop.running:
            self.loop.stop()

        outstanding_requests = self.rate_measurer.outstanding()

        if outstanding_requests > 0:
            msg = (
                "There are {num_requests} outstanding requests. " "Waiting {num_seconds} seconds for them to complete."
            ).format(num_requests=outstanding_requests, num_seconds=self.timeout)
            Message.log(key="outstanding_requests", value=msg)

        with start_action(action_type=u"flocker:benchmark:scenario:stop", scenario="request_load"):

            def no_outstanding_requests():
                return self.rate_measurer.outstanding() == 0

            scenario_stopped = loop_until(self.reactor, no_outstanding_requests, repeat(1))
            timeout(self.reactor, scenario_stopped, self.timeout)
            scenario = DeferredContext(scenario_stopped)

            def handle_timeout(failure):
                failure.trap(CancelledError)
                msg = ("Force stopping the scenario. " "There are {num_requests} outstanding requests").format(
                    num_requests=outstanding_requests
                )
                Message.log(key="force_stop_request", value=msg)

            scenario.addErrback(handle_timeout)

            def scenario_cleanup(ignored):
                """
                Calls the scenario cleanup, and wraps it inside an eliot
                start action, so we can see the logs if something goes
                wrong within the cleanup

                :return Deferred: that will fire once the cleanup has been
                    completed
                """
                with start_action(action_type=u"flocker:benchmark:scenario:cleanup", scenario="request_load"):
                    return self.request.run_cleanup()

            scenario.addBoth(scenario_cleanup)

            def return_metrics(_ignore):
                return self.rate_measurer.get_metrics()

            scenario.addCallback(return_metrics)

            return scenario.addActionFinish()
Example #27
def instance_error(failure):
    Message.log(
        message_type="flocker:provision:aws:async_get_node:failed"
    )
    instance.terminate()
    write_failure(failure)
    return failure
    def handlePLAINTEXTServer(self, header):
        """
        Parse a complete HTTP-like Foolscap negotiation request and begin
        proxying to a destination selected based on the extracted TubID.
        """
        # the client sends us a GET message
        lines = header.split("\r\n")
        if not lines[0].startswith("GET "):
            raise BananaError("not right")
        command, url, version = lines[0].split()
        if not url.startswith("/id/"):
            # probably a web browser
            raise BananaError("not right")
        targetTubID = url[4:]

        Message.log(event_type=u"handlePLAINTEXTServer", tub_id=targetTubID)

        if targetTubID == "":
            # they're asking for an old UnauthenticatedTub. Refuse.
            raise NegotiationError("secure Tubs require encryption")
        if isSubstring("Upgrade: TLS/1.0\r\n", header):
            wantEncrypted = True
        else:
            wantEncrypted = False

        Message.log(event_type=u"handlePLAINTEXTServer", want_encrypted=wantEncrypted)

        self._handleTubRequest(header, targetTubID)
Example #29
def _node_is_booting(instance):
    """
    Check if an instance is still booting, where booting is defined
    as either a pending or rebooting instance that is expected to
    become running.

    :param boto.ec2.instance.Instance instance: The instance to check.
    """
    try:
        instance.update()
    except EC2ResponseError as e:
        _check_response_error(
            e,
            u"flocker:provision:aws:node_is_booting:retry"
        )
    Message.new(
        message_type=u"flocker:provision:aws:node_is_booting:update",
        instance_state=instance.state,
        ip_address=instance.ip_address,
    ).write()

    # Sometimes an instance can be reported as running but without a public
    # address being set, we consider that instance to be still pending.
    return (instance.state == u'pending' or instance.state == u'rebooting' or
            (instance.state == u'running' and instance.ip_address is None))
Example #30
def extract_external_port(
    client, container_identifier, internal_port
):
    """
    Inspect a running container for the external port number on which a
    particular internal port is exposed.

    :param docker.Client client: The Docker client to use to perform the
        inspect.
    :param unicode container_identifier: The unique identifier of the container
        to inspect.
    :param int internal_port: An internal, exposed port on the container.

    :return: The external port number on which ``internal_port`` from the
        container is exposed.
    :rtype: int
    """
    container_details = client.inspect_container(container_identifier)
    # If the container isn't running, this section is not present.
    network_settings = container_details[u"NetworkSettings"]
    ports = network_settings[u"Ports"]
    details = ports[u"{}/tcp".format(internal_port)]
    host_port = int(details[0][u"HostPort"])
    Message.new(
        message_type=u"acceptance:extract_external_port", host_port=host_port
    ).write()
    return host_port
Example #31
def check_supported_volume_size(
        size_in_bytes, dataset_id, alloc_granularity=8):
    """
    Checks appropriate volume size for Backend (ScaleIO)
    :param bytes size_in_bytes: size of volume in bytes
    """
    gibs = Byte(size_in_bytes).to_GiB().value
    # modulus should be 0.0 or throw unsupported volume
    if gibs % 8 != 0:
        Message.new(Error="Volume size unsupported"
                    + str(size_in_bytes)
                    + str(dataset_id)).write(_logger)
        raise UnsupportedVolumeSize(dataset_id=dataset_id)
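A quick usage sketch (the dataset ID is hypothetical): an 8 GiB size passes silently, while 10 GiB raises UnsupportedVolumeSize.

from uuid import uuid4

dataset_id = uuid4()  # hypothetical dataset ID
check_supported_volume_size(8 * 1024 ** 3, dataset_id)  # 8 GiB: supported
try:
    check_supported_volume_size(10 * 1024 ** 3, dataset_id)  # 10 GiB
except UnsupportedVolumeSize:
    print("10 GiB is not a multiple of the 8 GiB granularity")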
Example #32
def get_proposition_type(name):
    if name in PROPOSITION_TYPES:
        proposition_type = PROPOSITION_TYPES[name]
    else:
        proposition_type = session.query(PropositionType).filter_by(
            name=name).scalar()
        PROPOSITION_TYPES[name] = proposition_type
        if proposition_type is None:
            Message.log(log_level='WARNING',
                        message="proposition type does not exist",
                        name=name)

    return proposition_type
Example #33
    def attach_volume(self, blockdevice_id, attach_to):
        """
        Attach ``blockdevice_id`` to ``host``.

        :param unicode blockdevice_id: The unique identifier for the block
            device being attached.
        :param unicode attach_to: An identifier like the one returned by the
            ``compute_instance_id`` method indicating the node to which to
            attach the volume.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises AlreadyAttachedVolume: If the supplied ``blockdevice_id`` is
            already attached.
        :returns: A ``BlockDeviceVolume`` with a ``host`` attribute set to
            ``host``.
        """
        # Raises UnknownVolume
        volume = self._get(blockdevice_id)
        # raises AlreadyAttachedVolume
        if volume.attached_to is not None:
            Message.new(Error="Could Not Destroy Volume "
                        + str(blockdevice_id)
                        + "is already attached").write(_logger)
            raise AlreadyAttachedVolume(blockdevice_id)

        # Get the SDC Object by the GUID of the host.
        sdc = self._client.get_sdc_by_guid(
            self._instance_id.upper())

        # Try mapping volumes

        # TODO errors are currently hard to get from scaleio-py
        # https://github.com/swevm/scaleio-py/issues/6
        # ultimately we should be able to get more specific
        # errors about why the failure happened such as
        # ``{"message":"Only a single SDC may be mapped to this
        # volume at a time","httpStatusCode":500,"errorCode":306}``
        try:
            self._client.map_volume_to_sdc(
                self._client.get_volume_by_id(
                    str(blockdevice_id)), sdcObj=sdc,
                allowMultipleMappings=False)
        except Exception as e:
            # TODO real errors need to be returned by scaleio-py
            Message.new(Error=str(blockdevice_id) + " "
                        + str(e)).write(_logger)
            raise AlreadyAttachedVolume(blockdevice_id)

        attached_volume = volume.set(
            attached_to=self._instance_id)
        return attached_volume
    def stop(self):
        """
        Stop the scenario from being maintained by stopping all the
        loops that may be executing.

        :return: A Deferred that fires when the scenario has stopped.
        """
        if self.monitor_loop.running:
            self.monitor_loop.stop()

        if self.loop.running:
            self.loop.stop()

        outstanding_requests = self.rate_measurer.outstanding()

        if outstanding_requests > 0:
            msg = (
                "There are {num_requests} outstanding requests. "
                "Waiting {num_seconds} seconds for them to complete."
            ).format(
                num_requests=outstanding_requests,
                num_seconds=self.timeout
            )
            Message.log(key='outstanding_requests', value=msg)

        with start_action(
            action_type=u'flocker:benchmark:scenario:stop',
            scenario='write_request_load'
        ):
            def handle_timeout(failure):
                failure.trap(CancelledError)
                msg = (
                    "Force stopping the scenario. "
                    "There are {num_requests} outstanding requests"
                ).format(
                    num_requests=outstanding_requests
                )
                Message.log(key='force_stop_request', value=msg)

            def no_outstanding_requests():
                return self.rate_measurer.outstanding() == 0

            scenario_stopped = loop_until(self.reactor,
                                          no_outstanding_requests,
                                          repeat(1))
            timeout(self.reactor, scenario_stopped, self.timeout)
            scenario_stopped.addErrback(handle_timeout)

            scenario = DeferredContext(scenario_stopped)
            scenario.addActionFinish()
            return scenario.result
    def initialize_connection(self):
        """
        The model followed with EMC XtremIO can be explained as follows:
        each node has an initiator group created when it logs in for the
        first time. The initiator name for every interface available on
        the node is added to this initiator group. Volumes are associated
        with the initiator group, ensuring that multipathing is
        established automatically.
        """

        sys = self.mgmt.request('clusters', 'GET', idx=1)['content']
        use_chap = (sys.get('chap-authentication-mode', 'disabled') !=
                    'disabled')
        discovery_chap = (sys.get('chap-discovery-mode', 'disabled') !=
                          'disabled')
        initiator = self._get_initiator()
        try:
            # check if the IG already exists
            self.mgmt.request('initiator-groups', 'GET',
                              name=self._get_ig())['content']
        except DeviceExceptionObjNotFound:
            # create an initiator group to hold the initiator
            data = {'ig-name': self._get_ig()}
            self.mgmt.request('initiator-groups', 'POST', data)
        try:
            init = self.mgmt.request('initiators', 'GET',
                                     name=initiator)['content']
            if use_chap:
                chap_passwd = init['chap-authentication-initiator-' 'password']
                # delete the initiator to create a new one with password
                if not chap_passwd:
                    Message.new(
                        Info='initiator has no password while using CHAP; '
                             'removing it').write()
                    self.mgmt.request('initiators', 'DELETE', name=initiator)
                    # check if the initiator already exists
                    raise DeviceExceptionObjNotFound
        except DeviceExceptionObjNotFound:
            # create an initiator
            data = {
                'initiator-name': initiator,
                'ig-id': self._get_ig(),
                'port-address': initiator
            }
            if use_chap:
                data['initiator-authentication-user-name'] = 'chap_user'
                chap_passwd = self._get_password()
                data['initiator-authentication-password'] = chap_passwd
            if discovery_chap:
                data['initiator-discovery-user-name'] = 'chap_user'
                data['initiator-discovery-' 'password'] = self._get_password()
            self.mgmt.request('initiators', 'POST', data)
Example #36
    def create_volume(self, dataset_id, size):
        try:
            Message.new(
                Info='Creating Volume: ' + str(dataset_id), size=size,
                user=self._config.user, passwd=self._config.password,
                mgmt_addr=self._config.mgmt_addr).write(_logger)
            fname = str(self._cluster_id) + str(dataset_id)
            ai = self._api.app_instances.create(name=fname)
            si = ai.storage_instances.create(name=fname)
            volsize = size / self._allocation_unit
            vol = si.volumes.create(name=fname, size=volsize)
            Message.new(
                Info='Datera API Volume Created: ',
                fname=fname, dataset=str(dataset_id),
                volsize=volsize, size=size).write(_logger)
            blkdev_id = unicode(str(self._cluster_id + vol['uuid']))
            volume = BlockDeviceVolume(
                size=size, attached_to=None,
                dataset_id=dataset_id,
                blockdevice_id=blkdev_id)
            self._vols[blkdev_id] = {'dataset_id': dataset_id,
                                     'size': size,
                                     'attached_to': None,
                                     'ai_name': ai['name'],
                                     'volume': volume}
            Message.new(
                Info='Created volume for ' + str(dataset_id)).write(_logger)
        except ApiError as ex:
            Message.new(
                Info='ERROR creating volume for ' + str(dataset_id),
                resp=ex.message).write(_logger)
            volume = None
            raise DeviceExceptionAPIError

        return volume
Example #37
def ensure_acl_exists(si, ii):
    """
    Make sure initiator exists in storage_instance acl
    """
    for i in si.acl_policy.list():
        if ii['path'] in i['initiators']:
            return
    try:
        si.acl_policy.initiators.add(ii)
        Message.new(
            Info='Adding initiator to ACL : ',
            storage_inst=si['name'], initiator=ii['path']).write(_logger)
    except ApiError as ex:
        raise DeviceExceptionAPIError
Example #38
def g(which):
    Message.log(message_type=u"{}-a".format(which))
    with start_action(action_type=which):
        Message.log(message_type=u"{}-b".format(which))
        yield
        Message.log(message_type=u"{}-c".format(which))
    Message.log(message_type=u"{}-d".format(which))
Example #39
def g():
    Message.log(message_type=u"a")
    with start_action(action_type=u"confounding-factor"):
        Message.log(message_type=u"b")
        yield None
        Message.log(message_type=u"c")
    Message.log(message_type=u"d")
Example #40
def login_to_target(si):
    # Give new volume a chance to show up
    time.sleep(ISCSI_LOGIN_TIME_DELAY)
    iqn = si['access']['iqn']
    for ip in si['access']['ips']:
        c = "iscsiadm -m node -T {} --portal {} --op=new"
        c += "  >  /dev/null 2>&1"
        cmd = c.format(iqn, ip)
        os.system(cmd)
        c = "iscsiadm -m node -T {} --portal {} -l > /dev/null 2>&1"
        cmd = c.format(iqn, ip)
        os.system(cmd)
        Message.new(
            Info='iSCSI Login to target : ', target=iqn, ip=ip).write(_logger)
    def deploy(self):
        """
        Deploy the new configuration: create the requested containers
        and dataset in the cluster nodes.

        :return Deferred: that will fire once the request to create all
            the containers and datasets has been sent.
        """
        Message.log(action="Listing current nodes")
        d = self.client.list_nodes()
        d.addCallback(self._set_nodes)
        d.addCallback(self._build_config)
        d.addCallback(self._configure)
        return d
    def _check_version(self):
        """
        Checks the version of EMC XtremIO.
        """
        sys = self.mgmt.request('clusters', idx=1)['content']
        ver = [int(n) for n in sys['sys-sw-version'].split('-')[0].split('.')]
        if ver < self.MIN_XMS_VERSION:
            Message.new(Error='Invalid XtremIO version ' +
                        sys['sys-sw-version']).write(_logger)
            raise DeviceVersionMismatch(
                'Invalid XtremIO version, version 2.4 or up is required')
        else:
            msg = "EMCXtremIO SW version " + sys['sys-sw-version']
            Message.new(version=msg).write(_logger)
Example #43
    def _blocking_container_runs(self, container_name):
        """
        Blocking API to check if container is running.

        :param unicode container_name: The name of the container whose
            state we're checking.

        :return: ``True`` if container is running, otherwise ``False``.
        """
        result = self._client.inspect_container(container_name)
        Message.new(message_type="flocker:docker:container_state",
                    container=container_name,
                    state=result).write()
        return result['State']['Running']
def render_template(msg: TemplatedMessage, transport_name: str, client_settings: ClientSettings) -> str:

    if msg.template not in client_settings.allowed_templates:
        raise ValueError("Template not allowed for this client")

    template_name = f"{transport_name}_{msg.template}.j2"
    try:
        template = env.get_template(template_name)
    except TemplateNotFound:
        Message.log(msg="not found", template=template_name)
        template = env.get_template(f"{msg.template}.j2")

    context = {"sender": msg.sender, **msg.variables}
    return template.render(context)
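A minimal usage sketch, assuming `TemplatedMessage` and `ClientSettings` are simple containers like the following (their real definitions are not shown here) and `env` is a configured Jinja2 `Environment`:

from dataclasses import dataclass, field


@dataclass
class TemplatedMessage:
    template: str
    sender: str
    variables: dict = field(default_factory=dict)


@dataclass
class ClientSettings:
    allowed_templates: set


msg = TemplatedMessage(template="welcome", sender="noreply@example.com",
                       variables={"name": "Ada"})
settings = ClientSettings(allowed_templates={"welcome"})
body = render_template(msg, transport_name="email", client_settings=settings)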
    def detach_volume(self, blockdevice_id):
        """
        :param blockdevice_id: The volume ID.
        :raises UnknownVolume: If the volume is not found.
        :raises UnattachedVolume: If the volume is not attached.
        """
        vol = self._get_vol_details(blockdevice_id)
        if vol.attached_to is not None:
            self.data.destroy_lun_map(blockdevice_id,
                                      self._compute_instance_id)
            self.data.rescan_scsi()
        else:
            Message.new(Info="Volume " + blockdevice_id +
                        " not attached").write(_logger)
            raise UnattachedVolume(blockdevice_id)
def load_and_add_voting_result(filepath, voting_phase_name, log_level="INFO"):
    with open(filepath) as csvfile:
        with start_action(log_level="INFO", action_type="load_csv"):
            reader = csv.reader(csvfile)
            rows = list(reader)

    voting_phase = session.query(VotingPhase).filter_by(
        name=voting_phase_name).one()

    for row_number, row in enumerate(rows[1:]):
        with start_action(log_level="INFO", action_type="add_voting_result"):

            Message.log(data="current", row_number=row_number + 1, row=row)
            proposition = (session.query(Proposition).filter_by(
                voting_identifier=row[1].strip()).join(Ballot).filter_by(
                    voting=voting_phase)).one()

            Message.log(status="found proposition",
                        proposition=proposition.voting_identifier)
            Message.log(data="before",
                        row_number=row_number + 1,
                        proposition_result=proposition.ballot.result)

            if not proposition.ballot.result:
                proposition.ballot.result = {}
            proposition.ballot.result[proposition.voting_identifier] = {
                'state': row[3].strip()
            }
            proposition.status = 'finished'

            Message.log(data="after",
                        row_number=row_number + 1,
                        proposition_result=proposition.ballot.result)
def _converge_pods(actual, config, subscriptions, k8s, aws):
    # We don't ever have to create a Pod.  We'll just delete the ones we don't
    # need anymore.
    deletes = []
    for pod in actual.pods:
        sid = pod.metadata.annotations[u"subscription"]
        if sid not in actual.subscriptions:
            Message.log(condition=u"undesired", subscription=sid)
            deletes.append(pod.metadata)

    def delete(metadata):
        return k8s.delete(k8s.k8s.model.v1.Pod(metadata=metadata))

    return list(partial(delete, metadata) for metadata in deletes)
def filter_results(zones):
    Message.log(zone_names=list(zone.name for zone in zones))
    for zone in zones:
        # XXX Bleuch zone.name should be a Name!
        if Name(zone.name) == name:
            d = route53.list_resource_record_sets(zone_id=zone.identifier)
            d.addCallback(
                lambda rrsets, zone=zone: _ZoneState(
                    zone=zone,
                    rrsets=rrsets,
                ),
            )
            return d
    raise KeyError(name)
Example #49
    def _cleanUp(self):
        # Unfortunately Fixtures / testtools doesn't care if we return a
        # Deferred here.
        if self._transport is not None:
            Message.log(
                message_type=u"test:cli:running-tahoe-lafs-node:signal",
                node_kind=self.node_kind.__name__,
            )
            self._transport.signalProcess("KILL")
        else:
            Message.log(
                message_type=u"test:cli:running-tahoe-lafs-node:no-signal",
                node_kind=self.node_kind.__name__,
            )
def _converge_replicasets(actual, config, subscriptions, k8s, aws):
    # We don't ever have to create a ReplicaSet.  We'll just delete the ones
    # we don't need anymore.
    deletes = []
    for replicaset in actual.replicasets:
        sid = replicaset.metadata.annotations[u"subscription"]
        if sid not in actual.subscriptions:
            Message.log(condition=u"undesired", subscription=sid)
            deletes.append(replicaset.metadata)

    def delete(metadata):
        return k8s.delete(k8s.k8s.model.v1beta1.ReplicaSet(metadata=metadata))

    return list(partial(delete, metadata) for metadata in deletes)
Example #51
def got_nodes(nodes):
    for node in nodes:
        if node.uuid == target_node.uuid:
            Message.log(
                message_type=(
                    u"flocker:provision:libcloud:refresh_node"),
                name=node.name,
                id=node.id,
                state=node.state,
                public_ips=node.public_ips,
                private_ips=node.private_ips,
            )
            return node
    return None
Example #52
    def stop(self):
        """
        Stop the scenario from being maintained by stopping all the
        loops that may be executing.

        :return Deferred[Optional[Dict[unicode, Any]]]: Scenario metrics.
        """
        self.is_started = False
        if self.monitor_loop.running:
            self.monitor_loop.stop()

        if self.loop.running:
            self.loop.stop()

        outstanding_requests = self.rate_measurer.outstanding()

        if outstanding_requests > 0:
            msg = (
                "There are {num_requests} outstanding requests. "
                "Waiting {num_seconds} seconds for them to complete.").format(
                    num_requests=outstanding_requests,
                    num_seconds=self.timeout)
            Message.log(key='outstanding_requests', value=msg)

        with start_action(action_type=u'flocker:benchmark:scenario:stop',
                          scenario='request_load'):

            def no_outstanding_requests():
                return self.rate_measurer.outstanding() == 0

            scenario_stopped = loop_until(self.reactor,
                                          no_outstanding_requests, repeat(1))
            timeout(self.reactor, scenario_stopped, self.timeout)
            scenario = DeferredContext(scenario_stopped)

            def handle_timeout(failure):
                failure.trap(CancelledError)
                msg = ("Force stopping the scenario. "
                       "There are {num_requests} outstanding requests").format(
                           num_requests=outstanding_requests)
                Message.log(key='force_stop_request', value=msg)

            scenario.addErrback(handle_timeout)

            def return_metrics(_ignore):
                return self.rate_measurer.get_metrics()

            scenario.addCallback(return_metrics)

            return scenario.addActionFinish()
Example #53
def _check_response_error(e, message_type):
    """
    Check if an exception is a transient one.
    If it is, then it is simply logged, otherwise it is raised.

    :param boto.exception.EC2ResponseError e: The exception to check.
    :param str message_type: The message type for logging.
    """
    if e.error_code != BOTO_INSTANCE_NOT_FOUND:
        raise e
    Message.new(
        message_type=message_type,
        reason=e.error_code,
    ).write()
    def activate(self, details):
        """
        Activate a new subscription in the subscription manager.

        This makes new subscription state available to subsequently
        executed rules.
        """
        assume(details.subscription_id not in
               self.database.list_all_subscription_identifiers())
        Message.log(activating=details.subscription_id)
        self.database.create_subscription(
            subscription_id=details.subscription_id,
            details=details,
        )
    def destroy_volume_folder(self):
        """
        Destroy the volume folder.
        :param: none
        """
        try:
            Message.new(Info="Destroying Volume folder " +
                        str(self._cluster_id)).write(_logger)
            self.mgmt.request(XtremIOMgmt.VOLUME_FOLDERS,
                              XtremIOMgmt.DELETE,
                              name=XtremIOMgmt.BASE_PATH +
                              str(self._cluster_id))
        except DeviceExceptionObjNotFound as exc:
            raise UnknownVolume(self._cluster_id)
Example #56
    def test_everything(self):
        """
        Load every single definition from the specification.

        This is a smoke test.  If it breaks, write some more specific tests
        and then fix the problem.
        """
        spec = Swagger.from_path(self.spec_path)
        for name in sorted(spec.definitions):
            Message.log(name=name)
            try:
                spec.pclass_for_definition(name)
            except NotClassLike:
                # Some stuff, indeed, is not ...
                pass
Example #57
def run():
    start = time.time()
    for i in range(N):
        with start_action(action_type="my_action"):
            with start_action(action_type="my_action2"):
                Message.log(
                    message_type="my_message",
                    integer=3, string=b"abcdeft", string2="dgsjdlkgjdsl",
                    list=[1, 2, 3, 4])
    end = time.time()

    # Each iteration has 5 messages: start/end of my_action, start/end of
    # my_action2, and my_message.
    print("%.6f per message" % ((end - start) / (N * 5),))
    print("%s messages/sec" % (int(N / (end-start)),))
Example #58
def treq_get(dispatcher, intent):
    """
    Performer to execute an HTTP GET.

    :param dispatcher: The dispatcher used to dispatch this performance.
    :param HTTPGet intent: The intent to be performed.
    """
    action = startAction(action_type=u"flocker:provision:_effect:treq_get")
    with action.context():
        Message.log(url=intent.url)
        # Do not use persistent HTTP connections, because they will not be
        # cleaned up by the end of the test.
        d = DeferredContext(get(intent.url, persistent=False))
        d.addActionFinish()
        return d.result
Example #59
def main(reactor):
    print("Logging to example-eliot.log...")
    logWriter = ThreadedFileWriter(open("example-eliot.log", "ab"), reactor)

    # Manually start the service, which will add it as a
    # destination. Normally we'd register ThreadedFileWriter with the usual
    # Twisted Service/Application infrastructure.
    logWriter.startService()

    # Log a message:
    Message.log(value="hello", another=1)

    # Manually stop the service.
    done = logWriter.stopService()
    return done
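A sketch of driving `main` with Twisted's one-shot reactor helper; `react` starts the reactor, calls `main(reactor)`, and shuts down once the returned Deferred fires.

from twisted.internet.task import react

if __name__ == "__main__":
    react(main)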
Example #60
def get_form_data(model, form_class, cell_class, view_name, request):
    form = form_class(request, request.link(model, name='+' + view_name))
    controls = list(request.POST.items())
    with start_action(action_type='validate_form',
                      controls=dict(c for c in controls
                                    if not c[0].startswith('_')),
                      form=form):
        try:
            return form.validate(controls), None
        except deform.ValidationFailure:
            Message.log(validation_errors=form.error.asdict())
            if request.app.settings.common.fail_on_form_validation_error:
                raise form.error
            return None, cell_class(request=request, form=form,
                                    model=model).show()