def openssl_verify(cafile, certificatefile, **kwargs):
    """
    Use OpenSSL CLI to verify a certificate was signed by a given certificate
    authority.

    :param str cafile: The name of the certificate authority file.
    :param str certificatefile: The name of the certificate file to be checked
        against the supplied authority.

    :return: A ``bool`` that is True if the certificate was verified,
        otherwise False if verification failed or an error occurred.
    """
    command = [b"openssl", b"verify", b"-CAfile", cafile, certificatefile]
    try:
        result = run_process(command, **kwargs)
        return result.output.strip() == b"{}: OK".format(certificatefile)
    except CalledProcessError as e:
        result = run_process(["openssl", "x509", "-text", "-in", cafile],
                             **kwargs)
        cafile_info = result.output
        result = run_process(
            ["openssl", "x509", "-text", "-in", certificatefile], **kwargs)
        certificate_info = result.output
        error = str(e)
        error = error + "\n" + cafile_info + "\n" + certificate_info
        Message.new(message_type="flocker.ca.functional:openssl_verify_error",
                    error=error).write(Logger())
        return False

def openssl_verify(cafile, certificatefile, **kwargs):
    """
    Use OpenSSL CLI to verify a certificate was signed by a given certificate
    authority.

    :param str cafile: The name of the certificate authority file.
    :param str certificatefile: The name of the certificate file to be checked
        against the supplied authority.

    :return: A ``bool`` that is True if the certificate was verified,
        otherwise False if verification failed or an error occurred.
    """
    command = [b"openssl", b"verify", b"-CAfile", cafile, certificatefile]
    try:
        result = run_process(command, **kwargs)
        return result.output.strip() == b"{}: OK".format(certificatefile)
    except CalledProcessError as e:
        result = run_process([
            "openssl", "x509", "-text", "-in", cafile], **kwargs)
        cafile_info = result.output
        result = run_process([
            "openssl", "x509", "-text", "-in", certificatefile], **kwargs)
        certificate_info = result.output
        error = str(e)
        error = error + "\n" + cafile_info + "\n" + certificate_info
        Message.new(
            message_type="flocker.ca.functional:openssl_verify_error",
            error=error).write(Logger())
        return False

def extract_external_port(
    client, container_identifier, internal_port,
):
    """
    Inspect a running container for the external port number on which a
    particular internal port is exposed.

    :param docker.Client client: The Docker client to use to perform the
        inspect.
    :param unicode container_identifier: The unique identifier of the
        container to inspect.
    :param int internal_port: An internal, exposed port on the container.

    :return: The external port number on which ``internal_port`` from the
        container is exposed.
    :rtype: int
    """
    container_details = client.inspect_container(container_identifier)
    # If the container isn't running, this section is not present.
    network_settings = container_details[u"NetworkSettings"]
    ports = network_settings[u"Ports"]
    details = ports[u"{}/tcp".format(internal_port)]
    host_port = int(details[0][u"HostPort"])
    Message.new(
        message_type=u"acceptance:extract_external_port",
        host_port=host_port,
    ).write()
    return host_port

def create_network(self, name, nwtype):
    self.authenticate_user()
    try:
        networkid = self.network_obj.query_by_name(name)
        if networkid:
            Message.new(Debug="Network Already Exists").write(_logger)
        else:
            self.network_obj.create(name, nwtype)
        # Adding host ports to the network.
        f = open('/etc/iscsi/initiatorname.iscsi', 'r')
        for line in f:
            if line[0] != '#':
                current_line = line.split('=')
                host_port = current_line[1]
                if "\n" in host_port[1]:
                    host_port = host_port.split('\n')[0]
                tz = self.network_obj.show(name)
                if "endpoints" in tz:
                    endpoints = tz['endpoints']
                    if host_port not in endpoints:
                        self.network_obj.add_endpoint(name,
                                                      endpoint=host_port)
                break
    except utils.SOSError as e:
        if e.err_code == utils.SOSError.HTTP_ERR:
            raise utils.SOSError(
                utils.SOSError.HTTP_ERR,
                "coprhd create network HTTP_ERR" + e.err_text)
        elif e.err_code == utils.SOSError.SOS_FAILURE_ERR:
            raise utils.SOSError(
                utils.SOSError.SOS_FAILURE_ERR,
                "coprhd create network failed" + e.err_text)
        else:
            Message.new(Debug="coprhd create network failed").write(_logger)

def _node_is_booting(instance):
    """
    Check if an instance is still booting, where booting is defined as either
    a pending or rebooting instance that is expected to become running.

    :param boto.ec2.instance.Instance instance: The instance to check.
    """
    try:
        instance.update()
    except EC2ResponseError as e:
        _check_response_error(
            e,
            u"flocker:provision:aws:node_is_booting:retry"
        )
    Message.new(
        message_type=u"flocker:provision:aws:node_is_booting:update",
        instance_state=instance.state,
        ip_address=instance.ip_address,
    ).write()
    # Sometimes an instance can be reported as running but without a public
    # address being set; in that case we consider the instance to still be
    # pending.
    return (instance.state == u'pending' or
            instance.state == u'rebooting' or
            (instance.state == u'running' and instance.ip_address is None))

def get_device_path(self, blockdevice_id):
    Message.new(operation=u'get_device_path',
                blockdevice_id=blockdevice_id).write()
    lun_name = self._get_lun_name_from_blockdevice_id(blockdevice_id)
    lun = self._client.get_lun_by_name(lun_name)
    if lun == {}:
        raise UnknownVolume(blockdevice_id)
    alu = lun['lun_id']
    rc, out = self._client.get_storage_group(self._group)
    if rc != 0:
        raise Exception(rc, out)
    lunmap = self._client.parse_sg_content(out)['lunmap']
    try:
        # The LUN has already been added to this storage group....perhaps
        # by a previous attempt to attach in which the OS device did not
        # appear.
        hlu = lunmap[alu]
    except KeyError:
        raise UnattachedVolume(blockdevice_id)
    hlu_bus_path = _hlu_bus_paths(hlu)[0]
    # XXX This will only operate on one of the resulting device paths.
    # /sys/class/scsi_disk/x:x:x:HLU/device/block/sdvb for example.
    device_path = _device_paths_for_hlu_bus_path(hlu_bus_path)[0]
    if not _device_path_is_usable(device_path):
        raise UnattachedVolume(blockdevice_id)
    Message.new(operation=u'get_device_path_output',
                blockdevice_id=blockdevice_id,
                device_path=device_path.path).write()
    return device_path

def add_initiators(self, sync, hostlabel, protocol, portwwn, initname):
    self.authenticate_user()
    portwwn = None
    try:
        f = open('/etc/iscsi/initiatorname.iscsi', 'r')
        for line in f:
            if line[0] != '#':
                s1 = line.split('=')
                portwwn = str(s1[1])
                if "\n" in portwwn:
                    portwwn = portwwn.split('\n')[0]
                break
        initname = portwwn
        initiatorwwn = None
        self.hostinitiator_obj.create(sync, hostlabel, protocol,
                                      initiatorwwn, portwwn)
    except utils.SOSError as e:
        if e.err_code == utils.SOSError.HTTP_ERR:
            if e.err_text.find('same Initiator Port already exists') != -1:
                Message.new(
                    Debug="coprhd add initiators already added"
                ).write(_logger)
            else:
                raise utils.SOSError(
                    utils.SOSError.HTTP_ERR,
                    "coprhd add initiators HTTP_ERR" + e.err_text)
        elif e.err_code == utils.SOSError.SOS_FAILURE_ERR:
            raise utils.SOSError(
                utils.SOSError.SOS_FAILURE_ERR,
                "coprhd add initiators failed" + e.err_text)
        else:
            Message.new(Debug="coprhd add initiators failed").write(_logger)

def run_process(command, *args, **kwargs):
    """
    Run a child process, capturing its stdout and stderr.

    :param list command: An argument list to use to launch the child process.

    :raise CalledProcessError: If the child process has a non-zero exit
        status.

    :return: A ``_ProcessResult`` instance describing the result of the child
        process.
    """
    kwargs["stdout"] = PIPE
    kwargs["stderr"] = STDOUT
    action = start_action(action_type="run_process", command=command,
                          args=args, kwargs=kwargs)
    with action:
        process = Popen(command, *args, **kwargs)
        output = process.stdout.read()
        status = process.wait()
        result = _ProcessResult(command=command, output=output, status=status)
        # TODO: We should be using a specific logging type for this.
        Message.new(
            command=result.command,
            output=result.output,
            status=result.status,
        ).write()
        if result.status:
            raise _CalledProcessError(
                returncode=status, cmd=command, output=output,
            )
        return result

def get_device_path(self, blockdevice_id):
    """
    Return the device path that has been allocated to the block device on
    the host to which it is currently attached.

    :param unicode blockdevice_id: The unique identifier for the block
        device.
    :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
        exist.
    :raises UnattachedVolume: If the supplied ``blockdevice_id`` is
        not attached to a host.
    :returns: A ``FilePath`` for the device.
    """
    # raises UnknownVolume
    volume = self._get(blockdevice_id)

    # raises UnattachedVolume
    if volume.attached_to is None:
        Message.new(Error="Could not get Device Path: " +
                    str(blockdevice_id) +
                    " is not attached").write(_logger)
        raise UnattachedVolume(blockdevice_id)

    # Check the "actual volume" for attachment
    sio_volume = self._client.get_volume_by_id(str(blockdevice_id))
    sdcs = self._client.get_sdc_for_volume(sio_volume)
    if len(sdcs) == 0:
        Message.new(Error="Could not get Device Path: " +
                    str(blockdevice_id) +
                    " is not attached").write(_logger)
        raise UnattachedVolume(blockdevice_id)

    # return the real path of the device
    return self._get_dev_from_blockdeviceid(volume.blockdevice_id)

def wait_for_volume(cls, blockdevice_id, time_limit=60):
    """
    Wait for the device of the volume identified by ``blockdevice_id`` to
    be listed on the host.

    :param unicode blockdevice_id: The unique identifier of the volume
        whose device to wait for.
    :param int time_limit: The maximum time, in seconds, to wait for the
        device to appear.
    :raises Exception: If the device is not listed within ``time_limit``.
    :returns: ``None`` once the device exists.
    """
    start_time = time.time()
    while True:
        exists = cls._dev_exists_from_blockdeviceid(blockdevice_id)
        if exists:
            return

        elapsed_time = time.time() - start_time
        if elapsed_time < time_limit:
            time.sleep(0.1)
        else:
            Message.new(Error="Could not Find Device for Volume. "
                        "Timeout on: " +
                        str(blockdevice_id)).write(_logger)
            raise Exception(
                'Timed out while waiting for volume. '
                'Expected Volume: {!r}, '
                'Elapsed Time: {!r}, '
                'Time Limit: {!r}.'.format(
                    blockdevice_id, elapsed_time, time_limit
                )
            )

def list_volumes(self):
    """
    Return all volumes that belong to this Flocker cluster.
    """
    try:
        ebs_volumes = self.connection.get_all_volumes()
        message_type = BOTO_LOG_RESULT + u':listed_volumes'
        Message.new(
            message_type=message_type,
            volume_ids=list(volume.id for volume in ebs_volumes),
        ).write()
    except EC2ResponseError as e:
        # Work around some internal race-condition in EBS by retrying,
        # since this error makes no sense:
        if e.code == NOT_FOUND:
            return self.list_volumes()
        else:
            raise

    volumes = []
    for ebs_volume in ebs_volumes:
        if _is_cluster_volume(self.cluster_id, ebs_volume):
            volumes.append(_blockdevicevolume_from_ebs_volume(ebs_volume))
    message_type = BOTO_LOG_RESULT + u':listed_cluster_volumes'
    Message.new(
        message_type=message_type,
        volume_ids=list(volume.blockdevice_id for volume in volumes),
    ).write()
    return volumes

def detach_destroy_volumes(api):
    """
    Detach and destroy all volumes known to this API.

    If we fail to detach a volume for any reason, sleep for 1 second and
    retry until we hit CLEANUP_RETRY_LIMIT.  This is to facilitate best
    effort cleanup of the volume environment after each test run, so that
    future runs are not impacted.
    """
    volumes = api.list_volumes()
    retry = 0
    action_type = u"agent:blockdevice:cleanup:details"
    with start_action(action_type=action_type):
        while retry < CLEANUP_RETRY_LIMIT and len(volumes) > 0:
            for volume in volumes:
                try:
                    if volume.attached_to is not None:
                        api.detach_volume(volume.blockdevice_id)
                    api.destroy_volume(volume.blockdevice_id)
                except:
                    write_traceback(_logger)

            time.sleep(1.0)
            volumes = api.list_volumes()
            retry += 1

        if len(volumes) > 0:
            Message.new(
                message_type=u"agent:blockdevice:failedcleanup:volumes",
                volumes=volumes).write()

def emit(self, record):
    fields = vars(record)
    # Only log certain things.  The log is massively too verbose
    # otherwise.
    if fields.get("msg", ":").split(":")[0] in self._to_log:
        Message.new(message_type=u'flocker:provision:aws:boto_logs',
                    **fields).write()

def list_volumes(self):
    """
    Return all volumes that belong to this Flocker cluster.
    """
    try:
        ebs_volumes = self.connection.get_all_volumes()
        message_type = BOTO_LOG_RESULT + u':listed_volumes'
        Message.new(
            message_type=message_type,
            volume_ids=list(volume.id for volume in ebs_volumes),
        ).write()
    except EC2ResponseError as e:
        # Work around some internal race-condition in EBS by retrying,
        # since this error makes no sense:
        if e.code == NOT_FOUND:
            return self.list_volumes()
        else:
            raise

    volumes = []
    for ebs_volume in ebs_volumes:
        if _is_cluster_volume(self.cluster_id, ebs_volume):
            volumes.append(
                _blockdevicevolume_from_ebs_volume(ebs_volume)
            )
    message_type = BOTO_LOG_RESULT + u':listed_cluster_volumes'
    Message.new(
        message_type=message_type,
        volume_ids=list(volume.blockdevice_id for volume in volumes),
    ).write()
    return volumes

def _get_device_path_api(self, volume):
    """
    Return the device path reported by the Cinder API.

    :param volume: The Cinder ``Volume`` which is attached.

    :returns: ``FilePath`` of the device created by the virtio_blk driver.
    """
    if volume.attachments:
        attachment = volume.attachments[0]
        if len(volume.attachments) > 1:
            # As far as we know you can not have more than one attachment,
            # but, perhaps we're wrong and there should be a test for the
            # multiple attachment case.  FLOC-1854.
            # Log a message if this ever happens.
            Message.new(
                message_type=(
                    u'flocker:node:agents:blockdevice:openstack:'
                    u'get_device_path:'
                    u'unexpected_multiple_attachments'
                ),
                volume_id=unicode(volume.id),
                attachment_devices=u','.join(
                    unicode(a['device']) for a in volume.attachments
                ),
            ).write()
    else:
        raise UnattachedVolume(volume.id)

    return FilePath(attachment['device'])

def create_cloudformation_stack(template_url, parameters, aws_config):
    """
    Create a CloudFormation stack.

    :param unicode template_url: Cloudformation template URL on S3.
    :param dict parameters: The parameters required by the template.
    :param dict aws_config: environment variables to be merged with the
        current process environment before running the ``aws`` sub-command.

    :returns: A ``Deferred`` which fires when the stack has been created.
    """
    # Request stack creation.
    stack_name = CLOUDFORMATION_STACK_NAME + str(int(time.time()))
    output = aws_output(
        ['cloudformation', 'create-stack',
         '--disable-rollback',
         '--parameters', json.dumps(parameters),
         '--stack-name', stack_name,
         '--template-url', template_url],
        aws_config
    )
    output = json.loads(output)
    stack_id = output['StackId']
    Message.new(cloudformation_stack_id=stack_id).write()
    return wait_for_stack_status(stack_id, 'CREATE_COMPLETE', aws_config)

def list_volumes(self):
    """
    Return ``BlockDeviceVolume`` instances for all the files in the
    ``unattached`` directory and all per-host directories.

    See ``IBlockDeviceAPI.list_volumes`` for parameter and return type
    documentation.
    """
    volumes = []
    try:
        # Query for volume folder by name VOL_FLOCKER
        # and get list of volumes. The array may have
        # other volumes not owned by Flocker
        vol_folder = self.mgmt.request(
            XtremIOMgmt.VOLUME_FOLDERS,
            name=XtremIOMgmt.BASE_PATH + str(self._cluster_id))['content']
        # Get the number of volumes
        Message.new(NoOfVolumesFound=vol_folder['num-of-vols']).write(_logger)
        if int(vol_folder['num-of-vols']) > 0:
            for vol in vol_folder['direct-list']:
                # Message.new(VolumeName=vol[1]).write(_logger)
                volume = self._get_vol_details(vol[1])
                volumes.append(volume)
                # Message.new(volume=volume).write(_logger)
    except Exception as exe:
        pass
        # Message.new(Error=exe).write(_logger)

    return volumes

def create_volume(self, dataset_id, size):
    """
    Create a block device using the ICinderVolumeManager.
    The cluster_id and dataset_id are stored as metadata on the volume.

    See:

    http://docs.rackspace.com/cbs/api/v1.0/cbs-devguide/content/POST_createVolume_v1__tenant_id__volumes_volumes.html
    """
    metadata = {
        CLUSTER_ID_LABEL: unicode(self.cluster_id),
        DATASET_ID_LABEL: unicode(dataset_id),
    }
    action_type = u"blockdevice:cinder:create_volume"
    with start_action(action_type=action_type):
        requested_volume = self.cinder_volume_manager.create(
            size=Byte(size).to_GB().value,
            metadata=metadata,
        )
        Message.new(blockdevice_id=requested_volume.id).write()
        created_volume = wait_for_volume(
            volume_manager=self.cinder_volume_manager,
            expected_volume=requested_volume,
        )
        return _blockdevicevolume_from_cinder_volume(
            cinder_volume=created_volume,
        )

def return_multipath_device(self, blockdevice_id):
    """
    :param blockdevice_id:
    :return: deviceAbsPath - Multipath device path
    """
    lunid = self.data.get_lun_map(blockdevice_id)
    try:
        # Query multipath for the device name
        output = check_output([b"multipath -v2 -ll"], shell=True)
        # multipath -v2 -ll sample output as below:
        # 3514f0c5461400172 dm-5 XtremIO ,XtremApp
        # size=1.0M features='0' hwhandler='0' wp=rw
        # `-+- policy='queue-length 0' prio=0 status=active
        #   |- 7:0:0:2 sdg 8:96 active faulty running
        #   `- 3:0:0:2 sdf 8:80 active faulty running
        # Parse the above output for the device name under /dev/mapper
        for row in output.split('\n'):
            if re.search(r'XtremApp', row, re.I):
                deviceName = row.split(' ')[0]
            if re.search(r'\d:\d:\d:' + str(lunid), row, re.I):
                deviceAbsPath = (
                    EMCXtremIOBlockDeviceAPI.DEFAULT_MULTIPATH_DEVICE_PATH
                    + deviceName)
                if os.path.exists(deviceAbsPath):
                    output = check_output([b"mkfs.ext3 " + deviceAbsPath],
                                          shell=True)
                    return deviceAbsPath
    except Exception as ex:
        Message.new(
            value="Exception when querying for multipath device"
        ).write(_logger)
        raise UnknownVolume(blockdevice_id)

def list_volumes(self):
    """
    Return ``BlockDeviceVolume`` instances for all the files in the
    ``unattached`` directory and all per-host directories.

    See ``IBlockDeviceAPI.list_volumes`` for parameter and return type
    documentation.
    """
    volumes = []
    try:
        # Query for volume folder by name VOL_FLOCKER
        # and get list of volumes. The array may have
        # other volumes not owned by Flocker
        vol_folder = self.mgmt.request(
            XtremIOMgmt.VOLUME_FOLDERS,
            name=XtremIOMgmt.BASE_PATH + str(self._cluster_id))['content']
        # Get the number of volumes
        Message.new(
            NoOfVolumesFound=vol_folder['num-of-vols']).write(_logger)
        if int(vol_folder['num-of-vols']) > 0:
            for vol in vol_folder['direct-list']:
                # Message.new(VolumeName=vol[1]).write(_logger)
                volume = self._get_vol_details(vol[1])
                volumes.append(volume)
                # Message.new(volume=volume).write(_logger)
    except Exception as exe:
        pass
        # Message.new(Error=exe).write(_logger)

    return volumes

def create_volume(self, dataset_id, size):
    """
    Create a block device using the ICinderVolumeManager.
    The cluster_id and dataset_id are stored as metadata on the volume.

    See:

    http://docs.rackspace.com/cbs/api/v1.0/cbs-devguide/content/POST_createVolume_v1__tenant_id__volumes_volumes.html
    """
    metadata = {
        CLUSTER_ID_LABEL: unicode(self.cluster_id),
        DATASET_ID_LABEL: unicode(dataset_id),
    }
    requested_volume = self.cinder_volume_manager.create(
        size=int(Byte(size).to_GiB().value),
        metadata=metadata,
    )
    Message.new(message_type=CINDER_CREATE,
                blockdevice_id=requested_volume.id).write()
    created_volume = wait_for_volume_state(
        volume_manager=self.cinder_volume_manager,
        expected_volume=requested_volume,
        desired_state=u'available',
        transient_states=(u'creating',),
    )
    return _blockdevicevolume_from_cinder_volume(
        cinder_volume=created_volume,
    )

def return_multipath_device(self, blockdevice_id):
    """
    :param blockdevice_id:
    :return: deviceAbsPath - Multipath device path
    """
    lunid = self.data.get_lun_map(blockdevice_id)
    try:
        # Query multipath for the device name
        output = check_output([b"multipath -v2 -ll"], shell=True)
        # multipath -v2 -ll sample output as below:
        # 3514f0c5461400172 dm-5 XtremIO ,XtremApp
        # size=1.0M features='0' hwhandler='0' wp=rw
        # `-+- policy='queue-length 0' prio=0 status=active
        #   |- 7:0:0:2 sdg 8:96 active faulty running
        #   `- 3:0:0:2 sdf 8:80 active faulty running
        # Parse the above output for the device name under /dev/mapper
        for row in output.split('\n'):
            if re.search(r'XtremApp', row, re.I):
                deviceName = row.split(' ')[0]
            if re.search(r'\d:\d:\d:' + str(lunid), row, re.I):
                deviceAbsPath = (
                    EMCXtremIOBlockDeviceAPI.DEFAULT_MULTIPATH_DEVICE_PATH
                    + deviceName)
                if os.path.exists(deviceAbsPath):
                    output = check_output([b"mkfs.ext3 " + deviceAbsPath],
                                          shell=True)
                    return deviceAbsPath
    except Exception as ex:
        Message.new(value="Exception when querying for multipath device"
                    ).write(_logger)
        raise UnknownVolume(blockdevice_id)

def _get(self, blockdevice_id):
    for volume in self.list_volumes():
        if volume.blockdevice_id == blockdevice_id:
            return volume

    Message.new(Error="Could Not Find Volume " +
                str(blockdevice_id)).write(_logger)
    raise UnknownVolume(blockdevice_id)

def create_volume(self, dataset_id, size):
    """
    Create a block device using the ICinderVolumeManager.
    The cluster_id and dataset_id are stored as metadata on the volume.

    See:

    http://docs.rackspace.com/cbs/api/v1.0/cbs-devguide/content/POST_createVolume_v1__tenant_id__volumes_volumes.html
    """
    metadata = {
        CLUSTER_ID_LABEL: unicode(self.cluster_id),
        DATASET_ID_LABEL: unicode(dataset_id),
    }
    requested_volume = self.cinder_volume_manager.create(
        size=int(Byte(size).to_GiB().value),
        metadata=metadata,
        display_name="flocker-{}".format(dataset_id),
    )
    Message.new(message_type=CINDER_CREATE,
                blockdevice_id=requested_volume.id).write()
    created_volume = wait_for_volume_state(
        volume_manager=self.cinder_volume_manager,
        expected_volume=requested_volume,
        desired_state=u'available',
        transient_states=(u'creating', ),
    )
    return _blockdevicevolume_from_cinder_volume(
        cinder_volume=created_volume,
    )

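# --- Illustrative usage (not from the original source) ----------------------
# A minimal sketch of how the ``create_volume`` method above might be called.
# It assumes ``api`` is an instance of the surrounding Cinder-backed
# block-device API class; ``example_create_volume`` and its parameter are
# hypothetical names introduced only for illustration.
from uuid import uuid4


def example_create_volume(api):
    # Request a 100 GiB volume; the returned value is the
    # ``BlockDeviceVolume`` built from the created Cinder volume.
    return api.create_volume(dataset_id=uuid4(), size=100 * 1024 ** 3)
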
def resize_volume(self, blockdevice_id, size):
    """
    Resize an unattached ``blockdevice_id``.

    This changes the amount of storage available.  It does not change the
    data on the volume (including the filesystem).

    :param unicode blockdevice_id: The unique identifier for the block
        device being resized.
    :param int size: The required size, in bytes, of the volume.

    :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
        exist.

    :returns: ``None``
    """
    # raises UnknownVolume
    volume = self._get(blockdevice_id)

    # raises AlreadyAttachedVolume, do we want this?
    # It says only an unattached volume may be resized; if it is attached,
    # do we detach, resize and then reattach?  Or should we just assume
    # that all callers of this function know that the volume is detached
    # already?
    if volume.attached_to is not None:
        Message.new(Error="Cannot Resize Volume " + str(blockdevice_id) +
                    ": it is attached").write(_logger)
        raise AlreadyAttachedVolume(blockdevice_id)

    sio_volume = self._client.get_volume_by_id(str(blockdevice_id))
    size_in_gb = int(Byte(size).to_GiB().value)
    self._client.resize_volume(sio_volume, size_in_gb)

def _get_device_path_api(self, volume):
    """
    Return the device path reported by the Cinder API.

    :param volume: The Cinder ``Volume`` which is attached.

    :returns: ``FilePath`` of the device created by the virtio_blk driver.
    """
    if volume.attachments:
        attachment = volume.attachments[0]
        if len(volume.attachments) > 1:
            # As far as we know you can not have more than one attachment,
            # but, perhaps we're wrong and there should be a test for the
            # multiple attachment case.  FLOC-1854.
            # Log a message if this ever happens.
            Message.new(
                message_type=(u'flocker:node:agents:blockdevice:openstack:'
                              u'get_device_path:'
                              u'unexpected_multiple_attachments'),
                volume_id=unicode(volume.id),
                attachment_devices=u','.join(
                    unicode(a['device']) for a in volume.attachments),
            ).write()
    else:
        raise UnattachedVolume(volume.id)

    return FilePath(attachment['device'])

def check_multipath(self):
    """
    Method to check if multipathing kernel modules are installed and if
    the multipath daemon process is running.
    """
    multipath_on = False
    try:
        # Check whether the kernel modules are installed
        output = check_output([b"lsmod | grep multipath"], shell=True)
        if re.search(r'dm_multipath', output, re.I):
            # Check if multipathd is running.  multipathd will not start
            # without an /etc/multipath.conf file.
            output = check_output([b"ps -ef | grep multipathd"], shell=True)
            if re.search(r'/sbin/multipathd', output, re.I):
                if os.path.exists("/etc/multipath.conf") and re.search(
                        r'XtremIO',
                        check_output([b"cat /etc/multipath.conf"],
                                     shell=True),
                        re.I):
                    multipath_on = True
                else:
                    raise Exception
    except Exception as exe:
        Message.new(
            value="check_multipath returned exception").write(_logger)
        raise Exception
    return multipath_on

def create_volume(self, dataset_id, size):
    """
    Create a block device using the ICinderVolumeManager.
    The cluster_id and dataset_id are stored as metadata on the volume.

    See:

    http://docs.rackspace.com/cbs/api/v1.0/cbs-devguide/content/POST_createVolume_v1__tenant_id__volumes_volumes.html
    """
    metadata = {
        CLUSTER_ID_LABEL: unicode(self.cluster_id),
        DATASET_ID_LABEL: unicode(dataset_id),
    }
    action_type = u"blockdevice:cinder:create_volume"
    with start_action(action_type=action_type):
        # There could be a difference between user-requested and
        # Cinder-created volume sizes due to several reasons:
        # 1) Round off from converting user-supplied 'size' to 'GiB' int.
        # 2) Cinder-specific size constraints.
        # XXX: Address size mismatch (see
        # https://clusterhq.atlassian.net/browse/FLOC-1874).
        requested_volume = self.cinder_volume_manager.create(
            size=Byte(size).to_GiB().value,
            metadata=metadata,
        )
        Message.new(blockdevice_id=requested_volume.id).write()
        created_volume = wait_for_volume(
            volume_manager=self.cinder_volume_manager,
            expected_volume=requested_volume,
        )
        return _blockdevicevolume_from_cinder_volume(
            cinder_volume=created_volume,
        )

def run_process(command, *args, **kwargs):
    """
    Run a child process, capturing its stdout and stderr.

    :param list command: An argument list to use to launch the child process.

    :raise CalledProcessError: If the child process has a non-zero exit
        status.

    :return: A ``_ProcessResult`` instance describing the result of the child
        process.
    """
    kwargs["stdout"] = PIPE
    kwargs["stderr"] = STDOUT
    action = start_action(
        action_type="run_process", command=command, args=args, kwargs=kwargs)
    with action:
        process = Popen(command, *args, **kwargs)
        output = process.stdout.read()
        status = process.wait()
        result = _ProcessResult(command=command, output=output, status=status)
        # TODO: We should be using a specific logging type for this.
        Message.new(
            command=result.command,
            output=result.output,
            status=result.status,
        ).write()
        if result.status:
            raise _CalledProcessError(
                returncode=status, cmd=command, output=output,
            )
        return result

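# --- Illustrative usage (not from the original source) ----------------------
# A minimal sketch of how ``run_process`` above might be consumed.  It assumes
# ``run_process`` and ``_CalledProcessError`` are importable from the
# surrounding module; ``example_run_process_usage`` is a hypothetical name.
def example_run_process_usage():
    try:
        result = run_process([b"ls", b"/tmp"])
    except _CalledProcessError as e:
        # On a non-zero exit status the combined stdout/stderr is attached
        # to the exception.
        return e.output
    # ``result.output`` holds the combined stdout/stderr of the child.
    return result.output
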
def _retry_exception(f, steps=(0.1,) * 10, sleep=sleep):
    """
    Retry a function if it raises an exception.

    :return: Whatever the function returns.
    """
    steps = iter(steps)

    while True:
        try:
            Message.new(
                message_type=(
                    u"flocker:provision:libcloud:retry_exception:trying"),
                function=fullyQualifiedName(f)
            ).write()
            return f()
        except:
            # Try to get the next sleep time from the steps iterator.  Do it
            # without raising an exception (StopIteration) to preserve the
            # current exception context.
            for step in steps:
                write_traceback()
                sleep(step)
                break
            else:
                # Didn't hit the break, so didn't iterate at all, so we're out
                # of retry steps.  Fail now.
                raise

def find_unit(units, unit_name):
    Message.new(message_type="flocker:node:functional:deploy:find_unit",
                units=list(unit.name for unit in units),
                desired_unit=unit_name).write()
    for unit in units:
        if unit.name == unit_name:
            return unit

def _retry_exception(f, steps=(0.1, ) * 10, sleep=sleep):
    """
    Retry a function if it raises an exception.

    :return: Whatever the function returns.
    """
    steps = iter(steps)

    while True:
        try:
            Message.new(
                message_type=(
                    u"flocker:provision:libcloud:retry_exception:trying"),
                function=fullyQualifiedName(f),
            ).write()
            return f()
        except:
            # Try to get the next sleep time from the steps iterator.  Do it
            # without raising an exception (StopIteration) to preserve the
            # current exception context.
            for step in steps:
                write_traceback()
                sleep(step)
                break
            else:
                # Didn't hit the break, so didn't iterate at all, so we're out
                # of retry steps.  Fail now.
                raise

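# --- Illustrative usage (not from the original source) ----------------------
# A minimal sketch of how ``_retry_exception`` above might be used: wrap a
# flaky zero-argument callable (built here with ``functools.partial``) so it
# is retried with the default backoff steps.  ``example_retry_list_nodes`` and
# its ``driver`` parameter are hypothetical names for illustration.
from functools import partial


def example_retry_list_nodes(driver):
    # Each failed attempt logs a traceback and sleeps 0.1 seconds; after ten
    # failures the last exception propagates.
    return _retry_exception(partial(driver.list_nodes))
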
def find_unit(units, unit_name):
    Message.new(
        message_type="flocker:node:functional:deploy:find_unit",
        units=list(unit.name for unit in units),
        desired_unit=unit_name
    ).write()
    for unit in units:
        if unit.name == unit_name:
            return unit

def succeed_or_fail(result):
    Message.new(
        system="log-agent:os-detection:journald",
        result=result,
    ).write()
    if result == 0:
        return True
    return False

def emit(self, record):
    fields = vars(record)
    # Only log certain things.  The log is massively too verbose
    # otherwise.
    if fields.get("msg", ":").split(":")[0] in self._to_log:
        Message.new(
            message_type=BOTO_LOG_HEADER,
            **fields
        ).write()

def _check(unit):
    command = _HOST_COMMAND + [b"/usr/bin/systemctl", b"status"] + [unit]
    Message.new(
        system="log-agent:os-detection:journald",
        unit=unit,
        command=command,
    ).write()
    return getProcessValue(command[0], command[1:], env=environ)

def _initiator_create(self, initiator):
    """
    Create an initiator
    """
    Message.new(Info=" Creating initiator : ",
                initiator=initiator).write(_logger)
    return self._api.initiators.create(name=socket.gethostname(),
                                       id=initiator)

def succeed_or_check_control(result):
    Message.new(
        system="log-agent:os-detection:journald",
        result=result,
    ).write()
    if result == 0:
        return True
    return _check(b"flocker-control").addCallback(succeed_or_fail)

def _run_flaky_test(self, case, result, flaky):
    """
    Run a test that has been decorated with the `@flaky` decorator.

    :param TestCase case: A ``testtools.TestCase`` to run.
    :param TestResult result: A ``TestResult`` object that conforms to the
        testtools extended result interface.
    :param _FlakyAnnotation flaky: A description of the conditions of
        flakiness.

    :return: A ``TestResult`` with the result of running the flaky test.
    """
    result.startTest(case)
    successes = 0
    results = []

    # Optimization to stop running early if there's no way that we can
    # reach the minimum number of successes.
    max_fails = flaky.max_runs - flaky.min_passes
    while (successes < flaky.min_passes and
           len(results) - successes <= max_fails):
        was_successful, result_type, details = self._attempt_test(case)
        if was_successful:
            successes += 1
        results.append((result_type, details))
    successful = successes >= flaky.min_passes

    flaky_data = flaky.to_dict()
    flaky_data.update({'runs': len(results), 'passes': successes})
    flaky_details = {
        'flaky': text_content(pformat(flaky_data)),
    }
    combined_details = _combine_details(
        [flaky_details] + list(r[1] for r in results))

    if successful:
        skip_reported = False
        for result_type, details in results:
            if result_type == _ResultType.skip:
                result.addSkip(case, details=details)
                skip_reported = True

        if not skip_reported:
            Message.new(
                message_type=u"flocker:test:flaky",
                id=case.id(),
                successes=successes,
                passes=len(results),
                min_passes=flaky.min_passes,
                max_runs=flaky.max_runs,
            ).write()
            result.addSuccess(case, details=combined_details)
    else:
        # XXX: How are we going to report on tests that sometimes fail,
        # sometimes error, sometimes skip?  Currently we just error.
        result.addError(case, details=combined_details)
    result.stopTest(case)
    return result

def log(restart, reason=None):
    Message.new(
        message_type=_eliot_system(u"restart_for_volume_change"),
        restart=restart,
        state_is_none=state is None,
        configuration_is_none=configuration is None,
        reason=reason,
    ).write()
    return restart

def check_keys(results):
    for key_data, comment in results:
        agent_key = Key.fromString(key_data, type='blob')
        Message.new(message_type="flocker.provision.ssh:agent_key",
                    key_fingerprint=agent_key.fingerprint(),
                    comment=comment).write()
        if agent_key == key:
            return True
    raise KeyNotFound(expected_key=key)

def connect_to_postgres():
    try:
        return connect(host=host, user=u"postgres", port=port,
                       database=database)
    except (InterfaceError, ProgrammingError) as e:
        Message.new(
            message_type=u"acceptance:integration:postgres_connect",
            exception=unicode(e.__class__),
            reason=unicode(e)).write()
        return False

def detach_delete_all_disks(driver):
    """
    Detaches and deletes all disks for this cloud service.
    Primarily used for cleanup after tests.

    :returns: ``None``
    """
    Message.new(Info='Cleaning Up Detaching/Disks').write(_logger)
    for v in driver.list_volumes():
        driver.destroy_volume(v.blockdevice_id)

def test_destination_stdout(self):
    """
    Eliot messages are written to stdout.
    """
    fake_stdout = self.StubStdout()
    message_formats = {'flocker:eliot:test': 'Running %(command)s\n'}
    eliot_to_stdout(message_formats, {}, stdout=fake_stdout)
    Message.new(message_type='flocker:eliot:test',
                command="some command").write()
    self.assertEqual(fake_stdout.flushed_data, "Running some command\n")

def emit(self, record):
    """
    Write a log message to the Eliot stream.

    :param record: The ``logging.LogRecord`` being emitted.
    :return: ``None``
    """
    msg = self.format(record)
    Message.new(message_type=messages.MESSAGE_TYPE_ELIOT_LOG,
                message_level=record.levelname,
                message=msg).write()

def can_connect():
    with closing(socket.socket()) as s:
        conn = s.connect_ex((host, port))
        Message.new(
            message_type="acceptance:verify_socket",
            host=host,
            port=port,
            result=conn,
        ).write()
        return conn == 0

def _observe(self, event):
    flattened = loads(eventAsJSON(event))
    # We get a timestamp from Eliot.
    flattened.pop(u"log_time")
    # This is never serializable anyway.  "Legacy" log events (from
    # twisted.python.log) don't have this so make it optional.
    flattened.pop(u"log_logger", None)
    Message.new(message_type=u"eliot:twisted",
                **flattened).write(self.logger)

def can_connect():
    with closing(socket()) as s:
        s.settimeout(SOCKET_TIMEOUT_FOR_POLLING)
        conn = s.connect_ex((host, port))
        Message.new(
            message_type="acceptance:verify_socket",
            host=host,
            port=port,
            result=conn,
        ).write()
        return conn == 0

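# --- Illustrative usage (not from the original source) ----------------------
# A minimal sketch of how a ``can_connect`` helper like the one above is
# typically driven: poll it until it reports success or a deadline passes.
# ``poll_until_connected`` and its parameters are hypothetical names, and
# ``can_connect`` is assumed to be in scope (it closes over ``host``/``port``).
import time


def poll_until_connected(timeout=60, interval=1.0):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if can_connect():
            return True
        time.sleep(interval)
    return False
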
def emit(self, record):
    Message.new(message_type=u"eliot:stdlib",
                log_level=record.levelname,
                logger=record.name,
                message=record.getMessage()).write(self.logger)
    if record.exc_info:
        write_traceback(
            logger=self.logger,
            exc_info=record.exc_info,
        )