def stop(iface, resource_config, **_):
    """Stops all instances associated with Autoscaling group."""

    autoscaling_group = iface.properties

    instances = autoscaling_group.get(INSTANCES, [])
    minsize = autoscaling_group.get('MinSize')
    maxsize = autoscaling_group.get('MaxSize')
    desired_cap = autoscaling_group.get('DesiredCapacity')

    # If the scaling rules would still allow instances, scale the group to zero
    if minsize != 0 and desired_cap != 0 and maxsize != 0:
        stop_parameters = {
            RESOURCE_NAME: iface.resource_id,
            'MinSize': 0,
            'MaxSize': 0,
            'DesiredCapacity': 0
        }
        iface.update(stop_parameters)
        raise OperationRetry(
            'Updating %s ID# "%s" parameters before deletion.' %
            (iface.type_name, iface.resource_id))

    # Retry until there are no instances.
    if len(instances) > 0:
        raise OperationRetry('%s ID# "%s" is deleting associated instances.' %
                             (iface.type_name, iface.resource_id))
def check_api(client_callable, arguments=None, _progress_handler=None):
    """ Check for API Response and handle generically. """

    try:
        if isinstance(arguments, dict):
            response = client_callable(**arguments)
        elif arguments is None:
            response = client_callable()
        elif _progress_handler is not None:
            response = client_callable(arguments,
                                       progress_callback=_progress_handler)
        else:
            response = client_callable(arguments)
    except ConnectionError as e:
        raise OperationRetry('Retrying after error: {0}'.format(str(e)))
    except CloudifyClientError as e:
        if e.status_code == 502:
            raise OperationRetry('Retrying after error: {0}'.format(str(e)))
        elif e.status_code == 404:
            raise SecretNotFoundError('Not Found error: {0}'.format(str(e)))
        else:
            ctx.logger.error('Ignoring error: {0}'.format(str(e)))
    else:
        ctx.logger.debug('Returning response: {0}'.format(response))
        return response
    return None
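A minimal usage sketch (the REST client call and helper below are illustrative assumptions, not part of the original source): passing a dict makes check_api expand it as keyword arguments; connection errors and 502 responses become OperationRetry so Cloudify re-invokes the operation, a 404 raises SecretNotFoundError, and any other client error is logged and None is returned.

def get_outputs_or_retry(rest_client, deployment_id):
    # Hypothetical helper for illustration only.
    return check_api(rest_client.deployments.outputs.get,
                     arguments={'deployment_id': deployment_id})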
def check_api(
        client_callable,
        arguments=None,
        _progress_handler=None):

    try:
        if isinstance(arguments, dict):
            response = \
                client_callable(**arguments)
        elif arguments is None:
            response = client_callable()
        elif _progress_handler is not None:
            response = \
                client_callable(
                    arguments,
                    progress_callback=_progress_handler)
        else:
            response = \
                client_callable(arguments)
    except ConnectionError as e:
        sleep(5)
        raise OperationRetry('Retrying after error: {0}'.format(str(e)))
    except CloudifyClientError as e:
        if e.status_code == 502:
            sleep(5)
            raise OperationRetry('Retrying after error: {0}'.format(str(e)))
        else:
            sleep(5)
            ctx.logger.error('Ignoring error: {0}'.format(str(e)))
    else:
        sleep(5)
        ctx.logger.debug('Returning response: {0}'.format(response))
        return response
    return None
def start(ctx, iface, resource_config, **_):
    '''Starts AWS EC2 Instances'''

    if iface.status in [RUNNING] and ctx.operation.retry_number > 0:
        current_properties = iface.properties
        ip = current_properties.get('PrivateIpAddress')
        pip = current_properties.get('PublicIpAddress')
        if ctx.node.properties['use_public_ip']:
            ctx.instance.runtime_properties['ip'] = pip
        else:
            ctx.instance.runtime_properties['ip'] = ip
        ctx.instance.runtime_properties['public_ip_address'] = pip
        ctx.instance.runtime_properties['private_ip_address'] = ip
        if not _handle_password(iface):
            raise OperationRetry('Waiting for {0} ID# {1} password.'.format(
                iface.type_name, iface.resource_id))
        return

    elif ctx.operation.retry_number == 0:
        params = \
            dict() if not resource_config else resource_config.copy()
        iface.start(
            {INSTANCE_IDS: params.get(INSTANCE_IDS, [iface.resource_id])})

    raise OperationRetry('{0} ID# {1} is still in a pending state.'.format(
        iface.type_name, iface.resource_id))
def set_hostname():
    hostname = execute_command('hostname')
    # Re-try ``hostname`` command in case it failed
    if hostname is False:
        raise OperationRetry('Re-try running {0}'.format('hostname'))

    # Check ``hostname`` output
    hostname = hostname.rsplit('\n')
    if hostname:
        ctx.instance.runtime_properties['hostname'] = hostname[0]

    # In case ``hostname`` output is empty, retry again
    else:
        raise OperationRetry('hostname output is empty, retrying.')
def wait_until_status(resource,
                      resource_type,
                      status,
                      error_statuses):
    """
    This method is build in order to check the status of the openstack
    resource and whether is is ready to be used or not
    :param resource: Current instance of openstack resource
    :param str resource_type: Resource type need to check status for
    :param str status: desired status need to check the resource on
    :param list error_statuses: List of error statuses that we should raise
     error about if the remote openstack resource matches them
    :return: Instance of the current openstack object contains the updated
    status
    """
    # Check the openstack resource status
    openstack_resource, ready = get_ready_resource_status(resource,
                                                          resource_type,
                                                          status,
                                                          error_statuses)
    if ready and openstack_resource:
        return openstack_resource
    else:
        message = '{0} {1} current state not ready: {2}'\
                .format(resource_type,
                        openstack_resource.id,
                        openstack_resource.status)

        raise OperationRetry(message)
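A hedged example of how wait_until_status might be called from an operation task (the resource object and status strings are assumptions for illustration): until the resource reaches the desired status, the raised OperationRetry makes Cloudify re-run the task.

server = wait_until_status(server_resource,  # assumed openstack resource wrapper
                           'server',         # resource type used in the retry message
                           'ACTIVE',         # assumed desired status
                           ['ERROR'])        # statuses treated as fatal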
 def wrapper(**kwargs):
     try:
         kwargs['resource_definition'] = \
             retrieve_resource_definition(**kwargs)
         kwargs['api_mapping'] = retrieve_mapping(**kwargs)
         task(**kwargs)
     except (KuberentesMappingNotFoundError,
             KuberentesInvalidPayloadClassError,
             KuberentesInvalidApiClassError,
             KuberentesInvalidApiMethodError) as e:
         raise NonRecoverableError(str(e))
     except OperationRetry as e:
         _, exc_value, exc_traceback = sys.exc_info()
         raise OperationRetry(
             '{0}'.format(str(e)),
             retry_after=15,
             causes=[exception_to_error_cause(exc_value, exc_traceback)]
         )
     except NonRecoverableError as e:
         _, exc_value, exc_traceback = sys.exc_info()
         raise NonRecoverableError(
             '{0}'.format(str(e)),
             causes=[exception_to_error_cause(exc_value, exc_traceback)]
         )
     except Exception as e:
         _, exc_value, exc_traceback = sys.exc_info()
         raise RecoverableError(
             '{0}'.format(str(e)),
             causes=[exception_to_error_cause(exc_value, exc_traceback)]
         )
def delete(openstack_resource):
    """
    Delete current openstack volume instance
    :param openstack_resource: instance of openstack volume resource
    """
    # Before deleting the volume, check whether it has associated
    # snapshots that must be cleaned up
    search_opts = {
        'volume_id': openstack_resource.resource_id,
    }
    _delete_volume_snapshot(openstack_resource, search_opts)

    # Only trigger the delete API the first time this task runs; on
    # subsequent retries keep calling the get volume API to check whether
    # the volume has really been deleted
    if VOLUME_TASK_DELETE not in ctx.instance.runtime_properties:
        # We should never get here twice if the instance was removed correctly
        # Delete volume resource
        openstack_resource.delete()
        ctx.instance.runtime_properties[VOLUME_TASK_DELETE] = True
        # save flag as current state before external call
        ctx.instance.update()

    # Make sure that the volume is actually being deleted
    try:
        openstack_resource.get()
        raise OperationRetry('Volume {0} is still deleting'.format(
            openstack_resource.resource_id))
    except openstack.exceptions.ResourceNotFound:
        ctx.logger.info('Volume {0} is deleted successfully'.format(
            openstack_resource.resource_id))
def custom_resource_delete(client, api_mapping, resource_definition, **kwargs):
    try:
        read_response = _do_resource_read(client,
                                          api_mapping,
                                          _retrieve_id(ctx.instance),
                                          **kwargs)
        ctx.instance.runtime_properties[INSTANCE_RUNTIME_PROPERTY_KUBERNETES] \
            = read_response
    except KuberentesApiOperationError as e:
        if '"code":404' in str(e):
            ctx.logger.debug(
                'Ignoring error: {0}'.format(str(e)))
        else:
            raise RecoverableError(
                'Raising error: {0}'.format(str(e)))
    else:
        delete_response = _do_resource_delete(
            client,
            api_mapping,
            resource_definition,
            _retrieve_id(ctx.instance),
            **kwargs
        )

        raise OperationRetry(
            'Delete response: {0}'.format(delete_response))
 def wrapper_inner(**kwargs):
     '''Inner, worker function'''
     ctx = kwargs['ctx']
     resource_type = kwargs.get('resource_type', 'AWS Resource')
     iface = kwargs['iface']
     # Run the operation if this is the first pass
     if not ctx.instance.runtime_properties.get('__deleted', False):
         function(**kwargs)
         ctx.instance.runtime_properties['__deleted'] = True
     # Get a resource interface and query for the status
     status = iface.status
     ctx.logger.debug('%s ID# "%s" reported status: %s' %
                      (resource_type, iface.resource_id, status))
     if not status or (status_deleted and status in status_deleted):
         for key in [EXT_RES_ARN, EXT_RES_ID, 'resource_config']:
             if key in ctx.instance.runtime_properties:
                 del ctx.instance.runtime_properties[key]
         return
     elif status_pending and status in status_pending:
         raise OperationRetry(
             '%s ID# "%s" is still in a pending state.' %
             (resource_type, iface.resource_id))
     raise NonRecoverableError(
         '%s ID# "%s" reported an unexpected status: "%s"' %
         (resource_type, iface.resource_id, status))
def run(playbook_args, ansible_env_vars, _ctx, **_):
    _ctx.logger.debug('playbook_args: {0}'.format(playbook_args))

    try:
        playbook = AnsiblePlaybookFromFile(**playbook_args)
        utils.assign_environ(ansible_env_vars)
        process = {}
        process['env'] = ansible_env_vars
        process['args'] = playbook.process_args
        # Prepare the script which needs to be run
        playbook.execute(
            process_execution,
            script_func=execute,
            script_path='ansible-playbook',
            ctx=_ctx,
            process=process
        )
    except CloudifyAnsibleSDKError as sdk_error:
        raise NonRecoverableError(sdk_error)
    except ProcessException as process_error:
        if process_error.exit_code in UNREACHABLE_CODES:
            raise OperationRetry(
                'One or more hosts are unreachable.')
        if process_error.exit_code not in SUCCESS_CODES:
            raise NonRecoverableError(
                'One or more hosts failed.')
def run(playbook_args, ansible_env_vars, _ctx, **_):
    secure_log_playbook_args(_ctx, playbook_args)

    try:
        playbook = AnsiblePlaybookFromFile(**playbook_args)
        utils.assign_environ(ansible_env_vars)
        process = {}
        process['env'] = ansible_env_vars
        process['args'] = playbook.process_args
        # Check if ansible_playbook_executable_path was provided.
        # If not provided, default to "ansible-playbook", which will use
        # the executable included in the plugin
        script_path = \
            playbook_args.get("ansible_playbook_executable_path",
                              "ansible-playbook")

        # Prepare the script which needs to be run
        playbook.execute(process_execution,
                         script_func=execute,
                         script_path=script_path,
                         ctx=_ctx,
                         process=process)
    except CloudifyAnsibleSDKError as sdk_error:
        raise NonRecoverableError(sdk_error)
    except ProcessException as process_error:
        if process_error.exit_code in UNREACHABLE_CODES:
            raise OperationRetry('One or more hosts are unreachable.')
        if process_error.exit_code not in SUCCESS_CODES:
            raise NonRecoverableError('One or more hosts failed.')
def setup_kubernetes_node_data_type():
    ctx.logger.debug('Setup kubernetes node data '
                     'type for deployment id {0}'.format(ctx.deployment.id))

    cfy_client = manager.get_rest_client()
    try:
        response = cfy_client.deployments.outputs.get(ctx.deployment.id)

    except CloudifyClientError as ex:
        ctx.logger.debug('Unable to get outputs for deployment'
                         ' {0}: {1}'.format(ctx.deployment.id, ex))

        raise OperationRetry('Retrying getting deployment outputs.')

    except Exception:
        response = generate_traceback_exception()

        ctx.logger.error('Error traceback {0} with message {1}'.format(
            response['traceback'], response['message']))

        raise NonRecoverableError("Failed to get outputs")

    else:
        dep_outputs = response.get('outputs')
        ctx.logger.debug('Deployment outputs: {0}'.format(dep_outputs))
        node_data_type = dep_outputs.get('deployment-node-data-type')

        if node_data_type:
            os.environ['CFY_K8S_NODE_TYPE'] = node_data_type

        else:
            os.environ['CFY_K8S_NODE_TYPE'] =\
                'cloudify.nodes.ApplicationServer.kubernetes.Node'
def assoc(ctx, **_):
    """associate instance with ELB classic LB"""
    instance_id = \
        ctx.source.instance.runtime_properties.get(
            EXTERNAL_RESOURCE_ID)
    lb = ctx.target.instance.runtime_properties.get(EXTERNAL_RESOURCE_ID)
    iface = \
        ELBClassicLoadBalancer(ctx.target.node, lb, logger=ctx.logger)
    if ctx.operation.retry_number == 0:
        iface.register_instances({
            RESOURCE_NAME: lb,
            'Instances': [{
                'InstanceId': instance_id
            }]
        })
    if 'instances' not in ctx.target.instance.runtime_properties.keys():
        ctx.target.instance.runtime_properties['instances'] = []
    instances_list = ctx.target.instance.runtime_properties['instances']
    if instance_id not in instances_list:
        instances_list.append(instance_id)
    ctx.target.instance.runtime_properties['instances'] = instances_list
    actual_instance_ids = \
        [i['InstanceId'] for i in iface.properties['Instances']]
    if instance_id not in actual_instance_ids:
        raise OperationRetry(
            'Waiting for Instance {0} to be added to ELB {1}.'.format(
                instance_id, lb))
def detach(ctx, iface, resource_config, **_):
    '''Detach an AWS EC2 Internet Gateway from a VPC'''
    params = dict() if not resource_config else resource_config.copy()

    internet_gateway_id = params.get(INTERNETGATEWAY_ID)
    if not internet_gateway_id:
        internet_gateway_id = iface.resource_id

    params.update({INTERNETGATEWAY_ID: internet_gateway_id})

    vpc_id = params.get(VPC_ID)
    if not vpc_id:
        targ = \
            utils.find_rel_by_node_type(ctx.instance, VPC_TYPE) or \
            utils.find_rel_by_node_type(ctx.instance, VPC_TYPE_DEPRECATED)

        # Attempt to use the VPC ID from parameters.
        # Fallback to connected VPC.
        params[VPC_ID] = \
            vpc_id or \
            targ.target.instance.runtime_properties.get(EXTERNAL_RESOURCE_ID)

    try:
        iface.detach(params)
    except ClientError as e:
        raise OperationRetry(e.message)
def delete(iface, resource_config, **_):
    '''Deletes an AWS RDS Option Group'''
    try:
        iface.delete(resource_config)
    except ClientError as exc:
        if exc.response['Error']['Code'] == 'InvalidOptionGroupStateFault':
            raise OperationRetry(exc.response['Error']['Message'])
        raise exc
def _create_volume_snapshot(volume_resource, snapshot_name, snapshot_type):
    """
    This method handles creating a volume snapshot and makes sure it is
    created successfully
    :param volume_resource: instance of openstack volume resource
    :param str snapshot_name: The name of the snapshot
    :param str snapshot_type: The type of the snapshot
    """

    # Prepare config for snapshot
    snapshot_config = {
        'name': snapshot_name,
        'volume_id': volume_resource.resource_id,
        'force': True,
        'description': snapshot_type
    }

    # Get a volume snapshot instance ready to create the desired
    # snapshot
    snapshot = \
        _prepare_volume_snapshot_instance(volume_resource, snapshot_config)

    # Check whether the snapshot id already exists; if it does, the volume
    # snapshot was already created and we only need to keep checking its
    # status to make sure it is ready to use
    if VOLUME_SNAPSHOT_ID in ctx.instance.runtime_properties:
        snapshot.resource_id = \
            ctx.instance.runtime_properties[VOLUME_SNAPSHOT_ID]

    # Check whether the volume snapshot task flag exists; if it does not,
    # this is the first time this operation task is running, otherwise we
    # are still checking the status to make sure the snapshot is finished
    if VOLUME_SNAPSHOT_TASK not in ctx.instance.runtime_properties:
        # Create snapshot
        snapshot_response = snapshot.create()
        snapshot_id = snapshot_response.id
        snapshot.resource_id = snapshot_id
        ctx.instance.runtime_properties[VOLUME_SNAPSHOT_TASK] = True
        ctx.instance.runtime_properties[VOLUME_SNAPSHOT_ID] = snapshot_id
        # save flag as current state before external call
        ctx.instance.update()

    # Check the status of the snapshot process
    snapshot_resource, ready = \
        get_ready_resource_status(snapshot,
                                  VOLUME_SNAPSHOT_OPENSTACK_TYPE,
                                  VOLUME_STATUS_AVAILABLE,
                                  VOLUME_ERROR_STATUSES)

    if not ready:
        raise OperationRetry('Volume snapshot is still in {0} status'.format(
            snapshot_resource.status))
    else:
        # Once the snapshot is ready to use, clear the volume snapshot
        # task flag & snapshot volume id from runtime properties in order
        # to allow triggering the operation multiple times
        del ctx.instance.runtime_properties[VOLUME_SNAPSHOT_TASK]
        del ctx.instance.runtime_properties[VOLUME_SNAPSHOT_ID]
def execute(resource_config,
            file_to_source='exec',
            subprocess_args_overrides=None,
            ignore_failure=False,
            retry_on_failure=False,
            **_):
    """ Execute some file in an extracted archive. """

    resource_config = \
        resource_config or ctx.node.properties['resource_config']

    resource_dir = resource_config.get('resource_dir', '')
    resource_list = resource_config.get('resource_list', [])
    template_variables = resource_config.get('template_variables', {})

    if not isinstance(resource_dir, basestring):
        raise NonRecoverableError("'resource_dir' must be a string.")

    if not isinstance(resource_list, list):
        raise NonRecoverableError("'resource_list' must be a list.")

    if not isinstance(template_variables, dict):
        raise NonRecoverableError("'template_variables' must be a dictionary.")

    if resource_dir:
        tmp_dir = get_package_dir(resource_dir, resource_list,
                                  template_variables)
        # In case resource_dir is a zip archive
        cwd = os.path.join(tmp_dir, os.path.splitext(resource_dir)[0])
    else:
        cwd = get_package_dir(resource_dir, resource_list, template_variables)
    command = ['bash', '-c', 'source {0}'.format(file_to_source)]

    subprocess_args = \
        {
            'args': command,
            'stdin': subprocess.PIPE,
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
            'cwd': cwd
        }

    handle_overrides(subprocess_args_overrides, subprocess_args)

    ctx.logger.debug('Args: {0}'.format(subprocess_args))

    process = subprocess.Popen(**subprocess_args)

    out, err = process.communicate()
    ctx.logger.debug('Out: {0}'.format(out))
    ctx.logger.debug('Err: {0}'.format(err))

    if process.returncode and retry_on_failure:
        raise OperationRetry('Retrying: {0}'.format(err))

    elif process.returncode and not ignore_failure:
        raise NonRecoverableError('Failed: {0}'.format(err))
def start(ctx, iface, resource_config, **_):
    '''Starts AWS EC2 Instances'''

    if iface.status in [RUNNING] and ctx.operation.retry_number > 0:
        assign_ip_properties(ctx, iface.properties)
        if not _handle_password(iface):
            raise OperationRetry('Waiting for {0} ID# {1} password.'.format(
                iface.type_name, iface.resource_id))
        return

    elif ctx.operation.retry_number == 0:
        params = \
            dict() if not resource_config else resource_config.copy()
        iface.start(
            {INSTANCE_IDS: params.get(INSTANCE_IDS, [iface.resource_id])})

    raise OperationRetry('{0} ID# {1} is still in a pending state.'.format(
        iface.type_name, iface.resource_id))
def handle_result(result, _ctx, ignore_failures=False, ignore_dark=False):
    _ctx.logger.debug('result: {0}'.format(result))
    _get_instance(_ctx).runtime_properties['result'] = result
    failures = result.get('failures')
    dark = result.get('dark')
    if failures and not ignore_failures:
        raise NonRecoverableError(
            'These Ansible nodes failed: {0}'.format(failures))
    elif dark and not ignore_dark:
        raise OperationRetry('These Ansible nodes were dark: {0}'.format(dark))
def execute(ctx, iface, name=None, clientRequestToken=None, **_):
    try:
        execute_response = iface.execute(name, clientRequestToken)
        ctx.instance.runtime_properties[
            'execute_pipeline_response'] = execute_response
    except ClientError:
        error_traceback = utils.get_traceback_exception()
        raise OperationRetry('Re-try start_pipeline_execution operation.',
                             retry_after=2,
                             causes=[error_traceback])
 def is_resource_ready(self):
     if self.status in ['Running', 'Succeeded']:
         ctx.logger.debug(self.status_message)
     elif self.status in ['Pending', 'Unknown']:
         raise OperationRetry(self.status_message)
     elif self.status in ['Failed']:
         raise NonRecoverableError(self.status_message)
     else:
         ctx.logger.error('Unexpected status. Please report: {0}'.format(
             self.status))
         return False
     return True
def start_check(service_name):
    status_string = ''
    systemctl_status = execute_command(
        ['sudo', 'systemctl', 'status', '{}.service'.format(service_name)])
    if not isinstance(systemctl_status, basestring):
        raise OperationRetry(
            'Retrying `sudo systemctl status {0}.service`.'.format(
                service_name))
    for line in systemctl_status.split('\n'):
        if 'Active:' in line:
            status = line.strip()
            zstatus = status.split(' ')
            ctx.logger.debug('{} status line: {}'.format(
                service_name, repr(zstatus)))
            if len(zstatus) > 1:
                status_string = zstatus[1]

    ctx.logger.info('{} status: {}'.format(service_name, repr(status_string)))
    if 'active' != status_string:
        raise OperationRetry('Wait a little more.')
    else:
        ctx.logger.info('Service {} is started.'.format(service_name))
def delete(**_):
    gcp_config = utils.get_gcp_config()
    name = ctx.instance.runtime_properties.get(constants.NAME)
    if name:
        if re.match(PATTERN, name):
            name = name.split('/roles/')[-1]
        role = Role(gcp_config, ctx.logger, name=name)
        role_dict = role.get()
        deleted = role_dict.get('deleted')
        if not deleted:
            role.delete()
            raise OperationRetry(DELETING_MESSAGE.format(deleted=deleted))
def wrapper_inner(**kwargs):
    '''Inner, worker function'''
    ctx = kwargs['ctx']
    _, _, _, operation_name = ctx.operation.name.split('.')
    resource_type = kwargs.get('resource_type', 'AWS Resource')
    iface = kwargs['iface']
    # Run the operation if this is the first pass
    if ctx.operation.retry_number == 0:
        function(**kwargs)
        # Issues 128 and 129: update the iface object with the actual
        # details from the AWS response, assuming that the actual state is
        # available at ctx.instance.runtime_properties['create_response']
        # and that ctx.instance.runtime_properties[EXT_RES_ID] was
        # correctly updated after creation

        # First, verify whether a new AWS resource was really created
        if iface.resource_id != \
                ctx.instance.runtime_properties.get(EXT_RES_ID):
            # A new resource was really created, so update the iface object
            iface.resource_id = \
                ctx.instance.runtime_properties.get(EXT_RES_ID)
            # If a sequence of install -> uninstall workflows was
            # executed, we should remove the '__deleted'
            # flag set in the wait_for_delete decorator below
            if '__deleted' in ctx.instance.runtime_properties:
                del ctx.instance.runtime_properties['__deleted']

    # Get a resource interface and query for the status
    status = iface.status
    ctx.logger.debug('%s ID# "%s" reported status: %s' %
                     (resource_type, iface.resource_id, status))
    if status_pending and status in status_pending:
        raise OperationRetry(
            '%s ID# "%s" is still in a pending state.' %
            (resource_type, iface.resource_id))

    elif status_good and status in status_good:
        if operation_name in ['create', 'configure']:
            ctx.instance.runtime_properties['create_response'] = \
                utils.JsonCleanuper(iface.properties).to_dict()

    elif not status and fail_on_missing:
        raise NonRecoverableError(
            '%s ID# "%s" no longer exists but "fail_on_missing" set' %
            (resource_type, iface.resource_id))
    elif status_good and status not in status_good and fail_on_missing:
        raise NonRecoverableError(
            '%s ID# "%s" reported an unexpected status: "%s"' %
            (resource_type, iface.resource_id, status))
    def _decorator(self, *args, **kwargs):
        try:
            response = func(self, *args, **kwargs)
        except ServerNotFoundError as e:
            raise OperationRetry(
                'Warning: {0}. '
                'If problem persists, error may be fatal.'.format(e.message))
        if 'error' in response:
            self.logger.error('Response with error {0}'.format(
                response['error']))

            raise GCPError(response['error'])
        return response
def _wait_for_status(kwargs,
                     _ctx,
                     _operation,
                     function,
                     status_pending,
                     status_good,
                     fail_on_missing):
    _, _, _, operation_name = _operation.name.split('.')
    resource_type = kwargs.get('resource_type', 'AWS Resource')
    iface = kwargs['iface']
    # Run the operation if this is the first pass
    if _operation.retry_number == 0:
        function(**kwargs)
        # Issues 128 and 129: update the iface object with the actual
        # details from the AWS response, assuming that the actual state is
        # available at ctx.instance.runtime_properties['create_response']
        # and that ctx.instance.runtime_properties[EXT_RES_ID] was
        # correctly updated after creation

        # First, verify whether a new AWS resource was really created
        if iface.resource_id != \
                _ctx.instance.runtime_properties.get(EXT_RES_ID):
            # A new resource was really created, so update the iface object
            iface.resource_id = \
                _ctx.instance.runtime_properties.get(EXT_RES_ID)

    # Get a resource interface and query for the status
    status = iface.status
    ctx.logger.debug('%s ID# "%s" reported status: %s'
                     % (resource_type, iface.resource_id, status))
    if status_pending and status in status_pending:
        raise OperationRetry(
            '%s ID# "%s" is still in a pending state.'
            % (resource_type, iface.resource_id))

    elif status_good and status in status_good:
        if operation_name in ['create', 'configure']:
            _ctx.instance.runtime_properties['create_response'] = \
                utils.JsonCleanuper(iface.properties).to_dict()

    elif not status and fail_on_missing:
        raise NonRecoverableError(
            '%s ID# "%s" no longer exists but "fail_on_missing" set'
            % (resource_type, iface.resource_id))
    elif status_good and status not in status_good and fail_on_missing:
        raise NonRecoverableError(
            '%s ID# "%s" reported an unexpected status: "%s"'
            % (resource_type, iface.resource_id, status))
def execute(_command):

    subprocess_args = {
        'args': _command.split(),
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE
    }
    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
    process = subprocess.Popen(**subprocess_args)
    output, error = process.communicate()
    if process.returncode:
        raise OperationRetry('Running `{0}` returned error `{1}`.'.format(
            _command, error))
    return process
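A small usage sketch (the command is an assumption for illustration): a non-zero exit code raises OperationRetry, so the surrounding operation is retried instead of failing outright.

proc = execute('systemctl is-active nginx')  # assumed command, illustration only
ctx.logger.info('Command exited with code {0}'.format(proc.returncode))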
def _create_volume_backup(volume_resource, backup_name):
    """
    This method handles creating a volume backup and makes sure it is
    created successfully
    :param volume_resource: instance of openstack volume resource
    :param str backup_name: The backup name
    """
    # Prepare config for backup
    backup_config = {
        'name': backup_name,
        'volume_id': volume_resource.resource_id
    }

    backup = _prepare_volume_backup_instance(volume_resource, backup_config)

    # Check whether the backup id already exists; if it does, the volume
    # backup was already created and we only need to keep checking its
    # status to make sure it is ready to use
    if VOLUME_BACKUP_ID in ctx.instance.runtime_properties:
        backup.resource_id = \
            ctx.instance.runtime_properties[VOLUME_BACKUP_ID]

    # Check whether the backup call was already made, so that we only
    # trigger it once
    if VOLUME_BACKUP_TASK not in ctx.instance.runtime_properties:
        # Create backup
        backup_response = backup.create()
        backup_id = backup_response.id
        backup.resource_id = backup_id
        ctx.instance.runtime_properties[VOLUME_BACKUP_TASK] = True
        ctx.instance.runtime_properties[VOLUME_BACKUP_ID] = backup_id
        # save flag as current state before external call
        ctx.instance.update()

    backup_resource, ready = \
        get_ready_resource_status(backup,
                                  VOLUME_BACKUP_OPENSTACK_TYPE,
                                  VOLUME_STATUS_AVAILABLE,
                                  VOLUME_ERROR_STATUSES)

    if not ready:
        raise OperationRetry('Volume backup is still in {0} status'.format(
            backup_resource.status))
    else:
        del ctx.instance.runtime_properties[VOLUME_BACKUP_TASK]
        del ctx.instance.runtime_properties[VOLUME_BACKUP_ID]
def delete(ctx, iface, resource_config, **_):
    """Deletes an AWS EC2 ElasticIP"""

    # Create a copy of the resource config for clean manipulation.
    params = \
        dict() if not resource_config else resource_config.copy()

    allocation_id = params.get(ALLOCATION_ID)
    if not allocation_id:
        allocation_id = \
            ctx.instance.runtime_properties.get(
                'allocation_id')

    elasticip_id = params.get(ELASTICIP_ID)
    if not elasticip_id:
        elasticip_id = iface.resource_id

    if allocation_id:
        params[ALLOCATION_ID] = allocation_id
        try:
            del params[ELASTICIP_ID]
        except KeyError:
            pass
    elif elasticip_id:
        params[ELASTICIP_ID] = elasticip_id
        try:
            del params[ALLOCATION_ID]
        except KeyError:
            pass

    if ctx.node.properties.get('use_unassociated_addresses', False):
        address = ctx.instance.runtime_properties.pop('unassociated_address',
                                                      None)
        if address:
            ctx.logger.info(
                'Not deleting address {address}'.format(address=address))
            return

    try:
        iface.delete(params)
    except ClientError as e:
        if 'AuthFailure' in text_type(e):
            raise OperationRetry('Address has not been released yet.')
        else:
            # Other client errors on delete are ignored
            pass