def resource_by_id(_client, _id, _type):
    """Check which listed resources of a given type match an id.

    :param _client: a Cloudify REST client.
    :param _id: the resource id to look for.
    :param _type: name of a client collection, e.g. 'deployments'.
    :returns: a list of booleans, one per listed resource, True where
        that resource's id equals ``_id`` (callers wrap this in any()).
    :raises NonRecoverableError: if the REST list call fails.
    """
    _resources_client = getattr(_client, _type)
    try:
        _resources = _resources_client.list(_include=['id'])
    except CloudifyClientError as ex:
        raise NonRecoverableError('{0} list failed {1}.'.format(
            _type, text_type(ex)))
    return [text_type(resource['id']) == _id for resource in _resources]
    def test_dep_workflow_in_state_pollster_raises(self):
        """Pollster converts a CloudifyClientError into NonRecoverableError."""
        test_name = 'test_dep_workflow_in_state_pollster_raises'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)

        def raise_client_error(*_args, **_kwargs):
            raise CloudifyClientError('Mistake')

        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            cfy_mock_client = MockCloudifyRestClient()
            response = cfy_mock_client.executions.get()
            response['id'] = test_name
            cfy_mock_client.executions.get = raise_client_error
            mock_client.return_value = cfy_mock_client
            output = self.assertRaises(
                NonRecoverableError,
                dep_workflow_in_state_pollster,
                cfy_mock_client,
                test_name,
                'terminated',
                0)
            self.assertIn('failed', text_type(output))
def dep_system_workflows_finished(_client, _check_all_in_deployment=False):
    """Report whether all relevant executions reached a terminal state.

    Pages through every execution (system workflows included). Returns
    False as soon as a system workflow -- or, when
    ``_check_all_in_deployment`` is a deployment id, any execution of
    that deployment -- is found in a non-terminal state.

    :param _client: a Cloudify REST client.
    :param _check_all_in_deployment: falsy to check only system
        workflows, or a deployment id whose executions must also be done.
    :returns: True when everything checked has finished, else False.
    :raises NonRecoverableError: if the executions list call fails.
    """
    terminal_states = ('terminated', 'completed', 'failed', 'cancelled')

    _offset = int(getenv('_PAGINATION_OFFSET', 0))
    _size = int(getenv('_PAGINATION_SIZE', 1000))

    while True:
        try:
            _execs = _client.executions.list(include_system_workflows=True,
                                             _offset=_offset,
                                             _size=_size)
        except CloudifyClientError as ex:
            raise NonRecoverableError('Executions list failed {0}.'.format(
                text_type(ex)))

        for _exec in _execs:
            status = _exec.get('status')

            if _exec.get('is_system_workflow') and \
                    status not in terminal_states:
                return False

            if _check_all_in_deployment and \
                    _check_all_in_deployment == _exec.get('deployment_id') \
                    and status not in terminal_states:
                return False

        # Stop once the last fetched page covers the reported total.
        pagination = _execs.metadata.pagination
        if pagination.total <= pagination.offset:
            break

        _offset += _size

    return True
# Example #4
def download_file(url, destination=None, keep_name=False):
    """Download file.

    :param url: Location of the file to download
    :type url: str
    :param destination:
        Location where the file should be saved (autogenerated by default)
    :type destination: str | None
    :param keep_name: use the filename from the url as destination filename
    :returns: Location where the file was saved
    :rtype: str
    :raises NonRecoverableError: if the request or the local write fails

    """
    chunk_size = 1024

    if not destination:
        if keep_name:
            # Re-use the basename of the URL path inside a fresh temp dir.
            name = os.path.basename(urlparse(url).path)
            destination = os.path.join(tempfile.mkdtemp(), name)
        else:
            fd, destination = tempfile.mkstemp()
            os.close(fd)

    ctx.logger.info('Downloading {0} to {1}...'.format(url, destination))

    try:
        response = requests.get(url, stream=True)
    except requests.exceptions.RequestException as ex:
        raise NonRecoverableError('Failed to download {0}. ({1})'.format(
            url, text_type(ex)))

    final_url = response.url
    if final_url != url:
        ctx.logger.debug('Redirected to {0}'.format(final_url))

    try:
        with open(destination, 'wb') as destination_file:
            for chunk in response.iter_content(chunk_size):
                destination_file.write(chunk)
    except IOError as ex:
        raise NonRecoverableError('Failed to download {0}. ({1})'.format(
            url, text_type(ex)))

    return destination
def dep_workflow_in_state_pollster(_client,
                                   _dep_id,
                                   _state,
                                   _workflow_id=None,
                                   _log_redirect=False,
                                   _execution_id=None):
    """Check whether an execution has reached the desired state.

    :param _client: a Cloudify REST client.
    :param _dep_id: deployment id (only used in debug logging here).
    :param _state: the status value being waited for.
    :param _workflow_id: unused; kept for interface compatibility.
    :param _log_redirect: when True, mirror the execution's logs locally.
    :param _execution_id: id of the execution to inspect.
    :returns: True if the execution status equals ``_state``, else False.
    :raises NonRecoverableError: if the get call fails, or the execution
        itself ended in 'failed'.
    """
    exec_get_fields = ['status', 'workflow_id', 'created_at', 'id']

    try:
        _exec = _client.executions.get(execution_id=_execution_id,
                                       _include=exec_get_fields)
        ctx.logger.debug('The exec get response form {0} is {1}'.format(
            _dep_id, _exec))
    except CloudifyClientError as ex:
        raise NonRecoverableError('Executions get failed {0}.'.format(
            text_type(ex)))

    if _log_redirect and _exec.get('id'):
        ctx.logger.debug('_exec info for _log_redirect is {0}'.format(_exec))
        dep_logs_redirect(_client, _exec.get('id'))

    status = _exec.get('status')
    if status == _state:
        ctx.logger.debug('The status for _exec info id'
                         ' {0} is {1}'.format(_execution_id, _state))
        return True
    if status == 'failed':
        raise NonRecoverableError('Execution {0} failed.'.format(
            text_type(_exec)))

    return False
    def test_poll_workflow_after_execute_failed(self):
        """A timed-out poll surfaces as NonRecoverableError."""
        _ctx = self.get_mock_ctx('test_poll_workflow_after_execute_failed')
        _ctx.logger.log = mock.MagicMock(return_value=None)
        current_ctx.set(_ctx)

        poll_path = 'cloudify_deployment_proxy.polling.poll_with_timeout'
        with mock.patch(poll_path) as mocked_fn:
            mocked_fn.return_value = False
            output = self.assertRaises(
                NonRecoverableError,
                poll_workflow_after_execute,
                None, None, None, None, None, None, None)
            self.assertIn('Execution timeout', text_type(output))
    def test_execute_start_rest_client_error(self):
        """execute_start fails with NonRecoverableError on a REST error."""
        test_name = 'test_execute_start_rest_client_error'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)
        _ctx.instance.runtime_properties['deployment'] = {}

        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            cfy_mock_client = MockCloudifyRestClient()
            cfy_mock_client.executions.start = REST_CLIENT_EXCEPTION
            mock_client.return_value = cfy_mock_client
            error = self.assertRaises(
                NonRecoverableError,
                execute_start,
                deployment_id=test_name,
                workflow_id='install')
            self.assertIn('action start failed', text_type(error))
        del _ctx, mock_client
    def dp_get_client_response(self,
                               _client,
                               _client_attr,
                               _client_args):
        """Dispatch a call on the configured Cloudify REST client.

        :param _client: name of a client collection on ``self.client``
            (e.g. 'deployments').
        :param _client_attr: name of the callable on that collection
            (e.g. 'list').
        :param _client_args: keyword arguments passed to the callable.
        :returns: whatever the underlying client call returns.
        :raises NonRecoverableError: if the call raises
            CloudifyClientError.
        """
        target = getattr(getattr(self.client, _client), _client_attr)
        try:
            return target(**_client_args)
        except CloudifyClientError as ex:
            raise NonRecoverableError(
                'Client action {0} failed: {1}.'.format(_client_attr,
                                                        text_type(ex)))
    def test_execute_start_timeout(self):
        """execute_start raises NonRecoverableError when polling times out."""
        test_name = 'test_execute_start_timeout'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)
        _ctx.instance.runtime_properties['deployment'] = {}

        poll_with_timeout_test = \
            'cloudify_deployment_proxy.polling.poll_with_timeout'
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            mock_client.return_value = MockCloudifyRestClient()
            with mock.patch(poll_with_timeout_test) as poll:
                poll.return_value = False
                error = self.assertRaises(
                    NonRecoverableError,
                    execute_start,
                    deployment_id=test_name,
                    workflow_id='install',
                    timeout=.001)
                self.assertIn('Execution timeout', text_type(error))
        del _ctx, mock_client
    def test_resource_by_id_client_error(self):
        """resource_by_id converts a REST failure into NonRecoverableError.

        The mocked client's ``deployments.list`` raises
        CloudifyClientError; resource_by_id must re-raise it as a
        NonRecoverableError whose message mentions 'failed'.
        """
        test_name = 'test_resource_by_id_client_error'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)

        def mock_return(*args, **kwargs):
            del args, kwargs
            raise CloudifyClientError('Mistake')

        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            cfy_mock_client = MockCloudifyRestClient()
            cfy_mock_client.deployments.list = mock_return
            # Bug fix: return the mocked client itself (previously the
            # raising function was assigned here), consistent with the
            # sibling tests in this suite.
            mock_client.return_value = cfy_mock_client
            output = \
                self.assertRaises(
                    NonRecoverableError,
                    resource_by_id,
                    cfy_mock_client,
                    test_name,
                    'deployments')
            self.assertIn('failed', text_type(output))
    def test_dep_system_workflows_finished_raises(self):
        """dep_system_workflows_finished raises when listing executions fails."""
        test_name = 'test_dep_system_workflows_finished_raises'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)

        def raise_client_error(*_args, **_kwargs):
            raise CloudifyClientError('Mistake')

        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            cfy_mock_client = MockCloudifyRestClient()
            list_response = cfy_mock_client.blueprints.list()
            list_response[0]['id'] = test_name
            cfy_mock_client.executions.list = raise_client_error
            mock_client.return_value = cfy_mock_client
            output = self.assertRaises(
                NonRecoverableError,
                dep_system_workflows_finished,
                cfy_mock_client)
            self.assertIn('failed', text_type(output))
def dep_logs_redirect(_client, execution_id):
    """Mirror an execution's event/log stream into this node's logger.

    Keeps a per-execution cursor in runtime_properties under
    'received_events' so repeated polls only fetch new events.

    :param _client: a Cloudify REST client.
    :param execution_id: the execution whose events are mirrored.
    """
    COUNT_EVENTS = "received_events"

    if not ctx.instance.runtime_properties.get(COUNT_EVENTS):
        ctx.instance.runtime_properties[COUNT_EVENTS] = {}

    last_event = int(ctx.instance.runtime_properties[COUNT_EVENTS].get(
        execution_id, 0))

    # Optimistic upper bound; corrected by the count the API returns.
    full_count = last_event + 100

    while full_count > last_event:
        events, full_count = _client.events.get(execution_id, last_event, 250,
                                                True)
        for event in events:
            ctx.logger.debug('Event {0} for execution_id {1}'.format(
                event, execution_id))
            instance_prompt = event.get('node_instance_id', "")
            if instance_prompt:
                if event.get('operation'):
                    instance_prompt += ("." +
                                        event.get('operation').split('.')[-1])

            if instance_prompt:
                instance_prompt = "[" + instance_prompt + "] "

            message = "%s %s%s" % (event.get('reported_timestamp', ""),
                                   instance_prompt if instance_prompt else "",
                                   event.get('message', ""))
            message = text_type(message)

            # Bug fix: the original format string used {1} twice, so the
            # execution id never appeared in this debug line.
            ctx.logger.debug(
                'Message {0} for Event {1} for execution_id {2}'.format(
                    message, event, execution_id))

            level = event.get('level', logging.INFO)

            # If the event dict had a 'level' key, then the value is
            # a string. In that case, convert it to uppercase and get
            # the matching Python logging constant.
            if isinstance(level, text_type):
                level = logging.getLevelName(level.upper())

            # In the (very) odd case that the level is still not an int
            # (can happen if the original level value wasn't recognized
            # by Python's logging library), then use 'INFO'.
            if not isinstance(level, int):
                level = logging.INFO

            ctx.logger.log(level, message)

        last_event += len(events)
        # returned infinite count
        if full_count < 0:
            full_count = last_event + 100
        # returned nothing, let's do it next time
        if len(events) == 0:
            ctx.logger.log(
                logging.INFO, "Waiting for log messages "
                "(execution: {0})...".format(execution_id))
            break

    ctx.instance.runtime_properties[COUNT_EVENTS][execution_id] = last_event
# Example #13
    def test_upload_plugins(self):
        """Exercise plugin upload for every supported 'plugins' shape.

        Covers an empty list (nothing uploaded), a dict of plugin specs,
        a list of plugin specs, a non-collection value (raises), and a
        spec with empty wagon/yaml paths (raises).
        """
        # Tests that deployments upload plugins

        test_name = 'test_delete_deployment_success'
        _ctx = self.get_mock_ctx(test_name)
        current_ctx.set(_ctx)

        get_local_path = mock.Mock(return_value="some_path")

        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            plugin = mock.Mock()
            plugin.id = "CustomPlugin"

            cfy_mock_client = MockCloudifyRestClient()
            cfy_mock_client.plugins.upload = mock.Mock(return_value=plugin)
            mock_client.return_value = cfy_mock_client
            with mock.patch('cloudify_deployment_proxy.get_local_path',
                            get_local_path):
                zip_files = mock.Mock(return_value="_zip")
                with mock.patch('cloudify_deployment_proxy.zip_files',
                                zip_files):
                    # empty plugins
                    deployment = DeploymentProxyBase({'plugins': []})
                    deployment._upload_plugins()
                    zip_files.assert_not_called()
                    get_local_path.assert_not_called()

                    # dist of plugins
                    deployment = DeploymentProxyBase({
                        'plugins': {
                            'base_plugin': {
                                'wagon_path': '_wagon_path',
                                'plugin_yaml_path': '_plugin_yaml_path'
                            }
                        }
                    })
                    # os is mocked so temp-file cleanup calls can be
                    # asserted without touching the filesystem.
                    os_mock = mock.Mock()
                    with mock.patch('cloudify_deployment_proxy.os', os_mock):
                        deployment._upload_plugins()
                    zip_files.assert_called_with(["some_path", "some_path"])
                    get_local_path.assert_has_calls([
                        mock.call('_wagon_path', create_temp=True),
                        mock.call('_plugin_yaml_path', create_temp=True)
                    ])
                    os_mock.remove.assert_has_calls([
                        mock.call('some_path'),
                        mock.call('some_path'),
                        mock.call('_zip')
                    ])

            # Fresh mocks for the list-shaped input so calls from the
            # dict case above do not leak into the assertions below.
            get_local_path = mock.Mock(return_value="some_path")
            # zip_files = mock.Mock(return_value="_zip")
            with mock.patch('cloudify_deployment_proxy.get_local_path',
                            get_local_path):
                zip_files = mock.Mock(return_value="_zip")
                with mock.patch('cloudify_deployment_proxy.zip_files',
                                zip_files):
                    # list of plugins
                    deployment = DeploymentProxyBase({
                        'plugins': [{
                            'wagon_path': '_wagon_path',
                            'plugin_yaml_path': '_plugin_yaml_path'
                        }]
                    })
                    os_mock = mock.Mock()
                    with mock.patch('cloudify_deployment_proxy.os', os_mock):
                        deployment._upload_plugins()
                    zip_files.assert_called_with(["some_path", "some_path"])
                    get_local_path.assert_has_calls([
                        mock.call('_wagon_path', create_temp=True),
                        mock.call('_plugin_yaml_path', create_temp=True)
                    ])
                    os_mock.remove.assert_has_calls([
                        mock.call('some_path'),
                        mock.call('some_path'),
                        mock.call('_zip')
                    ])

            # raise error if wrong plugins list
            deployment = DeploymentProxyBase({'plugins': True})
            error = self.assertRaises(NonRecoverableError,
                                      deployment._upload_plugins)
            self.assertIn('Wrong type in plugins: True', str(error))

            # raise error if wrong wagon/yaml values
            deployment = DeploymentProxyBase(
                {'plugins': [{
                    'wagon_path': '',
                    'plugin_yaml_path': ''
                }]})
            error = self.assertRaises(NonRecoverableError,
                                      deployment._upload_plugins)
            self.assertIn(
                "You should provide both values wagon_path: '' "
                "and plugin_yaml_path: ''", text_type(error))
    def __init__(self, operation_inputs):
        """
        Sets the properties that all operations need.

        Most values are resolved via ``get_desired_value``, which is
        called with operation inputs, instance runtime properties and
        node properties (presumably in that order of precedence --
        confirm against its definition).

        :param operation_inputs: The inputs from the operation.
        """

        full_operation_name = ctx.operation.name
        # Keep only the last dotted segment, e.g.
        # 'cloudify.interfaces.lifecycle.create' -> 'create'.
        self.operation_name = full_operation_name.split('.').pop()

        # These should not make their way into the Operation inputs.
        # They are exported via the environment for the pagination
        # helpers that read them back with getenv().
        os.environ['_PAGINATION_OFFSET'] = \
            text_type(operation_inputs.pop('pagination_offset', 0))
        os.environ['_PAGINATION_SIZE'] = \
            text_type(operation_inputs.pop('pagination_size', 1000))

        # cloudify client: an explicit client config wins over the
        # manager's own REST client.
        self.client_config = get_desired_value(
            'client', operation_inputs,
            ctx.instance.runtime_properties,
            ctx.node.properties
        )

        if self.client_config:
            self.client = CloudifyClient(**self.client_config)
        else:
            self.client = manager.get_rest_client()

        # plugins
        self.plugins = get_desired_value(
            'plugins', operation_inputs,
            ctx.instance.runtime_properties,
            ctx.node.properties
        )

        # secrets
        self.secrets = get_desired_value(
            'secrets', operation_inputs,
            ctx.instance.runtime_properties,
            ctx.node.properties
        )

        # resource_config
        self.config = get_desired_value(
            'resource_config', operation_inputs,
            ctx.instance.runtime_properties,
            ctx.node.properties)

        # Blueprint-related properties
        self.blueprint = self.config.get('blueprint', {})
        self.blueprint_id = self.blueprint.get('id') or ctx.instance.id
        self.blueprint_file_name = self.blueprint.get('main_file_name')
        self.blueprint_archive = self.blueprint.get('blueprint_archive')

        # Deployment-related properties
        self.deployment = self.config.get('deployment', {})
        self.deployment_id = self.deployment.get('id') or ctx.instance.id
        self.deployment_inputs = self.deployment.get('inputs', {})
        self.deployment_outputs = self.deployment.get('outputs')
        self.deployment_all_outputs = self.deployment.get('all_outputs', True)
        self.deployment_logs = self.deployment.get('logs', {})

        # Node-instance-related properties
        self.node_instance_proxy = self.config.get('node_instance')

        # Execution-related properties
        self.workflow_id = \
            operation_inputs.get('workflow_id',
                                 'create_deployment_environment')
        self.workflow_state = \
            operation_inputs.get(
                'workflow_state',
                'terminated')
        # Re-execution may be requested via the resource config or a
        # previously stored runtime property; defaults to False.
        self.reexecute = \
            self.config.get('reexecute') \
            or ctx.instance.runtime_properties.get('reexecute') \
            or False

        # Polling-related properties
        self.interval = operation_inputs.get('interval', POLLING_INTERVAL)
        self.state = operation_inputs.get('state', 'terminated')
        self.timeout = operation_inputs.get('timeout', EXECUTIONS_TIMEOUT)

        # This ``execution_id`` will be set once execute workflow done
        # successfully
        self.execution_id = None