Example 1
def update(blueprint_id, include_logs, json_output, logger, client,
           tenant_name, force):
    """Update the plugins of all the deployments of the given blueprint. This
    will update the deployments one by one until all succeeded.

    `BLUEPRINT_ID` the blueprint's ID to perform the plugins update with.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Updating the plugins of the deployments of the blueprint '
                '{}'.format(blueprint_id))
    plugins_update = client.plugins_update.update_plugins(blueprint_id, force)
    events_logger = get_events_logger(json_output)
    execution = execution_events_fetcher.wait_for_execution(
        client,
        client.executions.get(plugins_update.execution_id),
        events_handler=events_logger,
        include_logs=include_logs,
        timeout=None  # don't timeout ever
    )

    if execution.error:
        logger.info("Execution of workflow '{0}' for blueprint "
                    "'{1}' failed. [error={2}]".format(execution.workflow_id,
                                                       blueprint_id,
                                                       execution.error))
        logger.info('Failed updating plugins for blueprint {0}. '
                    'Plugins update ID: {1}. Execution id: {2}'.format(
                        blueprint_id, plugins_update.id, execution.id))
        raise SuppressedCloudifyCliError()
    logger.info("Finished executing workflow '{0}'".format(
        execution.workflow_id))
    logger.info('Successfully updated plugins for blueprint {0}. '
                'Plugins update ID: {1}. Execution id: {2}'.format(
                    blueprint_id, plugins_update.id, execution.id))
Example 2
def ls(execution_id, include_logs):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info("Getting events from management server {0} for "
                "execution id '{1}' "
                "[include_logs={2}]".format(management_ip,
                                            execution_id,
                                            include_logs))
    client = utils.get_rest_client(management_ip)
    try:
        execution_events = ExecutionEventsFetcher(
            client,
            execution_id,
            include_logs=include_logs)
        events = execution_events.fetch_all()
        events_logger = get_events_logger()
        events_logger(events)
        logger.info('\nTotal events: {0}'.format(len(events)))
    except CloudifyClientError as e:
        if e.status_code != 404:
            raise
        msg = ("Execution '{0}' not found on management server"
               .format(execution_id))
        raise CloudifyCliError(msg)
Example 3
    def test_json_events_logger(self):
        events_logger = logger.get_events_logger(json_output=True)
        events = [{'key': 'value1'}, {'key': 'value2'}]
        with utils.mock_stdout() as output:
            events_logger(events)
        self.assertEqual('{0}\n{1}\n'.format(json.dumps(events[0]),
                                             json.dumps(events[1])),
                         output.getvalue())
Example 4
    def test_text_events_logger(self):
        events_logger = logger.get_events_logger(json_output=False)
        events = [{'key': 'output'}, {'key': 'hide'}]

        def mock_create_message(event):
            return None if event['key'] == 'hide' else event['key']

        with utils.mock_logger('cloudify_cli.logger._lgr') as output:
            with patch('cloudify.logs.create_event_message_prefix',
                       mock_create_message):
                events_logger(events)
        self.assertEqual(events[0]['key'], output.getvalue())
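
Taken together, the two tests above pin down the contract of get_events_logger: with json_output=True the returned callable prints each event as one JSON document per line, and with json_output=False it formats each event through cloudify.logs.create_event_message_prefix and silently drops events whose formatted message is None. A minimal sketch of a factory with that behavior (make_events_logger, create_message and log are hypothetical names injected here for illustration; this is not the CLI's actual implementation):

import json


def make_events_logger(json_output, create_message, log):
    # Sketch of an events-logger factory matching the tests above.
    if json_output:
        def json_events_logger(events):
            # One JSON document per line, as asserted in
            # test_json_events_logger.
            for event in events:
                print(json.dumps(event))
        return json_events_logger

    def text_events_logger(events):
        # Events whose formatted message is None are skipped, as asserted
        # in test_text_events_logger.
        for event in events:
            message = create_message(event)
            if message is not None:
                log(message)
    return text_events_logger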
Example 5
def wait_for_and_validate_execution(client, execution):
    execution = wait_for_execution(client,
                                   execution,
                                   get_events_logger(False),
                                   True,
                                   timeout=None,
                                   logger=logger)
    if execution.status != Execution.TERMINATED:
        raise Exception(
            "Unexpected status of execution {}: {} (expected: {})".format(
                execution.id, execution.status, Execution.TERMINATED))
    return execution
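
wait_for_and_validate_execution above relies on wait_for_execution blocking until the execution reaches an end state, after which the status is compared against Execution.TERMINATED. The following is only an illustrative sketch of that polling behavior, not the cloudify_cli implementation; wait_for_execution_sketch, END_STATES and poll_interval are names invented here, and the real helper also streams new events to events_handler and raises ExecutionTimeoutError on timeout:

import time

END_STATES = ('terminated', 'failed', 'cancelled')


def wait_for_execution_sketch(client, execution, events_handler=None,
                              include_logs=False, timeout=None,
                              poll_interval=5):
    # Poll the REST API until the execution reaches an end state, or until
    # the optional timeout expires (timeout=None means wait forever).
    deadline = None if timeout is None else time.time() + timeout
    while execution.status not in END_STATES:
        if deadline is not None and time.time() > deadline:
            raise RuntimeError('Timed out waiting for execution {0}'
                               .format(execution.id))
        time.sleep(poll_interval)
        execution = client.executions.get(execution.id)
        # The real helper would also fetch the events created since the last
        # poll here and pass them to events_handler.
    return execution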
Example 6
def main(args):
    workflow_id = args[1]
    configure_loggers()
    logger = get_logger()
    manager_ip = utils.get_management_server_ip()
    client = utils.get_rest_client(manager_ip)
    deployments = client.deployments.list()
    results = map(lambda d: deployment_failed_tasks(client, workflow_id, d),
                  deployments)
    failure_detected = False
    for res in results:
        if res.get('type') == RESULT_TASKS:
            tasks = res.get('failed_tasks')
            exc = res.get('execution')
            if tasks:
                failure_detected = True
                msg = FAILURE_MSG_FORMAT.format(exc.deployment_id,
                                                workflow_id,
                                                exc.id)
                logger.info(msg)
                get_events_logger()(tasks)
                logger.info('Total tasks failed: {0}\n'.format(len(tasks)))
            else:
                msg = OK_MSG_FORMAT.format(exc.deployment_id,
                                           workflow_id,
                                           exc.id)
                logger.info(msg)
        elif res.get('type') == RESULT_NOT_INSTALLED:
            deployment = res.get('deployment')
            logger.info(NOT_INSTALLED_MSG_FORMAT.format(deployment.id))
        else:
            deployment = res.get('deployment')
            failure_detected = True
            logger.info(NO_EXECUTION_MSG_FORMAT.format(deployment.id,
                                                       workflow_id))
    if failure_detected:
        logger.info('Failure detected.')
    return int(failure_detected)
Example 7
def main(args):
    workflow_id = args[1]
    configure_loggers()
    logger = get_logger()
    manager_ip = utils.get_management_server_ip()
    client = utils.get_rest_client(manager_ip)
    deployments = client.deployments.list()
    results = map(lambda d: deployment_failed_tasks(client, workflow_id, d),
                  deployments)
    failure_detected = False
    for res in results:
        if res.get('type') == RESULT_TASKS:
            tasks = res.get('failed_tasks')
            exc = res.get('execution')
            if tasks:
                failure_detected = True
                msg = FAILURE_MSG_FORMAT.format(exc.deployment_id, workflow_id,
                                                exc.id)
                logger.info(msg)
                get_events_logger()(tasks)
                logger.info('Total tasks failed: {0}\n'.format(len(tasks)))
            else:
                msg = OK_MSG_FORMAT.format(exc.deployment_id, workflow_id,
                                           exc.id)
                logger.info(msg)
        elif res.get('type') == RESULT_NOT_INSTALLED:
            deployment = res.get('deployment')
            logger.info(NOT_INSTALLED_MSG_FORMAT.format(deployment.id))
        else:
            deployment = res.get('deployment')
            failure_detected = True
            logger.info(
                NO_EXECUTION_MSG_FORMAT.format(deployment.id, workflow_id))
    if failure_detected:
        logger.info('Failure detected.')
    return int(failure_detected)
Example 8
def ls(execution_id, include_logs, tail):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info("Getting events from management server {0} for "
                "execution id '{1}' "
                "[include_logs={2}]".format(management_ip,
                                            execution_id,
                                            include_logs))
    client = utils.get_rest_client(management_ip)
    try:
        execution_events = ExecutionEventsFetcher(
            client,
            execution_id,
            include_logs=include_logs)

        events_logger = get_events_logger()

        if tail:
            execution = wait_for_execution(client,
                                           client.executions.get(execution_id),
                                           events_handler=events_logger,
                                           include_logs=include_logs,
                                           timeout=None)   # don't timeout ever
            if execution.error:
                logger.info("Execution of workflow '{0}' for deployment "
                            "'{1}' failed. [error={2}]"
                            .format(execution.workflow_id,
                                    execution.deployment_id,
                                    execution.error))
                raise SuppressedCloudifyCliError()
            else:
                logger.info("Finished executing workflow '{0}' on deployment "
                            "'{1}'".format(execution.workflow_id,
                                           execution.deployment_id))
        else:
            # don't tail, get only the events created until now and return
            events = execution_events.fetch_and_process_events(
                events_handler=events_logger)
            logger.info('\nTotal events: {0}'.format(events))
    except CloudifyClientError as e:
        if e.status_code != 404:
            raise
        msg = ("Execution '{0}' not found on management server"
               .format(execution_id))
        raise CloudifyCliError(msg)
Example 9
def ls(execution_id, include_logs, tail, json):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info('Listing events for execution id {0} '
                '[include_logs={1}]'.format(execution_id, include_logs))
    client = utils.get_rest_client(management_ip)
    try:
        execution_events = ExecutionEventsFetcher(
            client,
            execution_id,
            include_logs=include_logs)

        events_logger = get_events_logger(json)

        if tail:
            execution = wait_for_execution(client,
                                           client.executions.get(execution_id),
                                           events_handler=events_logger,
                                           include_logs=include_logs,
                                           timeout=None)   # don't timeout ever
            if execution.error:
                logger.info('Execution of workflow {0} for deployment '
                            '{1} failed. [error={2}]'.format(
                                execution.workflow_id,
                                execution.deployment_id,
                                execution.error))
                raise SuppressedCloudifyCliError()
            else:
                logger.info('Finished executing workflow {0} on deployment '
                            '{1}'.format(
                                execution.workflow_id,
                                execution.deployment_id))
        else:
            # don't tail, get only the events created until now and return
            events = execution_events.fetch_and_process_events(
                events_handler=events_logger)
            logger.info('\nTotal events: {0}'.format(events))
    except CloudifyClientError as e:
        if e.status_code != 404:
            raise
        raise CloudifyCliError('Execution {0} not found'.format(execution_id))
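
Both listing commands above delegate to ExecutionEventsFetcher: without tail they fetch whatever events exist right now (fetch_all / fetch_and_process_events), with tail they follow the execution via wait_for_execution. As a rough, hypothetical sketch of the batched fetching involved (it assumes a client.events.list() call with offset/size pagination, which may not match the actual REST client API):

def fetch_and_process_events_sketch(client, execution_id, events_handler,
                                    include_logs=False, batch_size=100):
    # Pull events in fixed-size batches, hand each batch to the events
    # logger, and return the total number of events processed.
    total = 0
    offset = 0
    while True:
        batch = list(client.events.list(execution_id=execution_id,
                                        include_logs=include_logs,
                                        _offset=offset, _size=batch_size))
        if not batch:
            break
        events_handler(batch)
        total += len(batch)
        offset += len(batch)
    return total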
Example 10
def start(workflow_id, deployment_id, timeout, force, allow_custom_parameters,
          include_logs, parameters):
    logger = get_logger()
    parameters = utils.inputs_to_dict(parameters, 'parameters')
    management_ip = utils.get_management_server_ip()
    logger.info("Executing workflow '{0}' on deployment '{1}' at"
                " management server {2} [timeout={3} seconds]".format(
                    workflow_id, deployment_id, management_ip, timeout))

    events_logger = get_events_logger()

    events_message = "* Run 'cfy events list --include-logs " \
                     "--execution-id {0}' to retrieve the " \
                     "execution's events/logs"
    try:
        client = utils.get_rest_client(management_ip)
        try:
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)
        except (exceptions.DeploymentEnvironmentCreationInProgressError,
                exceptions.DeploymentEnvironmentCreationPendingError) as e:
            # wait for deployment environment creation workflow
            if isinstance(
                    e, exceptions.DeploymentEnvironmentCreationPendingError):
                status = 'pending'
            else:
                status = 'in progress'

            logger.info(
                'Deployment environment creation is {0}!'.format(status))
            logger.info('Waiting for create_deployment_environment '
                        'workflow execution to finish...')
            now = time.time()
            wait_for_execution(client,
                               _get_deployment_environment_creation_execution(
                                   client, deployment_id),
                               events_handler=events_logger,
                               include_logs=include_logs,
                               timeout=timeout)
            remaining_timeout = time.time() - now
            timeout -= remaining_timeout
            # try to execute user specified workflow
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)

        execution = wait_for_execution(client,
                                       execution,
                                       events_handler=events_logger,
                                       include_logs=include_logs,
                                       timeout=timeout)
        if execution.error:
            logger.info("Execution of workflow '{0}' for deployment "
                        "'{1}' failed. [error={2}]".format(
                            workflow_id, deployment_id, execution.error))
            logger.info(events_message.format(execution.id))
            raise SuppressedCloudifyCliError()
        else:
            logger.info("Finished executing workflow '{0}' on deployment"
                        " '{1}'".format(workflow_id, deployment_id))
            logger.info(events_message.format(execution.id))
    except ExecutionTimeoutError as e:
        logger.info("Execution of workflow '{0}' "
                    "for deployment '{1}' timed out. "
                    "* Run 'cfy executions cancel "
                    "--execution-id {2}' to cancel"
                    " the running workflow.".format(workflow_id, deployment_id,
                                                    e.execution_id))
        events_tail_message = "* Run 'cfy events list --tail --include-logs " \
                              "--execution-id {0}' to retrieve the " \
                              "execution's events/logs"
        logger.info(events_tail_message.format(e.execution_id))
        raise SuppressedCloudifyCliError()
Example 11
def start(workflow_id, deployment_id, timeout, force,
          allow_custom_parameters, include_logs, parameters):
    logger = get_logger()
    parameters = json_to_dict(parameters, 'parameters')
    management_ip = utils.get_management_server_ip()
    logger.info("Executing workflow '{0}' on deployment '{1}' at"
                " management server {2} [timeout={3} seconds]"
                .format(workflow_id,
                        deployment_id,
                        management_ip,
                        timeout))

    events_logger = get_events_logger()

    events_message = "* Run 'cfy events list --include-logs " \
                     "--execution-id {0}' for retrieving the " \
                     "execution's events/logs"
    try:
        client = utils.get_rest_client(management_ip)
        try:
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)
        except exceptions.DeploymentEnvironmentCreationInProgressError:
            # wait for deployment environment creation workflow to end
            logger.info('Deployment environment creation is in progress!')
            logger.info('Waiting for create_deployment_environment '
                        'workflow execution to finish...')
            now = time.time()
            wait_for_execution(client,
                               deployment_id,
                               _get_deployment_environment_creation_execution(
                                   client, deployment_id),
                               events_handler=events_logger,
                               include_logs=include_logs,
                               timeout=timeout)
            remaining_timeout = time.time() - now
            timeout -= remaining_timeout
            # try to execute user specified workflow
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)

        execution = wait_for_execution(client,
                                       deployment_id,
                                       execution,
                                       events_handler=events_logger,
                                       include_logs=include_logs,
                                       timeout=timeout)
        if execution.error:
            logger.info("Execution of workflow '{0}' for deployment "
                        "'{1}' failed. [error={2}]"
                        .format(workflow_id,
                                deployment_id,
                                execution.error))
            logger.info(events_message.format(execution.id))
            raise SuppressedCloudifyCliError()
        else:
            logger.info("Finished executing workflow '{0}' on deployment"
                        "'{1}'".format(workflow_id, deployment_id))
            logger.info(events_message.format(execution.id))
    except ExecutionTimeoutError as e:
        logger.info("Execution of workflow '{0}' "
                    "for deployment '{1}' timed out. "
                    "* Run 'cfy executions cancel "
                    "--execution-id {2}' to cancel"
                    " the running workflow."
                    .format(workflow_id, deployment_id, e.execution_id))
        logger.info(events_message.format(e.execution_id))
        raise SuppressedCloudifyCliError()
Example 12
def update(deployment_id,
           blueprint_path,
           inputs,
           blueprint_filename,
           archive_location,
           skip_install,
           skip_uninstall,
           workflow_id,
           force,
           include_logs,
           json):
    logger = get_logger()
    rest_host = utils.get_rest_host()
    client = utils.get_rest_client(rest_host)

    processed_inputs = utils.inputs_to_dict(inputs, 'inputs')

    blueprint_or_archive_path = blueprint_path.name \
        if blueprint_path else archive_location
    logger.info('Updating deployment {dep_id} using blueprint {path}'.format(
        dep_id=deployment_id, path=blueprint_or_archive_path))

    deployment_update = client.deployment_updates.update(
        deployment_id,
        blueprint_or_archive_path,
        application_file_name=blueprint_filename,
        inputs=processed_inputs,
        workflow_id=workflow_id,
        skip_install=skip_install,
        skip_uninstall=skip_uninstall,
        force=force)

    events_logger = get_events_logger(json)

    execution = wait_for_execution(
        client,
        client.executions.get(deployment_update.execution_id),
        events_handler=events_logger,
        include_logs=include_logs,
        timeout=None)  # don't timeout ever
    if execution.error:
        logger.info("Execution of workflow '{0}' for deployment "
                    "'{1}' failed. [error={2}]"
                    .format(execution.workflow_id,
                            execution.deployment_id,
                            execution.error))
        logger.info('Failed updating deployment {dep_id}. Deployment update '
                    'id: {depup_id}. Execution id: {exec_id}'
                    .format(depup_id=deployment_update.id,
                            dep_id=deployment_id,
                            exec_id=execution.id))
        raise SuppressedCloudifyCliError()
    else:
        logger.info("Finished executing workflow '{0}' on deployment "
                    "'{1}'".format(execution.workflow_id,
                                   execution.deployment_id))
        logger.info('Successfully updated deployment {dep_id}. '
                    'Deployment update id: {depup_id}. Execution id: {exec_id}'
                    .format(depup_id=deployment_update.id,
                            dep_id=deployment_id,
                            exec_id=execution.id))
Example 13
def start(workflow_id, deployment_id, timeout, force,
          allow_custom_parameters, include_logs, parameters, json):
    logger = get_logger()
    parameters = utils.inputs_to_dict(parameters, 'parameters')
    rest_host = utils.get_rest_host()
    logger.info("Executing workflow '{0}' on deployment '{1}' at"
                " management server {2} [timeout={3} seconds]"
                .format(workflow_id,
                        deployment_id,
                        rest_host,
                        timeout))

    events_logger = get_events_logger(json)

    events_message = "* Run 'cfy events list --include-logs " \
                     "--execution-id {0}' to retrieve the " \
                     "execution's events/logs"
    original_timeout = timeout

    try:
        client = utils.get_rest_client(rest_host)
        try:
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)
        except (exceptions.DeploymentEnvironmentCreationInProgressError,
                exceptions.DeploymentEnvironmentCreationPendingError) as e:
            # wait for deployment environment creation workflow
            if isinstance(
                    e,
                    exceptions.DeploymentEnvironmentCreationPendingError):
                status = 'pending'
            else:
                status = 'in progress'

            logger.info('Deployment environment creation is {0}...'.format(
                status))
            logger.debug('Waiting for create_deployment_environment '
                         'workflow execution to finish...')
            now = time.time()
            wait_for_execution(client,
                               _get_deployment_environment_creation_execution(
                                   client, deployment_id),
                               events_handler=events_logger,
                               include_logs=include_logs,
                               timeout=timeout)
            remaining_timeout = time.time() - now
            timeout -= remaining_timeout
            # try to execute user specified workflow
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)

        execution = wait_for_execution(client,
                                       execution,
                                       events_handler=events_logger,
                                       include_logs=include_logs,
                                       timeout=timeout)
        if execution.error:
            logger.info('Execution of workflow {0} for deployment '
                        '{1} failed. [error={2}]'.format(
                            workflow_id,
                            deployment_id,
                            execution.error))
            logger.info(events_message.format(execution.id))
            raise SuppressedCloudifyCliError()
        else:
            logger.info('Finished executing workflow {0} on deployment '
                        '{1}'.format(workflow_id, deployment_id))
            logger.info(events_message.format(execution.id))
    except ExecutionTimeoutError as e:
        logger.info(
            "Timed out waiting for workflow '{0}' of deployment '{1}' to "
            "end. The execution may still be running properly; however, "
            "the command-line utility was instructed to wait up to {3} "
            "seconds for its completion.\n\n"
            "* Run 'cfy executions list' to determine the execution's "
            "status.\n"
            "* Run 'cfy executions cancel --execution-id {2}' to cancel"
            " the running workflow.".format(
                workflow_id, deployment_id, e.execution_id, original_timeout))

        events_tail_message = "* Run 'cfy events list --tail --include-logs " \
                              "--execution-id {0}' to retrieve the " \
                              "execution's events/logs"
        logger.info(events_tail_message.format(e.execution_id))
        raise SuppressedCloudifyCliError()
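
Examples 10, 11 and 13 all share the same retry pattern: start the workflow, and if the deployment environment is still being created, wait for the create_deployment_environment execution first, subtract the time already spent from the overall timeout, then retry the start. Below is that pattern in isolation as a simplified sketch that reuses the calls shown in the examples; start_with_env_retry is a name invented here, the helper callables are passed in rather than imported, and the exceptions import assumes they live in cloudify_rest_client.exceptions as the examples suggest:

import time

from cloudify_rest_client import exceptions


def start_with_env_retry(client, deployment_id, workflow_id,
                         wait_for_execution, get_env_creation_execution,
                         events_logger, include_logs=False, timeout=900,
                         **start_kwargs):
    # Returns the started execution together with the timeout that is left
    # for waiting on it, mirroring the bookkeeping in the examples above.
    try:
        execution = client.executions.start(deployment_id, workflow_id,
                                            **start_kwargs)
    except (exceptions.DeploymentEnvironmentCreationInProgressError,
            exceptions.DeploymentEnvironmentCreationPendingError):
        started = time.time()
        wait_for_execution(client,
                           get_env_creation_execution(client, deployment_id),
                           events_handler=events_logger,
                           include_logs=include_logs,
                           timeout=timeout)
        timeout -= time.time() - started  # only the remainder is left
        execution = client.executions.start(deployment_id, workflow_id,
                                            **start_kwargs)
    return execution, timeout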