def test_wait_for_log_after_execution_finishes(self):
    """Verify wait_for_execution keeps consuming events even once the
    execution status has already become 'terminated'.
    """
    # executions.get(): one 'started' result, then 'terminated' forever.
    status_sequence = chain(
        [MagicMock(status=Execution.STARTED)],
        repeat(MagicMock(status=Execution.TERMINATED))
    )
    # events.get(): 100 empty batches first, then a single
    # 'workflow_succeeded' event, then empty batches forever.
    event_batches = chain(
        repeat(([], 0), 100),
        [([{'event_type': 'workflow_succeeded'}], 1)],
        repeat(([], 0))
    )
    self.client.executions.get = MagicMock(side_effect=status_sequence)
    self.client.events.get = MagicMock(side_effect=event_batches)

    initial_execution = MagicMock(status=Execution.STARTED)
    wait_for_execution(self.client, initial_execution, timeout=None)

    # Polling must continue through all 100 empty batches plus the
    # final successful one.
    calls_count = len(self.client.events.get.mock_calls)
    self.assertEqual(
        calls_count, 101,
        "wait_for_execution didnt keep polling events after execution "
        "terminated (expected 101 calls, got %d)" % calls_count)
def test_wait_for_execution_after_log_succeeded(self):
    """Verify wait_for_execution keeps polling the execution status even
    after a "workflow succeeded" event has already been received.
    """
    # executions.get(): 'started' for the first 100 calls, then
    # 'terminated' forever.
    status_sequence = chain(
        [MagicMock(status=Execution.STARTED)] * 100,
        repeat(MagicMock(status=Execution.TERMINATED))
    )
    # events.get(): 'workflow_succeeded' arrives immediately; nothing
    # afterwards.
    event_batches = chain(
        [([{'event_type': 'workflow_succeeded'}], 1)],
        repeat(([], 0))
    )
    self.client.executions.get = MagicMock(side_effect=status_sequence)
    self.client.events.get = MagicMock(side_effect=event_batches)

    initial_execution = MagicMock(status=Execution.STARTED)
    wait_for_execution(self.client, initial_execution, timeout=None)

    # Status polling must continue until 'terminated' shows up, i.e.
    # 100 'started' polls plus the final 'terminated' one.
    calls_count = len(self.client.executions.get.mock_calls)
    self.assertEqual(
        calls_count, 101,
        "wait_for_execution didnt keep polling the execution status "
        "after it received a workflow_succeeded event (expected 101 "
        "calls, got %d)" % calls_count)
def worker(dep_id):
    """Run `workflow_id` on the deployment `dep_id` and report the result.

    Thread-pool worker: all output goes through the thread-safe logging
    helpers so concurrent workers do not interleave their messages.
    Waits up to 900 seconds for the execution to finish.
    """
    try:
        execution = client.executions.start(dep_id, workflow_id)
        execution = wait_for_execution(
            client,
            execution,
            events_handler=threadsafe_events_logger,
            include_logs=include_logs,
            timeout=900
        )
        if execution.error:
            log_to_summary(
                "Execution of workflow '{0}' for "
                "deployment '{1}' failed. [error={2}]".format(
                    workflow_id, dep_id, execution.error)
            )
        else:
            threadsafe_log(
                "Finished executing workflow "
                "'{0}' on deployment"
                " '{1}'".format(workflow_id, dep_id)
            )
    except ExecutionTimeoutError as e:
        # Fix: the timeout message previously formatted the outer
        # `deployment_id` instead of this worker's `dep_id`, reporting
        # the wrong deployment in the summary.
        log_to_summary(
            "Execution of workflow '{0}' "
            "for deployment '{1}' timed out. "
            "* Run 'cfy executions cancel "
            "--execution-id {2}' to cancel"
            " the running workflow.".format(workflow_id, dep_id,
                                            e.execution_id)
        )
def update(blueprint_id, include_logs, json_output, logger, client,
           tenant_name, force):
    """Update the plugins of all the deployments of the given blueprint.

    This will update the deployments one by one until all succeeded.

    `BLUEPRINT_ID` the blueprint's ID to perform the plugins update with.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Updating the plugins of the deployments of the blueprint '
                '{}'.format(blueprint_id))
    plugins_update = client.plugins_update.update_plugins(blueprint_id, force)
    # Follow the plugins-update execution to completion, streaming its
    # events; never time out.
    execution = execution_events_fetcher.wait_for_execution(
        client,
        client.executions.get(plugins_update.execution_id),
        events_handler=get_events_logger(json_output),
        include_logs=include_logs,
        timeout=None  # don't timeout ever
    )
    if not execution.error:
        logger.info("Finished executing workflow '{0}'".format(
            execution.workflow_id))
        logger.info('Successfully updated plugins for blueprint {0}. '
                    'Plugins update ID: {1}. Execution id: {2}'.format(
                        blueprint_id, plugins_update.id, execution.id))
        return
    logger.info("Execution of workflow '{0}' for blueprint "
                "'{1}' failed. [error={2}]".format(execution.workflow_id,
                                                   blueprint_id,
                                                   execution.error))
    logger.info('Failed updating plugins for blueprint {0}. '
                'Plugins update ID: {1}. Execution id: {2}'.format(
                    blueprint_id, plugins_update.id, execution.id))
    raise SuppressedCloudifyCliError()
def wait_for_and_validate_execution(client, execution):
    """Wait for `execution` to finish and assert it terminated cleanly.

    Returns the finished execution; raises if its final status is
    anything other than `Execution.TERMINATED`.
    """
    finished = wait_for_execution(client, execution,
                                  get_events_logger(False), True,
                                  timeout=None, logger=logger)
    if finished.status == Execution.TERMINATED:
        return finished
    raise Exception(
        "Unexpected status of execution {}: {} (expected: {})".format(
            finished.id, finished.status, Execution.TERMINATED))
def worker(dep_id):
    """Run `workflow_id` on the deployment `dep_id` and report the result.

    Thread-pool worker. If `install_script` is set it is passed along as
    a custom workflow parameter. Waits up to `timeout` seconds for the
    execution to complete.
    """
    timeout = 900
    try:
        kwargs = {}
        if install_script is not None:
            # install_script is not a declared workflow parameter, so
            # custom parameters must be explicitly allowed.
            kwargs = {
                'parameters': {
                    'install_script': install_script
                },
                'allow_custom_parameters': True
            }
        execution = client.executions.start(
            dep_id,
            workflow_id,
            **kwargs
        )
        execution = wait_for_execution(
            client,
            execution,
            events_handler=threadsafe_events_logger,
            include_logs=include_logs,
            timeout=timeout
        )
        if execution.error:
            log_to_summary("Execution of workflow '{0}' for "
                           "deployment '{1}' failed. [error={2}]"
                           .format(workflow_id, dep_id, execution.error))
        else:
            threadsafe_log("Finished executing workflow "
                           "'{0}' on deployment"
                           " '{1}'".format(workflow_id, dep_id))
    except ExecutionTimeoutError as e:
        # Fix: the timeout message previously formatted the outer
        # `deployment_id` instead of this worker's `dep_id`, reporting
        # the wrong deployment in the summary.
        log_to_summary(
            "Timed out waiting for workflow '{0}' of deployment '{1}' to "
            "end. The execution may still be running properly; however, "
            "the command-line utility was instructed to wait up to {3} "
            "seconds for its completion.\n\n"
            "* Run 'cfy executions list' to determine the execution's "
            "status.\n"
            "* Run 'cfy executions cancel --execution-id {2}' to cancel"
            " the running workflow.".format(
                workflow_id, dep_id, e.execution_id, timeout))
def ls(execution_id, include_logs, tail):
    """List the events of an execution, optionally tailing until it ends.

    With `tail`, follow the execution and stream events until it
    finishes (never timing out); otherwise fetch only the events created
    so far. Raises CloudifyCliError if the execution does not exist.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info("Getting events from management server {0} for "
                "execution id '{1}' "
                "[include_logs={2}]".format(management_ip,
                                            execution_id,
                                            include_logs))
    client = utils.get_rest_client(management_ip)
    try:
        execution_events = ExecutionEventsFetcher(
            client,
            execution_id,
            include_logs=include_logs)
        events_logger = get_events_logger()
        if tail:
            execution = wait_for_execution(client,
                                           client.executions.get(
                                               execution_id),
                                           events_handler=events_logger,
                                           include_logs=include_logs,
                                           timeout=None)  # don't timeout ever
            if execution.error:
                logger.info("Execution of workflow '{0}' for deployment "
                            "'{1}' failed. [error={2}]"
                            .format(execution.workflow_id,
                                    execution.deployment_id,
                                    execution.error))
                raise SuppressedCloudifyCliError()
            else:
                logger.info("Finished executing workflow '{0}' on deployment "
                            "'{1}'".format(execution.workflow_id,
                                           execution.deployment_id))
        else:
            # don't tail, get only the events created until now and return
            events = execution_events.fetch_and_process_events(
                events_handler=events_logger)
            logger.info('\nTotal events: {0}'.format(events))
    # Fix: `except CloudifyClientError, e:` is Python 2-only syntax;
    # `as e` is valid on Python 2.6+ and Python 3, and matches the
    # sibling `ls` implementation in this codebase.
    except CloudifyClientError as e:
        if e.status_code != 404:
            raise
        msg = ("Execution '{0}' not found on management server"
               .format(execution_id))
        raise CloudifyCliError(msg)
def ls(execution_id, include_logs, tail, json):
    """List the events of an execution, optionally tailing until it ends.

    With `tail`, follow the execution and stream events until it
    finishes (never timing out); otherwise fetch only the events created
    so far. Raises CloudifyCliError if the execution does not exist.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info('Listing events for execution id {0} '
                '[include_logs={1}]'.format(execution_id, include_logs))
    client = utils.get_rest_client(management_ip)
    try:
        events_logger = get_events_logger(json)
        fetcher = ExecutionEventsFetcher(
            client,
            execution_id,
            include_logs=include_logs)
        if not tail:
            # One-shot: report only the events created until now.
            total = fetcher.fetch_and_process_events(
                events_handler=events_logger)
            logger.info('\nTotal events: {0}'.format(total))
            return
        execution = wait_for_execution(client,
                                       client.executions.get(execution_id),
                                       events_handler=events_logger,
                                       include_logs=include_logs,
                                       timeout=None)  # don't timeout ever
        if execution.error:
            logger.info('Execution of workflow {0} for deployment '
                        '{1} failed. [error={2}]'.format(
                            execution.workflow_id,
                            execution.deployment_id,
                            execution.error))
            raise SuppressedCloudifyCliError()
        logger.info('Finished executing workflow {0} on deployment '
                    '{1}'.format(
                        execution.workflow_id,
                        execution.deployment_id))
    except CloudifyClientError as e:
        if e.status_code != 404:
            raise
        raise CloudifyCliError('Execution {0} not found'.format(execution_id))
def worker(dep_id):
    """Run `workflow_id` on the deployment `dep_id` and report the result.

    Thread-pool worker; waits up to 900 seconds for the execution to
    complete and logs the outcome through the thread-safe helpers.
    """
    try:
        execution = client.executions.start(
            dep_id,
            workflow_id,
        )
        execution = wait_for_execution(
            client,
            execution,
            events_handler=threadsafe_events_logger,
            include_logs=include_logs,
            timeout=900
        )
        if execution.error:
            log_to_summary("Execution of workflow '{0}' for "
                           "deployment '{1}' failed. [error={2}]"
                           .format(workflow_id, dep_id, execution.error))
        else:
            threadsafe_log("Finished executing workflow "
                           "'{0}' on deployment"
                           " '{1}'".format(workflow_id, dep_id))
    except ExecutionTimeoutError as e:
        # Fix: the timeout message previously formatted the outer
        # `deployment_id` instead of this worker's `dep_id`, reporting
        # the wrong deployment in the summary.
        log_to_summary("Execution of workflow '{0}' "
                       "for deployment '{1}' timed out. "
                       "* Run 'cfy executions cancel "
                       "--execution-id {2}' to cancel"
                       " the running workflow."
                       .format(workflow_id, dep_id, e.execution_id))
def start(workflow_id, deployment_id, timeout, force,
          allow_custom_parameters, include_logs, parameters):
    """Execute `workflow_id` on `deployment_id` and wait for it to finish.

    If the deployment environment is still being created, waits for that
    workflow first (deducting the wait from `timeout`) and retries.
    Raises SuppressedCloudifyCliError on execution failure or timeout.
    """
    logger = get_logger()
    parameters = utils.inputs_to_dict(parameters, 'parameters')
    management_ip = utils.get_management_server_ip()
    logger.info("Executing workflow '{0}' on deployment '{1}' at"
                " management server {2} [timeout={3} seconds]".format(
                    workflow_id, deployment_id, management_ip, timeout))
    events_logger = get_events_logger()
    events_message = "* Run 'cfy events list --include-logs " \
                     "--execution-id {0}' to retrieve the " \
                     "execution's events/logs"
    try:
        client = utils.get_rest_client(management_ip)
        try:
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)
        except (exceptions.DeploymentEnvironmentCreationInProgressError,
                exceptions.DeploymentEnvironmentCreationPendingError) as e:
            # wait for deployment environment creation workflow
            if isinstance(
                    e,
                    exceptions.DeploymentEnvironmentCreationPendingError):
                status = 'pending'
            else:
                status = 'in progress'
            logger.info(
                'Deployment environment creation is {0}!'.format(status))
            logger.info('Waiting for create_deployment_environment '
                        'workflow execution to finish...')
            now = time.time()
            wait_for_execution(client,
                               _get_deployment_environment_creation_execution(
                                   client, deployment_id),
                               events_handler=events_logger,
                               include_logs=include_logs,
                               timeout=timeout)
            # Deduct the time spent waiting for environment creation from
            # the remaining timeout budget. (Renamed from the misleading
            # `remaining_timeout` - this is the *elapsed* time.)
            elapsed = time.time() - now
            timeout -= elapsed
            # try to execute user specified workflow
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)
        execution = wait_for_execution(client,
                                       execution,
                                       events_handler=events_logger,
                                       include_logs=include_logs,
                                       timeout=timeout)
        if execution.error:
            logger.info("Execution of workflow '{0}' for deployment "
                        "'{1}' failed. [error={2}]".format(
                            workflow_id, deployment_id, execution.error))
            logger.info(events_message.format(execution.id))
            raise SuppressedCloudifyCliError()
        else:
            logger.info("Finished executing workflow '{0}' on deployment"
                        " '{1}'".format(workflow_id, deployment_id))
            logger.info(events_message.format(execution.id))
    # Fix: `except ExecutionTimeoutError, e:` is Python 2-only syntax;
    # `as e` works on Python 2.6+ and Python 3.
    except ExecutionTimeoutError as e:
        logger.info("Execution of workflow '{0}' "
                    "for deployment '{1}' timed out. "
                    "* Run 'cfy executions cancel "
                    "--execution-id {2}' to cancel"
                    " the running workflow.".format(workflow_id,
                                                    deployment_id,
                                                    e.execution_id))
        events_tail_message = "* Run 'cfy events list --tail --include-logs " \
                              "--execution-id {0}' to retrieve the " \
                              "execution's events/logs"
        logger.info(events_tail_message.format(e.execution_id))
        raise SuppressedCloudifyCliError()
def start(workflow_id, deployment_id, timeout, force,
          allow_custom_parameters, include_logs, parameters):
    """Execute `workflow_id` on `deployment_id` and wait for it to finish.

    If the deployment environment is still being created, waits for that
    workflow first (deducting the wait from `timeout`) and retries.
    Raises SuppressedCloudifyCliError on execution failure or timeout.
    """
    logger = get_logger()
    parameters = json_to_dict(parameters, 'parameters')
    management_ip = utils.get_management_server_ip()
    logger.info("Executing workflow '{0}' on deployment '{1}' at"
                " management server {2} [timeout={3} seconds]"
                .format(workflow_id, deployment_id, management_ip, timeout))
    events_logger = get_events_logger()
    events_message = "* Run 'cfy events list --include-logs " \
                     "--execution-id {0}' for retrieving the " \
                     "execution's events/logs"
    try:
        client = utils.get_rest_client(management_ip)
        try:
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)
        except exceptions.DeploymentEnvironmentCreationInProgressError:
            # wait for deployment environment creation workflow to end
            logger.info('Deployment environment creation is in progress!')
            logger.info('Waiting for create_deployment_environment '
                        'workflow execution to finish...')
            now = time.time()
            wait_for_execution(client,
                               deployment_id,
                               _get_deployment_environment_creation_execution(
                                   client, deployment_id),
                               events_handler=events_logger,
                               include_logs=include_logs,
                               timeout=timeout)
            # Deduct the time spent waiting for environment creation from
            # the remaining timeout budget. (Renamed from the misleading
            # `remaining_timeout` - this is the *elapsed* time.)
            elapsed = time.time() - now
            timeout -= elapsed
            # try to execute user specified workflow
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)
        execution = wait_for_execution(client,
                                       deployment_id,
                                       execution,
                                       events_handler=events_logger,
                                       include_logs=include_logs,
                                       timeout=timeout)
        if execution.error:
            logger.info("Execution of workflow '{0}' for deployment "
                        "'{1}' failed. [error={2}]"
                        .format(workflow_id, deployment_id, execution.error))
            logger.info(events_message.format(execution.id))
            raise SuppressedCloudifyCliError()
        else:
            # Fix: the original implicit string concatenation was missing
            # a space, producing "on deployment'<id>'".
            logger.info("Finished executing workflow '{0}' on deployment"
                        " '{1}'".format(workflow_id, deployment_id))
            logger.info(events_message.format(execution.id))
    # Fix: `except ExecutionTimeoutError, e:` is Python 2-only syntax;
    # `as e` works on Python 2.6+ and Python 3.
    except ExecutionTimeoutError as e:
        logger.info("Execution of workflow '{0}' "
                    "for deployment '{1}' timed out. "
                    "* Run 'cfy executions cancel "
                    "--execution-id {2}' to cancel"
                    " the running workflow."
                    .format(workflow_id, deployment_id, e.execution_id))
        logger.info(events_message.format(e.execution_id))
        raise SuppressedCloudifyCliError()
def _wait(client, execution):
    """Block until `execution` finishes, streaming its events and logs."""
    wait_for_execution(
        client,
        execution,
        events_handler=_events_logger,
        include_logs=True,
        logger=logger,
    )
def update(deployment_id, blueprint_path, inputs, blueprint_filename,
           archive_location, skip_install, skip_uninstall, workflow_id,
           force, include_logs, json):
    """Update a deployment from a blueprint file or archive and wait for
    the update workflow to finish.

    Raises SuppressedCloudifyCliError if the update execution fails.
    """
    logger = get_logger()
    client = utils.get_rest_client(utils.get_rest_host())
    parsed_inputs = utils.inputs_to_dict(inputs, 'inputs')
    # Either a local blueprint file or a remote/local archive may be given.
    if blueprint_path:
        blueprint_or_archive_path = blueprint_path.name
    else:
        blueprint_or_archive_path = archive_location
    logger.info('Updating deployment {dep_id} using blueprint {path}'.format(
        dep_id=deployment_id, path=blueprint_or_archive_path))
    deployment_update = client.deployment_updates.update(
        deployment_id,
        blueprint_or_archive_path,
        application_file_name=blueprint_filename,
        inputs=parsed_inputs,
        workflow_id=workflow_id,
        skip_install=skip_install,
        skip_uninstall=skip_uninstall,
        force=force)
    # Follow the update execution to completion; never time out.
    execution = wait_for_execution(
        client,
        client.executions.get(deployment_update.execution_id),
        events_handler=get_events_logger(json),
        include_logs=include_logs,
        timeout=None)  # don't timeout ever
    if execution.error:
        logger.info("Execution of workflow '{0}' for deployment "
                    "'{1}' failed. [error={2}]"
                    .format(execution.workflow_id,
                            execution.deployment_id,
                            execution.error))
        logger.info('Failed updating deployment {dep_id}. Deployment update '
                    'id: {depup_id}. Execution id: {exec_id}'
                    .format(depup_id=deployment_update.id,
                            dep_id=deployment_id,
                            exec_id=execution.id))
        raise SuppressedCloudifyCliError()
    logger.info("Finished executing workflow '{0}' on deployment "
                "'{1}'".format(execution.workflow_id,
                               execution.deployment_id))
    logger.info('Successfully updated deployment {dep_id}. '
                'Deployment update id: {depup_id}. Execution id: {exec_id}'
                .format(depup_id=deployment_update.id,
                        dep_id=deployment_id,
                        exec_id=execution.id))
def start(workflow_id, deployment_id, timeout, force,
          allow_custom_parameters, include_logs, parameters, json):
    """Execute `workflow_id` on `deployment_id` and wait for it to finish.

    If the deployment environment is still being created, waits for that
    workflow first (deducting the wait from `timeout`) and retries. The
    original timeout is preserved for the timeout error message. Raises
    SuppressedCloudifyCliError on execution failure or timeout.
    """
    logger = get_logger()
    parameters = utils.inputs_to_dict(parameters, 'parameters')
    rest_host = utils.get_rest_host()
    logger.info("Executing workflow '{0}' on deployment '{1}' at"
                " management server {2} [timeout={3} seconds]"
                .format(workflow_id, deployment_id, rest_host, timeout))
    events_logger = get_events_logger(json)
    events_message = "* Run 'cfy events list --include-logs " \
                     "--execution-id {0}' to retrieve the " \
                     "execution's events/logs"
    original_timeout = timeout
    try:
        client = utils.get_rest_client(rest_host)
        try:
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)
        except (exceptions.DeploymentEnvironmentCreationInProgressError,
                exceptions.DeploymentEnvironmentCreationPendingError) as e:
            # wait for deployment environment creation workflow
            if isinstance(
                    e,
                    exceptions.DeploymentEnvironmentCreationPendingError):
                status = 'pending'
            else:
                status = 'in progress'
            logger.info('Deployment environment creation is {0}...'.format(
                status))
            logger.debug('Waiting for create_deployment_environment '
                         'workflow execution to finish...')
            now = time.time()
            wait_for_execution(client,
                               _get_deployment_environment_creation_execution(
                                   client, deployment_id),
                               events_handler=events_logger,
                               include_logs=include_logs,
                               timeout=timeout)
            # Deduct the time spent waiting for environment creation from
            # the remaining timeout budget. (Renamed from the misleading
            # `remaining_timeout` - this is the *elapsed* time.)
            elapsed = time.time() - now
            timeout -= elapsed
            # try to execute user specified workflow
            execution = client.executions.start(
                deployment_id,
                workflow_id,
                parameters=parameters,
                allow_custom_parameters=allow_custom_parameters,
                force=force)
        execution = wait_for_execution(client,
                                       execution,
                                       events_handler=events_logger,
                                       include_logs=include_logs,
                                       timeout=timeout)
        if execution.error:
            logger.info('Execution of workflow {0} for deployment '
                        '{1} failed. [error={2}]'.format(
                            workflow_id, deployment_id, execution.error))
            logger.info(events_message.format(execution.id))
            raise SuppressedCloudifyCliError()
        else:
            logger.info('Finished executing workflow {0} on deployment '
                        '{1}'.format(workflow_id, deployment_id))
            logger.info(events_message.format(execution.id))
    except ExecutionTimeoutError as e:
        # Report against the original timeout, since `timeout` may have
        # been reduced by the environment-creation wait above.
        logger.info(
            "Timed out waiting for workflow '{0}' of deployment '{1}' to "
            "end. The execution may still be running properly; however, "
            "the command-line utility was instructed to wait up to {3} "
            "seconds for its completion.\n\n"
            "* Run 'cfy executions list' to determine the execution's "
            "status.\n"
            "* Run 'cfy executions cancel --execution-id {2}' to cancel"
            " the running workflow.".format(
                workflow_id, deployment_id, e.execution_id,
                original_timeout))
        events_tail_message = "* Run 'cfy events list --tail --include-logs " \
                              "--execution-id {0}' to retrieve the " \
                              "execution's events/logs"
        logger.info(events_tail_message.format(e.execution_id))
        raise SuppressedCloudifyCliError()