def stop_dispatch_processes(cls):
    """Force-kill every cloudify dispatch process on the manager.

    ``pkill -9`` is matched against the dispatch module path. pkill
    exits with status 1 when no process matched the pattern, which is
    not an error for our purposes; any other failure is re-raised.
    """
    logger.info('Shutting down all dispatch processes')
    try:
        docl.execute('pkill -9 -f cloudify/dispatch.py')
    except sh.ErrorReturnCode as err:
        if err.exit_code == 1:
            # Nothing matched -- there were no dispatch processes to kill.
            return
        raise
def _copy_docker_conf_file(self):
    """Render docker_conf.json and copy it into the manager container.

    The docker_conf.json file is used to pass information to the
    dockercompute plugin (see integration_tests_plugins/dockercompute).
    """
    docl.execute('mkdir -p /root/dockercompute')
    # mode='w' is required: json.dump writes str, while
    # NamedTemporaryFile defaults to binary mode ('w+b'), which would
    # raise TypeError on Python 3.
    with tempfile.NamedTemporaryFile(mode='w') as f:
        json.dump(
            {
                # The dockercompute plugin needs to know where to find
                # the docker host
                'docker_host': docl.docker_host(),
                # Used to know from where to mount the plugins storage
                # dir on dockercompute node instances containers
                'plugins_storage_dir': self.plugins_storage_dir,
                # Used for cleanup purposes
                'env_label': self.env_label,
            }, f)
        f.flush()
        docl.copy_file_to_manager(
            source=f.name,
            target='/root/dockercompute/docker_conf.json')
def verify_deployment_environment_creation_complete(deployment_id):
    """Check that the deployment environment creation has finished.

    A workaround for waiting for the deployment environment creation
    to complete: the first execution listed for *deployment_id* must
    be a terminated 'create_deployment_environment' workflow.

    :param deployment_id: id of the deployment to inspect
    :raises RuntimeError: with the tail of the management worker log
        attached, when the expected execution is not found.
    """
    client = create_rest_client()
    executions = client.executions.list(deployment_id)
    created = (bool(executions)
               and executions[0].status == Execution.TERMINATED
               and executions[0].workflow_id ==
               'create_deployment_environment')
    if created:
        return
    # cyclic imports :(
    from integration_tests import docl
    log_path = ('/var/log/cloudify/mgmtworker/'
                'cloudify.management_worker.log')
    logs = docl.execute('tail -n 100 {0}'.format(log_path))
    raise RuntimeError(
        "Expected a single execution for workflow "
        "'create_deployment_environment' with status 'terminated'; "
        "Found these executions instead: {0}.\nLast 100 lines for "
        "management worker log:\n{1}".format(
            json.dumps(executions.items, indent=2), logs))
def execute_on_manager(command, quiet=True):
    """Execute a shell command on the cloudify manager container.

    :param command: the shell command to run
    :param quiet: passed through to ``docl.execute`` (suppresses
        output when truthy)
    :return: whatever ``docl.execute`` returns
    """
    result = docl.execute(command, quiet)
    return result
def reset_data_and_restart():
    """Wipe the riemann configs directory and restart the service.

    Recreates an empty RIEMANN_CONFIGS_DIR on the manager container,
    then restarts cloudify-riemann so it picks up the clean state.
    """
    commands = (
        'rm -rf {0}'.format(RIEMANN_CONFIGS_DIR),
        'mkdir -p {0}'.format(RIEMANN_CONFIGS_DIR),
        'systemctl restart cloudify-riemann',
    )
    for command in commands:
        docl.execute(command)