def restart_service(service_name):
    """Restart a service by name inside the manager container.

    Issues an explicit stop followed by a start via systemctl.
    """
    for action in ('stop', 'start'):
        docl.execute('systemctl {0} {1}'.format(action, service_name))
def stop_dispatch_processes(cls):
    """Force-kill (SIGKILL) every cloudify dispatch process on the manager."""
    logger.info('Shutting down all dispatch processes')
    try:
        docl.execute('pkill -9 -f cloudify/dispatch.py')
    except sh.ErrorReturnCode as e:
        # pkill exits with 1 when no process matched - nothing to kill,
        # which is fine here.  Any other exit code is a real failure.
        if e.exit_code == 1:
            return
        raise
def _update_hooks_config(self, new_config):
    """Install a new hooks configuration file on the manager.

    :param new_config: YAML text of the new hooks configuration.

    The YAML is round-tripped (parsed, then re-dumped block-style) into
    a temp file, copied into the manager container, and chown'ed to
    cfyuser so the hook consumer can read it.
    """
    with tempfile.NamedTemporaryFile() as f:
        # safe_load instead of load: the config is plain data, and
        # yaml.load without an explicit Loader is deprecated and unsafe
        # on untrusted input.
        yaml.dump(yaml.safe_load(new_config), f, default_flow_style=False)
        f.flush()
        docl.copy_file_to_manager(source=f.name,
                                  target=self.HOOKS_CONFIG_PATH)
        docl.execute('chown cfyuser: {0}'.format(self.HOOKS_CONFIG_PATH))
def stop_dispatch_processes(cls):
    """Kill all running cloudify dispatch processes on the manager."""
    logger.info('Shutting down all dispatch processes')
    try:
        docl.execute('pkill -9 -f cloudify/dispatch.py')
    except sh.ErrorReturnCode as e:
        # Exit code 1 from pkill only means no matching processes were
        # found; treat that as success.
        if e.exit_code != 1:
            raise
def _update_hooks_config(self, new_config):
    """Replace the hooks config file on the manager with new_config.

    :param new_config: YAML text of the new hooks configuration.

    Parses and re-dumps the YAML (block style) to a temp file, copies it
    into the manager container and fixes ownership for cfyuser.
    """
    with tempfile.NamedTemporaryFile() as f:
        # yaml.safe_load replaces yaml.load: identical for plain config
        # data, and yaml.load without a Loader is deprecated/unsafe.
        yaml.dump(yaml.safe_load(new_config), f, default_flow_style=False)
        f.flush()
        docl.copy_file_to_manager(source=f.name,
                                  target=self.HOOKS_CONFIG_PATH)
        docl.execute('chown cfyuser: {0}'.format(self.HOOKS_CONFIG_PATH))
def set_ldap(config_data):
    """Configure LDAP on the manager.

    Serializes config_data to JSON and passes it to the set_ldap script,
    executed with the manager's own python interpreter.
    """
    logger.info('Setting LDAP configuration')
    _prepare_set_ldap_script()
    command = "{manager_python} {script_path} --config '{cfg_data}'".format(
        manager_python=MANAGER_PYTHON,
        script_path='/tmp/set_ldap.py',
        cfg_data=json.dumps(config_data))
    execute(command)
def _restore_security_config(self):
    """Copy the saved rest-security.conf back onto the manager and
    restart the REST service so the restored config takes effect."""
    saved_config = os.path.join(self.workdir, 'rest-security.conf')
    docl.copy_file_to_manager(saved_config, self.REST_SEC_CONFIG_PATH)
    # The REST service runs as cfyuser and must own its config
    docl.execute('chown cfyuser: {securityconf}'.format(
        securityconf=self.REST_SEC_CONFIG_PATH))
    self.restart_service('cloudify-restservice')
def reset_data_and_restart():
    """Recreate the riemann configs dir from scratch and restart riemann."""
    commands = [
        'rm -rf {0}'.format(RIEMANN_CONFIGS_DIR),
        'mkdir -p {0}'.format(RIEMANN_CONFIGS_DIR),
        # Ownership/permissions mirror how the dir is set up during the
        # bootstrap
        'chown -R riemann:{0} {1}'.format(CLOUDIFY_USER,
                                          RIEMANN_CONFIGS_DIR),
        'chmod 770 {0}'.format(RIEMANN_CONFIGS_DIR),
        'systemctl restart cloudify-riemann',
    ]
    for command in commands:
        docl.execute(command)
def _restore_security_config(self):
    """Restore rest-security.conf from the workdir and bounce the REST
    service so it picks the file up."""
    backup_path = os.path.join(self.workdir, 'rest-security.conf')
    docl.copy_file_to_manager(backup_path, self.REST_SEC_CONFIG_PATH)
    # cfyuser must own the security config for the REST service to read it
    docl.execute('chown cfyuser: {securityconf}'.format(
        securityconf=self.REST_SEC_CONFIG_PATH))
    self.restart_service('cloudify-restservice')
def reset_storage():
    """Reset the PostgreSQL DB by running the reset script on the manager.

    The script runs on the manager itself so it can reach localhost-only
    APIs (the rabbitmq management api).
    """
    logger.info('Resetting PostgreSQL DB')
    command = "{manager_python} {script_path} --config {config_path}".format(
        manager_python=MANAGER_PYTHON,
        script_path=SCRIPT_PATH,
        config_path=CONFIG_PATH)
    execute(command)
def setUpClass(cls):
    """Configure database timezone."""
    super(EventsAlternativeTimezoneTest, cls).setUpClass()
    # The container is launched once per unittest.TestCase class, so the
    # timezone only needs to be set once, here.  Tables are re-created
    # between test cases, but this session-level setting is preserved.
    postgres_conf = get_postgres_conf()
    alter_query = "ALTER USER {} SET TIME ZONE '{}'".format(
        postgres_conf.username, cls.TIMEZONE)
    run_query(alter_query)
    # Restart every db client so each one opens a fresh session that
    # picks up the just-set timezone.
    docl.execute(
        "systemctl restart cloudify-amqp-postgres cloudify-restservice")
def _copy_docker_conf_file(self):
    """Write docker_conf.json onto the manager for the dockercompute
    plugin (see integration_tests_plugins/dockercompute)."""
    docl.execute('mkdir -p {0}'.format(constants.DOCKER_COMPUTE_DIR))
    conf = {
        # The dockercompute plugin needs to know where to find the
        # docker host
        'docker_host': docl.docker_host(),
        # Used for cleanup purposes
        'env_label': self.env_label,
    }
    with tempfile.NamedTemporaryFile() as f:
        json.dump(conf, f)
        f.flush()
        docl.copy_file_to_manager(
            source=f.name,
            target=os.path.join(constants.DOCKER_COMPUTE_DIR,
                                'docker_conf.json'))
    self.chown(constants.CLOUDIFY_USER, constants.DOCKER_COMPUTE_DIR)
def clear_directory(dir_path, quiet=True):
    """Remove all contents of a directory (the directory itself stays).

    The trailing wildcard deletes everything *inside* dir_path; the
    command is wrapped in `sh -c` because `docker exec` does not expand
    wildcards on its own.
    """
    return docl.execute("sh -c 'rm -rf {0}/*'".format(dir_path), quiet)
def test_hook_config_invalid_yaml(self):
    """A hooks config that is not valid YAML makes the hook consumer log
    an error for every received event instead of running hooks."""
    # Deliberately broken YAML: a bare scalar followed by a mapping entry.
    # NOTE(review): exact whitespace of this literal was reconstructed
    # from a collapsed source line - confirm against the original file.
    new_config = """
    test_hook
    invalid: true
    """
    with tempfile.NamedTemporaryFile() as f:
        f.write(new_config)
        f.flush()
        # Install the broken config directly, bypassing any validation
        docl.copy_file_to_manager(source=f.name,
                                  target=self.HOOKS_CONFIG_PATH)
        docl.execute('chown cfyuser: {0}'.format(self.HOOKS_CONFIG_PATH))
    self._start_a_workflow()
    # One error per lifecycle event is expected in the log
    workflow_started_error = "ERROR - The hook consumer received " \
                             "`workflow_started` event but the hook " \
                             "config file is invalid yaml"
    workflow_succeeded_error = "ERROR - The hook consumer received " \
                               "`workflow_succeeded` event but the " \
                               "hook config file is invalid yaml"
    self._assert_messages_in_log(
        [workflow_started_error, workflow_succeeded_error])
def test_hook_config_invalid_yaml(self):
    """Invalid-YAML hooks config: the hook consumer must log an error
    for each event it receives rather than executing any hook."""
    # Intentionally malformed YAML (scalar then mapping).
    # NOTE(review): literal whitespace reconstructed from a collapsed
    # source line - verify against the original file.
    new_config = """
    test_hook
    invalid: true
    """
    with tempfile.NamedTemporaryFile() as f:
        f.write(new_config)
        f.flush()
        # Push the broken config straight onto the manager
        docl.copy_file_to_manager(source=f.name,
                                  target=self.HOOKS_CONFIG_PATH)
        docl.execute('chown cfyuser: {0}'.format(self.HOOKS_CONFIG_PATH))
    self._start_a_workflow()
    # Expect one error per lifecycle event in the consumer log
    workflow_started_error = "ERROR - The hook consumer received " \
                             "`workflow_started` event but the hook " \
                             "config file is invalid yaml"
    workflow_succeeded_error = "ERROR - The hook consumer received " \
                               "`workflow_succeeded` event but the " \
                               "hook config file is invalid yaml"
    self._assert_messages_in_log([workflow_started_error,
                                  workflow_succeeded_error])
def run_collector_scripts_and_assert(self, messages):
    """Run every collector script on the manager and assert the expected
    messages appear in the collector log."""
    log_file = join(LOG_PATH, LOG_FILE)
    docl.execute('mkdir -p {0}'.format(LOG_PATH))
    # Truncate the log so only this run's output is inspected
    docl.execute('echo > {0}'.format(log_file))
    for script in COLLECTOR_SCRIPTS:
        script_path = join(SCRIPTS_DESTINATION_PATH, script)
        docl.execute('{0} {1}.py'.format(MANAGER_PYTHON, script_path))
    assert_messages_in_log(self.workdir, messages, log_file)
def _copy_docker_conf_file(self):
    """Place docker_conf.json on the manager for the dockercompute
    plugin (see integration_tests_plugins/dockercompute)."""
    docl.execute('mkdir -p /root/dockercompute')
    conf = {
        # The dockercompute plugin needs to know where to find the
        # docker host
        'docker_host': docl.docker_host(),
        # Used to know from where to mount the plugins storage dir
        # on dockercompute node instances containers
        'plugins_storage_dir': self.plugins_storage_dir,
        # Used for cleanup purposes
        'env_label': self.env_label,
    }
    with tempfile.NamedTemporaryFile() as f:
        json.dump(conf, f)
        f.flush()
        docl.copy_file_to_manager(
            source=f.name,
            target='/root/dockercompute/docker_conf.json')
def _assert_hello_world_metric(self, deployment_id):
    """Fail the test unless influxdb holds fresh metrics for the deployment.

    Queries influxdb for any series whose name starts with the deployment
    ID (which should be all the series created by diamond) that received
    points in the last 5 seconds.
    """
    self.test_case.logger.info('Verifying deployment metrics...')
    query = ('curl -G "{url}" --data-urlencode '
             '"q=select * from /^{dep}\./i '
             'where time > now() - 5s"'.format(
                 url=self.influxdb_url, dep=deployment_id))
    result = docl.execute(query, quiet=True)
    # influxdb answers with a literal empty JSON list when nothing matched
    if result == '[]':
        self.test_case.fail(
            'Monitoring events list for deployment with ID `{0}` '
            'were not found on influxDB'.format(deployment_id))
def verify_deployment_environment_creation_complete(deployment_id):
    """Raise RuntimeError unless the create_deployment_environment
    workflow for the given deployment has already terminated successfully.

    A workaround for waiting for the deployment environment creation to
    complete.
    """
    client = create_rest_client()
    execs = client.executions.list(deployment_id=deployment_id)
    done = (execs and
            execs[0].status == Execution.TERMINATED and
            execs[0].workflow_id == 'create_deployment_environment')
    if done:
        return
    # Include the tail of the mgmtworker log to make failures debuggable
    log_path = '/var/log/cloudify/mgmtworker/mgmtworker.log'
    logs = docl.execute('tail -n 100 {0}'.format(log_path))
    raise RuntimeError(
        "Expected a single execution for workflow "
        "'create_deployment_environment' with status 'terminated'; "
        "Found these executions instead: {0}.\nLast 100 lines for "
        "management worker log:\n{1}".format(
            json.dumps(execs.items, indent=2), logs))
def verify_deployment_env_created(deployment_id):
    """Raise RuntimeError unless the deployment-environment-creation
    workflow already finished with status 'terminated'.

    A workaround for waiting for the deployment environment creation to
    complete.
    """
    client = create_rest_client()
    execs = client.executions.list(deployment_id=deployment_id)
    created = (execs and
               execs[0].status == Execution.TERMINATED and
               execs[0].workflow_id == 'create_deployment_environment')
    if created:
        return
    # Attach the last mgmtworker log lines so failures are diagnosable
    log_path = '/var/log/cloudify/mgmtworker/mgmtworker.log'
    logs = docl.execute('tail -n 100 {0}'.format(log_path))
    raise RuntimeError(
        "Expected a single execution for workflow "
        "'create_deployment_environment' with status 'terminated'; "
        "Found these executions instead: {0}.\nLast 100 lines for "
        "management worker log:\n{1}".format(
            json.dumps(execs.items, indent=2), logs))
def on_manager_created(self):
    """Post-creation hook: prepare the plugin storage directory on the
    manager and start streaming events."""
    # Make sure the plugins storage dir exists and is owned by cfy user
    docl.execute('mkdir -p {0}'.format(constants.PLUGIN_STORAGE_DIR))
    self.chown(constants.CLOUDIFY_USER, constants.PLUGIN_STORAGE_DIR)
    self.start_events_printer()
def execute_on_manager(command, quiet=True):
    """Run a shell command inside the cloudify manager container.

    :param command: the shell command to run
    :param quiet: suppress command output when True
    :return: the command output, as returned by docl
    """
    return docl.execute(command, quiet)
def delete_manager_file(file_path, quiet=True):
    """Remove a file (or tree) from the cloudify manager container.

    :param file_path: absolute path on the manager to delete
    :param quiet: suppress command output when True
    """
    command = 'rm -rf {0}'.format(file_path)
    return docl.execute(command, quiet=quiet)
def reset_storage():
    """Reset the PostgreSQL DB via the reset script on the manager.

    The script is executed on the manager itself so it can reach
    localhost-only APIs (the rabbitmq management api).
    """
    logger.info('Resetting PostgreSQL DB')
    command = ("/opt/manager/env/bin/python "
               "{script_path} --config {config_path}".format(
                   script_path=SCRIPT_PATH, config_path=CONFIG_PATH))
    execute(command)
def run_postgresql_command(cmd):
    """Execute a SQL statement against cloudify_db via psql as postgres."""
    psql_command = 'sudo -u postgres psql cloudify_db -c "{0}"'.format(cmd)
    return docl.execute(psql_command)
def create_api_token():
    """Create a new valid API token by running the admin-token script on
    the manager."""
    docl.execute('sudo {0}'.format(ADMIN_TOKEN_SCRIPT))
def set_ldap(config_data):
    """Apply LDAP configuration on the manager.

    Serializes config_data to JSON and feeds it to the previously
    prepared set_ldap script.
    """
    logger.info('Setting LDAP configuration')
    _prepare_set_ldap_script()
    command = ("/opt/manager/env/bin/python "
               "{script_path} --config '{cfg_data}'".format(
                   script_path='/tmp/set_ldap.py',
                   cfg_data=json.dumps(config_data)))
    execute(command)
def reset_data_and_restart():
    """Wipe the riemann configs directory and restart the riemann service."""
    for command in ('rm -rf {0}'.format(RIEMANN_CONFIGS_DIR),
                    'mkdir -p {0}'.format(RIEMANN_CONFIGS_DIR),
                    'systemctl restart cloudify-riemann'):
        docl.execute(command)
def chown(owner, path_in_container, recursive=True):
    """chown path_in_container to owner:owner inside the manager container.

    :param recursive: apply recursively (chown -R) when True
    """
    # Renamed local (was `chown`) so it no longer shadows this function
    base_cmd = 'chown -R' if recursive else 'chown'
    docl.execute('{0} {1}:{1} {2}'.format(base_cmd, owner,
                                          path_in_container))
def run_postgresql_command(cmd):
    """Run a SQL statement on cloudify_db through psql (as postgres)."""
    full_command = 'sudo -u postgres psql cloudify_db -c "{0}"'.format(cmd)
    return docl.execute(full_command)
def chown(owner, path_in_container, recursive=True):
    """Change ownership of a path inside the manager container to
    owner:owner.

    :param recursive: apply recursively (chown -R) when True
    """
    # Local renamed from `chown` to avoid shadowing the function itself
    chown_cmd = 'chown -R' if recursive else 'chown'
    docl.execute('{0} {1}:{1} {2}'.format(chown_cmd, owner,
                                          path_in_container))
def create_api_token():
    """Generate a fresh valid API token via the admin-token script."""
    command = 'sudo {0}'.format(ADMIN_TOKEN_SCRIPT)
    docl.execute(command)
def test_v_4_6_0_restore_snapshot_and_restart_services(self):
    """Restore a 4.6.0 hello-world snapshot, force-restart the manager
    and verify every service comes back up properly."""
    self._upload_and_restore_snapshot(
        self._get_snapshot('snap_4_6_0_hello_world.zip'))
    docl.execute('cfy_manager restart --force')
    self.assertTrue(self._all_services_restarted_properly())
def on_manager_created(self):
    """Hook run after the manager container is created: set up the
    plugin storage dir and begin printing events."""
    plugin_dir = constants.PLUGIN_STORAGE_DIR
    docl.execute('mkdir -p {0}'.format(plugin_dir))
    self.chown(constants.CLOUDIFY_USER, plugin_dir)
    self.start_events_printer()