def test_deployments_execute(self):
    execute_response = Execution({'status': 'terminated'})
    get_execution_response = Execution({
        'status': 'terminated',
        'workflow_id': 'mock_wf',
        'deployment_id': 'deployment-id',
        'blueprint_id': 'blueprint-id',
        'error': '',
        'id': uuid4(),
        'created_at': datetime.datetime.now(),
        'parameters': {}
    })
    self.client.executions.start = MagicMock(return_value=execute_response)
    self.client.executions.get = MagicMock(
        return_value=get_execution_response)
    self.client.events.get = MagicMock(return_value=([], 0))
    cli_runner.run_cli('cfy executions start '
                       '-d a-deployment-id -w install')
def _mock_executions_get(self, execution_id):
    self.update_execution_status()
    if self.executions_status != Execution.TERMINATED:
        execution = Execution({
            'id': 'execution_id',
            'status': Execution.STARTED
        })
    else:
        execution = Execution({
            'id': 'execution_id',
            'status': Execution.TERMINATED
        })
    return execution
def restore(self, snapshot_id, recreate_deployments_envs=True, force=False,
            tenant_name=None):
    """
    Restores the snapshot whose id matches the provided snapshot id.

    :param snapshot_id: The id of the snapshot to be restored.
    :param recreate_deployments_envs: If manager should recreate deployment
           environments.
    :param force: Skip clearing the manager and checking whether it is
           actually clean.
    :param tenant_name: Name of the tenant to which old (pre 4.0) snapshots
           should be restored.
    """
    assert snapshot_id
    uri = '/snapshots/{0}/restore'.format(snapshot_id)
    params = {
        'recreate_deployments_envs': recreate_deployments_envs,
        'force': force,
        'tenant_name': tenant_name
    }
    response = self.api.post(uri, data=params)
    return Execution(response)
def restore(self, snapshot_id, force=False, restore_certificates=False,
            no_reboot=False, ignore_plugin_failure=False):
    """
    Restores the snapshot whose id matches the provided snapshot id.

    :param snapshot_id: The id of the snapshot to be restored.
    :param force: Skip clearing the manager and checking whether it is
           actually clean.
    :param restore_certificates: Whether to try and restore the certificates
           from the snapshot.
    :param no_reboot: Do not reboot after certificates restore.
    :param ignore_plugin_failure: Ignore any plugin installation failures
           and continue with the restore.
    """
    assert snapshot_id
    uri = '/snapshots/{0}/restore'.format(snapshot_id)
    params = {
        'force': force,
        'restore_certificates': restore_certificates,
        'no_reboot': no_reboot,
        'ignore_plugin_failure': ignore_plugin_failure
    }
    response = self.api.post(uri, data=params)
    return Execution(response)
def restore(self, snapshot_id, recreate_deployments_envs=True, force=False,
            restore_certificates=False, no_reboot=False):
    """
    Restores the snapshot whose id matches the provided snapshot id.

    :param snapshot_id: The id of the snapshot to be restored.
    :param recreate_deployments_envs: If manager should recreate deployment
           environments.
    :param force: Skip clearing the manager and checking whether it is
           actually clean.
    :param restore_certificates: Whether to try and restore the certificates
           from the snapshot.
    :param no_reboot: Do not reboot after certificates restore.
    """
    assert snapshot_id
    uri = '/snapshots/{0}/restore'.format(snapshot_id)
    params = {
        'recreate_deployments_envs': recreate_deployments_envs,
        'force': force,
        'restore_certificates': restore_certificates,
        'no_reboot': no_reboot
    }
    response = self.api.post(uri, data=params)
    return Execution(response)
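All three restore variants above post to the same `/snapshots/{id}/restore` endpoint and wrap the response in an Execution. A minimal usage sketch, assuming a reachable manager whose `snapshots.restore` matches the newer signature above; the hostname, credentials, and snapshot id are placeholders:

from cloudify_rest_client import CloudifyClient

# Hypothetical manager address and credentials, for illustration only.
client = CloudifyClient(host='manager.example.com', username='admin',
                        password='admin', tenant='default_tenant')

# Kick off the restore; the returned Execution describes the system
# workflow that performs the restore and can be polled for status.
execution = client.snapshots.restore('snapshot-2024-01-01',
                                     force=False,
                                     restore_certificates=False)
print(execution['id'], execution['status'])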
def execution_mock(status, wf_id='mock_wf'):
    return Execution({
        'status': status,
        'workflow_id': wf_id,
        'deployment_id': 'deployment-id',
        'blueprint_id': 'blueprint-id',
        'error': '',
        'id': uuid4(),
        'created_at': datetime.datetime.now(),
        'parameters': {}
    })
def create(self, log_bundle_id, queue=False):
    """Creates a new log bundle.

    :param log_bundle_id: ID of the log bundle that will be created.
    :param queue: Whether to queue if other system workflows are in
                  progress.
    :return: The execution for creating the log bundle.
    """
    uri = self.base_url + log_bundle_id
    params = {'queue': queue}
    response = self.api.put(uri, data=params, expected_status_code=201)
    return Execution(response)
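A minimal usage sketch, assuming this method is mounted as `client.log_bundles.create` on newer managers and reusing the `client` constructed in the restore sketch above; the bundle id is a placeholder:

# Create a log bundle and keep the returned execution for tracking.
execution = client.log_bundles.create('logs-2024-01-01', queue=True)
print(execution['workflow_id'], execution['status'])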
def execution_mock(status, wf_id='mock_wf'):
    return Execution({
        'status': status,
        'workflow_id': wf_id,
        'deployment_id': 'deployment-id',
        'blueprint_id': 'blueprint-id',
        'error': '',
        'id': uuid4(),
        'created_at': datetime.now().isoformat()[:-3],
        'parameters': {},
        'permission': 'creator',
        'created_by': 'admin',
        'tenant_name': DEFAULT_TENANT_NAME
    })
def create(self, snapshot_id, include_metrics, include_credentials):
    """
    Creates a new snapshot.

    :param snapshot_id: Snapshot id of the snapshot that will be created.
    :return: The created snapshot.
    """
    assert snapshot_id
    uri = '/snapshots/{0}'.format(snapshot_id)
    params = {
        'include_metrics': include_metrics,
        'include_credentials': include_credentials
    }
    response = self.api.put(uri, data=params, expected_status_code=201)
    return Execution(response)
def test_executions_get(self):
    execution = Execution({
        'status': 'terminated',
        'workflow_id': 'mock_wf',
        'deployment_id': 'deployment-id',
        'blueprint_id': 'blueprint-id',
        'error': '',
        'id': uuid4(),
        'created_at': datetime.datetime.now(),
        'parameters': {}
    })
    self.client.executions.get = MagicMock(return_value=execution)
    cli_runner.run_cli('cfy executions get -e execution-id')
def validate(self, path, entity_id, blueprint_filename=None,
             visibility=VisibilityState.TENANT,
             progress_callback=None, skip_size_limit=True):
    """
    Validates a blueprint with Cloudify's manager.

    :param path: Main blueprint yaml file path.
    :param entity_id: Id of the uploaded blueprint.
    :param blueprint_filename: The archive's main blueprint yaml filename.
    :param visibility: The visibility of the blueprint, can be 'private',
                       'tenant' or 'global'.
    :param progress_callback: Progress bar callback method.
    :param skip_size_limit: Indicator whether to check size limit on the
                            blueprint folder.

    Blueprint path should point to the main yaml file of the blueprint
    to be uploaded. Its containing folder will be packed to an archive
    and uploaded to the manager. Validation is basically an upload
    without the storage part being done.
    """
    tempdir = tempfile.mkdtemp()
    tar_path = None
    application_file = None
    try:
        if not urlparse(path).scheme or os.path.exists(path):
            # path is not a URL, create archive
            tar_path, application_file = self._validate_blueprint_size(
                path, tempdir, skip_size_limit)
        response = self._validate(
            tar_path or path,
            blueprint_id=entity_id,
            application_file_name=application_file or blueprint_filename,
            visibility=visibility,
            progress_callback=progress_callback)
    finally:
        shutil.rmtree(tempdir)
    if response:
        # on cloudify earlier than 6.4, response is None (204 no content)
        return Execution(response)
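A minimal usage sketch, assuming the method above is exposed on the blueprints client and reusing the `client` from the restore sketch; the path and blueprint id are placeholders:

# Validate a local blueprint; its containing folder is archived and sent
# to the manager, but nothing is stored.
execution = client.blueprints.validate('blueprints/app/blueprint.yaml',
                                       'my-blueprint')
if execution is not None:
    # Managers from 6.4 onward return the validation execution to track.
    print(execution['id'])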
def create(self, snapshot_id, include_credentials, include_logs=True,
           include_events=True, queue=False, include_metrics=None):
    """
    Creates a new snapshot.

    :param snapshot_id: Snapshot id of the snapshot that will be created.
    :param include_metrics: Deprecated parameter, should not be used.
    :return: The created snapshot.
    """
    assert snapshot_id
    uri = '/snapshots/{0}'.format(snapshot_id)
    params = {
        'include_credentials': include_credentials,
        'include_logs': include_logs,
        'include_events': include_events,
        'queue': queue
    }
    response = self.api.put(uri, data=params, expected_status_code=201)
    return Execution(response)
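A minimal usage sketch of the newer create signature, reusing the `client` from the restore sketch above; the snapshot id is a placeholder and the returned execution is polled via the standard executions API:

execution = client.snapshots.create('snapshot-2024-01-01',
                                    include_credentials=True,
                                    include_logs=True,
                                    include_events=False,
                                    queue=True)

# The snapshot is built by a system workflow; check its current status.
execution = client.executions.get(execution['id'])
print(execution['status'])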
def get_execution(self, execution_id):
    # same issue as get_brokers
    return Execution({'id': execution_id, 'status': 'started'})
class AgentsTests(CliCommandTest):
    def setUp(self):
        super(AgentsTests, self).setUp()
        self.use_manager()

    @staticmethod
    def _agent_filters(node_ids=None, node_instance_ids=None,
                       deployment_ids=None, install_methods=None):
        return {cfy.AGENT_FILTER_NODE_IDS: node_ids,
                cfy.AGENT_FILTER_NODE_INSTANCE_IDS: node_instance_ids,
                cfy.AGENT_FILTER_DEPLOYMENT_ID: deployment_ids,
                cfy.AGENT_FILTER_INSTALL_METHODS: install_methods}

    DEFAULT_TOPOLOGY = [
        _node_instance(DEFAULT_TENANT_NAME, 't0d0node1_1', 'node1', 'd0'),
        _node_instance(DEFAULT_TENANT_NAME, 't0d0node1_2', 'node1', 'd0'),
        _node_instance(DEFAULT_TENANT_NAME, 't0d0node2_1', 'node2', 'd0'),
        _node_instance(DEFAULT_TENANT_NAME, 't0d1node1_1', 'node1', 'd1'),
        _node_instance(DEFAULT_TENANT_NAME, 't0d1node1_2', 'node1', 'd1'),
        _node_instance(DEFAULT_TENANT_NAME, 't0d1node3_1', 'node3', 'd1'),
        _node_instance('other_tenant', 't1d0node1_1', 'node1', 'd0'),
        _node_instance('other_tenant', 't1d0node1_2', 'node1', 'd0'),
        _node_instance('other_tenant', 't1d1node3_1', 'node3', 'd1'),
        _node_instance('other_tenant', 't1d2node4_1', 'node4', 'd2'),
    ]

    def mock_client(self, topology):
        def _topology_filter(predicate, **kwargs):
            tenant_name = self.client._client.headers.get(
                CLOUDIFY_TENANT_HEADER)
            if not tenant_name:
                tenant_name = DEFAULT_TENANT_NAME
            results = list()
            all_tenants = kwargs.get('_all_tenants', False)
            for node_instance in topology:
                ni_tenant_name = node_instance['tenant_name']
                if (all_tenants or ni_tenant_name == tenant_name) \
                        and predicate(node_instance):
                    results.append(node_instance)
            return results

        def list_node_instances(**kwargs):
            def _matcher(node_instance):
                ni_id = node_instance['id']
                ni_node_id = node_instance['node_id']
                ni_dep_id = node_instance['deployment_id']
                return ni_id in kwargs.get('id', [ni_id]) and \
                    ni_node_id in kwargs.get('node_id', [ni_node_id]) and \
                    ni_dep_id in kwargs.get('deployment_id', [ni_dep_id])

            return _topology_filter(_matcher, **kwargs)

        def list_deployments(**kwargs):
            tenant_name = self.client._client.headers.get(
                CLOUDIFY_TENANT_HEADER)
            if not tenant_name:
                tenant_name = DEFAULT_TENANT_NAME
            all_node_instances = _topology_filter(lambda x: True, **kwargs)
            deployments = {(x['tenant_name'], x['deployment_id'])
                           for x in all_node_instances}
            deployments = [Deployment({'id': b, 'tenant_name': a})
                           for a, b in deployments]
            results = list()
            searched_ids = kwargs['id']
            for dep in deployments:
                if (not searched_ids) or dep.id in searched_ids:
                    results.append(dep)
            return results

        def list_nodes(**kwargs):
            node_ids = kwargs.get('id')
            all_node_instances = _topology_filter(lambda x: True, **kwargs)
            nodes = {(x['tenant_name'], x['deployment_id'], x['node_id'])
                     for x in all_node_instances}
            nodes = [Node({'id': c, 'deployment_id': b, 'tenant_name': a})
                     for (a, b, c) in nodes]
            return list(filter(
                lambda x: (node_ids is None) or x['id'] in node_ids, nodes))

        self.client.node_instances.list = list_node_instances
        self.client.deployments.list = list_deployments
        self.client.nodes.list = list_nodes

    def assert_execution_started(self, client_mock, deployment_id, filters):
        self.assertIn(
            ((deployment_id, 'workflow', filters), {
                'allow_custom_parameters': True
            }),
            client_mock.call_args_list)

    # Tests for get_node_instances_map
    def test_parameters_error(self):
        self.mock_client({})
        self.assertRaises(
            CloudifyCliError,
            get_filters_map,
            self.client, self.logger, AgentsTests._agent_filters(
                node_instance_ids=['a1'], deployment_ids=['d1']
            ), [DEFAULT_TENANT_NAME])

    def test_filters_map_empty(self):
        self.mock_client({})
        results = get_filters_map(
            self.client, self.logger, AgentsTests._agent_filters(), False)
        self.assertFalse(results)

    def test_filters_map_empty_node_instances(self):
        self.mock_client({})
        self.assertRaises(
            CloudifyCliError,
            get_filters_map,
            self.client, self.logger,
            AgentsTests._agent_filters(node_instance_ids=['t0d0node1_1']),
            False)

    def test_filters_map_empty_deployment_ids(self):
        self.mock_client({})
        self.assertRaises(
            CloudifyCliError,
            get_filters_map,
            self.client, self.logger,
            AgentsTests._agent_filters(deployment_ids=['d0']),
            False)

    def test_filters_map_all(self):
        self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
        results = get_filters_map(
            self.client, self.logger, AgentsTests._agent_filters(), True)
        self.assertEquals({
            DEFAULT_TENANT_NAME: {
                'd0': {},
                'd1': {}
            },
            'other_tenant': {
                'd0': {},
                'd1': {},
                'd2': {}
            }
        }, results)

    def test_filters_map_node_id_single_tenant(self):
        self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
        results = get_filters_map(
            self.client, self.logger,
            AgentsTests._agent_filters(node_ids=['node1']), False)
        self.assertEquals({
            DEFAULT_TENANT_NAME: {
                'd0': {'node_ids': ['node1']},
                'd1': {'node_ids': ['node1']}
            }
        }, results)

    def test_filters_map_node_id_all_tenants(self):
        self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
        results = get_filters_map(
            self.client, self.logger,
            AgentsTests._agent_filters(node_ids=['node1']), True)
        self.assertEquals({
            DEFAULT_TENANT_NAME: {
                'd0': {
                    'node_ids': ['node1']
                },
                'd1': {
                    'node_ids': ['node1']
                }
            },
            'other_tenant': {
                'd0': {
                    'node_ids': ['node1']
                }
            }
        }, results)

    def test_filters_map_dep_id_single_tenant(self):
        self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
        results = get_filters_map(
            self.client, self.logger,
            AgentsTests._agent_filters(deployment_ids=['d0']), False)
        self.assertEquals({
            DEFAULT_TENANT_NAME: {
                'd0': {}
            }
        }, results)

    def test_filters_map_dep_id_all_tenants(self):
        self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
        results = get_filters_map(
            self.client, self.logger,
            AgentsTests._agent_filters(deployment_ids=['d0']), True)
        self.assertEquals({
            DEFAULT_TENANT_NAME: {
                'd0': {}
            },
            'other_tenant': {
                'd0': {}
            }
        }, results)

    def test_filters_map_bad_dep_id(self):
        self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
        self.assertRaises(
            CloudifyCliError,
            get_filters_map,
            self.client, self.logger,
            AgentsTests._agent_filters(deployment_ids=['error']), False)

    # Tests for get_deployments_and_run_workers
    def test_empty_node_instances_map(self):
        self.mock_client({})
        self.assertRaises(
            CloudifyCliError,
            get_deployments_and_run_workers,
            self.client, self._agent_filters(), [], self.logger, '', False)

    @patch.object(ExecutionsClient, 'start')
    def test_full_topology(self, exec_client_mock):
        self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
        get_deployments_and_run_workers(
            self.client, self._agent_filters(), True,
            self.logger, 'workflow', False
        )
        self.assert_execution_started(exec_client_mock, 'd1', {})
        self.assert_execution_started(exec_client_mock, 'd0', {})
        self.assert_execution_started(exec_client_mock, 'd2', {})
        self.assertEquals(len(exec_client_mock.call_args_list), 5)

    @patch.object(ExecutionsClient, 'start')
    def test_full_topology_one_nonstarted(self, exec_client_mock):
        topology = list(AgentsTests.DEFAULT_TOPOLOGY)
        topology.append(_node_instance(DEFAULT_TENANT_NAME, 't0d1node4_1',
                                       'node4', 'd1', 'creating'))
        self.mock_client(topology)
        get_deployments_and_run_workers(
            self.client, self._agent_filters(), True,
            self.logger, 'workflow', False
        )
        self.assertEquals(len(exec_client_mock.call_args_list), 4)

    @patch.object(ExecutionsClient, 'start')
    def test_node_instances_map_none(self, exec_client_mock):
        self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
        get_deployments_and_run_workers(
            self.client,
            self._agent_filters(install_methods=['provided']),
            True, self.logger, 'workflow', False
        )
        self.assertEquals(exec_client_mock.call_count, 5)
        for call in exec_client_mock.call_args_list:
            self.assertTrue(call[0][2]['install_methods'] == ['provided'])

    @patch.object(ExecutionsClient, 'get',
                  return_value=Execution({'status': 'terminated'}))
    @patch.object(EventsClient, 'list',
                  return_value=ListResponse(
                      [],
                      Metadata({'pagination': {
                          'total': 0, 'offset': 0, 'size': 10}})))
    def test_execution_tracking(self, events_list_mock, exec_get_mock):
        self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)

        def _mock_execution_start(*args, **kwargs):
            tenant_name = args[0].api.headers.get(CLOUDIFY_TENANT_HEADER)
            deployment_id = args[1]
            return Execution({'id': str(uuid.uuid4()),
                              'deployment_id': deployment_id,
                              'tenant_name': tenant_name})

        def _wait_side_effect(*args, **kwargs):
            client_tenant = args[0]._client.headers[CLOUDIFY_TENANT_HEADER]
            execution = args[1]
            self.assertEquals(client_tenant, execution['tenant_name'])
            return DEFAULT

        with patch('cloudify_cli.commands.agents.wait_for_execution',
                   return_value=PropertyMock(error=False),
                   side_effect=_wait_side_effect):
            with patch.object(ExecutionsClient, 'start',
                              _mock_execution_start):
                get_deployments_and_run_workers(
                    self.client, self._agent_filters(), True,
                    self.logger, 'workflow', True)
def _mock_execution_start(*args, **kwargs):
    tenant_name = args[0].api.headers.get(CLOUDIFY_TENANT_HEADER)
    deployment_id = args[1]
    return Execution({'id': str(uuid.uuid4()),
                      'deployment_id': deployment_id,
                      'tenant_name': tenant_name})
def get(self, id):
    return Execution({'id': '111', 'status': 'terminated'})
def test_snapshots_create(self):
    self.client.snapshots.create = MagicMock(
        return_value=Execution({'id': 'some_id'}))
    cli_runner.run_cli('cfy snapshots create -s a-snapshot-id')