def get_managers(self, network='default'):
    # same issue as get_brokers
    return [
        ManagerItem({
            'private_ip': '127.0.0.1',
            'networks': {'default': '127.0.0.1'},
        })
    ]
def _test_local_agent_from_package(agent_name, fs, ssl_cert, request, *_):
    agent_queue = '{0}-queue'.format(agent_name)

    blueprint_path = resources.get_resource(
        'blueprints/agent-from-package/local-agent-blueprint.yaml')
    logger.info('Initiating local env')

    inputs = {
        'resource_base': fs.root_path,
        'source_url': get_source_uri(),
        'requirements_file': get_requirements_uri(),
        'name': agent_name,
        'queue': agent_queue,
        'file_server_port': fs.port,
        'ssl_cert_path': ssl_cert.local_cert_path()
    }

    # Pretend a single local manager exists, so the agent blueprint can
    # resolve its manager endpoint without a real REST service.
    managers = [
        ManagerItem({
            'networks': {'default': '127.0.0.1'},
            'ca_cert_content': ssl_cert.DUMMY_CERT,
            'hostname': 'cloudify'
        })
    ]
    with patch('cloudify.endpoint.LocalEndpoint.get_managers',
               return_value=managers):
        env = local.init_env(name=request.node.name,
                             blueprint_path=blueprint_path,
                             inputs=inputs)

        # Install the agent from the package, verify the CA cert it was
        # given, then uninstall and wait for the daemon to go down.
        env.execute('install', task_retries=0)
        agent_dict = get_agent_dict(env)
        ssl_cert.verify_remote_cert(agent_dict['agent_dir'])

        env.execute('uninstall', task_retries=1)
        wait_for_daemon_dead(agent_queue)
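# Aside: stripped of the agent specifics, the flow above is just the local
# workflow API -- build an environment from a blueprint, then run workflows
# against it. A minimal sketch, assuming `local` is cloudify.workflows.local
# (the module the test above uses); the blueprint path and inputs below are
# hypothetical placeholders, not files that ship with this repo.
from cloudify.workflows import local


def _local_workflow_sketch():
    env = local.init_env(name='sketch-env',
                         blueprint_path='blueprints/example-blueprint.yaml',
                         inputs={'name': 'example-agent'})
    env.execute('install', task_retries=0)
    env.execute('uninstall', task_retries=1)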
class ClusterTest(CliCommandTest):
    MANAGERS_LIST = [
        ManagerItem({
            'id': '0',
            'hostname': 'hostname_1',
            'private_ip': '1.2.3.4',
            'public_ip': '2.2.3.4',
            'version': '5.0',
            'edition': 'premium',
            'distribution': 'centos',
            'distro_release': 'core',
            'fs_sync_node_id': 'hgujriewgthuiyenfjk'
        }),
        ManagerItem({
            'id': '1',
            'hostname': 'hostname_2',
            'private_ip': '1.2.3.5',
            'public_ip': '2.2.3.5',
            'version': '5.0',
            'edition': 'premium',
            'distribution': 'centos',
            'distro_release': 'core',
            'fs_sync_node_id': 'hgujriewgthuiyenfjk'
        }),
        ManagerItem({
            'id': '2',
            'hostname': 'hostname_3',
            'private_ip': '1.2.3.6',
            'public_ip': '2.2.3.6',
            'version': '5.0',
            'edition': 'premium',
            'distribution': 'centos',
            'distro_release': 'core',
            'fs_sync_node_id': 'hgujriewgthuiyenfjk'
        })
    ]
    BROKERS_LIST = [
        RabbitMQBrokerItem({
            'name': 'broker1',
            'host': '3.2.3.4',
            'port': '15671',
            'params': {},
            'ca_cert_content': 'CA CONTENT',
            'networks': {'default': '3.2.3.4'}
        })
    ]

    def setUp(self):
        super(ClusterTest, self).setUp()
        self.client.manager.get_status = mock.MagicMock()
        self.client.maintenance_mode.status = mock.MagicMock()
        self.client.manager.get_managers = mock.MagicMock()
        self.client.manager.get_brokers = mock.MagicMock(
            return_value=self.BROKERS_LIST)
        # A MagicMock caches its return value, so setting .items on the
        # result of one call makes every later get_managers() call return
        # an object whose .items is MANAGERS_LIST.
        self.client.manager.get_managers().items = self.MANAGERS_LIST

    def test_list_nodes(self):
        self.use_manager()
        # One status per manager: healthy, unreachable, degraded.
        self.client.manager.get_status.side_effect = [{
            'services': [{
                'instances': [{'state': 'running'}],
                'display_name': 'Service-1'
            }, {
                'instances': [{'state': 'remote'}],
                'display_name': 'Service-2'
            }, {
                'instances': [{'state': 'down'}],
                'display_name': 'Service-3'
            }, {
                'instances': [{'state': 'running'}],
                'display_name': 'Service-4'
            }]
        }, ConnectionError, {
            'services': [{
                'instances': [{'state': 'running'}],
                'display_name': 'Service-BlaBla'
            }, {
                'instances': [{'state': 'down'}],
                'display_name': 'Service-1'
            }]
        }]
        outcome = self.invoke('cfy cluster status')
        supposed_to_be_in_list = [
            'Active',
            'Offline',
            'hostname_1',
            '1.2.3.5',
            'broker1',
            '15671',
            '3.2.3.4'
        ]
        not_supposed_to_be_in_list = [
            'Service-1',
            'Service-2',
            'Service-3',
            'Service-4',
            'Service-BlaBla',
            'N/A',
            'down',
            'remote',
            'running',
            'id',
            'fs_sync_node_id'
        ]
        for supposed_to_be_in in supposed_to_be_in_list:
            self.assertIn(supposed_to_be_in, outcome.output)
        for not_supposed_to_be_in in not_supposed_to_be_in_list:
            self.assertNotIn(not_supposed_to_be_in, outcome.output)

    def test_list_nodes_verbose(self):
        self.use_manager()
        self.client.manager.get_status.side_effect = [{
            'services': [{
                'instances': [{'state': 'running'}],
                'display_name': 'Service-1'
            }, {
                'instances': [{'state': 'remote'}],
                'display_name': 'Service-2'
            }, {
                'instances': [{'state': 'down'}],
                'display_name': 'Service-3'
            }, {
                'instances': [{'state': 'running'}],
                'display_name': 'Service-4'
            }]
        }, ConnectionError, {
            'services': [{
                'instances': [{'state': 'running'}],
                'display_name': 'Service-BlaBla'
            }, {
                'instances': [{'state': 'down'}],
                'display_name': 'Service-1'
            }]
        }]
        outcome = self.invoke('cfy cluster status -v')
        supposed_to_be_in_list = [
            'Service-1',
            'Service-2',
            'Service-3',
            'Service-4',
            'Service-BlaBla',
            'down',
            'remote',
            'running',
            'Active',
            'Offline',
            'hostname_1',
            '1.2.3.5',
            'N/A',
            'broker1',
            '15671',
            '3.2.3.4'
        ]
        for supposed_to_be_in in supposed_to_be_in_list:
            self.assertIn(supposed_to_be_in, outcome.output)
        self.assertNotIn('id', outcome.output)
        self.assertNotIn('fs_sync_node_id', outcome.output)

    def test_remove_node(self):
        self.use_manager()
        list_result = mock.Mock()
        list_result.items = self.MANAGERS_LIST
        self.client.manager.get_managers = mock.MagicMock(
            return_value=list_result)
        self.client.manager.remove_manager = mock.MagicMock(
            return_value=self.MANAGERS_LIST[0])
        outcome = self.invoke('cfy cluster remove hostname_1')
        self.assertIn('Node hostname_1 was removed successfully!',
                      outcome.output)

    def test_remove_non_existing_node(self):
        self.use_manager()
        self.client.manager.remove_manager = mock.Mock(
            return_value=self.MANAGERS_LIST[0])
        self.assertRaises(ClickInvocationException,
                          self.invoke,
                          'cfy cluster remove hostname_BlaBla')
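# Aside: the setUp above leans on a detail of the standard library's
# unittest.mock -- a MagicMock caches its return_value, so calling the mock
# once and mutating the result affects every later call. A minimal,
# self-contained sketch of that behaviour (names are illustrative only; the
# test module itself imports `mock` rather than `unittest.mock`):
from unittest import mock


def _return_value_caching_sketch():
    get_managers = mock.MagicMock()
    get_managers().items = ['manager_a', 'manager_b']
    # Every call returns the same cached child mock, so the code under test
    # sees .items exactly as if a real list response had been returned.
    assert get_managers().items == ['manager_a', 'manager_b']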
def get_managers(self, network=None):
    return [ManagerItem(m) for m in self._managers]
class ClusterTest(CliCommandTest):
    MANAGERS_LIST = [
        ManagerItem({
            'id': '0',
            'hostname': 'hostname_1',
            'private_ip': '1.2.3.4',
            'public_ip': '2.2.3.4',
            'version': '5.0',
            'edition': 'premium',
            'distribution': 'centos',
            'distro_release': 'core',
            'fs_sync_node_id': 'hgujriewgthuiyenfjk'
        }),
        ManagerItem({
            'id': '1',
            'hostname': 'hostname_2',
            'private_ip': '1.2.3.5',
            'public_ip': '2.2.3.5',
            'version': '5.0',
            'edition': 'premium',
            'distribution': 'centos',
            'distro_release': 'core',
            'fs_sync_node_id': 'hgujriewgthuiyenfjk'
        }),
        ManagerItem({
            'id': '2',
            'hostname': 'hostname_3',
            'private_ip': '1.2.3.6',
            'public_ip': '2.2.3.6',
            'version': '5.0',
            'edition': 'premium',
            'distribution': 'centos',
            'distro_release': 'core',
            'fs_sync_node_id': 'hgujriewgthuiyenfjk'
        })
    ]
    BROKERS_LIST = [
        RabbitMQBrokerItem({
            'name': 'broker1',
            'host': '3.2.3.4',
            'port': '15671',
            'params': {},
            'ca_cert_content': 'CA CONTENT',
            'networks': {'default': '3.2.3.4'}
        })
    ]
    DB_NODES_LIST = [DBNodeItem({'name': 'db_1', 'host': '3.2.3.5'})]

    def setUp(self):
        super(ClusterTest, self).setUp()
        self.client.cluster_status.get_status = mock.MagicMock()
        self.client.manager.get_managers = mock.MagicMock()
        self.client.manager.get_brokers = mock.MagicMock()
        self.client.manager.get_db_nodes = mock.MagicMock()
        # Same return_value trick as in the list-nodes tests: setting .items
        # on the cached result makes each endpoint return its fixture list.
        self.client.manager.get_managers().items = self.MANAGERS_LIST
        self.client.manager.get_brokers().items = self.BROKERS_LIST
        self.client.manager.get_db_nodes().items = self.DB_NODES_LIST

    def test_command_basic_run(self):
        self.use_manager()
        self.invoke('cfy cluster status')

    def test_cluster_status_by_unauthorized_user(self):
        self.use_manager()
        with mock.patch.object(self.client.cluster_status,
                               'get_status') as status:
            status.side_effect = UserUnauthorizedError('Unauthorized user')
            outcome = self.invoke('cfy cluster status')
            self.assertIn('User is unauthorized', outcome.logs)

    def test_cluster_status_no_manager_server_defined(self):
        # Running a command which requires a target manager server without
        # first calling "cfy profiles use" or providing a target server
        # explicitly
        self.invoke('cfy cluster status',
                    'This command is only available when using a manager')

    def test_cluster_status_content(self):
        self.use_manager()
        self.client.cluster_status.get_status.side_effect = [{
            'status': 'OK',
            'services': {
                'Service-1': {'status': 'Active', 'is_remote': False},
                'Service-2': {'status': 'Active', 'is_remote': True},
                'Service-3': {'status': 'Inactive', 'is_remote': False},
                'Service-4': {'status': 'Active', 'is_remote': False}
            }
        }, ConnectionError, {
            'services': {
                'Service-BlaBla': {'status': 'Active', 'is_remote': False},
                'Service-1': {'status': 'Inactive', 'is_remote': False}
            }
        }]
        outcome = self.invoke('cfy cluster status')
        supposed_to_be_in_list = [
            'OK',
            'Active',
            'Service-1',
            'Service-2',
            'Service-3',
            'Service-4',
            'Inactive'
        ]
        not_supposed_to_be_in_list = ['remote']
        for supposed_to_be_in in supposed_to_be_in_list:
            self.assertIn(supposed_to_be_in, outcome.output)
        for not_supposed_to_be_in in not_supposed_to_be_in_list:
            self.assertNotIn(not_supposed_to_be_in, outcome.output)

    def test_cluster_status_json_format(self):
        self.use_manager()
        self.client.cluster_status.get_status.side_effect = [{
            'status': 'OK',
            'services': {
                'Service-1': {'status': 'Active', 'is_remote': False},
                'Service-2': {'status': 'Active', 'is_remote': True},
                'Service-3': {'status': 'Inactive', 'is_remote': False},
                'Service-4': {'status': 'Active', 'is_remote': False}
            }
        }, ConnectionError, {
            'services': {
                'Service-BlaBla': {'status': 'Active', 'is_remote': False},
                'Service-1': {'status': 'Inactive', 'is_remote': False}
            }
        }]
        outcome = self.invoke('cfy cluster status --json')
        supposed_to_be_in_list = [
            'OK',
            'Active',
            'Service-1',
            'Service-2',
            'Service-3',
            'Service-4',
            'Inactive',
            'remote'
        ]
        for supposed_to_be_in in supposed_to_be_in_list:
            self.assertIn(supposed_to_be_in, outcome.output)

    def test_remove_node(self):
        self.use_manager()
        list_result = mock.Mock()
        list_result.items = self.MANAGERS_LIST
        self.client.manager.get_managers = mock.MagicMock(
            return_value=list_result)
        self.client.manager.remove_manager = mock.MagicMock(
            return_value=self.MANAGERS_LIST[0])
        outcome = self.invoke('cfy cluster remove hostname_1')
        self.assertIn('Node hostname_1 was removed successfully!',
                      outcome.output)

    def test_remove_non_existing_node(self):
        self.use_manager()
        self.client.manager.remove_manager = mock.Mock(
            return_value=self.MANAGERS_LIST[0])
        self.assertRaises(ClickInvocationException,
                          self.invoke,
                          'cfy cluster remove hostname_BlaBla')
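# Aside: the status mocks above rely on another unittest.mock behaviour --
# when side_effect is an iterable, each call consumes the next element, and
# any element that is an exception (ConnectionError here) is raised instead
# of returned. A minimal sketch using only the standard library; the status
# values are illustrative, not the CLI's real payloads:
from unittest import mock


def _side_effect_sequence_sketch():
    get_status = mock.MagicMock(side_effect=[{'status': 'OK'},
                                             ConnectionError,
                                             {'status': 'Degraded'}])
    assert get_status() == {'status': 'OK'}        # first node: healthy
    try:
        get_status()                               # second node: unreachable
    except ConnectionError:
        pass
    assert get_status() == {'status': 'Degraded'}  # third node: degraded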
def _manager_env(self):
    port = 8756
    # Serve agent packages and scripts from a local file server so the
    # "manager" file-server URLs resolve without a real manager.
    fs = FileServer(root_path=self.temp_folder, port=port)
    fs.start()
    self.addCleanup(fs.stop)
    if os.name == 'nt':
        package_name = 'cloudify-windows-agent.exe'
    else:
        dist = platform.dist()
        package_name = '{0}-{1}-agent.tar.gz'.format(
            dist[0].lower(), dist[2].lower())
    resources_dir = os.path.join(self.temp_folder, 'resources')
    agent_dir = os.path.join(resources_dir, 'packages', 'agents')
    agent_script_dir = os.path.join(resources_dir, 'cloudify_agent')
    os.makedirs(agent_dir)
    os.makedirs(agent_script_dir)
    os.makedirs(os.path.join(self.temp_folder, 'cloudify'))
    agent_path = os.path.join(agent_dir, package_name)
    shutil.copyfile(agent_package.get_package_path(), agent_path)
    self.addCleanup(agent_package.cleanup)
    new_env = {
        constants.MANAGER_FILE_SERVER_ROOT_KEY: resources_dir,
        constants.REST_PORT_KEY: str(port),
        constants.MANAGER_NAME: 'cloudify'
    }

    original_create_op_context = operations._get_cloudify_context

    def mock_create_op_context(agent, task_name):
        context = original_create_op_context(agent, task_name)
        context['__cloudify_context']['local'] = True
        return context

    # Need to patch, to avoid broker_ssl_enabled being True
    @contextmanager
    def get_amqp_client(agent):
        yield get_client()

    managers = [
        ManagerItem({
            'networks': {'default': '127.0.0.1'},
            'ca_cert_content': agent_ssl_cert.DUMMY_CERT,
            'hostname': 'cloudify'
        })
    ]

    patches = [
        patch.dict(os.environ, new_env),
        patch('cloudify_agent.operations._get_amqp_client',
              get_amqp_client),
        patch('cloudify.endpoint.LocalEndpoint.get_managers',
              return_value=managers),
        patch('cloudify_agent.operations._get_cloudify_context',
              mock_create_op_context),
        get_tenant_mock()
    ]
    # Activate all patches for the duration of the test body, then undo
    # them and stop the file server.
    for p in patches:
        p.start()
    try:
        yield
    finally:
        for p in patches:
            p.stop()
        fs.stop()
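# Aside: the manual p.start()/p.stop() loop above works, but the same
# lifecycle can also be expressed with the standard library's
# contextlib.ExitStack, which unwinds every entered patch even if a later
# one fails to start. A hedged sketch of that alternative, reusing the
# get_managers patch target from the code above; `new_env` and `managers`
# are placeholders for the values built in _manager_env:
import os
from contextlib import ExitStack, contextmanager
from unittest.mock import patch


@contextmanager
def _manager_env_sketch(new_env, managers):
    with ExitStack() as stack:
        stack.enter_context(patch.dict(os.environ, new_env))
        stack.enter_context(
            patch('cloudify.endpoint.LocalEndpoint.get_managers',
                  return_value=managers))
        yield
    # Leaving the with-block stops the patches in reverse order.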