def test_validate_schema_for_successful_validation(m_lru_cache, m_open,
                                                   m_validate):
    """
    Should return the config unchanged when schema validation succeeds.
    """
    # Given: Existing schema
    m_open.return_value.__enter__().read.return_value = '''{ "title": "Schema for Job Config", "id": "#generic-hook-v1", "properties": { "mock": { "$ref": "${base_url}/link/config#/properties/mock" } } }'''

    # And: Validator that succeeds validation
    m_validate.return_value = None

    # And: Config that needs to be validated
    config = {'mock-obj': 'mock-value'}

    # When: I validate against existing schema
    ret_value = service.validate_schema(config)

    # Then: Validation succeeds and the validator saw the same config
    dict_compare(ret_value, config)
    dict_compare(m_validate.call_args[0][0], config)
def test_get_health_when_celery_is_disabled(get_store, client, ping):
    """
    Should get the health status of external services (etcd and store)
    when the celery check is disabled.
    """
    # Given: Operational external services
    EtcdInfo = namedtuple('Info', ('machines',))
    client.Client.return_value = EtcdInfo(['machine1'])
    get_store.return_value.health.return_value = {'type': 'mock'}

    # When: I get the health of external services
    health_status = health.get_health(check_celery=False)

    # Then: Expected health status is returned (no celery entry)
    dict_compare(health_status, {
        'etcd': {
            'status': HEALTH_OK,
            'details': {
                'machines': ['machine1']
            }
        },
        'store': {
            'status': HEALTH_OK,
            'details': {
                'type': 'mock'
            }
        }
    })
def test_init_deployment(mock_time):
    """
    Should initialize deployment instance
    """
    # Given: Mock implementation for time
    mock_time.time.return_value = 0.12

    # When: I create a deployment instance
    deployment = Deployment(
        Mock(spec=Provider), Mock(spec=Environment), 'mock-app',
        template_args={
            'arg-1': 'value1',
            'arg_2': 'value2'
        })

    # Then: Deployment gets initialized as expected
    eq_(deployment.nodes, 1)
    eq_(deployment.version, '120')
    dict_compare(deployment.template_args, {
        'name': 'mock-app',
        'version': '120',
        'service_type': 'app',
        'arg_1': 'value1',
        'arg_2': 'value2'
    })
    eq_(deployment.service_name_prefix, 'mock-app-120-app')
    eq_(deployment.template_name, '[email protected]')
def test_update_runtime_units(self):
    # Given: Units that need to be recorded for a deployment
    units = [
        {
            'name': 'unit1',
            'machine': 'machine1',
            'active': 'active',
            'sub': 'dead'
        }
    ]

    # When: I update runtime units for an existing deployment
    self.store.update_runtime_units('test-deployment1-v1', units)

    # Then: Runtime units and modified date are stored on the deployment
    deployment = self._get_raw_document_without_internal_id(
        'test-deployment1-v1')
    expected_deployment = dict_merge(deployment, {
        'runtime': {
            'units': units
        },
        'modified': NOW,
    })
    dict_compare(deployment, expected_deployment)
def test_massage_config():
    """
    Should massage configuration as required by search.
    """
    # Given: Configuration that needs to be massaged.
    config = {
        'key1': 'value1',
        'key2': {
            'value': 'value2',
            'encrypted': True
        },
        'key3': [
            {
                'key3.1': {
                    'value': 'value3.1'
                }
            }
        ]
    }

    # When: I massage the config
    result = massage_config(config)

    # Then: Encrypted values are blanked and value wrappers are unwrapped
    dict_compare(result, {
        'key1': 'value1',
        'key2': '',
        'key3': [
            {
                'key3.1': 'value3.1'
            }
        ]
    })
def test_create_new_job(self):
    # Given: Job that needs to be created
    job = {
        'meta-info': {
            'git': {
                'owner': 'find-create-owner',
                'repo': 'find-create-repo',
                'ref': 'find-create-ref',
                'commit': 'find-create-commit'
            },
            'job-id': 'find-create-job-id'
        },
        'state': JOB_STATE_NEW
    }

    # When: I execute find or create for non existing job
    self.store.update_job(job)

    # Then: A new job gets created with audit fields populated
    created_job = self._get_raw_document_without_internal_id(
        'find-create-job-id')
    expected_job = dict_merge({
        'modified': NOW,
        '_expiry': NOW
    }, job)
    dict_compare(created_job, expected_job)
def test_fetch_units_matching_with_multiple_match(mock_run):
    """
    Should return all units matching the given service prefix, skipping
    rows that can not be parsed.
    """
    # Given: Fleet provider
    provider = _get_fleet_provider()

    # And: Existing units for given service prefix (including one
    # malformed row that must be ignored)
    # NOTE(review): rows are newline separated — reconstructed here as the
    # parser matches units per line.
    mock_run.return_value = '''[email protected] 442337f12da14ad7830cda843079730b/10.249.0.235 active running
[email protected] 0a5239ec591e4981905c792e99341f03/10.229.23.106 activating start-pre
invalidrow
'''  # noqa

    # When: I fetch units matching the service prefix
    units = list(provider.fetch_units_matching('cluster-deployer-develop-'))

    # Then: Both valid units are returned and the invalid row is skipped
    eq_(len(units), 2,
        'Expecting 2 units to be returned. Found: %d' % len(units))
    dict_compare(units[0], {
        'unit': '*****@*****.**',
        'machine': '442337f12da14ad7830cda843079730b/10.249.0.235',
        'active': 'active',
        'sub': 'running'
    })
    dict_compare(units[1], {
        'unit': '*****@*****.**',
        'machine': '0a5239ec591e4981905c792e99341f03/10.229.23.106',
        'active': 'activating',
        'sub': 'start-pre'
    })
def test_add_event(self):
    # When: I add event to mongo store
    self.store.add_event(
        'MOCK_EVENT',
        details={'mock': 'details'},
        search_params={
            'meta-info': {
                'mock': 'search'
            }
        })

    # Then: Event gets added as expected
    event = self.store._events.find_one({'type': 'MOCK_EVENT'})
    del event['_id']
    dict_compare(event, {
        'component': 'orchestrator',
        'type': 'MOCK_EVENT',
        'date': NOW,
        'meta-info': {
            'mock': 'search'
        },
        'details': {
            'mock': 'details'
        }
    })
def test_clone_deployment(m_uuid):
    """
    Should clone existing deployment, dropping the version and assigning
    a fresh job id.
    """
    # Given: New job id
    m_uuid.return_value = 'new-job-id'

    # When: I clone existing deployment
    cloned = clone_deployment({
        'deployment': {
            'name': 'mock',
            'version': 'v1'
        },
        'meta-info': {
            'job-id': 'old-job-id'
        }
    })

    # Then: Expected cloned deployment is created
    dict_compare(cloned, {
        'deployment': {
            'name': 'mock'
        },
        'meta-info': {
            'job-id': 'new-job-id'
        }
    })
def test_sync_units(m_filter_units, m_get_store):
    # Given: Existing deployment
    m_get_store.return_value.get_deployment.return_value = {
        'deployment': {
            'name': 'test',
            'version': 'v1',
            'mode': DEPLOYMENT_MODE_BLUEGREEN
        }
    }

    # And: Discovered Nodes
    m_filter_units.return_value = [{
        'name': 'app-unit'
    }]

    # When: I synchronize units for existing deployment
    ret_value = sync_units('mock')

    # Then: Units are synchronized as expected
    dict_compare(ret_value, {
        'deployment_id': 'mock',
        'state': 'success',
        'units': m_filter_units.return_value
    })
def test_sync_upstreams_with_error_fetching_nodes(
        m_get_discovered_nodes, m_get_store):
    # Given: Existing deployment
    m_get_store.return_value.get_deployment.return_value = {
        'deployment': {
            'name': 'test',
            'version': 'v1',
            'mode': DEPLOYMENT_MODE_BLUEGREEN
        },
        'proxy': {
            'hosts': {
                'mock-host': {
                    'locations': {
                        'home': {
                            'port': 8090
                        }
                    }
                }
            }
        }
    }

    # And: Error raised while discovering nodes
    m_get_discovered_nodes.side_effect = Exception('Mock')

    # When: I synchronize upstreams for existing deployment
    ret_value = sync_upstreams('mock')

    # Then: Failed state with the error message is returned
    dict_compare(ret_value, {
        'deployment_id': 'mock',
        'state': 'failed',
        'error': 'Mock'
    })
def test_evaluate_config_with_no_deployers():
    """
    Should evaluate templated config values against variables and
    defaults, producing an empty deployers section.
    :return: None
    """
    # Given: Config that needs to be evaluated
    config = {
        'variables': {
            'var1': 'value1',
            'var2': {
                'value': '{{var1}}-var2value',
                'template': True,
                'priority': 2,
            },
        },
        'key1': {
            'value': 'test-{{var1}}-{{var2}}-{{var3}}',
            'template': True
        }
    }

    # When: I evaluate the config
    result = service.evaluate_config(config, {
        'var1': 'default1',
        'var2': 'default2',
        'var3': 'default3'
    })

    # Then: Expected config is returned
    dict_compare(result, {
        'key1': 'test-value1-value1-var2value-default3',
        'deployers': {}
    })
def test_init_deployment(mock_time):
    """
    Should initialize deployment instance
    """
    # Given: Mock implementation for time
    mock_time.time.return_value = 0.12

    # When: I create a deployment instance
    deployment = Deployment(
        Mock(spec=Provider), Mock(spec=Environment), 'mock-app',
        template_args={
            'arg-1': 'value1',
            'arg_2': 'value2'
        })

    # Then: Deployment gets initialized as expected
    eq_(deployment.nodes, 1)
    eq_(deployment.version, '120')
    dict_compare(deployment.template_args, {
        'name': 'mock-app',
        'version': '120',
        'service_type': 'app',
        'arg_1': 'value1',
        'arg_2': 'value2'
    })
    eq_(deployment.service_name_prefix, 'mock-app-120-app')
    eq_(deployment.template_name, '[email protected]')
def test_filter_deployments_with_state(self):
    # When: I filter deployments from the store with given state
    deployments = self.store.filter_deployments(state=DEPLOYMENT_STATE_NEW)

    # Then: Expected deployments are returned
    eq_(len(deployments), 1)
    dict_compare(deployments[0],
                 EXISTING_DEPLOYMENTS['test-deployment1-v2'])
def test_filter_all_jobs(self):
    # When: I filter jobs from the store with no criteria
    jobs = self.store.filter_jobs()

    # Then: All jobs are returned
    eq_(len(jobs), 2)
    dict_compare(jobs[0], EXISTING_JOBS['job-1'])
    dict_compare(jobs[1], EXISTING_JOBS['job-2'])
def test_transform_string_values():
    """
    Should transform string values inside config as expected.
    :return:
    """
    # Given: Config that needs to be transformed
    config = {
        'key1': 'value1',
        'port': 1212,
        'enabled': 'True',
        'nested-port-key': {
            'port': u'2321',
            'nodes': u'12',
            'min-nodes': '13',
            'enabled': 'False',
            'force-ssl': 'true'
        },
        'array-config': [
            {
                'port': '123',
                'nodes': '13',
                'min-nodes': '14',
                'attempts': '10',
                'enabled': False
            },
            'testval'
        ],
        'null-key': None
    }

    # When: I transform string values in config
    result = service.transform_string_values(config)

    # Then: Numeric and boolean strings are converted recursively
    dict_compare(result, {
        'key1': 'value1',
        'port': 1212,
        'enabled': True,
        'nested-port-key': {
            'port': 2321,
            'nodes': 12,
            'min-nodes': 13,
            'enabled': False,
            'force-ssl': True
        },
        'array-config': [
            {
                'port': 123,
                'nodes': 13,
                'min-nodes': 14,
                'attempts': 10,
                'enabled': False
            },
            'testval'
        ],
        'null-key': None
    })
def test_filter_deployments_with_excluded_names(self):
    # When: I filter deployments from the store excluding given names
    deployments = self.store.filter_deployments(
        exclude_names=('test-deployment1',))

    # Then: Only deployments whose name is not excluded are returned
    eq_(len(deployments), 1)
    dict_compare(deployments[0],
                 EXISTING_DEPLOYMENTS['test-deployment2-v2'])
def test_filter_deployments_with_version(self):
    # When: I filter deployments from the store with given version
    deployments = self.store.filter_deployments(
        'test-deployment1', version='v1')

    # Then: Expected deployments are returned
    eq_(len(deployments), 1)
    dict_compare(deployments[0],
                 EXISTING_DEPLOYMENTS['test-deployment1-v1'])
def test_get_job(self):
    # When: I get an existing job
    job = self.store.get_job('job-1')

    # Then: Expected job is returned without the internal expiry field
    expected_job = copy.deepcopy(EXISTING_JOBS['job-1'])
    del expected_job['_expiry']
    dict_compare(job, expected_job)
def test_filter_deployment_ids(self):
    # When: I filter deployments from the store for ids only
    deployments = self.store.filter_deployments(
        'test-deployment1', only_ids=True)

    # Then: Expected deployment ids are returned
    eq_(len(deployments), 2)
    dict_compare(deployments[0], {'id': 'test-deployment1-v1'})
    dict_compare(deployments[1], {'id': 'test-deployment1-v2'})
def test_get_nodes__with_meta_for_non_existing_upstream(self):
    # Given: Upstream that does not exist in etcd (read raises KeyError)
    self.etcd_cl.read.side_effect = KeyError

    # When: I get nodes (with meta) for the upstream
    nodes = self.client.get_nodes_with_meta('test')

    # Then: Empty nodes dictionary is returned
    dict_compare(nodes, {})
def test_get_deployment(self):
    # When: I get an existing deployment
    deployment = self.store.get_deployment('test-deployment1-v1')

    # Then: Expected deployment is returned without the internal
    # expiry field
    expected_deployment = copy.deepcopy(
        EXISTING_DEPLOYMENTS['test-deployment1-v1'])
    del expected_deployment['_expiry']
    dict_compare(deployment, expected_deployment)
def test_filter_deployments_by_name(self):
    # When: I filter deployments from the store by name
    deployments = self.store.filter_deployments('test-deployment1')

    # Then: Both versions of the deployment are returned
    eq_(len(deployments), 2)
    dict_compare(deployments[0],
                 EXISTING_DEPLOYMENTS['test-deployment1-v1'])
    dict_compare(deployments[1],
                 EXISTING_DEPLOYMENTS['test-deployment1-v2'])
def test_load(self):
    """
    Should read config from GithubConfigProvider
    """
    # When: I load config using provider
    ret_value = self.provider.load(
        'totem.yml', 'local', 'totem', 'cluster-orchestrator', 'develop')

    # Then: Config gets loaded
    dict_compare(ret_value, MOCK_CONFIG)
def test_load_for_partial_path(self, m_get):
    """
    Should return empty config from GithubConfigProvider for partial path
    """
    # When: I load config using provider with a partial path
    ret_value = self.provider.load(
        'totem.yml', 'totem', 'cluster-orchestrator')

    # Then: Empty config is returned
    dict_compare(ret_value, {})
def test_filter_jobs_by_criteria(self):
    # When: I filter jobs from the store by full git criteria and state
    jobs = self.store.filter_jobs(
        owner=EXISTING_JOBS['job-1']['meta-info']['git']['owner'],
        repo=EXISTING_JOBS['job-1']['meta-info']['git']['repo'],
        ref=EXISTING_JOBS['job-1']['meta-info']['git']['ref'],
        commit=EXISTING_JOBS['job-1']['meta-info']['git']['commit'],
        state_in=[JOB_STATE_NEW, JOB_STATE_SCHEDULED]
    )

    # Then: Only the matching job is returned
    eq_(len(jobs), 1)
    dict_compare(jobs[0], EXISTING_JOBS['job-1'])
def test_to_dict_for_orchestrator_error():
    # Given: Instance of OrchestratorError
    error = OrchestratorError('mockerror')

    # When: I create dict representation for exception
    result = error.to_dict()

    # Then: Expected representation is returned
    dict_compare(result, {
        'message': 'mockerror',
        'code': 'ORCHESTRATOR_ERROR',
        'details': {}
    })
def test_load_for_non_existing_path(self, m_get):
    """
    Should return empty config when config is not found in github
    """
    # Given: Non-existing config in github (404 response)
    m_get.return_value.status_code = 404

    # When: I load config using provider
    ret_value = self.provider.load(
        'totem.yml', 'totem', 'cluster-orchestrator', 'develop')

    # Then: Empty config gets loaded
    dict_compare(ret_value, {})
def test_sync_upstreams(m_get_discovered_nodes, m_get_store):
    # Given: Existing deployment
    m_get_store.return_value.get_deployment.return_value = {
        'deployment': {
            'name': 'test',
            'version': 'v1',
            'mode': DEPLOYMENT_MODE_BLUEGREEN
        },
        'proxy': {
            'hosts': {
                'mock-host': {
                    'locations': {
                        'home': {
                            'port': 8090
                        }
                    }
                }
            }
        }
    }

    # And: Discovered Nodes
    m_get_discovered_nodes.return_value = {
        'upstream1': {
            'endpoints': {
                'endpoint1': {
                    'endpoint': 'host1:8091',
                }
            }
        }
    }

    # When: I synchronize upstreams for existing deployment
    ret_value = sync_upstreams('mock')

    # Then: Discovered nodes are grouped by location port
    dict_compare(ret_value, {
        'deployment_id': 'mock',
        'state': 'success',
        'upstreams': {
            '8090': [{
                'endpoints': {
                    'endpoint1': {
                        'endpoint': 'host1:8091',
                    }
                },
                'name': 'upstream1'
            }]
        }
    })
def test_update_state(self):
    # When: I update state for an existing job
    self.store.update_state('job-1', JOB_STATE_FAILED)

    # Then: Job state is changed to failed and audit fields are updated
    job = self._get_raw_document_without_internal_id('job-1')
    expected_job = dict_merge({
        '_expiry': NOW,
        'modified': NOW,
        'state': JOB_STATE_FAILED
    }, EXISTING_JOBS['job-1'])
    dict_compare(job, expected_job)
def test_update_existing_job(self):
    # When: I execute find or create for existing job
    self.store.update_job(dict_merge({
        'state': JOB_STATE_SCHEDULED
    }, EXISTING_JOBS['job-1']))

    # Then: Existing job is updated in place with new state and
    # audit fields
    updated_job = self._get_raw_document_without_internal_id('job-1')
    expected_job = dict_merge({
        'state': JOB_STATE_SCHEDULED,
        '_expiry': NOW,
        'modified': NOW,
    }, EXISTING_JOBS['job-1'])
    dict_compare(updated_job, expected_job)