def test_rate_logger_length_one(self, traptor):
    traptor.kafka_rate['test'] = deque([2])
    traptor.twitter_rate['test'] = deque([2])
    traptor.logger = MagicMock()
    traptor._log_rates(4.0, 4.0)
    assert traptor.logger.method_calls == [
        call.info('Twitter Rate', extra={
            'count': 1,
            'tags': ['traptor_type:None', 'traptor_id:None'],
            'rule_value': 'test',
            'max_tps': 1.0,
            'component': 'traptor',
            'min_tps': 0.0,
            'average_tps': 0.25,
            'traptor_version': version.__version__
        }),
        call.info('Kafka Rate', extra={
            'count': 1,
            'tags': ['traptor_type:None', 'traptor_id:None'],
            'rule_value': 'test',
            'max_tps': 1.0,
            'component': 'traptor',
            'min_tps': 0.0,
            'average_tps': 0.25,
            'traptor_version': version.__version__
        })
    ]

def test_remove_hosts_from_graphite_should_log_dirs_removed(logger):
    reset_tmpdir()
    current_hosts = ['server-1', 'server-2', 'server-3']
    old_hosts = ['server-4', 'server-5']
    create_stats('server', instance_id='1')
    create_stats('server', instance_id='2')
    create_stats('server', instance_id='3')
    expected_deleted_paths = [
        create_stats('server', instance_id='4'),
        create_stats('server', instance_id='5'),
    ]
    remove_hosts_from_graphite(
        '{tmpdir}/graphite/storage'.format(tmpdir=tmpdir),
        match='*server*',
        keep='1 2 3'.split())
    expected_calls = [
        call.info('Removing old graphite directory: {}'.format(
            expected_deleted_paths[0])),
        call.info('Removing old graphite directory: {}'.format(
            expected_deleted_paths[1]))
    ]
    logger.info.assert_has_calls(expected_calls)

def test_run_in_regions_dryrun_skip_some(self):
    m_conf = Mock(spec_set=ManheimConfig)
    m_conf_r1 = Mock(spec_set=ManheimConfig)
    m_conf_r2 = Mock(spec_set=ManheimConfig)
    m_conf_r3 = Mock(spec_set=ManheimConfig)

    def se_conf_for_region(rname):
        if rname == 'r1':
            return m_conf_r1
        if rname == 'r2':
            return m_conf_r2
        if rname == 'r3':
            return m_conf_r3

    m_conf.config_for_region.side_effect = se_conf_for_region
    with patch('%s.logger' % pbm, autospec=True) as mock_logger:
        with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
            mock_cff.return_value = m_conf
            runner.CustodianRunner('acctName')._run_step_in_regions(
                'dryrun', self.cls2, ['r2', 'r3'])
    assert self.cls2.mock_calls == [
        call.run_in_region('r2', m_conf_r2),
        call.run_in_region('r3', m_conf_r3),
        call('r3', m_conf_r3),
        call().dryrun()
    ]
    assert m_conf.config_for_region.mock_calls == [call('r2'), call('r3')]
    assert mock_logger.mock_calls == [
        call.info(bold('SKIPPING Step cls2 in REGION 1 of 2 (r2)')),
        call.info(bold('Step cls2 in REGION 2 of 2 (r3)'))
    ]

def test_dispatch(self, mock_logger):
    """Alert Merger - Dispatch to Alert Processor Lambda"""
    self.merger.table.add_alerts([
        # An alert without any merge criteria
        Alert('no_merging', {}, {'output'}),

        # 2 Alerts which will be merged (and will be too large to send the entire record)
        Alert('merge_me', {'key': True}, {'output'},
              created=datetime(year=2000, month=1, day=1),
              merge_by_keys=['key'], merge_window=timedelta(minutes=5)),
        Alert('merge_me', {'key': True, 'other': 'abc' * 50}, {'output'},
              created=datetime(year=2000, month=1, day=1, minute=1),
              merge_by_keys=['key'], merge_window=timedelta(minutes=5)),

        # Alert which has already sent successfully (will be deleted)
        Alert('already_sent', {}, {'output'}, outputs_sent={'output'})
    ])

    self.merger.dispatch()
    mock_logger.assert_has_calls([
        call.info('Merged %d alerts into a new alert with ID %s', 2, ANY),
        call.info('Dispatching %s to %s (attempt %d)', ANY, _ALERT_PROCESSOR, 1),
        call.info('Dispatching %s to %s (attempt %d)', ANY, _ALERT_PROCESSOR, 1)
    ])

def test_update_services_no_ec2(self):
    mock_autoscale = Mock(spec_set=_AwsService)
    mock_vpc = Mock(spec_set=_AwsService)
    services = {
        'AutoScaling': mock_autoscale,
        'VPC': mock_vpc,
    }
    ta_results = {
        'AutoScaling': {
            'foo': 20,
            'bar': 40,
        },
        'EC2': {
            'baz': 5,
        },
        'VPC': {
            'VPC Elastic IP addresses (EIPs)': 11,
        }
    }
    with patch('awslimitchecker.trustedadvisor'
               '.logger', autospec=True) as mock_logger:
        self.cls._update_services(ta_results, services)
    assert mock_logger.mock_calls == [
        call.debug("Updating TA limits on all services"),
        call.info(
            "TrustedAdvisor returned check results for unknown "
            "service '%s'", 'EC2'),
        call.info("Done updating TA limits on all services"),
    ]
    assert mock_autoscale.mock_calls == [
        call._set_ta_limit('bar', 40),
        call._set_ta_limit('foo', 20),
    ]
    assert mock_vpc.mock_calls == []

def test_update_services(self):
    mock_as_foo = Mock(spec_set=AwsLimit)
    mock_as_bar = Mock(spec_set=AwsLimit)
    mock_ec2_baz = Mock(spec_set=AwsLimit)
    mock_vpc = Mock(spec_set=AwsLimit)
    ta_services = {
        'AutoScaling': {
            'foo': mock_as_foo,
            'bar': mock_as_bar
        },
        'EC2': {
            'baz': mock_ec2_baz
        },
        'VPC': {
            'VPC Elastic IP addresses (EIPs)': mock_vpc
        },
    }
    ta_results = {
        'AutoScaling': {
            'foo': 20,
            'bar': 40,
        },
        'EC2': {
            'baz': 5,
            'blam': 10,
        },
        'OtherService': {
            'blarg': 1,
        },
        'VPC': {
            'VPC Elastic IP addresses (EIPs)': 11,
        }
    }
    with patch('awslimitchecker.trustedadvisor'
               '.logger', autospec=True) as mock_logger:
        self.cls.ta_services = ta_services
        self.cls._update_services(ta_results)
    assert mock_logger.mock_calls == [
        call.debug("Updating TA limits on all services"),
        call.info("TrustedAdvisor returned check results for unknown "
                  "limit '%s' (service %s)", 'blam', 'EC2'),
        call.info("TrustedAdvisor returned check results for unknown "
                  "service '%s'", 'OtherService'),
        call.info("Done updating TA limits on all services"),
    ]
    assert mock_as_foo.mock_calls == [
        call._set_ta_limit(20)
    ]
    assert mock_as_bar.mock_calls == [
        call._set_ta_limit(40)
    ]
    assert mock_ec2_baz.mock_calls == [
        call._set_ta_limit(5)
    ]
    assert mock_vpc.mock_calls == [
        call._set_ta_limit(11)
    ]

def test_update_services(self):
    mock_as_foo = Mock(spec_set=AwsLimit)
    mock_as_bar = Mock(spec_set=AwsLimit)
    mock_ec2_baz = Mock(spec_set=AwsLimit)
    mock_ec2_blarg = Mock(spec_set=AwsLimit)
    mock_vpc = Mock(spec_set=AwsLimit)
    ta_services = {
        'AutoScaling': {
            'foo': mock_as_foo,
            'bar': mock_as_bar
        },
        'EC2': {
            'baz': mock_ec2_baz,
            'blarg': mock_ec2_blarg
        },
        'VPC': {
            'VPC Elastic IP addresses (EIPs)': mock_vpc
        },
    }
    ta_results = {
        'AutoScaling': {
            'foo': 20,
            'bar': 40,
        },
        'EC2': {
            'baz': 5,
            'blam': 10,
            'blarg': 'Unlimited',
        },
        'OtherService': {
            'blarg': 1,
        },
        'VPC': {
            'VPC Elastic IP addresses (EIPs)': 11,
        }
    }
    with patch('awslimitchecker.trustedadvisor'
               '.logger', autospec=True) as mock_logger:
        self.cls.ta_services = ta_services
        self.cls._update_services(ta_results)
    assert mock_logger.mock_calls == [
        call.debug("Updating TA limits on all services"),
        call.info(
            "TrustedAdvisor returned check results for unknown "
            "limit '%s' (service %s)", 'blam', 'EC2'),
        call.info(
            "TrustedAdvisor returned check results for unknown "
            "service '%s'", 'OtherService'),
        call.info("Done updating TA limits on all services"),
    ]
    assert mock_as_foo.mock_calls == [call._set_ta_limit(20)]
    assert mock_as_bar.mock_calls == [call._set_ta_limit(40)]
    assert mock_ec2_baz.mock_calls == [call._set_ta_limit(5)]
    assert mock_ec2_blarg.mock_calls == [call._set_ta_unlimited()]
    assert mock_vpc.mock_calls == [call._set_ta_limit(11)]

def test_none(self):
    self.cls.refresh_timeout = None
    check_dt = datetime(2016, 12, 16, hour=10, minute=30, second=12, tzinfo=utc)
    now_dt = datetime(2016, 12, 16, hour=11, minute=30, second=12, tzinfo=utc)
    statuses = [
        {'statuses': [{'status': 'none'}]},
        {'statuses': [{'status': 'enqueued'}]},
        {'statuses': [{'status': 'processing'}]},
        {'statuses': [{'status': 'none'}]}
    ]
    m_s = self.mock_conn.describe_trusted_advisor_check_refresh_statuses
    with patch('%s.logger' % pbm, autospec=True) as mock_logger:
        with patch('%s.sleep' % pbm, autospec=True) as mock_sleep:
            with patch('%s._get_check_result' % pb, autospec=True) as gcr:
                with patch('%s.datetime_now' % pbm) as mock_dt_now:
                    mock_dt_now.return_value = now_dt
                    m_s.side_effect = statuses
                    gcr.return_value = ({'foo': 'bar'}, check_dt)
                    res = self.cls._poll_for_refresh('abc123')
    assert res == {'foo': 'bar'}
    assert self.mock_conn.mock_calls == [
        call.describe_trusted_advisor_check_refresh_statuses(
            checkIds=['abc123']),
        call.describe_trusted_advisor_check_refresh_statuses(
            checkIds=['abc123']),
        call.describe_trusted_advisor_check_refresh_statuses(
            checkIds=['abc123']),
        call.describe_trusted_advisor_check_refresh_statuses(
            checkIds=['abc123'])
    ]
    assert gcr.mock_calls == [call(self.cls, 'abc123')]
    assert mock_sleep.mock_calls == [
        call(30), call(30), call(30)
    ]
    assert mock_dt_now.mock_calls == [
        call(), call(), call(), call(), call()
    ]
    assert mock_logger.mock_calls == [
        call.warning('Polling for TA check %s refresh...', 'abc123'),
        call.debug('Checking refresh status'),
        call.info('Refresh status: %s; sleeping 30s', 'none'),
        call.debug('Checking refresh status'),
        call.info('Refresh status: %s; sleeping 30s', 'enqueued'),
        call.debug('Checking refresh status'),
        call.info('Refresh status: %s; sleeping 30s', 'processing'),
        call.debug('Checking refresh status'),
        call.warning('Trusted Advisor check refresh status went '
                     'from "%s" to "%s"; refresh is either complete '
                     'or timed out on AWS side. Continuing',
                     'processing', 'none'),
        call.info('Done polling for check refresh'),
        call.debug('Check shows last refresh time of: %s', check_dt)
    ]

def test_success_all_options(self):
    m_sts = Mock()
    m_sts.assume_role.return_value = {
        'Credentials': {
            'AccessKeyId': 'AKID',
            'SecretAccessKey': 'SKey',
            'SessionToken': 'SToken',
            'Expiration': datetime(2018, 10, 8, 12, 13, 14)
        },
        'AssumedRoleUser': {
            'AssumedRoleId': 'ARid',
            'Arn': 'UserARN'
        },
        'PackedPolicySize': 123
    }
    m_sess = Mock()
    m_sess.client.return_value = m_sts
    type(self.m_conf).assume_role = PropertyMock(return_value={
        'role_arn': 'assumeRoleArn',
        'external_id': 'eID',
        'duration_seconds': '1234'
    })
    with patch('%s.logger' % pbm, autospec=True) as mock_logger:
        with patch.dict(os.environ, {}, clear=True):
            with patch('%s.boto3.session.Session' % pbm) as mock_boto:
                mock_boto.return_value = m_sess
                assume_role(self.m_conf)
                assert os.environ == {
                    'AWS_ACCESS_KEY_ID': 'AKID',
                    'AWS_SECRET_ACCESS_KEY': 'SKey',
                    'AWS_SESSION_TOKEN': 'SToken'
                }
    expected_args = {
        'RoleArn': 'assumeRoleArn',
        'RoleSessionName': 'manheim-c7n-tools_aName',
        'ExternalId': 'eID',
        'DurationSeconds': 1234
    }
    assert mock_boto.mock_calls == [
        call(region_name='us-east-1'),
        call().client('sts'),
        call().client().assume_role(**expected_args)
    ]
    assert mock_logger.mock_calls == [
        call.info(
            'Calling sts:AssumeRole via boto3 with arguments: %s',
            expected_args
        ),
        call.info(
            'Exported AssumeRole credentials; AccessKeyId %s expires at '
            '%s; AssumedRoleUser ARN: %s',
            'AKID', datetime(2018, 10, 8, 12, 13, 14), 'UserARN'
        )
    ]

def test_update_services(self):
    def se_set(lname, val):
        if lname == 'blam':
            raise ValueError("foo")

    mock_autoscale = Mock(spec_set=_AwsService)
    mock_ec2 = Mock(spec_set=_AwsService)
    mock_ec2._set_ta_limit.side_effect = se_set
    mock_vpc = Mock(spec_set=_AwsService)
    services = {
        'AutoScaling': mock_autoscale,
        'EC2': mock_ec2,
        'VPC': mock_vpc,
    }
    ta_results = {
        'AutoScaling': {
            'foo': 20,
            'bar': 40,
        },
        'EC2': {
            'baz': 5,
            'blam': 10,
        },
        'OtherService': {
            'blarg': 1,
        },
        'VPC': {
            'VPC Elastic IP addresses (EIPs)': 11,
        }
    }
    with patch('awslimitchecker.trustedadvisor'
               '.logger', autospec=True) as mock_logger:
        self.cls._update_services(ta_results, services)
    assert mock_logger.mock_calls == [
        call.debug("Updating TA limits on all services"),
        call.info(
            "TrustedAdvisor returned check results for unknown "
            "limit '%s' (service %s)", 'blam', 'EC2'),
        call.info(
            "TrustedAdvisor returned check results for unknown "
            "service '%s'", 'OtherService'),
        call.info("Done updating TA limits on all services"),
    ]
    assert mock_autoscale.mock_calls == [
        call._set_ta_limit('bar', 40),
        call._set_ta_limit('foo', 20),
    ]
    assert mock_ec2.mock_calls == [
        call._set_ta_limit('baz', 5),
        call._set_ta_limit('blam', 10),
        call._set_ta_limit('VPC Elastic IP addresses (EIPs)', 11)
    ]

def test_continous_color_cycle_ran_out(self, mock_np_random_rand, mock_messagebar):
    """Test that lines and markers can also be cycled, i.e. the product
    of the color and line cyclers."""
    color_cycler = (cycler('color', ['r', 'g']))
    line_cycler = (cycler('linestyle', ['-', '--']))
    mock_np_random_rand.side_effect = np.array(['123', '456', '789'])
    color_cycle_len = len(color_cycler)
    color_cycle = color_cycler()
    used_style_color_combo = set()
    color_line_cycle = utils.ContinousColorCycle(
        color_cycle, color_cycle_len, line_cycler, used_style_color_combo)
    res = []
    res.append(dict_to_tuple(next(color_line_cycle)))
    res.append(dict_to_tuple(next(color_line_cycle)))
    res.append(dict_to_tuple(next(color_line_cycle)))
    res.append(dict_to_tuple(next(color_line_cycle)))
    res.append(dict_to_tuple(next(color_line_cycle)))
    res.append(dict_to_tuple(next(color_line_cycle)))
    res.append(dict_to_tuple(next(color_line_cycle)))
    res = tuple(res)
    print(str(res))
    print(str(mock_messagebar.mock_calls))
    assert res == ((('color', 'r'), ('linestyle', '-')),
                   (('color', 'g'), ('linestyle', '-')),
                   (('color', 'r'), ('linestyle', '--')),
                   (('color', 'g'), ('linestyle', '--')),
                   (('color', '123'), ('linestyle', '-')),
                   (('color', '456'), ('linestyle', '--')),
                   (('color', '789'), ('linestyle', '-')))
    assert mock_messagebar.mock_calls == [
        call.info(bar_msg='Style cycler ran out of unique combinations. Using random color!'),
        call.info(bar_msg='Style cycler ran out of unique combinations. Using random color!'),
        call.info(bar_msg='Style cycler ran out of unique combinations. Using random color!')
    ]

def test_update_services(self):
    def se_set(lname, val):
        if lname == 'blam':
            raise ValueError("foo")

    mock_autoscale = Mock(spec_set=_AwsService)
    mock_ec2 = Mock(spec_set=_AwsService)
    mock_ec2._set_ta_limit.side_effect = se_set
    mock_vpc = Mock(spec_set=_AwsService)
    services = {
        'AutoScaling': mock_autoscale,
        'EC2': mock_ec2,
        'VPC': mock_vpc,
    }
    ta_results = {
        'AutoScaling': {
            'foo': 20,
            'bar': 40,
        },
        'EC2': {
            'baz': 5,
            'blam': 10,
        },
        'OtherService': {
            'blarg': 1,
        },
        'VPC': {
            'VPC Elastic IP addresses (EIPs)': 11,
        }
    }
    with patch('awslimitchecker.trustedadvisor'
               '.logger', autospec=True) as mock_logger:
        self.cls._update_services(ta_results, services)
    assert mock_logger.mock_calls == [
        call.debug("Updating TA limits on all services"),
        call.info("TrustedAdvisor returned check results for unknown "
                  "limit '%s' (service %s)", 'blam', 'EC2'),
        call.info("TrustedAdvisor returned check results for unknown "
                  "service '%s'", 'OtherService'),
        call.info("Done updating TA limits on all services"),
    ]
    assert mock_autoscale.mock_calls == [
        call._set_ta_limit('bar', 40),
        call._set_ta_limit('foo', 20),
    ]
    assert mock_ec2.mock_calls == [
        call._set_ta_limit('baz', 5),
        call._set_ta_limit('blam', 10),
        call._set_ta_limit('VPC Elastic IP addresses (EIPs)', 11)
    ]

def test_load(self):
    self._write_banks()
    self._create_dealer()
    self.dealer.load()
    self.mocked_log.assert_has_calls(
        [
            call.debug("Loading banks"),
            call.info("Loading features bank '%s'", BANK_PATH_1),
            call.info("Loading features bank '%s'", BANK_PATH_2),
        ]
    )
    self.mocked_log.warning.assert_not_called()

def test_run(self):
    def se_ras(klass):
        if mock_ras.call_count < 4:
            return None
        raise RuntimeError()

    with patch('%s.read_and_send' % pb, autospec=True) as mock_ras:
        with patch('%s.sleep' % pbm, autospec=True) as mock_sleep:
            with patch('%s.logger' % pbm, autospec=True) as mock_logger:
                mock_ras.side_effect = se_ras
                with pytest.raises(RuntimeError):
                    self.cls.run()
    assert mock_ras.mock_calls == [
        call(self.cls),
        call(self.cls),
        call(self.cls),
        call(self.cls)
    ]
    assert mock_sleep.mock_calls == [
        call(60.0), call(60.0), call(60.0)
    ]
    assert mock_logger.mock_calls == [
        call.info('Running sensor daemon loop...'),
        call.debug('Sleeping %ss', 60.0),
        call.debug('Sleeping %ss', 60.0),
        call.debug('Sleeping %ss', 60.0)
    ]

def test_update_limits_from_api_low_max_instances(self):
    fixtures = result_fixtures.VPC()
    response = fixtures.test_update_limits_from_api_low_max_instances
    mock_conn = Mock()
    mock_client_conn = Mock()
    mock_client_conn.describe_account_attributes.return_value = response
    cls = _VpcService(21, 43)
    cls.resource_conn = mock_conn
    cls.conn = mock_client_conn
    with patch('awslimitchecker.services.vpc.logger') as mock_logger:
        cls._update_limits_from_api()
    assert mock_conn.mock_calls == []
    assert mock_client_conn.mock_calls == [
        call.describe_account_attributes()
    ]
    assert mock_logger.mock_calls == [
        call.info("Querying EC2 DescribeAccountAttributes for limits"),
        call.debug('Done setting limits from API')
    ]
    limit_name = 'Network interfaces per Region'
    assert cls.limits[limit_name].api_limit is None
    assert cls.limits[limit_name].get_limit() == DEFAULT_ENI_LIMIT

def test_healthcheck_no_active(self):
    type(self.mock_redir).active_node_ip_port = None
    type(self.mock_redir).log_enabled = True
    if sys.version_info[0] < 3:
        type(self.mock_request).uri = '/vault-redirector-health'
        type(self.mock_request).path = '/vault-redirector-health'
        expected = 'foobar'
        expected_msg = 'No Active Vault'
    else:
        # in py3 these are byte literals
        type(self.mock_request).uri = b'/vault-redirector-health'
        type(self.mock_request).path = b'/vault-redirector-health'
        expected = b'foobar'
        expected_msg = b'No Active Vault'
    with patch('%s.logger' % pbm) as mock_logger:
        with patch('%s.VaultRedirectorSite.status_response' % pbm
                   ) as mock_status:
            mock_status.return_value = 'foobar'
            resp = self.cls.healthcheck(self.mock_request)
    assert self.mock_request.mock_calls == [
        call.setResponseCode(503, message=expected_msg),
        call.setHeader("Content-Type", "application/json")
    ]
    assert resp == expected
    assert mock_logger.mock_calls == [
        call.info('RESPOND %d for %s%s request for /vault-redirector-health'
                  ' from %s:%s', 503, '', 'GET', '1.2.3.4', 12345)
    ]

def test_run_invalid_region_name(self):
    m_conf = Mock(spec_set=ManheimConfig)
    type(m_conf).regions = PropertyMock(return_value=['r1', 'r2', 'r3'])
    with patch('%s.CustodianRunner.ordered_step_classes' % pbm, self.steps):
        with patch.multiple(
            '%s.CustodianRunner' % pbm,
            autospec=True,
            _steps_to_run=DEFAULT,
            _run_step_in_regions=DEFAULT,
            _validate_account=DEFAULT
        ) as mocks:
            mocks['_steps_to_run'].return_value = [self.cls2, self.cls3]
            with patch('%s.logger' % pbm, autospec=True) as mock_logger:
                with patch('%s.ManheimConfig.from_file' % pbm) as mock_cff:
                    mock_cff.return_value = m_conf
                    with pytest.raises(RuntimeError) as exc:
                        cls = runner.CustodianRunner('acctName')
                        cls.run(
                            'dryrun',
                            regions=['notValid'],
                            step_names=['cls2', 'cls3', 'cls4'],
                            skip_steps=['cls4']
                        )
    assert str(exc.value) == 'ERROR: All specified region names must be ' \
                             'listed in the "regions" section of the ' \
                             'config file (manheim-c7n-tools.yml)'
    assert mocks['_steps_to_run'].mock_calls == [
        call(cls, ['cls2', 'cls3', 'cls4'], ['cls4'])
    ]
    assert mocks['_run_step_in_regions'].mock_calls == []
    assert self.cls1.mock_calls == []
    assert self.cls2.mock_calls == []
    assert self.cls3.mock_calls == []
    assert self.cls4.mock_calls == []
    assert mock_logger.mock_calls == [
        call.info(bold('Beginning dryrun - 2 of 4 steps selected'))
    ]
    assert mocks['_validate_account'].mock_calls == [call(cls)]

def test_clone_repo_dry_run(self):
    type(self.bi).ssh_clone_url = 'ssh_url'
    type(self.bi).https_clone_url = 'https_url'
    self.cls.dry_run = True
    ex = Exception('foo')

    def se_clone(url, path, branch=None):
        if url == 'ssh_url':
            raise ex
        return True

    with patch('%s.path_for_repo' % pb) as mock_path, \
            patch('%s.Repo' % pbm) as mock_repo, \
            patch('%s.logger' % pbm) as mock_logger:
        mock_path.return_value = '/repo/path'
        mock_repo.clone_from.side_effect = se_clone
        res = self.cls.clone_repo()
    assert mock_path.mock_calls == [call()]
    assert mock_repo.mock_calls == []
    assert mock_logger.mock_calls == [
        call.debug("Cloning %s branch %s into: %s",
                   'my/repo', 'master', '/repo/path'),
        call.info("DRY RUN - not actually cloning %s into %s",
                  'my/repo', '/repo/path')
    ]
    assert res == ('/repo/path', '(DRY RUN)')

def test_render(self):
    expected_location = 'http://mynode:1234/foo/bar'
    expected_server = 'vault-redirector/%s/TwistedWeb/16.1.0' % _VERSION
    if sys.version_info[0] < 3:
        type(self.mock_request).uri = '/foo/bar'
        type(self.mock_request).path = '/foo/bar'
    else:
        # in py3 these are byte literals
        type(self.mock_request).uri = b'/foo/bar'
        type(self.mock_request).path = b'/foo/bar'
    self.mock_request.reset_mock()
    with patch('%s.logger' % pbm) as mock_logger:
        resp = self.cls.render(self.mock_request)
    assert self.mock_request.mock_calls == [
        call.setHeader('server', expected_server),
        call.setResponseCode(307),
        call.setHeader('Location', expected_location),
        call.setHeader("Content-Type", "text/html; charset=UTF-8")
    ]
    assert resp == self.empty_resp
    assert mock_logger.mock_calls == [
        call.info('RESPOND 307 to %s for %s%s request for %s from %s:%s',
                  expected_location, '', 'GET', '/foo/bar', '1.2.3.4', 12345)
    ]

def test_discover_owfs(self):
    self.cls.owfs_paths = ['/foo', '/bar', '/baz']

    def se_exists(path):
        if path.startswith('/baz'):
            return True
        if path == '/bar':
            return True
        return False

    with patch('%s.logger' % pbm, autospec=True) as mock_logger:
        with patch('%s.os.path.exists' % pbm, autospec=True) as mock_ex:
            mock_ex.side_effect = se_exists
            res = self.cls._discover_owfs()
    assert res == '/baz'
    assert mock_ex.mock_calls == [
        call('/foo'),
        call('/bar'),
        call('/bar/settings/units/temperature_scale'),
        call('/baz'),
        call('/baz/settings/units/temperature_scale')
    ]
    assert mock_logger.mock_calls == [
        call.debug('Attempting to find OWFS path/mountpoint from list '
                   'of common options: %s', ['/foo', '/bar', '/baz']),
        call.debug('Path %s does not exist; skipping', '/foo'),
        call.debug('Path %s exists but does not appear to have OWFS '
                   'mounted', '/bar'),
        call.info('Found OWFS mounted at: %s', '/baz')
    ]

def test_run(self):
    def se_ras(klass):
        if mock_ras.call_count < 4:
            return None
        raise RuntimeError()

    with patch('%s.read_and_send' % pb, autospec=True) as mock_ras:
        with patch('%s.sleep' % pbm, autospec=True) as mock_sleep:
            with patch('%s.logger' % pbm, autospec=True) as mock_logger:
                mock_ras.side_effect = se_ras
                with pytest.raises(RuntimeError):
                    self.cls.run()
    assert mock_ras.mock_calls == [
        call(self.cls),
        call(self.cls),
        call(self.cls),
        call(self.cls)
    ]
    assert mock_sleep.mock_calls == [call(60.0), call(60.0), call(60.0)]
    assert mock_logger.mock_calls == [
        call.info('Running sensor daemon loop...'),
        call.debug('Sleeping %ss', 60.0),
        call.debug('Sleeping %ss', 60.0),
        call.debug('Sleeping %ss', 60.0)
    ]

def test_connect_resource(self):
    mock_conn = Mock()
    mock_meta = Mock()
    mock_client = Mock()
    mock_cc = Mock()
    type(mock_cc).region_name = 'myregion'
    type(mock_client)._client_config = mock_cc
    type(mock_meta).client = mock_client
    type(mock_conn).meta = mock_meta
    cls = ConnectableTester()
    cls.api_name = 'myapi'
    kwargs = {'foo': 'fooval', 'bar': 'barval'}
    with patch('%s._boto3_connection_kwargs' % pb,
               new_callable=PropertyMock) as mock_kwargs:
        mock_kwargs.return_value = kwargs
        with patch('%s.logger' % pbm) as mock_logger:
            with patch('%s.boto3.resource' % pbm) as mock_resource:
                mock_resource.return_value = mock_conn
                cls.connect_resource()
    assert mock_kwargs.mock_calls == [call()]
    assert mock_logger.mock_calls == [
        call.info("Connected to %s (resource) in region %s",
                  'myapi', 'myregion')
    ]
    assert mock_resource.mock_calls == [
        call(
            'myapi',
            foo='fooval',
            bar='barval'
        )
    ]
    assert cls.resource_conn == mock_resource.return_value

def test_poll_dont_have_ta(self):
    self.cls.have_ta = False
    tmp = self.mock_conn.describe_trusted_advisor_check_result
    with patch('%s._get_limit_check_id' % pb, autospec=True) as mock_id:
        with patch('awslimitchecker.trustedadvisor'
                   '.logger', autospec=True) as mock_logger:
            res = self.cls._poll()
    assert tmp.mock_calls == []
    assert mock_id.mock_calls == [
        call(self.cls)
    ]
    assert mock_logger.mock_calls == [
        call.info('Beginning TrustedAdvisor poll'),
        call.info('TrustedAdvisor.have_ta is False; not polling TA')
    ]
    assert res == {}

def test_load_default_file(self):
    default_file = '''{'Axes_set_ylabel': {'fontsize': 10}, 'wlevels_Axes_plot': {'DEFAULT': {'marker': 'v', 'markersize': 6, 'linewidth': 1, 'linestyle': '-'}}, 'geology_Axes_bar': {'edgecolor': 'black'}, 'obsid_Axes_bar': {'edgecolor': 'black', 'linewidth': 0.5, 'fill': False}, 'dems_Axes_plot': {'DEFAULT': {'marker': 'None', 'linewidth': 1, 'linestyle': '-'}}, 'Axes_set_xlabel': {'fontsize': 10}, 'Axes_set_ylim': None, 'plot_width': None, 'grid_Axes_grid': {'color': '0.65', 'b': True, 'linestyle': '-', 'which': 'both'}, 'legend_Axes_legend': {'loc': 0, 'framealpha': 1, 'fontsize': 10}, 'legend_Frame_set_fill': False, 'plot_height': None, 'layer_Axes_annotate': {'va': 'center', 'xytext': (5, 0), 'fontsize': 9, 'bbox': {'alpha': 0.6, 'fc': 'white', 'boxstyle': 'square,pad=0.05', 'edgecolor': 'white'}, 'ha': 'left', 'textcoords': 'offset points'}, 'ticklabels_Text_set_fontsize': {'fontsize': 10}, 'legend_Text_set_fontsize': 10, 'Figure_subplots_adjust': {}, 'Axes_set_xlim': None, 'obsid_Axes_annotate': {'va': 'top', 'xytext': (0, 10), 'fontsize': 9, 'bbox': {'alpha': 0.4, 'fc': 'white', 'boxstyle': 'square,pad=0.05', 'edgecolor': 'white'}, 'rotation': 0, 'ha': 'center', 'textcoords': 'offset points'}, 'drillstop_Axes_plot': {'color': 'black', 'marker': '^', 'markersize': 8, 'linestyle': ''}, 'legend_Frame_set_facecolor': '1'}'''
    as_dict = ast.literal_eval(default_file)
    self.midvatten.ms.settingsdict['secplot_loaded_template'] = ''
    with utils.tempinput(default_file, 'utf-8') as f1:
        @mock.patch('midvatten_utils.MessagebarAndLog')
        @mock.patch('os.path.join')
        def _test(self, filename, mock_join, mock_messagebar):
            mock_join.return_value = filename
            self.midvatten.ms.settingsdict['secplot_templates'] = filename
            secplottemplates = PlotTemplates(
                self.sectionplot, self.template_list, self.edit_button,
                self.load_button, self.save_as_button, self.import_button,
                self.remove_button, self.template_folder,
                'secplot_templates', 'secplot_loaded_template',
                defs.secplot_default_template(), self.midvatten.ms)
            return secplottemplates, mock_messagebar

        secplottemplates, mock_messagebar = _test(self, f1)
        assert call.info(
            log_msg='Loaded template from default template file.'
        ) in mock_messagebar.mock_calls
        assert utils.anything_to_string_representation(
            secplottemplates.loaded_template
        ) == utils.anything_to_string_representation(as_dict)

def test_add_view_obs_points_obs_lines_add(self, mock_messagebar):
    db_utils.sql_alter_db('''DROP VIEW IF EXISTS view_obs_points;''')
    db_utils.sql_alter_db('''DROP VIEW IF EXISTS view_obs_lines;''')
    db_utils.sql_alter_db(
        '''DELETE FROM views_geometry_columns WHERE view_name IN ('view_obs_points', 'view_obs_lines');'''
    )
    assert not any([
        db_utils.verify_table_exists('view_obs_points'),
        db_utils.verify_table_exists('view_obs_lines')
    ])
    views_geometry_columns = db_utils.sql_load_fr_db(
        '''SELECT view_name FROM views_geometry_columns WHERE view_name IN ('view_obs_points', 'view_obs_lines') ORDER BY view_name;'''
    )[1]
    print(str(views_geometry_columns))
    assert views_geometry_columns == []

    utils.add_view_obs_points_obs_lines()
    print(str(mock_messagebar.mock_calls))
    assert call.info(
        bar_msg='Views added. Please reload layers (Midvatten>Load default db-layers to qgis or "F7").'
    ) in mock_messagebar.mock_calls
    assert all([
        db_utils.verify_table_exists('view_obs_points'),
        db_utils.verify_table_exists('view_obs_lines')
    ])
    views_geometry_columns = db_utils.sql_load_fr_db(
        '''SELECT view_name FROM views_geometry_columns WHERE view_name IN ('view_obs_points', 'view_obs_lines') ORDER BY view_name;'''
    )[1]
    print(str(views_geometry_columns))
    assert views_geometry_columns == [('view_obs_lines', ), ('view_obs_points', )]

def test_connect_resource(self):
    mock_conn = Mock()
    mock_meta = Mock()
    mock_client = Mock()
    mock_cc = Mock()
    type(mock_cc).region_name = 'myregion'
    type(mock_client)._client_config = mock_cc
    type(mock_meta).client = mock_client
    type(mock_conn).meta = mock_meta
    cls = ConnectableTester()
    cls.api_name = 'myapi'
    kwargs = {'foo': 'fooval', 'bar': 'barval'}
    with patch('%s._boto3_connection_kwargs' % pb,
               new_callable=PropertyMock, create=True) as mock_kwargs:
        mock_kwargs.return_value = kwargs
        with patch('%s.logger' % pbm) as mock_logger:
            with patch('%s.boto3.resource' % pbm) as mock_resource:
                with patch('%s._max_retries_config' % pb,
                           new_callable=PropertyMock) as m_mrc:
                    m_mrc.return_value = None
                    mock_resource.return_value = mock_conn
                    cls.connect_resource()
    assert mock_kwargs.mock_calls == [call()]
    assert mock_logger.mock_calls == [
        call.info("Connected to %s (resource) in region %s",
                  'myapi', 'myregion')
    ]
    assert mock_resource.mock_calls == [
        call('myapi', foo='fooval', bar='barval')
    ]
    assert m_mrc.mock_calls == [call()]
    assert cls.resource_conn == mock_resource.return_value

def test_healthcheck_no_active(self):
    type(self.mock_redir).active_node_ip_port = None
    type(self.mock_redir).log_enabled = True
    if sys.version_info[0] < 3:
        type(self.mock_request).uri = '/vault-redirector-health'
        type(self.mock_request).path = '/vault-redirector-health'
        expected = 'foobar'
        expected_msg = 'No Active Vault'
    else:
        # in py3 these are byte literals
        type(self.mock_request).uri = b'/vault-redirector-health'
        type(self.mock_request).path = b'/vault-redirector-health'
        expected = b'foobar'
        expected_msg = b'No Active Vault'
    with patch('%s.logger' % pbm) as mock_logger:
        with patch('%s.VaultRedirectorSite.status_response' % pbm) as mock_status:
            mock_status.return_value = 'foobar'
            resp = self.cls.healthcheck(self.mock_request)
    assert self.mock_request.mock_calls == [
        call.setResponseCode(503, message=expected_msg),
        call.setHeader("Content-Type", "application/json")
    ]
    assert resp == expected
    assert mock_logger.mock_calls == [
        call.info(
            'RESPOND %d for %s%s request for /vault-redirector-health'
            ' from %s:%s', 503, '', 'GET', '1.2.3.4', 12345)
    ]

def test_register_callbacks(self):
    mock_listener = Mock(spec_set=InputEventListener)
    self.cls.listener = mock_listener
    with patch('%s.logger' % pbm) as mock_logger:
        self.cls.register_callbacks()
    assert mock_logger.mock_calls == [
        call.debug("registering callbacks"),
        call.debug('registering callback for %s ON', 0),
        call.debug('registering callback for %s OFF', 0),
        call.debug('registering callback for %s ON', 1),
        call.debug('registering callback for %s OFF', 1),
        call.debug('registering callback for %s ON', 2),
        call.debug('registering callback for %s OFF', 2),
        call.debug('registering callback for %s ON', 3),
        call.debug('registering callback for %s OFF', 3),
        call.debug('done registering callbacks'),
        call.info('Initial pin states: %s', [10, 11, 12, 13])
    ]
    assert mock_listener.mock_calls == [
        call.register(0, IODIR_ON, self.cls.handle_input_on),
        call.register(0, IODIR_OFF, self.cls.handle_input_off),
        call.register(1, IODIR_ON, self.cls.handle_input_on),
        call.register(1, IODIR_OFF, self.cls.handle_input_off),
        call.register(2, IODIR_ON, self.cls.handle_input_on),
        call.register(2, IODIR_OFF, self.cls.handle_input_off),
        call.register(3, IODIR_ON, self.cls.handle_input_on),
        call.register(3, IODIR_OFF, self.cls.handle_input_off),
    ]
    assert self.cls.current_values == [10, 11, 12, 13]

def test_dont_log_stderr_on_success_if_disabled(self, proc, file, logger):
    proc.return_value.returncode = 0
    file.return_value = BytesIO(b'stderr')
    job = TestLogStderrOnFailureOnlyTask()
    job.run()

    self.assertNotIn(call.info('Program stderr:\nstderr'), logger.mock_calls)

def test_log_stderr_on_success_by_default(self, proc, file, logger):
    proc.return_value.returncode = 0
    file.return_value = BytesIO(b'stderr')
    job = TestExternalProgramTask()
    job.run()

    self.assertIn(call.info('Program stderr:\nstderr'), logger.mock_calls)

def test_connect(self):
    mock_conn = Mock()
    mock_cc = Mock()
    type(mock_cc).region_name = 'myregion'
    type(mock_conn)._client_config = mock_cc
    cls = ConnectableTester()
    cls.api_name = 'myapi'
    kwargs = {'foo': 'fooval', 'bar': 'barval'}
    with patch('%s._boto3_connection_kwargs' % pb,
               new_callable=PropertyMock, create=True) as mock_kwargs:
        mock_kwargs.return_value = kwargs
        with patch('%s.logger' % pbm) as mock_logger:
            with patch('%s.boto3.client' % pbm) as mock_client:
                mock_client.return_value = mock_conn
                cls.connect()
    assert mock_kwargs.mock_calls == [call()]
    assert mock_logger.mock_calls == [
        call.info("Connected to %s in region %s", 'myapi', 'myregion')
    ]
    assert mock_client.mock_calls == [
        call(
            'myapi',
            foo='fooval',
            bar='barval'
        )
    ]
    assert cls.conn == mock_client.return_value

def test_handle_input_on(self):
    mock_evt = Mock(spec_set=InterruptEvent)
    type(mock_evt).pin_num = 3
    type(mock_evt).timestamp = 1234.5678
    self.cls.current_values = [5, 5, 5, 5]
    with patch('%s.handle_change' % pb) as mock_handle:
        with patch('%s.logger' % pbm) as mock_logger:
            with patch('%s.no_state_change' % pb) as mock_no_change:
                with patch('%s.set_output' % pb) as mock_set:
                    mock_no_change.return_value = False
                    self.cls.handle_input_on(mock_evt)
    assert mock_logger.mock_calls == [
        call.info("Received ON event for pin %s", 3),
    ]
    assert mock_handle.mock_calls == [call(3, 1, 1234.5678)]
    assert self.mock_chip.mock_calls == []
    assert self.opin0.mock_calls == []
    assert self.opin1.mock_calls == []
    assert self.opin2.mock_calls == []
    assert self.opin3.mock_calls == []
    assert self.cls.current_values == [5, 5, 5, 1]
    assert mock_no_change.mock_calls == [call(3, 1)]
    assert mock_set.mock_calls == [call(3, 1)]

def test_handle_input_off_no_change(self):
    mock_evt = Mock(spec_set=InterruptEvent)
    type(mock_evt).pin_num = 1
    type(mock_evt).timestamp = 1234.5678
    self.cls.current_values = [5, 0, 5, 5]
    with patch('%s.handle_change' % pb) as mock_handle:
        with patch('%s.logger' % pbm) as mock_logger:
            with patch('%s.no_state_change' % pb) as mock_no_change:
                with patch('%s.set_output' % pb) as mock_set:
                    mock_no_change.return_value = True
                    self.cls.handle_input_off(mock_evt)
    assert mock_logger.mock_calls == [
        call.info("Ignoring duplicate event for pin %s state %s", 1, 0)
    ]
    assert mock_handle.mock_calls == []
    assert self.mock_chip.mock_calls == []
    assert self.opin0.mock_calls == []
    assert self.opin1.mock_calls == []
    assert self.opin2.mock_calls == []
    assert self.opin3.mock_calls == []
    assert self.cls.current_values == [5, 0, 5, 5]
    assert mock_no_change.mock_calls == [call(1, 0)]
    assert mock_set.mock_calls == []

def test_load_default_file(self):
    default_file = '''{'Axes_set_ylabel': {'fontsize': 10}, 'wlevels_Axes_plot': {'DEFAULT': {'marker': 'v', 'markersize': 6, 'linewidth': 1, 'linestyle': '-'}}, 'geology_Axes_bar': {'edgecolor': 'black'}, 'obsid_Axes_bar': {'edgecolor': 'black', 'linewidth': 0.5, 'fill': False}, 'dems_Axes_plot': {'DEFAULT': {'marker': 'None', 'linewidth': 1, 'linestyle': '-'}}, 'Axes_set_xlabel': {'fontsize': 10}, 'Axes_set_ylim': None, 'plot_width': None, 'grid_Axes_grid': {'color': '0.65', 'b': True, 'linestyle': '-', 'which': 'both'}, 'legend_Axes_legend': {'loc': 0, 'framealpha': 1, 'fontsize': 10}, 'legend_Frame_set_fill': False, 'plot_height': None, 'layer_Axes_annotate': {'va': 'center', 'xytext': (5, 0), 'fontsize': 9, 'bbox': {'alpha': 0.6, 'fc': 'white', 'boxstyle': 'square,pad=0.05', 'edgecolor': 'white'}, 'ha': 'left', 'textcoords': 'offset points'}, 'ticklabels_Text_set_fontsize': {'fontsize': 10}, 'legend_Text_set_fontsize': 10, 'Figure_subplots_adjust': {}, 'Axes_set_xlim': None, 'obsid_Axes_annotate': {'va': 'top', 'xytext': (0, 10), 'fontsize': 9, 'bbox': {'alpha': 0.4, 'fc': 'white', 'boxstyle': 'square,pad=0.05', 'edgecolor': 'white'}, 'rotation': 0, 'ha': 'center', 'textcoords': 'offset points'}, 'drillstop_Axes_plot': {'color': 'black', 'marker': '^', 'markersize': 8, 'linestyle': ''}, 'legend_Frame_set_facecolor': '1'}'''
    as_dict = ast.literal_eval(default_file)
    self.midvatten.ms.settingsdict['secplot_loaded_template'] = ''
    with utils.tempinput(default_file, 'utf-8') as f1:
        @mock.patch('midvatten_utils.MessagebarAndLog')
        @mock.patch('os.path.join')
        def _test(self, filename, mock_join, mock_messagebar):
            mock_join.return_value = filename
            self.midvatten.ms.settingsdict['secplot_templates'] = filename
            secplottemplates = PlotTemplates(
                self.sectionplot, self.template_list, self.edit_button,
                self.load_button, self.save_as_button, self.import_button,
                self.remove_button, self.template_folder,
                'secplot_templates', 'secplot_loaded_template',
                defs.secplot_default_template(), self.midvatten.ms)
            return secplottemplates, mock_messagebar

        secplottemplates, mock_messagebar = _test(self, f1)
        assert call.info(
            log_msg='Loaded template from default template file.'
        ) in mock_messagebar.mock_calls
        assert utils.anything_to_string_representation(
            secplottemplates.loaded_template
        ) == utils.anything_to_string_representation(as_dict)

def test_connect(self):
    mock_conn = Mock()
    mock_cc = Mock()
    type(mock_cc).region_name = 'myregion'
    type(mock_conn)._client_config = mock_cc
    cls = ConnectableTester()
    cls.api_name = 'myapi'
    kwargs = {'foo': 'fooval', 'bar': 'barval'}
    with patch('%s._boto3_connection_kwargs' % pb,
               new_callable=PropertyMock, create=True) as mock_kwargs:
        mock_kwargs.return_value = kwargs
        with patch('%s.logger' % pbm) as mock_logger:
            with patch('%s.boto3.client' % pbm) as mock_client:
                mock_client.return_value = mock_conn
                cls.connect()
    assert mock_kwargs.mock_calls == [call()]
    assert mock_logger.mock_calls == [
        call.info("Connected to %s in region %s", 'myapi', 'myregion')
    ]
    assert mock_client.mock_calls == [
        call('myapi', foo='fooval', bar='barval')
    ]
    assert cls.conn == mock_client.return_value

def test_get_api_id_aws(self):
    def se_exc(*args, **kwargs):
        raise Exception()

    conf = Mock()
    args = Mock(tf_path='tfpath')
    with patch.multiple(
        pbm,
        autospec=True,
        logger=DEFAULT,
        TerraformRunner=DEFAULT,
        AWSInfo=DEFAULT
    ) as mocks:
        mocks['TerraformRunner'].return_value._get_outputs.side_effect = \
            se_exc
        mocks['AWSInfo'].return_value.get_api_id.return_value = 'myaid'
        res = get_api_id(conf, args)
    assert res == 'myaid'
    assert mocks['TerraformRunner'].mock_calls == [
        call(conf, 'tfpath'),
        call()._get_outputs()
    ]
    assert mocks['AWSInfo'].mock_calls == [
        call(conf),
        call().get_api_id()
    ]
    assert mocks['logger'].mock_calls == [
        call.debug('Trying to get Terraform rest_api_id output'),
        call.info('Unable to find API rest_api_id from Terraform state; '
                  'querying AWS.', exc_info=1),
        call.debug('AWS API ID: \'%s\'', 'myaid')
    ]

def test_set_account_info_env(self):
    self.cls.aws_account_id = None
    self.cls.aws_region = None
    with patch('%s.logger' % pbm, autospec=True) as mock_logger:
        with patch('%s.client' % pbm, autospec=True) as mock_client:
            mock_client.return_value.get_user.return_value = {
                'User': {'Arn': 'arn:aws:iam::123456789:user/foo'}
            }
            type(mock_client.return_value)._client_config = Mock(
                region_name='myregion')
            with patch.dict(
                    '%s.os.environ' % pbm,
                    {'AWS_REGION': 'ar'},
                    clear=True):
                self.cls._set_account_info()
    assert self.cls.aws_account_id == '123456789'
    assert self.cls.aws_region == 'myregion'
    assert mock_client.mock_calls == [
        call('iam', region_name='ar'),
        call().get_user(),
        call('lambda', region_name='ar')
    ]
    assert mock_logger.mock_calls == [
        call.debug('Connecting to IAM with region_name=%s', 'ar'),
        call.info('Found AWS account ID as %s; region: %s',
                  '123456789', 'myregion')
    ]

def test_setup_mongodb(self):
    with patch('%s.MongoClient' % pbm, autospec=True) as mock_client:
        with patch('%s.logger' % pbm, autospec=True) as mock_logger:
            setup_mongodb('h', 12)
    assert mock_client.mock_calls == [
        call('h', 12, connect=True, connectTimeoutMS=5000,
             serverSelectionTimeoutMS=5000, socketTimeoutMS=5000,
             waitQueueTimeoutMS=5000),
        call().get_database(MONGO_DB_NAME),
        call().get_database().get_collection('dbtest'),
        call().get_database().get_collection().update(
            {'_id': 'setup_mongodb'},
            {'dt': FakeDatetime(2015, 1, 10, 12, 13, 14),
             '_id': 'setup_mongodb'},
            j=True,
            upsert=True,
            w=1
        ),
        call().close()
    ]
    assert mock_logger.mock_calls == [
        call.debug('Connecting to MongoDB via pymongo at %s:%s', 'h', 12),
        call.info('Connected to MongoDB via pymongo at %s:%s', 'h', 12),
        call.debug('Trying a DB upsert'),
        call.debug('MongoDB write completed successfully.')
    ]

def test_add_view_obs_points_obs_lines(self, mock_messagebar):
    utils.add_view_obs_points_obs_lines()
    print(str(mock_messagebar.mock_calls))
    assert mock_messagebar.mock_calls == [
        call.info(bar_msg='Views not added for PostGIS databases (not needed)!')
    ]

def test_connect_exception(self):
    def se_exc(*args, **kwargs):
        raise Exception('foo')

    with patch.multiple(
        pbm,
        autospec=True,
        logger=DEFAULT,
        setup_mongodb=DEFAULT,
        ConnectionPool=DEFAULT,
    ) as mocks:
        mocks['ConnectionPool'].side_effect = se_exc
        with pytest.raises(SystemExit) as excinfo:
            connect_mongodb('myhost', 1234)
        assert excinfo.value.code == 2
    assert mocks['setup_mongodb'].mock_calls == [call('myhost', 1234)]
    assert mocks['ConnectionPool'].mock_calls == [
        call(uri='mongodb://myhost:1234')
    ]
    assert mocks['logger'].mock_calls == [
        call.info('Connecting to MongoDB via txmongo at %s',
                  'mongodb://myhost:1234'),
        call.critical('Error connecting to MongoDB at %s',
                      'mongodb://myhost:1234', exc_info=1)
    ]

def test_list_accounts(self):
    with patch('%s.logger' % pbm, autospec=True) as mock_logger:
        with patch('%s.open' % pbm, mock_open(read_data='foo'),
                   create=True) as m_open:
            with patch('%s.yaml.load' % pbm, autospec=True) as mock_load:
                with patch('%s.ManheimConfig' % pbm, autospec=True) as mock_conf:
                    mock_load.return_value = [
                        {
                            'account_name': 'a1',
                            'account_id': 1111,
                            'foo': 'bar',
                            'baz': 2,
                            'regions': ['us-east-1']
                        },
                        {
                            'account_name': 'a2',
                            'account_id': 2222,
                            'foo': 'bar1',
                            'baz': 4,
                            'regions': ['us-east-2']
                        }
                    ]
                    res = ManheimConfig.list_accounts('/tmp/conf.yml')
    assert res == {'a1': 1111, 'a2': 2222}
    assert mock_logger.mock_calls == [
        call.info('Loading config from: %s', '/tmp/conf.yml')
    ]
    assert m_open.mock_calls == [
        call('/tmp/conf.yml', 'r'),
        call().__enter__(),
        call().read(),
        call().__exit__(None, None, None)
    ]
    assert mock_load.mock_calls == [call('foo', Loader=yaml.SafeLoader)]
    assert mock_conf.mock_calls == []

def test_load_hard_coded_settings(self, mock_messagebar, mock_join,
                                  mock_hardcoded_template):
    self.midvatten.ms.settingsdict['secplot_loaded_template'] = ''
    self.midvatten.ms.settingsdict['secplot_templates'] = ''
    mock_join.return_value = ''
    test_dict = {"hardcoded": 1}
    mock_hardcoded_template.return_value = test_dict
    secplottemplates = PlotTemplates(
        self.sectionplot, self.template_list, self.edit_button,
        self.load_button, self.save_as_button, self.import_button,
        self.remove_button, self.template_folder, 'secplot_templates',
        'secplot_loaded_template', defs.secplot_default_template(),
        self.midvatten.ms)
    test = utils.anything_to_string_representation(
        secplottemplates.loaded_template)
    reference = utils.anything_to_string_representation(test_dict)
    assert call.warning(
        bar_msg='Default template not found, loading hard coded default template.'
    ) in mock_messagebar.mock_calls
    assert call.info(
        log_msg='Loaded template from default hard coded template.'
    ) in mock_messagebar.mock_calls
    assert test == reference

def test_always_log_stderr_on_failure(self, proc, file, logger):
    proc.return_value.returncode = 1
    file.return_value = BytesIO(b'stderr')
    with self.assertRaises(ExternalProgramRunError):
        job = TestLogStderrOnFailureOnlyTask()
        job.run()

    self.assertIn(call.info('Program stderr:\nstderr'), logger.mock_calls)

def test_remote_bank(self):
    self._create_dealer(["@{:s}:{:d}".format(HOST, PORT)])
    self.dealer.load()
    self.mocked_log.assert_has_calls(
        [call.debug("Loading banks"),
         call.info("Connecting to remote server at %s:%d", HOST, PORT)]
    )
    self.mocked_log.warning.assert_not_called()

def test_dont_log_stderr_on_success(self, proc, file, logger):
    proc.return_value.returncode = 0
    file.return_value = BytesIO(b'spark normal error output')
    job = TestSparkSubmitTask()
    job.run()

    self.assertNotIn(
        call.info('Program stderr:\nspark normal error output'),
        logger.mock_calls)

def test_discover_engine(self):
    with patch('%s.utils_discover_engine' % pbm, autospec=True) as mock_de:
        with patch('%s.logger' % pbm, autospec=True) as mock_logger:
            mock_de.return_value = ('engine_addr', 1234)
            res = self.cls.discover_engine()
    assert res == ('engine_addr', 1234)
    assert mock_de.mock_calls == [call()]
    assert mock_logger.mock_calls == [
        call.info('Discovered Engine at %s:%s', 'engine_addr', 1234)
    ]

def test_run_cmd(self, capsys):
    mock_stdout = Mock()
    mock_stdout.read.side_effect = ['f', 'o', 'o', '']
    with patch('%s.logger' % pbm, autospec=True) as mock_logger:
        with patch('%s.subprocess.Popen' % pbm) as mock_popen:
            mock_popen.return_value.stdout = mock_stdout
            mock_popen.return_value.returncode = 0
            mock_popen.return_value.pid = 1234
            out, retcode = run_cmd(['foo', 'bar'])
    assert out == 'foo'
    assert retcode == 0
    out, err = capsys.readouterr()
    assert out == ''
    assert err == ''
    assert mock_logger.mock_calls == [
        call.info('Running command%s: %s', '', ['foo', 'bar']),
        call.debug('Started process; pid=%s', 1234),
        call.info('Command exited with code %d', 0),
        call.debug("Command output:\n%s", 'foo')
    ]

def test_run(self):
    with patch('%s._alwaystrue' % pb) as mock_true:
        with patch('%s.handle_files' % pb) as mock_handle:
            with patch('%s.logger' % pbm) as mock_logger:
                mock_true.side_effect = [True, True, False]
                self.cls.run()
    assert mock_true.mock_calls == [call(), call(), call()]
    assert mock_handle.mock_calls == [call(), call()]
    assert mock_logger.mock_calls == [
        call.info("Beginning file handling loop")
    ]

def test_load_from_msettings(self, mock_messagebar):
    test_str = '''{"test": 1}'''
    self.midvatten.ms.settingsdict['secplot_loaded_template'] = test_str
    self.midvatten.ms.settingsdict['secplot_templates'] = ''
    secplottemplates = PlotTemplates(
        self.sectionplot, self.template_list, self.edit_button,
        self.load_button, self.save_as_button, self.import_button,
        self.remove_button, self.template_folder, 'secplot_templates',
        'secplot_loaded_template', defs.secplot_default_template(),
        self.midvatten.ms)
    assert call.info(
        log_msg='Loaded template from midvatten settings secplot_loaded_template.'
    ) in mock_messagebar.mock_calls
    assert utils.anything_to_string_representation(
        secplottemplates.loaded_template) == test_str

def test_handle_failed_job(self, proc, file, logger):
    proc.return_value.returncode = 1
    file.return_value = BytesIO(b'stderr')
    try:
        job = TestExternalProgramTask()
        job.run()
    except ExternalProgramRunError as e:
        self.assertEqual(e.err, 'stderr')
        self.assertIn('STDERR: stderr', six.text_type(e))
        self.assertIn(call.info('Program stderr:\nstderr'), logger.mock_calls)
    else:
        self.fail('Should have thrown ExternalProgramRunError')

def test_get_active_node(self):
    get_json = testdata.test_get_active_node
    with patch('%s.requests.get' % pbm) as mock_get:
        mock_get.return_value.json.return_value = get_json
        with patch('%s.logger' % pbm) as mock_logger:
            res = self.cls.get_active_node()
    url = 'http://consul:123/v1/health/service/vault'
    assert mock_get.mock_calls[0] == call(url)
    assert mock_logger.mock_calls == [
        call.debug('Polling active node from: %s', url),
        call.info('Got active node as: %s', 'node2:8200')
    ]
    assert res == 'node2:8200'

def test_handle_files(self):
    flist = [
        'foobar',
        'pinevent_1420863332.123456_pin2_state1',
        'pinevent_csds_pin3_state1',
        'pinevent_1420863326.123456_pin3_state0',
        'pinevent_1420863326.123456_pin2_state1',
        'xsfjef_fhejfec_dfhe',
        'pinevent_1420863326.456789_pin3_state2',
    ]
    ex = Exception('foo')

    def se_handle(fname, evt_datetime, pin, state):
        if fname == 'pinevent_1420863332.123456_pin2_state1':
            raise ex

    type(self.config).QUEUE_PATH = '/foo/bar'
    with patch('%s.logger' % pbm) as mock_logger:
        with patch('%s.os.listdir' % pbm) as mock_listdir:
            with patch('%s.handle_one_file' % pb) as mock_handle:
                with patch('%s.os.unlink' % pbm) as mock_unlink:
                    mock_listdir.return_value = flist
                    mock_handle.side_effect = se_handle
                    self.cls.handle_files()
    assert mock_logger.mock_calls == [
        call.info("Found %d new events", 3),
        call.debug('File handled; removing: %s',
                   'pinevent_1420863326.123456_pin2_state1'),
        call.debug('File handled; removing: %s',
                   'pinevent_1420863326.123456_pin3_state0'),
        call.exception('Execption while handling event file %s',
                       'pinevent_1420863332.123456_pin2_state1'),
    ]
    assert mock_listdir.mock_calls == [call('/foo/bar')]
    assert mock_handle.mock_calls == [
        call('pinevent_1420863326.123456_pin2_state1',
             datetime(2015, 1, 9, 23, 15, 26, 123456), 2, 1),
        call('pinevent_1420863326.123456_pin3_state0',
             datetime(2015, 1, 9, 23, 15, 26, 123456), 3, 0),
        call('pinevent_1420863332.123456_pin2_state1',
             datetime(2015, 1, 9, 23, 15, 32, 123456), 2, 1),
    ]
    assert mock_unlink.mock_calls == [
        call('/foo/bar/pinevent_1420863326.123456_pin2_state1'),
        call('/foo/bar/pinevent_1420863326.123456_pin3_state0')
    ]