def test_bid_competitive_data():
    """Build a draft competitive bid whose tenderer identifier id was not
    used by the last few generated bids.

    Uses the module-level ``used_identifier_id`` list as a rolling window
    (max 3 entries) of recently issued identifier ids, regenerating the
    bid until a fresh id comes up.
    """
    bid = munchify({"data": {"tenderers": [fake.procuringEntity()]}})
    # Keep only the three most recent ids in the rolling window.
    if len(used_identifier_id) == 3:
        del used_identifier_id[0]
    # Renamed from `id`, which shadowed the builtin.
    identifier_id = bid.data.tenderers[0].identifier.id
    while identifier_id in used_identifier_id:
        bid = munchify({"data": {"tenderers": [fake.procuringEntity()]}})
        identifier_id = bid.data.tenderers[0].identifier.id
    used_identifier_id.append(identifier_id)
    # Derive the translated country names from the generated one.
    address = bid.data.tenderers[0].address
    address.countryName_en = translate_country_en(address.countryName)
    address.countryName_ru = translate_country_ru(address.countryName)
    bid.data['status'] = 'draft'
    return bid
def test_munchify():
    """Nested dicts, lists and tuples become attribute-accessible Munches."""
    nested = munchify({"urmom": {"sez": {"what": "what"}}})
    assert nested.urmom.sez.what == "what"
    mixed = munchify({
        "lol": ("cats", {"hah": "i win again"}),
        "hello": [{"french": "salut", "german": "hallo"}],
    })
    assert mixed.hello[0].french == "salut"
    assert mixed.lol[1].hah == "i win again"
def prepare_test_tender_data(procedure_intervals, mode):
    """Validate interval config for *mode* and return munchified tender data.

    Falls back to the 'default' intervals when the mode has no entry;
    'openua'/'openeu' must carry a non-negative integer accelerator, all
    other modes must not have one at all.
    """
    if mode in procedure_intervals:
        intervals = procedure_intervals[mode]
    else:
        intervals = procedure_intervals['default']
    LOGGER.log_message(Message(intervals))
    if mode in ('openua', 'openeu'):
        accelerator = intervals['accelerator']
        assert isinstance(accelerator, int), (
            "Accelerator should be an 'int', "
            "not '{}'".format(type(accelerator).__name__))
        assert accelerator >= 0, "Accelerator should not be less than 0"
    else:
        assert 'accelerator' not in intervals.keys(), \
            "Accelerator is not available for mode '{0}'".format(mode)
    # Dispatch table instead of the original if/elif chain.
    builders = {
        'single': test_tender_data,
        'multi': test_tender_data_multiple_items,
        'limited': test_tender_data_limited,
        'openua': test_tender_data_openua,
    }
    if mode not in builders:
        raise ValueError("Invalid mode for prepare_test_tender_data")
    return munchify({'data': builders[mode](intervals)})
def test_munchify():
    """Attribute access works through nested dicts, lists and tuples."""
    simple = munchify({'urmom': {'sez': {'what': 'what'}}})
    assert simple.urmom.sez.what == 'what'
    compound = munchify({
        'lol': ('cats', {'hah': 'i win again'}),
        'hello': [{'french': 'salut', 'german': 'hallo'}],
    })
    assert compound.hello[0].french == 'salut'
    assert compound.lol[1].hah == 'i win again'
def prepare_test_tender_data(period_intervals, mode):
    """Wrap the mode-specific tender fixture in a munchified {'data': ...}."""
    factories = {
        'single': test_tender_data,
        'multi': test_tender_data_multiple_items,
        'limited': test_tender_data_limited,
    }
    if mode not in factories:
        raise ValueError('Invalid mode for test_tender_data')
    return munchify({'data': factories[mode](period_intervals)})
def prepare_test_tender_data(period_interval=2, mode="single"):
    """Return munchified {'data': ...} tender data for the given *mode*.

    Supported modes: "single", "multi" (multiple lots), "meat".
    Raises ValueError for any other mode.
    """
    if mode == "single":
        return munchify({"data": test_tender_data(period_interval=period_interval)})
    elif mode == "multi":
        return munchify({"data": test_tender_data_multiple_lots(period_interval=period_interval)})
    elif mode == "meat":
        return munchify({"data": test_meat_tender_data(period_interval=period_interval)})
    # Replaces the old placeholder message ("A very specific bad thing
    # happened") with an actionable one, matching the sibling helpers.
    raise ValueError("Invalid mode for prepare_test_tender_data: {!r}".format(mode))
def test_get_location_error(self):
    """Missing tender sub-items yield the matching location-error body."""
    setup_routing(self.app, routs=["tender_subpage_item"])
    question = self.client.get_question(self.empty_tender,
                                        question_id=TEST_KEYS.question_id)
    self.assertEqual(question, munchify(loads(location_error('questions'))))
    lot = self.client.get_lot(self.empty_tender, lot_id=TEST_KEYS.lot_id)
    self.assertEqual(lot, munchify(loads(location_error('lots'))))
    bid = self.client.get_bid(self.empty_tender, bid_id=TEST_KEYS.bid_id,
                              access_token=API_KEY)
    self.assertEqual(bid, munchify(loads(location_error('bids'))))
def test_delete_location_error(self):
    """Deleting missing sub-items yields the matching location-error body."""
    setup_routing(self.app, routs=["tender_subpage_item_delete"])
    deleted_bid = self.client.delete_bid(self.empty_tender,
                                         TEST_KEYS.error_id, API_KEY)
    self.assertEqual(deleted_bid, munchify(loads(location_error("bids"))))
    deleted_lot = self.client.delete_lot(self.empty_tender, TEST_KEYS.error_id)
    self.assertEqual(deleted_lot, munchify(loads(location_error("lots"))))
def test_munchify_cycle():
    """munchify must handle self-referential structures without infinite
    recursion, preserving object identity across the cycle."""
    # dict1 -> dict2 -> dict1
    x = dict(id="x")
    y = dict(x=x, id="y")
    x['y'] = y
    m = munchify(x)
    assert m.id == "x"
    assert m.y.id == "y"
    # The back-reference must be the very same Munch, not a copy.
    assert m.y.x is m
    # dict -> list -> dict
    x = dict(id="x")
    y = ["y", x]
    x["y"] = y
    m = munchify(x)
    assert m.id == "x"
    assert m.y[0] == "y"
    assert m.y[1] is m
    # dict -> tuple -> dict
    x = dict(id="x")
    y = ("y", x)
    x["y"] = y
    m = munchify(x)
    assert m.id == "x"
    assert m.y[0] == "y"
    assert m.y[1] is m
    # dict1 -> list -> dict2 -> list
    z = dict(id="z")
    y = ["y", z]
    z["y"] = y
    x = dict(id="x", y=y)
    m = munchify(x)
    assert m.id == "x"
    assert m.y[0] == "y"
    assert m.y[1].id == "z"
    # The shared list must also keep identity through the cycle.
    assert m.y[1].y is m.y
    # dict1 -> tuple -> dict2 -> tuple
    z = dict(id="z")
    y = ("y", z)
    z["y"] = y
    x = dict(id="x", y=y)
    m = munchify(x)
    assert m.id == "x"
    assert m.y[0] == "y"
    assert m.y[1].id == "z"
    assert m.y[1].y is m.y
def setUp(self):
    """Start a local stub plans API on localhost:20602 and load fixtures.

    Spins up a Bottle app behind a gevent WSGIServer, points a PlansClient
    at it, and loads the plans list plus one single-plan fixture from JSON.
    """
    self.app = Bottle()
    setup_routing(self.app)
    self.server = WSGIServer(('localhost', 20602), self.app, log=None)
    self.server.start()
    self.client = plan_client.PlansClient('', host_url=HOST_URL, api_version=API_VERSION)
    with open(ROOT + 'plans.json') as plans:
        self.plans = munchify(load(plans))
    with open(ROOT + 'plan_' + TEST_PLAN_KEYS.plan_id + '.json') as plan:
        self.plan = munchify(load(plan))
def init(config, codeRegistry, verbose=False, tags=None):
    '''cwd is where segment file was loaded.

    Registers the mirror, template and generate services on *codeRegistry*
    using paths/flags from the prime model database.
    '''
    mdb = codeRegistry.getService('fashion.prime.modelAccess')
    args = munchify(mdb.getSingleton("fashion.prime.args"))
    # Munch is a dict subclass, so .get replaces the manual membership test.
    force = args.get("force", False)
    pf = munchify(mdb.getSingleton("fashion.prime.portfolio"))
    codeRegistry.addService(MirrorService(Path(pf.projectPath),
                                          Path(pf.mirrorPath),
                                          force=force))
    codeRegistry.addService(TemplateService())
    codeRegistry.addService(GenerateService(codeRegistry))
def prepare_test_tender_data(procedure_intervals, tender_parameters,
                             submissionMethodDetails, accelerator, funders):
    """Build munchified {'data': ...} tender data for the procedure named by
    ``tender_parameters['mode']``.

    Looks up the interval configuration for the mode (falling back to
    'default'), stores it back into *tender_parameters*, validates the
    accelerator, then dispatches to the mode-specific fixture builder.
    Raises ValueError for an unknown mode.
    """
    # Get actual intervals by mode name
    mode = tender_parameters['mode']
    if mode in procedure_intervals:
        intervals = procedure_intervals[mode]
    else:
        intervals = procedure_intervals['default']
    LOGGER.log_message(Message(intervals))
    tender_parameters['intervals'] = intervals
    # Acceleration must always be a non-negative integer here.
    assert isinstance(intervals['accelerator'], int), \
        "Accelerator should be an 'int', " \
        "not '{}'".format(type(intervals['accelerator']).__name__)
    assert intervals['accelerator'] >= 0, \
        "Accelerator should not be less than 0"
    # These three modes previously had byte-identical branches; they all use
    # the limited-procedure builder.
    if mode in ('negotiation', 'negotiation.quick', 'reporting'):
        return munchify({'data': test_tender_data_limited(tender_parameters)})
    if mode == 'openeu':
        return munchify({'data': test_tender_data_openeu(
            tender_parameters, submissionMethodDetails)})
    if mode == 'openua':
        return munchify({'data': test_tender_data_openua(
            tender_parameters, submissionMethodDetails)})
    if mode == 'openua_defense':
        return munchify({'data': test_tender_data_openua_defense(
            tender_parameters, submissionMethodDetails)})
    if mode == 'open_competitive_dialogue':
        return munchify({'data': test_tender_data_competitive_dialogue(
            tender_parameters, submissionMethodDetails)})
    if mode == 'open_framework':
        return munchify({'data': test_tender_data_framework_agreement(
            tender_parameters, submissionMethodDetails)})
    if mode == 'belowThreshold':
        # Explicit keyword arguments: unlike the other builders this one
        # takes several optional parameters after tender_parameters.
        return munchify({'data': test_tender_data(
            tender_parameters,
            submissionMethodDetails=submissionMethodDetails,
            funders=funders,
            accelerator=accelerator)})
    if mode == 'open_esco':
        return munchify({'data': test_tender_data_esco(
            tender_parameters, submissionMethodDetails)})
    raise ValueError("Invalid mode for prepare_test_tender_data")
def setUp(self):
    """Start a local stub tender API on localhost:20602 and load fixtures.

    Serves a Bottle app via gevent WSGIServer and loads the tenders list
    plus the single-tender fixture referenced by TEST_KEYS.
    """
    self.app = Bottle()
    setup_routing(self.app)
    self.server = WSGIServer(('localhost', 20602), self.app, log=None)
    self.server.start()
    self.client = tender_client.TendersClient('', host_url=HOST_URL, api_version=API_VERSION)
    with open(ROOT + 'tenders.json') as tenders:
        self.tenders = munchify(load(tenders))
    with open(ROOT + TEST_KEYS.tender_id + '.json') as tender:
        self.tender = munchify(load(tender))
def setUp(self):
    """Start a local stub API and load tender fixtures with access tokens.

    Both the populated and the empty tender fixtures get an
    {'access': {'token': API_KEY}} entry so authorised operations work.
    """
    self.app = Bottle()
    setup_routing(self.app)
    self.server = WSGIServer(('localhost', 20602), self.app, log=None)
    self.server.start()
    self.client = client.Client(API_KEY, host_url=HOST_URL, api_version=API_VERSION)
    with open(ROOT + TEST_KEYS.tender_id + '.json') as tender:
        self.tender = munchify(load(tender))
        self.tender.update({'access': {"token": API_KEY}})
    with open(ROOT + TEST_KEYS.empty_tender + '.json') as tender:
        self.empty_tender = munchify(load(tender))
        self.empty_tender.update({'access': {"token": API_KEY}})
def test_bid_data_meat_tender():
    """Bid fixture for a MEAT (multi-criteria) tender: one faked tenderer
    plus two evaluation parameters and a fixed bid value."""
    tenderer = {
        "address": {
            "countryName": "Україна",
            "locality": "м. Вінниця",
            "postalCode": "21100",
            "region": "м. Вінниця",
            "streetAddress": fake.street_address(),
        },
        "contactPoint": {"name": fake.name(), "telephone": fake.phone_number()},
        "identifier": {"scheme": u"UA-EDR", "id": u"{:08d}".format(fake.pyint())},
        "name": fake.company(),
    }
    parameters = [
        {
            "code": "ee3e24bc17234a41bd3e3a04cc28e9c6",
            "value": fake.random_element(elements=(0.15, 0.1, 0.05, 0)),
        },
        {
            "code": "48cfd91612c04125ab406374d7cc8d93",
            "value": fake.random_element(elements=(0.05, 0.01, 0)),
        },
    ]
    return munchify({
        "data": {
            "tenderers": [tenderer],
            "parameters": parameters,
            "value": {"amount": 500},
        }
    })
def test_confirm_data(id):
    """Payload that switches the item with the given id to 'active'."""
    payload = {"data": {"status": "active", "id": id}}
    return munchify(payload)
def test_submit_claim_data(claim_id):
    """Payload that moves the complaint *claim_id* into the 'claim' status."""
    payload = {"data": {"id": claim_id, "status": "claim"}}
    return munchify(payload)
def test_invalid_checkpoint(self):
    """Invalid checkpoint names return helpful error message."""
    # Build the directive with a minimal munchified stand-in for the
    # docutils state object (only relfn2path is consulted).
    node = tut.sphinx.checkpoint.TutCheckpoint(
        'checkpoint', ('blarf',), {'path': os.getcwd()},
        content='', lineno=0, content_offset=0, block_text=None,
        state=munchify({
            'document': {
                'settings': {
                    'env': {
                        'relfn2path': lambda p: (p, p)
                    },
                },
            },
        }),
        state_machine=None,
    )
    # Running with an unknown checkpoint name must raise with a clear message.
    with self.assertRaises(ValueError) as git_error:
        node.run()
    self.assertEqual(
        str(git_error.exception),
        "git checkpoint 'blarf' does not exist.",
    )
def test_supplier_data():
    """Static supplier fixture: one Vinnytsia catering company and a
    475 000 UAH (VAT included) value."""
    supplier = {
        "address": {
            "countryName": u"Україна",
            "locality": u"м. Вінниця",
            "postalCode": "21100",
            "region": u"м. Вінниця",
            "streetAddress": u"вул. Островського, 33"
        },
        "contactPoint": {
            "email": "*****@*****.**",
            "name": u"Сергій Олексюк",
            "telephone": "+380 (432) 21-69-30"
        },
        "identifier": {
            "id": "13313462",
            "legalName": u"Державне комунальне підприємство громадського харчування «Школяр»",
            "scheme": "UA-EDR",
            "uri": "http://sch10.edu.vn.ua/"
        },
        "name": u"ДКП «Школяр»"
    }
    value = {
        "amount": 475000,
        "currency": "UAH",
        "valueAddedTaxIncluded": True
    }
    return munchify({"data": {"suppliers": [supplier], "value": value}})
def test_complaint_data():
    """Random complaint fixture with a faked author organisation."""
    author = {
        "address": {
            "countryName": "Україна",
            "locality": "м. Вінниця",
            "postalCode": "21100",
            "region": "м. Вінниця",
            "streetAddress": fake.street_address()
        },
        "contactPoint": {
            "name": fake.name(),
            "telephone": fake.phone_number()
        },
        "identifier": {
            "scheme": u"UA-EDR",
            "id": u"0000{}".format(fake.pyint()),
            "uri": fake.image_url(width=None, height=None)
        },
        "name": fake.company()
    }
    return munchify({
        "data": {
            "author": author,
            "description": fake.sentence(nb_words=10, variable_nb_words=True),
            "title": fake.sentence(nb_words=6, variable_nb_words=True)
        }
    })
def test_bid_data():
    """Plain bid fixture: one faked tenderer and a fixed value of 500."""
    tenderer = {
        "address": {
            "countryName": "Україна",
            "locality": "м. Вінниця",
            "postalCode": "21100",
            "region": "м. Вінниця",
            "streetAddress": fake.street_address()
        },
        "contactPoint": {
            "name": fake.name(),
            "telephone": fake.phone_number()
        },
        "identifier": {
            "scheme": u"UA-EDR",
            "id": u"0000{}".format(fake.pyint()),
        },
        "name": fake.company()
    }
    return munchify({
        "data": {
            "tenderers": [tenderer],
            "value": {"amount": 500}
        }
    })
def test_tender_data_limited(params):
    """Derive limited-procedure tender data from the base fixture.

    Strips the open-tender-only fields, marks the procuring entity as
    'general', sets the limited procurement method/type from
    ``params['mode']``, and — for negotiation modes — attaches a randomly
    chosen cause with a faked description.
    """
    data = test_tender_data(params)
    # Limited procedures carry none of the open-tender periods/steps.
    for key in ("submissionMethodDetails", "minimalStep",
                "enquiryPeriod", "tenderPeriod"):
        del data[key]
    for lot in data.get('lots', []):
        lot.pop('minimalStep', None)
    data["procuringEntity"]["kind"] = "general"
    data.update({"procurementMethodType": params['mode'],
                 "procurementMethod": "limited"})
    mode = params['mode']
    if mode == "negotiation":
        cause_variants = (
            "artContestIP", "noCompetition", "twiceUnsuccessful",
            "additionalPurchase", "additionalConstruction",
            "stateLegalServices"
        )
    elif mode == "negotiation.quick":
        cause_variants = ('quick',)
    if mode in ("negotiation", "negotiation.quick"):
        # Single draw here; the original also drew a throwaway value inside
        # the "negotiation" branch that was immediately overwritten.
        cause = fake.random_element(cause_variants)
        data.update({
            "cause": cause,
            "causeDescription": fake.description()
        })
    return munchify(data)
def get_user(request):
    """Return the authenticated user's JSON-serializable dict, or None.

    Looks up the Bodhi User row matching the unauthenticated userid from
    *request*; returns None implicitly when no userid is present.
    """
    from bodhi.models import User
    userid = unauthenticated_userid(request)
    if userid is not None:
        # NOTE(review): `unicode` implies Python 2 — confirm before porting.
        user = request.db.query(User).filter_by(name=unicode(userid)).first()
        # Why munch? https://github.com/fedora-infra/bodhi/issues/473
        return munchify(user.__json__(request=request))
def execute(self, tags=None):
    '''Execute all the xforms planned in self.execList.

    Each xform runs inside its own ModelAccess context; template paths
    are resolved from the module definition and object configuration
    before execution. Errors in one xform are logged and do not stop
    the remaining xforms.
    '''
    verbose = self.dba.isVerbose()
    for xfName in self.execList:
        xfo = self.objects[xfName]
        try:
            with ModelAccess(self.dba, self.schemaRepo, xfo) as mdb:
                self.setMdb(mdb)
                if verbose:
                    print("Executing {0}".format(xfo.name))
                cfg = self.codeRegistry.getObjectConfig(xfo.name)
                Definition = Query()
                defs = self.dba.table('fashion.prime.module.definition').search(
                    Definition.moduleName == cfg.moduleName)
                assert len(defs) == 1
                defn = munchify(defs[0])
                tplSvc = self.codeRegistry.getService('fashion.core.template')
                tplSvc.setDefinitionPath(defn.absDirname, defn.templatePath)
                tplSvc.setConfigurationPath(cfg.absDirname, cfg.templatePath)
                xfo.execute(self.codeRegistry, verbose, tags)
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; only real errors should be caught here.
            logging.error("aborting, xform error: {0}".format(xfName))
            traceback.print_exc()
def get_unprotected_datastores(self):
    """Return datastore groups not yet assigned to any protection group.

    First collects unassigned datastore groups per array from the SRM
    SOAP endpoints, then enriches each collected entry with its device
    name and replication-pool key by cross-referencing the enabled pools
    of every known array.
    """
    datastores = []
    unassigned_groups = munchify(self._send('QueryUnassignedDatastoreGroupArrays.xml'))
    for array in _listify(unassigned_groups.QueryUnassignedDatastoreGroupArraysResponse.get('returnval', [])):
        unassigned_datastores = munchify(self._send('QueryUnassignedDatastoreGroups.xml', key=array['#text']))
        for datastore_group in _listify(unassigned_datastores.QueryUnassignedDatastoreGroupsResponse.get('returnval', [])):
            # Each entry: the group key plus the keys of the VMs living on it.
            datastores.append(dict(key=datastore_group.key,
                                   vms=[vm.key['#text'] for vm in _listify(datastore_group.vm)]))
    # Second pass: match devices in enabled pools to the collected groups.
    for array in self.get_arrays():
        for pool in array['pools']:
            if pool['enabled']:
                for device in pool['devices']:
                    for datastore in datastores:
                        if 'key' in device and device['key'] == datastore['key']:
                            datastore['name'] = device['name']
                            datastore['pair'] = pool['key']
    return datastores
def test_complaint_data(lot=False):
    """Complaint fixture with a faked author; wrapped through
    test_lot_complaint_data when *lot* is true."""
    author = {
        "address": {
            "countryName": u"Україна",
            "countryName_ru": u"Украина",
            "countryName_en": "Ukraine",
            "locality": u"м. Вінниця",
            "postalCode": "21100",
            "region": u"Вінницька область",
            "streetAddress": fake.street_address()
        },
        "contactPoint": {
            "name": fake.name(),
            "telephone": fake.phone_number()
        },
        "identifier": {
            "scheme": u"UA-EDR",
            "id": u"{:08d}".format(fake.pyint()),
            "uri": fake.image_url(width=None, height=None)
        },
        "name": fake.company()
    }
    complaint = munchify({
        "data": {
            "author": author,
            "description": fake.sentence(nb_words=10, variable_nb_words=True),
            "title": fake.sentence(nb_words=6, variable_nb_words=True)
        }
    })
    return test_lot_complaint_data(complaint) if lot else complaint
def from_file(fname=DEFAULT_CONFIG_FILENAME, testing=False):
    """Load YAML configuration from *fname* as a Munch.

    Returns TEST when *testing* is true. If the file is missing, a default
    configuration file is written (best effort) and DEFAULT is returned.
    """
    if testing:
        return TEST
    try:
        with open(os.path.expanduser(fname)) as fd:
            # safe_load: a bare yaml.load can instantiate arbitrary objects
            # from untrusted config files.
            conf = yaml.safe_load(fd)
            conf = munchify(conf)
            return conf
    except IOError:
        print("A configuration file named '%s' is missing" % fname)
        s_conf = yaml.dump(unmunchify(DEFAULT), explicit_start=True,
                           indent=True, default_flow_style=False)
        print("""
Creating this file %s

You still have to create directories with data and put your data in!
""" % s_conf)
        time.sleep(3)
        try:
            with open(os.path.expanduser(fname), "w") as fd:
                fd.write(s_conf)
        except IOError:
            # Message fixed: previously read "Can create '%s'".
            print("Cannot create '%s'" % fname)
            print("Trying anyway with default configuration")
        return DEFAULT
def wait_for_task(self, response):
    """Poll the SRM task referenced in *response* until it succeeds or fails.

    Extracts the task key from the '...Response' entry of the SOAP reply,
    then polls RetrievePropertiesEx once per second until the task state
    becomes 'success' or 'error'; raises SrmClientException on error.
    """
    from time import sleep
    # Locate the task key in the response payload; it is either directly on
    # returnval or nested under a '...Task' entry.
    for key, value in response.items():
        if not key.endswith('Response'):
            continue
        if '#text' in value['returnval']:
            task_key = value['returnval']['#text']
        else:
            for key, obj in value['returnval'].items():
                if key.endswith('Task') and '#text' in obj:
                    task_key = obj['#text']
    # NOTE(review): task_key is unbound if no matching entry is found —
    # confirm callers always pass a reply containing one.
    specSet = [dict(propSet=[dict(type="Task", all=True)],
                    objectSet=[dict(obj=dict(type="Task", value=task_key),
                                    partialUpdates=False,
                                    selectSet=[])])]
    state = 'queued'
    with self.property_collector() as property_collector_key:
        while state not in ('success', 'error'):
            response = munchify(self._send('RetrievePropertiesEx.xml',
                                           key=property_collector_key,
                                           specSet=specSet))
            item = _listify(response.RetrievePropertiesExResponse.returnval.objects)[0]
            state = _get_proprety(item, 'info').val.state
            sleep(1)
    if state in ('error',):
        raise SrmClientException(item.propSet[1].val.error.localizedMessage)
def log_object_data(data, file_name=None, format="yaml"):
    """Log object data in pretty format (JSON or YAML)

    Two output formats are supported: "yaml" and "json".

    If a file name is specified, the output is written into that file.

    If you would like to get similar output everywhere, use the following
    snippet somewhere in your code before actually using Munch. For
    instance, put it into your __init__.py, or, if you use zc.buildout,
    specify it in "initialization" setting of zc.recipe.egg.

    from munch import Munch
    Munch.__str__ = lambda self: Munch.toYAML(self, allow_unicode=True,
                                              default_flow_style=False)
    Munch.__repr__ = Munch.__str__
    """
    if not isinstance(data, Munch):
        data = munchify(data)
    if format.lower() == "json":
        data = data.toJSON(indent=2)
    else:
        # Anything other than "json" falls back to YAML output.
        data = data.toYAML(allow_unicode=True, default_flow_style=False)
        format = "yaml"
    # NOTE(review): .decode assumes toJSON/toYAML return bytes (Python 2
    # behaviour) — confirm when porting to Python 3.
    LOGGER.log_message(Message(data.decode("utf-8"), "INFO"))
    if file_name:
        output_dir = BuiltIn().get_variable_value("${OUTPUT_DIR}")
        with open(os.path.join(output_dir, file_name + "." + format), "w") as file_obj:
            file_obj.write(data)
def test_fetch_got_data(self):
    """SyncService.fetch returns the queue's 'items' and issues exactly one
    raw GET against the session's main queue with the device-UUID header."""
    queue_items = munchify(
        {
            "items": [
                {
                    "data": {"id": 1},
                    "meta": {
                        "type": "user",
                        "sync": {"event_type": "created", "ack_key": "User-1234-1", "revision": 1},
                    },
                }
            ]
        }
    )
    http_client = mock.create_autospec(basecrm.HttpClient)
    http_client.get.return_value = (200, {}, queue_items)
    sync = basecrm.SyncService(http_client)
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(sync.fetch(self.device_uuid, self.session_id), queue_items["items"])
    http_client.get.assert_called_once_with(
        "/sync/{session_id}/queues/main".format(session_id=self.session_id),
        params=None,
        headers={"X-Basecrm-Device-UUID": self.device_uuid},
        raw=True,
    )
def set_access_key(tender, access_token):
    """Attach an access token to *tender* and return the same object."""
    token_holder = {"token": access_token}
    tender.access = munchify(token_holder)
    return tender
def read_yaml2cls(yml_file):
    """Read a YAML file and return its contents as an attribute-access Munch."""
    with open(yml_file, "r") as stream:
        parsed = yaml.safe_load(stream)
    return munchify(parsed)
def test__get_resource_item_from_public(self, mock_api_client):
    """Exercise ResourceItemWorker._get_resource_item_from_public across the
    full error matrix: success, stale document, InvalidResponse, throttling
    (429) with and without cookie drop, other RequestFailed codes,
    ResourceNotFound and generic exceptions. The retry-queue size grows by
    one per failure; the api-client dict is always returned to its queue.
    """
    item = {
        'id': uuid.uuid4().hex,
        'dateModified': datetime.datetime.utcnow().isoformat()
    }
    api_clients_queue = Queue()
    client_dict = {
        'id': uuid.uuid4().hex,
        'request_interval': 0.02,
        'client': mock_api_client
    }
    api_clients_queue.put(client_dict)
    api_clients_info = \
        {client_dict['id']: {'drop_cookies': False, 'request_durations': {}}}
    retry_queue = Queue()
    return_dict = {
        'data': {
            'id': item['id'],
            'dateModified': datetime.datetime.utcnow().isoformat()
        }
    }
    mock_api_client.get_resource_item.return_value = return_dict
    worker = ResourceItemWorker(api_clients_queue=api_clients_queue,
                                config_dict=self.worker_config,
                                retry_resource_items_queue=retry_queue,
                                api_clients_info=api_clients_info)
    # Success test
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    api_client = worker._get_api_client_dict()
    self.assertEqual(api_client['request_interval'], 0.02)
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    public_item = worker._get_resource_item_from_public(api_client, item)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 0)
    self.assertEqual(public_item, return_dict['data'])
    # Not actual document form public
    item['dateModified'] = datetime.datetime.utcnow().isoformat()
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    self.assertEqual(api_client['request_interval'], 0)
    public_item = worker._get_resource_item_from_public(api_client, item)
    self.assertEqual(public_item, None)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 1)
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    # InvalidResponse
    mock_api_client.get_resource_item.side_effect = \
        InvalidResponse('invalid response')
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 1)
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    public_item = worker._get_resource_item_from_public(api_client, item)
    self.assertEqual(public_item, None)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 2)
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    # RequestFailed status_code=429
    mock_api_client.get_resource_item.side_effect = RequestFailed(
        munchify({'status_code': 429}))
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    self.assertEqual(api_client['request_interval'], 0)
    public_item = worker._get_resource_item_from_public(api_client, item)
    self.assertEqual(public_item, None)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 3)
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    # Throttling bumps the client's interval by client_inc_step_timeout.
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    self.assertEqual(api_client['request_interval'],
                     worker.config['client_inc_step_timeout'])
    # RequestFailed status_code=429 with drop cookies
    api_client['request_interval'] = 2
    public_item = worker._get_resource_item_from_public(api_client, item)
    sleep(api_client['request_interval'])
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    self.assertEqual(public_item, None)
    self.assertEqual(api_client['request_interval'], 0)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 4)
    # RequestFailed with status_code not equal 429
    mock_api_client.get_resource_item.side_effect = RequestFailed(
        munchify({'status_code': 404}))
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    public_item = worker._get_resource_item_from_public(api_client, item)
    self.assertEqual(public_item, None)
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    self.assertEqual(api_client['request_interval'], 0)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 5)
    # ResourceNotFound
    mock_api_client.get_resource_item.side_effect = RNF(
        munchify({'status_code': 404}))
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    public_item = worker._get_resource_item_from_public(api_client, item)
    self.assertEqual(public_item, None)
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    self.assertEqual(api_client['request_interval'], 0)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 6)
    # Exception
    api_client = worker._get_api_client_dict()
    mock_api_client.get_resource_item.side_effect = \
        Exception('text except')
    public_item = worker._get_resource_item_from_public(api_client, item)
    self.assertEqual(public_item, None)
    self.assertEqual(api_client['request_interval'], 0)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 7)
    del worker
def test_create_question(self):
    """Creating a question echoes the payload back."""
    setup_routing(self.app, routs=["tender_subpage_item_create"])
    payload = munchify({'data': 'question'})
    created = self.client.create_question(self.tender, payload)
    self.assertEqual(created, payload)
def test_storage(self, mock_api_client):
    """ArchiveWorker stores the secret archive document in a mock-S3-backed
    S3Storage, and lookups with an unknown key return None."""
    s3_key = 'key'
    s3_secret_key = 'secret'
    s3_bucket = 'bucket'
    queue = Queue()
    retry_queue = Queue()
    api_clients_queue = Queue()
    api_client_dict = {
        'client': mock_api_client,
        'request_interval': 0
    }
    queue_resource_item = {
        'resource': 'tenders',
        'id': uuid.uuid4().hex,
        'dateModified': datetime.datetime.now().isoformat(),
        '_rev': '1-' + uuid.uuid4().hex
    }
    resource_item = {
        'resource': 'tenders',
        'id': uuid.uuid4().hex,
        'dateModified': datetime.datetime.now().isoformat(),
        '_rev': '1-' + uuid.uuid4().hex
    }
    archive_doc = {
        'id': resource_item['id'],
        'dateModified': datetime.datetime.now().isoformat(),
        '_rev': '1-' + uuid.uuid4().hex,
        'rev': '1-' + uuid.uuid4().hex
    }
    db = MagicMock()
    archive_db = MagicMock()
    # Real S3Storage over an in-memory mock connection — the part under test.
    conn = MockConnection(s3_key, s3_secret_key)
    conn.create_bucket(s3_bucket)
    secret_archive = S3Storage(conn, s3_bucket)
    bridge = ArchiveWorker(config_dict=self.worker_config,
                           log_dict=self.log_dict,
                           resource_items_queue=queue,
                           retry_resource_items_queue=retry_queue,
                           api_clients_queue=api_clients_queue,
                           db=db,
                           archive_db=archive_db,
                           secret_archive_db=secret_archive)
    bridge.db.get.side_effect = [resource_item, resource_item]
    bridge.archive_db.get.side_effect = [munchify(archive_doc),
                                         munchify(archive_doc)]
    bridge._get_api_client_dict = MagicMock(side_effect=[api_client_dict,
                                                         api_client_dict])
    bridge.archive_db.save = MagicMock()
    # Try get resource item dump from cdb
    resource_item['_rev'] = '1-' + uuid.uuid4().hex
    secret_doc = {
        'id': resource_item['id']
    }
    bridge._action_resource_item_from_cdb = MagicMock(side_effect=[secret_doc,
                                                                   secret_doc])
    queue.put(queue_resource_item)
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 0)
    self.assertEqual(bridge.log_dict['add_to_retry'], 0)
    # The secret document must be retrievable from the S3-backed archive.
    data = bridge.secret_archive_db.get(queue_resource_item['id'])
    self.assertEqual(secret_doc, data.get('data'))
    # Test invalid key
    data = bridge.secret_archive_db.get('invalid')
    self.assertTrue(data is None)
def test_create_contract(self):
    """Contract creation endpoint accepts a munchified payload.

    NOTE(review): the response is not asserted; sibling create tests
    compare the echoed payload — confirm whether `contract_create`
    echoes too and add the assertion if so.
    """
    setup_routing(self.app, routs=["contract_create"])
    payload = munchify({'data': 'contract'})
    self.client.create_contract(payload)
def test_get_tender_location_error(self):
    """Fetching an unknown tender id yields the 'tender' location error."""
    setup_routing(self.app, routs=["tender"])
    expected = munchify(loads(location_error('tender')))
    self.assertEqual(self.client.get_tender(TEST_KEYS.error_id), expected)
else: ENV_VAR_KEY = key.upper() if default_value == '' or default_value is None: try: return (os.environ[ENV_VAR_KEY]) except Exception: warnings.warn( "You should pass %s using --%s or using environment variable %r" % (key, key, ENV_VAR_KEY)) return (default_value) else: return (default_value) DEFAULT = munchify({ "CSV_DATA_DIR": from_env("CSV_DATA_DIR", "~/data"), "OUTPUT_DIR": from_env("OUTPUT_DIR", "~/out") }) TEST = munchify({"CSV_DATA_DIR": "data", "OUTPUT_DIR": "out"}) def from_file(fname=DEFAULT_CONFIG_FILENAME, testing=False): if testing: return TEST try: with open(os.path.expanduser(fname)) as fd: conf = yaml.load(fd) conf = munchify(conf) return conf except IOError: print("A configuration file named '%s' is missing" % fname)
def test_create_cancellation(self):
    """Creating a cancellation echoes the payload back."""
    setup_routing(self.app, routs=["tender_subpage_item_create"])
    payload = munchify({'data': 'cancellation'})
    created = self.client.create_cancellation(self.limited_tender, payload)
    self.assertEqual(created, payload)
def test_create_award(self):
    """Creating an award echoes the payload back."""
    setup_routing(self.app, routs=["tender_subpage_item_create"])
    payload = munchify({'data': 'award'})
    created = self.client.create_award(self.limited_tender, payload)
    self.assertEqual(created, payload)
def test_create_lot(self):
    """Creating a lot echoes the payload back."""
    setup_routing(self.app, routs=["tender_subpage_item_create"])
    payload = munchify({'data': 'lot'})
    created = self.client.create_lot(self.tender, payload)
    self.assertEqual(created, payload)
def test_create_bid(self):
    """Creating a bid echoes the payload back."""
    setup_routing(self.app, routs=["tender_subpage_item_create"])
    payload = munchify({'data': 'bid'})
    created = self.client.create_bid(self.tender, payload)
    self.assertEqual(created, payload)
def test_empty_config_results_in_no_definitions(self):
    """An empty 'definitions' list parses to zero definitions."""
    empty_config = munch.munchify({'definitions': []})
    parsed = self._get_definitions(empty_config)
    self.assertEqual(0, len(parsed))
def test__action_resource_item_from_cdb(self, mock_api_client):
    """Exercise ArchiveWorker._action_resource_item_from_cdb across the
    error matrix: success, InvalidResponse, throttling (429) with and
    without cookie drop, other RequestFailed codes, ResourceNotFound
    (counted, not retried) and generic exceptions. Exception and retry
    counters in log_dict advance accordingly; the api-client dict is
    always returned to its queue.
    """
    item = {
        'id': uuid.uuid4().hex,
        'dateModified': datetime.datetime.utcnow().isoformat(),
        'resource': 'tenders'
    }
    api_clients_queue = Queue()
    api_clients_queue.put({
        'client': mock_api_client,
        'request_interval': 0.02})
    retry_queue = Queue()
    return_dict = {
        'data': {
            'id': item['id'],
            'dateModified': datetime.datetime.utcnow().isoformat()
        }
    }
    mock_api_client.get_resource_dump.return_value = return_dict
    worker = ArchiveWorker(api_clients_queue=api_clients_queue,
                           config_dict=self.worker_config,
                           retry_resource_items_queue=retry_queue,
                           log_dict=self.log_dict)
    # Success test
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    api_client = worker._get_api_client_dict()
    self.assertEqual(api_client['request_interval'], 0.02)
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    public_item = worker._action_resource_item_from_cdb(api_client, item)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 0)
    self.assertEqual(public_item, return_dict['data'])
    # InvalidResponse
    mock_api_client.get_resource_dump.side_effect = InvalidResponse('invalid response')
    self.assertEqual(self.log_dict['exceptions_count'], 0)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 0)
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    public_item = worker._action_resource_item_from_cdb(api_client, item)
    self.assertEqual(public_item, None)
    self.assertEqual(worker.log_dict['exceptions_count'], 1)
    self.assertEqual(worker.log_dict['add_to_retry'], 1)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 1)
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    # RequestFailed status_code=429
    mock_api_client.get_resource_dump.side_effect = RequestFailed(
        munchify({'status_code': 429}))
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    self.assertEqual(api_client['request_interval'], 0)
    public_item = worker._action_resource_item_from_cdb(api_client, item)
    self.assertEqual(public_item, None)
    self.assertEqual(worker.log_dict['exceptions_count'], 2)
    self.assertEqual(worker.log_dict['add_to_retry'], 2)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 2)
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    # Throttling bumps the client's interval by client_inc_step_timeout.
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    self.assertEqual(api_client['request_interval'],
                     worker.config['client_inc_step_timeout'])
    # RequestFailed status_code=429 with drop cookies
    api_client['request_interval'] = 2
    public_item = worker._action_resource_item_from_cdb(api_client, item)
    sleep(api_client['request_interval'])
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    self.assertEqual(public_item, None)
    self.assertEqual(api_client['request_interval'], 0)
    self.assertEqual(worker.log_dict['exceptions_count'], 3)
    self.assertEqual(worker.log_dict['add_to_retry'], 3)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 3)
    # RequestFailed with status_code not equal 429
    mock_api_client.get_resource_dump.side_effect = RequestFailed(
        munchify({'status_code': 404}))
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    public_item = worker._action_resource_item_from_cdb(api_client, item)
    self.assertEqual(public_item, None)
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    self.assertEqual(api_client['request_interval'], 0)
    self.assertEqual(worker.log_dict['exceptions_count'], 4)
    self.assertEqual(worker.log_dict['add_to_retry'], 4)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 4)
    # ResourceNotFound
    mock_api_client.get_resource_dump.side_effect = RNF(
        munchify({'status_code': 404}))
    api_client = worker._get_api_client_dict()
    self.assertEqual(worker.api_clients_queue.qsize(), 0)
    public_item = worker._action_resource_item_from_cdb(api_client, item)
    self.assertEqual(public_item, None)
    self.assertEqual(worker.api_clients_queue.qsize(), 1)
    self.assertEqual(api_client['request_interval'], 0)
    # Not-found is counted separately and does not grow the retry queue.
    self.assertEqual(worker.log_dict['exceptions_count'], 4)
    self.assertEqual(worker.log_dict['add_to_retry'], 4)
    self.assertEqual(worker.log_dict['not_found_count'], 1)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 4)
    # Exception
    api_client = worker._get_api_client_dict()
    mock_api_client.get_resource_dump.side_effect = Exception('text except')
    public_item = worker._action_resource_item_from_cdb(api_client, item)
    self.assertEqual(public_item, None)
    self.assertEqual(api_client['request_interval'], 0)
    self.assertEqual(worker.log_dict['exceptions_count'], 5)
    self.assertEqual(worker.log_dict['add_to_retry'], 5)
    sleep(worker.config['retry_default_timeout'] * 2)
    self.assertEqual(worker.retry_resource_items_queue.qsize(), 5)
    del worker
def test__run(self, mock_api_client):
    """Walk ArchiveWorker._run() through its failure ladder.

    Each stage arms one mock (edge db, archive db, api-client queue,
    CDB dump, secret archive db, edge-db delete) with a side_effect
    sequence whose first entries fail, then checks that
    ``log_dict['exceptions_count']`` / ``log_dict['add_to_retry']``
    advance exactly as the worker retries past each failure.  The
    side_effect list lengths are load-bearing: later _run() calls
    consume the leftover successful entries, so do not shorten them.
    """
    queue = Queue()
    retry_queue = Queue()
    api_clients_queue = Queue()
    api_client_dict = {
        'client': mock_api_client,
        'request_interval': 0
    }
    # Item as it sits on the input queue (what _run() pops).
    queue_resource_item = {
        'resource': 'tenders',
        'id': uuid.uuid4().hex,
        'dateModified': datetime.datetime.now().isoformat(),
        '_rev': '1-' + uuid.uuid4().hex
    }
    # Item as the edge db returns it; intentionally a different id/rev
    # than queue_resource_item so db lookups are exercised for real.
    resource_item = {
        'resource': 'tenders',
        'id': uuid.uuid4().hex,
        'dateModified': datetime.datetime.now().isoformat(),
        '_rev': '1-' + uuid.uuid4().hex
    }
    # Pre-existing archive copy keyed to resource_item's id.
    archive_doc = {
        'id': resource_item['id'],
        'dateModified': datetime.datetime.now().isoformat(),
        '_rev': '1-' + uuid.uuid4().hex,
        'rev': '1-' + uuid.uuid4().hex
    }
    db = MagicMock()
    archive_db = MagicMock()
    secret_archive_db = MagicMock()
    bridge = ArchiveWorker(config_dict=self.worker_config,
                           log_dict=self.log_dict,
                           resource_items_queue=queue,
                           retry_resource_items_queue=retry_queue,
                           api_clients_queue=api_clients_queue,
                           db=db,
                           archive_db=archive_db,
                           secret_archive_db=secret_archive_db)
    # Try get item from resource items queue (queue is empty: no-op run).
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 0)
    self.assertEqual(bridge.log_dict['add_to_retry'], 0)
    # Get resource from edge db: first call raises, second returns None,
    # the rest succeed; surplus entries feed all later _run() calls.
    bridge.db.get.side_effect = [
        Exception('DB exception'), None,
        resource_item, resource_item, resource_item, resource_item,
        resource_item, resource_item, resource_item, resource_item,
        resource_item, resource_item]
    queue.put(queue_resource_item)
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 1)
    self.assertEqual(bridge.log_dict['add_to_retry'], 1)
    # None from db.get: counters must NOT move.
    queue.put(queue_resource_item)
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 1)
    self.assertEqual(bridge.log_dict['add_to_retry'], 1)
    resource_item['dateModified'] = datetime.datetime.now().isoformat()
    bridge.archive_db.get.side_effect = [
        Exception('Archive DB exception'), None,
        munchify(archive_doc), munchify(archive_doc), munchify(archive_doc),
        munchify(archive_doc), munchify(archive_doc), munchify(archive_doc),
        munchify(archive_doc)]
    bridge.archive_db.save = MagicMock()
    # Put resource to public db: archive_db.get raising bumps both counters.
    queue.put(queue_resource_item)
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 2)
    self.assertEqual(bridge.log_dict['add_to_retry'], 2)
    # Try get api client from clients queue: a None client dict means
    # "no client available" -> retry without counting an exception.
    queue.put(queue_resource_item)
    bridge._get_api_client_dict = MagicMock(
        side_effect=[None, api_client_dict, api_client_dict, api_client_dict,
                     None, api_client_dict, api_client_dict, api_client_dict,
                     api_client_dict, api_client_dict, api_client_dict])
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 2)
    self.assertEqual(bridge.log_dict['add_to_retry'], 3)
    # Try get resource item dump from cdb.
    resource_item['_rev'] = '1-' + uuid.uuid4().hex
    secret_doc = {
        'id': resource_item['id']
    }
    bridge._action_resource_item_from_cdb = MagicMock(
        side_effect=[Exception('From CDB exception'), secret_doc, secret_doc,
                     secret_doc, Exception('Delete'), secret_doc, secret_doc,
                     secret_doc, secret_doc])
    queue.put(queue_resource_item)
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 3)
    self.assertEqual(bridge.log_dict['add_to_retry'], 4)
    # Secret archive db failing counts an exception and a retry.
    bridge.secret_archive_db.get.side_effect = [
        Exception('Secret DB exception'), None, secret_doc, secret_doc,
        secret_doc, secret_doc, secret_doc]
    queue.put(queue_resource_item)
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 4)
    self.assertEqual(bridge.log_dict['add_to_retry'], 5)
    # None from secret archive db: counters unchanged.
    queue.put(queue_resource_item)
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 4)
    self.assertEqual(bridge.log_dict['add_to_retry'], 5)
    # Exception('Delete') from the CDB action: exception only, no retry.
    queue.put(queue_resource_item)
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 5)
    self.assertEqual(bridge.log_dict['add_to_retry'], 5)
    # Delete resource from edge db: first save succeeds cleanly.
    queue.put(queue_resource_item)
    bridge.db.save.side_effect = [True, Exception('Delete from edge')]
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 5)
    self.assertEqual(bridge.log_dict['add_to_retry'], 5)
    # Second save raises: exception counted, but no retry is queued.
    queue.put(queue_resource_item)
    bridge._run()
    self.assertEqual(bridge.log_dict['exceptions_count'], 6)
    self.assertEqual(bridge.log_dict['add_to_retry'], 5)
def test_get_lots(self): setup_routing(self.app, routs=["tender_subpage"]) lots = munchify({'data': self.tender['data'].get('lots', [])}) self.assertEqual(self.client.get_lots(self.tender), lots)
def test_get_plan_location_error(self): setup_routing(self.app, routs=["plan"]) tender = self.client.get_plan(TEST_PLAN_KEYS.error_id) self.assertEqual(tender, munchify(loads(location_error('plan'))))
def test_create_tender(self): setup_routing(self.app, routs=["tender_create"]) tender = munchify({'data': 'tender'}) self.client.create_tender(self.tender)
SETTINGS = munchify( { # Location of user-specific settings.py file containing a SETTINGS dict. # With this dict you can change anything that is set here, conveniently from your home directory. # Default: $HOME/.config/seml/settings.py "USER_SETTINGS_PATH": Path.home() / ".config/seml/settings.py", "DATABASE": { # location of the MongoDB config. Default: $HOME/.config/seml/monogdb.config "MONGODB_CONFIG_PATH": Path.home() / ".config/seml/mongodb.config" }, "SLURM_DEFAULT": { 'experiments_per_job': 1, 'sbatch_options': { 'time': '0-08:00', 'nodes': 1, 'cpus-per-task': 1, 'mem': '8G', }, }, "SBATCH_OPTIONS_TEMPLATES": { # This is a special template used for `seml jupyter` "JUPYTER": { 'cpus-per-task': 2, 'mem': '16G', 'gres': "gpu:1", 'qos': 'interactive', 'job-name': 'jupyter', 'output': 'jupyter-%j.out', }, # Extend this with your custom templates. "GPU": { 'cpus-per-task': 2, 'mem': '16G', 'gres': "gpu:1", }, }, "STATES": { "STAGED": ["STAGED", "QUEUED" ], # QUEUED for backward compatibility "PENDING": ["PENDING"], "RUNNING": ["RUNNING"], "FAILED": ["FAILED"], "KILLED": ["KILLED"], "INTERRUPTED": ["INTERRUPTED"], "COMPLETED": ["COMPLETED"], }, "SLURM_STATES": { "PENDING": [ "PENDING", "CONFIGURING", "REQUEUE_FED", "REQUEUE_HOLD", "REQUEUED", "RESIZING" ], "RUNNING": [ "RUNNING", "SIGNALING" ], # Python code can still be executed while in SIGNALING "PAUSED": ["STOPPED", "SUSPENDED", "SPECIAL_EXIT"], "INTERRUPTED": ["CANCELLED"], # Caused by user command "FAILED": [ "FAILED", "BOOT_FAIL", "DEADLINE", "NODE_FAIL", "OUT_OF_MEMORY", "PREEMPTED", "REVOKED", "TIMEOUT" ], # REVOKED is not failed, but would need code that handles multi-cluster operation "COMPLETED": ["COMPLETED", "COMPLETING", "STAGE_OUT"], }, "VALID_SEML_CONFIG_VALUES": [ 'executable', 'name', 'output_dir', 'conda_environment', 'project_root_dir' ], "VALID_SLURM_CONFIG_VALUES": [ 'experiments_per_job', 'max_jobs_per_batch', 'sbatch_options_template', 'sbatch_options' ], "LOGIN_NODE_NAMES": ["fs"], 
"OBSERVERS": { "NEPTUNE": { "AUTH_TOKEN": "YOUR_AUTH_TOKEN", }, "SLACK": { "WEBHOOK": "YOUR_WEBHOOK", }, "MATTERMOST": { "WEBHOOK": "YOUR_WEBHOOK", "DEFAULT_CHANNEL": "YOUR_DEFAULT_CHANNEL", } }, }, )
def test_get_questions(self): setup_routing(self.app, routs=["tender_subpage"]) questions = munchify( {'data': self.tender['data'].get('questions', [])}) self.assertEqual(self.client.get_questions(self.tender), questions)
def test_related_question(question, relation, obj_id): question.data.update({"questionOf": relation, "relatedItem": obj_id}) return munchify(question)
from openprocurement_client.tests._server import (tender_partition, location_error, setup_routing, ROOT) HOST_URL = "http://localhost:20602" API_KEY = 'e9c3ccb8e8124f26941d5f9639a4ebc3' API_VERSION = '0.10' TEST_KEYS = munchify({ "tender_id": '823d50b3236247adad28a5a66f74db42', "empty_tender": 'f3849ade33534174b8402579152a5f41', "question_id": '615ff8be8eba4a81b300036d6bec991c', "lot_id": '563ef5d999f34d36a5a0e4e4d91d7be1', "bid_id": 'f7fc1212f9f140bba5c4e3cd4f2b62d9', "bid_document_id": "ff001412c60c4164a0f57101e4eaf8aa", "bid_qualification_document_id": "7519d21b32af432396acd6e2c9e18ee5", "bid_financial_document_id": "7519d21b32af432396acd6e2c9e18ee5", "bid_eligibility_document_id": "7519d21b32af432396acd6e2c9e18ee5", "award_id": '7054491a5e514699a56e44d32e23edf7', "qualification_id": "cec4b82d2708465291fb4af79f8a3e52", "document_id": '330822cbbd724671a1d2ff7c3a51dd52', "new_document_id": '12345678123456781234567812345678', "error_id": '111a11a1111111aaa11111a1a1a111a1' }) TEST_KEYS_LIMITED = munchify({ "tender_id": '668c3156c8cb496fb28359909cde6e96', "cancellation_id": "0dd6f9e8cc4f4d1c9c404d842b56d0d7", "cancellation_document_id": "1afca9faaf2b4f9489ee264b136371c6",
def test_question_answer_data(): return munchify({ "data": { "answer": fake.sentence(nb_words=40, variable_nb_words=True) } })
def test_get_contract_location_error(self): setup_routing(self.app, routes=["contract"]) contract = self.client.get_contract(TEST_CONTRACT_KEYS.error_id) self.assertEqual(contract, munchify(loads(location_error('contract'))))
def test_tender_data(params, periods=("enquiry", "tender"),
                     submissionMethodDetails=None):
    """Build a randomized tender payload for API tests.

    :param params: dict of generation options; reads params['intervals']
        (period offsets plus 'accelerator'), and the optional keys
        'number_of_lots', 'number_of_items', 'lot_meat', 'tender_meat',
        'item_meat'.
    :param periods: period names to generate "<name>Period" date ranges for.
    :param submissionMethodDetails: override; defaults to "quick".
    :returns: the assembled data as a Munch.
    """
    submissionMethodDetails = submissionMethodDetails \
        if submissionMethodDetails else "quick"
    now = get_now()
    value_amount = round(
        random.uniform(3000, 99999999999.99), 2)
    # max value equals to budget of Ukraine in hryvnias
    data = {
        "mode": "test",
        "submissionMethodDetails": submissionMethodDetails,
        "description": fake.description(),
        "description_en": fake_en.sentence(nb_words=10,
                                           variable_nb_words=True),
        "description_ru": fake_ru.sentence(nb_words=10,
                                           variable_nb_words=True),
        "title": fake.title(),
        "title_en": fake_en.catch_phrase(),
        "title_ru": fake_ru.catch_phrase(),
        "procuringEntity": fake.procuringEntity(),
        "value": {
            "amount": value_amount,
            "currency": u"UAH",
            "valueAddedTaxIncluded": True
        },
        "minimalStep": {
            # minimal step is a small fraction (0.5%-3%) of the value
            "amount": round(random.uniform(0.005, 0.03) * value_amount, 2),
            "currency": u"UAH"
        },
        "items": [],
        "features": []
    }
    # NOTE(review): 'accelerator' is read unconditionally here, but
    # prepare_test_tender_data only guarantees it for openua/openeu
    # modes — confirm callers always supply it in params['intervals'].
    accelerator = params['intervals']['accelerator']
    data['procurementMethodDetails'] = 'quick, ' \
        'accelerator={}'.format(accelerator)
    data["procuringEntity"]["kind"] = "other"
    # Prefix titles so test tenders are clearly marked in all languages.
    if data.get("mode") == "test":
        data["title"] = u"[ТЕСТУВАННЯ] {}".format(data["title"])
        data["title_en"] = u"[TESTING] {}".format(data["title_en"])
        data["title_ru"] = u"[ТЕСТИРОВАНИЕ] {}".format(data["title_ru"])
    # Build consecutive start/end dates for each requested period; each
    # boundary advances inc_dt by the configured interval in minutes.
    period_dict = {}
    inc_dt = now
    for period_name in periods:
        period_dict[period_name + "Period"] = {}
        for i, j in zip(range(2), ("start", "end")):
            inc_dt += timedelta(minutes=params['intervals'][period_name][i])
            period_dict[period_name + "Period"][j + "Date"] = \
                inc_dt.astimezone(TZ).isoformat()
    data.update(period_dict)
    cpv_group = fake.cpv()[:4]
    if params.get('number_of_lots'):
        # Multi-lot tender: every lot gets its own id and its own batch
        # of items related to it.
        data['lots'] = []
        for lot_number in range(params['number_of_lots']):
            lot_id = uuid4().hex
            new_lot = test_lot_data(data['value']['amount'])
            data['lots'].append(new_lot)
            data['lots'][lot_number]['id'] = lot_id
            for i in range(params['number_of_items']):
                new_item = test_item_data(cpv_group)
                new_item['relatedLot'] = lot_id
                data['items'].append(new_item)
        # Tender totals are recomputed from the generated lots.
        value_amount = round(
            sum(lot['value']['amount'] for lot in data['lots']), 2)
        minimalStep = min(lot['minimalStep']['amount']
                          for lot in data['lots'])
        data['value']['amount'] = value_amount
        data['minimalStep']['amount'] = minimalStep
        if params.get('lot_meat'):
            # Feature attached to the first lot.
            new_feature = test_feature_data()
            new_feature['featureOf'] = "lot"
            data['lots'][0]['id'] = data['lots'][0].get('id', uuid4().hex)
            new_feature['relatedItem'] = data['lots'][0]['id']
            data['features'].append(new_feature)
    else:
        # Lot-less tender: items hang directly off the tender.
        for i in range(params['number_of_items']):
            new_item = test_item_data(cpv_group)
            data['items'].append(new_item)
    if params.get('tender_meat'):
        # Feature attached to the tenderer as a whole.
        new_feature = test_feature_data()
        new_feature.featureOf = "tenderer"
        data['features'].append(new_feature)
    if params.get('item_meat'):
        # Feature attached to the first item.
        new_feature = test_feature_data()
        new_feature['featureOf'] = "item"
        data['items'][0]['id'] = data['items'][0].get('id', uuid4().hex)
        new_feature['relatedItem'] = data['items'][0]['id']
        data['features'].append(new_feature)
    # Drop the key entirely when no features were requested.
    if not data['features']:
        del data['features']
    data['status'] = 'draft'
    return munchify(data)
from munch import munchify from pathlib import Path __all__ = ("SETTINGS",) SETTINGS = munchify( { "DATABASE": { # location of the MongoDB config. Default: $HOME/.config/seml/monogdb.config "MONGODB_CONFIG_PATH": f'{str(Path.home())}/.config/seml/mongodb.config' }, } )
def test_create_plan(self): setup_routing(self.app, routes=["plan_create"]) plan = munchify({'data': 'plan'}) self.assertEqual(self.client.create_plan(plan), plan)
def test_get_awards(self): setup_routing(self.app, routes=["tender_subpage"]) awards = munchify({'data': self.tender['data'].get('awards', [])}) self.assertEqual(self.client.get_awards(self.tender), awards)
def test_create_complaint(self): setup_routing(self.app, routs=["tender_subpage_item_create"]) complaint = munchify({'data': 'complaint'}) self.assertEqual( self.client.create_complaint(self.limited_tender, complaint), complaint)
def test_get_plans_failed(self, mock_request): mock_request.return_value = munchify({'status_code': 412}) self.client.params['offset'] = 'offset_value' with self.assertRaises(KeyError) as e: self.client.get_plans() self.assertEqual(e.exception.message, 'offset')