def test_facebook_token(self, _get_external_data, get_new_token):
    self.app.test_request_context().push()
    _get_external_data.return_value = FACEBOOK_GRAPH_DATA

    from rockpack.mainsite.services.oauth.api import FacebookUser
    long_lived_fb_token = "fdsuioncf3w8ryl38yb7y4eius"
    get_new_token.return_value = FacebookUser("facebook", long_lived_fb_token, 3600)

    user = self._new_user()
    token = uuid.uuid4().hex
    eu = FacebookUser("facebook", token, 3600)
    eu._user_data = FACEBOOK_GRAPH_DATA.copy()
    ExternalToken.update_token(user, eu)
    self.session.commit()

    e = ExternalToken.query.filter_by(external_token=long_lived_fb_token).one()
    self.assertEquals("facebook", e.external_system)
    self.assertEquals(user.username, e.user_rel.username)

    # test we can overwrite token
    new_token = uuid.uuid4().hex
    eu = FacebookUser("facebook", new_token, 172800)
    eu._user_data = FACEBOOK_GRAPH_DATA.copy()
    ExternalToken.update_token(user, eu)
    self.session.commit()

    e = ExternalToken.query.filter_by(user=user.id)
    self.assertEquals(1, e.count(), "only one token should exist")
    e = e.one()
    self.assertEquals(new_token, e.external_token, "saved token should match new token")

def test_update_project_returns_extra(self):
    """This tests for backwards-compatibility with an essex/folsom bug.

    Non-indexed attributes were returned in an 'extra' attribute, instead
    of on the entity itself; for consistency and backwards compatibility,
    those attributes should be included twice.

    This behavior is specific to the SQL driver.

    """
    tenant_id = uuid.uuid4().hex
    arbitrary_key = uuid.uuid4().hex
    arbitrary_value = uuid.uuid4().hex
    tenant = {
        "id": tenant_id,
        "name": uuid.uuid4().hex,
        "domain_id": DEFAULT_DOMAIN_ID,
        arbitrary_key: arbitrary_value,
    }
    ref = self.identity_man.create_project({}, tenant_id, tenant)
    self.assertEqual(arbitrary_value, ref[arbitrary_key])
    self.assertIsNone(ref.get("extra"))

    tenant["name"] = uuid.uuid4().hex
    ref = self.identity_api.update_project(tenant_id, tenant)
    self.assertEqual(arbitrary_value, ref[arbitrary_key])
    self.assertEqual(arbitrary_value, ref["extra"][arbitrary_key])

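# A minimal sketch (illustrative, not from the source) of the update_project
# return shape the test above relies on: the arbitrary attribute appears both
# at the top level and duplicated under 'extra'. Key names are placeholders.
#
#     ref = {
#         'id': '<tenant_id>',
#         'name': '<name>',
#         'domain_id': '<domain_id>',
#         '<arbitrary_key>': '<arbitrary_value>',
#         'extra': {'<arbitrary_key>': '<arbitrary_value>'},
#     }
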
def test_serializer(self):
    transport = _FakeTransport(self.conf)

    serializer = msg_serializer.NoOpSerializer()
    notifier = messaging.Notifier(transport,
                                  'test.localhost',
                                  driver='test',
                                  topic='test',
                                  serializer=serializer)

    message_id = uuid.uuid4()
    self.mox.StubOutWithMock(uuid, 'uuid4')
    uuid.uuid4().AndReturn(message_id)

    timeutils.set_time_override()

    self.mox.StubOutWithMock(serializer, 'serialize_entity')
    serializer.serialize_entity({}, 'bar').AndReturn('sbar')

    self.mox.ReplayAll()

    notifier.info({}, 'test.notify', 'bar')

    message = {
        'message_id': str(message_id),
        'publisher_id': 'test.localhost',
        'event_type': 'test.notify',
        'priority': 'INFO',
        'payload': 'sbar',
        'timestamp': str(timeutils.utcnow.override_time),
    }
    self.assertEquals(_impl_test.NOTIFICATIONS, [({}, message, 'INFO')])

def test_unauthorized(self):
    BUCKETNAME = uuid.uuid4().hex
    DESCRIPTION = uuid.uuid4().hex
    result = yield request(
        "POST", "%s/%s" % (self.url, BUCKETNAME),
        username=self.username, password=self.password,
        data={"description": DESCRIPTION})
    self.assertEqual(result.code, 201)
    result = yield request(
        "GET", "%s/%s" % (self.url, BUCKETNAME),
        username="******", password="******")
    self.assertEqual(result.code, 401)
    result = yield request(
        "GET", "%s/%s" % (self.url, BUCKETNAME),
        username=self.username2, password=self.password2)
    self.assertEqual(result.code, 401)
    result = yield request(
        "DELETE", "%s/%s" % (self.url, BUCKETNAME),
        username=self.username, password=self.password)
    self.assertEqual(result.code, 200)

def get_file_name(self, filename):
    """
    Ensures that a file name is unique before uploading.

    The PRMAttachment instance requires an extra attribute, partner
    (a Partner instance) to be set in order to create the file name.

    """
    filename, extension = path.splitext(filename)
    filename = '.'.join([sub(r'[\W]', '', filename),
                         sub(r'[\W]', '', extension)])

    # If the uploaded file only contains invalid characters the end
    # result will be a file named "."
    if not filename or filename == '.':
        filename = 'unnamed_file'

    uid = uuid4()
    if self.partner:
        partner = self.partner.pk
        owner = self.partner.owner.pk
    else:
        partner = owner = 'none'
    path_addon = "mypartners/%s/%s/%s" % (owner, partner, uid)
    name = "%s/%s" % (path_addon, filename)

    # Make sure that in the unlikely event that a filepath/uid/filename
    # combination isn't actually unique a new unique id is generated.
    while default_storage.exists(name):
        uid = uuid4()
        path_addon = "mypartners/%s/%s/%s" % (owner, partner, uid)
        name = "%s/%s" % (path_addon, filename)

    return name

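# A minimal standalone sketch (assumed names, not part of the source) of the
# same uuid-retry pattern used above: regenerate the random path component
# until the storage backend reports the candidate name as unused.
def _unique_name_sketch(storage, base_dir, filename):
    # storage is assumed to expose Django's Storage.exists(name) API
    name = "%s/%s/%s" % (base_dir, uuid4(), filename)
    while storage.exists(name):
        name = "%s/%s/%s" % (base_dir, uuid4(), filename)
    return name
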
def test_pqueue_message_time_to_live(live_servicebus_config, partitioned_queue):
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=True)

    import uuid
    queue_client = client.get_queue(partitioned_queue)
    with queue_client.get_sender() as sender:
        content = str(uuid.uuid4())
        message_id = uuid.uuid4()
        message = Message(content)
        message.time_to_live = timedelta(seconds=30)
        sender.send(message)

    time.sleep(30)
    with queue_client.get_receiver() as receiver:
        messages = receiver.fetch_next(timeout=10)
        assert not messages

    with queue_client.get_deadletter_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        count = 0
        for message in receiver:
            print_message(message)
            message.complete()
            count += 1
        assert count == 1

def test_element_count_validation(self):
    """
    Tests that big collections are detected and raise an exception.
    """
    # Cassandra caps a collection at 65535 elements, so a map exactly at
    # the limit is accepted and one element past it must fail validation.
    TestMapModel.create(text_map={str(uuid4()): i for i in range(65535)})
    with self.assertRaises(ValidationError):
        TestMapModel.create(text_map={str(uuid4()): i for i in range(65536)})

def _test_federated_payload_with_ids(self, exp_user_id, exp_group_id):
    exp_methods = ['password']
    exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
    exp_audit_ids = [provider.random_urlsafe_str()]
    exp_federated_info = {'group_ids': [{'id': exp_group_id}],
                          'idp_id': uuid.uuid4().hex,
                          'protocol_id': uuid.uuid4().hex}

    payload = token_formatters.FederatedUnscopedPayload.assemble(
        exp_user_id, exp_methods, exp_expires_at, exp_audit_ids,
        exp_federated_info)

    (user_id, methods, expires_at, audit_ids, federated_info) = (
        token_formatters.FederatedUnscopedPayload.disassemble(payload))

    self.assertEqual(exp_user_id, user_id)
    self.assertEqual(exp_methods, methods)
    self.assertEqual(exp_expires_at, expires_at)
    self.assertEqual(exp_audit_ids, audit_ids)
    self.assertEqual(exp_federated_info['group_ids'][0]['id'],
                     federated_info['group_ids'][0]['id'])
    self.assertEqual(exp_federated_info['idp_id'], federated_info['idp_id'])
    self.assertEqual(exp_federated_info['protocol_id'],
                     federated_info['protocol_id'])

def test_consumer_update_normalize_field(self):
    # If a consumer is updated with a field that has : or - in its name,
    # the name is normalized by converting those chars to _.
    field1_name = 'some:weird-field'
    field1_orig_value = uuid.uuid4().hex

    extra_fields = {field1_name: field1_orig_value}
    consumer = self._consumer_create(**extra_fields)
    consumer_id = consumer['id']

    field1_new_value = uuid.uuid4().hex

    field2_name = 'weird:some-field'
    field2_value = uuid.uuid4().hex

    update_ref = {field1_name: field1_new_value,
                  field2_name: field2_value}

    update_resp = self.patch(self.CONSUMER_URL + '/%s' % consumer_id,
                             body={'consumer': update_ref})
    consumer = update_resp.result['consumer']

    normalized_field1_name = 'some_weird_field'
    self.assertEqual(field1_new_value, consumer[normalized_field1_name])

    normalized_field2_name = 'weird_some_field'
    self.assertEqual(field2_value, consumer[normalized_field2_name])

def forwards(self, orm):
    for award in orm['badges.Award'].objects.all():
        user = award.user
        try:
            # Accessing user.identity raises if the user has no Identity yet.
            user.identity
            current_identity_hash = user.identity.identity_hash
            new_candidate_identity_hash = (
                u'sha256$' + hashlib.sha256(user.email + user.identity.salt).hexdigest())
            if current_identity_hash != new_candidate_identity_hash:
                salt = uuid.uuid4().hex[:5]
                user.identity.salt = salt
                user.identity.identity_hash = (
                    u'sha256$' + hashlib.sha256(user.email + salt).hexdigest())
                user.identity.save()
        except:
            salt = uuid.uuid4().hex[:5]
            orm['badges.Identity'].objects.create(
                user=user,
                identity_hash=u'sha256$' + hashlib.sha256(user.email + salt).hexdigest(),
                salt=salt
            )
        award.uuid = uuid.uuid1()
        award.identity_hash = award.user.identity.identity_hash
        award.identity_type = award.user.identity.type
        award.identity_hashed = award.user.identity.hashed
        award.identity_salt = award.user.identity.salt
        award.expires = None
        award.save()

def test_multiple_delivery_with_multiple_ack(self):
    data = str(uuid.uuid4())
    data2 = str(uuid.uuid4())
    client = yield self.quick_register(use_webpush=True)
    yield client.disconnect()
    ok_(client.channels)
    yield client.send_notification(data=data)
    yield client.send_notification(data=data2)
    yield client.connect()
    yield client.hello()
    result = yield client.get_notification()
    ok_(result != {})
    ok_(result["data"] in map(urlsafe_b64encode, [data, data2]))
    result2 = yield client.get_notification()
    ok_(result2 != {})
    ok_(result2["data"] in map(urlsafe_b64encode, [data, data2]))
    yield client.ack(result2["channelID"], result2["version"])
    yield client.ack(result["channelID"], result["version"])

    yield client.disconnect()
    yield client.connect()
    yield client.hello()
    result = yield client.get_notification()
    eq_(result, None)
    yield self.shut_down(client)

def test_delete_index(self):
    mother_case_id = uuid.uuid4().hex
    _submit_case_block(
        True, mother_case_id, user_id='user1', owner_id='owner1',
        case_type='mother', case_name='mother', date_modified=datetime.utcnow()
    )
    child_case_id = uuid.uuid4().hex
    _submit_case_block(
        True, child_case_id, user_id='user1', owner_id='owner1',
        case_type='child', case_name='child', date_modified=datetime.utcnow(),
        index={'mom': ('mother', mother_case_id)}
    )
    case = self.casedb.get_case(child_case_id)
    self.assertEqual(len(case.indices), 1)

    # Submitting the index with a blank referenced case id deletes it.
    _submit_case_block(
        False, child_case_id, user_id='user1', date_modified=datetime.utcnow(),
        index={'mom': ('mother', '')}
    )
    case = self.casedb.get_case(child_case_id)
    self.assertEqual(len(case.indices), 0)

def set_image_count(self, value):
    '''Add or remove image nodes as needed'''
    assert value > 0
    root = self.root_node
    if self.image_count > value:
        # findall (not find) so every image node past the new count is dropped
        image_nodes = root.findall(qn(self.ns['ome'], "Image"))
        for image_node in image_nodes[value:]:
            root.remove(image_node)
    while self.image_count < value:
        new_image = self.Image(ElementTree.SubElement(root, qn(self.ns['ome'], "Image")))
        new_image.ID = str(uuid.uuid4())
        new_image.Name = "default.png"
        new_image.AcquiredDate = xsd_now()
        new_pixels = self.Pixels(
            ElementTree.SubElement(new_image.node, qn(self.ns['ome'], "Pixels")))
        new_pixels.ID = str(uuid.uuid4())
        new_pixels.DimensionOrder = DO_XYCTZ
        new_pixels.PixelType = PT_UINT8
        new_pixels.SizeC = 1
        new_pixels.SizeT = 1
        new_pixels.SizeX = 512
        new_pixels.SizeY = 512
        new_pixels.SizeZ = 1
        new_channel = self.Channel(
            ElementTree.SubElement(new_pixels.node, qn(self.ns['ome'], "Channel")))
        new_channel.ID = "Channel%d:0" % self.image_count
        new_channel.Name = new_channel.ID
        new_channel.SamplesPerPixel = 1

def test_update_index(self):
    mother_case_id = uuid.uuid4().hex
    _submit_case_block(
        True, mother_case_id, user_id='user1', owner_id='owner1',
        case_type='mother', case_name='mother', date_modified=datetime.utcnow()
    )
    child_case_id = uuid.uuid4().hex
    _submit_case_block(
        True, child_case_id, user_id='user1', owner_id='owner1',
        case_type='child', case_name='child', date_modified=datetime.utcnow(),
        index={'mom': ('mother', mother_case_id)}
    )
    case = self.casedb.get_case(child_case_id)
    self.assertEqual(case.indices[0].identifier, 'mom')

    _submit_case_block(
        False, child_case_id, user_id='user1', date_modified=datetime.utcnow(),
        index={'mom': ('other_mother', mother_case_id)}
    )
    case = self.casedb.get_case(child_case_id)
    self.assertEqual(case.indices[0].referenced_type, 'other_mother')

def test_validation_error(self):
    target = uuid.uuid4().hex
    attribute = uuid.uuid4().hex
    e = exception.ValidationError(target=target, attribute=attribute)
    self.assertValidJsonRendering(e)
    self.assertIn(target, six.text_type(e))
    self.assertIn(attribute, six.text_type(e))

def test_dup_domain(self):
    openstack_driver = FakeExtensionManager.get_extension_objects(
        "vnc_cfg_api.resync")[0]
    orig_ks_domains_list = openstack_driver._ks_domains_list
    orig_ks_domain_get = openstack_driver._ks_domain_get
    try:
        openstack_driver._ks_domains_list = openstack_driver._ksv3_domains_list
        openstack_driver._ks_domain_get = openstack_driver._ksv3_domain_get
        logger.info('Creating first domain in "keystone"')
        dom_id = str(uuid.uuid4())
        dom_name = self.id()
        test_case.get_keystone_client().domains.add_domain(dom_id, dom_name)
        dom_obj = self._vnc_lib.domain_read(id=dom_id)
        self.assertThat(dom_obj.name, Equals(dom_name))

        logger.info('Creating second domain with same name but different id in "keystone"')
        new_dom_id = str(uuid.uuid4())
        test_case.get_keystone_client().domains.add_domain(new_dom_id, dom_name)
        new_dom_obj = self._vnc_lib.domain_read(id=new_dom_id)
        self.assertThat(new_dom_obj.name, Not(Equals(dom_name)))
        self.assertThat(new_dom_obj.name, Contains(dom_name))

        self._vnc_lib.domain_delete(id=dom_id)
        self._vnc_lib.domain_delete(id=new_dom_id)
    finally:
        openstack_driver._ks_domains_list = orig_ks_domains_list
        openstack_driver._ks_domain_get = orig_ks_domain_get

# assumed decorator: the docstring and the yield/finally structure imply
# contextlib context-manager semantics
@contextlib.contextmanager
def vios_to_vios_auth(source, dest, conn_info):
    """Context allowing for SSH between VIOS partitions

    This context will build an SSH key on the source host, put the key
    into the authorized_keys on the destination host, and make the
    private key file name available within the context.
    The key files and the key inserted into authorized_keys will be
    removed when the context exits.

    :param source: source IP or DNS name
    :param dest: destination IP or DNS name
    :param conn_info: dictionary object with SSH connection
                      information for both hosts
    """
    KEY_BASE_NAME = "os-%s" % uuid.uuid4().hex
    keypair_uuid = uuid.uuid4()
    src_conn_obj = ssh_connect(conn_info)

    dest_conn_info = Connection(dest, conn_info.username, conn_info.password)
    dest_conn_obj = ssh_connect(dest_conn_info)

    def run_command(conn_obj, cmd):
        stdout, stderr = processutils.ssh_execute(conn_obj, cmd)
        return stdout.strip().splitlines()

    def build_keypair_on_source():
        mkkey = ('ssh-keygen -f %s -N "" -C %s' %
                 (KEY_BASE_NAME, keypair_uuid.hex))
        ssh_command_as_root(src_conn_obj, mkkey)

        chown_key = ('chown %s %s*' % (conn_info.username, KEY_BASE_NAME))
        ssh_command_as_root(src_conn_obj, chown_key)

        cat_key = ('cat %s.pub' % KEY_BASE_NAME)
        pubkey = run_command(src_conn_obj, cat_key)

        return pubkey[0]

    def cleanup_key_on_source():
        rmkey = 'rm %s*' % KEY_BASE_NAME
        run_command(src_conn_obj, rmkey)

    def insert_into_authorized_keys(public_key):
        echo_key = 'echo "%s" >> .ssh/authorized_keys' % public_key
        ssh_command_as_root(dest_conn_obj, echo_key)

    def remove_from_authorized_keys():
        rmkey = ('sed /%s/d .ssh/authorized_keys > .ssh/authorized_keys' %
                 keypair_uuid.hex)
        ssh_command_as_root(dest_conn_obj, rmkey)

    public_key = build_keypair_on_source()
    insert_into_authorized_keys(public_key)

    try:
        yield KEY_BASE_NAME
    finally:
        remove_from_authorized_keys()
        cleanup_key_on_source()

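# A minimal usage sketch (names assumed, not from the source): the context
# yields the private key file name, usable for ssh/scp between the two
# partitions, and both key halves are removed on exit.
#
#     with vios_to_vios_auth(src_host, dest_host, conn_info) as key_name:
#         cmd = 'scp -i %s volume.img %s:/tmp/' % (key_name, dest_host)
#         ssh_command_as_root(src_conn, cmd)
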
def execute(self, code):
    if debug:
        execute_request = Msg(["execute_request"],
                              {"msg_id": "07033084-5cfd-4812-90a4-e4d24ffb6e3d",
                               "username": str(self.id),
                               "session": "07033084-5cfd-4812-90a4-e4d24ffb6e3d",
                               "msg_type": "execute_request"},
                              {"code": code,
                               "silent": False,
                               "store_history": False,
                               "user_variables": list(),
                               "user_expressions": {},
                               "allow_stdin": True},
                              {})
    else:
        execute_request = Msg(["execute_request"],
                              {"msg_id": str(uuid.uuid4()),
                               "username": str(self.id),
                               "session": str(uuid.uuid4()),
                               "msg_type": "execute_request"},
                              {"code": code,
                               "silent": False,
                               "store_history": False,
                               "user_variables": list(),
                               "user_expressions": {},
                               "allow_stdin": True},
                              {})
    ret = self.shell.send(execute_request)

def test_result_generation():
    jobstep = JobStep(
        id=uuid.uuid4(),
        project_id=uuid.uuid4(),
        job_id=uuid.uuid4(),
    )

    fp = StringIO(SAMPLE_XUNIT)

    handler = XunitHandler(jobstep)
    results = handler.get_tests(fp)

    assert len(results) == 2

    r1 = results[0]
    assert type(r1) == TestResult
    assert r1.step == jobstep
    assert r1.package is None
    assert r1.name == 'tests.test_report'
    assert r1.duration == 0.0
    assert r1.result == Result.failed
    assert r1.message == """tests/test_report.py:1: in <module>
>   import mock
E   ImportError: No module named mock"""
    assert r1.owner == 'foo'

    r2 = results[1]
    assert type(r2) == TestResult
    assert r2.step == jobstep
    assert r2.package is None
    assert r2.name == 'tests.test_report.ParseTestResultsTest.test_simple'
    assert r2.duration == 1.65796279907
    assert r2.result == Result.passed
    assert r2.message == ''
    assert r2.reruns == 1
    assert r2.owner is None

def test_notifier(self):
    self.config(notification_driver=['log'])

    transport = _FakeTransport(self.conf)

    notifier = messaging.Notifier(transport, 'test.localhost')

    message_id = uuid.uuid4()
    self.mox.StubOutWithMock(uuid, 'uuid4')
    uuid.uuid4().AndReturn(message_id)

    timeutils.set_time_override()

    message = {
        'message_id': str(message_id),
        'publisher_id': 'test.localhost',
        'event_type': 'test.notify',
        'priority': 'INFO',
        'payload': 'bar',
        'timestamp': str(timeutils.utcnow.override_time),
    }

    logger = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(logging, 'getLogger')
    logging.getLogger('oslo.messaging.notification.test.notify').\
        AndReturn(logger)

    logger.info(jsonutils.dumps(message))

    self.mox.ReplayAll()

    notifier.info({}, 'test.notify', 'bar')

def test_legacy_padding_validation(self):
    first_value = uuid.uuid4().hex
    second_value = uuid.uuid4().hex
    payload = (first_value, second_value)
    msgpack_payload = msgpack.packb(payload)

    # NOTE(lbragstad): This method preserves the way that keystone used to
    # percent encode the tokens, prior to bug #1491926.
    def legacy_pack(payload):
        tf = token_formatters.TokenFormatter()
        encrypted_payload = tf.crypto.encrypt(payload)

        # the encrypted_payload is returned with padding appended
        self.assertTrue(encrypted_payload.endswith('='))

        # using urllib.parse.quote will percent encode the padding, like
        # keystone did in Kilo.
        percent_encoded_payload = urllib.parse.quote(encrypted_payload)

        # ensure that the padding was actually percent encoded
        self.assertTrue(percent_encoded_payload.endswith('%3D'))
        return percent_encoded_payload

    token_with_legacy_padding = legacy_pack(msgpack_payload)
    tf = token_formatters.TokenFormatter()

    # demonstrate that we can validate a payload that has been percent
    # encoded with the Fernet logic that existed in Kilo
    serialized_payload = tf.unpack(token_with_legacy_padding)
    returned_payload = msgpack.unpackb(serialized_payload)
    self.assertEqual(first_value, returned_payload[0])
    self.assertEqual(second_value, returned_payload[1])

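# A quick standard-library illustration of the legacy behavior the test above
# exercises: percent encoding turns the base64 '=' padding into '%3D', which
# post-Kilo validation must still accept. The token value is a made-up sample.
#
#     >>> import urllib.parse
#     >>> urllib.parse.quote('gAAAAA==')
#     'gAAAAA%3D%3D'
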
def test_foreign_cluster_volume(self):
    """
    Volumes from other Flocker clusters are not listed.
    """
    block_device_api1 = cinderblockdeviceapi_for_test(
        test_case=self,
        cluster_id=uuid4(),
    )
    flocker_volume1 = block_device_api1.create_volume(
        dataset_id=uuid4(),
        size=REALISTIC_BLOCKDEVICE_SIZE,
    )

    block_device_api2 = cinderblockdeviceapi_for_test(
        test_case=self,
        cluster_id=uuid4(),
    )
    flocker_volume2 = block_device_api2.create_volume(
        dataset_id=uuid4(),
        size=REALISTIC_BLOCKDEVICE_SIZE,
    )

    self.assertEqual(
        ([flocker_volume1], [flocker_volume2]),
        (block_device_api1.list_volumes(),
         block_device_api2.list_volumes())
    )

def test_federated_domain_scoped_payload(self):
    exp_user_id = 'someNonUuidUserId'
    exp_methods = ['token']
    exp_domain_id = uuid.uuid4().hex
    exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
    exp_audit_ids = [provider.random_urlsafe_str()]
    exp_federated_info = {'group_ids': [{'id': 'someNonUuidGroupId'}],
                          'idp_id': uuid.uuid4().hex,
                          'protocol_id': uuid.uuid4().hex}

    payload = token_formatters.FederatedDomainScopedPayload.assemble(
        exp_user_id, exp_methods, exp_domain_id, exp_expires_at,
        exp_audit_ids, exp_federated_info)

    (user_id, methods, domain_id, expires_at, audit_ids,
     federated_info) = (
        token_formatters.FederatedDomainScopedPayload.disassemble(payload))

    self.assertEqual(exp_user_id, user_id)
    self.assertEqual(exp_methods, methods)
    self.assertEqual(exp_domain_id, domain_id)
    self.assertEqual(exp_expires_at, expires_at)
    self.assertEqual(exp_audit_ids, audit_ids)
    self.assertDictEqual(exp_federated_info, federated_info)

def get_vw_nvalues(model_run_uuid):
    """
    Given a model run uuid that contains the lookup table and ESRI .asc
    with vegetation codes, return an ascii file that has the n-values
    properly assigned
    """
    vwc = default_vw_client()

    records = vwc.dataset_search(model_run_uuid=model_run_uuid).records
    downloads = [r['downloads'][0] for r in records]

    asc_url = filter(lambda d: d.keys().pop() == 'ascii',
                     downloads).pop()['ascii']
    xlsx_url = filter(lambda d: d.keys().pop() == 'xlsx',
                      downloads).pop()['xlsx']

    asc_path = 'tmp_' + str(uuid4()) + '.asc'
    vwc.download(asc_url, asc_path)

    xlsx_path = 'tmp_' + str(uuid4()) + '.xlsx'
    vwc.download(xlsx_url, xlsx_path)

    asc_nvals = vegcode_to_nvalue(asc_path, xlsx_path)

    os.remove(asc_path)
    os.remove(xlsx_path)

    return asc_nvals

def testMcas(self):
    mc = GetMemcacheClient()
    key1, key2 = uuid4().hex, uuid4().hex
    mc.set_multi({key1: 'foo', key2: 'bar'})

    item1, item2 = mcas_get(mc, key1), mcas_get(mc, key2)
    self.assertTrue(mcas(mc, [(item1, 'foo2'), (item2, 'bar2')]))
    self.assertEqual(mc.get_multi([key1, key2]),
                     {key1: 'foo2', key2: 'bar2'})

def setUp(self):
    # Basic setup and mock/fake structures for testing only
    super(TestDatastoreBase, self).setUp()
    util.init_db()
    self.rand_id = str(uuid.uuid4())
    self.ds_name = "my-test-datastore" + self.rand_id
    self.ds_version = "my-test-version" + self.rand_id
    self.capability_name = "root_on_create" + self.rand_id
    self.capability_desc = "Enables root on create"
    self.capability_enabled = True
    self.datastore_version_id = str(uuid.uuid4())
    self.flavor_id = 1

    datastore_models.update_datastore(self.ds_name, False)
    self.datastore = Datastore.load(self.ds_name)

    datastore_models.update_datastore_version(
        self.ds_name, self.ds_version, "mysql", "", "", True)
    DatastoreVersionMetadata.add_datastore_version_flavor_association(
        self.ds_name, self.ds_version, [self.flavor_id])

    self.datastore_version = DatastoreVersion.load(self.datastore,
                                                   self.ds_version)
    self.test_id = self.datastore_version.id
    self.cap1 = Capability.create(self.capability_name,
                                  self.capability_desc, True)
    self.cap2 = Capability.create("require_volume" + self.rand_id,
                                  "Require external volume", True)
    self.cap3 = Capability.create("test_capability" + self.rand_id,
                                  "Test capability", False)

def test_pqueue_schedule_message(live_servicebus_config, partitioned_queue):
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=True)

    import uuid
    queue_client = client.get_queue(partitioned_queue)
    enqueue_time = (datetime.now() + timedelta(minutes=2)).replace(microsecond=0)
    with queue_client.get_receiver() as receiver:
        with queue_client.get_sender() as sender:
            content = str(uuid.uuid4())
            message_id = uuid.uuid4()
            message = Message(content)
            message.properties.message_id = message_id
            message.schedule(enqueue_time)
            sender.send(message)

        messages = receiver.fetch_next(timeout=120)
        if messages:
            try:
                data = str(messages[0])
                assert data == content
                assert messages[0].properties.message_id == message_id
                assert messages[0].scheduled_enqueue_time == enqueue_time
                assert messages[0].scheduled_enqueue_time == messages[0].enqueued_time.replace(microsecond=0)
                assert len(messages) == 1
            finally:
                for m in messages:
                    m.complete()
        else:
            raise Exception("Failed to receive scheduled message.")

def create_access_id(global_id=None):
    """ Create and return a new AccessID object.

        If 'global_id' is supplied, it should be a GlobalID object to use
        for this access ID.  Otherwise, a new unique GlobalID object will
        be created.
    """
    if global_id is None:
        global_id = create_unique_global_id()

    # Keep generating device IDs until we find one not already paired
    # with this global ID.
    while True:
        device_id = random_string()
        try:
            access_id = AccessID.objects.get(global_id=global_id,
                                             device_id=device_id)
        except AccessID.DoesNotExist:
            access_id = None
        if access_id is None:
            break

    access_id = AccessID()
    access_id.global_id = global_id
    access_id.device_id = device_id
    access_id.timestamp = timezone.now()
    access_id.access_id = uuid.uuid4().hex
    access_id.access_secret = uuid.uuid4().hex
    access_id.save()
    return access_id

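# A minimal usage sketch (based only on the fields set above): the caller
# gets back both halves of the freshly generated credential pair.
#
#     access = create_access_id()
#     print(access.access_id, access.access_secret)
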
def rename_conflicting_fields(json_obj):
    if isinstance(json_obj, dict):
        # iterate over a snapshot of the items, since keys are added and
        # deleted while we walk the dict
        for key, value in list(json_obj.items()):
            if key in conflicting_fields:
                # rename
                prefix = 'mongo' + key
                new_key = prefix
                while new_key in json_obj:
                    new_key = prefix + str(uuid.uuid4())
                json_obj[new_key] = value
                del json_obj[key]
            elif '.' in key:
                prefix = key.replace('.', ':')
                new_key = prefix
                while new_key in json_obj:
                    new_key = prefix + str(uuid.uuid4())
                json_obj[new_key] = value
                del json_obj[key]
            rename_conflicting_fields(value)
    elif isinstance(json_obj, list):
        for json_section in json_obj:
            rename_conflicting_fields(json_section)

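# A small worked example (assuming conflicting_fields contains '_id'): keys
# that collide with reserved names gain a 'mongo' prefix, and dotted keys,
# which MongoDB rejects, have '.' replaced with ':'. Nested dicts and lists
# are handled by the recursion.
#
#     doc = {'_id': 1, 'a.b': 2, 'nested': [{'c.d': 3}]}
#     rename_conflicting_fields(doc)
#     # doc is now {'mongo_id': 1, 'a:b': 2, 'nested': [{'c:d': 3}]}
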
def run_bad_case():
    backup_uuid = uuid4()
    restore_uuid = uuid4()
    restore_total = 1000
    backup_total = 1000

    backup_started(backup_uuid)
    restore_started(restore_uuid)

    def _backup_progress(uuid, mult, total):
        backup_running(uuid)
        backup_progress(uuid, int(mult * total), total,
                        100000 * int(mult * total), 100000 * total)

    def _restore_progress(uuid, mult, total):
        restore_progress(uuid, int(mult * total), total,
                         100000 * int(mult * total), 100000 * total)
        restore_running(uuid)

    for i in xrange(1, 11):
        reactor.callLater(2 * i, _backup_progress, backup_uuid,
                          i * 10. / 100.0, backup_total)
        reactor.callLater(2 * i, _restore_progress, restore_uuid,
                          i * 9. / 100.0, restore_total)

    reactor.callLater(30, backup_finished, backup_uuid)
    reactor.callLater(30, restore_failed, restore_uuid)