def test_fsic_specific_id(self):
    fsics = {self.data["group2_id"].id: 1}
    self.data["sc"].current_transfer_session.client_fsic = json.dumps(fsics)

    _queue_into_buffer(self.data["sc"].current_transfer_session)

    # ensure only records modified with 2nd instance id are buffered
    self.assertRecordsNotBuffered(self.data["group1_c1"])
    self.assertRecordsNotBuffered(self.data["group1_c2"])
    self.assertRecordsBuffered(self.data["group2_c1"])

def test_all_fsics(self):
    fsics = {self.data["group1_id"].id: 1, self.data["group2_id"].id: 1}
    self.data["sc"].current_transfer_session.client_fsic = json.dumps(fsics)

    _queue_into_buffer(self.data["sc"].current_transfer_session)

    # ensure all store and buffer records are buffered
    self.assertRecordsBuffered(self.data["group1_c1"])
    self.assertRecordsBuffered(self.data["group1_c2"])
    self.assertRecordsBuffered(self.data["group2_c1"])

def test_valid_fsic_but_invalid_partition(self):
    filter_prefixes = "{}:user:summary".format(self.data["user1"].id)
    fsics = {self.data["group2_id"].id: 1}
    self.data["sc"].current_transfer_session.filter = filter_prefixes
    self.data["sc"].current_transfer_session.client_fsic = json.dumps(fsics)

    _queue_into_buffer(self.data["sc"].current_transfer_session)

    # ensure that record with valid fsic but invalid partition is not buffered
    self.assertRecordsNotBuffered([self.data["user4"]])

def test_partition_and_fsic_buffering(self):
    filter_prefixes = "{}:user:summary".format(self.data["user1"].id)
    fsics = {self.data["group1_id"].id: 1}
    self.data["sc"].current_transfer_session.filter = filter_prefixes
    self.data["sc"].current_transfer_session.client_fsic = json.dumps(fsics)

    _queue_into_buffer(self.data["sc"].current_transfer_session)

    # ensure records updated with 1st instance id and summarylog partition are buffered
    self.assertRecordsBuffered(self.data["user1_sumlogs"])
    self.assertRecordsNotBuffered(self.data["user2_sumlogs"])
    self.assertRecordsNotBuffered(self.data["user3_sumlogs"])

def test_partition_prefix_buffering(self):
    fsics = {self.data["group2_id"].id: 1}
    filter_prefixes = "{}".format(self.data["user2"].id)
    self.data["sc"].current_transfer_session.filter = filter_prefixes
    self.data["sc"].current_transfer_session.client_fsic = json.dumps(fsics)

    _queue_into_buffer(self.data["sc"].current_transfer_session)

    # ensure only records with user2 partition are buffered
    self.assertRecordsBuffered([self.data["user2"]])
    self.assertRecordsBuffered(self.data["user2_sumlogs"])
    self.assertRecordsBuffered(self.data["user2_interlogs"])
    self.assertRecordsNotBuffered([self.data["user3"]])

def test_partition_filter_buffering(self):
    fsics = {self.data["group2_id"].id: 1}
    filter_prefixes = "{}:user:summary\n{}:user:interaction".format(
        self.data["user3"].id, self.data["user3"].id
    )
    self.data["sc"].current_transfer_session.filter = filter_prefixes
    self.data["sc"].current_transfer_session.client_fsic = json.dumps(fsics)

    _queue_into_buffer(self.data["sc"].current_transfer_session)

    # ensure only records matching the filtered partitions are buffered
    self.assertRecordsNotBuffered([self.data["user2"]])
    self.assertRecordsBuffered(self.data["user3_sumlogs"])
    self.assertRecordsBuffered(self.data["user3_interlogs"])

def test_fsic_counters_too_high(self):
    fsics = {
        self.data["group1_id"].id: 100,
        self.data["group2_id"].id: 100,
    }
    self.data["sc"].current_transfer_session.client_fsic = json.dumps(fsics)
    self.data["sc"].current_transfer_session.server_fsic = json.dumps(fsics)

    _queue_into_buffer(self.data["sc"].current_transfer_session)

    # ensure no records are buffered
    self.assertFalse(Buffer.objects.all())
    self.assertFalse(RecordMaxCounterBuffer.objects.all())

def test_fsic_counters(self):
    counter = InstanceIDModel.objects.get(id=self.data["group1_id"].id).counter
    fsics = {self.data["group1_id"].id: counter - 1}
    self.data["sc"].current_transfer_session.client_fsic = json.dumps(fsics)

    fsics[self.data["group1_id"].id] = 0
    self.data["sc"].current_transfer_session.server_fsic = json.dumps(fsics)

    _queue_into_buffer(self.data["sc"].current_transfer_session)

    # ensure only records with updated 1st instance id are buffered
    self.assertRecordsBuffered(self.data["group1_c1"])
    self.assertRecordsBuffered(self.data["group1_c2"])
    self.assertRecordsNotBuffered(self.data["group2_c1"])

def create(self, request):  # noqa: C901
    # attempt to load the requested syncsession
    try:
        syncsession = core.SyncSession.objects.filter(active=True).get(
            id=request.data.get("sync_session_id")
        )
    except core.SyncSession.DoesNotExist:
        return response.Response(
            "Requested syncsession does not exist or is no longer active!",
            status=status.HTTP_400_BAD_REQUEST,
        )

    # a push is to transfer data from client to server; a pull is the inverse
    is_a_push = request.data.get("push")

    # check that the requested filter is within the appropriate certificate scopes
    scope_error_msg = None
    requested_filter = certificates.Filter(request.data.get("filter"))
    server_scope = syncsession.server_certificate.get_scope()
    client_scope = syncsession.client_certificate.get_scope()
    if is_a_push:
        if not requested_filter.is_subset_of(client_scope.write_filter):
            scope_error_msg = "Client certificate scope does not permit pushing for the requested filter."
        if not requested_filter.is_subset_of(server_scope.read_filter):
            scope_error_msg = "Server certificate scope does not permit receiving pushes for the requested filter."
    else:
        if not requested_filter.is_subset_of(client_scope.read_filter):
            scope_error_msg = "Client certificate scope does not permit pulling for the requested filter."
        if not requested_filter.is_subset_of(server_scope.write_filter):
            scope_error_msg = "Server certificate scope does not permit responding to pulls for the requested filter."
    if scope_error_msg:
        return response.Response(scope_error_msg, status=status.HTTP_403_FORBIDDEN)

    # build the data to be used for creating the transfersession
    data = {
        "id": request.data.get("id"),
        "start_timestamp": timezone.now(),
        "last_activity_timestamp": timezone.now(),
        "active": True,
        "filter": requested_filter,
        "push": is_a_push,
        "records_total": request.data.get("records_total") if is_a_push else None,
        "sync_session": syncsession,
        "client_fsic": request.data.get("client_fsic") or "{}",
        "server_fsic": "{}",
    }

    transfersession = core.TransferSession(**data)
    transfersession.full_clean()
    transfersession.save()

    # must update database max counters before calculating fsics
    if not is_a_push:
        if getattr(settings, "MORANGO_SERIALIZE_BEFORE_QUEUING", True):
            with OperationLogger("Serializing records", "Serialization complete"):
                _serialize_into_store(
                    transfersession.sync_session.profile,
                    filter=requested_filter,
                )

    transfersession.server_fsic = json.dumps(
        DatabaseMaxCounter.calculate_filter_max_counters(requested_filter)
    )
    transfersession.save()

    if not is_a_push:
        # queue records to get ready for pulling
        with OperationLogger("Queueing records into buffer", "Queueing complete"):
            _queue_into_buffer(transfersession)

        # update records_total on transfer session object
        records_total = Buffer.objects.filter(
            transfer_session=transfersession
        ).count()
        transfersession.records_total = records_total
        transfersession.save()

    return response.Response(
        serializers.TransferSessionSerializer(transfersession).data,
        status=status.HTTP_201_CREATED,
    )