def test_should_update_restore_item_with_failed_status(self):
    """Marking an item as failed should persist the status and message."""
    # given
    _, item_key, _ = self.__create_restoration_job_with_one_item("111")
    failure_reason = "Cannot read a table without a schema"

    # when
    RestoreItem.update_with_failed(item_key, failure_reason)

    # then
    reloaded = RestoreItem.get_by_key(item_key)
    self.assertEqual(reloaded.status, RestoreItem.STATUS_FAILED)
    self.assertEqual(reloaded.status_message, failure_reason)
def generate_restore_items(cls, project_id, dataset_id, target_project_id,
                           target_dataset_id, max_partition_days):
    """Yield batches of RestoreItem entities for tables of a dataset.

    Tables are read in pages of 1000; for each page, only tables that
    have at least one backup produce a RestoreItem. Each yielded value
    is the list of items built from one page.
    """
    if max_partition_days:
        tables = Table \
            .get_tables_with_max_partition_days(project_id, dataset_id,
                                                max_partition_days)
    else:
        tables = Table.get_tables(project_id, dataset_id)

    for page in paginated(1000, tables):
        batch = []
        for table, backup in Table.get_last_backup_for_tables(page):
            if backup is None:
                # a table without any backup cannot be restored
                continue
            source_ref = \
                RestoreTableReference.backup_table_reference(table, backup)
            target_ref = TableReference(
                target_project_id,
                target_dataset_id,
                table.table_id,
                table.partition_id
            )
            batch.append(RestoreItem.create(source_ref, target_ref))
        logging.info("Restore items generator yields %s restore items",
                     len(batch))
        yield batch
def test_should_not_update_item_with_success_twice(self):
    """A repeated 'done' update must not overwrite the completion time."""
    # given
    _, item_key, _ = self.__create_restoration_job_with_one_item("111")

    # when: mark the item done twice, on two different (frozen) days
    with freeze_time("2012-01-14") as clock:
        RestoreItem.update_with_done(item_key)
        clock.move_to("2012-01-15")
        RestoreItem.update_with_done(item_key)

    # then: the completion date stays at the first call
    reloaded = RestoreItem.get_by_key(item_key)
    self.assertEqual(reloaded.completed, datetime(2012, 1, 14))
    self.assertEqual(reloaded.status_message, None)
def post(self, **_):
    """Callback endpoint recording the outcome of a finished copy job.

    Parses the request body, resolves the RestoreItem by its url-safe
    key, and marks it failed (with the job's error message) or done.
    """
    body = JsonRequestHelper.parse_request_body(self.request.body)
    self.__validate_json(body)

    item_key = ndb.Key(urlsafe=body.get('data').get('restoreItemKey'))
    job_result = CopyJobResult(body.get('jobJson'))

    if job_result.has_errors():
        RestoreItem.update_with_failed(item_key, job_result.error_message)
    else:
        RestoreItem.update_with_done(item_key)

    self._finish_with_success()
def test_that_proper_entities_were_stored_in_datastore(self):
    """Batch restore should persist the job plus one item per tuple."""
    # given
    job_key = RestorationJob.create(
        HARDCODED_UUID,
        create_disposition="CREATE_IF_NEEDED",
        write_disposition="WRITE_EMPTY")
    item_tuples = self.__create_restore_items(count=3)

    # when: restore in two batches (two items, then one)
    AsyncBatchRestoreService().restore(
        job_key,
        [[item_tuples[0][0], item_tuples[1][0]], [item_tuples[2][0]]])

    # then
    restoration_job = RestorationJob.get_by_id(HARDCODED_UUID)
    self.assertEqual(restoration_job.items_count, 3)

    stored_items = list(RestoreItem.query().filter(
        RestoreItem.restoration_job_key == restoration_job.key))
    # the original asserted items 0 and 1 field by field; keep that scope
    for index in (0, 1):
        _, source_ref, target_ref = item_tuples[index]
        stored = stored_items[index]
        self.assertEqual(stored.status, RestoreItem.STATUS_IN_PROGRESS)
        self.assertEqual(stored.completed, None)
        self.assertEqual(stored.source_table_reference, source_ref)
        self.assertEqual(stored.target_table_reference, target_ref)
def test_should_not_update_item_with_failed_twice(self):
    """A repeated failure report must not overwrite the completion time."""
    # given
    _, item_key, _ = self.__create_restoration_job_with_one_item("111")
    failure_reason = "Cannot read a table without a schema"

    # when: report the failure twice, on two different (frozen) days
    with freeze_time("2012-01-14") as clock:
        RestoreItem.update_with_failed(item_key, failure_reason)
        clock.move_to("2012-01-15")
        RestoreItem.update_with_failed(item_key, failure_reason)

    # then: the completion date stays at the first call
    reloaded = RestoreItem.get_by_key(item_key)
    self.assertEqual(reloaded.completed, datetime(2012, 1, 14))
    self.assertEqual(reloaded.status_message, failure_reason)
def test_should_return_item_by_url_safe_key(self):
    """get_by_key should fetch exactly the stored RestoreItem."""
    # given
    _, restore_item_key, restore_item = \
        self.__create_restoration_job_with_one_item("111")

    # when
    returned_item = RestoreItem.get_by_key(restore_item_key)

    # then
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual, consistent with the other tests in this file.
    self.assertEqual(returned_item, restore_item)
def __create_restore_item(self, restore_request, backup_entity, backup_item):
    """Build a RestoreItem pairing a backup's source table with its target."""
    table_entity = self.__get_source_table_entity(backup_entity)
    source_ref = RestoreTableReference.backup_table_reference(
        table_entity, backup_entity)
    target_ref = self.__create_target_table_reference(
        restore_request, table_entity)
    return RestoreItem.create(source_ref, target_ref,
                              backup_item.output_parameters)
def test_should_return_items_by_restoration_job_key(self):
    """get_restoration_items should return only items of the given job."""
    # given: one job with two items plus an unrelated single-item job
    restoration_job_key1 = self.__create_restoration_job_with_two_items(
        "222")
    self.__create_restoration_job_with_one_item("111")

    # when
    items = RestoreItem.get_restoration_items(restoration_job_key1)

    # then
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual. The unused key of the second job is also dropped.
    self.assertEqual(2, len(list(items)))
def test_should_return_item_when_quering_model_by_source_project_id(self):
    """Querying on the nested source project id should match one item."""
    # given: one item in 'project-abc' and one moved to another project
    self.__create_restoration_job_with_one_item("111")
    _, _, other_restore_item = self.__create_restoration_job_with_one_item(
        "222")
    other_restore_item.source_table.project_id = 'other_project'
    other_restore_item.put()

    # when
    items = RestoreItem.query() \
        .filter(RestoreItem.source_table.project_id == 'project-abc')

    # then
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual, consistent with the other tests in this file.
    self.assertEqual(1, items.count())
def test_restore_item_default_state_is_in_progress(self):
    """A freshly created RestoreItem starts in the IN_PROGRESS state."""
    # given
    source = TableReference(project_id='source_project_id',
                            dataset_id='source_dataset_id',
                            table_id='source_table_id')
    target = TableReference(project_id='target_project_id',
                            dataset_id='target_dataset_id',
                            table_id='target_table_id')

    # when
    created_item = RestoreItem.create(source, target)

    # then
    self.assertEqual(RestoreItem.STATUS_IN_PROGRESS, created_item.status)
def test_should_update_restore_item_when_copy_job_status_is_done(self):
    """A successful copy-job callback should mark the item as DONE."""
    # given
    item_key = self.prepare_initial_restore_item()
    request_payload = json.dumps(
        {"data": {"restoreItemKey": item_key.urlsafe()},
         "jobJson": JobResultExample.DONE})

    # when
    self.under_test.post('/callback/restore-finished/',
                         params=request_payload)

    # then
    reloaded = RestoreItem.get_by_key(item_key)
    self.assertEqual(reloaded.status, RestoreItem.STATUS_DONE)
    # NOTE(review): comparing against datetime.now() presumes time is
    # frozen by the test harness — confirm in the fixture setup.
    self.assertEqual(reloaded.completed, datetime.now())
def __create_restore_items(count=1):
    """Create `count` RestoreItems with numbered source/target references.

    Returns a list of (restore_item, source_reference, target_reference)
    tuples.
    """
    def build_reference(role, index):
        suffix = str(index)
        return TableReference(role + "_project_id_" + suffix,
                              role + "_dataset_id_" + suffix,
                              role + "_table_id_" + suffix,
                              role + "_partition_id_" + suffix)

    created = []
    for index in range(count):
        source_ref = build_reference("source", index)
        target_ref = build_reference("target", index)
        created.append((RestoreItem.create(source_ref, target_ref),
                        source_ref, target_ref))
    return created
def __create_restore_item_example(restoration_job_key):
    """Return an unsaved, in-progress RestoreItem with fixed test data."""
    source = TableReferenceEntity(project_id='project-abc',
                                  dataset_id='dataset_xyz',
                                  table_id='23423_table-43-logs_213213',
                                  partition_id='20171113')
    target = TableReferenceEntity(project_id='target-project',
                                  dataset_id='dataset_original',
                                  table_id='table-43-logs',
                                  partition_id='20171113')
    return RestoreItem(restoration_job_key=restoration_job_key,
                       status=RestoreItem.STATUS_IN_PROGRESS,
                       completed=None,
                       source_table=source,
                       target_table=target,
                       custom_parameters='{"external-id": "13-424"}')
def __get_restoration_items(self, restoration_job):
    """Yield a JSON-serialisable dict for every item of the given job."""
    for entry in RestoreItem.get_restoration_items(restoration_job.key):
        if entry.completed:
            completed_iso = entry.completed.isoformat()
        else:
            completed_iso = None
        yield {
            "status": entry.status,
            "statusMessage": entry.status_message,
            "completed": completed_iso,
            "customParameters":
                self.__parse_custom_parameters(entry.custom_parameters),
            "sourceTable": str(entry.source_table_reference),
            "targetTable": str(entry.target_table_reference)
        }
def test_should_update_restore_item_when_copy_job_status_has_errors(self):
    """An errored copy-job callback should mark the item FAILED."""
    # given
    item_key = self.prepare_initial_restore_item()
    expected_error_message = 'Copy job finished with errors: invalid:Cannot read a table without a schema, backendError:Backend error'
    request_payload = json.dumps(
        {"data": {"restoreItemKey": item_key.urlsafe()},
         "jobJson": JobResultExample.DONE_WITH_NOT_REPETITIVE_ERRORS})

    # when
    self.under_test.post('/callback/restore-finished/',
                         params=request_payload)

    # then
    reloaded = RestoreItem.get_by_key(item_key)
    self.assertEqual(reloaded.status, RestoreItem.STATUS_FAILED)
    # NOTE(review): comparing against datetime.now() presumes time is
    # frozen by the test harness — confirm in the fixture setup.
    self.assertEqual(reloaded.completed, datetime.now())
    self.assertEqual(reloaded.status_message, expected_error_message)
def test_failing_creating_dataset_should_update_restore_item_status(
        self, _):
    """When target dataset creation fails, the item must end up FAILED."""
    # given
    job_key = RestorationJob.create(
        HARDCODED_UUID,
        create_disposition="CREATE_IF_NEEDED",
        write_disposition="WRITE_EMPTY")
    single_item = self.__create_restore_items(count=1)[0][0]

    # when
    AsyncBatchRestoreService().restore(job_key, [[single_item]])

    # then
    stored_items = list(RestoreItem.query().filter(
        RestoreItem.restoration_job_key == job_key))
    self.assertEqual(stored_items[0].status, RestoreItem.STATUS_FAILED)
def __generate_expected_restore_item(
        table, target_project_id=RESTORATION_PROJECT_ID,
        custom_target_dataset=None):
    """Build the RestoreItem the generator is expected to emit for `table`."""
    source_ref = TableReference(
        project_id=BACKUP_PROJECT_ID,
        dataset_id=table.last_backup.dataset_id,
        table_id=table.last_backup.table_id,
        partition_id=table.partition_id)
    dataset = TestDatasetRestoreItemsGenerator.__create_target_dataset(
        custom_target_dataset)
    target_ref = TableReference(project_id=target_project_id,
                                dataset_id=dataset,
                                table_id=table.table_id,
                                partition_id=table.partition_id)
    return RestoreItem.create(source_ref, target_ref)
def __create_restore_item_example(restoration_job_key, status,
                                  completed=None, custom_parameters=None):
    """Return an unsaved RestoreItem with fixed table references.

    Status, completion time and custom parameters are caller-supplied.
    """
    source = TableReferenceEntity(project_id='project-abc',
                                  dataset_id='dataset_xyz',
                                  table_id='23423_table-43-logs_213213',
                                  partition_id='20171113')
    target = TableReferenceEntity(project_id='target-project',
                                  dataset_id='dataset_original',
                                  table_id='table-43-logs',
                                  partition_id='20171113')
    return RestoreItem(restoration_job_key=restoration_job_key,
                       status=status,
                       completed=completed,
                       source_table=source,
                       target_table=target,
                       custom_parameters=custom_parameters)
def test_that_restore_service_will_receive_suitable_request(
        self, mocked_restore_service, _):
    """Restoring a backup list should hand the batch service one item
    whose source points at the backup table and whose target uses the
    request's project/dataset with the source table/partition ids."""
    # given: a source table entity with one backup, both persisted
    source_entity = self.__create_table_entity("source_project_id",
                                               "source_dataset_id",
                                               "source_table_id",
                                               "source_partition_id")
    source_entity.put()
    backup_entity = self.__create_backup_entity(source_entity,
                                                "backup_dataset_id",
                                                "backup_table_id")
    backup_key = backup_entity.put()
    # custom output parameters must be propagated onto the restore item
    output_parameters = "{\"test_param_key\": \"test_value\"}"
    backup_item = BackupItem(backup_key, output_parameters)
    request = BackupListRestoreRequest(
        backup_items=[backup_item],
        target_project_id="target_project_id",
        target_dataset_id="target_dataset_id",
        write_disposition="write_disposition",
        create_disposition="create_disposition")
    # expected: source = backup table in the configured backup project
    expected_source_table_reference = TableReference(
        Configuration.backup_project_id, "backup_dataset_id",
        "backup_table_id", "source_partition_id")
    # expected: target = request's project/dataset, original table ids
    expected_target_table_reference = TableReference(
        "target_project_id", "target_dataset_id", "source_table_id",
        "source_partition_id")
    expected_restore_item = RestoreItem.create(
        expected_source_table_reference,
        expected_target_table_reference,
        output_parameters)

    # when
    restoration_job_id = BackupListRestoreService().restore(request)

    # then: the mocked batch service got the job key and one-item batch
    mocked_restore_service.assert_called_once_with(
        ndb.Key('RestorationJob', restoration_job_id),
        [[expected_restore_item]])
def test_that_restore_service_will_generate_default_values_if_missing(
        self, mocked_restore_service, _):
    """When the request omits target project/dataset and dispositions,
    the restore target should default to the original source table."""
    # given: a source table entity with one backup, both persisted
    source_entity = self.__create_table_entity("source_project_id",
                                               "source_dataset_id",
                                               "source_table_id",
                                               "source_partition_id")
    source_entity.put()
    backup_entity = self.__create_backup_entity(source_entity,
                                                "backup_dataset_id",
                                                "backup_table_id")
    backup_key = backup_entity.put()
    # every optional field of the request is left as None
    request = BackupListRestoreRequest(backup_items=iter(
        [BackupItem(backup_key)]),
        target_project_id=None,
        target_dataset_id=None,
        write_disposition=None,
        create_disposition=None)
    # expected: source = backup table in the configured backup project
    expected_source_table_reference = TableReference(
        Configuration.backup_project_id, "backup_dataset_id",
        "backup_table_id", "source_partition_id")
    # expected: target falls back to the original source table location
    expected_target_table_reference = TableReference(
        "source_project_id", "source_dataset_id", "source_table_id",
        "source_partition_id")
    expected_restore_item = RestoreItem.create(
        expected_source_table_reference,
        expected_target_table_reference)

    # when
    restoration_job_id = BackupListRestoreService().restore(request)

    # then: the mocked batch service got the job key and one-item batch
    expected_key = ndb.Key('RestorationJob', restoration_job_id)
    mocked_restore_service.assert_called_once()
    mocked_restore_service.assert_called_with(expected_key,
                                              [[expected_restore_item]])
def prepare_initial_restore_item(self):
    """Persist an in-progress RestoreItem and return its datastore key."""
    source_ref = TableReference('s_project', 's_dataset', 's_table',
                                's_partition')
    target_ref = TableReference('t_project', 't_dataset', 't_table',
                                't_partition')
    item = RestoreItem.create(source_ref, target_ref)
    item.status = RestoreItem.STATUS_IN_PROGRESS
    return item.put()