def testcp_s3copy_dryrun(self):
    """A dry-run 's3copy' must neither tag the entry nor upload it to s3."""
    # Precondition: entry carries no SAVED_TO_S3 tag and is absent from the bucket.
    self.mentry_not_in_s3.reload()
    self.assertFalse(
        kaltura_aws.SAVED_TO_S3 in self.mentry_not_in_s3.getTags(),
        "not tagged before test")
    self.assertFalse(
        aws.s3_exists(self.entry_id_not_in_s3, self.bucket),
        'not in s3 before test')

    # No '--s3copy' flag => dry run.
    rc = kaltura_aws._main(['s3copy', '-i', self.entry_id_not_in_s3])
    self.assertEqual(rc, None)

    # Postcondition: the dry run left both the tags and the bucket untouched.
    self.mentry_not_in_s3.reload()
    self.assertFalse(
        kaltura_aws.SAVED_TO_S3 in self.mentry_not_in_s3.getTags(),
        "not tagged after test")
    self.assertFalse(
        aws.s3_exists(self.entry_id_not_in_s3, self.bucket),
        'not in s3 after test')
def testcp_s3copy_run(self):
    """A real 's3copy' run tags the entry and uploads it; running twice is idempotent."""
    argv = ['s3copy', '--s3copy', '-i', self.entry_id_not_in_s3]
    for _ in range(0, 2):
        rc = kaltura_aws._main(argv)
        self.assertEqual(rc, None)
        # check tagged and file in s3
        # BUG FIX: original used assertTrue(SAVED_TO_S3, tags) - the two-argument
        # form treats the second argument as the failure MESSAGE, so the check
        # always passed.  It also reloaded a throwaway MediaEntry wrapper instead
        # of self.mentry_not_in_s3 as the sibling tests do.
        self.mentry_not_in_s3.reload()
        self.assertIn(kaltura_aws.SAVED_TO_S3, self.mentry_not_in_s3.getTags())
        self.assertTrue(aws.s3_exists(self.entry_id_not_in_s3, self.bucket))
def testre_restore_video_from_s3_dryrun(self):
    """A dry-run 'restore_from_s3' must leave tags and the s3 object untouched."""
    # Preconditions: entry is a placeholder, is saved to s3, and sits in the bucket.
    self.mentry_not_in_s3.reload()
    self.assertTrue(
        kaltura_aws.PLACE_HOLDER_VIDEO in self.mentry_not_in_s3.getTags(),
        "tagged before test")
    self.assertTrue(
        aws.s3_exists(self.entry_id_not_in_s3, self.bucket),
        'in s3 before test')
    # BUG FIX: this precondition's message said "tagged after test" although it
    # runs before the command under test.
    self.assertTrue(
        kaltura_aws.SAVED_TO_S3 in self.mentry_not_in_s3.getTags(),
        "saved before test")

    # No '--restore' style flag => dry run.
    argv = ['restore_from_s3', '-i', self.entry_id_not_in_s3]
    rc = kaltura_aws._main(argv)
    self.assertEqual(rc, None)

    # Postconditions: nothing changed.
    self.mentry_not_in_s3.reload()
    self.assertTrue(
        kaltura_aws.PLACE_HOLDER_VIDEO in self.mentry_not_in_s3.getTags(),
        "tagged after test")
    self.assertTrue(
        aws.s3_exists(self.entry_id_not_in_s3, self.bucket),
        'in s3 after test')
    self.assertTrue(
        kaltura_aws.SAVED_TO_S3 in self.mentry_not_in_s3.getTags(),
        "saved after test")
def testr_replace_video_with_placeholder_dryrun(self):
    """A dry-run 'replace_video' must not tag the entry as a placeholder."""
    # Precondition: entry is not a placeholder but its original is archived in s3.
    self.mentry_not_in_s3.reload()
    self.assertFalse(
        kaltura_aws.PLACE_HOLDER_VIDEO in self.mentry_not_in_s3.getTags(),
        "not tagged before test")
    self.assertTrue(
        aws.s3_exists(self.entry_id_not_in_s3, self.bucket),
        'in s3 before test')

    # No '--replace' flag => dry run.
    rc = kaltura_aws._main(['replace_video', '-i', self.entry_id_not_in_s3])

    # Postcondition: placeholder tag still absent; command reported success.
    self.mentry_not_in_s3.reload()
    self.assertFalse(
        kaltura_aws.PLACE_HOLDER_VIDEO in self.mentry_not_in_s3.getTags(),
        "not tagged after test")
    self.assertEqual(rc, None)
def entry_health_check(mentry, bucket):
    """
    Classify a media entry's archival state as healthy or not.

    Runs a sequence of invariant checks between the entry's Kaltura tags
    (SAVED_TO_S3, PLACE_HOLDER_VIDEO), its original flavor, and the s3 bucket;
    the first violated invariant sets healthy=False and an ERROR explanation.
    Check order matters and is preserved exactly.

    :param mentry: kaltura.MediaEntry wrapper around the entry to check
    :param bucket: name of the aws s3 archive bucket
    :return: (healthy, compatible_size, explanation) - bool, bool, str
    """
    original = mentry.getOriginalFlavor()
    entry = mentry.entry
    # check whether item is 'healthy': has original in READY state
    # (idiom fix: 'is not None' instead of '!= None')
    healthy = original is not None and kaltura.Flavor(original).isReady()
    explanation = ''
    if not healthy:
        explanation = 'ERROR: No healthy Original'
    # if there is an s3 entry it should be tagged SAVED_TO_S3
    s3Exists = aws.s3_exists(entry.getId(), bucket)
    saved_tag = SAVED_TO_S3 in entry.getTags()
    if healthy and s3Exists and not saved_tag:
        explanation = 'ERROR: in bucket {} - but no {} tag'.format(bucket, SAVED_TO_S3)
        healthy = False
    # if it is tagged SAVED_TO_S3 there should be an s3 entry
    if healthy and saved_tag and not s3Exists:
        explanation = 'ERROR: has tag {} - but not in bucket {}'.format(SAVED_TO_S3, bucket)
        healthy = False
    # if it is tagged PLACE_HOLDER_VIDEO it should also be tagged SAVED_TO_S3
    replaced_tag = PLACE_HOLDER_VIDEO in entry.getTags()
    if healthy and replaced_tag and not saved_tag:
        explanation = 'ERROR: has tag {} - but no {} tag'.format(PLACE_HOLDER_VIDEO, SAVED_TO_S3)
        healthy = False
    compatible_size = False
    if original:
        compatible_size = aws_compatible_size(
            original.getSize(), aws.s3_size(entry.getId(), bucket))
    # if it is saved and not tagged PLACE_HOLDER_VIDEO then original flavor
    # and s3 entry size should match
    if healthy and saved_tag and not replaced_tag and not compatible_size:
        explanation = 'ERROR: is {} and not {} - size mismatch of bucket entry and original flavor'.format(SAVED_TO_S3, PLACE_HOLDER_VIDEO)
        healthy = False
    # if it is saved and sizes match then there should not be a replaced_tag
    if healthy and saved_tag and compatible_size and replaced_tag:
        explanation = 'ERROR: is {} and {} - but size match of bucket entry and original flavor'.format(SAVED_TO_S3, PLACE_HOLDER_VIDEO)
        healthy = False
    # oversize originals are a warning only - healthy stays True
    if healthy and s3Exists and original.getSize() > CheckAndLog.SIZE_LIMIT_KB:
        explanation = 'WARNING: in bucket {} but original beyond size limit {}'.format(bucket, CheckAndLog.SIZE_LIMIT_KB)
    if healthy and not explanation:
        explanation = 'HEALTHY'
    return healthy, compatible_size, explanation
def testr_replace_video_with_placeholder_run(self):
    """A real 'replace_video' run tags the entry as a placeholder; twice is idempotent."""
    # Precondition: entry is not yet a placeholder but its original is archived.
    self.mentry_not_in_s3.reload()
    self.assertFalse(
        kaltura_aws.PLACE_HOLDER_VIDEO in self.mentry_not_in_s3.getTags(),
        "not tagged before test")
    self.assertTrue(
        aws.s3_exists(self.entry_id_not_in_s3, self.bucket),
        'in s3 before test')

    # force inclusion of all videos even recently created ones
    argv = [
        'replace_video', '--replace', '--created_before', '0',
        '-i', self.entry_id_not_in_s3,
    ]
    for _ in range(0, 2):
        rc = kaltura_aws._main(argv)
        self.assertEqual(rc, None)
        self.mentry_not_in_s3.reload()
        self.assertTrue(
            kaltura_aws.PLACE_HOLDER_VIDEO in self.mentry_not_in_s3.getTags(),
            "tagged after test")
def copy_to_s3(params):
    """
    Save original flavors to aws for matching kaltura records.

    For each entry matched by the filter: if its original flavor is READY,
    either note that it is already archived, or download it, store it in the
    bucket, tag the entry SAVED_TO_S3 and delete the local copy.  Oversize
    originals are skipped with a log message.  Entries without a ready
    original count as errors.

    :param params: hash that contains kaltura connection information
                   as well as filtering options given for the list action
    :return: number of entries whose original flavor was missing or not READY
    """
    doit = _setup(params, 's3copy')
    # renamed from 'filter' so the builtin is not shadowed
    entry_filter = _create_filter(params)
    bucket = params['awsBucket']
    tmp = params['tmp']
    nerror = 0
    for entry in entry_filter:
        done = False
        s3_file = entry.getId()
        checker = CheckAndLog(kaltura.MediaEntry(entry))
        if checker.has_original() and checker.original_ready():
            if aws.s3_exists(s3_file, bucket):
                # already archived - nothing to do
                checker.mentry.log_action(
                    logging.INFO, doit, "Archived",
                    's3://{}/{}'.format(bucket, s3_file))
            else:
                if checker.original_below_size_limit():
                    # download from kaltura
                    fname = checker.mentry.downloadOriginal(tmp, doit)
                    if fname:
                        # store to S3, tag the entry, then drop the local copy
                        aws.s3_store(fname, bucket, entry.getId(), doit)
                        kaltura.MediaEntry(entry).addTag(SAVED_TO_S3, doit)
                        checker.mentry.log_action(logging.INFO, doit, "Delete", fname)
                        if doit:
                            os.remove(fname)
                else:
                    checker.mentry.log_action(
                        logging.INFO, doit, "Skip Copy",
                        "original flavor exceeds size limit {} kb".format(CheckAndLog.SIZE_LIMIT_KB))
            done = True
        if not done:
            nerror += 1
    return nerror
def setUpClass(cls):
    """Reset the designated test entry: strip all tags, drop its s3 copy, sanity-check its size."""
    print("-- setUpClass {}".format(cls))
    TestKalturaAwsCli.no_s3_copy_id = TestKaltura.TEST_ID_NO_S3_COPY
    TestKalturaAwsCli.no_orig_id = TestKaltura.TEST_ID_NO_ORIGINAL

    mentry = kaltura.MediaEntry(
        kaltura.api.getClient().media.get(TestKaltura.TEST_ID_NO_S3_COPY))
    if mentry.getTags():
        print("FIXING: entry {} has tags - removing all".format(
            TestKalturaAwsCli.no_s3_copy_id))
        # re-fetch a fresh copy of the entry before clearing its tags
        mentry = kaltura.MediaEntry(
            kaltura.api.getClient().media.get(TestKaltura.TEST_ID_NO_S3_COPY))
        mentry.setTags([], doUpdate=True)

    if aws.s3_exists(TestKalturaAwsCli.no_s3_copy_id, TestKaltura.bucket):
        print("FIXING: deleting {} from s3://{}".format(
            TestKalturaAwsCli.no_s3_copy_id, TestKaltura.bucket))
        aws.s3_delete(TestKaltura.bucket, TestKalturaAwsCli.no_s3_copy_id, True)

    # A tiny original flavor means the real video was already replaced by the
    # placeholder - the test fixture is unusable, so abort hard.
    original_size = kaltura.MediaEntry(mentry.entry).getOriginalFlavor().getSize()
    if original_size < 350:
        print("ABORT: based on size ORIGINAL FLAVOR seems to be replacement video")
        assert False
def aws_s3_exists(self, bucket):
    """Return True iff this entry's id exists as a key in the given s3 bucket, logging the outcome."""
    key = self.entry.getId()
    message = 'Exists s3://{}/{})'.format(bucket, key)
    found = aws.s3_exists(key, bucket)
    self._log_action(found, message)
    return found