def setUp(self):
    super(TestKalturaAwsCli, self).setUp()
    self.entry_id_not_in_s3 = TestKaltura.TEST_ID_NO_S3_COPY
    self.mentry_not_in_s3 = kaltura.MediaEntry(
        kaltura.api.getClient().media.get(self.entry_id_not_in_s3))
    self.mentry_not_in_s3.reload()
    self.entry_not_in_s3 = self.mentry_not_in_s3.entry
def list(params):
    """
    print matching kaltura records

    :param params: hash that contains kaltura connection information as well
                   as filtering options given for the list action
    :return: 0
    """
    _setup(params, None)
    filter = _create_filter(params)
    if params['mode'] == 'video':
        # one row per entry
        columns = [kaltura.LAST_PLAYED_DATE, kaltura.PLAYS, kaltura.ENTRY_ID,
                   kaltura.STATUS, SAVED_TO_S3, PLACE_HOLDER_VIDEO,
                   kaltura.CREATED_AT_DATE, kaltura.CREATED_AT,
                   kaltura.CREATOR_ID, kaltura.CATEGORIES_IDS, kaltura.CATEGORIES]
        print('\t'.join(columns))
        for entry in filter:
            kentry = kaltura.MediaEntry(entry)
            vals = [kentry.report_str(c) for c in columns]
            print("\t".join(v.decode('utf-8') for v in vals))
    else:
        # one row per flavor asset
        columns = [kaltura.ENTRY_ID, kaltura.FLAVOR_ID, kaltura.ORIGINAL,
                   kaltura.SIZE, kaltura.CREATED_AT_DATE,
                   kaltura.DELETED_AT_DATE, kaltura.STATUS]
        print('\t'.join(columns))
        for entry in filter:
            for f in kaltura.FlavorAssetIterator(entry):
                kf = kaltura.Flavor(f)
                vals = [kf.report_str(c) for c in columns]
                print("\t".join(v.decode('utf-8') for v in vals))
    return 0
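
# Hedged usage sketch for list(). Only 'mode' is read directly by the code
# above; connection and filter options are consumed by _setup() and
# _create_filter(), whose exact key names are not shown in this section.
# The _example_* helper name is illustrative, not part of the module.
def _example_list_video_report(base_params):
    params = dict(base_params)  # base_params: connection/filter options
    params['mode'] = 'video'    # 'video' -> one row per entry; else one row per flavor
    return list(params)         # prints a tab separated report, returns 0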
def replace_videos(params):
    """
    replace original videos with place holder video for matching entries

    prints counts of videos with different outcomes:
    REPLACE_DONE, REPLACE_DONE_BEFORE, REPLACE_BIG_FILE_SKIP, REPLACE_FAILED

    :param params: hash that contains kaltura connection information as well
                   as filtering options given for the replace action
    :return: number of failed replacements
    """
    doit = _setup(params, 'replace')
    wait = params['wait_ready']
    filter = _create_filter(params)
    bucket = params['awsBucket']
    place_holder = params['videoPlaceholder']
    counts = _restore_counts()
    for entry in filter:
        mentry = kaltura.MediaEntry(entry)
        rc = replace_entry_video(mentry, place_holder, bucket, doit)
        if wait and rc == REPLACE_DONE:
            wait_for_ready(mentry, doit)
        counts[rc] += 1
    print("REPLACE Filter {}".format(filter))
    _log_restore_counts(counts, filter)
    return counts[REPLACE_FAILED]
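
# Hedged sketch of the counter mapping that _restore_counts() must provide,
# inferred from its use in replace_videos() above and in restore_from_s3()
# and repair() below: one zeroed counter per outcome constant. The real
# helper is defined elsewhere in this module; this shape is an assumption.
def _example_outcome_counts():
    outcomes = (REPLACE_DONE, REPLACE_DONE_BEFORE, REPLACE_BIG_FILE_SKIP,
                REPLACE_FAILED, RESTORE_UNDEFINED, RESTORE_DONE,
                RESTORE_DONE_BEFORE, RESTORE_WAIT_GLACIER, RESTORE_FAILED)
    return {rc: 0 for rc in outcomes}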
def health_check(params):
    """
    print a health status report for matching kaltura records

    :param params: hash that contains kaltura connection information as well
                   as filtering options given for the health check action
    :return: number of unhealthy videos encountered
    """
    _setup(params, None)
    filter = _create_filter(params)
    bucket = params['awsBucket']
    nerror = 0
    columns = [kaltura.ORIGINAL, kaltura.ORIGINAL_STATUS, SAVED_TO_S3,
               PLACE_HOLDER_VIDEO, kaltura.CREATED_AT_DATE]
    print("\t".join([kaltura.ENTRY_ID, 'status-ok'] + columns +
                    ['s3-size', kaltura.ORIGINAL_SIZE, 'size_match', '---']))
    for entry in filter:
        mentry = kaltura.MediaEntry(entry)
        healthy, comp_size, message = entry_health_check(mentry, bucket)
        vals = [mentry.report_str(kaltura.ENTRY_ID),
                str(healthy).ljust(len('status-ok'))]
        vals = vals + [mentry.report_str(c) for c in columns]
        vals = vals + [str(aws.s3_size(entry.getId(), bucket)/1024),
                       mentry.report_str(kaltura.ORIGINAL_SIZE),
                       str(comp_size), message]
        print("\t".join(v.decode('utf-8') for v in vals))
        if not healthy:
            mentry.log_action(logging.ERROR, True, 'STATUS', message)
            nerror += 1
    return nerror
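
# Hedged sketch: checking a single entry with the entry_health_check()
# contract used above - it returns a (healthy, comp_size, message) triple.
# The helper name is illustrative; entry_id and bucket are placeholders.
def _example_check_one(entry_id, bucket):
    mentry = kaltura.MediaEntry(kaltura.api.getClient().media.get(entry_id))
    healthy, _, message = entry_health_check(mentry, bucket)
    print("{}\t{}\t{}".format(entry_id, healthy, message))
    return healthy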
def testcp_s3copy_run(self):
    # run twice; the checks must also hold on a repeated run
    for _ in range(0, 2):
        argv = ['s3copy', '--s3copy', '-i', self.entry_id_not_in_s3]
        rc = kaltura_aws._main(argv)
        self.assertEqual(rc, None)
        # check tagged and file in s3
        mentry = kaltura.MediaEntry(self.entry_not_in_s3)
        mentry.reload()
        self.assertIn(kaltura_aws.SAVED_TO_S3, mentry.entry.getTags())
        self.assertTrue(aws.s3_exists(self.entry_id_not_in_s3, self.bucket))
def copy_to_s3(params):
    """
    save original flavors to aws for matching kaltura records

    :param params: hash that contains kaltura connection information as well
                   as filtering options given for the s3copy action
    :return: number of entries without a ready original flavor
    """
    doit = _setup(params, 's3copy')
    filter = _create_filter(params)
    bucket = params['awsBucket']
    tmp = params['tmp']
    nerror = 0
    for entry in filter:
        done = False
        s3_file = entry.getId()
        checker = CheckAndLog(kaltura.MediaEntry(entry))
        if checker.has_original() and checker.original_ready():
            if aws.s3_exists(s3_file, bucket):
                # already archived under its entry id
                checker.mentry.log_action(logging.INFO, doit, "Archived",
                                          's3://{}/{}'.format(bucket, s3_file))
            else:
                if checker.original_below_size_limit():
                    # download from kaltura
                    fname = checker.mentry.downloadOriginal(tmp, doit)
                    if fname:
                        # store to S3, tag the entry, then remove the local copy
                        aws.s3_store(fname, bucket, entry.getId(), doit)
                        kaltura.MediaEntry(entry).addTag(SAVED_TO_S3, doit)
                        checker.mentry.log_action(logging.INFO, doit, "Delete", fname)
                        if doit:
                            os.remove(fname)
                else:
                    checker.mentry.log_action(
                        logging.INFO, doit, "Skip Copy",
                        "original flavor exceeds size limit {} kb".format(
                            CheckAndLog.SIZE_LIMIT_KB))
            done = True
        if not done:
            nerror += 1
    return nerror
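
# Hedged sketch: the same s3copy action driven through the CLI entry point,
# mirroring the argv used in testcp_s3copy_run() above, where '-i <entry_id>'
# narrows the filter to a single entry. The helper name is illustrative.
def _example_s3copy_one(entry_id):
    return _main(['s3copy', '--s3copy', '-i', entry_id])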
def repair(params):
    """
    repair entries that do not have original flavors

    depending on tags replace with place_holder video or with video from s3

    :param params: hash that contains kaltura connection information as well
                   as filtering options given to the repair action
    :return: number of entries not repaired due to failure or slow GLACIER restore
    """
    doit = _setup(params, 'repair')
    filter = _create_filter(params)
    bucket = params['awsBucket']
    tmp = params['tmp']
    place_holder = params['videoPlaceholder']
    counts = _restore_counts()
    for entry in filter:
        mentry = kaltura.MediaEntry(entry)
        checker = CheckAndLog(mentry)
        rc = RESTORE_UNDEFINED
        healthy, _, reason = entry_health_check(mentry, bucket)
        if healthy:
            rc = RESTORE_DONE_BEFORE
            mentry.log_action(logging.INFO, doit, 'Repair', 'No Need: ' + reason)
        else:
            mentry.log_action(logging.INFO, doit, 'Repair',
                              'Sick Entry: {} tags={}'.format(reason, entry.getTags()))
            if not (checker.has_tag(SAVED_TO_S3) and checker.aws_s3_exists(bucket)
                    and checker.aws_s3_below_size_limit(bucket)):
                mentry.log_action(logging.ERROR, doit, 'Repair',
                                  'Sick Entry: Do not know how to repair')
            else:
                if checker.has_tag(PLACE_HOLDER_VIDEO):
                    filename = place_holder
                else:
                    # replace with original from s3
                    s3_file = mentry.entry.getId()
                    filename = aws.s3_download("{}/{}".format(tmp, mentry.entry.getId()),
                                               bucket, s3_file, doit)
                    if not filename:
                        # tell GLACIER to restore
                        aws.s3_restore(s3_file, bucket, doit)
                        mentry.log_action(logging.INFO, doit, 'Restore',
                                          'Waiting for s3 file to come out of Glacier')
                        rc = RESTORE_WAIT_GLACIER
                if filename is not None:
                    rc = REPLACE_FAILED
                    if mentry.deleteFlavors(doDelete=doit):
                        if mentry.replaceOriginal(filename, doReplace=doit):
                            rc = RESTORE_DONE
                            wait_for_ready(mentry, doit)
        counts[rc] += 1
    _log_restore_counts(counts, filter)
    return counts[REPLACE_FAILED] + counts[RESTORE_WAIT_GLACIER]
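
# Hedged sketch: repair() counts REPLACE_FAILED and RESTORE_WAIT_GLACIER
# together in its return value, so a non-zero result means "run again later",
# typically once GLACIER restores have completed. Helper name is illustrative.
def _example_repair_pass(params):
    pending = repair(params)
    if pending:
        print("{} entries failed or are waiting on GLACIER; rerun repair later".format(pending))
    return pending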
def setUpClass(cls):
    print("-- setUpClass {}".format(cls))
    TestKalturaAwsCli.no_s3_copy_id = TestKaltura.TEST_ID_NO_S3_COPY
    TestKalturaAwsCli.no_orig_id = TestKaltura.TEST_ID_NO_ORIGINAL
    mentry = kaltura.MediaEntry(kaltura.api.getClient().media.get(
        TestKaltura.TEST_ID_NO_S3_COPY))
    if mentry.getTags():
        print("FIXING: entry {} has tags - removing all".format(
            TestKalturaAwsCli.no_s3_copy_id))
        mentry = kaltura.MediaEntry(kaltura.api.getClient().media.get(
            TestKaltura.TEST_ID_NO_S3_COPY))
        mentry.setTags([], doUpdate=True)
    if aws.s3_exists(TestKalturaAwsCli.no_s3_copy_id, TestKaltura.bucket):
        print("FIXING: deleting {} from s3://{}".format(
            TestKalturaAwsCli.no_s3_copy_id, TestKaltura.bucket))
        aws.s3_delete(TestKaltura.bucket, TestKalturaAwsCli.no_s3_copy_id, True)
    if kaltura.MediaEntry(mentry.entry).getOriginalFlavor().getSize() < 350:
        print("ABORT: based on size ORIGINAL FLAVOR seems to be replacement video")
        assert False
def test_add_then_get_tag_then_delete(self):
    tag = randomString("test_add_then_get_tag_then_delete")
    entry = kaltura.api.getClient().media.get(TestKaltura.TEST_ID_1)
    mentry = kaltura.MediaEntry(entry)
    mentry.addTag(tag, doUpdate=True)
    mentry.reload()
    tags = mentry.entry.getTags()
    self.assertIn(tag, tags)
    mentry.delTag(tag, doUpdate=True)
    mentry.reload()
    tags = mentry.entry.getTags()
    self.assertNotIn(tag, tags)
def download(params):
    """
    save original flavor of first matching record to a local file

    :param params: hash that contains kaltura connection information as well
                   as filtering options given to the download action
    :return: 0 upon successful download
    """
    doit = _setup(params, None)
    tmp = params['tmp']
    filter = _create_filter(params)
    entry = next(iter(filter))
    checker = CheckAndLog(kaltura.MediaEntry(entry))
    if checker.has_original() and checker.original_ready():
        fname = checker.mentry.downloadOriginal(tmp, doit)
        print("downloaded to " + fname)
        return 0
    else:
        return 1
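
# Hedged sketch: download() only acts on the first entry its filter yields,
# so callers narrow the filter to a single id (the tests use '-i' for that).
# Only 'tmp' is read directly by the code above; other keys are assumptions.
def _example_download_first(base_params, tmp_dir):
    params = dict(base_params)
    params['tmp'] = tmp_dir   # directory downloadOriginal() writes into
    return download(params)   # 0 on success, 1 if there is no usable original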
def restore_from_s3(params):
    """
    restore original flavor from s3 for matching kaltura records

    prints counts of videos with different outcomes:
    RESTORE_DONE, RESTORE_WAIT_GLACIER, RESTORE_FAILED

    :param params: hash that contains kaltura connection information as well
                   as filtering options given for the restore action
    :return: number of failures
    """
    doit = _setup(params, 'restore')
    filter = _create_filter(params)
    bucket = params['awsBucket']
    tmp = params['tmp']
    counts = _restore_counts()
    for entry in filter:
        mentry = kaltura.MediaEntry(entry)
        rc = restore_entry_from_s3(mentry, bucket, tmp, doit)
        counts[rc] += 1
        if rc == RESTORE_DONE:
            wait_for_ready(mentry, doit)
    _log_restore_counts(counts, filter)
    return counts[RESTORE_FAILED]
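
# Hedged sketch of the archive round trip these actions imply: copy originals
# to s3, swap in the placeholder, and later restore from s3. All three take
# the same style of params hash; the sequencing here is illustrative.
def _example_round_trip(params):
    copy_to_s3(params)              # originals -> s3, entries tagged SAVED_TO_S3
    replace_videos(params)          # originals replaced by the placeholder video
    return restore_from_s3(params)  # originals back from s3; returns failure count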
def sum_sizes(filter):
    """ return the total size of all entries matched by the given filter """
    total = 0
    for e in filter:
        total += kaltura.MediaEntry(e).getTotalSize()
    return total
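
# Hedged usage sketch: sum_sizes() accepts any iterable of kaltura entries,
# such as the filter built by _create_filter(); units follow getTotalSize().
def _example_total_size(params):
    _setup(params, None)
    return sum_sizes(_create_filter(params))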