def cli(ctx, sink, opt_dir_media, opt_disk, opt_density, opt_size_type, opt_drawframes):
  """Appends images to ChairItem"""

  # -------------------------------------------------
  # imports

  from os.path import join

  import cv2 as cv

  from vframe.utils import file_utils, logger_utils
  from vframe.settings.paths import Paths

  # -------------------------------------------------
  # initialize

  log = logger_utils.Logger.getLogger()
  log.debug('append images to pipeline')

  # process keyframes
  if not opt_dir_media:
    dir_media = Paths.media_dir(types.Metadata.KEYFRAME, data_store=opt_disk, verified=ctx.opts['verified'])
  else:
    dir_media = opt_dir_media

  # -------------------------------------------------
  # process

  while True:

    chair_item = yield

    if chair_item.chair_type == types.ChairItemType.PHOTO:
      chair_item.load_images(dir_media, opt_size_type, opt_drawframes=opt_drawframes)
    if chair_item.chair_type == types.ChairItemType.VIDEO:
      pass
      #chair_item.load_images(opt_size_type, opt_drawframes=opt_drawframes)
    if chair_item.chair_type == types.ChairItemType.VIDEO_KEYFRAME:
      chair_item.load_images(opt_size_type, opt_drawframes=opt_drawframes)
    if chair_item.chair_type == types.ChairItemType.MEDIA_RECORD:
      chair_item.load_images(dir_media, opt_size_type, opt_density, opt_drawframes=opt_drawframes)

    # ------------------------------------------------------------
    # send back to generator

    sink.send(chair_item)
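
# -------------------------------------------------
# Usage sketch (illustrative only): the processor above is a generator
# coroutine -- it receives items via `yield` and forwards them downstream
# with `sink.send(...)`. The minimal example below shows how such coroutines
# are primed and chained. `collect_sink`, `passthrough`, and the plain
# string items are hypothetical stand-ins, not part of the vframe API.

def _coroutine_pipeline_sketch():
  collected = []

  def collect_sink():
    # terminal coroutine: accumulates whatever it is sent
    while True:
      item = yield
      collected.append(item)

  def passthrough(sink):
    # middle stage, same shape as cli() above: receive, (optionally
    # transform), then forward downstream
    while True:
      item = yield
      sink.send(item)

  # build the chain back-to-front and prime each coroutine with next()
  end = collect_sink()
  next(end)
  stage = passthrough(end)
  next(stage)

  for item in ['frame_001', 'frame_002']:
    stage.send(item)

  return collected  # ['frame_001', 'frame_002']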
def cli(ctx, opt_fp_neg, opt_dir_project, opt_disk, opt_size):
  """Generates negative images"""

  # ------------------------------------------------
  # imports

  import os
  from os.path import join
  from glob import glob
  from pathlib import Path

  from vframe.utils import logger_utils, im_utils, file_utils
  from vframe.settings.paths import Paths

  log = logger_utils.Logger.getLogger()
  log.debug('negative mining')

  dir_media_unver = Paths.media_dir(types.Metadata.KEYFRAME, data_store=opt_disk, verified=types.Verified.UNVERIFIED)
  dir_media_ver = Paths.media_dir(types.Metadata.KEYFRAME, data_store=opt_disk, verified=types.Verified.VERIFIED)

  opt_size_label = cfg.IMAGE_SIZE_LABELS[opt_size]

  fp_train_neg = join(opt_dir_project, vcat_cfg.FP_TRAIN_NEGATIVES)
  dir_labels_negative = join(opt_dir_project, vcat_cfg.DIR_LABELS_NEGATIVE)
  dir_negative = join(opt_dir_project, vcat_cfg.DIR_IMAGES_NEGATIVE)
  file_utils.mkdirs(dir_negative)
  file_utils.mkdirs(dir_labels_negative)

  negative_list = pd.read_csv(opt_fp_neg)
  negative_list['description'] = negative_list['description'].fillna('')  # ensure not empty
  # negative_list['desc'] = negative_list['desc'].astype('str')

  neg_training_files = []

  # for sha256 in sha256_list[:35]:
  for i, row in negative_list.iterrows():
    sha256 = row['sha256']
    sha256_tree = file_utils.sha256_tree(sha256)
    ver_list = glob(join(dir_media_ver, sha256_tree, sha256, "*"))
    unver_list = glob(join(dir_media_unver, sha256_tree, sha256, "*"))
    dir_frames = ver_list + unver_list
    log.debug('adding {} frames about "{}"'.format(len(dir_frames), row['description']))

    for dir_frame in dir_frames:
      frame_idx = Path(dir_frame).stem
      fp_keyframe_src = join(dir_frame, opt_size_label, 'index.jpg')
      fpp_keyframe_src = Path(fp_keyframe_src)

      # for each keyframe, if it exists
      if fpp_keyframe_src.exists():
        # create symlinked image
        fpp_keyframe_dst = Path(join(dir_negative, '{}_{}.jpg'.format(sha256, frame_idx)))
        if fpp_keyframe_dst.exists() and fpp_keyframe_dst.is_symlink():
          fpp_keyframe_dst.unlink()
        fpp_keyframe_dst.symlink_to(fpp_keyframe_src)

        # create empty label
        fp_label_txt = join(dir_labels_negative, '{}_{}.txt'.format(sha256, frame_idx))
        with open(fp_label_txt, 'w') as fp:
          fp.write('')

        # and, add this file to the training list
        neg_training_files.append(str(fpp_keyframe_dst))

  log.info('writing {} lines to: {}'.format(len(neg_training_files), fp_train_neg))
  file_utils.write_text(neg_training_files, fp_train_neg)

  # add prompt
  log.info('mv labels_negative/*.txt labels/')
  log.info('mv images_negative/*.jpg images/')
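
# -------------------------------------------------
# Input sketch (illustrative only): the code above expects the negatives
# CSV (`opt_fp_neg`) to provide at least `sha256` and `description`
# columns, since it reads `row['sha256']` and `row['description']`.
# The helper below builds a minimal file of that shape; the column set
# and example values are assumptions, not a documented vframe format.

def _write_demo_negatives_csv(fp_csv):
  import pandas as pd

  rows = [
    {'sha256': 'a' * 64, 'description': 'street scene, nothing of interest'},
    {'sha256': 'b' * 64, 'description': ''},  # blank cells read back as NaN, hence fillna('') above
  ]
  pd.DataFrame(rows).to_csv(fp_csv, index=False)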
def cli(ctx, sink, opt_disk, opt_density):
  """Generates KeyframeStatus metadata"""

  # Recommended: Use Expanded density to check for all keyframes

  # -------------------------------------------------
  # imports

  import os
  from os.path import join
  from pathlib import Path

  from vframe.settings.paths import Paths
  from vframe.settings import vframe_cfg as cfg
  from vframe.utils import file_utils, logger_utils
  from vframe.models.metadata_item import KeyframeStatusMetadataItem

  # -------------------------------------------------
  # process

  log = logger_utils.Logger.getLogger()

  # set paths
  media_type = types.Metadata.KEYFRAME
  metadata_type = types.Metadata.KEYFRAME_STATUS
  dir_keyframes = Paths.media_dir(media_type, data_store=opt_disk, verified=ctx.opts['verified'])

  # iterate sink
  while True:

    chair_item = yield

    sha256 = chair_item.sha256
    sha256_tree = file_utils.sha256_tree(sha256)
    dir_parent = join(dir_keyframes, sha256_tree, sha256)

    # check if keyframe metadata exists
    keyframe_metadata_item = chair_item.item.get_metadata(types.Metadata.KEYFRAME)
    if not keyframe_metadata_item:
      log.error('no keyframe metadata. try "append -t keyframe", {}'.format(keyframe_metadata_item))
      chair_item.item.set_metadata(metadata_type, {})
    else:
      # check if the keyframe images exist
      status = {k: False for k in cfg.IMAGE_SIZE_LABELS}
      if Path(dir_parent).exists():
        # get keyframe numbers
        idxs = keyframe_metadata_item.get_keyframes(opt_density)
        for idx in idxs:
          # mark a size as present if its image exists for any keyframe
          for k, label in cfg.IMAGE_SIZE_LABELS.items():
            fpp_im = Path(dir_parent, file_utils.zpad(idx), label, 'index.jpg')
            if fpp_im.exists():
              status[k] = True

      # append metadata to chair_item's mapping item
      chair_item.item.set_metadata(metadata_type, KeyframeStatusMetadataItem(status))

    # -------------------------------------------------
    # continue processing other items

    sink.send(chair_item)