def extract_all_features(video_id):
    """Extract Inception-V3 'mixed_10' descriptors for every frame of a video.

    Frames are dumped under ``frames_path/<video_id>``, each JPEG is fed through
    the pre-trained Inception-V3 graph, and the stacked per-frame activations of
    the ``mixed_10/join`` layer are saved to
    ``descriptors_save_path/<video_id>_incp_v3.npy`` and returned.

    NOTE(review): relies on module-level names defined elsewhere in this file
    (FLAGS, frames_path, data_path, descriptors_save_path, extract_frames,
    tf, tqdm, path, makedirs) — confirm against the full module.

    Returns:
        np.ndarray of dtype float32, one squeezed descriptor per frame.
    """

    def create_graph():
        # Import the serialized Inception-V3 GraphDef into the default graph.
        # Terminates the whole process if the model file is missing.
        model_file = path.join(FLAGS.model_dir, 'classify_image_graph_def.pb')
        if not path.exists(model_file):
            print("Graph definition " + model_file + " not found")
            exit(1)
        else:
            with tf.gfile.FastGFile(model_file, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                _ = tf.import_graph_def(graph_def, name='')

    frame_dir = path.join(frames_path, video_id)
    extract_frames(path.join(data_path, "TestVideo/", video_id + ".mp4"),
                   frame_dir)

    with tf.Session() as sess:
        create_graph()
        frame_paths = sorted(glob.glob(path.join(frame_dir, '*.jpg')))
        if not path.exists(descriptors_save_path):
            makedirs(descriptors_save_path)
        # Late mixed layer of Inception-V3 — one activation map per frame.
        feature_tensor = sess.graph.get_tensor_by_name('mixed_10/join:0')

        all_features = []
        for frame in tqdm(frame_paths):
            with open(frame, 'rb') as jpeg:
                activations = sess.run(
                    feature_tensor, {'DecodeJpeg/contents:0': jpeg.read()})
                all_features.append(np.squeeze(activations))

        all_features = np.asarray(all_features, dtype=np.float32)
        np.save(path.join(descriptors_save_path, video_id + '_incp_v3.npy'),
                all_features)

    # cleanup
    tf.reset_default_graph()
    return all_features
# NOTE(review): original line breaks were lost — the statements below are a
# script chunk collapsed onto one physical line. Visible logic: unzip the
# dataset at `temp`; build `useful_frame` as a 4x10 nested list of empty lists;
# for each .avi in `temp`, derive class/iteration numbers from the filename and
# extract its frames into Dagger_folder/<class>/video/<iter>. The trailing
# `def decay(diff, len, index, options)` (which shadows builtin `len`) is
# TRUNCATED mid-branch here — `elif options == 'linear':` has no body in this
# chunk; the remainder lies outside this view, so the code is left byte-identical.
print('Unzip dataset') print('==========================') preprocessing.unzip_dataset(temp) useful_frame = [[[] for i in range(10)] for i in range(4)] print('==========================') print('Extract all frames') print('==========================') video_list = preprocessing.get_filelist(temp, '.avi') for video in video_list: class_num = os.path.basename(video).split('_')[0] iter_num = os.path.basename(video).split('.')[0].split('_')[-1] print("{:10} size(MB): {:>8.2f}".format(os.path.basename(video), os.path.getsize(video) / 1000000)) preprocessing.extract_frames( video, os.path.join(Dagger_folder, f"{class_num}/video/{iter_num}")) print('==========================') print('Decay command') print('==========================') def decay(diff, len, index, options): if options == 'ex': sign = -1 if diff < 0 else 1 w = np.log(abs(int(diff))) / len return sign * np.exp(w * (index + 1)) elif options == 'sigmoid': sign = -1 if diff < 0 else 1 w = np.exp(abs(int(diff))) / len return np.log(w * (index + 1)) * sign elif options == 'linear':
# NOTE(review): original line breaks were lost — the statements below are a
# script chunk collapsed onto one physical line. Visible logic: for each class
# in `rawdata.category`, extract frames from every raw video into the matching
# `dataset` location, then move the source video into an 'archieve/' (sic)
# subdirectory and refresh `dataset`. The final `if MERGE_COMMAND:` section is
# TRUNCATED — the per-class loop stops at `original_file_list = ...` and its
# body continues outside this view, so the code is left byte-identical.
# Depends on module-level names (rawdata, dataset, preprocessing, split_name,
# MERGE_COMMAND) defined elsewhere in the file — confirm against full module.
print('==========================') print('Extract all frames') print('==========================') for classnum in rawdata.category: print('Processing Class:' + str(classnum) + ' data...') if len(rawdata.video_list[classnum]) > 0: for index, video in enumerate(rawdata.video_list[classnum]): print("{:10} size(MB): {:>8.2f}".format( split_name(video), os.path.getsize(video) / 1000000)) dest_name = os.path.join(dataset.video_list['root'][classnum], split_name(video)) archieve_path = os.path.join( rawdata.video_list['root'][classnum], 'archieve/' + split_name(video) + '.avi') preprocessing.extract_frames(video, dest_name) os.rename(video, archieve_path) print('Finish extract frames') dataset.update() # ========================================================================== # Merge all command # merge command from each video together # ========================================================================== if MERGE_COMMAND: print('==========================') print('Merge command') print('==========================') for classnum in rawdata.category: if len(rawdata.command_list[classnum]) > 0: print('Processing Class:' + str(classnum) + ' data...', end=' ') original_file_list = rawdata.command_list[classnum]