def generate_training_list():
    """Crop all texture .png images under args.db_dir in parallel.

    Selects images whose basename contains 'texture', mirrors the
    args.db_dir directory tree into args.out_dir, then dispatches
    crop_image over a process pool.
    """
    file_list = glob_files(args.db_dir, ['.png'])
    # Selection is by basename substring, matching the dataset's naming.
    file_list = [f for f in file_list if 'texture' in os.path.basename(f)]
    # Plain loop instead of a side-effect comprehension; exist_ok=True
    # removes the check-then-create race of the original code.
    for f in file_list:
        out_dir = os.path.dirname(f.replace(args.db_dir, args.out_dir))
        os.makedirs(out_dir, exist_ok=True)
    with Pool() as p:
        # list() drains the iterator so tqdm shows real progress.
        content = list(
            tqdm(p.imap_unordered(crop_image, file_list),
                 total=len(file_list)))
def detect_face():
    """Produce a '.roi' bounding-box file for every image under args.db_dir.

    Each .roi file holds 'x y w h' and is written at the image's mirrored
    path under args.out_dir; existing .roi files are reused.  Boxes come
    from landmarks (args.use_lm), a dlib CNN detector, or a center crop as
    fallback.  Optionally writes a FILE/ROI csv to args.out_path.
    """
    files = glob_files(args.db_dir, ['.jpg', '.jpeg', '.png', '.bmp'])
    # The dlib model is only needed when boxes are not derived from
    # landmarks; defer the expensive load until it is actually used.
    detector = None
    if not args.use_lm:
        detector = dlib.cnn_face_detection_model_v1(args.dlib_model)
    contents = []
    p_bar = tqdm.tqdm(total=len(files))
    for f in files:
        # os.path.splitext swaps only the suffix; the original str.replace
        # hit the first occurrence of the extension anywhere in the path.
        root, _ = os.path.splitext(f.replace(args.db_dir, args.out_dir))
        file_bbox = root + '.roi'
        lm_root, _ = os.path.splitext(f.replace(args.db_dir, args.lm_dir))
        file_landmark = lm_root + args.lm_ext
        if os.path.exists(file_bbox):
            contents.append([f, file_bbox])
        else:
            # exist_ok avoids the check-then-create race.
            os.makedirs(os.path.dirname(file_bbox), exist_ok=True)
            im = cv2.imread(f)
            if args.use_lm:
                box = gen_bbox_from_landmarks(file_landmark)
            else:
                box = detect_dlib(detector, f)
            if len(box) == 0:
                # No detection: fall back to a center crop of the image.
                box = center_crop(im)
            if args.plot:
                plt.figure()
                plt.imshow(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
                ax = plt.gca()
                ax.add_patch(
                    patches.Rectangle((box[0], box[1]), box[2], box[3],
                                      fill=False, ec='g'))
                plt.show()
            with open(file_bbox, 'w') as fout:
                fout.write('%d %d %d %d' % (box[0], box[1], box[2], box[3]))
            contents.append([f, file_bbox])
        p_bar.update()
    p_bar.close()
    if len(args.out_path) > 0:
        with open(args.out_path, 'w') as f:
            csv_writer = csv.writer(f)
            csv_writer.writerow(['FILE', 'ROI'])
            for im_path, roi_path in contents:
                # FILE is relative to the db root; ROI is an absolute path.
                csv_writer.writerow([im_path.replace(args.db_dir, ''),
                                     os.path.realpath(roi_path)])
def merge_im_box():
    """Write args.out_csv pairing each image under args.db_dir (path made
    relative to the db root) with its box file mirrored under args.box_dir.
    """
    im_files = glob_files(args.db_dir, ['.jpg', '.jpeg', '.png', '.bmp'])
    with open(args.out_csv, 'w') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(['FILE', 'ROI'])
        for im_file in im_files:
            # os.path.splitext swaps only the suffix; the original
            # str.replace hit the first occurrence of the extension
            # substring anywhere in the path.
            root, _ = os.path.splitext(
                im_file.replace(args.db_dir, args.box_dir))
            box_file = root + args.box_ext
            csv_writer.writerow([im_file.replace(args.db_dir, ''), box_file])
def write_final_csv():
    """Write IJB-<set>.csv under args.out_dir, listing every cropped .png
    (path relative to args.out_dir) with the absolute path of its
    companion .roi file.
    """
    files = glob_files(args.out_dir, ['.png'])
    p_bar = tqdm(total=len(files))
    out_csv = os.path.join(args.out_dir, 'IJB-%s.csv' % args.set)
    with open(out_csv, 'w') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(['FILE', 'ROI'])
        for file_path in files:
            # splitext swaps only the suffix; the original str.replace hit
            # the first '.png' occurring anywhere in the path.
            box_path = (os.path.splitext(os.path.realpath(file_path))[0]
                        + '.roi')
            csv_writer.writerow(
                [file_path.replace(args.out_dir, ''), box_path])
            p_bar.update()
    p_bar.close()
def extracting_features():
    """Run the network over every image under args.db_dir and save one
    feature file per image under args.ft_dir.

    The model is either a symbol checkpoint (args.symbol_prefix), whose
    requested output blobs are wrapped in a gluon.SymbolBlock, or a gluon
    model-zoo architecture (args.arch).  Weights are loaded from
    args.weights in both cases.  Images whose feature file already exists
    are skipped.
    """
    # Glob images and derive the mirrored feature paths.
    im_paths = glob_files(args.db_dir, args.im_exts)
    feature_paths = []
    for im_path in im_paths:
        # splitext swaps only the suffix; the original str.replace could
        # hit the extension substring anywhere in the path.
        root, _ = os.path.splitext(im_path.replace(args.db_dir, args.ft_dir))
        feature_paths.append(root + args.ft_ext)
    # Create the feature directory tree up front; exist_ok avoids the
    # check-then-create race of the original side-effect comprehension.
    for feature_path in feature_paths:
        os.makedirs(os.path.dirname(feature_path), exist_ok=True)
    # Loading model.
    use_symbol_block = len(args.symbol_prefix) > 0
    if use_symbol_block:
        sym, arg_params, aux_params = mx.model.load_checkpoint(
            args.symbol_prefix, 0)
        internals = sym.get_internals()
        inputs = internals['data']
        outputs_blobs = [
            internals[layer_name + '_output']
            for layer_name in args.output_blobs.split(',')
        ]
        inference = gluon.SymbolBlock(outputs_blobs, inputs)
    else:
        inference = gluon.model_zoo.vision.get_model(
            args.arch, classes=args.num_classes)
    inference.load_params(args.weights, ctx=ctx)
    if not use_symbol_block:
        # Hybridize only the imperative model-zoo network.
        inference.hybridize()
    # Extracting features.
    global_start_time = timeit.default_timer()
    valid_counts = 0
    for im_idx, (im_path, feature_path) in enumerate(
            zip(im_paths, feature_paths)):
        if os.path.exists(feature_path):
            print('The feature file exists, skip the file')
            continue
        start_time = timeit.default_timer()
        im = image_processing(im_path)
        if im is None:
            # Bug fix: the original still ran inference on a None image.
            print('Failed to pre-process %s, skip the file' % im_path)
            continue
        im = im.as_in_context(ctx)
        processing_time = timeit.default_timer() - start_time
        start_time = timeit.default_timer()
        if use_symbol_block:
            features = inference(im)
        else:
            features = inference.features(im)
        feature_extracting_time = timeit.default_timer() - start_time
        features = features.asnumpy().flatten()
        feature = Signature(features)
        feature.save_features(feature_path)
        valid_counts += 1
        print(
            'Processed [%d/%d]: %s \t Pre-processing time: %.2f ms\t '
            'Extracting features time: %.2f ms' %
            (im_idx, len(im_paths), im_path, processing_time * 1000,
             feature_extracting_time * 1000))
    global_elapsed_time = timeit.default_timer() - global_start_time
    # Guard against division by zero when nothing new was processed.
    avg_ms = (global_elapsed_time * 1000 / valid_counts
              if valid_counts else 0.0)
    print(
        'Total elapsed time: %s \t Processed [%d] images \t Avg. time: %.2f ms'
        % (str(datetime.timedelta(seconds=global_elapsed_time)),
           valid_counts, avg_ms))
def main():
    """Parse the IJB-A/B/C detection protocol csv(s), derive one crop box
    per row (landmark-based when all five landmark fields are present,
    otherwise the protocol's FACE_* box), then crop all images in parallel
    and write the final csv to args.out_path.
    """
    if args.set == 'A':
        files = glob_files(os.path.join(args.db_dir, 'IJB-A_1N_sets'),
                           ['.csv'])
    elif args.set == 'B':
        files = [
            os.path.join(args.db_dir, 'protocol/ijbb_face_detection.csv')
        ]
    elif args.set == 'C':
        files = [
            os.path.join(args.db_dir,
                         'protocol/ijbc_face_detection_ground_truth.csv')
        ]
    else:
        raise NotImplementedError
    contents = []
    # Fallback column indices; overwritten from the header row of each csv.
    idx_tmp_id, idx_sub_id, idx_file, idx_x, idx_y, idx_w, idx_h, \
        idx_reye_x, idx_reye_y, idx_leye_x, idx_leye_y, \
        idx_nose_x, idx_nose_y = 0, 1, 2, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
    for file_ind, file_path in enumerate(files):
        print('Processing [%d]: %s...' % (file_ind, file_path))
        with open(file_path) as f:
            csv_reader = csv.reader(f)
            for idx, row in enumerate(csv_reader):
                if idx == 0:
                    # Header row: resolve column positions by name.
                    idx_tmp_id = row.index('TEMPLATE_ID')
                    idx_sub_id = row.index('SUBJECT_ID')
                    idx_file = row.index('FILE')
                    idx_x = row.index('FACE_X')
                    idx_y = row.index('FACE_Y')
                    idx_w = row.index('FACE_WIDTH')
                    idx_h = row.index('FACE_HEIGHT')
                    idx_reye_x = row.index('RIGHT_EYE_X')
                    idx_reye_y = row.index('RIGHT_EYE_Y')
                    idx_leye_x = row.index('LEFT_EYE_X')
                    idx_leye_y = row.index('LEFT_EYE_Y')
                    idx_nose_x = row.index('NOSE_BASE_X')
                    idx_nose_y = row.index('NOSE_BASE_Y')
                else:
                    template_id = row[idx_tmp_id]
                    subject_id = row[idx_sub_id]
                    im_path = row[idx_file]
                    x, y, w, h = (float(row[idx_x]), float(row[idx_y]),
                                  float(row[idx_w]), float(row[idx_h]))
                    reye_x, reye_y, leye_x, leye_y, nose_x, nose_y = \
                        row[idx_reye_x], row[idx_reye_y], row[idx_leye_x], \
                        row[idx_leye_y], row[idx_nose_x], row[idx_nose_y]
                    if len(reye_x) > 0 and len(reye_y) > 0 and len(
                            leye_x) > 0 and len(leye_y) > 0 and len(
                                nose_x) > 0 and len(nose_y) > 0:
                        # All five landmarks present: build a square box
                        # centered on the nose whose radius is 1.1x the
                        # larger nose-to-eye distance.
                        reye_x, reye_y, leye_x, leye_y, nose_x, nose_y = \
                            float(reye_x), float(reye_y), float(leye_x), \
                            float(leye_y), float(nose_x), float(nose_y)
                        c_x, c_y = nose_x, nose_y
                        radius = math.sqrt(
                            max((reye_x - nose_x)**2 + (reye_y - nose_y)**2,
                                (leye_x - nose_x)**2 +
                                (leye_y - nose_y)**2))
                        radius *= 1.1
                        x, y = c_x - radius, c_y - radius
                        w, h = 2 * radius, 2 * radius
                    im_name = os.path.splitext(os.path.basename(im_path))[0]
                    dir_name = os.path.dirname(im_path)
                    out_im_path = os.path.join(args.out_dir, dir_name,
                                               '%s.png' % im_name)
                    # exist_ok avoids the check-then-create race of the
                    # original exists()/makedirs() pairs.
                    os.makedirs(os.path.dirname(out_im_path), exist_ok=True)
                    if len(args.box_dir) > 0:
                        out_box_path = os.path.join(args.box_dir, dir_name,
                                                    '%s.roi' % im_name)
                        os.makedirs(os.path.dirname(out_box_path),
                                    exist_ok=True)
                    else:
                        out_box_path = os.path.join(args.out_dir, dir_name,
                                                    '%s.roi' % im_name)
                    contents.append([
                        im_path, template_id, subject_id,
                        int(x), int(y), int(w), int(h),
                        out_im_path, out_box_path
                    ])
    with Pool() as p:
        processed_contents = list(
            tqdm(p.imap(crop_image, contents), total=len(contents)))
    # Drop failed crops, then de-duplicate.
    processed_contents = list(filter(None, processed_contents))
    processed_contents = remove_duplicates(processed_contents)
    if len(args.out_path) > 0:
        os.makedirs(os.path.dirname(args.out_path), exist_ok=True)
        write_csv(args.out_path, processed_contents)