def run_single_test(data_dir, output_dir):
    from detection import train_detector, detect
    from keras import backend as K
    from keras.models import load_model
    from os import environ
    from os.path import abspath, dirname, join

    train_dir = join(data_dir, 'train')
    test_dir = join(data_dir, 'test')

    # Quick training pass to check that train_detector runs end to end;
    # its weights are discarded in favour of the pretrained model below.
    train_gt = read_csv(join(train_dir, 'gt.csv'))
    train_img_dir = join(train_dir, 'images')
    print('training')
    train_detector(train_gt, train_img_dir, fast_train=True)

    # Detect with the pretrained model shipped next to this script.
    code_dir = dirname(abspath(__file__))
    model = load_model(join(code_dir, 'model_e300.hdf5'))
    test_img_dir = join(test_dir, 'images')
    print('detecting')
    detected_points = detect(model, test_img_dir)
    save_csv(detected_points, join(output_dir, 'output.csv'))

    # Release the TensorFlow graph between test runs.
    if environ.get('KERAS_BACKEND') == 'tensorflow':
        K.clear_session()
def run_single_test(data_dir, output_dir):
    from detection import train_detector, detect
    from keras import backend as K
    from keras.models import load_model
    from os.path import abspath, dirname, join

    train_dir = join(data_dir, 'train')
    test_dir = join(data_dir, 'test')

    train_gt = read_csv(join(train_dir, 'gt.csv'))
    train_img_dir = join(train_dir, 'images')
    model = train_detector(train_gt, train_img_dir, fast_train=True)

    code_dir = dirname(abspath(__file__))
    model = load_model(join(code_dir, 'facepoints_model.hdf5'))
    test_img_dir = join(test_dir, 'images')
    detected_points = detect(model, test_img_dir)
    save_csv(detected_points, join(output_dir, 'output.csv'))

    K.clear_session()
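# Neither read_csv nor save_csv is defined in these snippets. A minimal
# sketch of what they might look like, assuming gt.csv stores one row per
# image as "filename,x1,y1,...,xN,yN" with a header line (format assumed,
# not confirmed by the code above):
import csv

import numpy as np


def read_csv(filename):
    # Read "filename,x1,y1,..." rows into {filename: float coordinate array}.
    res = {}
    with open(filename) as fhandle:
        reader = csv.reader(fhandle)
        next(reader)  # skip the header row (assumed present)
        for row in reader:
            res[row[0]] = np.array(row[1:], dtype='float64')
    return res


def save_csv(detected, filename):
    # Write detected points back out in the same row format.
    with open(filename, 'w', newline='') as fhandle:
        writer = csv.writer(fhandle)
        for name, coords in sorted(detected.items()):
            writer.writerow([name] + list(coords))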
    return img_shapes


def compute_metric(detected, gt, img_shapes):
    # Mean squared error over coordinates normalised by image size
    # (x by width, y by height), scaled by 100 and averaged over files.
    res = 0.0
    for filename, coords in detected.items():
        n_rows, n_cols = img_shapes[filename]
        diff = coords - gt[filename]
        diff[::2] /= n_cols   # even positions hold x coordinates
        diff[1::2] /= n_rows  # odd positions hold y coordinates
        diff *= 100
        res += (diff ** 2).mean()
    return res / len(detected)


train_gt = read_csv(join(train_dir, 'gt.csv'))
train_img_dir = join(train_dir, 'images')

model = train_detector(train_gt, train_img_dir, fast_train=False)
model.save('facepoints_model.hdf5')
# model = train_detector(train_gt, train_img_dir, fast_train=True)

model = load_model('facepoints_model.hdf5')
test_img_dir = join(test_dir, 'images')
detected_points = detect(model, test_img_dir)

test_gt = read_csv(join(test_dir, 'gt.csv'))
img_shapes = read_img_shapes(test_dir)
error = compute_metric(detected_points, test_gt, img_shapes)
print('Error:', error)
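# To make the normalisation in compute_metric concrete, a toy check with a
# single hypothetical 100x200 image and one keypoint that is off by 10 px
# in x and 5 px in y:
import numpy as np

detected = {'img.jpg': np.array([110.0, 55.0])}  # predicted (x, y)
gt = {'img.jpg': np.array([100.0, 50.0])}        # ground truth (x, y)
img_shapes = {'img.jpg': (100, 200)}             # (n_rows, n_cols)

# x error: 10 / 200 * 100 = 5; y error: 5 / 100 * 100 = 5
# metric: mean(5 ** 2, 5 ** 2) = 25.0
print(compute_metric(detected, gt, img_shapes))  # -> 25.0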
    pil_draw.ellipse(
        (pt2[0] - radius, pt2[1] - radius,
         pt2[0] + radius, pt2[1] + radius),
        fill=gt_color)
    pil_img.save(res_dir + '/' + impaths[i])


# if (len(argv) != 2) and (len(argv) != 4):
#     stdout.write('Usage: %s train_dir test_dir [-v results_dir]\n' % argv[0])
#     exit(1)

start_time = time.time()

train_dir = argv[1]
# test_dir = argv[2]
visualisation_needed = (len(argv) > 2) and (argv[2] == '-v')
if visualisation_needed:
    res_dir = argv[3]

if visualisation_needed:
    train_imgs, train_gt, test_paths = load_data(train_dir, True)
else:
    train_imgs, train_gt = load_data(train_dir)

# Hold out part of the training data for evaluation.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    train_imgs, train_gt, test_size=0.1, train_size=0.5, random_state=2)

model = train_detector(X_train, y_train)
del X_train, y_train

detection_results = np.array(detect(model, X_test))
print("Result: %.4f" % compute_metrics(X_test, detection_results, y_test))

if visualisation_needed:
    visualise(X_test, detection_results, y_test, res_dir, test_paths)

end_time = time.time()
print("Running time:", round(end_time - start_time, 2),
      's (' + str(round((end_time - start_time) / 60, 2)) + ' minutes)')
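# The cross_validation module used above was deprecated in scikit-learn 0.18
# and removed in 0.20; with current scikit-learn the same split reads:
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    train_imgs, train_gt, test_size=0.1, train_size=0.5, random_state=2)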
        (pt2[0] - radius, pt2[1] - radius,
         pt2[0] + radius, pt2[1] + radius),
        fill=gt_color)
    pil_img.save(res_dir + '/' + impaths[i])


if (len(argv) != 3) and (len(argv) != 5):
    stdout.write('Usage: %s train_dir test_dir [-v results_dir]\n' % argv[0])
    exit(1)

start_time = time.time()

train_dir = argv[1]
test_dir = argv[2]
visualisation_needed = (len(argv) > 3) and (argv[3] == '-v')
if visualisation_needed:
    res_dir = argv[4]

train_imgs = load_imgs(train_dir)
train_gt = load_gt(train_dir)
model = train_detector(train_imgs, train_gt)
del train_imgs, train_gt  # free memory before loading the test set

# The test images and ground truth are needed either way; only the
# file paths are specific to visualisation.
test_imgs = list(load_imgs(test_dir))
test_gt = list(load_gt(test_dir))
if visualisation_needed:
    test_paths = list(load_paths(test_dir))

detection_results = np.array(detect(model, test_imgs, test_gt))
print("Result: %.4f" % compute_metrics(test_imgs, detection_results, test_gt))

if visualisation_needed:
    visualise(test_imgs, detection_results, test_gt, res_dir, test_paths)

end_time = time.time()
print("Running time:", round(end_time - start_time, 2),
      's (' + str(round((end_time - start_time) / 60, 2)) + ' minutes)')
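# Example invocation of the script above (paths hypothetical):
#   python run.py data/train data/test -v results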
train_imgs, train_gt = load_data(train_dir)
print("Data has been loaded")

# Use the first TEST_SIZE samples for evaluation and the next
# 5 * TEST_SIZE samples for training.
train_num = len(train_imgs)
test_num = TEST_SIZE
mask = range(test_num)
test_imgs = train_imgs[mask]
test_gt = train_gt[mask]
mask = range(test_num, 6 * test_num)
train_imgs = train_imgs[mask]
train_gt = train_gt[mask]

try:
    barnet = train_detector(train_imgs, train_gt)
    barnet.model.save_weights(model_dir, overwrite=True)

    # Dump the first convolutional layer's filters as an image grid.
    layer = barnet.model.layers[0]
    weights = np.array(layer.get_weights()[0])
    grid = visualize_grid(weights.transpose(0, 2, 3, 1))
    grid = resize(grid, (512, 512, 3))
    grid_img = Image.fromarray(grid.astype('uint8'))
    grid_img.save(res_dir + '/layer_weights.jpg')
finally:
    del train_imgs, train_gt

print("CNN has been trained")
detection_results = np.array(detect(barnet, test_imgs))
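# visualize_grid is not defined in this snippet. A sketch in the spirit of
# the CS231n helper of the same name, assuming the input is an (N, H, W, C)
# filter stack as produced by the transpose above:
import numpy as np


def visualize_grid(Xs, ubound=255.0, padding=1):
    # Tile N filters of shape (H, W, C) into a roughly square image grid,
    # normalising each filter independently to [0, ubound].
    N, H, W, C = Xs.shape
    grid_size = int(np.ceil(np.sqrt(N)))
    grid_h = H * grid_size + padding * (grid_size - 1)
    grid_w = W * grid_size + padding * (grid_size - 1)
    grid = np.zeros((grid_h, grid_w, C))
    next_idx = 0
    y0 = 0
    for _ in range(grid_size):
        x0 = 0
        for _ in range(grid_size):
            if next_idx < N:
                img = Xs[next_idx]
                low, high = img.min(), img.max()
                grid[y0:y0 + H, x0:x0 + W] = ubound * (img - low) / (high - low)
                next_idx += 1
            x0 += W + padding
        y0 += H + padding
    return grid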