Example #1
import os

import keras

# parse_command_line, set_gpus, load_model, get_metrics and RegBoxCarsDataset
# are project-local helpers assumed to be in scope.
def train():
    args = parse_command_line()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    set_gpus()

    model, loss, snapshot_dir_name, snapshot_dir_path = load_model(args)

    print("Loading dataset!")
    print("Use diamond coords for output: {}".format(args.diamond))
    print("Scale for vp: {}".format(args.scale))

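    # The train split gets geometric augmentation (perspective jitter, random
    # crops); the val split is loaded without it.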
    train_dataset = RegBoxCarsDataset(args.path, 'train', batch_size=args.batch_size, img_size=args.input_size,
                                      use_diamond=args.diamond, scale=args.scale, crop_delta=args.crop_delta,
                                      perspective_sigma=args.perspective_sigma)
    print("Loaded training dataset with {} samples".format(len(train_dataset.instance_list)))
    val_dataset = RegBoxCarsDataset(args.path, 'val', batch_size=args.batch_size, img_size=args.input_size,
                                    use_diamond=args.diamond, scale=args.scale)
    print("Loaded val dataset with {} samples".format(len(val_dataset.instance_list)))

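    # Checkpoint after every epoch (not just the best one) and log to TensorBoard.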
    callbacks = [keras.callbacks.ModelCheckpoint(filepath=os.path.join(snapshot_dir_path, 'model.{epoch:03d}.h5'), save_best_only=False),
                 keras.callbacks.TensorBoard(log_dir=os.path.join('logs', snapshot_dir_name))]

    print("Training for {} epochs".format(args.epochs))
    print("Workers: ", args.workers)
    print("Use multiprocessing: ", args.workers > 1)
    print("Starting training with lr: {}".format(args.lr))

    adam = keras.optimizers.Adam(args.lr)
    model.compile(adam, loss, metrics=get_metrics(args.diamond, args.scale))

    model.fit_generator(train_dataset, validation_data=val_dataset, epochs=args.epochs, callbacks=callbacks,
                        initial_epoch=args.resume, workers=args.workers, use_multiprocessing=args.workers > 1)

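    # Optionally power the machine off after an unattended run.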
    if args.shutdown:
        os.system('sudo poweroff')
Example #2
import os

import tensorflow_hub as hub

# parse_args, set_gpus and detect_session are project-local helpers assumed
# to be in scope.
def detect():
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    set_gpus()

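    # Pick a TF Hub detector: Mask R-CNN when instance masks are requested,
    # otherwise the lighter CenterNet model.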
    if args.mask:
        print("Running with mask!")
        object_detector = hub.load(
            "https://hub.tensorflow.google.cn/tensorflow/mask_rcnn/inception_resnet_v2_1024x1024/1"
        )
    else:
        object_detector = hub.load(
            'https://tfhub.dev/tensorflow/centernet/resnet50v1_fpn_512x512/1')
    # Alternative detectors:
    # object_detector = hub.load("https://tfhub.dev/tensorflow/centernet/hourglass_512x512/1")
    # object_detector = load_model('snapshots/od/resnet50_coco_best_v2.1.0.h5', backbone_name='resnet50')

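    # Run detection over every session directory under <path>/dataset.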
    path = args.path
    sessions = sorted(os.listdir(os.path.join(path, 'dataset')))
    for session in sessions:
        detect_session(object_detector,
                       path,
                       session,
                       max_frames=args.max_frames,
                       skip=args.skip,
                       conf=args.conf,
                       dump_every=args.dump_every,
                       mask=args.mask,
                       debug=args.debug)
Example #3
import os

import keras

# parse_command_line, set_gpus, load_model, heatmap_mean_accuracy and
# HeatmapBoxCarsDataset are project-local helpers assumed to be in scope.
def train():
    args = parse_command_line()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    set_gpus()

    model, scales, snapshot_dir_name, snapshot_dir_path = load_model(args)

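    # The model predicts one (vp1, vp2) heatmap pair per scale, hence the
    # len(scales) * 2 output channels in the accuracy metric.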
    adam = keras.optimizers.Adam(args.lr)
    model.compile(adam, 'MSE', metrics=[heatmap_mean_accuracy(args.batch_size, args.heatmap_size, len(scales) * 2)])

    model.summary()

    print("Loading dataset!")
    train_dataset = HeatmapBoxCarsDataset(args.path, 'train', batch_size=args.batch_size, img_size=args.input_size, heatmap_size=args.heatmap_size, scales=scales, peak_original=args.peak_original, crop_delta=args.crop_delta, perspective_sigma=args.perspective_sigma)
    print("Loaded training dataset with {} samples".format(len(train_dataset.instance_list)))
    print("Using augmentation: ", args.perspective_sigma != 0.0 or args.crop_delta != 0)
    val_dataset = HeatmapBoxCarsDataset(args.path, 'val', batch_size=args.batch_size, img_size=args.input_size, heatmap_size=args.heatmap_size, scales=scales, peak_original=args.peak_original)
    print("Loaded val dataset with {} samples".format(len(val_dataset.instance_list)))


    callbacks = [keras.callbacks.ModelCheckpoint(filepath=os.path.join(snapshot_dir_path, 'model.{epoch:03d}.h5')),
                 keras.callbacks.TensorBoard(log_dir=os.path.join('logs', snapshot_dir_name))]

    print("Workers: ", args.workers)
    print("Use multiprocessing: ", args.workers > 1)
    print("Starting training with lr: {}".format(args.lr))


    model.fit_generator(train_dataset, validation_data=val_dataset, epochs=args.epochs, callbacks=callbacks, initial_epoch=args.resume, workers=args.workers, use_multiprocessing=args.workers > 1)

    if args.shutdown:
        os.system('sudo poweroff')
Example #4
import os

# parse_command_line, set_gpus, load_model and detect_session are
# project-local helpers assumed to be in scope.
def detect():
    args = parse_command_line()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    set_gpus()

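    # load_model returns (model, scales, dir name, dir path); only the model
    # and its directory name are used here.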
    model, _, model_dir_name, _ = load_model(args)

    data_path = args.path
    sessions = sorted(os.listdir(os.path.join(data_path, 'dataset')))
    for session in sessions:
        detect_session(model, model_dir_name, data_path, session, args)
Example #5
import os

import tensorflow_hub as hub

# parse_args, set_gpus and detect_session are project-local helpers assumed
# to be in scope.
def detect():
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    set_gpus()

    if args.mask:
        print("Running with mask!")
        object_detector = hub.load("https://hub.tensorflow.google.cn/tensorflow/mask_rcnn/inception_resnet_v2_1024x1024/1")
    else:
        object_detector = hub.load('https://tfhub.dev/tensorflow/centernet/resnet50v1_fpn_512x512/1')

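    # Sessions live under <path>/frames here, unlike example #2, which reads
    # them from <path>/dataset.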
    path = args.path
    sessions = sorted(os.listdir(os.path.join(path, 'frames')))
    for session in sessions:
        detect_session(object_detector, path, session, conf=args.conf, dump_every=args.dump_every,
                       max_frames=args.max_frames, mask=args.mask, debug=args.debug)
Example #6
import os

import numpy as np

# parse_command_line, set_gpus, load_model, process_heatmaps and
# RegBoxCarsDataset are project-local helpers assumed to be in scope.
def eval():
    args = parse_command_line()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    set_gpus()

    model, scales, snapshot_dir_name, _ = load_model(args)
    print("Heatmap model loaded")

    test_dataset = RegBoxCarsDataset(args.path, 'test', batch_size=args.batch_size_eval, img_size=args.input_size, num_stacks=1,
                                     use_diamond=False, scale=1.0, perspective_sigma=0.0, crop_delta=0)

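    # Accumulate ground-truth VPs and the per-scale predictions (with their
    # variances) over the whole test set.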
    gt_vp_list = []
    pred_vp_list = []
    pred_dists_vars = []

    for X, gt_vp in test_dataset:
        pred = model.predict(X)
        pred_vps, pred_dists = process_heatmaps(pred[-1], scales)

        gt_vp_list.append(gt_vp[0])
        pred_vp_list.append(pred_vps)
        pred_dists_vars.append(pred_dists)

    gt_vps = np.concatenate(gt_vp_list, axis=0)
    pred_vps = np.concatenate(pred_vp_list, axis=0)
    pred_vars = np.concatenate(pred_dists_vars, axis=0)

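    # Per-sample error of each scale's prediction against the single ground
    # truth; infinities become NaN so the nan-aware statistics below skip them.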
    diff = pred_vps - gt_vps[:, np.newaxis, :]
    diff[np.isinf(diff)] = np.nan
    vp1_d = np.linalg.norm(diff[:, :, :2], axis=-1)
    vp2_d = np.linalg.norm(diff[:, :, 2:], axis=-1)

    vp1_gt_norm = np.linalg.norm(gt_vps[:, :2], axis=-1)
    vp2_gt_norm = np.linalg.norm(gt_vps[:, 2:], axis=-1)

    for j, scale in enumerate(scales):
        print('*' * 80)
        print("For scale: {}".format(scale))
        print("Median vp1 abs distance: {}".format(np.nanmedian(vp1_d[:, j])))
        print("Median vp2 abs distance: {}".format(np.nanmedian(vp2_d[:, j])))
        print("Median vp1 rel distance: {}".format(np.nanmedian(vp1_d[:, j] / vp1_gt_norm)))
        print("Median vp2 rel distance: {}".format(np.nanmedian(vp2_d[:, j] / vp2_gt_norm)))
        
    vp1_var = pred_vars[:, :, 0]
    vp2_var = pred_vars[:, :, 1]
    vp1_var_idx = np.argmin(vp1_var, axis=-1)
    vp2_var_idx = np.argmin(vp2_var, axis=-1)

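    # Oracle selection: per sample, report the scale whose prediction is
    # closest to the ground truth (an upper bound on attainable accuracy).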
    print('*' * 80)
    print('For optimal gt scale')

    vp1_d_idx = np.argmin(vp1_d, axis=-1)
    vp2_d_idx = np.argmin(vp2_d, axis=-1)

    # Select each sample's best scale with integer-array indexing;
    # vp1_d[:, vp1_d_idx] would build an N x N matrix instead.
    rows = np.arange(vp1_d.shape[0])
    print("Median vp1 abs distance: {}".format(np.nanmedian(vp1_d[rows, vp1_d_idx])))
    print("Median vp2 abs distance: {}".format(np.nanmedian(vp2_d[rows, vp2_d_idx])))
    print("Median vp1 rel distance: {}".format(np.nanmedian(vp1_d[rows, vp1_d_idx] / vp1_gt_norm)))
    print("Median vp2 rel distance: {}".format(np.nanmedian(vp2_d[rows, vp2_d_idx] / vp2_gt_norm)))

    print("Hist for vp1 optimal selection: ",
          ['scale {} : {}, '.format(scale, np.sum(vp1_d_idx == j)) for j, scale in enumerate(scales)])
    print("Hist for vp2 optimal selection: ",
          ['scale {} : {}, '.format(scale, np.sum(vp2_d_idx == j)) for j, scale in enumerate(scales)])

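    # Practical selection: per sample, trust the scale whose predicted
    # variance is lowest (no ground truth needed at test time).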
    print('*' * 80)
    print('For optimal var scale')
    print("Median vp1 abs distance: {}".format(np.nanmedian(vp1_d[:, vp1_var_idx])))
    print("Median vp2 abs distance: {}".format(np.nanmedian(vp2_d[:, vp2_var_idx])))
    print("Mean vp1 abs distance: {}".format(np.nanmean(vp1_d[:, vp1_var_idx])))
    print("Mean vp2 abs distance: {}".format(np.nanmean(vp2_d[:, vp2_var_idx])))
    print("Median vp1 rel distance: {}".format(np.nanmedian(vp1_d[:, vp1_var_idx] / vp1_gt_norm)))
    print("Median vp2 rel distance: {}".format(np.nanmedian(vp2_d[:, vp2_var_idx] / vp2_gt_norm)))
    print("Mean vp1 rel distance: {}".format(np.nanmean(vp1_d[:, vp1_var_idx] / vp1_gt_norm)))
    print("Mean vp2 rel distance: {}".format(np.nanmean(vp2_d[:, vp2_var_idx] / vp2_gt_norm)))

    print("Hist for vp1 var selection: ", ['scale {} : {}, '.format(scale, np.sum(vp1_var_idx == j)) for j, scale in enumerate(scales)])
    print("Hist for vp2 var selection: ", ['scale {} : {}, '.format(scale, np.sum(vp2_var_idx == j)) for j, scale in enumerate(scales)])