Example #1
# Imports inferred from usage in this snippet. cmd_options, dataset and
# regressionnet are project-local modules; iterators is assumed to be
# chainer.iterators, which provides MultiprocessIterator.
import datetime
import os
import pprint

import numpy as np
import tensorflow as tf  # TF 1.x API (tf.train.Saver)

from chainer import iterators

import cmd_options
import dataset
import regressionnet


def main(argv):
    """
    Run training of the Deeppose stg-1
    """
    args = cmd_options.get_arguments(argv)
    if not os.path.exists(args.o_dir):
        os.makedirs(args.o_dir)
    suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    with open(os.path.join(args.o_dir, 'params.dump_{}.txt'.format(suffix)), 'w') as f:
        f.write('{}\n'.format(pprint.pformat(args)))

    net, loss_op, pose_loss_op, train_op = regressionnet.create_regression_net(
        n_joints=args.n_joints,
        init_snapshot_path=args.snapshot,
        is_resume=args.resume,
        reset_iter_counter=args.reset_iter_counter,
        reset_moving_averages=args.reset_moving_averages,
        optimizer_type=args.optimizer,
        gpu_memory_fraction=0.32,  # fraction of GPU memory TensorFlow may allocate for this net
        net_type=args.net_type)
    with net.graph.as_default():
        saver = tf.train.Saver()

    print('args.resume: {}\nargs.snapshot: {}'.format(args.resume, args.snapshot))
    bbox_extension_range = (args.bbox_extension_min, args.bbox_extension_max)
    if bbox_extension_range[0] is None or bbox_extension_range[1] is None:
        bbox_extension_range = None
        test_bbox_extension_range = None
    else:
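        # min == max here, so the test-time bbox extension is fixed at the maximum value (deterministic crops).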
        test_bbox_extension_range = (bbox_extension_range[1], bbox_extension_range[1])

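    # Training dataset with augmentation: horizontal flips, rotations, shifts and random bbox extension.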
    train_dataset = dataset.PoseDataset(
        args.train_csv_fn, args.img_path_prefix, args.im_size,
        fliplr=args.fliplr,
        rotate=args.rotate,
        rotate_range=args.rotate_range,
        shift=args.shift,
        bbox_extension_range=bbox_extension_range,
        min_dim=args.min_dim,
        coord_normalize=args.coord_normalize,
        gcn=args.gcn,
        fname_index=args.fname_index,
        joint_index=args.joint_index,
        symmetric_joints=args.symmetric_joints,
        ignore_label=args.ignore_label,
        should_downscale_images=args.should_downscale_images,
        downscale_height=args.downscale_height
    )
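    # Evaluation dataset: augmentation disabled; each sample also carries its bbox (should_return_bbox=True).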
    test_dataset = dataset.PoseDataset(
        args.test_csv_fn, args.img_path_prefix, args.im_size,
        fliplr=False, rotate=False,
        shift=None,
        bbox_extension_range=test_bbox_extension_range,
        coord_normalize=args.coord_normalize,
        gcn=args.gcn,
        fname_index=args.fname_index,
        joint_index=args.joint_index,
        symmetric_joints=args.symmetric_joints,
        ignore_label=args.ignore_label,
        should_return_bbox=True,
        should_downscale_images=args.should_downscale_images,
        downscale_height=args.downscale_height
    )

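    # Seed NumPy's global RNG so data shuffling/augmentation is reproducible.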
    np.random.seed(args.seed)
    train_iterator = iterators.MultiprocessIterator(train_dataset, args.batch_size,
                                                    n_processes=args.workers, n_prefetch=3)
    test_iterator = iterators.MultiprocessIterator(
        test_dataset, args.batch_size,
        repeat=False, shuffle=False,
        n_processes=1, n_prefetch=1)

    val_iterator = None
    if args.val_csv_fn is not None and args.val_csv_fn != '':
        small_train_dataset = dataset.PoseDataset(
            args.val_csv_fn,
            args.img_path_prefix, args.im_size,
            fliplr=False, rotate=False,
            shift=None,
            bbox_extension_range=test_bbox_extension_range,
            coord_normalize=args.coord_normalize,
            gcn=args.gcn,
            fname_index=args.fname_index,
            joint_index=args.joint_index,
            symmetric_joints=args.symmetric_joints,
            ignore_label=args.ignore_label,
            should_return_bbox=True,
            should_downscale_images=args.should_downscale_images,
            downscale_height=args.downscale_height
        )
        val_iterator = iterators.MultiprocessIterator(
            small_train_dataset, args.batch_size,
            repeat=False, shuffle=False,
            n_processes=1, n_prefetch=1)

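    # Launch training with separate learning rates for conv and FC layers;
    # fix_conv_iter presumably keeps the conv layers frozen for that many initial iterations.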
    train_loop(net, saver, loss_op, pose_loss_op, train_op, args.dataset_name,
               train_iterator, test_iterator,
               val_iterator=val_iterator,
               max_iter=args.max_iter,
               test_step=args.test_step,
               log_step=args.log_step,
               snapshot_step=args.snapshot_step,
               batch_size=args.batch_size,
               conv_lr=args.conv_lr,
               fc_lr=args.fc_lr,
               fix_conv_iter=args.fix_conv_iter,
               output_dir=args.o_dir
               )
Example #2
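        # Return the current learning rate so observe_value('lr', ...) below can log it.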
        return optimizer.lr

    trainer.extend(Evaluator(
        test_iter, model, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.observe_value(
        'lr', lambda _: lr_shift()), trigger=(1, 'epoch'))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot_object(
        model, 'epoch_{.updater.epoch}.model'), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        optimizer, 'epoch_{.updater.epoch}.state'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
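    # Report wall-clock time elapsed since this point in every log entry.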
    start_time = time.time()
    trainer.extend(extensions.observe_value(
        'time', lambda _: time.time() - start_time), trigger=log_interval)
    trainer.extend(extensions.PrintReport([
        'time', 'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy', 'lr',
    ]), trigger=log_interval)
    trainer.extend(extensions.observe_value(
        'graph', lambda _: create_fig(args.dir)),
        trigger=(1, 'epoch'), priority=50)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()


if __name__ == '__main__':
    args = cmd_options.get_arguments()
    main(args)
Example #3
            model(x, t)

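        # Accumulate loss weighted by batch size; the log below reports the running mean.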
        sum_loss += float(model.loss.data) * input_data.shape[0]
        num += input_data.shape[0]

        logging.info('loss:{}'.format(sum_loss / num))

    # quit training data loading thread
    input_q.put(None)
    data_loader.join()

    return sum_loss


if __name__ == '__main__':
    args = get_arguments()
    if cuda.available and args.gpu >= 0:
        cuda.get_device(args.gpu).use()

    np.random.seed(args.seed)

    # create result dir
    create_result_dir(args)

    # create model and optimizer
    model, optimizer = get_model_optimizer(args)
    train_dl, test_dl = load_dataset(args)
    N, N_test = len(train_dl), len(test_dl)
    logging.info('# of training data:{}'.format(N))
    logging.info('# of test data:{}'.format(N_test))
Example #4
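    # Reorder image axes from HWC to CHW.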
    input_data[0] = x.transpose((2, 0, 1))
    label[0] = t

    return orig, input_data, label


if __name__ == '__main__':
    sys.path.append('scripts')

    from test_flic_dataset import draw_joints as flic_draw_joints
    from test_lsp_dataset import draw_joints as lsp_draw_joints
    from test_mpii_dataset import draw_joints as mpii_draw_joints
    from transform import Transform
    from cmd_options import get_arguments

    args = get_arguments()
    print(args)

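    # Index pairs of left/right symmetric joints, swapped when an image is flipped horizontally.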
    flic_swap_joints = [(2, 4), (1, 5), (0, 6)]
    lsp_swap_joints = [(8, 9), (7, 10), (6, 11), (2, 3), (1, 4), (0, 5)]
    mpii_swap_joints = [(12, 13), (11, 14), (10, 15), (2, 3), (1, 4), (0, 5)]

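    # Exercise the augmentation setup for each dataset's joint layout (FLIC, extended LSP, MPII).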
    for datadir, n_joints, draw_joints, swap_joints, min_dim in [
            ('data/FLIC-full', 7, flic_draw_joints, flic_swap_joints, 100),
            ('data/lspet_dataset', 14, lsp_draw_joints, lsp_swap_joints, 50),
            ('data/mpii', 16, mpii_draw_joints, mpii_swap_joints, 100)]:
        args.datadir = datadir
        args.joint_num = n_joints
        args.min_dim = min_dim
        # augmentation setting
        trans = Transform(args, swap_joints)
Example #5
File: train.py  Project: mitmul/deeppose
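        # Collect one transformed (input, label) pair per batch element from the worker output queue.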
        for j in range(len(x_batch)):
            input_data[j], label[j] = o_queue.get()
        minibatch_q.put([input_data, label])

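    # Signal the transform workers to terminate, then wait for them to exit.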
    for _ in range(args.batchsize):
        x_queue.put(None)
    for w in workers:
        w.join()


if __name__ == '__main__':
    args = cmd_options.get_arguments()
    result_dir = create_result_dir(args.model, args.resume_model)
    create_logger(args, result_dir)
    model = get_model(args.model, args.n_joints, result_dir, args.resume_model)
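    # Wrap the model so its forward pass computes the pose-estimation loss.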
    model = loss.PoseEstimationError(model)
    opt = get_optimizer(model, args.opt, args.lr, adam_alpha=args.adam_alpha,
                        adam_beta1=args.adam_beta1, adam_beta2=args.adam_beta2,
                        adam_eps=args.adam_eps, weight_decay=args.weight_decay,
                        resume_opt=args.resume_opt)
    train_dataset = dataset.PoseDataset(
        args.train_csv_fn, args.img_dir, args.im_size, args.fliplr,
        args.rotate, args.rotate_range, args.zoom, args.base_zoom,
        args.zoom_range, args.translate, args.translate_range, args.min_dim,
        args.coord_normalize, args.gcn, args.n_joints, args.fname_index,
        args.joint_index, args.symmetric_joints, args.ignore_label
    )