Example #1
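# Assumed imports for this snippet (not shown in the original):
#   from keras.layers import Input, Dense, GRU, Flatten, add
#   from keras.models import Model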
def main():
    args = parse_args()

    nlayers = 7
    cell_type = 'GRU'

    net_input = Input(shape=(args.lookback, 2))

    x_0 = Dense(args.ncells, activation='relu')(net_input)
    x_1 = Dense(args.ncells, activation='relu')(x_0)
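    # From here on, each layer consumes the element-wise sum (add) of all
    # previous layer outputs, forming dense skip connections.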
    x_2 = GRU(args.ncells,
              return_sequences=True,
              dropout=args.dropout,
              recurrent_dropout=args.recurrent_dropout)(add([x_0, x_1]))
    x_3 = Dense(args.ncells, activation='relu')(add([x_0, x_1, x_2]))
    x_4 = GRU(args.ncells,
              return_sequences=True,
              dropout=args.dropout,
              recurrent_dropout=args.recurrent_dropout)(add(
                  [x_0, x_1, x_2, x_3]))
    x_5 = Dense(args.ncells, activation='relu')(add([x_0, x_1, x_2, x_3, x_4]))
    x_6 = Flatten()(GRU(args.ncells,
                        return_sequences=True,
                        dropout=args.dropout,
                        recurrent_dropout=args.recurrent_dropout)(add(
                            [x_0, x_1, x_2, x_3, x_4, x_5])))

    out_x = Dense(args.delay, activation='linear')(x_6)
    out_y = Dense(args.delay, activation='linear')(x_6)

    model = Model(inputs=net_input, outputs=[out_x, out_y])

    train_model(model, args, nlayers=nlayers, cell_type=cell_type)
Example #2
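# Assumes os and torch are imported, along with the project-local helpers
# parse_args, myresnet50, load_model and evaluate_model.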
def main():
    args = parse_args()

    #dataset_size = 1000
    #dataloader = load_data(args.dataset_size, args)
    device = torch.device(
        "cuda:%d" % args.gpu if torch.cuda.is_available() else "cpu")
    num_output = 82
    model = myresnet50(device,
                       num_output=num_output,
                       use_pretrained=True,
                       num_views=args.num_views)
    gender = 'male'
    m = load_model('../../models/basicModel_%s_lbs_10_207_0_v1.0.0.pkl' %
                   gender[0])
    #parent_dic = "/home/yifu/workspace/data_smpl/A_pose_3/new_vertexes"
    #parent_dic = "/home/yifu/workspace/data_smpl/A_pose_4/scaled"
    parent_dic = "/home/yifu/workspace/Data/MPI-FAUST/training/registrations_obj/male/test_model_2"
    #path = "./trained_resnet.pth"
    save_name = 'trained_resnet_%d_%d.pth' % (num_output, 100000)
    path = os.path.join(parent_dic, save_name)
    model.load_state_dict(torch.load(path))

    #parent_dic = "/home/yifu/workspace/data_smpl/A_pose_3"
    #path = os.path.join(parent_dic, 'test')
    '''
    parent_dic = "/home/yifu/workspace/data_smpl/real_human"
    path = os.path.join(parent_dic, 'scaled')
    '''
    path = parent_dic

    evaluate_model(m, model, args.num_views, path, device, args)
Example #3
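# Assumes os and torch plus the project helpers parse_args, load_data,
# myresnet50 and evaluate_model are imported at module level.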
def main():
    args = parse_args()
    #args.dataset_size = 100000
    print('-----------------------------------------------------------')
    print('Dataset size: ', args.dataset_size)
    args.batch_size = 1
    gender = 'male'  # or 'female'
    print('Gender: ', gender)
    device = torch.device(
        "cuda:%d" % args.gpu if torch.cuda.is_available() else "cpu")
    torch.cuda.set_device(device)

    parent_dic = "/home/yifu/workspace/data/synthetic/noise_free"
    print('Data path: ', parent_dic)
    dataloader = load_data(args.dataset_size, parent_dic, args)

    model = myresnet50(device,
                       num_output=args.num_output,
                       use_pretrained=True,
                       num_views=args.num_views)

    # save_name = 'out:%d_data:%d_par_w:%.1f.pth'%(args.num_output,args.dataset_size, args.par_loss_weight)

    # folder: network weights
    parent_dic = "/home/yifu/workspace/data/test/model_1"
    save_name = 'data:%d.pth' % (100000)
    save_path = os.path.join(parent_dic, save_name)
    print('Load state dict from save path: ', save_path)
    model.load_state_dict(torch.load(save_path))
    print('-----------------------------------------------------------')

    # raw_input is Python 2; under Python 3 this would be input().
    if raw_input('Confirm the above setting? (yes/no): ') != 'yes':
        print('Terminated')
        exit()
    print('validation starts')
    print('------------------------')
    path = parent_dic
    evaluate_model(model, dataloader, args.num_views, path, device, args)
Example #4
def main():
    args = parse_args()

    nlayers = 4
    cell_type = 'LSTM'

    net_input = Input(shape=(args.lookback, 2))
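    # A simpler variant of Example #1: one Dense projection followed by two
    # stacked sequence-returning LSTMs (assumes the same Keras imports, plus LSTM).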

    x = Dense(args.ncells, activation='relu')(net_input)
    x = LSTM(args.ncells,
             return_sequences=True,
             dropout=args.dropout,
             recurrent_dropout=args.recurrent_dropout)(x)
    x = LSTM(args.ncells,
             return_sequences=True,
             dropout=args.dropout,
             recurrent_dropout=args.recurrent_dropout)(x)

    out_x = Dense(args.delay, activation='linear')(x)
    out_y = Dense(args.delay, activation='linear')(x)

    model = Model(inputs=net_input, outputs=[out_x, out_y])

    train_model(model, args, nlayers=nlayers, cell_type=cell_type)
Example #5
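# Assumes the same imports and helpers as Example #2 (os, torch, parse_args,
# load_model, load_data, myresnet50, evaluate_model).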
def main():
    args = parse_args()
    args.dataset_size = 100000
    args.batch_size = 1
    args.num_output = 82
    gender = 'male'  # or 'female'
    m = load_model('../../models/basicModel_%s_lbs_10_207_0_v1.0.0.pkl' %
                   gender[0])
    parent_dic = "/home/yifu/workspace/data_smpl/A_pose_5/male/noise_free"
    dataloader = load_data(args.dataset_size, parent_dic, args)
    device = torch.device(
        "cuda:%d" % args.gpu if torch.cuda.is_available() else "cpu")
    model = myresnet50(device,
                       num_output=args.num_output,
                       use_pretrained=True,
                       num_views=args.num_views)
    #model = myresnet50(num_output=80)
    save_name = 'trained_resnet_%d_%d.pth' % (args.num_output,
                                              args.dataset_size)
    path = os.path.join(parent_dic, save_name)
    model.load_state_dict(torch.load(path))

    # Reset path to the dataset directory; it was reused above for the weights file.
    path = parent_dic
    evaluate_model(m, model, dataloader, args.num_views, path, device, args)
Example #6
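# Assumes os and torch plus the project helpers parse_args, myresnet50 and
# evaluate_model; the weights path is entered interactively below.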
def main():
    args = parse_args()
    args.dataset_size = 5

    device = torch.device(
        "cuda:%d" % args.gpu if torch.cuda.is_available() else "cpu")
    torch.cuda.set_device(device)
    num_output = 82
    model = myresnet50(device,
                       num_output=num_output,
                       use_pretrained=True,
                       num_views=args.num_views)

    # folder: network weights
    '''
    parent_dic = "/home/yifu/workspace/data/test/model_1"
    save_name = 'data:%d.pth' % (100000)
    path = os.path.join(parent_dic, save_name)
    '''
    # raw_input is Python 2; under Python 3 this would be input().
    model_path = raw_input('Model Path: ')
    model.load_state_dict(torch.load(model_path, map_location=device))
    # folder: image
    data_path = "/home/yifu/workspace/data/test/model_1"
    evaluate_model(model, args.num_views, data_path, device, args)
Example #7
    def test_dataparallel(self):
        args = parse_args(external_args=[])
        trainer = Trainer(args)
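        # "tt" is assumed to be a testing module imported at file level
        # (e.g. numpy.testing or torch.testing) providing assert_allclose and assert_equal.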

        trainer.reference_model.train(False)
        trainer.dataparallel_model.train(False)

        def _compare_models():
            # Assert that the reference and data-parallel models agree layer by
            # layer, on both weights and (where present) gradients.
            for i_layer, (ref_np, dp_np) in enumerate(
                    zip(trainer.reference_model.named_parameters(),
                        trainer.dataparallel_model.named_parameters())):

                if i_layer == 0:
                    print(ref_np[0], dp_np[0])
                    print("Weights:")
                    print(ref_np[1].data[0, 0, ...])
                    print(dp_np[1].data[0, 0, ...])
                    print("Grads:")
                    if ref_np[1].grad is not None:
                        print(ref_np[1].grad[0, 0, ...])
                    else:
                        print("None")
                    if dp_np[1].grad is not None:
                        print(dp_np[1].grad[0, 0, ...])
                    else:
                        print("None")
                    print("")

                rtol = 2e-2
                atol = 1e-7
                tt.assert_allclose(ref_np[1].data,
                                   dp_np[1].data,
                                   rtol=rtol,
                                   atol=atol)
                if ref_np[1].grad is not None and dp_np[1].grad is not None:
                    tt.assert_allclose(ref_np[1].grad,
                                       dp_np[1].grad,
                                       rtol=rtol)

        def _check_dp_models_equal():
            # Assert that every replica inside the data-parallel wrapper holds
            # exactly the same parameters as the master replica.
            dp_model = trainer.dataparallel_model
            for i_model, model in enumerate(dp_model.models):
                if i_model == dp_model.master_model_idx:
                    continue
                master_model_params = dp_model.models[
                    dp_model.master_model_idx].parameters()
                model_params = model.parameters()
                for i_layer, (master_param, secondary_param) in \
                        enumerate(zip(master_model_params, model_params)):
                    if i_layer == 0:
                        print(f"Master model and model {i_model}")
                        print(master_param[0, 0, ...])
                        print(secondary_param[0, 0, ...])
                    # Important that after all-reduced gradients are applied,
                    # all replica weights are bit-exactly equal even as float32 values!
                    tt.assert_equal(master_param, secondary_param)

        print("Before step")
        _compare_models()
        _check_dp_models_equal()

        for batch_idx, (data, target) in enumerate(trainer.train_loader):
            data, target = data.to(trainer.device), target.to(trainer.device)

            step_info_ref = trainer.reference_model.step(data, target)
            ref_loss = step_info_ref["loss"]

            step_info_dp = trainer.dataparallel_model.step(data, target)
            dp_loss = step_info_dp["loss"]

            print("After step")
            print(f"Loss, reference={ref_loss} dp={dp_loss}")
            _compare_models()
            _check_dp_models_equal()

            # One batch is enough for this equivalence check.
            break

        return