Example #1
File: main.py  Project: han811/Tobigs_VO
        # Tail of the (truncated) training loop: reset the running loss and
        # checkpoint the model.
        sum_loss = 0
        torch.save(tobiVO.state_dict(),
                   model_path + '/modelsize' + str(j) + '.pth')


if __name__ == "__main__":
    # gpu_num = 3
    # device = torch.device(f'cuda:{gpu_num}' if torch.cuda.is_available() else 'cpu')
    # torch.cuda.set_device(device)

    train_df = get_data_info_tobi(folder_list=par.train_video,
                                  overlap=False,
                                  pad_y=False,
                                  shuffle=False)
    train_dataset = ImageSequenceDataset(train_df, par.resize_mode,
                                         (par.img_w, par.img_h), par.img_means,
                                         par.img_stds, par.minus_point_5)

    if train:
        if not gpu:
            tobiVO = Tobi_model()
        else:
            tobiVO = Tobi_model().cuda()
        # optimizer = optim.Adam(tobiVO.parameters(), lr=0.0001)
        if load:
            tobiVO.load_state_dict(
                torch.load(model_path + '/modelsize5.pth'))  # adjust the checkpoint name as needed
        optimizer = optim.Adam(tobiVO.parameters(), lr=args.lr)
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=400,
                                              gamma=0.5)
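The excerpt ends at the scheduler setup, before the loop that actually consumes train_dataset. A minimal sketch of how these pieces are typically driven in PyTorch follows; the DataLoader arguments, num_epochs, and the get_loss interface on Tobi_model are illustrative assumptions, not code from han811/Tobigs_VO:

from torch.utils.data import DataLoader

train_dl = DataLoader(train_dataset, batch_size=8, shuffle=True)  # batch size is illustrative
for epoch in range(num_epochs):  # num_epochs is assumed
    sum_loss = 0
    for batch in train_dl:
        optimizer.zero_grad()
        loss = tobiVO.get_loss(batch)  # hypothetical loss interface
        loss.backward()
        optimizer.step()
        sum_loss += loss.item()
    scheduler.step()  # advance the StepLR schedule set up above
    torch.save(tobiVO.state_dict(),
               model_path + '/modelsize' + str(epoch) + '.pth')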
Example #2
print('make validation data from sequences: {} (dataset: {})'.format(
    args.valid_sequences, args.dataset))
valid_df = get_data_info(image_dir,
                         pose_dir,
                         folder_list=args.valid_sequences,
                         seq_len_range=par.seq_len,
                         overlap=1,
                         sample_times=par.sample_times,
                         max_step=par.max_step)

print('Create Dataset Loaders')
train_sampler = SortedRandomBatchSampler(train_df,
                                         args.batch_size,
                                         drop_last=True)
train_dataset = ImageSequenceDataset(
    train_df, par.resize_mode, (par.img_w, par.img_h), par.img_means,
    par.img_stds, par.minus_point_5)  # NOTE why swap h and w?
train_dl = DataLoader(train_dataset,
                      batch_sampler=train_sampler,
                      num_workers=args.n_processors,
                      pin_memory=par.pin_mem)

valid_sampler = SortedRandomBatchSampler(valid_df,
                                         args.batch_size,
                                         drop_last=True)
valid_dataset = ImageSequenceDataset(
    valid_df, par.resize_mode, (par.img_w, par.img_h), par.img_means,
    par.img_stds, par.minus_point_5)  # NOTE why swap h and w?
valid_dl = DataLoader(valid_dataset,
                      batch_sampler=valid_sampler,
                      num_workers=args.n_processors,
                      pin_memory=par.pin_mem)
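SortedRandomBatchSampler and ImageSequenceDataset are project code rather than torch built-ins, and their bodies are not shown in either example. As a rough illustration of the sampler's pattern (grouping rows of equal sequence length into batches so the collated tensors need no padding), a batch sampler can be built on torch.utils.data.Sampler as sketched below; the class name and the lengths argument are illustrative, not the project's implementation:

import random
from torch.utils.data import Sampler

class LengthBucketBatchSampler(Sampler):
    """Yield batches of indices whose rows share the same sequence length."""

    def __init__(self, lengths, batch_size, drop_last=True):
        self.batches = []
        by_len = {}
        for idx, n in enumerate(lengths):          # bucket indices by length
            by_len.setdefault(n, []).append(idx)
        for idxs in by_len.values():               # slice each bucket into batches
            random.shuffle(idxs)
            for i in range(0, len(idxs), batch_size):
                batch = idxs[i:i + batch_size]
                if len(batch) == batch_size or not drop_last:
                    self.batches.append(batch)

    def __iter__(self):
        random.shuffle(self.batches)               # new batch order each epoch
        return iter(self.batches)

    def __len__(self):
        return len(self.batches)

An instance of such a sampler is passed to DataLoader through the batch_sampler argument, exactly as train_sampler and valid_sampler are used above.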