コード例 #1
0
        # Generate the augmented training frames only if they do not exist yet.
        if not os.path.exists(single_dir):
            # Derive the augmentation name from the path segment that follows
            # "frames/", keeping only the part before the first underscore
            # (e.g. ".../frames/rain_xxx" -> "rain").
            rain_type = single_dir.strip().split('frames/')[1].strip().split(
                "_")[0]
            print(
                "creating training images with augmentation %s and rain level 0.70"
                % rain_type)
            # NOTE(review): bright_space=0.7 is the corruption strength; the
            # message above hard-codes "0.70" — keep the two in sync.
            aug_data.save_avenue_rain_or_bright(args.dataset_path,
                                                rain_type,
                                                True,
                                                "training",
                                                bright_space=0.7)

# Frame-level transform shared by the training and testing datasets.
frame_trans = data_utils.give_frame_trans(args.dataset_type, [args.h, args.w])

# Each sample is a sliding window of t_length-1 input frames plus one
# predicted frame; both splits use the same windowing.
_window_kwargs = dict(time_step=args.t_length - 1, num_pred=1)
train_dataset = data_utils.DataLoader(train_folder, frame_trans, **_window_kwargs)
test_dataset = data_utils.DataLoader(test_folder, frame_trans, **_window_kwargs)

# Shuffle and drop the last partial batch for training only.
train_batch = data.DataLoader(
    train_dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=args.num_workers,
    drop_last=True,
)
test_batch = data.DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers,
コード例 #2
0
    print("The dataset is not available..........")
    pass
    
# Preprocess each frame: resize -> single-channel grayscale -> tensor
# normalized to [-1, 1].
frame_trans = transforms.Compose([
    transforms.Resize([height, width]),
    transforms.Grayscale(num_output_channels=1),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])
# Inverse normalization, used when visualizing reconstructions.
# NOTE(review): frames are 1-channel here but UnNormalize gets 3-channel
# stats — confirm utils.UnNormalize broadcasts as intended.
unorm_trans = utils.UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))

for _tag, _path in (("Data folder", data_dir),
                    ("Model folder", model_dir),
                    ("Restored ckpt", ckpt_dir)):
    print("------" + _tag, _path)

data_loader = data_utils.DataLoader(
    data_dir, frame_trans, time_step=num_frame - 1, num_pred=1)
video_data_loader = DataLoader(
    data_loader, batch_size=batch_size, shuffle=False)

# Memory-augmented 3D conv autoencoder hyper-parameters.
chnum_in_ = 1                  # input channels (grayscale frames)
mem_dim_in = 2000              # number of memory items
sparse_shrink_thres = 0.0025   # shrinkage threshold for memory addressing

model = AutoEncoderCov3DMem(chnum_in_, mem_dim_in,
                            shrink_thres=sparse_shrink_thres)
model_para = torch.load(ckpt_dir)
model.load_state_dict(model_para)
# Inference only: freeze all parameters, move to device, switch to eval mode
# (each of these nn.Module methods returns the module, so they chain).
model.requires_grad_(False).to(device).eval()

img_crop_size = 0
# One slot per batch of the evaluation loader.
recon_error_list = [None] * len(video_data_loader)
コード例 #3
0
    b, t, ch, h, w = im_input.shape
    im_input = np.reshape(im_input, [b * t, ch, h, w])
    return im_input


# Resolve the source-domain train/test frame folders.
s_train, s_test = data_utils.give_data_folder(args.source_dataset,
                                              args.dataset_path)

print("The training path", s_train)
print("The testing path", s_test)

# Transform applied to every frame of the source dataset.
frame_trans = data_utils.give_frame_trans(args.source_dataset,
                                          [args.h, args.w])

# Both splits read windows of t_length-1 input frames plus one predicted frame.
_window = dict(time_step=args.t_length - 1, num_pred=1)
s_train_dataset = data_utils.DataLoader(s_train, frame_trans, **_window)
s_test_dataset = data_utils.DataLoader(s_test, frame_trans, **_window)

# Shuffle and drop the last partial batch for training only.
s_train_batch = data.DataLoader(
    s_train_dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=args.num_workers,
    drop_last=True,
)
s_test_batch = data.DataLoader(s_test_dataset,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=args.num_workers,
コード例 #4
0
                                              args.dataset_path)

# Per-domain frame transforms (source and target may be different datasets).
s_frame_trans = data_utils.give_frame_trans(args.source_dataset,
                                            [args.h, args.w])
t_frame_trans = data_utils.give_frame_trans(args.target_dataset,
                                            [args.h, args.w])

# Ground-truth anomaly labels are only loaded for the target test split.
# s_test_label = np.load(args.source_test_label_path, allow_pickle=True)
t_test_label = np.load(args.target_test_label_path, allow_pickle=True)

# Shared clip-window settings for both training datasets.
# NOTE(review): the 3rd/4th positional args look like (label=None,
# is_training=True) by analogy with the test-side call — confirm against
# data_utils.DataLoader's signature.
_clip = dict(time_step=args.t_length - 1, num_pred=1, video_start=1)

s_train_dataset = data_utils.DataLoader(
    s_train, s_frame_trans, None, True, video_end=5, **_clip)
# s_test_dataset = data_utils.DataLoader(s_test, s_frame_trans, s_test_label, False, time_step=args.t_length - 1, num_pred=1)

t_train_dataset = data_utils.DataLoader(
    t_train, t_frame_trans, None, True, video_end=4, **_clip)
t_test_dataset = data_utils.DataLoader(t_test,
                                       t_frame_trans,
コード例 #5
0
    raise Exception("The dataset is not available..........")

# Preprocess each frame: resize -> grayscale -> tensor normalized to [-1, 1].
frame_trans = transforms.Compose([
    transforms.Resize([height, width]),
    transforms.Grayscale(num_output_channels=1),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])
# Inverse normalization for visualization.
# NOTE(review): 3-channel stats for 1-channel frames — confirm
# utils.UnNormalize broadcasts as intended.
unorm_trans = utils.UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))

for _tag, _path in (("Model folder", model_dir),
                    ("Restored ckpt", ckpt_dir)):
    print("------" + _tag, _path)

# Frame-level ground-truth labels for evaluation.
label = np.load(gt_file, allow_pickle=True)
# ped2toped1 20 ped1toped2 1
data_loader = data_utils.DataLoader(
    data_dir, frame_trans, label, False,
    time_step=num_frame - 1, num_pred=1, video_start=1, video_end=2)
video_data_loader = DataLoader(
    data_loader, batch_size=batch_size, shuffle=False)

# Adversarial memory-augmented autoencoder hyper-parameters.
chnum_in = 1                   # input channels (grayscale frames)
mem_dim_in = 2000              # number of memory items
sparse_shrink_thres = 0.0025   # shrinkage threshold for memory addressing

model = AdversarialAutoEncoderCov3DMem(chnum_in,
                                       backward_coeff=0.0,
                                       mem_dim=mem_dim_in,
                                       shrink_thres=sparse_shrink_thres)
model_para = torch.load(ckpt_dir)
model.load_state_dict(model_para)
# Inference only: freeze all parameters, move to device, switch to eval mode
# (each of these nn.Module methods returns the module, so they chain).
model.requires_grad_(False).to(device).eval()

img_crop_size = 0
recon_error_list = []