예제 #1
0
def test_from_image():
    """Enhance every PNG frame under ``data_dir`` with a pretrained
    SeeInDark model and write the results into ``save_dir``.

    The label sub-directory layout of ``data_dir`` is mirrored into
    ``save_dir``.  Frames whose output file already exists are skipped,
    so an interrupted run can be resumed.
    """
    data_dir = "/data/hyeokjae/data/UG2-2021-Track2.1/video/Train"
    save_dir = "/data/hyeokjae/results/UG2-2021/light_enhancement/sid/UG2-2021-Track2.1/Train"
    model_fpath = r"./saved_model/checkpoint_sony_e4000.pth"

    model = SeeInDark()
    # The checkpoint was saved from cuda:1; remap so it loads on cuda:0.
    model.load_state_dict(
        torch.load(model_fpath, map_location={"cuda:1": "cuda:0"}))
    model = model.to(torch.device("cuda:0"))

    for label in os.listdir(data_dir):
        # exist_ok avoids the racy isdir-then-makedirs check.
        os.makedirs(osp.join(save_dir, label), exist_ok=True)

        for img_fpath in glob.glob(osp.join(data_dir, label, "*.png")):
            save_fpath = osp.join(save_dir, label, osp.basename(img_fpath))
            if osp.isfile(save_fpath):
                continue  # already processed by an earlier run

            read_img = imread(img_fpath)
            output = inference(read_img, model)
            imsave(save_fpath, output)
예제 #2
0
def test_from_video():
    """Enhance every video in ``data_dir`` with a pretrained SeeInDark
    model and save the result as an XVID ``.avi`` in ``save_dir``.

    ``ClaDataset`` yields padded/up-scaled frame stacks; after inference
    the frames are resized back to half size and the padding added by
    the dataset is stripped before encoding.
    """
    data_dir = "/data/hyeokjae/data/UG2-2021-Track2.1/video/Train"
    save_dir = "/data/hyeokjae/results/UG2-2021/light_enhancement/sid/UG2-2021-Track2.1/Train"
    model_fpath = r"./saved_model/checkpoint_sony_e4000.pth"

    model = SeeInDark()
    # The checkpoint was saved from cuda:1; remap so it loads on cuda:0.
    model.load_state_dict(
        torch.load(model_fpath, map_location={"cuda:1": "cuda:0"}))
    model = model.to(torch.device("cuda:0"))

    dataset = ClaDataset(data_dir)
    loader = DataLoader(dataset, num_workers=0)
    batch_size = 8

    fps = 30
    fourcc = cv2.VideoWriter_fourcc(*"XVID")

    for video_x, fname, label, pad_size in loader:
        # (T, H, W, C) -> (T, C, H, W), then move the stack to the GPU.
        video_x = video_x[0].permute(0, 3, 1, 2).to(torch.device("cuda:0"))
        num_frames = video_x.shape[0]

        # Run inference in mini-batches.  no_grad() stops autograd from
        # building a graph over the whole video, which would otherwise
        # exhaust GPU memory on long clips.
        results = []
        num_inference = (num_frames + batch_size - 1) // batch_size  # ceil div
        with torch.no_grad():
            for i in range(num_inference):
                end = min((i + 1) * batch_size, num_frames)
                batch_x = video_x[i * batch_size:end]

                out = model(batch_x)
                out = out.permute(0, 2, 3, 1).cpu().data.numpy()
                out = np.minimum(np.maximum(out, 0), 1)  # clamp to [0, 1]
                out = (255 * out).astype("uint8")
                results.extend(list(out))

        if len(results) != num_frames:
            raise ValueError(
                f"expected {num_frames} enhanced frames, got {len(results)}")

        # Resize back to the original scale and remove the dataset padding.
        # pad_size comes through the DataLoader, so force plain ints.
        pad_h, pad_w = int(pad_size[0]), int(pad_size[1])
        save_frames = []
        for frame in results:
            h, w, _ = frame.shape
            frame = cv2.resize(frame, (w // 2, h // 2))
            # Slice with explicit end indices: `frame[p:-p]` yields an
            # EMPTY array when p == 0 (since [0:-0] == [0:0]).
            frame = frame[pad_h:frame.shape[0] - pad_h,
                          pad_w:frame.shape[1] - pad_w]
            save_frames.append(frame)
        # Guard against an empty clip (original code left `size` undefined).
        size = save_frames[0].shape[:2] if save_frames else (0, 0)

        os.makedirs(osp.join(save_dir, label[0]), exist_ok=True)

        # cv2.VideoWriter expects frameSize as (width, height).
        save_video = cv2.VideoWriter(
            osp.join(save_dir, label[0], f"{osp.splitext(fname[0])[0]}.avi"),
            fourcc, fps, size[::-1])
        for frame in save_frames:
            save_video.write(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        save_video.release()
예제 #3
0
def main():
    """Load the pretrained SID model and run the test pass over all
    test scene ids, creating the result directory on first use."""
    ids = get_test_ids()

    net = SeeInDark()
    net.load_state_dict(torch.load(m_path + m_name))

    # Make sure the output directory exists before `test` writes into it.
    if not os.path.isdir(test_result_dir):
        os.makedirs(test_result_dir)

    test(net, ids)
예제 #4
0
def test_on_single():
    """Enhance one hand-picked frame and save a side-by-side comparison
    (padded input on the left, network output on the right)."""
    data_dir = "/data/hyeokjae/data/UG2-2021-Track2.1/video/Train"
    save_dir = "/data/hyeokjae/results/UG2-2021/light_enhancement/sid/UG2-2021-Track2.1/Train"
    model_fpath = r"./saved_model/checkpoint_sony_e4000.pth"

    fname = "Run_1_5_037.png"
    img = imread(osp.join(data_dir, fname))

    net = SeeInDark()
    # The checkpoint was saved from cuda:1; remap so it loads on cuda:0.
    net.load_state_dict(
        torch.load(model_fpath, map_location={"cuda:1": "cuda:0"}))
    net = net.to(torch.device("cuda:0"))

    enhanced = inference(img, net)

    # The network output is larger than the input; pad the input equally
    # on each side so the two images can be concatenated horizontally.
    half_dh = int((enhanced.shape[0] - img.shape[0]) / 2)
    half_dw = int((enhanced.shape[1] - img.shape[1]) / 2)
    padded = np.pad(img, ((half_dh, half_dh), (half_dw, half_dw), (0, 0)))
    comparison = np.concatenate([padded, enhanced], axis=1)

    imsave(osp.join(save_dir, fname), comparison)
예제 #5
0
# Raw data takes a long time to load, so cache decoded images in memory
# after the first read.  The '300'/'250'/'100' keys look like exposure
# amplification-ratio buckets -- TODO confirm against the data loader.
gt_images = [None] * 6000
input_images = {}
input_images['300'] = [None] * len(train_ids)
input_images['250'] = [None] * len(train_ids)
input_images['100'] = [None] * len(train_ids)

# Per-image training-loss buffer, indexed by training-image id.
g_loss = np.zeros((5000, 1))

# Resume support: per-epoch result folders are named result/%04d, so the
# highest existing folder number marks the last finished epoch.
allfolders = glob.glob('./result/*0')
lastepoch = 0
for folder in allfolders:
    lastepoch = np.maximum(lastepoch, int(folder[-4:]))

learning_rate = 1e-4
model = SeeInDark().to(device)
model._initialize_weights()
opt = optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(lastepoch, 4001):
    # Skip epochs whose results already exist (resumed run).
    if os.path.isdir("result/%04d" % epoch):
        continue
    cnt = 0
    # Learning-rate schedule: drop from 1e-4 to 1e-5 after epoch 2000.
    if epoch > 2000:
        for g in opt.param_groups:
            g['lr'] = 1e-5

    for ind in np.random.permutation(len(train_ids)):
        # Get the input file paths from the image id; pick one short
        # exposure at random among the '%05d_00*' matches.
        train_id = train_ids[ind]
        in_files = glob.glob(input_dir + '%05d_00*.ARW' % train_id)
        # NOTE(review): np.random.random_integers is deprecated (removed in
        # recent NumPy); np.random.randint(0, len(in_files)) is the modern
        # equivalent.
        in_path = in_files[np.random.random_integers(0, len(in_files) - 1)]
예제 #6
0
def pack_raw(raw):
    """Pack a Bayer-mosaic raw image into a 4-channel half-resolution array.

    Subtracts the black level (512), clips negatives to zero, and
    normalises by the usable sensor range (16383 - 512).  The 2x2 Bayer
    tile is split into 4 channels in the order (0,0), (0,1), (1,1), (1,0).

    Args:
        raw: 2-D array (H, W) of raw sensor values.

    Returns:
        Float array of shape (H//2, W//2, 4) with values in [0, 1].
    """
    raw = np.asarray(raw)
    if np.issubdtype(raw.dtype, np.integer):
        # Widen to a signed type first: `raw - 512` on an unsigned array
        # (e.g. uint16 straight from the sensor) wraps around for values
        # below the black level instead of going negative, so the
        # np.maximum clip never fires and dark pixels come out huge.
        raw = raw.astype(np.int32)
    im = np.maximum(raw - 512, 0) / (16383 - 512)  # subtract the black level

    im = np.expand_dims(im, axis=2)
    H = im.shape[0]
    W = im.shape[1]

    out = np.concatenate((im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :],
                          im[1:H:2, 1:W:2, :], im[1:H:2, 0:W:2, :]),
                         axis=2)
    return out


model = SeeInDark()
# The checkpoint was saved from cuda:1; remap so it loads onto cuda:0.
model.load_state_dict(
    torch.load(m_path + m_name, map_location={'cuda:1': 'cuda:0'}))
model = model.to(device)
if not os.path.isdir(result_dir):
    os.makedirs(result_dir)

for test_id in test_ids:
    # Evaluate every short-exposure file ('%05d_00*.ARW') of this scene id.
    in_files = glob.glob(input_dir + '%05d_00*.ARW' % test_id)
    for k in range(len(in_files)):
        in_path = in_files[k]
        _, in_fn = os.path.split(in_path)
        print(in_fn)
        # The ground-truth file shares the same scene-id prefix; the first
        # glob match is used for every input of this id.
        gt_files = glob.glob(gt_dir + '%05d_00*.ARW' % test_id)
        gt_path = gt_files[0]
예제 #7
0
    # Pack the Bayer mosaic held in the module-level `im` into 4 channels
    # at half resolution (one channel per 2x2 Bayer site).
    global im
    # NOTE(review): if `im` is an unsigned integer array, `im - 512` wraps
    # around for values below the black level -- confirm the caller passes
    # a float/signed array.
    im = np.maximum(im - 512, 0) / (16383 - 512)  # subtract black level, normalise

    im = np.expand_dims(im, axis=2)
    img_shape = im.shape
    H = img_shape[0]
    W = img_shape[1]

    # 2x2 tile -> channels in the order (0,0), (0,1), (1,1), (1,0).
    out = np.concatenate((im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :],
                          im[1:H:2, 1:W:2, :], im[1:H:2, 0:W:2, :]),
                         axis=2)
    return out


model = SeeInDark()
# The checkpoint was saved from cuda:1; remap so it loads onto cuda:0.
model.load_state_dict(
    torch.load(params['m_path'] + params['m_name'],
               map_location={'cuda:1': 'cuda:0'}))
model = model.to(device)
if not os.path.isdir(params['result_dir']):
    os.makedirs(params['result_dir'])

for test_id in test_ids:
    # Evaluate every '%05d_00*.ARW' input file of this test scene id.
    in_files = glob.glob(params['input_dir'] + '%05d_00*.ARW' % test_id)
    for k in range(len(in_files)):
        in_path = in_files[k]
        _, in_fn = os.path.split(in_path)
        print(in_fn)
        # Ground truth shares the scene-id prefix with the inputs.
        gt_files = glob.glob(params['gt_dir'] + '%05d_00*.ARW' % test_id)
예제 #8
0
    # Tail of a loss function: L1 distance between the Gaussian-smoothed
    # prediction and the Gaussian-smoothed target.  Presumably the
    # smoothing makes the loss less sensitive to pixel-level noise --
    # confirm against gaussianSmoothing's definition.
    out = gaussianSmoothing(out)
    gt = gaussianSmoothing(gt)
    return torch.abs(out - gt).mean()


# Raw data takes a long time to load, so cache decoded images in memory
# after the first read.  The '300'/'250'/'100' keys look like exposure
# amplification-ratio buckets -- TODO confirm against the data loader.
gt_images = [None] * 6000
input_images = {}
input_images['300'] = [None] * len(train_ids)
input_images['250'] = [None] * len(train_ids)
input_images['100'] = [None] * len(train_ids)

# Per-image training-loss buffer, indexed by training-image id.
g_loss = np.zeros((5000, 1))

learning_rate = 1e-4
model = SeeInDark().to(device)
opt = optim.Adam(model.parameters(), lr=learning_rate)

# Resume from the last saved checkpoint (model + optimizer + epoch) when
# one exists; otherwise start from freshly initialised weights.
if os.path.isfile(chpkdir):
    checkpoint = torch.load(chpkdir)
    model.load_state_dict(checkpoint['model'])
    opt.load_state_dict(checkpoint['optimizer'])
    lastepoch = checkpoint['epoch'] + 1
else:
    lastepoch = 0
    model._initialize_weights()

print("*****lastepoch***** ", lastepoch)
for epoch in range(lastepoch, 4001):
    cnt = 0
예제 #9
0
# Raw data takes a long time to load, so cache decoded images in memory
# after the first read ('300'/'250'/'100' presumably = amplification
# ratio buckets -- TODO confirm against the data loader).
gt_images=[None]*6000
input_images = {}
input_images['300'] = [None]*len(train_ids)
input_images['250'] = [None]*len(train_ids)
input_images['100'] = [None]*len(train_ids)

# Per-image training-loss buffer, indexed by training-image id.
g_loss = np.zeros((5000,1))

# Resume support: per-epoch result folders are named result/%04d, so the
# highest existing folder number marks the last finished epoch.
allfolders = glob.glob('./result/*0')
lastepoch = 0
for folder in allfolders:
    lastepoch = np.maximum(lastepoch, int(folder[-4:]))

learning_rate = 1e-4
model = SeeInDark().to(device)
model._initialize_weights()
opt = optim.Adam(model.parameters(), lr = learning_rate)
for epoch in range(lastepoch, epochs):
    print('---------')
    print(epoch)
    print(len(train_ids))
    # Skip epochs whose results already exist (resumed run).
    if os.path.isdir("result/%04d"%epoch):
        continue
    cnt=0
    latest_loss=0
    # Learning-rate schedule: drop from 1e-4 to 1e-5 after epoch 2000.
    if epoch > 2000:
        for g in opt.param_groups:
            g['lr'] = 1e-5

    for ind in np.random.permutation(len(train_ids)):
예제 #10
0
# Raw data takes a long time to load, so cache decoded images in memory
# after the first read ('300'/'250'/'100' presumably = amplification
# ratio buckets -- TODO confirm against the data loader).
gt_images = [None] * 6000
input_images = {}
input_images['300'] = [None] * len(train_ids)
input_images['250'] = [None] * len(train_ids)
input_images['100'] = [None] * len(train_ids)

# Per-image training-loss buffer, indexed by training-image id.
g_loss = np.zeros((5000, 1))

# Resume support: per-epoch result folders are named result/%04d, so the
# highest existing folder number marks the last finished epoch.
allfolders = glob.glob('./result/*0')
lastepoch = 0
for folder in allfolders:
    lastepoch = np.maximum(lastepoch, int(folder[-4:]))

learning_rate = 1e-4
model = SeeInDark()
model._initialize_weights()
# Multi-GPU: wrap the model in DataParallel when several devices exist.
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)
model.cuda()

opt = optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(lastepoch, 4001):
    # Skip epochs whose results already exist (resumed run).
    if os.path.isdir("result/%04d" % epoch):
        continue
    cnt = 0
    # Learning-rate schedule: drop from 1e-4 to 1e-5 after epoch 2000.
    if epoch > 2000:
        for g in opt.param_groups:
            g['lr'] = 1e-5