Example #1
    def test_save(self):
        img = random_rgb_image()
        for extension in lycon.get_supported_extensions():
            mkpath = lambda name: self.get_path('{}.{}'.format(
                name, extension))
            # Write using Lycon
            lycon.save(mkpath('lycon'), img)
            # Write using OpenCV
            cv2.imwrite(mkpath('opencv'), rgb_bgr(img))
            self.assertEqual(filehash(mkpath('lycon')),
                             filehash(mkpath('opencv')))
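A minimal round-trip sketch (not part of the original test) of the RGB/HWC convention the comparison above relies on; the temporary path is arbitrary:

import numpy as np
import lycon

# Lycon reads and writes HWC arrays in RGB channel order, which is why the
# test converts with rgb_bgr() before handing the image to cv2.imwrite.
img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
lycon.save('/tmp/lycon_roundtrip.png', img)
assert np.array_equal(lycon.load('/tmp/lycon_roundtrip.png'), img)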
Example #2
def save_numpy_2_image_jpg_png(input_numpy, output_filepath, **kwargs):

    if _lycon_available:
        lycon.save(output_filepath, input_numpy)
    else:
        # Necessary conditional?
        if input_numpy.ndim == 3 and input_numpy.shape[-1] == 1:
            input_numpy = np.squeeze(input_numpy)
        imwrite(os.path.abspath(output_filepath), input_numpy)

    return output_filepath
Example #3
def _dump_png(filepath, data):
    """Save a PNG image from an RGB :class:`~numpy.ndarray`.

    Args:
        filepath (|Path|): The path to the image file on disk.
        data (:class:`~numpy.ndarray`): The image as an HWC array of values.

    """
    # Backend selection (Fastest to slowest).
    if _HAS_LYCON:
        lycon.save(str(filepath), data)
    elif _HAS_CV2:
        cv2.imwrite(str(filepath), cv2.cvtColor(data, cv2.COLOR_RGB2BGR))
    elif _HAS_PILLOW:
        PIL.Image.fromarray(data).save(str(filepath))
    else:
        raise RuntimeError('No backend available to save PNG image.')
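The _HAS_LYCON / _HAS_CV2 / _HAS_PILLOW flags are not defined in this snippet; a common way to set them (an assumption, not the original module's code) is an import guard per backend:

# Hypothetical import guard; the real module presumably repeats the pattern
# for cv2 and PIL.Image as well.
try:
    import lycon
    _HAS_LYCON = True
except ImportError:
    _HAS_LYCON = False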
Example #4
def merge():

    img = json.loads(upload())
    blue = img["blue"]
    arr_b = np.array(blue, dtype="uint8")

    red = img["red"]
    arr_r = np.array(red, dtype="uint8")

    green = img["green"]
    arr_g = np.array(green, dtype="uint8")

    # Stack the channels into an HWC (RGB-ordered) image.
    combined_image = np.dstack((arr_r, arr_g, arr_b))

    lycon.save("templates/images/combinedimages/mergeimage.png",
               combined_image)

    return render_template("merge.html")
Example #5
def telemetry(sid, data):
    global recorded_points

    if data:
        # steering_angle = float(data["steering_angle"])
        # throttle = float(data["throttle"])

        x, y, z = parse_position(data["Position"])
        recorded_points.append([x, y, z])

        if lap_definition is not None:
            completion = find_completion([x, y, z], lap_definition)
            sys.stderr.write("\rTrack position: {0:3.2f}%".format(completion *
                                                                  100))

        speed = float(data["speed"])
        image = Image.open(BytesIO(base64.b64decode(data["image"])))
        frame = np.asarray(image)  # keep the raw frame for optional recording below
        try:
            image = preprocess_image(frame)
            image = np.array([image])
            image = 2.0 * image / 255 - 1

            steering_angle = float(model.predict(image, batch_size=1))

            global speed_limit
            if speed > speed_limit:
                speed_limit = MIN_SPEED  # slow down
            else:
                speed_limit = MAX_SPEED

            throttle = 1.0 - steering_angle**2 - (speed / speed_limit)**2

            # print('{} {} {}'.format(steering_angle, throttle, speed))
            send_control(steering_angle, throttle)
        except Exception as e:
            print(e)

        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            lycon.save('{}.jpg'.format(image_filename), frame)
    else:
        sio.emit('manual', data={}, skip_sid=True)
Example #6
def benchmark_write(img):
    for ext in ('png', 'jpg'):
        output = '/tmp/lycon_test.' + ext
        msg = lambda tag: '[WRITE ({})] {}'.format(ext, tag)
        benchmark(
            (msg('Lycon'), lambda: lycon.save(output, img)),
            (msg('OpenCV'), lambda: cv2.imwrite(output, img)),
            (msg('PIL'), lambda: PIL.Image.fromarray(img).save(output)),
            (msg('SKImage'), lambda: skimage.io.imsave(output, img)),
        )
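The benchmark() helper is not shown in this snippet; a minimal sketch of such a helper (an assumption built on timeit; the real one in the lycon repository may differ):

import timeit

def benchmark(*cases, repeat=3, number=10):
    # Each case is a (label, zero-argument callable) pair; report the best
    # average wall-clock time per call.
    for label, fn in cases:
        best = min(timeit.repeat(fn, repeat=repeat, number=number)) / number
        print('{}: {:.5f} s/call'.format(label, best))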
Example #7
def raw2rgb(inp_path, out_path):
    filename = inp_path.split('/')[-1].rsplit('.', 1)[0] + '.jpg'
    filepath = out_path + '/' + filename

    raw = rawpy.imread(inp_path)
    im = raw.postprocess(use_camera_wb=True,
                         half_size=False,
                         user_flip=0,
                         no_auto_bright=True,
                         output_bps=8)

    im_raw = raw.raw_image_visible.astype(np.float32)

    if im_raw.shape[0] != im.shape[0] or im_raw.shape[1] != im.shape[1]:
        print('Dimension Mismatch in Image: ', filename)
        print('Moving: ', filename[:-4] + '.dng')
        shutil.move(inp_path, './corrupt/FinePixS2Pro/')
    else:
        lycon.save(filepath, im)
Example #8
def _dump_jpg(filepath, data):
    """Save a JPG image from an RGB :class:`~numpy.ndarray`.

    Args:
        filepath (|Path|): The path to the image file on disk.
        data (:class:`~numpy.ndarray`): The image as an HWC array of values.

    """
    # Backend selection (Fastest to slowest).
    if _HAS_TURBO_JPEG:
        with open(str(filepath), 'wb') as f:
            f.write(turbo_jpeg_handler.encode(data, pixel_format=TJPF.RGB))
    elif _HAS_LYCON:
        lycon.save(str(filepath), data)
    elif _HAS_CV2:
        cv2.imwrite(str(filepath), cv2.cvtColor(data, cv2.COLOR_RGB2BGR))
    elif _HAS_PILLOW:
        PIL.Image.fromarray(data).save(str(filepath))
    else:
        raise RuntimeError('No backend available to save JPG image.')
Example #9
def save_numpy_2_image_jpg_png(input_numpy, output_filepath, **kwargs):

    lycon.save(output_filepath, input_numpy)

    return output_filepath
Example #10
            shot_noise, read_noise = shot_noise.cuda(), read_noise.cuda()
            raw_noisy = add_noise(raw_gt[j],
                                  shot_noise,
                                  read_noise,
                                  use_cuda=True)
            raw_noisy = torch.clamp(raw_noisy, 0, 1)  ### CLIP NOISE
            variance = shot_noise * raw_noisy + read_noise
            #### Unpadding and saving
            clean_packed = raw_gt[j]
            clean_packed = clean_packed[:, padh[j] // 2:-padh[j] // 2,
                                        padw[j] // 2:-padw[j] //
                                        2]  ## RGGB channels  (4 x H/2 x W/2)
            clean_unpacked = utils.unpack_raw(clean_packed.unsqueeze(
                0))  ## Rearrange RGGB channels into Bayer pattern
            clean_unpacked = clean_unpacked.squeeze().cpu().detach().numpy()
            lycon.save(args.result_dir + 'png/clean/' + filename[:-4] + '.png',
                       (clean_unpacked * 255).astype(np.uint8))

            noisy_packed = raw_noisy
            noisy_packed = noisy_packed[:, padh[j] // 2:-padh[j] // 2,
                                        padw[j] // 2:-padw[j] //
                                        2]  ## RGGB channels
            noisy_unpacked = utils.unpack_raw(noisy_packed.unsqueeze(
                0))  ## Rearrange RGGB channels into Bayer pattern
            noisy_unpacked = noisy_unpacked.squeeze().cpu().detach().numpy()
            lycon.save(args.result_dir + 'png/noisy/' + filename[:-4] + '.png',
                       (noisy_unpacked * 255).astype(np.uint8))

            variance_packed = variance[:, padh[j] // 2:-padh[j] // 2,
                                       padw[j] // 2:-padw[j] //
                                       2]  ## RGGB channels
Example #11
    def run(self) -> None:
        while not self.q.empty():
            filename, data = self.q.get()
            lycon.save(filename, data)
            self.q.task_done()
Example #12
        
        # Denoises this crop of the image.
        output = denoiser(channels, variance)

        # Copies denoised results to output denoised array.
        for yy in range(2):
            for xx in range(2):
                denoised_crop[yy:height:2, xx:width:2] = output[:, :, 2 * yy + xx]

        # Flips denoised image back to original Bayer color pattern.
        if (bayer_pattern == [[1, 2], [2, 3]]):
            pass
        elif (bayer_pattern == [[2, 1], [3, 2]]):
            denoised_crop = np.fliplr(denoised_crop)
        elif (bayer_pattern == [[2, 3], [1, 2]]):
            denoised_crop = np.flipud(denoised_crop)

        Idenoised_crop = np.clip(np.float32(denoised_crop), 0.0, 1.0)

        # Saves denoised image crop.
        save_file = os.path.join(args.result_dir + 'matfile/',
                                 '%04d_%02d.mat' % (i + 1, k + 1))
        sio.savemat(save_file, {'Idenoised_crop': Idenoised_crop})

        if args.save_images:
            denoised_img = Idenoised_crop * 255
            save_file = os.path.join(args.result_dir + 'png/',
                                     '%04d_%02d.png' % (i + 1, k + 1))
            lycon.save(save_file, denoised_img.astype(np.uint8))

bundle_submissions_raw(args.result_dir + 'matfile/',
                       'raw_results_for_server_submission/')
os.system("rm {}".format(args.result_dir + 'matfile/*.mat'))
Example #13
model_restoration.eval()

with torch.no_grad():
    psnr_val_rgb = []
    for ii, data_test in enumerate(tqdm(test_loader), 0):
        rgb_noisy = data_test[0].cuda()
        filenames = data_test[1]
        rgb_restored = model_restoration(rgb_noisy)
        rgb_restored = torch.clamp(rgb_restored, 0, 1)

        rgb_noisy = rgb_noisy.permute(0, 2, 3, 1).cpu().detach().numpy()
        rgb_restored = rgb_restored.permute(0, 2, 3, 1).cpu().detach().numpy()

        if args.save_images:
            for batch in range(len(rgb_noisy)):
                #temp = np.concatenate((rgb_noisy[batch]*255, rgb_restored[batch]*255),axis=1)
                denoised_img = rgb_restored[batch] * 255
                lycon.save(
                    args.result_dir + 'png/' + filenames[batch][:-4] + '.png',
                    denoised_img.astype(np.uint8))
                save_file = os.path.join(args.result_dir + 'matfile/',
                                         filenames[batch][:-4] + '.mat')
                sio.savemat(
                    save_file,
                    {'Idenoised_crop': np.float32(rgb_restored[batch])})

bundle_submissions_srgb_v1(args.result_dir + 'matfile/',
                           'srgb_results_for_server_submission/')
os.system("rm {}".format(args.result_dir + 'matfile/*.mat'))
Example #14
model_restoration.cuda()

model_restoration = nn.DataParallel(model_restoration)

model_restoration.eval()

with torch.no_grad():
    psnr_val_rgb = []
    for ii, data_test in enumerate(tqdm(test_loader), 0):
        rgb_noisy = data_test[0].cuda()
        filenames = data_test[1]
        rgb_restored = model_restoration(rgb_noisy)
        rgb_restored = torch.clamp(rgb_restored, 0, 1)

        rgb_noisy = rgb_noisy.permute(0, 2, 3, 1).cpu().detach().numpy()
        rgb_restored = rgb_restored.permute(0, 2, 3, 1).cpu().detach().numpy()

        if args.save_images:
            for batch in range(len(rgb_noisy)):
                denoised_img = img_as_ubyte(rgb_restored[batch])
                lycon.save(args.result_dir + 'png/' + filenames[batch][:-4] + '.png',
                           denoised_img)
                save_file = os.path.join(args.result_dir + 'matfile/',
                                         filenames[batch][:-4] + '.mat')
                sio.savemat(save_file,
                            {'Idenoised_crop': np.float32(rgb_restored[batch])})

bundle_submissions_srgb_v1(args.result_dir + 'matfile/',
                           'srgb_results_for_server_submission/')
os.system("rm {}".format(args.result_dir + 'matfile/*.mat'))
Example #15
utils.load_checkpoint(model_restoration, args.weights)
print("===>Testing using weights: ", args.weights)

model_restoration.cuda()

model_restoration = nn.DataParallel(model_restoration)

model_restoration.eval()

with torch.no_grad():
    psnr_val_raw = []
    for ii, data_val in enumerate(tqdm(test_loader), 0):
        raw_gt = data_val[0].cuda()
        raw_noisy = data_val[1].cuda()
        # variance = shot_noise * raw_noisy + read_noise
        # (shot and read noise come from the images' metadata)
        variance = data_val[2].cuda()
        filenames = data_val[3]
        raw_restored = model_restoration(raw_noisy, variance)
        raw_restored = torch.clamp(raw_restored, 0, 1)
        psnr_val_raw.append(utils.batch_PSNR(raw_restored, raw_gt, 1.))

        if args.save_images:
            for batch in range(len(raw_gt)):
                denoised_img = utils.unpack_raw(raw_restored[batch, :, :, :].unsqueeze(0))
                denoised_img = denoised_img.permute(0, 2, 3, 1).cpu().detach().numpy()[0]
                denoised_img = np.squeeze(np.stack((denoised_img,) * 3, -1))
                lycon.save(args.result_dir + filenames[batch][:-4] + '.png',
                           img_as_ubyte(denoised_img))

psnr_val_raw = sum(psnr_val_raw) / len(psnr_val_raw)
print("PSNR: %.2f " % (psnr_val_raw))
Example #16
    def imwrite(img_path, img):
        '''Stores image to disk.'''
        img = np.ascontiguousarray(img, dtype=np.uint8)
        lycon.save(img_path, img)
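Usage note (a sketch with hypothetical data, assuming the wrapper above is callable as a plain function): the dtype=np.uint8 cast truncates floating-point values, so float images in [0, 1] should be scaled to [0, 255] before calling it.

import numpy as np

float_img = np.random.rand(32, 32, 3)          # values in [0, 1]
imwrite('/tmp/example.png', float_img * 255)   # scale before the uint8 cast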
Example #17
model_restoration = MIRNet()

weights = args.weights + args.scale + '.pth'
utils.load_checkpoint(model_restoration, weights)
print("===>Testing using weights: ", weights)

model_restoration.cuda()

model_restoration = nn.DataParallel(model_restoration)

model_restoration.eval()

with torch.no_grad():
    for ii, data_test in enumerate(tqdm(test_loader), 0):
        LR_img = data_test[0].cuda()
        filenames = data_test[1]
        rgb_restored = model_restoration(LR_img)
        rgb_restored = torch.clamp(rgb_restored, 0, 1)

        LR_img = LR_img.permute(0, 2, 3, 1).cpu().detach().numpy()
        rgb_restored = rgb_restored.permute(0, 2, 3, 1).cpu().detach().numpy()

        if args.save_images:
            for batch in range(len(LR_img)):
                #temp = np.concatenate((LR_img[batch]*255, rgb_restored[batch]*255),axis=1)
                denoised_img = rgb_restored[batch] * 255
                lycon.save(
                    os.path.join(output_dir, filenames[batch][:-4] + '.png'),
                    denoised_img.astype(np.uint8))
Example #18
            raw_noisy = add_noise(raw_gt[j],
                                  shot_noise,
                                  read_noise,
                                  use_cuda=True)
            raw_noisy = torch.clamp(raw_noisy, 0, 1)  ### CLIP NOISE
            variance = shot_noise * raw_noisy + read_noise

            #### Unpadding and saving
            clean_packed = raw_gt[j]
            clean_packed = clean_packed[:, padh[j] // 2:-padh[j] // 2,
                                        padw[j] // 2:-padw[j] //
                                        2]  ## RGGB channels  (4 x H/2 x W/2)
            clean_unpacked = utils.unpack_raw(clean_packed.unsqueeze(
                0))  ## Rearrange RGGB channels into Bayer pattern
            clean_unpacked = clean_unpacked.squeeze().cpu().detach().numpy()
            lycon.save(args.result_dir + 'png/clean/' + filename[:-4] + '.png',
                       img_as_ubyte(clean_unpacked))

            noisy_packed = raw_noisy
            noisy_packed = noisy_packed[:, padh[j] // 2:-padh[j] // 2,
                                        padw[j] // 2:-padw[j] //
                                        2]  ## RGGB channels
            noisy_unpacked = utils.unpack_raw(noisy_packed.unsqueeze(
                0))  ## Rearrange RGGB channels into Bayer pattern
            noisy_unpacked = noisy_unpacked.squeeze().cpu().detach().numpy()
            lycon.save(args.result_dir + 'png/noisy/' + filename[:-4] + '.png',
                       img_as_ubyte(noisy_unpacked))

            variance_packed = variance[:, padh[j] // 2:-padh[j] // 2,
                                       padw[j] // 2:-padw[j] //
                                       2]  ## RGGB channels
Example #19
        raw_gt = model_rgb2raw(rgb_gt)  ## raw_gt is in RGGB format
        raw_gt = torch.clamp(raw_gt, 0, 1)

        ########## Add noise to clean raw images ##########
        for j in range(raw_gt.shape[0]):  ## Use loop to add different noise to different images.
            filename = filenames[j]
            shot_noise, read_noise = random_noise_levels_dnd()
            shot_noise, read_noise = shot_noise.cuda(), read_noise.cuda()
            raw_noisy = add_noise(raw_gt[j], shot_noise, read_noise, use_cuda=True)
            raw_noisy = torch.clamp(raw_noisy, 0, 1)  ### CLIP NOISE

            #### Convert raw noisy to rgb noisy ####
            ccm_tensor = model_ccm(rgb_gt[j].unsqueeze(0))
            rgb_noisy = model_raw2rgb(raw_noisy.unsqueeze(0), ccm_tensor)
            rgb_noisy = torch.clamp(rgb_noisy, 0, 1)

            rgb_noisy = rgb_noisy.permute(0, 2, 3, 1).squeeze().cpu().detach().numpy()

            rgb_clean = rgb_gt[j].permute(1, 2, 0).cpu().detach().numpy()
            ## Unpadding
            rgb_clean = rgb_clean[padh[j]:-padh[j], padw[j]:-padw[j], :]
            rgb_noisy = rgb_noisy[padh[j]:-padh[j], padw[j]:-padw[j], :]
            # import pdb;pdb.set_trace()

            lycon.save(args.result_dir + 'clean/' + filename[:-4] + '.png',
                       (rgb_clean * 255).astype(np.uint8))
            lycon.save(args.result_dir + 'noisy/' + filename[:-4] + '.png',
                       (rgb_noisy * 255).astype(np.uint8))
Example #20
model_restoration.cuda()

model_restoration = nn.DataParallel(model_restoration)

model_restoration.eval()

with torch.no_grad():
    psnr_val_rgb = []
    for ii, data_test in enumerate(tqdm(test_loader), 0):
        rgb_gt = data_test[0].cuda()
        rgb_noisy = data_test[1].cuda()
        filenames = data_test[2]
        rgb_restored = model_restoration(rgb_noisy)
        rgb_restored = torch.clamp(rgb_restored, 0, 1)

        psnr_val_rgb.append(utils.batch_PSNR(rgb_restored, rgb_gt, 1.))

        rgb_gt = rgb_gt.permute(0, 2, 3, 1).cpu().detach().numpy()
        rgb_noisy = rgb_noisy.permute(0, 2, 3, 1).cpu().detach().numpy()
        rgb_restored = rgb_restored.permute(0, 2, 3, 1).cpu().detach().numpy()

        if args.save_images:
            for batch in range(len(rgb_gt)):
                enhanced_img = img_as_ubyte(rgb_restored[batch])
                lycon.save(args.result_dir + filenames[batch][:-4] + '.png',
                           enhanced_img)

psnr_val_rgb = sum(psnr_val_rgb) / len(psnr_val_rgb)
print("PSNR: %.2f " % (psnr_val_rgb))

model_restoration = MIRNet()

weights = args.weights + args.scale + '.pth'
utils.load_checkpoint(model_restoration, weights)
print("===>Testing using weights: ", weights)

model_restoration.cuda()

model_restoration = nn.DataParallel(model_restoration)

model_restoration.eval()

with torch.no_grad():
    for ii, data_test in enumerate(tqdm(test_loader), 0):
        LR_img = data_test[0].cuda()
        filenames = data_test[1]
        rgb_restored = model_restoration(LR_img)
        rgb_restored = torch.clamp(rgb_restored, 0, 1)

        LR_img = LR_img.permute(0, 2, 3, 1).cpu().detach().numpy()
        rgb_restored = rgb_restored.permute(0, 2, 3, 1).cpu().detach().numpy()

        if args.save_images:
            for batch in range(len(LR_img)):
                sr_img = img_as_ubyte(rgb_restored[batch])
                lycon.save(
                    os.path.join(output_dir, filenames[batch][:-4] + '.png'),
                    sr_img)