def load_buffer(buffer_path, replay_buffer):
    file_list = os.listdir(buffer_path)
    for idx, file_name in enumerate(file_list[:-1]):
        if idx > 2000:
            print(idx)
        im = PngImageFile(os.path.join(buffer_path, file_name))
        im_next = PngImageFile(os.path.join(buffer_path, file_list[idx+1]))

        ## Change to action space, now two-dimensional
        throttle, steer, brake = float(im.text['control_throttle']), float(im.text['control_steer']), float(im.text['control_brake'])
        action = np.zeros((2,))
        action[0] = throttle if throttle > 0.0 else (-brake)
        action[1] = steer
        
        reward = np.array([float(im.text['reward'])])
        location = np.array([float(im.text['location_x']), float(im.text['location_y']), float(im.text['location_z'])])

        obs = Image.open(os.path.join(buffer_path, file_name))
        next_obs = Image.open(os.path.join(buffer_path, file_list[idx+1]))

        path = dict()
        path['observations'] = [{'image': np.array(obs).astype(np.float32) / 255.0},]
        path['next_observations'] = [{'image': np.array(next_obs).astype(np.float32) / 255.0}, ]
        path['actions'] = [action,]
        path['rewards'] = [reward,]
        path['terminals'] = [(False,),]

        print('Location: ', idx, location)
        replay_buffer.add_path(path)
    
    print('Replay Buffer Loaded: ', replay_buffer._size)
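The loader above expects each frame's control, reward, and location values to be stored as PNG tEXt chunks. A minimal sketch of the assumed writer side, using only standard Pillow calls; save_frame and its arguments are hypothetical:

from PIL import Image
from PIL.PngImagePlugin import PngInfo


def save_frame(rgb_array, path, throttle, steer, brake, reward, location):
    # Embed per-frame metadata as text chunks so that im.text['control_throttle'],
    # im.text['reward'], im.text['location_x'], etc. are available to load_buffer above.
    info = PngInfo()
    info.add_text('control_throttle', str(throttle))
    info.add_text('control_steer', str(steer))
    info.add_text('control_brake', str(brake))
    info.add_text('reward', str(reward))
    info.add_text('location_x', str(location[0]))
    info.add_text('location_y', str(location[1]))
    info.add_text('location_z', str(location[2]))
    Image.fromarray(rgb_array).save(path, pnginfo=info)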
    def encrypt_png(self, in_path: PathType, out_path: PathType) -> None:
        image = PngImageFile(in_path)

        # convert pixels to bytes in order to encrypt them
        im_bytes = bytearray(self.__get_pixels(image))

        # get random IV and calculate MAC
        iv = get_random_bytes(CryptoAES.block_size)
        h = HMAC.new(self.__key, digestmod=SHA256)
        h.update(im_bytes)

        # create metadata object in order to save IV and MAC to image
        metadata = PngInfo()
        metadata.add_text('iv', iv.hex())
        metadata.add_text('mac', h.hexdigest())

        print(f'writing IV = {iv.hex()} and MAC = {h.hexdigest()} to image metadata')

        # encrypt image
        cipher = CryptoAES.new(self.__key, CryptoAES.MODE_ECB)
        enc_data = cipher.encrypt(im_bytes)

        # write image to file with metadata
        image.frombytes(enc_data)
        image.save(out_path, pnginfo=metadata)
Example #3
    def _png_to_nximg(data, image_format):
        data_nximg = bytes()

        with BytesIO(data) as io_png:
            with BytesIO() as io_nximg:
                png = PngImageFile(io_png)
                w, h = png.width, png.height

                if image_format == IMAGE_FORMAT_1555:
                    for y in range(h):
                        for x in range(w):
                            [r, g, b, a] = png.getpixel((x, y))
                            IOHelper.write_struct(io_nximg, "<2B",
                                                  *NXColor.to_1555(r, g, b, a))
                elif image_format == IMAGE_FORMAT_4444:
                    for y in range(h):
                        for x in range(w):
                            [r, g, b, a] = png.getpixel((x, y))
                            IOHelper.write_struct(io_nximg, "<2B",
                                                  *NXColor.to_4444(r, g, b, a))
                elif image_format == IMAGE_FORMAT_8888:
                    for y in range(h):
                        for x in range(w):
                            [r, g, b, a] = png.getpixel((x, y))
                            IOHelper.write_struct(io_nximg, "<4B", b, g, r, a)
                else:
                    raise Exception('Unsupported image format: %s' %
                                    image_format)

                data_nximg = IOHelper.read_range(io_nximg)

        return data_nximg, w, h
Example #4
    def load_spectrogram(cls, fname, to_nparray=True):
        '''
        Loads a .png spectrogram file and returns the image
        together with the file's (possibly empty) metadata.

        :param fname: file to load
        :type fname: str
        :param to_nparray: if True, convert the loaded PIL Image
            instance to a numpy array, and return that as result
        :type to_nparray: bool
        :returns tuple: the image and the .png file's possibly empty metadata dict
        :rtype: ({np.ndarray|PIL.Image.Image}, {str : str})
        :raises FileNotFoundError:
        '''

        if not os.path.exists(fname):
            raise FileNotFoundError(f"File {fname} does not exist.")

        png_img = PngImageFile(fname)
        try:
            info = png_img.text
        except Exception as e:
            cls.log.info(f"No available info in .png file: {repr(e)}")
            info = None

        img_obj = Image.open(fname)
        if to_nparray:
            res = np.asarray(img_obj)
        else:
            res = img_obj
        return (res, info)
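A usage sketch for the classmethod above; SoundProcessor is a hypothetical stand-in for whatever class hosts it:

# Usage sketch (host class name and file path are assumptions)
spectrogram, metadata = SoundProcessor.load_spectrogram('samples/spectrogram.png')
print(spectrogram.shape, metadata)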
Example #5
    def _action(uid: str, color: Color = Color.red):
        result, meta = encode(uid)
        result.save("test.png", pnginfo=meta)

        with PngImageFile("test.png") as fp:
            uid_result = decode(fp)

        return result, uid_result
Example #6
def get_dict_from_pnginfo(image_filename="", dict_name=""):
    dict_to_return = {}
    targetImage = PngImageFile(image_filename)
    all_settings_dict = targetImage.text
    for settings_key in all_settings_dict:
        if (settings_key == dict_name):
            dict_to_return = json.loads(all_settings_dict[settings_key])
    return dict_to_return
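A round-trip sketch for the reader above: it assumes the dict was previously serialized with json.dumps into a single text chunk, e.g.:

import json
import numpy as np
from PIL import Image
from PIL.PngImagePlugin import PngInfo

# Store an example settings dict as a JSON text chunk, then read it back.
settings = {'average': 16, 'span': '10MHz'}
meta = PngInfo()
meta.add_text('analyzer_settings', json.dumps(settings))
Image.fromarray(np.zeros((8, 8), dtype=np.uint8)).save('capture.png', pnginfo=meta)

assert get_dict_from_pnginfo('capture.png', 'analyzer_settings') == settings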
Example #7
def test_custom_encode(colors):
    result, meta = encode(uid_18, "custom", colors[0], colors[1], colors[2])
    result.save("test.png", pnginfo=meta)

    with PngImageFile("test.png") as fp:
        assert decode(fp) == uid_18

    delete_test_png()
Example #8
    def svg_image_factory(fp, filename):
        mime_type = magic.from_buffer(fp.read(1024), mime=True)
        if mime_type != "image/svg+xml":
            raise TypeError

        fp.seek(0)
        png_data = PNGSurface.convert(fp.read(), url_fetcher=url_fetcher)
        return PngImageFile(BytesIO(png_data))
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) - - an image in the input domain
            B (tensor) - - its corresponding image in the target domain
            A_paths (str) - - image paths
            B_paths (str) - - image paths (same as A_paths)
        """
        # read a image given a random integer index
        AB_path = self.AB_paths[index]
        # print(AB_path)
        path = AB_path.replace('train', 'train2')
        # print(index)
        AB = Image.open(AB_path)
        targetImage = PngImageFile(AB_path)
        des = int(targetImage.text['des'])


        # des = int(AB.info['des'])
        # matrix = im.info['Comment']
        # split AB image into A and B
        w, h = AB.size
        w2 = int(w / 2)
        A = AB.crop((0, 0, w2, h))
        B = AB.crop((w2, 0, w, h))
        opacity = 50
        flash = skimage.img_as_float(A)
        ambient = skimage.img_as_float(B)
        # im = A_float * opacity / 100 + B_float * (100 - opacity) / 100
        # paper version 4: from A flash 0.5 ambient 1.7 to flash 1.7 ambient 0.5
        A = flash * 1.1 + ambient * 1.1
        A = xyztorgb(A, des)
        # opacity2 = opacity + 0.7
        # if opacity2 > 2:
        #     opacity2 = 2
        B = flash * 2.2 + ambient * 1.1
        B = xyztorgb(B, des)

        # cv2.imwrite(path_AB, im_AB)
        # im = (im * 255 / np.max(im)).astype('uint8')

        # print(blended)
        # im = (im * 255 / np.max(im)).astype('uint8')
        # im = Image.fromarray(im)

        # apply the same transform to both A and B
        transform_params = get_params(self.opt, A.size)
        A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
        B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))

        A = A_transform(A)
        B = B_transform(B)

        return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
Example #10
    def setUp(self):
        self.mock_button_repository = AbstractButtonRepository(None)
        self.directory = os.path.dirname(os.path.abspath(__file__))
        self.new_tab_template = PngImageFile(
            os.path.join(self.directory,
                         'data/buttons/chrome/new_tab_template_chrome.png'))
        self.new_tab_template = convert_picture_to_grayscale(
            self.new_tab_template)
        self.new_tab_template = convert_picture_to_numpy_array(
            self.new_tab_template)

        self.new_win_template = PngImageFile(
            os.path.join(
                self.directory,
                'data/buttons/chrome/new_windows_template_chrome.png'))
        self.new_win_template = convert_picture_to_grayscale(
            self.new_win_template)
        self.new_win_template = convert_picture_to_numpy_array(
            self.new_win_template)
def load_buffer(buffer_path, replay_buffer):
    file_list = os.listdir(buffer_path)
    for idx, file_name in enumerate(file_list[:-1]):
        if idx % 1000 == 0:
            print(idx)
        im = PngImageFile(os.path.join(buffer_path, file_name))
        im_next = PngImageFile(os.path.join(buffer_path, file_list[idx + 1]))
        action = np.array([
            float(im.text['control_throttle']),
            float(im.text['control_steer']),
            float(im.text['control_brake'])
        ])
        reward = np.array([float(im.text['reward'])])

        obs = Image.open(os.path.join(buffer_path, file_name))
        next_obs = Image.open(os.path.join(buffer_path, file_list[idx + 1]))

        # Since we are using an obs_dict replay buffer, each transition
        # is added as a single-step path via add_path
        path = dict()
        path['observations'] = [
            {
                'image': np.array(obs).astype(np.float32) / 255.0
            },
        ]
        path['next_observations'] = [
            {
                'image': np.array(next_obs).astype(np.float32) / 255.0
            },
        ]
        path['actions'] = [
            action,
        ]
        path['rewards'] = [
            reward,
        ]
        path['terminals'] = [
            (False, ),
        ]
        replay_buffer.add_path(path)

    print('Replay Buffer Loaded: ', replay_buffer._size)
Example #12
def add_tags_to_png_file(fpath):
    try:
        info = create_file_info(fpath)
        png_image = PngImageFile(open(fpath, 'rb'))
        png_info = PngInfo()
        for k, v in info.items():
            png_info.add_text(k, v)
        png_image.save(fpath, pnginfo=png_info)
    except Exception:
        print("WARNING: Could not add debug info to file '{}'.".format(fpath))
        traceback.print_exc()
Example #13
    def send_png(self, fname):
        png = PngImageFile(fname)

        png.load()  # load metadata
        with open(fname, "rb") as f:
            return self.bot.send_photo(
                chat_id=self.chat_id,
                photo=f,
                parse_mode="Markdown",
                caption=png.info.get("Comment", "`{}`".format(fname.name)),
            )
Example #14
def cli_decode(path: str, color: str = "red"):
    """Decode a snowflake image at the given file PATH"""
    try:
        set_color = Color[color]
    except KeyError:
        raise ColorError("Invalid color passed.")

    try:
        with PngImageFile(path) as fp:
            print(decode(fp, set_color))
    except Exception as e:
        logging.error(f"Error: {e}")
Example #15
def test_no_metadata(caplog):
    caplog.set_level(logging.WARNING)

    with PngImageFile("src/tests/no_meta.png") as fp:
        decode(fp)

        assert (
            "Warning: Unable to fetch image metadata, using default value (Red).\n"
            in caplog.text)

        result = decode(fp, Color.orange)
        assert result == uid_18
Example #16
    def savemeta(*args, **kwargs):
        path = args[1]
        if path.endswith(".fig"):
            import pickle as pkl
            pkl.dump(args[0], open(path, 'wb'))
        else:
            mpl_savefig(*args, **kwargs)
            # fig = args[0]
            if path.endswith(".png"):
                targetImage = PngImageFile(path)
                metadata = PngInfo()
                metadata.add_text("Description", str(meta))
                targetImage.save(path, pnginfo=metadata)
Example #17
def write_png_metadata(filename, settings):
    targetImage = PngImageFile(filename)
    metadata = PngInfo()
    for (k, v) in settings.items():
        if type(v) == list:
            value = ""
            for item in v:
                value += str(item) + " "
            v = value
        if type(v) == bool:
            v = str(v)
        if v is None:
            continue
        else:
            metadata.add_text(k, str(v))
    targetImage.save(filename, pnginfo=metadata)
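A short round-trip sketch under the assumptions baked into the loop above: lists are flattened to space-separated strings, booleans are stringified, and None-valued keys are dropped (the target file is assumed to already exist):

from PIL.PngImagePlugin import PngImageFile

write_png_metadata('scope_shot.png', {'channels': [1, 2, 4], 'autoscale': True, 'note': None})
text = PngImageFile('scope_shot.png').text
# text['channels'] == '1 2 4 '   (note the trailing space from the join loop)
# text['autoscale'] == 'True'
# 'note' is absent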
Example #18
    def sample_image_path(self):
        """Copy the sample image to to a unique file name and return the path.

		Returns:
			tuple of (the filename, the new sample image path)
		"""
        filename = self.generate_alphanumeric() + ".png"
        new_file = self.tmp_file_path(filename)
        shutil.copy(self.original_sample_image_path, new_file)

        # make the image content unique
        image_file = PngImageFile(open(new_file, "rb"))
        info = PngInfo()
        info.add_text('Comment', self.generate_alphanumeric(length=30))
        image_file.save(new_file, pnginfo=info)
        self.temp_files.append(new_file)
        return (filename, new_file)
    def decrypt_png(self, in_path: PathType, out_path: PathType) -> None:
        image = PngImageFile(in_path)
        iv: Optional[str] = None
        mac: Optional[str] = None

        # try to get IV from metadata
        try:
            iv = image.text['iv']

            print(f'found IV = {iv}')
        except KeyError:
            print('IV was not found in file')

        # try to get MAC from metadata
        try:
            mac = image.text['mac']

            print(f'found MAC = {mac}')
        except KeyError:
            print('MAC was not found in file')

        # convert pixels to bytes in order to decrypt them
        im_bytes = bytearray(self.__get_pixels(image))

        # decrypt image
        cipher = CryptoAES.new(self.__key, CryptoAES.MODE_ECB)
        dec: bytes = cipher.decrypt(im_bytes)

        # try to verify MAC
        try:
            self.__hmac.update(dec)
            self.__hmac.verify(bytes.fromhex(mac))

            print('MAC is valid')
        except ValueError:
            print('MAC is invalid')

        # don't forget about metadata
        metadata = PngInfo()
        metadata.add_text('iv', iv)
        metadata.add_text('mac', mac)

        # save decrypted image to file
        image.frombytes(dec)
        image.save(out_path, pnginfo=metadata)
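A round-trip usage sketch for encrypt_png/decrypt_png above, assuming they are methods of a hypothetical AESImageCipher wrapper built around a 16-byte key (note that the stored IV is metadata only; MODE_ECB does not consume it):

# Usage sketch; the wrapper class and its constructor are assumptions.
cipher = AESImageCipher(key=get_random_bytes(16))
cipher.encrypt_png('photo.png', 'photo_enc.png')       # writes IV/MAC into the PNG text chunks
cipher.decrypt_png('photo_enc.png', 'photo_dec.png')   # reads them back and checks the MAC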
Example #20
    def load_undistorted_segmentation(self, image: str) -> np.ndarray:
        """Load an undistorted image segmentation."""
        segmentation_file = self._undistorted_segmentation_file(image)
        with self.io_handler.open(segmentation_file, "rb") as fp:
            with PngImageFile(fp) as png_image:
                # TODO: We do not write a header tag in the metadata. Might be good safety check.
                data = np.array(png_image)
                if data.ndim == 2:
                    return data
                elif data.ndim == 3:
                    return data[:, :, 0]

                    # TODO we can optionally return also the instances and scores:
                    # instances = (
                    #     data[:, :, 1].astype(np.int16) + data[:, :, 2].astype(np.int16) * 256
                    # )
                    # scores = data[:, :, 3].astype(np.float32) / 256.0
                else:
                    raise IndexError
Example #21
    def decode_from_img(self, file: BytesIO) -> str:
        """Decodes secrets from png squares

        Parameters
        ----------
        file: BytesIO
            file object produced by opening in 'rb' mode or other means"""

        img = PngImageFile(fp=file)
        try:
            edge = img.text["edge"]
        except KeyError:  # img was not upscaled
            pass
        else:  # img was upscaled so we scale it down as prescribed by metadata
            edge = int(edge)
            img = img.resize((edge, edge), resample=Image.NEAREST)

        # asarray creates readonly hence the np.array
        return self.decode(np.array(img))
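A usage sketch for the decoder above; the instance name and file path are assumptions:

from io import BytesIO

# The docstring above expects a binary-mode file object.
with open('squares.png', 'rb') as f:
    secret = codec.decode_from_img(BytesIO(f.read()))  # codec: hypothetical instance of the host class
print(secret)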
Example #22
def replace_meta(filename, fields):
    metaname = get_dumpfile(filename)
    with open(metaname) as json_file:
        meta = json.load(json_file)
    if fields and fields[0] != '*':
        print(f"overwriting metadata[{fields}] in {filename} from {metaname}")
        newmeta = {}
        for f in fields:
            newmeta[f] = meta[f]
    else:
        print(f"overwriting metadata in {filename} from {metaname}")
        newmeta = meta

    newmeta['Metadata Modification Time'] = f"{datetime.now()}"
    img = PngImageFile(filename)
    metadata = PngInfo()
    for f in newmeta:
        metadata.add_text(f, newmeta[f])
    img.save(filename, pnginfo=metadata)
Example #23
    def on_save_clicked(self, button):
        current_path = self.entry.get_text()

        # write metadata
        metadata = PngInfo()
        metadata.add_text("screenshat", self.entryy.get_text())

        img = PngImageFile(img_path)
        img.save(img_path, pnginfo=metadata)
        

        # Update config file
        pathlib.Path(config_path).write_text(current_path)
        
        # in doubt do mkdir -p new directory
        pathlib.Path(current_path).mkdir(parents=True, exist_ok=True)

        # move file 
        shutil.move(img_path, current_path)

        self.destroy()
Example #24
def restore_instrument_settings(filename="", instrument_dict={}):
    isimage = False
    if (filename.endswith('PNG') or filename.endswith('png')):
        isimage = True
        targetImage = PngImageFile(filename)
        all_settings_dict = targetImage.text
    else:
        infile = open(filename)
        all_settings_dict = json.load(infile)

    rm = pyvisa.ResourceManager()
    supported_inst_dict = {
        'k2308_unique_scpi': "KEITHLEY INSTRUMENTS INC.,MODEL 2308",
        'k2460_unique_scpi': "KEITHLEY INSTRUMENTS,MODEL 2460",
        'hmp4040_unique_scpi': "HAMEG,HMP4040",
        'tek_afg3000_unique_scpi': "TEKTRONIX,AFG3102",
        'plz4w_unique_scpi': "KIKUSUI,PLZ164WA,",
        'key_33250a_unique_scpi': "Agilent Technologies,33250A"
    }

    for settings_key in all_settings_dict:  # loop thru all settings inside image
        if (settings_key
                in supported_inst_dict):  # only process supported instruments
            if (isimage):
                unique_scpi = json.loads(
                    all_settings_dict[settings_key])  # json method needed
            else:
                unique_scpi = all_settings_dict[
                    settings_key]  # json method not needed
            for instrument_key in instrument_dict:  # loop thru all connected instruments
                if (
                        instrument_key.startswith(
                            supported_inst_dict[settings_key])
                ):  # only process connected instruments that match supported instruments
                    instrument = rm.open_resource(
                        instrument_dict[instrument_key])
                    instrument.write('*RST')
                    time.sleep(2)
                    for scpi_cmd in unique_scpi:
                        instrument.write(scpi_cmd)
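A usage sketch for the restore function above; instrument_dict is assumed to map *IDN? identification strings to VISA resource addresses (both values below are placeholders):

connected_instruments = {
    'KEITHLEY INSTRUMENTS,MODEL 2460,01234567,1.7.0': 'USB0::0x05E6::0x2460::01234567::INSTR',
}
restore_instrument_settings(filename='bench_setup.png', instrument_dict=connected_instruments)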
Example #25
    def _get_EXIF_DateTimeOriginal(self, file_path):
        """ try to get the recording date from the EXIF in PNG file """
        try:
            image = PngImageFile(file_path)
            metadata = PngInfo()
            exif_array = []
            for i in image.text:
                compile = i, str(image.text[i])
                exif_array.append(compile)
            if len(exif_array) > 0:
                header = exif_array[0][0]
                if header.startswith("XML"):
                    xml = exif_array[0][1]
                    for line in xml.splitlines():
                        if 'DateCreated' in line:
                            idx1 = line.find('>')
                            idx2 = line.rfind('<')
                            if (idx1 != -1) and (idx2 != -1):
                                dt = line[idx1 + 1:idx2]
                                return dt
        except Exception:
            pass  # returns None
        return None
Example #26
def gen_metadata(image):

    # Read image into imageio for data type
    pic = imageio.imread(image)

    # Read image into PIL to extract basic metadata
    type = Image.open(image)

    # Calculations
    megapixels = (type.size[0] * type.size[1] / 1000000)  # Megapixels
    d = re.sub(r'[a-z]', '', str(pic.dtype))  # Dtype
    t = len(Image.Image.getbands(type))  # Number of channels

    print("\n--Summary--\n")
    print("Filename: ", type.filename)
    print("Format: ", type.format)
    print("Data Type:", pic.dtype)
    print("Bit Depth (per Channel):", d)
    print("Bit Depth (per Pixel): ", int(d) * int(t))
    print("Number of Channels: ", t)
    print("Mode: ", type.mode)
    print("Palette: ", type.palette)
    print("Width: ", type.size[0])
    print("Height: ", type.size[1])
    print("Megapixels: ", megapixels)

    # Open image with ExifMode to collect EXIF data
    exif_tags = open(image, 'rb')
    tags = exifread.process_file(exif_tags)

    # Create an empty array
    exif_array = []

    # Print header
    print("\n--Metadata--\n")

    # For non-PNGs
    if type.format != "PNG":
        # Compile array from tags dict
        for i in tags:
            compile = i, str(tags[i])
            exif_array.append(compile)
        for properties in exif_array:
            if properties[0] != 'JPEGThumbnail':
                print(': '.join(str(x) for x in properties))

    if type.format == "PNG":
        image = PngImageFile(image)  #via https://stackoverflow.com/a/58399815
        metadata = PngInfo()

        # Compile array from tags dict
        for i in image.text:
            compile = i, str(image.text[i])
            exif_array.append(compile)

        # If XML metadata, pull out data by identifying data type and gathering useful meta
        if len(exif_array) > 0:
            header = exif_array[0][0]
        else:
            header = ""
            print("No available metadata")

        xml_output = []
        if header.startswith("XML"):
            xml = exif_array[0][1]
            xml_output.extend(xml.splitlines(
            ))  # Use splitlines so that you have a list containing each line
            # Remove useless meta tags
            for line in xml.splitlines():
                if "<" not in line:
                    if "xmlns" not in line:
                        # Remove equal signs, quotation marks, /> characters and leading spaces
                        xml_line = re.sub(r'[a-z]*:', '',
                                          line).replace('="', ': ')
                        xml_line = xml_line.rstrip(' />')
                        xml_line = xml_line.rstrip('\"')
                        xml_line = xml_line.lstrip(' ')
                        print(xml_line)

        elif header.startswith("Software"):
            print("No available metadata")

        # If no XML, print available metadata
        else:
            for properties in exif_array:
                if properties[0] != 'JPEGThumbnail':
                    print(': '.join(str(x) for x in properties))

    # Explanation for GIF or BMP
    if type.format == "GIF" or type.format == "BMP":
        print("No available metadata")
    def __getitem__(self, index):

        image_path = self.images_dir_all[index]

        if 'our_dataset' in image_path:
            image_pair = Image.open(image_path)
            hyper_des = int(PngImageFile(image_path).text['des'])

            A, B = self.divide_imagepair(image_pair)
            ambient = skimage.img_as_float(A)
            flash = skimage.img_as_float(B)

            if hyper_des == 21:
                flash = self.changeTemp(flash, 48, hyper_des)
                ambient = self.changeTemp(ambient, 48, hyper_des)
            flashPhoto = flash + ambient
            flashPhoto[flashPhoto < 0] = 0
            flashPhoto[flashPhoto > 1] = 1
            flashPhoto = self.xyztorgb(flashPhoto, hyper_des)
            ambient = self.xyztorgb(ambient, hyper_des)

            flashphoto_depth = self.getDepth(flashPhoto, image_path, 'flash')
            ambient_depth = self.getDepth(ambient, image_path, 'ambient')

        elif 'multi_dataset' in image_path:
            image_pair = Image.open(image_path)

            A, B = self.divide_imagepair(image_pair)
            ambient = skimage.img_as_float(A)
            flash = skimage.img_as_float(B)

            flashPhoto = flash + ambient
            flashPhoto[flashPhoto < 0] = 0
            flashPhoto[flashPhoto > 1] = 1
            ambient = Image.fromarray((ambient * 255).astype('uint8'))
            flashPhoto = Image.fromarray((flashPhoto * 255).astype('uint8'))

            flashphoto_depth = self.getDepth(flashPhoto, image_path, 'flash')
            ambient_depth = self.getDepth(ambient, image_path, 'ambient')

        elif 'portrait_dataset' in image_path:
            image_pair = Image.open(image_path)

            A, B = self.divide_imagepair(image_pair)
            ambient = skimage.img_as_float(A)
            flash = skimage.img_as_float(B)

            ambient = self.lin(ambient)
            flash = self.lin(flash)

            flashPhoto = flash + ambient
            flashPhoto[flashPhoto < 0] = 0
            flashPhoto[flashPhoto > 1] = 1
            ambient = Image.fromarray((ambient * 255).astype('uint8'))
            flashPhoto = Image.fromarray((flashPhoto * 255).astype('uint8'))

            flashphoto_depth = self.getDepth(flashPhoto, image_path, 'flash')
            ambient_depth = self.getDepth(ambient, image_path, 'ambient')

        torch.cuda.empty_cache()

        ambient_orgsize = skimage.img_as_float(ambient)
        flashPhoto_orgsize = skimage.img_as_float(flashPhoto)

        ambient = ambient.resize((self.data_size, self.data_size))
        flashPhoto = flashPhoto.resize((self.data_size, self.data_size))
        ambient_depth = ambient_depth.resize((self.data_size, self.data_size))
        flashphoto_depth = flashphoto_depth.resize(
            (self.data_size, self.data_size))

        transform_params = get_params(self.opt, ambient.size)
        rgb_transform = get_transform(self.opt,
                                      transform_params,
                                      grayscale=False)
        depth_transform = get_transform(self.opt,
                                        transform_params,
                                        grayscale=True)

        ambient = rgb_transform(ambient)
        flashPhoto = rgb_transform(flashPhoto)

        ambient_depth = depth_transform(ambient_depth)
        flashphoto_depth = depth_transform(flashphoto_depth)

        return {
            'A': flashPhoto,
            'B': ambient,
            'A_org': flashPhoto_orgsize,
            'B_org': ambient_orgsize,
            'depth_A': flashphoto_depth,
            'depth_B': ambient_depth,
            'A_paths': image_path,
            'B_paths': image_path
        }
    def recover_metadata(self):
        url_image = PngImageFile(self.image_where_to_hide)

        return url_image.text["url"], url_image.text["language"]
Example #29
def rgbChannels(inFile: PIL.PngImagePlugin.PngImageFile, message: str="", quantizationWidths: list=[],
                    traversalOrder: list=[], outFile="./IO/outColor.png", verify: bool=False, verbose: bool=False):
    """
    This function takes an image of the form
    [
        [<RGB 1>, <RGB 2>, <RGB 3>, ... ],
        [<RGB a>, <RGB a+1>, <RGB a+2>, ... ],
        [<RGB b>, <RGB b+1>, <RGB b+2>, ... ],
        ...
    ]
    where RGB <index> is of the form [R, G, B] (if there is an Alpha channel present, it is ignored)

    And utilizes a modified version Wu and Tsai's algorithm to encode a message into this nested array structure.

    Because this image is RGB, an order of traversal is needed to ensure the correct encoding/retrieval order 
    while traversing the structure.
    
    Define a general pair of RGB pixels as [[R1, G1, B1], [R2, G2, B2]] and flatten it into [R1, G1, B1, R2, G2, B2].
    The traversal order is an array that maps each of these values to the location it should be sorted to.
    After mapping and sorting the pixel values, adjacent values are paired.

    For example, a possible traversal order is the standard [1, 3, 5, 2, 4, 6]
    Applying this traversal order concept to the RGB pixel pair 
    [[185, 75, 250], [255, 80, 200]] 
    results in these encodable groups of values:
    [[185, 255], [75, 80], [250, 200]]
    """

    # Verify image data
    if verify:
        print("Beginning verification...")
        
        if message == "":
            try:
                verificationData = inFile.text["png:fingerprint"].split(":")
            except:
                raise Exception(f"No verification data found.")
            
            # Retrieve verifiable data from image properties
            imageWidth, imageHeight = rosenburgStrongPairing(int(verificationData[0]), reverse=True)
            bitLength, messageHash = retrieveLength(verificationData[1])

            # Image dimensions are incorrect
            if inFile.size[0] != imageWidth or inFile.size[1] != imageHeight:
                raise Exception(f"Image verification failed. Image dimensions don't match encoded verification data.")

            # Execute function without verifying data option
            retrievedBinary = rgbChannels(inFile, message, quantizationWidths, traversalOrder, outFile, verbose=verbose)
            
            # Ensure entire message was encoded
            if len(retrievedBinary) >= bitLength:
                retrievedBinary = retrievedBinary[:bitLength]

                # Ensure hashes match
                if hashlib.sha256(retrievedBinary.encode()).hexdigest() == messageHash:
                    print("\nVerified.")
                    return retrievedBinary
                else:
                    raise Exception(f"Message verification failed. Hash of retrieved binary doesn't match encoded verification data.")
            raise Exception("Message verification failed. Length of retrieved message binary doesn't match encoded verification data.")
        else:
            # Get binary of message
            if sorted(set(message)) == ["0", "1"]:
                messageBinary = message
            else:
                messageBinary = "0" + str(bin(int.from_bytes(message.encode(), "big")))[2:]
                
            returnValue = rgbChannels(inFile, messageBinary, quantizationWidths, traversalOrder, outFile, verbose=verbose)

            # Build verification data to place in loaded image properties
            verificationBuilder = ""
            verificationBuilder += f"{str(rosenburgStrongPairing([inFile.size[0], inFile.size[1]]))}:"
            verificationBuilder += f"{embedLength(str(len(messageBinary)), messageBinary)}"

            # Edit PNG metadata to include fingerprint of this PVD algorithm
            modifyMetadata = PngImageFile(outFile)
            metadata = PngInfo()
            metadata.add_text("png:fingerprint", f"{verificationBuilder}")
            modifyMetadata.save(outFile, pnginfo=metadata)

            print("\nVerified.")
            return returnValue

    print()

    if message == "":
        if verbose:
            print("Verbose message: no message given, assuming retrieval of message")
    else:
        # Get binary of message
        if sorted(set(message)) == ["0", "1"]:
            messageBinary = message
            if verbose:
                print("Verbose message: message contains only binary values, assuming binary message")
        else:
            messageBinary = "0" + str(bin(int.from_bytes(message.encode(), "big")))[2:]
            if verbose:
                print("Verbose message: message contains non-binary values, assuming ascii message")

    quantizationWidths = validateQuantization(quantizationWidths, verbose)
    traversalOrder = validateTraversal(traversalOrder, verbose)

    print()

    # If there is an Alpha channel present in the image, it is ignored
    pixelPairs = pixelArrayToZigZag(inFile, 3, 2)

    # If function is run without message, assume retrieval of message
    if message == "":
        print(f"Retrieving binary from file \"{inFile.filename}\"")
        print()

        # Retrieval function
        messageBinary = ""

        currentPairCounter = 0
        for pixelPair in pixelPairs:
            currentPairCounter += 1
            if len(pixelPair) == 2:
                # Flatten pixel pair array into un-nested list
                pixelArray = [pixel for pair in pixelPair for pixel in pair]

                # Sort pixel array given traversal order and group into calculation ready pairs
                pixelIndicesDict = dict(sorted(dict(zip(traversalOrder, pixelArray)).items()))
                traversedPixelPairs = list(groupImagePixels(list(pixelIndicesDict.values()), 2))

                currentTraversedCounter = 0
                for traversedPixelPair in traversedPixelPairs:
                    currentTraversedCounter += 1
                    # d value
                    difference = traversedPixelPair[1] - traversedPixelPair[0]

                    # Determine number of bits storable between pixels
                    for width in quantizationWidths:
                        if width[0] <= abs(difference) <= width[1]:
                            lowerBound = width[0]
                            upperBound = width[1]
                            break
                    
                    # Falling-off-boundary check; ensure 0 <= calculated pixel value <= 255
                    testingPair = pixelPairEncode(traversedPixelPair, upperBound, difference)
                    if testingPair[0] < 0 or testingPair[1] < 0 or testingPair[0] > 255 or testingPair[1] > 255:
                        # One of the values "falls-off" the range from 0 to 255 and hence is invalid
                        if verbose == True:
                            print(f"Verbose message: channel pair number {currentTraversedCounter} in pixel pair number {currentPairCounter} has the possibility of falling off, skipping")
                    else:
                        # Passes the check, continue with decoding
                        # Number of storable bits between two pixels
                        storableCount = int(math.log(upperBound - lowerBound + 1, 2))
                        
                        # Extract encoded decimal
                        retrievedDecimal = difference - lowerBound if difference >= 0 else - difference - lowerBound
                        retrievedBinary = bin(retrievedDecimal).replace("0b", "")

                        # Edge case in which embedded data began with 0's
                        if storableCount > len(retrievedBinary):
                            retrievedBinary = "0" * (storableCount-len(retrievedBinary)) + retrievedBinary

                        messageBinary += retrievedBinary
        
        return messageBinary
    else:
        print(f"Encoding binary \"{messageBinary}\" into file \"{inFile.filename}\"")
        print()

        # Encoding function
        newPixels = []

        currentMessageIndex = 0

        currentPairCounter = 0
        for pixelPair in pixelPairs:
            currentPairCounter += 1
            if len(pixelPair) == 2 and currentMessageIndex < len(messageBinary) - 1:

                # Flatten pixel pair array into un-nested list
                pixelArray = [pixel for pair in pixelPair for pixel in pair]

                # Sort pixel array given traversal order and group into calculation ready pairs
                traversalIndiceDict = list(zip(traversalOrder, [0,1,2,3,4,5]))
                pixelIndicesDict = dict(sorted(dict(zip(traversalIndiceDict, pixelArray)).items()))
                traversedPixelPairs = list(groupImagePixels(list(pixelIndicesDict.values()), 2))

                postEncodingValues = []

                currentTraversedCounter = 0
                for traversedPixelPair in traversedPixelPairs:
                    currentTraversedCounter += 1
                    # d value
                    difference = traversedPixelPair[1] - traversedPixelPair[0]

                    # Determine number of bits storable between pixels
                    for width in quantizationWidths:
                        # Only need to check upper bound because widths are sorted
                        if abs(difference) <= width[1]:
                            lowerBound = width[0]
                            upperBound = width[1]
                            break

                    # Falling-off-boundary check; ensure 0 <= calculated pixel value <= 255
                    testingPair = pixelPairEncode(traversedPixelPair, upperBound, difference)
                    if testingPair[0] < 0 or testingPair[1] < 0 or testingPair[0] > 255 or testingPair[1] > 255:
                        # One of the values "falls-off" the range from 0 to 255 and hence is invalid
                        # Append original pixel pair and skip encoding
                        postEncodingValues += traversedPixelPair
                        if verbose:
                            print(f"Verbose message: channel pair number {currentTraversedCounter} in pixel pair number {currentPairCounter} has the possibility of falling off, skipping")
                    else:
                        # Passes the check, continue with encoding
                        # Number of storable bits between two pixels
                        storableCount = int(math.log(upperBound - lowerBound + 1, 2))

                        # Ensure haven't already finished encoding entire message
                        if currentMessageIndex + storableCount <= len(messageBinary):
                            # Encode as normal
                            storableBits = messageBinary[currentMessageIndex:currentMessageIndex+storableCount]
                            currentMessageIndex += storableCount
                        else:
                            if currentMessageIndex == len(messageBinary):
                                # Finished encoding entire message
                                postEncodingValues += traversedPixelPair
                                continue
                            else:
                                # Can encode more bits than available, encode what's left
                                storableBits = messageBinary[currentMessageIndex:]

                                # Ensure last bit doesn't get corrupted, fill empty space with 0's
                                storableBits += "0" * (storableCount - len(messageBinary[currentMessageIndex:]))
                                currentMessageIndex = len(messageBinary)

                        # Get value of the chunk of message binary
                        storableBitsValue = int(storableBits, 2)

                        # d' value
                        differencePrime = lowerBound + storableBitsValue if difference >= 0 else -(lowerBound + storableBitsValue)

                        # Calculate new pixel pair
                        newPixelPair = pixelPairEncode(traversedPixelPair, differencePrime, difference)                    
                        postEncodingValues += newPixelPair

                # Un-sort pixel array given traversal order and group into calculation original RGB channels
                pixelIndicesDict = dict(sorted(dict(zip([ key[1] for key in pixelIndicesDict.keys() ], postEncodingValues)).items()))
                reversedPaired = list(groupImagePixels([pixel for pixel in pixelIndicesDict.values()], 3))

                newPixels += reversedPaired
            else:
                # For case in which there's an odd number of pixels; append lone pixel value
                newPixels += pixelPair

        returnValue = True
        if currentMessageIndex != len(messageBinary):
            print(f"Warning: only encoded {len(messageBinary[0:currentMessageIndex])} of {len(messageBinary)} bits ({round(100*len(messageBinary[0:currentMessageIndex])/len(messageBinary), 2)}%)")
            returnValue = False

            # Verbose errors
            if verbose == True:
                # Underline section of binary that was encoded
                # Get max printable width in current terminal
                width = os.get_terminal_size()[0]
                if len(messageBinary) > width * 5:
                    print("Unable to print verbose warning, return binary exceeds maximum length")
                else:
                    # Create array groupings of message lines and underlinings
                    printableMessageLines = list(groupImagePixels(messageBinary, width))
                    printableUnderlinings = list(groupImagePixels("~"*len(messageBinary[0:currentMessageIndex]), width))

                    # Zip and print
                    print("\nVerbose warning: only encoded underlined section of message:")
                    for printableMessageLine, printableUnderlining in itertools.zip_longest(printableMessageLines, printableUnderlinings, fillvalue=""):
                        print(f"{printableMessageLine}")
                        if printableUnderlining:
                            print(f"{printableUnderlining}")

        # Create new image structure, save file
        newPixels = list(groupImagePixels(newPixels, inFile.size[0]))
        newPixels = pixelArrayToZigZag(newPixels, 1, inFile.size[0], inFile.size[0], inFile.size[1])
        array = np.array(newPixels, dtype=np.uint8)
        savedImage = PIL.Image.fromarray(array)
        savedImage.save(outFile)

        return returnValue
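A usage sketch for rgbChannels covering both directions (paths are placeholders; the empty quantizationWidths/traversalOrder arguments are assumed to be filled in by the validate* helpers):

# Encode, then retrieve by calling again with an empty message.
cover = PngImageFile("./IO/inColor.png")
rgbChannels(cover, message="hello world", outFile="./IO/outColor.png", verify=True)

stego = PngImageFile("./IO/outColor.png")
recovered_bits = rgbChannels(stego, verify=True)   # empty message triggers retrieval mode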
Example #30
    def load_template_from_file(file_path):

        button_template = PngImageFile(file_path)
        button_template = convert_picture_to_grayscale(button_template)
        button_template = convert_picture_to_numpy_array(button_template)
        return button_template