Example #1
def main():
    # interpret user options
    parser = argparse.ArgumentParser()
    parser.add_argument("-i",
                        "--input",
                        required=True,
                        help="input raw file to read")
    parser.add_argument("-o",
                        "--output",
                        help="output file basename for embedded data in IFDs")
    parser.add_argument("-d",
                        "--display",
                        action="store_true",
                        default=False,
                        help="print read data")
    args = parser.parse_args()

    # read input file
    tiff = jbtiff.tiff_file(open(args.input, 'rb'))
    # print data as needed
    if args.display:
        tiff.display(sys.stdout)
    # save data strips as needed
    if args.output:
        tiff.save_data(args.output)
    return
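
The same flow can be driven from the jbtiff API directly, without argparse. A minimal sketch, assuming jbtiff is importable; the filename and basename below are hypothetical, and the per-IFD output naming is inferred from the replacement scripts in Examples #3 and #5:

import sys
import jbtiff

# parse the CR2/TIFF structure from an open file object
tiff = jbtiff.tiff_file(open('IMG_0001.CR2', 'rb'))
# dump the parsed IFD contents to stdout (same as the -d/--display option)
tiff.display(sys.stdout)
# save the embedded data strips (same as the -o/--output option); the replacement
# scripts below expect one file per IFD, named <basename>-<k>.dat
tiff.save_data('IMG_0001')
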
Example #2
def main():
    # interpret user options
    parser = argparse.ArgumentParser()
    parser.add_argument("-i",
                        "--input",
                        required=True,
                        help="input raw file to analyse")
    args = parser.parse_args()

    # read input raw file
    tiff = jbtiff.tiff_file(open(args.input, 'rb'))
    # display memory map
    mmap = tiff.get_memorymap()
    print "*** Memory Map ***"
    next_offset = 0
    for offset, length, description in sorted(mmap):
        if offset > next_offset:
            print "*%8d - %8d\t%8d" % (next_offset, offset - 1,
                                       offset - next_offset)
        print " %8d - %8d\t%8d\t%s" % (offset, offset + length - 1, length,
                                       description)
        next_offset = offset + length
    print "*** END ***"

    return
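
The loop above walks the memory map in offset order and flags any byte range not covered by an entry with a leading '*'. A toy illustration of that logic; the tuples here are made up for illustration, not taken from a real CR2 file:

# the 84-byte hole at offsets 116-199 is reported with a leading '*'
mmap = [(0, 16, 'TIFF header'), (16, 100, 'IFD#0'), (200, 50, 'IFD#0 strip')]
next_offset = 0
for offset, length, description in sorted(mmap):
    if offset > next_offset:
        print "*%8d - %8d\t%8d" % (next_offset, offset - 1, offset - next_offset)
    print " %8d - %8d\t%8d\t%s" % (offset, offset + length - 1, length, description)
    next_offset = offset + length
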
Example #3
def main():
   # interpret user options
   parser = argparse.ArgumentParser()
   parser.add_argument("-i", "--input", required=True,
                     help="input raw file to use as basis")
   parser.add_argument("-b", "--basename", required=True,
                     help="base filename for replacement components (appended with -x.dat for IFD# x)")
   parser.add_argument("-o", "--output", required=True,
                     help="output CR2 file")
   args = parser.parse_args()

   # read input raw file
   tiff = jbtiff.tiff_file(open(args.input, 'rb'))
   # replace data strips where file exists
   for k in range(len(tiff.data)):
      # construct data filename and check it exists
      filename = '%s-%d.dat' % (args.basename, k)
      if not os.path.isfile(filename):
         continue
      # read input data file
      with open(filename, 'rb') as fid:
         data = fid.read()
      # replace data strips with new data
      jbcr2.replace_ifd(tiff, k, data)
   # save updated CR2 file
   tiff.write(open(args.output,'wb'))
   return
Example #4
def main():
    # interpret user options
    parser = argparse.ArgumentParser()
    parser.add_argument("-r",
                        "--raw",
                        required=True,
                        help="input RAW file for image parameters")
    parser.add_argument("-i",
                        "--input",
                        required=True,
                        help="input sensor image file to encode")
    parser.add_argument("-o",
                        "--output",
                        required=True,
                        help="output JPEG lossless raw data file")
    parser.add_argument("-C",
                        "--components",
                        required=True,
                        type=int,
                        help="number of color components to create")
    parser.add_argument("-P",
                        "--precision",
                        required=True,
                        type=int,
                        help="number of bits per sensor pixel")
    parser.add_argument("-d",
                        "--display",
                        action="store_true",
                        default=False,
                        help="display encoded images")
    args = parser.parse_args()

    # See raw_decode.py for color components & slicing example

    # read input raw file
    tiff = jbtiff.tiff_file(open(args.raw, 'rb'))
    # load sensor image
    sensor = jbimage.image_file.read(args.input).squeeze()

    # obtain required parameters from RAW file
    width, height = tiff.get_sensor_size()
    slices = tiff.get_slices()
    # check input image parameters
    assert len(sensor.shape) == 2  # must be a one-channel image
    assert sensor.shape == (height, width)  # image size must be exact

    # slice image
    a = jbcr2.slice_image(sensor, width, height, slices)
    # encode to lossless JPEG output file
    parts = jbcr2.encode_lossless_jpeg(a, args.components, args.precision,
                                       args.output)

    # show user what we've done, as needed
    if args.display:
        for i, b in enumerate(parts):
            plt.figure()
            plt.imshow(b, cmap=plt.cm.gray)
            plt.title('Part %d' % i)
        plt.show()
    return
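
Here jbcr2.slice_image and jbcr2.encode_lossless_jpeg wrap the work that Example #10 below performs inline. A sketch of what slice_image presumably amounts to, mirroring that inline loop (this is an inference from Example #10, not the library's actual source):

import numpy as np

def slice_image(sensor, width, height, slices):
    # slices = (number of first-width slices, their width, width of the last slice)
    slice_widths = [slices[1]] * slices[0] + [slices[2]]
    assert sum(slice_widths) == width
    # copy each vertical slice of the sensor image into consecutive flat positions
    a = np.zeros((height, width), dtype=np.dtype('>H'))
    for i, sw in enumerate(slice_widths):
        col_s = sum(slice_widths[0:i])
        col_e = col_s + sw
        a.flat[col_s * height:col_e * height] = sensor[:, col_s:col_e].flat
    return a
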
Example #5
def main():
   # interpret user options
   parser = argparse.ArgumentParser()
   parser.add_argument("-i", "--input", required=True,
                     help="input raw file to use as basis")
   parser.add_argument("-b", "--basename", required=True,
                     help="base filename for replacement components (appended with -x.dat for IFD# x)")
   parser.add_argument("-o", "--output", required=True,
                     help="output CR2 file")
   args = parser.parse_args()

   # read input raw file
   tiff = jbtiff.tiff_file(open(args.input, 'rb'))
   # replace data strips where file exists
   for k, (IFD, ifd_offset, strips) in enumerate(tiff.data):
      # construct data filename and check it exists
      filename = '%s-%d.dat' % (args.basename, k)
      if not os.path.isfile(filename):
         continue
      # read input data file
      with open(filename, 'rb') as fid:
         data = fid.read()
      print "IFD#%d: Replacing data strip with length %d" % (k, len(data))
      # replace data strips with new data
      assert strips
      del strips[:]
      strips.append(data)
      # update IFD data
      if 273 in IFD:
         assert 279 in IFD
         assert 513 not in IFD and 514 not in IFD
         IFD[279] = (4, 1, [len(data)], 0)
         continue
      if 513 in IFD:
         assert 514 in IFD
         assert 273 not in IFD and 279 not in IFD
         IFD[514] = (4, 1, [len(data)], 0)
         continue
      raise AssertionError("Reference to data strip not found in IFD#%d" % k)
   # save updated CR2 file
   tiff.write(open(args.output,'wb'))
   return
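
Example #3 calls jbcr2.replace_ifd for the same job; judging from the inline logic above, that helper presumably reduces to the sketch below. The tag numbers are the standard TIFF/EXIF ones (273 StripOffsets, 279 StripByteCounts, 513 JPEGInterchangeFormat, 514 JPEGInterchangeFormatLength; entry type 4 is LONG):

def replace_ifd(tiff, k, data):
    # presumed equivalent of the inline loop in Example #5
    IFD, ifd_offset, strips = tiff.data[k]
    assert strips
    del strips[:]
    strips.append(data)
    if 273 in IFD:       # strip-based IFD: update StripByteCounts (279)
        IFD[279] = (4, 1, [len(data)], 0)
    elif 513 in IFD:     # embedded-JPEG IFD: update JPEGInterchangeFormatLength (514)
        IFD[514] = (4, 1, [len(data)], 0)
    else:
        raise AssertionError("Reference to data strip not found in IFD#%d" % k)
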
Example #6
def main():
   # interpret user options
   parser = argparse.ArgumentParser()
   parser.add_argument("-i", "--input", required=True,
                     help="input raw file to read")
   parser.add_argument("-o", "--output",
                     help="output file basename for embedded data in IFDs")
   parser.add_argument("-d", "--display", action="store_true", default=False,
                     help="print read data")
   args = parser.parse_args()

   # read input file
   tiff = jbtiff.tiff_file(open(args.input, 'rb'))
   # print data as needed
   if args.display:
      tiff.display(sys.stdout)
   # save data strips as needed
   if args.output:
      tiff.save_data(args.output)
   return
Example #7
def main():
   # interpret user options
   parser = argparse.ArgumentParser()
   parser.add_argument("-r", "--raw", required=True,
                     help="input RAW file for image parameters")
   parser.add_argument("-i", "--input", required=True,
                     help="input JPEG lossless raw data file to decode")
   parser.add_argument("-o", "--output", required=True,
                     help="output sensor image file (PGM)")
   parser.add_argument("-d", "--display", action="store_true", default=False,
                     help="display decoded image")
   args = parser.parse_args()

   # read input raw file
   tiff = jbtiff.tiff_file(open(args.raw, 'rb'))

   # convert lossless JPEG encoded input file to raw data
   a, components, precision = jbcr2.decode_lossless_jpeg(args.input)
   # obtain required parameters from RAW file
   width,height = tiff.get_sensor_size()
   slices = tiff.get_slices()
   # unslice image
   I = jbcr2.unslice_image(a, width, height, slices)

   # save result
   jbimage.pnm_file.write(I.astype('>H'), open(args.output,'wb'))

   # show user what we've done, as needed
   if args.display:
      # linear display
      plt.figure()
      plt.imshow(I, cmap=plt.cm.gray)
      plt.title('%s' % args.input)
      # show everything
      plt.show()
   return
Example #8
def main():
    # interpret user options
    parser = argparse.ArgumentParser()
    parser.add_argument("-r",
                        "--raw",
                        required=True,
                        help="input RAW file for image parameters")
    parser.add_argument("-i",
                        "--input",
                        required=True,
                        help="input sensor image file to decode (PGM)")
    parser.add_argument("-o",
                        "--output",
                        required=True,
                        help="output color image file (PPM)")
    parser.add_argument("-S",
                        "--saturation",
                        type=int,
                        help="saturation level (overriding camera default)")
    parser.add_argument("-b",
                        "--bayer",
                        default="RGGB",
                        help="Bayer pattern (first letter pair for odd rows, second pair for even rows)")
    parser.add_argument("-C",
                        "--camera",
                        help="camera identifier string for color table lookup")
    parser.add_argument("-d",
                        "--display",
                        action="store_true",
                        default=False,
                        help="display decoded image")
    args = parser.parse_args()

    # obtain required parameters from RAW file
    tiff = jbtiff.tiff_file(open(args.raw, 'rb'))
    width, height = tiff.get_sensor_size()
    border = tiff.get_border()
    if args.camera:
        model = args.camera
    else:
        model = tiff.get_model(0)

    # load sensor image
    I = jbimage.pnm_file.read(open(args.input, 'rb'))
    assert len(I.shape) == 2  # must be a one-channel image
    assert I.shape == (height, width)  # image size must be exact

    # get necessary transformation data
    t_black, t_maximum, cam_rgb = jbtiff.tiff_file.color_table[model]
    # extract references to color channels
    # c0 c1 / c2 c3 = R G / G B on most Canon cameras
    c = []
    for i in [0, 1]:
        for j in [0, 1]:
            c.append(I[i::2, j::2])
    # determine black levels for each channel from first four columns
    bl = [np.median(c[i][:, 0:4]) for i in range(4)]
    # determine if we need to increase the saturation level
    t_maximum_actual = max([c[i].max() for i in range(4)])
    if t_maximum_actual > t_maximum:
        print "WARNING: actual levels (%d) exceed saturation (%d)" % (
            t_maximum_actual, t_maximum)
    # subtract black level and scale each channel to [0.0,1.0]
    if args.saturation:
        t_maximum = args.saturation
    print "Scaling with black levels (%s), saturation %d" % (','.join(
        "%d" % x for x in bl), t_maximum)
    for i in range(4):
        c[i] = (c[i] - bl[i]) / float(t_maximum - bl[i])
    # determine nearest neighbour for each colour channel
    assert len(args.bayer) == 4
    nn = []
    for ch, color in enumerate("RGB"):
        ch_nn = np.zeros((2, 2), dtype=int)
        ch_nn[:] = -1  # initialize
        if args.bayer.count(color) == 1:  # there is only one instance
            ch_nn[:] = args.bayer.find(color)
        elif args.bayer.count(color) == 2:  # there are two instances
            ch_nn[0, :] = args.bayer.find(color, 0, 2)
            ch_nn[1, :] = args.bayer.find(color, 2, 4)
        assert (ch_nn.min() >= 0)
        nn.append(ch_nn)
    # copy color channels and interpolate missing data (nearest neighbour)
    I = np.zeros((height, width, 3))
    for ch in range(3):
        for i in [0, 1]:
            for j in [0, 1]:
                I[i::2, j::2, ch] = c[nn[ch][i, j]]
    # convert from camera color space to linear RGB D65 space
    rgb_cam = np.linalg.pinv(cam_rgb)
    I = np.dot(I, rgb_cam.transpose())
    # limit values
    np.clip(I, 0.0, 1.0, I)
    # apply sRGB gamma correction
    I = jbtiff.tiff_file.srgb_gamma(I)
    # cut border
    x1, y1, x2, y2 = border
    I = I[y1:y2 + 1, x1:x2 + 1]
    # show colour image, as needed
    if args.display:
        plt.figure()
        plt.imshow(I.astype('float'))
        plt.title('%s' % args.input)
    # scale to 16-bit
    I *= (1 << 16) - 1

    # save result
    jbimage.pnm_file.write(I.astype('>H'), open(args.output, 'wb'))

    # show user what we've done, as needed
    if args.display:
        plt.show()
    return
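
For the default pattern "RGGB", the nearest-neighbour tables built above work out as follows; a small self-contained check of that construction:

import numpy as np

bayer = "RGGB"
nn = []
for color in "RGB":
    ch_nn = np.zeros((2, 2), dtype=int)
    if bayer.count(color) == 1:
        ch_nn[:] = bayer.find(color)
    else:
        ch_nn[0, :] = bayer.find(color, 0, 2)
        ch_nn[1, :] = bayer.find(color, 2, 4)
    nn.append(ch_nn)
# red is filled from c[0] at every 2x2 position, blue from c[3];
# green uses c[1] on even rows and c[2] on odd rows
assert (nn[0] == 0).all() and (nn[2] == 3).all()
assert (nn[1] == [[1, 1], [2, 2]]).all()
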
Example #9
def main():
   # interpret user options
   parser = argparse.ArgumentParser()
   parser.add_argument("-i", "--input", required=True,
                     help="input raw file to use as basis")
   parser.add_argument("-d", "--decode",
                     help="decode and keep previous sensor image file (as PNM)")
   parser.add_argument("-s", "--sensor", required=True,
                     help="sensor image file to replace input")
   parser.add_argument("-o", "--output", required=True,
                     help="output CR2 file")
   args = parser.parse_args()

   # read input raw file
   tiff = jbtiff.tiff_file(open(args.input, 'rb'))
   # obtain required parameters from RAW file
   width,height = tiff.get_sensor_size()
   slices = tiff.get_slices()

   # extract existing sensor image
   IFD, ifd_offset, strips = tiff.data[3]
   # save into a temporary file
   fid, tmpfile = tempfile.mkstemp()
   for strip in strips:
      os.write(fid, strip)
   os.close(fid)
   # decode lossless JPEG encoded file to determine parameters
   a, components, precision = jbcr2.decode_lossless_jpeg(tmpfile)
   # delete temporary file
   os.remove(tmpfile)
   # store decoded sensor image file as needed
   if args.decode:
      # unslice image
      I = jbcr2.unslice_image(a, width, height, slices)
      # save result
      jbimage.pnm_file.write(I.astype('>H'), open(args.decode,'wb'))

   # read sensor image file
   sensor = jbimage.image_file.read(args.sensor).squeeze()
   # determine precision
   precision_sensor = jbimage.get_precision(sensor.max())
   print "Sensor image required precision: %d bits" % precision_sensor
   # decide if we need to upgrade encoding precision
   if precision_sensor > precision:
      print "Upgrading from precision: %d bits" % precision
      precision = precision_sensor

   # check input image parameters
   assert len(sensor.shape) == 2 # must be a one-channel image
   assert sensor.shape == (height,width) # image size must be exact
   # slice image
   a = jbcr2.slice_image(sensor, width, height, slices)
   # encode to a temporary lossless JPEG output file
   fid, tmpfile = tempfile.mkstemp()
   os.close(fid)
   parts = jbcr2.encode_lossless_jpeg(a, components, precision, tmpfile)
   # read and delete temporary file
   with open(tmpfile, 'rb') as fid:
      data = fid.read()
   os.remove(tmpfile)

   # replace data strips for main sensor image (IFD#3)
   jbcr2.replace_ifd(tiff, 3, data)

   # save updated CR2 file
   tiff.write(open(args.output,'wb'))
   return
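
The precision check above presumably asks for the smallest bit width that can hold the largest sensor sample (this reading of jbimage.get_precision is an assumption); a hypothetical illustration:

# hypothetical: a replacement sensor image whose largest sample is 15000
max_sample = 15000
required_bits = max_sample.bit_length()   # 14, since 2**13 <= 15000 < 2**14
# a CR2 originally encoded at 12 bits would therefore be re-encoded at 14 bits
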
Example #10
def main():
   # interpret user options
   parser = argparse.ArgumentParser()
   parser.add_argument("-r", "--raw", required=True,
                     help="input RAW file for image parameters")
   parser.add_argument("-i", "--input", required=True,
                     help="input sensor image file to encode")
   parser.add_argument("-o", "--output", required=True,
                     help="output JPEG lossless raw data file")
   parser.add_argument("-C", "--components", required=True, type=int,
                     help="number of color components to create")
   parser.add_argument("-P", "--precision", required=True, type=int,
                     help="number of bits per sensor pixel")
   parser.add_argument("-d", "--display", action="store_true", default=False,
                     help="display encoded images")
   args = parser.parse_args()

   # See raw_decode.py for color components & slicing example

   # obtain required parameters from RAW file
   tiff = jbtiff.tiff_file(open(args.raw, 'rb'))
   width,height = tiff.get_sensor_size()
   slices = tiff.get_slices()

   # load sensor image
   I = jbtiff.pnm_file.read(open(args.input,'rb'))
   assert len(I.shape) == 2 # must be a one-channel image
   assert I.shape == (height,width) # image size must be exact

   # make a list of the width of each slice
   slice_widths = [slices[1]] * slices[0] + [slices[2]]
   assert sum(slice_widths) == width
   # first slice image
   a = np.zeros((height, width), dtype=np.dtype('>H'))
   for i, sw in enumerate(slice_widths):
      col_s = sum(slice_widths[0:i])
      col_e = col_s + sw
      a.flat[col_s*height:col_e*height] = I[:,col_s:col_e].flat

   # determine color components to create
   components = []
   for i in range(args.components):
      f = 'parts.%d' % (i+1)
      components.append(f)
   # next split color components
   for i, f in enumerate(components):
      # extract raw data for this color component from the sliced color image
      b = a[:,i::args.components]
      # save to file
      b.tofile(f)
      # show user what we've done, as needed
      if args.display:
         plt.figure()
         plt.imshow(b, cmap=plt.cm.gray)
         plt.title('%s' % f)

   # convert raw data color components to lossless JPEG encoded file
   cmd = 'pvrg-jpeg -ih %d -iw %d -k 1 -p %d -s "%s"' % \
      (height, width / args.components, args.precision, args.output)
   for i, f in enumerate(components):
      cmd += ' -ci %d %s' % (i+1, f)
   st, out = commands.getstatusoutput(cmd)
   if st != 0:
      raise AssertionError('Error encoding JPEG file: %s' % out)

   # remove temporary files
   for i, f in enumerate(components):
      os.remove(f)

   # show user what we've done, as needed
   if args.display:
      plt.show()
   return
Example #11
def main():
    # interpret user options
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--raw", required=True, help="input RAW file for image parameters")
    parser.add_argument("-i", "--input", required=True, help="input color image file to encode (PPM)")
    parser.add_argument("-o", "--output", required=True, help="output sensor image file (PGM)")
    parser.add_argument("-s", "--small", required=True, help="output small RGB image file (DAT)")
    parser.add_argument("-B", "--black", required=True, type=int, help="black level (same for all channels)")
    parser.add_argument("-C", "--camera", help="camera identifier string for color table lookup")
    parser.add_argument("-d", "--display", action="store_true", default=False, help="display encoded image")
    args = parser.parse_args()

    # obtain required parameters from RAW file
    tiff = jbtiff.tiff_file(open(args.raw, "rb"))
    swidth, sheight = tiff.get_image_size(2)
    sdepth = tiff.get_image_depth(2)
    width, height = tiff.get_sensor_size()
    border = tiff.get_border()
    if args.camera:
        model = args.camera
    else:
        model = tiff.get_model(0)
    # determine image size without border
    x1, y1, x2, y2 = border
    iwidth = x2 - x1 + 1
    iheight = y2 - y1 + 1

    # load colour image
    I = jbtiff.pnm_file.read(open(args.input, "rb"))
    assert len(I.shape) == 3 and I.shape[2] == 3  # must be a three-channel image
    assert I.shape == (iheight, iwidth, 3)  # image size must be exact

    # scale each channel to [0.0,1.0]
    if I.dtype == np.dtype("uint8"):
        depth = 8
    elif I.dtype == np.dtype(">H"):
        depth = 16
    else:
        raise ValueError("Cannot handle input arrays of type %s" % I.dtype)
    I = I / float((1 << depth) - 1)
    # invert sRGB gamma correction
    I = jbtiff.tiff_file.srgb_gamma_inverse(I)
    # get necessary transformation data
    t_black, t_maximum, cam_rgb = jbtiff.tiff_file.color_table[model]
    # convert from linear RGB D65 space to camera color space
    I = np.dot(I, cam_rgb.transpose())
    # limit values
    np.clip(I, 0.0, 1.0, I)
    # add black level and scale each channel to saturation limit
    print "Scaling with black level %d, saturation %d" % (args.black, t_maximum)
    I = I * (t_maximum - args.black) + args.black

    # determine subsampling rate
    step = int(round(height / float(sheight)))
    assert step == 2 ** int(np.log2(step))
    # determine precision to use
    if sdepth == 16:
        dtype = np.dtype("<H")
    elif sdepth == 8:
        dtype = np.dtype("uint8")
    else:
        raise ValueError("Cannot handle raw images of depth %d" % sdepth)
    # create small RGB image and copy color channels
    a = np.zeros((iheight // step, iwidth // step, 3), dtype=dtype)
    a[:, :, 0] = I[0::step, 0::step, 0]  # Red
    a[:, :, 1] = I[0::step, 1::step, 1]  # Green 1
    # a[:,:,1] = I[1::step,0::step,1] # Green 2
    a[:, :, 2] = I[1::step, 1::step, 2]  # Blue
    # add border
    dy1 = (sheight - a.shape[0]) // 2
    dy2 = sheight - a.shape[0] - dy1
    dx1 = (swidth - a.shape[1]) // 2
    dx2 = swidth - a.shape[1] - dx1
    a = np.pad(a, ((dy1, dy2), (dx1, dx2), (0, 0)), mode="constant", constant_values=args.black).astype(dtype)
    assert a.shape == (sheight, swidth, 3)
    # save result
    a.tofile(open(args.small, "wb"))

    # add border
    dy1 = y1
    dy2 = height - y2 - 1
    dx1 = x1
    dx2 = width - x2 - 1
    I = np.pad(I, ((dy1, dy2), (dx1, dx2), (0, 0)), mode="constant", constant_values=args.black).astype(">H")
    assert I.shape == (height, width, 3)
    # create full sensor image and copy color channels
    a = np.zeros((height, width), dtype=np.dtype(">H"))
    a[0::2, 0::2] = I[0::2, 0::2, 0]  # Red
    a[0::2, 1::2] = I[0::2, 1::2, 1]  # Green 1
    a[1::2, 0::2] = I[1::2, 0::2, 1]  # Green 2
    a[1::2, 1::2] = I[1::2, 1::2, 2]  # Blue
    # save result
    jbtiff.pnm_file.write(a, open(args.output, "wb"))

    # show user what we've done, as needed
    if args.display:
        # linear display
        plt.figure()
        plt.imshow(a, cmap=plt.cm.gray)
        plt.title("%s" % args.input)
        # show everything
        plt.show()
    return
Example #12
def main():
   # interpret user options
   parser = argparse.ArgumentParser()
   parser.add_argument("-r", "--raw", required=True,
                     help="input RAW file for image parameters")
   parser.add_argument("-i", "--input", required=True,
                     help="input JPEG lossless raw data file to decode")
   parser.add_argument("-o", "--output", required=True,
                     help="output sensor image file (PGM)")
   parser.add_argument("-d", "--display", action="store_true", default=False,
                     help="display decoded image")
   args = parser.parse_args()

   # Laurent Clévy's example:
   # Image (w x h): 5184 x 3456
   # 4 color components (w x h): 0x538 x 0xdbc = 1336 x 3516 each
   #    interleaved components: 5344 x 3516
   #    border: 160 x 60 on declared image size
   # 3 slices (w): 2x 0x6c0 + 0x760 = 2x 1728 + 1888 = 5344
   #    each slice takes: 432 pixels from each of 4 colors (first two)
   #                      472 pixels from each of 4 colors (last one)

   # obtain required parameters from RAW file
   tiff = jbtiff.tiff_file(open(args.raw, 'rb'))
   width,height = tiff.get_sensor_size()
   slices = tiff.get_slices()

   # convert lossless JPEG encoded input file to raw data
   cmd = 'pvrg-jpeg -d -s "%s" -o parts' % args.input
   st, out = commands.getstatusoutput(cmd)
   if st != 0:
      raise AssertionError('Error decoding JPEG file: %s' % out)

   # interpret output to determine color components
   components = []
   for line in out.split('\n'):
      if line.startswith('>> '):
         record = line.split()
         f = record[4]
         w = int(record[6])
         h = int(record[8])
         components.append((f,w,h))
   # number of color components
   n = len(components)
   # first assemble color components
   assert all([h == height for f,w,h in components])
   assert sum([w for f,w,h in components]) == width
   a = np.zeros((height, width), dtype=np.dtype('>H'))
   for i, (f,w,h) in enumerate(components):
      # read raw data for this color component
      b = np.fromfile(f, dtype=np.dtype('>H'))
      b.shape = (h,w)
      # insert into assembled color image
      a[:,i::n] = b
      # remove temporary file
      os.remove(f)

   # make a list of the width of each slice
   slice_widths = [slices[1]] * slices[0] + [slices[2]]
   assert sum(slice_widths) == width
   # next unslice image
   I = np.zeros((height, width), dtype=np.dtype('>H'))
   for i, sw in enumerate(slice_widths):
      col_s = sum(slice_widths[0:i])
      col_e = col_s + sw
      I[:,col_s:col_e] = a.flat[col_s*height:col_e*height].reshape(height,sw)

   # save result
   jbtiff.pnm_file.write(I.astype('>H'), open(args.output,'wb'))

   # show user what we've done, as needed
   if args.display:
      # linear display
      plt.figure()
      plt.imshow(I, cmap=plt.cm.gray)
      plt.title('%s' % args.input)
      # show everything
      plt.show()
   return
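
Plugging Laurent Clévy's numbers from the comment above into the slice-width expression used by both this decoder and the encoder in Example #10:

# slices as returned by tiff.get_slices():
# (number of first-width slices, their width, width of the last slice)
slices = (2, 0x6c0, 0x760)                 # = (2, 1728, 1888)
slice_widths = [slices[1]] * slices[0] + [slices[2]]
assert slice_widths == [1728, 1728, 1888]
assert sum(slice_widths) == 5344           # interleaved width of the 4 x 1336 components
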
Example #13
def main():
    # interpret user options
    parser = argparse.ArgumentParser()
    parser.add_argument("-r",
                        "--raw",
                        required=True,
                        help="input RAW file for image parameters")
    parser.add_argument("-i",
                        "--input",
                        required=True,
                        help="input color image file to encode (PPM)")
    parser.add_argument("-o",
                        "--output",
                        required=True,
                        help="output sensor image file (PGM)")
    parser.add_argument("-s",
                        "--small",
                        required=True,
                        help="output small RGB image file (DAT)")
    parser.add_argument("-B",
                        "--black",
                        required=True,
                        type=int,
                        help="black level (same for all channels)")
    parser.add_argument("-S",
                        "--saturation",
                        type=int,
                        help="saturation level (overriding camera default)")
    parser.add_argument("-b",
                        "--bayer",
                        default="RGGB",
                        help="Bayer pattern (first letter pair for odd rows, second pair for even rows)")
    parser.add_argument("-C",
                        "--camera",
                        help="camera identifier string for color table lookup")
    parser.add_argument("-d",
                        "--display",
                        action="store_true",
                        default=False,
                        help="display encoded image")
    args = parser.parse_args()

    # obtain required parameters from RAW file
    tiff = jbtiff.tiff_file(open(args.raw, 'rb'))
    swidth, sheight = tiff.get_image_size(2)
    sdepth = tiff.get_image_depth(2)
    width, height = tiff.get_sensor_size()
    border = tiff.get_border()
    if args.camera:
        model = args.camera
    else:
        model = tiff.get_model(0)
    # determine image size without border
    x1, y1, x2, y2 = border
    iwidth = x2 - x1 + 1
    iheight = y2 - y1 + 1

    # load colour image
    I = jbimage.pnm_file.read(open(args.input, 'rb'))
    assert len(I.shape) == 3 and I.shape[2] == 3  # must be a three-channel image
    assert I.shape == (iheight, iwidth, 3)  # image size must be exact

    # scale each channel to [0.0,1.0]
    if I.dtype == np.dtype('uint8'):
        depth = 8
    elif I.dtype == np.dtype('>H'):
        depth = 16
    else:
        raise ValueError("Cannot handle input arrays of type %s" % I.dtype)
    I = I / float((1 << depth) - 1)
    # invert sRGB gamma correction
    I = jbtiff.tiff_file.srgb_gamma_inverse(I)
    # get necessary transformation data
    t_black, t_maximum, cam_rgb = jbtiff.tiff_file.color_table[model]
    # convert from linear RGB D65 space to camera color space
    I = np.dot(I, cam_rgb.transpose())
    # limit values
    np.clip(I, 0.0, 1.0, I)
    # add black level and scale each channel to saturation limit
    if args.saturation:
        t_maximum = args.saturation
    print "Scaling with black level %d, saturation %d" % (args.black,
                                                          t_maximum)
    I = I * (t_maximum - args.black) + args.black

    # determine subsampling rate
    step = int(round(height / float(sheight)))
    assert step == 2**int(np.log2(step))
    # determine precision to use
    if sdepth == 16:
        dtype = np.dtype('<H')
    elif sdepth == 8:
        dtype = np.dtype('uint8')
    else:
        raise ValueError("Cannot handle raw images of depth %d" % sdepth)
    # create small RGB image and copy color channels
    a = np.zeros((iheight // step, iwidth // step, 3), dtype=dtype)
    a[:] = I[0::step, 0::step, :]
    # add border
    dy1 = (sheight - a.shape[0]) // 2
    dy2 = sheight - a.shape[0] - dy1
    dx1 = (swidth - a.shape[1]) // 2
    dx2 = swidth - a.shape[1] - dx1
    a = np.pad(a, ((dy1, dy2), (dx1, dx2), (0, 0)),
               mode='constant',
               constant_values=args.black).astype(dtype)
    assert a.shape == (sheight, swidth, 3)
    # save result
    a.tofile(open(args.small, 'wb'))

    # add border
    dy1 = y1
    dy2 = height - y2 - 1
    dx1 = x1
    dx2 = width - x2 - 1
    I = np.pad(I, ((dy1, dy2), (dx1, dx2), (0, 0)),
               mode='constant',
               constant_values=args.black).astype('>H')
    assert I.shape == (height, width, 3)
    # determine mapping for each colour channel
    assert len(args.bayer) == 4
    cmap = {v: k for k, v in enumerate("RGB")}
    # create full sensor image and copy color channels
    a = np.zeros((height, width), dtype=np.dtype('>H'))
    a[0::2, 0::2] = I[0::2, 0::2, cmap[args.bayer[0]]]
    a[0::2, 1::2] = I[0::2, 1::2, cmap[args.bayer[1]]]
    a[1::2, 0::2] = I[1::2, 0::2, cmap[args.bayer[2]]]
    a[1::2, 1::2] = I[1::2, 1::2, cmap[args.bayer[3]]]
    # save result
    jbimage.pnm_file.write(a, open(args.output, 'wb'))

    # show user what we've done, as needed
    if args.display:
        # linear display
        plt.figure()
        plt.imshow(a, cmap=plt.cm.gray)
        plt.title('%s' % args.input)
        # show everything
        plt.show()
    return
Example #14
def main():
   # interpret user options
   parser = argparse.ArgumentParser()
   parser.add_argument("-r", "--raw", required=True,
                     help="input RAW file for image parameters")
   parser.add_argument("-i", "--input", required=True,
                     help="input sensor image file to decode (PGM)")
   parser.add_argument("-o", "--output", required=True,
                     help="output color image file (PPM)")
   parser.add_argument("-C", "--camera",
                     help="camera identifier string for color table lookup")
   parser.add_argument("-d", "--display", action="store_true", default=False,
                     help="display decoded image")
   args = parser.parse_args()

   # obtain required parameters from RAW file
   tiff = jbtiff.tiff_file(open(args.raw, 'rb'))
   width,height = tiff.get_sensor_size()
   border = tiff.get_border()
   if args.camera:
      model = args.camera
   else:
      model = tiff.get_model(0)

   # load sensor image
   I = jbtiff.pnm_file.read(open(args.input,'rb'))
   assert len(I.shape) == 2 # must be a one-channel image
   assert I.shape == (height,width) # image size must be exact

   # get necessary transformation data
   t_black, t_maximum, cam_rgb = jbtiff.tiff_file.color_table[model]
   # extract references to color channels
   R  = I[0::2,0::2] # Red
   G1 = I[0::2,1::2] # Green 1
   G2 = I[1::2,0::2] # Green 2
   B  = I[1::2,1::2] # Blue
   # determine black levels for each channel
   Rb = np.median(R[:,0:4])
   G1b = np.median(G1[:,0:4])
   G2b = np.median(G2[:,0:4])
   Bb = np.median(B[:,0:4])
   # subtract black level and scale each channel to [0.0,1.0]
   print "Scaling with black levels (%d,%d,%d,%d), saturation %d" % (Rb,G1b,G2b,Bb,t_maximum)
   R  = (R  - Rb)/float(t_maximum - Rb)
   G1 = (G1 - G1b)/float(t_maximum - G1b)
   G2 = (G2 - G2b)/float(t_maximum - G2b)
   B  = (B  - Bb)/float(t_maximum - Bb)
   # copy color channels and interpolate missing data (nearest neighbour)
   I = np.zeros((height, width, 3))
   for i in [0,1]:
      for j in [0,1]:
         I[i::2,j::2,0] = R # Red
   for i in [0,1]:
      I[i::2,1::2,1] = G1 # Green 1
      I[i::2,0::2,1] = G2 # Green 2
   for i in [0,1]:
      for j in [0,1]:
         I[i::2,j::2,2] = B # Blue
   # convert from camera color space to linear RGB D65 space
   rgb_cam = np.linalg.pinv(cam_rgb)
   I = np.dot(I, rgb_cam.transpose())
   # limit values
   np.clip(I, 0.0, 1.0, I)
   # apply sRGB gamma correction
   I = jbtiff.tiff_file.srgb_gamma(I)
   # cut border
   x1,y1,x2,y2 = border
   I = I[y1:y2+1,x1:x2+1]
   # show colour image, as needed
   if args.display:
      plt.figure()
      plt.imshow(I.astype('float'))
      plt.title('%s' % args.input)
   # scale to 16-bit
   I *= (1<<16)-1

   # save result
   jbtiff.pnm_file.write(I.astype('>H'), open(args.output,'wb'))

   # show user what we've done, as needed
   if args.display:
      plt.show()
   return
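
The srgb_gamma / srgb_gamma_inverse calls used here and in Examples #8, #11 and #13 are presumably the standard sRGB transfer curve; a sketch of the forward direction under that assumption:

import numpy as np

def srgb_gamma(x):
    # standard sRGB encoding curve (an assumption about what tiff_file.srgb_gamma does);
    # input is expected in [0.0, 1.0], as the callers clip before applying it
    return np.where(x <= 0.0031308, 12.92 * x, 1.055 * np.power(x, 1.0 / 2.4) - 0.055)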