Code example #1
File: app.py Project: chosak/restful-gallery
    def get(self, album_id, image_id, extension=None):
        """GET handler for GGB image metadata and files.

        URL pattern: /albums/${album_id}/images/${image_id}(${extension})

        If called without a file extension:
            If image exists, returns 200 OK with JSON image data structure.
            Returns Content-type: application/json.
            If image doesn't exist, returns 404 NOT FOUND.
        
        If called with a file extension:
            If image exists and has the matching extension, returns the image.
            Returned Content-type matches the image format.
            Otherwise returns 404 NOT FOUND.
       
        Returns 401 UNAUTHORIZED to all calls if authorization fails.
        """
        q = Album.all().filter('album_id =', album_id)
        album = q.get()
        if not album:
            return self.error(404)

        q = Image.all().filter('album =', album).filter('image_id =', image_id)
        image = q.get()
        if not image:
            return self.error(404)

        if not extension:
            data = image.to_dict()
            return write_json(self, data)
        
        if extension != image.extension:
            return self.error(404)
   
        write_image(self, image.image_data, image.extension)
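To make the URL pattern above concrete, here is a hypothetical client-side sketch; the host, album_id, and image_id values are invented for illustration and are not part of the project:

import requests

BASE = 'http://localhost:8080'  # assumed development server for the gallery app

# No extension: expect JSON metadata (200 OK) or a 404.
resp = requests.get(BASE + '/albums/summer/images/img001')
if resp.ok:
    print(resp.json())

# With an extension: expect raw image bytes if the extension matches.
resp = requests.get(BASE + '/albums/summer/images/img001.jpg')
if resp.ok:
    with open('img001.jpg', 'wb') as f:
        f.write(resp.content)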
Code example #2
 def add_face(self, name, face_image):
     name = name.lower()
     entry = self.find_user(name)
     image_path = os.path.join('images/', f'{name}_{util.get_random_hash()}.jpg')
     util.write_image(image_path, face_image)
     if entry is not None:
         entry['faces'].append(image_path)
     else:
         entry = {
             'name': name,
             'faces': [
                 image_path
             ]
         }
         self.db.append(entry)
     self.flush()
Code example #3
File: app.py Project: chosak/restful-gallery
    def get(self, hash, extension=None):
        q = Album.all().filter('hash =', hash)
        album = q.get()
        if album:
            if extension:
                return self.error(404)
            
            q = Image.all().filter('album =', album)
            return self.response.out.write(render_template('album.html', {
                'name': album.name,
                'images': q,
            }))

        q = Image.all().filter('hash =', hash)
        image = q.get()
        if image:
            if not extension:
                return self.response.out.write(render_template('image.html',
                    { 'image': image }))
            elif image.extension == extension:
                return write_image(self, image.image_data, extension)
            else:
                return self.error(404)
        
        return self.error(404)
Code example #4
def write_image_output(output_img, content_img, style_imgs, init_img):
  out_dir = os.path.join(args.img_output_dir, args.image_name)
  mkdir(out_dir)
  img_path = os.path.join(out_dir, args.image_name+'.png')
  content_path = os.path.join(out_dir, 'content.png')
  init_path = os.path.join(out_dir, 'init.png')

  write_image(img_path, output_img)
  write_image(content_path, content_img)
  write_image(init_path, init_img)
  # Write each style image with an indexed filename.
  for index, style_img in enumerate(style_imgs):
    path = os.path.join(out_dir, 'style_' + str(index) + '.png')
    write_image(path, style_img)
Code example #5
"""
Additive brightness control.
Resulting pixel value = original pixel value + c (c an integer).
Watch out for R, G and B bounds!
"""

import cv2 as cv
import color
import util

BRIGHTNESS_FACTOR = 100

name = "skate.jpg"
original = util.read_image(name)

bright = color.additive_brightness(original, BRIGHTNESS_FACTOR)
cv.imshow('Additive Brightness (RGB)', bright)
util.write_image("rgb-add-bright-" + name, bright)

yiq = color.rgb_to_yiq(original)
bright = color.additive_brightness(yiq, BRIGHTNESS_FACTOR)
cv.imshow('Additive Brightness (YIQ)', bright)
util.write_image("yiq-add-bright-" + name, bright)

cv.waitKey(0)
cv.destroyAllWindows()
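For reference, a minimal sketch of what a clipping-safe color.additive_brightness could look like, assuming 8-bit NumPy images (an illustration, not the project's actual implementation):

import numpy as np

def additive_brightness(img, c):
    # Add in a wider dtype so values cannot wrap around, then clip
    # each channel back into the valid [0, 255] range.
    return np.clip(img.astype(np.int16) + c, 0, 255).astype(np.uint8)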
Code example #6
def numpixels(image):
  return image.view(rgb_dtype).size

def test(image):
  return numcolors(image) == numpixels(image)

if __name__ == '__main__':
  import argparse
  import sys

  parser = argparse.ArgumentParser(description="Generates an image with every RGB color exactly once")
  parser.add_argument('command',
    choices=['generate', 'test'],
    default='generate')
  parser.add_argument('-i', '--input',
    type=argparse.FileType('r'),
    default=sys.stdin)
  parser.add_argument('-o', '--output',
    type=argparse.FileType('w'),
    default=sys.stdout)

  args = parser.parse_args()
  image = read_image(args.input)

  if args.command == 'generate':
    write_image(args.output, generate(image))
  elif args.command == 'test':
    imagecolors = numcolors(image)
    imagepixels = numpixels(image)
    print("This image has {} colors and {} pixels".format(imagecolors, imagepixels))
    sys.exit(imagecolors != imagepixels)
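The test branch relies on a numcolors helper not shown here; given the rgb_dtype view used by numpixels, a plausible sketch is:

import numpy as np

def numcolors(image):
  # Count distinct (R, G, B) triples by viewing each pixel as one record.
  return np.unique(image.view(rgb_dtype)).size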
Code example #7
# Processing

red_band = color.red_band(original)
green_band = color.green_band(original)
blue_band = color.blue_band(original)

mono_red = color.monochromatic_red(original)
mono_green = color.monochromatic_green(original)
mono_blue = color.monochromatic_blue(original)

gray = color.rgb_to_gray(original)

# Save

util.write_image("red-" + name, red_band)
util.write_image("green-" + name, green_band)
util.write_image("blue-" + name, blue_band)
util.write_image("mono-red-" + name, mono_red)
util.write_image("mono-green-" + name, mono_green)
util.write_image("mono-blue-" + name, mono_blue)
util.write_image("gray-" + name, gray)

# Show

cv.imshow('Red', red_band)
cv.imshow('Green', green_band)
cv.imshow('Blue', blue_band)

cv.imshow('Monochromatic Red', mono_red)
cv.imshow('Monochromatic Green', mono_green)
Code example #8
"""
    Custom filters
"""

import cv2 as cv
import filter
import util
import color

name = "skate.jpg"
# name = "32bits.png"
original = util.read_image(name)
cv.imshow('Original', original)

# Custom 1
custom1 = filter.custom_filter1(original)
cv.imshow('Custom 1', custom1)
util.write_image("custom1-filter-" + name, custom1)

# Custom 2
custom2 = filter.custom_filter2(original)
cv.imshow('Custom 2', custom2)
util.write_image("custom2-filter-" + name, custom2)

cv.waitKey(0)
cv.destroyAllWindows()
Code example #9
"""

import cv2 as cv
import color
import util
import filter

name = 'tree.jpg'
img = util.read_image(name)
lightness = 255

# RGB to Gray
img_gray = color.rgb_to_gray(img)
cv.imshow('RGB to Gray (r=g=b)', img_gray)
util.write_image("gray-" + name, img_gray)

# Gray to Expansion
img_expansion = filter.histogram_expansion(img_gray, lightness)
cv.imshow('Histogram Expansion L=' + str(lightness), img_expansion)
util.write_image('hist-exp-l' + str(lightness) + '-' + name, img_expansion)

# Expansion to Equalization
img_equalization = filter.histogram_equalization(img_expansion)
cv.imshow('Histogram Expansion L=' + str(lightness) + ' + Equalization',
          img_equalization)
util.write_image('hist-exp-l' + str(lightness) + '-eq-' + name,
                 img_equalization)

# Gray to Equalization
img_equalization = filter.histogram_equalization(img_gray)
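For context: histogram expansion (contrast stretching) maps the observed gray range [min, max] linearly onto [0, lightness]. A minimal sketch for 8-bit grayscale input, under that assumption:

import numpy as np

def histogram_expansion(img, lightness):
    lo, hi = int(img.min()), int(img.max())
    if hi == lo:
        return img.copy()  # flat image: nothing to stretch
    stretched = (img.astype(np.float64) - lo) * (lightness / (hi - lo))
    return np.clip(stretched, 0, 255).astype(np.uint8)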
Code example #10
def write_augmentation(example, img, car_mask, road_mask, prev_ex, augmentation_name, augmentation):
  base = "/tmp/output/augment/" + str(ex) + "_"
  img, car_mask, road_mask = augmentation(img, car_mask, road_mask, prev_ex[0], prev_ex[1])
  util.write_image(base + augmentation_name + ".png", img)
  util.write_mask(base + augmentation_name + "_car.png", car_mask)
  util.write_mask(base + augmentation_name + "_road.png", road_mask)

prev_ex = (None,None)

for ex in examples:
  pre_out = "/tmp/output/preprocessing/" + str(ex)
  car_out = "/tmp/output/infer_car/" + str(ex)
  road_out = "/tmp/output/infer_road/" + str(ex)
  augment_out = "/tmp/output/augment/" + str(ex)
  img = util.read_train_image(ex)
  util.write_image(pre_out + ".png", img)
  util.write_image(car_out + ".png", img)
  util.write_image(road_out + ".png", img)
  util.write_image(augment_out + ".png", img)
  road_mask, car_mask = util.read_masks(ex)
  util.write_mask(car_out + "_truth.png",car_mask)
  util.write_mask(road_out + "_truth.png",road_mask)
  cropped = util.crop(img,util.preprocess_opts)
  util.write_image(pre_out + "_crop.png", cropped)
  uncropped = util.uncrop_image(cropped,util.preprocess_opts)
  util.write_image(pre_out + "_uncrop.png", uncropped)
  preprocessed = util.preprocess_input_image(img,util.preprocess_opts)
  car_infer = car_model.predict(np.array([preprocessed]), batch_size=1)[0]
  car_infer = util.postprocess_output(car_infer, util.preprocess_opts)
  util.write_probability(car_out + "_infer.png", car_infer)
  road_infer = road_model.predict(np.array([preprocessed]), batch_size=1)[0]
Code example #11
# RGB to Negative

import cv2 as cv
import color
import util

name = "skate.jpg"
original = util.read_image(name)

negative = color.negative(original)
cv.imshow('RGB Negative', negative)
util.write_image("rgb-neg-" + name, negative)

yiq = color.rgb_to_yiq(original)
yiq_negative = color.negative(yiq)
cv.imshow('YIQ Negative', yiq_negative)
util.write_image("yiq-neg-" + name, yiq_negative)

cv.waitKey(0)
cv.destroyAllWindows()
Code example #12
"""
Thresholding on Y with a threshold m.
    Case 1: m from user input
    Case 2: m = mean of the Y values
"""

import cv2 as cv
import color
import util


THRESHOLDING_FACTOR = 127

name = "skate.jpg"
original = util.read_image(name)

# Mean from user input
black_white_user = color.thresholding_user(original, THRESHOLDING_FACTOR)
cv.imshow('Thresholding User Input', black_white_user)
util.write_image("thres-user-" + name, black_white_user)

# Mean calculated from Y component
black_white_mean = color.thresholding_mean(original)
cv.imshow('Thresholding Mean', black_white_mean)
util.write_image("thres-mean-" + name, black_white_mean)

cv.waitKey(0)
cv.destroyAllWindows()
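A sketch of what the mean-based variant might do, assuming the Y channel uses the usual NTSC weights and OpenCV's BGR channel order (illustrative only, not the project's code):

import numpy as np

def thresholding_mean(img):
    b, g, r = img[..., 0], img[..., 1], img[..., 2]
    y = 0.299 * r + 0.587 * g + 0.114 * b  # luminance per pixel
    m = y.mean()                           # threshold = mean of Y
    return np.where(y >= m, 255, 0).astype(np.uint8)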
Code example #13
import dct
import util

# Original
name = "lena256.jpg"
img = util.read_image(name)

# DCT
dct_img = dct.transform_2d(img)
util.write_image("dct_" + name, dct_img)

# IDCT
idct_img = dct.i_transform_2d(dct_img)
util.write_image("idct_" + name, idct_img)
Code example #14
# RGB-YIQ-RGB conversion

import cv2 as cv
import color
import util

# An image is a matrix with dimensions [w][h][3]; the 3 channels are R, G and B.

name = "32bits.png"
original = util.read_image(name)

# RGB to YIQ
yiq = color.rgb_to_yiq(original)
cv.imshow('YIQ', yiq)
util.write_image("yiq-" + name, yiq)

# YIQ to RGB
rgb = color.yiq_to_rgb(yiq)
cv.imshow('RGB', rgb)
util.write_image("rgb-" + name, rgb)

cv.waitKey(0)
cv.destroyAllWindows()
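The RGB-to-YIQ step is a fixed linear transform. A minimal sketch using the standard NTSC matrix, assuming float arithmetic and RGB channel order (OpenCV loads BGR, so a real implementation may need to reorder channels):

import numpy as np

YIQ_MATRIX = np.array([
    [0.299,  0.587,  0.114],   # Y
    [0.596, -0.274, -0.322],   # I
    [0.211, -0.523,  0.312],   # Q
])

def rgb_to_yiq(rgb):
    # Matrix-multiply each pixel's (R, G, B) vector.
    return rgb.astype(np.float64) @ YIQ_MATRIX.T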
Code example #15
File: allrgb.py Project: allrgb/allrgb

if __name__ == '__main__':
    import argparse
    import sys

    parser = argparse.ArgumentParser(
        description="Generates an image with every RGB color exactly once")
    parser.add_argument('command',
                        choices=['generate', 'test'],
                        default='generate')
    parser.add_argument('-i',
                        '--input',
                        type=argparse.FileType('r'),
                        default=sys.stdin)
    parser.add_argument('-o',
                        '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout)

    args = parser.parse_args()
    image = read_image(args.input)

    if args.command == 'generate':
        write_image(args.output, generate(image))
    elif args.command == 'test':
        imagecolors = numcolors(image)
        imagepixels = numpixels(image)
        print("This image has {} colors and {} pixels".format(
            imagecolors, imagepixels))
        sys.exit(imagecolors != imagepixels)
Code example #16
import cv2 as cv
import filter
import util
import color

name = "skate.jpg"
# name = "lenna.png"
original = util.read_image(name)
# cv.imshow('Original', original)

# Sobel X
# sobel_x = cv.Sobel(original, cv.CV_64F, 1, 0, 3)
sobel_x = filter.sobel_x(original)
cv.imshow('Sobel X', sobel_x)
util.write_image("sobelx-filter-" + name, sobel_x)

# Sobel Y
# sobel_y = cv.Sobel(original, cv.CV_64F, 0, 1, 3)
sobel_y = filter.sobel_y(original)
cv.imshow('Sobel Y', sobel_y)
util.write_image("sobely-filter-" + name, sobel_y)

# Sobel XY
# sobel_xy = cv.Sobel(original, cv.CV_64F, 1, 1, 3)
sobel_xy = filter.sobel_xy(original)
cv.imshow('Sobel XY', sobel_xy)
util.write_image("sobelxy-filter-" + name, sobel_xy)

# Laplace
# laplace = cv.Laplacian(original, cv.CV_64F)
Code example #17
File: infer.py Project: divelab/GPT
def infer(
    gitapp: controller.GetInputTargetAndPredictedParameters,
    restore_directory: str,
    output_directory: str,
    extract_patch_size: int,
    stitch_stride: int,
    infer_size: int,
    channel_whitelist: Optional[List[str]],
    simplify_error_panels: bool,
):
    """Runs inference on an image.

  Args:
    gitapp: GetInputTargetAndPredictedParameters.
    restore_directory: Where to restore the model from.
    output_directory: Where to write the generated images.
    extract_patch_size: The size of input to the model.
    stitch_stride: The stride size when running model inference.
      Equivalently, the output size of the model.
    infer_size: The number of simultaneous inferences to perform in the
      row and column dimensions.
      For example, if this is 8, inference will be performed in 8 x 8 blocks
      for a batch size of 64.
    channel_whitelist: If provided, only images for the given channels will
      be produced.
      This can be used to create simpler error panels.
    simplify_error_panels: Whether to create simplified error panels.

  Raises:
    ValueError: If
      1) the DataParameters don't contain a ReadPNGsParameters,
      2) the images are smaller than the input to the network, or
      3) the graph contains queues.
  """
    rpp = gitapp.dp.io_parameters
    if not isinstance(rpp, data_provider.ReadPNGsParameters):
        raise ValueError(
            'Data provider must contain a ReadPNGsParameter, but was: %r',
            gitapp.dp)

    original_crop_size = rpp.crop_size
    image_num_rows, image_num_columns = util.image_size(rpp.directory)
    logging.info('Uncropped image size is %d x %d', image_num_rows,
                 image_num_columns)
    image_num_rows = min(image_num_rows, original_crop_size)
    if image_num_rows < extract_patch_size:
        raise ValueError(
            'Image is too small for inference to be performed: %d vs %d',
            image_num_rows, extract_patch_size)
    image_num_columns = min(image_num_columns, original_crop_size)
    if image_num_columns < extract_patch_size:
        raise ValueError(
            'Image is too small for inference to be performed: %d vs %d',
            image_num_columns, extract_patch_size)
    logging.info('After cropping, input image size is (%d, %d)',
                 image_num_rows, image_num_columns)

    num_row_inferences = (image_num_rows -
                          extract_patch_size) // (stitch_stride * infer_size)
    num_column_inferences = (image_num_columns - extract_patch_size) // (
        stitch_stride * infer_size)
    logging.info('Running %d x %d inferences', num_row_inferences,
                 num_column_inferences)
    num_output_rows = (num_row_inferences * infer_size * stitch_stride)
    num_output_columns = (num_column_inferences * infer_size * stitch_stride)
    logging.info('Output image size is (%d, %d)', num_output_rows,
                 num_output_columns)

    g = tf.Graph()
    with g.as_default():
        row_start = tf.placeholder(dtype=np.int32, shape=[])
        column_start = tf.placeholder(dtype=np.int32, shape=[])
        # Replace the parameters with a new set, which will cause the network to
        # run inference in just a local region.
        gitapp = gitapp._replace(dp=gitapp.dp._replace(
            io_parameters=rpp._replace(
                row_start=row_start,
                column_start=column_start,
                crop_size=(infer_size - 1) * stitch_stride +
                extract_patch_size,
            )))

        visualization_lts = controller.setup_stitch(gitapp)

        def get_statistics(tensor):
            rc = lt.ReshapeCoder(list(tensor.axes.keys())[:-1], ['batch'])
            return rc.decode(ops.distribution_statistics(rc.encode(tensor)))

        visualize_input_lt = visualization_lts['input']
        visualize_predict_input_lt = get_statistics(
            visualization_lts['predict_input'])
        visualize_target_lt = visualization_lts['target']
        visualize_predict_target_lt = get_statistics(
            visualization_lts['predict_target'])

        input_lt = lt.LabeledTensor(tf.placeholder(
            dtype=np.float32,
            shape=[
                1, num_output_rows, num_output_columns,
                len(gitapp.dp.input_z_values), 1, 2
            ]),
                                    axes=[
                                        'batch',
                                        'row',
                                        'column',
                                        ('z', gitapp.dp.input_z_values),
                                        ('channel', ['TRANSMISSION']),
                                        ('mask', [False, True]),
                                    ])
        predict_input_lt = lt.LabeledTensor(
            tf.placeholder(
                dtype=np.float32,
                shape=[
                    1,
                    num_output_rows,
                    num_output_columns,
                    len(gitapp.dp.input_z_values),
                    1,
                    len(visualize_predict_input_lt.axes['statistic']),
                ]),
            axes=[
                'batch',
                'row',
                'column',
                ('z', gitapp.dp.input_z_values),
                ('channel', ['TRANSMISSION']),
                visualize_predict_input_lt.axes['statistic'],
            ])
        input_error_panel_lt = visualize.error_panel_from_statistics(
            input_lt, predict_input_lt, simplify_error_panels)

        target_lt = lt.LabeledTensor(
            tf.placeholder(dtype=np.float32,
                           shape=[
                               1, num_output_rows, num_output_columns,
                               len(gitapp.dp.target_z_values),
                               len(gitapp.dp.target_channel_values) + 1, 2
                           ]),
            axes=[
                'batch',
                'row',
                'column',
                ('z', gitapp.dp.target_z_values),
                ('channel',
                 gitapp.dp.target_channel_values + ['NEURITE_CONFOCAL']),
                ('mask', [False, True]),
            ])
        predict_target_lt = lt.LabeledTensor(
            tf.placeholder(
                dtype=np.float32,
                shape=[
                    1,
                    num_output_rows,
                    num_output_columns,
                    len(gitapp.dp.target_z_values),
                    len(gitapp.dp.target_channel_values) + 1,
                    len(visualize_predict_target_lt.axes['statistic']),
                ]),
            axes=[
                'batch',
                'row',
                'column',
                ('z', gitapp.dp.target_z_values),
                ('channel',
                 gitapp.dp.target_channel_values + ['NEURITE_CONFOCAL']),
                visualize_predict_target_lt.axes['statistic'],
            ])

        logging.info('input_lt: %r', input_lt)
        logging.info('predict_input_lt: %r', predict_input_lt)
        logging.info('target_lt: %r', target_lt)
        logging.info('predict_target_lt: %r', predict_target_lt)

        def select_channels(tensor):
            if channel_whitelist is not None:
                return lt.select(tensor, {'channel': channel_whitelist})
            else:
                return tensor

        target_error_panel_lt = visualize.error_panel_from_statistics(
            select_channels(target_lt), select_channels(predict_target_lt),
            simplify_error_panels)

        # There shouldn't be any queues in this configuration.
        queue_runners = g.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
        if queue_runners:
            raise ValueError('Graph must not have queues, but had: %r',
                             queue_runners)

        logging.info('Attempting to find restore checkpoint in %s',
                     restore_directory)
        init_fn = util.restore_model(restore_directory,
                                     restore_logits=True,
                                     restore_global_step=True)

        with tf.Session() as sess:
            logging.info('Generating images')
            init_fn(sess)

            input_rows = []
            predict_input_rows = []
            target_rows = []
            predict_target_rows = []
            for infer_row in range(num_row_inferences):
                input_row = []
                predict_input_row = []
                target_row = []
                predict_target_row = []
                for infer_column in range(num_column_inferences):
                    rs = infer_row * infer_size * stitch_stride
                    cs = infer_column * infer_size * stitch_stride
                    logging.info('Running inference at offset: (%d, %d)', rs,
                                 cs)
                    [inpt, predict_input, target,
                     predict_target] = sess.run([
                         visualize_input_lt,
                         visualize_predict_input_lt,
                         visualize_target_lt,
                         visualize_predict_target_lt,
                     ],
                                                feed_dict={
                                                    row_start: rs,
                                                    column_start: cs
                                                })

                    input_row.append(inpt)
                    predict_input_row.append(predict_input)
                    target_row.append(target)
                    predict_target_row.append(predict_target)
                input_rows.append(np.concatenate(input_row, axis=2))
                predict_input_rows.append(
                    np.concatenate(predict_input_row, axis=2))
                target_rows.append(np.concatenate(target_row, axis=2))
                predict_target_rows.append(
                    np.concatenate(predict_target_row, axis=2))

            logging.info('Stitching')
            stitched_input = np.concatenate(input_rows, axis=1)
            stitched_predict_input = np.concatenate(predict_input_rows, axis=1)
            stitched_target = np.concatenate(target_rows, axis=1)
            stitched_predict_target = np.concatenate(predict_target_rows,
                                                     axis=1)

            logging.info('Creating error panels')
            [input_error_panel, target_error_panel, global_step] = sess.run(
                [
                    input_error_panel_lt, target_error_panel_lt,
                    tf.train.get_global_step()
                ],
                feed_dict={
                    input_lt: stitched_input,
                    predict_input_lt: stitched_predict_input,
                    target_lt: stitched_target,
                    predict_target_lt: stitched_predict_target,
                })

            output_directory = os.path.join(output_directory,
                                            '%.8d' % global_step)
            if not gfile.Exists(output_directory):
                gfile.MakeDirs(output_directory)

            util.write_image(
                os.path.join(output_directory, 'input_error_panel.png'),
                input_error_panel[0, :, :, :])
            util.write_image(
                os.path.join(output_directory, 'target_error_panel.png'),
                target_error_panel[0, :, :, :])

            logging.info('Done generating images')
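To make the tiling arithmetic in the docstring concrete, a quick worked example with invented sizes (not defaults from the project):

image_num_rows = 1024    # cropped input height
extract_patch_size = 64  # model input size
stitch_stride = 8        # model output size per inference
infer_size = 8           # 8 x 8 simultaneous inferences, batch size 64

num_row_inferences = (image_num_rows - extract_patch_size) // (stitch_stride * infer_size)
# (1024 - 64) // 64 = 15 blocks of inferences along the row dimension
num_output_rows = num_row_inferences * infer_size * stitch_stride
# 15 * 8 * 8 = 960 output rows

# Each block crops a local window of
# (infer_size - 1) * stitch_stride + extract_patch_size = 7 * 8 + 64 = 120 rows.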
Code example #18
"""
Multiplicative brightness control.
Resulting pixel value = original pixel value * c (c a constant factor).
Watch out for R, G and B bounds!
"""

import cv2 as cv
import color
import util

BRIGHTNESS_FACTOR = 0.25

name = "skate.jpg"
original = util.read_image(name)

# RGB Brightness
bright = color.multiplicative_brightness(original, BRIGHTNESS_FACTOR)
cv.imshow('Multiplicative Brightness (RGB)', bright)
util.write_image("mult-bright-" + name, bright)

# YIQ Brightness
yiq = color.rgb_to_yiq(original)
bright = color.multiplicative_brightness(yiq, BRIGHTNESS_FACTOR)
cv.imshow('Multiplicative Brightness (YIQ)', bright)
util.write_image("yiq-mult-bright-" + name, bright)

cv.waitKey(0)
cv.destroyAllWindows()
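The multiplicative variant can be sketched the same way as the additive one above, again assuming 8-bit NumPy images (an illustration, not the project's implementation):

import numpy as np

def multiplicative_brightness(img, c):
    # Scale in float, then clip back into [0, 255] per channel.
    return np.clip(img.astype(np.float64) * c, 0, 255).astype(np.uint8)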
Code example #19
    [0, 0, 2],
    [0, -1, 0],
    [-1, 0, 0]
])

_offset = 1

# Original
name = "monkey.jpg"
_img = util.read_image(name)
cv.imshow("Original", _img)

# Gray
img_gray = color.rgb_to_gray(_img)
cv.imshow("Gray", img_gray)
util.write_image("gray-" + name, img_gray)

# Convolution Kernel c1
util.do_convolution_show_and_write(_img, kernel_c1, "conv-c1-" + name, _offset)
# Convolution Gray Kernel c1
util.do_convolution_show_and_write(img_gray, kernel_c1, "conv-c1-gray-" + name, _offset)

# 2
util.do_convolution_show_and_write(_img, kernel_c2, "conv-c2-" + name, _offset)
util.do_convolution_show_and_write(img_gray, kernel_c2, "conv-c2-gray-" + name, _offset)

# 3
util.do_convolution_show_and_write(_img, kernel_c3, "conv-c3-" + name, _offset)
util.do_convolution_show_and_write(img_gray, kernel_c3, "conv-c3-gray-" + name, _offset)

Code example #20
import cv2 as cv

import util

# Equalization with OpenCV
img_cv = cv.imread('img/tree.jpg', 0)
cv_eq = cv.equalizeHist(img_cv)
cv.imshow('cv-eq-tree.jpg', cv_eq)
util.write_image('cv-eq-tree.jpg', cv_eq)

cv.waitKey(0)
cv.destroyAllWindows()