def image_rotate_keep(image_path, rotate_degrees):
    '''
    Rotate the image and keep the entire content
    https://blog.paperspace.com/data-augmentation-for-object-detection-rotation-and-shearing/
    '''

    image_src = cv2.imread(image_path)
    (image_src_h, image_src_w, _) = image_src.shape

    # build the rotation matrix about the image center
    center = (image_src_w / 2, image_src_h / 2)
    angle = rotate_degrees
    scale = 1.0
    rotation_transform = cv2.getRotationMatrix2D(center, angle, scale)

    # the rotation matrix holds the cos and sin of the angle; take their absolute values
    abs_cos = abs(rotation_transform[0, 0])
    abs_sin = abs(rotation_transform[0, 1])

    # find the new width and height bounds
    bound_w = int(image_src_h * abs_sin + image_src_w * abs_cos)
    bound_h = int(image_src_h * abs_cos + image_src_w * abs_sin)

    # shift the rotation center: subtract the old image center (moving the image
    # back to the origin), then add the new image center coordinates
    rotation_transform[0, 2] += bound_w / 2 - image_src_w / 2
    rotation_transform[1, 2] += bound_h / 2 - image_src_h / 2

    # rotate image with the new bounds and translated rotation matrix
    image_rotated = cv2.warpAffine(image_src, rotation_transform,
                                   (bound_w, bound_h))

    plot_image(image_rotated)
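
A minimal call, for reference (the image path is a placeholder, not from the original):

# rotate by 45 degrees; the enlarged canvas keeps every corner visible,
# unlike image_rotate (Example #17 below), which crops to the original bounds
image_rotate_keep("img/lena.png", 45)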
Example #2
def plot_agument(path, **kwargs):
    """
    Visualize the prediction results, one image at a time
    :param
        path(str) -- path to a text file listing the image paths
    :kwargs
    """
    # run the model predictions
    cm_plot_labels = ['MEL', 'NV', 'BCC', 'AKIEC', 'BKL', 'DF', 'VASC']
    with open(path, 'rt') as f:
        lines = f.readlines()
    outcome = []
    for line in lines:
        line = line.strip()
        data = process_data_agument(line)
        output = predict_agument(data, **kwargs)
        outcome.append(output)

    # visualize the prediction results
    title = []
    for val in outcome:
        title.append(cm_plot_labels[val])
    ims = np.zeros((0, 224, 224, 3))
    for line in lines:
        line = line.strip()
        img = Image.open(line)
        img = np.array(img)
        img = img[np.newaxis, :, :, :]
        ims = np.concatenate((ims, img), 0)

    plot_image(ims, title)
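
Growing ims with np.concatenate on every iteration is quadratic in the number of images; a minimal alternative (assuming every image is 224x224x3, as the code above already requires) stacks once:

import numpy as np
from PIL import Image

# read all images listed in `lines`, then stack into one (N, 224, 224, 3) array
paths = [line.strip() for line in lines]
ims = np.stack([np.array(Image.open(p)) for p in paths], axis=0)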
Example #3
def get_image(filename, label):
    img = load_image(filename)
    #"""
    if img is None:
        print("image is None")
        raise SystemExit(-1)
    path = os.path.abspath(__file__)
    dir_path = os.path.dirname(path)
    haar_face_cascade = cv2.CascadeClassifier()
    result = haar_face_cascade.load(
        "{0}/../haarcascade_frontalface_alt.xml".format(dir_path))
    #result = haar_face_cascade.load("../haarcascade_frontalface_alt.xml")
    if not result:
        print("Error loading haarcascade")
        raise SystemExit(-1)

    face_imgs = extract_faces(img, haar_face_cascade=haar_face_cascade)
    if len(face_imgs) == 0:
        print("No faces found")
        raise SystemExit(-1)
    img = face_imgs[0]
    #resized_img = cv2.resize(img, (FLAGS.image_size, FLAGS.image_size), interpolation=cv2.INTER_CUBIC)
    img = resize_image(img, FLAGS.image_size)
    #"""
    #img = preprocess_image(img, image_size=FLAGS.image_size)

    img = grayscale_image(img)

    plot_image(img)

    img = img / 255

    label = np.array([label])

    return img, label
Example #4
    def paralelepipedo():
        ladoX = 1.5
        ladoY = 5
        ladoZ = 2.5

        # NOTE: parallel_points is not defined in this snippet; presumably the
        # corner points are built from the side lengths above
        ax, verts = make_rectangle(parallel_points)
        plot_image(ax, verts, "green", "b")
Example #5
def test():
    anchors = config.ANCHORS

    transform = config.test_transforms

    dataset = YOLODataset(
        "COCO/train.csv",
        "COCO/images/images/",
        "COCO/labels/labels_new/",
        S=[13, 26, 52],
        anchors=anchors,
        transform=transform,
    )
    S = [13, 26, 52]
    scaled_anchors = torch.tensor(anchors) / (
        1 / torch.tensor(S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2))
    loader = DataLoader(dataset=dataset, batch_size=1, shuffle=True)
    for x, y in loader:
        boxes = []

        for i in range(y[0].shape[1]):
            anchor = scaled_anchors[i]
            print(anchor.shape)
            print(y[i].shape)
            boxes += cells_to_bboxes(y[i],
                                     is_preds=False,
                                     S=y[i].shape[2],
                                     anchors=anchor)[0]
        boxes = nms(boxes,
                    iou_threshold=1,
                    threshold=0.7,
                    box_format="midpoint")
        print(boxes)
        plot_image(x[0].permute(1, 2, 0).to("cpu"), boxes)
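
The double reciprocal in scaled_anchors is just an elementwise multiply: anchors given as fractions of the image are rescaled to grid-cell units at each scale. A minimal equivalent sketch (the anchor values here are illustrative, not taken from config):

import torch

S = [13, 26, 52]
anchors = [[(0.28, 0.22), (0.38, 0.48), (0.90, 0.78)],
           [(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],
           [(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)]]
# (3, 3, 2) anchors broadcast against (3, 1, 1) grid sizes
scaled = torch.tensor(anchors) * torch.tensor(S).view(3, 1, 1)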
Example #6
def main(args):
    # run the model forward with some initial conditions
    # these are the labels.
    f, g = get_initial_conditions(args)
    u = run_heat_forward(f, args)

    # create new initial conditions
    f_est, g_est = get_initial_conditions(args)

    for _ in range(args.num_inv_steps):
        u_est = run_heat_forward(f_est, args)

        # compute the error and gradients
        err = ((u_est - u.data)**2).sum()
        f_est.register_hook(save_grad('f_est'))
        err.backward()
        f_est = f_est - args.lr * grads['f_est']
        print('u_T error: {:.4f}'.format(err))
        f_err = loss(f_est, f)  # ((f - f_est)**2).sum()
        print('f_err: {:.4f}'.format(f_err))

    if args.inverse_debug:
        img, fig = plot_image(f, title='True Initial Condition (f)')
        plt.figure(1)
        img, fig = plot_image(f_est,
                              title='Estimated Initial Condition (f_est)')
        plt.figure(2)
        plt.show()
    return f, f_est
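
The hook-and-dict pattern stores d err / d f_est in grads; a minimal sketch of the same update using torch.autograd.grad instead (assuming f_est is a leaf tensor with requires_grad=True):

import torch

# gradient of the scalar loss with respect to the current estimate
grad_f = torch.autograd.grad(err, f_est)[0]
f_est = (f_est - args.lr * grad_f).detach().requires_grad_(True)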
Example #7
def color_histogram_equalisation_per_channel():

    image_src = cv2.imread("lena.png")

    image_channels = cv2.split(image_src)

    channels = [0]
    mask = None
    hist_size = [256]
    ranges = [0, 256]

    image_channels_equ = []
    for image_channel in image_channels:
        image_channel_equ = cv2.equalizeHist(image_channel)
        image_channels_equ.append(image_channel_equ)

    image_equ = cv2.merge(image_channels_equ)

    plot_image(image_equ)

    for image_channel, color in zip(image_channels, ["b", "g", "r"]):
        image_hist = cv2.calcHist([image_channel], channels, mask, hist_size,
                                  ranges)
        plt.plot(image_hist, color=color)
        plt.xlim([0, 256])

    plt.show()
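
Equalising B, G and R independently can shift hues; Example #30 below avoids this by equalising only the luma channel. A minimal sketch of that variant, assuming image_src is a BGR array:

import cv2

# convert to YCrCb, equalise only the luma (Y) channel, convert back
ycrcb = cv2.cvtColor(image_src, cv2.COLOR_BGR2YCrCb)
ycrcb[:, :, 0] = cv2.equalizeHist(ycrcb[:, :, 0])
image_equ = cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)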
Example #8
def test_vae(vae):
    for part in ('train', 'val', 'test'):
        input_images = dataset.fetch_smallbatch_from_celeba('CelebA',
                                                            part=part)
        rec_images = vae.get_layer('decoder').predict(
            vae.get_layer('encoder').predict(input_images)[0])
        plot_image(img_renorm(input_images), img_renorm(rec_images))
Example #9
def main():
    model = Yolov1(split_size=7, num_boxes=2, num_classes=20).to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
    loss_fn = YoloLoss()
    train_dataset = VOCDataset("data/train.csv", transform=transform, img_dir=IMG_DIR, label_dir=LABEL_DIR)
    test_dataset = VOCDataset("data/test.csv", transform=transform, img_dir=IMG_DIR, label_dir=LABEL_DIR)
    train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, num_workers=1, pin_memory=PIN_MEMORY, shuffle=True, drop_last=True)
    test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, num_workers=1, pin_memory=PIN_MEMORY, shuffle=True, drop_last=True)
    for epoch in range(EPOCHS):
        pred_boxes, target_boxes = get_bboxes(train_loader, model, iou_threshold=0.5, threshold=0.4)
        mAP = mean_average_precision(pred_boxes, target_boxes, iou_threshold=0.5)
        print(f"Train mAP:{mAP}")
        train_fn(train_loader, model, optimizer, loss_fn)
    # after training, visualise predictions on the test set
    if epoch > 99:
        for x, y in test_loader:
            x = x.to(DEVICE)
            for idx in range(16):
                bboxes = cellboxes_to_boxes(model(x))
                bboxes = non_max_suppression(bboxes[idx], iou_threshold=0.5, threshold=0.4)
                plot_image(x[idx].permute(1, 2, 0).to("cpu"), bboxes)


if __name__ == "__main__":
    main()
Example #10
def random(vae):
    z_shape = K.int_shape(vae.get_layer('encoder').outputs[0])
    z = K.random_normal(shape=(z_shape[1], ))
    z = z.eval(session=tf.Session())
    z = np.expand_dims(z, axis=0)

    modified_image = vae.get_layer('decoder').predict(z)
    plot_image(img_renorm(modified_image), img_renorm(modified_image))
Example #11
def process_in_question_one(datalist,
                            idx,
                            city_name,
                            ma_mode=0,
                            saved_folder=None,
                            saved=True):
    # first compute the moving averages
    simple_average_data = utils.SampleMovingAverage(datalist[idx])
    culmative_average_data = utils.CumulativeMovingAverage(datalist[idx])
    expontial_average_data = utils.ExponentialMovingAverage(datalist[idx])
    draw_data_list = [
        datalist[idx], simple_average_data, culmative_average_data,
        expontial_average_data
    ]
    data_lengend = [
        "Original {} Data".format(city_name), "Simple Moving Average Data",
        "Cumulative Average Data", "Exponential Moving Average Data"
    ]
    ma_data_list = [
        simple_average_data, culmative_average_data, expontial_average_data
    ]
    # Draw the data
    utils.plot_all(
        draw_data_list,
        data_lengend,
        "Date(only show the point of 0:00 in the X axe)",
        "Tide value ",
        "Tide value per day of {} with Moving Average".format(city_name),
        xticks=xticks_para,
        figsize=(16, 10),
        saved="{}/Moving_Average_{}.png".format(saved_folder, city_name))

    # select which kind of moving-average data to use; the default is the simple one
    simple_average_data = ma_data_list[ma_mode]

    # Remove the Bias and Trend of the Data for further processing
    simple_average_data = utils.remove_bias_and_trend(simple_average_data)

    utils.plot_image(simple_average_data,
                     "Time(hours)",
                     "Tide Value",
                     "{} Tide reduced Data".format(city_name),
                     "{} Data".format(city_name),
                     saved="{}/{}_processed".format(saved_folder, city_name))

    # Draw the ACF plot of the data
    utils.draw_acf(simple_average_data,
                   saved="{}/{}_acf.png".format(saved_folder, city_name))

    # Draw the PACF plot of the data
    utils.draw_pacf(simple_average_data,
                    saved='{}/{}_pacf.png'.format(saved_folder, city_name))

    # Draw the amplitude spectrum and power spectrum
    utils.draw_spectrum(simple_average_data, city_name, saved_folder)

    return simple_average_data
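
The utils moving-average helpers are not shown here; a minimal stand-in for the simple moving average (the window size is an assumption, not from the original):

import numpy as np

def simple_moving_average(x, window=24):
    # flat-kernel convolution; 'valid' drops the partially covered edges
    kernel = np.ones(window) / window
    return np.convolve(np.asarray(x, dtype=float), kernel, mode="valid")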
Example #12
def view_image(image, hparams, mask=None):
    """Process and show the image"""
    image = display_transform(image)
    if len(image) == hparams.n_input:
        image = image.reshape([28, 28])
        if mask is not None:
            mask = mask.reshape([28, 28])
            image = np.maximum(np.minimum(1.0, image - 1.0*(1-mask)), 0.0)
    utils.plot_image(image, 'Greys')
Example #13
def indice_nubosidad(frame, x1, y1, x2, y2):
    '''
    Complete the code here
    '''

    utils.plot_image(frame[y1:y2, x1:x2], 'Imagen APT ')
    index = 0

    return index
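
The body is left as an exercise; one plausible cloudiness index, sketched as a hypothetical (the brightness threshold is an assumption):

import numpy as np

def indice_nubosidad_sketch(frame, x1, y1, x2, y2, threshold=180):
    # fraction of bright (cloud-like) pixels in the cropped region
    region = np.asarray(frame)[y1:y2, x1:x2]
    return float((region > threshold).mean())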
Example #14
def view_image(image, hparams, mask=None):
    """Process and show the image"""
    if len(image) == hparams.n_input:
        image = image.reshape(hparams.image_shape)
        image = transpose(image)
        if mask is not None:
            mask = mask.reshape(hparams.image_shape)
            image = np.maximum(
                np.minimum(1.0, image - 1.0 * image * (1 - mask)), -1.0)
    utils.plot_image(scale(image))
Example #15
def test_trans(translator, discriminator, image_file_name, target_gender):
    image = read_image(image_file_name)
    image = np.expand_dims(image, axis=0)
    target_gender = np.expand_dims(target_gender, axis=0)
    translated_img = translator.predict([image, target_gender])

    r_src, _, r_cls = discriminator.predict(image)
    g_src, _, g_cls = discriminator.predict(translated_img)

    plot_image(img_renorm(image), img_renorm(translated_img))
    print(f'input: {r_src} , {r_cls} - translated: {g_src} , {g_cls}')
Example #16
 def get_inital_image_noise(self, channel):
     """
     get the initial image by convolving the channel with the kernel M
     :param channel: one of the channels of the RGB color image
     :return: the initial image
     """
     inital_image_noise = convolve2d(channel,
                                     self.kernel_M,
                                     mode='same',
                                     boundary='fill',
                                     fillvalue=0)
     plot_image(inital_image_noise, 'Initial Image Noise')
     return inital_image_noise
Example #17
def image_rotate(image_path, rotate_degrees):
    image_src = cv2.imread(image_path)
    (image_src_h, image_src_w, _) = image_src.shape

    # build the rotation matrix about the image center
    center = (image_src_w / 2, image_src_h / 2)
    angle = rotate_degrees
    scale = 1.0
    rotation_transform = cv2.getRotationMatrix2D(center, angle, scale)
    image_rotated = cv2.warpAffine(image_src, rotation_transform,
                                   (image_src_w, image_src_h))

    plot_image(image_rotated)
Example #18
 def get_unsaturated_region_mask(self, channel):
     """
     get the unsaturated region mask from the channel
     :param channel: one of the channels of the RGB color image
     :return: the unsaturated region mask
     """
     between = np.logical_and(channel >= self.t_l, channel <= self.t_h)
     out = np.logical_or(channel < self.t_l, channel > self.t_h)
     unsaturated_region_mask = channel.copy()
     unsaturated_region_mask[between] = 1
     unsaturated_region_mask[out] = 0
     plot_image(unsaturated_region_mask, 'Unsaturated Region Mask')
     return unsaturated_region_mask
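
The two boolean masks collapse into one vectorised expression; a minimal equivalent, assuming channel is a NumPy array and t_l, t_h are the thresholds:

# 1 where t_l <= channel <= t_h, 0 elsewhere
unsaturated = ((channel >= t_l) & (channel <= t_h)).astype(channel.dtype)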
Example #19
def trans_attribute(vae, image_file_name, attribute_vectors_file,
                    attr_trans_dic):
    attribute_vectors = np.load(attribute_vectors_file).item()

    image = read_image(image_file_name)
    image = np.expand_dims(image, axis=0)

    z = vae.get_layer('encoder').predict(image)[0]

    for attr, trans in attr_trans_dic.items():
        z[0] += (attribute_vectors[attr] * trans)

    modified_image = vae.get_layer('decoder').predict(z)
    plot_image(img_renorm(image), img_renorm(modified_image))
Example #20
 def get_gradient_image(self, grayscale_image):
     """
     Get the gradient magnitude image from a grayscale image
     :param grayscale_image: a numpy array in grayscale format
     :return: the gradient image of the input image
     """
     dX = cv2.Sobel(grayscale_image, cv2.CV_32F, 1, 0, ksize=3)
     dY = cv2.Sobel(grayscale_image, cv2.CV_32F, 0, 1, ksize=3)
     mag, direction = cv2.cartToPolar(dX, dY, angleInDegrees=True)
     mag_disp = mag / 1448  # scale float magnitudes into the 0-1 range
     plot_image(mag_disp, 'Gradient Image')
     mag_disp_byte = mag / 5.6  # scale to the uchar 0-255 range
     mag_disp_byte = mag_disp_byte / 255
     return mag_disp_byte
Example #21
def monitor_samples():
    itr = iter(sampling_loader)
    for i in range(8):
        data = next(itr)
        chars, chars_mask, strokes, strokes_mask = [x.cuda() for x in data]

        with torch.no_grad():
            stroke_loss, eos_loss, monitor_vars, _, teacher_forced_sample = model.compute_loss(
                chars, chars_mask, strokes, strokes_mask)
            generated_sample = model.sample(chars, chars_mask)[0]

        teacher_forced_sample = teacher_forced_sample.cpu().numpy()
        generated_sample = generated_sample.cpu().numpy()

        # Plotting image for phi
        phi = monitor_vars.pop('phi')
        fig = plot_image(phi[0].squeeze().cpu().numpy().T)
        writer.add_figure('attention/phi_%d' % i, fig, steps)

        # Line plot for alpha, beta and kappa
        for key, val in monitor_vars.items():
            fig = plot_lines(val[0].cpu().numpy().T)
            writer.add_figure('attention/%s_%d' % (key, i), fig, steps)

        # Draw generated and teacher forced samples
        fig = draw(generated_sample[0],
                   save_file=root / ("generated_%d.png" % i))
        writer.add_figure("samples/generated_%d" % i, fig, steps)

        fig = draw(teacher_forced_sample[0],
                   save_file=root / ("teacher_forced_%d.png" % i))
        writer.add_figure("samples/teacher_forced_%d" % i, fig, steps)
Example #22
    def validate(self, epoch, n_epochs):
        self.model.eval()
        loss_value = 0.0
        Accuracy = 0.0
        for i, (_, sample) in enumerate(self.Dataloader_val):
            inputs = sample['image']
            labels = sample['masks']
            inputs = inputs.cuda()
            labels = labels.cuda()
            outputs = self.model(inputs.float())
            loss = self.criterion(outputs.float(), labels)
            loss_value += loss.item()
            Accuracy += iou(torch.sigmoid(outputs), labels)
            if i == 0:
                preds = {
                    'image': inputs.detach().cpu(),
                    'masks':
                    (torch.sigmoid(outputs) > 0.3).float().detach().cpu()
                }
                self.writer.add_figure('Visualisations',
                                       plot_image(sample, preds), self.steps)

        loss_value /= len(self.Dataloader_val)
        Accuracy /= len(self.Dataloader_val)
        print('[%d/%d][%d/%d]\tVal Loss: %.4f\tVal Accuracy: %.4f ' %
              (epoch, n_epochs, i, len(
                  self.Dataloader_val), loss_value, Accuracy))
        self.writer.add_scalar('Validation loss ', loss_value, self.steps)
        self.writer.add_scalar('Validation accuracy ', Accuracy, self.steps)
Example #23
def validate(model, criterion, val_loader, iteration, writer):
    model.eval()
    with torch.no_grad():
        n_data, val_loss = 0, 0
        for i, batch in enumerate(val_loader):
            n_data += len(batch[0])
            text_padded, text_lengths, mel_padded, mel_lengths, align_padded = [
                x.cuda() for x in batch
            ]
            mel_out, durations, durations_out = model.module.outputs(
                text_padded, align_padded, text_lengths, mel_lengths)
            mel_loss, duration_loss = criterion((mel_out, durations_out),
                                                (mel_padded, durations),
                                                (text_lengths, mel_lengths))
            val_loss += (mel_loss + duration_loss).item() * len(batch[0])

        val_loss /= n_data

    writer.add_scalar('val_loss',
                      val_loss,
                      global_step=iteration // hparams.accumulation)

    fig = plot_image(mel_padded, mel_out, align_padded, text_padded,
                     mel_lengths, text_lengths)
    writer.add_figure('Validation plots',
                      fig,
                      global_step=iteration // hparams.accumulation)

    model.train()
Example #24
def compute_attribute_vector(vae,
                             attrs,
                             attribute_vectors_file,
                             batch_size=32):
    sess = K.get_session()
    encoder = vae.get_layer('encoder')
    z_shape = K.int_shape(encoder.outputs[0])
    pos_vectors = np.zeros((len(attrs), z_shape[1]), np.float32)
    neg_vectors = np.zeros((len(attrs), z_shape[1]), np.float32)
    pos_nums = np.zeros((len(attrs), 1), np.int32)
    neg_nums = np.zeros((len(attrs), 1), np.int32)

    data, total_num = dataset.load_full_celeba_with_labels(
        'CelebA', batch_size, attrs)

    iterator = data.make_one_shot_iterator()
    next_element = iterator.get_next()
    while True:
        try:
            images, labels = sess.run(next_element)
            z = encoder.predict(images)[0]

            for i in range(len(attrs)):
                pos_idx = np.argwhere(labels[:, i] == 1)[:, 0]
                neg_idx = np.argwhere(labels[:, i] == -1)[:, 0]
                pos_vec = np.sum(z[pos_idx, :], 0)
                neg_vec = np.sum(z[neg_idx, :], 0)
                pos_nums[i][0] += len(pos_idx)
                neg_nums[i][0] += len(neg_idx)
                pos_vectors[i] += pos_vec
                neg_vectors[i] += neg_vec
        except tf.errors.OutOfRangeError:
            break

    pos_vectors /= pos_nums
    neg_vectors /= neg_nums

    attribute_vectors = {}
    pos_images = vae.get_layer('decoder').predict(pos_vectors)
    neg_images = vae.get_layer('decoder').predict(neg_vectors)
    for i in range(len(attrs)):
        attribute_vectors[attrs[i]] = pos_vectors[i] - neg_vectors[i]
        # draw the attribute for debugging
        print(attrs[i])
        plot_image([img_renorm(pos_images[i])], [img_renorm(neg_images[i])])

    np.save(attribute_vectors_file, attribute_vectors)
Example #25
    def get_mapped_gradient(self, gradient_image):
        """
        get the mapped gradient image from the gradient image
        :param gradient_image: a numpy array holding the gradient image
        :return: a numpy array holding the mapped gradient image
        """
        greater = gradient_image >= self.gamma
        lower = gradient_image < self.gamma

        mapped_gradient_image = gradient_image.copy()
        mapped_gradient_image[greater] = (1 / self.N_g) * np.log(
            self.lamda * (mapped_gradient_image[greater] - self.gamma) + 1)
        mapped_gradient_image[lower] = 0

        plot_image(mapped_gradient_image, 'Mapped Gradient Image')

        return mapped_gradient_image
Example #26
def test_knn_matching(image1, kp1, desc1, image2, kp2, desc2):
    bf = cv2.BFMatcher(cv2.NORM_L2)
    matches = bf.knnMatch(desc1, desc2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:  # ratio test
            good.append(m)
    print(f'{len(good)} matches')
    image_draw = cv2.drawMatches(
        image1,
        kp1,
        image2,
        kp2,
        good,
        None,
        flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
    plot_image(image_draw)
Example #27
def test_bruteforce_matching(image1, kp1, desc1, image2, kp2, desc2, n=-1):
    # https://docs.opencv.org/4.3.0/dc/dc3/tutorial_py_matcher.html
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    matches = bf.match(desc1, desc2)
    matches = sorted(matches, key=lambda x: x.distance)
    if n > 0:
        matches = matches[:n]
    print(f'{len(matches)} matches')
    image_draw = cv2.drawMatches(
        image1,
        kp1,
        image2,
        kp2,
        matches,
        None,
        flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
    plot_image(image_draw)
Example #28
    def save_solution(self, sections_segmented, original_image):
        """Saves the solution provided by Segmentation.

        Each organ found will be grouped in one image and that image will be
        saved as the solution.

        If the argparse arguments were not used, then the solution will be saved
        in the current folder under the name 'optim.out'.

        Otherwise, the solutions will be saved in the self.args.output_dir
        directory with names 'same_number-opt.out'.

        For more visualisation of the solutions, uncomment the last two lines
        of this method (the elapsed time for each extraction will increase
        while the plots are open).

        Args:
            sections_segmented (list): All the DicomSegmentationData objects
                for the current input data (there is one for each organ,
                if there is more than one).
            original_image (np.array): The original DICOM image, used for
                visualisation purposes only.

        """
        solution = np.zeros((512, 512), dtype=np.uint8)
        doctor_image = np.zeros((512, 512), dtype=np.uint8)

        for section in sections_segmented:
            solution += section.image.astype(np.uint8)
            doctor_image += section.doctor_image.astype(np.uint8)
        
        solution[solution > 1] = 1
        solution = solution.astype(np.uint8)
        doctor_image[doctor_image > 1] = 1

        
        if self.no_parser:
            solution_name = 'optim.out'
        else:
            solution_name = "{}/{}-opt.out".format(self.args.output_dir,
                sections_segmented[0].file_name)

        np.savetxt(solution_name, solution, fmt="%d")
        
        plot_image(solution)
        plot_comparison(original_image, solution, doctor_image)
Example #29
 def get_homogeneous_region_mask(self, channel):
     """
     get the homogeneous region mask from the channel
     :param channel: one of channels of the RGB color image
     :return: the homogeneous mask
     """
     gradient_image = self.get_gradient_image(channel)
     gradient_vector = gradient_image.ravel()
     gradient_vector = np.sort(gradient_vector)
     ordinal_rank = math.ceil((0.1 / 100) * len(gradient_vector))
     delta = gradient_vector[ordinal_rank]
     homogeneous_region_mask = gradient_image.copy()
     lower = gradient_image <= delta
     greater = gradient_image > delta
     homogeneous_region_mask[lower] = 1
     homogeneous_region_mask[greater] = 0
     plot_image(homogeneous_region_mask, 'Homogeneous Region Mask')
     return homogeneous_region_mask
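
The ordinal-rank computation above is a hand-rolled 0.1th percentile; a near-equivalent (up to interpolation) in one NumPy call:

import numpy as np

# threshold at the 0.1th percentile of the gradient magnitudes
delta = np.percentile(gradient_image, 0.1)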
Example #30
def color_histogram_equalisation_per_channel_correct():

    image_src = cv2.imread("img/lena_dark.png")
    image_src = cv2.cvtColor(image_src, cv2.COLOR_BGR2YCrCb)

    image_channels = cv2.split(image_src)
    light_channel = image_channels[0]

    channels = [0]
    mask = None
    hist_size = [256]
    ranges = [0, 256]

    light_channel_equ = cv2.equalizeHist(light_channel)
    image_channels[0] = light_channel_equ

    image_equ = cv2.merge(image_channels)
    image_equ = cv2.cvtColor(image_equ, cv2.COLOR_YCrCb2BGR)

    plot_image(image_equ)