def main():
    mode = 'rgb'
    root_dir = "./training/"

    image_dir = root_dir + "images/"
    gt_dir = root_dir + "groundtruth/"

    imgs, _ = load_all(image_dir, mode)
    gt_imgs, _ = load_all(gt_dir, mode)
    count = 0

    # Create the output directories if they do not already exist.
    new_image_dir = './augmented_training/satellite/'
    new_gt_dir = './augmented_training/ground_truth/'

    os.makedirs(new_image_dir, exist_ok=True)
    os.makedirs(new_gt_dir, exist_ok=True)

    for i, (img, gt_img) in enumerate(zip(imgs, gt_imgs)):
        tmp = img
        gt_tmp = gt_img
        for k in range(2):
            for j in range(4):
                save_image(new_image_dir + 'sat_{}.png'.format(count), tmp)
                save_image(new_gt_dir + 'gt_{}.png'.format(count), gt_tmp)
                tmp = np.rot90(tmp)
                gt_tmp = np.rot90(gt_tmp)
                count += 1
            tmp = np.flip(tmp, 0)
            gt_tmp = np.flip(gt_tmp, 0)
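The nested loop above enumerates the eight symmetries of a square image (the dihedral group D4): the four 90-degree rotations of the original, then the four rotations of its vertical flip. A minimal sketch of the same idea as a reusable generator; dihedral_augmentations is a name introduced here, not part of the original code:

import numpy as np

def dihedral_augmentations(img):
    # Yield the 8 dihedral symmetries of img: 4 rotations, then
    # 4 rotations of the vertically flipped image, matching the loop above.
    for base in (img, np.flip(img, 0)):
        for _ in range(4):
            yield base
            base = np.rot90(base)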
Example #2
    def __init__(self, path, show_steps=False, save=False):
        self.image = Helpers.load_image(path)

        # build the preprocessing pipeline
        pipeline = Pipeline([
            Helpers.convert_to_grayscale, lambda image: Helpers.blur(image, 5),
            Helpers.thresholdify, Helpers.ellipse_morph
        ])

        processed_image = pipeline.process_pipeline(self.image)

        # get the contour, crop it out, find the corners and straighten
        contour = Helpers.largest_contour(processed_image)
        processed_image_cropped = Helpers.cut_out_rect(processed_image,
                                                       contour)
        corners = Helpers.get_corners(processed_image_cropped)

        # apply the same cropping and warping to the original image
        image_cropped = Helpers.cut_out_rect(self.image, contour)
        straightened_image = Helpers.warp_perspective(corners, image_cropped)

        if show_steps:
            Helpers.show(processed_image, 'Preprocessing')
            Helpers.show(processed_image_cropped, 'Processed image cropped')
            Helpers.show(image_cropped, 'Original image cropped')
            Helpers.show(straightened_image, 'Final image')

        self.final = straightened_image

        if save:
            Helpers.save_image(
                f'{Helpers.IMAGE_DIRECTORY}/extractor_finish.png', self.final)

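The Pipeline class is not shown in this example. A minimal sketch consistent with how it is used above, a list of image-to-image callables applied in order, might look like this (the real class may differ):

class Pipeline:
    def __init__(self, steps):
        self.steps = steps

    def process_pipeline(self, image):
        # Feed the image through each preprocessing step in order.
        for step in self.steps:
            image = step(image)
        return image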
Example #3
def main(image_path):
    # get the final worksheet from the image
    ext = Extractor(image_path, False)
    final = ext.final

    # get the form code by checking the image's QR code
    decoded_qr_code = reader(final)

    # extract the cells and student's responses
    cells = Cells(final)

    # grade the worksheet by using a CNN to OCR the student's responses
    grader = Grader(decoded_qr_code)
    grader.grade(cells.student_responses)
    worksheet = grader.display(final, cells.sorted_contours)
    Helpers.save_image(f'{Helpers.IMAGE_DIRECTORY}/graded.png', worksheet)
Example #4
def run():
    if int(arg.images) >= 1:
        model = load_generator(z_size, g_conv_dims, img_size)
        for k in tqdm(range(int(arg.images)), desc='Generating images'):
            seed_torch = int(torch.randint(1, int(10e5), (1, )).numpy())
            seed_numpy = int(np.random.randint(1, 10e5, size=1))
            np.random.seed(seed_numpy)
            torch.manual_seed(seed_torch)
            z_latent = create_sample(model, 1, z_size, seed_torch)
            image = convert_image(z_latent)
            image = np.squeeze(image, axis=0)
            save_image(image,
                       'image_' + str(k),
                       int(arg.size),
                       path='../results/')
        save_gifs(model, int(arg.size), generate=arg.gif)
    else:
        print(
            'Unable to produce images using {}. Enter a number greater than '
            'or equal to 1.'.format(arg.images))
Example #5
def undistort_img_dir(in_img_dir, M, dist, out_img_dir=None):
    """Undistort test images and apply a watermark to the image if output
    directory to save to is provided.

    Args:
        in_img_dir (str): path to directory containing images to undistort

        M (numpy.array): camera matrix (output from cv2.calibrateCamera())

        dist (numpy.array): distortion coefficients (output from
                            cv2.calibrateCamera())

        out_img_dir (str): (OPTIONAL) if specified, undistorted images will be
                           saved to this directory.

    References:
        cv2.putText
         - http://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html#cv2.putText

        OpenCV fonts
         - https://codeyarns.com/2015/03/11/fonts-in-opencv/

        Transparent overlays with OpenCV
         - http://www.pyimagesearch.com/2016/03/07/transparent-overlays-with-opencv/
    """
    for path in glob.iglob(os.path.join(in_img_dir, '*.jpg')):
        img = cv2.imread(path)
        img = undistort_img(img, M, dist)

        # save the watermarked image if an output directory was given
        if out_img_dir:
            # derive the output file name from the input path
            file_name = os.path.split(path)[-1]
            img = watermark(img, 'UNDISTORTED')

            # save
            save_image(img, os.path.join(out_img_dir, file_name))
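The watermark helper used above is not shown. Following the transparent-overlays reference in the docstring, a minimal sketch could look like this; the text position, color, and alpha value are assumptions:

import cv2

def watermark(img, text, alpha=0.5):
    # Blend semi-transparent text over the image, per the
    # transparent-overlays reference cited in the docstring.
    overlay = img.copy()
    cv2.putText(overlay, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                1.0, (0, 0, 255), 2)
    return cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)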
Example #6
def hybrid_img_generation(img_one_path, img_two_path):
    # Setup
    # Read images and convert to floating point format
    image1 = load_image(img_one_path)
    image2 = load_image(img_two_path)

    image1, image2 = equalize_image_sizes(image1, image2)

    # display the dog and cat images
    plt.figure(figsize=(3, 3))
    plt.imshow((image1 * 255).astype(np.uint8))
    plt.figure(figsize=(3, 3))
    plt.imshow((image2 * 255).astype(np.uint8))

    # For your write up, there are several additional test cases in 'data'.
    # Feel free to make your own, too (you'll need to align the images in a
    # photo editor such as Photoshop).
    # The hybrid images will differ depending on which image you
    # assign as image1 (which will provide the low frequencies) and which image
    # you assign as image2 (which will provide the high frequencies)

    ## Hybrid Image Construction ##
    # cutoff_frequency is the standard deviation, in pixels, of the Gaussian
    # blur that will remove high frequencies. You may tune this per image pair
    # to achieve better results.
    cutoff_frequency = 7
    low_frequencies, high_frequencies, hybrid_image = gen_hybrid_image(
        image1, image2, cutoff_frequency)

    ## Visualize and save outputs ##
    plt.figure()
    plt.imshow((low_frequencies * 255).astype(np.uint8))
    plt.figure()
    plt.imshow(((high_frequencies + 0.5) * 255).astype(np.uint8))
    vis = vis_hybrid_image(hybrid_image)
    plt.figure(figsize=(20, 20))
    plt.imshow(vis)

    save_image('../results/low_frequencies.jpg', low_frequencies)
    outHigh = np.clip(high_frequencies + 0.5, 0.0, 1.0)
    save_image('../results/high_frequencies.jpg', outHigh)
    save_image('../results/hybrid_image.jpg', hybrid_image)
    save_image('../results/hybrid_image_scales.jpg', vis)
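gen_hybrid_image is defined elsewhere in the project. A minimal sketch consistent with the comments above (low frequencies are a Gaussian blur of image1, high frequencies are image2 minus its own blur, and the hybrid is their sum); the per-channel sigma handling is an assumption:

import numpy as np
from scipy.ndimage import gaussian_filter

def gen_hybrid_image(image1, image2, cutoff_frequency):
    # cutoff_frequency is the Gaussian standard deviation in pixels;
    # inputs are assumed to be float RGB images in [0, 1].
    sigma = (cutoff_frequency, cutoff_frequency, 0)  # no blur across channels
    low_frequencies = gaussian_filter(image1, sigma)
    high_frequencies = image2 - gaussian_filter(image2, sigma)
    hybrid_image = np.clip(low_frequencies + high_frequencies, 0.0, 1.0)
    return low_frequencies, high_frequencies, hybrid_image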
Example #7
def create_item():
    ''' Save a new Item in the database '''
    owner_id = session.get('user_id')
    if not owner_id:
        return ("You must be logged in to be able to create items", 401)
    form = NewItemForm()
    if form.validate_on_submit():
        file = request.files.get(form.image_file.name)
        saved_path = save_image(file) if file else None
        new_item = Item(name=form.data["name"],
                        category_id=form.data["category_id"],
                        description=form.data["description"],
                        image_file=saved_path,
                        owner_id=owner_id)
        db.session.add(new_item)
        db.session.commit()
        return "ok"
    return render_template('edit_item.html', form=form, action="/items")
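In this Flask context, save_image stores the uploaded file and returns the path it was saved under. A minimal sketch using werkzeug's secure_filename; UPLOAD_FOLDER is an assumed config key, and the real helper may differ:

import os
from flask import current_app
from werkzeug.utils import secure_filename

def save_image(file):
    # Store the uploaded file under the app's upload folder
    # and return the saved path (sketch only).
    filename = secure_filename(file.filename)
    path = os.path.join(current_app.config['UPLOAD_FOLDER'], filename)
    file.save(path)
    return path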
Example #8
def update_item(id):
    ''' Updates an Item in the database '''
    user_id = session.get('user_id')
    if not user_id:
        return ("You must be logged in to be able to edit items", 401)
    item = Item.query.filter_by(id=id).first()
    if not item:
        return "Not Found", 404
    if item.owner_id != user_id:
        return ("This item doesn't belong to you", 401)
    form = NewItemForm(obj=item)
    if form.validate_on_submit():
        file = request.files.get(form.image_file.name)
        saved_path = save_image(file) if file else None
        if saved_path:
            item.image_file = saved_path
        item.name = form.data["name"]
        item.description = form.data["description"]
        db.session.commit()
        return "ok"
    return render_template('edit_item.html', form=form, action=item.url)
Example #9
def filter_test(img_path):
    resultsDir = '..' + os.sep + 'results'
    if not os.path.exists(resultsDir):
        os.mkdir(resultsDir)

    test_image = load_image(img_path)
    test_image = rescale(test_image, 0.7, mode='reflect', multichannel=True)
    '''
    Identity filter
    This filter should do nothing regardless of the padding method you use.
    '''
    identity_filter = np.asarray([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                                 dtype=np.float32)
    identity_image = my_imfilter(test_image, identity_filter)
    plt.imshow(identity_image)
    plt.show()
    done = save_image('../results/identity_image.jpg', identity_image)
    '''
    Small blur with a box filter
    This filter should remove some high frequencies.
    '''
    blur_filter = np.ones((3, 3), dtype=np.float32)
    # making the filter sum to 1
    blur_filter /= np.sum(blur_filter, dtype=np.float32)
    blur_image = my_imfilter(test_image, blur_filter)
    plt.imshow(blur_image)
    plt.show()
    done = save_image(resultsDir + os.sep + 'blur_image.jpg', blur_image)
    '''
    Large blur
    This blur would be slow to do directly, so we instead use the fact that Gaussian blurs are separable and blur sequentially in each direction.
    '''
    # generate a 1x(2k+1) gaussian kernel with mean=0 and sigma = s, see https://stackoverflow.com/questions/17190649/how-to-obtain-a-gaussian-filter-in-python
    s, k = 10, 12
    large_1d_blur_filter = np.asarray([
        exp(-z * z / (2 * s * s)) / sqrt(2 * pi * s * s)
        for z in range(-k, k + 1)
    ],
                                      dtype=np.float32)
    large_1d_blur_filter = large_1d_blur_filter.reshape(-1, 1)
    large_blur_image = my_imfilter(test_image, large_1d_blur_filter)
    # notice the T operator which transposes the filter
    large_blur_image = my_imfilter(large_blur_image, large_1d_blur_filter.T)
    plt.imshow(large_blur_image)
    plt.show()
    done = save_image(resultsDir + os.sep + 'large_blur_image.jpg',
                      large_blur_image)

    # Slow (naive) version of large blur
    # import time
    # large_blur_filter = np.dot(large_1d_blur_filter, large_1d_blur_filter.T)
    # t = time.time()
    # large_blur_image = my_imfilter(test_image, large_blur_filter);
    # t = time.time() - t
    # print('{:f} seconds'.format(t))
    ##
    '''
    Oriented filter (Sobel operator)
    '''
    sobel_filter = np.asarray(
        [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]],
        dtype=np.float32)  # should respond to horizontal gradients
    sobel_image = my_imfilter(test_image, sobel_filter)

    # 0.5 added because the output image is centered around zero otherwise and mostly black
    sobel_image = np.clip(sobel_image + 0.5, 0.0, 1.0)
    plt.imshow(sobel_image)
    plt.show()
    done = save_image(resultsDir + os.sep + 'sobel_image.jpg', sobel_image)
    '''
    High pass filter (discrete Laplacian)
    '''
    laplacian_filter = np.asarray([[0, 1, 0], [1, -4, 1], [0, 1, 0]],
                                  dtype=np.float32)
    laplacian_image = my_imfilter(test_image, laplacian_filter)

    # 0.5 added because the output image is otherwise centered around zero and mostly black
    laplacian_image = np.clip(laplacian_image + 0.5, 0.0, 1.0)
    plt.figure()
    plt.imshow(laplacian_image)
    plt.show()
    done = save_image(resultsDir + os.sep + 'laplacian_image.jpg',
                      laplacian_image)

    # High pass "filter" alternative
    high_pass_image = test_image - blur_image
    high_pass_image = np.clip(high_pass_image + 0.5, 0.0, 1.0)
    plt.figure()
    plt.imshow(high_pass_image)
    plt.show()
    done = save_image(resultsDir + os.sep + 'high_pass_image.jpg',
                      high_pass_image)
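my_imfilter is the project's own filtering routine and is not shown here. A minimal stand-in consistent with its use above, cross-correlating each channel with the kernel, could look like this; zero padding is an assumption:

import numpy as np
from scipy.ndimage import correlate

def my_imfilter(image, kernel):
    # Apply the 2D kernel to each channel independently (sketch only;
    # the real routine may pad and normalize differently).
    if image.ndim == 2:
        return correlate(image, kernel, mode='constant')
    out = np.empty_like(image)
    for c in range(image.shape[2]):
        out[:, :, c] = correlate(image[:, :, c], kernel, mode='constant')
    return out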
Example #10
plt.imshow((image2*255).astype(np.uint8))

# For your write up, there are several additional test cases in 'data'.
# Feel free to make your own, too (you'll need to align the images in a
# photo editor such as Photoshop).
# The hybrid images will differ depending on which image you
# assign as image1 (which will provide the low frequencies) and which image
# you assign as image2 (which will provide the high frequencies)

## Hybrid Image Construction ##
# cutoff_frequency is the standard deviation, in pixels, of the Gaussian
# blur that will remove high frequencies. You may tune this per image pair
# to achieve better results.
cutoff_frequency = 500
ksize = (19, 19)
low_frequencies, high_frequencies, hybrid_image = gen_hybrid_image(image1, image2, cutoff_frequency, ksize)

## Visualize and save outputs ##
plt.figure()
plt.imshow((low_frequencies*255).astype(np.uint8))
plt.figure()
plt.imshow(((high_frequencies+0.5)*255).astype(np.uint8))
vis = vis_hybrid_image(hybrid_image)
plt.figure(figsize=(20, 20))
plt.imshow(vis)

save_image('../results/low_frequencies.jpg', np.clip((low_frequencies),0,1))
save_image('../results/high_frequencies.jpg', np.clip((high_frequencies+0.5),0,1))
save_image('../results/hybrid_image.jpg', hybrid_image)
save_image('../results/hybrid_image_scales.jpg', vis)
Example #11
        diff = time.time() - start_time

        if diff >= 1:
            print(fps)
            start_time = time.time()
            fps = 0

        fps += 1

        jpg = bytes[a:b + 2]
        bytes = bytes[b + 2:]

        img = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8),
                           cv2.IMREAD_COLOR)

        # save original capture
        helpers.save_image(img)

        # process data
        img, thresh, response = helpers.detectTriangles(img)
        helpers.update_thing_shadow(thing_id, response)

        # display
        cv2.imshow('result', img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

# When everything is done, release the capture
cv2.destroyAllWindows()
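This fragment is the tail of the usual MJPEG-over-HTTP pattern: scan a growing byte buffer for the JPEG start (0xffd8) and end (0xffd9) markers, cut one frame out, and decode it. A minimal sketch of the marker search that precedes the fragment; stream is an assumed file-like HTTP response:

import cv2
import numpy as np

buf = b''
while True:
    buf += stream.read(1024)       # stream: assumed file-like HTTP response
    a = buf.find(b'\xff\xd8')      # JPEG start-of-image marker
    b = buf.find(b'\xff\xd9')      # JPEG end-of-image marker
    if a != -1 and b != -1:
        jpg = buf[a:b + 2]         # one complete frame
        buf = buf[b + 2:]
        img = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8),
                           cv2.IMREAD_COLOR)
        # ...process img as in the fragment above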
Example #12
img_stretched = h.contrast_stretching(img)
img2_stretched = h.contrast_stretching(img2)
h.show_images([img, img_stretched, img2, img2_stretched],
            ["bad_kid", "bad_kid stretched", "dark", "dark stretched"])


############# Gamma correction testing #############
gamma = 1.5
img_corr = h.gamma_corr(img, gamma)
img2_corr = h.gamma_corr(img2, gamma)
h.show_images([img, img_corr, img2, img2_corr],
            ["bad_kid", "bad_kid gamma", "dark", "dark gamma"])


############# Histogram equalization testing #############
img_eq = h.hist_eq(img)
img2_eq = h.hist_eq(img2)
h.show_images([img, img_eq/255, img2, img2_eq/255],
            ["bad_kid", "bad_kid equalized", "dark", "dark equalized"])

h.save_image('./results/salt_pepper_noise.jpg', noise)
h.save_image('./results/median_filtered.jpg', filtered_img)
h.save_image('./results/negative_transform.jpg', inv)
h.save_image('./results/contrast_stretched.jpg', img_stretched)
h.save_image('./results/contrast_stretched2.jpg', img2_stretched)
h.save_image('./results/gamma_corrected.jpg', img_corr)
h.save_image('./results/gamma_corrected2.jpg', img2_corr)
h.save_image('./results/hist_equalized.jpg', np.clip((img_eq/255),0,1))
h.save_image('./results/hist_equalized2.jpg', np.clip((img2_eq/255),0,1)) 
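h.contrast_stretching and h.gamma_corr are standard point operations; minimal sketches assuming float images in [0, 1] (the project's helpers may differ):

import numpy as np

def contrast_stretching(img):
    # Stretch intensities linearly to cover the full [0, 1] range.
    lo, hi = img.min(), img.max()
    return (img - lo) / (hi - lo + 1e-8)

def gamma_corr(img, gamma):
    # Power-law (gamma) transform: out = in ** gamma.
    return np.power(img, gamma)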
Example #13
}

with open("calibration_data.p", "wb") as f:
    pickle.dump(calibration_data, f)

# Load the camera calibration parameters into each chessboard.
# Without this, we can't get an undistorted image from that instance.

for chessboard in chessboards:
    chessboard.load_undistort_params(camera_matrix=matrix, distortion=distortion_coef)

# Save each image to respective files

for chessboard in chessboards:

    if chessboard.has_corners:
        save_image(chessboard.image_with_corners(), "corners", chessboard.i)

    if chessboard.can_undistort:
        save_image(chessboard.undistorted_image(), "undistortedboard", chessboard.i)

# Visualization

raw_images, images_with_corners, undistorted_images = [], [], []

for chessboard in chessboards:

    raw_images.append(chessboard.image())

    if chessboard.has_corners:
        images_with_corners.append(chessboard.image_with_corners())
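chessboard.undistorted_image() presumably wraps cv2.undistort with the parameters loaded above; a minimal sketch of that call (the wrapper's exact behavior is an assumption):

import cv2

def undistorted_image(img, camera_matrix, distortion_coef):
    # Remove lens distortion using the calibration parameters
    # computed by cv2.calibrateCamera and pickled above.
    return cv2.undistort(img, camera_matrix, distortion_coef)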
Example #14
if not os.path.exists(resultsDir):
    os.mkdir(resultsDir)

image_title = 'mona_lisa.jpg'
image_path = dataDir + os.sep + image_title
test_image = load_image(image_path)
test_image = rescale(test_image, 0.7, multichannel=True, mode='reflect')
'''
Identity filter
This filter should do nothing regardless of the padding method you use.
'''
identity_filter = np.asarray([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                             dtype=np.float32)
identity_image = my_imfilter(test_image, identity_filter)
plt.imshow(identity_image)
done = save_image('../results/identity_image.jpg', identity_image)
'''
Small blur with a box filter
This filter should remove some high frequencies.
'''
blur_filter = np.ones((3, 3), dtype=np.float32)
blur_filter /= np.sum(blur_filter,
                      dtype=np.float32)  # making the filter sum to 1
blur_image = my_imfilter(test_image, blur_filter)
plt.imshow(blur_image)
done = save_image(resultsDir + os.sep + 'blur_image.jpg', blur_image)
'''
Large blur
This blur would be slow to do directly, so we instead use the fact that Gaussian blurs are separable and blur sequentially in each direction.
'''
# generate a gaussian kernel with any parameters of your choice. you may only in this case use a function
Example #15
        # Validation dataset
        dataset_val = helpers.PeopleDataset()
        val_type = "val" if args.year in '2017' else "minival"
        dataset_val.load_coco(args.dataset, val_type, year=args.year, auto_download=args.download, class_ids=[1])
        dataset_val.prepare()

        # Image Augmentation
        # Right/Left flip 50% of the time
        augmentation = imgaug.augmenters.Fliplr(0.5)

        print("Training network heads")
        model.train(dataset_train, dataset_val,
                    learning_rate=config.LEARNING_RATE,
                    epochs=10,
                    layers='heads',
                    augmentation=augmentation)

    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)
        model_path = model.find_last()
        print("Loading weights ", model_path)
        model.load_weights(model_path, by_name=True)

        if args.image:
            helpers.save_image(args.image, lambda image: model.detect([image], verbose=0))
        elif args.video:
            helpers.save_video(args.video, lambda image: model.detect([image], verbose=0)[0])
        else:
            print("You must provide either --image or --video commandline parameter when using inference mode")
Example #16
# to achieve better results.
cutoff_frequency = 2
low_frequencies, high_frequencies, hybrid_image = gen_hybrid_image_fft(
    image1, image2, cutoff_frequency)
# bonus

## Visualize and save outputs ##
plt.figure()
plt.imshow((low_frequencies * 255).astype(np.uint8))
plt.figure()
plt.imshow(((high_frequencies + 0.5) * 255).astype(np.uint8))
vis = vis_hybrid_image(hybrid_image)
plt.figure(figsize=(20, 20))
plt.imshow(vis)

save_image('../results/low_frequencies.jpg', low_frequencies - .1)
save_image('../results/high_frequencies.jpg', high_frequencies)
save_image('../results/hybrid_image.jpg', hybrid_image - .3)
save_image('../results/hybrid_image_scales.jpg', vis - .3)

# bonus
low_frequencies_f, high_frequencies_f, hybrid_image_f = gen_hybrid_image(
    image1, image2, cutoff_frequency)

plt.figure()
plt.imshow((low_frequencies_f * 255).astype(np.uint8))
plt.figure()
plt.imshow(((high_frequencies_f + 0.5) * 255).astype(np.uint8))
vis_f = vis_hybrid_image(hybrid_image_f)
plt.figure(figsize=(20, 20))
plt.imshow(vis_f)
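gen_hybrid_image_fft is the bonus frequency-domain variant. A minimal sketch of a Gaussian low-pass applied in the Fourier domain for a single channel; the mapping from the cutoff parameter to the transfer function below is an assumption:

import numpy as np

def fft_lowpass(channel, sigma):
    # Multiply the spectrum by a Gaussian transfer function; equivalent
    # to a spatial Gaussian blur with standard deviation sigma (pixels).
    rows, cols = channel.shape
    u = np.fft.fftfreq(rows).reshape(-1, 1)   # cycles per pixel
    v = np.fft.fftfreq(cols).reshape(1, -1)
    H = np.exp(-2 * (np.pi * sigma) ** 2 * (u ** 2 + v ** 2))
    return np.real(np.fft.ifft2(np.fft.fft2(channel) * H))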
Example #17
# Load image
lena = imread("../lena.jpg")

# Draw and display an image
plt.imshow(lena)
plt.show()

# Matrix indices are [rows, columns]
# Extract rows 100 to 199 and all columns
sub_lena = lena[100:200, :]
show_image(sub_lena)

# Accessing color channels: third index -> [rows, cols, channel]
red_lena = lena[:, :, 0]
show_image(red_lena, cmap="gray")

# Convert to grayscale and use different colormap
gray_lena = rgb2gray(lena)
show_image(gray_lena, colormap="inferno")

# Convert to other colorspace
hsv_lena = convert_colorspace(lena, "RGB", "HSV")

# Only show saturation
sat_lena = hsv_lena[:, :, 1]
show_image(sat_lena, "gray")

# Save image
save_image(sat_lena, "sat_lena.jpg", "gray")
Example #19
plt.imshow((image2 * 255).astype(np.uint8))

# For your write up, there are several additional test cases in 'data'.
# Feel free to make your own, too (you'll need to align the images in a
# photo editor such as Photoshop).
# The hybrid images will differ depending on which image you
# assign as image1 (which will provide the low frequencies) and which image
# you assign as image2 (which will provide the high frequencies)

## Hybrid Image Construction ##
# cutoff_frequency is the standard deviation, in pixels, of the Gaussian
# blur that will remove high frequencies. You may tune this per image pair
# to achieve better results.
cutoff_frequency = 7
low_frequencies, high_frequencies, hybrid_image = gen_hybrid_image(
    image1, image2, cutoff_frequency)

## Visualize and save outputs ##
plt.figure()
plt.imshow((low_frequencies * 255).astype(np.uint8))
plt.figure()
plt.imshow(((high_frequencies + 0.5) * 255).astype(np.uint8))
vis = vis_hybrid_image(hybrid_image)
plt.figure(figsize=(20, 20))
plt.imshow(vis)

save_image('../results/low_frequencies.jpg', low_frequencies)
save_image('../results/high_frequencies.jpg', high_frequencies + 0.5)
save_image('../results/hybrid_image.jpg', hybrid_image)
save_image('../results/hybrid_image_scales.jpg', vis)
Example #20
def main():
    # validate command line arguments
    if len(sys.argv) < 2:
        print("not enough arguments: pass the weights file as an argument")
        return
    weight_path = sys.argv[1]

    reg = 1e-6

    model = Sequential()

    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               input_shape=(window_size, window_size, 3)))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(512, kernel_regularizer=l2(reg)))
    model.add(LeakyReLU(alpha=0.1))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(reg)))

    opt = Adam(lr=0.001)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  verbose=1,
                                                  factor=0.2,
                                                  patience=5,
                                                  min_lr=0.0)

    model.load_weights(weight_path)

    nb_pred = 50
    pred_path = './test_set_images/test_'
    save_path = pred_dir

    if not os.path.exists(pred_dir):
        os.makedirs(pred_dir)

    for i in range(1, nb_pred + 1):
        to_predict_img = load_image(
            pred_path + str(i) + '/test_' + str(i) + '.png', mode)
        to_predict_windows = windows_from_img(to_predict_img, window_size,
                                              patch_size)
        to_predict_windows = np.asarray(to_predict_windows)
        pred = model.predict(to_predict_windows, batch_size)
        save_image(
            save_path + 'pred_' + str(i) + '.png',
            prediction_to_img(pred, test_img_size, patch_size, threshold=0.4))
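prediction_to_img reassembles the per-window probabilities into an output mask. A minimal sketch assuming a square image and row-major patch order (both assumptions; the project's version may differ):

import numpy as np

def prediction_to_img(pred, img_size, patch_size, threshold=0.4):
    # Paint each patch white where the predicted probability
    # exceeds the threshold.
    n = img_size // patch_size
    out = np.zeros((img_size, img_size), dtype=np.uint8)
    for idx, p in enumerate(np.ravel(pred)):
        r, c = divmod(idx, n)
        if p > threshold:
            out[r * patch_size:(r + 1) * patch_size,
                c * patch_size:(c + 1) * patch_size] = 255
    return out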
Example #21
if not os.path.exists(resultsDir):
    os.mkdir(resultsDir)

test_image = load_image('../data/cat.bmp')
test_image = rescale(test_image, 0.7, mode='reflect', multichannel=True)

'''
Identity filter
This filter should do nothing regardless of the padding method you use.
'''
identity_filter = np.asarray(
    [[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.float32)
identity_image = my_imfilter(test_image, identity_filter)
plt.imshow(identity_image)
plt.show()
done = save_image('../results/identity_image.jpg', identity_image)


'''
Small blur with a box filter
This filter should remove some high frequencies.
'''
blur_filter = np.ones((3, 3), dtype=np.float32)
# making the filter sum to 1
blur_filter /= np.sum(blur_filter, dtype=np.float32)
blur_image = my_imfilter(test_image, blur_filter)
plt.imshow(blur_image)
plt.show()
done = save_image(resultsDir + os.sep + 'blur_image.jpg', blur_image)

## Hybrid Image Construction ##
# cutoff_frequency is the standard deviation, in pixels, of the Gaussian
# blur that will remove high frequencies. You may tune this per image pair
# to achieve better results.
cutoff_frequency = 7
low_frequencies, high_frequencies, hybrid_image = gen_hybrid_image(
    image1, image2, cutoff_frequency)
## Visualize and save outputs ##
plt.figure()
plt.imshow((low_frequencies * 255).astype(np.uint8))
plt.figure()
plt.imshow(((high_frequencies + 0.5) * 255).astype(np.uint8))
vis = vis_hybrid_image(hybrid_image)
plt.figure(figsize=(20, 20))
plt.imshow(vis)

# save_image('../results/low_frequencies.jpg', low_frequencies)
# save_image('../results/high_frequencies.jpg', high_frequencies+0.5)
# save_image('../results/hybrid_image.jpg', hybrid_image)
# save_image('../results/hybrid_image_scales.jpg', vis)

# Note: after processing, pixel values are floats in [0, 1]; they must be converted to integers in [0, 255] before saving, otherwise every image will be written as pure black (all pixels 0).
save_image('../results/low_frequencies.jpg',
           (low_frequencies * 255).astype(np.uint8))
save_image('../results/high_frequencies.jpg',
           ((high_frequencies + 0.5) * 255).astype(np.uint8))
save_image('../results/hybrid_image.jpg',
           (hybrid_image * 255).astype(np.uint8))
save_image('../results/hybrid_image_scales.jpg', (vis * 255).astype(np.uint8))
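The conversion described in the note above can be factored into a small helper; to_uint8 is a name introduced here:

import numpy as np

def to_uint8(img):
    # Convert a float image in [0, 1] to uint8 in [0, 255] for saving,
    # clipping first so out-of-range values don't wrap around.
    return (np.clip(img, 0.0, 1.0) * 255).astype(np.uint8)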