Example #1
import rasterio
from affine import Affine  # rasterio's affine transforms come from this package

# utils, get_ssim, get_psnr, and get_norm are project-local helpers.
def process(img1, img2, outfile):
    data1 = rasterio.open(img1)
    lons1, lats1 = utils.get_bounds(data1)

    data2 = rasterio.open(img2)
    lons2, lats2 = utils.get_bounds(data2)

    lons, lats = utils.get_overlap(lons1, lats1, lons2, lats2)
    if not lons or not lats:
        raise ValueError(f'no overlap: {lons1} {lats1} {lons2} {lats2}')

    crop1 = utils.crop_data(data1, lons, lats)
    crop2 = utils.crop_data(data2, lons, lats)

    new1, new2 = utils.scale_img(crop1, crop2)
    res = abs(int(new1.shape[0] / crop1.shape[0]) * data1.res[0])

    s = get_ssim(new1, new2)
    print(f'{img1}, {img2}, ssim, {s:.06f}')

    norm1, norm2 = get_norm(new1), get_norm(new2)
    p = get_psnr(norm1, norm2)
    print(f'{img1}, {img2}, psnr, {p:.06f}')

    transform = Affine.translation(lons[0], lats[0])
    transform *= Affine.scale(res)

    utils.write_tiff(outfile, norm1 * norm2, transform, nodata=data1.nodata)
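
Both get_norm and the scale_img helper these examples revolve around are project-local. A minimal sketch, assuming plain min-max normalization (the real helpers may differ):

import numpy as np

def scale_img(img):
    # hypothetical stand-in: rescale to [0, 1] via min-max normalization
    img = img.astype(np.float64)
    return (img - img.min()) / (img.max() - img.min())

get_norm = scale_img  # assuming get_norm normalizes the same way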
Example #2
import numpy as np
import cv2
from keras import backend as K
from keras.layers import Conv2D
from keras.models import Model

# load_and_preprocess_style, VGG16_AvgPool, style_loss, minimize_loss,
# unpreprocess, and scale_img are project-local helpers.
def main():
    img = load_and_preprocess_style()

    shape = img.shape[1:]
    vgg = VGG16_AvgPool(shape)
    symbolic_conv_outputs = [
        layer.get_output_at(1) for layer in vgg.layers
        if layer.__class__ == Conv2D
    ]

    style_model = Model(vgg.input, symbolic_conv_outputs)
    style_outputs = [K.variable(y) for y in style_model.predict(img)]
    print(len(style_outputs))

    loss = 0
    for symbolic, actual in zip(symbolic_conv_outputs, style_outputs):
        loss += style_loss(symbolic[0], actual[0])

    gradients = K.gradients(loss, style_model.input)
    get_loss_grads = K.function(inputs=[style_model.input],
                                outputs=[loss] + gradients)

    def get_loss_grads_wrapper(x):
        l, g = get_loss_grads([x.reshape(img.shape)])
        return l.astype(np.float64), g.flatten().astype(np.float64)

    img = minimize_loss(get_loss_grads_wrapper, 10, shape)
    img = np.reshape(img, newshape=(1, shape[0], shape[1], 3))
    img = unpreprocess(img[0].copy())
    img = scale_img(img)

    cv2.imshow('style', img)
    cv2.waitKey(0)
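
style_loss above comes from the project; the standard Gram-matrix formulation, sketched in the same Keras-backend style (reusing K from the imports above):

def gram_matrix(img):
    # img: (H, W, C) feature map; flatten spatial dims to get (C, H*W)
    X = K.batch_flatten(K.permute_dimensions(img, (2, 0, 1)))
    # channel-by-channel Gram matrix, normalized by the element count
    return K.dot(X, K.transpose(X)) / img.get_shape().num_elements()

def style_loss(y, t):
    # mean squared distance between the Gram matrices of output and target
    return K.mean(K.square(gram_matrix(y) - gram_matrix(t)))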
Example #3
import numpy as np
import cv2
from keras import backend as K

# VGG16_AvgPool_CutOff, load_and_preprocess_content, minimize_loss,
# unpreprocess, and scale_img are project-local helpers.
def main():
    img = load_and_preprocess_content()
    shape = img.shape[1:]
    content_model = VGG16_AvgPool_CutOff(shape, 10)
    target = K.variable(content_model.predict(img))

    mean_squared_loss = K.mean(K.square(target - content_model.output))
    gradients = K.gradients(mean_squared_loss, content_model.input)
    get_loss_grads = K.function(inputs=[content_model.input],
                                outputs=[mean_squared_loss] + gradients)

    def get_loss_grads_wrapper(x):
        l, g = get_loss_grads([x.reshape(img.shape)])
        return l.astype(np.float64), g.flatten().astype(np.float64)

    img = minimize_loss(get_loss_grads_wrapper, 10, shape)
    final_img = img.reshape(*shape)
    final_img = unpreprocess(final_img)
    final_img = scale_img(final_img)
    cv2.imshow('final_img', final_img)
    cv2.waitKey(0)
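
minimize_loss is not shown in either snippet; since the wrapper returns (loss, gradient) as float64, a plausible sketch uses SciPy's L-BFGS (the function name, iteration scheme, and clipping range are assumptions):

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def minimize_loss(fn, epochs, shape):
    # start from a random image, flattened the way L-BFGS expects
    x = np.random.randn(np.prod((1,) + tuple(shape)))
    for i in range(epochs):
        x, loss, _ = fmin_l_bfgs_b(func=fn, x0=x, maxfun=20)
        x = np.clip(x, -127, 127)  # keep pixels in the VGG-preprocessed range
        print(f'iter {i}: loss {loss}')
    return x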
Example #4
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

# config, lf, and utils are project-local modules.
def gen_hog(execute, show=True, save=False):
    """Generates the hog visualization for images"""
    if not execute:
        return

    imgs = [(config.vehicle, config.get_vehicle_hog()),
            (config.get_not_vehicle(), config.get_not_vehicle_hog())]
    for img_path, hog_path in imgs:
        img_in = mpimg.imread(img_path)
        image = cv2.cvtColor(img_in, cv2.COLOR_RGB2GRAY)
        orient, pix_per_cell, cell_per_block = 9, 8, 2
        features, hog_image = lf.get_hog_features(image, orient, pix_per_cell, cell_per_block)
        if show:
            plt.subplot(121)
            plt.imshow(image, cmap='gray')
            plt.title('Example Image')
            plt.subplot(122)
            plt.imshow(hog_image, cmap='gray')
            plt.title('HOG Visualization')
            plt.show()
        if save:
            cv2.imwrite(hog_path, utils.scale_img(hog_image))
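
lf.get_hog_features is project code; a sketch of what it likely wraps, using skimage.feature.hog (the parameter mapping is an assumption):

from skimage.feature import hog

def get_hog_features(img, orient, pix_per_cell, cell_per_block):
    # returns the HOG feature vector plus a visualization image
    features, hog_image = hog(
        img,
        orientations=orient,
        pixels_per_cell=(pix_per_cell, pix_per_cell),
        cells_per_block=(cell_per_block, cell_per_block),
        visualize=True,  # spelled 'visualise' in older scikit-image releases
        feature_vector=True)
    return features, hog_image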
Example #5
    def forward(self, p, c, zi, z_norm=None, mask=None):
        x = torch.cat([p, c], dim=1)
        if z_norm is not None:
            # double the batch so the same features are decoded with both codes
            x = torch.cat([x, x], dim=0)
            z = torch.cat([zi, z_norm], dim=0)
        else:
            z = zi
        # tile z spatially to match x, then concatenate along channels
        g_input = torch.cat(
            [x, z.repeat(1, 1, x.size(2) // z.size(2), x.size(3) // z.size(3))],
            dim=1)
        f = self.block0(g_input)

        f_e = f[2]
        out = f[-1]
        results = []
        attn = 0
        for i in range(self.layers):
            model = getattr(self, 'decoder' + str(i))
            out = model(out)
            if i == 1 and self.use_attn:
                # auto attention
                model = getattr(self, 'attn' + str(i))
                scaled_mask = scale_img(mask, size=[f_e.size(2), f_e.size(3)])
                if z_norm is not None:
                    scaled_mask = torch.cat([scaled_mask, scaled_mask], dim=0)
                out, attn = model(out, f_e, scaled_mask)
            if i > self.layers - self.output_scale - 1:
                model = getattr(self, 'out' + str(i))
                output = model(out)
                results.append(output)
                out = torch.cat([out, output], dim=1)

        if self.stage == 'stage1':
            raw_output = results[-1]
            post_output = seg1_to_seg8(torch.argmax(raw_output, dim=1)).float()  # [2*N, 8, 256, 256]
            return raw_output, post_output
        else:
            return results
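
scale_img here resizes a mask tensor to a target spatial size; a plausible stand-in built on torch.nn.functional.interpolate (the real helper may use a different mode or alignment):

import torch.nn.functional as F

def scale_img(img, size):
    # resize an NCHW tensor to the given [H, W]; for hard binary masks,
    # mode='nearest' would avoid interpolated in-between values
    return F.interpolate(img, size=size, mode='bilinear', align_corners=True)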
Example #6
## write sample training images

import os

import numpy as np
import matplotlib.pyplot as plt

import cfg  # project-local config module (provides data_dir, etc.; name assumed)
import utils
utils.write_sample_imgs(X_contrast=(0, 99))
utils.write_sample_border_imgs(channels=['corr', 'median'], contrast=(1, 99))
# write_sample_border_imgs(channels=['corr'], contrast=(0,99))

## try auto-correlation images

dataset = 'J123'

folder = os.path.join(cfg.data_dir, 'caiman', 'datasets', 'images_' + dataset)
imgs = utils.get_frames(folder, frame_inds=np.arange(0, 1000))

##
# pixelwise lag-1 autocorrelation, normalized by the norms of the two stacks
offset = 1
acorr = np.sum(np.multiply(imgs[:-offset], imgs[offset:]), axis=0)
acorr = acorr / (np.sqrt(np.sum(np.square(imgs[:-offset]), 0)) *
                 np.sqrt(np.sum(np.square(imgs[offset:]), 0)))
# acorr = np.sum(np.multiply(imgs[:-offset], imgs[offset:]), 0)
# acorr = np.divide(acorr, np.square(np.linalg.norm(imgs, axis=0)))

# create the image on the first run, then update it in place on re-runs
if 'imshow' not in globals():
    plt.close('all')
    imshow = plt.imshow(utils.scale_img(acorr))
imshow.set_data(utils.scale_img(acorr))
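
The same computation packaged as a reusable function (name hypothetical), which makes the normalization explicit:

def lag_autocorr(imgs, offset=1):
    # pixelwise correlation between frames t and t + offset, normalized
    # by the norms of the two shifted stacks
    a, b = imgs[:-offset], imgs[offset:]
    num = np.sum(a * b, axis=0)
    den = np.sqrt(np.sum(a ** 2, axis=0)) * np.sqrt(np.sum(b ** 2, axis=0))
    return num / den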
Example #7
    summary_titles = ['corr', 'mean', 'median', 'max', 'std']
    X = {key: np.zeros((batches, height, width)) for key in summary_titles}

    # get summary images for each batch in video
    for b in tqdm(range(batches)):
        img_stack = utils.get_frames(
            folder,
            frame_numbers=np.arange(batch_inds[b], batch_inds[b] + cfg.summary_frames))

        X['corr'][b] = utils.get_correlation_image(img_stack)
        X['mean'][b] = np.mean(img_stack, 0)
        X['median'][b] = np.median(img_stack, 0)
        X['max'][b] = img_stack.max(0)
        X['std'][b] = img_stack.std(0)

    # collapse across summary images and scale from 0-1
    X['corr'] = utils.scale_img(X['corr'].max(0))
    X['mean'] = utils.scale_img(X['mean'].max(0))
    X['median'] = utils.scale_img(X['median'].max(0))
    X['max'] = utils.scale_img(X['max'].max(0))
    X['std'] = utils.scale_img(X['std'].mean(0))

    # get targets (d is the dataset name from the enclosing loop over datasets)
    y = utils.get_targets(
        os.path.join(cfg.data_dir, 'labels', d), collapse_masks=True,
        centroid_radius=3, border_thickness=cfg.border_thickness)

    # get tensor of masks for each individual neuron (used by segmentation network only)
    neuron_masks = utils.get_targets(
        os.path.join(cfg.data_dir, 'labels', d), collapse_masks=False)
    neuron_masks = neuron_masks['somas']  # keep only the soma masks
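
utils.get_correlation_image is project code; a simplified stand-in that captures the usual idea (mean temporal correlation of each pixel with its horizontal and vertical neighbors; names and epsilon are assumptions):

import numpy as np

def get_correlation_image(stack):
    # z-score each pixel's time series, then correlate with neighbors
    s = (stack - stack.mean(0)) / (stack.std(0) + 1e-8)
    corr = np.zeros(stack.shape[1:])
    n = np.zeros(stack.shape[1:])
    right = (s[:, :, :-1] * s[:, :, 1:]).mean(0)  # corr with right neighbor
    down = (s[:, :-1, :] * s[:, 1:, :]).mean(0)   # corr with lower neighbor
    corr[:, :-1] += right
    n[:, :-1] += 1
    corr[:, 1:] += right
    n[:, 1:] += 1
    corr[:-1, :] += down
    n[:-1, :] += 1
    corr[1:, :] += down
    n[1:, :] += 1
    return corr / n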
Example #8
from ssa import ssa_2d

import matplotlib.pyplot as plt
import matplotlib.image as mpimg

import numpy as np
# compare_psnr/compare_ssim were removed in scikit-image 0.18; the metrics
# now live in skimage.metrics
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import structural_similarity as ssim

import utils

img = mpimg.imread('lena.bmp')
img = utils.scale_img(img)

img_noise = utils.add_gaussian_noise(img, noise_variance=0.2)
img_noise = utils.scale_img(img_noise)

window_height = 10
window_width = 15
number_of_eigenvectors = 25
#number_of_eigenvectors_rec = 17

img_reconstructed = ssa_2d(
    img=img_noise,
    u=window_height,
    v=window_width,
    l=number_of_eigenvectors,
    #l_rec = number_of_eigenvectors_rec,
    verbose=3)

img_reconstructed = utils.scale_img(img_reconstructed)
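
psnr and ssim are imported above but never called; a natural follow-up, assuming a single-channel image scaled to [0, 1]:

print('PSNR (noisy):        ', psnr(img, img_noise, data_range=1.0))
print('PSNR (reconstructed):', psnr(img, img_reconstructed, data_range=1.0))
print('SSIM (reconstructed):', ssim(img, img_reconstructed, data_range=1.0))

plt.figure(figsize=(12, 4))
for i, (title, im) in enumerate([('original', img), ('noisy', img_noise),
                                 ('reconstructed', img_reconstructed)]):
    plt.subplot(1, 3, i + 1)
    plt.imshow(im, cmap='gray')
    plt.title(title)
plt.show()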