def still(grayscaleName='example.bmp', markedName='example_marked.bmp'):
    # grayscaleName = 'example.bmp'
    # markedName = 'example_marked.bmp'
    cleanName = grayscaleName[:grayscaleName.index('.')]
    outputName = "{}_output.jpg".format(cleanName)

    grayIm = np.float32(imread(fn(grayscaleName))) / 255.
    markedIm = np.float32(imread(fn(markedName))) / 255.
    if len(grayIm.shape) == 2: grayIm = np.dstack([grayIm for x in range(3)])
    isColored = (np.sum(np.abs(grayIm - markedIm), axis=2) > .01)

    # grayIm_conv = cv2.cvtColor(grayIm, cv2.COLOR_RGB2HSV)
    # markedIm_conv = cv2.cvtColor(markedIm, cv2.COLOR_RGB2YUV)
    grayIm_conv = RGB2YIQ(grayIm)
    markedIm_conv = RGB2YIQ(markedIm)

    yuvIm = np.dstack(
        [grayIm_conv[:, :, 0], markedIm_conv[:, :, 1], markedIm_conv[:, :, 2]])
    yuvH, yuvW, yuvD = yuvIm.shape

    # dMax = math.floor(math.log(min(yuvH, yuvW)) / math.log(2) - 2)
    # xa = 0
    # ya = 0
    # xb = math.floor(yuvH / (2 ** (dMax-1))) * (2 ** (dMax -1))
    # yb = math.floor(yuvW / (2 ** (dMax-1))) * (2 ** (dMax -1))

    # isColored = isColored[xa:xb, ya:yb]
    # yuvIm = yuvIm[xa:xb, ya:yb, :]

    outputImYIQ = getColor(isColored, yuvIm)
    outputIm = YIQ2RGB(outputImYIQ)
    imsave(fn(outputName), outputIm)
def main():
    #gif()
    #gif('frame10.jpg','frame11.jpg', 'frame10_marked.jpg')
    #still('trident_gray.bmp', 'trident_marked.bmp')
    name1 = 'kid_marked.bmp'
    name1_1 = 'kid_marked.jpg'
    name2 = 'trident_marked.bmp'
    name2_1 = 'trident_marked.jpg'
    colorIm1 = np.float32(imread(fn(name1))) / 255.
    # colorIm2 = np.float32(imread(fn(name2))) / 255.
    # grayIm = cv2.cvtColor(colorIm, cv2.COLOR_RGB2GRAY)
    imsave(fn(name1_1), colorIm1)
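RGB2YIQ, YIQ2RGB, and getColor are helpers defined elsewhere in the author's file; getColor (the colorization solve) is not sketched here, but a minimal sketch of the color-space helpers, assuming the standard NTSC YIQ matrix, would be:

import numpy as np

_RGB2YIQ = np.array([[0.299,  0.587,  0.114],
                     [0.596, -0.274, -0.322],
                     [0.211, -0.523,  0.312]], dtype=np.float32)

def RGB2YIQ(im):
    # im is an H x W x 3 float RGB image in [0, 1]
    return im @ _RGB2YIQ.T

def YIQ2RGB(im):
    # invert the same matrix rather than hard-coding rounded coefficients
    return im @ np.linalg.inv(_RGB2YIQ).T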
Example #3
 def crop_poly(self, event=None):
     tk.messagebox.showinfo("Instructions", "Crop from Original Input Size for Accurate Scaling & Measurement")
     self.status_label.configure(text="Status: Running")
     convertImg = np.asarray(self.img)
     polygon = cropPts
     maskIm = Image.new('L', (convertImg.shape[1], convertImg.shape[0]), 0)
     ImageDraw.Draw(maskIm).polygon(polygon, outline='black', fill=1)
     mask = np.array(maskIm)
     unitmask = mask
     unitmask[unitmask>0] = 1
     # construct new image (uint8: 0-255)
     newIm = np.empty(convertImg.shape,dtype='uint8')
     # colors (RGB)
     newIm[:,:,:3] = convertImg[:,:,:3]
     newIm[:,:,0] *= unitmask
     newIm[:,:,1] *= unitmask
     newIm[:,:,2] *= unitmask
     
     # boundingRect needs an int32/float32 point array
     bounding_rect = cv2.boundingRect(np.asarray(polygon, dtype=np.int32))
     x,y,w,h = bounding_rect
     newIm = newIm[y:y+h, x:x+w].copy()
     
     # back to Image from numpy
     cropImg = Image.fromarray(newIm, "RGB")
     self.cropped = cropImg
     cropImg.save(fn('outputs/cropout.png'))
     tk.messagebox.showinfo("Instructions", "Cropped Img Saved!")
     global cropstartoverflag
     cropstartoverflag=False
     self.img = cropImg
     self.imgt = ImageTk.PhotoImage(image=self.img)
     self.image_label.configure(image=self.imgt)
     self.status_label.configure(text="Status: Complete")
     self.image_label.unbind("<Button-1>",buttonid)
     self.crop.destroy()
Example #4
    def numcomponents(self,event=None):
        img = self.bin_img
        labels = np.zeros((img.shape),dtype=int)
        vflag = np.zeros((img.shape),dtype=int)
        countlabel=0
        print("NumComponents Start")
        for i in range(img.shape[0]):
            for j in range(img.shape[1]):
                if vflag[i][j] == 1:
                    continue
                if img[i][j] == 0:
                    # background pixel: mark visited without starting a new label
                    vflag[i][j] = 1
                    continue
                countlabel += 1
                queue_points = deque([])
                queue_points.append([i,j])
                while not len(queue_points)==0:
                    [x,y] = queue_points.popleft()
                    if x < 0 or y < 0 or x >= img.shape[0] or y >= img.shape[1]:
                        continue
                    if vflag[x][y] == 1:
                        continue
                    vflag[x][y] = 1
                    if img[x][y] == 0:
                        continue
                    labels[x][y] = countlabel
                    queue_points.append([x-1,y])
                    queue_points.append([x+1,y])
                    queue_points.append([x,y-1])
                    queue_points.append([x,y+1])
                    queue_points.append([x-1,y-1])
                    queue_points.append([x-1,y+1])
                    queue_points.append([x+1,y-1])
                    queue_points.append([x+1,y+1])
                    
        labels=labels.astype('uint8')
        imsave(fn('outputs/numcomponents.png'),labels)
        newImRGB = np.stack((labels,)*3, axis=-1)

        newImg = Image.fromarray(newImRGB,'RGB')
        print("NumComponents Done")
        self.img = newImg
        self.imgt = ImageTk.PhotoImage(image=self.img)
        self.image_label.configure(image=self.imgt)
        self.status_label.configure(text="Status: Labeling Components Complete")
        self.nComp = countlabel
        self.labels=labels
        global labelflag
        labelflag = True
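For reference (a library alternative, not the author's approach), the same 8-connected labeling and component count can be obtained with scipy.ndimage.label:

import numpy as np
from scipy.ndimage import label

def numcomponents_scipy(bin_img):
    structure = np.ones((3, 3), dtype=int)   # 8-connected neighborhood, matching the eight appends above
    labels, count = label(bin_img > 0, structure=structure)
    return labels, count

Example #5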
def gif(grayFrame1='example.bmp',
        grayFrame2='exampleShift.bmp',
        markedFrame='example_marked.bmp',
        THRESH=5):
    grayIm1 = np.float32(imread(fn(grayFrame1))) / 255.
    grayIm2 = np.float32(imread(fn(grayFrame2))) / 255.
    markedIm = np.float32(imread(fn(markedFrame))) / 255.
    isColored1 = (np.sum(np.abs(grayIm1 - markedIm), axis=2) > .01)
    flow = lucaskanade(grayIm1[:, :, 0], grayIm2[:, :, 0], 11)
    R2 = np.dstack(
        np.meshgrid(np.arange(flow.shape[1]), np.arange(flow.shape[0])))
    pxlMap = R2 + flow
    newMarks = cv2.remap(markedIm,
                         pxlMap.astype(np.float32),
                         None,
                         interpolation=cv2.INTER_CUBIC)
    outputmapname = "{}_MAPPING.bmp".format(grayFrame1[:grayFrame1.index('.')])
    imsave(fn(outputmapname), newMarks)
    flowNorm = np.sqrt(flow[:, :, 0]**2 + flow[:, :, 1]**2)
    flowNorm = np.dstack([flowNorm for x in range(3)])
    #neighbors = np.linalg.norm(grayIm1-flowNorm+grayIm2,axis=2) <= THRESH
    neighborsEqn = np.sqrt(
        (grayIm1[:, :, 0] + flow[:, :, 0] - grayIm2[:, :, 0])**2 +
        (grayIm1[:, :, 0] + flow[:, :, 1] - grayIm2[:, :, 0])**2)
    neighbors = np.where(neighborsEqn <= THRESH, 1, 0)
    isColored2 = (np.sum(np.abs(grayIm2 - markedIm), axis=2) > .01) * neighbors

    grayIm1_conv = RGB2YIQ(grayIm1)
    grayIm2_conv = RGB2YIQ(grayIm2)
    markedIm_conv = RGB2YIQ(markedIm)

    yuvIm1 = np.dstack([
        grayIm1_conv[:, :, 0], markedIm_conv[:, :, 1], markedIm_conv[:, :, 2]
    ])
    yuvIm2 = np.dstack([
        grayIm2_conv[:, :, 0], markedIm_conv[:, :, 1], markedIm_conv[:, :, 2]
    ])

    outputImYIQ1 = getColor(isColored1, yuvIm1)
    marks2 = getColor(isColored2, yuvIm2)
    marks2RGB = YIQ2RGB(marks2)
    outputIm2YIQ = getColor(
        (np.sum(np.abs(grayIm2 - marks2RGB), axis=2) > .01), yuvIm2)

    outputName1 = "{}_out.bmp".format(grayFrame1[:grayFrame1.index('.')])
    outputName2 = "{}_out.bmp".format(grayFrame2[:grayFrame2.index('.')])
    outputIm1 = YIQ2RGB(outputImYIQ1)
    outputIm2 = YIQ2RGB(outputIm2YIQ)
    imsave(fn(outputName1), outputIm1)
    imsave(fn(outputName2), outputIm2)
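lucaskanade() is the author's own dense-flow routine and is not shown in this excerpt; if OpenCV's dense flow were used instead, an equivalent call would be (an alternative, not the original code):

import cv2
import numpy as np

def dense_flow(im1, im2):
    # Farneback expects 8-bit single-channel frames and returns an H x W x 2 (dx, dy) field
    a = (im1 * 255).astype(np.uint8)
    b = (im2 * 255).astype(np.uint8)
    return cv2.calcOpticalFlowFarneback(a, b, None, 0.5, 3, 15, 3, 5, 1.2, 0)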
Example #6
    def getlargest(self):
        print("Largest Components Start")
        k = tk.simpledialog.askinteger("Input", "How many Largest Components? ",parent=self.master,minvalue=0,maxvalue=20)

        nComp = self.nComp
        bin_img = self.bin_img.astype(int)
        tempimg = np.zeros((bin_img.shape),dtype=int)
        print(np.unique(bin_img))
        labels = self.labels
        counts = np.zeros((nComp + 1), dtype=int)   # labels run 1..nComp
        for i in range(bin_img.shape[0]):
            for j in range(bin_img.shape[1]):
                counts[labels[i][j]] += 1
        counts = counts.tolist()
        whilecount=0
        while whilecount<k:
            maxi = 1
            maxs = counts[maxi]
            for i in range(2, nComp + 1):
                if counts[i] > maxs:
                    maxi = i
                    maxs = counts[i]
            for x in range(bin_img.shape[0]):
                for y in range(bin_img.shape[1]):             
                    if labels[x][y] == maxi:
                        tempimg[x][y] = 1
            counts[maxi] = 0
            whilecount+=1
                    
        self.bin_img = tempimg
        
        tempImg = tempimg.copy()
        tempImg[tempImg==1] = 255
        newIm=tempImg.astype('uint8')
        imsave(fn('outputs/largestcomponents.png'), newIm)
        newImRGB = np.stack((newIm,)*3, axis=-1)
        newImg = Image.fromarray(newImRGB, 'RGB')
        print("Largest Components Done")
        self.img = newImg
        self.imgt = ImageTk.PhotoImage(image=self.img)
        self.image_label.configure(image=self.imgt)
        self.status_label.configure(text="Status: K Largest Components Complete")
Example #7
 def adaptivecontrast(self,event=None):
     tk.messagebox.showinfo("Instructions", "Adaptive Contrasting...")
     self.status_label.configure(text="Status: Running")
     self.prevcontrastImg = self.img
     k = 5 #3,4
     contrastval = 60 #40,50
     tempimg = np.asarray(self.img).copy()
     tempimg1 = tempimg[:,:,0]
     img = np.asarray(self.img).astype(int).copy()
     print("Adaptive Contrast Start")
     for i in range(tempimg.shape[0]):
         for j in range(tempimg.shape[1]):
             isum = 0
             count = 0
             for ki in range(-k,k+1):
                 for kj in range(-k,k+1):
                     if ki+i < tempimg.shape[0] and ki+i >= 0 and kj+j < tempimg.shape[1] and kj+j >= 0:
                         if np.all(img[ki+i][kj+j]):
                             isum += tempimg1[ki+i][kj+j]
                             count += 1
             if tempimg1[i][j] * count > isum:
                 if tempimg1[i][j] + contrastval > 255:
                     tempimg[i][j] = [255,255,255]
                 else:
                     tempimg[i][j] = img[i][j] + contrastval
     imsave(fn('outputs/adContrastOut.png'),tempimg)
     
     newImg = Image.fromarray(tempimg, 'RGB')
     self.img = newImg
     self.imgt = ImageTk.PhotoImage(image=self.img)
     self.image_label.configure(image=self.imgt)
     self.status_label.configure(text="Status: Complete")
     print("Adaptive Contrast Done")
     global ocontbuttonflag
     if ocontbuttonflag ==False:
         self.contrast_o = tk.Button(self.text_frame, command=self.original_contrast, text="Original Contrast", width=25, default=ACTIVE, borderwidth=0)
         self.contrast_o.pack()
         ocontbuttonflag = True
Example #8
    def thresholding(self,event=None):
        tk.messagebox.showinfo("Instructions", "Thresholding...")
        self.status_label.configure(text="Status: Thresholding...")
        k=5
        npImg = np.asarray(self.img)
        # PIL arrays are RGB, so convert with the RGB flag
        grayImg = cv2.cvtColor(npImg, cv2.COLOR_RGB2GRAY)
        self.grayImg = grayImg
        bin_img = np.zeros((grayImg.shape), dtype=int)
        print("Thresholding Start")
        for i in range(grayImg.shape[0]):
            for j in range(grayImg.shape[1]):
                ksum = 0
                count = 0
                if grayImg[i][j] != 0:
                    for ki in range(-k,k+1):
                        for kj in range(-k,k+1):
                            if ki+i < grayImg.shape[0] and ki+i >= 0 and kj+j < grayImg.shape[1] and kj+j >= 0:
                                if grayImg[ki+i][kj+j] != 0:
                                    ksum += grayImg[ki+i][kj+j]
                                    count += 1
                    if grayImg[i][j]*count > ksum:
                        bin_img[i][j] = 1
        self.bin_img = bin_img
#        print(np.unique(self.bin_img))
        tempImg =bin_img.copy()
        tempImg[tempImg==1] = 255
        newIm=tempImg.astype('uint8')
        imsave(fn('outputs/threshout.png'), newIm)
        
        newImRGB = np.stack((newIm,)*3, axis=-1)
        newImg = Image.fromarray(newImRGB, 'RGB')
        print("Thresholding Done")
        self.img = newImg
        self.imgt = ImageTk.PhotoImage(image=self.img)
        self.image_label.configure(image=self.imgt)
        self.status_label.configure(text="Status: Thresholding Complete")
        global threshflag
        threshflag = True
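A vectorized sketch of the same rule (a pixel is kept when it exceeds the mean of the non-zero pixels in its (2k+1) x (2k+1) window), using scipy.ndimage.uniform_filter as an alternative to the loops; border handling differs slightly (reflection instead of clipping):

import numpy as np
from scipy.ndimage import uniform_filter

def local_mean_threshold(gray, k=5):
    g = gray.astype(np.float64)
    win = 2 * k + 1
    sums = uniform_filter(g, size=win) * win * win                    # windowed sums
    counts = uniform_filter((g != 0).astype(np.float64), size=win) * win * win
    return ((g != 0) & (g * counts > sums)).astype(np.uint8)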
Example #9
    return Z


########################## Support code below

from os.path import normpath as fn  # Fixes window/linux path conventions
import warnings
warnings.filterwarnings('ignore')

import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D

#### Main function

nrm = imread(fn('inputs/phstereo/true_normals.png'))

# Uncomment the next line to read your output instead
# nrm = imread(fn('outputs/prob3_nrm.png'))

mask = np.float32(imread(fn('inputs/phstereo/mask.png')) > 0)

nrm = np.float32(nrm / 255.0)
nrm = nrm * 2.0 - 1.0
nrm = nrm * mask[:, :, np.newaxis]

# Main Call
Z = ntod(nrm, mask, 1e-7)

# Plot 3D shape
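The original plotting code is cut off at this point; a typical plot_surface rendering of the recovered depth map Z (a sketch under that assumption, reusing the imports above) would be:

x, y = np.meshgrid(np.arange(Z.shape[1]), np.arange(Z.shape[0]))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, Z, cmap=cm.viridis, linewidth=0)
plt.show()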
Example #10
        w_start = np.maximum(0, int(cluster_centers[k, 1] - S[k]))
        w_end = np.minimum(w - 1, int(cluster_centers[k, 1] + S[k]))
        im_patch = aug_im[h_start:h_end, w_start:w_end, :]
        dist2 = np.sum(np.square(im_patch - mu_k), axis=-1)
        dist = np.sqrt(dist2)
        L[h_start:h_end, w_start:w_end] = np.where(
            dist < min_dist[h_start:h_end, w_start:w_end], k, L[h_start:h_end,
                                                                w_start:w_end])
        min_dist[h_start:h_end, w_start:w_end] = np.where(
            dist < min_dist[h_start:h_end, w_start:w_end], dist,
            min_dist[h_start:h_end, w_start:w_end])

    return L


im = np.float32(imread(fn('inputs/24063.jpg')))

cluster_counts = [25, 49, 100]
for num_clusters in cluster_counts:
    start_time = time.time()
    [src_set, cluster_centers] = improved_css_img(X=im,
                                                  K=num_clusters,
                                                  max_iter=10,
                                                  lmda_1=2)
    end_time = time.time()
    print("time elapse:")
    print(end_time - start_time)
    imsave(
        fn('outputs/dijkstra/dijkstra_' + str(num_clusters) + '_centers.jpg'),
        normalize_im(create_centers_im(im.copy(), cluster_centers)))
    out_im = slic_adjS(im, num_clusters, cluster_centers)
Example #11
        h_start = np.maximum(0, int(cluster_centers[k, 0] - S[k]))
        h_end = np.minimum(h - 1, int(cluster_centers[k, 0] + S[k]))
        w_start = np.maximum(0, int(cluster_centers[k, 1] - S[k]))
        w_end = np.minimum(w - 1, int(cluster_centers[k, 1] + S[k]))
        im_patch = aug_im[h_start:h_end, w_start:w_end, :]
        dist2 = np.sum(np.square(im_patch - mu_k), axis=-1)
        dist = np.sqrt(dist2)
        L[h_start:h_end, w_start:w_end] = np.where(dist < min_dist[h_start:h_end, w_start:w_end],
                                                   k, L[h_start:h_end, w_start:w_end])
        min_dist[h_start:h_end, w_start:w_end] = np.where(dist < min_dist[h_start:h_end, w_start:w_end],
                                                          dist, min_dist[h_start:h_end, w_start:w_end])

    return L


im = np.float32(imread(fn('inputs/302003.jpg')))

cluster_counts = [25,49,100,200,300]
for num_clusters in cluster_counts:
    start_time = time.time()
    [src_set, cluster_centers] = qd_css_img(X=im, K=num_clusters, max_iter=10, lmda_1=2)
    end_time = time.time()
    print("time elapse:")
    print(end_time - start_time)
    imsave(fn('outputs/qd/qd_' + str(num_clusters) + '_centers.jpg'),
           normalize_im(create_centers_im(im.copy(), cluster_centers)))
    out_im = slic_adjS(im, num_clusters, cluster_centers)

    border_im = np.ones_like(out_im)
    gg = get_gradients(out_im)
    gg2 = get_gradients(gg)
Example #12
import numpy as np
from skimage.io import imread, imsave

# Edit the following two functions


def vflip(X):
    X = X[::-1, :, :]
    return X


def hflip(X):
    X = X[:, ::-1, :]
    return X


########################## Support code below

from os.path import normpath as fn  # Fixes window/linux path conventions
import warnings

warnings.filterwarnings('ignore')

img = imread(fn('inputs/cat.jpg'))

flipy = vflip(img)
flipx = hflip(img)

imsave(fn('outputs/flipy.jpg'), flipy)
imsave(fn('outputs/flipx.jpg'), flipx)
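For reference, NumPy's built-in flips are equivalent to the two functions above (an aside, not part of the assignment code):

# flipy = np.flipud(img)   # vertical flip, same as vflip(img)
# flipx = np.fliplr(img)   # horizontal flip, same as hflip(img)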
Example #13
    img[0:sz[0],0:sz[1]] = vis(pyr[1:],lev+1)

    # Just scale / shift gradient images for visualization
    img[sz[0]:,0:sz[1]] = pyr[0][0]*(2**(1-lev))+0.5
    img[0:sz[0],sz[1]:] = pyr[0][1]*(2**(1-lev))+0.5
    img[sz[0]:,sz[1]:] = pyr[0][2]*(2**(1-lev))+0.5

    return img



############# Main Program


img = np.float32(imread(fn('inputs/p6_inp.png')))/255.

# Visualize pyramids
pyr = im2wv(img,1)
imsave(fn('outputs/prob6a_1.png'),clip(vis(pyr)))

pyr = im2wv(img,2)
imsave(fn('outputs/prob6a_2.png'),clip(vis(pyr)))

pyr = im2wv(img,3)
imsave(fn('outputs/prob6a_3.png'),clip(vis(pyr)))

# Inverse transform to reconstruct image
im = clip(wv2im(pyr))
imsave(fn('outputs/prob6b.png'),im)
Example #14
                b = ((p1y * 10) % 10) / 10  #second weight
                a1 = a / (a + b)
                b1 = b / (a + b)
                i = int(p1x)  # x
                j = int(p1y)  # y

                # i = int(p1[0])  # x
                # j = int(p1[1])  # y
                intensity = b1 * (a1 * src[i, j] + b1 * src[i, j + 1]) + a1 * (
                    a1 * src[i + 1, j] + b1 * src[i + 1, j + 1])
                dest[x, y] = intensity

    return dest


########################## Support code below

from skimage.io import imread, imsave
from os.path import normpath as fn  # Fixes window/linux path conventions
import warnings
warnings.filterwarnings('ignore')

simg = np.float32(imread(fn('inputs/p4src.png'))) / 255.
dimg = np.float32(imread(fn('inputs/p4dest.png'))) / 255.
# dpts = np.float32([ [276,54],[406,79],[280,182],[408,196]]) # Hard coded top-left, bottom-left, top-right, bottom-right
dpts = np.float32([[54, 276], [79, 406], [182, 280], [196, 408]])

comb = splice(simg, dimg, dpts)

imsave(fn('outputs/prob4.png'), comb)
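For reference, a textbook bilinear lookup at a fractional (row, col) location, written as a self-contained sketch (the weighting in the fragment above is the author's own and is kept as-is):

import numpy as np

def bilinear_sample(src, p, q):
    i, j = int(np.floor(p)), int(np.floor(q))
    a, b = p - i, q - j          # fractional offsets within the cell
    return ((1 - a) * (1 - b) * src[i, j] + (1 - a) * b * src[i, j + 1] +
            a * (1 - b) * src[i + 1, j] + a * b * src[i + 1, j + 1])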
Example #15
               patch_size] = input_img[best_patch_h + edgei, best_patch_w +
                                       minIndex:best_patch_w + patch_size]
    for edgej in range(overlap, len(minCostPathVertical)):
        minIndex = minCostPathVertical[edgej]
        output[i + minIndex:i + patch_size,
               j + edgej] = input_img[best_patch_h + minIndex:best_patch_h +
                                      patch_size, best_patch_w + edgej]


from os.path import normpath as fn  # Fixes window/linux path conventions
import warnings
warnings.filterwarnings('ignore')

# img = np.float32(imread(fn('inputs/cotton.png')))

# patch_size = 16
# output = quilting(img, patch_size)

# imsave(fn('outputs/output_cotton.png'), output / 255)

source_img = np.float32(imread(fn('inputs/rice.png')))
target_img = np.float32(imread(fn('inputs/man_face.png')))
patch_size = 48
for i in range(0, 5):
    reduce_ratio = float(2) / float(3)
    reduced_patch_size = int(patch_size * (reduce_ratio**i))
    print(reduced_patch_size)
    output = transfer(source_img, target_img, False, reduced_patch_size, 0.2,
                      0.1)
    imsave(fn('outputs/output_manface_itr%i_alpha0.2.png' % (i)), output / 255)
Example #16
import numpy as np
from skimage.io import imread, imsave

# Fill this out
# X is input 8-bit grayscale image
# Return equalized image with intensities from 0-255
def histeq(X):
    # Generating CDF
    unique,unique_counts = np.unique(X, return_counts=True)
    counts = np.zeros(np.iinfo(np.uint8).max+1)
    counts[unique] = unique_counts
    cdf = np.cumsum(counts/np.size(X))

    # Applying equalization
    return cdf[X]*255.0


########################## Support code below

from os.path import normpath as fn # Fixes window/linux path conventions
import warnings
warnings.filterwarnings('ignore')

img = imread(fn('inputs/p2_inp.jpg'))

out = histeq(img)

out = np.maximum(0,np.minimum(255,out))
out = np.uint8(out)

imsave(fn('outputs/prob2.jpg'),out)
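As a cross-check (a library alternative, not part of the assignment code), skimage.exposure.equalize_hist computes a comparable mapping; it returns floats in [0, 1], so scale by 255 before comparing:

from os.path import normpath as fn
import numpy as np
from skimage import exposure
from skimage.io import imread

img = imread(fn('inputs/p2_inp.jpg'))
ref = np.uint8(exposure.equalize_hist(img, nbins=256) * 255.0)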
Example #17
# normalize_im normalizes our output to be between 0 and 1
def normalize_im(im):
    im += np.abs(np.min(im))
    im /= np.max(im)
    return im


# create an output image of our cluster centers
def create_centers_im(im, centers):
    for center in centers:
        im[center[0] - 2:center[0] + 2,
           center[1] - 2:center[1] + 2] = [255., 0., 255.]
    return im


im = np.float32(imread(fn('inputs/lion.jpg')))

cluster_counts = [25, 49, 64, 81, 100]
for num_clusters in cluster_counts:
    cluster_centers = get_cluster_centers(im, num_clusters)
    imsave(fn('outputs/prob1a_' + str(num_clusters) + '_centers.jpg'),
           normalize_im(create_centers_im(im.copy(), cluster_centers)))
    out_im = slic(im, num_clusters, cluster_centers)

    Lr = np.random.permutation(num_clusters)
    out_im = Lr[np.int32(out_im)]
    dimg = cm.jet(
        np.minimum(1,
                   np.float32(out_im.flatten()) / float(num_clusters)))[:, 0:3]
    dimg = dimg.reshape([out_im.shape[0], out_im.shape[1], 3])
    imsave(fn('outputs/prob1b_' + str(num_clusters) + '.jpg'),
           normalize_im(dimg))
Example #18
import numpy as np
np.random.seed(0)

from os.path import normpath as fn
from time import time

import edf ## This will be your code

# Load data
data = np.load(fn('inputs/mnist_26k.npz'))

train_im = np.float32(data['im_train'])/255.-0.5
train_im = np.reshape(train_im,[-1,28,28,1])
train_lb = data['lbl_train']

val_im = np.float32(data['im_val'])/255.-0.5
val_im = np.reshape(val_im,[-1,28,28,1])
val_lb = data['lbl_val']


#######################################

# Inputs and parameters
inp = edf.Value()
lab = edf.Value()

K1 = edf.Param()
B1 = edf.Param()

K2 = edf.Param()
B2 = edf.Param()
Example #19
       0:hk_x], Ko[0:hk_y, size[1] - hk_x +
                   1:size[1]], Ko[size[0] - hk_y + 1:size[0],
                                  0:hk_x], Ko[size[0] - hk_y + 1:size[0],
                                              size[1] - hk_x +
                                              1:size[1]] = k_1, k_2, k_3, k_4

    return Ko


########################## Support code below

from os.path import normpath as fn  # Fixes window/linux path conventions
import warnings
warnings.filterwarnings('ignore')

img = np.float32(imread(fn('inputs/p5_inp.jpg'))) / 255.

# Create Gaussian Kernel
x = np.float32(range(-21, 22))
x, y = np.meshgrid(x, x)
G = np.exp(-(x * x + y * y) / 2 / 9.)
G = G / np.sum(G[:])

# Traditional convolve
v1 = conv2(img, G, 'same', 'wrap')

# Convolution in Fourier domain
G = kernpad(G, img.shape)
v2f = np.fft.fft2(G) * np.fft.fft2(img)
v2 = np.real(np.fft.ifft2(v2f))
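kernpad (partially shown above) places the kernel quadrants at the four corners of a zero, image-sized array so that the kernel origin lands at index (0, 0) for the circular convolution. An equivalent compact formulation using np.roll (a sketch, not the original function):

import numpy as np

def kernpad_roll(K, size):
    Ko = np.zeros(size[:2], dtype=np.float32)
    Ko[:K.shape[0], :K.shape[1]] = K
    # shift the kernel center to (0, 0); the remaining pieces wrap to the corners
    return np.roll(Ko, (-(K.shape[0] // 2), -(K.shape[1] // 2)), axis=(0, 1))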
Example #20
        Y += Xshift*B_k

    Y = Y/norm_sums
    return Y


########################## Support code below

def clip(im):
    return np.maximum(0.,np.minimum(1.,im))

from os.path import normpath as fn # Fixes window/linux path conventions
import warnings
warnings.filterwarnings('ignore')

img1 = np.float32(imread(fn('inputs/p4_nz1.png')))/255.
img2 = np.float32(imread(fn('inputs/p4_nz2.png')))/255.

K=9

print("Creating outputs/prob4_1_a.png")
im1A = bfilt(img1,K,2,0.5)
imsave(fn('outputs/prob4_1_a.png'),clip(im1A))


print("Creating outputs/prob4_1_b.png")
im1B = bfilt(img1,K,4,0.25)
imsave(fn('outputs/prob4_1_b.png'),clip(im1B))

print("Creating outputs/prob4_1_c.png")
im1C = bfilt(img1,K,16,0.125)
Example #21
########################## Support code below

from os.path import normpath as fn  # Fixes window/linux path conventions
import warnings

warnings.filterwarnings('ignore')


# Utility functions to clip intensities b/w 0 and 1
# Otherwise imsave complains
def clip(im):
    return np.maximum(0., np.minimum(1., im))


############# Main Program
im1 = np.float32(imread(fn('inputs/CC/ex1.jpg'))) / 255.
im2 = np.float32(imread(fn('inputs/CC/ex2.jpg'))) / 255.
im3 = np.float32(imread(fn('inputs/CC/ex3.jpg'))) / 255.

im1a = balance2a(im1)
im2a = balance2a(im2)
im3a = balance2a(im3)

imsave(fn('outputs/prob2a_1.png'), clip(im1a))
imsave(fn('outputs/prob2a_2.png'), clip(im2a))
imsave(fn('outputs/prob2a_3.png'), clip(im3a))

im1b = balance2b(im1)
im2b = balance2b(im2)
im3b = balance2b(im3)
Example #22
    gx = conv2(gx,df,'same','symm')
    gy = conv2(im,sf,'same','symm')
    gy = conv2(gy,df.T,'same','symm')
    return np.sqrt(gx*gx+gy*gy)

# normalize_im normalizes our output to be between 0 and 1
def normalize_im(im):
    im += np.abs(np.min(im))
    im /= np.max(im)
    return im

# create an output image of our cluster centers
def create_centers_im(im,centers):
    for center in centers:
        im[center[0]-2:center[0]+2,center[1]-2:center[1]+2] = [255.,0.,255.]
    return im

im = np.float32(imread(fn('inputs/lion.jpg')))

cluster_counts = [25,49,64,81,100]
for num_clusters in cluster_counts:
    cluster_centers = get_cluster_centers(im,num_clusters)
    imsave(fn('outputs/prob1a_' + str(num_clusters)+'_centers.jpg'),normalize_im(create_centers_im(im.copy(),cluster_centers)))
    out_im = slic(im,num_clusters,cluster_centers)

    Lr = np.random.permutation(num_clusters)
    out_im = Lr[np.int32(out_im)]
    dimg = cm.jet(np.minimum(1,np.float32(out_im.flatten())/float(num_clusters)))[:,0:3]
    dimg = dimg.reshape([out_im.shape[0],out_im.shape[1],3])
    imsave(fn('outputs/prob1b_'+str(num_clusters)+'.jpg'),normalize_im(dimg))
Example #23
    return x


########################## Support code below

from os.path import normpath as fn  # Fixes window/linux path conventions
import warnings
warnings.filterwarnings('ignore')


# Utility functions to clip intensities b/w 0 and 1
# Otherwise imsave complains
def clip(im):
    return np.maximum(0., np.minimum(1., im))


############# Main Program

lmain = 0.88

img = np.float32(imread(fn('inputs/p1.png'))) / 255.

pyr = im2wv(img, 4)
for i in range(len(pyr) - 1):
    for j in range(2):
        pyr[i][j] = denoise_coeff(pyr[i][j], lmain / (2**i))
    pyr[i][2] = denoise_coeff(pyr[i][2], np.sqrt(2) * lmain / (2**i))

im = wv2im(pyr)
imsave(fn('outputs/prob1.png'), clip(im))
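denoise_coeff is defined earlier in the original file and does not appear in this excerpt; a typical soft-thresholding form it might take (an assumption, not the author's exact code):

import numpy as np

def denoise_coeff(x, lmbda):
    # soft threshold: shrink wavelet coefficients toward zero by lmbda
    return np.sign(x) * np.maximum(np.abs(x) - lmbda, 0.)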
Example #24
 def thinning(self):
     self.status_label.configure(text="Status: Thinning Start")
     print("Thinning Start")
     bin_img = self.bin_img
     cell1 = self.cell1
     cell2 = self.cell2
     cell3 = self.cell3
     absthreshold = 5
     relthreshold = 0.5
     removed1 = [0]*len(cell1)
     removed2 = [0]*len(cell2)
     removed3 = [0]*len(cell3)
     flag = True
     thiniter = 0
     iso = [0]*len(cell2)
     
     while flag:
         flag = False
         thiniter += 1
         cell1parents = [0]*len(cell1)
         cell2parents = [0]*len(cell2)
         
         for i in range(len(cell2)):
             if removed2[i] == 0:
                 cell1parents[int(cell2[i][0]-1)] += 1
                 cell1parents[int(cell2[i][1]-1)] += 1
                 
         for i in range(len(cell3)):
             if removed3[i] == 0:
                 cell2parents[int(cell3[i][0]-1)] += 1
                 cell2parents[int(cell3[i][1]-1)] += 1
                 cell2parents[int(cell3[i][2]-1)] += 1
                 cell2parents[int(cell3[i][3]-1)] += 1
                 
         for i in range(len(cell2)):
             if removed2[i] == 0:
                 if not (cell2parents[i]==0 and (thiniter-iso[i]) > absthreshold and (1-iso[i]//thiniter) > relthreshold):
                     if cell1parents[int(cell2[i][0]-1)] == 1 and removed1[int(cell2[i][0]-1)] == 0:
                         removed2[i] = 1
                         removed1[int(cell2[i][0]-1)] = 1
                         flag = True
                     elif cell1parents[int(cell2[i][1]-1)] == 1 and removed1[int(cell2[i][1]-1)] == 0:
                         removed2[i] = 1
                         removed1[int(cell2[i][1]-1)] = 1
                         flag = True
                         
         for i in range(len(cell3)):
             if removed3[i] == 0:
                 cell3elem = cell3[i]
                 for j in range(4):
                     if cell2parents[int(cell3elem[j]-1)] == 1 and removed2[int(cell3elem[j]-1)] == 0:
                         removed3[i] = 1
                         removed2[int(cell3elem[j]-1)] = 1
                         flag = True
                         break
                     
         for i in range(len(cell2)):
             if removed2[i] == 0 and cell2parents[i] == 0 and iso[i] == 0:
                 iso[i] = thiniter+1
                 
     output = []
     
     for i in range(len(cell1)):
         if removed1[i] == 0:
             output.append(cell1[i])
     outImg = np.full((bin_img.shape),0)
     
     for l in output:
         outImg[l[0]][l[1]] = 255
     
     print("Number of Thinned Output Points: ",len(output))
     outImg=outImg.astype('uint8')
     imsave(fn('outputs/thinOut.png'),outImg)
     newImRGB = np.stack((outImg,)*3, axis=-1)
     newImg = Image.fromarray(newImRGB, 'RGB')
     print("Thinning Done")
     self.img = newImg
     self.imgt = ImageTk.PhotoImage(image=self.img)
     self.image_label.configure(image=self.imgt)
     self.final = len(output)
     self.status_label.configure(text="Status: Thinning Complete")
Example #25
from skimage.io import imread, imsave
import numpy as np
from os.path import normpath as fn  # Fixes window/linux path conventions
import matplotlib.cm as cm
import warnings
from scipy.ndimage import gaussian_filter
from numpy.linalg import matrix_rank
import cv2

warnings.filterwarnings('ignore')

im = np.float32(imread(fn('dot_input.jpg')))
normal = np.array([[0], [0], [1]])
print(normal.shape, "hello")

#Sampling translation along z axis
tz = np.linspace(0, 1, 5)  # five translation samples along z; arange(0, 1, 5) would yield a single value
# print(tz[3])
tx = 3
ty = 4
T = np.zeros((3, 3, 5))
H = np.zeros((3, 3, 5))
rows, cols, ch = im.shape
# Translational matrix
for i in range(np.size(tz)):
    # print(i)

    #
    T[:, :,
      i] = [[1 + tx * normal[0], tx * normal[1], tx * normal[2]],
            [ty * normal[0], 1 + ty * normal[1], ty * normal[2]],
Example #26
    # xcoord2, ycoord2 = warp_coords[0, :], warp_coords[1, :]
    # # Get pixels within image boundary
    # indices = np.where((xcoord2 >= 0) & (xcoord2 < width) &
    #                    (ycoord2 >= 0) & (ycoord2 < height))
    # xpix2, ypix2 = xcoord2[indices], ycoord2[indices]
    # xpix, ypix = x_ori[indices], y_ori[indices]
    # # Map the pixel RGB data to new location in another array
    # canvas = np.zeros_like(image)
    # canvas[int(ypix2), int(xpix2)] = image[int(ypix), int(xpix)]

    return dest


########################## Support code below

from skimage.io import imread, imsave
from os.path import normpath as fn  # Fixes window/linux path conventions
import warnings

warnings.filterwarnings('ignore')

simg = np.float32(imread(fn('dot_input.jpg'))) / 255.
dimg = np.float32(imread(fn('test1.jpg'))) / 255.
dpts = np.float32([[276, 54], [406, 79], [280, 182], [408, 196]])  # Hard coded

comb = splice(simg)
# pts = np.array([[1,1,1,1],[2,1,4,1],[1,1,3,3],[1,2,1,5]])
# getH(pts)

imsave(fn('test.jpg'), comb)
Example #27
    #finally, use these indices to lookup the corresponding d-values
    d = drange[y, x, min_idx]
    #for each point, generate range of possible d values
    drange = np.tile(np.arange(dmax + 1),
                     np.size(x)).reshape((dmax + 1, ) + x.shape)
    #limit drange to values that are less than the x-values

    print(d)

    return d


########################## Support code below

from skimage.io import imread, imsave
from os.path import normpath as fn  # Fixes window/linux path conventions
import matplotlib.cm as cm
import warnings
warnings.filterwarnings('ignore')

left = imread(fn('inputs/left.jpg'))
right = imread(fn('inputs/right.jpg'))

d = smatch(left, right, 40)
census(left)

# Map to color and save
dimg = cm.jet(np.minimum(1, np.float32(d.flatten()) / 20.))[:, 0:3]
dimg = dimg.reshape([d.shape[0], d.shape[1], 3])
imsave(fn('outputs/prob5.png'), dimg)
Example #28
    E[digRangeIndex] = 0
    antiDigRangeIndex = np.where(
        np.logical_and(antiDigRange,
                       conv2(H, antiD, mode='same') <= 0))
    E[antiDigRangeIndex] = 0

    return E


########################## Support code below

from os.path import normpath as fn  # Fixes window/linux path conventions
import warnings
warnings.filterwarnings('ignore')

img = np.float32(imread(fn('inputs/p3_inp.jpg'))) / 255.

H, theta = grads(img)

imsave(fn('outputs/prob3_a.jpg'), H / np.max(H[:]))

## Part b

E0 = np.float32(H > T0)
E1 = np.float32(H > T1)
E2 = np.float32(H > T2)

imsave(fn('outputs/prob3_b_0.jpg'), E0)
imsave(fn('outputs/prob3_b_1.jpg'), E1)
imsave(fn('outputs/prob3_b_2.jpg'), E2)
Example #29
File: prob3b.py  Project: worstkid92/CV
            cud[y, :, dp] = cv[y, :, dp] + np.amin(ls, axis=1)
    tmpcv = clr + crl + cud + cdu
    d = np.argmin(tmpcv, axis=2)

    #chat[:, x + 1, dp] = cv[:, x + 1, dp] + ls[np.arange(H),ttt]

    return d


########################## Support code below

from skimage.io import imread, imsave
from os.path import normpath as fn  # Fixes window/linux path conventions
import matplotlib.cm as cm
import warnings
warnings.filterwarnings('ignore')

left = np.float32(imread(fn('inputs/left.jpg'))) / 255.
right = np.float32(imread(fn('inputs/right.jpg'))) / 255.

left_g = np.mean(left, axis=2)
right_g = np.mean(right, axis=2)

cv = buildcv(left_g, right_g, 50)
d = SGM(cv, 0.5, 16)

# Map to color and save
dimg = cm.jet(np.minimum(1, np.float32(d.flatten()) / 50.))[:, 0:3]
dimg = dimg.reshape([d.shape[0], d.shape[1], 3])
imsave(fn('outputs/prob3b.jpg'), dimg)
Example #30
    sz1 = [sz[0] * 2, sz[1] * 2]
    img = np.zeros(sz1, dtype=np.float32)

    img[0:sz[0], 0:sz[1]] = vis(pyr[1:], lev + 1)

    # Just scale / shift gradient images for visualization
    img[sz[0]:, 0:sz[1]] = pyr[0][0] * (2**(1 - lev)) + 0.5
    img[0:sz[0], sz[1]:] = pyr[0][1] * (2**(1 - lev)) + 0.5
    img[sz[0]:, sz[1]:] = pyr[0][2] * (2**(1 - lev)) + 0.5

    return img


############# Main Program

img = np.float32(imread(fn('inputs/p6_inp.png'))) / 255.

# # Visualize pyramids
# pyr = im2wv(img,1)
# imsave(fn('outputs/prob6a_1.png'),clip(vis(pyr)))
#
# pyr = im2wv(img,2)
# imsave(fn('outputs/prob6a_2.png'),clip(vis(pyr)))

pyr = im2wv(img, 3)
# imsave(fn('outputs/prob6a_3.png'),clip(vis(pyr)))

pyr1 = pyr
pyr2 = pyr
pyr3 = pyr