def scaleLin(im, k):
    '''Takes an image and a scale factor.
    Returns an image scaled using bilinear interpolation.'''
    out = io.constantIm(im.shape[0] * k, im.shape[1] * k, 0)
    for y, x in imIter(out):
        out[y, x] = interpolateLin(im, float(y) / k, float(x) / k, True)
    return out
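# imIter() is used throughout this file but not defined here. A minimal sketch,
# assuming it simply yields (y, x) index pairs over the first two dimensions of
# an image; the real helper in this codebase may differ.
def imIter(im):
    for y in range(im.shape[0]):
        for x in range(im.shape[1]):
            yield y, x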
def scaleNN(im, k):
    '''Takes an image and a scale factor.
    Returns an image scaled using nearest neighbor interpolation.'''
    out = io.constantIm(im.shape[0] * k, im.shape[1] * k, 0.0)
    for y, x in imIter(out):
        # cast to int so the rounded coordinate is a valid numpy index
        out[y, x] = im[clipY(im, int(round(y / k))), clipX(im, int(round(x / k)))]
    return out
def stitchH(im1, im2, H1):
    '''Stitch im1 and im2 into a panorama. The resulting panorama should be in
    the coordinate system of im2, though possibly extended to a larger image.
    That is, im2 should never appear distorted in the resulting panorama, only
    possibly translated. Returns the stitched output (which may be larger than
    either input image).'''
    H = np.linalg.inv(H1)
    bbox1 = [[0, 0], [im2.shape[0] - 1, im2.shape[1] - 1]]
    bbox2 = computeTransformedBBox(im1.shape, H)
    bbox = bboxUnion(bbox1, bbox2)
    trans = translate(bbox)
    height = bbox[1][0] - bbox[0][0] + 1
    width = bbox[1][1] - bbox[0][1] + 1
    out = io.constantIm(height, width, 0.0)
    ty = trans[0, 2]
    tx = trans[1, 2]
    out[-ty:-ty + im2.shape[0], -tx:-tx + im2.shape[1]] = im2
    Htrans = np.dot(H, trans)
    applyHomographyFast(im1, out, Htrans, True)
    return out
def painterly(im, texture, N=10000, size=50, noise=0.3, debug=False, imname=''):
    '''First paints at a coarse scale using all 1's for importance sampling,
    then paints again at size/4 scale using the sharpness map for importance
    sampling.'''
    out = io.constantIm(im.shape[0], im.shape[1])
    outCopy = None
    if debug:
        outCopy = out.copy()
    # first pass
    importance_first_pass = np.ones_like(im)
    singleScalePaint(im, out, importance_first_pass, texture, size, N, noise)
    if debug:
        io.imwrite(out, str(imname + "PainterlyFirstPassOnly.png"))
    # second pass
    importance_second_pass = helper.sharpnessMap(im)
    singleScalePaint(im, out, importance_second_pass, texture, size / 4, N, noise)
    if debug:
        singleScalePaint(im, outCopy, importance_second_pass, texture, size / 4, N, noise)
        io.imwrite(outCopy, str(imname + "PainterlySecondPassOnly.png"))
    return out
def warp(im, segmentsBefore, segmentsAfter, a=10, b=1, p=1):
    '''Takes an image, a list of before segments, a list of after segments,
    and the parameters a, b, p (see Beier).'''
    height = im.shape[0]
    width = im.shape[1]
    out = io.constantIm(height, width, 0.0)
    for y, x in imIter(out):
        X = np.array([y, x], dtype=np.float64)
        DSUM = np.array([0, 0], dtype=np.float64)
        weightsum = 0
        for i in range(len(segmentsAfter)):
            (u, v) = segmentsAfter[i].uv(X)
            X_prime = segmentsBefore[i].uvtox(u, v)
            D = np.subtract(X_prime, X)
            dist = segmentsAfter[i].dist(X)
            w = weight(segmentsAfter[i], X)
            DSUM = np.add(DSUM, D * w)
            weightsum += w
        X_final = np.add(X, DSUM / float(weightsum))
        pixel = interpolateLin(im, X_final[0], X_final[1])
        out[y, x] = pixel
    return out
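# warp() above calls a weight() helper that is not defined in this file. A
# minimal sketch following the Beier-Neely formula weight = (length**p / (a + dist))**b,
# assuming the segment exposes a dist(X) method (as used in warp) and
# hypothetical endpoint attributes P1 and P2; the defaults for a, b, p mirror
# warp's signature, since warp does not pass them through.
def weight(segment, X, a=10, b=1, p=1):
    dist = segment.dist(X)                             # distance from X to the segment
    length = np.linalg.norm(segment.P2 - segment.P1)   # hypothetical endpoint attributes
    return (length ** p / (a + dist)) ** b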
def convolve(im, kernel):
    # Return an image filtered by kernel
    shiftY, shiftX = (int(kernel.shape[0] / 2), int(kernel.shape[1] / 2))
    im_out = io.constantIm(im.shape[0], im.shape[1], 0)
    for y, x in imIter(im_out):
        for yp, xp in imIter(kernel):
            im_out[y, x] += getEdgePadded(im, y + yp - shiftY, x + xp - shiftX) * kernel[yp, xp]
    return im_out
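# convolve() and boxBlur() rely on getEdgePadded(), which is not defined in
# this file. A minimal sketch, assuming it clamps out-of-range coordinates to
# the nearest valid pixel (edge padding):
def getEdgePadded(im, y, x):
    y = min(max(y, 0), im.shape[0] - 1)
    x = min(max(x, 0), im.shape[1] - 1)
    return im[y, x]

# Example use of convolve with a normalized 3x3 box kernel
# (equivalent to boxBlur(im, 3)):
# blurred = convolve(im, np.ones((3, 3)) / 9.0)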
def scaleLin(im, k):
    '''Takes an image and a scale factor.
    Returns an image scaled using bilinear interpolation.'''
    (height, width, depth) = np.shape(im)
    img = io.constantIm(im.shape[0] * k, im.shape[1] * k, 0.0)
    for y, x in imIter(img):
        # float division so integer scale factors do not truncate coordinates
        img[y, x] = interpolateLin(im, float(y) / k, float(x) / k, True)
    return img
def warpBy1(im, src_segment, dest_segment):
    h, w = im.shape[0:2]
    out = imageIO.constantIm(h, w, [0, 0, 0])
    for y, x in imIter(out):
        dest_point = np.array([y, x])
        y_p, x_p = transform(dest_point, dest_segment, src_segment)
        out[y, x] = edgePaddingAccessor(y_p, x_p, im)
    return interpolateLin(out, .5, .5, accessor=edgePaddingAccessor)
def boxBlur(im, k):
    # Return a blurred image filtered by a box filter
    shift = int(k / 2)
    factor = 1.0 / (k ** 2)
    im_out = io.constantIm(im.shape[0], im.shape[1], 0)
    for y, x in imIter(im_out):
        for yp, xp in [(a, b) for a in xrange(k) for b in xrange(k)]:
            im_out[y, x] += getEdgePadded(im, y + yp - shift, x + xp - shift) * factor
    return im_out
def interpolateLin(im, k_y, k_x, accessor=blackAccessor):
    # k_y, k_x better than y, x, which is confusing.
    h, w = im.shape[0:2]
    out = imageIO.constantIm(h, w, 0.0)
    for y, x in imIter(im):
        y_avr = (accessor(y - 1, x, im) + accessor(y + 1, x, im)) / 2
        x_avr = (accessor(y, x - 1, im) + accessor(y, x + 1, im)) / 2
        pixel_vals = (y_avr * k_y + x_avr * k_x)
        out[y, x] = pixel_vals
    return out
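# The accessor arguments used by interpolateLin() and warpBy1() above are not
# defined in this file. Minimal sketches, assuming blackAccessor returns black
# for out-of-bounds coordinates and edgePaddingAccessor clamps them to the
# image border:
def blackAccessor(y, x, im):
    if y < 0 or y >= im.shape[0] or x < 0 or x >= im.shape[1]:
        return 0.0
    return im[y, x]

def edgePaddingAccessor(y, x, im):
    y = min(max(int(y), 0), im.shape[0] - 1)
    x = min(max(int(x), 0), im.shape[1] - 1)
    return im[y, x]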
def painterly(im, texture, N=10000, size=50, noise=0.3):
    '''First paints at a coarse scale using all 1's for importance sampling,
    then paints again at size/4 scale using the sharpness map for importance
    sampling.'''
    importanceLow = np.ones_like(im)
    outLow = io.constantIm(im.shape[0], im.shape[1], 0.0)
    singleScalePaint(im, outLow, importanceLow, texture, size, N, noise)
    importanceHigh = helper.sharpnessMap(im)
    singleScalePaint(im, outLow, importanceHigh, texture, size / 4, N, noise)
    return outLow
def scaleNN(im, k):
    '''Takes an image and a scale factor.
    Returns an image scaled using nearest neighbor interpolation.'''
    height = im.shape[0]
    width = im.shape[1]
    out = io.constantIm(height * k, width * k, 0.0)
    for y, x in imIter(out):
        # cast to int so the result is a valid index even for non-integer k
        out[y, x] = im[int(y / k), int(x / k)]
    return out
def scaleNN(im, k):
    '''Takes an image and a scale factor.
    Returns an image scaled using nearest neighbor interpolation.'''
    out = io.constantIm(im.shape[0] * k, im.shape[1] * k, 0)
    for y, x in imIter(out):
        origY = clipY(im, int(round(y / k)))
        origX = clipX(im, int(round(x / k)))
        out[y, x] = pix(im, origY, origX)
    return out
def warpBy1(im, segmentBefore, segmentAfter):
    '''Takes an image, one before segment, and one after segment.
    Returns an image that has been warped according to the two segments.'''
    out = io.constantIm(im.shape[0], im.shape[1], 0)
    for y, x in imIter(out):
        u, v = segmentAfter.uv(np.array([y, x]))
        Xprime = segmentBefore.uvtox(u, v)
        out[y, x] = interpolateLin(im, Xprime[0], Xprime[1], True)
    return out
def merge2images(im1, im2, H):
    B = computeTransformedBBox(im1, linalg.inv(H))
    B = bboxUnion(B, array([[0.0, 0.0], 1.0 * array(im2.shape[0:2])]))
    print 'bbox after union: ', B
    T = translate(B[0, 0], B[0, 1])
    out = imageIO.constantIm(B[1, 0] - B[0, 0], B[1, 1] - B[0, 1], 0)
    applyHomography(im2, out, T)
    applyHomography(im1, out, dot(H, T))
    return out
def bilaYUV(im, sigmaRange, sigmaY, sigmaUV):
    # 6.865 only: filter YUV differently
    imYUV = rgb2yuv(im)
    bilateralY = bilateral(imYUV, sigmaRange, sigmaY)
    bilateralUV = bilateral(imYUV, sigmaRange, sigmaUV)
    im_out = io.constantIm(im.shape[0], im.shape[1], 0)
    for y, x in imIter(im_out):
        im_out[y, x] = np.array([bilateralY[y, x, 0], bilateralUV[y, x, 1], bilateralUV[y, x, 2]])
    return yuv2rgb(im_out)
def orientedPaint(im, texture, N=7000, size=50, noise=0.3):
    '''Same as painterly but computes and uses the local orientation
    information to orient strokes.'''
    importanceLow = np.ones_like(im)
    outLow = io.constantIm(im.shape[0], im.shape[1], 0.0)
    thetas = computeAngles(im)
    singleScaleOrientedPaint(im, outLow, thetas, importanceLow, texture, size, N, noise)
    importanceHigh = helper.sharpnessMap(im)
    singleScaleOrientedPaint(im, outLow, thetas, importanceHigh, texture, size / 4, N, noise)
    return outLow
def scaleNN(im, k):
    h, w = im.shape[0:2]
    out = imageIO.constantIm(h * k, w * k, 0.0)
    for y, x in imIter(out):
        try:
            y_orig, x_orig = int(y / k), int(x / k)
            out[y, x] = im[y_orig, x_orig]
        except:
            # only for testing purposes
            print y_orig, x_orig
    return out
def basicDemosaic(raw, offsetGreen=0, offsetRedY=1, offsetRedX=1, offsetBlueY=0, offsetBlueX=0):
    '''Takes a raw image and a set of offsets.
    Returns an RGB image computed with our basic technique.'''
    height = raw.shape[0]
    width = raw.shape[1]
    out = io.constantIm(height, width, [0, 0, 0])
    out[:, :, 0] = basicRorB(raw, offsetRedY, offsetRedX)
    out[:, :, 1] = basicGreen(raw)  # use default offsetGreen
    out[:, :, 2] = basicRorB(raw, offsetBlueY, offsetBlueX)
    return out
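# basicGreen() is used above but not defined in this file. A minimal sketch of
# the basic technique, assuming raw is a single-channel Bayer mosaic, green
# sites sit where (y + x) % 2 == offsetGreen, and missing green values are the
# average of the four axis-aligned neighbors (borders left untouched for
# simplicity):
def basicGreen(raw, offsetGreen=0):
    out = raw.copy()
    for y in range(1, raw.shape[0] - 1):
        for x in range(1, raw.shape[1] - 1):
            if (y + x) % 2 != offsetGreen:
                out[y, x] = 0.25 * (raw[y - 1, x] + raw[y + 1, x] + raw[y, x - 1] + raw[y, x + 1])
    return out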
def edgeBasedGreenDemosaic(raw, offsetGreen=0, offsetRedY=1, offsetRedX=1, offsetBlueY=0, offsetBlueX=0):
    '''Same as basicDemosaic except it uses the edge-based technique to
    produce the green channel.'''
    height = raw.shape[0]
    width = raw.shape[1]
    out = io.constantIm(height, width, [0, 0, 0])
    out[:, :, 0] = basicRorB(raw, offsetRedY, offsetRedX)
    out[:, :, 1] = edgeBasedGreen(raw)  # use default offsetGreen
    out[:, :, 2] = basicRorB(raw, offsetBlueY, offsetBlueX)
    return out
def stitch(im1, im2, listOfPairs):
    H = computeHomography(listOfPairs)  # pairs from 1->2
    bbox = computeTransformedBBox(im1, H)
    t_m = translate(bbox)
    t_m_inv = np.linalg.inv(t_m)
    h, w = bbox[1][0] - bbox[0][0], bbox[1][1] - bbox[0][1]
    out = imageIO.constantIm(h, w, [0, 0, 0.0])
    applyHomography(im1, out, t_m, bilinear=True)
    comb = np.dot(t_m, H)  # compose the translation with H by matrix product, not addition
    applyHomography(im2, out, comb, bilinear=True)
    return out
def improvedDemosaic(raw, offsetGreen=0, offsetRedY=1, offsetRedX=1, offsetBlueY=0, offsetBlueX=0):
    '''Same as basicDemosaic but uses edgeBasedGreen and greenBasedRorB.'''
    height = raw.shape[0]
    width = raw.shape[1]
    out = io.constantIm(height, width, [0, 0, 0])
    green = edgeBasedGreen(raw)  # use default offsetGreen
    out[:, :, 0] = greenBasedRorB(raw, green, offsetRedY, offsetRedX)
    out[:, :, 1] = green
    out[:, :, 2] = greenBasedRorB(raw, green, offsetBlueY, offsetBlueX)
    return out
def epiSlice(LF, y):
    '''Takes a light field. Returns the epipolar slice with constant
    v = nv/2 and constant y (input argument).'''
    nv = LF.shape[0]
    nu = LF.shape[1]
    ny = LF.shape[2]
    nx = LF.shape[3]
    v = nv / 2
    out = io.constantIm(nu, nx, 0.0)
    for x in range(nx):
        for u in range(nu):
            out[u, x] = LF[v, u, y, x]
    return out
def rotate(im, theta):
    '''Takes an image and an angle in radians as input.
    Returns an image of the same size, rotated by theta.'''
    out = io.constantIm(im.shape[0], im.shape[1], 0)
    halfX = int(im.shape[1] / 2)
    halfY = int(im.shape[0] / 2)
    inverseRotateM = np.array([[math.cos(theta), -math.sin(theta)],
                               [math.sin(theta), math.cos(theta)]])
    for y, x in imIter(out):
        origImgPos = np.dot(inverseRotateM, np.array([x - halfX, y - halfY])) + np.array([halfX, halfY])
        out[y, x] = interpolateLin(im, origImgPos[1], origImgPos[0])
    return out
def split(raw):
    '''Splits one of Sergei's images into a 3-channel image with height
    floor(height_of_raw / 3.0). Returns the 3-channel image.'''
    width = raw.shape[1]
    height = raw.shape[0]
    crop = int(math.floor(height / 3.0))  # cast so the slice bounds are integers
    out = io.constantIm(crop, width, [0, 0, 0])
    out[:, :, 0] = raw[2 * crop:3 * crop, :]
    out[:, :, 1] = raw[crop:2 * crop, :]
    out[:, :, 2] = raw[:crop, :]
    return out
def apertureView(LF):
    '''Takes a light field. Returns 'out', an image with nx*ny sub-pictures
    representing the value of each pixel in each of the nu*nv views.'''
    nv = LF.shape[0]
    nu = LF.shape[1]
    ny = LF.shape[2]
    nx = LF.shape[3]
    out = io.constantIm(nv * ny, nu * nx, 0.0)
    for y in range(ny):
        for x in range(nx):
            for v in range(nv):
                for u in range(nu):
                    out[nv * y + v, nu * x + u] = LF[v, u, y, x]
    return out
def warpBy1(im, segmentBefore, segmentAfter):
    '''Takes an image, one before segment, and one after segment.
    Returns an image that has been warped according to the two segments.'''
    height = im.shape[0]
    width = im.shape[1]
    out = io.constantIm(height, width, 0.0)
    for y, x in imIter(out):
        X = np.array([y, x], dtype=np.float64)
        (u, v) = segmentAfter.uv(X)
        X_prime = segmentBefore.uvtox(u, v)
        pixel = interpolateLin(im, X_prime[0], X_prime[1])
        out[y, x] = pixel
    return out
def merge2images(im1, im2, H):
    B = computeTransformedBBox(im1, np.linalg.inv(H))
    B = bboxUnion(B, np.array([[0.0, 0.0], 1.0 * np.array(im2.shape[0:2])]))
    T = translate(B[0, 0], B[0, 1])
    out = imageIO.constantIm(B[1, 0] - B[0, 0], B[1, 1] - B[0, 1], 0)
    ws1 = calcLinBlendWeights(im1)
    ws2 = calcLinBlendWeights(im2)
    H2 = np.dot(H, T)
    applyDoubleHomography(im1, im2, ws1, ws2, out, T, H2, bilinear=False)
    # applyHomography(im1, out, T)
    # applyHomography(im2, out, np.dot(H, T))
    return out
def compositeNImages(listOfImages, listOfH, listOfWeights=None, twoScale=False):
    '''Computes the composite image. listOfH is of the form returned by
    computeNHomographies. Hint: You will need to deal with bounding boxes and
    translations again in this function.'''
    bbox = getOuterBBox(listOfImages, listOfH)
    trans = translate(bbox)
    height = bbox[1][0] - bbox[0][0] + 1
    width = bbox[1][1] - bbox[0][1] + 1
    out = io.constantIm(height, width, 0.0)
    if (listOfWeights is not None):
        weightSum = io.constantIm(height, width, 0.0)
    if (twoScale):
        sigmaG = 2.0
        L_low = map(lowFreqIm, listOfImages, [sigmaG] * len(listOfImages))
        L_high = map(highFreqIm, listOfImages, [sigmaG] * len(listOfImages))
        weightSumL = io.constantIm(height, width, 0.0)
        weightSumH = io.constantIm(height, width, 0.0)
        outL = io.constantIm(height, width, 0.0)
        outH = io.constantIm(height, width, 0.0)
    for i in range(len(listOfH)):
        H = np.dot(listOfH[i], trans)
        if (listOfWeights is not None):
            w = listOfWeights[i]
            imW = listOfImages[i].copy()
            imW[:, :, 0] = w
            imW[:, :, 1] = w
            imW[:, :, 2] = w
            if (twoScale):
                imL = L_low[i]
                applyHomographyFastBlended(imL, outL, H, True, imW, weightSumL)
                imH = L_high[i]
                applyHomographyFastBlended(imH, outH, H, True, imW, weightSumH, True)
            else:
                im = listOfImages[i]
                applyHomographyFastBlended(im, out, H, True, imW, weightSum)
        else:
            applyHomographyFast(listOfImages[i], out, H, True)
    if (listOfWeights is not None):
        if (twoScale):
            # io.imwrite(weightSumL, 'debugWeightsLow.png', 1.0)
            # io.imwrite(weightSumH, 'debugWeightsHigh.png', 1.0)
            out = outL + outH
    return out
def scaleBiquadratic(im, k):
    '''Takes an image and a scale factor.
    Returns an image scaled using biquadratic interpolation.'''
    height = im.shape[0]
    width = im.shape[1]
    newHeight = height * k
    newWidth = width * k
    out = io.constantIm(newHeight, newWidth, 0.0)
    for y, x in imIter(out):
        x_float = x / float(newWidth) * width
        y_float = y / float(newHeight) * height
        pix = interpolateBiquad(im, y_float, x_float)
        out[y, x] = pix
    return out
def orientedPaint(im, texture, N=7000, size=50, noise=0.3):
    '''Same as painterly but computes and uses the local orientation
    information to orient strokes.'''
    out = io.constantIm(im.shape[0], im.shape[1])
    thetas = computeAngles(im)
    # first pass
    importance_first_pass = np.ones_like(im)
    singleScaleOrientedPaint(im, out, thetas, importance_first_pass, texture, size, N, noise)
    # second pass
    importance_second_pass = helper.sharpnessMap(im)
    singleScaleOrientedPaint(im, out, thetas, importance_second_pass, texture, size / 4, N, noise)
    return out
def rotate(im, theta):
    '''Takes an image and an angle in radians as input.
    Returns an image of the same size, rotated by theta.'''
    h = im.shape[0]
    w = im.shape[1]
    center = (int(h / 2), int(w / 2))
    out = io.constantIm(h, w, 0.0)
    for y, x in imIter(out):
        tx = (x - center[1]) * math.cos(theta) - (y - center[0]) * math.sin(theta) + center[1]
        ty = (x - center[1]) * math.sin(theta) + (y - center[0]) * math.cos(theta) + center[0]
        pix = interpolateLin(im, ty, tx, True)
        out[y, x] = pix
    return out