def test_toneMap():
    hdr=np.load('hdr.npy')
    out=a5.toneMap(hdr, 100, 1.0, useBila=False)
    io.imwrite(out, 'tone_map_gauss.png')
    out=a5.toneMap(hdr, 100, 2.0, useBila=True)
    io.imwrite(out, 'tone_map_bila.png')
def testSergei():
    sergeis, sergeiNames = getRawPNGsInDir("data/Sergei/")
    scount = 0
    for f in sergeis:
        io.imwrite(a4.split(f), str('splitSergei'+'%03d'%(scount))+'.png')
        io.imwrite(a4.sergeiRGB(f), str('Sergei'+'%03d'%(scount))+'.png')
        scount = scount + 1
def paintAndOutline(im, texture, N=7000, size=50, noise=0.3, debug=False):
    '''On top of the painted image, I run a Sobel filter pipeline to outline
    edges in black, similar to the art style of the 'Borderlands' game series.
    Edge detection here operates on a grayscale copy of the image; doing it
    properly in color requires more advanced techniques (like the one used for
    Borderlands). See the studio's SIGGRAPH talk about Borderlands's development:
    https://www.cs.williams.edu/~morgan/SRG10/borderlands.pptx.zip.
    '''
    edges = gradientMagnitude(helper.BW(im))
    black_edges = 1 - edges
    if debug:
        # should print a picture where the edges are black
        io.imwriteGrey(black_edges, str("paintAndOutlineSobelEdges.png"))
    # mix with the original color image, but painted!
    out = orientedPaint(im, texture, N, size, noise)
    for y in xrange(im.shape[0]):
        for x in xrange(im.shape[1]):
            if black_edges[y, x] < 0.05:
                out[y, x] = 0.0
    io.imwrite(out, str("paintAndOutlineEdges.png"))
    return out
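# The edge mask above comes from a gradientMagnitude helper. As a point of
# reference only, here is a minimal Sobel-based sketch of such a helper,
# assuming a single-channel float image in [0, 1]; the name, the normalization,
# and the use of scipy are my assumptions, not the course code.
import numpy as np
from scipy import ndimage

def sobelGradientMagnitudeSketch(gray):
    """Hypothetical stand-in for gradientMagnitude: Sobel responses in x and y,
    then the per-pixel Euclidean norm, rescaled to [0, 1] so the 0.05 edge
    threshold above stays meaningful."""
    gx = ndimage.sobel(gray, axis=1)  # horizontal derivative
    gy = ndimage.sobel(gray, axis=0)  # vertical derivative
    mag = np.sqrt(gx**2 + gy**2)
    return mag / max(mag.max(), 1e-8)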
def writeFrames(video, path):
    nFrame=video.shape[0]
    for i in xrange(nFrame):
        pathi=path+str('%03d'%i)+'.png'
        #if i%10==0: print i
        io.imwrite(video[i], pathi)
    print 'wrote ' + path + '\n'
def painterly(im, texture, N=10000, size=50, noise=0.3, debug=False, imname=''):
    '''First paints at a coarse scale using all 1's for importance sampling,
    then paints again at size/4 scale using the sharpness map for importance
    sampling.'''
    out = io.constantIm(im.shape[0], im.shape[1])
    outCopy = None
    if debug:
        outCopy = out.copy()
    # first pass
    importance_first_pass = np.ones_like(im)
    singleScalePaint(im, out, importance_first_pass, texture, size, N, noise)
    if debug:
        io.imwrite(out, str(imname + "PainterlyFirstPassOnly.png"))
    # second pass
    importance_second_pass = helper.sharpnessMap(im)
    singleScalePaint(im, out, importance_second_pass, texture, size / 4, N, noise)
    if debug:
        singleScalePaint(im, outCopy, importance_second_pass, texture, size / 4, N, noise)
        io.imwrite(outCopy, str(imname + "PainterlySecondPassOnly.png"))
    return out
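# The second pass samples stroke positions according to helper.sharpnessMap.
# For reference, one common construction of such a map (a sketch under my own
# assumptions, not necessarily what helper.sharpnessMap does) is the locally
# averaged high-frequency energy of the luminance:
import numpy as np
from scipy import ndimage

def sharpnessMapSketch(im, sigma=4.0):
    """Assumed construction: squared high-pass of the luminance, blurred and
    normalized to [0, 1] so it can serve as an importance map."""
    lum = im.mean(axis=2) if im.ndim == 3 else im
    low = ndimage.gaussian_filter(lum, sigma)
    energy = ndimage.gaussian_filter((lum - low)**2, sigma)
    energy /= max(energy.max(), 1e-8)
    # broadcast to three channels to match the np.ones_like(im) map used above
    return np.dstack([energy]*3) if im.ndim == 3 else energy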
def testStitchNVancouver():
    #im1=io.imread('vancouverPan/vancouver4.png')
    #im2=io.imread('vancouverPan/vancouver3.png')
    im1=io.imread('vancouverPan/vancouver2.png')
    im2=io.imread('vancouverPan/vancouver1.png')
    im3=io.imread('vancouverPan/vancouver0.png')
    imList = [im1, im2, im3]#, im4, im5]
    pointList1a=[np.array([99, 326, 1], dtype=np.float64), np.array([271, 247, 1], dtype=np.float64), np.array([180, 178, 1], dtype=np.float64), np.array([179, 276, 1], dtype=np.float64)]
    pointList2a=[np.array([124, 169, 1], dtype=np.float64), np.array([284, 98, 1], dtype=np.float64), np.array([189, 25, 1], dtype=np.float64), np.array([194, 125, 1], dtype=np.float64)]
    listOfPairs2=zip(pointList1a, pointList2a)
    pointList1b=[np.array([176, 300, 1], dtype=np.float64), np.array([318, 204, 1], dtype=np.float64), np.array([258, 203, 1], dtype=np.float64), np.array([181, 138, 1], dtype=np.float64)]
    pointList2b=[np.array([179, 180, 1], dtype=np.float64), np.array([317, 86, 1], dtype=np.float64), np.array([256, 87, 1], dtype=np.float64), np.array([173, 15, 1], dtype=np.float64)]
    listOfPairs1=zip(pointList1b, pointList2b)
    #pointList1_3=[np.array([165, 186, 1], dtype=np.float64), np.array([173, 146, 1], dtype=np.float64), np.array([188, 80, 1], dtype=np.float64), np.array([164, 40, 1], dtype=np.float64)]
    #pointList2_3=[np.array([153, 298, 1], dtype=np.float64), np.array([162, 253, 1], dtype=np.float64), np.array([178, 188, 1], dtype=np.float64), np.array([156, 151, 1], dtype=np.float64)]
    #listOfPairs3=zip(pointList1_3, pointList2_3)
    #pointList1_4=[np.array([156, 151, 1], dtype=np.float64), np.array([220, 34, 1], dtype=np.float64), np.array([184, 160, 1], dtype=np.float64), np.array([186, 89, 1], dtype=np.float64)]
    #pointList2_4=[np.array([151, 304, 1], dtype=np.float64), np.array([226, 189, 1], dtype=np.float64), np.array([180, 316, 1], dtype=np.float64), np.array([190, 239, 1], dtype=np.float64)]
    #listOfPairs4=zip(pointList1_4, pointList2_4)
    listOfListOfPairs = [listOfPairs1, listOfPairs2]#, listOfPairs4]
    #listOfListOfPairs = [listOfPairs]#, listOfPairs2]#, listOfPairs3, listOfPairs4]
    out = a6.stitchN(imList, listOfListOfPairs, 0)
    io.imwrite(out, "vancouver_stitchN.png")
def testSingleScale(im, texture, outputName, N=10000, size=50, noise=0.3):
    out = np.zeros_like(im)
    importance = np.zeros_like(im)
    importance[150] = 1
    ## npr.singleScalePaint(im, out, np.ones_like(im), texture, size, N, noise)
    npr.singleScalePaint(im, out, importance, texture, size, N, noise)
    io.imwrite(out, str(outputName+"SingleScale"+".png"))
def makeConventionManyPano():
    conv1 = io.imread("convention/convention-1.png")
    conv2 = io.imread("convention/convention-2.png")
    conv3 = io.imread("convention/convention-3.png")
    pointList1 = [
        np.array([298, 206, 1], dtype=np.float64),
        np.array([267, 320, 1], dtype=np.float64),
        np.array([170, 325, 1], dtype=np.float64),
        np.array([172, 188, 1], dtype=np.float64),
    ]
    pointList2 = [
        np.array([309, 70, 1], dtype=np.float64),
        np.array([270, 175, 1], dtype=np.float64),
        np.array([182, 176, 1], dtype=np.float64),
        np.array([179, 42, 1], dtype=np.float64),
    ]
    listOfPairs1 = zip(pointList1, pointList2)
    pointList3 = [
        np.array([288, 173, 1], dtype=np.float64),
        np.array([267, 306, 1], dtype=np.float64),
        np.array([219, 306, 1], dtype=np.float64),
        np.array([217, 210, 1], dtype=np.float64),
    ]
    pointList4 = [
        np.array([298, 15, 1], dtype=np.float64),
        np.array([269, 151, 1], dtype=np.float64),
        np.array([225, 148, 1], dtype=np.float64),
        np.array([221, 55, 1], dtype=np.float64),
    ]
    listOfPairs2 = zip(pointList3, pointList4)
    listOfImages = [conv1, conv2, conv3]
    listOfListOfPairs = [listOfPairs1, listOfPairs2]
    refIndex = 1
    out = a6.stitchN(listOfImages, listOfListOfPairs, refIndex)
    io.imwrite(out, "MyPanoMany.png")
def testStitchScience():
    im1=io.imread('science/science-1.png')
    im2=io.imread('science/science-2.png')
    pointList1=[np.array([307, 15, 1], dtype=np.float64), np.array([309, 106, 1], dtype=np.float64), np.array([191, 102, 1], dtype=np.float64), np.array([189, 47, 1], dtype=np.float64)]
    pointList2=[np.array([299, 214, 1], dtype=np.float64), np.array([299, 304, 1], dtype=np.float64), np.array([182, 292, 1], dtype=np.float64), np.array([183, 236, 1], dtype=np.float64)]
    listOfPairs=zip(pointList1, pointList2)
    out = a6.stitch(im1, im2, listOfPairs)
    io.imwrite(out, "science_stitch.png")
def testStitchMIT():
    im1=io.imread('mit1.png')
    im2=io.imread('mit0.png')
    pointList1=[np.array([196, 245, 1], dtype=np.float64), np.array([250, 320, 1], dtype=np.float64), np.array([138, 306, 1], dtype=np.float64), np.array([113, 260, 1], dtype=np.float64)]
    pointList2=[np.array([200, 48, 1], dtype=np.float64), np.array([255, 115, 1], dtype=np.float64), np.array([150, 109, 1], dtype=np.float64), np.array([119, 65, 1], dtype=np.float64)]
    listOfPairs=zip(pointList1, pointList2)
    out = a6.stitch(im1, im2, listOfPairs)
    io.imwrite(out, "MyPano.png")
def testCompositeStata():
    im1=io.imread('stata/stata-1.png')
    im2=io.imread('stata/stata-2.png')
    pointList1=[np.array([209, 218, 1]), np.array([425, 300, 1]), np.array([209, 337, 1]), np.array([396, 336, 1])]
    pointList2=[np.array([232, 4, 1]), np.array([465, 62, 1]), np.array([247, 125, 1]), np.array([433, 102, 1])]
    listOfPairs=zip(pointList1, pointList2)
    out = a6.stitchN([im1, im2], [listOfPairs], 0)
    io.imwrite(out, "stata_stitchN.png")
def testStitchFun():
    im1=io.imread('fun/room1.png')
    im2=io.imread('fun/room2.png')
    pointList1=[np.array([327, 258, 1], dtype=np.float64), np.array([75, 437, 1], dtype=np.float64), np.array([224, 364, 1], dtype=np.float64), np.array([423, 449, 1], dtype=np.float64)]
    pointList2=[np.array([294, 50, 1], dtype=np.float64), np.array([50, 227, 1], dtype=np.float64), np.array([190, 161, 1], dtype=np.float64), np.array([366, 240, 1], dtype=np.float64)]
    listOfPairs=zip(pointList1, pointList2)
    out = a6.stitch(im1, im2, listOfPairs)
    io.imwrite(out, "MyPano.png")
def test_convolve_gauss():
    im=io.imread('pru.png')
    gauss3=np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]])
    kernel=gauss3.astype(float)
    kernel=kernel/sum(sum(kernel))
    out=a3.convolve(im, kernel)
    io.imwrite(out, 'my_gaussblur.png')
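# Optional cross-check (not part of the original test): the same 3x3 Gaussian
# blur computed with scipy, to compare against a3.convolve. It reuses the io
# and np imports of the test above; the 'nearest' boundary mode is an
# assumption about how a3.convolve handles edges.
def test_convolve_gauss_reference():
    from scipy import ndimage
    im = io.imread('pru.png')
    kernel = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]], dtype=float)
    kernel = kernel / kernel.sum()
    # convolve each channel independently with the normalized kernel
    ref = np.dstack([ndimage.convolve(im[:, :, c], kernel, mode='nearest')
                     for c in range(im.shape[2])])
    io.imwrite(ref, 'scipy_gaussblur.png')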
def testStitchStata():
    im1 = io.imread("stata/stata-1.png")
    im2 = io.imread("stata/stata-2.png")
    pointList1 = [np.array([209, 218, 1]), np.array([425, 300, 1]), np.array([209, 337, 1]), np.array([396, 336, 1])]
    pointList2 = [np.array([232, 4, 1]), np.array([465, 62, 1]), np.array([247, 125, 1]), np.array([433, 102, 1])]
    listOfPairs = zip(pointList1, pointList2)
    out = a6.stitch(im1, im2, listOfPairs)
    io.imwrite(out, "stata_stitch.png")
def testCatalina(im, N=50000, size=50, noise=0.2):
    '''Renders a painted version of the default dynamic wallpaper of macOS Catalina.'''
    print 'TEST 4: oriented brushing on macOS Catalina wallpaper'
    io.imwrite(npr.orientedPaint(im, brush1, N, size, noise), str("CatalinaOrientedPaint" + ".png"))
def testAngle(im):
    thetas = npr.computeAngles(im)
    out = np.zeros_like(thetas)
    for y in xrange(thetas.shape[0]):
        for x in xrange(thetas.shape[1]):
            theta = thetas[y, x, 0]
            if theta < 0:
                theta += 2 * math.pi
            out[y, x] = theta / 2.0 / math.pi
    io.imwrite(out, 'testangle.png')
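# computeAngles returns a per-pixel stroke orientation. As a reference only,
# here is a structure-tensor sketch of how such angles can be computed; the
# smoothing scale and the use of scipy are my assumptions, not npr.computeAngles.
import numpy as np
from scipy import ndimage

def computeAnglesSketch(im, sigma=3.0):
    """Assumed approach: smooth the structure tensor of the luminance and take
    the orientation of its dominant eigenvector (the gradient direction);
    strokes are typically drawn along the perpendicular."""
    lum = im.mean(axis=2) if im.ndim == 3 else im
    ix = ndimage.sobel(lum, axis=1)
    iy = ndimage.sobel(lum, axis=0)
    ixx = ndimage.gaussian_filter(ix * ix, sigma)
    iyy = ndimage.gaussian_filter(iy * iy, sigma)
    ixy = ndimage.gaussian_filter(ix * iy, sigma)
    theta = 0.5 * np.arctan2(2.0 * ixy, ixx - iyy)  # in (-pi/2, pi/2]
    return np.dstack([theta] * 3) if im.ndim == 3 else theta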
def test_3_toneMap(self):
    im1 = numpy.load('vine-hdr.npy')
    # im2 = numpy.load('design-hdr.npy')
    # im3 = numpy.load('ante2-hdr.npy')
    t1 = toneMap(im1, 100, 1, False)
    # t2 = toneMap(im2)
    # t3 = toneMap(im3)
    imageIO.imwrite(t1, 'vine-toned-g.png')
def main():
    # This program defines a single-stage imaging pipeline that
    # thresholds an image based on its green channel.

    # First we'll load the input image we wish to process.
    # We'll use imageIO to get a numpy array from a PNG image
    im=imageIO.imread('rgb.png')

    # We then create a Halide representation of this image using the Image
    # constructor
    input = Image(Float(32), im)
    # the first input to the Image constructor is a type (32-bit float here)
    # the second can be a filename, a numpy array or nothing
    # when it's a filename, the file gets loaded

    # Next we declare our Func object that represents our one pipeline
    # stage.
    sel=Func()

    # Our Func will have three arguments, representing the position
    # in the image and the color channel. Halide treats color
    # channels as an extra dimension of the image, just like in numpy
    # let's declare the corresponding Vars before we can use them
    x, y, c = Var(), Var(), Var()

    # Finally define the function: where the green channel is below 0.5
    # the output is 0.0, everywhere else it is 1.0, for all three channels.
    sel[x, y, c] = select(input[x,y,1]<0.5, 0.0, 1.0)

    # Remember. All we've done so far is build a representation of a
    # Halide program in memory. We haven't actually processed any
    # pixels yet. We haven't even compiled that Halide program yet.

    # So now we'll realize the Func. The size of the output image
    # should match the size of the input image. If we just wanted to
    # process a portion of the input image we could request a
    # smaller size. If we request a larger size Halide will throw an
    # error at runtime telling us we're trying to read out of bounds
    # on the input image.
    output = sel.realize(input.width(), input.height(), input.channels())

    # realize provides us with some Halide internal datatype representing image buffers.
    # We want to convert it to a numpy array. For this, we first turn it into a
    # proper Halide Image using the Halide constructor Image(), and we then convert
    # it to a numpy array. It's a little verbose but not a big deal.
    outputNP=numpy.array(Image(output))
    imageIO.imwrite(outputNP, 'sel.png')

    print "Success!\n"
    return 0
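# For readers unfamiliar with Halide's select: the pipeline above is
# equivalent to the following NumPy computation (a reference sketch only,
# not part of the tutorial code).
import numpy

def thresholdGreenNumpy(im):
    """Every channel of a pixel becomes 1.0 where the green channel is >= 0.5,
    and 0.0 where it is below 0.5, matching select(input[x,y,1] < 0.5, 0.0, 1.0)."""
    mask = (im[:, :, 1] >= 0.5).astype(numpy.float32)
    return numpy.dstack([mask, mask, mask])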
def spanish(im):
    L = rgb2yuv(im)
    # copy the U channel so the swap below is not clobbered by the next assignment
    u = L[:, :, 1].copy()
    L[:, :, 1] = L[:, :, 2]
    L[:, :, 2] = u
    dotInMiddle(L)
    imageIO.imwrite(L, "L.png")
    C = BW(im)
    dotInMiddle(C)
    imageIO.imwrite(C, "C.png")
def main():
    # This program defines a multi-stage Halide imaging pipeline
    # One stage computes the horizontal gradient of an image dI/dx
    # Another stage computes dI/dy (for all three channels of RGB in both cases)
    # The final stage computes the magnitude of the corresponding vector
    # We will compute the gradient with finite differences: dI/dx=I(x+1)-I(x)

    # As usual, let's load an input
    im=imageIO.imread('rgb.png')
    # and create a Halide representation of this image
    input = Image(Float(32), im)

    # Next we declare the Vars
    # We here give an extra argument to the Var constructor, an optional string that
    # can help for debugging by naming the variable in the Halide representation.
    # Otherwise, the names x, y, c are only known to the Python side
    x, y, c = Var('x'), Var('y'), Var('c')

    # Next we declare the three Funcs corresponding to the various stages of the gradient.
    # Similarly, we pass strings to name them.
    gx = Func('gx')
    gy = Func('gy')
    gradientMagnitude=Func('gradientMagnitude')

    # Define our horizontal gradient Func using finite differences
    # The value at a pixel is the input at that pixel minus its left neighbor.
    # Note how we now use the more direct definition of Funcs without declaring
    # intermediate Exprs
    gx[x,y,c]=input[x+1,y,c]-input[x,y,c]

    # Similarly define the vertical gradient.
    gy[x,y,c]=input[x,y+1,c]-input[x,y,c]

    # Finally define the gradient magnitude as the Euclidean norm of the gradient vector
    # We use overloaded operators and functions such as **, + and sqrt
    # Through the magic of metaprogramming, this creates the appropriate algebraic tree
    # in the Halide representation
    # Most operators and functions you expect are supported.
    # Check the documentation for the full list.
    gradientMagnitude[x,y,c]=sqrt(gx[x,y,c]**2+gy[x,y,c]**2)

    # As usual, all we have done so far is create a Halide internal representation.
    # No computation has happened yet.
    # We now call realize() to compile and execute.
    # You'll note that we subtracted 1 from the width and height to make sure that the
    # x+1 and y+1 neighbors always exist. We'll see a more general solution in the next tutorial
    output = gradientMagnitude.realize(input.width()-1, input.height()-1, input.channels())

    outputNP=numpy.array(Image(output))
    imageIO.imwrite(outputNP, 'tut3out.png', gamma=1.0)

    print 'success!'
    return 0
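# For reference, the same computation expressed directly in NumPy (a sketch,
# not part of the tutorial). Halide's x indexes columns and y indexes rows,
# so x maps to numpy axis 1 and y to axis 0.
import numpy

def gradientMagnitudeNumpy(im):
    """Forward differences in x and y followed by the per-pixel Euclidean norm.
    The result is one pixel smaller in each spatial dimension, matching the
    width-1 / height-1 realize() call above."""
    gx = im[:-1, 1:, :] - im[:-1, :-1, :]  # I(x+1, y) - I(x, y)
    gy = im[1:, :-1, :] - im[:-1, :-1, :]  # I(x, y+1) - I(x, y)
    return numpy.sqrt(gx**2 + gy**2)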
def test_makeHDR():
    import glob, time
    inputs=sorted(glob.glob('data/ante0-*.png'))
    p=multi.Pool(processes=8)
    im_list=p.map(io.imread, inputs)
    hdr=a5.makeHDR(im_list)
    np.save('hdr', hdr)
    hdr_scale=hdr/max(hdr.flatten())
    io.imwrite(hdr_scale, 'hdr_linear_scale_ante0.png')
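# makeHDR is called with only a list of bracketed exposures, so the relative
# exposure of each frame has to be estimated from the pixels themselves. The
# sketch below shows one generic way such a merge can work; the weighting
# scheme, thresholds, and ratio estimate are my assumptions, not the a5 code.
import numpy as np

def makeHDRSketch(im_list, eps=0.002, clip=0.99):
    """Estimate each frame's exposure relative to the first from pixels that
    are well exposed in consecutive frames, then average the re-scaled frames
    with weights that drop under- and over-exposed pixels."""
    acc = np.zeros_like(im_list[0], dtype=np.float64)
    wsum = np.zeros_like(acc)
    exposure = 1.0
    prev = None
    for im in im_list:
        im = im.astype(np.float64)
        good = (im > eps) & (im < clip)
        if prev is not None:
            both = good & (prev > eps) & (prev < clip)
            # brightness ratio between this frame and the previous one
            exposure *= np.median(im[both] / prev[both])
        w = good.astype(np.float64)
        acc += w * (im / exposure)
        wsum += w
        prev = im
    return acc / np.maximum(wsum, 1e-8)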
def testComputeAndApplyHomographyStata():
    im1=io.imread('stata/stata-1.png')
    im2=io.imread('stata/stata-2.png')
    pointList1=[np.array([209, 218, 1]), np.array([425, 300, 1]), np.array([209, 337, 1]), np.array([396, 336, 1])]
    pointList2=[np.array([232, 4, 1]), np.array([465, 62, 1]), np.array([247, 125, 1]), np.array([433, 102, 1])]
    listOfPairsS=zip(pointList1, pointList2)
    HS=a6.computeHomography(listOfPairsS)
    # darken im2 by multiplying it by 0.5 to better show the transition
    out=im2*0.5
    a6.applyHomography(im1, out, HS, True)
    io.imwrite(out, "stata_computeAndApplyHomography.png")
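# For reference: applying a homography to a single pixel is a 3x3 matrix
# multiply followed by division by the last (homogeneous) coordinate. A
# minimal sketch, independent of the a6 implementation, using the same
# (y, x, 1) point convention as the lists above:
import numpy as np

def applyHomographyToPoint(H, y, x):
    """Map pixel (y, x) through H and return the transformed (y', x')."""
    yp, xp, w = np.dot(H, np.array([y, x, 1.0]))
    return yp / w, xp / w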
def testForest(im, N=50000, size=25, noise=0.4):
    '''Tests NPR on a beautiful image of a forest I found on Google Images.
    Generates a squiggly, worm-looking image. Quite abstract.'''
    print 'TEST 2: oriented brushing on beautiful forest image'
    io.imwrite(npr.orientedPaint(im, bigBrush, N, size, noise), str("ForestOrientedPaint" + ".png"))
def testComputeAndApplyHomographyFun():
    im1=io.imread('fun/room1.png')
    im2=io.imread('fun/room2.png')
    pointList1=[np.array([327, 258, 1], dtype=np.float64), np.array([75, 437, 1], dtype=np.float64), np.array([224, 364, 1], dtype=np.float64), np.array([423, 449, 1], dtype=np.float64)]
    pointList2=[np.array([294, 50, 1], dtype=np.float64), np.array([50, 227, 1], dtype=np.float64), np.array([190, 161, 1], dtype=np.float64), np.array([366, 240, 1], dtype=np.float64)]
    listOfPairsS=zip(pointList1, pointList2)
    HS=a6.computeHomography(listOfPairsS)
    # darken im2 by multiplying it by 0.5 to better show the transition
    out=im2*0.5
    a6.applyHomography(im1, out, HS, True)
    io.imwrite(out, "fun.png")
def main():
    im=imageIO.imread('hk.png', 1.0)
    t=time.time()
    out=harris(im)
    dt=time.time()-t
    print 'took ', dt, 'seconds'
    norm=np.max(out)
    imageIO.imwrite(out/norm)
def testApplyHomographyPoster():
    signH = np.array(
        [
            [1.12265192e00, 1.44940136e-01, 1.70000000e02],
            [8.65164180e-03, 1.19897030e00, 9.50000000e01],
            [2.55704864e-04, 8.06420365e-04, 1.00000000e00],
        ]
    )
    green = io.getImage("green.png")
    poster = io.getImage("poster.png")
    a6.applyHomography(poster, green, signH, True)
    io.imwrite(green, "HWDueAt9pm_applyHomography.png")
def test_makeHDR():
    import glob, time
    inputs=glob.glob('data/sea-*.png')
    im_list = []
    for inp in inputs:
        im_list.append(io.imread(inp))
    hdr=a5.makeHDR(im_list)
    np.save('hdr', hdr)
    hdr_scale=hdr/max(hdr.flatten())
    io.imwrite(hdr_scale, 'hdr_linear_scale.png')
def testSingleScaleOrientedPaint(im, texture, outputName, N=10000, size=50, noise=0.3, nAngles=36):
    out = np.zeros_like(im)
    thetas = npr.computeAngles(im)
    npr.singleScaleOrientedPaint(im, out, thetas, np.ones_like(im), texture, size, N, noise, nAngles)
    io.imwrite(out, str(outputName + "SingleScaleOriented" + ".png"))
def main():
    im=np.load('Input/hk.npy')
    mpix=(im.shape[0] * im.shape[1])/1e6
    t=time.time()
    out=harris(im)
    dt=time.time()-t
    print 'took ', dt, 'seconds'
    print '%.5f ms per megapixel (%.7f ms for %d megapixels)' % (dt/mpix*1e3, dt*1e3, mpix)
    norm=np.max(out)
    imageIO.imwrite(out/norm)
def test_makeHDR(file):
    import glob
    inputs=sorted(glob.glob('data/' + file + '-*.png'))
    im_list = []
    for inp in inputs:
        im_list.append(io.imread(inp))
    hdr=a5.makeHDR(im_list)
    np.save('npy/'+file+'hdr', hdr)
    hdr_scale=hdr/max(hdr.flatten())
    io.imwrite(hdr_scale, file+'_hdr_linear_scale.png')
def main():
    #im=imageIO.imread('rgb.png')
    im=np.load('Input/hk.npy')
    t=time.time()
    out=harris(im)
    dt=time.time()-t
    print 'took ', dt, 'seconds'
    norm=np.max(out)
    imageIO.imwrite(out/norm, "FredoHarris.png")
def main():
    im=imageIO.imread('hk.png')
    # slice indices must be integers
    im = im[:int(im.shape[0]/1.2), :int(im.shape[1]/1.2)]
    t=time.time()
    out=harris(im)
    dt=time.time()-t
    print 'took ', dt, 'seconds'
    mp = im.shape[0] * im.shape[1] / 1e6
    print dt / float(mp), 'seconds / mp'
    # norm=np.max(out)
    imageIO.imwrite(out)
def testComputeAndApplyHomographyPoster():
    green = io.getImage("green.png")
    poster = io.getImage("poster.png")
    h, w = poster.shape[0] - 1, poster.shape[1] - 1
    pointListPoster = [np.array([0, 0, 1]), np.array([0, w, 1]), np.array([h, w, 1]), np.array([h, 0, 1])]
    pointListT = [np.array([170, 95, 1]), np.array([171, 238, 1]), np.array([233, 235, 1]), np.array([239, 94, 1])]
    listOfPairs = zip(pointListPoster, pointListT)
    H = a6.computeHomography(listOfPairs)
    # print H
    a6.applyHomography(poster, green, H, True)
    io.imwrite(green, "HWDueAt9pm_computeHomography.png")
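# The four correspondences above are exactly enough to determine a homography
# with H[2, 2] fixed to 1 (eight unknowns, two equations per pair). A generic
# DLT-style solve is sketched below as an assumption about how such a solver
# can work; it is not a copy of a6.computeHomography.
import numpy as np

def computeHomographySketch(listOfPairs):
    """Solve for H such that H maps the first point of each (y, x, 1) pair to
    the second, up to scale, in the least-squares sense."""
    A, b = [], []
    for p, q in listOfPairs:
        py, px, qy, qx = p[0], p[1], q[0], q[1]
        A.append([py, px, 1, 0, 0, 0, -qy * py, -qy * px])
        b.append(qy)
        A.append([0, 0, 0, py, px, 1, -qx * py, -qx * px])
        b.append(qx)
    h = np.linalg.lstsq(np.array(A, dtype=np.float64), np.array(b, dtype=np.float64))[0]
    return np.append(h, 1.0).reshape(3, 3)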
def testNPanoGuedelon():
    im1 = io.imread("guedelon/guedelon-1.png")
    im2 = io.imread("guedelon/guedelon-2.png")
    im3 = io.imread("guedelon/guedelon-3.png")
    im4 = io.imread("guedelon/guedelon-4.png")
    pointList1 = [
        np.array([444, 306, 1], dtype=np.float64),
        np.array([198, 210, 1], dtype=np.float64),
        np.array([271, 198, 1], dtype=np.float64),
        np.array([399, 203, 1], dtype=np.float64),
    ]
    pointList2 = [
        np.array([434, 114, 1], dtype=np.float64),
        np.array([188, 44, 1], dtype=np.float64),
        np.array([261, 24, 1], dtype=np.float64),
        np.array([394, 18, 1], dtype=np.float64),
    ]
    listOfPairs1 = zip(pointList1, pointList2)
    pointList3 = [
        np.array([419, 293, 1], dtype=np.float64),
        np.array([384, 234, 1], dtype=np.float64),
        np.array([254, 274, 1], dtype=np.float64),
        np.array([301, 324, 1], dtype=np.float64),
    ]
    pointList4 = [
        np.array([401, 142, 1], dtype=np.float64),
        np.array([372, 88, 1], dtype=np.float64),
        np.array([245, 139, 1], dtype=np.float64),
        np.array([292, 179, 1], dtype=np.float64),
    ]
    listOfPairs2 = zip(pointList3, pointList4)
    pointList5 = [
        np.array([245, 139, 1], dtype=np.float64),
        np.array([403, 143, 1], dtype=np.float64),
        np.array([379, 220, 1], dtype=np.float64),
        np.array([273, 311, 1], dtype=np.float64),
    ]
    pointList6 = [
        np.array([236, 70, 1], dtype=np.float64),
        np.array([398, 69, 1], dtype=np.float64),
        np.array([371, 149, 1], dtype=np.float64),
        np.array([267, 238, 1], dtype=np.float64),
    ]
    listOfPairs3 = zip(pointList5, pointList6)
    listOfImages = [im1, im2, im3, im4]
    listOfListOfPairs = [listOfPairs1, listOfPairs2, listOfPairs3]
    refIndex = 2
    out = a6.stitchN(listOfImages, listOfListOfPairs, refIndex)
    io.imwrite(out, "guedelon_stitchNFast.png")
def makeStreetSign():
    sign = io.imread("highway.png")
    people = io.imread("coverphoto.png")
    h, w = people.shape[0] - 1, people.shape[1] - 1
    peoplecorners = [np.array([0, 0, 1]), np.array([0, w, 1]), np.array([h, w, 1]), np.array([h, 0, 1])]
    pointList1 = [
        np.array([105, 94, 1], dtype=np.float64),
        np.array([110, 200, 1], dtype=np.float64),
        np.array([162, 200, 1], dtype=np.float64),
        np.array([159, 92, 1], dtype=np.float64),
    ]
    listOfPairs = zip(peoplecorners, pointList1)
    H = a6.computeHomography(listOfPairs)
    a6.applyHomography(people, sign, H, True)
    io.imwrite(sign, "Fun.png")
def writeFrames(video, folder, framerate=30):
    '''Writes the frames of video into folder, then encodes them into folder.mp4 with ffmpeg.'''
    # make the folder if it doesn't exist
    if not os.path.exists(folder):
        os.makedirs(folder)
    nFrame = video.shape[0]
    for i in tqdm(range(nFrame)):
        image_path = os.path.join(folder, "frame_%03d.png" % i)
        io.imwrite(video[i], image_path)
    print('Saved images to {}'.format(folder))
    # now save the video
    os.system("ffmpeg -y -r {} -i {} -vcodec mpeg4 {}.mp4".format(
        framerate, os.path.join(folder, "frame_%03d.png"), folder))
    print("Saved video to {}.mp4".format(folder))
def videoMagPhase(phasePyramid, magnitudePyramid, low, high, order, gains_y, gains_uv=None, frame_write_path=None):
    for i in xrange(phasePyramid.shape[1]):
        print("Processing layer: " + str(i+1) + "/" + str(phasePyramid.shape[1]))
        phases = phasePyramid[:,i,:,:,:]
        if gains_uv is not None:
            print("\tRGB to YUV")
            phases = RGB2YUV(phases)
        print("\tButterworth filter")
        tbpPhases = timeBandPassButter(phases - phases[0], low, high, order)
        print("\tPhase amplification")
        diffPhase = np.diff(tbpPhases, n=1, axis=0)
        diffPhase = (diffPhase + np.pi) % (2 * np.pi) - np.pi
        if gains_uv is not None:
            diffPhase[:,:,:,0] *= gains_y[i]
            diffPhase[:,:,:,1:] *= gains_uv[i]
        else:
            diffPhase *= gains_y[i]
        tbpPhases[1:,:,:,:] = np.cumsum(diffPhase, axis=0) + tbpPhases[0,:,:,:]
        if gains_uv is not None:
            print("\tYUV to RGB")
            phases = YUV2RGB(phases + tbpPhases)
        else:
            phases = (phases + tbpPhases)
        # print("\tPhase denoising")
        # phases = ndimage.filters.median_filter(phases, (1, 3, 3, 1))
        # for j in xrange(phases.shape[0]):
        #     print("\t\tFrame " + str(j+1) + "/" + str(phases.shape[0]))
        #     phases[j] = bilagrid.bilateral_grid(phases[j], max(phases.shape[1], phases.shape[2])/50.0, 0.4)
        phasePyramid[:,i,:,:,:] = phases
    print("Reconstructing")
    frame = np.zeros((phasePyramid.shape[2], phasePyramid.shape[3], phasePyramid.shape[4]))
    if frame_write_path is None:
        video = np.zeros((phasePyramid.shape[0], phasePyramid.shape[2], phasePyramid.shape[3], phasePyramid.shape[4]))
    for i in xrange(phasePyramid.shape[0]):
        print("Reconstructing frame " + str(i+1) + "/" + str(phasePyramid.shape[0]))
        for j in xrange(3):
            frame[:,:,j] = reconstruct((magnitudePyramid[i,:,:,:,j], phasePyramid[i,:,:,:,j]))
        if frame_write_path is not None:
            io.imwrite(frame, frame_write_path + str(i).zfill(5) + ".png")
        else:
            video[i] = frame
    if frame_write_path is None:
        return video
    else:
        return None
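# timeBandPassButter above band-passes the phase signal along the time axis.
# Below is a sketch of what such a temporal Butterworth filter can look like
# with scipy; the name, the cutoff convention, and the use of zero-phase
# filtfilt are assumptions, not the actual helper used in videoMagPhase.
from scipy import signal

def timeBandPassButterSketch(frames, low, high, order, fps=None):
    """Zero-phase Butterworth band-pass along axis 0 (time). If fps is given,
    low and high are interpreted in Hz; otherwise as fractions of Nyquist."""
    nyquist = 0.5 * fps if fps else 1.0
    b, a = signal.butter(order, [low / nyquist, high / nyquist], btype='band')
    return signal.filtfilt(b, a, frames, axis=0)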
import a1
import imageIO as io

castle = io.imread('castle_small.png')
imL, imC = a1.spanish(castle)
io.imwrite(imL, 'L.png')
io.imwrite(imC, 'C.png')
def testOrientedPaint(im, texture, outputName, N=10000, size=50, noise=0.3):
    io.imwrite(npr.orientedPaint(im, texture, N, size, noise), str(outputName + "OrientedPaint" + ".png"))
def testPainterly(im, texture, outputName, N=10000, size=50, noise=0.3):
    io.imwrite(npr.painterly(im, texture, N, size, noise), str(outputName + "Painterly" + ".png"))
def testSingleScale(im, texture, outputName, N=10000, size=50, noise=0.3):
    out = np.zeros_like(im)
    npr.singleScalePaint(im, out, np.ones_like(im), texture, size, N, noise)
    io.imwrite(out, str(outputName + "SingleScale" + ".png"))
def runTests(im, texture, imname):
    testSingleScale(im, texture, imname)
    testPainterly(im, texture, imname)
    testSingleScaleOrientedPaint(im, texture, imname)
    testOrientedPaint(im, texture, imname)

brush1 = io.imread('brush.png')
longBrush = io.imread('longBrush.png')
bigBrush = io.imread('longBrush2.png')
roundIm = io.imread('round.png')
liz = io.imread('liz.png')
china = io.imread('china.png')
vpd = io.imread('villeperdue.png')

brushtest = np.zeros([200, 200, 3])
testBrush(brushtest, brush1)
io.imwrite(brushtest, "brushtest1.png")

testAngle(roundIm)

runTests(liz, brush1, "Liz")
runTests(china, brush1, "China")
runTests(vpd, brush1, "VPD")
runTests(liz, longBrush, "LizTestLongBrush")
runTests(china, longBrush, "ChinaTestLongBrush")
runTests(roundIm, longBrush, "RoundImLong")