def demo_show_image():
    im3 = np.zeros(shape=(200, 100), dtype=np.int)
    im3[15:15+10, 15:15+10] = 11
    print im3[15:15+10, 15:15+10]
    stream = cStringIO.StringIO()
    imsave(stream, im3)
    return stream.getvalue()
def slice_save(self, astr_outputFile):
    '''
    Saves a single slice.

    ARGS

    o astr_outputFile
      The output filename to save the slice to.
    '''
    self.dp.qprint('Input file  = %s' % self.str_inputFile)
    self.dp.qprint('Output file = %s' % astr_outputFile)
    fformat = astr_outputFile.split('.')[-1]
    if fformat == 'dcm':
        if self._dcm:
            self._dcm.pixel_array.flat = self._Mnp_2Dslice.flat
            self._dcm.PixelData = self._dcm.pixel_array.tostring()
            self._dcm.save_as(astr_outputFile)
        else:
            raise ValueError('dcm output format only available for DICOM files')
    else:
        pylab.imsave(astr_outputFile, self._Mnp_2Dslice, format=fformat, cmap=cm.Greys_r)
def montage(X, colormap=pylab.cm.gist_gray, filename=''):
    num_blocks, width = np.shape(X)
    mont_width = int(np.ceil(np.sqrt(num_blocks)))
    block_size = int(np.sqrt(width))  # each row of X is a flattened square block
    M = np.zeros((mont_width * block_size + 1, mont_width * block_size + 1))
    blk_count = 0
    for j in range(mont_width):
        for i in range(mont_width):
            if blk_count >= num_blocks:
                break
            sliceM, sliceN = j * block_size, i * block_size
            M[sliceM:sliceM + block_size, sliceN:sliceN + block_size] = \
                np.reshape(X[j * mont_width + i], (block_size, block_size))
            blk_count += 1
    if len(filename) == 0:
        pylab.imshow(M, cmap=colormap)
        pylab.show()
    else:
        pylab.imsave(filename, M)
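# Hedged usage sketch for montage() above: builds a montage of 16 random 8x8
# blocks and writes it to 'montage_demo.png'. The array shape and the output
# filename are illustrative assumptions, not part of the original code.
import numpy as np
import pylab

X_demo = np.random.rand(16, 64)  # 16 blocks, each 8*8 = 64 pixels when flattened
montage(X_demo, filename='montage_demo.png')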
def test_with_file(fn):
    im = pylab.imread(fn)
    if im.ndim > 2:
        im = numpy.mean(im[:, :, :3], 2)
    pylab.imsave("intermediate.png", im, vmin=0, vmax=1., cmap=pylab.cm.gray)
    r = test_inline(im)
    return r
def plot_seam(self, image, seam):
    """
    Input:
        1. image: original image on the first call, then the reduced image in
           subsequent iterations.
        2. seam: list of column numbers, one per row of the image, giving the
           seam to be removed.

    Output:
        The input image with the seam drawn on it, for seam visualization.

    Working:
        Replaces the RGB values of the seam pixel coordinates identified by
        the list `seam` with (0.7, 0, 0), i.e. a red tone.
    """
    seam_plot = pylab.imread(image)
    seam_plot = img_as_float(seam_plot)
    height, width = seam_plot.shape[0:2]
    for i in range(height):
        for j in range(width):
            if seam[i] == j:
                seam_plot[i][j][0] = 0.7
                seam_plot[i][j][1] = 0
                seam_plot[i][j][2] = 0
    # save the seam visualization as a PNG
    pylab.imsave("SeamPlot.png", seam_plot)
    return seam_plot
def visualize_array(array, title='Image', show=True, write=False):
    """
    Visualize a 3d or 4d array as an image.
    Filters (shape[2], shape[3]) are stacked first horizontally, then vertically.
    """
    assert array.ndim == 3 or array.ndim == 4
    array = normalize(array)  # this makes a copy
    if array.ndim == 3:
        array = construct_stacked_array(array)
    elif array.ndim == 4:
        array = construct_stacked_matrix(array)
    else:
        raise NotImplementedError()
    cm = pylab.gray()  # sets the default colormap to gray; returns None, so cmap=cm below falls back to that default
    if show:
        fig = pylab.gcf()
        fig.canvas.set_window_title(title)
        pylab.axis('off')
        pylab.imshow(array, interpolation='nearest', cmap=cm)
        pylab.show()
        pylab.draw()
    if write:
        pylab.imsave(title + '.png', array, cmap=cm)
def plot_coefficient_images(h5file, output_dir, data_file='Data.npz', x=None, y=None,
                            problemtype="RobustGraphNet"):
    """
    Iterate through an hdf5 file of fits, plotting the coefficients as images
    and as slices of images.
    """
    # get ground truth
    Data = np.load(data_file)
    true_im = Data['sig_im']
    # get fit results
    f = h5py.File(h5file, 'r')
    results = f[problemtype]
    # make appropriate directories for saving images
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    for k in results.keys():
        local_dir = output_dir + k
        if not os.path.isdir(local_dir):
            os.makedirs(local_dir)
            os.makedirs(local_dir + "/slice_plots/")
        # get coefficients and l1 values
        solution = results[k + '/coefficients'].value
        l1_path = results[k + '/l1vec'].value
        if x is None and y is None:
            x = int(np.sqrt(solution.shape[1]))  # image is square
            y = x
        # make plots
        for i in xrange(solution.shape[0]):
            im = solution[i, :].reshape((x, y), order='F')
            pl.imsave(local_dir + "/l1=" + str(l1_path[i]) + ".png", im)
            print "\t---> Saved coefficient image", i
            plot_image_slice(im, true_im, x_slice=45,
                             out_path=local_dir + "/slice_plots/l1=" + str(l1_path[i]) + ".png")
            print "\t---> Saved coefficient image slice", i
def makeTestPair(paths, homography, collection, location=".", size=(250, 250), scale=1.0):
    """ Given a pair of paths to two images and a homography between them,
        this function creates two crops and calculates a new homography.
        input: paths [strings] (paths to images)
               homography [numpy.ndarray] (3 by 3 array homography)
               collection [string] (the name of the testset)
               location [string] (the location (path) of the testset)
               size [(int, int)] (the size of an image crop in pixels)
               scale [double] (the scale by which we resize the crops after they've been cropped)
        out:   nothing
    """
    # Get width and height
    width, height = size
    # Load images in black/white
    images = map(loadImage, paths)
    # Crop part of first image and part of second image:
    (top_o, left_o) = (random.randint(0, images[0].shape[0] - height),
                       random.randint(0, images[0].shape[1] - width))
    (top_n, left_n) = (random.randint(0, images[1].shape[0] - height),
                       random.randint(0, images[1].shape[1] - width))
    # Get two file names
    c_path = getRandPath("%s/%s/" % (location, collection))
    if not exists(dirname(c_path)):
        makedirs(dirname(c_path))
    # Make sure we save as gray
    pylab.gray()
    im1 = images[0][top_o: top_o + height, left_o: left_o + width]
    im2 = images[1][top_n: top_n + height, left_n: left_n + width]
    im1_scaled = imresize(im1, size=float(scale), interp='bicubic')
    im2_scaled = imresize(im2, size=float(scale), interp='bicubic')
    pylab.imsave(c_path + "_1.jpg", im1_scaled)
    pylab.imsave(c_path + "_2.jpg", im2_scaled)
    # Homography for translation
    T1 = numpy.identity(3)
    T1[0, 2] = left_o
    T1[1, 2] = top_o
    # Homography for translation back
    T2 = numpy.identity(3)
    T2[0, 2] = -1 * left_n
    T2[1, 2] = -1 * top_n
    # Homography for scale
    Ts = numpy.identity(3)
    Ts[0, 0] = scale
    Ts[1, 1] = scale
    # Homography for scale back
    Tsinv = numpy.identity(3)
    Tsinv[0, 0] = 1.0 / scale
    Tsinv[1, 1] = 1.0 / scale
    # Combine homographies and save
    hom = Ts.dot(T2).dot(homography).dot(T1).dot(Tsinv)
    hom = hom / hom[2, 2]
    numpy.savetxt(c_path, hom)
def _draw_image(self):
    # im = RIM.dicom_reader('U:\Documents\medical_imaging\D3Slice270.dcm')
    # PixelType = itk.ctype('signed short')
    # Dimension = 2
    # ImageType_threshold = itk.Image[PixelType, Dimension]
    # thresholdFilter = itk.IntensityWindowingImageFilter[ImageType_threshold, ImageType_threshold].New()
    # thresholdFilter.SetInput(im)
    # thresholdFilter.SetWindowMinimum(600)
    # thresholdFilter.SetWindowMaximum(1000)
    # thresholdFilter.SetOutputMinimum(0)
    # thresholdFilter.SetOutputMaximum(255)
    # thresholdFilter.Update()
    #
    # threshold_input = thresholdFilter.GetOutput()
    # im = itk.GetArrayFromImage(thresholdFilter.GetOutput())
    f = self.Image_matrix
    # sa = Image.fromarray(f)
    # f = Image.open('U:\Documents\medical_imaging\ytestimage.png')
    # pylab.imsave('U:\Documents\medical_imaging\ytestimage_copy.gif', f, cmap=pylab.cm.bone)
    # self.im = Image.open('U:\Documents\medical_imaging\ytestimage_copy.gif')
    # NOTE: pylab.imsave writes an ordinary raster image; despite the '.dcm'
    # extension used below, it does not produce a DICOM file.
    pylab.imsave(r'U:\Documents\medical_imaging\ytestimage_copy.dcm', f, cmap=pylab.cm.bone)
    sa = Image.open(r'U:\Documents\medical_imaging\ytestimage_copy.dcm')
    # sa = Image.fromarray(f)
    # sa = scipy.misc.imrotate(sa, 90)
    self.tk_im = ImageTk.PhotoImage(sa)
    # label = self.Label(self, image=self.tk_im)
    # label.image = self.tk_im
    self.canvas.create_image(0, 0, anchor="nw", image=self.tk_im)
def save_im(image, name):
    _images_aff = image.data.cpu().numpy()
    _images_aff -= _images_aff.min()
    _images_aff /= _images_aff.max()
    _images_aff *= 255
    _images_aff = _images_aff.transpose((1, 2, 0))
    pylab.imsave(name, _images_aff.astype('uint8'))
def output_image(image, fname):
    pylab.imsave(fname, image, cmap='gray')
    if not os.path.exists(fname):
        print(" ##################### WARNING #####################")
        print(" --> No image file at @ '{}' (expected) ...".format(fname))
def img2output(img, cmap=DEFAULT_COLORMAP, output=None, show=False):
    """ Plots and saves the desired fractal raster image """
    if output:
        pylab.imsave(output, img, cmap=cmap)
    if show:
        pylab.imshow(img, cmap=cmap)
        pylab.show()
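# Hedged usage sketch for img2output() above: saves a random 200x200 array to
# 'fractal_demo.png'. DEFAULT_COLORMAP is defined elsewhere in the original
# module; passing 'viridis' explicitly here is an assumption for illustration.
import numpy as np

demo_img = np.random.rand(200, 200)
img2output(demo_img, cmap='viridis', output='fractal_demo.png')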
def packet_matrix_png():
    import pylab as plt
    import numpy as np
    senders, sender_names, packets = get_packet_matrix()
    if len(senders) == 0:
        senders = ['null']
    nrecv = len(app.nodes)
    nsend = len(senders)
    npackets = np.zeros((nrecv, nsend), int)
    for irecv, p in enumerate(packets):
        for isend, l0 in enumerate(senders):
            npackets[irecv, isend] = p.get(l0, 0)
    from io import BytesIO
    out = BytesIO()
    # plt.clf()
    # plt.imshow(npackets, interpolation='nearest', origin='lower', vmin=0)
    # plt.colorbar()
    # plt.xticks(np.arange(nsend))
    # plt.xlabel('L0 senders')
    # plt.yticks(np.arange(nrecv))
    # plt.ylabel('L1 receivers')
    # plt.savefig(out, format='png')
    # plt.title('Packets received matrix')
    plt.imsave(out, npackets, format='png')
    bb = out.getvalue()
    return (bb, {'Content-type': 'image/png'})
def output_image(image, fname):
    """Save an image and check that it exists afterward."""
    pylab.imsave(fname, image, cmap='gray')
    if not os.path.exists(fname):
        print(" ##################### WARNING #####################")
        print(" --> No image file at @ '{}' (expected) ...".format(fname))
def main(args):
    """ DocString """
    dim = (1000, 1000)  # Output image dimensions
    xint = (-3, 3)      # Range of the real parts
    yint = (-3, 3)      # Range of the imaginary parts
    iterate = 30        # Number of iterations
    c = 1 + .1j         # Parameter
    im = julia_build(dim, xint, yint, iterate, c)
    pl.imshow(im, cmap="nipy_spectral", origin="lower")
    pl.imsave("julia.png", im, cmap="nipy_spectral", format="png")
    pl.show()
    vertex = None
    i_image = 0
    while vertex != "exit":
        vertex = complex(input("Top-left vertex in the form x+yj (pixels): "))
        size_int = float(input("Pixel interval: "))
        xint = (remap(dim[1], xint[0], xint[1], vertex.real),
                remap(dim[1], xint[0], xint[1], vertex.real + size_int))
        yint = (remap(dim[0], yint[0], yint[1], vertex.imag),
                remap(dim[0], yint[0], yint[1], vertex.imag + size_int))
        im = julia_build(dim, xint, yint, iterate, c)
        pl.imshow(im, cmap="gnuplot")
        pl.imsave("julia{}.png".format(i_image), im, cmap="nipy_spectral", format="png")
        pl.show()
        i_image += 1
    return 0
def AnalyseNSS(self):
    if self.Mode == "Manual":
        files = QFileDialog(self)
        files.setWindowTitle('Non-Synchronised Segment Stripes')
        self.CurrentImages = files.getOpenFileNames(self, caption='Non-Synchronised Segment Stripes')
    SSSDlg1 = SSSDlg.SSSWidget(self)
    SSSDlg1.Img1 = DCMReader.ReadDCMFile(str(self.CurrentImages[0]))
    SSSDlg1.SSS1.axes.imshow(SSSDlg1.Img1, cmap='gray')
    SSSDlg1.Img2 = DCMReader.ReadDCMFile(str(self.CurrentImages[1]))
    SSSDlg1.SSS2.axes.imshow(SSSDlg1.Img2, cmap='gray')
    SSSDlg1.Img3 = DCMReader.ReadDCMFile(str(self.CurrentImages[2]))
    SSSDlg1.SSS3.axes.imshow(SSSDlg1.Img3, cmap='gray')
    SSSDlg1.Img4 = DCMReader.ReadDCMFile(str(self.CurrentImages[3]))
    SSSDlg1.SSS4.axes.imshow(SSSDlg1.Img4, cmap='gray')
    SSSDlg1.ImgCombi = SSSDlg1.Img1 + SSSDlg1.Img2 + SSSDlg1.Img3 + SSSDlg1.Img4
    SSSDlg1.SSSCombi.axes.imshow(SSSDlg1.ImgCombi, cmap='gray')
    EPIDType = np.shape(SSSDlg1.Img1)
    pl.imsave('NSS.jpg', SSSDlg1.ImgCombi)
    Img1 = pl.imread('NSS.jpg')
    if EPIDType[0] == 384:
        Img2 = pl.imread('NSSOrgRefas500.jpg')
    else:
        Img2 = pl.imread('NSSOrgRef.jpg')
    self.MSENSS = np.round(self.mse(Img1, Img2))
    if self.Mode == "Manual":
        SSSDlg1.exec_()
def writeToKml(filename, arr2d, NSEW, rotation=0.0, vmin=None, vmax=None,
               cmap=None, format=None, origin=None, dpi=72):
    """
    writeToKml(filename, arr2d, NSEW, rotation=0.0, vmin=None, vmax=None,
               cmap=None, format=None, origin=None, dpi=72)

    NSEW = [north, south, east, west]
    """
    import os
    # Check if filename has an extension
    base, ext = os.path.splitext(filename)
    if len(ext) == 0:
        ext = '.kml'
    kmlFile = base + ext
    pngFile = base + '.png'
    f = open(kmlFile, 'w')
    f.write('<kml xmlns="http://earth.google.com/kml/2.1">\n')
    f.write('<Document>\n')
    f.write('<GroundOverlay>\n')
    f.write('  <visibility>1</visibility>\n')
    f.write('  <LatLonBox>\n')
    f.write('    <north>%(#)3.4f</north>\n' % {"#": NSEW[0]})
    f.write('    <south>%(#)3.4f</south>\n' % {"#": NSEW[1]})
    f.write('    <east>%(#)3.4f</east>\n' % {"#": NSEW[2]})
    f.write('    <west>%(#)3.4f</west>\n' % {"#": NSEW[3]})
    f.write('    <rotation>%(#)3.4f</rotation>\n' % {"#": rotation})
    f.write('  </LatLonBox>')
    f.write('  <Icon>')
    f.write('    <href>%(pngFile)s</href>' % {'pngFile': pngFile})
    f.write('  </Icon>')
    f.write('</GroundOverlay>')
    f.write('</Document>')
    f.write('</kml>')
    f.close()
    # Now write the image
    plt.imsave(pngFile, arr2d, vmin=vmin, vmax=vmax, cmap=cmap, format=format,
               origin=origin, dpi=dpi)
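# Hedged usage sketch for writeToKml() above: writes a random array as a
# ground overlay spanning one degree of latitude and longitude. The bounds and
# the output filename are illustrative assumptions, and the call relies on the
# original module's `plt` import being available.
import numpy as np

demo_arr = np.random.rand(256, 256)
writeToKml('overlay_demo.kml', demo_arr, NSEW=[38.0, 37.0, -121.0, -122.0],
           cmap='viridis')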
def dispims(M, height, width, border=0, bordercolor=0.0, layout=None, gray=None, name='no_name'):
    numimages = M.shape[1]
    if layout is None:
        n0 = int(np.ceil(np.sqrt(numimages)))
        n1 = int(np.ceil(np.sqrt(numimages)))
    else:
        n0, n1 = layout
    im = bordercolor * np.ones(((height + border) * n0 + border,
                                (width + border) * n1 + border), dtype='<f8')
    for i in range(n0):
        for j in range(n1):
            if i * n1 + j < M.shape[1]:
                im[i * (height + border) + border:(i + 1) * (height + border) + border,
                   j * (width + border) + border:(j + 1) * (width + border) + border] = \
                    np.vstack((
                        np.hstack((np.reshape(M[:, i * n1 + j], (height, width)),
                                   bordercolor * np.ones((height, border), dtype=float))),
                        bordercolor * np.ones((border, width + border), dtype=float)))
    if gray is None:
        pylab.imsave(arr=im, fname='./PSD_' + name + '.png', cmap=pylab.cm.gray)
    else:
        pylab.savefig('sparse.png')
def saveImages(self):
    for zoom in self.zooms:
        try:
            pylab.imsave('zoom' + str(zoom) + '_' + '_'.join(time.asctime().split()) + '.png',
                         self.images[zoom])
        except KeyError, diag:
            print diag
            print 'Can\'t save image at zoom %s, it\'s not in the dictionary.' % zoom
def save_mean_sharpness_map(self, rows, cols, sharp, label):
    mean_sharp_map = np.zeros((rows, cols), np.float)
    for x, y in np.ndindex((rows, cols)):
        mean_sharp_map[x, y] = sharp[label[x, y]]
    pp.gray()
    pp.imsave("tiger_reg_sharp.jpg", mean_sharp_map)
def visualize(image_list, cluster):
    i = 0
    for image in image_list:
        image = np.reshape(image, (28, 28))
        plt.figure()
        plt.imsave("./Results/Centroid_" + str(i) + "_for_" + str(cluster) + "_clusters",
                   image, cmap='gray')
        i += 1
    plt.close('all')
def save_jpeg(fn, rgb, **kwargs):
    import pylab as plt
    import tempfile
    f, tempfn = tempfile.mkstemp(suffix='.png')
    os.close(f)
    plt.imsave(tempfn, rgb, **kwargs)
    cmd = 'pngtopnm %s | pnmtojpeg -quality 90 > %s' % (tempfn, fn)
    os.system(cmd)
    os.unlink(tempfn)
def extract_perannotation(slide_index=41, annotation_index=0,
                          patch_size=448, step=128, max_patches=None,
                          level=0, count_offset=0,
                          save_path=os.path.expanduser('~') + '/DATA_CRLM/Patches/Patches_Level0/Patches_448/Eval/',
                          annotation_root=os.path.expanduser('~') + '/DATA_CRLM/CRLM/ndpa_bak/'):
    """
    Extract patches from annotations.

    slide_index
    annotation_index
    patch_size
    step: step size
    max_patches: if not None, increase the step until the number of patches is
                 at most max_patches
    """
    offsetx = 128
    offsety = 128
    centroid_region = int(patch_size / 2)
    ratio = 0.9
    tc = CRLM(slide_index, annotation_root=annotation_root)
    label, img, mask = tc.ExtractAnnotationImage(annotation_index)
    ref_x, xa, ref_y, ya = tc.AnnotationBbox(annotation_index)
    tim = img

    def get_current_patch_nums(tstep):
        tcount = 0
        for ix in range(offsetx, tim.shape[0] - tstep, tstep):
            for iy in range(offsety, tim.shape[1] - tstep, tstep):
                if np.sum(mask[ix:ix + centroid_region, iy:iy + centroid_region]) \
                        > centroid_region * centroid_region * ratio:
                    tcount += 1
        return tcount

    if max_patches is not None:
        t_num_patches = get_current_patch_nums(step)
        while t_num_patches > max_patches:
            step = step + 32
            t_num_patches = get_current_patch_nums(step)
    count = 0
    for ix in range(offsetx, tim.shape[0] - step, step):
        for iy in range(offsety, tim.shape[1] - step, step):
            if np.sum(mask[ix:ix + centroid_region, iy:iy + centroid_region]) \
                    > centroid_region * centroid_region * 0.9:
                tim2 = np.array(
                    tc.img.read_region(location=(ref_x + iy - 48, ref_y + ix - 48),
                                       level=level,
                                       size=(patch_size, patch_size)))
                plt.imsave(
                    save_path + label_dict[label] + '_%03d_%04d.png' % (slide_index, count + count_offset),
                    tim2)
                count += 1
            # else:
            #     print(np.sum(mask[ix:ix+centroid_region, iy:iy+centroid_region]), patch_size**2/0.9)
    return count
def thumbnailForFolder(targetFolderName, destFolderName):
    origFiles = [f for f in listdir(targetFolderName) if isfile(join(targetFolderName, f))]
    for files in origFiles:
        if files[-4:] == '.png':
            print(files)
            originalImage = imread(targetFolderName + files)
            thumbnail = createThumbnail(originalImage)
            imsave(destFolderName + files[0:-4] + '.png', thumbnail)
def visualize_class_activation_map(self, model_path, output_path, layer):
    cn = create_net.create_net()
    model = cn.net([], [], self.case, self.height, 1, 2, self.width)
    model.load_weights(model_path)
    original_img = np.array(self.X)
    print(original_img.shape)
    original_img = np.reshape(np.array(original_img),
                              [self.X.shape[0], self.channels, self.height, self.width])
    n_i, width, height, channels = original_img.shape
    # Reshape to the network input shape (b, channel, w, h).
    img = np.array([np.transpose(np.float32(original_img), (0, 3, 2, 1))])
    img = np.reshape(img, [self.X.shape[0], self.height, self.width, self.channels])
    print(img.shape)
    # Get the 512 input weights to the softmax.
    class_weights = model.layers[-1].get_weights()[0]
    for k in range(len(layer)):
        final_conv_layer = model.get_layer(layer[k])
        # final_conv_layer = get_output_layer(model, layer)
        print('test1')
        # conv_outputs = []
        # output_layer = final_conv_layer.get_output_at(-1)[2]
        # if (k == 2):
        output_layer = final_conv_layer.get_output_at(-1)
        out1 = K.function([model.layers[0].input], [output_layer, model.layers[-1].output])
        [out, pred] = out1([img])
        print(out.shape)
        print(img.shape)
        conv_output = np.reshape(out, [out.shape[0], out.shape[1], out.shape[2], out.shape[3]])
        conv_outputs = conv_output
        # for u in range(1, self.X.shape[0]):
        #     get_output = K.function([model.layers[0].input],
        #                             [final_conv_layer.get_output_at(-1)[u], model.layers[-1].output])
        #     [conv_output, pred] = get_output([img])
        #     conv_output = np.reshape(conv_output,
        #                              [1, conv_output.shape[0], conv_output.shape[1], conv_output.shape[2]])
        #     print(conv_output.shape)
        #     conv_outputs = np.append(conv_outputs, conv_output, axis=0)
        print(conv_outputs.shape[0:4])
        print(class_weights.shape)
        # Create the class activation map.
        w = 1
        for o in range(n_i):
            cam = np.zeros(dtype=np.float32, shape=[conv_outputs.shape[1], conv_outputs.shape[2]])
            print(cam.shape)
            for i in range(conv_outputs.shape[3]):
                cam = cam + w * conv_outputs[o, :, :, i]
            str3 = output_path[k] + '/heatmap_%s' % (o)
            pylab.imsave(str3, cam, format='png')
def display_digit(label, X, colormap=pylab.cm.gist_gray):
    l = len(X)
    m = int(np.ceil(np.sqrt(l)))
    M = np.zeros((m, m))
    for i in range(m):
        M[i, :] = X[i * m:(i + 1) * m]
    pylab.imshow(M, cmap=colormap)
    pylab.imsave(str(label) + ".png", M)
    pylab.axis('off')
    return M
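# Hedged usage sketch for display_digit() above: renders a flattened 28x28
# "digit" of random values and writes it to '7.png'. The label value and the
# random data are illustrative assumptions; real MNIST-style vectors (whose
# length is a perfect square) would be used in practice.
import numpy as np
import pylab

demo_vec = np.random.rand(28 * 28)
display_digit(7, demo_vec)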
def save_segmented_image(self, cleaned_contours):
    '''
    Saves image

    Parameters
    ----------
    cleaned_contours : nparray
        Processed graphcut output
    '''
    # pylab.imsave requires an output filename as its first argument;
    # 'segmented_contours.png' below is an assumed placeholder name.
    pylab.imsave('segmented_contours.png', cleaned_contours)
def SaveMap(self, filename, data, reportRange):
    data_max = np.nanmax(data)
    data_min = np.nanmin(data)
    label_min = data_min
    label_max = data_max
    cmap = 'viridis'
    if filename[-3:] in ["jpg", "tif", "png"] or filename[-4:] == "tiff":
        if ((filename[-6:] == "geotif") or (filename[-7:] == "geotiff")) and gdal_flag:
            if self.type == "NPV":
                array_to_raster(filename.replace('geotif', 'tif'), data[::-1] * 1e-06)
            else:
                array_to_raster(filename.replace('geotif', 'tif'), data[::-1])
            filename = filename.replace("tiff", "tif")
            filename = filename.replace("geotif", "png")
        if self.type == "NPV":
            if abs(data_min) > data_max:
                data_max = -1. * data_min
            if data_max > abs(data_min):
                data_min = -1. * data_max
            cmap = 'seismic'
            label_min = data_min * 1e-06
            label_max = data_max * 1e-06
        elif self.type == "benefit_cost_ratio":
            cmap = 'PiYG'
            data_min = np.log(data_min)
            data_max = np.log(data_max)
            if abs(data_min) > data_max:
                data_max = -1. * data_min
            if data_max > abs(data_min):
                data_min = -1. * data_max
            label_min = np.exp(data_min)
            label_max = np.exp(data_max)
            data = np.log(data)
        elif self.type == "breakeven_grade":
            cmap = 'viridis'
        elif self.type == "employment":
            cmap = 'viridis'
        else:
            cmap = 'viridis'
        # if not gdal_flag:
        pl.imsave(filename, data, origin="lower", cmap=pl.get_cmap(cmap),
                  vmin=data_min, vmax=data_max)
    elif filename[-3:] == "npy":
        np.save(filename, data)
    elif filename[-3:] == "txt":
        np.savetxt(filename, data)
    else:
        print("Error unrecognized output type: ", filename)
    if reportRange:
        np.savetxt(filename + "_range.txt", [np.round(label_min, 2), np.round(label_max, 2)])
    return 0
def save_generated_images(self):
    if hasattr(self.dataset, 'next_generator_sample_test'):
        batch = self.dataset.next_generator_sample_test()
    else:
        batch = self.dataset.next_generator_sample()
    gen_images = self.generate_op(batch + [False])
    image = self.dataset.display(gen_images, batch)
    title = "epoch_{}.png".format(str(self.current_epoch).zfill(3))
    if not os.path.exists(self.output_dir):
        os.makedirs(self.output_dir)
    plt.imsave(os.path.join(self.output_dir, title), image, cmap='gray')
def save(filename, key, matrix):
    import os
    out_matrix = os.path.join(os.getcwd(), filename + ".mat")
    out_img = os.path.join(os.getcwd(), filename + ".png")
    savemat(out_matrix, {key: matrix})
    print out_matrix
    print out_img
    out_list.append(out_matrix)
    imsave(out_img, matrix.todense())
    out_list.append(out_img)
def lookOrigin(self):
    strOrigin = '查看原图[Alt]'  # button label: "View original image [Alt]"
    strLabel = '查看标注[Alt]'   # button label: "View annotations [Alt]"
    if self.bLookOrigin.text() == strOrigin:
        self.bLookOrigin.setText(strLabel)
        plt.imsave('tmp.png', self.img)
        self.label_img.setPixmap(QPixmap('tmp.png'))
    else:
        self.bLookOrigin.setText(strOrigin)
        plt.imsave('tmp.png', self.plus)
        self.label_img.setPixmap(QPixmap('tmp.png'))
def slice_save(self, astr_outputFile):
    '''
    Processes/saves a single slice.

    ARGS

    o astr_outputFile
      The output filename to save the slice to.
    '''
    self._log('Output file = %s\n' % astr_outputFile)
    pylab.imsave(astr_outputFile, self._Mnp_2Dslice, cmap=cm.Greys_r)
def manualinterpolate(im, t2x, x2t, p, degPerPix=None, fname=None):
    pixToLonlat = lambda x, y: p(*t2x([x, y]), inverse=True)
    ll2pix = lambda lon, lat: x2t(p(lon, lat))
    height, width, _ = im.shape
    inx = np.arange(width)
    iny = -np.arange(height)
    SKIP = 10
    xs, ys = np.meshgrid(inx[::SKIP], iny[::SKIP])
    origLon, origLat = pixToLonlat(xs.ravel(), ys.ravel())
    vecToBounds = lambda x: np.array([np.min(x), np.max(x)])
    boundLon = vecToBounds(origLon)
    boundLat = vecToBounds(origLat)
    if not degPerPix:
        degPerPix = np.min(np.abs(np.diff(origLat.reshape(xs.shape), axis=0)))
        degPerPix = min(degPerPix, np.min(np.diff(origLon.reshape(xs.shape), axis=1)))
        degPerPix *= 1.0 / SKIP
    lonvec = np.arange(boundLon[0] - 0.5, boundLon[1] + 0.5, degPerPix)
    latvec = np.arange(boundLat[0] - 0.5, boundLat[1] + 0.5, degPerPix)
    outLon, outLat = np.meshgrid(lonvec, latvec)
    outx, outy = ll2pix(outLon.ravel(), outLat.ravel())
    outx = outx.reshape(outLat.shape)
    # map_coordinates doesn't need QGIS' -y axis, just integer indexes, so negative:
    outy = -outy.reshape(outLat.shape)
    from scipy.ndimage.interpolation import map_coordinates
    res = np.dstack([map_coordinates(im[:, :, dim], [outy, outx], order=0)
                     for dim in range(3)])
    if fname:
        plt.imsave(fname=fname, arr=res[::-1, :, :])
    plateCarree = Proj(init="EPSG:32662")
    tl = plateCarree(outLon[0, 0], outLat[-1, -1])
    br = plateCarree(outLon[-1, -1], outLat[0, 0])
    print("""{} saved. top_left_lon={}, top_left_lat={}, bottom_right_lon={}, bottom_right_lat={}""".format(
        fname, outLon[0, 0], outLat[-1, -1], outLon[-1, -1], outLat[0, 0]))
    print("To convert to a georegistered (Geo)JPEG, run:")
    print(("gdal_translate -of JPEG -a_ullr {top_left_lon} {top_left_lat} {bottom_right_lon}" +
           " {bottom_right_lat} -a_srs EPSG:32662 {fname} output.jpg").format(
        top_left_lon=tl[0], top_left_lat=tl[1],
        bottom_right_lon=br[0], bottom_right_lat=br[1], fname=fname))
    return res, outLon, outLat
def show_out_image(img, title='Image', show=True, write=False):
    """ Plots image representing pixel classes """
    cm = pylab.get_cmap('gnuplot')
    if show:
        pylab.axis('off')
        pylab.imshow(img, interpolation='nearest', cmap=cm)
        pylab.show()
        pylab.draw()
    if write:
        pylab.imsave(title + '.png', img, cmap=cm)
def main():
    import sys
    if len(sys.argv) > 1:
        S = test_with_file(sys.argv[1])
    else:
        S = test_with_noise()
    print "Values of min and max"
    print S.min(), S.max()
    print "Location of min and max"
    print numpy.unravel_index(S.argmin(), S.shape), \
        numpy.unravel_index(S.argmax(), S.shape)
    pylab.imsave('result.png', S, cmap=pylab.cm.gray)
def resize_image():
    db = current.globalenv['db']
    rows = db(db.image.id == current.request.args[0]).select()
    f = rows.first().file
    f = os.path.join(current.request.folder, 'uploads', f)
    print f
    im3 = Image.open(f)
    im3 = im3.resize((550, 100))
    im3 = im3.rotate(45)
    stream = cStringIO.StringIO()
    imsave(stream, np.array(im3))
    return stream.getvalue()
def img_transition(file1, file2, map_array, blk_siz=2, n=50):
    I = file2gray(file1)
    J = file2gray(file2)
    d = absolute(I - J)
    steps = linspace(d.min(), d.max(), n + 1)
    for i, step in enumerate(steps):
        K = I * (d > step) + J * (d < step)
        # do something with K
        im_name = os.path.join(".", "output", "%s-%s-%02d.png" % (file1, file2, i))
        imsave(im_name, K, cmap=cm.gray)
def save_samples_PNG(self, path, color_map=None, r_g_b=[1, 2, 3]):
    for pos in range(len(self.samples_img)):
        samples_dir = os.path.join(path, 'sample_imgs')
        labels_dir = os.path.join(path, 'sample_labels')
        fs.mkdir(samples_dir)
        fs.mkdir(labels_dir)
        file_name = 'sample' + str(pos) + '.png'
        scipy.misc.imsave(os.path.join(samples_dir, file_name),
                          self.samples_img[pos][:, :, r_g_b])
        if color_map is None:
            scipy.misc.imsave(os.path.join(labels_dir, file_name),
                              self.samples_labels[pos][:, :, 0])
        else:
            pl.imsave(fname=os.path.join(labels_dir, file_name),
                      arr=self.samples_labels[pos][:, :, 0],
                      cmap=color_map)
def plot_predictions(self):
    # data = self.get_next_batch(train=False)[2]  # get a test batch
    # num_classes = self.test_data_provider.get_num_classes()
    # NUM_ROWS = 2
    # NUM_COLS = 4
    # NUM_IMGS = NUM_ROWS * NUM_COLS
    # NUM_TOP_CLASSES = min(num_classes, 4)  # show this many top labels
    batch_path = '/home/wangning/traffic_sign/20150409_60100/test_batch/'
    meta_name = '/home/wangning/traffic_sign/20150409_60100/meta/batches.meta'
    metafile = open(meta_name)
    metaDic = cPickle.load(metafile)
    batch_names = os.listdir(batch_path)
    for bt_name in batch_names:
        print "+++++++++++++++++++++++++++++++++"
        print bt_name
        datafile = open(batch_path + bt_name, 'rb')
        dataDic = cPickle.load(datafile)
        dataDic['labels'] = numpy.array(dataDic['labels'])
        dataDic['labels'] = dataDic['labels'].astype(numpy.float32)
        dataDic['data'] = numpy.require((dataDic['data'] - metaDic['data_mean']),
                                        dtype=numpy.single, requirements='C')
        dataDic['labels'] = numpy.require(dataDic['labels'].reshape((1, dataDic['data'].shape[1])),
                                          dtype=n.single, requirements='C')
        data = [dataDic['data'], dataDic['labels']]
        filenames = dataDic['filenames']
        num_classes = self.test_data_provider.get_num_classes()
        preds = n.zeros((data[0].shape[1], num_classes), dtype=n.single)
        data += [preds]
        imgs = self.test_data_provider.get_plottable_data(data[0])
        # Run the model
        self.libmodel.startFeatureWriter(data, self.sotmax_idx)
        self.finish_batch()
        result = preds.argmax(axis=1)
        for i in range(imgs.shape[0]):
            # print filenames[i]
            tmp = filenames[i]
            tmp = tmp.split('/')
            tmp = tmp[len(tmp) - 1]
            if len(tmp) == 0:
                continue
            if tmp[0] != 'D':
                continue
            typecode = self.cvt_typecode(result[i])
            tps = tmp.split('_')
            if tps[2] != typecode:
                err_name = str(typecode) + "_" + tmp
                print err_name
                tmpImg = imgs[i, :, :, :]
                img = tmpImg[:, :, [2, 1, 0]]
                imgfileName = 'checkImg/' + err_name
                pl.imsave(imgfileName[:-4] + ".png", img)
        print "+++++++++++++++++++++++++++++++++"
def test_file_image(fname):
    ext = os.path.splitext(fname)[-1][len(os.path.extsep):]
    kwargs = to_dict_params(fname)
    # Creates the image in memory
    mem = BytesIO()
    fractal_data = call_kw(generate_fractal, kwargs)
    imsave(mem, fractal_data, cmap=kwargs["cmap"], format=ext)
    mem.seek(0)  # Return stream position back for reading
    # Comparison pixel-by-pixel
    img_file = imread("images/" + fname)
    img_mem = imread(mem, format=ext)
    assert img_file.tolist() == img_mem.tolist()
def imsave_jpeg(jpegfn, img, **kwargs):
    '''Saves an image in JPEG format.

    Some matplotlib installations (notably at NERSC) don't support jpeg,
    so we write to PNG and then convert to JPEG using the venerable
    netpbm tools.

    *jpegfn*: JPEG filename
    *img*: image, in the typical matplotlib formats (see plt.imsave)
    '''
    import pylab as plt
    tmpfn = create_temp(suffix='.png')
    plt.imsave(tmpfn, img, **kwargs)
    cmd = ('pngtopnm %s | pnmtojpeg -quality 90 > %s' % (tmpfn, jpegfn))
    rtn = os.system(cmd)
    print(cmd, '->', rtn)
    os.unlink(tmpfn)
def makeTestPair(paths, homography, collection, width=250, height=250, scale=1.0):
    images = map(loadImage, paths)
    # Crop part of first image and part of second image:
    (top_o, left_o) = (random.randint(0, images[0].shape[0] - height),
                       random.randint(0, images[0].shape[1] - width))
    (top_n, left_n) = (random.randint(0, images[1].shape[0] - height),
                       random.randint(0, images[1].shape[1] - width))
    # Get two file names
    c_path = getRandPath("%s/" % collection)
    print(c_path)
    if not exists(dirname(c_path)):
        makedirs(dirname(c_path))
    # Make sure we save as gray
    pylab.gray()
    im1 = images[0][top_o: top_o + height, left_o: left_o + width]
    im2 = images[1][top_n: top_n + height, left_n: left_n + width]
    im1_scaled = imresize(im1, size=float(scale), interp='bicubic')
    im2_scaled = imresize(im2, size=float(scale), interp='bicubic')
    pylab.imsave(c_path + "_1.jpg", im1_scaled)
    pylab.imsave(c_path + "_2.jpg", im2_scaled)
    # imsave(c_path + "_1.jpg", im1)
    # imsave(c_path + "_2.jpg", im2)
    T1 = numpy.identity(3)
    T1[0, 2] = left_o
    T1[1, 2] = top_o
    T2 = numpy.identity(3)
    T2[0, 2] = -1 * left_n  # * scale
    T2[1, 2] = -1 * top_n   # * scale
    Ts = numpy.identity(3)
    Ts[0, 0] = scale
    Ts[1, 1] = scale
    Tsinv = numpy.identity(3)
    Tsinv[0, 0] = 1.0 / scale
    Tsinv[1, 1] = 1.0 / scale
    hom = Ts.dot(T2).dot(homography).dot(T1).dot(Tsinv)
    hom = hom / hom[2, 2]
    numpy.savetxt(c_path, hom)
    return c_path
def show_img(img, show=True, save=False, xy=None):
    # if show:
    #     pylab.imshow(img)
    #     pylab.gray()
    #     pylab.show()
    filename = os.tempnam() + ".png"
    pylab.imsave(filename, img, cmap=pylab.cm.Greys_r)
    if xy is not None:
        x0 = xy[0] - 5
        y0 = xy[1] - 5
        x1 = x0 + 5
        y1 = y0 + 5
        os.system("convert %s -fill red -draw 'rectangle %d,%d,%d,%d' %s_"
                  % (filename, x0, y0, x1, y1, filename))
        os.system("mv %s_ %s" % (filename, filename))
    if show:
        os.system("display %s" % filename)
    if not save:
        os.unlink(filename)
    else:
        print filename