def print_map(self):
    ocmap = []
    for line in self.occupancy_map:
        ocmap.append([cell[0] for cell in line])
    npmap = np.array(ocmap)
    toimage(npmap, cmin=0.0, cmax=1.0).save('occupancy_map.jpeg')
def _data_images(self):
    """Make images with linear scaling, sqrt scaling, and log scaling.

    This might take a little bit of time.
    """
    mag, phase = np.abs(self.rs_data), np.angle(self.rs_data)
    mag *= self.blocker

    # resize, then save the color and scale permutations of first_frame.
    linr = mag
    sqrt = np.sqrt(mag)
    logd = np.log((mag - mag.min()) / mag.max() * 1000 + 1)

    self.rs_image_linr = io.complex_hsv_image(linr)
    self.rs_image_sqrt = io.complex_hsv_image(sqrt * np.exp(1j * phase))
    self.rs_image_log = io.complex_hsv_image(logd * np.exp(1j * phase))

    imgs = {'logd': smp.toimage(self.rs_image_log),
            'sqrt': smp.toimage(self.rs_image_sqrt),
            'linr': smp.toimage(self.rs_image_linr)}

    base = './static/imaging/images/ifth_session%s_id%s_%s_%s.%s'
    for key, val in imgs.items():
        val.save(base % (self.session_id, self.data_id, self.blocker_power, key, 'png'))
        val.save(base % (self.session_id, self.data_id, self.blocker_power, key, 'jpg'))
def detect_blur(f):
    print "Processing %s" % f
    im = Image.open("images/orig_" + f + ".jpeg")
    im = im.convert('F')
    im = array(im)

    # Laplacian responds strongly to edges; blurred regions respond weakly.
    im = filters.laplace(im)
    laplacien_im = im
    toimage(im).save(out_dir + "/laplacien_" + f + ".png", "png")

    im = morphology.white_tophat(im, (3, 3))
    tophat_im = im
    toimage(im).save(out_dir + "/tophat_" + f + ".png", "png")

    im = filters.percentile_filter(im, 30, 1)
    toimage(im).save(out_dir + "/final_" + f + ".png", "png")

    # Convert to PPM and run the external graph-based segmentation tool.
    subprocess.call("/usr/local/bin/convert %s/final_%s.png %s/ppm_final_%s.ppm"
                    % (out_dir, f, out_dir, f), shell=True)
    subprocess.call("./segment/segment 0.8 100 100 %s/ppm_final_%s.ppm %s/segment_%s.ppm"
                    % (out_dir, f, out_dir, f), shell=True)

    im_seg = Image.open("%s/segment_%s.ppm" % (out_dir, f))
    im_seg = array(im_seg)

    print "laplacien"
    laplacian_mask = build_mask(im_seg, laplacien_im)
    toimage(laplacian_mask).save(out_dir + "/laplacian_mask_" + f + ".png", "png")

    print "tophat"
    tophat_mask = build_mask(im_seg, tophat_im, 10)
    toimage(tophat_mask).save(out_dir + "/tophat_mask_" + f + ".png", "png")
def pairdump():
    """Visualize the model columns."""
    from layerbase import DrawPatch
    import cPickle
    sparsedirect = cPickle.load(file('sparselinked', 'rb'))
    drec = sparsedirect.components_.reshape((-1, 1, 70 - 25, 90 - 0))
    misc.toimage(DrawPatch(drec)).save('dictpair.jpg')
def arrayVisualizeS1(self):
    # Used to plot all of the arrays
    for a, n in zip(self.S1.arrays, range(12)):  # Plots all of the S1 arrays
        array = a.arr
        ph = np.empty(array.shape)
        for i in range(array.shape[0]):
            for j in range(array.shape[1]):
                ph[i, j] = array[i, j].output
        img = toimage(ph)
        plt.subplot(5, 3, n + 1)
        plt.imshow(img)
        plt.title('S1_{0}'.format(n + 1))
    ph = np.empty(self.C0.arrays[0].arr.shape)
    for i in range(self.C0.arrays[0].arr.shape[0]):  # Plots the input array
        for j in range(self.C0.arrays[0].arr.shape[1]):
            ph[i, j] = self.C0.arrays[0].arr[i, j].output
    img = toimage(ph)
    plt.subplot(5, 3, 13)
    for i in range(self.V0.arrays[0].arr.shape[0]):  # Plots the inhibitory array
        for j in range(self.V0.arrays[0].arr.shape[1]):
            ph[i, j] = self.V0.arrays[0].arr[i, j].output
    img = toimage(ph)
    plt.subplot(5, 3, 13)
    plt.imshow(img)
    # plt.subplot(5, 3, 13)
    plt.title('C0')
    plt.tight_layout()
    plt.draw()
def retrieve(self, request, pano_id=None, heading=0):
    target_heading = get_int_value(
        request, 'heading', default=heading, upper=360, strategy='modulo')
    target_width = get_int_value(
        request, 'width', default=750, lower=1, upper=1600, strategy='cutoff')
    target_fov = get_int_value(
        request, 'fov', default=80, upper=120, strategy='cutoff')
    target_width, target_fov = self._max_fov_per_width(target_width, target_fov)
    target_horizon = get_float_value(
        request, 'horizon', default=0.3, lower=0.0, upper=1.0)
    target_aspect = get_float_value(
        request, 'aspect', default=1.5, lower=1.0)

    pano = get_object_or_404(Panoramas, pano_id=pano_id)
    thumb = Thumbnail(pano)
    thumb_img = thumb.get_image(target_width=target_width,
                                target_fov=target_fov,
                                target_horizon=target_horizon,
                                target_heading=target_heading,
                                target_aspect=target_aspect)

    response = HttpResponse(content_type="image/jpeg")
    misc.toimage(thumb_img).save(response, "JPEG")
    return response
def rgbtiffstojpg(files, path, name):
    '''
    files: a list of files ordered as follows.
        0: Blue Band
        1: Green Band
        2: Red Band
    path: the path to look for the tiff files and to save the jpg
    name: the file name for the saved jpg
    '''
    import scipy.misc as sm
    import gdal
    import numpy as np

    b2_link = gdal.Open(path + "/tiff/" + files[0])
    b3_link = gdal.Open(path + "/tiff/" + files[1])
    b4_link = gdal.Open(path + "/tiff/" + files[2])

    # Scale each band, converted to float, into the [0, 1] range.
    def norm(band):
        band_min, band_max = band.min(), band.max()
        return (band - band_min) / (band_max - band_min)

    b2 = norm(b2_link.ReadAsArray().astype(np.float))
    b3 = norm(b3_link.ReadAsArray().astype(np.float))
    b4 = norm(b4_link.ReadAsArray().astype(np.float))

    # Create RGB by stacking red, green, blue; clip display to the 2-98 percentile range.
    rgb = np.dstack((b4, b3, b2))
    del b2, b3, b4
    sm.toimage(rgb, cmin=np.percentile(rgb, 2),
               cmax=np.percentile(rgb, 98)).save(path + '/images/' + name)
def overlay_predictions(image, im_softmax, image_shape, threshold, channel,
                        seg_color=(0, 255, 0, 172)):
    """Creates an overlay using pixels with p(class) > threshold."""
    segmentation = np.expand_dims(im_softmax[:, :, channel] > threshold, 2)
    mask = segmentation * np.reshape(np.array(seg_color), (1, 1, -1))
    mask = misc.toimage(mask, mode="RGBA")
    street_im = misc.toimage(image)
    street_im.paste(mask, box=None, mask=mask)
    return street_im
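# A hypothetical smoke test for overlay_predictions() above, assuming the
# deprecated scipy.misc.toimage it relies on is still importable. Random data
# stands in for a real image and a two-class softmax map; the shapes,
# threshold, and file name are illustrative only.
import numpy as np

h, w = 160, 576
image = np.random.randint(0, 256, (h, w, 3), dtype=np.uint8)
im_softmax = np.random.rand(h, w, 2)
overlaid = overlay_predictions(image, im_softmax, (h, w), 0.5, channel=1)
overlaid.save("overlay.png")  # pixels with p(class 1) > 0.5 are tinted green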
def test_jpg(w, img):
    prev_i = 100
    for i in range(prev_i, 0, -5):
        print i,
        misc.toimage(img).save("out.jpg", quality=i)
        if w.extract(misc.imread("out.jpg")) is None:
            return prev_i
        prev_i = i
    return "unbound"
def test_jpg(w, img):
    prev_i = 100
    for i in range(prev_i, 0, -5):
        misc.toimage(img).save("out.jpg", quality=i)
        try:
            w.extract(misc.imread("out.jpg"))
        except ReedSolomonError:
            return prev_i
        prev_i = i
    return 5
def showtransform():
    """Show the neural-network transform results."""
    trans = np.load('transform.npy')[:, 0]
    glassmodel = np.load('glassline.npy').astype('f').reshape((-1, 45, 90))
    glassmodel /= np.max(glassmodel)
    trans = np.insert(trans, 2, glassmodel, axis=1)
    p = trans[:, 0].reshape((trans.shape[0], -1))
    trans[:, 0] -= ((p.max(axis=1) + p.mean(axis=1)) * 0.5).reshape((-1, 1, 1))
    trans[:, 0] = np.where(trans[:, 0] > 0, trans[:, 0], 0)
    from layerbase import DrawPatch
    misc.toimage(DrawPatch(trans, False, 'bgy')).save('nntrans.png')
def reshapeAndPrint(components, fold):
    '''Takes in the vector encoding of each of the NMF components and plots them to file'''
    imHeight = 192
    imWidth = 168
    nComp = np.shape(components)[0]
    for comp in range(nComp):
        image = np.reshape(components[comp, :], (imHeight, imWidth), order='C')
        savestring = './Results/images/nComp' + str(nComp) + \
            '_comp' + str(comp) + '_fold' + str(fold) + '.jpg'
        misc.toimage(image, cmin=0.0, cmax=...).save(savestring)
def png_buffer(array):
    """Convert an array to PNG, handling transparency in an
    at-least-partially-sane manner."""
    assert array.ndim == 2
    im = toimage(array)
    # Nonzero pixels stay opaque; zero pixels become transparent.
    alpha = toimage(array != 0)
    im.putalpha(alpha)
    # Return format is a buffer of PNG-encoded data
    fp = BytesIO()
    im.save(fp, format='png')
    return fp.getbuffer()
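# A hypothetical round-trip check for png_buffer() above: zero pixels should
# decode as fully transparent, nonzero pixels as opaque. The array contents
# are illustrative only.
from io import BytesIO
import numpy as np
from PIL import Image

arr = np.zeros((4, 4), dtype=np.uint8)
arr[1:3, 1:3] = 200
decoded = Image.open(BytesIO(png_buffer(arr))).convert("RGBA")
alpha = np.array(decoded)[:, :, 3]
assert alpha[0, 0] == 0 and alpha[1, 1] == 255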
def main():
    image = bayer("itau02.png")
    template = bayer("keyboard.png")
    correlation = correlate(image, template)
    c_max = max(correlation)
    c_min = min(correlation)
    # Rescale the correlation surface into the 0-255 range for display.
    signal = (correlation - c_min) * 255 / (c_max - c_min)
    toimage(signal).save("signal.png")
    winner = where(correlation == c_max, 255, 0)
    toimage(winner).save("winner.png")
    print searchmax(winner)
def get_phase(args):
    filename = args[0]
    path = args[1]
    path_raw = args[2]
    path_images = args[3]
    mask = args[4]
    coord = args[5]

    file_in = os.path.join(path, filename)
    file_raw = os.path.join(path_raw, 'raw_' + filename)
    image_phase = os.path.join(path_images, 'wrapped' + filename[4:11] + 'bmp')
    binary_phase = os.path.join(path_raw, 'wrapped' + filename[4:11] + 'dat')
    mod_arr = os.path.join(path_raw, 'mod' + filename[4:11] + 'dat')
    mod_image = os.path.join(path_images, 'mod' + filename[4:11] + 'bmp')
    qual_arr = os.path.join(path_raw, 'qual' + filename[4:11] + 'dat')
    qual_image = os.path.join(path_images, 'qual' + filename[4:11] + 'bmp')

    # Open meas file and grab dataset
    try:
        f = File(file_in, 'r')
    except:
        print 'Corrupt h5 file: ' + filename + ' ignoring'
        return
    sub = f.get(r'measurement0/frames/frame_full/data')
    data = np.array(sub[coord[0] - 1:coord[1] + 1, coord[2] - 1:coord[3] + 1], 'f')
    f.close()

    # Get phase
    phase, modulation, intensity = calc_phase(data)

    # Apply mask
    phase[~mask] = 0
    intensity[~mask] = 0
    modulation[~mask] = 0
    #phase = phase[coord[0]:coord[1],coord[2]:coord[3]]

    # Save phase
    toimage(phase).save(image_phase)
    phase.tofile(binary_phase)

    ave_mod = np.average(modulation[mask])
    ave_int = np.average(intensity[mask])
    '''
    if ave_mod < 0.6:
        print filename+' low mod:', ave_mod
    else:
        sys.stdout.write('.')
    '''
    return "%s,%f,%f\n" % (filename, ave_int, ave_mod)
def main():
    # image_array = load_images('olsh.dat')
    # im = Image.open("test_image_1.png")
    gray = ndimage.imread("test_image_1.png", flatten=True)
    # toimage(gray).show()
    subsamples = get_subsamples([gray])
    for i, sample in enumerate(subsamples):
        pca = PCA.PCA(sample, 90)
        compressed = toimage(pca.get_compressed_matrix())
        filename = 'pca_images/im_ex' + str(i) + '.png'
        print filename
        imsave(filename, compressed)
        toimage(compressed).show()
def original_color_transform(content, generated, mask=None):
    # Convert to YCbCr color space
    generated = fromimage(toimage(generated, mode='RGB'), mode='YCbCr')
    if mask is None:
        generated[:, :, 1:] = content[:, :, 1:]  # Generated CbCr = Content CbCr
    else:
        width, height, channels = generated.shape
        for i in range(width):
            for j in range(height):
                if mask[i, j] == 1:
                    generated[i, j, 1:] = content[i, j, 1:]
    # Convert back to RGB color space
    generated = fromimage(toimage(generated, mode='YCbCr'), mode='RGB')
    return generated
def __init__(self, abs_image, sound, frequencies):
    self.abs_image = abs_image
    self.image = toimage(apply_colormap(self.abs_image))
    self.width, self.height = self.image.size
    self.sound = sound
    self.frequencies = frequencies
    self.reversed_frequencies = list(reversed(frequencies))
def parse(self):
    """Runs each mask (crop) across the image file to improve OCR functionality."""
    image = Image.open(self.image_path)
    for form_field, bounding_box in self.bounding_box_dict.items():
        # The crops are scaled up and the contrast maxed out in order to
        # enhance character features and increase OCR success.
        x1, y1, x2, y2 = bounding_box
        xx = (x2 - x1) << 2
        yy = (y2 - y1) << 2
        the_crop = image.crop(bounding_box)
        the_crop = the_crop.resize((xx, yy), PIL.Image.LANCZOS)
        area = (xx * yy)
        gray = the_crop.convert('L')
        bw = np.asarray(gray).copy()
        bw[bw < 200] = 0
        bw[bw >= 200] = 255
        the_crop = misc.toimage(bw)
        # use this to check out a particular mask
        #if "box_c_address_city_town_zip_postal_code" == form_field:
        #    the_crop.show()
        if "checkbox" in form_field:
            # a box is considered checked if 10% or more of its area is black
            checked = np.sum(bw) >= (0.1 * area)
            self.component_contents_dict[form_field] = checked
        else:
            self.component_contents_dict[form_field] = self.clean_text(
                pytesseract.image_to_string(the_crop))
    print([self.component_contents_dict['box_c_address_city_town_zip_postal_code']])
def save_merged_file(file_name, imgs, labels, imgRows=60):
    """
    Saves the preprocessed images into a concatenated file.

    Saved file format: [num_images, imgRows, [nbytes, png_data, labels],
    [nbytes, png_data, labels], ...]
    """
    assert (type(imgs) is list and type(imgs[0]) is np.ndarray)
    with open(file_name, 'wb') as out_file:
        # write the number of images
        out_file.write(struct.pack('i', len(imgs)))
        out_file.write(struct.pack('i', imgRows))
        for ind, img in enumerate(imgs):
            PIL_img = toimage(img)
            # record where we need to write image size
            nbytes_pos = out_file.tell()
            # skip ahead
            out_file.seek(4, 1)
            curr_file_pos = out_file.tell()
            # save the image
            PIL_img.save(out_file, format='png')
            # jump back to write file size
            next_file_pos = out_file.tell()
            nbytes = next_file_pos - curr_file_pos
            out_file.seek(nbytes_pos)
            out_file.write(struct.pack('i', nbytes))
            out_file.seek(next_file_pos)
            # write labels
            for l in labels[ind]:
                out_file.write(struct.pack('f', l))
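# A matching reader pins down the byte layout written above. This is a minimal
# sketch; the name load_merged_file and the labels_per_img parameter (the
# writer does not record how many floats follow each image) are hypothetical.
import struct
from io import BytesIO
import numpy as np
from PIL import Image

def load_merged_file(file_name, labels_per_img):
    """Read back [num_images, imgRows, [nbytes, png_data, labels], ...]."""
    imgs, labels = [], []
    with open(file_name, 'rb') as in_file:
        num_images, img_rows = struct.unpack('ii', in_file.read(8))
        for _ in range(num_images):
            nbytes, = struct.unpack('i', in_file.read(4))
            imgs.append(np.array(Image.open(BytesIO(in_file.read(nbytes)))))
            labels.append(struct.unpack('%df' % labels_per_img,
                                        in_file.read(4 * labels_per_img)))
    return imgs, labels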
def hist(im_source):
    arr_im_rgb = array(im_source)
    arr_im_rcolor = []
    arr_im_gcolor = []
    arr_im_bcolor = []
    # Split the image into separate R, G, B channel arrays.
    i = 0
    for itemL in arr_im_rgb:
        arr_im_gcolor.append([])
        arr_im_rcolor.append([])
        arr_im_bcolor.append([])
        for itemC in itemL:
            arr_im_rcolor[i].append(itemC[0])
            arr_im_gcolor[i].append(itemC[1])
            arr_im_bcolor[i].append(itemC[2])
        i = 1 + i
    # Equalize each channel independently.
    arr_im_rcolor_hist = beautyImage(array(arr_im_rcolor))
    arr_im_gcolor_hist = beautyImage(array(arr_im_gcolor))
    arr_im_bcolor_hist = beautyImage(array(arr_im_bcolor))
    # Recombine the equalized channels into one image.
    i = 0
    arr_im_hist = []
    while i < len(arr_im_rcolor_hist):
        ii = 0
        tmp_line = []
        while ii < len(arr_im_rcolor_hist[i]):
            tmp_point = [arr_im_rcolor_hist[i][ii],
                         arr_im_gcolor_hist[i][ii],
                         arr_im_bcolor_hist[i][ii]]
            tmp_line.append(tmp_point)
            ii += 1
        arr_im_hist.append(tmp_line)
        i += 1
    figure()
    im_beauty = toimage(array(arr_im_hist), 255)
    im_beauty.show()
    im_beauty.save("../result/he/he_seperate.png")
def BuildImageFromInput(self, input_):
    """Create the initial image layer from some input.

    :param input_: Input data. If array, values should lie in the range [0, 1].
    :type input_: PIL.Image or 2D ndarray
    :returns: image layer data.
    :rtype: 2D ndarray of float
    """
    resize_method = self.params.image_resize_method
    if resize_method != ResizeMethod.METHOD_NONE:
        resize_length = self.params.image_resize_length
        resize_aspect_ratio = self.params.image_resize_aspect_ratio
        # Make sure input is an image
        if not isinstance(input_, Image.Image):
            input_ = toimage(input_)
        old_size = np.array(input_.size, np.float)  # format is (width, height)
        if resize_method == ResizeMethod.METHOD_SHORT_EDGE:
            input_ = ScaleImage(input_, old_size / min(old_size) * resize_length)
        elif resize_method == ResizeMethod.METHOD_LONG_EDGE:
            input_ = ScaleImage(input_, old_size / max(old_size) * resize_length)
        elif resize_method == ResizeMethod.METHOD_WIDTH:
            input_ = ScaleImage(input_, old_size / old_size[0] * resize_length)
        elif resize_method == ResizeMethod.METHOD_HEIGHT:
            input_ = ScaleImage(input_, old_size / old_size[1] * resize_length)
        elif resize_method == ResizeMethod.METHOD_SCALE_AND_CROP:
            width = resize_length
            height = width / resize_aspect_ratio
            input_ = ScaleAndCropImage(input_, (width, height))
        else:
            raise ValueError("Unknown resize method: %s" % resize_method)
    return ImageLayerFromInputArray(input_, self.backend)
def slide(self, value):
    """
    When x or y are changed, instead of recomputing the hologram, we use the
    shortcut of selecting a region of a larger pre-computed hologram.
    """
    source = self.sender()
    # select area to display
    x = round(self.lcd.value() / self.scale)
    y = round(self.lcd2.value() / self.scale)
    im = toimage(self.holo[256 - x:512 - x, 256 - y:512 - y])  # PIL image
    # convert image to pixmap
    # https://github.com/shuge/Enjoy-Qt-Python-Binding/blob/master/image/display_img/pil_to_qpixmap.py
    if im.mode == "RGB":
        pass
    elif im.mode == "L":
        im = im.convert("RGBA")
    data = im.tostring("raw", "RGBA")
    qim = QtGui.QImage(data, 256, 256, QtGui.QImage.Format_ARGB32)
    pixmap = QtGui.QPixmap.fromImage(qim)
    # assign to the hologram
    myScaledPixmap = pixmap.scaled(QtCore.QSize(400, 400))
    self.hologram.setPixmap(myScaledPixmap)
    # make a sphere object the size of the displayed window to
    # label at the bottom of the frame
    sphere2 = Sphere(
        n=self.lcd5.value() + 0.0001j,
        r=self.lcd4.value(),
        center=(self.lcd.value(), self.lcd2.value(), self.lcd3.value()),
    )
    self.sphObject.setText(repr(sphere2))
def test_jpg(w, img, fmt="jpg-%d.jpg"):
    prev_i = None
    prev_fn = None
    for i in range(100, 0, -5):
        fn = fmt % (i,)
        misc.toimage(img).save(fn, "JPEG", quality=i)
        try:
            w.extract(misc.imread(fn))
        except ReedSolomonError:
            os.remove(fn)
            return prev_i
        if prev_fn:
            os.remove(prev_fn)
        prev_i = i
        prev_fn = fn
    return "unbound"
def load_images(filename):
    binary_all_images = numpy.fromfile(filename, dtype=float)
    image_edge_length = 512
    num_images = 10
    all_numbers = []
    array_position = 0
    for k in range(0, num_images):
        matrix = numpy.zeros((image_edge_length, image_edge_length), numpy.float64)
        for i in range(0, image_edge_length):
            for j in range(0, image_edge_length):
                matrix[i][j] = binary_all_images[array_position]
                array_position += 1
        all_numbers.append(matrix)
    toimage(all_numbers[0]).show()
    return all_numbers
def delete_white_lines(image):
    im = image
    im = im.resize((100, 100), Image.ANTIALIAS)
    pix = im.load()
    res_mass = []
    for x in xrange(0, im.size[0]):
        new_mass = []
        for y in xrange(0, im.size[1]):
            pixel = pix[x, y]
            new_mass.append(pixel / 255)
        res_mass.append(new_mass)
    res_mass = np.array(res_mass)
    # Mark the columns that are entirely white.
    marks = []
    for i in xrange(0, 100):
        if sum(res_mass[:, i]) == 100:
            marks.append(i)
    print marks
    if marks == []:
        return image  #.rotate(180)
    else:
        # Delete the all-white columns, from the right to keep indices valid.
        for i in reversed(marks):
            res_mass = np.delete(res_mass, i, 1)
        return toimage(res_mass).transpose(Image.FLIP_LEFT_RIGHT)  #.rotate(90)
def appendAssay(self, assay, image=None):
    """Add statistics and image (and save it on disk) to the end of the table.

    Replaces both insertRow and setData. There are issues with metadata
    handling and image compression:

    * TIFF lacks a standardized rich-metadata container. Although the OMERO
      project affords the necessary capabilities with OME-TIFF, it's too
      much overhead to implement it myself.
    * Pillow can't compress TIFF and save it with tags without libtiff.

    Maybe in the future we will switch to Bioformats or pylibtiff to
    overcome these disadvantages.
    """
    total_rows = self.rowCount()
    self.beginInsertRows(QtCore.QModelIndex(), total_rows, total_rows)
    if image is not None:
        img = misc.toimage(image)
        imgpath = os.path.join(
            self.__datadir,
            assay.timestamp.strftime("%Y%m%d-%H%M%S.tif"))
        tiffinfo = {
            270: str(assay),  # ImageDescription
            305: "Immunopy",  # Software
            306: assay.timestamp.strftime("%Y:%m:%d %H:%M:%S")}  # DateTime
        img.save(
            imgpath.encode(sys.getfilesystemencoding()), format='TIFF',
            compression="tiff_deflate", tiffinfo=tiffinfo)
        assay.img_path = imgpath
    self.__assays.append(assay)
    print("Appending assay %s" % assay)
    self.endInsertRows()
    return True
def domedmask():
    diff = np.load('rpca_diff.npy').reshape((-1, 70 - 25, 90 - 0))
    diffmask = np.where(diff > 50, 255, 0)
    from bigrec import bigrec
    maskall = np.zeros((diff.shape[0], 105, 90), np.int0)
    glassorig = np.load('glassorig.npy').reshape((-1, 105, 90))
    noglass = np.zeros_like(glassorig)
    for i in range(diff.shape[0]):
        print i
        maskall[i] = bigrec(diffmask[i], maskall[i])
        medresult = maskedmedfilt(glassorig[i], maskall[i], 6, 4)
        noglass[i] = np.where(maskall[i], medresult, glassorig[i])
    np.save('medfilt_noglass.npy', noglass)
    from layerbase import DrawPatch
    misc.toimage(DrawPatch(noglass.reshape((-1, 1, 105, 90)))).save('mednoglass.png')
    misc.toimage(DrawPatch(maskall.reshape((-1, 1, 105, 90)))).save('medmask.png')
def resize(data, dims):
    """Wrapper to resize an image."""
    import scipy.misc as smp
    import Image
    tmp = smp.fromimage(smp.toimage(data, mode='F'))
    tmp = tmp.resize(dims, Image.ANTIALIAS)
    return tmp
def image_from_bits(self, bits, filename):
    # declare pixels matrix (list of lists)
    pixels = []
    # turning the flat list of bits into a 32x32 matrix
    for y in range(0, 32):
        i = y * 32
        pixels.append(bits[i:i + 32])
    # convert to 0's and 255's
    for y in range(len(pixels)):
        row = pixels[y]
        for x in range(len(row)):
            val = row[x]
            if val == 1:
                val = 255
            pixels[y][x] = val
    # convert pixels to image and save
    imd = numpy.array(pixels)
    im = toimage(imd)
    im = im.convert('RGB')
    im.save(filename, 'PNG')
img = sc.imread(file)
# save it for now because it will be modified soon
original_img = copy.deepcopy(img)
img = sc.imresize(img, 0.25 / 2)
if KEEP_ORIGINAL_SIZE == False:
    original_img = copy.deepcopy(img)

brightness = Silver.brightness(img)
average_brightness = np.average(brightness)
std_brightness = np.std(brightness)
silver.avg = average_brightness
silver.std = std_brightness
print('img total size is:{}'.format(img.size))

bounding_box = get_bounding_box(img)
draw_bounding_box(img, bounding_box)
if MODE == 'show':
    imshow(img)
    print('that was the picture:{}\n'.format(file))
if MODE == 'write_csv':
    sc.toimage(original_img, cmin=0.0, cmax=255.0).save(str(current_file_number) + '.jpg')
    if KEEP_ORIGINAL_SIZE == True:
        # Rescale the bounding box back to the original resolution.
        bounding_box = tuple(8 * x for x in bounding_box)
    csv_file.write(str(current_file_number) + '.jpg,{},{},{},{}\n'.format(*bounding_box))
    current_file_number += 1

if MODE == 'write_csv':
    csv_file.close()
from scipy.misc import toimage, fromimage
import Image
import scipy.ndimage as snd

# opening the image and converting it to grayscale
a = Image.open('../Figures/er_image.png').convert('L')
# performing binary erosion for 25 iterations
b = snd.morphology.binary_erosion(a, iterations=25)
# converting b from an ndarray to an image
b = toimage(b)
# displaying the image
b.show()
import tensorflow as tf
import numpy as np
import scipy.io
from scipy.misc import toimage

mat = scipy.io.loadmat('W.mat')
print(mat['outputarr'].shape)
# input()
outputarr = mat['outputarr']
for i in range(len(outputarr)):
    curr = outputarr[0][i]
    nxt = outputarr[1][i]
    toimage(curr).show()
    toimage(nxt).show()
    input()
def base64ToImage(base64_string):
    arr = base64ToNumpyArray(base64_string)
    img = misc.toimage(arr)
    return img
try:
    from scipy.misc import imsave, toimage
except ImportError as e:
    print("imsave requires you to install pillow. Run `pip install pillow` and then try again.")
    sys.exit()

# Load digits data
data, labels = datasets.load_digits().data, datasets.load_digits().target

# Create images for a custom tooltip array
tooltip_s = []
for image_data in data:
    output = io.BytesIO()
    img = toimage(image_data.reshape((8, 8)))  # Data was a flat row of 64 "pixels".
    img.save(output, format="PNG")
    contents = output.getvalue()
    img_encoded = base64.b64encode(contents)
    img_tag = """<img src="data:image/png;base64,{}"> """.format(img_encoded.decode('utf-8'))
    tooltip_s.append(img_tag)
    output.close()

# need to make sure to feed it as a NumPy array, not a list
tooltip_s = np.array(tooltip_s)

# Initialize to use t-SNE with 2 components (reduces data to 2 dimensions).
# Also note high overlap_percentage.
mapper = km.KeplerMapper(verbose=2)

# Fit and transform data
def save_img(data, i, j):
    # blur the image with a 3-pixel gaussian width
    data = gaussian_filter(data, sigma=3)
    img = smp.toimage(data)  # Create a PIL image
    smp.imsave("Bow_shocks/bs" + str(i) + str(j) + '.png', img)
def _fold(self):
    fold_mask_root = self.fold_masks_base_dir
    if not os.path.exists(fold_mask_root) or self.rebuild_mask:
        os.makedirs(fold_mask_root, exist_ok=True)
        fold_images = list()
        fold_masks = list()
        ignore_classes = self.get_classes_to_ignore()
        ignore_classes2ignore_idx = {ic: self.ignore_idx for ic in ignore_classes}
        if self.image_set == 'train':
            print('Ignoring classes for fold %d in split %s: '
                  % (self.fold, self.image_set), ignore_classes)
        else:
            print('Preserving all classes for fold %d in split %s'
                  % (self.fold, self.image_set))
        print('Preparing data for fold %d in split %s' % (self.fold, self.image_set))
        for index in tqdm(range(len(self)), position=0, leave=True):
            original_mask_path = self.masks[index]
            fold_mask_path = os.path.join(fold_mask_root,
                                          os.path.basename(original_mask_path))
            target = Image.open(original_mask_path)
            target = np.array(target, dtype=np.int)
            if self.image_set == 'train':
                # only filter out some classes during training
                # old style, slow:
                # for ic in ignore_classes:
                #     target[target == ic] = self.ignore_idx
                # new style, maybe faster:
                target = replace_array_ele_as_dict(target, ignore_classes2ignore_idx)
                # Images with only background and ignored classes will not be used
                if target[target != self.ignore_idx].sum() == 0:
                    continue
            if not os.path.exists(fold_mask_path) or self.rebuild_mask:
                target = misc.toimage(target, low=target.min(), high=target.max())
                misc.imsave(fold_mask_path, target)
            fold_images.append(self.images[index])
            fold_masks.append(fold_mask_path)
        self.images = fold_images
        self.masks = fold_masks
    else:
        mask_path = [os.path.join(fold_mask_root, i)
                     for i in os.listdir(fold_mask_root)]
        self.masks = mask_path
        new_images = []
        for m_path in mask_path:
            image_path = os.path.join(
                self.images_base_dir,
                os.path.basename(m_path).split('.')[0] + '.jpg')
            assert os.path.exists(m_path), 'mask %s not exist' % m_path
            assert os.path.exists(image_path), 'image %s not exist' % image_path
            new_images.append(image_path)
        self.images = new_images
def xtomo_writer_f(data, output_file=None, x_start=0, digits=3, axis=0,
                   overwrite=False, precision=True):
    """
    Write 3-D data to a stack of tif files.

    Parameters
    ----------
    output_file : str, optional
        Name of the output file.
    x_start : scalar, optional
        First index of the data on the first dimension of the array.
    digits : scalar, optional
        Number of digits used for file indexing.
        For example if 4: test_XXXX.tiff
    axis : scalar, optional
        Images are read along that axis.
    overwrite : bool, optional
        If overwrite=True the existing data in the reconstruction
        folder will be overwritten.
    precision : bool, optional
        Export data type precision. If True it saves 32-bit precision.
        Otherwise it uses 8-bit precision.

    Notes
    -----
    If a file exists, saves it with a modified name.

    If the output location is not specified, the data is saved inside the
    ``recon`` folder where the input data resides. The names of the
    reconstructed files will be prefixed with ``recon``.

    Examples
    --------
    - Save sinogram data:

        >>> import tomopy
        >>>
        >>> # Load data
        >>> myfile = 'demo/data.h5'
        >>> data, white, dark, theta = tomopy.xtomo_reader(myfile)
        >>>
        >>> # Save data
        >>> output_file='tmp/slice_'
        >>> tomopy.xtomo_writer(data, output_file, axis=1)
        >>> print "Images are successfully saved at " + output_file + '...'

    - Save first 16 projections:

        >>> import tomopy
        >>>
        >>> # Load data
        >>> myfile = 'demo/data.h5'
        >>> data, white, dark, theta = tomopy.xtomo_reader(myfile,
        ...     projections_start=0, projections_end=16)
        >>>
        >>> # Save data
        >>> output_file='tmp/projection_'
        >>> tomopy.xtomo_writer(data, output_file, axis=0)
        >>> print "Images are successfully saved at " + output_file + '...'

    - Save reconstructed slices:

        >>> import tomopy
        >>>
        >>> # Load data
        >>> myfile = 'demo/data.h5'
        >>> data, white, dark, theta = tomopy.xtomo_reader(myfile)
        >>>
        >>> # Perform reconstruction
        >>> d = tomopy.xtomo_dataset(log='error')
        >>> d.dataset(data, white, dark, theta)
        >>> d.center = 661.5
        >>> d.gridrec()
        >>>
        >>> # Save data
        >>> output_file='tmp/reconstruction_'
        >>> tomopy.xtomo_writer(d.data_recon, output_file, axis=0)
        >>> print "Images are successfully saved at " + output_file + '...'
    """
    if output_file == None:
        output_file = "tmp/img_"
    output_file = os.path.abspath(output_file)
    dir_path = os.path.dirname(output_file)

    # Remove TIFF extension if there is one.
    if output_file.endswith('tif') or output_file.endswith('tiff'):
        output_file = output_file.split(".")[-2]

    if overwrite:
        if os.path.exists(dir_path):
            shutil.rmtree(dir_path)

    # Create new folders.
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # Select desired x from whole data.
    num_x, num_y, num_z = data.shape
    if axis == 0:
        x_end = x_start + 1
    elif axis == 1:
        x_end = x_start + 1
    elif axis == 2:
        x_end = x_start + 1

    # Write data.
    file_index = ["" for x in range(digits)]
    for m in range(digits):
        file_index[m] = '0' * (digits - m - 1)
    ind = range(x_start, x_end)
    for m in range(len(ind)):
        for n in range(digits):
            if ind[m] < np.power(10, n + 1):
                file_body = output_file + file_index[n] + str(ind[m])
                file_name = file_body + '.tif'
                break
        if precision:
            if axis == 0:
                img = misc.toimage(data[m, :, :], mode='F')
            elif axis == 1:
                img = misc.toimage(data[:, m, :], mode='F')
            elif axis == 2:
                img = misc.toimage(data[:, :, m], mode='F')
        else:
            if axis == 0:
                img = misc.toimage(data[m, :, :])
            elif axis == 1:
                img = misc.toimage(data[:, m, :])
            elif axis == 2:
                img = misc.toimage(data[:, :, m])

        # Check if file exists; if so, generate a new file name.
        if os.path.isfile(file_name):
            indq = 1
            FLAG_SAVE = False
            while not FLAG_SAVE:
                new_file_body = file_body + '-' + str(indq)
                new_file_name = new_file_body + '.tif'
                if not os.path.isfile(new_file_name):
                    img.save(new_file_name)
                    FLAG_SAVE = True
                    file_name = new_file_name
                else:
                    indq += 1
        else:
            img.save(file_name)
from recompression import *
from PIL import Image
#from pylab import *
from scipy import misc
import os

im = np.array(Image.open('empire.jpg').convert('L'))
misc.toimage(im, cmin=0, cmax=255).save('gray_empire.jpg')
n, m = im.shape
new_im = np.zeros([n, m])
# Compress and decompress the image block by block (8x8).
for i in range(n // 8):
    for j in range(m // 8):
        X = im[i * 8:(i + 1) * 8, j * 8:(j + 1) * 8]
        X, TR = TBT(X, 1)
        IP, BP_mode, res = IBP(X, 0, 7)
        X = IBP_decode(IP, BP_mode, res)
        X = TBT_decode(X, TR, 1)
        new_im[i * 8:(i + 1) * 8, j * 8:(j + 1) * 8] = X
misc.toimage(new_im, cmin=0, cmax=255).save('new_empire.jpg')

'''
im = array(Image.open('empire.jpg').convert('L'))

# get image list
image_path = './data/'
imglist = [os.path.join(image_path, f) for f in os.listdir(image_path)]

distr = []  # document the residual distribution
for f in imglist:
    print(f)
    current_distr = zeros(33)
# rank-r approximation
# for r in range(1, 200):
r = 100
sigma = np.zeros((540, 540))
for i in range(r):
    sigma[i][i] = s[i]
# Rebuild the rank-r approximation and record its reconstruction error.
tmp = np.matmul(u, sigma)
tmp2 = np.matmul(tmp, v)
err.append(LA.norm(tmp2 - x, 'fro'))
xr = np.reshape(tmp2[2], (50, 50))
toimage(np.reshape(x[2], (50, 50))).show()
toimage(xr).show()
#isum += (np.abs(tmp - x_train[i])).sum() / (len(tmp)*len(tmp))
#err.append(isum / 540)
# plt.plot(err)
# plt.show()
#
# print err[80]

detection_err = []
r = 200
#for r in range(1, 201):
import noise
import numpy as np
from scipy.misc import toimage

shape = (1024, 1024)
scale = 100.0
octaves = 6
persistence = 0.5
lacunarity = 2.0

# Fill the grid with 2-D Perlin noise.
world = np.zeros(shape)
for i in range(shape[0]):
    for j in range(shape[1]):
        world[i][j] = noise.pnoise2(i / scale,
                                    j / scale,
                                    octaves=octaves,
                                    persistence=persistence,
                                    lacunarity=lacunarity,
                                    repeatx=1024,
                                    repeaty=1024,
                                    base=0)
toimage(world).show()
def numpyArrayToImage(arr):
    return misc.toimage(arr)
]

for f in tqdm(semantic_file_list):
    semantic_img = sp.imread(join(semantic_dir, f))
    instance_img = sp.imread(join(instance_dir, f))
    instance_img = kitti_to_cityscapes_instaces(instance_img)
    out_semantic_filename = join(out_semantic_dir,
                                 'kitti_%s_gtFine_labelIds.png' % f[:-4])
    out_instance_filename = join(out_instance_dir,
                                 'kitti_%s_gtFine_instanceIds.png' % f[:-4])
    out_polygons_filename = join(out_instance_dir,
                                 'kitti_%s_gtFine_polygons.json' % f[:-4])
    sp.toimage(semantic_img, mode='L').save(out_semantic_filename)
    sp.toimage(instance_img, high=np.max(instance_img),
               low=np.min(instance_img), mode='I').save(out_instance_filename)

    # create empty json file for pseudo polygons
    with open(out_polygons_filename, 'w') as out_json:
        json.dump({}, out_json)

    # copy and rename kitti semantics training image_2 to cityscapes format
    training_img_src = join(training_dir, f)
    training_img_dst = join(im_output_dir, 'kitti_%s_leftImg8bit.png' % f[:-4])
    shutil.copy2(training_img_src, training_img_dst)
grayscale = Grayscale()
imageresize = ImageResize(84, 84)
sequence = Sequence(4)
#observation = sequence.process(observation)
#observation = imageresize.process(observation)
#observation = crop(observation, 14, 77, 11, 75)
#observation = crop(observation, 36, 190, 20, 144)
#imageresize = ImageResize(42, 42)
#observation = grayscale.process(observation)
#observation = zeroandone(observation)
a = observation[30]
print(observation)
print(len(observation))
print(a)
print(len(a))
print(a[0])
#imageresize = ImageResize(336, 336)
#observation = rgb2gray(observation)
#observation = zeroandone(observation, 105)
#observation = imageresize.process(observation)
#observation = scipy.misc.imresize(arr=observation.astype(np.uint8), size=(168, 168))
print(observation)
img = smp.toimage(observation)
smp.imsave('ong.png', img)
img.show()
def rgba(arr):
    img = misc.toimage(arr)
    rgba = img.convert("RGBA")
    rgba_arr = numpy.array(rgba)
    return rgba_arr
def convert(arr, format_str):
    img = misc.toimage(arr)
    converted_img = img.convert(format_str)
    converted_arr = numpy.array(converted_img)
    return converted_arr
# Object Recognition with Convolutional Neural Networks in the Keras Deep Learning Library
# https://machinelearningmastery.com/object-recognition-convolutional-neural-networks-keras-deep-learning-library/
# https://github.com/deep-diver/CIFAR10-img-classification-tensorflow

# Plot ad hoc CIFAR10 instances
from keras.datasets import cifar10
from matplotlib import pyplot
from scipy.misc import toimage

# load data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# create a grid of 3x3 images
for i in range(0, 8):
    pyplot.subplot(330 + 1 + i)
    pyplot.imshow(toimage(X_train[i]))
# show the plot
pyplot.show()

pyplot.imshow(toimage(X_train[9]))
pyplot.imshow(toimage(X_train[12]), interpolation='nearest')
pyplot.imshow(toimage(X_train[13]), interpolation='nearest')
pyplot.imshow(toimage(X_train[14]), interpolation='nearest')
pyplot.imshow(toimage(X_train[18]), interpolation='nearest')

# Simple CNN model for CIFAR-10
import numpy
from keras.datasets import cifar10
from keras.models import Sequential
def grayscale(arr):
    img = misc.toimage(arr)
    grayscale = img.convert("L")
    grayscale_arr = numpy.array(grayscale)
    return grayscale_arr
averaged_vals = np.zeros([1, w2, 3])
for i in range(w2):
    # per-channel mean over each vertical strip (channel 1 is green, 2 is blue)
    r = np.mean(img[:, i * batchWidth:(i + 1) * batchWidth, 0])
    g = np.mean(img[:, i * batchWidth:(i + 1) * batchWidth, 1])
    b = np.mean(img[:, i * batchWidth:(i + 1) * batchWidth, 2])
    averaged_vals[0][i][0] = r
    averaged_vals[0][i][1] = g
    averaged_vals[0][i][2] = b

# repeat the strip six times so the band is visible when displayed
averaged_vals = np.stack([averaged_vals[0]] * 6, axis=0)
plt.imshow(averaged_vals.astype(int))
plt.show()
misc.toimage(averaged_vals, cmin=0.0, cmax=255.0).save("josh.jpg")
identifier = colorIdentifier("./josh.jpg")
print("hi")

# Trim leading strips until one matches the dominant color (within +/-20 per channel).
for i in range(w2):
    if (averaged_vals[0][i][0] < (identifier.getDominant()[0] + 20)
            and averaged_vals[0][i][0] > (identifier.getDominant()[0] - 20)):
        if (averaged_vals[0][i][1] < (identifier.getDominant()[1] + 20)
                and averaged_vals[0][i][1] > (identifier.getDominant()[1] - 20)):
            if (averaged_vals[0][i][2] < (identifier.getDominant()[2] + 20)
                    and averaged_vals[0][i][2] > (identifier.getDominant()[2] - 20)):
                averaged_vals = averaged_vals[:, i:]
                break
# Trim trailing strips the same way, scanning from the right.
for i in range(averaged_vals.shape[1] - 1, -1, -1):
    if (averaged_vals[0][i][0] < (identifier.getDominant()[0] + 20)
            and averaged_vals[0][i][0] > (identifier.getDominant()[0] - 20)):
        if (averaged_vals[0][i][1] < (identifier.getDominant()[1] + 20)
                and averaged_vals[0][i][1] > (identifier.getDominant()[1] - 20)):
            if (averaged_vals[0][i][2] < (identifier.getDominant()[2] + 20)
                    and averaged_vals[0][i][2] > (identifier.getDominant()[2] - 20)):
reduction_rate = 0
thinness = 0
sensitivity = 0
start_time = time.time()
for file in glob.glob(test_path + "/*.png"):
    count = count + 1
    image = cv2.imread(file)
    image = color.rgb2gray(image)
    image = invert(image)
    print(image.shape)
    fgps = foregroundPixels(image)
    print("fgps: ", fgps)
    skeleton = hilditch(image)
    fgpst = foregroundPixels(skeleton)
    print("fgpst: ", fgpst)
    reduction_rate = reduction_rate + (((fgps - fgpst) / fgps) * 100)
    sensitivity = sensitivity + sensitivitycheck(skeleton, fgps)
    thinnesst = thinnesscheck(skeleton)
    thinnesso = thinnesscheck(image)
    thinness = thinness + (1 - (thinnesst / thinnesso))
    im = toimage(skeleton)
    im.save(str(count) + ".png")
end_time = time.time()  # Used to stop the time record
seconds = end_time - start_time
print("Total time: ", seconds)
print("Average time: ", seconds / count)
print("Reduction rate: ", reduction_rate)
print("Average Reduction rate: ", reduction_rate / count)
print("Sensitivity: ", sensitivity)
print("Average sensitivity: ", sensitivity / count)
print("Average thinness: ", thinness / count)
np.savetxt('labels_orl.txt', Y_orl)
np.savetxt('projections_orl.txt', Z_orl)

##########################################
############## Extra Tasks ###############
##########################################

print "\n------------------------------"
print "Eigen Faces..."

print "\nLFW Faces : Top 5 Principal Components"
for i in xrange(5):
    print "Face ", (i + 1)
    maxNum = np.amax(eigenLfw[:, i])
    minNum = np.amin(eigenLfw[:, i])
    misc.toimage(((eigenLfw[:, i] - minNum).reshape(rowLfw, colLfw))
                 * (200.0 / maxNum)).save("lfw" + str(i + 1) + ".png")

print "\nORL Faces : Top 5 Principal Components"
for i in xrange(5):
    print "Face ", (i + 1)
    maxNum = np.amax(eigenOrl[:, i])
    minNum = np.amin(eigenOrl[:, i])
    misc.toimage(((eigenOrl[:, i] - minNum).reshape(rowOrl, colOrl))
                 * (255.0 / maxNum)).save("orl" + str(i + 1) + ".png")

##############################################
######## Finding the accuracy ################
##############################################

########### Helper Functions #############
imsave('gradcam.png', heatmap)

#---------------
# Guided backprop
#---------------
g = tf.get_default_graph()
with g.gradient_override_map({'Relu': 'GuidedRelu'}):
    sess = tf.Session()
    imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    vgg = vgg16(imgs, 'vgg16_weights.npz', sess, category)
    img1 = imread(img_file, mode='RGB')
    img1 = imresize(img1, (224, 224))
    guided_backprop = Guided_backprop(vgg.imgs, vgg.probs, category)
    ll = sess.run([vgg.probs, guided_backprop], feed_dict={vgg.imgs: [img1]})
    print ll[1].shape
    guided_backprop = toimage(ll[1])
    #heatmap = double2image.to_image(heatmap)
    #guidedGradCam = imresize(guided_backprop, (224, 224))
    imsave('guided_backprop.png', guided_backprop)

    #---------------
    # Guided gradcam
    #---------------
    guided_gradcam = ll[1]
    cam = resize(ll1[1] / np.max(ll1[1]), (224, 224), preserve_range=True)
    for i in range(3):
        guided_gradcam[:, :, i] = guided_gradcam[:, :, i] * cam
    imsave('guided_gradcam.png', toimage(guided_gradcam))

#preds = (np.argsort(prob)[::-1])[0:5]
#for p in preds:
#    print p, class_names[p], prob[p]
def sample_blob_negatives(img, roi_mask, out_dir, img_id, abn, blob_detector,
                          patch_size=256, neg_cutoff=.35, nb_bkg=100,
                          start_sample_nb=0, bkg_dir='background',
                          verbose=False):
    # Build the output directory and base file name.
    bkg_out = os.path.join(out_dir, bkg_dir)
    basename = '_'.join([img_id, str(abn)])

    # Pad the image and mask with zero margins.
    img = add_img_margins(img, patch_size // 2)
    roi_mask = add_img_margins(roi_mask, patch_size // 2)

    # Get ROI bounding box.
    roi_mask_8u = roi_mask.astype('uint8')
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)
    cont_areas = [cv2.contourArea(cont) for cont in contours]
    idx = np.argmax(cont_areas)  # find the largest contour.
    rx, ry, rw, rh = cv2.boundingRect(contours[idx])
    if verbose:
        M = cv2.moments(contours[idx])
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        print("ROI centroid=", (cx, cy))
        sys.stdout.flush()

    # Sample blob negatives: detect key points on the byte-scaled image.
    key_pts = blob_detector.detect((img / img.max() * 255).astype('uint8'))
    rng = np.random.RandomState(12345)
    key_pts = rng.permutation(key_pts)
    sampled_bkg = 0
    # Loop over the key points.
    for kp in key_pts:
        # Stop once enough background patches have been sampled.
        if sampled_bkg >= nb_bkg:
            break
        # Get the key point coordinates.
        x, y = int(kp.pt[0]), int(kp.pt[1])
        # Save the patch only if it does not overlap the ROI mask.
        if not overlap_patch_roi((x, y), patch_size, roi_mask, cutoff=neg_cutoff):
            patch = img[y - patch_size // 2:y + patch_size // 2,
                        x - patch_size // 2:x + patch_size // 2]
            patch = patch.astype('int32')
            patch_img = toimage(patch, high=patch.max(), low=patch.min(), mode='I')
            filename = basename + "_%04d" % (start_sample_nb + sampled_bkg) + ".png"
            fullname = os.path.join(bkg_out, filename)
            patch_img.save(fullname)
            if verbose:
                print("sampled a blob patch at (x,y) center=", (x, y))
                sys.stdout.flush()
            sampled_bkg += 1
    return sampled_bkg
def save_image(self, image, path):
    if not os.path.exists(os.path.split(path)[0]):
        os.makedirs(os.path.split(path)[0])
    misc.toimage(image, cmin=0, cmax=255).save(path)
def OCR(data, results, action, idname):
    """
    Use pyOCR for OCR; returns rect ROIs for plotting in the overview.
    If results is None, just return the OCR content, do not add to results.
    """
    try:
        params = action['params']
    except KeyError:
        params = {}

    # optional parameters
    ocr_options = {}
    for lab in ['ocr_threshold', 'ocr_zoom', 'ocr_border']:
        if lab in params:
            ocr_options[lab] = int(params[lab])

    inputfile = data.series_filelist[0]  # give me a [filename]
    rgbchannel = params.get('rgbchannel', 'B')
    dcmInfile, pixelData, dicomMode = wadwrapper_lib.prepareInput(
        inputfile, headers_only=False, logTag=logTag(), rgbchannel=rgbchannel)

    # find probename
    idname = ''
    if params.get('auto_suffix', False):
        if idname is None:
            idname = '_' + qc.imageID(probeonly=True)

    # add pluginversion to 'result' object
    add_plugin_version(qc, results, 'pluginversion' + idname)

    rectrois = []
    error = False
    msg = ''
    values = {}

    # solve ocr params (new format)
    ocr_regions = params.get('ocr_regions', {})
    regions = {}
    for ocrname, ocrparams in ocr_regions.items():
        regions[ocrname] = {'prefix': '', 'suffix': ''}
        for key, val in ocrparams.items():
            if key == 'xywh':
                regions[ocrname]['xywh'] = [int(p) for p in val.split(';')]
            elif key == 'prefix':
                regions[ocrname]['prefix'] = val
            elif key == 'suffix':
                regions[ocrname]['suffix'] = val
            elif key == 'type':
                regions[ocrname]['type'] = val

    for name, region in regions.items():
        rectrois.append([(region['xywh'][0], region['xywh'][1]),
                         (region['xywh'][0] + region['xywh'][2],
                          region['xywh'][1] + region['xywh'][3])])
        txt, part = ocr_lib.OCR(pixelData, region['xywh'], **ocr_options)
        uname = name + str(idname)
        if region['type'] == 'object':
            im = toimage(part)
            fn = '{}.jpg'.format(uname)
            im.save(fn)
            results.addObject(uname, fn)
        else:
            try:
                value = ocr_lib.txt2type(txt, region['type'],
                                         region['prefix'], region['suffix'])
                if results is not None:
                    if region['type'] == 'float':
                        results.addFloat(uname, value)
                    elif region['type'] == 'string':
                        results.addString(uname, value)
                    elif region['type'] == 'bool':
                        results.addBool(uname, value)
                else:
                    values[uname] = value
            except:
                print("error", uname, value)
                error = True
                msg += uname + ' '
                im = toimage(part)
                fn = '{}.jpg'.format(uname)
                im.save(fn)

    if results is None:
        return values, error, msg
    return rectrois, error, msg
def sample_hard_negatives(img, roi_mask, out_dir, img_id, abn, patch_size=256,
                          neg_cutoff=.35, nb_bkg=100, start_sample_nb=0,
                          bkg_dir='background', verbose=False):
    '''WARNING: the definition of hns (hard negatives) may be problematic.
    There has been study showing that the context of an ROI is also useful
    for classification.
    '''
    # Build the output directory and base file name.
    bkg_out = os.path.join(out_dir, bkg_dir)
    basename = '_'.join([img_id, str(abn)])

    # Pad the image and mask with zero margins.
    img = add_img_margins(img, patch_size // 2)
    roi_mask = add_img_margins(roi_mask, patch_size // 2)

    # Get ROI bounding box.
    roi_mask_8u = roi_mask.astype('uint8')
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)
    cont_areas = [cv2.contourArea(cont) for cont in contours]
    idx = np.argmax(cont_areas)  # find the largest contour.
    rx, ry, rw, rh = cv2.boundingRect(contours[idx])
    if verbose:
        M = cv2.moments(contours[idx])
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        print("ROI centroid=", (cx, cy))
        sys.stdout.flush()

    rng = np.random.RandomState(12345)
    # Sample hard negatives: patch centers drawn from an enlarged region
    # around the ROI, clipped so every center stays at least patch_size/2
    # away from the image border.
    sampled_bkg = start_sample_nb
    while sampled_bkg < start_sample_nb + nb_bkg:
        x1, x2 = (rx - patch_size // 2, rx + rw + patch_size // 2)
        y1, y2 = (ry - patch_size // 2, ry + rh + patch_size // 2)
        x1 = crop_val(x1, patch_size // 2, img.shape[1] - patch_size // 2)
        x2 = crop_val(x2, patch_size // 2, img.shape[1] - patch_size // 2)
        y1 = crop_val(y1, patch_size // 2, img.shape[0] - patch_size // 2)
        y2 = crop_val(y2, patch_size // 2, img.shape[0] - patch_size // 2)
        x = rng.randint(x1, x2)
        y = rng.randint(y1, y2)
        # Keep the patch only if it does not overlap the ROI mask.
        if not overlap_patch_roi((x, y), patch_size, roi_mask, cutoff=neg_cutoff):
            patch = img[y - patch_size // 2:y + patch_size // 2,
                        x - patch_size // 2:x + patch_size // 2]
            patch = patch.astype('int32')
            # Convert the patch array to an image and save it as background.
            patch_img = toimage(patch, high=patch.max(), low=patch.min(), mode='I')
            filename = basename + "_%04d" % (sampled_bkg) + ".png"
            fullname = os.path.join(bkg_out, filename)
            patch_img.save(fullname)
            sampled_bkg += 1
            if verbose:
                print("sampled a hns patch at (x,y) center=", (x, y))
                sys.stdout.flush()
for i in range(iter_n):
    g, score = sess.run([t_grad, t_score], {t_input: img0})
    g /= g.std() + 1e-8
    img0 += g * step
    img_noise += g * step
    print("Step ", i, ", Loss value:", score)

## Display
score = sess.run(T('output2'), feed_dict={t_input: img0})
print(" -- After fooling CNN --")
top1 = print_prob(score[0])
cv2.putText(img0, str(top1), (70, 220), 0, 0.5, (255, 0, 0), 2)
toimage(img_noise).show()
toimage(img0).show()

# gibbon: 185, panda: 169, bulldog: 82
img0 = PIL.Image.open(os.path.expanduser("~/Downloads/dog.jpg"))
img0 = img0.resize((224, 224), PIL.Image.NEAREST)
img0 = np.float32(img0)
# crop to 3 channels; images created in OS X can carry a 4th (alpha) channel
img0 = np.array(img0[:, :, :3])
img = img0.copy()
score = sess.run(T('output2'), feed_dict={t_input: img0})
top1 = print_prob(score[0])
cv2.putText(img, str(top1), (70, 220), 0, 0.5, (255, 0, 0), 2)
toimage(img).show()

fool_CNN(T("softmax2_pre_activation")[:, 169], img0)
def sample_patches(img, roi_mask, out_dir, img_id, abn, pos, patch_size=256,
                   pos_cutoff=.75, neg_cutoff=.35, nb_bkg=100, nb_abn=100,
                   start_sample_nb=0, itype='calc', bkg_dir='background',
                   calc_pos_dir='calc_mal', calc_neg_dir='calc_ben',
                   mass_pos_dir='mass_mal', mass_neg_dir='mass_ben',
                   verbose=False):
    # Route the ROI patches to the right output directory:
    # calcification/mass crossed with positive/negative.
    if pos:
        if itype == 'calc':
            roi_out = os.path.join(out_dir, calc_pos_dir)
        else:
            roi_out = os.path.join(out_dir, mass_pos_dir)
    else:
        if itype == 'calc':
            roi_out = os.path.join(out_dir, calc_neg_dir)
        else:
            roi_out = os.path.join(out_dir, mass_neg_dir)
    # Background patches go to the background directory.
    bkg_out = os.path.join(out_dir, bkg_dir)
    basename = '_'.join([img_id, str(abn)])

    # Pad the image and mask with margins.
    img = add_img_margins(img, patch_size // 2)
    roi_mask = add_img_margins(roi_mask, patch_size // 2)

    # Get ROI bounding box from the mask, cast to 8-bit unsigned.
    roi_mask_8u = roi_mask.astype('uint8')
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)
    cont_areas = [cv2.contourArea(cont) for cont in contours]
    idx = np.argmax(cont_areas)  # find the largest contour.
    # Top-left corner, width, and height of the bounding box.
    rx, ry, rw, rh = cv2.boundingRect(contours[idx])
    if verbose:
        # cv2.moments returns the computed contour moments as a dict.
        M = cv2.moments(contours[idx])
        try:
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            print("ROI centroid=", (cx, cy))
            sys.stdout.flush()
        except ZeroDivisionError:
            cx = rx + int(rw / 2)
            cy = ry + int(rh / 2)
            print("ROI centroid=Unknown, use b-box center=", (cx, cy))
            sys.stdout.flush()

    # Seed the pseudo-random generator so the sampling is reproducible.
    rng = np.random.RandomState(12345)

    # Sample abnormality patches first.
    sampled_abn = 0
    nb_try = 0
    while sampled_abn < nb_abn:
        if nb_abn > 1:
            # Draw a uniformly random center inside the ROI bounding box.
            x = rng.randint(rx, rx + rw)
            y = rng.randint(ry, ry + rh)
            nb_try += 1
            if nb_try >= 1000:
                print("Nb of trials reached maximum, decrease overlap cutoff by 0.05")
                sys.stdout.flush()
                pos_cutoff -= .05
                nb_try = 0
                if pos_cutoff <= .0:
                    raise Exception("overlap cutoff becomes non-positive, "
                                    "check roi mask input.")
        else:
            x = cx
            y = cy
        # import pdb; pdb.set_trace()
        if nb_abn == 1 or overlap_patch_roi((x, y), patch_size, roi_mask,
                                            cutoff=pos_cutoff):
            patch = img[y - patch_size // 2:y + patch_size // 2,
                        x - patch_size // 2:x + patch_size // 2]
            patch = patch.astype('int32')
            patch_img = toimage(patch, high=patch.max(), low=patch.min(), mode='I')
            # patch = patch.reshape((patch.shape[0], patch.shape[1], 1))
            filename = basename + "_%04d" % (sampled_abn) + ".png"
            fullname = os.path.join(roi_out, filename)
            # import pdb; pdb.set_trace()
            patch_img.save(fullname)
            sampled_abn += 1  # count the abnormality samples
            nb_try = 0
            if verbose:
                # report the center of the sampled abnormality patch
                print("sampled an abn patch at (x,y) center=", (x, y))
                sys.stdout.flush()

    # Sample background patches.
    sampled_bkg = start_sample_nb
    while sampled_bkg < start_sample_nb + nb_bkg:
        x = rng.randint(patch_size // 2, img.shape[1] - patch_size // 2)
        y = rng.randint(patch_size // 2, img.shape[0] - patch_size // 2)
        if not overlap_patch_roi((x, y), patch_size, roi_mask, cutoff=neg_cutoff):
            patch = img[y - patch_size // 2:y + patch_size // 2,
                        x - patch_size // 2:x + patch_size // 2]
            patch = patch.astype('int32')
            patch_img = toimage(patch, high=patch.max(), low=patch.min(), mode='I')
            filename = basename + "_%04d" % (sampled_bkg) + ".png"
            fullname = os.path.join(bkg_out, filename)
            patch_img.save(fullname)
            sampled_bkg += 1
            if verbose:
                # report the center of the sampled background patch
                print("sampled a bkg patch at (x,y) center=", (x, y))
                sys.stdout.flush()
        Shelf(current_order[0], WIDTH_TYPE[shelf_type], shelf_type))

print("The following shelfs have been created: \n")
# print the created shelves in the terminal
for x in shelf_list:
    print("**********************\n")
    print(x.state)
    print("\n")

# try to add every shelf to a plate; if no available plate is found,
# create a new plate with its own height and type
for current_shelf in shelf_list:
    for current_plate in plate_list:
        if current_plate.add_shelf(current_shelf):
            break
        if current_plate == plate_list[-1]:
            plate_type = current_shelf.category
            plate_list.append(
                Plate(HEIGHT_TYPE[plate_type], WIDTH_TYPE[plate_type], plate_type))

print("\n This resulted in the following way of filling the plates:\n")
# print the created plates in the terminal
for x in plate_list:
    print("**********************\n")
    print(x.state)
    print("\n")

toimage(plate_list[0].state).show()
def imwrite(filename, matrix):
    toimage(matrix, cmin=0, cmax=256).save(filename)
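# Every snippet above depends on scipy.misc.toimage, which was deprecated in
# SciPy 1.0 and removed in SciPy 1.2. Below is a minimal stand-in for the
# simple byte-scaling calls, built on Pillow's Image.fromarray; the name
# toimage_compat is hypothetical and this sketch does not reproduce the
# original's mode/high/low/channel_axis keywords.
import numpy as np
from PIL import Image

def toimage_compat(arr, cmin=None, cmax=None):
    """Byte-scale a 2-D (or HxWx3) array into a PIL image."""
    arr = np.asarray(arr, dtype=np.float64)
    lo = arr.min() if cmin is None else cmin
    hi = arr.max() if cmax is None else cmax
    scale = 255.0 / (hi - lo) if hi > lo else 1.0
    data = np.clip((arr - lo) * scale, 0, 255).astype(np.uint8)
    return Image.fromarray(data)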