def loadDataMontgomery(df, path, im_shape):
    """Function for loading the Montgomery dataset."""
    X, y = [], []
    for i, item in df.iterrows():
        img = img_as_float(io.imread(path + item[0]))
        gt = io.imread(path + item[1])
        l, r = np.where(img.sum(0) > 1)[0][[0, -1]]
        t, b = np.where(img.sum(1) > 1)[0][[0, -1]]
        img = img[t:b, l:r]
        mask = gt[t:b, l:r]
        img = transform.resize(img, im_shape)
        img = exposure.equalize_hist(img)
        img = np.expand_dims(img, -1)
        mask = transform.resize(mask, im_shape)
        mask = np.expand_dims(mask, -1)
        X.append(img)
        y.append(mask)
    X = np.array(X)
    y = np.array(y)
    X -= X.mean()
    X /= X.std()

    print('### Data loaded')
    print('\t{}'.format(path))
    print('\t{}\t{}'.format(X.shape, y.shape))
    print('\tX:{:.1f}-{:.1f}\ty:{:.1f}-{:.1f}\n'.format(X.min(), X.max(), y.min(), y.max()))
    print('\tX.mean = {}, X.std = {}'.format(X.mean(), X.std()))
    return X, y
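# Usage sketch for loadDataMontgomery (hedged: the CSV path and data root below
# are hypothetical placeholders; the function assumes a two-column dataframe of
# image and mask filenames relative to `path`).
import pandas as pd

df_mont = pd.read_csv('idx/montgomery_idx.csv', header=None)  # col 0: image, col 1: mask
X_mont, y_mont = loadDataMontgomery(df_mont, '/data/montgomery/', im_shape=(256, 256))
# X_mont: (n, 256, 256, 1) zero-mean/unit-variance floats; y_mont: matching masks.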
def computeCAM(snet, X, W, reshape_size=None, n_top_convs=20):
    """
    Applies a forward pass of the pre-processed samples "X" in the GAP net
    "snet" and generates the resulting CAM "maps" using the GAP weights "W"
    with the defined size "reshape_size".

    Additionally, it returns the best "n_top_convs" convolutional features
    for each of the classes. The ranking is computed considering the weight
    Wi assigned to the i-th feature map.
    """
    from skimage.transform import resize

    if reshape_size is None:
        reshape_size = [256, 256]

    # Apply forward pass in GAP model
    [X, predictions] = applyForwardPass(snet, X)

    # Get indices of best convolutional features for each class
    # (int dtype so the indices can be used directly for array indexing below)
    ind_best = np.zeros((W.shape[1], n_top_convs), dtype=int)
    for c in range(W.shape[1]):
        ind_best[c, :] = np.argsort(W[:, c])[::-1][:n_top_convs]

    # Compute heatmaps (CAMs) for each class [n_samples, n_classes, height, width]
    maps = np.zeros((X.shape[0], W.shape[1], reshape_size[0], reshape_size[1]))
    # Store top convolutional features
    convs = np.zeros((X.shape[0], W.shape[1], n_top_convs, reshape_size[0], reshape_size[1]))

    for s in range(X.shape[0]):
        weighted_activation = np.dot(np.transpose(W),
                                     np.reshape(X[s], (W.shape[0], X.shape[2] * X.shape[3])))
        mapping = np.reshape(weighted_activation, (W.shape[1], X.shape[2], X.shape[3]))
        maps[s] = resize(mapping, tuple([W.shape[1]] + reshape_size),
                         order=1, preserve_range=True)

        for c in range(W.shape[1]):
            for enum_conv, i_conv in enumerate(ind_best[c]):
                convs[s, c, enum_conv] = resize(X[s, i_conv], reshape_size,
                                                order=1, preserve_range=True)

    return [maps, predictions, convs]
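# Usage sketch for computeCAM (hedged: `snet` and the GAP weight matrix `W` are
# assumed to come from a trained GAP network; the shapes shown are illustrative,
# not taken from the original repo). After applyForwardPass, X has shape
# (n_samples, n_feature_maps, h, w) and W has shape (n_feature_maps, n_classes),
# so each CAM is a W-weighted sum of the feature maps.
#
#   maps, predictions, convs = computeCAM(snet, X_batch, W, reshape_size=[256, 256])
#   # maps[i, c] is the class-c activation map for sample i, resized to 256x256.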
def normalize_size(image, size=256, trans='c'):
    # 'c': central crop, 'w': warp, 'p': padding
    o_shape = image.shape
    assert o_shape[0] > size and o_shape[1] > size
    if trans == 'c':
        if o_shape[0] > o_shape[1]:
            dh = int((o_shape[0] - o_shape[1]) / 2)
            image = image[dh:dh + o_shape[1], :]
        else:
            dw = int((o_shape[1] - o_shape[0]) / 2)
            image = image[:, dw:dw + o_shape[0]]
        new_shape = image.shape
        assert new_shape[0] == new_shape[1]
        image = resize(image, (size, size), order=3, preserve_range=False)
    elif trans == 'w':
        image = resize(image, (size, size), order=3, preserve_range=False)
    elif trans == 'p':
        background = np.zeros((size, size, 3))
        # Integer division keeps the shapes and offsets valid slice indices
        # under Python 3 (the original Python 2 code relied on / being int division).
        if o_shape[0] > o_shape[1]:
            new_shape = (size, size * o_shape[1] // o_shape[0])
            dh = 0
            dw = (size - new_shape[1]) // 2
        else:
            new_shape = (size * o_shape[0] // o_shape[1], size)
            dh = (size - new_shape[0]) // 2
            dw = 0
        image = resize(image, (new_shape[0], new_shape[1]), order=0, preserve_range=False)
        background[dh:dh + new_shape[0], dw:dw + new_shape[1], :] = image[:, :, :]
        image = background
    else:
        print("ERROR: undesignated transformation")
    return image
def put_image(self, image_path):
    # print("Loading the image")
    self.image = io.imread(image_path, as_grey=True)
    self.image = transform.resize(self.image, (50, 50))
    self.image_scaled = io.imread(image_path, as_grey=True)
    self.image_scaled = transform.resize(self.image_scaled, (50, 50))
    self.image_scaled *= (1 / self.image_scaled.max())
def create_thumbnail(self, size, img=None):
    print('processing raw images')
    # `if img:` raises ValueError for multi-element arrays; test against None.
    if img is not None:
        return resize(img, (size, size))
    curr_dir = os.path.dirname(
        os.path.abspath(inspect.getfile(inspect.currentframe())))
    folders = os.walk(os.path.join(curr_dir, '../../data/train/'))
    images = []
    classes = []
    targets = []
    for class_id, folder in enumerate(folders):
        classes.append(folder[0][17:])
        for img in folder[2]:
            # str.index would raise ValueError on a miss; use find instead.
            if img.find('.jpg') == -1:
                continue
            image = imread(folder[0] + '/' + img)
            image = resize(image, (size, size))
            # Important to put -1, to have it 0-based.
            target = class_id - 1
            new_images, new_targets = self.augment_data(image, target)
            images.extend(new_images)
            targets.extend(new_targets)
    train = (images, targets)
    self.save_set('train' + str(size), images, targets)
    # f = open(curr_dir + '/train' + str(size) + '.pkl', 'wb')
    # pickle.dump(train, f, protocol=pickle.HIGHEST_PROTOCOL)
    # f.close()
    return train
def test(classifier, pca):
    building = io.imread("http://www.nps.gov/tps/images/briefs/14-commercial-building.jpg")
    building = transform.resize(building, (200, 200, 3))
    building = color.rgb2gray(building)
    building = building.reshape(1, -1)
    # building = pca.transform(building)
    print(building)
    print(classifier.predict(building)[0])
    print(to_cat[str(classifier.predict(building)[0])] + " (expect building)")
    # print(classifier.predict_proba(building))

    snow = io.imread("http://farm4.static.flickr.com/3405/3332148397_92d89db2ab.jpg")
    snow = transform.resize(snow, (200, 200, 3))
    snow = color.rgb2gray(snow)
    snow = snow.reshape(1, -1)
    # snow = pca.transform(snow)
    print(snow)
    print(to_cat[str(classifier.predict(snow)[0])] + " (expect snow)")
    # print(classifier.predict_proba(snow))

    flower = io.imread("https://upload.wikimedia.org/wikipedia/commons/f/fd/Daisy_flower_green_background.jpg")
    flower = transform.resize(flower, (200, 200, 3))
    flower = color.rgb2gray(flower)
    flower = flower.reshape(1, -1)
    # flower = pca.transform(flower)
    print(to_cat[str(classifier.predict(flower)[0])] + " (expect plant)")
def check_size(img, min_image_width_height, fixed_image_size=None):
    '''
    checks if the image accords to the minimum and maximum size requirements
    or fixed image size and resizes if not

    :param img: the image to be checked
    :param min_image_width_height: the minimum image size
    :param fixed_image_size: optional (width, height) to resize every image to
    '''
    if fixed_image_size is not None:
        if len(fixed_image_size) != 2:
            raise ValueError('The requested fixed image size is invalid!')
        new_img = resize(image=img, output_shape=fixed_image_size[::-1])
        new_img = new_img.astype(np.float32)
        return new_img
    elif np.amin(img.shape[:2]) < min_image_width_height:
        if np.amin(img.shape[:2]) == 0:
            return None
        scale = float(min_image_width_height + 1) / float(np.amin(img.shape[:2]))
        new_shape = (int(scale * img.shape[0]), int(scale * img.shape[1]))
        new_img = resize(image=img, output_shape=new_shape)
        new_img = new_img.astype(np.float32)
        return new_img
    else:
        return img
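# A quick self-contained check of check_size (a sketch; the sizes are arbitrary
# and assume numpy and skimage's resize are imported as the function expects).
import numpy as np

_demo = (np.random.rand(20, 60) * 255).astype(np.uint8)
_fixed = check_size(_demo, min_image_width_height=26, fixed_image_size=(128, 32))
# fixed_image_size is (width, height), so the result is 32 rows x 128 cols.
assert _fixed.shape == (32, 128) and _fixed.dtype == np.float32
_scaled = check_size(_demo, min_image_width_height=26)  # upscales: min side 20 < 26
assert min(_scaled.shape[:2]) >= 26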
def modify(img):
    """Randomly modify an image.

    This is a preprocessing step for training an OCR classifier. It takes in
    an image and casts it to greyscale, reshapes it, and adds some
    (1) rotations, (2) translations and (3) noise. If more efficiency is
    needed, we could factor out some of the initial nonrandom transforms.
    """
    # threshold_local (the successor of the old filter.threshold_adaptive)
    # requires an odd integer block size, so draw one in roughly 20-40.
    block_size = 2 * np.random.randint(10, 20) + 1
    rotation = 5 * np.random.randn()
    # print('BLOCK SIZE', block_size)
    # print('ROTATION ', rotation)

    img = color.rgb2grey(img)
    img = transform.resize(img, output_shape=(50, 30))
    img = img > filters.threshold_local(img, block_size=block_size)

    # rotate the image
    img = np.logical_not(transform.rotate(np.logical_not(img), rotation))
    # translate the image
    img = shift(img)
    # add some noise to the image
    img = noise(img)

    img = transform.resize(img, output_shape=(25, 15))
    return img > filters.threshold_local(img, block_size=25)
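# Usage sketch (hedged: `shift` and `noise` are helpers defined elsewhere in
# this module, so the call is shown commented rather than executed here):
#
#   noisy_glyph = modify(rgb_glyph)
#   # -> 25x15 boolean array: randomly rotated, translated and noised, suitable
#   #    as one augmented OCR training sample.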
def repeated_sales(df, artistname, artname, r2thresh=7000, fftr2thresh=10000,
                   IMAGES_DIR='/home/ryan/asi_images/'):
    """
    Takes a dataframe, artistname and artname and tries to decide, via image
    matching, if there is a repeat sale. Returns a dict of lot_ids, each entry
    a list of repeat sales.
    """
    artdf = df[(df['artistID'] == artistname) & (df['artTitle'] == artname)]
    artdf.images = artdf.images.apply(getpath)
    paths = artdf[['_id', 'images']].dropna()
    id_dict = {}
    img_buffer = {}
    already_ordered = []
    for i, path_i in paths.values:
        id_dict[i] = []
        img_buffer[i] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + path_i), (300, 300))))
        for j, path_j in paths[paths._id != i].values:
            if j > i and j not in already_ordered:
                if j not in img_buffer.keys():
                    img_buffer[j] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + path_j), (300, 300))))
                if norm(img_buffer[i] - img_buffer[j]) < r2thresh and \
                   norm(fft2(img_buffer[i]) - fft2(img_buffer[j])) < fftr2thresh:
                    id_dict[i].append(j)
                    already_ordered.append(j)
    # Materialize the keys first: popping while iterating over the live view
    # raises a RuntimeError on Python 3.
    for key in list(id_dict.keys()):
        if id_dict[key] == []:
            id_dict.pop(key)
    return id_dict
def image_compare(df, IMAGES_DIR='/home/ryan/asi_images/'):
    '''
    takes a list of n image ids and returns sum(n..n-1) n comparisons of r2
    difference, r2(fft) difference, and average number of thresholded pixels
    '''
    img_buffer = {}
    return_list = []
    artdf = df[['_id', 'images']].copy()
    artdf.images = artdf.images.apply(getpath)
    paths = artdf[['_id', 'images']].dropna()
    paths.index = paths._id
    paths = paths.images
    if paths.shape[0] < 2:
        return DataFrame([])
    for id_pair in combinations(paths.index, 2):
        if id_pair[0] in img_buffer:
            img1 = img_buffer[id_pair[0]]
        else:
            img_buffer[id_pair[0]] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + paths[id_pair[0]]), (300, 300))))
            img1 = img_buffer[id_pair[0]]
        if id_pair[1] in img_buffer:
            img2 = img_buffer[id_pair[1]]
        else:
            img_buffer[id_pair[1]] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + paths[id_pair[1]]), (300, 300))))
            img2 = img_buffer[id_pair[1]]
        return_list.append(
            [id_pair[0], id_pair[1],
             norm(img1 - img2),
             norm(fft2(img1) - fft2(img2)),
             # mean([sum(img1 > threshold_otsu(img1)), sum(img2 > threshold_otsu(img2))])
             # mean([sum(img1 > 0.9), sum(img2 > 0.9)])
             # Parenthesized so the 'stdavg' column really is the average of the two stds.
             (std(img1) + std(img2)) / 2.]
        )
    return DataFrame(return_list, columns=['id1', 'id2', 'r2diff', 'fftdiff', 'stdavg'])
def preproc(self, img, size, pixel_spacing, equalize=True, crop=True):
    """crop center and resize"""
    # TODO: this is stupid, you could crop out the heart
    # But should test this
    if img.shape[0] < img.shape[1]:
        img = img.T
    # Standardize based on pixel spacing
    img = transform.resize(img, (int(img.shape[0] * (1.0 / np.float32(pixel_spacing[0]))),
                                 int(img.shape[1] * (1.0 / np.float32(pixel_spacing[1])))))
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    if crop:
        crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
        # resize to size x size
        resized_img = transform.resize(crop_img, (size, size))
    else:
        resized_img = img
    # resized_img = gaussian_filter(resized_img, sigma=1)
    # resized_img = median_filter(resized_img, size=(3, 3))
    if equalize:
        resized_img = equalize_hist(resized_img)
        resized_img = adjust_sigmoid(resized_img)
    resized_img *= 255.
    return resized_img.astype("float32")
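# Usage sketch (hedged: the pixel spacing values are illustrative stand-ins for
# what a DICOM header would provide; not taken from the original project):
#
#   out = self.preproc(dicom_pixels, size=64, pixel_spacing=(1.4, 1.4))
#   # -> 64x64 float32 in [0, 255]: spacing-normalized, center-cropped,
#   #    histogram-equalized and sigmoid-adjusted.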
def main():
    for file_path in glob.glob("/home/lucas/Downloads/Lucas/GSK 10uM/*.JPG"):
        img = data.imread(file_path, as_grey=True)
        img = transform.resize(img, [600, 600])
        img_color = transform.resize(data.imread(file_path), [600, 600])
        img[img > img.mean() - 0.1] = 0
        # io.imshow(img)
        # io.show()
        # edges = canny(img)
        bordas_fechadas = closing(img > 0.1, square(15))  # fechando gaps
        fill_cells = ndi.binary_fill_holes(bordas_fechadas)
        # io.imshow(fill_cells)
        # io.show()
        img_label = label(fill_cells, background=0)
        n = 0
        for x in regionprops(img_label):
            if x.area < 2000 and x.area > 300:
                n += 1
                print(x.area)
                minr, minc, maxr, maxc = x.bbox
                try:
                    out_path_name = file_path.split("/")[-1].rstrip(".JPG")
                    io.imsave("out/cell_{}_pic_{}_area_{}.png".format(n, out_path_name, str(round(x.area))),
                              img_color[minr - 3: maxr + 3, minc - 3: maxc + 3])
                    # io.show()
                except Exception:
                    pass
def sfit(arr, degree=3, binning=16):
    "Fit polynomial to a 2D array, aka surface."
    # For efficiency, we downsample the input array before doing the fit.
    # For info on resizing, see http://stackoverflow.com/questions/29958670/how-to-use-matlabs-imresize-in-python
    # Integer division: resize needs an integer output shape under Python 3.
    shape_small = (np.size(arr, 0) // binning, np.size(arr, 1) // binning)
    shape_big = np.shape(arr)

    # Create x and y arrays, which we need to pass to the fitting routine
    x_big, y_big = np.mgrid[:shape_big[0], :shape_big[1]]
    x_small = skt.resize(x_big, shape_small, order=1, preserve_range=True)
    y_small = skt.resize(y_big, shape_small, order=1, preserve_range=True)
    arr_small = skt.resize(arr, shape_small, order=1, preserve_range=True)

    p_init = astropy.modeling.models.Polynomial2D(degree=degree)

    # Define the fitting routine
    fit_p = astropy.modeling.fitting.LevMarLSQFitter()
    # with warnings.catch_warnings():
    #     # Ignore model linearity warning from the fitter
    #     warnings.simplefilter('ignore')

    # Do the fit itself
    poly = fit_p(p_init, x_small, y_small, arr_small)

    # Take the returned polynomial, and apply it to our x and y axes to get the final surface fit
    surf_big = poly(x_big, y_big)

    return surf_big
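# Runnable sketch: sfit should recover a smooth quadratic background from noisy
# data (assumes astropy and `skimage.transform as skt` are imported at module
# level, as the function itself requires).
import numpy as np

yy, xx = np.mgrid[:128, :128]
surface = 3.0 + 0.01 * xx + 0.02 * yy + 1e-4 * xx * yy
noisy = surface + np.random.normal(scale=0.05, size=surface.shape)
fit = sfit(noisy, degree=2, binning=8)
assert np.allclose(fit, surface, atol=0.5)  # fitted surface tracks the truth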
def augmentation(image, imageB, org_width=160, org_height=224, width=190, height=262):
    max_angle = 20
    image = resize(image, (width, height))
    imageB = resize(imageB, (width, height))

    angle = np.random.randint(max_angle)
    if np.random.randint(2):
        angle = -angle
    image = rotate(image, angle, resize=True)
    imageB = rotate(imageB, angle, resize=True)

    xstart = np.random.randint(width - org_width)
    ystart = np.random.randint(height - org_height)
    image = image[xstart:xstart + org_width, ystart:ystart + org_height]
    imageB = imageB[xstart:xstart + org_width, ystart:ystart + org_height]

    if np.random.randint(2):
        image = cv2.flip(image, 1)
        imageB = cv2.flip(imageB, 1)
    if np.random.randint(2):
        imageB = cv2.flip(imageB, 0)
    # image = resize(image, (org_width, org_height))
    return image, imageB
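# Sketch of one augmentation round-trip (assumes the module-level imports the
# function relies on: skimage's resize/rotate and cv2; the input is arbitrary).
import numpy as np

_img = np.random.rand(300, 400, 3)
_aug, _augB = augmentation(_img, _img.copy())
assert _aug.shape[:2] == (160, 224)  # the org_width x org_height random crop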
def generate_bg(bg_resize=True):
    files = glob.glob("/usr/share/backgrounds/*/*.jpg")
    # random.choice(files)
    # print(random.choice(files))
    found = False
    while not found:
        fname = random.choice(files)
        bg = cv2.imread(fname) / 255.  # , cv2.CV_LOAD_IMAGE_GRAYSCALE) / 255.
        if (bg.shape[1] >= OUTPUT_SHAPE[1] and
                bg.shape[0] >= OUTPUT_SHAPE[0]):
            found = True
    # print(files)
    # while not found:
    #     fname = "bgs/{:08d}.jpg".format(random.randint(0, num_bg_images - 1))
    #     bg = cv2.imread(fname, cv2.CV_LOAD_IMAGE_GRAYSCALE) / 255.
    #     if (bg.shape[1] >= OUTPUT_SHAPE[1] and
    #             bg.shape[0] >= OUTPUT_SHAPE[0]):
    #         found = True

    if bg_resize:
        x_shape = np.random.randint(OUTPUT_SHAPE[1], bg.shape[1])
        y_shape = np.random.randint(OUTPUT_SHAPE[0], bg.shape[0])
        # resize returns a new array; the original code discarded the result.
        bg = resize(image=bg, output_shape=(y_shape, x_shape), order=3)

    x = random.randint(0, bg.shape[1] - OUTPUT_SHAPE[1])
    y = random.randint(0, bg.shape[0] - OUTPUT_SHAPE[0])
    bg = bg[y:y + OUTPUT_SHAPE[0], x:x + OUTPUT_SHAPE[1]]

    return bg, fname
def _images_thumbnails(self):
    from vispy.io import imsave, imread
    # TODO: Switch to using PIL for resizing
    from skimage.transform import resize
    import numpy as np
    gallery_dir = op.join(IMAGES_DIR, 'gallery')
    thumbs_dir = op.join(IMAGES_DIR, 'thumbs')
    carousel_dir = op.join(IMAGES_DIR, 'carousel')
    for fname in os.listdir(gallery_dir):
        filename1 = op.join(gallery_dir, fname)
        filename2 = op.join(thumbs_dir, fname)
        filename3 = op.join(carousel_dir, fname)
        # The read must not be commented out: `im` is used below.
        im = imread(filename1)

        newx = 200
        newy = int(newx * im.shape[0] / im.shape[1])
        im = (resize(im, (newy, newx), 2) * 255).astype(np.uint8)
        imsave(filename2, im)

        newy = 160  # This should match the carousel size!
        newx = int(newy * im.shape[1] / im.shape[0])
        im = (resize(im, (newy, newx), 1) * 255).astype(np.uint8)
        imsave(filename3, im)
        print('Created thumbnail and carousel %s' % fname)
def get_batches_fn(batch_size):
    """
    Create batches of training data
    :param batch_size: Batch Size
    :return: Batches of training data
    """
    image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))
    label_paths = {
        re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path
        for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}
    background_color = np.array([255, 0, 0])

    random.shuffle(image_paths)
    for batch_i in range(0, len(image_paths), batch_size):
        images = []
        gt_images = []
        for image_file in image_paths[batch_i:batch_i + batch_size]:
            gt_image_file = label_paths[os.path.basename(image_file)]

            # image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)
            image = resize(scipy.misc.imread(image_file), image_shape, preserve_range=True)
            # gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)
            # preserve_range keeps the labels on the 0-255 scale; without it,
            # resize rescales to [0, 1] and the comparison against
            # background_color below can never match.
            gt_image = resize(scipy.misc.imread(gt_image_file), image_shape, preserve_range=True)

            gt_bg = np.all(gt_image == background_color, axis=2)
            gt_bg = gt_bg.reshape(*gt_bg.shape, 1)
            gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)

            images.append(image)
            gt_images.append(gt_image)

        yield np.array(images), np.array(gt_images)
def daisy_features(train_data_images, train_data_split_images, test_data_images, IMG_SIZE):
    canny(train_data_images, train_data_split_images, test_data_images, IMG_SIZE)
    train_data_features = []
    test_data_features = []
    train_data = []
    test_data = []
    train_data_split_crossfold = []
    print(4)
    # bow_train = cv2.BOWKMeansTrainer(8)
    # flann_params = dict(algorithm=1, trees=5)
    # matcher = cv2.FlannBasedMatcher(flann_params, {})
    # detect = cv2.xfeatures2d.SIFT_create()
    # extract = cv2.xfeatures2d.SIFT_create()
    # bow_extract = cv2.BOWImgDescriptorExtractor(extract, matcher)
    # help(bow_train)
    # help(bow_extract)

    for image in train_data_images:
        img = imread(image, as_grey=True)
        resized_image = resize(img, (40, 40))
        train_data.append(resized_image)

    for image in train_data_split_images:
        img = imread(image, as_grey=True)
        resized_image = resize(img, (40, 40))
        train_data_split_crossfold.append(resized_image)

    for image in test_data_images:
        img = imread(image, as_grey=True)
        resized_image = resize(img, (40, 40))
        test_data.append(resized_image)

    print(6)
    des = []
    des_cross = []
    des_test = []
    radius = 5

    for image in train_data:
        descs = daisy(image, radius=radius)
        des.append(descs)
    train_data_features = bow(des, train_data)
    del des
    print('oi1')

    # for image in train_data_split_crossfold:
    #     descs = daisy(image, radius=radius)
    #     des_cross.append(descs)
    print('oi1')

    # for image in test_data:
    #     descs = daisy(image, radius=radius)
    #     des_test.append(descs)
    print('oi1')
def iterate_train(self, batchsize, data_augment=False):
    num_batch = 40000
    for i in range(num_batch // batchsize):  # integer division for Python 3
        start = i * batchsize
        end = (i + 1) * batchsize
        if not data_augment:
            x = self.train_set_x.get_value(borrow=True)[start:end]
            x = (x - self.mean) / 256.0
            x = np.asarray(x, dtype=theano.config.floatX)
            yield x, self.train_set_y.eval()[start:end]
        else:
            imgs = self.train_set_x.get_value(borrow=True)[start:end]
            for j in range(imgs.shape[0]):
                # horizontally flip (the flip loop must not reuse the batch
                # index `i`, so it iterates over `k`)
                if randint(0, 1) == 0:
                    target = np.copy(imgs[j])
                    for k in range(imgs[j].shape[2]):
                        target[:, :, k] = imgs[j][:, :, imgs[j].shape[2] - 1 - k]
                    imgs[j] = target
                # color transform
                target = np.zeros([3, 32, 32])
                mix = list(range(3))  # shuffle needs a list, not a range object
                np.random.shuffle(mix)
                for x in range(3):
                    target[x] = imgs[j][mix[x]]
                imgs[j] = target

                r = randint(0, 7)
                if r == 0:
                    tmp = np.transpose(imgs[j], (1, 2, 0))
                    tmp = transform.resize(tmp[0:28, 0:28, :], [32, 32, 3])
                    imgs[j] = np.transpose(tmp, (2, 0, 1))
                elif r == 1:
                    tmp = np.transpose(imgs[j], (1, 2, 0))
                    tmp = transform.resize(tmp[0:28, 4:32, :], [32, 32, 3])
                    imgs[j] = np.transpose(tmp, (2, 0, 1))
                elif r == 2:
                    tmp = np.transpose(imgs[j], (1, 2, 0))
                    tmp = transform.resize(tmp[4:32, 0:28, :], [32, 32, 3])
                    imgs[j] = np.transpose(tmp, (2, 0, 1))
                elif r == 3:
                    tmp = np.transpose(imgs[j], (1, 2, 0))
                    tmp = transform.resize(tmp[4:32, 4:32, :], [32, 32, 3])
                    imgs[j] = np.transpose(tmp, (2, 0, 1))
                elif r == 4:
                    tmp = np.asarray(imgs[j], dtype='int32')
                    tmp = transform.rotate(image=tmp, angle=5)
                    # keep the rotated image (the original discarded `tmp`)
                    imgs[j] = np.asarray(tmp, dtype=theano.config.floatX)
                elif r == 5:
                    tmp = np.asarray(imgs[j], dtype='int32')
                    tmp = transform.rotate(image=tmp, angle=-5)
                    imgs[j] = np.asarray(tmp, dtype=theano.config.floatX)
            imgs = (imgs - self.mean) / 256.0
            imgs = np.asarray(imgs, dtype=theano.config.floatX)
            yield imgs, self.train_set_y.eval()[start:end]
def preprocessing(self):
    p = resize(self.p, (256, 256))
    image = np.zeros(shape=(258, 258))
    image[1:257, 1:257] = p
    image_ratio = self.universe(image)
    image = image_ratio[0]
    ratio = image_ratio[1]
    image = resize(image, (252, 252))
    return image, ratio
def resize_image(im, new_dims, interp_order=1):
    """
    Resize an image array with interpolation.

    Parameters
    ----------
    im : (H x W x K) or (H x W x K x L) ndarray
    new_dims : (height, width) tuple of new dimensions.
    interp_order : interpolation order, default is linear.

    Returns
    -------
    im : resized ndarray with shape (new_dims[0], new_dims[1], K),
         or (new_dims[0], new_dims[1], K, L)
    """
    # (H x W x K) case
    if im.ndim == 3:
        if im.shape[-1] == 1 or im.shape[-1] == 3:
            im_min, im_max = im.min(), im.max()
            if im_max > im_min:
                # skimage is fast but only understands {1,3} channel images
                # in [0, 1].
                im_std = (im - im_min) / (im_max - im_min)
                resized_std = resize(im_std, new_dims, order=interp_order)
                resized_im = resized_std * (im_max - im_min) + im_min
            else:
                # the image is a constant -- avoid divide by 0
                ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
                               dtype=np.float32)
                ret.fill(im_min)
                return ret
        else:
            # ndimage interpolates anything but more slowly.
            scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
            resized_im = zoom(im, scale + (1,), order=interp_order)
    # (H x W x K x L) case (C3D)
    elif im.ndim == 4:
        resized_im = np.empty(new_dims + im.shape[-2:])
        for l in range(im.shape[3]):
            im_min, im_max = im[:, :, :, l].min(), im[:, :, :, l].max()
            if im_max > im_min:
                im_std = (im[:, :, :, l] - im_min) / (im_max - im_min)
                resized_std = resize(im_std, new_dims, order=interp_order)
                resized_im[:, :, :, l] = resized_std * (im_max - im_min) + im_min
            else:
                resized_im[:, :, :, l] = np.empty(
                    (new_dims[0], new_dims[1], im.shape[-2]), dtype=np.float32)
                resized_im[:, :, :, l].fill(im_min)
    # unknown case
    else:
        raise ValueError('Incorrect input array shape.')
    return resized_im.astype(np.float32)
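# Minimal check of resize_image on a 3-channel image (a sketch; assumes numpy,
# skimage's resize and scipy's zoom are imported as the function requires).
import numpy as np

_im = np.random.rand(48, 64, 3).astype(np.float32)
_out = resize_image(_im, (96, 128))
assert _out.shape == (96, 128, 3) and _out.dtype == np.float32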
def test_resize3d_keep():
    # keep 3rd dimension
    x = np.zeros((5, 5, 3), dtype=np.double)
    x[1, 1, :] = 1
    resized = resize(x, (10, 10), order=0)
    ref = np.zeros((10, 10, 3))
    ref[2:4, 2:4, :] = 1
    assert_almost_equal(resized, ref)
    resized = resize(x, (10, 10, 3), order=0)
    assert_almost_equal(resized, ref)
def resizeToFit(image, label_bndbox, cropSize, scale):
    exceed = checkExceedBndBox(label_bndbox, cropSize)
    newcoord = np.array(label_bndbox)

    if (image.shape[0] < cropSize or image.shape[1] < cropSize):
        X = image.shape[1] - cropSize
        Y = image.shape[0] - cropSize
        image = resize(image, (cropSize, cropSize))
        xycoord = [X, Y, X, Y]
        # calculate new coordinate according to x, y changes
        newcoord[:] = newcoord[:] - xycoord
        newcoord = np.clip(newcoord, 0, cropSize - 1)
        exceed = False

    while (exceed):
        # w = int(image.shape[1] / scale)
        # res = imutils.resize(image, width=w)
        w = int(image.shape[1] / scale)
        h = int(image.shape[0] / scale)
        res = resize(image, (h, w))
        if (res.shape[0] <= cropSize or res.shape[1] <= cropSize):
            # image size is too small
            if (res.shape[0] > res.shape[1]):
                # res = imutils.resize(image, width=cropSize)
                ratio = float(image.shape[1]) / cropSize
                w = cropSize
                h = int(image.shape[0] / ratio)
                res = resize(image, (h, w))
            else:
                # res = imutils.resize(image, height=cropSize)
                ratio = float(image.shape[0]) / cropSize
                w = int(image.shape[1] / ratio)
                h = cropSize
                res = resize(image, (h, w))
            # calculate new coordinate according to resized ratio
            newcoord = newcoord[:, :] / ratio
            newcoord = newcoord.astype(int)
            exceed = checkExceedBndBox(newcoord, cropSize)
            if (exceed or res.shape[0] < cropSize or res.shape[1] < cropSize):
                X = res.shape[1] - cropSize
                Y = res.shape[0] - cropSize
                res = resize(res, (cropSize, cropSize))
                xycoord = [X, Y, X, Y]
                # calculate new coordinate according to x, y changes
                newcoord[:] = newcoord[:] - xycoord
                newcoord = np.clip(newcoord, 0, cropSize - 1)
                exceed = False
        else:
            # calculate new coordinate according to resized ratio
            newcoord = newcoord[:, :] / scale
            newcoord = newcoord.astype(int)
            exceed = checkExceedBndBox(newcoord, cropSize)
        # make sure that the coordinates do not exceed the image size
        image = res
        newcoord[:, 0] = np.minimum(newcoord[:, 0], image.shape[1] - 1)
        newcoord[:, 1] = np.minimum(newcoord[:, 1], image.shape[0] - 1)

    return (image, newcoord)
def scale_images(img, locs, scale, conf):
    sz = img.shape
    simg = np.zeros((sz[0], int(float(sz[1]) / scale), int(float(sz[2]) / scale), sz[3]))
    for ndx in range(sz[0]):
        if sz[3] == 1:
            simg[ndx, :, :, 0] = transform.resize(img[ndx, :, :, 0], simg.shape[1:3],
                                                  preserve_range=True)
        else:
            simg[ndx, :, :, :] = transform.resize(img[ndx, :, :, :], simg.shape[1:3],
                                                  preserve_range=True)
    new_locs = locs.copy()
    new_locs = new_locs / scale
    return simg, new_locs
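# Sketch: downscaling a batch by 2 halves both spatial dimensions and the
# landmark coordinates (assumes `from skimage import transform` at module level;
# the unused `conf` argument is passed through as None).
import numpy as np

_imgs = np.random.rand(2, 64, 64, 1)
_locs = np.array([[[10.0, 20.0]], [[30.0, 40.0]]])
_simg, _slocs = scale_images(_imgs, _locs, scale=2, conf=None)
assert _simg.shape == (2, 32, 32, 1) and np.allclose(_slocs, _locs / 2)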
def load_test_efficient(cache=False, size=PIXELS, grayscale=False):
    if grayscale:
        mode = '_bw'
    else:
        mode = ''
    filename = 'data/cache/X_test_%d_f32%s' % (PIXELS, mode)
    if cache and os.path.exists(filename):
        X_test = np.load(filename)
        X_test_id = np.load('data/cache/X_test_id_f32.npy')
    else:
        print('Read test images')
        path = os.path.join('data', 'imgs', 'test', '*.jpg')
        files = glob.glob(path)
        total_files = len(files)
        X_test_id = np.empty(total_files, dtype='S14')  # S14 is what numpy saves these as
        # Lazy allocation
        X_test = None
        for count, fl in enumerate(files):
            if count % 100 == 0:
                print('%d of %d' % (count, len(files)))
            flbase = os.path.basename(fl)
            img = imread(fl, as_grey=grayscale)
            img = transform.resize(img, output_shape=(PIXELS, PIXELS, 3), preserve_range=True)
            if not grayscale:
                # img = img.transpose(2, 0, 1)
                channels = 3
            else:
                channels = 1
            if X_test is None:  # 'none' is not a Python name; compare against None
                X_test = np.empty((PIXELS, PIXELS, channels, total_files), dtype=np.float32)
            # Removed for computation and ease of figuring out if its grayscale
            # img = np.reshape(img, (1, num_features))
            X_test[..., count] = transform.resize(img, output_shape=(PIXELS, PIXELS, channels),
                                                  preserve_range=True)
            X_test_id[count] = flbase
        np.save(filename, X_test)
        np.save('data/cache/X_test_id_f32.npy', X_test_id)

    # This function loads test data, so transpose X_test
    # (the original mistakenly wrote X_train here).
    X_test = X_test.transpose(3, 2, 0, 1)  # / 255.

    # subtract pixel mean
    pixel_mean = np.load('data/pixel_mean_full_%d.npy' % PIXELS)
    X_test -= pixel_mean
    return X_test, X_test_id
def min_resize(img, size):
    """
    Resize an image so that it is size along the minimum spatial dimension.
    """
    w, h = map(float, img.shape[:2])
    if min([w, h]) != size:
        if w <= h:
            img = resize(img, (int(round((h / w) * size)), int(size)))
        else:
            img = resize(img, (int(size), int(round((w / h) * size))))
    return img
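# min_resize sketch: whichever axis swap the w/h naming implies, the short side
# of the result equals `size` (assumes skimage's resize is imported).
import numpy as np

_small = min_resize(np.random.rand(100, 200), 50)
assert min(_small.shape[:2]) == 50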
def pre_process_image(image_file):
    # get the image and resize
    image_data = ndi.imread(image_file, mode='L')
    resized_image = resize(image_data, (200, 200))
    up_left, low_right = cropper(resized_image, 40, 0.6)
    resized_image = resize(resized_image[up_left[0]:low_right[0] + 1,
                                         up_left[1]:low_right[1] + 1], (200, 200))
    binar = binarize(resized_image, 0.4)
    undilated = deepcopy(binar)

    # dilate the binarized image
    selem = rectangle(1, 2)
    dil = dilation(binar, selem)
    # binarize dilation
    dil = binarize(dil)
    # final = dil
    final = deepcopy(dil)
    for i in range(4):
        for j in range(4):
            final[i * 50 + 3:i * 50 + 25, j * 50 + 3:j * 50 + 44] = \
                undilated[i * 50 + 3:i * 50 + 25, j * 50 + 3:j * 50 + 44]

    # Try to remove all borders and grid lines in the image.
    # Do this by scanning over rows and cols: if 48 or more of the 200 pixels
    # (roughly a quarter) are black, set the entire row/column to 1 (white).
    # first rows
    for row in range(len(final)):
        count = 0
        for pixel in final[row, :]:
            if pixel == 0:
                count += 1
        if count >= 48:
            final[row, :] = final[row, :] * 0 + 1
    # columns
    for col in range(len(final[0, :])):
        count = 0
        for pixel in final[:, col]:
            if pixel == 0:
                count += 1
        if count >= 48:
            final[:, col] = final[:, col] * 0 + 1

    # add some final erosion (black) to fill out numbers and ensure they're connected
    final = binarize(erosion(final, rectangle(1, 2)), .0000001)
    return final
def test_resize3d_keep():
    # keep 3rd dimension
    x = np.zeros((5, 5, 3), dtype=np.double)
    x[1, 1, :] = 1
    with expected_warnings(['The default mode']):
        resized = resize(x, (10, 10), order=0)
    ref = np.zeros((10, 10, 3))
    ref[2:4, 2:4, :] = 1
    assert_almost_equal(resized, ref)
    with expected_warnings(['The default mode']):
        resized = resize(x, (10, 10, 3), order=0)
    assert_almost_equal(resized, ref)
def set_data(self, A):
    dims = A.shape
    max_dims = (3840, 2160)  # 4K resolution
    if dims[0] > max_dims[0] or dims[1] > max_dims[1]:
        new_dims = numpy.minimum(dims, max_dims)
        if _skimage_version():
            self.unsampled_data = resize(A, new_dims, mode='constant',
                                         cval=numpy.nan, anti_aliasing=True)
        else:
            self.unsampled_data = resize(A, new_dims, mode='constant',
                                         cval=numpy.nan)
    else:
        self.unsampled_data = A
    super(ScalingAxesImage, self).set_data(A)
def test_spacing_1():
    n = 30
    lx, ly, lz = n, n, n
    data, _ = make_3d_syntheticdata(lx, ly, lz)

    # Rescale `data` along Y axis
    # `resize` is not yet 3D capable, so this must be done by looping in 2D.
    data_aniso = np.zeros((n, n * 2, n))
    for i, yz in enumerate(data):
        data_aniso[i, :, :] = resize(yz, (n * 2, n),
                                     mode='constant',
                                     anti_aliasing=False)

    # Generate new labels
    small_l = int(lx // 5)
    labels_aniso = np.zeros_like(data_aniso)
    labels_aniso[lx // 5, ly // 5, lz // 5] = 1
    labels_aniso[lx // 2 + small_l // 4,
                 ly - small_l // 2,
                 lz // 2 - small_l // 4] = 2

    # Test with `spacing` kwarg
    # First, anisotropic along Y
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
                                     spacing=(1., 2., 1.))
    assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()

    # Rescale `data` along X axis
    # `resize` is not yet 3D capable, so this must be done by looping in 2D.
    data_aniso = np.zeros((n, n * 2, n))
    for i in range(data.shape[1]):
        data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),
                                     mode='constant',
                                     anti_aliasing=False)

    # Generate new labels
    small_l = int(lx // 5)
    labels_aniso2 = np.zeros_like(data_aniso)
    labels_aniso2[lx // 5, ly // 5, lz // 5] = 1
    labels_aniso2[lx - small_l // 2,
                  ly // 2 + small_l // 4,
                  lz // 2 - small_l // 4] = 2

    # Anisotropic along X
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        labels_aniso2 = random_walker(data_aniso, labels_aniso2, mode='cg',
                                      spacing=(2., 1., 1.))
    assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()
                        0.15 * license_plate.shape[1])
min_height, max_height, min_width, max_width = character_dimensions

characters = []
counter = 0
column_list = []
for regions in regionprops(labelled_plate):
    y0, x0, y1, x1 = regions.bbox
    region_height = y1 - y0
    region_width = x1 - x0

    if region_height > min_height and region_height < max_height and region_width > min_width and region_width < max_width:
        roi = license_plate[y0:y1, x0:x1]

        # draw a red bordered rectangle over the character.
        rect_border = patches.Rectangle((x0, y0), x1 - x0, y1 - y0,
                                        edgecolor="red", linewidth=2, fill=False)
        ax1.add_patch(rect_border)

        # resize the characters to 20x20 and then append each character into the characters list
        resized_char = resize(roi, (20, 20))
        characters.append(resized_char)

        # this is just to keep track of the arrangement of the characters
        column_list.append(x0)
# print(characters)
# Imports this snippet relies on (the keras image helpers and skimage
# functions were presumably imported earlier in the original file).
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from skimage.transform import resize
from skimage.color import rgb2lab, lab2rgb
from skimage.io import imsave

"""
def imshow(img):
    cv2.imshow("img", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
"""

model = tf.keras.models.load_model('colorize_autoencoder.h5',
                                   custom_objects=None,
                                   compile=True)

img1_color = []
img1 = img_to_array(load_img('17165,044jpg.png'))
img1 = resize(img1, (256, 256))
img1_color.append(img1)

img1_color = np.array(img1_color, dtype=float)
img1_color = rgb2lab(1.0 / 255 * img1_color)[:, :, :, 0]
img1_color = img1_color.reshape(img1_color.shape + (1, ))

output1 = model.predict(img1_color)
# In "LAB" format, "L" represents the black-and-white image.
# We predict the A and B channels and then join them.
output1 = output1 * 128

result = np.zeros((256, 256, 3))
# img1_color carries a leading batch axis, so take sample 0.
result[:, :, 0] = img1_color[0][:, :, 0]
result[:, :, 1:] = output1[0]
imsave("result.png", lab2rgb(result))
def mainFunction(img):
    import cv2
    import numpy as np
    # %matplotlib inline
    from skimage.io import imread
    from matplotlib import pyplot as plt
    from scipy.ndimage.filters import gaussian_filter
    from skimage import img_as_float
    from skimage.color import rgb2ycbcr
    from skimage.color import ycbcr2rgb

    # Normalize to 0-255
    def normalise(I):
        N = np.uint8(255 * ((I - np.min(I))) / (np.max(I) - np.min(I)))
        return N

    # white balance using gray world algorithm
    def white_balance(img):
        row, col, ch = img.shape
        output = np.zeros(np.shape(img))
        for j in range(0, 3):
            scalVal = sum(sum(img)) / (row * col)
            # print(scalVal)
            output[:, :, j] = img[:, :, j] * (.725 / scalVal[j])
        return output

    # generate gaussian pyramid
    def genrateGaussianPyr(A, level):
        G = A.copy()
        gpA = [G]
        for i in range(level):
            G = cv2.pyrDown(G)
            gpA.append(G)
        return gpA

    # generate laplacian pyramid
    def genrateLaplacianPyr(A, level):
        G = A.copy()
        gpA = [G]
        for i in range(level):
            G = cv2.pyrDown(G)
            gpA.append(G)
        lpA = [G]
        for i in range(level, 0, -1):
            GE = cv2.pyrUp(gpA[i])
            r, c, ch = gpA[i - 1].shape
            # print(gpA[i-1].shape)
            # print(GE.shape)
            GE = cv2.resize(GE, (c, r))
            # print(GE.shape)
            L = cv2.subtract(gpA[i - 1], GE)
            lpA.append(L)
        return lpA

    # a10
    img = imread(img)
    # plt.imshow(img)
    # plt.show()

    # color compensation
    # normalize Image 0-1
    imgNormalised = np.divide((img - np.min(img)), (np.max(img) - np.min(img)))
    # split the channels
    r = imgNormalised[:, :, 0]
    g = imgNormalised[:, :, 1]
    b = imgNormalised[:, :, 2]
    rmean = r.mean()
    gmean = g.mean()
    bmean = b.mean()
    alpha = 1
    # the compensated red channel Irc at every pixel location (x)
    Irc = r + alpha * (gmean - rmean) * (1 - r) * g
    # the compensated blue channel Ibc at every pixel location (x)
    Ibc = b + alpha * (gmean - bmean) * (1 - b) * g
    # New image
    newImg = np.zeros(np.shape(img))
    newImg[:, :, 0] = Irc
    newImg[:, :, 1] = g
    newImg[:, :, 2] = Ibc
    # plt.imshow(newImg)
    # plt.show()

    # white balance
    wb_img = white_balance(newImg)
    wb_img = normalise(wb_img)
    # plt.imshow(wb_img)
    # plt.show()

    # Gamma Correction
    from skimage import exposure
    gamma_img = exposure.adjust_gamma(wb_img, 1.3)
    # plt.imshow(gamma_img)
    # plt.show()

    # from skimage.filters import unsharp_mask
    def _unsharp_mask_single_channel(image, radius, amount, vrange):
        blurred = gaussian_filter(image, sigma=radius, mode='reflect')
        result = image + (image - blurred) * amount
        if vrange is not None:
            return np.clip(result, vrange[0], vrange[1], out=result)
        return result

    def unsharp_mask(image, radius=1.0, amount=1.0, multichannel=False,
                     preserve_range=False):
        vrange = None
        if preserve_range:
            # np.float was removed from numpy; np.float64 is the equivalent.
            fimg = image.astype(np.float64)
        else:
            fimg = img_as_float(image)
            negative = np.any(fimg < 0)
            if negative:
                vrange = [-1., 1.]
            else:
                vrange = [0., 1.]
        if multichannel:
            result = np.empty_like(fimg, dtype=np.float64)
            for channel in range(image.shape[-1]):
                result[..., channel] = _unsharp_mask_single_channel(
                    fimg[..., channel], radius, amount, vrange)
            return result
        else:
            return _unsharp_mask_single_channel(fimg, radius, amount, vrange)

    sha_img = unsharp_mask(wb_img, radius=3, amount=1)
    sha_img = normalise(sha_img)
    # plt.imshow(sha_img)
    # plt.show()

    # weight calculation
    # Local contrast weight map
    def local_contrast_weight(img):
        local_con_wt = np.zeros(img.shape)
        Ycbcr = rgb2ycbcr(img)
        Y_factor = Ycbcr[:, :, 0]
        # print(max(np.ravel(Ycbcr)))
        laplacian = cv2.Laplacian(Y_factor, cv2.CV_64F)
        Y_new = np.abs(Y_factor + laplacian)
        local_con_wt[:, :, 0] = Y_new
        local_con_wt[:, :, 1] = img[:, :, 1]
        local_con_wt[:, :, 2] = img[:, :, 2]
        local_new = ycbcr2rgb(local_con_wt)
        return local_new

    # Saturation weight maps
    def saturation_weight(img):
        sat_wt = np.zeros(img.shape)
        R = img[:, :, 0]
        G = img[:, :, 1]
        B = img[:, :, 2]
        Ycbcr = rgb2ycbcr(img)
        Y_factor = Ycbcr[:, :, 0]
        wght = np.sqrt(1 / 3 * (np.square(R - Y_factor) +
                                np.square(G - Y_factor) +
                                np.square(B - Y_factor)))
        sat_wt[:, :, 0] = wght
        sat_wt[:, :, 1] = wght
        sat_wt[:, :, 2] = wght
        return sat_wt

    # Saliency weight maps
    def saliency_weight(img):
        img = 255 * img / (max(img.flatten()))
        sal_wt = np.zeros(img.shape)
        Igauss = cv2.GaussianBlur(img, (5, 5), 0)
        Imean = img.mean()
        # print(Imean)
        normI = Igauss - Imean
        R = normI[:, :, 0]
        G = normI[:, :, 1]
        B = normI[:, :, 2]
        NormR = R / (R + G + B)
        NormG = G / (R + G + B)
        NormB = B / (R + G + B)
        sal_wt[:, :, 0] = NormR
        sal_wt[:, :, 1] = NormG
        sal_wt[:, :, 2] = NormB
        return (sal_wt)

    # Sum Weight Map
    def sum_weight(inp1, inp2, inp3):
        inp1 = np.double(inp1)
        inp2 = np.double(inp2)
        inp3 = np.double(inp3)
        out = (inp1 + inp2 + inp3)
        return out

    # normalized Weight Map
    def norm_weight(inp1, inp2):
        # inp1max = np.max(inp1.flatten())
        # inp2max = np.max(inp2.flatten())
        # inp1 = 255 * inp1 / inp1max
        # inp2 = 255 * inp2 / inp2max
        inp1 = np.double(inp1)
        inp2 = np.double(inp2)
        suminp = inp1 + inp2 + 0.0001
        inp1 = np.divide(inp1, suminp)
        inp2 = np.divide(inp2, suminp)
        return inp1, inp2

    # finding weights
    fus_inp1 = gamma_img
    fus_inp2 = sha_img

    out_11 = local_contrast_weight(fus_inp1)
    out_11 = normalise(out_11)
    out_12 = saturation_weight(fus_inp1)
    out_12 = normalise(out_12)
    out_13 = (saliency_weight(fus_inp1))
    out_13 = normalise(out_13)

    out_21 = local_contrast_weight(fus_inp2)
    out_21 = normalise(out_21)
    out_22 = saturation_weight(fus_inp2)
    out_22 = normalise(out_22)
    out_23 = (saliency_weight(fus_inp2))
    out_23 = normalise(out_23)

    out_1 = sum_weight(out_11, out_12, out_13)
    # out_1 = normalise(out_1)
    out_2 = sum_weight(out_21, out_22, out_23)
    # out_2 = normalise(out_2)

    out1, out2 = norm_weight(out_1, out_2)
    out1 = normalise(out1)
    out2 = normalise(out2)

    # generate pyramids
    fus_inp1 = (gamma_img)
    fus_inp2 = (sha_img)
    w_py1 = (genrateGaussianPyr(out1, 2))
    w_py2 = (genrateGaussianPyr(out2, 2))
    i_py1 = (genrateLaplacianPyr(fus_inp1, 2))
    i_py2 = (genrateLaplacianPyr(fus_inp2, 2))
    fus_inp1 = np.double(fus_inp1)
    fus_inp2 = np.double(fus_inp2)

    from skimage.transform import resize
    row, col, ch = fus_inp1.shape
    fused_image = np.zeros(fus_inp1.shape)
    for i in range(0, 3):
        fus_w1 = resize(w_py1[i], [row, col, ch], preserve_range=True)
        fus_w2 = resize(w_py2[i], [row, col, ch], preserve_range=True)
        fus_i1 = resize(i_py1[i], [row, col, ch], preserve_range=True)
        fus_i2 = resize(i_py2[i], [row, col, ch], preserve_range=True)
        # fus_w1 = im2double(fus_w1)
        # fus_w2 = im2double(fus_w2)
        # fus_i1 = im2double(fus_i1)
        # fus_i2 = im2double(fus_i2)
        fused_image = fused_image + np.multiply(fus_w1, fus_i1) + np.multiply(fus_w2, fus_i2)

    # output image
    fused_image = normalise(fused_image)
    res = cv2.resize(fused_image, dsize=(600, 600), interpolation=cv2.INTER_CUBIC)
    cv2.imshow("imG", res)
    # cv2.imwrite(os.path.join(path, 'out.jpg'), img)
    # cv2.imwrite("/home/arunima/flask/static/img/in.png", img)
    cv2.imwrite("/home/arunima/flask/aru_UI/static/out.png", fused_image)
    cv2.waitKey(0)
def __loadpredict__(self, fn):
    img = pydicom.dcmread(os.path.join(self.folder, fn)).pixel_array
    img = resize(img, (self.image_size, self.image_size), mode='reflect')
    img = np.expand_dims(img, -1)
    return img
np.random.seed(10)

# In[ ]:

# Get and resize train images and masks
X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
# np.bool was removed from numpy; the builtin bool is equivalent here.
Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
print('Getting and resizing train images and masks ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
    path = TRAIN_PATH + id_
    img = imread(path + '/images/' + id_ + '.png')[:, :, :IMG_CHANNELS]
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    X_train[n] = img
    mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
    for mask_file in next(os.walk(path + '/masks/'))[2]:
        mask_ = imread(path + '/masks/' + mask_file)
        mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                                      preserve_range=True), axis=-1)
        mask = np.maximum(mask, mask_)
    Y_train[n] = mask

# Get and resize test images
X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
def merge_result(self):
    heatmap_root_folder = getAFolder('%s/heatmap' % (self.output_folder))
    wsi_list, msk_list = [], []
    for line in [l.strip() for l in open(self.input_list)
                 if l[0] != '#' and len(l.strip()) > 0]:
        itms = line.strip().split()
        if len(itms) == 2:
            wsi_list.append('%s/%s' % (self.input_folder, itms[0]))
            msk_list.append('%s/%s' % (self.input_folder, itms[1]))
        elif len(itms) == 1:
            wsi_list.append('%s/%s' % (self.input_folder, itms[0]))
            msk_list.append(None)

    for wsiName, mskName in zip(wsi_list, msk_list):
        wsi_name_root = wsiName.split('/')[-1].split('.')[0]
        outputName = '%s/%s.png' % (heatmap_root_folder, wsi_name_root)
        if os.path.exists(outputName):
            print("Omit:%s" % (outputName))
        else:
            print("Generating:%s" % (outputName))
            wsi = osi.open_slide(wsiName)
            # img_w_ml, img_h_ml = wsi.level_dimensions[deep_model_level]
            img_w_hl, img_h_hl = wsi.level_dimensions[self.heatmap_level]
            heatmap = np.zeros((img_h_hl, img_w_hl, 2))

            # load inds and values
            if 1:
                wsiNameRoot = wsiName.split('/')[-1].split('.')[0]
                lst_output_folder = '%s/lst/%s' % (self.output_folder, wsiNameRoot)
                lst_files = [l.strip() for l in os.popen('ls %s' % (lst_output_folder))]
                inds_list, values = [], []
                for lst_file in lst_files:
                    lst_file_path = '%s/lst/%s/%s' % (self.output_folder, wsiNameRoot, lst_file)
                    val_file_path = '%s/values/%s/%s.npy' % (self.output_folder, wsiNameRoot,
                                                             lst_file.split('.')[0])
                    lst_info = [l.strip().split() for l in open(lst_file_path)]
                    value = [v for v in np.load(val_file_path)]
                    ind_list = [(int(i[1]), int(i[2])) for i in lst_info]
                    inds_list += ind_list
                    values += value

            # generate the heatmap
            if 1:
                logging.info("\t Updating heatmap...")
                inds_list_size = len(inds_list)
                for (h1_ml, w1_ml), v in tqdm(zip(inds_list, values)):
                    h1_hl = int(h1_ml / np.power(2, self.heatmap_level - self.deep_model_level))
                    w1_hl = int(w1_ml / np.power(2, self.heatmap_level - self.deep_model_level))
                    window_size_hl = int(self.window_size /
                                         np.power(2, self.heatmap_level - self.deep_model_level))
                    ori_patch = heatmap[h1_hl:h1_hl + window_size_hl,
                                        w1_hl:w1_hl + window_size_hl, :]
                    l1, c1 = np.split(ori_patch, 2, axis=2)
                    nmask = np.tile(np.array([v, 1]).reshape((1, 1, 2)),
                                    (window_size_hl, window_size_hl, 1))
                    l2, c2 = np.split(nmask, 2, axis=2)
                    # merging: running mean weighted by the per-pixel counts
                    c3 = c1 + c2
                    l3 = ((l1 * c1) + l2) / c3
                    heatmap[h1_hl:h1_hl + window_size_hl,
                            w1_hl:w1_hl + window_size_hl, :] = np.dstack((l3, c3))
                    # break
                logging.info("\t Done!")

            # Save the results
            if 1:
                logging.info("\t Saving Mask...")
                # save the npy file
                heatmap = np.squeeze(heatmap[:, :, 0]).astype(np.float32)
                heatmap[heatmap < 1e-5] = 0.0
                # np.save(outputName + ".npy", heatmap)
                # save the heat map; resize needs integer output dimensions
                img_h_kl = int(img_h_hl / np.power(2, self.mask_image_level - self.heatmap_level))
                img_w_kl = int(img_w_hl / np.power(2, self.mask_image_level - self.heatmap_level))
                # uint8 so imsave writes a standard 8-bit png (np.uint is 64-bit)
                heatmap_kl = (resize(heatmap, (img_h_kl, img_w_kl)) * 255).astype(np.uint8)
                skio.imsave(outputName, heatmap_kl)
def segment(dataloader, segment_dir, model, core_config):
    model.eval()  # convert the model into evaluation mode
    rle_dir = os.path.join(segment_dir, 'rle')
    img_dir = os.path.join(segment_dir, 'img')
    if not os.path.exists(rle_dir):
        os.makedirs(rle_dir)
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)
    exist_ids = next(os.walk(rle_dir))[2]
    num_classes = core_config.num_classes
    offset_list = core_config.offsets

    for i, (img, size, id) in enumerate(dataloader):
        id = id[0]  # tuple to str
        if id + '.rle' in exist_ids:
            continue
        original_height, original_width = size[0].item(), size[1].item()
        with torch.no_grad():
            output = model(img)
            # class_pred = (output[:, :num_classes, :, :] + 0.001) * 0.999
            # adj_pred = (output[:, num_classes:, :, :] + 0.001) * 0.999
            class_pred = output[:, :num_classes, :, :]
            adj_pred = output[:, num_classes:, :, :]

        # By default, we use c++ version segmenter. In short, we call function
        # cseg.run_segmentation(). For details, please check the "README" file
        # in directory whose path is "scripts/waldo/csegmenter".
        # If the c++ version segmenter is not available, we can comment out the
        # python segmenter and use it.
        """
        if args.object_merge_factor is None:
            args.object_merge_factor = 1.0 / len(offset_list)
            segmenter_opts = SegmenterOptions(same_different_bias=args.same_different_bias,
                                              object_merge_factor=args.object_merge_factor,
                                              merge_logprob_bias=args.merge_logprob_bias)
        seg = ObjectSegmenter(class_pred[0].detach().numpy(),
                              adj_pred[0].detach().numpy(),
                              num_classes, offset_list,
                              segmenter_opts)
        mask_pred, object_class = seg.run_segmentation()
        """
        if args.object_merge_factor is None:
            args.object_merge_factor = 1.0 / len(offset_list)

        mask_pred, object_class = cseg.run_segmentation(
            class_pred[0].detach().numpy().astype(np.float32),
            adj_pred[0].detach().numpy().astype(np.float32),
            num_classes, offset_list,
            args.same_different_bias,
            args.object_merge_factor,
            args.merge_logprob_bias)

        mask_pred = resize(mask_pred, (original_height, original_width),
                           order=0, preserve_range=True).astype(int)

        image_with_mask = {}
        img = np.moveaxis(img[0].detach().numpy(), 0, -1)
        img = resize(img, (original_height, original_width), preserve_range=True)
        image_with_mask['img'] = img
        image_with_mask['mask'] = mask_pred
        image_with_mask['object_class'] = object_class
        visual_mask = visualize_mask(image_with_mask, core_config)['img_with_mask']
        scipy.misc.imsave('{}/{}.png'.format(img_dir, id), visual_mask)

        rles = list(mask_to_rles(mask_pred))
        segment_rle_file = '{}/{}.rle'.format(rle_dir, id)
        with open(segment_rle_file, 'w') as fh:
            for obj in rles:
                obj_str = ' '.join(str(n) for n in obj)
                fh.write(obj_str)
                fh.write('\n')
def __getitem__(self, index):
    # ---------
    #  Image
    # ---------
    img_path = self.img_files[index % len(self.img_files)].rstrip()
    img = np.array(Image.open(img_path))

    # Handles images with less than three channels
    while len(img.shape) != 3:
        index += 1
        img_path = self.img_files[index % len(self.img_files)].rstrip()
        img = np.array(Image.open(img_path))

    h, w, _ = img.shape
    dim_diff = np.abs(h - w)
    # Upper (left) and lower (right) padding
    pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
    # Determine padding
    pad = ((pad1, pad2), (0, 0), (0, 0)) if h <= w else ((0, 0), (pad1, pad2), (0, 0))
    # Add padding
    input_img = np.pad(img, pad, 'constant', constant_values=128) / 255.
    padded_h, padded_w, _ = input_img.shape
    # Resize and normalize
    input_img = resize(input_img, (*self.img_shape, 3), mode='reflect')
    # Channels-first
    input_img = np.transpose(input_img, (2, 0, 1))
    # As pytorch tensor
    input_img = torch.from_numpy(input_img).float()

    # ---------
    #  Label
    # ---------
    label_path = self.label_files[index % len(self.img_files)].rstrip()

    labels = None
    if os.path.exists(label_path):
        labels = np.loadtxt(label_path).reshape(-1, 5)
        # Extract coordinates for unpadded + unscaled image
        x1 = w * (labels[:, 1] - labels[:, 3] / 2)
        y1 = h * (labels[:, 2] - labels[:, 4] / 2)
        x2 = w * (labels[:, 1] + labels[:, 3] / 2)
        y2 = h * (labels[:, 2] + labels[:, 4] / 2)
        # Adjust for added padding
        x1 += pad[1][0]
        y1 += pad[0][0]
        x2 += pad[1][0]
        y2 += pad[0][0]
        # Calculate ratios from coordinates
        labels[:, 1] = ((x1 + x2) / 2) / padded_w
        labels[:, 2] = ((y1 + y2) / 2) / padded_h
        labels[:, 3] *= w / padded_w
        labels[:, 4] *= h / padded_h

    # Fill matrix
    filled_labels = np.zeros((self.max_objects, 5))
    if labels is not None:
        filled_labels[range(len(labels))[:self.max_objects]] = labels[:self.max_objects]
    filled_labels = torch.from_numpy(filled_labels)

    return img_path, input_img, filled_labels
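# Sketch of how this Dataset is typically consumed (hedged: `ListDataset` is a
# hypothetical name for the class this __getitem__ belongs to):
#
#   dataset = ListDataset('train.txt')
#   loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
#   for paths, imgs, targets in loader:
#       ...  # imgs: (16, 3, H, W) floats; targets: (16, max_objects, 5)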
def read_one_image(path):
    img = cv2.imread(path)
    img = transform.resize(img, (w, h))
    return np.asarray(img)
def nldas_attr_ds(year, month, day, hour, attr="TMP", res=500):
    assert attr in ("TMP", "DLWRF", "SPFH", "PRES", "WIND", "APCP"), \
        "The attribute %r is not supported in linear interpolation" % attr

    if attr == "WIND":
        yday = day_of_year(year, month, day)
        nldas_fn = "NLDAS_data/" + str(year) + "/" + yday + "/" + \
                   "NLDAS_FORA0125_H.A" + str(year) + str(month).zfill(2) + \
                   str(day).zfill(2) + "." + str(hour).zfill(2) + "00.002.grb"
        nldas_ds = gdal.Open(nldas_fn, GA_ReadOnly)
        nldas_gt = nldas_ds.GetGeoTransform()
        u_wind_raster = find_band_raster(nldas_ds, "UGRD")
        v_wind_raster = find_band_raster(nldas_ds, "VGRD")
        dem, u_wind_bilinear_result = bilinear_interpolation(u_wind_raster, nldas_gt)
        dem, v_wind_bilinear_result = bilinear_interpolation(v_wind_raster, nldas_gt)
        u_wind_lr, u_wind_residual = regression_information(dem, u_wind_bilinear_result)
        v_wind_lr, v_wind_residual = regression_information(dem, v_wind_bilinear_result)

        if res == 500:
            new_dem_fn = "DEM/500m_dem.tif"
        else:
            new_dem_fn = "DEM/30m_dem.tif"
        new_dem_ds = gdal.Open(new_dem_fn, GA_ReadOnly)
        new_dem = new_dem_ds.ReadAsArray()
        u_wind_downscale_raster = apply_regression_on_dem(u_wind_lr, new_dem, u_wind_residual)
        v_wind_downscale_raster = apply_regression_on_dem(v_wind_lr, new_dem, v_wind_residual)
        # attr_imshow(u_wind_bilinear_result, u_wind_downscale_raster)
        # attr_imshow(v_wind_bilinear_result, v_wind_downscale_raster)
        return u_wind_downscale_raster, v_wind_downscale_raster
    elif attr == 'APCP':
        yday = day_of_year(year, month, day)
        nldas_fn = "NLDAS_data/" + str(year) + "/" + yday + "/" + \
                   "NLDAS_FORA0125_H.A" + str(year) + str(month).zfill(2) + \
                   str(day).zfill(2) + "." + str(hour).zfill(2) + "00.002.grb"
        nldas_ds = gdal.Open(nldas_fn, GA_ReadOnly)
        nldas_gt = nldas_ds.GetGeoTransform()
        attr_raster = find_band_raster(nldas_ds, attr)
        dem, bilinear_result = bilinear_interpolation(attr_raster, nldas_gt)

        if res == 500:
            new_dem_fn = "DEM/500m_dem.tif"
        else:
            new_dem_fn = "DEM/30m_dem.tif"
        new_dem_ds = gdal.Open(new_dem_fn, GA_ReadOnly)
        new_dem = new_dem_ds.ReadAsArray()
        downscale_raster = resize(bilinear_result, new_dem.shape)
        return downscale_raster
    else:
        yday = day_of_year(year, month, day)
        nldas_fn = "NLDAS_data/" + str(year) + "/" + yday + "/" + \
                   "NLDAS_FORA0125_H.A" + str(year) + str(month).zfill(2) + \
                   str(day).zfill(2) + "." + str(hour).zfill(2) + "00.002.grb"
        nldas_ds = gdal.Open(nldas_fn, GA_ReadOnly)
        nldas_gt = nldas_ds.GetGeoTransform()
        attr_raster = find_band_raster(nldas_ds, attr)
        dem, bilinear_result = bilinear_interpolation(attr_raster, nldas_gt)
        lr, residual = regression_information(dem, bilinear_result)

        if res == 500:
            new_dem_fn = "DEM/500m_dem.tif"
        else:
            new_dem_fn = "DEM/30m_dem.tif"
        new_dem_ds = gdal.Open(new_dem_fn, GA_ReadOnly)
        new_dem = new_dem_ds.ReadAsArray()
        downscale_raster = apply_regression_on_dem(lr, new_dem, residual)
        # attr_imshow(bilinear_result, downscale_raster)
        return downscale_raster
def _call(self, x, out, **kwargs):
    out.assign(space.element(
        resize(x, (upscale_shape, upscale_shape), order=1)))
    labels = train_json['annotations'][i]['labelId']
    labels = np.array(list(map(int, labels)))
    y_train.append(labels)
y_train = np.array(y_train)

all_labels = []
for i in range(1, 229):
    all_labels.append([i])
mlb.fit(all_labels)  # fitting multilabelbinarizer to all labels
y_train = mlb.transform(y_train)

# print('smallest vertical:', min(i.shape[0] for i in X_train))
# print('smallest horizontal:', min(i.shape[1] for i in X_train))

# All images resized to the smallest dimensions
X_train_resized = [transform.resize(img, (200, 128, 3)) for img in X_train]
X_train_flat = np.array([img.flatten() for img in X_train_resized])

os.chdir('./imaterial_validation')
X_test = np.array([io.imread(str(i) + '.jpg') for i in range(1, 201)])
os.chdir('..')
X_test_resized = [transform.resize(img, (200, 128, 3)) for img in X_test]
X_test_flat = np.array([img.flatten() for img in X_test_resized])

val_json = json.load(open('validation.json'))
y_test = []
for i in range(1, 201):
    labels = val_json['annotations'][i]['labelId']
    labels = np.array(list(map(int, labels)))
from sklearn.mixture import GaussianMixture

from utils import generate_samples
from utils import plot_ellipses_and_assignment
from approximated_transport import (transport_samples_to_barycenter,
                                    get_assignment, get_pairwise_barycenter)

figsize = (8, 8)
current_palette = sns.color_palette()
rnd = check_random_state(seed=3)

### Load Images
n_x = n_y = 200

## Cat
image1_ = (1 - io.imread("data/shapes/cat_1.jpg")[:, :, 0] / 255)
image1 = resize(image1_, (200, 200), mode="reflect",
                anti_aliasing=False).astype("bool") * 1

## Rabbit
image2_ = (1 - io.imread("data/shapes/rabbit.png")[:, :, 1] / 255).astype("bool")
image2 = resize(image2_, (image1.shape[0], image1.shape[1]), mode="reflect",
                anti_aliasing=False).astype("bool") * 2

image = np.zeros((600, 600))
image[30:30 + n_x, 10:10 + n_y] = image1
image[320:320 + n_x, 350:350 + n_y] = image2[:, ::-1]

data = generate_samples(image, n_y, n_samples=60000, random_state=43)
X = data[0]
Y = data[1]
def change_resolution_to(image, resolution, desired_resolution,
                         pad_to_match_res=True, err_to_higher_res=True,
                         return_final_resolution=False, **resize_kwargs):
    # Validate inputs.
    image = _validate_ndarray(image)
    resolution = _validate_resolution(resolution, image.ndim)
    desired_resolution = _validate_resolution(desired_resolution, image.ndim)

    # Compute final_shape and final_resolution.
    # Take the exact floating point final_shape that would be necessary to
    # achieve desired_resolution.
    final_shape = image.shape * resolution / desired_resolution

    # Adjust final_shape and compute final_resolution according to specification.
    if pad_to_match_res:
        # Guarantee realization of desired_resolution at the possible expense
        # of maintaining the true shape (shape * resolution).
        # Note: "true shape" implies real-valued shape: the product of image
        # shape and corresponding resolution.
        final_shape = np.ceil(final_shape)
        # Pad image evenly until image.shape * resolution >= final_shape * desired_resolution.
        minimum_image_padding = np.ceil(
            (final_shape * desired_resolution - image.shape * resolution) / resolution)
        pad_width = np.array(
            list(zip(np.ceil(minimum_image_padding / 2),
                     np.ceil(minimum_image_padding / 2))), int)
        old_true_shape = resolution * image.shape
        new_true_shape = desired_resolution * final_shape
        stat_length = np.maximum(
            1, np.ceil((desired_resolution -
                        ((new_true_shape - old_true_shape) / 2)) / resolution)).astype(int)
        stat_length = np.broadcast_to(stat_length, pad_width.T.shape).T
        image = np.pad(image, pad_width=pad_width, mode='mean',
                       stat_length=stat_length)  # Side effect: breaks alias.
        # final_resolution has been guaranteed to equal desired_resolution.
        final_resolution = desired_resolution
    else:
        # Guarantee the true shape (image.shape * resolution) is maintained at
        # the possible expense of achieving desired_resolution.
        if err_to_higher_res:
            # Round resolution up.
            final_shape = np.ceil(final_shape)
        else:
            # Round resolution down.
            final_shape = np.floor(final_shape)
        # Compute the achieved resultant resolution, or final_resolution.
        final_resolution = image.shape * resolution / final_shape
        # Warn the user if desired_resolution cannot be produced from image and resolution.
        if not np.array_equal(final_shape,
                              image.shape * resolution / desired_resolution):
            # If desired_resolution != final_resolution.
            # (The warning must reference `resolution`; the original used an
            # undefined name, xyz_resolution.)
            warnings.warn(
                message=f"Could not exactly produce the desired_resolution.\n"
                        f"resolution: {resolution}.\n"
                        f"desired_resolution: {desired_resolution}.\n"
                        f"final_resolution: {final_resolution}.",
                category=RuntimeWarning)

    # Note: if the function used for resampling below does not include
    # anti-aliasing in the case of downsampling, that ought to be performed
    # here. skimage.transform.resize does perform anti-aliasing by default.

    # Perform resampling.
    resampled_image = resize(image, final_shape, **resize_kwargs)

    return resampled_image
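# Usage sketch (hedged: assumes the _validate_* helpers accept plain tuples and
# arrays; the resolutions below are illustrative, e.g. mm per voxel):
#
#   img_2mm = change_resolution_to(img_1mm, resolution=(1.0, 1.0),
#                                  desired_resolution=(2.0, 2.0))
#   # With pad_to_match_res=True the output realizes exactly 2.0 units/voxel,
#   # padding with edge means as needed; otherwise the true physical extent is
#   # preserved and the achieved resolution may differ slightly.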
import os
from dnn_model import *
import skimage.transform as tsf

print("\n\n--------test on my own image--------\n")

my_image = "cat1.jpg"
my_label_y = [1]

fname = os.path.join(os.getcwd(), "images")
picpath = os.path.join(fname, my_image)
image = np.array(plt.imread(picpath))
# print(image.shape)
my_image = tsf.resize(image, (num_px, num_px), mode='constant').reshape(
    (1, num_px * num_px * 3)).T
# print(my_image.shape)
my_predicted_image = predict(my_image, my_label_y, parameters)

plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) +
      ", your algorithm predicts a \"" +
      classes[int(np.squeeze(my_predicted_image)), ].decode("utf-8") +
      "\" picture.")
                  optimizer='adam',
                  metrics=['accuracy'])
    return model

model = build_model()
model.summary()
model.load_weights('MNIST_WEIGHTS.h5')

# Running Inference
img = imread("two.jpg")
# resize to 28 x 28
imresize = resize(img, (28, 28), mode='constant')
# turn the image from color to gray
im_gray = rgb2gray(imresize)
# the colors of the original set are inverted, so we invert it here
# im_gray_invert = 255 - im_gray * 255
# treat color under threshold as black
# im_gray_invert[im_gray_invert <= 90] = 0
im_final = im_gray.reshape(1, 28, 28, 1)

start = process_time()
pred = model.predict(im_final)
print("Prediction :", pred)
def detect_cell_one_section(fn):
    if is_invalid(fn):
        return

    fn_output_dir = create_if_not_exists(os.path.join(output_dir, fn))
    sys.stderr.write('Processing image %s\n' % fn)

    # Load mask
    t = time.time()
    mask_tb = DataManager.load_thumbnail_mask_v2(stack=stack, fn=fn)
    mask = resize(mask_tb, metadata_cache['image_shape'][stack][::-1]) > .5
    sys.stderr.write('Load mask: %.2f\n' % (time.time() - t))

    if alg == 'myown':
        img_filename = DataManager.get_image_filepath(stack=stack, fn=fn,
                                                      resol='lossless', version='cropped')
        img = imread(img_filename)
        sys.stderr.write('Load image: %.2f\n' % (time.time() - t))

        t = time.time()
        im = rgb2gray(img)
        sys.stderr.write('Convert to gray: %.2f\n' % (time.time() - t))

        t = time.time()
        thresh = threshold_otsu(im)
        binary = im < thresh
        binary[~mask] = 0
        sys.stderr.write('threshold: %.2f\n' % (time.time() - t))

        t = time.time()
        dt = distance_transform_edt(binary)
        sys.stderr.write('distance transform: %.2f\n' % (time.time() - t))

        t = time.time()
        local_maxi = peak_local_max(dt, labels=binary,
                                    footprint=np.ones((10, 10)), indices=False)
        sys.stderr.write('local max: %.2f\n' % (time.time() - t))

        t = time.time()
        markers = label(local_maxi)
        sys.stderr.write('label: %.2f\n' % (time.time() - t))

        t = time.time()
        labelmap = watershed(-dt, markers, mask=binary)
        sys.stderr.write('watershed: %.2f\n' % (time.time() - t))
    elif alg == 'cellprofiler':
        labelmap = load_cell_data(stack=stack, fn=fn,
                                  what='image_inverted_labelmap_cellprofiler', ext='bp')
        labelmap[~mask] = 0
    elif alg == 'farsight':
        labelmap = load_cell_data(stack=stack, fn=fn,
                                  what='image_inverted_labelmap_farsight', ext='bp')
        labelmap[~mask] = 0
    else:
        # raising a bare string is invalid Python 3; raise a proper exception
        raise Exception('Algorithm not recognized.')

    t = time.time()
    props = regionprops(labelmap.astype(np.int32))
    sys.stderr.write('regionprops: %.2f\n' % (time.time() - t))

    valid_blob_indices = [i for i, p in enumerate(props)
                          if p.area > min_blob_area and p.area < max_blob_area]
    sys.stderr.write('%d blobs identified.\n' % len(valid_blob_indices))

    # Get blobs
    t = time.time()
    valid_blob_coords = [props[i].coords for i in valid_blob_indices]  # r,c
    fp = get_cell_data_filepath(stack=stack, fn=fn, what='blobCoords', ext='hdf')
    # Global coordinates of all pixels in the mask.
    save_hdf_v2(valid_blob_coords, fp)
    upload_to_s3(fp)
    sys.stderr.write('Save blob coords: %.2f\n' % (time.time() - t))

    # Generate masks
    t = time.time()
    cell_masks = []
    cell_mask_centers = []
    for i, coords in enumerate(valid_blob_coords):
        # bar.value = i
        ymin, xmin = coords.min(axis=0)
        ymax, xmax = coords.max(axis=0)
        # np.bool was removed from numpy; the builtin bool is equivalent here
        cell_mask = np.zeros((ymax + 1 - ymin, xmax + 1 - xmin), bool)
        cell_mask[coords[:, 0] - ymin, coords[:, 1] - xmin] = 1
        yc, xc = np.mean(np.where(cell_mask), axis=1)
        cell_masks.append(cell_mask)
        cell_mask_centers.append([xc, yc])

    fp = get_cell_data_filepath(stack=stack, fn=fn, what='blobMasks', ext='hdf')
    # binary mask of each blob.
    save_hdf_v2(cell_masks, fp)
    upload_to_s3(fp)
    fp = get_cell_data_filepath(stack=stack, fn=fn, what='blobMaskCenters', ext='bp')
    # blob centroid in bounding box
    bp.pack_ndarray_file(np.array(cell_mask_centers), fp)
    upload_to_s3(fp)
    sys.stderr.write('Save blob masks: %.2f\n' % (time.time() - t))

    # Other blob attributes
    t = time.time()
    # Must use serial rather than multiprocess because this is nested in a
    # worker process that is being parallelized.
# pool = Pool(NUM_CORES) # valid_blob_contours = pool.map(find_contour_worker, cell_masks) # pool.terminate() # pool.join() valid_blob_contours = [find_contour_worker(msk) for msk in cell_masks] fp = get_cell_data_filepath(stack=stack, fn=fn, what='blobContours', ext='hdf') # contour coordinates relative to the bounding box save_hdf_v2(valid_blob_contours, fp) upload_to_s3(fp) sys.stderr.write('Save blob contours, save: %.2f\n' % (time.time() - t)) t = time.time() valid_blob_orientations = np.array( [props[i].orientation for i in valid_blob_indices]) valid_blob_centroids = np.array( [props[i].centroid for i in valid_blob_indices])[:, ::-1] # r,c -> x,y valid_blob_majorAxisLen = np.array( [props[i].major_axis_length for i in valid_blob_indices]) valid_blob_minorAxisLen = np.array( [props[i].minor_axis_length for i in valid_blob_indices]) fp = get_cell_data_filepath(stack=stack, fn=fn, what='blobOrientations', ext='bp') # in radian, relative to horizontal axis of the image bp.pack_ndarray_file(valid_blob_orientations, fp) upload_to_s3(fp) fp = get_cell_data_filepath(stack=stack, fn=fn, what='blobCentroids', ext='bp') # centroid coordinates of each blob relative to the whole image bp.pack_ndarray_file(valid_blob_centroids, fp) upload_to_s3(fp) fp = get_cell_data_filepath(stack=stack, fn=fn, what='blobMajorAxisLen', ext='bp') # in pixels bp.pack_ndarray_file(valid_blob_majorAxisLen, fp) upload_to_s3(fp) fp = get_cell_data_filepath(stack=stack, fn=fn, what='blobMinorAxisLen', ext='bp') # in pixels bp.pack_ndarray_file(valid_blob_minorAxisLen, fp) upload_to_s3(fp) blob_contours_global = [(valid_blob_contours[i] - cell_mask_centers[i] + valid_blob_centroids[i]).astype(np.int) for i in range(len(valid_blob_coords))] blob_contours_global_fp = get_cell_data_filepath( stack=stack, fn=fn, what='blobContoursGlobal_%(alg)s' % {'alg': alg}, ext='hdf') # Contour coordinates of each blob relative to the whole image. save_hdf_v2(blob_contours_global, blob_contours_global_fp) upload_to_s3(blob_contours_global_fp) # Compute cell sizes cell_sizes = np.reshape(cell_masks, (cell_masks.shape[0], -1)).sum(axis=1) cell_sizes_fp = get_cell_data_filepath('cellSizes', stack=stack, sec=sec, ext='bp') bp.pack_ndarray_file(cell_sizes, cell_sizes_fp)
for f in files_names: images.append(skimage.data.imread(f)) labels.append(int(d)) return images, labels #DIRECTORYS for Test- & Train-Data ROOT_DIR = os.path.dirname(os.path.realpath(__file__)) train_dir = os.path.join(ROOT_DIR, "Training") test_dir = os.path.join(ROOT_DIR, "Testing") #TEST-DATA 28x28 in Grayscale train_images, train_labels = load_data(train_dir) test_images, test_labels = load_data(test_dir) #train data train_images = [transform.resize(image, (28, 28)) for image in train_images] train_images = np.array(train_images) train_images = rgb2gray(np.array(train_images)) train_images = train_images.astype(np.float32) train_labels = np.array(train_labels) #test data test_images = [transform.resize(image, (28, 28)) for image in test_images] test_images = np.array(test_images) test_images = rgb2gray(np.array(test_images)) test_images = test_images.astype(np.float32) test_labels = np.array(test_labels)
break # if the frame dimensions are empty, grab them if W is None or H is None: (H, W) = frm.shape[:2] #framecount = framecount+1 # clone the output frame, then convert it from BGR to RGB # ordering, resize the frame to a fixed 224x224, and then # perform mean subtraction output = frm.copy() while i < 30: rval, frame = vs.read() frame_counter += 1 if frame_counter == vs.get(cv2.CAP_PROP_FRAME_COUNT): frame_counter = 0 #Or whatever as long as it is the same as next line vs.set(cv2.CAP_PROP_POS_FRAMES, 0) frame = resize(frame, (160, 160, 3)) frame = np.expand_dims(frame, axis=0) if (np.max(frame) > 1): frame = frame / 255.0 frames[i][:] = frame i += 1 datav[0][:][:] = frames frames -= mean # make predictions on the frame and then update the predictions # queue #preds = model1.predict(datav) # print('Preds = :', preds) # total = (preds[0]+ preds[1]+preds[2] + preds[3]+ preds[4]+preds[5])
def valid_CT(self, args): """valid cyclegan""" init_op = tf.global_variables_initializer() self.sess.run(init_op) val_id_vec = [28, 29, 30] sample_files = r'dataset/InputNorm_{:0>2d}_T1.nii' gt_name = r'dataset/InputNorm_{:0>2d}_CT.nii' isCT = False namehd = 'synCT' input_val = 0 output_val = -1000 epochNum = 10 epochVec = np.arange(epochNum) dataInfoFile = open(r'dataset/trainInfo.txt', 'r') sourceInLines = dataInfoFile.readlines() dataInfoFile.close() dataInfo = [] for line in sourceInLines: temp1 = line.strip('\n') temp2 = temp1.split(' ') dataInfo.append(temp2) valid_dir = './valid' if not os.path.exists(valid_dir): os.makedirs(valid_dir) for epoch in epochVec: self.load_valid(args.checkpoint_dir, epoch) for val_id in val_id_vec: sliceNum = int(dataInfo[val_id - 1][3]) sliceVec = np.arange(sliceNum) imgTemp = nib.load(sample_files.format(val_id)) teResults = np.ones( [imgTemp.shape[0], args.fine_size0, args.fine_size1], dtype=np.int16) * int(output_val) inputImage = np.ones( [imgTemp.shape[0], args.fine_size0, args.fine_size1], dtype=np.int16) * int(input_val) gtImage = np.ones( [imgTemp.shape[0], args.fine_size0, args.fine_size1], dtype=np.int16) * int(output_val) for iSlicet in sliceVec: iSlice = iSlicet + int(dataInfo[val_id - 1][1]) - int( dataInfo[val_id - 1][5]) print('Processing image: id ' + str(val_id) + 'slice' + str(iSlicet)) sample_image = [ load_test_data(sample_files.format(val_id), isCT, iSlice, args.fine_size0, args.fine_size1) ] sample_image = np.array(sample_image).astype(np.float32) sample_image = sample_image.reshape( [1, args.fine_size0, args.fine_size1, 1]) if epoch == 0: gt_imageAll = nib.load(gt_name.format(val_id)) gt_image = gt_imageAll.get_data()[ int(iSlice), :, :].astype('int16') gtzm = gt_imageAll.get_header().get_zooms() gtsz = gt_imageAll.shape gt_resize_1 = args.fine_size1 gt_resize_0 = round(gtsz[1] * gtzm[1] * gt_resize_1 / (gtsz[2] * gtzm[2])) gt_pad_size = int(args.fine_size0) - gt_resize_0 gt_image = transform.resize(gt_image, (gt_resize_0, gt_resize_1), preserve_range=True) gt_image = np.pad( gt_image, ((int(gt_pad_size // 2), int(gt_pad_size) - int(gt_pad_size // 2)), (0, 0)), mode='constant', constant_values=output_val) gtImage[int(iSlice), :, :] = np.array(gt_image).astype( 'int16') input_imageAll = nib.load(sample_files.format(val_id)) input_image = input_imageAll.get_data()[ int(iSlice), :, :].astype('int16') inputzm = input_imageAll.get_header().get_zooms() inputsz = input_imageAll.shape input_resize_1 = args.fine_size1 input_resize_0 = round(inputsz[1] * inputzm[1] * input_resize_1 / (inputsz[2] * inputzm[2])) input_pad_size = int(args.fine_size0) - input_resize_0 input_image = transform.resize( input_image, (input_resize_0, input_resize_1), preserve_range=True) input_image = np.pad( input_image, ((int(input_pad_size // 2), int(input_pad_size) - int(input_pad_size // 2)), (0, 0)), mode='constant', constant_values=input_val) inputImage[int(iSlice), :, :] = np.array( input_image).astype('int16') fake_img = self.sess.run( self.testA, feed_dict={self.test_B: sample_image}) #fake_img_255 = np.exp((fake_img + 1.) * 4. * np.log(2)) - 1. fake_img_255 = (fake_img + 1.) * 127.5 if isCT: temp = fake_img_255 / 255. * (3500. - 0.) + 0. else: temp = fake_img_255 / 255. * (3500. + 1000.) - 1000. 
teResults[int(iSlice), :, :] = np.array(temp).astype( 'int16').reshape([args.fine_size0, args.fine_size1]) head_output = imgTemp.get_header() head_output.set_zooms([ head_output.get_zooms()[0] * args.fine_size1 / (head_output.get_zooms()[2] * imgTemp.shape[2]), 1.0, 1.0 ]) affine_output = imgTemp.affine affine_output[1][1] = np.sign(affine_output[1][1]) affine_output[0][0] = np.sign( affine_output[0][0]) * head_output.get_zooms()[0] saveResults = nib.Nifti1Image(teResults, affine_output, head_output) nib.save( saveResults, '{}/{}_{:0>2d}_epoch{}.nii'.format(valid_dir, namehd, val_id, epoch)) if epoch == 0: gtResults = nib.Nifti1Image(gtImage, affine_output, head_output) gt_path = os.path.join( valid_dir, '{}'.format(os.path.basename(gt_name).format(val_id))) nib.save(gtResults, gt_path) inputResults = nib.Nifti1Image(inputImage, affine_output, head_output) input_path = os.path.join( valid_dir, '{}'.format( os.path.basename(sample_files).format(val_id))) nib.save(inputResults, input_path)
def getViewData(self): from skimage.measure import block_reduce from skimage.transform import rescale, resize, downscale_local_mean ### Get pixel data from view if ("use_tiny_renderer" in self._config): cam_pos = [1, 1, 5] char_or_imitation_char = 1 img = self._sim.getPixels2(self._render_condition, cam_pos, self._zoom_, self._config["resize_window"][0], self._config["resize_window"][1]) img = np.array(img) img = np.reshape(img, (self._config["resize_window"][0], self._config["resize_window"][1], 3)) # print ("img shape: ", img.shape) img = img[self._config["image_clipping_area"][0]:self. _config["image_clipping_area"][0] + self._config["image_clipping_area"][2], self._config["image_clipping_area"][1]:self. _config["image_clipping_area"][1] + self._config["image_clipping_area"][3]] # print ("img shape after: ", img.shape) elif ("image_clipping_area" in self._config): img = np.array(self.getEnv().getPixels( self._config["image_clipping_area"][0], self._config["image_clipping_area"][1], self._config["image_clipping_area"][2], self._config["image_clipping_area"][3])) else: img = np.array(self.getEnv().getPixels(0, 0, 800, 450)) # assert(np.sum(img) > 0.0) ### reshape into image, colour last if ("skip_reshape" in self._config and (self._config["skip_reshape"] == True)): pass elif ("image_clipping_area" in self._config): # print ("img shape:", img.shape) img = np.reshape( img, (self._config["image_clipping_area"][3], self._config["image_clipping_area"][2], 3)) / 255.0 if 'resize_image' in self._config: img = resize(img, (self._config["resize_image"][1], self._config["resize_image"][2]), anti_aliasing=True) else: ### downsample image img = block_reduce( img, block_size=(self._config["downsample_image"][0], self._config["downsample_image"][1], self._config["downsample_image"][2]), func=np.mean) if ("add_img_noise" in self._config and (self._config["add_img_noise"] == True)): noise_ = np.random.randn( *(img.shape)) * self._config['add_img_noise'] img = img + noise_ ### convert to greyscale if ("convert_to_greyscale" in self._config and self._config["convert_to_greyscale"]): img = np.mean(img, axis=2) # assert(np.sum(img) > 0.0) ### Still not sure why this is upside down. image_ = np.zeros((img.shape)) for row in range(len(img)): image_[row] = img[len(img) - row - 1] return image_
def test(self, args): """Test cyclegan""" init_op = tf.global_variables_initializer() self.sess.run(init_op) if args.which_direction == 'AtoB': sample_files = r'dataset/InputNorm_T{:0>2d}_CT.nii' gt_name = r'dataset/InputNorm_T{:0>2d}_T1.nii' isCT = True namehd = 'synT1' input_val = -1000 output_val = 0 elif args.which_direction == 'BtoA': sample_files = r'dataset/InputNorm_T{:0>2d}_T1.nii' gt_name = r'dataset/InputNorm_T{:0>2d}_CT.nii' isCT = False namehd = 'synCT' input_val = 0 output_val = -1000 else: raise Exception('--which_direction must be AtoB or BtoA') epoch = 2 self.load_valid(args.checkpoint_dir, epoch) out_var, in_var = ( self.testB, self.test_A) if args.which_direction == 'AtoB' else (self.testA, self.test_B) tedataSize = 15 teIdVec = np.arange(tedataSize) + 1 dataInfoFile = open(r'dataset/testInfo.txt', 'r') sourceInLines = dataInfoFile.readlines() dataInfoFile.close() dataInfo = [] for line in sourceInLines: temp1 = line.strip('\n') temp2 = temp1.split(' ') dataInfo.append(temp2) for teId in teIdVec: sliceNum = int(dataInfo[teId - 1][3]) sliceVec = np.arange(sliceNum) imgTemp = nib.load(sample_files.format(teId)) teResults = np.ones( [imgTemp.shape[0], args.fine_size0, args.fine_size1], dtype=np.int16) * int(output_val) inputImage = np.ones( [imgTemp.shape[0], args.fine_size0, args.fine_size1], dtype=np.int16) * int(input_val) gtImage = np.ones( [imgTemp.shape[0], args.fine_size0, args.fine_size1], dtype=np.int16) * int(output_val) for iSlicet in sliceVec: iSlice = iSlicet + int(dataInfo[teId - 1][1]) - int( dataInfo[teId - 1][5]) print('Processing image: id ' + str(teId) + 'slice' + str(iSlicet)) sample_image = [ load_test_data(sample_files.format(teId), isCT, iSlice, args.fine_size0, args.fine_size1) ] sample_image = np.array(sample_image).astype(np.float32) sample_image = sample_image.reshape( [1, args.fine_size0, args.fine_size1, 1]) gt_imageAll = nib.load(gt_name.format(teId)) gt_image = gt_imageAll.get_data()[int(iSlice), :, :].astype( 'int16') gtzm = gt_imageAll.get_header().get_zooms() gtsz = gt_imageAll.shape gt_resize_1 = args.fine_size1 gt_resize_0 = round(gtsz[1] * gtzm[1] * gt_resize_1 / (gtsz[2] * gtzm[2])) gt_pad_size = int(args.fine_size0) - gt_resize_0 gt_image = transform.resize(gt_image, (gt_resize_0, gt_resize_1), preserve_range=True) gt_image = np.pad(gt_image, ((int(gt_pad_size // 2), int(gt_pad_size) - int(gt_pad_size // 2)), (0, 0)), mode='constant', constant_values=output_val) gtImage[int(iSlice), :, :] = np.array(gt_image).astype('int16') input_imageAll = nib.load(sample_files.format(teId)) input_image = input_imageAll.get_data()[ int(iSlice), :, :].astype('int16') inputzm = input_imageAll.get_header().get_zooms() inputsz = input_imageAll.shape input_resize_1 = args.fine_size1 input_resize_0 = round(inputsz[1] * inputzm[1] * input_resize_1 / (inputsz[2] * inputzm[2])) input_pad_size = int(args.fine_size0) - input_resize_0 input_image = transform.resize( input_image, (input_resize_0, input_resize_1), preserve_range=True) input_image = np.pad( input_image, ((int(input_pad_size // 2), int(input_pad_size) - int(input_pad_size // 2)), (0, 0)), mode='constant', constant_values=input_val) inputImage[int(iSlice), :, :] = np.array(input_image).astype( 'int16') fake_img = self.sess.run(out_var, feed_dict={in_var: sample_image}) #fake_img_255 = np.exp((fake_img + 1.) * 4. * np.log(2)) - 1. fake_img_255 = (fake_img + 1.) * 127.5 if isCT: temp = fake_img_255 / 255. * (3500. - 0.) + 0. else: temp = fake_img_255 / 255. * (3500. + 1000.) - 1000. 
teResults[int(iSlice), :, :] = np.array(temp).astype( 'int16').reshape([args.fine_size0, args.fine_size1]) head_output = imgTemp.get_header() head_output.set_zooms([ head_output.get_zooms()[0] * args.fine_size1 / (head_output.get_zooms()[2] * imgTemp.shape[2]), 1.0, 1.0 ]) affine_output = imgTemp.affine affine_output[1][1] = np.sign(affine_output[1][1]) affine_output[0][0] = np.sign( affine_output[0][0]) * head_output.get_zooms()[0] saveResults = nib.Nifti1Image(teResults, affine_output, head_output) nib.save(saveResults, '{}/{}_T{:0>2d}.nii'.format(args.test_dir, namehd, teId)) gtResults = nib.Nifti1Image(gtImage, affine_output, head_output) gt_path = os.path.join( args.test_dir, '{}'.format(os.path.basename(gt_name).format(teId))) nib.save(gtResults, gt_path) inputResults = nib.Nifti1Image(inputImage, affine_output, head_output) input_path = os.path.join( args.test_dir, '{}'.format(os.path.basename(sample_files).format(teId))) nib.save(inputResults, input_path)
from sklearn import datasets, metrics from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from skimage import io, color, feature, transform mnist = datasets.load_digits() images = mnist.images data_size = len(images) #Preprocessing images images = images.reshape(-1, 1) # Reshape the image to a 2D array because we are analizying a single example labels = mnist.target #Initialize Logistic Regression LR_classifier = LogisticRegression(C=0.01, penalty='l2', tol=0.01) #Training the data on only 75% of the dataset. Rest of the 25% will be used in testing the Lo LR_classifier.fit(images[:int((data_size / 4) * 3)], labels[:int((data_size / 4) * 3)]) #Load a custom image digit_img = io.imread('../media/digit.png') #Convert image to grayscale digit_img = color.rgb2gray(digit_img) #Resize the image to 28x28 digit_img = transform.resize(digit_img, (8, 8), mode="wrap") #Run edge detection on the image digit_edge = feature.canny(digit_img, sigma=5) digit_edge = digit_edge.flatten() #Testing the data prediction = LR_classifier.predict(digit_edge) print(prediction)
x_test_original.append(x) # Create inverted image (flip the grayscale so black is white and white is back) x_test_invert.append(util.invert(x)) # Add random noise to image x_test_noise.append(util.random_noise(x)) # Reshape image for next augmentations to work properly img = x.reshape(28, 28) # Create blurred image x_test_blur.append(filters.gaussian(img).reshape(-1)) # Create resized image img_resize = transform.resize(img, (18, 28)) img_resize = np.vstack((img_resize, zeros)) img_resize = np.vstack((zeros, img_resize)) x_test_resize.append(img_resize.reshape(-1)) # Create horizontal flipped test dataset x_test_hflip.append(np.flip(img, axis=1).copy().reshape(-1)) # Pass image lists through a MinMax Scaler to ensure pixel range is 0 to 255 mm_scaler = preprocessing.MinMaxScaler(feature_range=(0, 255)) x_test_original = mm_scaler.fit_transform(x_test_original) x_test_invert = mm_scaler.fit_transform(x_test_invert) x_test_noise = mm_scaler.fit_transform(x_test_noise) x_test_blur = mm_scaler.fit_transform(x_test_blur) x_test_resize = mm_scaler.fit_transform(x_test_resize) x_test_hflip = mm_scaler.fit_transform(x_test_hflip)
def preprocess(observation, last_observation): processed_observation = np.maximum(observation, last_observation) processed_observation = np.uint8( resize(rgb2gray(processed_observation), (FRAME_WIDTH, FRAME_HEIGHT)) * 255) return np.reshape(processed_observation, (1, FRAME_WIDTH, FRAME_HEIGHT))
encode = LabelEncoder() onehot = OneHotEncoder(sparse=False) y = onehot.fit_transform( encode.fit_transform(labels_data['breed']).reshape(-1, 1)) train_temp = np.zeros((1000, 224, 224, 3)) targets_temp = np.zeros((1000, 120)) #training begins for j in range(100): if j == 0: for i in range(1000): a = randrange(0, 10222) img = plt.imread('images_data/train/' + train_imgs_path[a]) train_temp[i, :] = resize(img, (224, 224)) targets_temp[i] = y[a] #full_model.load_weights("model.h5") full_model.fit(train_temp, targets_temp, epochs=1, batch_size=50, verbose=2) full_model.save_weights("model.h5") else: for i in range(1000): a = randrange(0, 10222) img = plt.imread('images_data/train/' + train_imgs_path[a]) train_temp[i, :] = resize(img, (224, 224)) targets_temp[i] = y[a] full_model.load_weights("model.h5")
train_path = 'stage1_train/' test_path = 'stage1_test/' train_ids = next(os.walk(train_path))[1] test_ids = next(os.walk(test_path))[1] X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8) Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool) print('Resizing training images and masks') for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)): path = train_path + id_ img = imread(path + '/images/' + id_ + '.png')[:, :, :IMG_CHANNELS] img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True) X_train[n] = img #Fill empty X_train with values from img mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool) for mask_file in next(os.walk(path + '/masks/'))[2]: mask_ = imread(path + '/masks/' + mask_file) mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True), axis=-1) mask = np.maximum(mask, mask_) Y_train[n] = mask # test images X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS),
# Evan Widloski - 2020-03-28 # test registration on upscaled AIA data from skimage.transform import resize from mas.strand_generator import StrandVideo, get_visors_noise from mas.tracking import guizar_multiframe, correlate_and_sum, shift_and_sum, guizar_upsample from mas.misc import combination_experiment from html_slider.html_slider import render_pandas import numpy as np from imageio import imread resolution_ratio = 2 fov_ratio = 2 scene = imread('scene.bmp') size = np.array((750, 750)) scene = resize(scene, size * resolution_ratio * fov_ratio) def experiment(*, max_count, background_noise, drift_velocity, frame_rate): # noise noise_model = get_visors_noise(background=background_noise) sv = StrandVideo( ccd_size=size, start=((1400, 1300)), scene=scene, max_count=max_count, noise_model=noise_model, drift_velocity=drift_velocity * 1e-3, resolution_ratio=resolution_ratio, fov_ratio=fov_ratio, frame_rate=frame_rate
for labelname in imageinfo: print(labelname) if labelname != 'AD': #选定标签 pathsaveimge1 = os.path.join(pathsaveimge, labelname) # 保存新图片的文件夹 pathnibimage1 = os.path.join(pathnibimage, labelname) # 要切割3D图片的子文件夹 for NIIname in os.listdir(pathnibimage1): #遍历所有的图片名称 if NIIname[-9:] == 'brain.nii': # 选中字符为'brain.nii'的名称 pathsaveimge2 = os.path.join(pathsaveimge1, NIIname[:-3]) #给保存的新图片命名 pathnibimage2 = os.path.join(pathnibimage1, NIIname, NIIname) #打开要切割的图片 if not os.path.exists(pathsaveimge2): os.makedirs(pathsaveimge2) #新建保存图片的文件夹 NIIimg = nib.load(pathnibimage2) #下载nii MRI img = NIIimg.get_data() #读取nii MRI ref_affine = NIIimg.affine #为保存图片—仿射 jianqie1 = jianqie3Dimg(img, aa=-1) jianqie2 = all(jianqie1) jianqie3 = all(jianqie2) for i, patch in enumerate(jianqie3): # patch = np.array(patch) mx = patch.max(axis=0).max(axis=0).max(axis=0) patch = np.array(patch) / mx patch = transform.resize(patch, (32, 40, 32), mode='constant') #resize到相同大小 patch = nib.Nifti1Image(patch, ref_affine) #仿射为nii文件 savepath = os.path.join(pathsaveimge2, str(i) + '_' + NIIname[:-4] + '.nii') #保存名称 nib.save(patch, savepath) #保存.nii文件
# Import the module and function from skimage.transform import resize # Set proportional height so its half its size height = int(dogs_banner.shape[0] / 2) width = int(dogs_banner.shape[1] / 2) # Resize using the calculated proportional height and width image_resized = resize(dogs_banner, (height, width), anti_aliasing=True) # Show the original and rotated image show_image(dogs_banner, 'Original') show_image(image_resized, 'Resized image')