def _create_dataset():
    """Build a small train/test dataset from the images bundled with the tests.

    Loads the prepped image/annotation PNGs, crops everything to 256x256,
    casts to float32, and scales intensities into [0, 1].

    Returns
    -------
    (x_train, x_test, y_train, y_test) : tuple of float32 ndarrays
        Annotations get a trailing singleton channel axis.
    """
    def _load(pattern):
        # Gather every PNG matching the pattern into one stacked array.
        return skio.ImageCollection(pattern).concatenate()

    x_train = _load("tests/example_dataset/images_prepped_train/*.png")
    y_train = _load("tests/example_dataset/annotations_prepped_train/*.png")
    x_test = _load("tests/example_dataset/images_prepped_test/*.png")
    y_test = _load("tests/example_dataset/annotations_prepped_test/*.png")

    # Crop to 256x256, add a channel axis to the annotations, then normalize.
    x_train = x_train[:, :256, :256, :].astype(np.float32) / 255.0
    y_train = y_train[:, :256, :256, None].astype(np.float32) / 255.0
    x_test = x_test[:, :256, :256, :].astype(np.float32) / 255.0
    y_test = y_test[:, :256, :256, None].astype(np.float32) / 255.0

    return x_train, x_test, y_train, y_test
def load_defect_data():
    """Load the defect image dataset described by the train/test label CSVs.

    Each CSV row has the form ``image_path,label``. Images are loaded as
    grayscale via ``convert_gray``; labels are parsed as small integers.

    Returns
    -------
    (X_train, y_train), (X_test, y_test) : tuple of (ndarray, ndarray) pairs

    Improvement over the original: the identical train/test CSV-parsing loops
    are factored into one helper.
    """
    def _read_labels(csv_path):
        # One "path,label" pair per line; strip the trailing newline only.
        paths, labels = [], []
        with open(csv_path, "r") as file:
            for line in file:
                image_path, image_label = line.split(',')
                paths.append(image_path)
                labels.append(image_label.strip('\n'))
        return paths, labels

    train_path, train_label = _read_labels("./data/train/label.csv")
    test_path, test_label = _read_labels("./data/test/label.csv")

    X_train = np.asarray(io.ImageCollection(train_path, load_func=convert_gray))
    X_test = np.asarray(io.ImageCollection(test_path, load_func=convert_gray))
    # NOTE(review): int8 restricts labels to [-128, 127] — confirm label range.
    y_train = np.asarray(train_label).astype(np.int8)
    y_test = np.asarray(test_label).astype(np.int8)

    return (X_train, y_train), (X_test, y_test)
def test_collection():
    """ImageCollection length must track the number of files, directly and
    through a full slice."""
    filenames = ['camera.png', 'color.png', 'multipage.tif']
    pattern = [os.path.join(data_dir, name) for name in filenames]

    # Two files -> two images.
    subset = sio.ImageCollection(pattern[:-1])
    assert len(subset) == 2
    assert len(subset[:]) == 2

    # All three files -> three images.
    full = sio.ImageCollection(pattern)
    assert len(full) == 3
    assert len(full[:]) == 3
def rotate_from_dir(imagepath, savepath, rotateflag=2):
    """Rotate every ``*.jpg`` under ``imagepath`` and save to ``savepath``.

    ``rotateflag`` selects the rotation-range loader (1..8; other values fall
    back to ``img_rotate4000`` / ``img_rotate3060`` as in the original intent).

    BUG FIX: the original used a run of independent ``if`` statements whose
    trailing ``else`` (bound only to the last ``if``) overwrote the collection
    chosen for flags 5-7 and 3-4. A dispatch table restores the intended
    one-to-one mapping. Also replaces ``np.str`` (removed in NumPy 1.24)
    with the builtin ``str``.
    """
    pattern = imagepath + '\\*.jpg'
    # Flag -> rotation loader mapping.
    loaders = {
        5: img_rotate140180,
        6: img_rotate180140,
        7: img_rotate14090,
        8: img_rotate9040,
        4: img_rotate90140,
        3: img_rotate6090,
        1: img_rotate0030,
    }
    if rotateflag > 4:
        load_func = loaders.get(rotateflag, img_rotate4000)
    else:
        load_func = loaders.get(rotateflag, img_rotate3060)
    coll = io.ImageCollection(pattern, load_func=load_func)

    # Save each rotated image with the flag and index in the file name.
    for i in range(len(coll)):
        io.imsave(
            savepath + '\\rs_' + str(rotateflag) + '_' + str(i) + '.jpg',
            coll[i])
def marcm_save_CSVs_RGB_images_overlapping_regions_three_channel(
    Bin_C0_Dir="Binaries_C0",
    Bin_C1_Dir="Binaries_C1",
    Bin_C2_Dir="Binaries_C2",
    C2_overlap_threshold=0.3,
    C1_overlap_threshold=0.5,
    pixel_size=0.4,
    csv_save_dir="CSVs_C0_in_C1C2",
    RGB_save_dir="RGB_C0_overlapping_regions",
):
    """For each matched triple of binary images, compute C0-in-C1/C2 overlap
    tables and save both a CSV and an RGB overlap visualization.

    Parameters
    ----------
    Bin_C0_Dir, Bin_C1_Dir, Bin_C2_Dir : str
        Directories of per-channel binary ``*.tif*`` images; all three must
        contain the same files in the same order.
    C2_overlap_threshold, C1_overlap_threshold : float
        Overlap fractions forwarded to ``in_region_three_channel``.
    pixel_size : float
        Physical pixel size forwarded to ``in_region_three_channel``.
        # assumes micrometres per pixel — TODO confirm with callee.
    csv_save_dir, RGB_save_dir : str
        Output directories; created here if missing.

    Returns
    -------
    OrderedDict mapping image name -> overlap DataFrame, or None if the
    input directories are inconsistent.
    """
    # Load the three channels with the shared 2-D reader.
    ic_C0 = io.ImageCollection(
        load_pattern=os.path.join(Bin_C0_Dir, "*.tif*"), load_func=read_image_2d
    )
    ic_C1 = io.ImageCollection(
        load_pattern=os.path.join(Bin_C1_Dir, "*.tif*"), load_func=read_image_2d
    )
    ic_C2 = io.ImageCollection(
        load_pattern=os.path.join(Bin_C2_Dir, "*.tif*"), load_func=read_image_2d
    )
    # The three collections are zipped below, so file count and order must
    # agree across directories; bail out (returning None) otherwise.
    condition, img_names = test_imagecollections_same_files_and_order(
        ic_C0, ic_C1, ic_C2
    )
    if not condition:
        print(
            f"""{Bin_C0_Dir}, {Bin_C1_Dir}, {Bin_C2_Dir} do not contain
            the same number of files or they are not in the same order."""
        )
        return
    # Ensure output directories exist (single level only).
    if not os.path.isdir(csv_save_dir):
        os.mkdir(csv_save_dir)
    if not os.path.isdir(RGB_save_dir):
        os.mkdir(RGB_save_dir)
    Dict_DFs = OrderedDict()
    for C0_img, C1_img, C2_img, names in zip(ic_C0, ic_C1, ic_C2, img_names):
        # Per-image overlap statistics table.
        DF = in_region_three_channel(
            C0_img=C0_img,
            C1_img=C1_img,
            C2_img=C2_img,
            C1_overlap_threshold=C1_overlap_threshold,
            C2_overlap_threshold=C2_overlap_threshold,
            pixel_size=pixel_size,
        )
        Dict_DFs[names] = DF
        DF.to_csv(os.path.join(csv_save_dir, names + ".csv"))
        # Side-effect output: RGB rendering of the overlapping regions.
        RGB_img = create_RGB_image_overlapping_regions(DF, C0_img_input=C0_img)
        io.imsave(os.path.join(RGB_save_dir, names + ".tiff"), RGB_img)
    return Dict_DFs
def _predict_on_sample(sample, network, tiramisu_model, weights):
    """Predicts values on sample, using the chosen network and weights.

    Parameters
    ----------
    sample : dict
        Sample descriptor with keys 'path', 'folder', 'registered_path'
        and optionally 'file_ext' (defaults to ``const.EXT_SAMPLE``).
    network : str
        Network results to compare with the gold standard.
    tiramisu_model : str or None
        Tiramisu model to be used.
    weights : str or None
        File containing weight coefficients to be used during prediction.

    Returns
    -------
    None
    """
    print(f"# Now reading sample {sample['path']}.")
    # Fall back to the default extension whether 'file_ext' is missing or
    # falsy. need to check how to deal with this better.
    try:
        if not sample['file_ext']:
            sample['file_ext'] = const.EXT_SAMPLE
    except KeyError:
        sample['file_ext'] = const.EXT_SAMPLE
    pattern = os.path.join(sample['path'], f"*{sample['file_ext']}")
    data_sample = io.ImageCollection(load_pattern=pattern)
    print('# Processing...')
    folder = os.path.join(utils.prediction_folder(network, tiramisu_model),
                          sample['folder'])
    utils.process_sample(folder, data=data_sample, weights=weights,
                         network=network)
    # Optionally repeat the prediction on the registered version of the
    # sample, saved under a "<folder>_REG" sibling directory.
    if sample['registered_path']:
        pattern = os.path.join(sample['registered_path'], '*' + const.EXT_REG)
        data_sample = io.ImageCollection(load_pattern=pattern)
        print('# Processing registered sample...')
        folder = os.path.join(utils.prediction_folder(network, tiramisu_model),
                              f"{sample['folder']}_REG")
        utils.process_sample(folder, data=data_sample, weights=weights,
                             network=network)
    return None
def valData(self):
    """Load validation images and masks, add a channel axis, and run them
    through ``adjustData``."""
    def _load(folder):
        coll = io.ImageCollection(self.val_path + folder + '/*',
                                  load_func=self.imread_convert)
        # (N, H, W) -> (N, H, W, 1): single-channel input for the model.
        return np.expand_dims(np.array(coll), 3)

    image_collection = _load(self.image_folder)
    mask_collection = _load(self.mask_folder)
    print(mask_collection.shape)
    return self.adjustData(image_collection, mask_collection)
def polar2_from_dir(imagepath, savepath, flag=1):
    """Apply the polar transform selected by ``flag`` (1..7) to every
    ``*.jpg`` under ``imagepath`` and save the results to ``savepath``.

    BUG FIX: the original used independent ``if`` statements; the trailing
    ``else`` belonged only to the ``flag == 7`` check, so for flags 1-6 the
    chosen collection was immediately overwritten with ``img_polar2_1``.
    A dispatch table restores the intended mapping (unknown flags still fall
    back to ``img_polar2_1``). Also replaces ``np.str`` (removed in
    NumPy 1.24) with the builtin ``str``.
    """
    pattern = imagepath + '\\*.jpg'
    loaders = {
        1: img_polar2_1,
        2: img_polar2_2,
        3: img_polar2_3,
        4: img_polar2_4,
        5: img_polar2_5,
        6: img_polar2_6,
        7: img_polar2_7,
    }
    coll = io.ImageCollection(pattern,
                              load_func=loaders.get(flag, img_polar2_1))

    # Save every transformed image, indexed by position in the collection.
    for i in range(len(coll)):
        io.imsave(savepath + '\\plr2_' + str(i) + '.jpg', coll[i])
def load_data_from_folder(self, dir):
    """Load every ``*.jpg`` in ``dir`` and derive per-image labels.

    The label is the slice of the file name between the directory prefix
    and the first underscore.

    Returns
    -------
    (data, labels) : (ndarray of images, ndarray of str labels)
    """
    # Read all images into one collection, then stack into a single array.
    collection = io.ImageCollection(dir + "*.jpg",
                                    load_func=self.imread_convert)
    data = io.concatenate_images(collection)

    # Extract labels from the image file names.
    labels = np.array(collection.files)
    for idx, fname in enumerate(labels):
        underscore = re.search("_", fname)
        labels[idx] = fname[len(dir):underscore.start()]

    return (data, labels)
def create_dict_of_binary_masks(
    input_dir="Prob_Map_C1",
    thresh_method=filters.threshold_otsu,
    binary_method_list=None,
    min_area=1000,
):
    """Threshold each probability map in ``input_dir`` and post-process it
    into a cleaned binary mask.

    Parameters
    ----------
    input_dir : str
        Directory of ``*.tiff`` probability maps read via ``read_prob_image``.
    thresh_method : callable
        Thresholding function applied to each image (default Otsu).
    binary_method_list : list of [method, selem] pairs or None
        Morphological post-processing steps; ``None`` selects the original
        erosion/opening/fill-holes/erosion pipeline.
    min_area : int
        Minimum region area kept by ``extract_regions_from_binary``.

    Returns
    -------
    OrderedDict mapping file name -> cleaned binary mask.

    FIX: the default pipeline was a mutable default argument whose selems
    were built at import time; it is now constructed inside the call.
    """
    if binary_method_list is None:
        binary_method_list = [
            [morphology.binary_erosion, morphology.selem.diamond(1)],
            [morphology.binary_opening, morphology.selem.diamond(3)],
            [scipy_morphology.binary_fill_holes, morphology.selem.diamond(4)],
            [morphology.binary_erosion, morphology.selem.diamond(1)],
        ]

    ic = io.ImageCollection(
        load_pattern=os.path.join(input_dir, "*.tiff"), load_func=read_prob_image
    )
    dict_binary_masks = OrderedDict()
    for i, files in enumerate(ic.files):
        # Threshold, clean up morphologically, then keep only large regions.
        thresh_value = thresh_method(ic[i])
        binary_image = ic[i] > thresh_value
        edited_binary_image = apply_binary_methods(
            binary_image, binary_method_list=binary_method_list
        )
        region_list = extract_regions_from_binary(
            edited_binary_image, min_area=min_area
        )
        dict_binary_masks[files] = create_binary_image_from_region_list(
            edited_binary_image, region_list=region_list
        )
    return dict_binary_masks
def create_img_dict_from_folder(label_img_re=r"\w\dg\d\d?", **kwargs):
    """Build a dict mapping a label extracted from each file name to its image.

    Parameters
    ----------
    label_img_re : str
        Regex whose first match in the file name becomes the dict key.
    **kwargs
        Forwarded to ``io.ImageCollection`` (e.g. ``load_pattern``).

    Returns
    -------
    dict of label -> image.

    FIX: ``re.search`` returns None when a file name does not match, and the
    original's ``re.search(...)[0]`` then raised TypeError; non-matching
    files are now skipped.
    """
    img_collection = io.ImageCollection(**kwargs)
    labelled_img_dict = {}
    for i, file in enumerate(img_collection.files):
        match = re.search(label_img_re, file)
        if match is not None:
            labelled_img_dict[match[0]] = img_collection[i]
    return labelled_img_dict
def loadData(): csv_path = '../data/malignancy_labels.csv' # csv_path = '../data/test_uid_img_roi_labels.csv' #TEST # csv_path = '../data/uid_img_roi_labels.csv' Y_train = [] with open(csv_path, 'r') as f1: reader = csv.reader(f1) for row in reader: Y_train.append(row) f1.close() str = "../data/Test_img/*.jpg" #TEST # str = "../data/img_data3/*.jpg" coll = io.ImageCollection(str) print(len(coll)) X_train = [] for i in range(0, len(coll)): print(i + 1) img = coll[i] # print(img.shape) # jishenbianhao3 = [] # 将单通道的灰度图像,变成三通道的灰度图像,因为这样的检测效果会更好 # for x in range(3): # jishenbianhao3.append(img) # jishenbianhao3 = np.array(jishenbianhao3).transpose([1, 2, 0]) # img1 = jishenbianhao3 # print(img1.shape) X_train.append(img) X = np.array(X_train) Y = np.array(Y_train) # index = np.arange(400)#TEST _X_train, _X_test, _Y_train, _Y_test = train_test_split(X, Y, test_size=0.30, random_state=42) del X_train,Y_train print(gc.collect()) return _X_train, _Y_train,_X_test,_Y_test """
def copyFile(fileDir, tarDir):
    """Copy 500 random image/label pairs from each of the subfolders 2..5.

    For each i in 2..5, samples 500 names from ``fileDir + str(i)`` and
    copies the ``.png`` plus its matching ``.txt`` label (from the global
    ``docDir``) into ``tarDir`` and the global ``tarDocDir``.

    BUG FIX: removed ``coll = io.ImageCollection(suffix)`` — ``suffix`` is
    undefined in this scope (NameError at runtime) and the collection was
    never used.
    """
    for i in range(2, 6):
        imagedir = fileDir + str(i) + '/'
        labeldir = docDir + str(i) + '/'
        pathDir = os.listdir(imagedir)
        num = 500  # pairs to sample per folder
        sample = random.sample(pathDir, num)
        for name in sample:
            label = name.replace('.png', '.txt')
            print(name)
            print(label)
            shutil.copyfile(imagedir + name, tarDir + name)
            shutil.copyfile(labeldir + label, tarDocDir + label)
def test_imread_collection_single_MEF():
    """Reading a multi-extension FITS file directly must equal an explicit
    per-extension ImageCollection."""
    io.use_plugin('fits')
    testfile = os.path.join(data_dir, 'multi.fits')
    ic1 = io.imread_collection(testfile)
    # Same file, but with the three extensions spelled out explicitly.
    frames = [(testfile, n) for n in (1, 2, 3)]
    ic2 = io.ImageCollection(frames, load_func=fplug.FITSFactory)
    assert _same_ImageCollection(ic1, ic2)
def copyFile(fileDir, tarDir):
    """Move 500 random image/label pairs from ``fileDir`` to ``tarDir``.

    Each sampled ``.png`` and its matching ``.txt`` label (from the global
    ``docDir``) are copied to ``tarDir`` / global ``tarDocDir`` and then
    deleted from the source, i.e. a move.

    BUG FIX: removed ``coll = io.ImageCollection(str)`` — it passed the
    builtin ``str`` type as a load pattern and its result was never used.
    """
    pathDir = os.listdir(fileDir)
    num = 500  # pairs to move
    sample = random.sample(pathDir, num)
    for name in sample:
        label = name.replace('.png', '.txt')
        print(name)
        print(label)
        shutil.copyfile(fileDir + name, tarDir + name)
        shutil.copyfile(docDir + label, tarDocDir + label)
        os.remove(fileDir + name)
        os.remove(docDir + label)
def test6():
    """Batch-convert the PNGs in ``data_dir`` to 256x256 grayscale JPEGs,
    then display two of them.

    FIX: ``np.str`` was removed in NumPy 1.24; the builtin ``str`` is used
    for the output file index instead.
    """
    def convert_gray(f):
        # Load, report shapes, convert to grayscale and resize to 256x256.
        rgb = io.imread(f)
        gray = color.rgb2gray(rgb)
        print("fname is:%s, shape is:%s, gray shape is:%s" %
              (f, rgb.shape, gray.shape))
        dst = transform.resize(gray, (256, 256))
        return dst

    input_path = data_dir + '/*.png'
    coll = io.ImageCollection(input_path, load_func=convert_gray)

    output_path = "./output"
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    for i in range(len(coll)):
        io.imsave(output_path + '/' + str(i) + '.jpg', coll[i])
    print("has %d images" % len(coll))

    io.imshow(coll[10])
    plt.show()  # miss it at the first time
    # TODO: the window behave is strange
    plt.figure(num='collection')
    plt.subplot(1, 2, 1)
    plt.title('pic 10')
    plt.imshow(coll[10])
    plt.subplot(1, 2, 2)
    plt.title('pic 11')
    plt.imshow(coll[11])
    plt.show()
def Test(opts):
    """Run a trained model over the test images and save the predictions.

    Parameters
    ----------
    opts : namespace with modelPath, dataDir, dataType, ext, batchSize,
        verbosity and outDir attributes.

    FIX: the original called ``load_model`` twice (before and after loading
    the images); the model is now loaded once.
    """
    # Load the trained model once.
    model = load_model(opts.modelPath, compile=False)

    # Input pathnames, extension-filtered and sorted so outputs pair
    # deterministically with their source file names.
    directory = os.path.join(opts.dataDir, opts.dataType)
    fnamesX = sorted(os.listdir(os.path.join(directory, 'X')))
    pathnamesX = [
        os.path.join(directory, 'X', f) for f in fnamesX
        if f.split('.')[-1] in opts.ext
    ]
    # fnamesY = sorted(os.listdir(os.path.join(directory,'Y')))
    # pathnamesY = [os.path.join(directory,'Y',f) for f in fnamesY if f.split('.')[-1] in opts.ext]

    imgsX = io.ImageCollection(load_pattern=pathnamesX)
    # (N, H, W) -> (N, H, W, 1) float images.
    imgsX = img_as_float(imgsX.concatenate()[:, :, :, np.newaxis])
    print(imgsX.shape)

    # Predict and save one image per input file.
    predY = model.predict(imgsX,
                          batch_size=opts.batchSize,
                          verbose=opts.verbosity)
    CheckAndCreate(opts.outDir)
    for predY_, fnameX in zip(predY, fnamesX):
        io.imsave(os.path.join(opts.outDir, fnameX), predY_[:, :, 0])
    return
def polar1_from_dir(imagepath, savepath):
    """Apply the polar transform to every ``*.jpg`` under ``imagepath`` and
    save the results to ``savepath``.

    FIX: ``np.str`` was removed in NumPy 1.24; the builtin ``str`` is used.
    NOTE(review): ``scipy.misc.toimage`` was removed from SciPy (1.2+);
    this call must be ported (e.g. to PIL ``Image.fromarray`` with explicit
    rescaling) to run on a modern SciPy — flagged, not silently replaced,
    because ``toimage`` rescales intensities.
    """
    pattern = imagepath + '\\*.jpg'
    coll = io.ImageCollection(pattern, load_func=img_polar)

    # Save each transformed image via scipy's (legacy) PIL bridge.
    for i in range(len(coll)):
        img_name = savepath + '\\plr1_' + str(i) + '.jpg'
        scipy.misc.toimage(coll[i]).save(img_name)
def save_animation(data, _title=None, _path=None):
    """Save ``data`` as an animated GIF showing old/new frames side by side.

    Parameters
    ----------
    data : sequence of dicts
        Each element has 'oldData' and 'newData' image arrays of equal height.
    _title : str
        GIF file name (without extension).
    _path : str
        Output directory; created if it does not exist.

    FIX: the ``os.stat``/``except FileNotFoundError`` dance is replaced by
    the idiomatic ``os.makedirs(..., exist_ok=True)``.
    """
    class IMGLoader:
        # Adapter so ImageCollection can "load" frames from memory.
        ic = data

        def __call__(self, frame):
            # Old and new frames stacked horizontally.
            return np.hstack([self.ic[frame]['oldData'],
                              self.ic[frame]['newData']])

    frames = range(len(data))
    img_collection = io.ImageCollection(frames, load_func=IMGLoader())

    fig, ax = plt.subplots()
    shown = plt.imshow(img_collection[0], cmap='gray')
    plt.axis('off')

    def update(i):
        # Swap in frame i without recreating the artist.
        shown.set_data(img_collection[i])
        return shown

    anim = FuncAnimation(fig, update, frames=frames, interval=10, repeat=True)

    # Prepare the output directory.
    os.makedirs(_path, exist_ok=True)
    filename = os.path.join(_path, _title + '.gif')
    anim.save(filename, writer='imagemagick', fps=30)
def LoadData(number):
    """Load one of four face datasets as flattened grayscale row vectors.

    Parameters
    ----------
    number : int
        1 = Yale faces (11 images/class), 2 = ORL (10/class),
        3 = JAFFE (20/class), 4 = YaleB (explicit per-class counts).

    Returns
    -------
    (data, label) : (np.matrix of shape (N, H*W), np.matrix column of labels)

    Raises
    ------
    ValueError
        For an unknown ``number`` (the original fell through with ``path``
        unassigned and crashed later with NameError).
    """
    paths = {
        1: '/Users/zhuxiaoxiansheng/Desktop/日常/数据集/yale_faces/*.bmp',
        2: '/Users/zhuxiaoxiansheng/Desktop/日常/数据集/orl_faces_full/*.pgm',
        3: '/Users/zhuxiaoxiansheng/Desktop/日常/数据集/jaffe/*.tiff',
        4: '/Volumes/TOSHIBA EXT/数据集/YaleB/*.pgm',
    }
    try:
        path = paths[number]
    except KeyError:
        raise ValueError("number must be 1, 2, 3 or 4, got %r" % (number,))

    pictures = io.ImageCollection(path)
    data = []
    for i in range(len(pictures)):
        picture = skimage.color.rgb2gray(pictures[i])
        # Flatten each image into a single (H*W,) row vector.
        data.append(np.ravel(picture.reshape((1, picture.shape[0] * picture.shape[1]))))

    # Labels follow a fixed images-per-class layout for each dataset.
    label = []
    if number == 1:
        for i in range(len(data)):
            label.append(int(i / 11))
    elif number == 2:
        for i in range(len(data)):
            label.append(int(i / 10))
    elif number == 3:
        for i in range(len(data)):
            label.append(int(i / 20))
    elif number == 4:
        # YaleB has uneven class sizes, so they are listed explicitly.
        label = [0]*64+[1]*64+[2]*64+[3]*64+[4]*64+[5]*64+[6]*64+[7]*64+[8]*64+[9]*64+[10]*60+[11]*59+[12]*60+[13]*63+[14]*62+[15]*63+[16]*63+[17]*64+[18]*64+[19]*64+[20]*64+[21]*64+[22]*64+[23]*64+[24]*64+[25]*64+[26]*64+[27]*64+[28]*64+[29]*64+[30]*64+[31]*64+[32]*64+[33]*64+[34]*64+[35]*64+[36]*64+[37]*64

    return np.matrix(data), np.matrix(label).T
def main(_):
    """Filter thumbnails by defect label and export the kept images plus
    their spreadsheet rows to a 'useful_data' folder/workbook."""
    # file_path = "/Volumes/TOSHIBA EXT/final/try"
    file_path = "D:\\final\\thumb"
    out_file_name = "D:\\final\\useful_data\\"
    png = file_path + '/*.png'
    # xml_path = "/Volumes/TOSHIBA EXT/final/final.xlsx"
    xml_path = "D:\\final\\final.xlsx"
    # Column 3 of the first sheet holds the defect labels (header skipped).
    labels_xml = xlrd.open_workbook(xml_path)
    labels_table = labels_xml.sheets()[0]
    defect = labels_table.col_values(3)[1:]
    labels = load_label(defect)
    workbook = xlwt.Workbook(encoding='ascii')
    worksheet = workbook.add_sheet("sheet1")
    coll = io.ImageCollection(png)
    # Copy the 19-column header row into the output sheet.
    for n in range(19):
        worksheet.write(0, n, labels_table.row_values(0)[n])
    m = 1  # next output row index
    # NOTE(review): 16950 is a hard-coded row count — confirm it matches the
    # spreadsheet; also 'n' here is left over from the header loop above.
    for i in range(16950):
        print("i=", i, "m=", m, "n=", n)
        # Keep only rows whose label is not 1: save the image and its row.
        if labels[i] != 1:
            image = load_photo(coll, i)
            cv2.imwrite(out_file_name + str(i) + ".png", image)
            for n in range(19):
                worksheet.write(m, n, labels_table.row_values(i + 1)[n])
            m = m + 1
    workbook.save('D:\\final\\useful_data\\useful_data.xlsx')
def shear_from_dir(imagepath, savepath, shearflag=2):
    """Shear/zoom every ``*.jpg`` under ``imagepath`` and save to ``savepath``.

    ``shearflag``: 3 -> ``img_shear43``, 1 -> ``img_shear41``, anything
    else -> ``img_zoom42``.

    BUG FIX: the original's trailing ``else`` was bound only to the
    ``shearflag == 1`` check, so for ``shearflag == 3`` the chosen collection
    was immediately overwritten with ``img_zoom42``. Also replaces ``np.str``
    (removed in NumPy 1.24) with the builtin ``str``.
    """
    pattern = imagepath + '\\*.jpg'
    if shearflag == 3:
        load_func = img_shear43
    elif shearflag == 1:
        load_func = img_shear41
    else:
        load_func = img_zoom42
    coll = io.ImageCollection(pattern, load_func=load_func)

    # Save each transformed image with the flag and index in the name.
    for i in range(len(coll)):
        io.imsave(
            savepath + '\\sr_' + str(shearflag) + '_' + str(i) + '.jpg',
            coll[i])
def getImage(str):
    """Load every image matching the glob pattern ``str`` and report the
    count.

    Note: the parameter shadows the builtin ``str``; kept for caller
    compatibility.
    """
    collection = io.ImageCollection(str)
    print(len(collection))
    return collection
def images(path):
    """Build an ImageCollection from the module-level ``labels``/``files``
    lists, one path per file under ``path``."""
    paths = ["%s/%s/%s" % (path, labels[i], files[i])
             for i in range(len(files))]
    return io.ImageCollection(paths)
def img_read(sub_path):
    """Load all ``*.jpg`` images under ``sub_path`` into a single ndarray.

    BUG FIX: the original built the glob from a module-level ``path``
    variable and ignored its ``sub_path`` parameter entirely.
    """
    pattern = sub_path + '/*.jpg'
    dataX = io.ImageCollection(pattern)
    # Wrap-and-unwrap forces the lazy collection into a stacked ndarray.
    train_xdata = np.array([dataX])
    train_xdata = train_xdata[0]
    return train_xdata
def build_moment_dataset(folder):
    """Build a moment-feature dataset from image alpha channels.

    Each image's boolean alpha mask yields one positive example and its
    logical complement one negative example; 9 features per example come
    from ``gen_features``. Returns a stratified 50/50 train/test split.

    BUG FIX: ``just_alpha`` declared a second required parameter
    (``img_num``) that ``io.ImageCollection`` never passes to a
    ``load_func`` (it is called with the file path only), so every load
    raised TypeError.
    """
    from sklearn import model_selection

    def just_alpha(file):
        # Keep only the alpha channel, as a boolean mask.
        img = io.imread(file)
        return np.bool_(np.squeeze(img[:, :, 3]))

    ic = io.ImageCollection(folder, load_func=just_alpha)

    X_pos = np.zeros(shape=(len(ic), 9), dtype='float')
    y_pos = np.ones(shape=(len(ic)), dtype='int')
    X_neg = np.zeros_like(X_pos)
    y_neg = np.zeros_like(y_pos)

    for i in range(len(ic)):
        # Positive moments from the mask, negative from its complement.
        alpha = ic[i]
        X_pos[i, :] = gen_features(alpha)
        X_neg[i, :] = gen_features(np.logical_not(alpha))

    X = np.concatenate((X_neg, X_pos))
    y = np.concatenate((y_neg, y_pos))

    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, y, train_size=.5, shuffle=True, stratify=y)
    return X_train, X_test, y_train, y_test
def piliangzhixing():  # for batch processing (Python 2 — print statements)
    """Run recognition over every image in ./tes one at a time and append
    each result to a timestamped txt file."""
    pic_name = file_name('./tes')  # read the file names of images to recognize
    # NOTE(review): both loops use the variable 'i', so the inner loop
    # clobbers the outer index; 'picture_name[i]' below then indexes a
    # character of a file name — looks unintended, confirm with author.
    for i in range(0, len(pic_name)):
        picture_name = pic_name[i]
        # batch-read the images into coll
        coll = io.ImageCollection('./tes/*.jpg')
        print '图片的数量', len(coll)
        for i in range(0, len(coll)):
            # Stage one image at a fixed path, run recognition, clean up.
            io.imsave('./linshi/test.jpg', coll[i])
            zhixing()
            os.remove('./linshi/test.jpg')
            #print "全局变量YanSe",YanSe
            # append the recognition result to the txt file
            fp = open('./result/test.txt', 'a+')
            fp.write(picture_name[i] + ' ' + YanSe + '\n')
            fp.close()
    # rename the result txt with a timestamp
    result_txt_name = time.strftime('%Y%m%d_%H_%M',
                                    time.localtime(time.time()))
    result_name = result_txt_name + '.txt'
    os.rename('./result/test.txt', './result/' + result_name)
def load_stack(inputDir):
    """Loads .jpg files from a directory and concatenates into an image stack\
    using io.ImageCollection."""
    imageFiles = glob.glob(os.path.join(inputDir, '*.jpg'))
    # NOTE(review): 'as_grey' is forwarded to the per-image loader; modern
    # scikit-image spells this 'as_gray' ('as_grey' was removed) — verify
    # against the pinned scikit-image version before upgrading.
    imageVolume = io.ImageCollection(imageFiles, as_grey=True).concatenate()
    return imageVolume
def main():
    """CLI entry point: open the given images in a viewer, either as
    separate layers or stacked into one image."""
    parser = argparse.ArgumentParser(usage=__doc__)
    parser.add_argument('images', nargs='*', help='Images to view.')
    parser.add_argument(
        '--layers',
        action='store_true',
        help='Treat multiple input images as layers.',
    )
    parser.add_argument(
        '-m',
        '--multichannel',
        help='Treat images as RGB.',
        action='store_true',
    )
    args = parser.parse_args()

    with gui_qt():
        viewer = Viewer()
        collection = io.ImageCollection(args.images, conserve_memory=False)
        if args.layers:
            # One viewer layer per input image.
            for frame in collection:
                viewer.add_image(frame, multichannel=args.multichannel)
        elif len(collection) > 0:
            # Single image as-is; several images stacked along a new axis.
            if len(collection) == 1:
                stacked = collection[0]
            else:
                stacked = np.stack(collection, axis=0)
            viewer.add_image(stacked, multichannel=args.multichannel)
def images(name, path):
    """Collect the resized BMP images listed in the module-level
    ``labelsInfo.ID`` for the given dataset ``name`` under ``path``."""
    filenames = ["%s/%sResized/%s.Bmp" % (path, name, i)
                 for i in labelsInfo.ID]
    return io.ImageCollection(filenames)