def reload(self, n=None):
    """Reload one or more images from file.

    Parameters
    ----------
    n : None or int
        The number of the image to reload. If None (default), all images
        in memory are reloaded. If `n` specifies an image not yet in
        memory, it is loaded.

    Returns
    -------
    None

    Notes
    -----
    `reload` is used to reload all images in memory when `as_grey` is
    changed.
    """
    if n is not None:
        n = self._check_imgnum(n)
        idx = n % len(self.data)
        self.data[idx] = imread(self.files[n], self.as_grey,
                                dtype=self._dtype)
    else:
        for idx, img in enumerate(self.data):
            if img is not None:
                self.data[idx] = imread(self.files[idx], self.as_grey,
                                        dtype=self._dtype)

def __getitem__(self, idx):
    assert isinstance(idx, int)
    img_name = os.path.join(self.root_dir, self.image_dir, f'{idx}.png')
    image = io.imread(img_name)
    msk_name = os.path.join(self.root_dir, self.mask_dir, f'{idx}.png')
    mask = io.imread(msk_name)
    sample = {'image': image, 'mask': mask}
    if self.transform:
        return self.transform(sample)
    return sample

def output_image_points_for_leaf_teeth(input_image, contour_tuples,
                                       index_list_a, index_list_b,
                                       output_suffix):
    """
    Outputs an image to file with points highlighted.

    :param input_image: the input image to draw on.
    :param contour_tuples: the contour tuples giving xy coords.
    :param index_list_a: list a that indexes contour tuples.
    :param index_list_b: list b that indexes contour tuples.
    :param output_suffix: the suffix for the output (not including .jpg).
    :return: None
    """
    # Open the input image.
    img = io.imread(input_image)

    # Iterate through the indexes and add a circle point for each tuple.
    for i in index_list_a:
        cv2.circle(img, contour_tuples[i], 3, thickness=-1, color=(255, 0, 0))

    # Iterate through the indexes and add a circle point for each tuple.
    for i in index_list_b:
        cv2.circle(img, contour_tuples[i], 3, thickness=-1, color=(0, 255, 0))

    # Save the image.
    io.imsave(input_image + "." + output_suffix + ".jpg", img)

def __getitem__(self, idx):
    img_name = os.path.join(self.root_dir, self.frame.iloc[idx, 0])
    image = torch.from_numpy(io.imread(img_name))
    # The label lives in the second column; index it directly rather than
    # casting a one-element Series, which newer pandas versions reject.
    empty = int(self.frame.iloc[idx, 1])
    image = image.permute(2, 0, 1).float()
    sample = (image, empty)
    return sample

def load_image(self, image, unchanged=False, anydepth=False):
    """Load image pixels as numpy array.

    The array is 3D, indexed by y-coord, x-coord, channel.
    The channels are in RGB order.
    """
    return io.imread(self._image_file(image), unchanged=unchanged,
                     anydepth=anydepth)

def write_QC(full_QC_model_path):
    with open(os.path.join(full_QC_model_path, 'Quality Control',
                           'QC_metrics_' + args.name + '.csv'),
              "w", newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["File name", "IoU", "IoU-optimised threshold"])

        # Initialise the lists
        filename_list = []
        best_threshold_list = []
        best_IoU_score_list = []

        for filename in os.listdir(args.testInputPath):
            if not os.path.isdir(os.path.join(args.testInputPath, filename)):
                print('Running QC on: ' + filename)
                test_input = io.imread(
                    os.path.join(args.testInputPath, filename), as_gray=True)
                test_ground_truth_image = io.imread(
                    os.path.join(args.testGroundTruthPath, filename),
                    as_gray=True)

                (threshold_list, iou_scores_per_threshold) = getIoUvsThreshold(
                    os.path.join(prediction_QC_folder,
                                 prediction_prefix + filename),
                    os.path.join(args.testGroundTruthPath, filename))

                # Find which threshold yielded the highest IoU score for
                # this image.
                best_IoU_score = max(iou_scores_per_threshold)
                # Map the best score back to the threshold value itself,
                # not just its position in the list.
                best_threshold = threshold_list[
                    iou_scores_per_threshold.index(best_IoU_score)]

                # Write the results to the CSV file
                writer.writerow(
                    [filename, str(best_IoU_score), str(best_threshold)])

                # Append the best threshold and score to the lists
                filename_list.append(filename)
                best_IoU_score_list.append(best_IoU_score)
                best_threshold_list.append(best_threshold)

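# The QC loop above depends on `getIoUvsThreshold`, which is not shown in
# this snippet. Below is a minimal sketch of what such a helper could look
# like, assuming an 8-bit prediction image swept over every integer
# threshold; the names and the 0-255 sweep are assumptions, not the
# original implementation.
import numpy as np
from skimage import io

def getIoUvsThreshold(prediction_path, ground_truth_path):
    """Sweep binarisation thresholds and return (thresholds, IoU scores)."""
    prediction = io.imread(prediction_path)
    ground_truth = io.imread(ground_truth_path, as_gray=True) > 0

    threshold_list = list(range(256))
    iou_scores_per_threshold = []
    for threshold in threshold_list:
        mask = prediction > threshold
        intersection = np.logical_and(mask, ground_truth).sum()
        union = np.logical_or(mask, ground_truth).sum()
        # Guard against an empty union to avoid division by zero.
        iou_scores_per_threshold.append(
            intersection / union if union > 0 else 0.0)
    return threshold_list, iou_scores_per_threshold
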
def __getitem__(self, idx):
    if torch.is_tensor(idx):
        idx = idx.tolist()
    img_name = self.data_paths[idx]
    image = io.imread(img_name)
    sample = {'image': image, 'label': self.data_labels['label'][idx]}
    return sample

def infer(self, img_file):
    tik = time.time()
    img = io.imread(img_file)
    img = Image.fromarray(img.astype(np.uint8))
    preprocess = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    img = preprocess(img)
    img.unsqueeze_(0)
    img = img.to(self.device)

    outputs = self.model.forward(img)
    outputs = F.softmax(outputs, dim=1)

    # Get top-K output labels and corresponding probabilities.
    topK_prob, topK_label = torch.topk(outputs, self.topK)
    prob = topK_prob.to("cpu").detach().numpy().tolist()

    _, predicted = torch.max(outputs.data, 1)
    tok = time.time()

    if prob[0][0] >= cfg['thresholds']['plant_recognition']:
        return {
            'status': 0,
            'message': 'success',
            'elapse': tok - tik,
            'results': [
                {
                    'name': self.key_type[int(topK_label[0][i].to("cpu"))],
                    'category_id': int(
                        topK_label[0][i].data.to("cpu").numpy()) + 1,
                    'prob': round(prob[0][i], 4)
                } for i in range(self.topK)
            ]
        }
    else:
        return {
            'status': 0,
            'message': 'success',
            'elapse': tok - tik,
            'results': [
                {
                    'name': "Unknown",
                    'category_id': -1,
                    'prob': round(prob[0][0], 4)
                }
            ]
        }

def load_mask(self, image):
    """Load image mask if it exists, otherwise return None."""
    if image in self.mask_files:  # debug note: execution does not enter here
        mask_path = self.mask_files[image]
        mask = io.imread(mask_path, grayscale=True)
        if mask is None:
            raise IOError("Unable to load mask for image {} "
                          "from file {}".format(image, mask_path))
    else:
        mask = None
    return mask

def __getitem__(self, idx):
    img_name = os.path.join(self.root_dir, self.landmarks_frame.iloc[idx, 0])
    image = io.imread(img_name)
    # `DataFrame.as_matrix` was removed from pandas; use `to_numpy` instead.
    landmarks = self.landmarks_frame.iloc[idx, 1:].to_numpy()
    landmarks = landmarks.astype('float').reshape(-1, 2)
    sample = {'image': image, 'landmarks': landmarks}
    if self.transform:
        sample = self.transform(sample)
    return sample

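# A quick usage sketch for a dataset built around the __getitem__ above.
# The class name `FaceLandmarksDataset` and the file paths are assumptions
# for illustration, not taken from this snippet.
from torch.utils.data import DataLoader

dataset = FaceLandmarksDataset(csv_file='data/faces/face_landmarks.csv',
                               root_dir='data/faces/')
# batch_size=1 avoids collating variable-size images; adding a resize
# transform would allow larger batches.
loader = DataLoader(dataset, batch_size=1, shuffle=True)
for batch in loader:
    print(batch['image'].shape, batch['landmarks'].shape)
    break
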
def image_from_url(url):
    """
    Read an image from a URL. Returns a numpy array with the pixel data.
    """
    try:
        img = io.imread(url)
        return img
    except urllib.error.HTTPError as e:
        # HTTPError is a subclass of URLError, so it must be caught first
        # or this handler would never run.
        print('HTTP Error: ', e.code, url)
    except urllib.error.URLError as e:
        print('URL Error: ', e.reason, url)

def test_data():
    """Return an image for testing purposes.

    Returns
    -------
    I : ndarray of uint8
        512x512 test image.
    """
    import os
    f = os.path.join(os.path.dirname(__file__), 'lib/pywt/demo/data/aero.png')
    return io.imread(f)

def load(self, path):
    from skimage import io
    if not io.util.is_url(path):
        path = os.path.abspath(path)
    self._path = path
    ext = path[-3:].lower()
    if ext == 'pdf':
        data = read_pdf(path)
    else:
        with io.util.file_or_url_context(path) as context:
            data = io.imread(context)
    mode = guessmode(data)
    self._set(data, mode)
    return self

def __getitem__(self, idx):
    if torch.is_tensor(idx):
        idx = idx.tolist()
    entry = self.data_list[idx]
    img_name = entry['file']
    answer = entry['answer']
    img_path = os.path.join(self.root_dir, img_name)
    # Read HWC uint8 pixels, reorder to CHW, and cast for the network.
    image = io.imread(img_path).transpose(2, 0, 1).astype(np.float32)
    if self.transform:
        image = self.transform(image)
    return [image, answer]

def bounding_box(url, texts):
    """Draw a bounding box for each detected text region."""
    img = io.imread(url)  # read image from url
    for text in texts[1:]:  # 0th bounding box is the whole picture
        # get coordinates
        vertices = [(vertex.x, vertex.y)
                    for vertex in text.bounding_poly.vertices]
        # plot line
        cv2.polylines(img, [np.array(vertices)], True, (0, 255, 0), 2)
    plt.figure(figsize=(10, 10))
    plt.imshow(img)
    plt.show()

def run():
    from skimage import io
    from skimage import restoration
    from skimage import img_as_ubyte

    kidney_image = io.imread('manu.jpg')

    # Do a test denoising using a total-variation (Chambolle) filter.
    kidney_image_denoised_tv = restoration.denoise_tv_chambolle(
        kidney_image, weight=0.1)

    # denoise_tv_chambolle returns a float image in [0, 1]; convert back
    # to uint8 before saving.
    io.imsave('denoise_image.jpg', img_as_ubyte(kidney_image_denoised_tv))

def pickle_image_data(y, ylabels, imageDirectory, pickleDirectory):
    # Extract all training images from a directory and use skimage to put
    # each one into an array with the channel dimension first.
    iterator = 0
    for (image_name, labels) in ylabels:
        raw_data = io.imread(imageDirectory + image_name + ".tif")
        # Swap axes so (height, width, channel) becomes (channel, width,
        # height), then save as a .npy file.
        np.save(file=pickleDirectory + "train_" + str(iterator),
                arr=np.swapaxes(raw_data, 0, 2))
        iterator += 1
        if iterator % 500 == 0:
            print("image " + str(iterator))

def preprocessing(img_name, trans_all):
    # Load one composite training image: five 288-pixel-wide maps side by
    # side (input, normals, diffuse, roughness, specular).
    image = io.imread(img_name)
    input = image[:, :288, :]
    normals = image[:, 288:2 * 288, :]
    diffuse = image[:, 2 * 288:3 * 288, :]
    roughness = image[:, 3 * 288:4 * 288, :]
    specular = image[:, 4 * 288:5 * 288, :]
    # label = np.concatenate((normals, diffuse, roughness, specular), axis=2)
    if trans_all:
        input_t = trans_all(input)
        normals_t = trans_all(normals)
        sample = {'input': input_t, 'label': normals_t}
    else:
        # Without a transform, convert to tensors so unsqueeze below works.
        sample = {'input': torch.from_numpy(input),
                  'label': torch.from_numpy(normals)}
    images, labels = sample["input"].unsqueeze(0), sample["label"].unsqueeze(0)
    return images, labels

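# A usage sketch for the helper above, assuming a torchvision ToTensor
# pipeline; the composite file name is a placeholder.
from torchvision import transforms

trans_all = transforms.Compose([transforms.ToTensor()])
# 'sample_maps.png' stands in for one row of five 288-pixel-wide maps.
images, labels = preprocessing('sample_maps.png', trans_all)
print(images.shape, labels.shape)  # each: (1, 3, H, 288)
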
def load_data(validation_percent):
    all_images = []
    counter = 0
    for image_path in os.listdir('charts'):
        # `as_grey` was renamed `as_gray` in scikit-image.
        img = io.imread('charts/%s' % image_path, as_gray=True)
        img = img.reshape([128, 128, 1])
        all_images.append(img)
        counter += 1
    data = np.array(all_images)
    # NOTE: `labels` is not defined in this snippet; it must be built
    # alongside `all_images` (e.g. parsed from each file name).
    validation_size = int(len(data) * validation_percent / 100)
    split = len(data) - validation_size
    return (data[:split], labels[:split],
            data[split:], labels[split:])

def load_image_files(container_path, dimension=(64, 64)):
    """
    Load image files with categories as subfolder names, behaving like a
    scikit-learn sample dataset.

    Parameters
    ----------
    container_path : string or unicode
        Path to the main folder holding one subfolder per category
    dimension : tuple
        Size to which images are resized

    Returns
    -------
    Bunch
    """
    image_dir = Path(container_path)
    folders = [directory for directory in image_dir.iterdir()
               if directory.is_dir()]
    categories = [fo.name for fo in folders]

    descr = "An image classification dataset"
    images = []
    flat_data = []
    target = []
    for i, direc in enumerate(folders):
        for file in direc.iterdir():
            img = io.imread(file)
            img_resized = resize(img, dimension, anti_aliasing=True,
                                 mode='reflect')
            flat_data.append(img_resized.flatten())
            images.append(img_resized)
            target.append(i)
    flat_data = np.array(flat_data)
    target = np.array(target)
    images = np.array(images)

    return Bunch(data=flat_data,
                 target=target,
                 target_names=categories,
                 images=images,
                 DESCR=descr)

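# Because the helper returns a scikit-learn Bunch, it slots straight into
# the usual estimator workflow. A minimal sketch, assuming an 'images/'
# folder with one subfolder per class; the path and classifier choice are
# illustrative, not part of the original snippet.
from sklearn import svm
from sklearn.model_selection import train_test_split

image_dataset = load_image_files('images/')
X_train, X_test, y_train, y_test = train_test_split(
    image_dataset.data, image_dataset.target, test_size=0.3, random_state=0)

clf = svm.SVC(gamma='scale')
clf.fit(X_train, y_train)
print('accuracy:', clf.score(X_test, y_test))
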
def draw_edge_dir(detector, predictor, source_dir, target_dir,
                  add_face_keypoints):
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    img_paths = glob.glob(os.path.join(source_dir, '*.jpg'))
    face_edger = FaceEdge()
    en = 0
    for img_path in img_paths:
        if en % 1000 == 0 and en != 0:
            print("The number of processed images is", en)
        img = io.imread(img_path)
        dets = detector(img, 1)
        if len(dets) > 0:
            shape = predictor(img, dets[0])
            points = np.empty([68, 2], dtype=int)
            for b in range(68):
                points[b, 0] = shape.part(b).x
                points[b, 1] = shape.part(b).y
            img_ = Image.open(img_path)
            img_size = img_.size
            face_edger.get_crop_coords(points, img_size)
            B_crop = face_edger.crop(img_)
            params = get_img_params(B_crop.size)
            transform_scale = get_transform(params, method=Image.BILINEAR,
                                            normalize=False)
            edge_image = face_edger.get_face_image(points, transform_scale,
                                                   img_size, img_,
                                                   add_face_keypoints)
            save_path = os.path.join(target_dir, os.path.basename(img_path))
            # Pass the path positionally; the keyword changed from
            # `filename` to `fp` across torchvision releases.
            torchvision.utils.save_image(edge_image, save_path)
            en += 1
    print("The total number of edge images is", en)

def __getitem__(self, n):
    """Return image n in the collection.

    Loading is done on demand.

    Parameters
    ----------
    n : int
        The image number to be returned.

    Returns
    -------
    img : ndarray
        The `n`-th image in the collection.
    """
    n = self._check_imgnum(n)
    idx = n % len(self.data)
    if ((self.conserve_memory and n != self._cached) or
            (self.data[idx] is None)):
        self.data[idx] = imread(self.files[n], self.as_grey,
                                dtype=self._dtype)
        self._cached = n
    return self.data[idx]

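# This method (together with `reload` earlier in this section) implements
# scikit-image's lazy ImageCollection behaviour: files are read only when
# indexed. A short sketch against the public API; the glob pattern is a
# placeholder.
from skimage import io

ic = io.ImageCollection('frames/*.png', conserve_memory=True)
print(len(ic))      # number of matched files
first = ic[0]       # triggers the on-demand imread shown above
print(first.shape)
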
import numpy as np
import os
import subprocess
from PIL import Image
from matplotlib import pyplot
from skimage import io, transform

######################################################################
# First, let's load the image and pre-process it using the standard skimage
# Python library. Note that this preprocessing is the standard practice for
# processing data for training/testing neural networks.

# load the image
img_in = io.imread("./_static/img/cat.jpg")

# resize the image to dimensions 224x224
img = transform.resize(img_in, [224, 224])

# save this resized image to be used as input to the model
io.imsave("./_static/img/cat_224x224.jpg", img)

######################################################################
# Now, as a next step, let's take the resized cat image and run the
# super-resolution model in the Caffe2 backend and save the output image.
# The image processing steps below have been adopted from the PyTorch
# implementation of the super-resolution model
# `here <https://github.com/pytorch/examples/blob/master/super_resolution/super_resolve.py>`__
#
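
######################################################################
# A caveat worth flagging: ``transform.resize`` returns a float image in
# [0, 1], and depending on the scikit-image version ``io.imsave`` may warn
# about the lossy float-to-uint8 conversion. A hedged workaround (not part
# of the original tutorial) is to convert explicitly before saving:

from skimage import img_as_ubyte

# Convert the float image back to uint8 so the JPEG is written without
# a precision-loss warning.
io.imsave("./_static/img/cat_224x224.jpg", img_as_ubyte(img))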