def _USPS(self):
    # Download and unpack the USPS digit images, then build (image, label) tensors.
    def resize_and_scale(img, size, scale):
        img = skresize(img, size)
        return 1 - (np.array(img, "float32") / scale)

    # if os.path.isfile(self.data_path+'/USPS'+set+'.pt'):
    sz = (28, 28)
    imgs_usps = []
    lbl_usps = []
    if 'USPSdata.zip' not in os.listdir(self.data_path):
        urllib.request.urlretrieve(
            'https://github.com/darshanbagul/USPS_Digit_Classification/raw/master/USPSdata/USPSdata.zip',
            self.data_path + '/USPSdata.zip')
    zip_ref = zipfile.ZipFile(self.data_path + '/USPSdata.zip', 'r')
    zip_ref.extractall(self.data_path)
    zip_ref.close()
    if self.set == 'train' or self.set == 'validation':
        for i in range(10):
            label_data = self.data_path + '/Numerals/' + str(i) + '/'
            img_list = os.listdir(label_data)
            for name in img_list:
                if '.png' in name:
                    img = skimread(label_data + name)
                    img = sk_rgb2gray(img)
                    resized_img = resize_and_scale(img, sz, 255)
                    imgs_usps.append(resized_img.flatten())
                    lbl_usps.append(i)
    elif self.set == 'test':
        test_path = self.data_path + '/Test/'
        strt = 1
        for lbl, cntr in enumerate(range(151, 1651, 150)):
            for i in range(strt, cntr):
                i = format(i, '04d')
                img = skimread(
                    os.path.join(test_path, 'test_' + str(i) + '.png'))
                img = sk_rgb2gray(img)
                resized_img = resize_and_scale(img, sz, 255)
                imgs_usps.append(resized_img.flatten())
                lbl_usps.append(9 - lbl)
            strt = cntr
    # os.remove(self.data_path+'/USPSdata.zip')
    shutil.rmtree(self.data_path + '/Numerals')
    shutil.rmtree(self.data_path + '/Test')
    imgs_usps, lbl_usps = np.asarray(imgs_usps).reshape(
        -1, 28, 28), np.asarray(lbl_usps)
    lbl_usps = torch.tensor(lbl_usps, dtype=torch.long)
    imgs_usps = torch.tensor(imgs_usps)
    # torch.save((imgs_usps, lbl_usps), open(self.data_path+'/USPS'+set+'.pt', 'wb'))
    # else:
    #     imgs_usps, lbl_usps = torch.load(open(self.data_path+'/USPS'+set+'.pt', 'rb'))
    return imgs_usps, lbl_usps
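# Minimal usage sketch for _USPS above. It is written as a method, so it expects an
# object exposing `data_path` and `set` attributes, plus module-level imports of os,
# shutil, zipfile, urllib.request, numpy as np, torch, and the skimage helpers
# skimread / sk_rgb2gray / skresize. The wrapper class below is hypothetical and not
# part of the original code.
from torch.utils.data import Dataset

class USPSDigits(Dataset):
    def __init__(self, data_path, split='train'):
        self.data_path = data_path
        self.set = split  # 'train', 'validation', or 'test'
        # Reuse the loader defined above (assumption: same module)
        self.images, self.labels = _USPS(self)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.images[idx], self.labels[idx]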
def imread(fname, factor=100):
    """Read possibly scaled version of image"""
    img = skimread(fname)
    if factor < 100:
        img = imresize(img, [
            int(img.shape[0] * factor / 100),
            int(img.shape[1] * factor / 100)
        ], order=3)
        img = (img * 255).astype(np.uint8)
    return img
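# Usage sketch for the factor-based imread above, assuming `imresize` is
# skimage.transform.resize (order=3 is a cubic spline) and 'photo.png' is a
# placeholder path.
half = imread('photo.png', factor=50)  # downscale to 50% and convert back to uint8
full = imread('photo.png')             # factor=100 returns the image unchanged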
def imread(
    filepath: str,
    return_type: Union[np.dtype, str] = np.uint8,
    convert_to_tensor: bool = False,
) -> np.ndarray:
    """Read image data from file into the specified format.

    This function wraps scikit-image imread functionality and applies required
    conversions.

    Args:
        filepath: string containing path to the image data; .npy files are also
            supported.
        return_type: data format of the returned image, can be np.dtype or a
            string. Supported formats are: float, uint8, uint16. Default is uint8.
        convert_to_tensor: flag to convert output image to torch.Tensor. Default
            is False.

    Returns:
        Image in selected format.
    """
    str_to_type = {
        'float': float,
        'uint8': np.uint8,
        'uint16': np.uint16,
    }
    if hasattr(return_type, 'lower'):
        return_type = return_type.lower()
    if isinstance(return_type, str):
        return_type = str_to_type[return_type]

    # Load file (.npy arrays are read directly, everything else via scikit-image)
    if filepath.endswith('.npy'):
        image = np.load(filepath)
    else:
        image = skimread(filepath)

    # Apply conversions (the builtin float replaces the removed np.float alias)
    if not np.issubdtype(image.dtype, return_type):
        if return_type is float:
            image = convert_image_to_float(image)
        else:
            image = convert_image_to_int(image, image_type=return_type)

    if convert_to_tensor:
        image = ToTensor()(image)

    return image
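# Usage sketch for the typed imread wrapper above; file names are placeholders and
# the conversion helpers (convert_image_to_float, convert_image_to_int, ToTensor)
# are assumed to be imported alongside the original function.
img8 = imread('scan.png')                         # uint8 (default)
imgf = imread('scan.png', return_type='float')    # routed through convert_image_to_float
tens = imread('scan.npy', return_type=np.uint16,
              convert_to_tensor=True)             # returns a torch.Tensor via ToTensor()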
def imread(imgfile):
    return th.from_numpy(skimread(imgfile))
def spot_scan(file):
    try:
        img_dir = os.path.dirname(file)  # Directory of the image file; outputs are saved here.

        ##### LOAD IMAGE FILE AND CROP A D45 mm AREA FROM CENTER #####
        img = skimread(file)  # Read image file

        # Determine image size for centered crop
        height = img.shape[0]
        width = img.shape[1]
        center_y = int(height / 2)
        center_x = int(width / 2)
        img_center = (center_x, center_y)
        mask_radius = 4252

        # Create D45mm circular mask at center of image
        mask = np.zeros((height, width), np.uint8)
        cv2.circle(mask, img_center, mask_radius, (255, 255, 255), -1)
        masked_img = cv2.bitwise_and(img, img, mask=mask)

        # Threshold the mask and find its contour to get the crop bounds (OpenCV 3.x API)
        thresh_mask = cv2.threshold(mask, 1, 255, cv2.THRESH_BINARY)[1]
        (_, mask_contours, _) = cv2.findContours(thresh_mask, cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_SIMPLE)
        x, y, w, h = cv2.boundingRect(mask_contours[0])

        # Crop masked data
        crop_img = masked_img[y:y + h, x:x + w]

        # Convert black edge pixels to white so the edge does not get captured by the threshold
        crop_img[np.where(crop_img == [0])] = 255

        ##### APPLY THRESHOLDING TO IMAGE THEN FIND AND RECORD MICROFEATURES #####
        # Apply thresholding to the cropped image
        thresh_img = cv2.threshold(crop_img, 145, 255, cv2.THRESH_BINARY_INV)[1]

        # Create contours
        (_, cnts, _) = cv2.findContours(thresh_img.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)

        cnt_data = {"Area": [], "Location": []}  # Initialize contour data

        # Determine if contour area is large enough to be interesting
        for contour in cnts:
            area = cv2.contourArea(contour) * 0.000028
            (x, y), radius = cv2.minEnclosingCircle(contour)
            center = (int(x), int(y))
            radius = int(radius)
            cnt_data["Area"].append(area)
            cnt_data["Location"].append(center)
            if area < 0.002:
                continue
            else:
                # If the microfeature is large enough, draw a circle around the contour
                cv2.circle(crop_img, center, radius, (0, 0, 255), 10)

        # Create dataframe from cnt_data
        cnt_data = pd.DataFrame(cnt_data)

        # Resize and save image with circled microfeatures
        out_img_file = img_dir + "/highlighted_spots.jpg"  # Directory/filename for output image
        resize_img = cv2.resize(crop_img, (800, 800))
        cv2.imwrite(out_img_file, resize_img)
        # cv2.imshow("original", resize_img)
        # cv2.waitKey(0)

        ##### CREATE A HISTOGRAM OF MICROFEATURE SIZE #####
        # Create microfeature histogram
        bins = [0.002, 0.0025, 0.003, 0.004, 0.006, 0.008, 0.012, 0.018,
                0.025, 0.050, 0.1, 10]
        bars = [0.0025, 0.003, 0.004, 0.006, 0.008, 0.012, 0.018, 0.025,
                0.050, 0.1, 'More']

        # Use hist method to sort data into bins
        n, bins, patches = plt.hist(x=list(cnt_data["Area"]), bins=bins,
                                    color='#0504aa', alpha=0.7, rwidth=0.85)
        bar_height = n.tolist()  # Convert array of bin counts to list
        y_pos = np.arange(len(bars))  # y_pos determines number of bars
        plt.bar(y_pos, bar_height, align='center', alpha=0.5)
        plt.xticks(y_pos, bars, rotation=30)
        plt.ylabel('Count')
        plt.xlabel('Microfeature Size ($mm^2$)')
        plt.title('Microfeature Distribution')
        maxmf = max(list(cnt_data['Area']))  # Size of the largest microfeature
        text_y = max(bar_height) / 2  # Place text at mid height of chart
        plt.text(7, text_y, 'largest feature\n%s mm2' % str(round(maxmf, 4)))
        out_plt_file = img_dir + "/histogram.jpg"
        plt.savefig(out_plt_file)
        # plt.show()

        ##### CREATE A SUMMARY REPORT IN EXCEL #####
        # Create summary statistics
        nsmall = bar_height[0] + bar_height[1] + bar_height[2] + bar_height[3] + bar_height[4]  # 0.002 to 0.008
        nmed = bar_height[5] + bar_height[6]  # 0.008 to 0.018
        nlarge = bar_height[7] + bar_height[8] + bar_height[9] + bar_height[10]  # 0.018 to 10
        mf_coverage = sum(cnt_data["Area"].values) / 1590 * 100  # % of the ~1590 mm^2 D45 mm scan area
        # Determine pass or fail
        if nsmall < 380:
            tsmall = "pass"
        else:
            tsmall = "fail"
        if nmed < 250:
            tmed = "pass"
        else:
            tmed = "fail"
        if nlarge < 160:
            tlarge = "pass"
        else:
            tlarge = "fail"
        if maxmf < 0.1:
            tmax = "pass"
        else:
            tmax = "fail"

        # Set indexes and columns
        index = ["Count 0.002 - 0.008 mm2", "Count 0.008 - 0.018 mm2",
                 "Count 0.018 - 0.1 mm2", "Largest Spot", "Spot Coverage %"]
        summary = {"Key Data": [nsmall, nmed, nlarge, round(maxmf, 4), round(mf_coverage, 4)],
                   "Pass / Fail": [tsmall, tmed, tlarge, tmax, ""]}
        summary = pd.DataFrame(data=summary, index=index)

        # Send summary dataframe to excel then add a tab for cnt_data
        out_xls_file = img_dir + "/spot_data.xlsx"
        writer = pd.ExcelWriter(out_xls_file, engine='openpyxl')
        summary.to_excel(writer, sheet_name="Summary")
        cnt_data.to_excel(writer, sheet_name="Spot Data")
        writer.save()
        writer.close()

        return 0, summary

    except Exception as e:
        return 1, ("scan error\n" + str(e))
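# Usage sketch for spot_scan above; '/data/scans/panel.tif' is a placeholder path.
# The function writes highlighted_spots.jpg, histogram.jpg, and spot_data.xlsx next
# to the input image and returns (status, payload): status 0 with the pass/fail
# summary DataFrame on success, status 1 with a "scan error" message on failure.
status, result = spot_scan('/data/scans/panel.tif')
if status == 0:
    print(result)  # summary table with counts, largest spot, and coverage %
else:
    print(result)  # "scan error\n..." message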
def imread(filename: str) -> Image:
    from skimage.io import imread as skimread
    image = skimread(filename)
    from .._tier0 import push
    return push(image)
def imread(imgfile):
    return skimread(imgfile)
def imread(image_path):
    # Takes an image file (.png, .jpg etc.) and returns a 2D np.array
    return img_as_float(skimread(image_path))