def encrypt_file(self):
    to_encrypt = self.parent.textarea.get('1.0', 'end-1c')
    if State.generated_key_filename and State.generated_key:
        if State.encrypt_method == 'AES':
            cipher = AES.new(State.generated_key, AES.MODE_EAX)
            ciphertext, tag = cipher.encrypt_and_digest(to_encrypt.encode())
            token = cipher.nonce + tag + ciphertext
        elif State.encrypt_method == 'Triple DES':
            cipher = DES3.new(State.generated_key, DES3.MODE_CFB)
            token = cipher.iv + cipher.encrypt(to_encrypt.encode())
        elif State.encrypt_method == 'Salsa20':
            cipher = Salsa20.new(key=State.generated_key)
            token = cipher.nonce + cipher.encrypt(to_encrypt.encode())
        else:
            return messagebox.showerror(
                title='Key',
                message='Something went wrong. Close the window and try again.')

        ans = filedialog.asksaveasfilename(
            parent=self.parent,
            defaultextension='.txt',
            filetypes=CONFIG['DEFAULT_FILETYPES'],
            initialfile=get_filename_from_path(State.filename))
        if ans:
            State.filename = ans
            with open(State.filename, 'wb') as f:
                f.write(token)
            self.navbar.file_helper.update_title(
                get_filename_from_path(State.filename))
            self.frame_encrypt.destroy()
            messagebox.showwarning(
                title='Success',
                message='File was successfully encrypted and saved. Do not lose your key.')
            State.is_modified = False
            State.generated_key_filename = False
            State.generated_key = False
            State.encrypt_method = CONFIG['DEFAULT_ENCRYPTION_METHOD']
    else:
        messagebox.showerror(
            title='Key',
            message='You need to generate a random key first.')

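# For reference, a decryption sketch that undoes the token layouts written above
# (AES-EAX: 16-byte nonce + 16-byte tag + ciphertext; Triple DES CFB: 8-byte IV +
# ciphertext; Salsa20: 8-byte nonce + ciphertext). This helper is hypothetical,
# not part of the app, and assumes PyCryptodome and the same key that produced
# the file.
from Crypto.Cipher import AES, DES3, Salsa20


def decrypt_token(token, key, method):
    """Hypothetical inverse of encrypt_file() for the token layouts above."""
    if method == 'AES':
        nonce, tag, ciphertext = token[:16], token[16:32], token[32:]
        cipher = AES.new(key, AES.MODE_EAX, nonce=nonce)
        # Raises ValueError if the key is wrong or the data was tampered with.
        return cipher.decrypt_and_verify(ciphertext, tag).decode()
    if method == 'Triple DES':
        iv, ciphertext = token[:8], token[8:]
        return DES3.new(key, DES3.MODE_CFB, iv=iv).decrypt(ciphertext).decode()
    if method == 'Salsa20':
        nonce, ciphertext = token[:8], token[8:]
        return Salsa20.new(key=key, nonce=nonce).decrypt(ciphertext).decode()
    raise ValueError('Unknown encryption method: %s' % method)

# Usage sketch:
# with open(path, 'rb') as f:
#     plaintext = decrypt_token(f.read(), State.generated_key, State.encrypt_method)
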
def extract_patches_normal():
    wsi_image_names = glob.glob(
        os.path.join(utils.TRAIN_NORMAL_WSI_PATH, '*.tif'))
    wsi_image_names.sort()
    for image_path in wsi_image_names:
        extract_patches(image_path, utils.get_filename_from_path(image_path))

def extract_negative_patches_from_tumor_wsi(wsi_ops, patch_extractor,
                                            patch_index, augmentation=False):
    wsi_paths = glob.glob(os.path.join(utils.TRAIN_TUMOR_WSI_PATH, '*.tif'))
    wsi_paths.sort()
    mask_paths = glob.glob(os.path.join(utils.TUMOR_MASK_PATH, '*.tif'))
    mask_paths.sort()

    image_mask_pair = zip(wsi_paths, mask_paths)
    image_mask_pair = list(image_mask_pair)
    # image_mask_pair = image_mask_pair[67:68]

    patch_save_dir = utils.PATCHES_TRAIN_AUG_NEGATIVE_PATH if augmentation \
        else utils.PATCHES_TRAIN_NEGATIVE_PATH
    patch_prefix = utils.PATCH_AUG_NORMAL_PREFIX if augmentation \
        else utils.PATCH_NORMAL_PREFIX

    for image_path, mask_path in image_mask_pair:
        print('extract_negative_patches_from_tumor_wsi(): %s' %
              utils.get_filename_from_path(image_path))
        start = time.time()
        wsi_image, rgb_image, _, tumor_gt_mask, level_used = \
            wsi_ops.read_wsi_tumor(image_path, mask_path)
        assert wsi_image is not None, \
            'Failed to read Whole Slide Image %s.' % image_path

        bounding_boxes, image_open = wsi_ops.find_roi_bbox(np.array(rgb_image))

        patch_index = patch_extractor.extract_negative_patches_from_tumor_wsi(
            wsi_image, np.array(tumor_gt_mask), image_open, level_used,
            bounding_boxes, patch_save_dir, patch_prefix, patch_index)
        duration = time.time() - start
        print('Negative patches count: %d and duration is %f' %
              (patch_index - utils.PATCH_INDEX_NEGATIVE, duration))

        wsi_image.close()

    return patch_index

def extract_cor(heatmap_prob_name_prefix):
    print('************************** extract_cor() ***************************')
    test_wsi_paths = glob.glob(
        os.path.join(
            '/media/jiaojiao/Seagate Backup Plus Drive/CAMELYON16/TestingData/Testset',
            '*.tif'))
    test_wsi_paths.sort()
    for wsi_path in test_wsi_paths:
        wsi_name = utils.get_filename_from_path(wsi_path)
        print('extracting features for: %s' % wsi_name)
        print(os.path.join(
            '/home/jiaojiao/PycharmProjects/results/first_model/heatmap_prob',
            'heatmap_%s.png' % wsi_name))
        heatmap_prob_path = glob.glob(
            os.path.join(
                '/home/jiaojiao/PycharmProjects/camelyon16/results/first_model/heatmap_prob',
                'heatmap_*%s.png' % wsi_name[1:]))
        print(heatmap_prob_path)
        heatmap_prob = cv2.imread(heatmap_prob_path[0])

        csv_name = heatmap_prob_path[0].split('/')[-1][-12:-4] + '.csv'
        csv_path = os.path.join('/home/jiaojiao/Desktop/test', csv_name)
        csv_file = open(csv_path, 'w')
        csv_test = csv.writer(csv_file, quoting=csv.QUOTE_NONNUMERIC)
        get_result(heatmap_prob, csv_test, 7)

def _find_attachments_in_email(mesg, expand_attachment, atts):
    for part in mesg.walk():
        content_type = part.get_content_type()
        if part.is_multipart():
            continue
        payload = part.get_payload(decode=True)
        if content_type.startswith('text/') and expand_attachment:
            normalized = payload.lstrip(" \t\r\n")
            if any(normalized.startswith(m) for m in EMAIL_MAGIC):
                new_mesg = email.message_from_string(normalized)
                _find_attachments_in_email(new_mesg, expand_attachment, atts)
                continue
        if content_type in SAFE_MEDIA_TYPE:
            continue
        filename = part.get_filename()
        if filename is None:
            ext = mimetypes.guess_extension(content_type) or ''
            filename = '<unknown>' + ext
        else:
            # Sanitize the header value
            filename = _decode_header(filename)
            filename = utils.get_filename_from_path(filename)
        tempfile_path = utils.store_temp_file(payload, filename)
        atts.append((tempfile_path, filename, content_type))

def extract_features_train_all(heatmap_prob_name_postfix_first_model,
                               heatmap_prob_name_postfix_second_model,
                               f_train):
    print('********************** extract_features_train_all() *************************')
    print('heatmap_prob_name_postfix_first_model: %s' % heatmap_prob_name_postfix_first_model)
    print('heatmap_prob_name_postfix_second_model: %s' % heatmap_prob_name_postfix_second_model)
    print('f_train: %s' % f_train)

    tumor_wsi_paths = glob.glob(os.path.join(utils.TUMOR_WSI_PATH, '*.tif'))
    tumor_wsi_paths.sort()
    normal_wsi_paths = glob.glob(os.path.join(utils.NORMAL_WSI_PATH, '*.tif'))
    normal_wsi_paths.sort()

    wsi_paths = tumor_wsi_paths + normal_wsi_paths

    features_file_train_all = open(f_train, 'w')
    wr_train = csv.writer(features_file_train_all, quoting=csv.QUOTE_NONNUMERIC)
    wr_train.writerow(utils.heatmap_feature_names)

    for wsi_path in wsi_paths:
        wsi_name = utils.get_filename_from_path(wsi_path)
        # print('extracting features for: %s' % wsi_name)
        heatmap_prob_path = glob.glob(
            os.path.join(utils.HEAT_MAP_DIR,
                         '*%s*%s' % (wsi_name, heatmap_prob_name_postfix_first_model)))
        # print(heatmap_prob_path)
        image_open = wsi_ops.get_image_open(wsi_path)
        heatmap_prob = cv2.imread(heatmap_prob_path[0])

        if heatmap_prob_name_postfix_second_model is not None:
            heatmap_prob_path_second_model = glob.glob(
                os.path.join(utils.HEAT_MAP_DIR,
                             '*%s*%s' % (wsi_name, heatmap_prob_name_postfix_second_model)))
            heatmap_prob_second_model = cv2.imread(heatmap_prob_path_second_model[0])
            # Where the first model is highly confident (>= 0.90) but the second
            # model is not (< 0.50), take the second model's value instead.
            for row in range(heatmap_prob.shape[0]):
                for col in range(heatmap_prob.shape[1]):
                    if heatmap_prob[row, col, 0] >= 0.90 * 255 \
                            and heatmap_prob_second_model[row, col, 0] < 0.50 * 255:
                        heatmap_prob[row, col, :] = heatmap_prob_second_model[row, col, :]

        features = extract_features(heatmap_prob, image_open)
        if 'umor' in wsi_name:
            features += [1]
        else:
            features += [0]
        print(features)
        wr_train.writerow(features)

def extract_patches_test():
    wsi_image_names = glob.glob(utils.TEST_WSI_DIR + '/*.tif')
    wsi_image_names.sort()
    print(wsi_image_names)
    # wsi_image_names = wsi_image_names[1:2]
    for image_path in wsi_image_names:
        extract_patches(image_path, utils.get_filename_from_path(image_path))
    print('Finished extracting patches.')

def borgmatic():
    if not utils.verify_auth_token(request.headers):
        return "Access denied, invalid auth token.", 403

    # Backup encountered an error
    if isinstance(request.json, dict) and request.json.get("error", False):
        data = request.json
        backup_name = utils.get_filename_from_path(data.get('config_path'))
        data = {
            "backup_successful": False,
            "backup_name": backup_name,
            "message": data.get('message').replace("\n", "<br />\n"),
            "stderr": data.get('stderr').replace("\n", "<br />\n")
        }
    # Backup was successful
    else:
        data = request.json[0]
        backup_name = utils.get_filename_from_path(
            data.get('repository').get('location'))
        backup_duration = data.get('archive').get('duration')
        total_size = data.get('cache').get('stats').get('unique_csize')
        data = {
            "backup_successful": True,
            "backup_name": os.path.basename(
                os.path.normpath(data.get('repository').get('location'))),
            "backup_duration_formatted": time.strftime(
                '%H:%M:%S', time.gmtime(backup_duration)),
            "formatted_size": utils.format_file_size(total_size)
        }

    # Build out the email and send it
    template = utils.load_email_template("BorgBackup.html")
    email_body = MIMEText(template.render(data=data), 'html')
    EmailSender().send_email(f"Backup complete: {backup_name}", email_body)
    return "Borgmatic backup email notification sent."

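# For context, the handler above expects two payload shapes from the borgmatic
# hook. The field names below are taken from the handler itself; the values are
# illustrative only.
failed_backup_payload = {
    # A plain JSON object carrying an "error" flag when the backup failed.
    "error": True,
    "config_path": "/etc/borgmatic/config.yaml",
    "message": "Command 'borg create ...' returned non-zero exit status 2.",
    "stderr": "Repository does not exist.",
}

successful_backup_payload = [{
    # A one-element list of borg/borgmatic JSON output when the backup succeeded.
    "repository": {"location": "ssh://backup@host/./repos/server1"},
    "archive": {"duration": 421.7},                    # seconds
    "cache": {"stats": {"unique_csize": 123456789}},   # deduplicated, compressed bytes
}]
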
def extract_patches_from_heatmap_false_region_normal(wsi_ops, patch_extractor,
                                                     patch_index,
                                                     augmentation=False):
    normal_heatmap_prob_paths = glob.glob(
        os.path.join(utils.HEAT_MAP_DIR, 'Normal*prob.png'))
    normal_heatmap_prob_paths.sort()
    wsi_paths = glob.glob(os.path.join(utils.NORMAL_WSI_PATH, '*.tif'))
    wsi_paths.sort()

    assert len(normal_heatmap_prob_paths) == len(wsi_paths), \
        'Some heatmaps are missing!'

    image_heatmap_tuple = zip(wsi_paths, normal_heatmap_prob_paths)
    image_heatmap_tuple = list(image_heatmap_tuple)
    # image_mask_pair = image_mask_pair[67:68]

    patch_save_dir_neg = utils.PATCHES_TRAIN_AUG_NEGATIVE_PATH if augmentation \
        else utils.PATCHES_TRAIN_NEGATIVE_PATH
    patch_prefix_neg = utils.PATCH_AUG_NORMAL_PREFIX if augmentation \
        else utils.PATCH_NORMAL_PREFIX

    for image_path, heatmap_prob_path in image_heatmap_tuple:
        print('extract_patches_from_heatmap_false_region_normal(): %s, %s' %
              (utils.get_filename_from_path(image_path),
               utils.get_filename_from_path(heatmap_prob_path)))

        wsi_image, rgb_image, level_used = wsi_ops.read_wsi_normal(image_path)
        assert wsi_image is not None, \
            'Failed to read Whole Slide Image %s.' % image_path

        bounding_boxes, image_open = wsi_ops.find_roi_bbox(np.array(rgb_image))

        # Collapse the 3-channel probability image to a single-channel float
        # map in [0, 1].
        heatmap_prob = cv2.imread(heatmap_prob_path)
        heatmap_prob = heatmap_prob[:, :, :1]
        heatmap_prob = np.reshape(heatmap_prob,
                                  (heatmap_prob.shape[0], heatmap_prob.shape[1]))
        heatmap_prob = np.array(heatmap_prob, dtype=np.float32)
        heatmap_prob /= 255

        patch_index = patch_extractor.extract_patches_from_heatmap_false_region_normal(
            wsi_image, image_open, heatmap_prob, level_used, bounding_boxes,
            patch_save_dir_neg, patch_prefix_neg, patch_index)
        print('patch count: %d' % (patch_index - utils.PATCH_INDEX_NEGATIVE))

        wsi_image.close()

    return patch_index

def extract_features_test(heatmap_prob_name_postfix_first_model,
                          heatmap_prob_name_postfix_second_model, f_test):
    print('************************** extract_features_test() ***************************')
    print('heatmap_prob_name_postfix_first_model: %s' % heatmap_prob_name_postfix_first_model)
    print('heatmap_prob_name_postfix_second_model: %s' % heatmap_prob_name_postfix_second_model)
    print('f_test: %s' % f_test)

    test_wsi_paths = glob.glob(os.path.join(utils.TEST_WSI_PATH, '*.tif'))
    test_wsi_paths.sort()

    features_file_test = open(f_test, 'w')
    wr_test = csv.writer(features_file_test, quoting=csv.QUOTE_NONNUMERIC)
    wr_test.writerow(utils.heatmap_feature_names)

    df_test = pd.read_csv(utils.HEATMAP_FEATURE_CSV_TEST_GROUNDTRUTH)

    for wsi_path in test_wsi_paths:
        wsi_name = utils.get_filename_from_path(wsi_path)
        print('extracting features for: %s' % wsi_name)
        heatmap_prob_path = glob.glob(
            os.path.join(utils.HEAT_MAP_DIR,
                         '*%s*%s' % (wsi_name, heatmap_prob_name_postfix_first_model)))
        # print(heatmap_prob_path)
        image_open = wsi_ops.get_image_open(wsi_path)
        prob_img = cv2.imread(heatmap_prob_path[0])

        if heatmap_prob_name_postfix_second_model is not None:
            heatmap_prob_path_second_model = glob.glob(
                os.path.join(utils.HEAT_MAP_DIR,
                             '*%s*%s' % (wsi_name, heatmap_prob_name_postfix_second_model)))
            heatmap_prob_second_model = cv2.imread(heatmap_prob_path_second_model[0])
            for row in range(prob_img.shape[0]):
                for col in range(prob_img.shape[1]):
                    if prob_img[row, col, 0] >= 0.90 * 255 \
                            and heatmap_prob_second_model[row, col, 0] < 0.50 * 255:
                        prob_img[row, col, :] = heatmap_prob_second_model[row, col, :]

        features = extract_features(prob_img, image_open)

        # Look up the ground-truth label for this slide.
        id = wsi_name.split('_')[1]
        id = int(id)
        label = df_test['label'][id - 1]
        if label == 'Tumor':
            label = 1
        else:
            label = 0
        features += [label]
        print(id)
        print(features)
        wr_test.writerow(features)

    feature_json = pd.DataFrame(features)
    feature_json.to_json(utils.FEATURES_TEST)
    print('The test features were saved in: {}'.format(utils.FEATURES_TEST))

def extract_patches_tumor():
    wsi_image_names = glob.glob(
        os.path.join(utils.TRAIN_TUMOR_WSI_PATH, '*.tif'))
    wsi_image_names.sort()
    wsi_mask_names = glob.glob(
        os.path.join(utils.TRAIN_TUMOR_MASK_PATH, '*.tif'))
    wsi_mask_names.sort()
    image_mask_pair = zip(wsi_image_names, wsi_mask_names)
    image_mask_pair = list(image_mask_pair)
    for image_path, mask_path in image_mask_pair:
        extract_patches(image_path,
                        utils.get_filename_from_path(image_path),
                        mask_path)

def open_file(self, e=None):
    ans = filedialog.askopenfilename(parent=self.parent.root)
    if ans:
        State.filename = ans
        self.parent.textarea.delete('1.0', 'end')
        with open(State.filename, 'r', encoding='utf-8') as f:
            try:
                self.parent.textarea.insert('1.0', f.read())
                self.text_modified()
                self.update_title(get_filename_from_path(State.filename))
                State.is_modified = False
                self.parent.textarea.see('1.0')
                self.parent.linenumberingarea.see('1.0')
            except Exception:
                messagebox.showerror(title='Wrong file',
                                     message='You cannot open this file.')

def receive_file_message(self, file_message, sock):
    """Process a received file message.

    Currently only stores it in ./receive/files/file_name.
    """
    # write into file
    filename = utils.get_filename_from_path(file_message.path)
    save_path = os.path.join(config.FILE_SAVE_FOLDER, filename)
    # write to file
    self.write_to_file(save_path, file_message, sock)
    # display
    return DisplayMessage(file_message.sender_icon, file_message.sender_id,
                          file_message.t, 2,
                          'Receive file, saved to {}'.format(save_path))

def fix_plate_readings(inputFile, barcode):
    # def fix_plate_readings(inputFile):
    """
    Given an MS Excel file with a table of readings from a 384-well plate,
    rotates the plate layout by 180 degrees and also produces a tidy
    (vertical) version of the table.

    Keyword arguments:
    :param inputFile: path to MS Excel file (no default)
    :param barcode: plate barcode (no default)
    :return: Table in the original, rotated and tidy (vertical) formats in an
             MS Excel file
    """
    t0 = time.time()

    # Read table from MS Excel file and get file path details
    table = pd.read_excel(io=inputFile, sheetname=0, header=0, index_col=0)
    dir_name, file_name, extless_filename = utils.get_filename_from_path(inputFile)
    print('Read table from file:\n"{}"'.format(file_name))
    print()

    # Rotate table 180 degrees
    rotated_table = utils.rotate_table(df=table)

    # Get barcode from user
    # Added as CLI argument instead
    # print()
    # barcode = (input(Fore.RED + 'What is the plate\'s barcode: '))
    # print(Style.RESET_ALL)

    # Convert to tidy format
    tidy_table = utils.rectangular_to_tidy(df=rotated_table, barcode=barcode)

    # Write to MS Excel file
    file_name = os.path.basename(file_name)
    writer = pd.ExcelWriter(dir_name + '/' + extless_filename + '_final.xlsx')
    tidy_table.to_excel(writer, 'Final table', index=False)
    rotated_table.to_excel(writer, 'Rotated table')
    table.to_excel(writer, 'Original table')
    writer.save()
    print()
    print(Fore.CYAN +
          'Saved final tables to MS Excel file:\n"{}_final.xlsx"'.format(extless_filename))
    print()
    utils.print_elapsed_time(t0)

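# A 180-degree plate rotation maps well A1 to the opposite corner of the plate
# (P24 on a 384-well layout). The helper below is a minimal, hypothetical
# stand-in for utils.rotate_table, assuming the rotation simply reverses the
# values along both axes while keeping the original row/column labels.
import pandas as pd


def rotate_table_180(df):
    """Return the table with its values rotated by 180 degrees."""
    return pd.DataFrame(df.values[::-1, ::-1], index=df.index, columns=df.columns)


# Example: a 2x3 corner of a plate.
table = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'], columns=[1, 2, 3])
print(rotate_table_180(table))
#    1  2  3
# A  6  5  4
# B  3  2  1
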
def extract_cor1(heatmap_prob_name_prefix):
    print('************************** extract_cor1() ***************************')
    test_wsi_paths = glob.glob(os.path.join('/fast/TestSet', '*.tif'))
    test_wsi_paths.sort()
    for wsi_path in test_wsi_paths:
        wsi_name = utils.get_filename_from_path(wsi_path)
        print('extracting features for: %s' % wsi_name)
        heatmap_prob_path = glob.glob(
            os.path.join('./results/second_model/csv_avg',
                         '*%s*%s.png' % (heatmap_prob_name_prefix, wsi_name)))
        heatmap_prob = cv2.imread(heatmap_prob_path[0])

        csv_name = wsi_name[0:9] + '.csv'
        csv_path = os.path.join('./results/second_model/csv_avg', csv_name)
        csv_file = open(csv_path, 'w')
        csv_test = csv.writer(csv_file, quoting=csv.QUOTE_NONNUMERIC)
        get_result(heatmap_prob, csv_test, 7)

def extract_negative_patches_from_normal_wsi(wsi_ops, patch_extractor,
                                             patch_index, augmentation=False):
    """
    Extracted up to Normal_060.

    :param wsi_ops:
    :param patch_extractor:
    :param patch_index:
    :param augmentation:
    :return:
    """
    wsi_paths = glob.glob(os.path.join(utils.TRAIN_NORMAL_WSI_PATH, '*.tif'))
    wsi_paths.sort()
    # wsi_paths = wsi_paths[61:]

    patch_save_dir = utils.PATCHES_VALIDATION_AUG_NEGATIVE_PATH if augmentation \
        else utils.EXTRACTED_PATCHES_NORMAL_PATH
    patch_prefix = utils.PATCH_AUG_NORMAL_PREFIX if augmentation \
        else utils.PATCH_NORMAL_PREFIX

    begin = time.time()
    for image_path in wsi_paths:
        print('extract_negative_patches_from_normal_wsi(): %s' %
              utils.get_filename_from_path(image_path))
        start = time.time()
        wsi_image, rgb_image, level_used = wsi_ops.read_wsi_normal(image_path)
        assert wsi_image is not None, \
            'Failed to read Whole Slide Image %s.' % image_path

        bounding_boxes, _, image_open = wsi_ops.find_roi_bbox(np.array(rgb_image))

        patch_index = patch_extractor.extract_negative_patches_from_normal_wsi(
            wsi_image, image_open, level_used, bounding_boxes,
            patch_save_dir, patch_prefix, patch_index)
        duration = time.time() - start
        print('Negative patches count: %d and duration is %f' %
              (patch_index - utils.PATCH_INDEX_NEGATIVE, duration))

        wsi_image.close()
    end = time.time()
    print('extract normal patches average time is %f' %
          ((end - begin) / (int(patch_index) - 70000)))

    return patch_index

def receive_image_message(self, image_message, sock):
    """Process a received image message.

    Currently only stores it in ./receive/imgs/file_name; may also display it
    on screen in the future.
    """
    # write image into file
    filename = utils.get_filename_from_path(image_message.path)
    save_path = os.path.join(config.IMAGE_SAVE_FOLDER, filename)
    # write to file
    self.write_to_file(save_path, image_message, sock)
    # display
    return DisplayMessage(image_message.sender_icon, image_message.sender_id,
                          image_message.t, 1,
                          'Receive image, saved to {}'.format(save_path),
                          img_path=save_path)

def extract_positive_patches_from_tumor_wsi(wsi_ops, patch_extractor,
                                            patch_index, augmentation=False):
    wsi_paths = glob.glob(os.path.join(utils.TRAIN_TUMOR_WSI_PATH, '*.tif'))
    wsi_paths.sort()
    mask_paths = glob.glob(os.path.join(utils.TRAIN_TUMOR_MASK_PATH, '*.tif'))
    mask_paths.sort()

    image_mask_pair = zip(wsi_paths, mask_paths)
    image_mask_pair = list(image_mask_pair)
    # image_mask_pair = image_mask_pair[67:68]

    patch_save_dir = utils.PATCHES_VALIDATION_AUG_POSITIVE_PATH if augmentation \
        else utils.EXTRACTED_PATCHES_POSITIVE_PATH
    patch_prefix = utils.PATCH_AUG_TUMOR_PREFIX if augmentation \
        else utils.PATCH_TUMOR_PREFIX

    begin = time.time()
    for image_path, mask_path in image_mask_pair:
        print('extract_positive_patches_from_tumor_wsi(): %s' %
              utils.get_filename_from_path(image_path))
        start = time.time()
        wsi_image, rgb_image, _, tumor_gt_mask, level_used = \
            wsi_ops.read_wsi_tumor(image_path, mask_path)
        assert wsi_image is not None, \
            'Failed to read Whole Slide Image %s.' % image_path

        bounding_boxes = wsi_ops.find_roi_bbox_tumor_gt_mask(np.array(tumor_gt_mask))

        patch_index = patch_extractor.extract_positive_patches_from_tumor_region(
            wsi_image, np.array(tumor_gt_mask), level_used, bounding_boxes,
            patch_save_dir, patch_prefix, patch_index)
        duration = time.time() - start
        print('Positive patch count: %d and duration is %f' %
              (patch_index - utils.PATCH_INDEX_POSITIVE, duration))

        wsi_image.close()
    end = time.time()
    print('extract positive patches average time is %f' %
          ((end - begin) / (int(patch_index) - 70000)))

    return patch_index

def _find_attachments_in_email(mesg, expand_attachment, atts):
    # MHTML detection
    if mesg.get_content_maintype() == "multipart" and \
            mesg.get_content_subtype() == "related":
        for part in mesg.walk():
            if part.is_multipart():
                continue
            payload = part.get_payload(decode=True)
            if isinstance(payload, str) and payload.startswith('ActiveMime'):
                return

    for part in mesg.walk():
        content_type = part.get_content_type()
        if part.is_multipart():
            continue
        payload = part.get_payload(decode=True)
        if content_type.startswith('text/') and expand_attachment:
            normalized = payload.lstrip(" \t\r\n")
            if any(normalized.startswith(m) for m in EMAIL_MAGIC):
                new_mesg = email.message_from_string(normalized)
                _find_attachments_in_email(new_mesg, expand_attachment, atts)
                continue
        if content_type in SAFE_MEDIA_TYPE:
            continue
        filename = part.get_filename()
        if filename is None:
            ext = mimetypes.guess_extension(content_type) or ''
            filename = '<unknown>' + ext
        else:
            # Sanitize the header value
            filename = _decode_header(filename)
            filename = utils.get_filename_from_path(filename)
        tempfile_path = utils.store_temp_file(payload, filename)
        atts.append((tempfile_path, filename, content_type))

def extract_patches_from_heatmap_false_region_tumor(wsi_ops, patch_extractor,
                                                    patch_index,
                                                    augmentation=False):
    tumor_heatmap_prob_paths = glob.glob(
        os.path.join(utils.HEAT_MAP_DIR, '*umor*prob.png'))
    tumor_heatmap_prob_paths.sort()
    wsi_paths = glob.glob(os.path.join(utils.TUMOR_WSI_PATH, '*.tif'))
    wsi_paths.sort()
    mask_paths = glob.glob(os.path.join(utils.TUMOR_MASK_PATH, '*.tif'))
    mask_paths.sort()

    assert len(tumor_heatmap_prob_paths) == len(wsi_paths), \
        'Some heatmaps are missing!'

    image_mask_heatmap_tuple = zip(wsi_paths, mask_paths, tumor_heatmap_prob_paths)
    image_mask_heatmap_tuple = list(image_mask_heatmap_tuple)
    # image_mask_heatmap_tuple = image_mask_heatmap_tuple[32:]

    # Delete tumor slides with mirrored (duplicate) regions or incomplete
    # annotation: Tumor_018, Tumor_046, Tumor_054.
    delete_index = [17, 45, 53]
    for i in range(len(delete_index)):
        print('deleting: %s' % utils.get_filename_from_path(
            image_mask_heatmap_tuple[delete_index[i] - i][0]))
        del image_mask_heatmap_tuple[delete_index[i] - i]

    patch_save_dir_pos = utils.PATCHES_TRAIN_AUG_EXCLUDE_MIRROR_WSI_POSITIVE_PATH \
        if augmentation else utils.PATCHES_TRAIN_POSITIVE_PATH
    patch_prefix_pos = utils.PATCH_AUG_TUMOR_PREFIX if augmentation \
        else utils.PATCH_TUMOR_PREFIX
    patch_save_dir_neg = utils.PATCHES_TRAIN_AUG_EXCLUDE_MIRROR_WSI_NEGATIVE_PATH \
        if augmentation else utils.PATCHES_TRAIN_NEGATIVE_PATH
    patch_prefix_neg = utils.PATCH_AUG_NORMAL_PREFIX if augmentation \
        else utils.PATCH_NORMAL_PREFIX

    not_0_255_cnt = 0
    for image_path, mask_path, heatmap_prob_path in image_mask_heatmap_tuple:
        print('extract_patches_from_heatmap_false_region_tumor(): %s, %s, %s' %
              (utils.get_filename_from_path(image_path),
               utils.get_filename_from_path(mask_path),
               utils.get_filename_from_path(heatmap_prob_path)))

        wsi_image, rgb_image, wsi_mask, tumor_gt_mask, level_used = \
            wsi_ops.read_wsi_tumor(image_path, mask_path)
        assert wsi_image is not None, \
            'Failed to read Whole Slide Image %s.' % image_path

        # tumor_gt_mask = cv2.cvtColor(tumor_gt_mask, cv2.COLOR_BGR2GRAY)
        # not_0_255_cnt += (tumor_gt_mask[tumor_gt_mask != 255].shape[0]
        #                   - tumor_gt_mask[tumor_gt_mask == 0].shape[0])
        # print(tumor_gt_mask[tumor_gt_mask != 255].shape[0],
        #       tumor_gt_mask[tumor_gt_mask == 0].shape[0], not_0_255_cnt)

        bounding_boxes, image_open = wsi_ops.find_roi_bbox(np.array(rgb_image))

        heatmap_prob = cv2.imread(heatmap_prob_path)
        heatmap_prob = heatmap_prob[:, :, :1]
        heatmap_prob = np.reshape(heatmap_prob,
                                  (heatmap_prob.shape[0], heatmap_prob.shape[1]))
        heatmap_prob = np.array(heatmap_prob, dtype=np.float32)
        heatmap_prob /= 255

        patch_index = patch_extractor.extract_patches_from_heatmap_false_region_tumor(
            wsi_image, wsi_mask, tumor_gt_mask, image_open, heatmap_prob,
            level_used, bounding_boxes, patch_save_dir_pos, patch_save_dir_neg,
            patch_prefix_pos, patch_prefix_neg, patch_index)
        print('patch count: %d' % (patch_index - utils.PATCH_INDEX_NEGATIVE))

        wsi_image.close()
        wsi_mask.close()

    # print('not_0_255_cnt: %d' % not_0_255_cnt)
    return patch_index

def extract_features_test(heatmap_prob_name_postfix_first_model,
                          heatmap_prob_name_postfix_second_model, f_test):
    print('************************** extract_features_test() ***************************')
    print('heatmap_prob_name_postfix_first_model: %s' % heatmap_prob_name_postfix_first_model)
    print('heatmap_prob_name_postfix_second_model: %s' % heatmap_prob_name_postfix_second_model)
    print('f_test: %s' % f_test)

    test_wsi_paths = glob.glob(
        os.path.join(
            '/media/jiaojiao/Seagate Backup Plus Drive1/CAMELYON16/TrainingData/Train_Tumor',
            '*.tif'))
    # test_wsi_paths = glob.glob(os.path.join(utils.TUMOR_WSI_PATH, '*.tif'))
    test_wsi_paths.sort()

    features_file_test = open(f_test, 'w')
    wr_test = csv.writer(features_file_test, quoting=csv.QUOTE_NONNUMERIC)
    wr_test.writerow(utils.heatmap_feature_names[:len(utils.heatmap_feature_names) - 1])

    for wsi_path in test_wsi_paths:
        wsi_name = utils.get_filename_from_path(wsi_path)
        level_used = OpenSlide(
            os.path.join(
                '/media/jiaojiao/Seagate Backup Plus Drive1/CAMELYON16/TrainingData/Train_Tumor',
                wsi_path)).level_count - 1
        if level_used > 8:
            level_used = 8
        print('extracting features for: %s' % wsi_name)
        heatmap_prob_path = glob.glob(
            os.path.join(utils.HEAT_MAP_DIR,
                         '*%s*%s' % (wsi_name, heatmap_prob_name_postfix_first_model)))
        # print(heatmap_prob_path)
        image_open = wsi_ops.get_image_open(wsi_path)
        heatmap_prob = cv2.imread(heatmap_prob_path[0])

        if heatmap_prob_name_postfix_second_model is not None:
            heatmap_prob_path_second_model = glob.glob(
                os.path.join(utils.HEAT_MAP_DIR,
                             '*%s*%s' % (wsi_name, heatmap_prob_name_postfix_second_model)))
            heatmap_prob_second_model = cv2.imread(heatmap_prob_path_second_model[0])
            for row in range(heatmap_prob.shape[0]):
                for col in range(heatmap_prob.shape[1]):
                    if heatmap_prob[row, col, 0] >= 0.90 * 255 \
                            and heatmap_prob_second_model[row, col, 0] < 0.50 * 255:
                        heatmap_prob[row, col, :] = heatmap_prob_second_model[row, col, :]

        features = extract_features(heatmap_prob, image_open)
        print(features)
        wr_test.writerow(features)

        csv_name = wsi_name[0:9] + '.csv'
        csv_path = os.path.join(utils.results, csv_name)
        csv_file = open(csv_path, 'w')
        csv_test = csv.writer(csv_file, quoting=csv.QUOTE_NONNUMERIC)
        get_result(heatmap_prob, csv_test, level_used)

def extract_features_train_validation(heatmap_prob_name_postfix_first_model,
                                      heatmap_prob_name_postfix_second_model,
                                      f_train, f_validation):
    print('********************** extract_features_train_validation() ********************************')
    print('heatmap_prob_name_postfix_first_model: %s' % heatmap_prob_name_postfix_first_model)
    print('heatmap_prob_name_postfix_second_model: %s' % heatmap_prob_name_postfix_second_model)
    print('f_train: %s' % f_train)
    print('f_validation: %s' % f_validation)

    tumor_wsi_paths = glob.glob(os.path.join(utils.TUMOR_WSI_PATH, '*.tif'))
    tumor_wsi_paths.sort()
    normal_wsi_paths = glob.glob(os.path.join(utils.NORMAL_WSI_PATH, '*.tif'))
    normal_wsi_paths.sort()

    # Hold out a fixed random subset (20 tumor + 30 normal slides) for validation.
    tumor_shuffled_index = list(range(len(tumor_wsi_paths)))
    random.seed(12345)
    random.shuffle(tumor_shuffled_index)

    normal_shuffled_index = list(range(len(tumor_wsi_paths),
                                       len(tumor_wsi_paths) + len(normal_wsi_paths)))
    random.seed(12345)
    random.shuffle(normal_shuffled_index)

    tumor_shuffled_index = tumor_shuffled_index[:20]
    normal_shuffled_index = normal_shuffled_index[:30]
    validation_index = tumor_shuffled_index + normal_shuffled_index
    print('number of validation samples: %d' % len(validation_index))

    wsi_paths = tumor_wsi_paths + normal_wsi_paths
    print(len(wsi_paths))

    features_file_train = open(f_train, 'w')
    features_file_validation = open(f_validation, 'w')
    wr_train = csv.writer(features_file_train, quoting=csv.QUOTE_NONNUMERIC)
    wr_validation = csv.writer(features_file_validation, quoting=csv.QUOTE_NONNUMERIC)
    wr_train.writerow(utils.heatmap_feature_names)
    wr_validation.writerow(utils.heatmap_feature_names)

    index = 0
    for wsi_path in wsi_paths:
        wsi_name = utils.get_filename_from_path(wsi_path)
        heatmap_prob_path = glob.glob(
            os.path.join(utils.HEAT_MAP_DIR,
                         '*%s*%s' % (wsi_name, heatmap_prob_name_postfix_first_model)))
        image_open = wsi_ops.get_image_open(wsi_path)
        heatmap_prob = cv2.imread(heatmap_prob_path[0])

        if heatmap_prob_name_postfix_second_model is not None:
            heatmap_prob_path_second_model = glob.glob(
                os.path.join(utils.HEAT_MAP_DIR,
                             '*%s*%s' % (wsi_name, heatmap_prob_name_postfix_second_model)))
            heatmap_prob_second_model = cv2.imread(heatmap_prob_path_second_model[0])
            for row in range(heatmap_prob.shape[0]):
                for col in range(heatmap_prob.shape[1]):
                    if heatmap_prob[row, col, 0] >= 0.90 * 255 \
                            and heatmap_prob_second_model[row, col, 0] < 0.20 * 255:
                        heatmap_prob[row, col, :] = heatmap_prob_second_model[row, col, :]

        features = extract_features(heatmap_prob, image_open)
        if 'umor' in wsi_name:
            features += [1]
        else:
            features += [0]
        print(features)

        if index in validation_index:
            wr_validation.writerow(features)
        else:
            wr_train.writerow(features)
        index += 1

def get_format(self, path):
    filename = get_filename_from_path(path)
    format_name, _ = filename.rsplit('_', 1)
    return self.format_dict.get(format_name)

def _eval_once(saver, summary_writer, accuracy, summary_op,
               confusion_matrix_op, labels, logits):
    # def _eval_once(saver, summary_writer, accuracy, summary_op,
    #                confusion_matrix_op, logits, labels, dense_labels):
    """Runs Eval once.

    Args:
      saver: Saver. Restores the moving average version of the learned
        variables for eval.
      summary_writer: Summary writer.
      top_1_op: Top 1 op.
      top_5_op: Top 5 op.
      summary_op: Summary op.
    """
    """
    # with tf.Session() as sess:
    #     print(FLAGS.checkpoint_dir)  # set in train
    #     ckpt = None
    #     if CKPT_PATH is not None:
    #         saver.restore(sess, CKPT_PATH)
    #         global_step = CKPT_PATH.split('/')[-1].split('-')[-1]
    #         print('Succesfully loaded model from %s at step=%s.' %
    #               (CKPT_PATH, global_step))
    #     elif ckpt is None:
    #         ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    #         if ckpt and ckpt.model_checkpoint_path:
    #             print(ckpt.model_checkpoint_path)
    #             if os.path.isabs(ckpt.model_checkpoint_path):
    #                 # Restores from checkpoint with absolute path.
    #                 saver.restore(sess, ckpt.model_checkpoint_path)
    #             else:
    #                 # Restores from checkpoint with relative path.
    #                 saver.restore(sess, os.path.join(FLAGS.checkpoint_dir,
    #                                                  ckpt.model_checkpoint_path))
    #
    #             # Assuming model_checkpoint_path looks something like:
    #             #   /my-favorite-path/imagenet_train/model.ckpt-0,
    #             # extract global_step from it.
    #             global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    #             print('Succesfully loaded model from %s at step=%s.' %
    #                   (ckpt.model_checkpoint_path, global_step))
    #         else:
    #             print('No checkpoint file found')
    #             return
    # """
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print("the model path is: %s" % ckpt.model_checkpoint_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("successfully loaded model from path")
            print("global step:")
            print(global_step)
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord,
                                                 daemon=True, start=True))

            num_iter = int(math.ceil(FLAGS.num_examples / BATCH_SIZE))
            # Counts the number of correct predictions.
            total_correct_count = 0
            total_false_positive_count = 0
            total_false_negative_count = 0
            total_true_positive_count = 0
            total_true_negative_count = 0
            total_sample_count = num_iter * BATCH_SIZE
            step = 0
            total_predict_label = []

            print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset))
            start_time = time.time()
            while step < num_iter and not coord.should_stop():
                label, logit, correct_count, confusion_matrix = sess.run(
                    [labels, logits, accuracy, confusion_matrix_op])
                # print("step:%d" % step)
                # print("label:")
                # print(label)
                # print('logit:')
                # print(logit)
                # print('accuracy:')
                # print(correct_count)
                # correct_count, confusion_matrix, logits_v, labels_v, dense_labels_v = \
                #     sess.run([accuracy, confusion_matrix_op, logits, labels, dense_labels])

                # Collect all per-example predictions across batches.
                predict_label = list(correct_count)
                total_predict_label += predict_label
                total_correct_count += np.sum(correct_count)
                # total_false_positive_count += confusion_matrix[1][0]
                # total_false_negative_count += confusion_matrix[0][1]
                # total_true_positive_count += confusion_matrix[1][1]
                # total_true_negative_count += confusion_matrix[0][0]
                print('confusion_matrix:')
                print(confusion_matrix)
                # print('total_false_positive_count: %d' % total_false_positive_count)
                # print('total_false_negative_count: %d' % total_false_negative_count)
                # print('total_true_positive_count: %d' % total_true_positive_count)
                # print('total_true_negative_count: %d' % total_true_negative_count)
                print('correct_count(step=%d): %d / %d' %
                      (step, total_correct_count, BATCH_SIZE * (step + 1)))
                step += 1
                if step % 20 == 0:
                    duration = time.time() - start_time
                    sec_per_batch = duration / 20.0
                    examples_per_sec = BATCH_SIZE / sec_per_batch
                    print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f '
                          'sec/batch)' % (datetime.now(), step, num_iter,
                                          examples_per_sec, sec_per_batch))
                    start_time = time.time()

            # print('total_false_positive_count: %d' % total_false_positive_count)
            # print('total_false_negative_count: %d' % total_false_negative_count)
            # Compute precision @ 1.
            # print('total predict label:')
            # print(total_predict_label)
            wsi_path = glob.glob(os.path.join(utils.TRAIN_TUMOR_WSI_PATH, '*.tif'))
            wsi_path.sort()
            tumor_name = []
            for wsi_name in wsi_path:
                tumor_path = utils.get_filename_from_path(wsi_name).split('.')[0]
                tumor_name.append(tumor_path)
            # print(tumor_name)

            # Per-slide accuracy: the predictions are grouped per WSI by the
            # number of patches extracted for that slide.
            start_number = 0
            for tests in tumor_name:
                patch_name = '*' + tests + '.png'
                patches_paths = glob.glob(os.path.join(
                    'E:/2016/Validation-Set/Extracted_Positive_Patches', patch_name))
                patches_paths.sort()
                patches_number = len(patches_paths)
                print(patch_name)
                end_number = start_number + patches_number
                each_wsi_correct_number = np.sum(
                    total_predict_label[start_number:end_number])
                print('%d/%d' % (each_wsi_correct_number, patches_number))
                print('the correct value is: %.4f' %
                      (each_wsi_correct_number / patches_number))
                start_number += patches_number

            precision = total_correct_count / total_sample_count
            print('%s: precision = %.4f [%d examples]' %
                  (datetime.now(), precision, total_sample_count))

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='Precision', simple_value=precision)
            summary_writer.add_summary(summary, global_step)
        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)

def write_to_file(self):
    with open(State.filename, 'w', encoding='utf-8') as f:
        f.write(self.parent.textarea.get('1.0', 'end-1c'))
    State.is_modified = False
    self.update_title(get_filename_from_path(State.filename))

for i, fn in enumerate(infilenames):
    df = read_csv(fn, sep=',')
    possible_cols = list(df.columns.values)

    if opts['--skipcols'] is not None:
        skipcols = int(opts['--skipcols'])
        dropcols = [x for x in range(skipcols)]
        df = df.drop(df.columns[dropcols], axis=1)

    if nolag:
        dropcols = [c for c in possible_cols if 'lag' in c]
        df = df.drop(dropcols, axis=1)

    df = df.dropna()
    df = df.apply(zscore)

    f = plt.figure(i)
    step = int(df.shape[0] / XTICKS_HEATMAP)
    xticklabels = np.arange(0, df.shape[0], step)
    ax = sns.heatmap(np.transpose(df), cmap='YlGnBu', xticklabels=xticklabels)
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    f.suptitle(get_filename_from_path(fn))
    plt.yticks(rotation='horizontal')
    plt.show()

def __init__(self, formats):
    super(FilenamePrefixFormatSelector, self).__init__(formats)
    self.format_dict = {
        get_filename_from_path(format.path): format
        for format in formats
    }

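# A minimal usage sketch of the selector above, assuming get_format (shown
# earlier) belongs to the same class, that get_filename_from_path returns the
# path's basename, and that format paths have no trailing "_suffix" of their
# own. The Format class and all paths here are hypothetical.
class Format:
    def __init__(self, path, name):
        self.path = path
        self.name = name


formats = [Format('/etc/loader/invoice', 'invoice-v2'),
           Format('/etc/loader/payroll', 'payroll-v1')]
selector = FilenamePrefixFormatSelector(formats)

# basename 'invoice_2023-01.csv' -> prefix before the last '_' is 'invoice',
# which matches the key built from '/etc/loader/invoice'.
fmt = selector.get_format('/data/incoming/invoice_2023-01.csv')
print(fmt.name)  # invoice-v2
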
def set_compliance_config_cli(argv):
    _METHOD_ = 'set_compliance_config.set_compliance_config_cli'
    SHORT_OPTIONS = "c:i:u:f:d:p:h"
    LONG_OPTIONS = ['connectiontype=', 'ip=', 'userid=', 'fixCentral_ibm_id= ',
                    'download_location= ', 'progress_refresh_interval', 'help']

    try:
        opts, args = getopt.getopt(argv, SHORT_OPTIONS, LONG_OPTIONS)
    except getopt.GetoptError as err:
        message = str(err)
        return -1, message

    # The remote machine is a proxy machine that has access to the internet
    # and therefore to FLRT.
    connection_type = ''
    remote_machine_ip = None
    remote_machine_userid = None
    remote_machine_encrypted_password = None

    if args:
        return -1, set_compliance_config_help()

    for o, a in opts:
        if o in ('-h', '--help'):
            return -1, set_compliance_config_help()
        elif o in ('-c', '--connectiontype'):
            connection_type = a
        elif o in ('-i', '--ip'):
            remote_machine_ip = a
        elif o in ('-u', '--userid'):
            remote_machine_userid = a

    if connection_type:
        if connection_type != PPIM_ONLINE and connection_type != PPIM_OFFLINE \
                and connection_type != PPIM_PROXY:
            msg = _("Wrong value for -c option given.\n\n")
            msg += set_compliance_config_help()
            return -2, msg

    rc = 0
    msg = _("Success")

    if connection_type == PPIM_PROXY:
        if remote_machine_ip is None or remote_machine_userid is None:
            msg = _("Missing argument(s): remote machine's ip or userid. Use -i and -u options.\n\n")
            msg += set_compliance_config_help()
            return -3, msg

        # Ask for password and read it
        remote_machine_password = getpass.getpass(_("Remote machine's password:"******"Please input a valid password and retry the command.")
            return -4, error_message

        remote_machine_encrypted_password = persistent_mgr.encryptData(
            remote_machine_password)

    # Set details in the config file
    rc, msg = update_mgr.set_compliance_config(
        connection_type, remote_machine_ip, remote_machine_userid,
        remote_machine_encrypted_password)

    if rc == 0 and connection_type == PPIM_PROXY:
        # Create package if not already there
        rc, local_path = update_mgr.get_offline_flrt_tarfile()
        if rc == 0:
            # Copy the package to the remote/online machine
            pkg_name = utils.get_filename_from_path(local_path)
            remote_path = "/tmp/" + pkg_name
            rc = utils.copyFileToRemoteMachine(remote_machine_ip,
                                               remote_machine_userid,
                                               remote_machine_password,
                                               local_path, remote_path)
            if rc == 0:
                # Run the script on the remote machine; it generates the
                # stack definitions file.
                rc = update_mgr.generate_stack_definition_remotely(
                    remote_machine_ip, remote_machine_userid,
                    remote_machine_password, remote_path)
                if rc == 0:
                    # Copy the generated stack definitions file from the
                    # remote machine to puremgrVM.
                    rc = utils.copyFileFromRemoteMachine(
                        remote_machine_ip, remote_machine_userid,
                        remote_machine_password,
                        update_mgr.STACK_DEFINITIONS_PROXY_TMP_FILE,
                        update_mgr.STACK_DEFINITIONS_TMP_FILE)
                    utils.change_file_ownership_and_permission(
                        update_mgr.STACK_DEFINITIONS_PROXY_TMP_FILE)
                    if rc == 0:
                        rc, msg = update_mgr.backup_and_overwrite_stack_definition_file(
                            update_mgr.STACK_DEFINITIONS_PROXY_TMP_FILE)
                        if rc == 0:
                            msg = _('Stack definition generated successfully in the remote machine')
                    else:
                        msg = _('Could not copy remote file to local machine')
                else:
                    msg = _('Stack definition file could not be generated in the remote machine')
            else:
                msg = _('Could not copy file to remote machine')
        else:
            msg = _('Tar file is not generated successfully')
    elif rc == 0 and connection_type == PPIM_OFFLINE:
        # Create package if not already there
        rc1, local_path = update_mgr.get_offline_flrt_tarfile()
        if rc1 != 0:
            logging.warning("%s:: Tar file is not generated successfully",
                            _METHOD_)

    return rc, msg

def save_as_file(self, e=None):
    ans = filedialog.asksaveasfilename(
        parent=self.parent,
        defaultextension='.txt',
        filetypes=CONFIG['DEFAULT_FILETYPES'],
        initialfile=get_filename_from_path(State.filename))
    if ans:
        State.filename = ans
        self.write_to_file()