def load_npy(self, data_path, session, ignore_missing=False):
    '''Load network weights into the current graph.

    data_path: path to the serialized weights; ``.npz`` files are handed to
        tensorpack's ``get_model_loader``, ``.npy`` files are treated as a
        pickled dict of ``{op_name: {param_name: ndarray}}``.
    session: the current TensorFlow session the assignments run in.
    ignore_missing: if True, weights for variables that do not exist in the
        graph are silently skipped instead of raising.
    '''
    # .npz checkpoints: delegate entirely to tensorpack's loader.
    if data_path.endswith('.npz'):
        get_model_loader(data_path).init(session)
        return
    if data_path.endswith('.npy'):
        # NOTE(review): on NumPy >= 1.16.3 this needs allow_pickle=True —
        # confirm the pinned numpy version before upgrading.
        data_dict = np.load(data_path, encoding='latin1').item()
        for op_name in data_dict:
            # reuse=True: variables must already exist in the graph;
            # we only assign into them, never create them here.
            with tf.variable_scope(op_name, reuse=True):
                for param_name, data in data_dict[op_name].items():
                    try:
                        if 'bn' in op_name:
                            # presumably remaps serialized batch-norm param
                            # names to graph names — verify BN_param_map keys
                            param_name = BN_param_map[param_name]
                        var = tf.get_variable(param_name)
                        print('loading {}/{}'.format(op_name, param_name))
                        session.run(var.assign(data))
                    except ValueError:
                        # raised by tf.get_variable when the variable is
                        # absent (or shape-mismatched) in the graph
                        if not ignore_missing:
                            raise
def run(self):
    """Run offline inference over every normalization-target directory and
    save one ``.mat`` result per input image.

    Reads images from ``inf_norm_root_dir/<code>/``, writes predictions to
    ``inf_output_dir/<code>/<basename>.mat`` under the key ``'result'``.
    """
    model_path = self.inf_model_path
    # model_mode selects which network architecture to instantiate
    MODEL_MAKER = Model_NP_XY if self.model_mode == 'np+xy' else Model_NP_DIST
    pred_config = PredictConfig(
        model=MODEL_MAKER(),
        session_init=get_model_loader(model_path),
        input_names=self.eval_inf_input_tensor_names,
        output_names=self.eval_inf_output_tensor_names)
    predictor = OfflinePredictor(pred_config)
    for norm_target in self.inf_norm_codes:
        norm_dir = '%s/%s/' % (self.inf_norm_root_dir, norm_target)
        norm_save_dir = '%s/%s/' % (self.inf_output_dir, norm_target)
        # TODO: cache list to check later norm dir has same number of files
        file_list = glob.glob('%s/*%s' % (norm_dir, self.inf_imgs_ext))
        file_list.sort()  # ensure same order
        # recreate the output dir from scratch for this target
        rm_n_mkdir(norm_save_dir)
        for filename in file_list:
            filename = os.path.basename(filename)
            basename = filename.split('.')[0]
            print(basename, norm_target, end=' ', flush=True)
            ##
            # cv2 loads BGR; the network expects RGB
            img = cv2.imread(norm_dir + filename)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            ##
            pred_map = self.__gen_prediction(img, predictor)
            sio.savemat('%s/%s.mat' % (norm_save_dir, basename),
                        {'result': [pred_map]})
        print('FINISH')
def get_predictor(cls):
    '''Lazily build and cache the Mask R-CNN OfflinePredictor.

    Thread-safe via ``cls.lock``: the first caller pays the graph-building
    cost, later callers return the cached predictor.
    '''
    with cls.lock:
        # check if model is already loaded
        if cls.predictor:
            return cls.predictor
        os.environ['TENSORPACK_FP16'] = 'true'
        # create a mask r-cnn model
        mask_rcnn_model = ResNetFPNModel(True)
        try:
            model_dir = os.environ['SM_MODEL_DIR']
        except KeyError:
            model_dir = '/opt/ml/model'
        try:
            cls.pretrained_model = os.environ['PRETRAINED_MODEL']
        except KeyError:
            pass

        def _ckpt_step(index_path):
            # "model-12345.index" -> 12345; unparsable names sort first
            stem = os.path.basename(index_path)[len('model-'):-len('.index')]
            try:
                return int(stem)
            except ValueError:
                return -1

        # file path to previously trained mask r-cnn model.
        # BUG FIX: the old code picked the "latest" checkpoint with a plain
        # string comparison, which is lexicographic ("model-999" compares
        # greater than "model-1000"); compare by numeric training step.
        model_search_path = os.path.join(model_dir, "model-*.index")
        trained_model = max(glob.glob(model_search_path),
                            key=_ckpt_step, default="")
        print(f'Using model: {trained_model}')
        # fixed resnet50 backbone weights
        cfg.BACKBONE.WEIGHTS = os.path.join(cls.pretrained_model)
        cfg.MODE_FPN = True
        cfg.MODE_MASK = True
        # calling detection dataset gets the number of coco categories
        # and saves in the configuration
        DetectionDataset()
        finalize_configs(is_training=False)
        # Create an inference model
        # PredictConfig takes a model, input tensors and output tensors
        cls.predictor = OfflinePredictor(PredictConfig(
            model=mask_rcnn_model,
            session_init=get_model_loader(trained_model),
            input_names=['images', 'orig_image_dims'],
            output_names=[
                'generate_{}_proposals_topk_per_image/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
                'generate_{}_proposals_topk_per_image/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
                'fastrcnn_all_scores',
                'output/boxes',
                'output/scores',
                'output/labels',
                'output/masks'
            ]))
        return cls.predictor
def load_model(self):
    """Construct the network and wrap it in a checkpoint-initialized
    tensorpack OfflinePredictor, stored on ``self.predictor``."""
    print('Loading Model...')
    constructor = self.get_model()
    net = constructor(self.nr_types, self.input_shape,
                      self.mask_shape, self.input_norm)
    config = PredictConfig(
        model=net,
        session_init=get_model_loader(self.model_path),
        input_names=self.input_tensor_names,
        output_names=self.output_tensor_names,
    )
    self.predictor = OfflinePredictor(config)
def get_model(game):
    """J.Madge 23.04.2018, 'get_model'.

    :param game: Instance of the 'Game' class containing information about
        the actions in the game and the location of the associated model.
    :return: A model representing the Atari agents pre-trained to play the
        specified game.
    """
    config = PredictConfig(
        model=Model(game.actions),
        session_init=get_model_loader(game.model),
        input_names=['state'],
        output_names=['policy'])
    return OfflinePredictor(config)
def load_model(self):
    """
    Loads the model and checkpoints according to the model stated in config.py
    """
    print('Loading Model...')
    make_model = self.get_model()
    self.predictor = OfflinePredictor(PredictConfig(
        model=make_model(self.nr_types, self.input_shape,
                         self.mask_shape, self.input_norm),
        session_init=get_model_loader(self.model_path),
        input_names=self.input_tensor_names,
        output_names=self.output_tensor_names))
def run(self):
    """Run offline inference over every configured data directory and save
    one ``.mat`` prediction per image.

    The checkpoint is either auto-selected via ``get_best_chkpts`` (when
    ``inf_auto_find_chkpt`` is set) or taken from ``inf_model_path``.
    """
    if self.inf_auto_find_chkpt:
        print('-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison' % \
            (self.inf_auto_metric, self.inf_auto_comparator))
        model_path, stat = get_best_chkpts(self.save_dir, self.inf_auto_metric, self.inf_auto_comparator)
        print('Selecting: %s' % model_path)
        print('Having Following Statistics:')
        for key, value in stat.items():
            print('\t%s: %s' % (key, value))
    else:
        model_path = self.inf_model_path
    model_constructor = self.get_model()
    pred_config = PredictConfig(
        model=model_constructor(),
        session_init=get_model_loader(model_path),
        input_names=self.eval_inf_input_tensor_names,
        output_names=self.eval_inf_output_tensor_names)
    predictor = OfflinePredictor(pred_config)
    # each entry: [root_dir, output_code, subdir, subdir, ...]
    for data_dir_set in self.inf_data_list:
        data_root_dir = data_dir_set[0]
        data_out_code = data_dir_set[1]
        for subdir in data_dir_set[2:]:
            data_dir = '%s/%s/' % (data_root_dir, subdir)
            save_dir = '%s/%s/%s' % (self.inf_output_dir, data_out_code, subdir)
            file_list = glob.glob('%s/*%s' % (data_dir, self.inf_imgs_ext))
            file_list.sort()  # ensure same order
            rm_n_mkdir(save_dir)  # recreate output dir from scratch
            for filename in file_list:
                filename = os.path.basename(filename)
                basename = filename.split('.')[0]
                print(data_dir, basename, end=' ', flush=True)
                ##
                # cv2 loads BGR; convert to RGB for the network
                img = cv2.imread(data_dir + filename)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                ##
                pred_map = self.__gen_prediction(img, predictor)
                sio.savemat('%s/%s.mat' % (save_dir, basename),
                            {'result': [pred_map]})
            print('FINISH')
def load_model(self):
    """Loads the model and checkpoints"""
    print("Loading Model...")
    build_model = self.get_model()
    net = build_model(
        self.nr_types,
        self.patch_input_shape,
        self.patch_output_shape,
        self.input_norm,
    )
    self.predictor = OfflinePredictor(
        PredictConfig(
            model=net,
            session_init=get_model_loader(self.model_path),
            input_names=self.input_tensor_names,
            output_names=self.output_tensor_names,
        )
    )
def run(self, save_only):
    """Run inference over the configured data directories, or — when
    ``save_only`` is truthy — just export the model (compact .pb plus a
    TF-serving bundle) and return.
    """
    if self.inf_auto_find_chkpt:
        # pick the numerically-largest subdirectory of save_dir as the
        # latest training run, then the best checkpoint inside it
        self.inf_model_path = os.path.join(self.save_dir, str(max([int(x) for x in [name for name in os.listdir(self.save_dir) if os.path.isdir(os.path.join(self.save_dir, name))]])))
        print(f"Inference model path: <{self.inf_model_path}>")
        print('-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison' % \
            (self.inf_auto_metric, self.inf_auto_comparator))
        model_path, stat = get_best_chkpts(self.inf_model_path, self.inf_auto_metric, self.inf_auto_comparator)
        print('Selecting: %s' % model_path)
        print('Having Following Statistics:')
        for key, value in stat.items():
            print('\t%s: %s' % (key, value))
    else:
        model_path = self.inf_model_path
    model_constructor = self.get_model()
    pred_config = PredictConfig(
        model = model_constructor(),
        session_init = get_model_loader(model_path),
        input_names = self.eval_inf_input_tensor_names,
        output_names = self.eval_inf_output_tensor_names)
    predictor = OfflinePredictor(pred_config)
    if save_only:
        # export instead of predicting
        exporter = ModelExporter(pred_config)
        rm_n_mkdir(self.model_export_dir)
        print('{}/compact.pb'.format(self.model_export_dir))
        exporter.export_compact(filename='{}/compact.pb'.format(self.model_export_dir))
        exporter.export_serving(os.path.join(self.model_export_dir, 'serving'), signature_name='serving_default')
        return
    # one numbered output folder per input directory
    for num, data_dir in enumerate(self.inf_data_list):
        save_dir = os.path.join(self.inf_output_dir, str(num))
        file_list = glob.glob(os.path.join(data_dir, '*{}'.format(self.inf_imgs_ext)))
        file_list.sort()  # ensure same order
        rm_n_mkdir(save_dir)
        for filename in file_list:
            filename = os.path.basename(filename)
            basename = filename.split('.')[0]
            print(data_dir, basename, end=' ', flush=True)
            ##
            # cv2 loads BGR; convert to RGB for the network
            img = cv2.imread(os.path.join(data_dir, filename))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            ##
            pred_map = self.__gen_prediction(img, predictor)
            sio.savemat(os.path.join(save_dir, '{}.mat'.format(basename)), {'result':[pred_map]})
        print(f"Finished. {datetime.now().strftime('%H:%M:%S.%f')}")
def get_predictor(cls):
    ''' Lazily build and cache the Mask R-CNN OfflinePredictor.

    Thread-safe via ``cls.lock``; later callers get the cached predictor.
    '''
    with cls.lock:
        # check if model is already loaded
        if cls.predictor:
            return cls.predictor
        # create a mask r-cnn model
        mask_rcnn_model = ResNetFPNModel()
        try:
            model_dir = os.environ['SM_MODEL_DIR']
        except KeyError:
            model_dir = '/opt/ml/model'
        try:
            cls.pretrained_model = os.environ['PRETRAINED_MODEL']
        except KeyError:
            pass

        def _ckpt_step(index_path):
            # "model-12345.index" -> 12345; unparsable names sort first
            stem = os.path.basename(index_path)[len('model-'):-len('.index')]
            try:
                return int(stem)
            except ValueError:
                return -1

        # file path to previously trained mask r-cnn model.
        # BUG FIX: the old code picked the "latest" checkpoint with a plain
        # string comparison, which is lexicographic ("model-999" compares
        # greater than "model-1000"); compare by numeric training step.
        model_search_path = os.path.join(model_dir, "model-*.index")
        latest_trained_model = max(glob.glob(model_search_path),
                                   key=_ckpt_step, default="")
        trained_model = latest_trained_model[:-6]  # strip ".index" suffix
        print(f'Using model: {trained_model}')
        # fixed resnet50 backbone weights
        cfg.BACKBONE.WEIGHTS = os.path.join(cls.pretrained_model)
        cfg.MODE_FPN = True
        cfg.MODE_MASK = True
        cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
        finalize_configs(is_training=False)
        # Create an inference model
        # PredictConfig takes a model, input tensors and output tensors
        input_tensors = mask_rcnn_model.get_inference_tensor_names()[0]
        output_tensors = mask_rcnn_model.get_inference_tensor_names()[1]
        cls.predictor = OfflinePredictor(
            PredictConfig(model=mask_rcnn_model,
                          session_init=get_model_loader(trained_model),
                          input_names=input_tensors,
                          output_names=output_tensors))
        return cls.predictor
def run(self):
    """Run offline inference over every configured data directory, saving
    raw prediction maps as ``.npy`` files and printing per-image timing.
    """
    if self.inf_auto_find_chkpt:
        print(
            '-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison'
            % (self.inf_auto_metric, self.inf_auto_comparator))
        model_path, stat = get_best_chkpts(self.save_dir, self.inf_auto_metric, self.inf_auto_comparator)
        print("Selecting: %s" % model_path)
        print("Having Following Statistics:")
        for key, value in stat.items():
            print("\t%s: %s" % (key, value))
    else:
        model_path = self.inf_model_path
    model_constructor = self.get_model()
    pred_config = PredictConfig(
        model=model_constructor(),
        session_init=get_model_loader(model_path),
        input_names=self.eval_inf_input_tensor_names,
        output_names=self.eval_inf_output_tensor_names,
        create_graph=False,
    )
    predictor = OfflinePredictor(pred_config)
    for data_dir in self.inf_data_list:
        # all raw outputs land in a single "raw" folder
        save_dir = self.inf_output_dir + "/raw/"
        file_list = glob.glob("%s/*%s" % (data_dir, self.inf_imgs_ext))
        file_list.sort()  # ensure same order
        rm_n_mkdir(save_dir)
        for filename in file_list:
            start = time.time()
            filename = os.path.basename(filename)
            basename = filename.split(".")[0]
            print(data_dir, basename, end=" ", flush=True)
            ##
            # cv2 loads BGR; convert to RGB for the network
            img = cv2.imread(data_dir + filename)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            pred_map = self.__gen_prediction(img, predictor)
            np.save("%s/%s.npy" % (save_dir, basename), [pred_map])
            end = time.time()
            diff = str(round(end - start, 2))
            print("FINISH. TIME: %s" % diff)
def get_predictor(cls):
    """Lazily build and cache the Mask R-CNN OfflinePredictor.

    Thread-safe via ``cls.lock``; config is assembled from environment
    variables (``SM_MODEL_DIR``, ``RESNET_ARCH``, ``CONFIG__*`` overrides).
    """
    with cls.lock:
        # check if model is already loaded
        if cls.predictor:
            return cls.predictor
        # create a mask r-cnn model
        mask_rcnn_model = ResNetFPNModel()
        try:
            model_dir = os.environ["SM_MODEL_DIR"]
        except KeyError:
            model_dir = "/opt/ml/model"
        try:
            resnet_arch = os.environ["RESNET_ARCH"]
        except KeyError:
            resnet_arch = "resnet50"

        def _ckpt_step(index_path):
            # "model-12345.index" -> 12345; unparsable names sort first
            stem = os.path.basename(index_path)[len("model-"):-len(".index")]
            try:
                return int(stem)
            except ValueError:
                return -1

        # file path to previously trained mask r-cnn model.
        # BUG FIX: the old code picked the "latest" checkpoint with a plain
        # string comparison, which is lexicographic ("model-999" compares
        # greater than "model-1000"); compare by numeric training step.
        model_search_path = os.path.join(model_dir, "model-*.index")
        latest_trained_model = max(glob.glob(model_search_path),
                                   key=_ckpt_step, default="")
        trained_model = latest_trained_model[:-6]  # strip ".index" suffix
        print(f"Using model: {trained_model}")
        cfg.MODE_FPN = True
        cfg.MODE_MASK = True
        if resnet_arch == "resnet101":
            cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 23, 3]
        else:
            cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
        # apply CONFIG__A__B=value overrides from the environment
        cfg_prefix = "CONFIG__"
        for key, value in dict(os.environ).items():
            if key.startswith(cfg_prefix):
                attr_name = key[len(cfg_prefix):]
                attr_name = attr_name.replace("__", ".")
                # NOTE(security): eval() on an environment variable executes
                # arbitrary code; acceptable only if the container env is
                # fully trusted — consider ast.literal_eval instead.
                value = eval(value)
                print(f"update config: {attr_name}={value}")
                nested_var = cfg
                attr_list = attr_name.split(".")
                for attr in attr_list[0:-1]:
                    nested_var = getattr(nested_var, attr)
                setattr(nested_var, attr_list[-1], value)
        cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
        cfg.DATA.BASEDIR = "/data"
        cfg.DATA.TRAIN = "coco_train2017"
        cfg.DATA.VAL = "coco_val2017"
        register_coco(cfg.DATA.BASEDIR)
        finalize_configs(is_training=False)
        # Create an inference model
        # PredictConfig takes a model, input tensors and output tensors
        input_tensors = mask_rcnn_model.get_inference_tensor_names()[0]
        output_tensors = mask_rcnn_model.get_inference_tensor_names()[1]
        cls.predictor = OfflinePredictor(
            PredictConfig(
                model=mask_rcnn_model,
                session_init=get_model_loader(trained_model),
                input_names=input_tensors,
                output_names=output_tensors,
            )
        )
        return cls.predictor
# Parse the DoReFa quantization spec "W,A,G" (bit widths) from the CLI.
dorefa = args.dorefa.split(',')
if dorefa[0] == 't':
    # 't' selects ternary weights; activations/gradients must stay at 32 bits
    assert dorefa[1] == '32' and dorefa[2] == '32'
    BITW, BITA, BITG = 't', 32, 32
else:
    BITW, BITA, BITG = map(int, dorefa)
if args.gpu:
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.run:
    # single-image inference from an .npz weight dump, then exit
    assert args.load.endswith('.npz')
    run_image(Model(), DictRestore(dict(np.load(args.load))), args.run)
    sys.exit()
if args.eval:
    # evaluation-only mode on the validation set, then exit
    BATCH_SIZE = 128
    ds = get_data('val')
    eval_on_ILSVRC12(Model(), get_model_loader(args.load), ds)
    sys.exit()
# training mode: split the total batch across available GPUs
nr_tower = max(get_num_gpu(), 1)
BATCH_SIZE = TOTAL_BATCH_SIZE // nr_tower
logger.set_logger_dir(
    os.path.join('train_log', 'alexnet-dorefa-{}'.format(args.dorefa)))
logger.info("Batch per tower: {}".format(BATCH_SIZE))
config = get_config()
if args.load:
    config.session_init = SaverRestore(args.load)
launch_train_with_config(config, SyncMultiGPUTrainerReplicated(nr_tower))
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
logger.auto_set_dir()
dataset_train = get_data(args.data, args.task, 'train')
dataset_test = get_data(args.data, args.task, 'test')
# TensorPack: Training configuration
config = TrainConfig(
    # task '1' trains the custom model from scratch; other tasks fine-tune VGG
    model=YourModel() if args.task == '1' else VGGModel(),
    dataflow=dataset_train,
    callbacks=[
        # Callbacks are performed at the end of every epoch.
        #
        # For instance, we can save the current model
        ModelSaver(),
        # Evaluate the current model and print out the loss
        InferenceRunner(dataset_test,
                        [ScalarStats('cost'), ClassificationError()])
        #
        # You can put other callbacks here to change hyperparameters,
        # etc...
        #
    ],
    max_epoch=hp.num_epochs,
    nr_tower=max(get_nr_gpu(), 1),
    # fine-tuning tasks start from pre-trained weights
    session_init=None if args.task == '1' else get_model_loader(args.load))
# TensorPack: Training with simple one at a time feed into batches
SimpleTrainer(config).train()
predict_tower = list(range(nr_gpu))[-nr_gpu // 2:] else: predict_tower = [0] PREDICTOR_THREAD = len(predict_tower) * PREDICTOR_THREAD_PER_GPU train_tower = list(range(nr_gpu))[:-nr_gpu // 2] or [0] logger.info("[Batch-A3C] Train on gpu {} and infer on gpu {}".format( ','.join(map(str, train_tower)), ','.join(map(str, predict_tower)))) from drlutils.train.multigpu import MultiGPUTrainer trainer = MultiGPUTrainer config = get_config() if os.path.exists(logger.LOG_DIR + '/checkpoint'): from tensorpack.tfutils.sessinit import SaverRestore config.session_init = SaverRestore(logger.LOG_DIR + '/checkpoint') elif args['--load']: config.session_init = get_model_loader(args['--load']) config.tower = train_tower config.predict_tower = predict_tower trainer(config).train() import sys sys.exit(0) elif args['dataserver']: import os os.system('killall -9 torcs-bin > /dev/null 2>&1') from drlutils.dataflow.server import DataFlowServer from ad_cur.autodrive.agent.pool import AgentPool, AgentPoolFake clsPool = AgentPoolFake if args['--fake_agent'] else AgentPool try: ds = DataFlowServer( AgentPoolFake if args['--fake_agent'] else AgentPool,
if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model', help='model file') parser.add_argument('--meta', help='metagraph proto file. Will be used to load the graph', required=True) parser.add_argument('-i', '--input', nargs='+', help='list of input tensors with their shapes.') parser.add_argument('-o', '--output', nargs='+', help='list of output tensors') parser.add_argument('--warmup', help='warmup iterations', type=int, default=5) parser.add_argument('--print-flops', action='store_true') parser.add_argument('--print-params', action='store_true') parser.add_argument('--print-timing', action='store_true') args = parser.parse_args() tf.train.import_meta_graph(args.meta) G = tf.get_default_graph() with tf.Session(config=get_default_sess_config()) as sess: init = get_model_loader(args.model) init.init(sess) feed = {} for inp in args.input: inp = inp.split('=') name = get_op_tensor_name(inp[0].strip())[1] shape = list(map(int, inp[1].strip().split(','))) tensor = G.get_tensor_by_name(name) logger.info("Feeding shape ({}) to tensor {}".format(','.join(map(str, shape)), name)) feed[tensor] = np.random.rand(*shape) fetches = [] for name in args.output: name = get_op_tensor_name(name)[1] fetches.append(G.get_tensor_by_name(name))
def run(self, data_dir, output_dir, model_path, img_ext='.png'):
    """Run inference over a MoNuSAC-style directory tree and write, for each
    sub-image, one per-class n-ary instance mask (``maskorempty.mat``).

    Expected layout: ``data_dir/<patient>/<image><img_ext>``; outputs mirror
    the patient/image structure under ``output_dir``.

    NOTE(review): side effect — this method calls ``os.chdir(output_dir)``
    and creates directories with relative paths afterwards.
    """
    if (not data_dir):
        print('Using Config file path for data_dir.')
        data_dir = self.inf_data_dir
    if (not output_dir):
        print('Using Config file path for output_dir.')
        output_dir = self.inf_output_dir
    if (not model_path):
        print('Using placeholder path for model_dir.')
        model_path = '/home/dm1/shikhar/hover_net_modified/v2_multitask/np_hv/07/model-35854.index'
    if (not img_ext):
        print('Using Config img ext value img_ext.')
        img_ext = self.inf_imgs_ext
    model_constructor = self.get_model()
    pred_config = PredictConfig(
        model=model_constructor(),
        session_init=get_model_loader(model_path),
        input_names=self.eval_inf_input_tensor_names,
        output_names=self.eval_inf_output_tensor_names)
    predictor = OfflinePredictor(pred_config)
    #file_list = glob.glob('%s/*%s' % (data_dir, img_ext))
    #file_list.sort() # ensure same order
    #if(not file_list):
    #    print('No Images found in data_dir! Check script arg-paths')
    # Create Output Directory
    #rm_n_mkdir(output_dir)
    # Expecting MoNuSAC's input data directory tree (Patient Name -> Image Name -> )
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    os.chdir(output_dir)
    patients = [x[0] for x in os.walk(data_dir)]  #Total patients in the data_path
    print(len(patients))
    for patient_loc in patients:
        patient_name = patient_loc[len(data_dir) + 1:]  #Patient name
        print(patient_name, flush=True)
        ## To make patient's name directory in the destination folder
        try:
            os.mkdir(patient_name)
        except OSError:
            print("\n Creation of the patient's directory %s failed" % patient_name, flush=True)
        sub_images = glob(str(patient_loc) + '/*' + str(img_ext))
        for sub_image_loc in sub_images:
            sub_image_name = sub_image_loc[len(data_dir) + len(patient_name) + 1:-4]
            print(sub_image_name)
            ## To make sub_image directory under the patient's folder
            sub_image = './' + patient_name + sub_image_name  #Destination path
            try:
                os.mkdir(sub_image)
            except OSError:
                print("\n Creation of the patient's directory %s failed" % sub_image)
            image_name = sub_image_loc
            if (img_ext == '.svs'):
                # whole-slide image: rasterize level 0 to PNG first
                img = openslide.OpenSlide(image_name)
                cv2.imwrite(
                    sub_image_loc[:-4] + '.png',
                    np.array(img.read_region((0, 0), 0, img.level_dimensions[0])))
                img = cv2.imread(sub_image_loc[:-4] + '.png')
            else:
                img = cv2.imread(image_name)
            # cv2 loads BGR; the network expects RGB
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            ## Generate Prediction Map
            pred_map = self.__gen_prediction(img, predictor)
            pred = pred_map
            # Process Prediction Map: channels [0, nr_types) are per-type
            # scores, the rest feed instance segmentation
            pred_inst = pred[..., self.nr_types:]
            pred_type = pred[..., :self.nr_types]
            pred_inst = np.squeeze(pred_inst)
            pred_type = np.argmax(pred_type, axis=-1)
            # marker_mode / energy_mode are module-level settings — TODO confirm
            pred_inst = postproc.hover.proc_np_hv(pred_inst,
                                                  marker_mode=marker_mode,
                                                  energy_mode=energy_mode, rgb=img)
            pred_inst = remap_label(pred_inst, by_size=True)
            # Map Instances to Labels for creating submission format
            pred_id_list = list(np.unique(pred_inst))[1:]  # exclude background ID
            pred_inst_type = np.full(len(pred_id_list), 0, dtype=np.int32)
            for idx, inst_id in enumerate(pred_id_list):
                # majority vote of the per-pixel type inside the instance
                inst_type = pred_type[pred_inst == inst_id]
                type_list, type_pixels = np.unique(inst_type, return_counts=True)
                type_list = list(zip(type_list, type_pixels))
                type_list = sorted(type_list, key=lambda x: x[1], reverse=True)
                inst_type = type_list[0][0]
                if inst_type == 0:  # ! pick the 2nd most dominant if exist
                    if len(type_list) > 1:
                        inst_type = type_list[1][0]
                    else:
                        print('[Warn] Instance has `background` type')
                pred_inst_type[idx] = inst_type
            # Write Instance Maps based on their Classes/Labels to the folders
            for class_id in range(1, self.nr_types):
                separated_inst = pred_inst.copy()
                # zero out instances whose voted type is not this class
                separated_inst[pred_inst_type[separated_inst - 1] != [class_id]] = 0
                # Create directory for each label
                label = class_id_mapping[class_id]
                sub_path = sub_image + '/' + label
                try:
                    os.mkdir(sub_path)
                except OSError:
                    print("Creation of the directory %s failed" % label)
                else:
                    print("Successfully created the directory %s " % label)
                sio.savemat(sub_path + '/maskorempty.mat',
                            {'n_ary_mask': separated_inst})
else: predict_tower = [0] PREDICTOR_THREAD = len(predict_tower) * PREDICTOR_THREAD_PER_GPU train_tower = list(range(nr_gpu))[:-nr_gpu // 2] or [0] logger.info("[Batch-A3C] Train on gpu {} and infer on gpu {}".format( ','.join(map(str, train_tower)), ','.join(map(str, predict_tower)))) # if len(train_tower) > 1: # trainer = AsyncMultiGPUTrainer from autodrive.trainer.base import MyMultiGPUTrainer trainer = MyMultiGPUTrainer config = get_config() if os.path.exists(logger.LOG_DIR + '/checkpoint'): from tensorpack.tfutils.sessinit import SaverRestore config.session_init = SaverRestore(logger.LOG_DIR + '/checkpoint') elif args['--load']: config.session_init = get_model_loader(args['--load']) config.tower = train_tower config.predict_tower = predict_tower trainer(config).train() elif args['infer']: assert args['--load'] is not None from tensorpack.predict.config import PredictConfig from tensorpack.tfutils.sessinit import get_model_loader cfg = PredictConfig( model=Model(), session_init=get_model_loader(args['--load']), input_names=['state'], output_names=['policy']) if args['--target'] == 'play':
# Parse the DoReFa quantization spec "W,A,G" (bit widths) from the CLI.
dorefa = args.dorefa.split(',')
if dorefa[0] == 't':
    # 't' selects ternary weights; activations/gradients must stay at 32 bits
    assert dorefa[1] == '32' and dorefa[2] == '32'
    BITW, BITA, BITG = 't', 32, 32
else:
    BITW, BITA, BITG = map(int, dorefa)
if args.gpu:
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.run:
    # single-image inference from an .npz weight dump, then exit
    assert args.load.endswith('.npz')
    run_image(Model(), DictRestore(dict(np.load(args.load))), args.run)
    sys.exit()
if args.eval:
    # evaluation-only mode on the validation set, then exit
    BATCH_SIZE = 128
    ds = get_data('val')
    eval_classification(Model(), get_model_loader(args.load), ds)
    sys.exit()
# training mode: split the total batch across available GPUs
nr_tower = max(get_num_gpu(), 1)
BATCH_SIZE = TOTAL_BATCH_SIZE // nr_tower
logger.set_logger_dir(
    os.path.join('train_log', 'alexnet-dorefa-{}'.format(args.dorefa)))
logger.info("Batch per tower: {}".format(BATCH_SIZE))
config = get_config()
if args.load:
    config.session_init = SaverRestore(args.load)
launch_train_with_config(config, SyncMultiGPUTrainerReplicated(nr_tower))
def get_predictor(cls):
    """Lazily build and cache the Mask R-CNN OfflinePredictor.

    Thread-safe via ``cls.lock``; architecture and config overrides come
    from environment variables (``RESNET_ARCH``, ``CONFIG__*``).
    """
    with cls.lock:
        # check if model is already loaded
        if cls.predictor:
            return cls.predictor
        os.environ["TENSORPACK_FP16"] = "true"
        # create a mask r-cnn model
        mask_rcnn_model = ResNetFPNModel(True)
        try:
            model_dir = os.environ["SM_MODEL_DIR"]
        except KeyError:
            model_dir = "/opt/ml/model"
        try:
            resnet_arch = os.environ["RESNET_ARCH"]
        except KeyError:
            resnet_arch = "resnet50"

        def _ckpt_step(index_path):
            # "model-12345.index" -> 12345; unparsable names sort first
            stem = os.path.basename(index_path)[len("model-"):-len(".index")]
            try:
                return int(stem)
            except ValueError:
                return -1

        # file path to previously trained mask r-cnn model.
        # BUG FIX: the old code picked the "latest" checkpoint with a plain
        # string comparison, which is lexicographic ("model-999" compares
        # greater than "model-1000"); compare by numeric training step.
        model_search_path = os.path.join(model_dir, "model-*.index")
        trained_model = max(glob.glob(model_search_path),
                            key=_ckpt_step, default="")
        print(f"Using model: {trained_model}")
        # fixed resnet50 backbone weights
        cfg.MODE_FPN = True
        cfg.MODE_MASK = True
        if resnet_arch == "resnet101":
            cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 23, 3]
        else:
            cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
        # apply CONFIG__A__B=value overrides from the environment
        cfg_prefix = "CONFIG__"
        for key, value in dict(os.environ).items():
            if key.startswith(cfg_prefix):
                attr_name = key[len(cfg_prefix):]
                attr_name = attr_name.replace("__", ".")
                # NOTE(security): eval() on an environment variable executes
                # arbitrary code; acceptable only if the container env is
                # fully trusted — consider ast.literal_eval instead.
                value = eval(value)
                print(f"update config: {attr_name}={value}")
                nested_var = cfg
                attr_list = attr_name.split(".")
                for attr in attr_list[0:-1]:
                    nested_var = getattr(nested_var, attr)
                setattr(nested_var, attr_list[-1], value)
        # calling detection dataset gets the number of coco categories
        # and saves in the configuration
        DetectionDataset()
        finalize_configs(is_training=False)
        # Create an inference model
        # PredictConfig takes a model, input tensors and output tensors
        cls.predictor = OfflinePredictor(
            PredictConfig(
                model=mask_rcnn_model,
                session_init=get_model_loader(trained_model),
                input_names=["images", "orig_image_dims"],
                output_names=[
                    "generate_{}_proposals_topk_per_image/boxes".format(
                        "fpn" if cfg.MODE_FPN else "rpn"),
                    "generate_{}_proposals_topk_per_image/scores".format(
                        "fpn" if cfg.MODE_FPN else "rpn"),
                    "fastrcnn_all_scores",
                    "output/boxes",
                    "output/scores",
                    "output/labels",
                    "output/masks",
                ],
            ))
        return cls.predictor
'--output', nargs='+', help='list of output tensors') parser.add_argument('--warmup', help='warmup iterations', type=int, default=5) parser.add_argument('--print-flops', action='store_true') parser.add_argument('--print-params', action='store_true') parser.add_argument('--print-timing', action='store_true') args = parser.parse_args() tf.train.import_meta_graph(args.meta) G = tf.get_default_graph() with tf.Session(config=get_default_sess_config()) as sess: init = get_model_loader(args.model) init.init(sess) feed = {} for inp in args.input: inp = inp.split('=') name = get_op_tensor_name(inp[0].strip())[1] shape = list(map(int, inp[1].strip().split(','))) tensor = G.get_tensor_by_name(name) logger.info("Feeding shape ({}) to tensor {}".format( ','.join(map(str, shape)), name)) feed[tensor] = np.random.rand(*shape) fetches = [] for name in args.output: name = get_op_tensor_name(name)[1]
def run(self):
    """Evaluate the classifier on the PCam test split (HDF5) and write a
    per-case prediction CSV plus accuracy / error / AUC to stdout.
    """
    if self.inf_auto_find_chkpt:
        print(
            '-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison'
            % (self.inf_auto_metric, self.inf_auto_comparator))
        model_path, stat = get_best_chkpts(self.save_dir, self.inf_auto_metric, self.inf_auto_comparator)
        print("Selecting: %s" % model_path)
        print("Having Following Statistics:")
        for key, value in stat.items():
            print("\t%s: %s" % (key, value))
    else:
        model_path = self.inf_model_path
    model_constructor = self.get_model()
    pred_config = PredictConfig(
        model=model_constructor(),
        session_init=get_model_loader(model_path),
        input_names=self.eval_inf_input_tensor_names,
        output_names=self.eval_inf_output_tensor_names,
        create_graph=False)
    predictor = OfflinePredictor(pred_config)
    ####
    save_dir = self.inf_output_dir
    predict_list = [["case", "prediction"]]  # CSV header row
    file_load_img = HDF5Matrix(
        self.inf_data_list[0] + "camelyonpatch_level_2_split_test_x.h5", "x")
    file_load_lab = HDF5Matrix(
        self.inf_data_list[0] + "camelyonpatch_level_2_split_test_y.h5", "y")
    true_list = []
    prob_list = []
    pred_list = []
    num_ims = file_load_img.shape[0]
    # last_step = index of the first (possibly short) trailing batch
    last_step = math.floor(num_ims / self.inf_batch_size)
    last_step = self.inf_batch_size * last_step
    last_batch = num_ims - last_step
    count = 0
    for start_batch in range(0, last_step + 1, self.inf_batch_size):
        sys.stdout.write("\rProcessed (%d/%d)" % (start_batch, num_ims))
        sys.stdout.flush()
        # full batches, except a short remainder batch at the very end
        batch = self.inf_batch_size if start_batch != last_step else last_batch
        img = file_load_img[start_batch:start_batch + batch]
        img = img.astype("uint8")
        lab = np.squeeze(file_load_lab[start_batch:start_batch + batch])
        prob, pred = self.__gen_prediction(img, predictor)
        for j in range(prob.shape[0]):
            predict_list.append([str(count), str(prob[j])])
            count += 1
        prob_list.extend(prob)
        pred_list.extend(pred)
        true_list.extend(lab)
    prob_list = np.array(prob_list)
    pred_list = np.array(pred_list)
    true_list = np.array(true_list)
    accuracy = (pred_list == true_list).sum() / np.size(true_list)
    error = (pred_list != true_list).sum() / np.size(true_list)
    # BUG FIX: label previously misspelled "Accurcy"
    print("Accuracy (%): ", 100 * accuracy)
    print("Error (%): ", 100 * error)
    if self.model_mode == "class_pcam":
        auc = roc_auc_score(true_list, prob_list)
        print("AUC: ", auc)
    # Save predictions to csv.
    # BUG FIX: the old code re-opened the file in append mode once per row;
    # open it once and let the context manager close it.
    rm_n_mkdir(save_dir)
    with open("%s/predict.csv" % save_dir, "a") as predict_file:
        for result in predict_list:
            predict_file.write(result[0])
            predict_file.write(",")
            predict_file.write(result[1])
            predict_file.write("\n")
def run(self, data_dir, output_dir, model_path, ambi_path, img_ext='.png'):
    """Run MoNuSAC inference and write per-class instance masks.

    Walks the MoNuSAC directory tree (patient -> sub-image), runs the
    HoVer-Net predictor on each image, removes instances overlapping the
    provided "Ambiguous" region masks, assigns a type label to each
    instance by majority pixel vote, and saves one `mask.mat` per class
    under `<output_dir>/<patient>/<sub_image>/<label>/`.

    Empty arguments fall back to config values (or hard-coded paths for
    `model_path` / `ambi_path`).

    NOTE(review): this method changes the process working directory via
    `os.chdir(output_dir)` and never restores it.
    """
    if (not data_dir):
        print('Using Config file path for data_dir.')
        data_dir = self.inf_data_dir
    if (not output_dir):
        print('Using Config file path for output_dir.')
        output_dir = self.inf_output_dir
    if (not model_path):
        print('Using placeholder path for model_dir.')
        # NOTE(review): machine-specific hard-coded checkpoint path.
        model_path = '/home/dm1/shikhar/hover_net_modified/v2_multitask/np_hv/07/model-35854.index'
    if (not img_ext):
        print('Using Config img ext value img_ext.')
        img_ext = self.inf_imgs_ext
    if (not ambi_path):
        # Hard coding path here for single test run.
        ambi_path = '/home/dm1/shikhar/check_sandbox/testing_code/MoNuSAC_testing_data/MoNuSAC_testing_ambiguous_regions'
    model_constructor = self.get_model()
    pred_config = PredictConfig(
        model=model_constructor(),
        session_init=get_model_loader(model_path),
        input_names=self.eval_inf_input_tensor_names,
        output_names=self.eval_inf_output_tensor_names)
    predictor = OfflinePredictor(pred_config)

    #file_list = glob.glob('%s/*%s' % (data_dir, img_ext))
    #file_list.sort() # ensure same order
    #if(not file_list):
    #    print('No Images found in data_dir! Check script arg-paths')

    # Create Output Directory
    #rm_n_mkdir(output_dir)

    # Expecting MoNuSAC's input data directory tree (Patient Name -> Image Name -> )
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    # All subsequent relative paths ('./...') are resolved against output_dir.
    os.chdir(output_dir)
    patients = [x[0] for x in os.walk(data_dir)
                ]  #Total patients in the data_path
    print(len(patients))
    for patient_loc in patients:
        # Strip '<data_dir>/' prefix to recover the patient directory name.
        patient_name = patient_loc[len(data_dir) + 1:]  #Patient name
        print(patient_name, flush=True)

        ## To make patient's name directory in the destination folder
        try:
            os.mkdir(patient_name)
        except OSError:
            print("\n Creation of the patient's directory %s failed" %
                  patient_name,
                  flush=True)
        sub_images = glob(str(patient_loc) + '/*' + str(img_ext))
        for sub_image_loc in sub_images:
            # NOTE(review): slice assumes exactly one separator between
            # data_dir and patient_name and a 4-char extension; the
            # result keeps a leading '/' — verify against actual paths.
            sub_image_name = sub_image_loc[len(data_dir) +
                                           len(patient_name) + 1:-4]
            print(sub_image_name)

            ## To make sub_image directory under the patient's folder
            sub_image = './' + patient_name + sub_image_name  #Destination path
            try:
                os.mkdir(sub_image)
            except OSError:
                print("\n Creation of the patient's directory %s failed" %
                      sub_image)

            image_name = sub_image_loc
            if (img_ext == '.svs'):
                # Whole-slide input: dump level 0 to PNG, then re-read
                # with OpenCV so downstream code sees a plain array.
                img = openslide.OpenSlide(image_name)
                cv2.imwrite(
                    sub_image_loc[:-4] + '.png',
                    np.array(
                        img.read_region((0, 0), 0, img.level_dimensions[0])))
                img = cv2.imread(sub_image_loc[:-4] + '.png')
            else:
                img = cv2.imread(image_name)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            ## Generate Prediction Map
            pred_map = self.__gen_prediction(img, predictor)
            pred = pred_map

            # Process Prediction Map: first nr_types channels are type
            # logits, the rest are the instance (HoVer) channels.
            pred_inst = pred[..., self.nr_types:]
            pred_type = pred[..., :self.nr_types]
            pred_inst = np.squeeze(pred_inst)
            pred_type = np.argmax(pred_type, axis=-1)
            # NOTE(review): `marker_mode` and `energy_mode` are not
            # defined in this method — presumably module-level globals;
            # confirm, otherwise this raises NameError.
            pred_inst = postproc.hover.proc_np_hv(pred_inst,
                                                  marker_mode=marker_mode,
                                                  energy_mode=energy_mode,
                                                  rgb=img)
            pred_inst = remap_label(pred_inst, by_size=True)

            # Read Ambiguous Region mask if any
            ambi_mask_final = None
            save_mask = None
            full_ambi_path = ambi_path + '/' + patient_name + '/' + sub_image_name + '/Ambiguous'
            ambi_masks = glob(full_ambi_path + '/*')
            if (ambi_masks):
                try:
                    ambi_mask_final = cv2.imread(ambi_masks[0])
                    print('Ambiguous Mask Found: ', ambi_mask_final.shape)
                    save_mask = ambi_mask_final
                    gray = cv2.cvtColor(ambi_mask_final, cv2.COLOR_BGR2GRAY)
                    count, ambi = cv2.connectedComponents(gray)
                    match_iou = 0.01  # Modify parameter experimentally

                    # Remove Ambiguous region: zero out every predicted
                    # instance whose IoU with an ambiguous component
                    # exceeds the (deliberately low) threshold.
                    pairwise_iou = get_iou(true=ambi, pred=pred_inst)
                    matched_regions = np.array(pairwise_iou >= match_iou,
                                               np.uint8)
                    # Column indices = predicted instance ids - 1.
                    matched_region_list = np.nonzero(matched_regions)[1]
                    pred_inst_copy = pred_inst.copy()
                    for id in matched_region_list:
                        region_id = id + 1
                        pred_inst[pred_inst == region_id] = 0
                    # Re-Order Cleaned pred_inst
                    pred_inst = remap_label(pred_inst, by_size=True)
                except Exception as e:
                    # Best-effort: keep the uncleaned prediction on failure.
                    print('\n\t [Warn] Ambiguous Region not removed : ', e)
            else:
                print('\n\t No Ambiguous Masks for this image: ',
                      full_ambi_path)

            # Map Instances to Labels for creating submission format:
            # each instance takes the majority type of its pixels.
            pred_id_list = list(
                np.unique(pred_inst))[1:]  # exclude background ID
            pred_inst_type = np.full(len(pred_id_list), 0, dtype=np.int32)
            for idx, inst_id in enumerate(pred_id_list):
                inst_type = pred_type[pred_inst == inst_id]
                type_list, type_pixels = np.unique(inst_type,
                                                   return_counts=True)
                type_list = list(zip(type_list, type_pixels))
                type_list = sorted(type_list,
                                   key=lambda x: x[1],
                                   reverse=True)
                inst_type = type_list[0][0]
                if inst_type == 0:  # ! pick the 2nd most dominant if exist
                    if len(type_list) > 1:
                        inst_type = type_list[1][0]
                    else:
                        print('[Warn] Instance has `background` type')
                pred_inst_type[idx] = inst_type

            # Write Instance Maps based on their Classes/Labels to the folders
            for class_id in range(1, self.nr_types):
                separated_inst = pred_inst.copy()
                # Keep only instances whose voted type equals class_id
                # (pred_inst_type is indexed by instance id - 1).
                separated_inst[pred_inst_type[separated_inst -
                                              1] != [class_id]] = 0
                separated_inst = separated_inst.astype(np.uint8)

                # Create directory for each label
                # NOTE(review): `class_id_mapping` is not defined in this
                # method — presumably a module-level dict; confirm.
                label = class_id_mapping[class_id]
                sub_path = sub_image + '/' + label
                try:
                    os.mkdir(sub_path)
                except OSError:
                    print("Creation of the directory %s failed" % label)
                else:
                    print("Successfully created the directory %s " % label)

                # Check if Mask is empty then write
                check = np.unique(separated_inst)
                if ((len(check) == 1) & (check[0] == 0)):
                    print('Empty inst. Not writing.', check)
                else:
                    sio.savemat(sub_path + '/mask.mat',
                                {'n_ary_mask': separated_inst})
def get_predictor(cls):
    """Lazily build and cache the Mask R-CNN OfflinePredictor.

    Construction is serialized by `cls.lock`; the predictor is built at
    most once and reused on subsequent calls. Model/tuning knobs are read
    from environment variables (SM_MODEL_DIR, PRETRAINED_MODEL, divisor,
    rpnanchor_stride, NMS_TOPK, NMS_THRESH, res_perimg), with defaults
    applied when unset.

    Returns:
        The cached tensorpack OfflinePredictor instance.
    """
    with cls.lock:
        # check if model is already loaded
        if cls.predictor:
            return cls.predictor

        os.environ['TENSORPACK_FP16'] = 'true'

        # create a mask r-cnn model
        mask_rcnn_model = ResNetFPNModel(True)

        # SageMaker model dir, with a local fallback.
        model_dir = os.environ.get('SM_MODEL_DIR', '/opt/ml/model')
        if 'PRETRAINED_MODEL' in os.environ:
            cls.pretrained_model = os.environ['PRETRAINED_MODEL']

        # NOTE(review): eval() on environment variables executes
        # arbitrary code. Kept for backward compatibility (values may be
        # expressions like "32/2"), but these should move to a safe
        # numeric parser such as float()/ast.literal_eval.
        try:
            div = int(eval(os.environ['divisor']))
        except KeyError:
            div = 1

        rpn_anchor_stride = int(16 / div)
        rpn_anchor_sizes = (int(32 / div), int(64 / div), int(128 / div),
                            int(256 / div), int(512 / div))
        try:
            rpn_anchor_stride = int(eval(os.environ['rpnanchor_stride']))
        except KeyError:
            pass
        try:
            nms_topk = int(eval(os.environ['NMS_TOPK']))
        except KeyError:
            nms_topk = 2
        try:
            nms_thresh = eval(os.environ['NMS_THRESH'])
        except KeyError:
            nms_thresh = 0.7
        try:
            results_per_img = eval(os.environ['res_perimg'])
        except KeyError:
            results_per_img = 400

        # Pick the checkpoint with the HIGHEST training step.
        # BUGFIX: the original compared file names lexicographically,
        # which ranks 'model-999.index' above 'model-10000.index'.
        import re

        def _step(path):
            # Extract the numeric step; unmatched names sort last.
            m = re.search(r'model-(\d+)\.index$', path)
            return int(m.group(1)) if m else -1

        model_search_path = os.path.join(model_dir, "model-*.index")
        checkpoints = glob.glob(model_search_path)
        trained_model = max(checkpoints, key=_step, default="")
        print(f'Using model: {trained_model}')

        # fixed resnet50 backbone weights
        cfg.BACKBONE.WEIGHTS = cls.pretrained_model
        cfg.MODE_FPN = True
        cfg.MODE_MASK = True
        cfg.RPN.ANCHOR_STRIDE = rpn_anchor_stride
        cfg.RPN.ANCHOR_SIZES = rpn_anchor_sizes
        cfg.RPN.TEST_PRE_NMS_TOPK = int(6000 * nms_topk)
        cfg.RPN.TEST_POST_NMS_TOPK = int(1000 * nms_topk)
        cfg.RPN.TEST_PER_LEVEL_NMS_TOPK = int(1000 * nms_topk)

        # testing -----------------------
        cfg.TEST.FRCNN_NMS_THRESH = nms_thresh
        cfg.TEST.RESULT_SCORE_THRESH = 0.05
        cfg.TEST.RESULT_SCORE_THRESH_VIS = 0.2  # only visualize confident results
        cfg.TEST.RESULTS_PER_IM = results_per_img

        # calling detection dataset gets the number of coco categories
        # and saves in the configuration
        DetectionDataset()
        finalize_configs(is_training=False)

        # Create an inference model:
        # PredictConfig takes a model, input tensors and output tensors
        cls.predictor = OfflinePredictor(
            PredictConfig(
                model=mask_rcnn_model,
                session_init=get_model_loader(trained_model),
                input_names=['images', 'orig_image_dims'],
                output_names=[
                    'generate_{}_proposals_topk_per_image/boxes'.format(
                        'fpn' if cfg.MODE_FPN else 'rpn'),
                    'generate_{}_proposals_topk_per_image/scores'.format(
                        'fpn' if cfg.MODE_FPN else 'rpn'),
                    'fastrcnn_all_scores',
                    'output/boxes',
                    'output/scores',
                    'output/labels',
                    'output/masks'
                ]))
        return cls.predictor