def _setup_metrics(self):
    self.metrics_list = {}
    if self.logparams['metrics']['loss']:
        self.metrics_list['loss'] = Metrics('loss_curve')
    if self.logparams['metrics']['accuracy']:
        self.metrics_list['accuracy'] = Metrics('acc_curve')
    if self.logparams['metrics']['cosine-dists']:
        if not self.logparams['metrics']['cosine-dists']['stats-only']:
            self.metrics_list['cosine_dists'] = Metrics('cos_dists')
            self.metrics_list['cosine_dists_hist'] = Metrics('cosine_dists_hist')
        self.metrics_list['cosine_dists_diff'] = Metrics('cosine_dists_diff')
        self.metrics_list['cosine_dists_mean'] = Metrics('cosine_dists_mean')
    if self.logparams['metrics']['gradient-projections']:
        self.metrics_list['mean_grad'] = Metrics('mean_grad')
        self.metrics_list['diff_grad'] = Metrics('diff_grad')
    if self.logparams['metrics']['test-accuracy']:
        self.metrics_list['test_accuracy'] = Metrics('test_accuracy')
    if self.logparams['metrics']['weights']:
        for i in range(self.num_runs):
            os.makedirs(os.path.join(self.logdir, 'weight_history', 'run_' + str(i)),
                        exist_ok=True)
def _setup_metrics(self):
    self.metrics_list = {}
    if self.logparams['metrics']['loss']:
        self.metrics_list['loss'] = Metrics('loss_curve')
    if self.logparams['metrics']['accuracy']:
        self.metrics_list['accuracy'] = Metrics('acc_curve')
    if self.logparams['metrics']['test-accuracy']:
        self.metrics_list['test_accuracy'] = Metrics('test_accuracy')
    if self.logparams['metrics']['synth-grad-norm']:
        self.metrics_list['grad_norm'] = Metrics('synth_grad_norm')
    if self.logparams['metrics']['weights']:
        for i in range(self.num_runs):
            os.makedirs(os.path.join(self.logdir, 'weight_history', 'run_' + str(i)),
                        exist_ok=True)
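# --- Hypothetical example (not part of the original source) ---
# A sketch of what the `logparams['metrics']` dict read by the two
# `_setup_metrics` variants above might look like. The keys mirror exactly what
# the code checks; the boolean values are illustrative placeholders only.
example_logparams = {
    'metrics': {
        'loss': True,
        'accuracy': True,
        'test-accuracy': True,
        'weights': False,
        # Checked only by the first variant:
        'cosine-dists': {'stats-only': False},
        'gradient-projections': True,
        # Checked only by the second variant:
        'synth-grad-norm': True,
    }
}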
def train_epoch(model, device, train_loader, criterion, optimizer, k, warm_up, lr, writer, epoch):
    # training phase
    print("Training Progress:")
    metrics = Metrics(args.dataset, train=True)
    model.train()
    for batch_idx, (batch, labels) in enumerate(tqdm(train_loader)):
        iteration = epoch * len(train_loader) + batch_idx
        optimizer.zero_grad()
        batch = batch.type(torch.FloatTensor).to(device)
        labels = labels.to(device)
        outputs = model(batch)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # warm up
        if k <= warm_up:
            k = learning_rate_scheduler(optimizer, k, warm_up, lr)
        # Batch metrics
        metrics.update_metrics(outputs, labels, loss)
        if iteration % 10 == 0:
            metrics.write_to_tensorboard(writer, iteration)
    # Epoch metrics
    final_metrics = metrics.get_epoch_metrics()
    return (final_metrics, k)
def __init__(self, args):
    self.args = args

    # Select the model class based on the arguments
    if self.args.model == 'FCN8sMobileNet':
        self.model = FCN8sMobileNet
    elif self.args.model == 'FCN8sShuffleNet':
        self.model = FCN8sShuffleNet
    elif self.args.model == 'UNetMobileNet':
        self.model = UNetMobileNet
    elif self.args.model == 'UNetShuffleNet':
        self.model = UNetShuffleNet
    else:
        raise NameError(self.args.model + ' unknown!!')

    # Reset the graph
    tf.reset_default_graph()

    # Create the session
    gpu_options = tf.GPUOptions(allow_growth=True)
    self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                 allow_soft_placement=True))

    # Build the model
    with self.sess.as_default():
        self.build_model()

    # Initialize metrics
    self.metrics = Metrics(self.args.num_classes)
def main():
    config = Config()
    parser = argparse.ArgumentParser(
        description='Code for evaluating dialog models\' responses with ' +
                    '17 evaluation metrics (arxiv.org/abs/1905.05471)')
    parser.add_argument('-tns', '--train_source', default=config.train_source,
                        help='Path to the train source file, where each line ' +
                             'corresponds to one train input', metavar='')
    parser.add_argument('-tts', '--test_source', default=config.test_source,
                        help='Path to the test source file, where each line ' +
                             'corresponds to one test input', metavar='')
    parser.add_argument('-ttt', '--test_target', default=config.test_target,
                        help='Path to the test target file, where each line ' +
                             'corresponds to one test target', metavar='')
    parser.add_argument('-r', '--test_responses', default=config.test_responses,
                        help='Path to the test model responses file', metavar='')
    parser.add_argument('-tv', '--text_vocab', default=config.text_vocab,
                        help='A file where each line is a word in the vocab', metavar='')
    parser.add_argument('-vv', '--vector_vocab', default=config.vector_vocab,
                        help='A file where each line is a word in the vocab ' +
                             'followed by a vector', metavar='')
    parser.add_argument('-s', '--bleu_smoothing', default=config.bleu_smoothing,
                        help='Bleu smoothing method (choices: %(choices)s)',
                        metavar='', choices=[0, 1, 2, 3, 4, 5, 6, 7])
    parser.add_argument('-t', '--t', default=config.t,
                        help='t value for confidence level calculation ' +
                             '(default: %(default)s)', metavar='', type=int)

    parser.parse_args(namespace=config)

    m = Metrics(config)
    m.run()
def __init__(self, model, mode='0'):
    self.model = model
    self.metrics = Metrics()
    if mode == '0':
        self.X_train, self.Y_train, self.X_test, self.Y_test = \
            DataPreprocessing().naive_preprocessing_data()
    elif mode == '1':
        self.X_train, self.Y_train, self.X_test, self.Y_test = \
            DataPreprocessing().advanced_preprocessing_data()
def __init__(self, args, sess, model):
    """
    Call the constructor of the base class
    init summaries
    init loading data
    :param args:
    :param sess:
    :param model:
    :return:
    """
    super().__init__(args, sess, model)

    # Init load data and generator
    self.generator = None
    self.run = None

    # Load data
    if self.args.data_mode == "realsense":
        self.test_data = None
        self.test_data_len = None
        self.num_iterations_testing_per_epoch = None
        self.load_realsence_data()
    elif self.args.data_mode == "cityscapes_val":
        self.test_data = None
        self.test_data_len = None
        self.num_iterations_testing_per_epoch = None
        self.load_val_data()
    elif self.args.data_mode == "cityscapes_test":
        self.test_data = None
        self.test_data_len = None
        self.num_iterations_testing_per_epoch = None
        self.load_test_data()
    elif self.args.data_mode == "video":
        self.test_data = None
        self.test_data_len = None
        self.num_iterations_testing_per_epoch = None
        self.load_vid_data()

    if self.args.task == "test":
        self.run = self.test
    elif self.args.task == "realsense":
        self.run = self.realsense_inference
    elif self.args.task == "realsense_imgs":
        self.run = self.realsense_imgs
    else:
        print("ERROR Please select a proper data_mode BYE")
        exit(-1)

    # Init metrics class
    self.metrics = Metrics(self.args.num_classes)
    # Init reporter class
    self.reporter = Reporter(self.args.out_dir + 'report_test.json', self.args)
def __init__(self, hparams, batch_fn=None):
    super(RaceBaseModel, self).__init__()

    if batch_fn:
        self.batch_fn = batch_fn
    else:
        self.batch_fn = self.default_batch_fn

    self.hparams = hparams
    self.save_hyperparameters(hparams)

    # Tokenizer:
    self.tokenizer = AutoTokenizer.from_pretrained(self.hparams.pretrained_model)
    self.tokenizer.add_special_tokens(
        {"additional_special_tokens": self.hparams.special_tokens})

    # Metrics:
    self.metrics = Metrics()
def __init__(self):
    """
    Sets default values for the Pair class
    """
    self._warning = None
    self._ground_truth = None
    self._matched = False
    self._metric_version = None
    self._performer = None
    self._provider = None
    self._lead_time = None
    self._utility_time = None
    self._confidence = None
    self._probability = None
    self._quality = None
    self._event_type_similarity = None
    self._event_details_similarity = None
    self._occurrence_time_similarity = None
    self._targets_similarity = None
    self._metrics = Metrics()
def __init__(self, hparams, batch_fn=None):
    """
    :param batch_fn: function to process batch
    """
    super(RaceModule, self).__init__(hparams, batch_fn)

    if self.hparams.pretrained_model in ["t5-base", "t5-small"]:
        # Model: load the pretrained weights and set the decoder start token
        # via a config override (the original built a throwaway T5Config that
        # from_pretrained then ignored).
        self.model = T5ForConditionalGeneration.from_pretrained(
            self.hparams.pretrained_model,
            decoder_start_token_id=self.hparams.padding_token)

        # Tokenizer:
        self.tokenizer = AutoTokenizer.from_pretrained(self.hparams.pretrained_model)
        self.tokenizer.add_special_tokens(
            {"additional_special_tokens": ["[CON]", "[QUE]", "[ANS]", "[DIS]"]})

        # Metrics:
        self.metrics = Metrics()

        try:
            self.model.resize_token_embeddings(self.hparams.tokenizer_len)
        except (AttributeError, KeyError):
            # Fall back to the default T5 vocab size plus the four added special tokens.
            self.model.resize_token_embeddings(32104)
    else:
        raise NotImplementedError
def validate_epoch(model, device, validation_loader, criterion, scheduler, writer, epoch):
    with torch.no_grad():
        # validation phase
        print("Validation Progress:")
        metrics = Metrics(args.dataset, train=False)
        model.eval()
        for batch_idx, (batch, labels) in enumerate(tqdm(validation_loader)):
            batch = batch.type(torch.FloatTensor).to(device)
            labels = labels.to(device)
            outputs = model(batch)
            loss = criterion(outputs, labels)
            # Batch metrics
            metrics.update_metrics(outputs, labels, loss)
        # Epoch metrics
        final_metrics = metrics.get_epoch_metrics()
        metrics.write_to_tensorboard(writer, epoch)
        scheduler.step(final_metrics["Loss"])
    return final_metrics
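# --- Hypothetical usage sketch (not part of the original source) ---
# Shows one way the `train_epoch` and `validate_epoch` functions above could be
# wired into an epoch loop. The objects passed in (model, loaders, criterion,
# optimizer, scheduler, writer) and the warm-up settings are assumed to be
# constructed elsewhere, exactly as those two functions expect them; `k` is the
# warm-up step counter that `train_epoch` returns updated on every call.
def run_training(model, device, train_loader, validation_loader, criterion,
                 optimizer, scheduler, writer, epochs, warm_up, lr):
    k = 0
    best_val_loss = float('inf')
    for epoch in range(epochs):
        _, k = train_epoch(model, device, train_loader, criterion,
                           optimizer, k, warm_up, lr, writer, epoch)
        val_metrics = validate_epoch(model, device, validation_loader,
                                     criterion, scheduler, writer, epoch)
        # `validate_epoch` returns the dict produced by Metrics.get_epoch_metrics();
        # "Loss" is the key it already feeds to the scheduler.
        best_val_loss = min(best_val_loss, val_metrics["Loss"])
    return best_val_loss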
fx_dm.setup()

# Trainer:
trainer = pl.Trainer.from_argparse_args(args)

fx_model = RaceModule.load_from_checkpoint("models/ckpts/t5.ckpt")
fx_model.setup_tune(top_p=0.95, top_k=50, no_repeat_ngram_size=2, num_samples=NUM_SAMPLES)

# qj_model = BertForSequenceClassification.from_pretrained("iarfmoose/bert-base-cased-qa-evaluator").cuda()
# qj_tokenizer = AutoTokenizer.from_pretrained("iarfmoose/bert-base-cased-qa-evaluator")

fx_model.eval()
# qj_model.eval()

metrics = Metrics()
summary = {
    "bleu_1": 0.0,
    "bleu_2": 0.0,
    "bleu_3": 0.0,
    "bleu_4": 0.0,
    "meteor": 0.0,
    "rouge_l": 0.0
}
count = 0

print("Total Length", len(fx_dm.test_dataloader()))
for x, y in fx_dm.test_dataloader():
    output = fx_model.generate(x)
def __init__(self, args, sess, train_model, test_model):
    """
    Call the constructor of the base class
    init summaries
    init loading data
    :param args:
    :param sess:
    :param train_model:
    :param test_model:
    :return:
    """
    super().__init__(args, sess, train_model, test_model)

    ##################################################################################
    # Init summaries

    # Summary variables
    self.scalar_summary_tags = ['mean_iou_on_val',
                                'train-loss-per-epoch', 'val-loss-per-epoch',
                                'train-acc-per-epoch', 'val-acc-per-epoch']
    self.images_summary_tags = [
        ('train_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3]),
        ('val_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3])]
    self.summary_tags = []
    self.summary_placeholders = {}
    self.summary_ops = {}
    # Init summaries and their operators
    self.init_summaries()
    # Create summary writer
    self.summary_writer = tf.summary.FileWriter(self.args.summary_dir, self.sess.graph)

    ##################################################################################
    # Init load data and generator
    self.generator = None
    if self.args.data_mode == "experiment_tfdata":
        self.data_session = None
        self.train_next_batch, self.train_data_len = self.init_tfdata(
            self.args.batch_size, self.args.abs_data_dir,
            (self.args.img_height, self.args.img_width), mode='train')
        self.num_iterations_training_per_epoch = self.train_data_len // self.args.batch_size
        self.generator = self.train_tfdata_generator
    elif self.args.data_mode == "experiment_h5":
        self.train_data = None
        self.train_data_len = None
        self.val_data = None
        self.val_data_len = None
        self.num_iterations_training_per_epoch = None
        self.num_iterations_validation_per_epoch = None
        self.load_train_data_h5()
        self.generator = self.train_h5_generator
    elif self.args.data_mode == "experiment_v2":
        self.targets_resize = self.args.targets_resize
        self.train_data = None
        self.train_data_len = None
        self.val_data = None
        self.val_data_len = None
        self.num_iterations_training_per_epoch = None
        self.num_iterations_validation_per_epoch = None
        self.load_train_data(v2=True)
        self.generator = self.train_generator
    elif self.args.data_mode == "experiment":
        self.train_data = None
        self.train_data_len = None
        self.val_data = None
        self.val_data_len = None
        self.num_iterations_training_per_epoch = None
        self.num_iterations_validation_per_epoch = None
        self.load_train_data()
        self.generator = self.train_generator
    elif self.args.data_mode == "test_tfdata":
        self.test_data = None
        self.test_data_len = None
        self.num_iterations_testing_per_epoch = None
        self.load_val_data()
        self.generator = self.test_tfdata_generator
    elif self.args.data_mode == "test":
        self.test_data = None
        self.test_data_len = None
        self.num_iterations_testing_per_epoch = None
        self.load_val_data()
        self.generator = self.test_generator
    elif self.args.data_mode == "test_eval":
        self.test_data = None
        self.test_data_len = None
        self.num_iterations_testing_per_epoch = None
        self.names_mapper = None
        self.load_test_data()
        self.generator = self.test_generator
    elif self.args.data_mode == "test_v2":
        self.targets_resize = self.args.targets_resize
        self.test_data = None
        self.test_data_len = None
        self.num_iterations_testing_per_epoch = None
        self.load_val_data(v2=True)
        self.generator = self.test_generator
    elif self.args.data_mode == "video":
        self.args.data_mode = "test"
        self.test_data = None
        self.test_data_len = None
        self.num_iterations_testing_per_epoch = None
        self.load_vid_data()
        self.generator = self.test_generator
    elif self.args.data_mode == "debug":
        print("Debugging photo loading..")
        # self.debug_x = misc.imread('/data/menna/cityscapes/leftImg8bit/val/lindau/lindau_000048_000019_leftImg8bit.png')
        # self.debug_y = misc.imread('/data/menna/cityscapes/gtFine/val/lindau/lindau_000048_000019_gtFine_labelIds.png')
        # self.debug_x = np.expand_dims(misc.imresize(self.debug_x, (512, 1024)), axis=0)
        # self.debug_y = np.expand_dims(misc.imresize(self.debug_y, (512, 1024)), axis=0)
        self.debug_x = np.load('data/debug/debug_x.npy')
        self.debug_y = np.load('data/debug/debug_y.npy')
        print("Debugging photo loaded")
    else:
        print("ERROR Please select a proper data_mode BYE")
        exit(-1)

    ##################################################################################
    # Init metrics class
    self.metrics = Metrics(self.args.num_classes)
    # Init reporter class
    if self.args.mode in ('train', 'overfit'):
        self.reporter = Reporter(self.args.out_dir + 'report_train.json', self.args)
    elif self.args.mode == 'test':
        self.reporter = Reporter(self.args.out_dir + 'report_test.json', self.args)
def train(model, dataset, model_dir, summary_writer, epochs, lr, conf_thres, nms_thres,
          iou_thres, lambda_coord=5, lambda_no_obj=0.5, gradient_accumulations=2,
          clip_gradients=False, limit=None, debug=False, print_every=10, save_every=None,
          log_to_neptune=False):
    if log_to_neptune:
        env_path = Path(os.environ['HOME'], 'workspace/setup-box/neptune.env')
        load_dotenv(dotenv_path=env_path)
        neptune.init('petersiemen/sandbox', api_token=os.getenv("NEPTUNE_API_TOKEN"))

    total = limit if limit is not None else len(dataset)
    logger.info(
        f'Start training on {total} images. Using lr: {lr}, '
        f'lambda_coord: {lambda_coord}, lambda_no_obj: {lambda_no_obj}, '
        f'conf_thres: {conf_thres}, nms_thres: {nms_thres}, iou_thres: {iou_thres}, '
        f'gradient_accumulations: {gradient_accumulations}, '
        f'clip_gradients: {clip_gradients}, lambda_no_obj: {lambda_no_obj}')

    metrics = Metrics()

    model.to(DEVICE)
    model.train()

    optimizer = torch.optim.Adam(model.get_trainable_parameters(), lr=lr)
    grid_sizes = model.grid_sizes

    data_loader = DataLoader(dataset, batch_size=dataset.batch_size,
                             shuffle=True, collate_fn=dataset.collate_fn)
    class_names = model.class_names

    for epoch in range(1, epochs + 1):
        for batch_i, (images, ground_truth_boxes, image_paths) in tqdm(enumerate(data_loader),
                                                                       total=total):
            if len(images) != dataset.batch_size:
                logger.warning(
                    f"Skipping batch {batch_i} because it does not have correct size "
                    f"({dataset.batch_size})")
                continue

            images = images.to(DEVICE)

            coordinates, class_scores, confidence = model(images)

            obj_mask, noobj_mask, cls_mask, target_coordinates, target_confidence, \
                target_class_scores = build_targets(coordinates, class_scores,
                                                    ground_truth_boxes, grid_sizes)

            yolo_loss = YoloLoss(coordinates, confidence, class_scores,
                                 obj_mask, noobj_mask, cls_mask,
                                 target_coordinates, target_confidence, target_class_scores,
                                 lambda_coord=lambda_coord, lambda_no_obj=lambda_no_obj)

            class_scores = torch.sigmoid(class_scores)
            prediction = torch.cat((coordinates, confidence.unsqueeze(-1), class_scores), -1)
            detections = non_max_suppression(prediction=prediction,
                                             conf_thres=conf_thres,
                                             nms_thres=nms_thres)

            ground_truth_map_objects = list(
                GroundTruth.from_ground_truths(image_paths, ground_truth_boxes))
            detection_map_objects = list(Detection.from_detections(image_paths, detections))

            metrics.add_detections_for_batch(detection_map_objects, ground_truth_map_objects,
                                             iou_thres=iou_thres)

            if debug:
                plot_batch(detections, ground_truth_boxes, images, class_names)

            loss = yolo_loss.get()
            # backward pass to calculate the weight gradients
            loss.backward()

            if clip_gradients:
                logger.debug("Clipping gradients with max_norm = 1")
                clip_grad_norm_(model.parameters(), max_norm=1)

            if batch_i % print_every == 0:
                # log every `print_every` batches
                yolo_loss.capture(summary_writer, batch_i, during='train')
                # plot_weights_and_gradients(model, summary_writer, epoch * batch_i)
                log_performance(epoch, epochs, batch_i, total, yolo_loss, metrics,
                                class_names, summary_writer, log_to_neptune)

            # Accumulate gradients before each optimizer step
            if batch_i % gradient_accumulations == 0:
                logger.debug(
                    f"Updating weights for batch {batch_i} "
                    f"(gradient_accumulations: {gradient_accumulations})")
                # update the weights
                optimizer.step()
                # zero the parameter (weight) gradients
                optimizer.zero_grad()

            del images
            del ground_truth_boxes

            if limit is not None and batch_i + 1 >= limit:
                logger.info('Stop here after training {} batches (limit: {})'.format(batch_i, limit))
                log_performance(epoch, epochs, batch_i, total, yolo_loss, metrics,
                                class_names, summary_writer, log_to_neptune)
                save_model(model_dir, model, epoch, batch_i)
                return

            if save_every is not None and batch_i % save_every == 0:
                save_model(model_dir, model, epoch, batch_i)

        # save model after every epoch
        save_model(model_dir, model, epoch, None)
def __init__(self, args, sess, model):
    print("\nTraining is initializing itself\n")

    self.args = args
    self.sess = sess
    self.model = model

    # shortcut for model params
    self.params = self.model.params

    # To initialize all variables
    self.init = None
    self.init_model()

    # Create a saver object
    self.saver = tf.train.Saver(max_to_keep=self.args.max_to_keep,
                                keep_checkpoint_every_n_hours=10,
                                save_relative_paths=True)
    self.saver_best = tf.train.Saver(max_to_keep=1, save_relative_paths=True)

    # Load from latest checkpoint if found
    self.load_model()

    ##################################################################################
    # Init summaries

    # Summary variables
    self.scalar_summary_tags = [
        'mean_iou_on_val',
        'train-loss-per-epoch', 'val-loss-per-epoch',
        'train-acc-per-epoch', 'val-acc-per-epoch'
    ]
    self.images_summary_tags = [
        ('train_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3]),
        ('val_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3])
    ]
    self.summary_tags = []
    self.summary_placeholders = {}
    self.summary_ops = {}
    # Init summaries and their operators
    self.init_summaries()

    # Create summary writer
    self.summary_writer = tf.summary.FileWriter(self.args.summary_dir, self.sess.graph)
    ##################################################################################

    if self.args.mode == 'train':
        self.num_iterations_training_per_epoch = self.args.tfrecord_train_len // self.args.batch_size
        self.num_iterations_validation_per_epoch = self.args.tfrecord_val_len // self.args.batch_size
    else:
        self.test_data = None
        self.test_data_len = None
        self.num_iterations_testing_per_epoch = None
        self.load_test_data()

    ##################################################################################
    # Init metrics class
    self.metrics = Metrics(self.args.num_classes)
    # Init reporter class
    if self.args.mode in ('train', 'overfit'):
        self.reporter = Reporter(self.args.out_dir + 'report_train.json', self.args)
    elif self.args.mode == 'test':
        self.reporter = Reporter(self.args.out_dir + 'report_test.json', self.args)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", help="model name")
    parser.add_argument("-p", "--model_path", help="path to the pb file")
    parser.add_argument("-o", "--out_path", help="path to save the segmentation numpy")
    parser.add_argument("-im", "--image_path", help="path to the numpy file holding the rgb images")
    parser.add_argument("-gt", "--label_path", help="path to the numpy file holding the labels")
    args = parser.parse_args()

    pb_path = args.model_path  # e.g. "../fcn8s_mobilenet/checkpoints/best/final_model.pb", "mobilenet_fcn8s.pb", "unet_mobilenet", "optimized_model.pb"

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    # Ask the tensorflow logger not to propagate logs to its parent (which causes
    # duplicated logging)
    logging.getLogger('tensorflow').propagate = False

    # build_trt_pb(model_name, pb_path, download_dir='data')
    logger.info('loading TRT graph from pb: %s' % pb_path)
    trt_graph = load_trt_pb(pb_path)

    logger.info('starting up TensorFlow session')
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_sess = tf.Session(config=tf_config, graph=trt_graph)

    tf_input = tf_sess.graph.get_tensor_by_name('network/input/Placeholder:0')
    tf_output = tf_sess.graph.get_tensor_by_name('network/output/ArgMax:0')

    logger.info('warming up the TRT graph with a dummy image')
    all_images = np.load(args.image_path)
    all_labels = np.load(args.label_path)
    print("------------------Data loaded!!------------------")

    # uid_name_map = []
    # with open('map_uid_img_name.txt', 'r') as f:
    #     for row in f:
    #         row = row.strip('\n')
    #         uid_name_map.append(row)
    # print(uid_name_map)

    elipse = 0
    metrics = Metrics(nclasses=18)
    means = [73.29132098, 83.04442645, 72.5238962]  # bgr
    print("------------------Start Test!!------------------")
    for i in range(0, all_images.shape[0], 1):
        # pre-process: subtract the mean, normalize, then convert rgb to bgr
        image = all_images[i:i+1, :, :, :]
        new_image = copy.deepcopy(image).astype(float)
        new_image[0, :, :, 0] = (image[0, :, :, 2] - means[0]) / 255.0  # b
        new_image[0, :, :, 1] = (image[0, :, :, 1] - means[1]) / 255.0  # g
        new_image[0, :, :, 2] = (image[0, :, :, 0] - means[2]) / 255.0  # r

        start = time.time()
        segmentation = tf_sess.run(tf_output, feed_dict={tf_input: new_image})
        elipse = time.time() - start

        # write records
        # img_name = uid_name_map[i]
        # uid = int(img_name.split('-')[0])
        # curr_record = dict(uid=uid,
        #                    command='predict_segmentation',
        #                    environment='tx2',
        #                    building=None,
        #                    time=elipse*1000,
        #                    metric=None,
        #                    misc=None,
        #                    tag=None)
        # output_records.append(curr_record)
        # print(curr_record)

        # print("segmentation: ", segmentation.shape)
        # segmentation = np.argmax(segmentation, axis=1)  # .astype(int)  # tf.argmax(segmentation, axis=1, output_type=tf.int32)
        # segmentation = segmentation.reshape((512, 512))  # tf.reshape(segmentation, [512, 512])
        # if args.out_path is not None:
        #     seg_img = Image.fromarray(np.uint8(segmentation[0]))
        #     seg_img.save(os.path.join(args.out_path, img_name))

        # update metrics
        label = all_labels[i:i+1, :, :]
        metrics.update_metrics(segmentation, label, 0, 0)
        if i % 100 == 0:
            print(i)

    # with open(args.model + '.json', 'w') as f:
    #     json.dump(output_records, f, indent=2)
    # print(elipse / (i + 1))

    print("segmentation size:", segmentation.shape)
    nonignore = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
    iou, mean_iou = metrics.compute_final_metrics(1, nonignore=nonignore)
    print("mean IOU: ", mean_iou)
    print("Per class IOU: ", iou)
def evaluate(model, dataset, summary_writer, images_results_dir, iou_thres, conf_thres,
             nms_thres, log_every=None, limit=None, plot=False, save=False):
    if save:
        assert dir_exists_and_is_empty(images_results_dir), \
            f'{images_results_dir} is not empty or does not exist.'

    logger.info(
        f'Start evaluating model with iou_thres: {iou_thres}, '
        f'conf_thres: {conf_thres} and nms_thres: {nms_thres}')

    metrics = Metrics()

    model.to(DEVICE)
    model.eval()
    with torch.no_grad():
        data_loader = DataLoader(dataset, batch_size=dataset.batch_size,
                                 shuffle=True, collate_fn=dataset.collate_fn)
        class_names = model.class_names

        total = limit if limit is not None else len(data_loader)
        for batch_i, (images, ground_truth_boxes, image_paths) in tqdm(enumerate(data_loader),
                                                                       total=total):
            if len(images) != dataset.batch_size:
                logger.warning(
                    f"Skipping batch {batch_i} because it does not have correct size "
                    f"({dataset.batch_size})")
                continue

            images = images.to(DEVICE)

            coordinates, class_scores, confidence = model(images)
            class_scores = torch.sigmoid(class_scores)

            prediction = torch.cat((coordinates, confidence.unsqueeze(-1), class_scores), -1)
            detections = non_max_suppression(prediction=prediction,
                                             conf_thres=conf_thres,
                                             nms_thres=nms_thres)

            if plot:
                plot_batch(detections, ground_truth_boxes, images, class_names)
            if save:
                save_batch(image_paths, images_results_dir, detections,
                           ground_truth_boxes, images, class_names)

            ground_truth_map_objects = list(
                GroundTruth.from_ground_truths(image_paths, ground_truth_boxes))
            detection_map_objects = list(Detection.from_detections(image_paths, detections))

            metrics.add_detections_for_batch(detection_map_objects, ground_truth_map_objects,
                                             iou_thres=iou_thres)

            if limit is not None and batch_i >= limit:
                logger.info(f"Stop evaluation here after {batch_i} batches")
                break

            if batch_i != 0 and log_every is not None and batch_i % log_every == 0:
                log_average_precision_for_classes(metrics, class_names, summary_writer, batch_i)

    log_average_precision_for_classes(metrics, class_names, summary_writer, total)
def __init__(self, model, hyperparameters, kfold):
    self.metrics = Metrics()
    cross_validation = StratifiedKFold(n_splits=kfold, shuffle=True)
    self.clf = GridSearchCV(model, hyperparameters, cv=cross_validation,
                            n_jobs=-1, verbose=1)
def __init__(self):
    super().__init__()
    self._metrics = Metrics()
    self._ground_truth_bank = None
    self._warning_bank = None
    self._from_db = None