Code Example #1
def test_model(model, sess, is_test_mod=False):
    batch_generator = generate_batch_data('test' if is_test_mod else 'dev',
                                          model.settings)
    all_predict = []
    all_ground_truth = []
    all_loss = 0
    batch_len = 0
    for index, batch in enumerate(batch_generator):
        feed_dict = model.create_feed_dic(batch, is_training=False)
        loss, y_pred = sess.run([model.loss, model.sigmoid_y_pred], feed_dict)
        all_predict.extend(get_top_5_id(y_pred, model.settings.batch_size))
        all_ground_truth.extend(batch['ground_truth'])
        all_loss += loss
        batch_len += 1
    all_loss = all_loss / batch_len
    precision, recall, f1 = evaluate(all_ground_truth, all_predict)
    print(
        f"\ntest result: precision: {precision} , recall: {recall} , F1: {f1}")
    if not is_test_mod:
        """说明是dev,保存模型"""
        if f1 > model.max_f1:
            model.max_f1 = f1
            save_path = model.saver.save(sess, "./checkpoints/{}/{:.3f}_{:.3f}_{:.3f}.ckpt". \
                                         format(model.model_name, precision, recall, f1))
            print("find new best model,save to path: ", save_path)
        summary = tf.Summary(value=[
            tf.Summary.Value(tag="Loss", simple_value=all_loss),
            tf.Summary.Value(tag="precision", simple_value=precision),
            tf.Summary.Value(tag="recall", simple_value=recall),
            tf.Summary.Value(tag="F1", simple_value=f1),
        ])
        global_step = tf.train.global_step(sess, model.global_step)
        model.dev_writer.add_summary(summary, global_step=global_step)
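The callback above emits metrics without a merged-summary op. As a minimal standalone sketch of the same TF1-style manual-summary idiom (assuming TensorFlow 1.x; the log directory and F1 values are purely illustrative):

import tensorflow as tf

writer = tf.summary.FileWriter('./logs/dev')  # illustrative log directory
for step, f1 in enumerate([0.41, 0.47, 0.52]):  # dummy F1 values
    summary = tf.Summary(value=[tf.Summary.Value(tag='F1', simple_value=f1)])
    writer.add_summary(summary, global_step=step)
writer.flush()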
Code Example #2
def mlp_test():
    data = 'data/warfarin_clean.csv'
    mlp = MLPBandit(data, 5e-4)
    load = loader(data, mlp=mlp)  # reuse the path defined above
    bandit = alg(load, mlp_dim=50)
    counts = [0, 0, 0]
    cum_regret, avg_regret, avg_accuracy = 0, 0, 0
    for i in range(NUM_TRIALS):
        reg, avgreg = bandit.evaluate_online()
        avg_accuracy += evaluate(bandit.predictions, bandit.data_loader.labels)
        cum_regret += reg
        avg_regret += avgreg
        for pred in bandit.predictions:
            counts[int(pred)] += 1
        bandit.data_loader.reshuffle()

    cum_regret /= NUM_TRIALS
    avg_regret /= NUM_TRIALS
    avg_accuracy /= NUM_TRIALS
    total = sum(counts)

    print("Cumulative Regret {}, Average Regret {}".format(
        cum_regret, avg_regret))
    print("Accuracy: ", avg_accuracy)
    print("Average low: {} ({}%)".format(counts[0], 100 * (counts[0] / total)))
    print("Average med: {} ({}%)".format(counts[1], 100 * (counts[1] / total)))
    print("Average high: {} ({}%)".format(counts[2],
                                          100 * (counts[2] / total)))
Code Example #3
	def on_epoch_end(self, epoch, logs=None):
		""" Run the evaluation callback at the end of a given epoch.
		"""
		logs = logs or {}

		# Run evaluation.
		average_precisions, inference_time = evaluate(
			self.generator,
			self.model,
			iou_threshold=self.iou_threshold,
			score_threshold=self.score_threshold,
			max_detections=self.max_detections,
			save_path=self.save_path
		)

		# Compute per class average precision.
		total_instances = []
		precisions = []
		for label, (average_precision, num_annotations ) in average_precisions.items():
			if self.verbose == 1:
				print('{:.0f} instances of class'.format(num_annotations),
					  self.generator.label_to_name(label), 'with average precision: {:.4f}'.format(average_precision))
			total_instances.append(num_annotations)
			precisions.append(average_precision)
		if self.weighted_average:
			self.mean_ap = sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)
		else:
			self.mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)

		if self.tensorboard:
			import tensorflow as tf
			# the original snippet ends here; a TF1-style write, mirroring
			# Code Examples #4 and #12, would continue like this:
			if self.tensorboard.writer is not None:
				summary = tf.Summary()
				summary_value = summary.value.add()
				summary_value.simple_value = self.mean_ap
				summary_value.tag = "mAP"
				self.tensorboard.writer.add_summary(summary, epoch)
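The two mAP variants computed above treat empty classes differently: the weighted form scales each AP by its instance count, while the plain form drops empty classes from the denominator. A self-contained numeric check with dummy values:

total_instances = [50, 10, 0]       # annotations per class (illustrative)
precisions = [0.80, 0.60, 0.00]     # AP per class (illustrative)
weighted_map = sum(a * b for a, b in zip(total_instances, precisions)) / sum(total_instances)
plain_map = sum(precisions) / sum(x > 0 for x in total_instances)
print(weighted_map, plain_map)  # ~0.767 vs 0.700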
Code Example #4
    def on_epoch_end(self, epoch, logs=None):  # avoid a mutable default argument
        logs = logs or {}
        # run evaluation
        average_precisions = evaluate(self.generator,
                                      self.model,
                                      iou_threshold=self.iou_threshold,
                                      score_threshold=self.score_threshold,
                                      max_detections=self.max_detections,
                                      save_path=self.save_path)

        self.mean_ap = sum(
            average_precisions.values()) / len(average_precisions)

        if self.verbose == 1:
            for label, average_precision in average_precisions.items():
                cls = self.generator.label_to_name(label)
                print(cls, '{:.4f}'.format(average_precision))
                summary = tf.Summary()
                summary.value.add(tag=cls, simple_value=average_precision)
                self.tensorboard.writer.add_summary(summary, epoch)

            print('mAP: {:.4f}'.format(self.mean_ap))

        if self.tensorboard is not None and self.tensorboard.writer is not None:
            summary = tf.Summary()  # fresh summary, so per-class values are not written twice
            summary_value = summary.value.add()
            summary_value.simple_value = self.mean_ap
            summary_value.tag = "mAP"
            self.tensorboard.writer.add_summary(summary, epoch)
Code Example #5
def train_model(model, sess):
    model.train_writer.add_graph(sess.graph)
    for epoch_index in range(model.settings.max_epoch):
        train_fetches = [
            model.loss, model.sigmoid_y_pred, model.train_op, model.global_step
        ]
        train_batch_generator = generate_batch_data('train', model.settings)
        prog = Progbar(target=model.settings.train_data_size //
                       model.settings.batch_size)
        for index, batch in enumerate(train_batch_generator):
            feed_dict = model.create_feed_dic(batch)
            loss, y_pred, _, global_step = sess.run(train_fetches, feed_dict)
            if global_step % 100 == 0:
                precision, recall, f1 = evaluate(
                    batch['ground_truth'],
                    get_top_5_id(y_pred, model.settings.batch_size))
                prog.update(index + 1, [("Loss", loss),
                                        ("precision", precision),
                                        ("recall", recall), ("F1", f1)])
                summary = tf.Summary(value=[
                    tf.Summary.Value(tag="Loss", simple_value=loss),
                    tf.Summary.Value(tag="precision", simple_value=precision),
                    tf.Summary.Value(tag="recall", simple_value=recall),
                    tf.Summary.Value(tag="F1", simple_value=f1),
                ])

                model.train_writer.add_summary(summary,
                                               global_step=global_step)
        test_model(model, sess)
Code Example #6
    def evaluate(self):
        self.image_list, self.actual_issame = self.get_val_pair(
            self.root, self.data_name)
        self.feature_extract()
        tpr, fpr, accuracy, best_thresholds = evaluate(self.embeddings,
                                                       self.actual_issame)
        buf = gen_plot(fpr, tpr)
        roc_curve = Image.open(buf)
        roc_curve_tensor = trans.ToTensor()(roc_curve)
        return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
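gen_plot is not shown in this snippet; a plausible sketch consistent with its use above renders the ROC curve into an in-memory PNG buffer that PIL can open (the helper name and signature are assumptions):

import io
import matplotlib.pyplot as plt

def gen_plot(fpr, tpr):
    # hypothetical helper: draw the ROC curve and return it as a PNG buffer
    fig = plt.figure()
    plt.plot(fpr, tpr)
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    plt.close(fig)
    return buf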
Code Example #7
    def evaluate_answer(self, session, data, use_best_span):

        # Check whether finding the best span improves the score
        start_indicies, end_indicies = self.predict_for_batch(
            session, data, use_best_span)
        pred_answer, truth_answer = self.get_sentences_from_indices(
            data, start_indicies, end_indicies)
        result = evaluate(pred_answer, truth_answer)

        f1 = result["f1"]
        EM = result["EM"]

        return f1, EM
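evaluate here is assumed to return SQuAD-style metrics; a simplified, self-contained sketch of token-level EM and F1 (omitting the usual lowercasing and article/punctuation normalization):

from collections import Counter

def em_f1(pred, truth):
    p, t = pred.split(), truth.split()
    em = float(p == t)
    common = sum((Counter(p) & Counter(t)).values())
    if common == 0:
        return em, 0.0
    precision, recall = common / len(p), common / len(t)
    return em, 2 * precision * recall / (precision + recall)

print(em_f1('the cat sat', 'the cat sat'))  # (1.0, 1.0)
print(em_f1('a cat', 'the cat sat'))        # (0.0, 0.4)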
Code Example #8
    def xxxon_epoch_end(self, epoch, logs=None):  # the 'xxx' prefix presumably disables this Keras hook
        logs = logs or {}
        print("==== on_epoch_end {}".format(epoch))

        # run evaluation
        average_precisions, _ = eval.evaluate(
            self.generator,
            self.model,
            iou_threshold=self.iou_threshold,
            score_threshold=self.score_threshold,
            max_detections=self.max_detections,
            save_path=self.save_path)

        # compute per class average precision
        total_instances = []
        precisions = []
        for label, (average_precision,
                    num_annotations) in average_precisions.items():
            if self.verbose == 1:
                print(
                    '{:.0f} instances of class'.format(num_annotations),
                    self.generator.label_to_name(label),
                    'with average precision: {:.4f}'.format(average_precision))
            total_instances.append(num_annotations)
            precisions.append(average_precision)
        if self.weighted_average:
            self.mean_ap = sum([
                a * b for a, b in zip(total_instances, precisions)
            ]) / sum(total_instances)
        else:
            self.mean_ap = sum(precisions) / sum(x > 0
                                                 for x in total_instances)

        if self.tensorboard:
            import tensorflow as tf
            writer = tf.summary.create_file_writer(self.tensorboard.log_dir)
            with writer.as_default():
                tf.summary.scalar("mAP", self.mean_ap, step=epoch)
                if self.verbose == 1:
                    for label, (average_precision,
                                num_annotations) in average_precisions.items():
                        tf.summary.scalar("AP_" +
                                          self.generator.label_to_name(label),
                                          average_precision,
                                          step=epoch)
                writer.flush()

        logs['mAP'] = self.mean_ap

        if self.verbose == 1:
            print('mAP: {:.4f}'.format(self.mean_ap))
Code Example #9
File: run_script.py Project: up700/Code1
def validation(args, model, tokenizer, labels, pad_token_label_id, best_dev, best_test, 
                  global_step, t_total, epoch, tors):
    
    model_type = MODEL_NAMES[tors].lower()

    results, _, best_dev, is_updated1 = evaluate(
        args, model, tokenizer, labels, pad_token_label_id, best_dev, mode="dev",
        logger=logger, verbose=False,
        prefix='dev [Step {}/{} | Epoch {}/{}]'.format(
            global_step, t_total, epoch, args.num_train_epochs))

    results, _, best_test, is_updated2 = evaluate(
        args, model, tokenizer, labels, pad_token_label_id, best_test, mode="test",
        logger=logger, verbose=False,
        prefix='test [Step {}/{} | Epoch {}/{}]'.format(
            global_step, t_total, epoch, args.num_train_epochs))
   
    # output_dirs = []
    if args.local_rank in [-1, 0] and is_updated1:
        # updated_self_training_teacher = True
        path = os.path.join(args.output_dir+tors, "checkpoint-best-1")
        logger.info("Saving model checkpoint to %s", path)
        if not os.path.exists(path):
            os.makedirs(path)
        model_to_save = (
                model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(path)
        tokenizer.save_pretrained(path)
    # output_dirs = []
    if args.local_rank in [-1, 0] and is_updated2:
        # updated_self_training_teacher = True
        path = os.path.join(args.output_dir+tors, "checkpoint-best-2")
        logger.info("Saving model checkpoint to %s", path)
        if not os.path.exists(path):
            os.makedirs(path)
        model_to_save = (
                model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(path)
        tokenizer.save_pretrained(path)

    return best_dev, best_test, is_updated1
Code Example #10
    def train(self):
        console_header = 'Epoch\tTrain_Loss\tTrain_Accuracy\tTest_Accuracy\tEpoch_Runtime\tLearning_Rate'
        print_to_console(console_header)
        print_to_logfile(self._logfile, console_header, init=True)

        for t in range(self._start_epoch, self._epochs):
            epoch_start = time.time()
            self._scheduler.step(epoch=t)
            # reset average meters
            self._train_loss.reset()
            self._train_accuracy.reset()

            self._net.train(True)
            self.single_epoch_training(t)
            test_accuracy = evaluate(self._test_loader, self._net)

            lr = get_lr_from_optimizer(self._optimizer)

            if test_accuracy > self._best_accuracy:
                self._best_accuracy = test_accuracy
                self._best_epoch = t + 1
                torch.save(self._net.state_dict(), 'model/step{}_best_epoch.pth'.format(self._step))
                # print('*', end='')
            epoch_end = time.time()
            single_epoch_runtime = epoch_end - epoch_start
            # Logging
            console_content = '{:05d}\t{:10.4f}\t{:14.4f}\t{:13.4f}\t{:13.2f}\t{:13.1e}'.format(
                t + 1, self._train_loss.avg, self._train_accuracy.avg, test_accuracy, single_epoch_runtime, lr)
            print_to_console(console_content)
            print_to_logfile(self._logfile, console_content, init=False)

            # save checkpoint
            save_checkpoint({
                'epoch': t + 1,
                'state_dict': self._net.state_dict(),
                'best_epoch': self._best_epoch,
                'best_accuracy': self._best_accuracy,
                'optimizer': self._optimizer.state_dict(),
                'step': self._step,
                'scheduler': self._scheduler.state_dict(),
                'memory_pool': self.memory_pool,
            })

        console_content = 'Best at epoch {}, test accuracy is {}'.format(self._best_epoch, self._best_accuracy)
        print_to_console(console_content)

        # rename log file, stats files and model
        os.rename(self._logfile, self._logfile.replace('.txt', '-{}_{}_{}_{:.4f}.txt'.format(
            self._config['net'], self._config['batch_size'], self._config['lr'], self._best_accuracy)))
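A resume helper matching the checkpoint dict saved above might look like the following (the helper name and the exact fields consumed are assumptions based on the keys passed to save_checkpoint):

import torch

def load_checkpoint(path, net, optimizer, scheduler):
    # hypothetical inverse of save_checkpoint above
    ckpt = torch.load(path)
    net.load_state_dict(ckpt['state_dict'])
    optimizer.load_state_dict(ckpt['optimizer'])
    scheduler.load_state_dict(ckpt['scheduler'])
    return ckpt['epoch'], ckpt['best_accuracy']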
Code Example #11
File: evaluate.py Project: maroonray/RetinaNet
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # create the generator
    generator = create_generator(args)

    # load the model
    print('Loading model, this may take a second...')
    model = keras.models.load_model(args.model, custom_objects=custom_objects)

    # print model summary
    model.summary()  # summary() prints directly; wrapping it in print() would also emit 'None'

    loss = {'regression': losses.smooth_l1(), 'classification': losses.focal()}

    # start evaluation
    average_precisions = evaluate(
        generator,
        model,
        # loss,
        iou_threshold=args.iou_threshold,
        score_threshold=args.score_threshold,
        max_detections=args.max_detections,
        save_path=args.save_path)

    # print evaluation
    for label, average_precision in average_precisions.items():
        print(generator.label_to_name(label),
              '{:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
Code Example #12
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        # run evaluation
        average_precisions = evaluate(
            self.generator,
            self.model,
            iou_threshold=self.iou_threshold,
            score_threshold=self.score_threshold,
            max_detections=self.max_detections,
            visualize=False,
        )

        # compute per class average precision
        total_instances = []
        precisions = []
        for label, (average_precision,
                    num_annotations) in average_precisions.items():
            if self.verbose == 1:
                print(
                    '{:.0f} instances of class'.format(num_annotations),
                    self.generator.label_to_name(label),
                    'with average precision: {:.4f}'.format(average_precision))
            total_instances.append(num_annotations)
            precisions.append(average_precision)
        if self.weighted_average:
            self.mean_ap = sum([
                a * b for a, b in zip(total_instances, precisions)
            ]) / sum(total_instances)
        else:
            self.mean_ap = sum(precisions) / sum(x > 0
                                                 for x in total_instances)

        if self.tensorboard is not None and self.tensorboard.writer is not None:
            import tensorflow as tf
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = self.mean_ap
            summary_value.tag = "mAP"
            self.tensorboard.writer.add_summary(summary, epoch)

        logs['mAP'] = self.mean_ap

        if self.verbose == 1:
            print('mAP: {:.4f}'.format(self.mean_ap))
Code Example #13
def run_s1f():
    desired_data_features = [
        "Age", 'Weight (kg)', 'Height (cm)', "Asian",
        "Black or African American", "Unknown or mixed race",
        "Amiodarone (Cordarone)", "Enzyme inducer status"
    ]

    data = pd.read_csv('data/warfarin_clean5.csv')
    features_of_interest = []
    for name in desired_data_features:
        for feat in data.columns:
            if feat in name:
                features_of_interest.append(feat)

    print("Using {} features".format(len(features_of_interest)))
    print(features_of_interest)

    baseline = S1fBaseline(
        loader("data/warfarin_clean5.csv", features_of_interest))

    cum_regret, avg_regret, avg_accuracy = 0, 0, 0
    counts = [0, 0, 0]
    for i in range(NUM_TRIALS):
        reg, avgreg = baseline.evaluate_online()
        avg_accuracy += evaluate(baseline.predictions,
                                 baseline.data_loader.labels)
        cum_regret += reg
        avg_regret += avgreg
        for pred in baseline.predictions:
            counts[int(pred)] += 1
        baseline.data_loader.reshuffle()

    cum_regret /= NUM_TRIALS
    avg_regret /= NUM_TRIALS
    avg_accuracy /= NUM_TRIALS
    total = sum(counts)

    print("Results (averaged over {} trials)".format(NUM_TRIALS))
    print("Cumulative Regret {}, Average Regret {}".format(
        cum_regret, avg_regret))
    print("Accuracy: ", avg_accuracy)
    print("Average low: {} ({}%)".format(counts[0], 100 * (counts[0] / total)))
    print("Average med: {} ({}%)".format(counts[1], 100 * (counts[1] / total)))
    print("Average high: {} ({}%)".format(counts[2],
                                          100 * (counts[2] / total)))
Code Example #14
File: run_script.py Project: up700/Code1
def predict(args, tors, labels, pad_token_label_id, best_test):
    path = os.path.join(args.output_dir+tors, "checkpoint-best-2")
    tokenizer = RobertaTokenizer.from_pretrained(path, do_lower_case=args.do_lower_case)
    model = RobertaForTokenClassification_Modified.from_pretrained(path)
    model.to(args.device)

    # if not best_test:
   
    # result, predictions, _, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, best=best_test, mode="test")
    result, _, best_test, _ = evaluate(args, model, tokenizer, labels,
                                       pad_token_label_id, best_test, mode="test",
                                       logger=logger, verbose=False)
    # Save results
    output_test_results_file = os.path.join(args.output_dir, "test_results.txt")
    with open(output_test_results_file, "w") as writer:
        for key in sorted(result.keys()):
            writer.write("{} = {}\n".format(key, str(result[key])))

    return best_test
Code Example #15
def _main_():
    parser = argparse.ArgumentParser(description='evaluate yolov3')
    parser.add_argument('--log_dir',
                        default="logs/",
                        help='path to log train folder')

    parser.add_argument('--annotation_path_val',
                        default="val.txt",
                        help=" file annotation data validation yolo format")
    parser.add_argument('--model_checkpoint',
                        default=None,
                        help=" file checkpoint model")
    parser.add_argument('--anchor_path',
                        default="data_utils/yolo_anchors.txt",
                        help="file anchor size")

    args = parser.parse_args()

    log_dir = args.log_dir + "full/"
    annotation_path_val = args.annotation_path_val
    model_checkpoint = args.model_checkpoint
    anchors_path = args.anchor_path

    valid_ints, labels = parse_voc_annotation(annotation_path_val, cf.classes)
    labels = list(labels.keys()) if len(cf.classes) == 0 else cf.classes  # list() so labels[label] below works

    ###############################
    #   Load the model and do evaluation
    ###############################

    model = YOLO(model_path=log_dir + model_checkpoint,
                 anchors_path=anchors_path)

    # compute mAP for all the classes
    average_precisions, avg_time = evaluate(model, annotation_path_val)

    # print the score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
    print('avg time: {:.4f}'.format(avg_time))
Code Example #16
def run_modified_ucb():
    data = pd.read_csv('data/warfarin_clean6.csv')
    features_of_interest = []
    for feat in data.columns:
        for name in FEATURES:
            if name in feat:
                features_of_interest.append(feat)

    print("Using {} features".format(len(features_of_interest)))
    print(features_of_interest)

    modified_ucb = SimpleLinearAlg(
        loader("data/warfarin_clean6.csv",
               features=features_of_interest,
               seed=random.randint(1, 100)))

    cum_regret, avg_regret, avg_accuracy = 0, 0, 0
    counts = [0, 0, 0]
    NUM_TRIALS = 1  # local override: this function averages over a single trial
    for i in range(NUM_TRIALS):
        reg, avgreg = modified_ucb.evaluate_online()
        avg_accuracy += evaluate(modified_ucb.predictions,
                                 modified_ucb.data_loader.labels)
        cum_regret += reg
        avg_regret += avgreg
        for pred in modified_ucb.predictions:
            counts[int(pred)] += 1
        modified_ucb.data_loader.reshuffle()

    cum_regret /= NUM_TRIALS
    avg_regret /= NUM_TRIALS
    avg_accuracy /= NUM_TRIALS
    total = sum(counts)

    print("Results (averaged over {} trials)".format(NUM_TRIALS))
    print("Cumulative Regret {}, Average Regret {}".format(
        cum_regret, avg_regret))
    print("Accuracy: ", avg_accuracy)
    print("Average low: {} ({}%)".format(counts[0], 100 * (counts[0] / total)))
    print("Average med: {} ({}%)".format(counts[1], 100 * (counts[1] / total)))
    print("Average high: {} ({}%)".format(counts[2],
                                          100 * (counts[2] / total)))
Code Example #17
def main(args=None):
    # parse arguments

    args = parse_args()
    args = load_setting_cfg(args)

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        set_gpu()

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # create the generator
    generator = create_generator(args)

    # load the model
    print('Loading model, this may take a second...')
    model = VGG16FasterRCNN_bbox(num_classes=generator.num_classes())
    model.load_weights(filepath=args.weight_path)

    # print model summary
    model.summary()  # summary() prints directly; wrapping it in print() would also emit 'None'

    # start evaluation
    print('start evaluate')
    average_precisions = evaluate(generator,
                                  model,
                                  iou_threshold=args.iou_threshold,
                                  score_threshold=args.score_threshold,
                                  max_detections=args.max_detections,
                                  save_path=args.save_path)

    # print evaluation
    for label, average_precision in average_precisions.items():
        print(generator.label_to_name(label),
              '{:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
Code Example #18
def test(test_loader, model, epoch):
    l2_distance = PairwiseDistance(2)
    model.eval()
    labels, distances = [], []
    for batch_idx, (data_a, data_b, label) in enumerate(test_loader):
        if use_cuda:
            data_a, data_b = data_a.cuda(), data_b.cuda()
        # Variable/volatile are deprecated since PyTorch 0.4; torch.no_grad() is the modern idiom
        data_a, data_b, label = Variable(data_a, volatile=True), Variable(
            data_b, volatile=True), Variable(label)
        out_a, out_b = model(data_a), model(data_b)
        distance = l2_distance.forward(out_a, out_b)
        distances.append(distance.data.cpu().numpy())
        labels.append(label.data.cpu().numpy())
    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array([subdist for dist in distances for subdist in dist])
    TPR, FPR, accuracy, val, val_std, far = evaluate(distances, labels)
    logging('[Test Accuracy]: %f' % np.mean(accuracy))
    plot_roc(FPR,
             TPR,
             figure_name='roc_train_epoch_{}.png'.format(epoch),
             savefile='./log')
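The two nested comprehensions above just flatten per-batch arrays; np.concatenate is the more direct equivalent (illustrative values):

import numpy as np

batches = [np.array([1, 0]), np.array([1])]  # dummy per-batch arrays
flat = np.array([v for b in batches for v in b])
assert (flat == np.concatenate(batches)).all()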
Code Example #19
def evaluate_model(generator=None,
                   model=None,
                   nt=0.5,
                   it=0.25,
                   st=0.4,
                   md=500,
                   save_path=None,
                   mask=None,
                   output=None):
    # avoid a shared mutable default: accumulate into a fresh list unless one is passed in
    if output is None:
        output = []
    # evaluate the model
    average_precisions = evaluate(
        generator=generator,
        model=model,
        nms_thres=nt,          # 0.4 IoU threshold for NMS between detections (candidates)
        iou_threshold=it,      # 0.2 [AP_25=0.25, AP_50=0.50, AP_75=0.75, AP=avg(0.50,0.05,0.95)]
        score_threshold=st,    # 0.45 confidence value
        max_detections=md,     # fixed value (max=331 from 'eae-cerebro-0014_t001_10003.jpg')
        save_path=save_path,   # e.g. '/home/bgregorio/workspace/mynet_keras/out_imgs/test'
        mask_base_path=mask)   # e.g. '/home/bgregorio/workspace/data/dataset/all/masks'

    # print evaluation
    for l, (r, p, f1, ap, num_annotations) in average_precisions.items():
        dic = {
            'nms_threshold': nt,
            'iou_threshold': it,
            'score_threshold': st,
            'max_detections': md,
            'label': l,
            'recall': r,
            'precision': p,
            'f1_score': f1,
            'average_precision': ap,
            'num_annotations': num_annotations
        }
        output.append(dic)
    return output
Code Example #20
File: callbacks.py Project: jhl13/tf-keras-retinanet
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Run evaluation.
        average_precisions, inference_time = evaluate(
            self.generator,
            self.predict_model,
            iou_threshold=self.iou_threshold,
            score_threshold=self.score_threshold,
            max_detections=self.max_detections,
            save_path=self.save_path)

        # Compute per class average precision.
        total_instances = []
        precisions = []
        for label, (average_precision,
                    num_annotations) in average_precisions.items():
            if self.verbose == 1:
                print(
                    '{:.0f} instances of class'.format(num_annotations),
                    self.generator.label_to_name(label),
                    'with average precision: {:.4f}'.format(average_precision))
            total_instances.append(num_annotations)
            precisions.append(average_precision)
        if self.weighted_average:
            self.mean_ap = sum([
                a * b for a, b in zip(total_instances, precisions)
            ]) / sum(total_instances)
        else:
            self.mean_ap = sum(precisions) / sum(x > 0
                                                 for x in total_instances)

        with self.file_writer.as_default():
            tf.summary.scalar("mAP", self.mean_ap, step=epoch)

        logs['mAP'] = self.mean_ap

        if self.verbose == 1:
            print('mAP: {:.4f}'.format(self.mean_ap))
Code Example #21
def validate(mval_loader, SM, eval_mode, GPU):
    tqdm.write("Validation...")
    submit = []
    gt     = []
    total_vloss    = 0
    total_vcorrects = 0
    total_vquery    = 0
    val_sessions_iter = iter(mval_loader)
    
    for val_session in trange(len(val_sessions_iter), desc='val-sessions', position=2, ascii=True):
        SM.eval()        
        x, labels, y_mask, num_items, index = next(val_sessions_iter) # FIXED 13.Dec. SEPARATE LOGS. QUERY SHOULD NOT INCLUDE LOGS
        # Sample data for 'support' and 'query': ex) 15 items = 7 sup, 8 queries...        
        num_support = num_items[:,0].detach().numpy().flatten() # If num_items was odd number, query has one more item. 
        num_query   = num_items[:,1].detach().numpy().flatten()
        batch_sz    = num_items.shape[0]
        
        # x: the first 10 items out of 20 are support items left-padded with zeros. The last 10 are queries right-padded.
        x = x.permute(0,2,1) # bx70*20
        x_feat = torch.zeros(batch_sz, 72, 20)
        x_feat[:,:70,:] = x.clone()
        x_feat[:, 70,:10] = 1  
        x_feat[:, 71,:10] = labels[:,:10].clone()
        x_feat = Variable(x_feat, requires_grad=False).cuda(GPU)
        
        # y
        y = labels.clone()
        
        # y_mask
        y_mask_que = y_mask.clone()
        y_mask_que[:,:10] = 0
        
        # Forward & update
        _, y_hat = SM(x_feat) # y_hat: b*20

#        if USE_PRED_LABEL is True:
#            # Predict
#            li = 70 if USE_SUPLOG is True else 29 # the label's dimension indice
#            _x = x[:,:,:11] # bx72*11
#            for q in range(11,20):
#                y_hat = SM(Variable(_x, requires_grad=False)) # will be bx11 at the first round 
#                # Append next features
#                _x = torch.cat((_x, x[:,:,q].unsqueeze(2)), 2) # now bx72*12
#                _x[:,li,q] = torch.sigmoid(y_hat[:,-1])
#            y_hat = SM(Variable(_x, requires_grad=False)) # y_hat(final): bx20
#            del _x
#        else:
#            y_hat = SM(x)
        
        # Calculate BCE loss: only the query part (the second half) is scored
        loss = F.binary_cross_entropy_with_logits(input=y_hat*y_mask_que.cuda(GPU), target=y.cuda(GPU)*y_mask_que.cuda(GPU))
        total_vloss += loss.item()
        
        # Decision
        y_prob = torch.sigmoid(y_hat*y_mask_que.cuda(GPU)).detach().cpu().numpy() # bx20               
        y_pred = (y_prob[:,10:]>0.5).astype(int) # bx10 (np.int was removed in NumPy 1.24)
        y_numpy = labels[:,10:].numpy() # bx10
        # Acc
        total_vcorrects += np.sum((y_pred==y_numpy)*y_mask_que[:,10:].numpy())
        total_vquery += np.sum(num_query)
        
        # Eval, Submission
        if eval_mode != 0:  # 'is not' on ints is unreliable and a SyntaxWarning on Python 3.8+
            for b in np.arange(batch_sz):
                submit.append(y_pred[b,:num_query[b]].flatten())
                gt.append(y_numpy[b,:num_query[b]].flatten())
                
        if (val_session+1)%400 == 0:
            sample_sup = labels[0,(10-num_support[0]):10].long().numpy().flatten() 
            sample_que = y_numpy[0,:num_query[0]].astype(int)
            sample_pred = y_pred[0,:num_query[0]]
            sample_prob = y_prob[0,10:10+num_query[0]]
            tqdm.write("S:" + np.array2string(sample_sup) +'\n'+
                       "Q:" + np.array2string(sample_que) + '\n' +
                       "P:" + np.array2string(sample_pred) + '\n' +
                       "prob:" + np.array2string(sample_prob))
            tqdm.write("val_session:{0:}  vloss:{1:.6f}  vacc:{2:.4f}".format(val_session,loss.item(), total_vcorrects/total_vquery))
        del loss, y_hat, x # Restore GPU memory
        
    # Avg.Acc
    if eval_mode==1:
        aacc = evaluate(submit, gt)
        tqdm.write("AACC={0:.6f}, FirstAcc={1:.6f}".format(aacc[0], aacc[1]))     
        
    hist_vloss.append(total_vloss/val_session)
    hist_vacc.append(total_vcorrects/total_vquery)
    return submit
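One caveat with the masking above: multiplying both logits and targets by the mask does not remove masked positions from the loss, since BCE-with-logits at (input=0, target=0) still contributes ln 2 ≈ 0.693 per element; it only makes their contribution constant. A sketch that excludes masked positions exactly via the weight argument (illustrative tensors):

import torch
import torch.nn.functional as F

y_hat = torch.randn(2, 20)                # logits
y = torch.randint(0, 2, (2, 20)).float()  # targets
mask = torch.zeros(2, 20)
mask[:, 10:] = 1                          # score only the query half
loss = F.binary_cross_entropy_with_logits(y_hat, y, weight=mask,
                                          reduction='sum') / mask.sum()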
Code Example #22
def zeroshot_train(t_depth,
                   t_width,
                   t_wght_path,
                   s_depth,
                   s_width,
                   seed=42,
                   savedir=None,
                   dataset='cifar10',
                   sample_per_class=0):

    set_seed(seed)

    train_name = '%s_T-%d-%d_S-%d-%d_seed_%d' % (dataset, t_depth, t_width,
                                                 s_depth, s_width, seed)
    if sample_per_class > 0:
        train_name += "-m%d" % sample_per_class
    log_filename = train_name + '_training_log.csv'

    # save dir
    if not savedir:
        savedir = 'zeroshot_' + train_name
    full_savedir = os.path.join(os.getcwd(), savedir)
    mkdir(full_savedir)

    log_filepath = os.path.join(full_savedir, log_filename)
    logger = CustomizedCSVLogger(log_filepath)

    # Teacher
    teacher = WideResidualNetwork(t_depth,
                                  t_width,
                                  input_shape=Config.input_dim,
                                  dropout_rate=0.0,
                                  output_activations=True,
                                  has_softmax=False)

    teacher.load_weights(t_wght_path)
    teacher.trainable = False

    # Student
    student = WideResidualNetwork(s_depth,
                                  s_width,
                                  input_shape=Config.input_dim,
                                  dropout_rate=0.0,
                                  output_activations=True,
                                  has_softmax=False)

    if sample_per_class > 0:
        s_decay_steps = Config.n_outer_loop * Config.n_s_in_loop + Config.n_outer_loop
    else:
        s_decay_steps = Config.n_outer_loop * Config.n_s_in_loop

    s_optim = Adam(learning_rate=CosineDecay(Config.student_init_lr,
                                             decay_steps=s_decay_steps))
    # ---------------------------------------------------------------------------
    # Generator
    generator = NavieGenerator(input_dim=Config.z_dim)
    g_optim = Adam(learning_rate=CosineDecay(Config.generator_init_lr,
                                             decay_steps=Config.n_outer_loop *
                                             Config.n_g_in_loop))
    # ---------------------------------------------------------------------------
    # Test data
    if dataset == 'cifar10':
        (x_train, y_train_lbl), (x_test, y_test) = get_cifar10_data()
    elif dataset == 'fashion_mnist':
        (x_train, y_train_lbl), (x_test, y_test) = get_fashion_mnist_data()
    else:
        raise ValueError("Only Cifar-10 and Fashion-MNIST supported !!")
    test_data_loader = tf.data.Dataset.from_tensor_slices(
        (x_test, y_test)).batch(200)
    # ---------------------------------------------------------------------------
    # Train data (if using train data)
    train_dataflow = None
    if sample_per_class > 0:
        # sample first
        x_train, y_train_lbl = \
            balance_sampling(x_train, y_train_lbl, data_per_class=sample_per_class)
        datagen = ImageDataGenerator(width_shift_range=4,
                                     height_shift_range=4,
                                     horizontal_flip=True,
                                     vertical_flip=False,
                                     rescale=None,
                                     fill_mode='reflect')
        datagen.fit(x_train)
        y_train = to_categorical(y_train_lbl)
        train_dataflow = datagen.flow(x_train,
                                      y_train,
                                      batch_size=Config.batch_size,
                                      shuffle=True)

    # Generator loss metrics
    g_loss_met = tf.keras.metrics.Mean()

    # Student loss metrics
    s_loss_met = tf.keras.metrics.Mean()

    #
    n_cls_t_pred_metric = tf.keras.metrics.Mean()
    n_cls_s_pred_metric = tf.keras.metrics.Mean()

    max_g_grad_norm_metric = tf.keras.metrics.Mean()
    max_s_grad_norm_metric = tf.keras.metrics.Mean()

    # checkpoint
    chkpt_dict = {
        'teacher': teacher,
        'student': student,
        'generator': generator,
        's_optim': s_optim,
        'g_optim': g_optim,
    }
    # Saving checkpoint
    ckpt = tf.train.Checkpoint(**chkpt_dict)
    ckpt_manager = tf.train.CheckpointManager(ckpt,
                                              os.path.join(savedir, 'chpt'),
                                              max_to_keep=2)
    # ==========================================================================
    # if a checkpoint exists, restore the latest checkpoint.
    start_iter = 0
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        print('Latest checkpoint restored!!')
        with open(os.path.join(savedir, 'chpt', 'iteration'), 'r') as f:
            start_iter = int(f.read())
        logger = CustomizedCSVLogger(log_filepath, append=True)

    for iter_ in range(start_iter, Config.n_outer_loop):
        iter_stime = time.time()

        max_s_grad_norm = 0
        max_g_grad_norm = 0
        # sample from the latent space to generate images
        z_val = tf.random.normal([Config.batch_size, Config.z_dim])

        # Generator training
        loss = 0
        for ng in range(Config.n_g_in_loop):
            loss, g_grad_norm = train_gen(generator, g_optim, z_val, teacher,
                                          student)
            max_g_grad_norm = max(max_g_grad_norm, g_grad_norm.numpy())
            g_loss_met(loss)

        # ==========================================================================
        # Student training
        loss = 0
        pseudo_imgs, t_logits, t_acts = prepare_train_student(
            generator, z_val, teacher)
        for ns in range(Config.n_s_in_loop):
            # pseudo_imgs, t_logits, t_acts = prepare_train_student(generator, z_val, teacher)
            loss, s_grad_norm, s_logits = train_student(
                pseudo_imgs, s_optim, t_logits, t_acts, student)
            max_s_grad_norm = max(max_s_grad_norm, s_grad_norm.numpy())

            n_cls_t_pred = len(np.unique(np.argmax(t_logits, axis=-1)))
            n_cls_s_pred = len(np.unique(np.argmax(s_logits, axis=-1)))
            # logging
            s_loss_met(loss)
            n_cls_t_pred_metric(n_cls_t_pred)
            n_cls_s_pred_metric(n_cls_s_pred)
        # ==========================================================================
        # train if provided n samples
        if train_dataflow:
            x_batch_train, y_batch_train = next(train_dataflow)
            t_logits, t_acts = forward(teacher, x_batch_train, training=False)
            loss = train_student_with_labels(student, s_optim, x_batch_train,
                                             t_logits, t_acts, y_batch_train)
        # ==========================================================================

        # --------------------------------------------------------------------
        iter_etime = time.time()
        max_g_grad_norm_metric(max_g_grad_norm)
        max_s_grad_norm_metric(max_s_grad_norm)
        # --------------------------------------------------------------------
        is_last_epoch = (iter_ == Config.n_outer_loop - 1)

        if iter_ != 0 and (iter_ % Config.print_freq == 0 or is_last_epoch):
            n_cls_t_pred_avg = n_cls_t_pred_metric.result().numpy()
            n_cls_s_pred_avg = n_cls_s_pred_metric.result().numpy()
            time_per_epoch = iter_etime - iter_stime

            s_loss = s_loss_met.result().numpy()
            g_loss = g_loss_met.result().numpy()
            max_g_grad_norm_avg = max_g_grad_norm_metric.result().numpy()
            max_s_grad_norm_avg = max_s_grad_norm_metric.result().numpy()

            # build ordered dict
            row_dict = OrderedDict()

            row_dict['time_per_epoch'] = time_per_epoch
            row_dict['epoch'] = iter_
            row_dict['generator_loss'] = g_loss
            row_dict['student_kd_loss'] = s_loss
            row_dict['n_cls_t_pred_avg'] = n_cls_t_pred_avg
            row_dict['n_cls_s_pred_avg'] = n_cls_s_pred_avg
            row_dict['max_g_grad_norm_avg'] = max_g_grad_norm_avg
            row_dict['max_s_grad_norm_avg'] = max_s_grad_norm_avg

            if sample_per_class > 0:
                s_optim_iter = iter_ * (Config.n_s_in_loop + 1)
            else:
                s_optim_iter = iter_ * Config.n_s_in_loop
            row_dict['s_optim_lr'] = s_optim.learning_rate(
                s_optim_iter).numpy()
            row_dict['g_optim_lr'] = g_optim.learning_rate(iter_).numpy()

            pprint.pprint(row_dict)
        # ======================================================================
        if iter_ != 0 and (iter_ % Config.log_freq == 0 or is_last_epoch):
            # calculate acc
            test_accuracy = evaluate(test_data_loader, student).numpy()
            row_dict['test_acc'] = test_accuracy
            logger.log_with_order(row_dict)
            print('Test Accuracy: ', test_accuracy)

            # checkpointing
            ckpt_save_path = ckpt_manager.save()
            print('Saving checkpoint for epoch {} at {}'.format(
                iter_ + 1, ckpt_save_path))
            with open(os.path.join(savedir, 'chpt', 'iteration'), 'w') as f:
                f.write(str(iter_ + 1))

            s_loss_met.reset_states()
            g_loss_met.reset_states()
            max_g_grad_norm_metric.reset_states()
            max_s_grad_norm_metric.reset_states()

        if iter_ != 0 and (iter_ % 5000 == 0 or is_last_epoch):
            generator.save_weights(
                join(full_savedir, "generator_i{}.h5".format(iter_)))
            student.save_weights(
                join(full_savedir, "student_i{}.h5".format(iter_)))
Code Example #23
def validate(mval_loader, SM, eval_mode, GPU):
    tqdm.write("Validation...")
    submit = []
    gt     = []
    total_vloss    = 0
    total_vloss_qlog = 0
    total_vloss_skip = 0
    total_vcorrects = 0
    total_vquery    = 0
    val_sessions_iter = iter(mval_loader)
    for val_session in trange(len(val_sessions_iter), desc='val-sessions', position=2, ascii=True):
        SM.eval()        
        x, labels, y_mask, num_items, index = next(val_sessions_iter) # FIXED 13.Dec. SEPARATE LOGS. QUERY SHOULD NOT INCLUDE LOGS
        
        # Sample data for 'support' and 'query': ex) 15 items = 7 sup, 8 queries...        
        num_support = num_items[:,0].detach().numpy().flatten() # If num_items was odd number, query has one more item. 
        num_query   = num_items[:,1].detach().numpy().flatten()
        batch_sz    = num_items.shape[0]

        # x: bx70*20
        x = x.permute(0,2,1)
        
        # Prepare ground truth log and label, y
        y_qlog = x[:,:41,:].clone() # bx41*20
        y_skip = labels.clone() #bx20
        y_mask_qlog = y_mask.unsqueeze(1).repeat(1,41,1) #bx41*20
        y_mask_skip = y_mask #bx20

        # log shift: bx41*20
        log_shift = torch.zeros(batch_sz,41,20)
        log_shift[:,:,1:] = x[:,:41,:-1]
        log_shift[:,:,11:] = 0 # DELETE LOG QUE
        
        # labels_shift: bx1*20(model can only observe past labels)
        labels_shift = torch.zeros(batch_sz,1,20)
        labels_shift[:,0,1:] = labels[:,:-1].float()
        labels_shift[:,0,11:] = 0 #!!! NOLABEL for previous QUERY
        
        # support/query state labels: bx1*20
        sq_state = torch.zeros(batch_sz,1,20)
        sq_state[:,0,:11] = 1
        
        # Pack x: bx72*20 (or bx32*20 if not using sup_logs)
        x = torch.cat((log_shift, x[:,41:,:], labels_shift, sq_state), 1).cuda(GPU) # x: bx72*20
        
        if USE_PRED_LABEL is True:
            # Predict
            li = 70 # the label's dimension indice
            _x = x[:,:,:11].clone() # bx72*11
            for q in range(11,20):
                y_hat_qlog, y_hat_skip = SM(Variable(_x, requires_grad=False)) # will be bx11 at the first round 
                # Append next features
                _x = torch.cat((_x, x[:,:,q].unsqueeze(2)), 2) # now bx72*12
                _x[:,li,q] = torch.sigmoid(y_hat_skip[:,-1]) # replace with predicted label
                _x[:,:41,q] = torch.sigmoid(y_hat_qlog[:,-1])
            y_hat_qlog, y_hat_skip = SM(Variable(_x, requires_grad=False)) # y_hat(final): bx20
            del _x
        else:
            y_hat_qlog, y_hat_skip = SM(x) # y_hat_qlog: bx41*20, y_hat_skip: b*20
            
        # Calculate BCE loss
        loss_qlog = F.binary_cross_entropy_with_logits(input=y_hat_qlog.cuda(GPU)*y_mask_qlog.cuda(GPU),
                                                       target=y_qlog.cuda(GPU)*y_mask_qlog.cuda(GPU))
        loss_skip = F.binary_cross_entropy_with_logits(input=y_hat_skip.cuda(GPU)*y_mask_skip.cuda(GPU),
                                                       target=y_skip.cuda(GPU)*y_mask_skip.cuda(GPU))
        loss      = loss_qlog + loss_skip
        total_vloss_qlog += loss_qlog.item()
        total_vloss_skip += loss_skip.item()
        total_vloss += loss.item()
        
        # Decision
        y_prob = torch.sigmoid(y_hat_skip.detach()*y_mask_skip.cuda(GPU)).cpu().numpy() # bx20               
        y_pred = (y_prob[:,10:]>=0.5).astype(int) # bx10 (np.int was removed in NumPy 1.24)
        y_numpy = y_skip[:,10:].numpy() # bx10
        # Acc
        total_vcorrects += np.sum((y_pred==y_numpy)*y_mask_skip[:,10:].numpy())
        total_vquery += np.sum(num_query)
        
        # Restore GPU memory
        del loss, loss_qlog, loss_skip, y_hat_qlog, y_hat_skip
            
        # Eval, Submission
        if eval_mode != 0:
            for b in np.arange(batch_sz):
                submit.append(y_pred[b,:num_query[b]].flatten())
                gt.append(y_numpy[b,:num_query[b]].flatten())
                
        if (val_session+1)%400 == 0:
            sample_sup = labels[0,(10-num_support[0]):10].long().numpy().flatten() 
            sample_que = y_numpy[0,:num_query[0]].astype(int)
            sample_pred = y_pred[0,:num_query[0]]
            sample_prob = y_prob[0,10:10+num_query[0]]
            tqdm.write("S:" + np.array2string(sample_sup) +'\n'+
                       "Q:" + np.array2string(sample_que) + '\n' +
                       "P:" + np.array2string(sample_pred) + '\n' +
                       "prob:" + np.array2string(sample_prob))
            tqdm.write("val_session:{0:}  vloss(qlog|skip):{1:.6f}({2:.6f}|{3:.6f})  vacc:{4:.4f}".format(val_session,
                       total_vloss/total_vquery, total_vloss_qlog/total_vquery, 
                       total_vloss_skip/total_vquery, total_vcorrects/total_vquery))
        
    # Avg.Acc (skip labels only, log-generation acc is not implemented yet!)
    if eval_mode==1:
        aacc = evaluate(submit, gt)
        tqdm.write("AACC={0:.6f}, FirstAcc={1:.6f}".format(aacc[0], aacc[1]))    
        
    hist_vloss.append(total_vloss/total_vquery)
    hist_vloss_qlog.append(total_vloss_qlog/total_vquery)
    hist_vloss_skip.append(total_vloss_skip/total_vquery)
    hist_vacc.append(total_vcorrects/total_vquery)
    return submit
Code Example #24
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print('Extracted features for query set, obtained {}-by-{} matrix'.
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print('Extracted features for gallery set, obtained {}-by-{} matrix'.
              format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(
        batch_time.avg, args.test_batch_size))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)  # keyword form; positional beta/alpha is deprecated in modern PyTorch
    distmat = distmat.numpy()

    print('Computing CMC and mAP')
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print('Results ----------')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in ranks:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
    print('------------------')

    if return_distmat:
        return distmat
    return cmc[0]
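The distmat above expands ||q - g||^2 = ||q||^2 + ||g||^2 - 2 q·g; a compact equivalence check against torch.cdist in modern PyTorch (random tensors for illustration):

import torch

qf, gf = torch.randn(4, 128), torch.randn(6, 128)
m, n = qf.size(0), gf.size(0)
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
          torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
assert torch.allclose(distmat, torch.cdist(qf, gf).pow(2), atol=1e-4)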
Code Example #25
def validate(mval_loader, SM, eval_mode):
    tqdm.write("Validation...")
    submit = []
    gt = []
    total_vloss = 0
    total_vcorrects = 0
    total_vquery = 0
    val_sessions_iter = iter(mval_loader)

    for val_session in trange(len(val_sessions_iter),
                              desc='val-sessions',
                              position=2,
                              ascii=True):
        SM.eval()
        x, labels, y_mask, num_items, index = next(
            val_sessions_iter)  # FIXED 13.Dec. SEPARATE LOGS. QUERY SHOULD NOT INCLUDE LOGS
        # Sample data for 'support' and 'query': ex) 15 items = 7 sup, 8 queries...
        num_support = num_items[:, 0].detach().numpy().flatten(
        )  # If num_items was odd number, query has one more item.
        num_query = num_items[:, 1].detach().numpy().flatten()
        batch_sz = num_items.shape[0]

        x[:, 10:, :41] = 0  # DELETE METALOG QUE
        if USE_SUPLOG is False:
            x = x[:, :, 41:]  # bx20*70(29)
        labels_shift = torch.zeros(batch_sz, 20, 1)
        labels_shift[:, 1:, 0] = labels[:, :-1].float()
        labels_shift[:, 11:, 0] = 0  # REMOVE QUERY LABELS!
        sq_state = torch.zeros(batch_sz, 20, 1)
        sq_state[:, :11, 0] = 1
        # x: bx72(31)*20
        x = torch.cat((x, labels_shift, sq_state), dim=2).permute(0, 2,
                                                                  1).cuda(GPU)

        if USE_PRED_LABEL is True:
            # Predict
            li = 70 if USE_SUPLOG is True else 29  # the label's dimension indice
            _x = x[:, :, :11]  # bx72*11
            for q in range(11, 20):
                y_hat = SM(Variable(
                    _x,
                    requires_grad=False))  # will be bx11 at the first round
                # Append next features
                _x = torch.cat((_x, x[:, :, q].unsqueeze(2)), 2)  # now bx72*12
                _x[:, li, q] = torch.sigmoid(y_hat[:, -1])
            y_hat = SM(Variable(_x, requires_grad=False))  # y_hat(final): bx20
            del _x
        else:
            y_hat = SM(x)
        # Calculate BCE loss
        loss = F.binary_cross_entropy_with_logits(
            input=y_hat * y_mask.cuda(GPU),
            target=labels.cuda(GPU) * y_mask.cuda(GPU))
        total_vloss += loss.item()

        # Decision
        y_prob = torch.sigmoid(y_hat *
                               y_mask.cuda(GPU)).detach().cpu().numpy()  # bx20
        y_pred = (y_prob[:, 10:] >= 0.5).astype(int)  # bx10 (np.int was removed in NumPy 1.24)
        y_numpy = labels[:, 10:].numpy()  # bx10
        # Acc
        y_query_mask = y_mask[:, 10:].numpy()
        total_vcorrects += np.sum((y_pred == y_numpy) * y_query_mask)
        total_vquery += np.sum(num_query)

        # Eval, Submission
        if eval_mode != 0:
            for b in np.arange(batch_sz):
                submit.append(y_pred[b, :num_query[b]].flatten())
                gt.append(y_numpy[b, :num_query[b]].flatten())

        if (val_session + 1) % 400 == 0:
            sample_sup = labels[0, :num_support[0]].long().numpy().flatten()
            sample_que = y_numpy[0, :num_query[0]].astype(int)
            sample_pred = y_pred[0, :num_query[0]]
            sample_prob = y_prob[0, 10:10 + num_query[0]]
            tqdm.write("S:" + np.array2string(sample_sup) + '\n' + "Q:" +
                       np.array2string(sample_que) + '\n' + "P:" +
                       np.array2string(sample_pred) + '\n' + "prob:" +
                       np.array2string(sample_prob))
            tqdm.write("val_session:{0:}  vloss:{1:.6f}  vacc:{2:.4f}".format(
                val_session, loss.item(), total_vcorrects / total_vquery))
        del loss, y_hat, x  # Restore GPU memory

    # Avg.Acc
    if eval_mode == 1:
        aacc = evaluate(submit, gt)
        tqdm.write("AACC={0:.6f}, FirstAcc={1:.6f}".format(aacc[0], aacc[1]))

    hist_vloss.append(total_vloss / val_session)
    hist_vacc.append(total_vcorrects / total_vquery)
    return submit
Code Example #26
def validate(mval_loader, FeatEnc, RN, eval_mode):
    tqdm.write("Validation...")
    submit = []
    gt = []
    total_vloss = 0
    total_vcorrects = 0
    total_vquery = 0
    val_sessions_iter = iter(mval_loader)

    for val_session in trange(len(val_sessions_iter),
                              desc='val-sessions',
                              position=2,
                              ascii=True):
        FeatEnc.eval()
        RN.eval()
        x_sup, x_que, x_log_sup, x_log_que, label_sup, label_que, num_items, index = next(
            val_sessions_iter)  # FIXED 13.Dec. SEPARATE LOGS. QUERY SHOULD NOT INCLUDE LOGS
        x_sup, x_que = Variable(x_sup).cuda(GPU), Variable(x_que).cuda(GPU)
        x_log_sup, x_log_que = Variable(x_log_sup).cuda(GPU), Variable(
            x_log_que).cuda(GPU)
        label_sup = Variable(label_sup).cuda(GPU)

        num_support = num_items[:, 0].detach().numpy().flatten(
        )  # If num_items was odd number, query has one more item.
        num_query = num_items[:, 1].detach().numpy().flatten()
        batch_sz = num_items.shape[0]

        x_sup = x_sup.unsqueeze(2)  # 1x7*29 --> 1x7x1*29
        x_que = x_que.unsqueeze(2)  # 1x8*29 --> 1x8x1*29
        x_feat_sup = FeatEnc(x_sup)  # 1x7x1*64
        x_feat_que = FeatEnc(x_que)  # 1x8x1*64

        y_hat = RN(x_feat_sup, x_feat_que, x_log_sup, x_log_que,
                   label_sup)  # bx8
        y_gt = label_que[:, :, 1]
        y_mask = np.zeros((batch_sz, 10), dtype=np.float32)
        for b in np.arange(batch_sz):
            y_mask[b, :num_query[b]] = 1
        y_mask = torch.FloatTensor(y_mask).cuda(GPU)

        loss = F.binary_cross_entropy_with_logits(input=y_hat * y_mask,
                                                  target=y_gt.cuda(GPU) *
                                                  y_mask)
        total_vloss += loss.item()

        # Decision
        y_prob = (torch.sigmoid(y_hat) * y_mask).detach().cpu().numpy()
        y_pred = ((torch.sigmoid(y_hat) > 0.5).float() *
                  y_mask).detach().cpu().long().numpy()

        # Eval, Submission
        _y_gt = label_que[:, :, 1].detach().cpu().numpy()
        if eval_mode is True:
            for b in np.arange(batch_sz):
                submit.append(y_pred[b, :num_query[b]].flatten())
                gt.append(_y_gt[b, :num_query[b]].flatten())

        # Prepare display
        sample_sup = label_sup[0, :num_support[0],
                               1].detach().long().cpu().numpy().flatten()
        sample_que = label_que[0, :num_query[0], 1].long().numpy().flatten()
        sample_pred = y_pred[0, :num_query[0]].flatten()
        sample_prob = y_prob[0, :num_query[0]].flatten()

        # Acc
        total_vcorrects += np.sum(
            (y_pred == label_que[:, :, 1].long().numpy()) *
            y_mask.cpu().numpy())
        total_vquery += np.sum(num_query)

        if (val_session + 1) % 400 == 0:
            tqdm.write("S:" + np.array2string(sample_sup) + '\n' + "Q:" +
                       np.array2string(sample_que) + '\n' + "P:" +
                       np.array2string(sample_pred) + '\n' + "prob:" +
                       np.array2string(sample_prob))
            tqdm.write("val_session:{0:}  vloss:{1:.6f}  vacc:{2:.4f}".format(
                val_session, total_vloss / val_session,
                total_vcorrects / total_vquery))
        # Free GPU memory
        del loss, y_hat

    # Avg.Acc
    aacc = evaluate(submit, gt)
    tqdm.write("AACC={0:.6f}, FirstAcc={1:.6f}".format(aacc[0], aacc[1]))

    hist_vloss.append(total_vloss / (val_session + 1))
    hist_vacc.append(total_vcorrects / total_vquery)
    return submit
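
Both this validate() and the later variants mask the BCE loss by multiplying logits and targets with a 0/1 query mask; note that BCE(0, 0) is log(2), so padded slots still add a constant term under that scheme. A sketch using the weight argument of binary_cross_entropy_with_logits instead, which zeroes padded positions outright (the helper name and max_query default are assumptions):

import torch
import torch.nn.functional as F

def masked_bce_with_logits(y_hat, y_gt, num_query, max_query=10):
    # Build a float mask that is 1 over each session's valid query slots.
    y_mask = torch.zeros(y_hat.size(0), max_query, device=y_hat.device)
    for b, n in enumerate(num_query):
        y_mask[b, :n] = 1
    # `weight` scales the per-element loss, so padded slots contribute 0.
    return F.binary_cross_entropy_with_logits(y_hat, y_gt, weight=y_mask)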
Code example #27
0
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generator
    generator = create_generator(args)

    # optionally load anchor parameters
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)

    # load the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.model, backbone_name=args.backbone)

    # optionally convert the model
    if args.convert_model:
        model = models.convert_model(model, anchor_params=anchor_params)

    # print model summary
    # print(model.summary())

    # start evaluation
    if args.dataset_type == 'coco':
        from ..utils.coco_eval import evaluate_coco
        evaluate_coco(generator, model, args.score_threshold)
    else:
        average_precisions = evaluate(generator,
                                      model,
                                      iou_threshold=args.iou_threshold,
                                      score_threshold=args.score_threshold,
                                      max_detections=args.max_detections,
                                      save_path=args.save_path)

        # print evaluation
        total_instances = []
        precisions = []
        for label, (average_precision,
                    num_annotations) in average_precisions.items():
            print('{:.0f} instances of class'.format(num_annotations),
                  generator.label_to_name(label),
                  'with average precision: {:.4f}'.format(average_precision))
            total_instances.append(num_annotations)
            precisions.append(average_precision)

        if sum(total_instances) == 0:
            print('No test instances found.')
            return

        print(
            'mAP using the weighted average of precisions among classes: {:.4f}'
            .format(
                sum([a * b for a, b in zip(total_instances, precisions)]) /
                sum(total_instances)))
        print('mAP: {:.4f}'.format(
            sum(precisions) / sum(x > 0 for x in total_instances)))

        return precisions, total_instances
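
The two summary scores printed above weight classes differently: the first weights each class's AP by its instance count, the second averages plain AP over classes that have at least one instance. A compact sketch of both, assuming the same {label: (AP, num_annotations)} dictionary that evaluate() returns here:

def summarize_maps(average_precisions):
    aps, counts = zip(*average_precisions.values())
    if sum(counts) == 0:
        return None, None
    # Instance-weighted mAP: populous classes dominate the average.
    weighted_map = sum(a * n for a, n in zip(aps, counts)) / sum(counts)
    # Plain mAP: average only over classes that actually have instances.
    mean_ap = sum(aps) / sum(n > 0 for n in counts)
    return weighted_map, mean_ap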
Code example #28
0
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args, 'evaluation')
        #print("----------------------------------")
        #print("ARGUMENTS IN CONFIG FILE:")
        #for sec in args.config.sections():
        #print(sec, "=", dict(args.config.items(sec)))
        #print("----------------------------------")

    # for arg in vars(args):
    #     print(arg, "=", getattr(args, arg))
    # exit()

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # create the generator
    generator = create_generator(args)

    # optionally load anchor parameters
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)

    # load the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.model, backbone_name=args.backbone)

    # optionally convert the model
    if args.convert_model:
        model = models.convert_model(model, anchor_params=anchor_params)

    # print model summary
    # print(model.summary())

    # start evaluation
    if args.dataset_type == 'coco':
        from ..utils.coco_eval import evaluate_coco
        evaluate_coco(generator, model, args.score_threshold)
    else:
        average_precisions = evaluate(generator,
                                      model,
                                      iou_threshold=args.iou_threshold,
                                      score_threshold=args.score_threshold,
                                      max_detections=args.max_detections,
                                      save_path=args.save_path,
                                      mask_base_path=args.mask_folder)

        # print evaluation
        total_instances = []
        precisions = []
        F1s = []
        for label, (recall, precision, F1, average_precision,
                    num_annotations) in average_precisions.items():
            print('{:.0f} instances of class'.format(num_annotations),
                  generator.label_to_name(label),
                  'with average precision: {:.4f}'.format(average_precision),
                  'precision: {:.4f}'.format(precision),
                  'recall: {:.4f}'.format(recall),
                  'and F1-score: {:.4f}'.format(F1))
            total_instances.append(num_annotations)
            precisions.append(average_precision)
            F1s.append(F1)

        if sum(total_instances) == 0:
            print('No test instances found.')
            return

        print(
            'mAP using the weighted average of precisions among classes: {:.4f}'
            .format(
                sum([a * b for a, b in zip(total_instances, precisions)]) /
                sum(total_instances)))
        print('mAP: {:.4f}'.format(
            sum(precisions) / sum(x > 0 for x in total_instances)))
        print('mF1: {:.4f}'.format(
            sum(F1s) / sum(x > 0 for x in total_instances)))
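
This fork's evaluate() returns richer per-class tuples of (recall, precision, F1, average_precision, num_annotations); the mF1 printed above averages F1 over populated classes only. A sketch of that aggregation (the function name is illustrative):

def mean_f1(per_class_metrics):
    # per_class_metrics: {label: (recall, precision, F1, AP, num_annotations)}
    f1s = [m[2] for m in per_class_metrics.values()]
    counts = [m[4] for m in per_class_metrics.values()]
    populated = sum(n > 0 for n in counts)
    return sum(f1s) / populated if populated else 0.0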
Code example #29
0
def validate(mval_loader, SM, CF_model, eval_mode):
    tqdm.write("Validation...")
    submit = []
    gt = []
    total_vloss = 0
    total_vcorrects = 0
    total_vquery = 0
    val_sessions_iter = iter(mval_loader)

    for val_session in trange(len(val_sessions_iter),
                              desc='val-sessions',
                              position=2,
                              ascii=True):
        SM.eval()
        x, labels, y_mask, num_items, index = next(
            val_sessions_iter)  # FIXED 13.Dec. SEPARATE LOGS. QUERY SHOULD NOT INCLUDE LOGS
        # Sample data for 'support' and 'query': ex) 15 items = 7 sup, 8 queries...
        # If num_items is odd, the query set gets one extra item.
        num_support = num_items[:, 0].detach().numpy().flatten()
        num_query = num_items[:, 1].detach().numpy().flatten()
        batch_sz = num_items.shape[0]

        x[:, 10:, :41] = 0  # DELETE METALOG QUE
        labels_shift = torch.zeros(batch_sz, 20, 1)
        labels_shift[:, 1:, 0] = labels[:, :-1].float()
        labels_shift[:, 11:, 0] = 0  # REMOVE QUERY LABELS!
        sq_state = torch.zeros(batch_sz, 20, 1)
        sq_state[:, :11, 0] = 1

        x_audio = x[:, :, 41:].data.clone()
        x_audio = Variable(x_audio, requires_grad=False).cuda(GPU)
        x_emb_lastfm, x_lastfm = CF_model(x_audio)
        x_lastfm = x_lastfm.cpu()
        del x_emb_lastfm
        # x: bx122*20

        x = torch.cat((x_lastfm, x, labels_shift, sq_state),
                      dim=2).permute(0, 2, 1).cuda(GPU)

        y_hat = SM(x)
        # Calculate BCE loss
        loss = F.binary_cross_entropy_with_logits(
            input=y_hat * y_mask.cuda(GPU),
            target=labels.cuda(GPU) * y_mask.cuda(GPU))
        total_vloss += loss.item()

        # Decision
        y_prob = torch.sigmoid(y_hat *
                               y_mask.cuda(GPU)).detach().cpu().numpy()  # bx20
        y_pred = (y_prob[:, 10:] >= 0.5).astype(int)  # bx10
        y_numpy = labels[:, 10:].numpy()  # bx10
        # Acc
        y_query_mask = y_mask[:, 10:].numpy()
        total_vcorrects += np.sum((y_pred == y_numpy) * y_query_mask)
        total_vquery += np.sum(num_query)

        # Eval, Submission
        if eval_mode != 0:
            for b in np.arange(batch_sz):
                submit.append(y_pred[b, :num_query[b]].flatten())
                gt.append(y_numpy[b, :num_query[b]].flatten())

        if (val_session + 1) % 400 == 0:
            sample_sup = labels[0, :num_support[0]].long().numpy().flatten()
            sample_que = y_numpy[0, :num_query[0]].astype(int)
            sample_pred = y_pred[0, :num_query[0]]
            sample_prob = y_prob[0, 10:10 + num_query[0]]
            tqdm.write("S:" + np.array2string(sample_sup) + '\n' + "Q:" +
                       np.array2string(sample_que) + '\n' + "P:" +
                       np.array2string(sample_pred) + '\n' + "prob:" +
                       np.array2string(sample_prob))
            tqdm.write("val_session:{0:}  vloss:{1:.6f}  vacc:{2:.4f}".format(
                val_session, loss.item(), total_vcorrects / total_vquery))
        del loss, y_hat, x  # free GPU memory

    # Avg.Acc
    if eval_mode == 1:
        aacc = evaluate(submit, gt)
        tqdm.write("AACC={0:.6f}, FirstAcc={1:.6f}".format(aacc[0], aacc[1]))

    hist_vloss.append(total_vloss / (val_session + 1))
    hist_vacc.append(total_vcorrects / total_vquery)
    return submit
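
The labels_shift/sq_state construction above (shared with code example #30) feeds the model one-step-delayed past labels plus a binary support/query indicator; after the shift, index 10 still holds the final support label, which is why the cutoffs sit at 11. A standalone sketch of that preprocessing under the snippet's 10-support/10-query layout:

import torch

def build_label_channels(labels, n_support=10, seq_len=20):
    # labels: (batch, seq_len) 0/1 labels. The model may only observe
    # *past* labels, so shift them right by one step...
    batch_sz = labels.size(0)
    labels_shift = torch.zeros(batch_sz, seq_len, 1)
    labels_shift[:, 1:, 0] = labels[:, :-1].float()
    # ...and blank everything after the last support label so no query
    # label leaks in (slot n_support holds the final support label).
    labels_shift[:, n_support + 1:, 0] = 0
    # Binary channel marking the slots whose labels the model may see.
    sq_state = torch.zeros(batch_sz, seq_len, 1)
    sq_state[:, :n_support + 1, 0] = 1
    return labels_shift, sq_state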
Code example #30
0
def validate(mval_loader, SM, SMG, eval_mode, GPU):
    tqdm.write("Validation...")
    submit = []
    gt     = []
    total_vloss    = 0
    total_vcorrects = 0
    total_vquery    = 0
    val_sessions_iter = iter(mval_loader)
    for val_session in trange(len(val_sessions_iter), desc='val-sessions', position=2, ascii=True):
        SM.eval()        
        x, labels, y_mask, num_items, index = next(val_sessions_iter) # FIXED 13.Dec. SEPARATE LOGS. QUERY SHOULD NOT INCLUDE LOGS
        
        # Sample data for 'support' and 'query': ex) 15 items = 7 sup, 8 queries...        
        num_support = num_items[:,0].detach().numpy().flatten() # If num_items is odd, the query set gets one extra item.
        num_query   = num_items[:,1].detach().numpy().flatten()
        batch_sz    = num_items.shape[0]

        # x: bx70*20
        x = x.permute(0,2,1)
        
        log_shift = torch.zeros(batch_sz,41,20)
        log_shift[:,:,1:] = x[:,:41,:-1]
        log_shift[:,:,11:] = 0 # DELETE LOG QUE
        
        # labels_shift: bx1*20(model can only observe past labels)
        labels_shift = torch.zeros(batch_sz,1,20)
        labels_shift[:,0,1:] = labels[:,:-1].float()
        labels_shift[:,0,11:] = 0 #!!! NOLABEL for previous QUERY
        
        # support/query state labels: bx1*20
        sq_state = torch.zeros(batch_sz,1,20)
        sq_state[:,0,:11] = 1
        
        # Pack x: bx72*20 (or bx32*20 if not using sup_logs)
        x_1 = Variable(torch.cat((log_shift, x[:,41:,:], labels_shift, sq_state), 1)).cuda(GPU) # x: bx72*20
  
        # Pre-trained Generator: forward & get qlog^ 
        y_hat_qlog, _ = SMG(x_1) # y_hat: b*20
        x_feat_T = torch.zeros(batch_sz, 72, 20)
        x_feat_T[:,:70,:] = x.clone()
        x_feat_T[:, 70,:10] = 1 # Sup/Que state indicator  
        x_feat_T[:, 71,:10] = labels[:,:10].clone()
                    
        x_feat_S = x_feat_T.clone()
        x_feat_S[:, :41, 10:] = y_hat_qlog[:,:,10:].clone() # remove que-log
        x_feat_S = Variable(x_feat_S).cuda(GPU)
        del y_hat_qlog, x_1
        # y 
        y = labels.clone() # bx20
        
        # y_mask
        y_mask_que = y_mask.clone()
        y_mask_que[:,:10] = 0
        
        y_hat = SM(x_feat_S)
        
        # Calculate BCE loss
        loss = F.binary_cross_entropy_with_logits(input=y_hat*y_mask_que.cuda(GPU), target=y.cuda(GPU)*y_mask_que.cuda(GPU))
        total_vloss += loss.item()
        
        # Decision
        y_prob = torch.sigmoid(y_hat*y_mask_que.cuda(GPU)).detach().cpu().numpy() # bx20               
        y_pred = (y_prob[:,10:]>0.5).astype(int) # bx10
        y_numpy = labels[:,10:].numpy() # bx10
        # Acc
        total_vcorrects += np.sum((y_pred==y_numpy)*y_mask_que[:,10:].numpy())
        total_vquery += np.sum(num_query)
        
        
        # Free GPU memory
        del loss, y_hat
            
        # Eval, Submission
        if eval_mode != 0:
            for b in np.arange(batch_sz):
                submit.append(y_pred[b,:num_query[b]].flatten())
                gt.append(y_numpy[b,:num_query[b]].flatten())
                
        if (val_session+1)%400 == 0:
            sample_sup = labels[0,(10-num_support[0]):10].long().numpy().flatten() 
            sample_que = y_numpy[0,:num_query[0]].astype(int)
            sample_pred = y_pred[0,:num_query[0]]
            sample_prob = y_prob[0,10:10+num_query[0]]
            tqdm.write("S:" + np.array2string(sample_sup) +'\n'+
                       "Q:" + np.array2string(sample_que) + '\n' +
                       "P:" + np.array2string(sample_pred) + '\n' +
                       "prob:" + np.array2string(sample_prob))
            tqdm.write("val_session:{0:}  vloss:{1:.6f}   vacc:{2:.4f}".format(val_session,
                       total_vloss/total_vquery, total_vcorrects/total_vquery))
        
    # Avg.Acc (skip labels only, log-generation acc is not implemented yet!)
    if eval_mode==1:
        aacc = evaluate(submit, gt)
        tqdm.write("AACC={0:.6f}, FirstAcc={1:.6f}".format(aacc[0], aacc[1]))    
        
    hist_vloss.append(total_vloss/total_vquery)
    hist_vacc.append(total_vcorrects/total_vquery)
    return submit
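
The evaluate(submit, gt) behind the AACC/FirstAcc line is defined elsewhere; going by those names, it plausibly computes the sequential-skip-prediction mean average accuracy per session plus accuracy on each session's first query item. A hedged sketch of that reading, not the snippet's actual implementation:

import numpy as np

def mean_average_accuracy(submit, gt):
    aaccs, firsts = [], []
    for pred, truth in zip(submit, gt):
        hits = (np.asarray(pred) == np.asarray(truth)).astype(float)
        # Average accuracy credits each correct position with the running
        # accuracy up to and including that position.
        running_acc = np.cumsum(hits) / (np.arange(len(hits)) + 1)
        aaccs.append(np.sum(running_acc * hits) / len(hits))
        firsts.append(hits[0])
    return np.mean(aaccs), np.mean(firsts)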