Example #1
def logistic_regression(n,
                        mx1,
                        vx1,
                        my1,
                        vy1,
                        mx2,
                        vx2,
                        my2,
                        vy2,
                        optimizer='SGD'):
    """
    weights : shape=(k=3,1)
    Input
    -----
    optimizer : 'SGD' or 'NTM'
        'SGD' == 'Steepest Gradient Descent'
        'NTM' == 'Newton's Method'
    """
    inputs = []
    labels = []
    D1_label = 0.0
    D2_label = 1.0
    bias_term = 1.0
    for _ in range(n):
        # Data 1
        D1x = Generator.univariate_gaussian(mx1, vx1)
        D1y = Generator.univariate_gaussian(my1, vy1)
        inputs.append([bias_term, D1x, D1y])
        labels.append([D1_label])
        # Data 2
        D2x = Generator.univariate_gaussian(mx2, vx2)
        D2y = Generator.univariate_gaussian(my2, vy2)
        inputs.append([bias_term, D2x, D2y])
        labels.append([D2_label])
    inputs = Mat(inputs)
    labels = Mat(labels)
    # init weights
    weights = Mat([[-6.0], [1.0], [-0.1]])
    print('inputs shape:\t', inputs.shape)
    print('labels shape:\t', labels.shape)
    print('weights shape:\t', weights.shape)
    # optimization
    if optimizer == 'SGD':
        weights = steepest_gradient_descent(weights, inputs, labels)
    elif optimizer == 'NTM':
        weights = newton_method(weights, inputs, labels)
    else:
        raise ValueError('{} is not a valid optimizer'.format(optimizer))
    # inference
    logits = inference(weights, inputs)
    # evaluate model
    CM = ConfusionMatrix(logits, labels)
    CM.show_matrix()
    CM.show_accuracy()
    CM.show_sensitivity()
    CM.show_specificity()
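The 'SGD' branch boils down to the standard cross-entropy gradient update w <- w - lr * X^T(sigmoid(Xw) - y). Below is a minimal, self-contained NumPy sketch of that update on the same kind of two-cluster data; steepest_gradient_descent itself is not shown in this example, so treat this as an illustration of the idea rather than the source's implementation.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def steepest_gradient_descent_np(w, X, y, lr=0.01, n_steps=1000):
    # cross-entropy gradient for logistic regression: X^T (sigmoid(Xw) - y)
    for _ in range(n_steps):
        w = w - lr * (X.T @ (sigmoid(X @ w) - y))
    return w

rng = np.random.default_rng(0)
X1 = rng.normal([1.0, 1.0], 0.7, size=(50, 2))           # cluster labeled 0
X2 = rng.normal([3.0, 3.0], 0.7, size=(50, 2))           # cluster labeled 1
X = np.hstack([np.ones((100, 1)), np.vstack([X1, X2])])  # prepend the bias term
y = np.vstack([np.zeros((50, 1)), np.ones((50, 1))])
w = steepest_gradient_descent_np(np.array([[-6.0], [1.0], [-0.1]]), X, y)
print(((sigmoid(X @ w) > 0.5) == y).mean())              # training accuracy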
Example #2
def main():
    print('Start data I/O...')
    train_images, train_labels = load_mnist(
        dataset='training', fetch_size=100)
    # test_images, test_labels = load_mnist(dataset='testing', fetch_size=10000)

    train_images = preprocessing(train_images)
    # sanity check: print the first preprocessed image row by row, plus its label
    for i in range(28):
        print(train_images[0, i])
    print(train_labels[0])
    n_pixels = train_images.shape[1] * train_images.shape[2]
    init_theta = Mat([[np.random.uniform(0.0, 1.0) for _ in range(n_pixels)]
                      for _ in range(10)])
    # init_theta = Mat([[0.5 for _ in range(n_pixels)] for _ in range(10)])
    init_lambda = [0.05, 0.10, 0.15, 0.10, 0.05, 0.10, 0.15, 0.10, 0.10, 0.10]
    # init_lambda = [0.1 for _ in range(10)]
    logits, group_labels = em_algorithm(train_images, train_labels, init_theta, init_lambda)
    prediction = []
    for i in range(logits.shape[0]):
        largest_idx = 0
        for z in range(logits.shape[1]):
            if logits[i, z] > logits[i, largest_idx]:
                largest_idx = z
        prediction.append([group_labels[largest_idx]])
    prediction = Mat(prediction)
    train_labels = Mat([train_labels]).t()
    # one-vs-rest confusion matrix per digit: predictions are binarized to
    # "assigned the right digit?" and labels to "is this sample digit z?"
    for z in range(10):
        print('digit {}'.format(z))
        tmp_logits = Mat(prediction)
        tmp_labels = Mat(train_labels)
        for i in range(tmp_logits.shape[0]):
            if tmp_logits[i, 0] == tmp_labels[i, 0]:
                tmp_logits[i, 0] = 1
            else:
                tmp_logits[i, 0] = 0
            if tmp_labels[i, 0] == z:
                tmp_labels[i, 0] = 1
            else:
                tmp_labels[i, 0] = 0
        CM = ConfusionMatrix(tmp_logits, tmp_labels)
        CM.show_matrix()
        CM.show_accuracy()
        CM.show_sensitivity()
        CM.show_specificity()
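The per-digit loop above binarizes the prediction channel to "assigned the right digit" and the label channel to "is this digit". A vectorized NumPy restatement of that binarization is shown below; per_digit_counts is a hypothetical helper, not part of the source.

import numpy as np

def per_digit_counts(prediction, labels, digit):
    """TP/FP/FN/TN under the binarization used in the loop above."""
    correct = prediction == labels      # the mixture assigned the right digit
    is_digit = labels == digit          # the sample truly is `digit`
    tp = int(np.sum(correct & is_digit))
    fp = int(np.sum(correct & ~is_digit))
    fn = int(np.sum(~correct & is_digit))
    tn = int(np.sum(~correct & ~is_digit))
    return tp, fp, fn, tn

pred = np.array([0, 1, 1, 2])
true = np.array([0, 1, 2, 2])
print(per_digit_counts(pred, true, digit=2))  # (1, 2, 1, 0)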
Example #3
def ensemble_aug_eval(n_iter, class_model, with_temp_scal=False):
    acc_test = 0
    auc_test = 0
    ens_preds = torch.zeros_like(class_model.calibration_variables[2][0])

    start_time = time.time()

    data_loader, test_data_loader, valid_data_loader = class_model.data_loader, class_model.test_data_loader, class_model.valid_data_loader

    for i in range(1, n_iter + 1):
        acc_test_temp, w_acc_test_temp, conf_matrix_test_temp, acc_1_test_temp, pr_test_temp, rec_test_temp, fscore_test_temp, auc_test_temp, preds, true_lab = \
            eval(class_model, test_data_loader, *class_model.calibration_variables[2], with_temp_scal)
        acc_test += acc_test_temp
        auc_test += auc_test_temp

        ens_preds += preds

    conf_matrix_test = ConfusionMatrix(class_model.num_classes)
    temp_ens_preds = ens_preds / n_iter
    check_output, res = torch.max(temp_ens_preds.to('cuda'), 1)
    conf_matrix_test.update_matrix(res, true_lab.to('cuda'))

    ens_acc, ens_w_acc = conf_matrix_test.get_metrics()
    ens_acc_1, pr, rec, fscore, auc = compute_accuracy_metrics(
        temp_ens_preds, true_lab)

    print(
        "\n|| took {:.1f} minutes \n"
        "| Mean Accuracy statistics: Acc: {:.3f} AUC: {:.3f} \n"
        "| Ensemble Accuracy statistics: Weighted Acc: {:.3f} AUC: {:.3f} Recall: {:.3f} Precision: {:.3f} Fscore: {:.3f} \n"
        .format((time.time() - start_time) / 60., acc_test / i, auc_test / i,
                ens_w_acc, auc, rec, pr, fscore))
    print(conf_matrix_test.conf_matrix)

    return ens_acc, ens_w_acc, conf_matrix_test, ens_acc_1, pr, rec, fscore, auc, temp_ens_preds, true_lab
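ensemble_aug_eval averages softmax outputs over n_iter stochastic passes over the test loader. A minimal sketch of that test-time-augmentation idea for a single batch follows; model and augment are placeholder names, not identifiers from the source.

import torch
import torch.nn.functional as F

def tta_predict(model, x, augment, n_iter=10):
    """Average class probabilities over n_iter random augmentations of x."""
    model.eval()
    ens = None
    with torch.no_grad():
        for _ in range(n_iter):
            probs = F.softmax(model(augment(x)), dim=-1)
            ens = probs if ens is None else ens + probs
    return ens / n_iter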
Example #4
def compute_stats(slots, slot_selection, classes, prediction, y,
                  joint_slot_name):
    conf_mats = {joint_slot_name: ConfusionMatrix(2)}
    for slot in slots:
        if slot in slot_selection:
            conf_mats[slot] = ConfusionMatrix(len(classes[slot]))

    joint_correct = np.ones(len(prediction[0]), dtype=bool)
    joint_all = np.ones(len(prediction[0]), dtype=bool)
    for i, (slot, pred) in enumerate(zip(slots, prediction)):
        if slot in slot_selection:
            slot_y = y[i]
            slot_y_hat = np.argmax(pred, axis=1)

            conf_mats[slot].batchAdd(slot_y, slot_y_hat)

            joint_correct &= (slot_y == slot_y_hat)

    conf_mats[joint_slot_name].batchAdd(joint_all, joint_correct)
    return conf_mats
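The joint-slot matrix above counts a sample as correct only if every selected slot is predicted correctly. A tiny illustration of that logic on synthetic, hypothetical data:

import numpy as np

pred_a = np.array([0, 1, 2, 1])   # argmaxed predictions for slot "a"
true_a = np.array([0, 1, 1, 1])
pred_b = np.array([3, 0, 2, 2])
true_b = np.array([3, 0, 2, 0])

joint_correct = (pred_a == true_a) & (pred_b == true_b)
print(joint_correct)          # [ True  True False False]
print(joint_correct.mean())   # joint accuracy: 0.5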
Example #5
def ensemble_aug_eval(n_iter, class_model, with_temp_scal=False):
    acc_test = 0
    w_acc_test = 0
    ens_preds = torch.zeros_like(class_model.calibration_variables[2][0])

    start_time = time.time()

    data_loader, test_data_loader, valid_data_loader = class_model.data_loader, class_model.test_data_loader, class_model.valid_data_loader

    for i in range(1, n_iter + 1):
        acc_test_temp, w_acc_test_temp, calibration_statistics, conf_matrix_temp, _ = \
            eval(class_model, test_data_loader, *class_model.calibration_variables[2], with_temp_scal)
        acc_test += acc_test_temp
        w_acc_test += w_acc_test_temp

        _, preds, true_lab = calibration_statistics
        ens_preds += preds

    conf_matrix_test = ConfusionMatrix(class_model.num_classes)
    temp_ens_preds = ens_preds / n_iter
    check_output, res = torch.max(temp_ens_preds.to('cuda'), 1)
    conf_matrix_test.update_matrix(res, true_lab.to('cuda'))

    ens_acc, ens_w_acc = conf_matrix_test.get_metrics()
    ECE_test, MCE_test, BRIER_test, NLL_test = compute_calibration_measures(
        temp_ens_preds, true_lab, apply_softmax=False, bins=15)
    print(
        "\n|| took {:.1f} minutes \n"
        "| Mean Accuracy statistics: weighted Acc test: {:.3f} Acc test: {:.3f} \n"
        "| Ensemble Accuracy statistics: weighted Acc test: {:.3f} Acc test: {:.3f} \n"
        "| Calibration test: ECE: {:.5f} MCE: {:.5f} BRIER: {:.5f}  NLL: {:.5f}\n\n"
        .format((time.time() - start_time) / 60., w_acc_test / i, acc_test / i,
                ens_w_acc, ens_acc, ECE_test * 100, MCE_test * 100, BRIER_test,
                NLL_test))
    print(conf_matrix_test.conf_matrix)

    return ens_acc, ens_w_acc, temp_ens_preds, true_lab
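compute_calibration_measures is not shown in the snippet. For reference, the standard definition of ECE (expected calibration error) it presumably implements is sketched below; this is the textbook formula, not the repository's code.

import torch

def expected_calibration_error(probs, labels, bins=15):
    """Minimal ECE sketch: bin samples by confidence, then take the
    bin-weighted average of |accuracy - mean confidence| per bin."""
    conf, pred = probs.max(dim=1)
    correct = pred.eq(labels).float()
    edges = torch.linspace(0.0, 1.0, bins + 1)
    ece = 0.0
    for lo, hi in zip(edges[:-1], edges[1:]):
        mask = (conf > lo) & (conf <= hi)
        if mask.any():
            ece += mask.float().mean() * (correct[mask].mean() - conf[mask].mean()).abs()
    return float(ece)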
Example #6
def eval(class_model,
         e_loader,
         predictions,
         labels,
         with_temp_scal=False,
         compute_separate_metrics_for_errors=False):
    with torch.no_grad():
        entropy_of_predictions = torch.zeros_like(labels).float()
        # buffers for measuring statistics separately on correctly and
        # incorrectly classified samples
        if compute_separate_metrics_for_errors:
            corr_entropy = torch.zeros_like(labels).float()
            incorr_entropy = torch.zeros_like(labels).float()

            corr_labels = torch.zeros_like(labels).float()
            incorr_labels = torch.zeros_like(labels).float()

            corr_predictions = torch.zeros_like(predictions).float()
            incorr_predictions = torch.zeros_like(predictions).float()

            corr_count = 0
            incorr_count = 0

        class_model.n.eval()
        softmax = nn.Softmax(dim=-1)
        conf_matrix = ConfusionMatrix(class_model.num_classes)

        start_time = time.time()
        for idx, (x, target, img_name) in enumerate(e_loader):
            # compute output
            x = x.to('cuda')
            out = class_model.n(x)

            if with_temp_scal:
                out = class_model.temp_scal_model(out)

            output = out

            target = target.to('cuda', torch.long)
            check_output_all = softmax(output)
            check_output, res = torch.max(check_output_all, -1)

            batch_start = idx * class_model.batch_size
            batch_end = batch_start + target.size(0)
            predictions[batch_start:batch_end, :] = check_output_all.data.cpu()
            labels[batch_start:batch_end] = target.data.cpu()
            entropy_of_predictions[batch_start:batch_end] = entropy_categorical(
                check_output_all).cpu()

            # update the confusion matrix
            conf_matrix.update_matrix(res, target)

            if compute_separate_metrics_for_errors:
                # compute entropy and calibration measures separately on
                # correctly and incorrectly classified samples
                corr_idx = check_output_all.argmax(dim=1) == target
                incorr_idx = check_output_all.argmax(dim=1) != target

                corr_samples_prob = check_output_all[corr_idx, :]
                incorr_samples_prob = check_output_all[incorr_idx, :]

                corr_numel = corr_idx.sum().long()
                incorr_numel = incorr_idx.sum().long()

                corr_entropy[corr_count:corr_count + corr_numel] = \
                    entropy_categorical(corr_samples_prob).cpu()
                incorr_entropy[incorr_count:incorr_count + incorr_numel] = \
                    entropy_categorical(incorr_samples_prob).cpu()

                corr_predictions[corr_count:corr_count + corr_numel] = \
                    corr_samples_prob.cpu()
                incorr_predictions[incorr_count:incorr_count + incorr_numel] = \
                    incorr_samples_prob.cpu()

                corr_labels[corr_count:corr_count + corr_numel] = \
                    target[corr_idx].cpu()
                incorr_labels[incorr_count:incorr_count + incorr_numel] = \
                    target[incorr_idx].cpu()

                corr_count += corr_numel
                incorr_count += incorr_numel

        # trim the preallocated buffers down to the entries actually filled
        per_samples_stats = None
        if compute_separate_metrics_for_errors:
            corr_entropy = corr_entropy[0:corr_count]
            incorr_entropy = incorr_entropy[0:incorr_count]

            corr_predictions = corr_predictions[0:corr_count]
            incorr_predictions = incorr_predictions[0:incorr_count]

            corr_labels = corr_labels[0:corr_count]
            incorr_labels = incorr_labels[0:incorr_count]

            per_samples_stats = {
                'corr': [corr_entropy, corr_predictions, corr_labels],
                'incorr': [incorr_entropy, incorr_predictions, incorr_labels]
            }

        acc, w_acc = conf_matrix.get_metrics()

        acc_1, pr, rec, fscore, auc = compute_accuracy_metrics(
            predictions, labels)

        return acc, w_acc, conf_matrix.conf_matrix, acc_1, pr, rec, fscore, auc, predictions, labels
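entropy_categorical is called above but not defined in the snippet. One plausible definition (an assumption about the helper, not the source's code) is the per-row Shannon entropy of the predicted distribution:

import torch

def entropy_categorical(probs, eps=1e-12):
    # Shannon entropy per row: H(p) = -sum_i p_i * log(p_i)
    return -(probs * (probs + eps).log()).sum(dim=-1)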
Example #7
    def evaluate(self, eval_dataset, batch_size=1, epoch_id=None):
        """评估。

        Args:
            eval_dataset (paddlex.datasets): 评估数据读取器。
            batch_size (int): 评估时的batch大小。默认1。
            epoch_id (int): 当前评估模型所在的训练轮数。
            return_details (bool): 是否返回详细信息。默认False。

        Returns:
            dict: 当return_details为False时,返回dict。包含关键字:'miou'、'category_iou'、'macc'、
                'category_acc'和'kappa',分别表示平均iou、各类别iou、平均准确率、各类别准确率和kappa系数。
            tuple (metrics, eval_details):当return_details为True时,增加返回dict (eval_details),
                包含关键字:'confusion_matrix',表示评估的混淆矩阵。
        """
        self.arrange_transform(transforms=eval_dataset.transforms,
                               mode='train')
        total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
        conf_mat = ConfusionMatrix(self.num_classes, streaming=True)
        data_generator = eval_dataset.generator(batch_size=batch_size,
                                                drop_last=False)
        if not hasattr(self, 'parallel_test_prog'):
            self.parallel_test_prog = fluid.CompiledProgram(
                self.test_prog).with_data_parallel(
                    share_vars_from=self.parallel_train_prog)
        logging.info(
            "Start evaluating (total_samples={}, total_steps={})...".format(
                eval_dataset.num_samples, total_steps))
        for step, data in tqdm.tqdm(enumerate(data_generator()),
                                    total=total_steps):
            images = np.array([d[0] for d in data])
            labels = np.array([d[1] for d in data])
            num_samples = images.shape[0]
            if num_samples < batch_size:
                num_pad_samples = batch_size - num_samples
                pad_images = np.tile(images[0:1], (num_pad_samples, 1, 1, 1))
                images = np.concatenate([images, pad_images])
            feed_data = {'image': images}
            outputs = self.exe.run(self.parallel_test_prog,
                                   feed=feed_data,
                                   fetch_list=list(self.test_outputs.values()),
                                   return_numpy=True)
            pred = outputs[0]
            if num_samples < batch_size:
                pred = pred[0:num_samples]

            mask = labels != self.ignore_index
            conf_mat.calculate(pred=pred, label=labels, ignore=mask)
            _, iou = conf_mat.mean_iou()

            logging.debug("[EVAL] Epoch={}, Step={}/{}, iou={}".format(
                epoch_id, step + 1, total_steps, iou))

        category_iou, miou = conf_mat.mean_iou()
        category_acc, macc = conf_mat.accuracy()

        metrics = OrderedDict(
            zip(['miou', 'category_iou', 'macc', 'category_acc', 'kappa'],
                [miou, category_iou, macc, category_acc,
                 conf_mat.kappa()]))

        logging.info('[EVAL] Finished, Epoch={}, {} .'.format(
            epoch_id, dict2str(metrics)))
        return metrics
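conf_mat.mean_iou() follows the standard confusion-matrix formula: per-class IoU is the diagonal divided by row sum plus column sum minus the diagonal. A minimal NumPy sketch of that formula (not PaddleX's implementation):

import numpy as np

def mean_iou_from_confusion(cm):
    """Per-class IoU from a confusion matrix: diag / (rows + cols - diag)."""
    cm = np.asarray(cm, dtype=float)
    diag = np.diag(cm)
    union = cm.sum(axis=0) + cm.sum(axis=1) - diag
    iou = np.where(union > 0, diag / np.maximum(union, 1e-12), 0.0)
    return iou, iou.mean()

cm = np.array([[50, 2], [3, 45]])
print(mean_iou_from_confusion(cm))  # per-class IoU [~0.909, 0.9], mIoU ~0.905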
Example #8
        if not isinstance(preds, np.ndarray):
            preds = preds.numpy()

        np.save(output_file, preds)  # only save predictions of this line

        try:
            auc_test += auc
            w_acc_test += w_acc
            if ens_preds is None:
                ens_preds = preds
            else:
                ens_preds += preds
        except Exception as e:
            print(f'Exception {e}')

    conf_matrix_test = ConfusionMatrix(n.num_classes)
    temp_ens_preds = ens_preds / counter

    check_output, res = torch.max(torch.tensor(temp_ens_preds, device='cuda'),
                                  1)
    conf_matrix_test.update_matrix(res, torch.tensor(true_lab, device='cuda'))

    ens_acc, ens_w_acc = conf_matrix_test.get_metrics()
    ens_acc_1, pr, rec, fscore, auc = compute_accuracy_metrics(
        temp_ens_preds, true_lab)

    print("\n ----- FINAL PRINT ----- \n")

    print(
        "\n|| took {:.1f} minutes \n"
        "| Mean Accuracy statistics: Weighted Acc: {:.3f} AUC: {:.3f} \n"
        # assumed completion of the truncated snippet, mirroring the prints in
        # the ensemble_aug_eval examples above
        .format((time.time() - start_time) / 60., w_acc_test / counter,
                auc_test / counter))
Example #9
                         vec_alloc=long_0_tensor_alloc,
                         ExType=TorchExamples)
print('Loaded test data')

nc = len(f2i)

mdsave(f2i, embeddings.vocab, args.outdir, args.save)

model = ConvModel(embeddings, nc, args.filtsz, args.cmotsz, args.hsz,
                  args.dropout, not args.static)
labels = revlut(f2i)
trainer = Trainer(gpu, model, args.optim, args.eta, args.mom)

max_acc = 0
last_improved = 0
confusion = ConfusionMatrix(labels)

for i in range(args.epochs):
    print('Training epoch %d' % (i + 1))
    trainer.train(ts, confusion, args.batchsz)
    this_acc = trainer.test(vs, confusion, args.batchsz, 'Validation')
    if this_acc > max_acc:
        max_acc = this_acc
        last_improved = i
        model.save(args.outdir, args.save)
        print('Highest dev acc achieved yet -- writing model')

    if (i - last_improved) > args.patience:
        print('Stopping due to persistent failures to improve')
        break
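The loop above implements patience-based early stopping: checkpoint on every new best validation accuracy, and stop once no improvement has been seen for args.patience epochs. The same pattern in isolation, with synthetic accuracy numbers standing in for real validation results:

max_acc, last_improved, patience = 0.0, 0, 2
for epoch, acc in enumerate([0.71, 0.74, 0.73, 0.74, 0.72, 0.71]):
    if acc > max_acc:
        max_acc, last_improved = acc, epoch   # a checkpoint would be saved here
    if (epoch - last_improved) > patience:
        print('stopping at epoch', epoch)
        break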
Example #10
def train(args):
    train_logger = None
    if args.log_dir is not None:
        log_name = 'lr=%s_max_epochs=%s' % (args.learning_rate, args.num_epoch)
        train_logger = tb.SummaryWriter(path.join(args.log_dir, 'train') +
                                        '/%s' % (log_name),
                                        flush_secs=1)
    LABEL_NAMES = [str(i) for i in range(0, 43)]

    model = Detector().to(device)
    if args.continue_training:
        model.load_state_dict(
            torch.load(
                path.join(path.dirname(path.abspath(__file__)), 'det.th')))

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.learning_rate,
                                 weight_decay=1e-5)
    loss = torch.nn.CrossEntropyLoss(reduction='none').to(device)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 30, .5)

    # Build the augmentation pipeline by evaluating the CLI-provided string
    # against the transform classes exported by torchvision.transforms.
    import inspect
    transform = eval(
        args.transform, {
            k: v
            for k, v in inspect.getmembers(torchvision.transforms)
            if inspect.isclass(v)
        })
    train_data = load_data(TRAIN_PATH, batch_size=256, transform=transform)

    global_step = 0
    gamma = 2    # focal-loss focusing parameter
    alpha = .25  # focal-loss class-balance weight
    for epoch in range(args.num_epoch):
        model.train()
        confusion = ConfusionMatrix(len(LABEL_NAMES))
        for img, label in train_data:
            if train_logger is not None:
                train_logger.add_images('augmented_image', img[:4])
            img, label = img.to(device), label.to(device)

            logit = model(img)
            ce_loss = loss(logit, label)
            pt = torch.exp(-ce_loss)
            focal_loss = (alpha * ((1 - pt)**gamma) * ce_loss).mean()

            confusion.add(logit.argmax(1), label)

            if train_logger is not None:
                train_logger.add_scalar('loss', focal_loss, global_step)

            optimizer.zero_grad()
            focal_loss.backward()
            optimizer.step()
            global_step += 1

        if train_logger:
            train_logger.add_scalar('accuracy', confusion.global_accuracy,
                                    global_step)
            import matplotlib.pyplot as plt
            f, ax = plt.subplots()
            f.set_figheight(30)
            f.set_figwidth(30)

            ax.imshow(confusion.per_class,
                      interpolation='nearest',
                      cmap=plt.cm.Blues)
            for i in range(confusion.per_class.size(0)):
                for j in range(confusion.per_class.size(1)):
                    ax.text(j,
                            i,
                            format(confusion.per_class[i, j], '.2f'),
                            ha="center",
                            va="center",
                            color="black")
            train_logger.add_figure('confusion', f, global_step)
        scheduler.step()
        save_model(model)
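The training loop derives focal loss from an unreduced cross entropy: FL = alpha * (1 - pt)^gamma * CE, where pt = exp(-CE) is the probability the model assigned to the true class. The same computation as a standalone function, a sketch mirroring the loop rather than quoting the repository:

import torch
import torch.nn.functional as F

def focal_loss(logits, target, gamma=2.0, alpha=0.25):
    """Focal loss built from per-sample cross entropy, as in the loop above."""
    ce = F.cross_entropy(logits, target, reduction='none')
    pt = torch.exp(-ce)                 # probability the model gave the true class
    return (alpha * (1 - pt) ** gamma * ce).mean()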