Code example #1
    def test_model(test_loader, net):

        net.eval()
        device = params['device']
        batch_size = params['batch_size']
        test_loss = 0
        test_acc = 0
        test_iou = {}
        with torch.no_grad():
            for batch_index, (img, target) in enumerate(test_loader):
                img, target = img.to(device), target.to(device)

                if model_version == 'deeplab':
                    output = net(img)['out']
                else:
                    output = net(img)

                target = target.long()
                loss = criterion(output, target).item()
                test_loss += loss

                pred = aux.get_predicted_image(output)

                output, target, pred = (output.detach().cpu(),
                                        target.detach().cpu(),
                                        pred.detach().cpu())
                # compute number of correct predictions in the batch
                test_accuracy = metrics.calculate_accuracy(output, target)
                test_acc += test_accuracy

                iou_inds = metrics.calculate_iou(pred, target)

                for key in iou_inds:
                    if key not in test_iou:
                        test_iou[key] = iou_inds[key]
                    else:
                        test_iou[key] += iou_inds[key]

        test_loss = test_loss / (len(test_loader.dataset) / batch_size)
        test_acc = 100 * (test_acc / (len(test_loader.dataset) / batch_size))
        test_iou = metrics.convert_batched_iou(
            test_iou, (len(test_loader.dataset) / batch_size))
        mIoU = metrics.get_mIoU(test_iou)

        mIoU_desc = metrics.miou_to_string(test_iou)
        return test_loss, test_acc, mIoU, mIoU_desc
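
The function above relies on names from its enclosing scope (params, model_version, criterion, aux, metrics). Below is a minimal usage sketch, assuming a DeepLab-style model from torchvision; SegmentationDataset stands in for whatever dataset class the project uses and is not part of the original code.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision.models.segmentation import deeplabv3_resnet50

# Module-level names assumed by test_model
params = {'device': 'cuda' if torch.cuda.is_available() else 'cpu',
          'batch_size': 8}
model_version = 'deeplab'
criterion = nn.CrossEntropyLoss()

net = deeplabv3_resnet50(num_classes=21).to(params['device'])
test_loader = DataLoader(SegmentationDataset(split='test'),  # placeholder dataset
                         batch_size=params['batch_size'],
                         shuffle=False)

test_loss, test_acc, mIoU, mIoU_desc = test_model(test_loader, net)
print('Test loss: {:.4f}, accuracy: {:.2f}%, mIoU: {:.4f}'.format(
    test_loss, test_acc, mIoU))
print(mIoU_desc)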
Code example #2
    def val_one_epoch(val_loader, net):

        net.eval()
        device = params['device']
        batch_size = params['batch_size']
        val_loss = 0
        val_acc = 0
        val_iou = {}
        pred = 0
        with torch.no_grad():
            for batch_index, (img, target) in enumerate(val_loader):
                img, target = img.to(device), target.to(device)
                output = net(img)
                target = target.long()

                loss = criterion(output, target).item()
                val_loss += loss

                pred = aux.get_predicted_image(output)
                # Detach the new tensors and move them to CPU to compute the metrics
                output, target, pred = (output.detach().cpu(),
                                        target.detach().cpu(),
                                        pred.detach().cpu())

                # compute number of correct predictions in the batch
                val_accuracy = metrics.calculate_accuracy(output, target)
                val_acc += val_accuracy
                iou_inds = metrics.calculate_iou(pred, target)

                for key in iou_inds:
                    if key not in val_iou:
                        val_iou[key] = iou_inds[key]
                    else:
                        val_iou[key] += iou_inds[key]
                    #print('Batch index: {}, loss: {}, accuracy: {:.2f}%'.format(batch_index, loss, val_accuracy * 100))
        # Average the loss and accuracy across all batches
        val_loss = val_loss / (len(val_loader.dataset) / batch_size)
        val_acc = 100 * (val_acc / (len(val_loader.dataset) / batch_size))
        val_iou = metrics.convert_batched_iou(
            val_iou, (len(val_loader.dataset) / batch_size))
        mIoU = metrics.get_mIoU(val_iou)

        #print('\nValidation set: Average loss: {:.4f}, Accuracy: {:.0f}%, mIoU: {:.4f}\n'.format(val_loss,  val_acc, mIoU))
        mIoU_desc = metrics.miou_to_string(val_iou)
        return val_loss, val_acc, mIoU, mIoU_desc
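
A sketch of how val_one_epoch might sit in an epoch loop to keep the checkpoint with the best validation mIoU; train_one_epoch is the function shown in code example #4 below, the 'num_epochs' key and the file name are assumptions, and net, optimizer, criterion and the data loaders are assumed to be set up as in the sketch after code example #1.

import torch

best_miou = 0.0
for epoch in range(params['num_epochs']):  # assumed key in params
    train_one_epoch(train_loader, net, optimizer, criterion, params)
    val_loss, val_acc, mIoU, mIoU_desc = val_one_epoch(val_loader, net)
    print('Epoch {}: val loss {:.4f}, acc {:.2f}%, mIoU {:.4f}'.format(
        epoch, val_loss, val_acc, mIoU))
    if mIoU > best_miou:
        # keep the weights with the best validation mIoU
        best_miou = mIoU
        torch.save(net.state_dict(), 'best_model.pth')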
Code example #3
import pickle
from pathlib import Path

import numpy as np
from sklearn import svm

# calculate_f_score, calculate_accuracy, calculate_precision and
# calculate_recall are assumed to be defined elsewhere in this module.


def tune_params(training_features,
                positive_test_features,
                negative_test_features,
                retrain=True,
                model_name='model2.sav'):
    """Given training features of positive examples plus positive and
    negative test features, fit a one-class SVM if the retrain argument is
    True or if the model described by model_name does not exist. Parameter
    tuning is done with a basic grid search over nu and gamma, using the F1
    score as the benchmark. Random samples of equal size are drawn from the
    two test sets to avoid imbalance.
    If retrain is False and the model exists, it is loaded from file instead.
    """
    if retrain or not Path(model_name).exists():
        print("Fitting new model, model name:", model_name)
        print("Num training feats.", training_features.shape)
        print("Num pos test feats.", positive_test_features.shape)
        print("Num neg test feats.", negative_test_features.shape)
        max_batch_size = min(
            [len(positive_test_features),
             len(negative_test_features), 1500])
        print("Number of test samples used per class:", max_batch_size, "\n")
        best_nu, best_g = 0, 0
        max_dist = 0
        prec = -1
        recall = 0
        acc = 0
        t_p = positive_test_features[np.random.choice(
            positive_test_features.shape[0], max_batch_size, replace=False)]
        t_n = negative_test_features[np.random.choice(
            negative_test_features.shape[0], max_batch_size, replace=False)]
        if ("one" in model_name):
            print("features:", len(t_p))
            print(t_p[0])
        f = 0
        Nus = [
            0.00126, 0.0025, 0.00375, 0.005, 0.675, 0.0075, 0.01, 0.015, 0.02,
            0.025, 0.05, 0.1
        ]
        #Nus = [5*10**(-0.5*i) for i in reversed(range(4, 16))]
        gammas = [
            0.0000001, 0.00000025, 0.0000005, 0.000001, 0.0000025, 0.000005,
            0.00001, 0.0001, 0.001, 0.0025, 0.005, 0.0075, 0.01, 0.015
        ]
        #gammas = [5*10**(-i) for i in reversed(range(4, 16))]
        best_model = None
        best_scores = (0, 0, 0)
        for nu in Nus:
            for g in gammas:
                model = svm.OneClassSVM(nu=nu, kernel='rbf', gamma=g)
                model.fit(training_features)

                # select random features for testing
                y_p = model.predict(t_p)
                y_n = model.predict(t_n)
                new_f = calculate_f_score(y_p, y_n)
                new_acc = calculate_accuracy(y_p, y_n)
                #if  new_f > f and sum(y_n) < 0:
                #print("new f score:", new_f, "y_p:", y_p, "y_n:", y_n)
                #if new_acc > acc:
                if new_f > f:
                    f = new_f
                    acc = new_acc
                    #print("acc:", acc)
                    best_scores = (calculate_precision(y_p, y_n),
                                   calculate_recall(y_p), f)
                    best_nu = nu
                    best_g = g
                    #print("for Nu", nu, "and gamma", g, "y_p:", sum(y_p), "and y_n:", sum(y_n), "f score was: ", f)
                    best_model = model
        with open(model_name, 'wb') as model_file:
            pickle.dump(best_model, model_file)
        print(f"best nu: {best_nu}, best gamma: {best_g}, best f score: {f}")
    else:
        best_scores = (0, 0, 0)
        print("Loading saved model, model name:", model_name)
        with open(model_name, 'rb') as model_file:
            best_model = pickle.load(model_file)

    return best_model, best_scores
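
A hedged usage sketch for tune_params. The feature matrices here are random placeholders just to illustrate the expected shapes and the return values; in practice they would come from a feature-extraction step.

import numpy as np

# Placeholder (n_samples, n_features) arrays standing in for real features
train_feats = np.random.rand(2000, 128)
pos_test_feats = np.random.rand(500, 128)
neg_test_feats = np.random.rand(500, 128)

model, (precision, recall, f_score) = tune_params(train_feats,
                                                  pos_test_feats,
                                                  neg_test_feats,
                                                  retrain=True,
                                                  model_name='model2.sav')
print('precision: {:.3f}, recall: {:.3f}, F1: {:.3f}'.format(
    precision, recall, f_score))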
Code example #4
    def train_one_epoch(train_loader, net, optimizer, criterion, hparams):

        # Activate the train=True flag inside the model
        net.train()

        device = hparams['device']
        batch_size = hparams['batch_size']
        train_loss, train_accs = 0, 0
        train_iou = {}
        times_per_step_iteration = []
        times_per_metric_iteration = []
        times_per_iteration = []
        for batch_index, (img, target) in enumerate(train_loader):
            # Start the overall timer
            start_total.record()
            img, target = img.to(device), target.to(device)
            optimizer.zero_grad()

            # Start the timer for the forward/backward step
            start.record()
            output = net(img)

            target = target.long()

            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            pred = aux.get_predicted_image(output)

            # Stop the step timer
            end.record()
            torch.cuda.synchronize()
            times_per_step_iteration.append(start.elapsed_time(end))

            # Accuracy
            # Start the timer for the metrics
            start.record()

            # Detach the new tensors and move them to CPU to compute the metrics
            output, target, pred = (output.detach().cpu(),
                                    target.detach().cpu(),
                                    pred.detach().cpu())
            train_loss += loss.item()
            # Returns values, indices; the index is the feature map (class)
            # with the highest value at each pixel
            train_accuracy = metrics.calculate_accuracy(output, target)
            train_accs += train_accuracy

            iou_inds = metrics.calculate_iou(pred, target)
            for key in iou_inds:
                if key not in train_iou:
                    train_iou[key] = iou_inds[key]
                else:
                    train_iou[key] += iou_inds[key]

            # Stop the metrics timer
            end.record()
            torch.cuda.synchronize()
            times_per_metric_iteration.append(start.elapsed_time(end))

            # Stop the overall timer
            end_total.record()
            torch.cuda.synchronize()
            times_per_iteration.append(start_total.elapsed_time(end_total))

        # Average the recorded timings over the epoch
        avg_time_taken = sum(times_per_iteration) / len(times_per_iteration)
        avg_time_step_taken = sum(times_per_step_iteration) / len(
            times_per_step_iteration)
        avg_time_metrics_taken = sum(times_per_metric_iteration) / len(
            times_per_metric_iteration)

        print('Average time spent in total: {:.02f}s'.format(avg_time_taken * 1e-3))
        print('Average time spent on steps: {:.02f}s'.format(
            avg_time_step_taken * 1e-3))
        print('Average time spent on metrics: {:.02f}s'.format(
            avg_time_metrics_taken * 1e-3))
        print('Average time spent on data loading: {:.02f}s'.format(
            avg_time_taken * 1e-3 - avg_time_step_taken * 1e-3 -
            avg_time_metrics_taken * 1e-3))

        train_loss = train_loss / (len(train_loader.dataset) / batch_size)
        train_accs = 100 * (train_accs /
                            (len(train_loader.dataset) / batch_size))
        train_iou = metrics.convert_batched_iou(
            train_iou, (len(train_loader.dataset) / batch_size))
        mIoU = metrics.get_mIoU(train_iou)
        mIoU_desc = metrics.miou_to_string(train_iou)
        return train_loss, train_accs, mIoU, mIoU_desc
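
train_one_epoch expects module-level torch.cuda.Event timers (start, end, start_total, end_total) and the aux/metrics helpers to be in scope. Below is a minimal sketch of that timer setup and a call, assuming a CUDA device, the net/criterion/train_loader objects from the earlier sketches, and an hparams dict with a 'learning_rate' key (an assumption, not shown in the original code).

import torch

# CUDA event timers used inside train_one_epoch
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start_total = torch.cuda.Event(enable_timing=True)
end_total = torch.cuda.Event(enable_timing=True)

optimizer = torch.optim.Adam(net.parameters(), lr=hparams['learning_rate'])
train_loss, train_acc, mIoU, mIoU_desc = train_one_epoch(
    train_loader, net, optimizer, criterion, hparams)
print('Train loss: {:.4f}, accuracy: {:.2f}%, mIoU: {:.4f}'.format(
    train_loss, train_acc, mIoU))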