Example no. 1
0
def k_validation(args, features, bag_labels, k_valid=5):
    """
    Evaluates the model with k-fold cross-validation.

    :param args: arguments from parser [parser]
    :param features: list of bags [list]
    :param bag_labels: list of bag labels [list]
    :param k_valid: number of cross-validation folds [int]
    :return: the accuracy of the single requested fold when args.split
             contains 'validate' and args.valid_iter is a valid fold index,
             otherwise the mean accuracy over all k_valid folds
    """
    def _run_fold(cur_iteration):
        # Train a fresh MIL model on this fold's training split and
        # return its accuracy on the fold's validation split.
        x_train, x_val, y_train, y_val = batch_set(features, bag_labels,
                                                   cur_iteration, k_valid)
        model = MIL(args)
        model.fit(x_train, y_train)
        y_pred, y_instance_pred = model.predict(x_val)
        rec, prec, acc, f1 = calculate_metrics(y_pred, y_val, args.cm)
        print('Acc={}'.format(acc))
        return acc

    # Single-fold mode: evaluate only the fold selected on the command line.
    if 'validate' in args.split and args.valid_iter <= k_valid:
        return _run_fold(args.valid_iter)

    # Full k-fold mode: evaluate every fold and report mean / std deviation.
    accuracies = [_run_fold(cur_iteration) for cur_iteration in range(k_valid)]
    mean = average(accuracies)
    print('Result of k-validation: mean = {}, std={}'.format(
        mean, standard_deviaton(accuracies, mean)))
    return mean
Example no. 2
0
def train(args, dataset):
    """
    Trains a MIL model on the dataset's training set and pickles it to disk.

    The model is written to os.getcwd() + model_path(args). If args.v is
    set, the training loss history is visualized afterwards.

    :param args: arguments from parser [parser]
    :param dataset: dataset object providing return_training_set()
    :return: None
    """
    x_train, y_train = dataset.return_training_set()
    filepath = os.getcwd() + model_path(args)
    model = MIL(args)
    # Fit BEFORE opening the file: the original opened with 'wb' first,
    # which truncated the file and held the handle open for the whole
    # (potentially long) training run — leaving an empty file behind if
    # fit() raised.
    model.fit(x_train, y_train)
    with open(filepath, 'wb') as model_file:
        pickle.dump(model, model_file)
    y_pred, y_instance_pred = model.predict(x_train)
    if args.v:
        loss = model.return_loss_history()
        visualize_loss(args, loss)
Example no. 3
0
def run(args, dataset, n_runs=5):
    """
    Evaluates the model over several random train/test shuffles.

    :param args: arguments from parser [parser]
    :param dataset: dataset object providing random_shuffle(),
                    return_training_set() and return_testing_set()
    :param n_runs: number of shuffle-train-evaluate repetitions [int]
    :return: mean accuracy over the runs
    """
    accuracies = []
    # Loop index is unused; '_' also avoids shadowing the function name
    # (the original used 'for run in range(5)').
    for _ in range(n_runs):
        dataset.random_shuffle()
        x_train, y_train = dataset.return_training_set()
        x_test, y_test = dataset.return_testing_set()
        model = MIL(args)
        model.fit(x_train, y_train)
        y_pred, y_instance_pred = model.predict(x_test)
        rec, prec, acc, f1 = calculate_metrics(y_pred, y_test, args.cm)
        accuracies.append(acc)
        print('Acc={}'.format(acc))
    mean = average(accuracies)
    std_dev = standard_deviaton(accuracies, mean)
    print('Result of evaluation: mean = {}, std={}'.format(mean, std_dev))
    # Return the mean for consistency with k_validation (callers that
    # ignored the original None return are unaffected).
    return mean