Exemplo n.º 1
0
def get_weights():
    """Learn ensemble blending weights from out-of-fold predictions.

    Rebuilds the concatenated validation labels from the same deterministic
    5-fold stratified split used to produce the saved prediction files,
    loads each model's predictions, and minimizes ``error`` with COBYLA
    subject to ``constraint``.

    Returns:
        The weight vector found by ``scipy.optimize.fmin_cobyla``.
    """
    _, labels, _, _, _ = utils.load_data()
    # NOTE(review): legacy sklearn API (labels passed positionally,
    # ``n_folds`` keyword) -- pre-0.18 scikit-learn only.
    folds = StratifiedKFold(labels, n_folds=5, random_state=23)
    # Stitch the per-fold test indices back together in fold order.
    test_index = np.concatenate([test_idx for _, test_idx in folds])
    val_labels = labels[test_index]

    # One prediction array per base model, in file order.
    prediction_files = utils.get_prediction_files()
    val_predictions = [
        np.genfromtxt(os.path.join(consts.BLEND_PATH, preds_file),
                      delimiter=',')
        for preds_file in prediction_files
    ]

    # Derivative-free constrained minimization of the blending error,
    # starting from uniform unit weights.
    p0 = [1.] * len(prediction_files)
    return fmin_cobyla(error, p0, args=(val_predictions, val_labels),
                       cons=[constraint], rhoend=1e-5)
Exemplo n.º 2
0
def get_weights():
    """Fit per-model blending weights on the out-of-fold validation labels.

    Recreates the validation-label ordering from the same stratified
    5-fold split that generated the saved predictions, then runs COBYLA
    to minimize ``error`` over the loaded predictions under
    ``constraint``.

    Returns:
        The optimized weight vector from ``fmin_cobyla``.
    """
    _, labels, _, _, _ = utils.load_data()
    # NOTE(review): pre-0.18 scikit-learn StratifiedKFold signature.
    skf = StratifiedKFold(labels, n_folds=5, random_state=23)
    # Collect each fold's test indices, then flatten into one index array.
    index_parts = []
    for _, fold_test_idx in skf:
        index_parts.append(fold_test_idx)
    val_labels = labels[np.hstack(index_parts)]

    # Load every model's validation-set predictions in file order.
    val_predictions = []
    for fname in utils.get_prediction_files():
        loaded = np.genfromtxt(os.path.join(consts.BLEND_PATH, fname),
                               delimiter=',')
        val_predictions.append(loaded)

    # Uniform unit weights as the starting point for the optimizer.
    initial_weights = [1.] * len(val_predictions)
    result = fmin_cobyla(error, initial_weights,
                         args=(val_predictions, val_labels),
                         cons=[constraint], rhoend=1e-5)

    return result
Exemplo n.º 3
0
import csv
import numpy as np
import os

import blender
import consts
import utils


if __name__ == '__main__':
    # Blend the per-model ensemble prediction CSVs into a single weighted
    # output CSV, using the weights fitted by blender.get_weights().
    # NOTE(review): Python 2 only -- csv files are opened in binary mode
    # ('wb'/'rb') and readers are advanced with .next(); Python 3 requires
    # text mode with newline='' and next(reader).
    weights = blender.get_weights()
    prediction_files = utils.get_prediction_files()

    with open(os.path.join(consts.OUTPUT_PATH, 'ensembler_weighted_models.csv'), 'wb') as f_out:
        writer = csv.writer(f_out)
        readers = []
        # Input handles are kept open for the whole merge and never closed
        # explicitly -- NOTE(review): consider closing f_ins when done.
        f_ins = []
        for fpred in prediction_files:
            f_in = open(os.path.join(consts.ENSEMBLE_PATH, fpred), 'rb')
            f_ins.append(f_in)
            readers.append(csv.reader(f_in))
        # Copy header from the first file; skip (discard) it in the others
        # so all readers stay row-aligned.
        writer.writerow(readers[0].next())
        for r in readers[1:]:
            r.next()
        # Merge content: weighted sum of each file's per-row predictions,
        # driven by the first reader with the others advanced in lockstep.
        for line in readers[0]:
            file_name = line[0]
            preds = weights[0] * np.array(map(float, line[1:]))
            for i, r in enumerate(readers[1:]):
                preds += weights[i+1] * np.array(map(float, r.next()[1:]))
        # NOTE(review): file_name and the blended preds are computed but
        # never written to the output -- the row-writing step appears to be
        # missing or truncated in this copy.
Exemplo n.º 4
0
import csv
import numpy as np
import os

import blender
import consts
import utils

if __name__ == '__main__':
    weights = blender.get_weights()
    prediction_files = utils.get_prediction_files()

    with open(
            os.path.join(consts.OUTPUT_PATH, 'ensembler_weighted_models.csv'),
            'wb') as f_out:
        writer = csv.writer(f_out)
        readers = []
        f_ins = []
        for fpred in prediction_files:
            f_in = open(os.path.join(consts.ENSEMBLE_PATH, fpred), 'rb')
            f_ins.append(f_in)
            readers.append(csv.reader(f_in))
        # Copy header
        writer.writerow(readers[0].next())
        for r in readers[1:]:
            r.next()
        # Merge content
        for line in readers[0]:
            file_name = line[0]
            preds = weights[0] * np.array(map(float, line[1:]))
            for i, r in enumerate(readers[1:]):