Example #1
def score_training_set():
    # Generate a data frame that points to the challenge files
    tr_files, te_files = phyc.get_files()

    score = Challenge2018Score()
    for i in range(0, np.size(tr_files, 0)):
        gc.collect()
        sys.stdout.write('Evaluating training subject: %d/%d' %
                         (i + 1, np.size(tr_files, 0)))
        sys.stdout.flush()
        record_name = tr_files.header.values[i][:-4]
        predictions = R.classify_record(record_name)

        arousals = phyc.import_arousals(tr_files.arousal.values[i])
        arousals = np.ravel(arousals)

        score.score_record(arousals, predictions, record_name)
        auroc = score.record_auroc(record_name)
        auprc = score.record_auprc(record_name)
        print(' AUROC:%f AUPRC:%f' % (auroc, auprc))

    print()
    auroc_g = score.gross_auroc()
    auprc_g = score.gross_auprc()
    print('Training AUROC Performance (gross): %f' % auroc_g)
    print('Training AUPRC Performance (gross): %f' % auprc_g)
    print()
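The per-record AUROC/AUPRC values reported by Challenge2018Score above can be cross-checked with scikit-learn; a minimal sketch, assuming the Challenge convention that samples labelled -1 in the arousal vector are unscored and must be excluded (this helper is not part of the original code):

import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score

def sanity_check_record(arousals, probabilities):
    # Drop the samples the Challenge marks as unscored (-1) before scoring.
    mask = arousals != -1
    auroc = roc_auc_score(arousals[mask], probabilities[mask])
    auprc = average_precision_score(arousals[mask], probabilities[mask])
    return auroc, auprc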
Example #2
def evaluate_test_set():
    # Generate a data frame that points to the challenge files
    tr_files, te_files = phyc.get_files()

    for i in range(0, np.size(te_files, 0)):
        gc.collect()
        print('Evaluating test subject: %d/%d' % (i + 1, np.size(te_files, 0)))
        record_name = te_files.header.values[i][:-4]
        output_file = os.path.basename(record_name) + '.vec'
        predictions = R.classify_record(record_name)
        np.savetxt(output_file, predictions, fmt='%.3f')
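Since np.savetxt writes the .vec files as plain text with one value per line, they can be reloaded later for inspection or post-processing; a small sketch (the file name and the 0.5 threshold are only illustrative):

import numpy as np

probabilities = np.loadtxt('some_record.vec')      # per-sample arousal scores
binary = (probabilities >= 0.5).astype(int)        # illustrative threshold
print(probabilities.shape, binary.sum())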
Example #3
def score_training_set(model):

    try:
        os.mkdir('training_output')
    except OSError:
        pass

    # Generate a data frame that points to the challenge files
    tr_files, te_files = phyc.get_files()
    j = 0
    score = Challenge2018Score()
    for i in range(0, np.size(tr_files, 0)):
        gc.collect()
        sys.stdout.write('\nEvaluating training subject: %d/%d' %
                         (i + 1, np.size(tr_files, 0)))
        sys.stdout.flush()
        record_name = tr_files.header.values[i][:-4]
        predictions, pred_arousal_probabilities, model = T.classify_record(
            record_name, model)

        arousals = phyc.import_arousals(tr_files.arousal.values[i])
        # flatten to a 1D array
        arousals = np.ravel(arousals)

        print("arousals.shape: " + str(arousals.shape))
        print("predictions.shape: " + str(predictions.shape))
        print("pred_arousal_probabilities.shape: " +
              str(pred_arousal_probabilities.shape))

        print_arousal_predictions(arousals, pred_arousal_probabilities)

        score.score_record(arousals, pred_arousal_probabilities, record_name)
        auroc = score.record_auroc(record_name)
        auprc = score.record_auprc(record_name)
        L.log_info(' AUROC:%f AUPRC:%f' % (auroc, auprc))

        # also save the training predictions, to evaluate voting schemes across different models
        output_file = "training_output/" + os.path.basename(
            record_name) + '.vec'
        L.log_info("Salvo i files esito del training in " + str(output_file))
        np.savetxt(output_file, pred_arousal_probabilities, fmt='%.3f')

    print()
    auroc_g = score.gross_auroc()
    auprc_g = score.gross_auprc()
    L.log_info('Training AUROC Performance (gross): %f' % auroc_g)
    L.log_info('Training AUPRC Performance (gross): %f' % auprc_g)
    L.log_info("\n\r ")
    return model
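phyc.import_arousals is not shown in these examples; in the standard Challenge 2018 data layout the arousal annotations for each training record are stored in an HDF5-format <record>-arousal.mat file, and the loaded array is 2-D, which is why it is flattened with np.ravel afterwards. A rough sketch of such a loader under that assumption (the dataset path inside the file may differ from what phyc actually uses):

import h5py
import numpy as np

def import_arousals(file_name):
    # The -arousal.mat files are MATLAB v7.3 (HDF5); the target vector is
    # assumed to live under data/arousals.
    with h5py.File(file_name, 'r') as f:
        return np.array(f['data']['arousals'])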
Example #4
def train():
    T.init()

    # Generate a data frame that points to the challenge files
    tr_files, te_files = phyc.get_files()

    # For each subject in the training set...
    for i in range(0, np.size(tr_files, 0)):
        gc.collect()
        print('Preprocessing training subject: %d/%d' %
              (i + 1, np.size(tr_files, 0)))
        record_name = tr_files.header.values[i][:-4]
        T.preprocess_record(record_name)

    T.finish()
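phyc.get_files() is also external to these snippets; from the way it is used, it returns two pandas data frames whose 'header' (and, for training records, 'arousal') columns hold paths to each record's files. A hypothetical sketch under that assumption, with the directory layout made up for illustration:

import glob
import os
import pandas as pd

def get_files(data_dir='challenge_data'):
    # Assumed layout: one sub-directory per record holding <record>.hea and,
    # for training records, <record>-arousal.mat.
    tr_headers = sorted(glob.glob(os.path.join(data_dir, 'training', '*', '*.hea')))
    te_headers = sorted(glob.glob(os.path.join(data_dir, 'test', '*', '*.hea')))
    tr_files = pd.DataFrame({'header': tr_headers,
                             'arousal': [h[:-4] + '-arousal.mat' for h in tr_headers]})
    te_files = pd.DataFrame({'header': te_headers})
    return tr_files, te_files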
Example #5
def evaluate_test_set():
    # Generate a data frame that points to the challenge files
    tr_files, te_files = phyc.get_files()
    try:
        os.mkdir('test_output')
    except OSError:
        pass

    for f in glob.glob('models/*.vec'):
        os.remove(f)

    for i in range(0, np.size(te_files, 0)):
        gc.collect()
        print('Evaluating test subject: %d/%d' % (i + 1, np.size(te_files, 0)))
        record_name = te_files.header.values[i][:-4]
        output_file = "test_output/" + os.path.basename(record_name) + '.vec'
        print("Salvo i files esito del test in " + str(output_file))
        predictions, pred_arousal_probabilities = T.classify_record(
            record_name)
        np.savetxt(output_file, pred_arousal_probabilities, fmt='%.3f')
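The comment in Example #3's score_training_set already hints at combining the saved .vec files from different models by voting; a minimal soft-voting sketch over per-model output directories (the directory names are assumptions, not from the source):

import glob
import os
import numpy as np

def soft_vote(model_dirs, out_dir='voted_output'):
    os.makedirs(out_dir, exist_ok=True)
    # Assume every model directory contains one .vec file per record, and that
    # all models produce vectors of the same length for a given record.
    for vec_name in sorted(os.path.basename(p)
                           for p in glob.glob(os.path.join(model_dirs[0], '*.vec'))):
        stacked = np.stack([np.loadtxt(os.path.join(d, vec_name)) for d in model_dirs])
        np.savetxt(os.path.join(out_dir, vec_name), stacked.mean(axis=0), fmt='%.3f')

# e.g. soft_vote(['test_output_model_a', 'test_output_model_b'])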