Example #1
import json
import os

# Standard-library imports used below; the project helpers referenced here
# (fft, create_fft_data_name, train, predict, merge_csv_files) are assumed
# to be defined elsewhere in the same module.
def run_trainer():
    with open('SETTINGS.json') as f:
        settings_dict = json.load(f)

    # Sweep over regularization strengths C for the logistic-regression models.
    reg_list = [10000000, 100, 10, 1.0, 0.1, 0.01]
    for reg_C in reg_list:
        print(reg_C)
        data_path = settings_dict['path']['processed_data_path'] + '/' + create_fft_data_name(settings_dict)
        submission_path = settings_dict['path']['submission_path'] + '/logreg_' + str(
            reg_C) + '_' + create_fft_data_name(settings_dict)

        # Build the FFT feature cache if it does not exist yet.
        if not os.path.exists(data_path):
            fft.run_fft_preprocessor()

        if not os.path.exists(submission_path):
            os.makedirs(submission_path)

        # Train one model per subject and write its per-subject predictions.
        subjects = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']
        for subject in subjects:
            print(subject)
            model, data_scaler = train(subject, data_path, reg_C)
            predict(subject, model, data_scaler, data_path, submission_path)

        merge_csv_files(submission_path, subjects, 'submission')
        merge_csv_files(submission_path, subjects, 'submission_softmax')
        merge_csv_files(submission_path, subjects, 'submission_minmax')
        merge_csv_files(submission_path, subjects, 'submission_median')
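The example above loads its paths from a SETTINGS.json file in the working directory. Below is a minimal sketch of the structure it expects, covering only the two keys that run_trainer() actually dereferences; the directory values are placeholders, and the real file presumably also carries the FFT parameters consumed by create_fft_data_name(), which are not reconstructed here.

import json

# Hypothetical minimal SETTINGS.json for the example above. Only the keys
# read by run_trainer() are included; the directory values are placeholders.
settings = {
    'path': {
        'processed_data_path': 'data/processed',
        'submission_path': 'submissions',
    }
}

with open('SETTINGS.json', 'w') as f:
    json.dump(settings, f, indent=4)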
Example #2
import json
import os

# Standard-library imports for this snippet; the project helpers referenced
# below (fft, create_fft_data_name, load_test_labels, train, predict,
# merge_csv_files) are again assumed to be defined in the same module.
def run_trainer():
    with open('SETTINGS.json') as f:
        settings_dict = json.load(f)

    data_path = settings_dict['path']['processed_data_path'] + '/' + create_fft_data_name(settings_dict)
    submission_path = settings_dict['path']['submission_path'] + '/LDA_' + create_fft_data_name(settings_dict)
    print(data_path)

    if not os.path.exists(data_path):
        fft.run_fft_preprocessor()

    if not os.path.exists(submission_path):
        os.makedirs(submission_path)

    # Ground-truth labels for the test clips; the per-subject 'preictal'
    # labels are passed to predict() below.
    test_labels_path = '/mnt/sda4/CODING/python/kaggle_data/test_labels.csv'
    test_labels = load_test_labels(test_labels_path)

    subjects = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']
    coef_list = []
    for subject in subjects:
        print('***********************', subject, '***************************')
        model, data_scaler, coefs = train(subject, data_path)
        predict(subject, model, data_scaler, data_path, submission_path, test_labels[subject]['preictal'])
        coef_list.append(coefs)

    merge_csv_files(submission_path, subjects, 'submission')
    merge_csv_files(submission_path, subjects, 'submission_softmax')
    merge_csv_files(submission_path, subjects, 'submission_minmax')
    merge_csv_files(submission_path, subjects, 'submission_median')
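None of the project helpers (train, predict, load_test_labels, merge_csv_files) are shown on this page. As an illustration only, here is a minimal sketch of what merge_csv_files could look like, assuming each per-subject prediction file is named '<prefix>_<subject>.csv' inside submission_path and that all files share one header row; both the naming scheme and the header handling are assumptions, not taken from the original project.

import os

def merge_csv_files(submission_path, subjects, prefix):
    # Hypothetical sketch: concatenate per-subject CSVs (e.g.
    # 'submission_Dog_1.csv') into a single '<prefix>.csv', keeping the
    # header row from the first file only.
    out_file = os.path.join(submission_path, prefix + '.csv')
    with open(out_file, 'w') as out:
        for i, subject in enumerate(subjects):
            part = os.path.join(submission_path, prefix + '_' + subject + '.csv')
            with open(part) as f:
                lines = f.readlines()
            out.writelines(lines if i == 0 else lines[1:])  # skip duplicate headers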