Example no. 1
        print '#####################'
        print 'Current condition: '
        print '- hidden architecture', condition[0]
        print '- regularization rate', condition[1]
        print '- batch size', condition[2]
        print '- Epoch number', condition[3]

        # 10-fold
        result_array = numpy.zeros((10, 3))
        for i in xrange(10):
            print('Experiment %d/10 is in progress' % (i + 1))
            datasets = load10FeatureData(input_data, target_data, idx, i)
            # test_result = [training_accuracy, testing_accuracy, f-score]
            test_result = test_mlp(architecture = condition[0],
                                    reg_rate = condition[1],
                                    batch_size = condition[2],
                                    epoch_num = condition[3],
                                    datasets = datasets)
            result_array[i,:] = numpy.array(test_result)

        result_mean = numpy.mean(result_array, axis=0)
        result_std = numpy.std(result_array, axis=0)
        # result_mean and result_std are 1-D, so concatenate along the only axis
        # (axis=1 here would raise an error on current numpy versions)
        result = numpy.concatenate((result_mean, result_std))
        cond_result_list.append(result)

        print 'Result: '
        print (('Mean training accuracy %f %%, testing accuracy %f %%, f-score %f') % (result_mean[0], result_mean[1], result_mean[2]))
        end_time = time.clock()
        print('Took %.2f minutes' % ((end_time - start_time) / 60.))

    # Output cond_result
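
For context, Example no. 1 is the body of a grid search over hyperparameter settings: condition holds one (hidden architecture, regularization rate, batch size, epoch number) tuple per iteration, and the concatenated mean/std of each 10-fold run is appended to cond_result_list. A minimal sketch of how the surrounding loop might be set up, assuming the names conditions, cond_result_list, and start_time from the snippet; the grid values themselves are illustrative, not taken from the source:

import itertools
import time

# Illustrative hyperparameter grid (the real values are not shown on this page).
architectures = [[400], [400, 400]]   # hidden-layer sizes
reg_rates = [0.0001, 0.001]           # regularization rates
batch_sizes = [10, 20]
epoch_nums = [100]

conditions = list(itertools.product(architectures, reg_rates, batch_sizes, epoch_nums))
cond_result_list = []
start_time = time.time()              # the snippets above use time.clock(), removed in Python 3.8

for condition in conditions:
    # ... body of Example no. 1 goes here ...
    pass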
Example no. 2
    # Ubuntu
    input_data = numpy.loadtxt(open('/home/heehwan/Documents/workspace/data/DBN_data/X_1405_10features.csv', 'rb'), delimiter=',')
    target_data = numpy.loadtxt(open('/home/heehwan/Documents/workspace/data/DBN_data/Y_1405_10features.csv', 'rb'), delimiter=',')

    # Make dataset index
    idx = range(750)
    random.seed(1)
    random.shuffle(idx)

    print '... starting experiments'
    start_time = time.clock()

    # 10-fold
    result_array = numpy.zeros((10, 4))
    for i in xrange(10):
        print('Experiment %d/10 is in progress' % (i + 1))
        datasets = load10FeatureData(input_data, target_data, idx, i)
        # test_result = [training_accuracy, training_f-score, testing_accuracy, testing_f-score]
        test_result = test_mlp(architecture = arch,
                                reg_rate = reg,
                                batch_size = batch_size,
                                epoch_num = epoch,
                                datasets = datasets)
        result_array[i,:] = numpy.array(test_result)

    end_time = time.clock()
    print('Took %.2f minutes' % ((end_time - start_time) / 60.))
    # Result
    pickle.dump(result_array, open('result_400_3_v1.pkl', 'wb'))
    print result_array
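
The helper load10FeatureData is not shown on this page; judging from how it is called (feature matrix, target vector, a shuffled index list of 750 entries, and a fold number), it presumably holds out fold i as the test split. A rough sketch of such a splitter, purely an assumption about its behavior and return format:

import numpy

def load10FeatureData(input_data, target_data, idx, fold):
    # Assumed behavior: cut the pre-shuffled index list into 10 equal folds,
    # hold out the requested fold for testing and train on the remaining nine.
    idx = numpy.asarray(idx)
    fold_size = len(idx) // 10        # 75 samples per fold for 750 indices
    test_idx = idx[fold * fold_size:(fold + 1) * fold_size]
    train_idx = numpy.concatenate((idx[:fold * fold_size], idx[(fold + 1) * fold_size:]))
    train_set = (input_data[train_idx], target_data[train_idx])
    test_set = (input_data[test_idx], target_data[test_idx])
    return train_set, test_set

The real implementation may differ, for example by also carving out a validation split or by wrapping the arrays in whatever structure test_mlp expects.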
Example no. 3
    input_data = numpy.loadtxt(
        open('/home/heehwan/Documents/workspace/data/DBN_data/X_1405_10features.csv', 'rb'),
        delimiter=',')
    target_data = numpy.loadtxt(
        open('/home/heehwan/Documents/workspace/data/DBN_data/Y_1405_10features.csv', 'rb'),
        delimiter=',')

    # Make dataset index
    idx = range(750)
    random.seed(1)
    random.shuffle(idx)

    print '... starting experiments'
    start_time = time.clock()

    # 10-fold
    result_array = numpy.zeros((10, 4))
    for i in xrange(10):
        print('Experiment %d/10 is in progress' % (i + 1))
        datasets = load10FeatureData(input_data, target_data, idx, i)
        # test_result = [training_accuracy, training_f-score, testing_accuracy, testing_f-score]
        test_result = test_mlp(architecture=arch,
                               reg_rate=reg,
                               batch_size=batch_size,
                               epoch_num=epoch,
                               datasets=datasets)
        result_array[i, :] = numpy.array(test_result)

    end_time = time.clock()
    print('Took %.2f minutes' % ((end_time - start_time) / 60.))
    # Result
    pickle.dump(result_array, open('result_400_3_v1.pkl', 'wb'))
    print result_array
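
Once the pickle file has been written, the per-fold numbers can be read back and summarized. A small sketch, assuming the file is read with the same Python version that wrote it and that the four columns follow the comment above (training accuracy, training f-score, testing accuracy, testing f-score):

import pickle

with open('result_400_3_v1.pkl', 'rb') as f:
    result_array = pickle.load(f)     # shape (10, 4): one row per fold

mean = result_array.mean(axis=0)
std = result_array.std(axis=0)
print('Training accuracy: %.4f +/- %.4f' % (mean[0], std[0]))
print('Training f-score:  %.4f +/- %.4f' % (mean[1], std[1]))
print('Testing accuracy:  %.4f +/- %.4f' % (mean[2], std[2]))
print('Testing f-score:   %.4f +/- %.4f' % (mean[3], std[3]))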