# ----- Example 1 (fragment; start of enclosing function/if-chain not shown) -----
    # Register the classifier-mode flag; the enclosing parser is created above
    # this fragment (not visible here).
    parser.add_argument('-m', help='ECOC or VOTE')
    args = parser.parse_args(sys.argv[1:])

    # Load data using specialized script
    # NOTE(review): load_mnist/sample/genbounds/genfeatures are project helpers
    # defined elsewhere — behavior assumed from names, confirm against their defs.
    train_dataset = load_mnist(path="../data/mnist/", dataset="training")
    test_dataset = load_mnist(path="../data/mnist/", dataset="testing")
    
    # Take a fraction of the data to speed computation
    train_images, train_labels = sample(train_dataset, 5000)
    test_images, test_labels = sample(test_dataset, 100)

    # Get the bounds of the haar rectangles (100 rectangles over 28x28 MNIST images)
    bounds = genbounds(28, 28, 100)
    
    # Create data, using same rectangles for training and testing.
    # Cast to float so the StandardScaler below operates on floating-point features.
    train_data = genfeatures(train_images, bounds).astype(float)
    test_data = genfeatures(test_images, bounds).astype(float)

    # Normalize the data: fit the zero-mean/unit-variance scaler on the training
    # set only, then apply the same transform to the test set (avoids leakage).
    zmscaler = preprocessing.StandardScaler()
    train_data = zmscaler.fit_transform(train_data)
    test_data = zmscaler.transform(test_data)
    
    if args.m == 'ECOC':
        # Generate 20 random ECOC codewords: a 20x10 binary matrix, one column
        # per digit class, one row per binary sub-problem.
        # (Previous comment said 50, but size=(20,10) generates 20.)
        codes = np.random.randint(0,2,size=(20,10))
        # Training copy uses {-1,+1} labels; test copy is boolean for comparison.
        train_codes, test_codes = codes.copy(), codes.copy().astype(bool)
        train_codes[train_codes==0] = -1.0

        # Iterate through each ECOC (worker pool; usage continues past this
        # fragment and is not visible here)
        pool = Pool(processes=11)
# ----- Example 2 (fragment; resumes mid-branch of another if-chain) -----
        # Count predictions in H that match the true labels Yi (both defined
        # earlier, outside this fragment).
        c = np.sum(Yi.ravel()==H)
        # Print accuracy as a fraction (Python 2 print statement; explicit
        # float casts force true division).
        # NOTE(review): the "r=%f" label hard-codes 4.6 — by the pattern at the
        # analogous print below (r=0.325 matching wnn(..., r=0.325)) this should
        # echo the radius actually used; confirm against the unseen wnn call.
        print "r=%f:" % 4.6, float(c)/float(len(Yi))
# Branch of an if-chain whose head is above this view; selects the kNN demo.
elif args.d == "d":
    # Load data using specialized script
    # NOTE(review): load_mnist/sample/genbounds/genfeatures/wnn are project
    # helpers defined elsewhere — behavior assumed from names, confirm.
    train_dataset = load_mnist(path="../data/mnist/", dataset="training")
    test_dataset = load_mnist(path="../data/mnist/", dataset="testing")

    # Take a fraction of the data to speed computation
    train_images, train_labels = sample(train_dataset, 10000)
    test_images, test_labels = sample(test_dataset, 1000)

    # Get the bounds of the haar rectangles (100 rectangles over 28x28 images)
    bounds = genbounds(28, 28, 100)

    # Create data, using same rectangles for training and testing
    train_data = genfeatures(train_images, bounds)
    test_data = genfeatures(test_images, bounds)

    # Normalize the data: fit on training only, apply same transform to test
    # (avoids train/test leakage).
    zmscaler = preprocessing.StandardScaler()
    train_data = zmscaler.fit_transform(train_data)
    test_data = zmscaler.transform(test_data)

    # Run weighted nearest-neighbor classification with cosine distance and
    # radius r=0.325; H is presumably the predicted label per test sample —
    # confirm against wnn's definition.
    H = wnn(train_data, test_data, train_labels, d=cosine_distance, r=0.325)
    # Accuracy: fraction of test labels matching predictions (Python 2 print;
    # float casts force true division). Here the printed r matches the r passed
    # to wnn, unlike the fragment above.
    c = np.sum(test_labels.ravel()==H)
    print "r=%f:" % 0.325, float(c)/float(len(test_labels))
else:
    # Unrecognized mode: do nothing.
    pass