コード例 #1
0
def main(args):
    """Run flower recognition with the model selected by ``args.method``.

    Recognized methods: vgg, fc, resnet34, resnet50, svm, knn.
    An unrecognized method name is silently ignored (same as the
    original if-chain). Hyperparameters (lr, epochs, batch_size, reg)
    are forwarded to the neural-network runners; svm and knn take none.
    """
    # Map each method name to its announcement message and a deferred
    # launcher; the lambdas keep the module lookups lazy so only the
    # chosen backend is ever touched.
    dispatch = {
        'vgg': (
            'Using vgg network for flowers recognition',
            lambda: vgg.run_vgg(args.lr, args.epochs, args.batch_size, args.reg),
        ),
        'fc': (
            'Using fully connected network for flowers recognition',
            lambda: fc.run_fc(args.lr, args.epochs, args.batch_size, args.reg),
        ),
        'resnet34': (
            'Using deep residual network(34-layers) for flowers recognition',
            lambda: resnet.run_resnet(args.lr, args.epochs, args.batch_size, args.reg),
        ),
        'resnet50': (
            'Using deep residual network(50-layers) pretrained for flowers recognition',
            lambda: res.run_resnet50(args.batch_size, args.epochs, args.lr),
        ),
        'svm': (
            'Using Support Vector Machine for flowers recognition',
            lambda: svm.run_svm(),
        ),
        'knn': (
            'Using K nearest neighbors for flowers recognition',
            lambda: knn.run_knn(),
        ),
    }

    entry = dispatch.get(args.method)
    if entry is not None:
        message, launch = entry
        print(message)
        launch()
コード例 #2
0
ファイル: runall.py プロジェクト: skeate/cs7641-p1
def run(data, dataset):
    """Train every enabled learner on *dataset* and report progress.

    The decision-tree step is intentionally disabled (left commented
    out below); the remaining five learners run in sequence.
    """
    # print('running decision tree for ' + dataset)
    # run_dt(data, dataset)

    # (label, runner) pairs, executed in the original order.
    steps = (
        ('neural network', run_ann),
        ('boosted dt', run_boost),
        ('knn', run_knn),
        ('svm (linear)', run_svm_linear),
        ('svm (rbf)', run_svm_rbf),
    )
    for label, learner in steps:
        print('running ' + label + ' for ' + dataset)
        learner(data, dataset)
コード例 #3
0
        def generate_and_save_samples(tag):
            """Evaluate KNN on encoder features, then autoregressively
            generate an 8x8 grid of samples and save it as
            samples_<tag>.png.

            NOTE(review): Python 2 code (print statements, xrange); the
            grid math below also relies on Py2 integer division.
            """
            # SVHN KNN
            print "Running KNN"

            def extract_feats(_images):
                # Feature extractor handed to the KNN runner: evaluates
                # mu1 (presumably the encoder's latent mean — confirm)
                # for a batch of images, with batch-norm in inference mode.
                # return session.run(latents1, feed_dict={images: _images, total_iters: 99999, bn_is_training: False, bn_stats_iter:0})
                return session.run(mu1,
                                   feed_dict={
                                       images: _images,
                                       total_iters: 99999,
                                       bn_is_training: False,
                                       bn_stats_iter: 0
                                   })

            knn.run_knn(extract_feats)

            def color_grid_vis(X, nh, nw, save_path):
                # Tile the first nh*nw images of X (N, C, H, W) into an
                # nh x nw grid and write it to save_path.
                # from github.com/Newmu
                X = X.transpose(0, 2, 3, 1)
                h, w = X[0].shape[:2]
                img = np.zeros((h * nh, w * nw, 3))
                for n, x in enumerate(X):
                    # Py2 `/` is integer division here — row index of tile n.
                    j = n / nw
                    i = n % nw
                    img[j * h:j * h + h, i * w:i * w + w, :] = x
                imsave(save_path, img)

            print "Generating latents1"

            # Repeat each of the 8 sampled latents 8 times (strided
            # assignment) so every grid row shares one latent code.
            latents1_copied = np.zeros((64, LATENT_DIM_2), dtype='float32')
            for i in xrange(8):
                latents1_copied[i::8] = sample_fn_latents1

            samples = np.zeros((64, N_CHANNELS, HEIGHT, WIDTH), dtype='int32')

            # Autoregressive decode: one (y, x, channel) position at a
            # time, each step conditioned on everything generated so far.
            print "Generating samples"
            for y in xrange(HEIGHT):
                for x in xrange(WIDTH):
                    for ch in xrange(N_CHANNELS):
                        next_sample = dec1_fn(latents1_copied, samples, ch, y,
                                              x)
                        samples[:, ch, y, x] = next_sample

            print "Saving samples"
            color_grid_vis(samples, 8, 8, 'samples_{}.png'.format(tag))
コード例 #4
0
ファイル: resnet_svhn.py プロジェクト: igul222/nn
        def generate_and_save_samples(tag):
            """Run the SVHN KNN evaluation on encoder features, then
            generate a 64-image sample grid pixel-by-pixel and save it
            as samples_<tag>.png.

            NOTE(review): Python 2 source (print statements, xrange,
            integer `/`).
            """
            # SVHN KNN
            print "Running KNN"
            def extract_feats(_images):
                # Evaluate mu1 for a batch of images with batch-norm in
                # inference mode; handed to knn.run_knn as the feature fn.
                # return session.run(latents1, feed_dict={images: _images, total_iters: 99999, bn_is_training: False, bn_stats_iter:0})
                return session.run(mu1, feed_dict={images: _images, total_iters: 99999, bn_is_training: False, bn_stats_iter:0})

            knn.run_knn(extract_feats)

            def color_grid_vis(X, nh, nw, save_path):
                # Arrange X (N, C, H, W) into an nh x nw image grid and
                # write it to save_path.
                # from github.com/Newmu
                X = X.transpose(0,2,3,1)
                h, w = X[0].shape[:2]
                img = np.zeros((h*nh, w*nw, 3))
                for n, x in enumerate(X):
                    # Py2 integer division: grid row / column of tile n.
                    j = n/nw
                    i = n%nw
                    img[j*h:j*h+h, i*w:i*w+w, :] = x
                imsave(save_path, img)

            print "Generating latents1"

            # Broadcast the 8 sampled latent codes across 64 slots so
            # each code appears 8 times in the grid.
            latents1_copied = np.zeros((64, LATENT_DIM_2), dtype='float32')
            for i in xrange(8):
                latents1_copied[i::8] = sample_fn_latents1

            samples = np.zeros(
                (64, N_CHANNELS, HEIGHT, WIDTH), 
                dtype='int32'
            )

            # Autoregressive decoding: fill one (channel, y, x) position
            # per dec1_fn call, conditioning on the partial samples.
            print "Generating samples"
            for y in xrange(HEIGHT):
                for x in xrange(WIDTH):
                    for ch in xrange(N_CHANNELS):
                        next_sample = dec1_fn(latents1_copied, samples, ch, y, x)
                        samples[:,ch,y,x] = next_sample

            print "Saving samples"
            color_grid_vis(
                samples, 
                8, 
                8, 
                'samples_{}.png'.format(tag)
            )
コード例 #5
0
ファイル: main.py プロジェクト: tmarathe/Machine-Learning-A3
    # Reduce the gamma-corrected images to 150 PCA components
    # (whitened); eigenfaces reshaped to 32x16 — presumably the source
    # image patch size, TODO confirm against the loader.
    n=150
    pca = RandomizedPCA(n_components=n, whiten=True).fit(gc_train)
    eigenfaces = pca.components_.reshape((n, 32, 16))

    # Project both splits into the PCA space before classification.
    gc_train = pca.transform(gc_train)
    gc_val = pca.transform(gc_val)
    # save_train(X_train_pca, exp_train_labels, X_test_pca, exp_val_labels)
    #######################

    # Dispatch on the classifier selected by the caller; earlier
    # feature variants are kept commented out for reference.
    if chosen_classifier == "knn":
        # Enable to run knn classifier
        print("Running knn classifier")
        # knn.run_knn(train_images, train_labels, valid_images, valid_labels)
        # knn.run_knn(lbp_train_images, train_labels, lbp_val_images, valid_labels)
        # knn.run_knn(exp_train_images, exp_train_labels, exp_val_images, exp_val_labels)
        knn.run_knn(gc_train, exp_train_labels, gc_val, exp_val_labels)

    elif chosen_classifier == "svm":
        # svm classifier
        # print("Running svm classifier")
        # svm.run_svm(exp_train_images, exp_train_labels, exp_val_images, exp_val_labels)
        print("Running svm classifier on gamma corrected images")
        svm.run_svm(gc_train, exp_train_labels, gc_val, exp_val_labels)

    elif chosen_classifier == "mog":
        # mog classifier
        print("Running mog classifier")
        mog.run_mog(gc_train, exp_train_labels, gc_val, exp_val_labels)

    elif chosen_classifier == "dt":
        print("Running decision tree classifier")
コード例 #6
0
def knn():
    """Run the KNN classifier and return whatever it produces."""
    return run_knn()
コード例 #7
0
    # Drop the label column, then keep only columns 5..14 of the
    # remaining list — presumably the numeric feature columns; TODO
    # confirm why this exact slice.
    col.remove('type')
    col = col[5:15]

    # Standardize the selected columns; the write-back is disabled, so
    # `temp` is currently unused (kept as in the original).
    sc = StandardScaler()
    temp = sc.fit_transform(df_movie[col])
    # df_movie[col] = temp

    # describe() only covers numeric columns, so df_standard is the
    # numeric-only view of df_movie.
    df_standard = df_movie[list(df_movie.describe().columns)]
    return (df_movie, df_standard)

def classify(row):
    """Bucket a row's imdbRating into a class label.

    Returns 0 for [0, 4), 1 for [4, 7), 2 for [7, 10]; implicitly
    None for anything outside [0, 10] (including NaN).
    """
    rating = row['imdbRating']
    if 0 <= rating < 4:
        return 0
    if 4 <= rating < 7:
        return 1
    if 7 <= rating <= 10:
        return 2
    return None

if __name__ == '__main__':
    # Pipeline: preprocess, PCA visualization, KNN, logistic
    # regression, then the two XGBoost runs.
    df_movie, df_standard = data_prepocessing()
    run_pca(df_standard, df_movie)

    # NOTE(review): df_knn aliases df_movie (no copy), so adding the
    # "class" column mutates df_movie as well — confirm intentional.
    df_knn = df_movie
    df_knn["class"] = df_knn.apply(classify, axis=1)
    run_knn(df_knn)

    run_logistic_regression()

    run_xgboost_cornell()
    run_xgboost_imdb(df_knn)
コード例 #8
0
ファイル: run.py プロジェクト: jschmidtnj/cs584
    "worth", "sweet", "enjoyable", "boring", "bad", "dumb",
    "annoying", "female", "male", "queen", "king", "man", "woman", "rain", "snow",
    "hail", "coffee", "tea"]

# Look up the embedding of each visualization word, then project the
# centered vectors onto the top-2 principal directions (via SVD of the
# covariance) for a 2-D scatter.
visualizeIdx = [tokens[word] for word in visualizeWords]
visualizeVecs = wordVectors[visualizeIdx, :]
temp = (visualizeVecs - np.mean(visualizeVecs, axis=0))
covariance = 1.0 / len(visualizeIdx) * temp.T.dot(temp)
U, S, V = np.linalg.svd(covariance)
coord = temp.dot(U[:, 0:2])

# Plot each word at its 2-D coordinate with a light green box.
for i, _ in enumerate(visualizeWords):
    plt.text(coord[i, 0], coord[i, 1], visualizeWords[i],
             bbox=dict(facecolor='green', alpha=0.1))

plt.xlim((np.min(coord[:, 0]), np.max(coord[:, 0])))
plt.ylim((np.min(coord[:, 1]), np.max(coord[:, 1])))

plt.savefig('word_vectors.png')

# Invert the token->index map so KNN indices can be mapped back to
# words. NOTE(review): the cast target List[str] looks wrong —
# tokens.items() yields (str, int) pairs — but cast is a typing no-op
# at runtime, so behavior is unaffected.
k_neighbors: int = 4
inverted_tokens: Dict[int, str] = dict(
    map(reversed, cast(List[str], tokens.items())))  # type: ignore
# For each visualization word, log its k nearest neighbors in the
# full embedding matrix.
for i, word in enumerate(visualizeWords):
    logger.info(f'{i + 1}. running knn for "{word}"')
    vector = visualizeVecs[i]
    closest_indices = run_knn(vector, wordVectors, k_neighbors, tokens)
    logger.info(f'closest indices: {closest_indices}')
    closest_words = [inverted_tokens[index] for index in closest_indices]
    logger.info(f'closest words: {closest_words}')
コード例 #9
0
    # Derive the target class from imdb_score, then drop the raw score
    # so it cannot leak into the features.
    df_movie["class"] = df_movie.apply(set_class, axis=1)

    df_movie = fill_nan(df_movie)
    df_movie = df_movie.drop(columns="imdb_score")

    print(df_movie["director_name"].head())

    # NOTE(review): print() returns None, so col_mask is always None
    # and the next line just prints "None". Likely intended:
    # col_mask = df_movie.isna().any(axis=0)
    col_mask = print(df_movie.isna().any(axis=0))
    print(col_mask)

    # return the processed dataset.
    return df_movie


if __name__ == "__main__":
    # Older pipeline kept for reference (ratings-based preprocessing).
    #     df_movie, df_standard = data_prepocessing()
    #     df_knn = df_movie
    #     df_knn = df_knn.reset_index()
    #     df_knn["class"] = df_knn.apply(classify, axis=1)
    #     classes = list(df_knn["class"])
    #     amazing = classes==['AMAZING']
    #     print(amazing)
    #     df_knn = df_knn.drop(columns="imdbRating")

    # Current pipeline: load the metadata dataset, run KNN, train a
    # random forest, then hand the fitted classifier to run().
    df_movie = load_metadata_dataset()
    run_knn(df_movie)
    # run_logistic_regression(df_movie)
    classifier = run_random_forest(df_movie)
    run(classifier)