Example no. 1
def query():
    """Perform inference on some examples of documents from our classes."""
    tf.logging.set_verbosity(FLAGS.verbosity)

    classes = get_data(FLAGS.data_dir, classes_only=True)
    FLAGS.output_dim = len(classes)

    queries = np.loadtxt(FLAGS.query_file, dtype=str, delimiter='\n')
    _, x_query, _, query_lengths, _, _ = process_vocabulary(
        None,
        queries,
        FLAGS,
        reuse=True,
        sequence_lengths=(FLAGS.model == 'rnn'))

    if FLAGS.model == 'perceptron':
        model = bag_of_words_perceptron_model
    elif FLAGS.model == 'mlp':
        model = bag_of_words_MLP_model
    elif FLAGS.model == 'rnn':
        model = rnn_model
    else:
        raise ValueError('unknown model')

    classifications = predict(x_query, query_lengths, model, FLAGS)
    for i, query in enumerate(queries):
        print('The model classifies "{}" as a member of the class {}.'.format(
            query, classes['class'][classifications[i]]))
Example no. 2
def test_cnn_lstm():
    props = {
        'nb_epoch': 1,
        'batch_size': 64,
    }

    model, hist = fit_model_cnn_lstm(props, char_limit=1000)

    kbrd = common.keyboardIOS7()
    seq = common.generate_input('man', 100, kbrd)
    pred = common.predict(model, kbrd, seq)
    print(pred)
Example no. 3
def op0():
    """
    generate lowest 15000 confident images
    :return: none
    """
    data = common.Data("mnist/mnist_train/train_data.npy",
                       "mnist/mnist_train/mnist_train_label",
                       "mnist/mnist_test/test_data.npy",
                       "mnist/mnist_test/mnist_test_label", 1, 28)

    res = common.predict('model/1.4.0', 60000, data.train_x, 28)
    common.gen_data(res, data.train_x, data.train_y_no_one_hot, 15000)
Example no. 4
def op1():
    """
    generate fc2 output for svm
    :return: none
    """
    data = common.Data("mnist/mnist_train/train_data.npy",
                       "mnist/mnist_train/mnist_train_label",
                       "mnist/mnist_test/test_data.npy",
                       "mnist/mnist_test/mnist_test_label", 1, 28)

    res = common.predict('CNN/model/SVM1', 60000, data.test_x, 28, "out1")

    # keep only the fully connected layer output from each prediction
    data_fc = np.array([r[0][0][0] for r in res])
    np.save('mnist/mnist_test/fc1_5.npy', data_fc)
Example no. 5
def getModel():
    model = Sequential()
    model.add(
        LSTM(1000,
             activation='tanh',
             input_shape=(4, 1),
             return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(500, activation='tanh', return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(200, activation='tanh', return_sequences=True))
    # note: the last LSTM keeps return_sequences=True, so this Dense layer
    # is applied at every time step and the output is one value per step
    model.add(Dense(1, activation='linear'))
    adam = optimizers.Adam(lr=0.0001)
    model.compile(loss="mean_squared_error", optimizer=adam)
    return model


if __name__ == '__main__':
    file = common.readData()
    scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
    data = common.preprocessData(file, scaler)
    train_data, test_data = common.splitData(data, 0.8)
    common.plotTestAndTrain(train_data, test_data)
    x_train, y_train, x_test, y_test = common.prepareForTraining(
        train_data, test_data)
    model = getModel()
    common.train(x_train, y_train, model, 16, 10, "model01.h5")
    common.test(model, x_train, y_train, x_test, y_test)
    common.predict(model, x_test, y_test, scaler)
Example no. 6
    # f_ = lambda x: x*(x-1)+x
    f_ = lambda x: np.sin(2*np.pi*x)

    # Set coefs to interpolant
    if not dir_bcs:
        A_values = np.array([f_(mesh)]).T
    else:
        A_values = np.array([f_(mesh[1:-1])]).T
        
    f, weights = p1_net(x, mesh, dir_bcs=dir_bcs)

    grad_f = tf.gradients(f, x)[0]
    # Before starting, initialize the variables
    init = tf.global_variables_initializer()
    
    x_ = np.linspace(0, 1, 1001).astype(np.float64)
    x_mid = 0.5*(x_[:-1] + x_[1:])
    with tf.Session() as sess:
        sess.run(init)
        sess.run(weights.assign(A_values))
        
        y_ = predict(sess, f, x, x_)

        dy_dx_ = predict(sess, grad_f, x, x_mid)

    plt.figure()
    plt.plot(x_, y_)
    plt.plot(x_mid, dy_dx_)
    plt.plot(x_mid, np.cos(2*np.pi*x_mid)*2*np.pi)  # analytic derivative of sin(2*pi*x), for comparison
    plt.show()
Example no. 7
    # create a list of gradients for all model parameters
    gparams = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD. Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], gparams[i]) pairs.
    updates = [(param, param - learning_rate * gparam)
               for param, gparam in zip(params, gparams)]

    logger.debug('building training model')
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        })

    logger.info('Training the model ...')
    utils.training.train(classifier, train_model, validate_model, test_model,
                         n_train_batches, n_valid_batches, n_test_batches,
                         n_epochs, learning_rate, patience, patience_increase,
                         improvement_threshold, MODEL, MODEL_ID, logger)

    logger.info('Testing the model ...')
    common.predict(MODEL, source, logger)
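The comment above describes the standard SGD pattern of pairing every parameter with its gradient; a minimal self-contained sketch of the same update rule in plain NumPy (the parameter shapes are hypothetical, for illustration only):

import numpy as np

# Hypothetical parameters and their gradients (as would come from backprop).
params = [np.random.randn(784, 100), np.random.randn(100)]
gparams = [np.random.randn(784, 100), np.random.randn(100)]
learning_rate = 0.01

# One SGD step: loop over all (param, gradient) pairs, exactly as the
# Theano `updates` list does, instead of writing one rule per parameter.
params = [param - learning_rate * gparam
          for param, gparam in zip(params, gparams)]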
Example no. 8
# 3.6.3 Batch processing


import numpy as np
import common as c


x, _ = c.get_data()
network = c.init_network()
W1, W2, W3 = network["W1"], network["W2"], network["W3"]

print(x.shape)
print(x[0].shape)
print(W1.shape)
print(W2.shape)
print(W3.shape)


x, t = c.get_data()
batch_size = 100
accuracy_cnt = 0

for i in range(0, len(x), batch_size):
    x_batch = x[i:i + batch_size]
    y_batch = c.predict(network, x_batch)
    p = np.argmax(y_batch, axis=1)
    accuracy_cnt += np.sum(p == t[i:i + batch_size])

print("Accuracy:" + str(float(accuracy_cnt) / len(x)))
Example no. 9
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from common import predict
    import numpy as np

    x = tf.placeholder(tf.float32, [None, 1])
    NN, params = tooth(x)

    # Before starting, initialize the variables
    init = tf.global_variables_initializer()

    # Launch the graph.
    with tf.Session() as sess:
        sess.run(init)
        # Set the variables as in numpy
        sess.run(params['A1'].assign(np.array([[1., 1., 1.]])))
        sess.run(params['b1'].assign(np.array([0, -0.5, -1.])))

        sess.run(params['A2'].assign(np.array([[2, -4, 2]]).T))
        sess.run(params['b2'].assign(np.array([0.])))

        unit_interval = np.linspace(0, 1, 1000)

        dNN_dx = tf.gradients(NN, x)[0]
        dx = predict(sess, dNN_dx, x, unit_interval)

        plt.figure()
        plt.plot(unit_interval, predict(sess, NN, x, unit_interval))
        plt.plot(unit_interval, dx)
        plt.show()
Example no. 10
    m = 4

    x = tf.placeholder(tf.float32, [None, 1])
    NN, params = saw_tooth(x, m=m)

    # Before starting, initialize the variables
    init = tf.global_variables_initializer()

    A = np.array([[2, -4, 2], [2, -4, 2], [2, -4, 2]]).T
    b = np.array([0, -0.5, -1])

    # Launch the graph.
    with tf.Session() as sess:
        sess.run(init)
        # Set the variables as in numpy
        sess.run(params['A0'].assign(np.array([[1., 1., 1.]])))
        sess.run(params['b0'].assign(np.array([0, -0.5, -1.])))

        sess.run(params['Aout'].assign(np.array([[2, -4, 2]]).T))
        sess.run(params['bout'].assign(np.array([0.])))

        # assign the same weights to all remaining hidden layers so the
        # tooth block composes into a sawtooth
        for i in range(1, m):
            sess.run(params['A%d' % i].assign(A))
            sess.run(params['b%d' % i].assign(b))

        unit_interval = np.linspace(0, 1, 1001)

        plt.figure()
        plt.plot(unit_interval, predict(sess, NN, x, unit_interval))
        plt.show()
Example no. 11
        b0 = np.array([0, 0, 0, -1])
        A1 = np.array([[0, 1, 0, 1]]).T
        b1 = np.array([0])

        # Launch the graph.
        with tf.Session() as sess:
            # Set the variables as in numpy
            sess.run(params['A0'].assign(A0))
            sess.run(params['b0'].assign(b0))

            sess.run(params['A1'].assign(A1))
            sess.run(params['b1'].assign(b1))
            xx = np.linspace(0, pi, 1001)

            plt.figure()
            plt.plot(xx, predict(sess, NN, x, xx))
            plt.plot(xx, np.sin(pi * xx) + np.sin(3 * pi * xx - 1))
            plt.show()

    # -------------------------------------------------------------------------
    # Can we learn something?
    # -------------------------------------------------------------------------
    x = tf.placeholder(tf.float64, [None, 1])

    # NN, params = fourier_series(x, m)
    # NOTE: with hidden layers, the size of the first layer is interpreted
    # as the number of terms in the series.
    # NN, _ = fourier_series(x, m)

    NN = deep_fourier(x, hidden_layer_dims=[m, m])
    # y = NN(x)
Example no. 12
axarr[0].plot(plotdata["batchsize"], plotdata["loss"], 'b--')
axarr[0].set_xlabel('Minibatch number')
axarr[0].set_ylabel('Loss')
axarr[0].set_title('Training Loss')
axarr[1].plot(plotdata["batchsize"], plotdata["error"], 'r--')
axarr[1].set_xlabel('Minibatch number')
axarr[1].set_ylabel('Error')
axarr[1].set_title('Training Error')
f.tight_layout()
plt.show()


# Get test data
features, labels = data_gen(minibatch_size, num_features, num_classes)
print(trainer.test_minibatch({inputs: features, label: labels}))
pred = predict(features, z, inputs)
eq = pred == labels
print("Error (1-acc):", 1.0 - float(sum(eq))/len(eq))

acc_colors = [get_color(label[0], num_classes) for label in labels]
pred_colors = [get_color(y[0], num_classes) for y in pred]
plt.scatter(features[:,0], features[:,1], c=acc_colors, s=55)
plt.scatter(features[:,0], features[:,1], c=pred_colors, marker="x")
plt.show()

# Visualize boundaries
print("Displaying decision boundaries")
plt.scatter(features[:,0], features[:,1], c=acc_colors, s=55)
part, part_y = particles(500, 5, z, inputs, num_features)
part_colors = [get_color(y[0], num_classes) for y in part_y]
plt.scatter(part[:,0], part[:,1], c=part_colors, marker="x", alpha=0.5)
Example no. 13
        # Set the variables as in numpy
        # For first layer
        sess.run(params['A0'].assign(np.array([[1., 1., 1.]])))
        sess.run(params['b0'].assign(np.array([0, -0.5, -1.])))

        sess.run(params['Ac'].assign(Ac))
        sess.run(params['bc'].assign(bc))

        sess.run(params['Ag'].assign(Ag))
        sess.run(params['bg'].assign(bg))
        # Collapse last
        Aout = np.array([np.r_[1., -1. / (2.**(2 * np.arange(1, m + 1)))]]).T
        bout = np.array([0])

        sess.run(params['Aout'].assign(Aout))
        sess.run(params['bout'].assign(bout))

        # for w in tf.trainable_variables():
        #     print(sess.run(w))

        unit_interval = np.linspace(0, 1, 1001).astype(np.float64)

        yL = predict(sess, NN, x, unit_interval)
        yY = Yaro(unit_interval, m)
        print(np.linalg.norm(yL - yY, np.inf))

        plt.figure()
        plt.plot(unit_interval, yL)
        plt.plot(unit_interval, yY)
        plt.show()
Example no. 14
def train(conv_model_name="../Models/model_conv47.hf5",
          bilstm_model_name="../Models/model_lstm47.hf5",
          corpus_path="http://localhost:8983/solr/mikes",
          excluded_file="excluded.txt"):
    excluded = []
    with open(excluded_file, encoding="utf-8", mode="r") as file:
        for w in file:
            excluded.append(w.strip("\n"))

    word2vec, word_len, char2vec, char_len, tag2vec, tag_len, vec2tag, example_array, excluded, test_array \
        = prepare_corpus(corpus_path+"/select", 0.0, excluded)

    print(len(excluded))
    print(len(example_array))
    print(len(test_array))

    # file = open(excluded_file, encoding="utf-8", mode="w")
    # for w in excluded:
    #     file.write(w+"\n")
    # file.close()

    d, l = prepare_dict_simple(corpus_path + "/select")

    test_array = np.random.permutation(test_array)
    (_, _, test_xw), (_, _, test_xc), (_, _, test_y), (_, _, test_sw) = \
        prepare_sets(word2vec, word_len, char2vec, char_len, tag2vec, tag_len, vec2tag, test_array[:1000], (0.0, 0.0),
                     (0, len(test_array[:1000])))

    es = EarlyStopping(monitor='val_loss',
                       mode='min',
                       verbose=1,
                       patience=3,
                       restore_best_weights=True)
    print(word_len, char_len)
    model = create_conv(word_len, char_len, vec2tag, 256, 'lecun_uniform',
                        "Nadam", conv_model_name)
    for i in range(0):  # NOTE: range(0) yields nothing, so this training loop is disabled
        print(
            str(i) +
            ". iteration\n-----------------------------------------\n")

        (train_xw, valid_xw, _), (train_xc, valid_xc, _), (train_y, valid_y, _), _ = \
            prepare_sets(word2vec, word_len, char2vec, char_len, tag2vec, tag_len, vec2tag, example_array,
                         (1.0, 0.0), (i*10000, (i+1)*10000))

        model.fit([train_xw, train_xc],
                  train_y,
                  batch_size=64,
                  epochs=32,
                  validation_split=0.1,
                  callbacks=[es])

        #scores = model.evaluate([test_xw, test_xc], test_y)
        #print(scores)

        if i in (0, 15, 31, 47):
            model.save_weights('model_conv' + str(i) + '.hf5')

    model1 = create_lstm(word_len, char_len, vec2tag, 256, 'lecun_uniform',
                         'Nadam', bilstm_model_name)
    for i in range(0):  # NOTE: range(0) yields nothing, so this training loop is disabled
        print(
            str(i) +
            ". iteration\n-----------------------------------------\n")

        (train_xw, valid_xw, _), (train_xc, valid_xc, _), (train_y, valid_y, _), _ = \
            prepare_sets(word2vec, word_len, char2vec, char_len, tag2vec, tag_len, vec2tag, example_array,
                         (1.0, 0.0), (i*10000, (i+1)*10000))
        model1.fit([train_xw, train_xc],
                   train_y,
                   batch_size=64,
                   epochs=32,
                   validation_split=0.1,
                   callbacks=[es])

        #scores = model1.evaluate([test_xw, test_xc], test_y)
        #print(scores)

        if i in (0, 15, 31, 47):
            model1.save_weights('model_lstm' + str(i) + '.hf5')

    (_, _, test_xw), (_, _, test_xc), (_, _, test_y), (_, _, test_sw) = \
        prepare_sets(word2vec, word_len, char2vec, char_len, tag2vec, tag_len, vec2tag, test_array, (0.0, 0.0),
                     (0, len(test_array)))

    scores = model.evaluate([test_xw, test_xc], test_y)
    print(scores)
    scores = model1.evaluate([test_xw, test_xc], test_y)
    print(scores)
    hits = 0
    pred = model.predict([test_xw, test_xc])
    pred1 = model1.predict([test_xw, test_xc])
    bywho = [0, 0, 0, 0, 0, 0, 0]  # abc, ab, ac, bc, a, b, c
    for i in range(len(test_array)):
        if i % 100 == 1:
            print(str(i) + ". sentence, " + str(hits * 100 / i) + " % acc., ")
        pred2 = predict(test_sw[i], d, l)
        if pred[i].argmax(axis=-1) == pred1[i].argmax(axis=-1):
            chosen = vec2tag[pred[i].argmax(axis=-1)]
        else:
            chosen = pred2
        if chosen == test_array[i][1]:
            hits += 1

        if test_array[i][1] == vec2tag[pred[i].argmax(axis=-1)]:
            if test_array[i][1] == vec2tag[pred1[i].argmax(axis=-1)]:
                if test_array[i][1] == pred2:
                    bywho[0] += 1
                else:
                    bywho[1] += 1
            elif test_array[i][1] == pred2:
                bywho[2] += 1
            else:
                bywho[4] += 1
        elif test_array[i][1] == vec2tag[pred1[i].argmax(axis=-1)]:
            if test_array[i][1] == pred2:
                bywho[3] += 1
            else:
                bywho[5] += 1
        elif test_array[i][1] == pred2:
            bywho[6] += 1
    print(hits / len(test_array))
    print(bywho)
Example no. 15
 
param_man.initalise(
    run_id=MODEL_ID,
    default_freq=patience,
    params=new_params,
    imgs=visualise_weights,
    cost=visualise_cost,
    updates=visualise_updates
)

# get the training, validation and testing functions for the model
logger.debug('building fine-tuning functions')
train_model, validate_model, test_model = dbn.buildFinetuneFunctions(
    datasets=datasets,
    batch_size=batch_size,
    learning_rate=finetune_lr
)

logger.debug('training')

utils.training.train(dbn, train_model, validate_model, test_model,
                     n_train_batches, n_valid_batches, n_test_batches,
                     training_epochs, finetune_lr,
                     patience, patience_increase, improvement_threshold,
                     MODEL, MODEL_ID, logger,
                     visualise=param_man)

logger.info('Testing the model ...')
common.predict(os.path.join(MODEL, MODEL_ID + ".pkl"), source, logger)
Example no. 16
threshold = 15
common_path = "../mnist"
fig = 28
model_path = 'model/'
parent_model = '1.4.0'
parent_model_iter = 60000

logger = common.create_logger(
    'child', log_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
data = common.Data(common_path + "/mnist_train/train_data.npy",
                   common_path + "/mnist_train/mnist_train_label",
                   common_path + "/mnist_test/test_data.npy",
                   common_path + "/mnist_test/mnist_test_label", 1, fig)

# reference from parent model
res = common.predict(model_path + parent_model, parent_model_iter, data.test_x,
                     fig)

conf = []
conf_label = []
candidate = []
candidate_label = []
err = 0

for i in range(len(res)):
    m = np.max(res[i][0][0][0])
    # gather all scores below the maximum to find the second-highest confidence
    temp = [j for j in res[i][0][0][0] if j < m]
    m_ = np.max(temp)
Example no. 17
parser.add_argument('--top_k', default=[5], type=int, help='top K classes')
parser.add_argument('--category_names',
                    default=['cat_to_name.json'],
                    help='classes to real name')
parser.add_argument('--gpu', action='store_true', help='predict in GPU mode')

args = parser.parse_args()

images = args.images
checkpoint = args.checkpoint[0]
top_k = args.top_k[0]
category_names = args.category_names[0]
gpu = args.gpu

print('checkpoint: {}'.format(checkpoint))
print('TOP K: {}'.format(top_k))
print('category names: {}'.format(category_names))
print('training on GPU: {}'.format(gpu))

model, class_to_idx = common.rebuild_model(checkpoint)

with open(category_names, 'r') as f:
    cat_to_name = json.load(f)

for image in images:
    values, classes = common.predict(image, model, class_to_idx, gpu, top_k)
    flowers = [cat_to_name[cls] for cls in classes]
    print('image {}'.format(image))
    for flower, prob in zip(flowers, values):
        print('{} ---> {}'.format(flower, prob))
    print('-' * 10)
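A hypothetical invocation of this script (the script name, file paths, and positional-argument order are assumptions, for illustration only):

    python predict.py image1.jpg image2.jpg checkpoint.pth --top_k 5 --category_names cat_to_name.json --gpu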
Example no. 18
# def init_network():
#     with open("sample_weight.pkl", "rb") as f:
#         network = pickle.load(f)
#     return network
#
#
# def predict(network, x):
#     W1, W2, W3 = network["W1"], network["W2"], network["W3"]
#     b1, b2, b3 = network["b1"], network["b2"], network["b3"]
#     a1 = np.dot(x, W1) + b1
#     z1 = sig.sigmoid(a1)
#     a2 = np.dot(z1, W2) + b2
#     z2 = sig.sigmoid(a2)
#     a3 = np.dot(z2, W3) + b3
#     y = sm.softmax(a3)
#     return y
import numpy as np
import common as c

x, t = c.get_data()
network = c.init_network()
accuracy_cnt = 0
for i in range(len(x)):
    y = c.predict(network, x[i])
    p = np.argmax(y)
    if p == t[i]:
        accuracy_cnt += 1
print("accuracy:" + str(float(accuracy_cnt) / len(x)))
Example no. 19
    print("There is no a gpu device available")


message_cuda = "cuda is available" if torch.cuda.is_available() else "cuda is not available"

print(message_cuda)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = load_checkpoint(checkpoint)

model.to(device)

class_to_idx = model.class_to_idx

probs, classes = predict(path_to_image, model, top_k)

if not category_names:
    print(classes)
    print(probs)

else:

    name_classes = []

    with open(category_names, 'r') as f:
        cat_to_name = json.load(f)

    for i in classes:
        flower_key = recover_key(class_to_idx, i)
        name_classes.append(cat_to_name[flower_key])