Example #1
import math

import matplotlib.pyplot as plt
import numpy as np

# Assumes load_data, tf_linear and sk_linear are defined elsewhere in the project.
def main():
    tf_MSE = []
    sk_MSE = []
    tf_RUNTIME = []
    sk_RUNTIME = []
    times = []
    counts = 25
    for i in range(counts):
        X_train, X_test, y_train, y_test = load_data('../iris.data')
        tf_mse, tf_runtime = tf_linear(X_train, X_test, y_train, y_test)
        sk_mse, sk_runtime = sk_linear(X_train, X_test, y_train, y_test)
        tf_MSE.append(tf_mse)
        sk_MSE.append(sk_mse)
        tf_RUNTIME.append(tf_runtime)
        sk_RUNTIME.append(sk_runtime)
        times.append(i)

    tf_RMSE = [math.sqrt(n) for n in tf_MSE]
    sk_RMSE = [math.sqrt(n) for n in sk_MSE]
    plt.plot(times, tf_RMSE)
    plt.plot(times, sk_RMSE)
    print(tf_MSE)
    print(sk_MSE)
    plt.xticks(np.arange(min(times), max(times) + 1, 1.0))
    plt.xlabel('test id')
    plt.ylabel('root mean squared error')
    plt.legend(['tensorflow RMSE', 'scikit-learn RMSE'])
    plt.grid(True)
    plt.savefig('compare_rmse' + str(counts) + '.png')
    plt.show()

    plt.plot(times, tf_RUNTIME)
    plt.plot(times, sk_RUNTIME)
    plt.xticks(np.arange(min(times), max(times) + 1, 1.0))
    plt.xlabel('test id')
    plt.ylabel('run time')
    plt.legend(['tensorflow run time', 'scikit-learn run time'])
    plt.grid(True)
    plt.savefig('compare_runtime' + str(counts) + '.png')
    plt.show()
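load_data, tf_linear and sk_linear are helpers from the surrounding project. As a reference for the contract the comparison above expects, here is a minimal sketch of an sk_linear returning the assumed (mse, runtime) pair; the body is an assumption, not the project's actual code:

import time

from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error


def sk_linear(X_train, X_test, y_train, y_test):
    # time an ordinary least-squares fit/predict cycle and report its MSE
    start = time.time()
    model = LinearRegression()
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return mean_squared_error(y_test, y_pred), time.time() - start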
Example #2
import torch as th
import torch.nn as nn
import geoopt as gt  # geoopt alias assumed from the gt.optim.RiemannianAdam call

# Assumes HyperIM, data, train, evalu and config values such as vocab_size,
# embed_dim, label_num, word_num, epoch, if_gru, if_log, cuda_device and
# data_path are defined elsewhere in the project.
train_batch_size = 50
test_batch_size = 50
lr = 1e-4

if __name__ == '__main__':
    # use pre-trained embeddings if available
    word_embed = th.Tensor(vocab_size, embed_dim)
    label_embed = th.Tensor(label_num, embed_dim)

    net = HyperIM(word_num,
                  word_embed,
                  label_embed,
                  hidden_size=embed_dim,
                  if_gru=if_gru)
    net.to(cuda_device)

    loss = nn.BCEWithLogitsLoss()
    optim = gt.optim.RiemannianAdam(net.parameters(), lr=lr)

    train_data_loader, test_data_loader = data.load_data(
        data_path, train_batch_size, test_batch_size, word_num)

    train.train(epoch,
                net,
                loss,
                optim,
                if_neg_samp=False,
                train_data_loader=train_data_loader)
    evalu.evaluate(net, if_log=if_log, test_data_loader=test_data_loader)
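nn.BCEWithLogitsLoss fuses a sigmoid with binary cross-entropy in one numerically stable op, which matches the multi-label setting here: the network emits one logit per label and the targets are multi-hot vectors. The free configuration names are defined elsewhere in the project; a purely hypothetical set of values, only to make the shapes concrete:

# hypothetical configuration -- the real values live in the project's config
vocab_size = 50000   # rows of word_embed
embed_dim = 100      # embedding and hidden dimensionality
label_num = 103      # rows of label_embed, one logit per label
word_num = 500       # tokens kept per document
epoch = 10
if_gru = True        # GRU cell instead of a plain RNN
if_log = True
data_path = './data'
cuda_device = th.device('cuda:0' if th.cuda.is_available() else 'cpu')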
Example #3
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from matplotlib.colors import ListedColormap

# Assumes load_data and the Classifer class are defined elsewhere in the project.
def main():
    X_train, X_test, y_plt_train, y_plt_test, y_train, y_test = load_data(
        '../iris.data')

    X = tf.placeholder(tf.float32, [None, 2])
    y = tf.placeholder(tf.float32, [None, 3])

    w = tf.Variable(tf.random_normal([2, 3], mean=0.0, stddev=1.0),
                    trainable=True,
                    dtype=tf.float32)
    b = tf.Variable(tf.zeros([3]), trainable=True)
    y_pred = tf.add(tf.matmul(X, w), b)
    y_prob = tf.nn.softmax(y_pred)

    res = tf.argmax(y_prob, 1)
    classifier = Classifer()
    classifier.predict = res
    cost = -tf.reduce_sum(y * tf.log(y_prob))
    learning_rate = 0.01
    train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    correct_prediction = tf.equal(tf.argmax(y_prob, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    init = tf.global_variables_initializer()

    with tf.Session() as session:
        print('start')
        session.run(init)
        X_tr, y_tr = X_train, y_train
        batch = {X: X_tr, y: y_tr}
        num_steps = 1000
        for i in range(num_steps):
            train.run(batch)
            if i % (num_steps // 10) == 0:
                cur_accuracy = accuracy.eval(batch)
                print('step %5d : accuracy = %8.3f' % (i, cur_accuracy))

        batch = {X: X_test, y: y_test}
        final_accuracy = accuracy.eval(batch)
        print('accuracy = %8.3f' % final_accuracy)
        print(res.eval(batch))
        ws = session.run(w)
        bs = session.run(b)
        X_a = np.vstack((X_train, X_test))
        y_a = np.hstack((y_plt_train, y_plt_test))

        resolution = 0.02
        test_idx = range(105, 150)
        markers = ('s', 'x', 'o', '^', 'v')
        colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
        cmap = ListedColormap(colors[:len(np.unique(y_a))])
        # plot the decision surface
        x1_min, x1_max = X_a[:, 0].min() - 1, X_a[:, 0].max() + 1
        x2_min, x2_max = X_a[:, 1].min() - 1, X_a[:, 1].max() + 1
        xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                               np.arange(x2_min, x2_max, resolution))
        Z = res.eval(
            {X: np.array([xx1.ravel(), xx2.ravel()], dtype="float32").T})

        Z = Z.reshape(xx1.shape)
        plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
        plt.xlim(xx1.min(), xx1.max())
        plt.ylim(xx2.min(), xx2.max())
        # plot class samples
        for idx, cl in enumerate(np.unique(y_a)):
            plt.scatter(x=X_a[y_a == cl, 0],
                        y=X_a[y_a == cl, 1],
                        alpha=0.8,
                        c=cmap(idx),
                        marker=markers[idx],
                        label=cl)
        # highlight test samples
        if test_idx:
            X_test, y_test = X_a[test_idx, :], y_a[test_idx]
            plt.scatter(X_test[:, 0],
                        X_test[:, 1],
                        facecolors='none',
                        edgecolors='black',
                        alpha=1.0,
                        linewidth=1,
                        marker='o',
                        s=55,
                        label='test set')

        plt.show()
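The hand-rolled cross-entropy above, -tf.reduce_sum(y * tf.log(y_prob)), produces NaN as soon as the softmax underflows to exactly zero for the true class. The TF1 API used here ships a fused, numerically stable op; a minimal drop-in sketch against the same graph (y_pred holds the raw logits):

    # numerically stable: works on the raw logits instead of log(softmax(...))
    cost = tf.reduce_sum(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_pred))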
Example #4
import time

from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score

from util.data import load_data

# measure the total runtime
start_time = time.time()

# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
# Load data and normalize
train_X, test_X, train_Y, test_Y = load_data('iris.data')

#fit the model
regr = linear_model.LinearRegression()
regr.fit(train_X, train_Y)

y_pred = regr.predict(test_X)

print('Coefficients:', regr.coef_)
print('Intercept:', regr.intercept_)
print("Mean squared error: %.5f" % mean_squared_error(test_Y, y_pred))
print('R² score: %.5f' % r2_score(test_Y, y_pred))
print('Runtime: %.5f s' % (time.time() - start_time))
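load_data lives in the project's util.data module and is not shown. A minimal sketch of a compatible implementation for iris.data, assuming comma-separated rows of four numeric features; the feature/target column choice and the normalization are assumptions:

import numpy as np
from sklearn.model_selection import train_test_split


def load_data(path):
    # hypothetical loader: petal length as the feature, petal width as target
    raw = np.genfromtxt(path, delimiter=',', usecols=(0, 1, 2, 3))
    X, y = raw[:, 2:3], raw[:, 3]
    X = (X - X.mean(axis=0)) / X.std(axis=0)  # normalize the feature
    return train_test_split(X, y, test_size=0.3)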
Example #5
import os
import time

import lasagne
import theano
import theano.tensor as T

# Assumes load_data, build_generator, build_discriminator, DiscriminatorParams,
# get_result_directory_path, FileLogger, show_image, HIDDEN_VARS_NUMBER and
# num_epochs are defined elsewhere in the project.
def main():
    train_x, test_x = load_data()

    train_size = len(train_x)

    train_data = theano.shared(train_x.astype(theano.config.floatX),
                               borrow=True)

    batch_size = 100

    index = T.iscalar("index")

    data_batch = train_data[index:index + batch_size, :]

    random_streams = theano.tensor.shared_randomstreams.RandomStreams()

    z = random_streams.normal((batch_size, HIDDEN_VARS_NUMBER))

    l_x_generated = build_generator(batch_size, z)
    x_generated = lasagne.layers.get_output(l_x_generated)

    params = DiscriminatorParams()

    l_p_data = build_discriminator(batch_size, data_batch, params)
    l_p_gen = build_discriminator(batch_size, x_generated, params)

    log_p_data = T.nnet.logsoftmax(lasagne.layers.get_output(l_p_data))
    log_p_gen = T.nnet.logsoftmax(lasagne.layers.get_output(l_p_gen))

    loss_data = -log_p_data[:, 1].mean()
    loss_gen = -log_p_gen[:, 0].mean()
    loss1 = loss_data + loss_gen

    params1 = params.get_list()

    updates = lasagne.updates.adam(loss1, params1, learning_rate=0.002)

    train_discrim_fn = theano.function([index], [loss_data, loss_gen],
                                       updates=updates)

    x_gen_fn = theano.function([], x_generated)

    loss2 = -log_p_gen[:, 1].mean()

    params2 = lasagne.layers.get_all_params(l_x_generated, trainable=True)

    updates = lasagne.updates.adam(loss2, params2, learning_rate=0.0005)

    train_gen_fn = theano.function([], loss2, updates=updates)

    base_path = get_result_directory_path("dcgan_mnist")
    logger = FileLogger(base_path, "main")

    logger.log("Starting training...")
    for epoch in range(num_epochs):

        start_time = time.time()
        for offset in range(0, train_size, batch_size):
            loss_data, loss_gen = train_discrim_fn(offset)
            loss2 = train_gen_fn()
            logger.log(
                "loss_data {:.5f} loss_gen {:.5f}, loss2 {:.5f} ".format(
                    float(loss_data), float(loss_gen), float(loss2)))

        logger.log("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs,
            time.time() - start_time))

        show_image(x_gen_fn(),
                   os.path.join(base_path, "samples_{}.png".format(epoch + 1)))

    logger.close()
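For orientation: the discriminator is a two-way classifier whose log-softmax column 1 reads "real" and column 0 reads "generated". loss1 = -E[log p(real | x)] - E[log p(generated | G(z))] is the standard discriminator objective, while loss2 = -E[log p(real | G(z))] is the non-saturating generator loss, chosen because it gives the generator stronger gradients early in training than minimizing log(1 - D(G(z))).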
Example #6
import os
import time

import lasagne
import theano
import theano.tensor as T

# Assumes load_data, build_encoder, build_decoder, get_result_directory_path,
# show_image, HIDDEN_VARS_NUMBER and num_epochs are defined elsewhere in the project.
def main():
    train_x, test_x = load_data()

    train_size = len(train_x)

    train_data = theano.shared(train_x.astype(theano.config.floatX),
                               borrow=True)

    batch_size = 1000

    index = T.iscalar("index")

    data_batch = train_data[index:index + batch_size, :]

    random_streams = theano.tensor.shared_randomstreams.RandomStreams()
    l_batch_mean_z, l_sd2_z = build_encoder(batch_size, data_batch)

    batch_mean_z = lasagne.layers.get_output(l_batch_mean_z)
    batch_log_sd2_z = lasagne.layers.get_output(l_sd2_z)
    batch_log_sd2_z = T.clip(batch_log_sd2_z, -10, 10)

    eps = random_streams.normal((batch_size, HIDDEN_VARS_NUMBER))

    z = batch_mean_z + eps * T.exp(batch_log_sd2_z / 2)
    l_data_mean, l_data_log_sd2 = build_decoder(z)

    data_mean = lasagne.layers.get_output(l_data_mean)
    data_log_sd2 = lasagne.layers.get_output(l_data_log_sd2)
    data_log_sd2 = T.clip(data_log_sd2, -10, 10)

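    # negative KL(q(z|x) || N(0, I)) per latent dimension, up to the additive
    # constant 1/2; the Gaussian pixel log-likelihood below similarly drops
    # its log(2*pi)/2 constant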
    kl = (batch_log_sd2_z - batch_mean_z**2 - T.exp(batch_log_sd2_z)) / 2.0

    pixel_loss = (-(data_mean - data_batch)**2 * T.exp(-data_log_sd2) -
                  data_log_sd2) / 2.0
    lower_bound = T.sum(kl, axis=1) + T.sum(pixel_loss, axis=1)
    loss = -lower_bound.mean()

    params = lasagne.layers.get_all_params(
        [l_data_mean, l_data_log_sd2, l_batch_mean_z, l_sd2_z], trainable=True)

    grads = theano.grad(loss, params)

    GRAD_CLIP = 100

    scaled_grads = lasagne.updates.total_norm_constraint(grads, GRAD_CLIP)

    grad_sum = None
    for g in scaled_grads:
        if grad_sum is None:
            grad_sum = (g**2).mean()
        else:
            grad_sum = grad_sum + (g**2).mean()

    updates = lasagne.updates.adam(scaled_grads, params, learning_rate=0.001)

    train_fn = theano.function([index], [loss, grad_sum], updates=updates)

    decode_fn = theano.function([index], [data_mean, data_log_sd2])

    base_path = get_result_directory_path("vae_mnist")

    print("Starting training...")
    for epoch in range(num_epochs):

        train_err = 0
        train_batches = 0
        start_time = time.time()
        grads = []
        for i in range(0, train_size, batch_size):
            loss, grad = train_fn(i)
            print("loss: {}".format(loss))
            train_err += loss
            grads.append(grad)
            train_batches += 1

        # print("grads:{}".format(grads))

        print("Epoch {} of {} took {:.3f}s".format(epoch + 1, num_epochs,
                                                   time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))

        show_image(decode_fn, train_x,
                   os.path.join(base_path, 'samples_{}.png'.format(epoch + 1)))
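The training objective is the usual evidence lower bound for a VAE with a diagonal-Gaussian encoder and decoder, up to the additive constants dropped in kl and pixel_loss:

    lower_bound = E_q(z|x)[log p(x|z)] - KL(q(z|x) || N(0, I))

The reparameterization z = mean + eps * exp(log_sd2 / 2), with eps ~ N(0, I), is what lets gradients flow through the sampling step into the encoder parameters.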
Example #7
import time

import matplotlib.pyplot as plt
import numpy as np

# Assumes load_data, tf_l and sk_l are defined elsewhere in the project.
def main():
    tf_accuracies = []
    sk_accuracies = []
    times = []
    tf_time = []
    sk_time = []
    counts = 10
    for i in range(counts):
        X_train, X_test, y_plt_train, y_plt_test, y_train, y_test = load_data(
            '../iris.data')
        tf_start_time = time.time()
        tf_ac = tf_l(X_train, X_test, y_plt_train, y_plt_test, y_train, y_test)
        sk_start_time = tf_end_time = time.time()

        sk_ac = sk_l(X_train, X_test, y_plt_train, y_plt_test, y_train, y_test)
        sk_end_time = time.time()
        tf_time.append(tf_end_time - tf_start_time)
        sk_time.append(sk_end_time - sk_start_time)
        print('tensorflow %s seconds' % (tf_end_time - tf_start_time))
        print('sk %s seconds' % (sk_end_time - sk_start_time))
        tf_accuracies.append(tf_ac)
        sk_accuracies.append(sk_ac)
        times.append(i)
    
    tf_large = 0
    equal = 0
    tf_small = 0
    for i in range(counts):
        if (tf_accuracies[i] - sk_accuracies[i]) > 1e-5:
            tf_large += 1
        elif abs(tf_accuracies[i] - sk_accuracies[i]) <= 1e-5:
            equal += 1
        else:
            tf_small += 1
    print()
    print('tensorflow better: ' + str(tf_large) + ' equal: ' + str(equal) + ' sk better: ' + str(tf_small))
    plt.gca().set_prop_cycle(color=['red', 'green'])
    plt.plot(times, tf_accuracies)
    plt.plot(times, sk_accuracies)
    plt.xticks(np.arange(min(times), max(times)+1, 1.0))
    plt.xlabel('test id')
    plt.ylabel('accuracy')
    plt.ylim(0, 1.5)
    plt.legend(['tensorflow accuracy', 'scikit-learn accuracy'])
    plt.grid(True)
    plt.savefig('compare' + str(counts) + '.png')
    plt.show()
    
    # convert seconds to milliseconds
    tf_time = [x * 1000 for x in tf_time]
    sk_time = [x * 1000 for x in sk_time]
    plt.gca().set_prop_cycle(color=['red', 'green'])
    plt.plot(times, tf_time)
    plt.plot(times, sk_time)
    plt.xticks(np.arange(min(times), max(times)+1, 1.0))
    plt.xlabel('test id')
    plt.ylabel('time /ms')
    plt.legend(['tensorflow', 'scikit-learn'])
    plt.grid(True)
    plt.savefig('compare_time' + str(counts) + '.png')
    plt.show()
    times = np.array(times) + 1
    ax = plt.subplot(111)
    ax.bar(times + 0.25, tf_time, width=0.4, color='b', label='tensorflow')
    ax.bar(times - 0.25, sk_time, width=0.4, color='r', label='sklearn')
    plt.legend(loc='upper left')
    plt.title('Time Comparison of Logistic Regression')
    plt.xlabel('test id')
    plt.ylabel('time /ms')
    plt.savefig('compare_time_bar' + str(counts) + '.png')
    plt.show()
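As in Example #1, tf_l and sk_l are project helpers that train one classifier and return its test accuracy. A minimal sketch of a compatible sk_l, with the signature taken from the call above and everything else an assumption (scikit-learn's LogisticRegression on the integer labels y_plt_*):

from sklearn.linear_model import LogisticRegression


def sk_l(X_train, X_test, y_plt_train, y_plt_test, y_train, y_test):
    # fit on the integer class labels; score() returns mean accuracy
    model = LogisticRegression()
    model.fit(X_train, y_plt_train)
    return model.score(X_test, y_plt_test)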