Example #1
def main():
    training_data, validation_data, test_data = load_data_wrapper()
    network = NeuralNetwork([784, 16, 16, 10])

    layers = create_layers(network)
    win, wth = initialize_screen(network.sizes)
    draw_network(win, layers)  # draw initial network with random weights

    epochs = 30
    batch_size = 10
    learning_rate = 3.0

    txt = Text(Point(wth / 2, 830), "Initial Weights")
    txt.draw(win)
    # main training loop
    for i in range(epochs):  # for each iteration of training
        biases, weights = network.train_iteration(
            training_data, batch_size, learning_rate, i, test_data=test_data)
        txt.setText("Iteration: {0}".format(i))
        for j in range(1, len(layers)):
            layers[j].update_layer(weights[j - 1], biases[j - 1])
        draw_network(win, layers)

    win.getMouse()
    win.close()
Example #2
    def run_single_step(self):
        data = data_loader.load_data_wrapper()  # provided by data_loader
        dataset = tf.data.Dataset.from_tensor_slices(data).batch(
            self.batch_size)
        iterator = dataset.make_initializable_iterator()
        next_element = iterator.get_next()

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()

            sess.run(iterator.initializer)
            for i in range(320000):
                # print(val)
                if i > 0 and i % (320000 // self.batch_size) == 0:
                    sess.run(iterator.initializer)
                val = sess.run(next_element)
                _, losses = sess.run([self.train_op, self.losses],
                                     feed_dict={self.x: val})
                if i % 4000 == 0:
                    print(losses)
            # save_path = saver.save(sess, "./VAEcnn_model.ckpt")
            # print("Model saved in path: %s" % save_path)
            # zsample =  np.linspace(0,1, 10).reshape(10,1)
            # G_sample = sess.run(self.fully_connected_decoder1, feed_dict={self.z: zsample})
            # gsample = sess.run(self.x_hat, feed_dict={self.fully_connected_decoder1:G_sample})
            # # print(gsample.shape)
            # for i in range(10):
            #     self.draw_grid(8, gsample[i].reshape(8,8) , zsample[i])
        return
Example #3
    def train(self, epochs, batch_size=30, save_interval=50):

        # Load the dataset
        X_train = data_loader.load_data_wrapper()

        # Rescale -1 to 1
        X_train = X_train / 127.5 - 1.
        X_train = np.expand_dims(X_train, axis=3)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Sample noise and generate a batch of new images
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            gen_imgs = self.generator.predict(noise)

            # Train the discriminator (real classified as ones and generated as zeros)
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Train the generator (wants discriminator to mistake images as real)
            g_loss = self.combined.train_on_batch(noise, valid)

            # Plot the progress
            print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
    def run_single_step(self):
        data = data_loader.load_data_wrapper()  # provided by data_loader
        dataset = tf.data.Dataset.from_tensor_slices(data).batch(self.batch_size)
        iterator = dataset.make_initializable_iterator()
        next_element = iterator.get_next()

        with tf.Session() as sess:
            # all_vars= tf.global_variables()
            # def get_var(name):
            #     for i in range(len(all_vars)):
            #         if all_vars[i].name.startswith(name):
            #             return all_vars[i]
            #     return None
            # fc1_var_w = get_var('Fully_Conn1_decoder/weights')
            # fc1_var_b = get_var('Fully_Conn1_decoder/biases')
            # fc2_var_w = get_var('Fully_Conn2_decoder/weights')
            # fc2_var_b = get_var('Fully_Conn2_decoder/biases')
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()

            sess.run(iterator.initializer)
            for i in range(320000):
                # print(val)
                if i % (320000 // self.batch_size) == 0:
                    sess.run(iterator.initializer)
                val = sess.run(next_element)
                _, losses = sess.run([self.train_op, self.losses],
                                     feed_dict={self.x: val})
                if i % 4000 == 0:
                    print(losses)
            # save_path = saver.save(sess, "./VAEmodel.ckpt")
            # print("Model saved in path: %s" % save_path)
            zsample = np.linspace(0, 1, 10).reshape(10, 1)
            G_sample = sess.run(self.fully_connected_decoder1, feed_dict={self.z: zsample})
            gsample = sess.run(self.x_hat, feed_dict={self.fully_connected_decoder1: G_sample})
            # print(gsample.shape)
            for i in range(10):
                self.draw_grid(8, gsample[i].reshape(8, 8), zsample[i])
        return
    def evaluate(self, test_data):
        """Return the number of test inputs for which the network
        outputs the correct result."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        """Return the vector of partial derivatives ∂C_x/∂a for the
        output activations."""
        return output_activations - y


#### Miscellaneous functions
def sigmoid(z):
    """The sigmoid function."""
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z) * (1 - sigmoid(z))
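
# Illustrative sanity check (not part of the original script; the helper name is
# made up): the closed form sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)) should
# agree with a central finite-difference estimate of the slope.
def _check_sigmoid_prime(z=0.5, eps=1e-6):
    numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)
    assert abs(numeric - sigmoid_prime(z)) < 1e-8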


if __name__ == "__main__":
    # example usage: train a network to recognise digits using the mnist data
    # first load the data
    import data_loader
    training_data, _, test_data = data_loader.load_data_wrapper("mnist.pkl.gz")
    # then generate the neuronal network
    net = networks.Network([784, 30, 10])
    # and train it for 15 epochs
    net.SGD(training_data, 15, 10, 0.5, test_data=test_data)
    'total_loss': total_loss,
    'KL_loss': KL_loss,
    'E_loss': energy_loss,
}
def return_intersection(hist_1, hist_2):
    minima = np.minimum(hist_1, hist_2)
    intersection = np.true_divide(np.sum(minima), np.sum(hist_2))
    return intersection
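
# Illustrative usage (not from the original script; the function name and sample
# data below are made up, and numpy is assumed imported as np, as in the
# surrounding code): two histograms drawn from the same distribution give an
# intersection close to 1, disjoint ones give a value near 0.
def _demo_hist_intersection():
    np.random.seed(0)
    h1, _ = np.histogram(np.random.uniform(0, 1, 10000), bins=50, range=(0, 1))
    h2, _ = np.histogram(np.random.uniform(0, 1, 10000), bins=50, range=(0, 1))
    return return_intersection(h1, h2)  # close to 1.0 for matching distributions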

saver = tf.train.Saver()
with tf.Session() as sess:
    if not Is_train:
        saver.restore(sess, './VAE_xy2.ckpt')
    if Is_train:
        writer = tf.summary.FileWriter('./graphs', sess.graph)
        training_data = data_loader.load_data_wrapper()
        tvals = np.repeat(np.linspace(-1.0, 0.9, 32), 10000)
        # shuffle configurations together with their temperature labels
        c = list(zip(training_data, tvals))
        random.shuffle(c)
        training_data, tvals = zip(*c)
        m = tf.placeholder(tf.float32, [datapoints, 64])
        n = tf.placeholder(tf.float32, [datapoints, 1])
        dataset = tf.data.Dataset.from_tensor_slices((m, n))
        dataset = dataset.prefetch(buffer_size=1000)
        dataset = dataset.batch(batch_size)
        iterator = dataset.make_initializable_iterator()
        next = iterator.get_next()

        print("============< WARNING >===============")
        sess.run(tf.global_variables_initializer())
        print("==========< Model DELETED >===========")
Example #7
import numpy as np
import data_loader as dl

train_data = dl.load_data_wrapper()

weight = [[], []]
weak_classifier = []

no_of_train_image = 25  # number of training images
train_image_size = 361  # total pixel number

i_plus = 10
i_minus = no_of_train_image - i_plus


def initialize_weights():
    # AdaBoost-style initialization: the positive and negative classes each get
    # half of the total weight mass, shared equally within the class
    # (a worked check follows below)
    initial_weight_positive = 1.0 / (2 * i_plus)
    initial_weight_negative = 1.0 / (2 * i_minus)

    for i in range(i_plus):
        weight[0].append(initial_weight_positive)

    for i in range(i_plus, no_of_train_image):
        weight[0].append(initial_weight_negative)

    # for i in range(25):
    #     print(i, weight[0][i])


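# Worked check (illustrative, not in the original script; the helper name is made
# up): with i_plus = 10 and i_minus = 15, each positive image starts at
# 1/20 = 0.05 and each negative at 1/30, so the initial weight mass is
# 10 * 0.05 + 15 * (1/30) = 0.5 + 0.5 = 1.
def _check_initial_weight_mass():
    total = i_plus * (1.0 / (2 * i_plus)) + i_minus * (1.0 / (2 * i_minus))
    assert abs(total - 1.0) < 1e-9
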
def update_weight(indexNo, beta, imageNo, ei):
Example #8
import numpy as np
import data_loader as d
import network
from PIL import Image
training_data, validation_data, test_data = d.load_data_wrapper()
net = network.Network([784, 30, 10])
net.SGD(training_data, 22, 10, 3.0, test_data=test_data)

def image_to_number(location):
    x = Image.open(location, 'r').convert('L')  # load the image as greyscale
    y = np.asarray(x.resize((28, 28)).getdata(), dtype=np.float64).reshape((784, 1))
    y = 255 - y   # invert: MNIST digits are light strokes on a dark background
    y = y / 256   # scale pixel values to [0, 1)
    return np.argmax(net.feedforward(y))
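
# Illustrative usage (the file name below is hypothetical, not part of the
# original example): classify a photo of a handwritten digit once the training
# run above has finished.
import os
if os.path.exists("my_digit.png"):
    print("predicted digit:", image_to_number("my_digit.png"))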
Example #9
#
# -------------------
# D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
# D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
# D_loss = D_loss_real + D_loss_fake
# G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))

D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)

mini_batch_size = 10


# mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
data = data_loader.load_data_wrapper()  # provided by data_loader
dataset = tf.data.Dataset.from_tensor_slices(data).batch(mini_batch_size)
iterator = dataset.make_initializable_iterator()
next = iterator.get_next()

def draw_grid(lattice_size=8, angle=[], beta=0):
    height = 640
    width = 640
    image = Image.new(mode='L', size=(height, width), color=255)

    # Draw some lines
    draw = ImageDraw.Draw(image)
    y_start = 0
    y_end = image.height
    step_size = int(image.width / lattice_size)
Example #10
import data_loader
import time
from neutral_network_matrix import neutral_network_m, neutral_network_m2

train_data, validation_data, test_data = data_loader.load_data_wrapper(
    'mnist.pkl.gz')

# nnmodel_ori = neutral_network_m.NeutralNet([784, 100, 10])
# nnmodel_ori.train_SGD(train_data, 40, 10, 3.0, test_data)
# Quadratic cost, 40 epochs: reached 96.24% accuracy, but started at only ~70%.

# nnmodel_CrsEntCost = neutral_network_m2.NeutralNet([784, 100, 10], cost_func=neutral_network_m2.CrossEntropyCost)
# nnmodel_CrsEntCost.train_SGD(train_data, 40, 10, 3.0, test_data)
# Cross-entropy cost, 40 epochs: trains faster and reached 96.48%, starting above 90% accuracy.

# nnmodel_CrsEntCost = neutral_network_m2.NeutralNet([784, 100, 10], cost_func=neutral_network_m2.CrossEntropyCost, init_func='paraminit')
# nnmodel_CrsEntCost.train_SGD(train_data, 40, 10, 3.0, test_data)
# Improved weight initialization, 40 epochs: above 95% within 10 epochs but ended at 96.24%; probably a tuning issue, the result is not quite as good.

# nnmodel_CrsEntCost = neutral_network_m2.NeutralNet([784, 100, 10], cost_func=neutral_network_m2.QuadraticCost, init_func='paraminit')
# nnmodel_CrsEntCost.train_SGD(train_data, 40, 10, 3.0, test_data)
# Improved initialization with quadratic cost: very fast, ~95% from the start, finally reaching 97.7%.
# (A minimal sketch of such cost classes follows below.)
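
# The cost classes referenced above live in neutral_network_m2 and are not shown
# on this page. The sketch below shows what such classes commonly look like, for
# illustration only; the fn/delta interface and every name here are assumptions,
# not the author's actual implementation.
import numpy as np

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

class QuadraticCostSketch(object):
    @staticmethod
    def fn(a, y):
        # squared error for a single sample
        return 0.5 * np.linalg.norm(a - y) ** 2

    @staticmethod
    def delta(z, a, y):
        # output-layer error keeps a sigmoid'(z) factor, so saturated output
        # neurons learn slowly, consistent with the slow start noted above
        return (a - y) * _sigmoid(z) * (1 - _sigmoid(z))

class CrossEntropyCostSketch(object):
    @staticmethod
    def fn(a, y):
        # cross-entropy; nan_to_num guards the 0 * log(0) corner case
        return np.sum(np.nan_to_num(-y * np.log(a) - (1 - y) * np.log(1 - a)))

    @staticmethod
    def delta(z, a, y):
        # the sigmoid'(z) factor cancels, so learning stays fast even when the
        # output neuron starts out badly wrong, matching the faster start above
        return a - y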

nnmodel_CrsEntCost = neutral_network_m2.NeutralNet(
    [784, 100, 10],
    cost_func=neutral_network_m2.QuadraticCost,
    init_func='paraminit')
evaluation_accs, evaluation_costs, train_accs, train_costs = nnmodel_CrsEntCost.train_SGD(
    train_data,
    40,
    10,
Example #11
import data_loader as loader
import network

train, valid, test = loader.load_data_wrapper()
net = network.Network([784, 100, 10])
net.SGD(train, 50, 100, 0.1, test)

l = 8
lattice_shape = (l, l)
nsamples = 100
index_set = range(0, 1, 1)
T_vals = np.linspace(0.01, 2.5, 20)
energy = []
S = []
sp_heat = []
mag = []
mag_err = []

######################################
#########Opening saved data###########
######################################
# pkl_file=open('./DATA/16by16lattices.pkl','rb')
allTlattices = data_loader.load_data_wrapper()  # previously: pickle.load(pkl_file)
# pkl_file.close()
# allTlattices contains 32 lists, one per temperature
# each list holds 5000 spin configurations
for index in index_set:
    temp = T_vals[index]
    lattices = allTlattices
    energy.append(get_energy(lattices))
    thetas = get_parameters.get_magnetization_direction(lattices)
    # sp_heat.append(get_specific_heat(lattices,temp))
    # [mag_mean,mag_std]=get_mean_magnetization(lattices)
    # mag.append(mag_mean)
    # mag_err.append(mag_std)

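# get_energy and get_parameters are defined elsewhere and are not shown here.
# For orientation only, a nearest-neighbour XY-model energy for one l-by-l array
# of spin angles could be computed as in the sketch below (J = 1, periodic
# boundaries); this is an assumption for illustration, not the author's
# implementation. numpy is assumed imported as np, as in the code above.
def xy_energy_sketch(theta, J=1.0):
    # theta: 2-D array of spin angles on the lattice
    right = np.roll(theta, -1, axis=1)   # right neighbour (periodic)
    down = np.roll(theta, -1, axis=0)    # lower neighbour (periodic)
    return -J * np.sum(np.cos(theta - right) + np.cos(theta - down))
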
#################################
######Observing vortices#########