Example No. 1
# Assumed imports and window setup for this snippet; Adaline, train_data and
# labels come from the project's own modules.
import pygame
import numpy as np

pygame.init()
# The window size is an assumption: 7 cells of 20 px plus 5 px margins.
screen = pygame.display.set_mode((180, 180))
pygame.display.set_caption("Hand written digits predictor")

# width and height of each cell in the grid, in pixels
width, height = 20, 20

# number of cells per row and per column of the drawing grid
w = 7
h = 7

# margin of the grid
margin = 5

# grid 
grid = np.zeros((w, h))

# one Adaline unit per digit (one-vs-all), each taking the flattened w*h grid as input
net = [Adaline(w*h, 0.1, 1000) for _ in range(10)]
# training samples returned by the project's train_data() helper
num = train_data()

# train one-vs-all: the i-th unit learns to recognize digit i
for i in range(10):
	net[i].train(num, labels(i))

# main loop
while True:
	screen.fill(pygame.Color("black"))
	for event in pygame.event.get():
		if event.type == pygame.QUIT:
			exit(0)
		elif event.type == pygame.MOUSEBUTTONDOWN:
			pos = pygame.mouse.get_pos()
			print('Current position of the mouse: {}'.format(pos))
			column = pos[0] // (width + margin)
			row = pos[1] // (height + margin)
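
The snippet above cuts off before any digit is actually predicted. A minimal sketch of that step, assuming each Adaline unit exposes a predict() method (hypothetical name) that returns its activation for a flattened 7x7 input:

def predict_digit(grid, net):
	# Flatten the 7x7 grid into a w*h vector and ask every one-vs-all
	# Adaline unit for its activation; the strongest response wins.
	x = grid.flatten()
	scores = [unit.predict(x) for unit in net]  # predict() is an assumed API
	return int(np.argmax(scores))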
Example No. 2
# Assumed module-level imports for this snippet (TensorFlow 1.x API).
import argparse
import tensorflow as tf

def main():

    # The early return below means nothing after it ever runs; the body is kept
    # for reference. net, dataset, w, X, S, Y, loss_op, update_step, n_gru_vox
    # and n_deconvfilter are assumed to be defined at module level.
    print("Please run net.py!")
    return

    parser = argparse.ArgumentParser(description='Make everything 3D')
    parser.add_argument('--batch-size',
                        dest='batch_size',
                        help='Batch size',
                        default=120,
                        type=int)
    parser.add_argument('--iter',
                        dest='iter',
                        help='Number of iterations',
                        default=1000,
                        type=int)
    parser.add_argument('--weights',
                        dest='weights',
                        help='Pre-trained weights',
                        default=None)
    args = parser.parse_args()
    print('Called with args:', args)

    with tf.name_scope("Dataset"):
        x_train = dataset.train_data()
        y_train = dataset.train_labels()
    print("Finished reading dataset.")

    forward_pass = net.encoder_gru()

    decoder_pass = net.decoder()

    logits = decoder_pass

    prediction = tf.nn.softmax(logits)

    # Initialize the variables
    init = tf.global_variables_initializer()

    # Start training
    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)

        # Merge all the summaries and set up a FileWriter for ./train
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter('./train', sess.graph)

        iter = 0
        print("Started training.")
        for image_hash in x_train.keys():
            iter += 1

            # Building tf.zeros_like / tf.truncated_normal here adds new graph
            # nodes on every iteration; a plain NumPy zeros array of the same
            # shape would do (see the sketch after this example).
            initial_state = tf.zeros_like(
                tf.truncated_normal(
                    [1, n_gru_vox, n_deconvfilter[0], n_gru_vox, n_gru_vox],
                    stddev=0.5))
            initial_state = initial_state.eval()

            for image in x_train[image_hash]:
                image = tf.convert_to_tensor(image)
                image = tf.reshape(image, [1, 127, 127, 3])
                image = image.eval()
                # Run forward_pass directly (not wrapped in a list) so the
                # returned state keeps the shape the S placeholder expects.
                initial_state = sess.run(forward_pass,
                                         feed_dict={
                                             X: image,
                                             S: initial_state
                                         })

            vox = tf.convert_to_tensor(y_train[image_hash])
            vox = vox.eval()

            loss, _ = sess.run([loss_op, update_step],
                               feed_dict={
                                   S: initial_state,
                                   Y: vox
                               })

            print("Image: ", iter, " LOSS:  ", loss)
            # Note: this adds a new, never-evaluated summary op on every
            # iteration; nothing is written through train_writer.
            tf.summary.histogram('loss', loss)

            if iter % 2 == 0:
                print("Testing Model at Iter ", iter)
                # Save the prediction to an OBJ file (mesh file).
                net.predict(w, "test_image.png", iter)
                del x_train
                del y_train
                del args
                del w
                print("Finished early!")
                return

        print("Finished!")

    del x_train
    del y_train
    del args
    del w
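
The per-iteration tf.zeros_like / tf.convert_to_tensor / .eval() calls above add new nodes to the TF1 graph on every step. A minimal sketch of the same state handling with NumPy only, reusing the snippet's X, S, forward_pass, sess and x_train and assuming n_gru_vox and n_deconvfilter are defined as above:

import numpy as np

# Zero hidden state built once, without touching the graph.
zero_state = np.zeros(
    [1, n_gru_vox, n_deconvfilter[0], n_gru_vox, n_gru_vox], dtype=np.float32)

for image_hash in x_train.keys():
    state = zero_state.copy()
    for image in x_train[image_hash]:
        # Plain NumPy reshape instead of tf.convert_to_tensor + tf.reshape + eval().
        frame = np.asarray(image, dtype=np.float32).reshape(1, 127, 127, 3)
        state = sess.run(forward_pass, feed_dict={X: frame, S: state})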
Example No. 3
        "weights": model.get_weights(),
        "version": version,
        "points": x_train.shape[0],
        "metrics": metrics
    })

    print("Sending update...")
    aggregator.send_pyobj(_update)


print("[{}] Started".format(ip_addr))
my_id = register()
print("my_id: %s" % my_id)

print("Loading train data...")
train_data = dataset.train_data(os.environ["TRAIN_DATA_PATH"], my_id)

x_train = train_data["x_train"]
y_train = train_data["y_train"]

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')

logs = []

while True:
    notify()
    model, version, hparam = request()
    if model is None:
        continue
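
aggregator.send_pyobj(_update) is pyzmq's helper for pickling a Python object onto a ZeroMQ socket. A minimal sketch of the receiving side of that exchange; the socket type and endpoint are assumptions, since only the worker is shown here:

import zmq

context = zmq.Context()
receiver = context.socket(zmq.PULL)  # socket type is an assumption
receiver.bind("tcp://*:5555")        # endpoint is a placeholder

while True:
    update = receiver.recv_pyobj()   # unpickles the dict sent with send_pyobj
    print("update: version", update["version"], "-", update["points"], "train samples")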
Example No. 4
# tf, np, dataset, loss, num_steps, n_gru_vox and n_deconvfilter are assumed
# to be imported/defined earlier in the file (TensorFlow 1.x API).
optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss)

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    # sess = tf_debug.TensorBoardDebugWrapperSession(sess, "Berkan-MacBook-Pro.local:4334")

    # Run the initializer
    sess.run(init)

    merged = tf.summary.merge_all()

    train_writer = tf.summary.FileWriter('./train', sess.graph)

    x_train = dataset.train_data()
    y_train = dataset.train_labels()

    i = 0

    prev_state = np.zeros([n_gru_vox, n_gru_vox, n_gru_vox, 1, n_deconvfilter[0]])

    while i < num_steps:

        for image_hash in x_train.keys():

            i += 1

            # prev_state = np.zeros([n_gru_vox, n_gru_vox, n_gru_vox, 1, n_deconvfilter[0]])

            images = x_train[image_hash]
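
merged and train_writer are created above but never used in the visible part of the loop. A minimal sketch of how a training step and its summaries would typically be flushed in the TF1 API, assuming at least one summary op exists in the graph and that feed holds the step's feed_dict (both are assumptions):

summary, _ = sess.run([merged, optimizer], feed_dict=feed)
train_writer.add_summary(summary, i)
train_writer.flush()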
Example No. 5
import time

import torch
from torchvision import models, transforms
from torch import optim, nn
from torch.autograd import Variable
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter

batch_size = 8
epochs = 500
train_root = 'D:\\lhq\\catdog\\train\\'
val_root = 'D:\\lhq\\catdog\\val\\'

# train_data, val_data and Net are assumed to come from the project's own modules.
train_dataset = train_data(train_root)
val_dataset = val_data(val_root)
print()

train_loader = DataLoader(train_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)
val_loader = DataLoader(val_dataset,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=0)

if torch.cuda.is_available():
    #net=resnet(3, 2, False).cuda()
    net = Net().cuda()
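
The snippet stops right after the model is moved to the GPU. A minimal sketch of the training loop these pieces usually feed into; the loss function, optimizer, hyperparameters and log directory are assumptions, and CUDA is assumed to be available as in the branch above:

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)  # assumed hyperparameters
writer = SummaryWriter('runs/catdog')                            # assumed log directory

for epoch in range(epochs):
    running_loss = 0.0
    for inputs, targets in train_loader:
        inputs, targets = inputs.cuda(), targets.cuda()  # assumes CUDA, as above
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    writer.add_scalar('train/loss', running_loss / len(train_loader), epoch)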
Example No. 6
# Assumed module-level imports for this snippet (TensorFlow 1.x API).
import argparse
import tensorflow as tf

def main():

    # The early return below means nothing after it ever runs; the body is kept
    # for reference. net, dataset, w, n_gru_vox and n_deconvfilter are assumed
    # to be defined at module level, and the commented-out placeholder and
    # optimizer blocks below would have to be enabled for X, S, Y, loss_op and
    # update_step to exist (see the sketch after this example).
    print("Please run net.py!")
    return

    parser = argparse.ArgumentParser(description='Make everything 3D')
    parser.add_argument('--batch-size',
                        dest='batch_size',
                        help='Batch size',
                        default=120,
                        type=int)
    parser.add_argument('--iter',
                        dest='iter',
                        help='Number of iterations',
                        default=1000,
                        type=int)
    parser.add_argument('--weights',
                        dest='weights',
                        help='Pre-trained weights',
                        default=None)
    args = parser.parse_args()
    print('Called with args:', args)

    #w = net.initialize_weights()

    with tf.name_scope("Dataset"):
        x_train = dataset.train_data()
        y_train = dataset.train_labels()
    print("Finished reading dataset.")

    # TF Graph Input
    #X = tf.placeholder(tf.float32, shape=[1, 127, 127, 3],name = "Image")
    #Y = tf.placeholder(tf.float32, shape=[32, 32, 32],name = "Pred")
    #S = tf.placeholder(tf.float32, shape=[1,n_gru_vox,n_deconvfilter[0],n_gru_vox,n_gru_vox],name = "Hidden_State")

    #initial_state = tf.Variable(tf.zeros_like(
    #    tf.truncated_normal([1,n_gru_vox,n_deconvfilter[0],n_gru_vox,n_gru_vox], stddev=0.5)), name="initial_state")

    forward_pass = net.encoder_gru()

    decoder_pass = net.decoder()

    logits = decoder_pass

    prediction = tf.nn.softmax(logits)

    # Define loss and optimizer
    #loss_op = net.loss(logits,Y)

    # Calculate and clip gradients
    #params = tf.trainable_variables()
    #gradients = tf.gradients(loss_op, params)
    #clipped_gradients, _ = tf.clip_by_global_norm(
    #    gradients, 1) # 1 is max_gradient_norm

    # Optimization
    #optimizer = tf.train.AdamOptimizer(0.00001)
    #update_step = optimizer.apply_gradients(
    #    zip(clipped_gradients, params))

    # Initialize the variables
    init = tf.global_variables_initializer()

    # Start training
    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)

        # Merge all the summaries and set up a FileWriter for ./train
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter('./train', sess.graph)

        iter = 0
        print("Started training.")
        for image_hash in x_train.keys():
            iter += 1

            # Building tf.zeros_like / tf.truncated_normal here adds new graph
            # nodes on every iteration; a plain NumPy zeros array of the same
            # shape would do (see the sketch after Example No. 2).
            initial_state = tf.zeros_like(
                tf.truncated_normal(
                    [1, n_gru_vox, n_deconvfilter[0], n_gru_vox, n_gru_vox],
                    stddev=0.5))
            initial_state = initial_state.eval()

            for image in x_train[image_hash]:
                image = tf.convert_to_tensor(image)
                image = tf.reshape(image, [1, 127, 127, 3])
                image = image.eval()
                #print("XDXDXD")
                #print(initial_state.shape)
                # Run forward_pass directly (not wrapped in a list) so the
                # returned state keeps the shape the S placeholder expects.
                initial_state = sess.run(forward_pass,
                                         feed_dict={
                                             X: image,
                                             S: initial_state
                                         })
                #initial_state = tf.convert_to_tensor(hidden_state)
                #initial_state = initial_state.eval()

            vox = tf.convert_to_tensor(y_train[image_hash])
            vox = vox.eval()

            loss, _ = sess.run([loss_op, update_step],
                               feed_dict={
                                   S: initial_state,
                                   Y: vox
                               })

            print("Image: ", iter, " LOSS:  ", loss)
            # Note: this adds a new, never-evaluated summary op on every
            # iteration; nothing is written through train_writer.
            tf.summary.histogram('loss', loss)

            if iter % 2 == 0:
                print("Testing Model at Iter ", iter)
                # Save the prediction to an OBJ file (mesh file).
                net.predict(w, "test_image.png", iter)
                del x_train
                del y_train
                del args
                del w
                print("Finished early!")
                return

        print("Finished!")

    del x_train
    del y_train
    del args
    del w
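
The loop above feeds X, S and Y and runs loss_op and update_step, but in this example those are only present as comments. A minimal sketch of the commented-out graph definitions enabled, with shapes and the learning rate taken directly from the comments above:

# TF graph inputs (shapes as in the commented-out lines)
X = tf.placeholder(tf.float32, shape=[1, 127, 127, 3], name="Image")
Y = tf.placeholder(tf.float32, shape=[32, 32, 32], name="Pred")
S = tf.placeholder(tf.float32,
                   shape=[1, n_gru_vox, n_deconvfilter[0], n_gru_vox, n_gru_vox],
                   name="Hidden_State")

# Loss, clipped gradients and the update step, as in the commented-out block
loss_op = net.loss(logits, Y)
params = tf.trainable_variables()
gradients = tf.gradients(loss_op, params)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, 1)  # 1 is max_gradient_norm
optimizer = tf.train.AdamOptimizer(0.00001)
update_step = optimizer.apply_gradients(zip(clipped_gradients, params))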