Example 1
import time

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

import auto_encoder

# Training hyperparameters; the values here are placeholders, since the
# original module-level constants are not shown.
LEARNING_RATE = 0.001
MAX_EPOCH = 10
BATCH_SIZE = 64


def train(X):
    # placeholder
    X_p = tf.placeholder(dtype=tf.float32, shape=(None, 784), name="X_p")

    #regularizer
    regularizer = tf.contrib.layers.l2_regularizer(scale=0.0001)

    model = auto_encoder.AutoEncoder()

    #encode
    code = model.encoder(X_p, regularizer, "encoder", False)
    #decode
    generate = model.decoder(code, regularizer, "decoder", False)

    #loss
    loss = tf.losses.mean_squared_error(labels=tf.reshape(X_p, [-1]),
                                        predictions=tf.reshape(generate, [-1]))

    optimizer = tf.train.AdamOptimizer(
        learning_rate=LEARNING_RATE).minimize(loss)
    init_op = tf.global_variables_initializer()
    init_local_op = tf.local_variables_initializer()

    with tf.Session() as sess:
        print("Training Start")
        sess.run(init_op)  # initialize all variables
        sess.run(init_local_op)
        train_Size = X.shape[0]
        for epoch in range(1, MAX_EPOCH + 1):
            losses = []
            print("Epoch:", epoch)
            start_time = time.time()  # time evaluation

            # mini batch
            for i in range(0, (train_Size // BATCH_SIZE)):
                _, loss_ = sess.run(
                    fetches=(optimizer, loss),
                    feed_dict={X_p: X[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]})
                losses.append(loss_)

            print("loss:", sum(losses) / len(losses))

            # visualize one original picture and its reconstruction
            gen_pic = sess.run(fetches=generate, feed_dict={X_p: X})
            plt.subplot(1, 2, 1)
            plt.imshow(np.reshape(X[3], newshape=[28, 28]))
            plt.subplot(1, 2, 2)
            plt.imshow(np.reshape(gen_pic[3], newshape=[28, 28]))
            plt.show()
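The auto_encoder module above is project-local and not shown. A minimal hypothetical sketch of an encoder/decoder pair compatible with the calls in train (the layer sizes and every name below are assumptions, not the original implementation):

class AutoEncoder(object):
    # Hypothetical TF 1.x sketch matching the encoder/decoder call signatures.
    def encoder(self, X, regularizer, scope, reuse):
        with tf.variable_scope(scope, reuse=reuse):
            h = tf.layers.dense(X, 256, activation=tf.nn.relu,
                                kernel_regularizer=regularizer)
            return tf.layers.dense(h, 64, activation=tf.nn.relu,
                                   kernel_regularizer=regularizer)

    def decoder(self, code, regularizer, scope, reuse):
        with tf.variable_scope(scope, reuse=reuse):
            h = tf.layers.dense(code, 256, activation=tf.nn.relu,
                                kernel_regularizer=regularizer)
            return tf.layers.dense(h, 784, activation=tf.nn.sigmoid,
                                   kernel_regularizer=regularizer)

Note that in a sketch like this the l2 terms collected by the regularizer do not affect training by themselves; they would have to be added to the loss, e.g. via tf.losses.get_regularization_loss().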
Example 2
import numpy as np
from PIL import Image

import auto_encoder
import trainer

# process_data is assumed to be a project-local helper imported elsewhere.


def run():
    print('0.getting dataset')
    datapath = "../make_dataset/data_input_angle=5"
    train, test, train_target, test_target = process_data(datapath)
    print('1.creating model')
    model = auto_encoder.AutoEncoder()
    model.build()
    print('2.Creating trainer')
    t = trainer.Trainer(train, test, train_target, test_target)
    t.setup_graph(model)
    print('3.starting train')
    print(train.shape)
    t.train_model()
    print('4.starting test')
    res = t.test_model()
    # save input / output / target pictures for inspection
    res = np.array(res)
    res = res[0] * 255
    res = np.rint(res)
    test_show1 = test[:24, :, :, :3]
    test_show1 = test_show1 * 255
    test_show1 = np.rint(test_show1)
    test_show2 = test[:24, :, :, 3:6]
    test_show2 = test_show2 * 255
    test_show2 = np.rint(test_show2)
    test_target = test_target[:24]
    test_target = test_target * 255
    test_target = np.rint(test_target)
    for num in range(0, 24):
        im1 = np.uint8(test_show1[num])
        im1 = Image.fromarray(im1)
        im1.save("test/%d-left.jpg" % num)
        im2 = np.uint8(test_show2[num])
        im2 = Image.fromarray(im2)
        im2.save("test/%d-right.jpg" % num)
        im3 = np.uint8(res[num])
        im3 = Image.fromarray(im3)
        im3.save("test/%d-output.jpg" % num)
        im4 = np.uint8(test_target[num])
        im4 = Image.fromarray(im4)
        im4.save("test/%d-target.jpg" % num)
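The repeated scale-to-255-and-round pattern above could be factored into a small helper; a sketch, not part of the original module:

def to_uint8_image(arr):
    # Map a float array in [0, 1] to 8-bit pixel values.
    return np.uint8(np.rint(arr * 255))

With it, each save reduces to Image.fromarray(to_uint8_image(test_show1[num])).save(...).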
Example 3
import os
import time

import tensorflow as tf

import auto_encoder
import configuration

# FLAGS and run_once are assumed to be defined elsewhere in this module.


def run():
  """Runs evaluation in a loop, and logs summaries to TensorBoard."""
  # Create the evaluation directory if it doesn't exist.
  eval_dir = FLAGS.eval_dir
  if not tf.gfile.IsDirectory(eval_dir):
    tf.logging.info("Creating eval directory: %s", eval_dir)
    tf.gfile.MakeDirs(eval_dir)
    
  # generate eval dump file
  dump_file = open(os.path.join(eval_dir, 'evaluation.json'), 'a')

  g = tf.Graph()
  with g.as_default():
    # Build the model for evaluation.
    model_config = configuration.ModelConfig()
    model_config.input_file_pattern = FLAGS.input_file_pattern
    model = auto_encoder.AutoEncoder(model_config, mode="eval")
    model.build()

    # Create the Saver to restore model Variables.
    saver = tf.train.Saver()

    # Create the summary operation and the summary writer.
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(eval_dir)

    g.finalize()

    # Run a new evaluation run every eval_interval_secs.
    try:
      while True:
        start = time.time()
        tf.logging.info("Starting evaluation at " + time.strftime(
            "%Y-%m-%d-%H:%M:%S", time.localtime()))
        run_once(model, saver, summary_writer, summary_op, dump_file)
        time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
        if time_to_next_eval > 0:
          time.sleep(time_to_next_eval)
    except KeyboardInterrupt:
      dump_file.close()
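run_once is defined elsewhere in the original module. A minimal hypothetical sketch of what one evaluation pass might look like, assuming a FLAGS.checkpoint_dir flag and a model.global_step tensor (all of which are assumptions here):

def run_once(model, saver, summary_writer, summary_op, dump_file):
  # Hypothetical sketch: restore the newest checkpoint and log summaries.
  checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
  if not checkpoint_path:
    tf.logging.info("No checkpoint found; skipping this evaluation pass.")
    return
  with tf.Session() as sess:
    saver.restore(sess, checkpoint_path)
    tf.train.start_queue_runners(sess=sess)
    global_step = tf.train.global_step(sess, model.global_step)
    summary_writer.add_summary(sess.run(summary_op), global_step)
    summary_writer.flush()
    dump_file.write('{"global_step": %d}\n' % global_step)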
Example 4
import os

import tensorflow as tf

import auto_encoder

# single_batch is assumed to be a project-local helper that loads and decodes
# a list of image paths into a float array.

image_dir = '/raid/workspace/zhengdaren/dresden/jpg'
log_dir = './logs'

batch_size = 10
epoch = 20
keep_prob = 0.9
global_step = 0

image_list = list(os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.JPG') )

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    images_holder = tf.placeholder(tf.float32, [batch_size, 512, 512, 3])
    autoencoder = auto_encoder.AutoEncoder(npy_path='./autoencoder-save.npy')
    autoencoder.build(images_holder)

    cost = tf.reduce_sum((autoencoder.bn6 - autoencoder.bn1) ** 2)
    tf.summary.scalar(name='loss', tensor=cost)
    learning_rate = tf.train.exponential_decay(0.0001, 40000, 400, 0.975)
    train = tf.train.AdamOptimizer(learning_rate).minimize(cost)

    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(log_dir+'/train', sess.graph)
    sess.run(tf.global_variables_initializer())

    for i in range(epoch):
        for j in range(len(image_list) // batch_size):
            image_batch = image_list[j*batch_size : (j+1)*batch_size]
            images = single_batch(image_batch)
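The snippet is cut off after loading each batch. A minimal sketch of how the loop body might continue, assuming single_batch returns a float32 array of shape [batch_size, 512, 512, 3] (the summary/step bookkeeping below is an assumption):

            # one optimization step per mini-batch, logging the loss summary
            summary, loss_value, _ = sess.run([merged, cost, train],
                                              feed_dict={images_holder: images})
            train_writer.add_summary(summary, global_step)
            global_step += 1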
Example 5
import tensorflow as tf

import auto_encoder
import configuration

# FLAGS is assumed to be defined at module level via tf.app.flags.


def main(unused_argv):
    assert FLAGS.input_file_pattern, "--input_file_pattern is required"
    assert FLAGS.train_dir, "--train_dir is required"
    assert FLAGS.embedding_checkpoint_dir, "--embedding_checkpoint_dir is required"

    model_config = configuration.ModelConfig()
    model_config.input_file_pattern = FLAGS.input_file_pattern
    model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file
    model_config.embedding_checkpoint_dir = FLAGS.embedding_checkpoint_dir
    training_config = configuration.TrainingConfig()

    # Create training directory.
    train_dir = FLAGS.train_dir
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)

    # Build the TensorFlow graph.
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = auto_encoder.AutoEncoder(
            model_config,
            mode="train",
            train_inception=FLAGS.train_inception,
            train_embeddings=FLAGS.train_embeddings)
        model.build()

        # Set up the learning rate.
        learning_rate_decay_fn = None
        if FLAGS.finetune_model:
            learning_rate = tf.constant(
                training_config.finetuning_learning_rate)
        else:
            learning_rate = tf.constant(training_config.initial_learning_rate)
            if training_config.learning_rate_decay_factor > 0:
                num_batches_per_epoch = (
                    training_config.num_examples_per_epoch /
                    model_config.batch_size)
                decay_steps = int(num_batches_per_epoch *
                                  training_config.num_epochs_per_decay)

                def _learning_rate_decay_fn(learning_rate, global_step):
                    return tf.train.exponential_decay(
                        learning_rate,
                        global_step,
                        decay_steps=decay_steps,
                        decay_rate=training_config.learning_rate_decay_factor,
                        staircase=True)

                learning_rate_decay_fn = _learning_rate_decay_fn

        # Set up the training ops.
        train_op = tf.contrib.layers.optimize_loss(
            loss=model.total_loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)

    # Run training.
    tf.contrib.slim.learning.train(train_op,
                                   train_dir,
                                   log_every_n_steps=FLAGS.log_every_n_steps,
                                   graph=g,
                                   global_step=model.global_step,
                                   number_of_steps=FLAGS.number_of_steps,
                                   init_fn=model.init_fn,
                                   saver=saver)
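With staircase=True, tf.train.exponential_decay lowers the rate in discrete steps: the effective value is the initial learning rate multiplied by decay_rate ** floor(global_step / decay_steps), so it drops by a constant factor once every num_epochs_per_decay epochs.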
Example 6
import json
import os
from datetime import datetime

import numpy as np
import tensorflow as tf

import auto_encoder
import input_layer
import nn_utils

# base_path and writeToTemp are assumed to be defined elsewhere in this module
# (a sketch of writeToTemp follows the function).


def run_network(jsn_file, project_save_path, evaluate=None):

    if evaluate is None:
        evaluate = False        # default: training mode
    else:
        evaluate = True         # evaluation mode (no training)

    fil_path = base_path + "/temp/out_temp.txt"
    print "fil_path = ", fil_path
    open(fil_path, 'w').close()
    with open(jsn_file, 'r') as f:
        options = json.load(f)

    print "just loaded options"
    

# Create a working directory
    nn_utils.random_seed_np_tf(-1)
    root_logdir = options['dir']
    project_name = options['name']
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    logdir = "{}/{}-{}/".format(root_logdir,project_name, now)

    if evaluate==False:
        # Load data sets
        if 'training_data' in options['data']:
            for training_file in options['data']['training_data']:
                ##print "Loading training set ", training_file['file_name']
                writeToTemp("Loading training set " + training_file['file_name'])
                if 'trX' in locals():
                    trX, trRef = nn_utils.extract_data(training_file['file_name'], trX, trRef, input_size=2)
                else:
                    trX, trRef = nn_utils.extract_data(training_file['file_name'], input_size=2)
        if 'validation_data' in options['data']:
            for validation_file in options['data']['validation_data']:
                ##print "Loading validation set ", validation_file['file_name']
                writeToTemp("Loading validation set " + validation_file['file_name'])
                if 'vlX' in locals():
                    vlX, vlRef = nn_utils.extract_data(validation_file['file_name'], vlX, vlRef)
                else:
                    vlX, vlRef = nn_utils.extract_data(validation_file['file_name'])
        if 'testing_data' in options['data']:
            for testing_data in options['data']['testing_data']:
                ##print "Loading testing set ", testing_data['file_name']
                writeToTemp("Loading testing set " + testing_data['file_name'])
                if 'teX' in locals():
                    teX = nn_utils.extract_data(testing_data['file_name'], teX)
                else:
                    teX = nn_utils.extract_data(testing_data['file_name'])
    else:
        n_samp = 1
        input_size = 17  # hardcoded

# Stack and build the layers
    if evaluate==False:
        n_samp, input_size = teX.shape

    net_layers = []
    layerNumber = -1
    print "Stacking layers..."
    writeToTemp("Stacking layers...")
    for layer in options['layers']:
        layerNumber = layerNumber + 1
        if layer['layer_type'] == "input":
            print "Adding input layer ", layer['layer_name']
            writeToTemp("adding input layer " + layer['layer_name'])
            net_layers.append(input_layer.InputLayer(layer['layer_name'],layer['layer_order'],input_size,False))
            print "Building graph for ", layer['layer_name']
            writeToTemp("Building graph for " + layer['layer_name'])
            net_layers[layerNumber].build_model()
        elif layer['layer_type'] == "autoencoder":
            print "Adding autoencoder layer ", layer['layer_name']
            writeToTemp("Adding autoencoder layer " + layer['layer_name'])
            net_layers.append(
                auto_encoder.AutoEncoder(
                    layer['layer_name'], layer['layer_order'],
                    layer['hidden_size'], layer['activation'], options['dir'],
                    net_layers[layerNumber-1].encode, layer['pre_train'],
                    layer['tied_weights'], "FULL", layer['pre_train_rounds'],
                    layer['pre_train_batch_size'], layer['pre_train_cost'],
                    layer['pre_train_optimizer'],
                    layer['pre_train_learning_rate']))
            writeToTemp("Building graph for " + layer['layer_name'])
            print "Building graph for ", layer['layer_name']
            net_layers[layerNumber].build_model(net_layers[layerNumber-1].hidden_size)
        else:
            print "Adding fully connected layer ", layer['layer_name']
            writeToTemp("Adding fully connected layer " + layer['layer_name'])
            net_layers.append(auto_encoder.AutoEncoder(layer['layer_name'], layer['layer_order'], layer['hidden_size'], layer['activation'], options['dir'],net_layers[layerNumber-1].encode))
            print "Building graph for ", layer['layer_name']
            writeToTemp("Building graph for " + layer['layer_name'])
            net_layers[layerNumber].build_model(net_layers[layerNumber-1].hidden_size)

# Setup overall network to train
    if 'trX' in locals() and 'trRef' in locals():
        print "trRef.shpe=",trRef.shape
        print "outsize=",net_layers[len(net_layers)-1].hidden_size
        assert (net_layers[len(net_layers)-1].hidden_size == trRef.shape[1]), "Your final layer hidden size should be equal to your labels size!" 
        writeToTemp("Building full network training model...")
        y_ = tf.placeholder(tf.float32, [None, trRef.shape[1]])  #holds the truth data
        # create the cost function (only MSE is implemented, so any other
        # 'cost' option currently falls back to MSE as well)
        if options['cost'] == 'MSE':
            cost = tf.reduce_mean(tf.square(net_layers[len(net_layers)-1].encode - y_))
        else:
            cost = tf.reduce_mean(tf.square(net_layers[len(net_layers)-1].encode - y_))
       # train_step = nn_utils.get_optimizer_function(options['optimizer']).minimize(cost)  #fix this bug
        train_step = tf.train.AdamOptimizer(options['learning_rate']).minimize(cost)

        correct_prediction = tf.equal(tf.argmax(net_layers[len(net_layers)-1].encode, 1), tf.argmax(y_, 1))
        evaluate_full = net_layers[len(net_layers)-1].encode
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    elif evaluate==True:
        writeToTemp("Creating evaluation network")
        evaluate_full = net_layers[len(net_layers)-1].encode



# Start the session
    init = tf.global_variables_initializer()
    sess = tf.Session()
    
    saver = tf.train.Saver()


    sess.run(init)

    if evaluate==False:
    # Do pretraining (assume the first layer is the input layer)
        pre_training_data = net_layers[0].encode_data(sess,teX)
        for i in range(1,len(net_layers),1):
            ae = net_layers[i]
            if ae.layer_type == "Autoencoder":
                if net_layers[i-1].layer_type == "Input" and ae.pre_train:
                    ##print "Pre Training encoder ", ae.name
                    writeToTemp("Pre training encoder " + ae.name)
                    ae.pre_train_model(sess,pre_training_data)
                    pre_training_data = ae.encode_data(sess,pre_training_data)
                elif ae.pre_train:
                    if net_layers[i-1].pre_train != True:
                        ##print "WARNING: Cannot pre-train ", ae.name, " because a previous layer is not pre-trained."
                        writeToTemp("WARNING: Cannot pre-train " + ae.name + " because a previous layer is not pre-trained")
                    else:
                        ##print "Pre Training encoder ", ae.name
                        writeToTemp("Pre training encoder " + ae.name)
                        ae.pre_train_model(sess,pre_training_data)
                        pre_training_data = ae.encode_data(sess,pre_training_data)
                        ##print "size of pre_training_data ", pre_training_data.shape
    else:
        print "Restoring the past session"
        saver.restore(sess,project_save_path + options['name'] + ".ckpt")

# Run the training
    outputs = ""
    
    if evaluate==False:
        if 'trX' in locals() and 'trRef' in locals():
            ##print "Starting network training..."
            writeToTemp("Starting network training..")
            n_samp, n_input = trX.shape
            batch_size = min(options['batch_size'],n_samp)
            for i in range(options['training_rounds']):
                sample = np.random.randint(n_samp, size=batch_size)
                batch_xs = trX[sample][:]
                batch_ys = trRef[sample][:]
                sess.run(train_step, feed_dict={net_layers[0]._input_data: batch_xs, y_: batch_ys})
                if i % 100 == 0:
                    acc = sess.run(accuracy, feed_dict={net_layers[0]._input_data: trX, y_: trRef})
                    print "Round:", i, " Accuracy: ", acc
                    outputs = "Accuracy: " + str(acc)
            acc = sess.run(accuracy, feed_dict={net_layers[0]._input_data: trX,y_: trRef})
            outputs = "Final network accuracy: " + str(acc)
            writeToTemp(outputs)
    else:
        # NOTE: the evaluation inputs ('test' here) are assumed to be loaded
        # elsewhere; this function never defines them in the evaluate branch.
        res = sess.run(evaluate_full, feed_dict={net_layers[0]._input_data: test})
        print "Result is " + str(res)

# Close out session
    if evaluate==False:
        print "Saving network... "
        save_path = saver.save(sess, project_save_path + options['name'] + ".ckpt")
        print "Saving network to ", save_path
        sess.close()

    return outputs
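Example 6 also relies on a writeToTemp helper defined elsewhere in the original module. A plausible minimal version, assuming it appends status lines to the temp file that run_network clears on entry (the exact behavior is an assumption):

def writeToTemp(message):
    # Hypothetical helper: append a status line to the shared temp file.
    with open(base_path + "/temp/out_temp.txt", 'a') as f:
        f.write(message + "\n")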
Example 8
import image
import cifar
import auto_encoder

data = cifar.load()
patches = image.prepare_patches(data, 8, 10000)
patches = image.normalize(patches)

# 8x8 RGB patches flatten to 3 * 8**2 = 192 visible units; 200 hidden units
ae = auto_encoder.AutoEncoder(3 * 8**2, 200)
ae.cost(patches)
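Here cifar, image, and auto_encoder are project-local modules: prepare_patches(data, 8, 10000) presumably extracts 10000 8x8 color patches (a 10000 x 192 matrix after flattening), which are normalized and then fed to the autoencoder's reconstruction cost.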