Пример #1
0
def test_cnn(sess):
    """Run the colorization network on the test split and save results.

    Feeds the test images' L channel through the network, decodes the
    predicted ab-distribution back to RGB, and writes each generated image
    to ``dirName`` as ``<i>Gen.jpeg`` (1-based index).

    Args:
        sess: an active ``tf.Session`` whose variables are already
            initialized/trained (called from ``train_neural_network``).
    """
    test_loader = dataset_loader.dataset(batch_size=batch_size,
                                         test_percentage=test_percentage,
                                         validation_percentage=validation_percentage)

    # _image_lab (ground-truth Lab images) is returned but currently unused.
    image_l, _image_lab = test_loader.getTestData()

    image_l = np.array(image_l, dtype=np.float32)
    encoded_img = convolutional_neural_network(image_l)
    encoded_img_val = sess.run(encoded_img)

    # 2.63 — presumably the decoding temperature for the annealed-mean
    # decode; same constant as in test_encode. TODO confirm.
    images_rgb = decode_batch(image_l, encoded_img_val, 2.63)
    for i, image_rgb in enumerate(images_rgb, start=1):
        imsave(dirName + str(i) + 'Gen.jpeg', image_rgb)
Пример #2
0
def test_encode(dirName="generatedPics/"):
    """Round-trip check of the encode/decode pipeline on one sample image.

    Reads ``dirName + 'sample.JPEG'``, converts it to Lab, encodes the ab
    channels to the quantized representation, decodes back, and writes the
    reconstruction to ``dirName + 'sample2.jpeg'``.

    Args:
        dirName: directory containing 'sample.JPEG'; also the output
            directory for the reconstruction.
    """
    data_loader = dataset_loader.dataset()
    image = cv2.imread(dirName + 'sample.JPEG')
    # OpenCV loads BGR; the rest of the pipeline expects RGB.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Wrap in a length-1 batch dimension.
    images = np.array([image])
    image_l, images = data_loader.rgb2lab(images)

    images = encode(images)
    # 2.63 — presumably the decoding temperature; same constant as in
    # test_cnn. TODO confirm.
    image = decode(image_l, images, 2.63)
    imsave(dirName + 'sample2.jpeg', image)
Пример #3
0
def train_neural_network(X):
    """Build the class-rebalanced colorization loss and run the training loop.

    Args:
        X: input tensor/placeholder fed to the network. NOTE(review): the
            module-level placeholder ``X`` is [None, 256, 256, 1] (L channel),
            yet the code below names the subsample ``x_ab_ss`` and thresholds
            it as if it held ab channels — confirm what is actually passed in.

    Side effects: creates the TF graph ops, opens a Session, trains for
    ``hm_epochs`` epochs, and finally calls ``test_cnn(sess)``.
    """

    # Spatially subsample by 4 to match the prediction resolution (H/4, W/4).
    x_ab_ss = X[:, ::4, ::4, :]
    prediction = convolutional_neural_network(X)
    thresh = 5
    # Mask out near-gray samples (all |ab| <= thresh). NOTE(review): numpy
    # reductions (np.abs/np.sum) applied to what appears to be a symbolic
    # tf.Tensor will generally fail in TF1 graph mode — likely intended
    # tf.abs/tf.reduce_sum. Confirm before relying on this path.
    nongray_mask = (np.sum(np.sum(np.sum(np.abs(x_ab_ss) > thresh, axis=1), axis=1), axis=1) > 0)[:, np.newaxis, np.newaxis, np.newaxis]
    # Class-rebalancing prior boost (presumably per Zhang et al.,
    # "Colorful Image Colorization" — confirm).
    # prior_boost: [N, 1, H/4, W/4]
    prior_boost = _prior_boost(prediction)

    # Elementwise product: zero the boost for gray samples.
    # prior_boost_nongray: [N, 1, H/4, W/4]
    prior_boost_nongray = prior_boost * nongray_mask
    g_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=Y))
    # Gradient-reweighting trick: treat dL/dprediction as a constant, so the
    # gradient of `cost` w.r.t. prediction becomes dl2c * prior_boost_nongray,
    # i.e. the cross-entropy gradient scaled by the rebalancing boost.
    dl2c = tf.gradients(g_loss, prediction)
    dl2c = tf.stop_gradient(dl2c)

    

    # Surrogate objective whose gradient is the boosted cross-entropy gradient.
    cost = tf.reduce_sum(dl2c * prediction * prior_boost_nongray)

    
    # Very small learning rate (1e-6); Y is the module-level label placeholder.
    optimizer = tf.train.AdamOptimizer(learning_rate=0.000001).minimize(cost)
    hm_epochs = 50
    with tf.device("/gpu:0"):
        with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
            sess.run(tf.global_variables_initializer())

            for epoch in range(hm_epochs):
                epoch_loss = 0
                # Fresh loader each epoch restarts the batch iterator.
                data_loader = dataset_loader.dataset(batch_size=batch_size, test_percentage=test_percentage,
                                                     validation_percentage=validation_percentage)
                for _ in range(int(train_size / batch_size)):
                    epoch_x, epoch_y = data_loader.getNextBatch()
                    # Labels are soft-encoded into the quantized ab bins.
                    encoded_epoch_y = vectorized_encode(epoch_y)
                    _, c = sess.run([optimizer, cost], feed_dict={X: epoch_x, Y: encoded_epoch_y})
                    epoch_loss += c
                print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
            # Generate test images with the trained weights.
            test_cnn(sess)
Пример #4
0
    # image_l = np.array(sess.run([image_l]))

    images_rgb = decode_batch(image_l,encoded_img_val,2.63)
    i = 0
    for image_rgb in images_rgb:
        i+=1
        imsave(dirName + str(i) + 'Gen.jpeg', image_rgb)
    i = 0
    # for image_test in image_lab:
    #     i+=1
    #     imsave(dirName + str(i) + 'Test.jpeg', image_test)

# --- Module-level configuration and TF graph inputs ---
batch_size = 3
test_percentage = 15
validation_percentage = 10
# Shared loader instance; only used here to size the training loop.
data_loader = dataset_loader.dataset(batch_size = batch_size, test_percentage = test_percentage, validation_percentage = validation_percentage)
train_size = data_loader.n_train_records
# NOTE(review): this rebinds the name `pprint` from the module to a
# PrettyPrinter instance. `pprint.pprint(...)` still works afterwards
# (the instance has a `pprint` method), but the shadowing is easy to misread.
pprint = pprint.PrettyPrinter(indent=4)
dirName = "generatedPics/"

# X: L channel of a 256x256 image; Y: 64x64 map over 313 classes
# (presumably the quantized ab bins of Zhang et al. — confirm).
X = tf.placeholder(tf.float32,shape=[None,256,256,1])
Y = tf.placeholder(tf.float32,shape=[None,64,64,313])

def train_neural_network(X):

    x_ab_ss = X[:, ::4, ::4, :]
    prediction = convolutional_neural_network(X)
    thresh = 5
    nongray_mask = (np.sum(np.sum(np.sum(np.abs(x_ab_ss) > thresh, axis=1), axis=1), axis=1) > 0)[:, np.newaxis, np.newaxis, np.newaxis]
    #Prior_Boost 
    #prior_boost: [N, 1, H/4, W/4]
Пример #5
0
def cnn_constructor():
    """Build, (optionally) train, and persist a small CNN classifier.

    Referenced by https://github.com/oreilly-japan/deep-learning-from-scratch
    common modules referenced there too.

    Side effects:
        - Sets the module-level globals ``network`` and ``classes``.
        - Creates ``params/`` and saves/loads network parameters there.
        - Writes per-epoch accuracy and loss plots into ``params/``.
    """

    global network, classes, imsize

    (x_train, t_train), (x_test,
                         t_test), classes = dataset(image_dir="images",
                                                    test_percentage=10,
                                                    validation_percentage=10,
                                                    imsize=imsize)

    x_train = chenneling(x_train)
    x_test = chenneling(x_test)

    train_num = x_train.shape[0]

    x_train, t_train = shuffle_dataset(x_train, t_train)
    x_test, t_test = shuffle_dataset(x_test, t_test)

    net_param = "cnn_params" + str(imsize) + ".pkl"
    # Create the parameter directory once (this existence check was
    # previously duplicated further down).
    os.makedirs("params/", exist_ok=True)

    # Make the convolutional neural network.
    # x_train.shape[1:] is (channel, height, width).
    network = ConvNet(input_dim=(x_train.shape[1:]),
                      conv_param={
                          'filter_num': 20,
                          'filter_size': 3,
                          'pad': 0,
                          'stride': 1
                      },
                      hidden_size=32,
                      output_size=classes,
                      weight_init_std=0.001)

    trainer = Trainer(network,
                      x_train,
                      t_train,
                      x_test,
                      t_test,
                      epochs=1,
                      mini_batch_size=FLAGS.batch_size,
                      optimizer='Adam',
                      optimizer_param={'lr': 0.001},
                      evaluate_sample_num_per_epoch=train_num)

    def _save_current_plot():
        """Save the current matplotlib figure to params/ under a
        timestamped filename (same scheme for both plots)."""
        now = datetime.now()
        filename = "params/" + now.strftime(
            '%Y%m%d_%H%M%S%f') + "_" + "ep" + ".png"
        plt.savefig(filename)

    params_loaded = False
    if os.path.exists("params/" + net_param):
        network.load_params("params/" + net_param)
        params_loaded = True
        print("\n* Loaded Network Parameters!  -  " + net_param)

    # Train when explicitly requested, or when no saved parameters exist.
    if FLAGS.train_epochs > 0 or not params_loaded:
        if FLAGS.train_epochs <= 0:
            FLAGS.train_epochs = 10
        # Training
        for ep in range(FLAGS.train_epochs):
            trainer.train()
            # Save parameters after every epoch.
            network.save_params("params/" + net_param)

            # Graph 1: accuracy per epoch (train vs test).
            x1 = np.arange(len(trainer.train_acc_list))
            plt.clf()
            plt.plot(x1,
                     trainer.train_acc_list,
                     marker='o',
                     label='train',
                     markevery=1)
            plt.plot(x1,
                     trainer.test_acc_list,
                     marker='s',
                     label='test',
                     markevery=1)
            plt.xlabel("epochs")
            plt.ylabel("accuracy")
            plt.ylim(0, 1.1)
            plt.legend(loc='lower right')
            plt.title("Accuracy")
            _save_current_plot()

            # Graph 2: cross-entropy loss per iteration.
            x2 = np.arange(len(trainer.train_loss_list))
            plt.clf()
            plt.plot(x2,
                     trainer.train_loss_list,
                     marker='o',
                     label='loss',
                     markevery=1)
            plt.xlabel("iter")
            plt.ylabel("loss")
            plt.legend(loc='lower right')
            plt.title("Cross entropy loss")
            _save_current_plot()
        print("\n* Saved Network Parameters!  -  " + net_param)