Example 1
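All of the examples below use the TensorFlow 1.x graph API and share the same preamble. A minimal import block, as a sketch; rawDataLoader, lossFunc, printProgress and MODEL_NAME are project-local helpers and constants that the listings assume but do not define:

import pickle

import cv2
import numpy as np
import tensorflow as tf

# rawDataLoader, lossFunc, printProgress and MODEL_NAME come from the
# surrounding project and are not defined in these listings.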
def main(s):
    rd = rawDataLoader()
    # Loading the image database is required before any further operation;
    # this step takes ~30 s on my MacBook Pro.
    rd.loadImageNames()
    # placeholders for data and target
    x = tf.placeholder(tf.float32, shape=[None, 240, 320, 3])
    #y_ = tf.placeholder(tf.float32, shape=[None, 60, 80, 1])
    #m_ = tf.placeholder(tf.float32, shape=[None, 60, 80, 1])
    #y_est = tf.image.resize_images(y_, (58, 78))
    #m_est = tf.image.resize_images(m_, (58, 78), method = tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    # coarse network (AlexNet-style feature extractor)

    coarse1 = tf.layers.conv2d(x, 96, 11, (4,4), activation=tf.nn.relu, padding='valid')
    coarse_pool1 = tf.layers.max_pooling2d(coarse1, strides=(2,2), pool_size=(3,3), padding='valid')
    coarse2 = tf.layers.conv2d(coarse_pool1, 256, 5, activation=tf.nn.relu, padding='valid')
    coarse_pool2 = tf.layers.max_pooling2d(coarse2, strides=(2,2), pool_size=(3,3), padding='same')
    coarse3 = tf.layers.conv2d(coarse_pool2, 384, 3, activation=tf.nn.relu, padding='valid')
    coarse4 = tf.layers.conv2d(coarse3, 384, 3, activation=tf.nn.relu, padding='valid')
    #coarse_pool3 = tf.layers.max_pooling2d(coarse4, strides=(2,2), pool_size=(2,2), padding='same')
    coarse5 = tf.layers.conv2d(coarse4, 256, 3, activation=tf.nn.relu, padding='valid')
    coarse5_flatten = tf.contrib.layers.flatten(coarse5)
    coarse6 = tf.contrib.layers.fully_connected(coarse5_flatten, 4800)
    coarse7 = tf.contrib.layers.fully_connected(coarse6, 4800)
    coarse7_output = tf.reshape(coarse7, [-1, 60, 80, 1])  # 4800 = 60*80 coarse depth map

    keep_prob = tf.placeholder(tf.float32)

    fine1 = tf.layers.conv2d(x, 63, 9, (2,2), activation=tf.nn.relu, padding='same')  # 63 channels: the concat below adds the 1-channel coarse output for 64 total
    fine_pool1 = tf.layers.max_pooling2d(fine1, strides=(2,2), pool_size=(3,3), padding='same')
    fine_pool1_dp = tf.nn.dropout(fine_pool1, keep_prob)
    fine2 = tf.concat([fine_pool1_dp, coarse7_output], 3)
    fine3 = tf.layers.conv2d(fine2, 64, 5, activation=tf.nn.relu, padding='same')
    fine3_dp = tf.nn.dropout(fine3, keep_prob)
    fine4 = tf.layers.conv2d(fine3_dp, 1, 5, activation=tf.nn.relu, padding='same')
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('testing')
        saver.restore(sess, "saved_Eigen/model.ckpt")
        img_path = 'result_images/Eigen/'
        [imt, dpt, mst] = rd.getNextBatchTesting(20)
        for i in range(0,20):
            tmpRes = sess.run(fine4, feed_dict={x: [imt[i,:,:,:]], keep_prob: 1})
            dp1 = cv2.resize(dpt[i,:,:,:], None,fx=4, fy=4, interpolation = cv2.INTER_CUBIC)*255
            re1 = cv2.resize(tmpRes[0,:,:,:], None,fx=4, fy=4, interpolation = cv2.INTER_CUBIC)*255
            cr = sess.run(coarse7_output, feed_dict={x: [imt[i,:,:,:]]})
            cr1 = cv2.resize(cr[0,:,:,:], None,fx=4, fy=4, interpolation = cv2.INTER_CUBIC)
            amincr = np.amin(cr1)
            amaxcr = np.amax(cr1)
            cr1 = (cr1-amincr) / (amaxcr - amincr) * 255
            org = imt[i, :, :, :]*255
            cv2.imwrite(img_path + str(i) + '_origin_dps.jpg', dp1)
            cv2.imwrite(img_path + str(i) + '_predicted_dps.jpg', re1)
            cv2.imwrite(img_path + str(i) + '_origin_rgb.jpg', org)
            cv2.imwrite(img_path + str(i) + '_coarse_output.jpg', cr1)
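The per-image min-max rescaling applied to the coarse output above can be factored into a small helper; a minimal sketch (to_uint8 is a hypothetical name, not part of the source):

def to_uint8(img):
    # Rescale a float image of arbitrary range to [0, 255] for cv2.imwrite.
    lo, hi = np.amin(img), np.amax(img)
    return (img - lo) / (hi - lo) * 255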
def main(s):
    rd = rawDataLoader()
    # Loading the image database is required before any further operation;
    # this step takes ~30 s on my MacBook Pro.
    rd.loadImageNames()
    # placeholders for data and target
    x = tf.placeholder(tf.float32, shape=[None, 240, 320, 3])
    #y_ = tf.placeholder(tf.float32, shape=[None, 60, 80, 1])
    #m_ = tf.placeholder(tf.float32, shape=[None, 60, 80, 1])
    #y_est = tf.image.resize_images(y_, (58, 78))
    #m_est = tf.image.resize_images(m_, (58, 78), method = tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    # AlexNet-style convolutional feature extractor

    upper_conv1 = tf.layers.conv2d(x, 96, 11, (4,4), activation=tf.nn.relu, padding='same')
    upper_nm1 = tf.nn.l2_normalize(upper_conv1, [1,2])
    upper_mp1 = tf.layers.max_pooling2d(upper_nm1, strides=(2,2), pool_size=(3,3), padding='same')
    upper_conv2 = tf.layers.conv2d(upper_mp1, 256, 5, activation=tf.nn.relu, padding='same')
    upper_nm2 = tf.nn.l2_normalize(upper_conv2, [1,2])
    upper_mp2 = tf.layers.max_pooling2d(upper_nm2, strides=(2,2), pool_size=(2,2), padding='same')
    upper_conv3 = tf.layers.conv2d(upper_mp2, 256, 3, activation=tf.nn.relu, padding='same')
    upper_conv4 = tf.layers.conv2d(upper_conv3, 256, 3, activation=tf.nn.relu, padding='same')
    upper_conv5 = tf.layers.conv2d(upper_conv4, 256, 3, activation=tf.nn.relu, padding='same')
    upper_conv6 = tf.layers.conv2d(upper_conv5, 128, 3, activation=tf.nn.relu, padding='same')
    total_ft1 = tf.contrib.layers.flatten(upper_conv6)
    fc1 = tf.contrib.layers.fully_connected(total_ft1, 4800)
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(fc1, keep_prob)
    fc2 = tf.contrib.layers.fully_connected(h_fc1_drop, 80*60)
    res = tf.reshape(fc2, [-1, 60, 80, 1])
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('testing')
        saver.restore(sess, "saved_Alexnet/model.ckpt")
        img_path = 'result_images/Alexnet/'
        [imt, dpt, mst] = rd.getNextBatchTesting(20)
        for i in range(0,20):
            tmpRes = sess.run(res, feed_dict={x: [imt[i,:,:,:]], keep_prob: 1})
            dp1 = cv2.resize(dpt[i,:,:,:], None,fx=4, fy=4, interpolation = cv2.INTER_CUBIC)*255
            re1 = cv2.resize(tmpRes[0,:,:,:], None,fx=4, fy=4, interpolation = cv2.INTER_CUBIC)*255
            org = imt[i, :, :, :]*255
            cv2.imwrite(img_path + str(i) + '_origin_dps.jpg', dp1)
            cv2.imwrite(img_path + str(i) + '_predicted_dps.jpg', re1)
            cv2.imwrite(img_path + str(i) + '_origin_rgb.jpg', org)
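Each of these test routines follows the same pattern: build the graph, restore a checkpoint, run a 20-image test batch, and write the original and predicted depth maps to disk as JPEGs. A minimal driver, assuming the file is run as a script (the unused parameter s mirrors the signatures above):

if __name__ == '__main__':
    import sys
    main(sys.argv)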
Example 3
def main():

    # placeholders for data and target
    x = tf.placeholder(tf.float32, shape=[None, 240, 320, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 60, 80, 1])
    m_ = tf.placeholder(tf.float32, shape=[None, 60, 80, 1])
    #y_est = tf.image.resize_images(y_, (58, 78))
    #m_est = tf.image.resize_images(m_, (58, 78), method = tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    # coarse network, frozen below via trainable=False

    coarse1 = tf.layers.conv2d(x, 96, 11, (4,4), activation=tf.nn.relu, padding='valid', trainable=False)
    coarse_pool1 = tf.layers.max_pooling2d(coarse1, strides=(2,2), pool_size=(3,3), padding='valid')
    coarse2 = tf.layers.conv2d(coarse_pool1, 256, 5, activation=tf.nn.relu, padding='valid', trainable=False)
    coarse_pool2 = tf.layers.max_pooling2d(coarse2, strides=(2,2), pool_size=(3,3), padding='same')
    coarse3 = tf.layers.conv2d(coarse_pool2, 384, 3, activation=tf.nn.relu, padding='valid', trainable=False)
    #coarse4 = tf.layers.conv2d(coarse3, 384, 3, activation=tf.nn.relu, padding='valid', trainable=False)
    #coarse_pool3 = tf.layers.max_pooling2d(coarse4, strides=(2,2), pool_size=(2,2), padding='same')
    coarse5 = tf.layers.conv2d(coarse3, 256, 3, activation=tf.nn.relu, padding='valid', trainable=False)
    coarse5_flatten = tf.contrib.layers.flatten(coarse5)
    coarse6 = tf.contrib.layers.fully_connected(coarse5_flatten, 4800, trainable=False)
    coarse7 = tf.contrib.layers.fully_connected(coarse6, 4800, trainable=False)
    coarse7_output = tf.reshape(coarse7, [-1, 60, 80, 1])

    keep_prob = tf.placeholder(tf.float32)

    fine1 = tf.layers.conv2d(x, 1, 9, (2,2), activation=tf.nn.relu, padding='same')
    fine_pool1 = tf.layers.max_pooling2d(fine1, strides=(2,2), pool_size=(3,3), padding='same')
    fine_pool1_dp = tf.nn.dropout(fine_pool1, keep_prob)
    fine2 = tf.concat([fine_pool1_dp, coarse7_output], 3)
    fine3 = tf.layers.conv2d(fine2, 64, 5, activation=tf.nn.relu, padding='same')
    fine3_dp = tf.nn.dropout(fine3, keep_prob)
    fine5 = tf.layers.conv2d(fine3_dp, 1, 5, activation=tf.nn.relu, padding='same')
    # loss definition, MSE for raw test
    #loss = tf.losses.huber_loss(y_est, fine4, delta=0.5)

    loss2 = lossFunc(fine5, y_, m_)
    train_step2 = tf.train.AdamOptimizer(1e-4).minimize(loss2)

    saver = tf.train.Saver()
    trainingLoss = []
    testingLoss = []

    batch_size = 20
    epoch = 100

    print('Loading start..')
    rd = rawDataLoader()
    # Loading the image database is required before any further operation;
    # this step takes ~30 s on my MacBook Pro.
    rd.loadImageNames()
    print('Loading end.')

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "saved_Eigen_modified_1/model.ckpt")
        print('training ')
        it_count = int(epoch*2200/batch_size)  # assumes a 2200-image training set
        for i in range(it_count):
            [im, dp, ms] = rd.getNextBatchTraining(batch_size)
            train_step2.run(feed_dict={x: im, y_: dp, m_:ms, keep_prob: 0.5})
            printProgress(i+1, it_count)
            if i % 50 == 0:
                [imt, dpt, mst] = rd.getNextBatchTesting(10)
                tmpTL = loss2.eval(feed_dict={x: imt, y_: dpt, m_:mst, keep_prob: 1})
                tmpLoss = loss2.eval(feed_dict={x: im, y_: dp, m_:ms, keep_prob: 1})
                print('loss = ' + str(tmpTL) + ' ' + str(tmpLoss) )
            if int(i*batch_size/2200) > int((i-1)*batch_size/2200):  # once per epoch
                tmpLoss = loss2.eval(feed_dict={x: im, y_: dp, m_:ms, keep_prob: 1})
                trainingLoss.append( tmpLoss )
                [imt, dpt, mst] = rd.getNextBatchTesting(10)
                tmpTL = loss2.eval(feed_dict={x: imt, y_: dpt, m_:mst, keep_prob: 1})
                testingLoss.append( tmpTL )
        saver.save(sess, 'saved_' + MODEL_NAME + '/model.ckpt')
        print('trained model saved. ')
    with open('analyst_new/loss_' + MODEL_NAME + '.pkl', 'wb') as f:
        pickle.dump([trainingLoss, testingLoss], f)
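Because every coarse layer above is created with trainable=False, only the fine-network weights receive gradient updates from the AdamOptimizer. A quick sanity check, as a sketch:

# The frozen coarse layers should not appear in this list.
for v in tf.trainable_variables():
    print(v.name, v.shape)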
def main(s):
    rd = rawDataLoader()
    # Loading the image database is required before any further operation;
    # this step takes ~30 s on my MacBook Pro.
    rd.loadImageNames()
    # placeholders for data and target
    x = tf.placeholder(tf.float32, shape=[None, 240, 320, 3])
    # res blocks

    conv0 = tf.layers.conv2d(x, 64, 7, (2, 2), activation=None, padding='same')
    bn0 = tf.layers.batch_normalization(conv0)
    first_conv = tf.nn.relu(bn0)

    # res_block_1
    mp1_1 = tf.layers.max_pooling2d(first_conv,
                                    strides=(2, 2),
                                    pool_size=(3, 3),
                                    padding='same')
    conv1_1 = tf.layers.conv2d(mp1_1, 64, 3, activation=None, padding='same')
    bn1_1 = tf.layers.batch_normalization(conv1_1)
    relu1_1 = tf.nn.relu(bn1_1)
    conv1_2 = tf.layers.conv2d(relu1_1, 64, 3, activation=None, padding='same')
    bn1_2 = tf.layers.batch_normalization(conv1_2)
    first_conv_resize = tf.image.resize_images(first_conv, [60, 80])
    add1 = tf.add(first_conv_resize, bn1_2)
    res1 = tf.nn.relu(add1)

    # res_block_2
    conv2_1 = tf.layers.conv2d(res1, 64, 3, activation=None, padding='same')
    bn2_1 = tf.layers.batch_normalization(conv2_1)
    relu2_1 = tf.nn.relu(bn2_1)
    conv2_2 = tf.layers.conv2d(relu2_1, 64, 3, activation=None, padding='same')
    bn2_2 = tf.layers.batch_normalization(conv2_2)
    add2 = tf.add(res1, bn2_2)
    res2 = tf.nn.relu(add2)

    # res_block_3
    conv3_1 = tf.layers.conv2d(res2,
                               128,
                               3, (2, 2),
                               activation=None,
                               padding='same')
    bn3_1 = tf.layers.batch_normalization(conv3_1)
    relu3_1 = tf.nn.relu(bn3_1)
    conv3_2 = tf.layers.conv2d(relu3_1,
                               128,
                               3,
                               activation=None,
                               padding='same')
    bn3_2 = tf.layers.batch_normalization(conv3_2)
    res2_resize = tf.layers.conv2d(res2,
                                   128,
                                   3, (2, 2),
                                   activation=None,
                                   padding='same')
    add3 = tf.add(res2_resize, bn3_2)
    res3 = tf.nn.relu(add3)

    # res_block_4
    conv4_1 = tf.layers.conv2d(res3, 128, 3, activation=None, padding='same')
    bn4_1 = tf.layers.batch_normalization(conv4_1)
    relu4_1 = tf.nn.relu(bn4_1)
    conv4_2 = tf.layers.conv2d(relu4_1,
                               128,
                               3,
                               activation=None,
                               padding='same')
    bn4_2 = tf.layers.batch_normalization(conv4_2)
    add4 = tf.add(res3, bn4_2)
    res4 = tf.nn.relu(add4)

    # res_block_5
    conv5_1 = tf.layers.conv2d(res4,
                               256,
                               3, (2, 2),
                               activation=None,
                               padding='same')
    bn5_1 = tf.layers.batch_normalization(conv5_1)
    relu5_1 = tf.nn.relu(bn5_1)
    conv5_2 = tf.layers.conv2d(relu5_1,
                               256,
                               3,
                               activation=None,
                               padding='same')
    bn5_2 = tf.layers.batch_normalization(conv5_2)
    res4_resize = tf.layers.conv2d(res4,
                                   256,
                                   3, (2, 2),
                                   activation=None,
                                   padding='same')
    add5 = tf.add(res4_resize, bn5_2)
    res5 = tf.nn.relu(add5)

    # res_block_6
    conv6_1 = tf.layers.conv2d(res5, 256, 3, activation=None, padding='same')
    bn6_1 = tf.layers.batch_normalization(conv6_1)
    relu6_1 = tf.nn.relu(bn6_1)
    conv6_2 = tf.layers.conv2d(relu6_1,
                               256,
                               3,
                               activation=None,
                               padding='same')
    bn6_2 = tf.layers.batch_normalization(conv6_2)
    add6 = tf.add(res5, bn6_2)
    res6 = tf.nn.relu(add6)

    # res_block_7
    conv7_1 = tf.layers.conv2d(res6,
                               512,
                               3, (2, 2),
                               activation=None,
                               padding='same')
    bn7_1 = tf.layers.batch_normalization(conv7_1)
    relu7_1 = tf.nn.relu(bn7_1)
    conv7_2 = tf.layers.conv2d(relu7_1,
                               512,
                               3,
                               activation=None,
                               padding='same')
    bn7_2 = tf.layers.batch_normalization(conv7_2)
    res6_resize = tf.layers.conv2d(res6,
                                   512,
                                   3, (2, 2),
                                   activation=None,
                                   padding='same')
    add7 = tf.add(res6_resize, bn7_2)
    res7 = tf.nn.relu(add7)

    # res_block_8
    conv8_1 = tf.layers.conv2d(res7, 512, 3, activation=None, padding='same')
    bn8_1 = tf.layers.batch_normalization(conv8_1)
    relu8_1 = tf.nn.relu(bn8_1)
    conv8_2 = tf.layers.conv2d(relu8_1,
                               512,
                               3,
                               activation=None,
                               padding='same')
    bn8_2 = tf.layers.batch_normalization(conv8_2)
    add8 = tf.add(res7, bn8_2)
    res8 = tf.nn.relu(add8)

    conv_elim = tf.layers.conv2d(res8, 64, 3, activation=None, padding='same')
    res_flatten = tf.contrib.layers.flatten(conv_elim)
    fc1 = tf.contrib.layers.fully_connected(res_flatten, 80 * 60)
    fc2 = tf.contrib.layers.fully_connected(fc1, 80 * 60)
    y = tf.reshape(fc2, [-1, 60, 80, 1])

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('testing')
        saver.restore(sess, "saved_resnet/model.ckpt")
        img_path = 'result_images/resnet/'
        [imt, dpt, mst] = rd.getNextBatchTesting(20)
        for i in range(0, 20):
            tmpRes = sess.run(y, feed_dict={x: [imt[i, :, :, :]]})
            dp1 = cv2.resize(dpt[i, :, :, :],
                             None,
                             fx=4,
                             fy=4,
                             interpolation=cv2.INTER_CUBIC) * 255
            re1 = cv2.resize(tmpRes[0, :, :, :],
                             None,
                             fx=4,
                             fy=4,
                             interpolation=cv2.INTER_CUBIC) * 255
            org = imt[i, :, :, :] * 255
            cv2.imwrite(img_path + str(i) + '_origin_dps.jpg', dp1)
            cv2.imwrite(img_path + str(i) + '_predicted_dps.jpg', re1)
            cv2.imwrite(img_path + str(i) + '_origin_rgb.jpg', org)
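Residual blocks 2 through 8 above differ only in width and stride; the pattern can be factored into a helper. A sketch under the same tf.layers API (res_block is a hypothetical name; block 1, with its max-pool and resize, still needs special handling):

def res_block(inp, filters, stride=1):
    # conv-BN-ReLU-conv-BN, plus a skip connection that is projected
    # with a strided conv whenever the shape changes.
    h = tf.layers.conv2d(inp, filters, 3, (stride, stride),
                         activation=None, padding='same')
    h = tf.nn.relu(tf.layers.batch_normalization(h))
    h = tf.layers.batch_normalization(
        tf.layers.conv2d(h, filters, 3, activation=None, padding='same'))
    skip = inp
    if stride != 1 or inp.shape[-1] != filters:
        skip = tf.layers.conv2d(inp, filters, 3, (stride, stride),
                                activation=None, padding='same')
    return tf.nn.relu(tf.add(skip, h))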
def main():

    # placeholders for data and target
    x = tf.placeholder(tf.float32, shape=[None, 240, 320, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 60, 80, 1])
    m_ = tf.placeholder(tf.float32, shape=[None, 60, 80, 1])
    # res blocks

    conv0 = tf.layers.conv2d(x, 64, 7, (2,2), activation=None, padding='same')
    bn0 = tf.layers.batch_normalization(conv0)
    first_conv = tf.nn.relu(bn0)

    # res_block_1
    mp1_1 = tf.layers.max_pooling2d(first_conv, strides=(2,2), pool_size=(3,3), padding='same')
    conv1_1 = tf.layers.conv2d(mp1_1, 64, 3, activation=None, padding='same')
    bn1_1 = tf.layers.batch_normalization(conv1_1)
    relu1_1 = tf.nn.relu(bn1_1)
    conv1_2 = tf.layers.conv2d(relu1_1, 64, 3, activation=None, padding='same')
    bn1_2 = tf.layers.batch_normalization(conv1_2)
    first_conv_resize = tf.image.resize_images(first_conv, [60,80])
    add1 = tf.add(first_conv_resize, bn1_2)
    res1 = tf.nn.relu(add1)

    # res_block_2
    conv2_1 = tf.layers.conv2d(res1, 64, 3, activation=None, padding='same')
    bn2_1 = tf.layers.batch_normalization(conv2_1)
    relu2_1 = tf.nn.relu(bn2_1)
    conv2_2 = tf.layers.conv2d(relu2_1, 64, 3, activation=None, padding='same')
    bn2_2 = tf.layers.batch_normalization(conv2_2)
    add2 = tf.add(res1, bn2_2)
    res2 = tf.nn.relu(add2)

    # res_block_3
    conv3_1 = tf.layers.conv2d(res2, 128, 3, (2,2), activation=None, padding='same')
    bn3_1 = tf.layers.batch_normalization(conv3_1)
    relu3_1 = tf.nn.relu(bn3_1)
    conv3_2 = tf.layers.conv2d(relu3_1, 128, 3, activation=None, padding='same')
    bn3_2 = tf.layers.batch_normalization(conv3_2)
    res2_resize = tf.layers.conv2d(res2, 128, 3, (2,2), activation=None, padding='same')
    add3 = tf.add(res2_resize, bn3_2)
    res3 = tf.nn.relu(add3)

    # res_block_4
    conv4_1 = tf.layers.conv2d(res3, 128, 3, activation=None, padding='same')
    bn4_1 = tf.layers.batch_normalization(conv4_1)
    relu4_1 = tf.nn.relu(bn4_1)
    conv4_2 = tf.layers.conv2d(relu4_1, 128, 3, activation=None, padding='same')
    bn4_2 = tf.layers.batch_normalization(conv4_2)
    add4 = tf.add(res3, bn4_2)
    res4 = tf.nn.relu(add4)

    # res_block_5
    conv5_1 = tf.layers.conv2d(res4, 256, 3, (2,2), activation=None, padding='same')
    bn5_1 = tf.layers.batch_normalization(conv5_1)
    relu5_1 = tf.nn.relu(bn5_1)
    conv5_2 = tf.layers.conv2d(relu5_1, 256, 3, activation=None, padding='same')
    bn5_2 = tf.layers.batch_normalization(conv5_2)
    res4_resize = tf.layers.conv2d(res4, 256, 3, (2,2), activation=None, padding='same')
    add5 = tf.add(res4_resize, bn5_2)
    res5 = tf.nn.relu(add5)

    # res_block_6
    conv6_1 = tf.layers.conv2d(res5, 256, 3, activation=None, padding='same')
    bn6_1 = tf.layers.batch_normalization(conv6_1)
    relu6_1 = tf.nn.relu(bn6_1)
    conv6_2 = tf.layers.conv2d(relu6_1, 256, 3, activation=None, padding='same')
    bn6_2 = tf.layers.batch_normalization(conv6_2)
    add6 = tf.add(res5, bn6_2)
    res6 = tf.nn.relu(add6)

    # res_block_7
    conv7_1 = tf.layers.conv2d(res6, 512, 3, (2,2), activation=None, padding='same')
    bn7_1 = tf.layers.batch_normalization(conv7_1)
    relu7_1 = tf.nn.relu(bn7_1)
    conv7_2 = tf.layers.conv2d(relu7_1, 512, 3, activation=None, padding='same')
    bn7_2 = tf.layers.batch_normalization(conv7_2)
    res6_resize = tf.layers.conv2d(res6, 512, 3, (2,2), activation=None, padding='same')
    add7 = tf.add(res6_resize, bn7_2)
    res7 = tf.nn.relu(add7)

    # res_block_8
    conv8_1 = tf.layers.conv2d(res7, 512, 3, activation=None, padding='same')
    bn8_1 = tf.layers.batch_normalization(conv8_1)
    relu8_1 = tf.nn.relu(bn8_1)
    conv8_2 = tf.layers.conv2d(relu8_1, 512, 3, activation=None, padding='same')
    bn8_2 = tf.layers.batch_normalization(conv8_2)
    add8 = tf.add(res7, bn8_2)
    res8 = tf.nn.relu(add8)

    conv_elim = tf.layers.conv2d(res8, 64, 3, activation=None, padding='same')
    res_flatten = tf.contrib.layers.flatten(conv_elim)
    fc1 = tf.contrib.layers.fully_connected(res_flatten, 80*60)
    fc2 = tf.contrib.layers.fully_connected(fc1, 80*60)
    y = tf.reshape(fc2, [-1, 60, 80, 1])


    # loss definition, MSE for raw test
    #loss = tf.losses.huber_loss(y_est, fine4, delta=0.5)
    loss = lossFunc(y, y_, m_)
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
    saver = tf.train.Saver()
    trainingLoss = []
    testingLoss = []

    batch_size = 15
    epoch = 200

    print('Loading start..')
    rd = rawDataLoader()
    # Loading the image database is required before any further operation;
    # this step takes ~30 s on my MacBook Pro.
    rd.loadImageNames()
    print('Loading end.')

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('training ')
        it_count = int(epoch*2200/batch_size)  # assumes a 2200-image training set
        for i in range(it_count):
            [im, dp, ms] = rd.getNextBatchTraining(batch_size)
            train_step.run(feed_dict={x: im, y_: dp, m_:ms})
            printProgress(i+1, it_count)
            if i % 50 == 0:
                [imt, dpt, mst] = rd.getNextBatchTesting(10)
                tmpTL = loss.eval(feed_dict={x: imt, y_: dpt, m_:mst})
                tmpLoss = loss.eval(feed_dict={x: im, y_: dp, m_:ms})
                print('loss = ' + str(tmpTL) + ' ' + str(tmpLoss) )
            if int(i*batch_size/2200) > int((i-1)*batch_size/2200):  # once per epoch
                tmpLoss = loss.eval(feed_dict={x: im, y_: dp, m_:ms})
                trainingLoss.append( tmpLoss )
                [imt, dpt, mst] = rd.getNextBatchTesting(10)
                tmpTL = loss.eval(feed_dict={x: imt, y_: dpt, m_:mst})
                testingLoss.append( tmpTL )
        saver.save(sess, 'saved_' + MODEL_NAME + '/model.ckpt')
        print('trained model saved. ')
    with open('analyst_new/loss_' + MODEL_NAME + '.pkl', 'wb') as f:
        pickle.dump([trainingLoss, testingLoss], f)
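The integer-division test in the training loop fires whenever i*batch_size crosses a multiple of 2200, i.e. roughly once per pass over the (assumed) 2200-image training set. A more explicit near-equivalent, as a sketch:

steps_per_epoch = 2200 // batch_size      # assumes 2200 training images
if (i + 1) % steps_per_epoch == 0:        # end of an epoch
    trainingLoss.append(loss.eval(feed_dict={x: im, y_: dp, m_: ms}))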
def main():

    # placeholders for data and target
    x = tf.placeholder(tf.float32, shape=[None, 240, 320, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 60, 80, 1])
    m_ = tf.placeholder(tf.float32, shape=[None, 60, 80, 1])
    # AlexNet-style convolutional feature extractor

    upper_conv1 = tf.layers.conv2d(x,
                                   96,
                                   11, (4, 4),
                                   activation=tf.nn.relu,
                                   padding='same')
    upper_nm1 = tf.nn.l2_normalize(upper_conv1, [1, 2])
    upper_mp1 = tf.layers.max_pooling2d(upper_nm1,
                                        strides=(2, 2),
                                        pool_size=(3, 3),
                                        padding='same')
    upper_conv2 = tf.layers.conv2d(upper_mp1,
                                   256,
                                   5,
                                   activation=tf.nn.relu,
                                   padding='same')
    upper_nm2 = tf.nn.l2_normalize(upper_conv2, [1, 2])
    upper_mp2 = tf.layers.max_pooling2d(upper_nm2,
                                        strides=(2, 2),
                                        pool_size=(2, 2),
                                        padding='same')
    upper_conv3 = tf.layers.conv2d(upper_mp2,
                                   256,
                                   3,
                                   activation=tf.nn.relu,
                                   padding='same')
    upper_conv4 = tf.layers.conv2d(upper_conv3,
                                   256,
                                   3,
                                   activation=tf.nn.relu,
                                   padding='same')
    upper_conv5 = tf.layers.conv2d(upper_conv4,
                                   256,
                                   3,
                                   activation=tf.nn.relu,
                                   padding='same')
    upper_conv6 = tf.layers.conv2d(upper_conv5,
                                   128,
                                   3,
                                   activation=tf.nn.relu,
                                   padding='same')
    total_ft1 = tf.contrib.layers.flatten(upper_conv6)
    fc1 = tf.contrib.layers.fully_connected(total_ft1, 4800)
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(fc1, keep_prob)
    fc2 = tf.contrib.layers.fully_connected(h_fc1_drop, 80 * 60)
    res = tf.reshape(fc2, [-1, 60, 80, 1])  # must match the [None, 60, 80, 1] target y_

    # loss definition, MSE for raw test
    #loss = tf.losses.huber_loss(y_est, fine4, delta=0.5)
    loss = lossFunc(res, y_, m_)
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
    saver = tf.train.Saver()
    trainingLoss = []
    testingLoss = []

    batch_size = 20
    epoch = 200

    print('Loading start..')
    rd = rawDataLoader()
    # Loading the image database is required before any further operation;
    # this step takes ~30 s on my MacBook Pro.
    rd.loadImageNames()
    print('Loading end.')

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('training ')
        it_count = int(epoch * 2200 / batch_size)  # assumes a 2200-image training set
        for i in range(it_count):
            [im, dp, ms] = rd.getNextBatchTraining(batch_size)
            train_step.run(feed_dict={x: im, y_: dp, m_: ms, keep_prob: 0.5})
            printProgress(i + 1, it_count)
            if i % 50 == 0:
                [imt, dpt, mst] = rd.getNextBatchTesting(10)
                tmpTL = loss.eval(feed_dict={
                    x: imt,
                    y_: dpt,
                    m_: mst,
                    keep_prob: 1
                })
                tmpLoss = loss.eval(feed_dict={
                    x: im,
                    y_: dp,
                    m_: ms,
                    keep_prob: 1
                })
                print('loss = ' + str(tmpTL) + ' ' + str(tmpLoss))
            if int(i * batch_size / 2200) > int((i - 1) * batch_size / 2200):  # once per epoch
                tmpLoss = loss.eval(feed_dict={
                    x: im,
                    y_: dp,
                    m_: ms,
                    keep_prob: 1
                })
                trainingLoss.append(tmpLoss)
                [imt, dpt, mst] = rd.getNextBatchTesting(10)
                tmpTL = loss.eval(feed_dict={
                    x: imt,
                    y_: dpt,
                    m_: mst,
                    keep_prob: 1
                })
                testingLoss.append(tmpTL)
        saver.save(sess, 'saved_' + MODEL_NAME + '/model.ckpt')
        print('trained model saved. ')
    with open('analyst_new/loss_' + MODEL_NAME + '.pkl', 'wb') as f:
        pickle.dump([trainingLoss, testingLoss], f)
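The pickled loss curves written by each training routine can be read back for plotting; a minimal sketch:

with open('analyst_new/loss_' + MODEL_NAME + '.pkl', 'rb') as f:
    trainingLoss, testingLoss = pickle.load(f)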