import datetime
import os

import numpy as np
import tensorflow as tf

# project-local helpers; the module paths below are assumptions
from network_builder import NetworkBuilder
from dataset_generator import DataSetGenerator

with tf.name_scope("Input") as scope:
    input_img = tf.placeholder(dtype='float',
                               shape=[None, 128, 128, 3],
                               name="Inputs")

with tf.name_scope("Target") as scope:
    target_labels = tf.placeholder(dtype='float',
                                   shape=[None, 2],
                                   name="Targets")

nb = NetworkBuilder()

with tf.name_scope("ModCosh") as scope:
    model = input_img
    model = nb.attach_cosh_layer(model)
    model = nb.attach_conv_layer(model, 32, summary=True)  # summary expects a bool, not the string 'True'
    model = nb.attach_relu_layer(model)
    model = nb.attach_conv_layer(model, 32, summary=True)
    model = nb.attach_relu_layer(model)
    model = nb.attach_pooling_layer(model)

    model = nb.attach_cosh_layer(model)
    model = nb.attach_conv_layer(model, 64, summary=True)
    model = nb.attach_relu_layer(model)
    model = nb.attach_conv_layer(model, 64, summary=True)
    model = nb.attach_relu_layer(model)
    model = nb.attach_pooling_layer(model)

    model = nb.attach_cosh_layer(model)
    model = nb.attach_conv_layer(model, 128, summary=True)
    model = nb.attach_relu_layer(model)
    model = nb.attach_conv_layer(model, 128, summary=True)
    model = nb.attach_relu_layer(model)
    model = nb.attach_pooling_layer(model)
    # (the rest of this example -- flatten, dense, softmax head -- is cut off
    # in the original source; the train() example below shows the same pattern)
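
# NetworkBuilder itself is not part of this extract. Below is a minimal sketch
# of the methods used above, assuming a plain TF1 implementation; the real
# class, its defaults, and especially the custom cosh layer may differ.
class NetworkBuilderSketch:
    def attach_conv_layer(self, model, output_size, feature_size=(3, 3),
                          strides=(1, 1, 1, 1), padding='SAME',
                          summary=False, use_bias=True):
        in_channels = int(model.get_shape()[-1])
        weights = tf.Variable(tf.truncated_normal(
            [feature_size[0], feature_size[1], in_channels, output_size],
            stddev=0.05))
        model = tf.nn.conv2d(model, weights, strides=list(strides),
                             padding=padding)
        if use_bias:
            model = tf.nn.bias_add(model, tf.Variable(tf.zeros([output_size])))
        if summary:
            tf.summary.histogram("conv_weights", weights)
        return model

    def attach_relu_layer(self, model):
        return tf.nn.relu(model)

    def attach_pooling_layer(self, model, ksize=(1, 2, 2, 1),
                             strides=(1, 2, 2, 1), padding='SAME'):
        return tf.nn.max_pool(model, ksize=list(ksize),
                              strides=list(strides), padding=padding)

    def attach_cosh_layer(self, model):
        # pure guess at the custom layer: a log-cosh style activation
        return tf.log(tf.cosh(model))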

# A ResNet-style segmentation network with dilated "multiscale" blocks and a
# deconvolutional decoder. The enclosing class and the `setting` config module
# (which supplies num_classes) are not part of this extract; the class name
# below is assumed.
class SegmentationModel:
    def _setup(self, input, is_training):

        num_classes = setting.num_classes
        nb = NetworkBuilder(is_training)
        keep_prob = tf.cond(is_training,
                            lambda: tf.constant(0.3),  # keep probability during training
                            lambda: tf.constant(1.0))  # disable dropout at inference

        model = input

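        # stem: 3x3 conv at stride 1, then batch norm and ReLU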
        model = nb.attach_conv_layer(model,
                                     output_size=32,
                                     feature_size=[3, 3],
                                     strides=[1, 1, 1, 1],
                                     padding=1,
                                     use_bias=True)
        model = nb.attach_batch_norm_layer(model)
        model = nb.attach_relu_layer(model)
        self.conv_3x3_out = model

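        # 7x7 conv at stride 2 halves the spatial resolution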
        model = nb.attach_conv_layer(model,
                                     output_size=64,
                                     feature_size=[7, 7],
                                     strides=[1, 2, 2, 1],
                                     padding=3,
                                     use_bias=True)
        model = nb.attach_batch_norm_layer(model)
        model = nb.attach_relu_layer(model)
        self.conv_7x7_out = model

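        # 2x2 max pool halves the resolution again (overall stride 4)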
        model = nb.attach_pooling_layer(model,
                                        ksize=[1, 2, 2, 1],
                                        strides=[1, 2, 2, 1],
                                        padding=0)
        self.max_pool_out = model

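        # stage 1: one projection ResNet block plus two identity blocks (256 channels)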
        model = nb.attach_resnet_block_2(model,
                                         d1=64,
                                         d2=256,
                                         strides=[1, 1, 1, 1])
        model = nb.attach_resnet_block_1(model, d1=64, d2=256)
        model = nb.attach_resnet_block_1(model, d1=64, d2=256)
        self.m_b1_out = model

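        # stage 2: downsample to stride 8, ending with a dilated multiscale block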
        model = nb.attach_resnet_block_2(model,
                                         d1=128,
                                         d2=512,
                                         strides=[1, 2, 2, 1])
        model = nb.attach_resnet_block_1(model, d1=128, d2=512)
        model = nb.attach_resnet_block_1(model, d1=128, d2=512)
        model = nb.attach_multiscale_block_1(model,
                                             d1=128,
                                             d2=512,
                                             d3=128,
                                             p=1,
                                             d=2)
        self.m_b2_out = model

        # skip branch: keep this stride-8 feature map for decoder fusion later
        skip_connection = model

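        # stage 3: stride 16, with dilation rates 2, 4, 8 and 16 to widen the receptive field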
        model = nb.attach_resnet_block_2(model,
                                         d1=256,
                                         d2=1024,
                                         strides=[1, 2, 2, 1])
        model = nb.attach_resnet_block_1(model, d1=256, d2=1024)
        model = nb.attach_multiscale_block_1(model,
                                             d1=256,
                                             d2=1024,
                                             d3=256,
                                             p=1,
                                             d=2)
        model = nb.attach_multiscale_block_1(model,
                                             d1=256,
                                             d2=1024,
                                             d3=256,
                                             p=1,
                                             d=4)
        model = nb.attach_multiscale_block_1(model,
                                             d1=256,
                                             d2=1024,
                                             d3=256,
                                             p=1,
                                             d=8)
        model = nb.attach_multiscale_block_1(model,
                                             d1=256,
                                             d2=1024,
                                             d3=256,
                                             p=1,
                                             d=16)
        self.m_b3_out = model

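        # stage 4: multiscale blocks with heavier dilation (p=2), followed by dropout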
        model = nb.attach_multiscale_block_2(model,
                                             d1=512,
                                             d2=2048,
                                             d3=512,
                                             p=2,
                                             d=4)
        model = nb.attach_multiscale_block_1(model,
                                             d1=512,
                                             d2=2048,
                                             d3=512,
                                             p=2,
                                             d=8)
        model = nb.attach_multiscale_block_1(model,
                                             d1=512,
                                             d2=2048,
                                             d3=512,
                                             p=2,
                                             d=16)
        model = nb.add_dropout(model, keep_prob)
        self.m_b4_out = model

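        # classifier head: 1x1 conv down to num_classes channels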
        model = nb.attach_conv_layer(model,
                                     output_size=num_classes,
                                     feature_size=[1, 1],
                                     strides=[1, 1, 1, 1],
                                     padding=0,
                                     use_bias=True)
        model = nb.attach_batch_norm_layer(model)
        self.out = model

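        # decoder, step 1: learnable 2x upsampling via transposed convolution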
        model = nb.attach_conv_transpose_layer(model,
                                               output_size=num_classes * 2,
                                               feature_size=[4, 4],
                                               strides=[2, 2],
                                               padding=1,
                                               use_bias=True,
                                               scale=2)
        # note: batch norm after this deconv is currently disabled
        # model = nb.attach_batch_norm_layer(model)
        self.deconv_up1 = model

        # check this branch carefully, it may misbehave later
        skip_connection = nb.attach_conv_layer(skip_connection,
                                               output_size=num_classes * 2,
                                               feature_size=[1, 1],
                                               strides=[1, 1, 1, 1],
                                               padding=0,
                                               use_bias=True)
        skip_connection = nb.attach_batch_norm_layer(skip_connection)
        self.skip = skip_connection

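        # fuse the 2x-upsampled logits with the projected skip branch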
        model = nb.attach_element_wise_sum(model, skip_connection)
        self.up1 = model

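        # decoder, step 2: 8x upsampling back to the input resolution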
        model = nb.attach_conv_transpose_layer(model,
                                               output_size=num_classes,
                                               feature_size=[16, 16],
                                               strides=[8, 8],
                                               padding=4,
                                               use_bias=True,
                                               scale=8)
        model = nb.attach_batch_norm_layer(model)
        self.deconv_up2 = model

        # optional: crop the upsampled map back to the exact input size (disabled)
        # model = nb.attach_cropping_layer(input, model)
        # self.cropped_deconv_up2 = model

        # optional refinement: 1x1 conv over the upsampled logits
        model = nb.attach_conv_layer(model,
                                     output_size=num_classes,
                                     feature_size=[1, 1],
                                     strides=[1, 1, 1, 1],
                                     padding="SAME",
                                     use_bias=False)

        self.up2 = model
        return model
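

# The resnet/multiscale block helpers above are project-local and not shown
# here. A rough sketch of the idea behind attach_multiscale_block_1 (not the
# author's implementation; the role of its `p` parameter is unclear from the
# extract and is omitted): a bottleneck residual block whose 3x3 conv is
# dilated by `d`, enlarging the receptive field without further downsampling.
def multiscale_block_sketch(model, d1, d2, d3, d):
    shortcut = model  # assumes the input already has d2 channels
    in_channels = int(model.get_shape()[-1])
    w1 = tf.Variable(tf.truncated_normal([1, 1, in_channels, d1], stddev=0.05))
    out = tf.nn.relu(tf.nn.conv2d(model, w1, [1, 1, 1, 1], 'SAME'))
    w2 = tf.Variable(tf.truncated_normal([3, 3, d1, d3], stddev=0.05))
    out = tf.nn.relu(tf.nn.atrous_conv2d(out, w2, rate=d, padding='SAME'))
    w3 = tf.Variable(tf.truncated_normal([1, 1, d3, d2], stddev=0.05))
    out = tf.nn.conv2d(out, w3, [1, 1, 1, 1], 'SAME')
    return tf.nn.relu(out + shortcut)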

# Example 3: training a face-recognition classifier
def train():

    dg = DataSetGenerator("training_face_rec")
    MAX_LABELS = len(dg.data_labels)
    dg.save_labels()

    with tf.name_scope("Input") as scope:
        input_img = tf.placeholder(dtype='float', shape=[None, 128, 128, 3], name="input")

    with tf.name_scope("Target") as scope:
        target_labels = tf.placeholder(dtype='float', shape=[None, MAX_LABELS], name="Targets")

    with tf.name_scope("Keep_prob_input") as scope:
        keep_prob = tf.placeholder(dtype='float',name='keep_prob')

    nb = NetworkBuilder()

    with tf.name_scope("ModelV2") as scope:
        model = input_img
        model = nb.attach_conv_layer(model, 32, summary=True)
        model = nb.attach_relu_layer(model)
        model = nb.attach_conv_layer(model, 32, summary=True)
        model = nb.attach_relu_layer(model)
        model = nb.attach_pooling_layer(model)

        model = nb.attach_conv_layer(model, 64, summary=True)
        model = nb.attach_relu_layer(model)
        model = nb.attach_conv_layer(model, 64, summary=True)
        model = nb.attach_relu_layer(model)
        model = nb.attach_pooling_layer(model)

        model = nb.attach_conv_layer(model, 128, summary=True)
        model = nb.attach_relu_layer(model)
        model = nb.attach_conv_layer(model, 128, summary=True)
        model = nb.attach_relu_layer(model)
        model = nb.attach_pooling_layer(model)

        model = nb.flatten(model)
        model = nb.attach_dense_layer(model, 200, summary=True)
        model = nb.attach_sigmoid_layer(model)
        model = nb.attach_dense_layer(model, 32, summary=True)
        model = nb.attach_sigmoid_layer(model)
        model = nb.attach_dense_layer(model, MAX_LABELS)
        prediction = nb.attach_softmax_layer(model)


    with tf.name_scope("Optimization") as scope:
        global_step = tf.Variable(0, name='global_step', trainable=False)
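        # the loss consumes the raw logits (`model`), not the softmax `prediction`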
        cost = tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=target_labels)
        cost = tf.reduce_mean(cost)
        tf.summary.scalar("cost", cost)
        optimizer = tf.train.AdamOptimizer().minimize(cost, global_step=global_step)

    with tf.name_scope('accuracy') as scope:
        correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(target_labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        tf.summary.scalar("accuracy", accuracy)
    # dg = DataSetGenerator("../data/twins")

    test_x, test_y = dg.load_test_images(image_size=(128, 128))
    # note: training batches are scaled by 1/255 in the loop below; test images
    # need the same preprocessing unless load_test_images already applies it
    epochs = 10
    batchSize = 10

    saver = tf.train.Saver()
    model_save_path = "./saved model v2/"
    model_name = 'model'
    is_Train = True



    with tf.Session() as sess:
        summaryMerged = tf.summary.merge_all()
        
        filename = "./summary_log/run" + datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%s")
        # setting global steps
        tf.global_variables_initializer().run()

        if os.path.exists(model_save_path+'checkpoint'):
            # saver = tf.train.import_meta_graph('./saved '+modelName+'/model.ckpt.meta')
            saver.restore(sess, tf.train.latest_checkpoint(model_save_path))
        if is_Train:
            writer = tf.summary.FileWriter(filename+"_train", sess.graph)
        test_writer = tf.summary.FileWriter(filename+"_test", sess.graph)
        start_time = datetime.datetime.now()

        for epoch in range(epochs):
            
            batches = dg.get_mini_batches(batchSize, (128, 128), allchannel=True)

            for imgs, labels in batches:
                # X_train, X_test, y_train, y_test = train_test_split(imgs, labels, test_size=0.33, random_state=42)
                imgs = np.divide(imgs, 255)  # scale pixels to [0, 1]
                error, sumOut, acu, steps, _ = sess.run(
                    [cost, summaryMerged, accuracy, global_step, optimizer],
                    feed_dict={input_img: imgs, target_labels: labels})
                if is_Train:  # `writer` only exists when is_Train is True
                    writer.add_summary(sumOut, steps)
                print("epoch=", epoch, "steps=", steps,
                      "Total Samples Trained=", steps * batchSize,
                      "err=", error, "accuracy=", acu)
                if steps % 10 == 0:  # record summaries and test-set accuracy
                    error, sumOut, acu, steps = sess.run(
                        [cost, summaryMerged, accuracy, global_step],
                        feed_dict={input_img: test_x, target_labels: test_y})
                    test_writer.add_summary(sumOut, steps)
                    print("Test : error=", error, "epoch=", epoch, "steps=", steps, "accuracy=", acu)
                if steps % 100 == 0:
                    print("Saving the model")
                    saver.save(sess, model_save_path+model_name, global_step=steps)
        saver.save(sess, model_save_path + model_name)

    # the session is closed automatically when the `with` block exits
    end_time = datetime.datetime.now()
    print(diff_time(start_time, end_time))
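

# diff_time is not defined in this extract; a minimal stand-in, assuming it
# simply reports the elapsed wall-clock time:
def diff_time(start_time, end_time):
    return "Total training time: %s" % (end_time - start_time)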