def defineEncodingVars(self):
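        #Wraps the pretrained VGG-16 conv weights (conv1_1 through conv5_3) and the
        #fc6/fc7 weights used by the classifier head as TensorFlow variables, reading
        #them from the numpy dict returned by loadWeights.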
        if self.vggFile:
            npWeights = loadWeights(self.vggFile)
        else:
            print("Must load from weights")
            assert(0)

        self.W_conv1_1 = weight_variable_fromnp(npWeights["conv1_1_w"], "w_conv1_1")
        self.B_conv1_1 = weight_variable_fromnp(npWeights["conv1_1_b"], "b_conv1_1")
        self.W_conv1_2 = weight_variable_fromnp(npWeights["conv1_2_w"], "w_conv1_2")
        self.B_conv1_2 = weight_variable_fromnp(npWeights["conv1_2_b"], "b_conv1_2")

        self.W_conv2_1 = weight_variable_fromnp(npWeights["conv2_1_w"], "w_conv2_1")
        self.B_conv2_1 = weight_variable_fromnp(npWeights["conv2_1_b"], "b_conv2_1")
        self.W_conv2_2 = weight_variable_fromnp(npWeights["conv2_2_w"], "w_conv2_2")
        self.B_conv2_2 = weight_variable_fromnp(npWeights["conv2_2_b"], "b_conv2_2")

        self.W_conv3_1 = weight_variable_fromnp(npWeights["conv3_1_w"], "w_conv3_1")
        self.B_conv3_1 = weight_variable_fromnp(npWeights["conv3_1_b"], "b_conv3_1")
        self.W_conv3_2 = weight_variable_fromnp(npWeights["conv3_2_w"], "w_conv3_2")
        self.B_conv3_2 = weight_variable_fromnp(npWeights["conv3_2_b"], "b_conv3_2")
        self.W_conv3_3 = weight_variable_fromnp(npWeights["conv3_3_w"], "w_conv3_3")
        self.B_conv3_3 = weight_variable_fromnp(npWeights["conv3_3_b"], "b_conv3_3")

        self.W_conv4_1 = weight_variable_fromnp(npWeights["conv4_1_w"], "w_conv4_1")
        self.B_conv4_1 = weight_variable_fromnp(npWeights["conv4_1_b"], "b_conv4_1")
        self.W_conv4_2 = weight_variable_fromnp(npWeights["conv4_2_w"], "w_conv4_2")
        self.B_conv4_2 = weight_variable_fromnp(npWeights["conv4_2_b"], "b_conv4_2")
        self.W_conv4_3 = weight_variable_fromnp(npWeights["conv4_3_w"], "w_conv4_3")
        self.B_conv4_3 = weight_variable_fromnp(npWeights["conv4_3_b"], "b_conv4_3")

        self.W_conv5_1 = weight_variable_fromnp(npWeights["conv5_1_w"], "w_conv5_1")
        self.B_conv5_1 = weight_variable_fromnp(npWeights["conv5_1_b"], "b_conv5_1")
        self.W_conv5_2 = weight_variable_fromnp(npWeights["conv5_2_w"], "w_conv5_2")
        self.B_conv5_2 = weight_variable_fromnp(npWeights["conv5_2_b"], "b_conv5_2")
        self.W_conv5_3 = weight_variable_fromnp(npWeights["conv5_3_w"], "w_conv5_3")
        self.B_conv5_3 = weight_variable_fromnp(npWeights["conv5_3_b"], "b_conv5_3")

        #These belong to the classifier head, but are defined here so they can be initialized from npWeights
        self.class_fc_1_weight = weight_variable_fromnp(npWeights['fc6_w'], "class_fc_1_weight")
        self.class_fc_1_bias   = weight_variable_fromnp(npWeights['fc6_b'], "class_fc_1_bias")
        self.class_fc_2_weight = weight_variable_fromnp(npWeights['fc7_w'], "class_fc_2_weight")
        self.class_fc_2_bias   = weight_variable_fromnp(npWeights['fc7_b'], "class_fc_2_bias")
Example #2
    def buildModel(self, inputShape):
        if self.vggFile:
            npWeights = loadWeights(self.vggFile)
        else:
            print("Must load from weights")
            assert(0)

        #Running on GPU
        with tf.device(self.device):
            with tf.name_scope("inputOps"):
                #Get convolution variables as placeholders
                self.inputImage = node_variable([self.batchSize, inputShape[0], inputShape[1], inputShape[2]], "inputImage")
                self.gt = node_variable([self.batchSize, self.numClasses], "gt")
                #Model variables for convolutions

            with tf.name_scope("Conv1Ops"):
                self.W_conv1_1 = weight_variable_fromnp(npWeights["conv1_1_w"], "w_conv1_1")
                self.B_conv1_1 = weight_variable_fromnp(npWeights["conv1_1_b"], "b_conv1_1")
                self.W_conv1_2 = weight_variable_fromnp(npWeights["conv1_2_w"], "w_conv1_2")
                self.B_conv1_2 = weight_variable_fromnp(npWeights["conv1_2_b"], "b_conv1_2")

                self.h_conv1_1 = tf.nn.relu(conv2d(self.inputImage, self.W_conv1_1, "conv1_1", stride=[1, 1, 1, 1]) + self.B_conv1_1)
                self.h_conv1_2 = tf.nn.relu(conv2d(self.h_conv1_1, self.W_conv1_2, "conv1_2", stride=[1, 1, 1, 1]) + self.B_conv1_2)
                self.h_pool1 = maxpool_2x2(self.h_conv1_2, "pool1")

            with tf.name_scope("Conv2Ops"):
                self.W_conv2_1 = weight_variable_fromnp(npWeights["conv2_1_w"], "w_conv2_1")
                self.B_conv2_1 = weight_variable_fromnp(npWeights["conv2_1_b"], "b_conv2_1")
                self.W_conv2_2 = weight_variable_fromnp(npWeights["conv2_2_w"], "w_conv2_2")
                self.B_conv2_2 = weight_variable_fromnp(npWeights["conv2_2_b"], "b_conv2_2")

                self.h_conv2_1 = tf.nn.relu(conv2d(self.h_pool1, self.W_conv2_1, "conv2_1") + self.B_conv2_1)
                self.h_conv2_2 = tf.nn.relu(conv2d(self.h_conv2_1, self.W_conv2_2, "conv2_2") + self.B_conv2_2)
                self.h_pool2 = maxpool_2x2(self.h_conv2_2, "pool2")

            with tf.name_scope("Conv3Ops"):
                self.W_conv3_1 = weight_variable_fromnp(npWeights["conv3_1_w"], "w_conv3_1")
                self.B_conv3_1 = weight_variable_fromnp(npWeights["conv3_1_b"], "b_conv3_1")
                self.W_conv3_2 = weight_variable_fromnp(npWeights["conv3_2_w"], "w_conv3_2")
                self.B_conv3_2 = weight_variable_fromnp(npWeights["conv3_2_b"], "b_conv3_2")
                self.W_conv3_3 = weight_variable_fromnp(npWeights["conv3_3_w"], "w_conv3_3")
                self.B_conv3_3 = weight_variable_fromnp(npWeights["conv3_3_b"], "b_conv3_3")

                self.h_conv3_1 = tf.nn.relu(conv2d(self.h_pool2, self.W_conv3_1, "conv3_1") + self.B_conv3_1)
                self.h_conv3_2 = tf.nn.relu(conv2d(self.h_conv3_1, self.W_conv3_2, "conv3_2") + self.B_conv3_2)
                self.h_conv3_3 = tf.nn.relu(conv2d(self.h_conv3_2, self.W_conv3_3, "conv3_3") + self.B_conv3_3)
                self.h_pool3 = maxpool_2x2(self.h_conv3_3, "pool3")

            with tf.name_scope("Conv4Ops"):
                self.W_conv4_1 = weight_variable_fromnp(npWeights["conv4_1_w"], "w_conv4_1")
                self.B_conv4_1 = weight_variable_fromnp(npWeights["conv4_1_b"], "b_conv4_1")
                self.W_conv4_2 = weight_variable_fromnp(npWeights["conv4_2_w"], "w_conv4_2")
                self.B_conv4_2 = weight_variable_fromnp(npWeights["conv4_2_b"], "b_conv4_2")
                self.W_conv4_3 = weight_variable_fromnp(npWeights["conv4_3_w"], "w_conv4_3")
                self.B_conv4_3 = weight_variable_fromnp(npWeights["conv4_3_b"], "b_conv4_3")

                self.h_conv4_1 = tf.nn.relu(conv2d(self.h_pool3, self.W_conv4_1, "conv4_1") + self.B_conv4_1)
                self.h_conv4_2 = tf.nn.relu(conv2d(self.h_conv4_1, self.W_conv4_2, "conv4_2") + self.B_conv4_2)
                self.h_conv4_3 = tf.nn.relu(conv2d(self.h_conv4_2, self.W_conv4_3, "conv4_3") + self.B_conv4_3)
                self.h_pool4 = maxpool_2x2(self.h_conv4_3, "pool4")


            with tf.name_scope("Conv5Ops"):
                self.W_conv5_1 = weight_variable_fromnp(npWeights["conv5_1_w"], "w_conv5_1")
                self.B_conv5_1 = weight_variable_fromnp(npWeights["conv5_1_b"], "b_conv5_1")
                self.W_conv5_2 = weight_variable_fromnp(npWeights["conv5_2_w"], "w_conv5_2")
                self.B_conv5_2 = weight_variable_fromnp(npWeights["conv5_2_b"], "b_conv5_2")
                self.W_conv5_3 = weight_variable_fromnp(npWeights["conv5_3_w"], "w_conv5_3")
                self.B_conv5_3 = weight_variable_fromnp(npWeights["conv5_3_b"], "b_conv5_3")

                self.h_conv5_1 = tf.nn.relu(conv2d(self.h_pool4, self.W_conv5_1, "conv5_1") + self.B_conv5_1)
                self.h_conv5_2 = tf.nn.relu(conv2d(self.h_conv5_1, self.W_conv5_2, "conv5_2") + self.B_conv5_2)
                self.h_conv5_3 = tf.nn.relu(conv2d(self.h_conv5_2, self.W_conv5_3, "conv5_3") + self.B_conv5_3)

            #Factor of 16 comes from the four 2x2 pooling layers
            self.h_conv5_shape = [self.batchSize, inputShape[0]/16, inputShape[1]/16, 512]
            assert(inputShape[0]/16 == 14)
            with tf.name_scope("GAP"):
                self.h_gap = tf.reduce_mean(self.h_conv5_3, reduction_indices=[1, 2])
                self.W_gap = weight_variable_xavier([512, self.numClasses], "w_gap", conv=False)
                self.B_gap = bias_variable([self.numClasses], "b_gap")
                self.est = tf.nn.softmax(tf.matmul(self.h_gap, self.W_gap)+self.B_gap)

            with tf.name_scope("CAM"):
                self.h_reshape_gap = tf.reshape(self.h_conv5_3, [self.batchSize*self.h_conv5_shape[1]*self.h_conv5_shape[2], -1])
                self.flat_cam = tf.matmul(self.h_reshape_gap, self.W_gap)
                self.reshape_cam = tf.reshape(self.flat_cam, [self.batchSize, self.h_conv5_shape[1], self.h_conv5_shape[2], -1])
                #self.softmax_cam = pixelSoftmax(self.reshape_cam)
                self.cam = tf.transpose(self.reshape_cam, [0, 3, 1, 2])

            with tf.name_scope("Loss"):
                #Define loss
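                #Categorical cross-entropy between the one-hot gt and the softmax
                #output; epsilon keeps the log away from log(0)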
                self.loss = tf.reduce_mean(-tf.reduce_sum(self.gt * tf.log(self.est+self.epsilon), reduction_indices=[1]))

            with tf.name_scope("Opt"):
                #Define optimizer
                self.optimizerAll = tf.train.AdamOptimizer(self.learningRate, beta1=self.beta1, beta2=self.beta2, epsilon=self.epsilon).minimize(self.loss)
                #self.optimizerAll = tf.train.MomentumOptimizer(self.learningRate, momentum=self.beta1).minimize(self.loss)
                self.optimizerPre = tf.train.AdamOptimizer(self.learningRate, beta1=self.beta1, beta2=self.beta2, epsilon=self.epsilon).minimize(self.loss,
                        var_list=[
                            self.W_gap,
                            self.B_gap,
                            ]
                        )

            with tf.name_scope("Metric"):
                self.correct = tf.equal(tf.argmax(self.gt, 1), tf.argmax(self.est, 1))
                self.accuracy = tf.reduce_mean(tf.cast(self.correct, tf.float32))

        #Cannot be on GPU
        (self.eval_vals, self.eval_idx) = tf.nn.top_k(self.est, k=11)

        #Summaries
        tf.scalar_summary('loss', self.loss, name="lossSum")
        tf.scalar_summary('accuracy', self.accuracy, name="accSum")

        tf.histogram_summary('input', self.inputImage, name="image")
        tf.histogram_summary('gt', self.gt, name="gt")
        #Conv layer histograms
        tf.histogram_summary('conv1_1', self.h_conv1_1, name="conv1_1")
        tf.histogram_summary('conv1_2', self.h_conv1_2, name="conv1_2")
        tf.histogram_summary('conv2_1', self.h_conv2_1, name="conv2_1")
        tf.histogram_summary('conv2_2', self.h_conv2_2, name="conv2_2")
        tf.histogram_summary('conv3_1', self.h_conv3_1, name="conv3_1")
        tf.histogram_summary('conv3_2', self.h_conv3_2, name="conv3_2")
        tf.histogram_summary('conv3_3', self.h_conv3_3, name="conv3_3")
        tf.histogram_summary('conv4_1', self.h_conv4_1, name="conv4_1")
        tf.histogram_summary('conv4_2', self.h_conv4_2, name="conv4_2")
        tf.histogram_summary('conv4_3', self.h_conv4_3, name="conv4_3")
        tf.histogram_summary('conv5_1', self.h_conv5_1, name="conv5_1")
        tf.histogram_summary('conv5_2', self.h_conv5_2, name="conv5_2")
        tf.histogram_summary('conv5_3', self.h_conv5_3, name="conv5_3")
        tf.histogram_summary('gap', self.h_gap, name="gap")
        tf.histogram_summary('est', self.est, name="est")
        #Weight and bias hists
        tf.histogram_summary('w_conv1_1', self.W_conv1_1, name="w_conv1_1")
        tf.histogram_summary('b_conv1_1', self.B_conv1_1, name="b_conv1_1")
        tf.histogram_summary('w_conv1_2', self.W_conv1_2, name="w_conv1_2")
        tf.histogram_summary('b_conv1_2', self.B_conv1_2, name="b_conv1_2")
        tf.histogram_summary('w_conv2_1', self.W_conv2_1, name="w_conv2_1")
        tf.histogram_summary('b_conv2_1', self.B_conv2_1, name="b_conv2_1")
        tf.histogram_summary('w_conv2_2', self.W_conv2_2, name="w_conv2_2")
        tf.histogram_summary('b_conv2_2', self.B_conv2_2, name="b_conv2_2")
        tf.histogram_summary('w_conv3_1', self.W_conv3_1, name="w_conv3_1")
        tf.histogram_summary('b_conv3_1', self.B_conv3_1, name="b_conv3_1")
        tf.histogram_summary('w_conv3_2', self.W_conv3_2, name="w_conv3_2")
        tf.histogram_summary('b_conv3_2', self.B_conv3_2, name="b_conv3_2")
        tf.histogram_summary('w_conv3_3', self.W_conv3_3, name="w_conv3_3")
        tf.histogram_summary('b_conv3_3', self.B_conv3_3, name="b_conv3_3")
        tf.histogram_summary('w_conv4_1', self.W_conv4_1, name="w_conv4_1")
        tf.histogram_summary('b_conv4_1', self.B_conv4_1, name="b_conv4_1")
        tf.histogram_summary('w_conv4_2', self.W_conv4_2, name="w_conv4_2")
        tf.histogram_summary('b_conv4_2', self.B_conv4_2, name="b_conv4_2")
        tf.histogram_summary('w_conv4_3', self.W_conv4_3, name="w_conv4_3")
        tf.histogram_summary('b_conv4_3', self.B_conv4_3, name="b_conv4_3")
        tf.histogram_summary('w_conv5_1', self.W_conv5_1, name="w_conv5_1")
        tf.histogram_summary('b_conv5_1', self.B_conv5_1, name="b_conv5_1")
        tf.histogram_summary('w_conv5_2', self.W_conv5_2, name="w_conv5_2")
        tf.histogram_summary('b_conv5_2', self.B_conv5_2, name="b_conv5_2")
        tf.histogram_summary('w_conv5_3', self.W_conv5_3, name="w_conv5_3")
        tf.histogram_summary('b_conv5_3', self.B_conv5_3, name="b_conv5_3")
        tf.histogram_summary('w_gap', self.W_gap, name="w_gap")
        tf.histogram_summary('b_gap', self.B_gap, name="b_gap")
Example #3
    def buildModel(self, inputShape):
        if self.vggFile:
            npWeights = loadWeights(self.vggFile)
        else:
            print("Must load from weights")
            assert(0)

        #Running on GPU
        with tf.device(self.device):
            with tf.name_scope("inputOps"):
                #Get convolution variables as placeholders
                self.inputImage = node_variable([
                    self.batchSize, inputShape[0], inputShape[1], inputShape[2]
                ], "inputImage")
                self.gt = node_variable([self.batchSize, self.numClasses],
                                        "gt")
                self.notGt = 1 - self.gt
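                #notGt is the complement of the label mask (1 for absent classes);
                #it selects the negative-class scores in the pairwise loss below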

            with tf.name_scope("Conv1Ops"):
                self.W_conv1_1 = weight_variable_fromnp(
                    npWeights["conv1_1_w"], "w_conv1_1")
                self.B_conv1_1 = weight_variable_fromnp(
                    npWeights["conv1_1_b"], "b_conv1_1")
                self.W_conv1_2 = weight_variable_fromnp(
                    npWeights["conv1_2_w"], "w_conv1_2")
                self.B_conv1_2 = weight_variable_fromnp(
                    npWeights["conv1_2_b"], "b_conv1_2")

                self.h_conv1_1 = tf.nn.relu(
                    conv2d(self.inputImage,
                           self.W_conv1_1,
                           "conv1_1",
                           stride=[1, 1, 1, 1]) + self.B_conv1_1)
                self.h_conv1_2 = tf.nn.relu(
                    conv2d(self.h_conv1_1,
                           self.W_conv1_2,
                           "conv1_1",
                           stride=[1, 1, 1, 1]) + self.B_conv1_2)
                self.h_pool1 = maxpool_2x2(self.h_conv1_2, "pool1")

            with tf.name_scope("Conv2Ops"):
                self.W_conv2_1 = weight_variable_fromnp(
                    npWeights["conv2_1_w"], "w_conv2_1")
                self.B_conv2_1 = weight_variable_fromnp(
                    npWeights["conv2_1_b"], "b_conv2_1")
                self.W_conv2_2 = weight_variable_fromnp(
                    npWeights["conv2_2_w"], "w_conv2_2")
                self.B_conv2_2 = weight_variable_fromnp(
                    npWeights["conv2_2_b"], "b_conv2_2")

                self.h_conv2_1 = tf.nn.relu(
                    conv2d(self.h_pool1, self.W_conv2_1, "conv2_1") +
                    self.B_conv2_1)
                self.h_conv2_2 = tf.nn.relu(
                    conv2d(self.h_conv2_1, self.W_conv2_2, "conv2_2") +
                    self.B_conv2_2)
                self.h_pool2 = maxpool_2x2(self.h_conv2_2, "pool2")

            with tf.name_scope("Conv3Ops"):
                self.W_conv3_1 = weight_variable_fromnp(
                    npWeights["conv3_1_w"], "w_conv3_1")
                self.B_conv3_1 = weight_variable_fromnp(
                    npWeights["conv3_1_b"], "b_conv3_1")
                self.W_conv3_2 = weight_variable_fromnp(
                    npWeights["conv3_2_w"], "w_conv3_2")
                self.B_conv3_2 = weight_variable_fromnp(
                    npWeights["conv3_2_b"], "b_conv3_2")
                self.W_conv3_3 = weight_variable_fromnp(
                    npWeights["conv3_3_w"], "w_conv3_3")
                self.B_conv3_3 = weight_variable_fromnp(
                    npWeights["conv3_3_b"], "b_conv3_3")

                self.h_conv3_1 = tf.nn.relu(
                    conv2d(self.h_pool2, self.W_conv3_1, "conv3_1") +
                    self.B_conv3_1)
                self.h_conv3_2 = tf.nn.relu(
                    conv2d(self.h_conv3_1, self.W_conv3_2, "conv3_2") +
                    self.B_conv3_2)
                self.h_conv3_3 = tf.nn.relu(
                    conv2d(self.h_conv3_2, self.W_conv3_3, "conv3_3") +
                    self.B_conv3_3)
                self.h_pool3 = maxpool_2x2(self.h_conv3_3, "pool3")

            with tf.name_scope("Conv4Ops"):
                self.W_conv4_1 = weight_variable_fromnp(
                    npWeights["conv4_1_w"], "w_conv4_1")
                self.B_conv4_1 = weight_variable_fromnp(
                    npWeights["conv4_1_b"], "b_conv4_1")
                self.W_conv4_2 = weight_variable_fromnp(
                    npWeights["conv4_2_w"], "w_conv4_2")
                self.B_conv4_2 = weight_variable_fromnp(
                    npWeights["conv4_2_b"], "b_conv4_2")
                self.W_conv4_3 = weight_variable_fromnp(
                    npWeights["conv4_3_w"], "w_conv4_3")
                self.B_conv4_3 = weight_variable_fromnp(
                    npWeights["conv4_3_b"], "b_conv4_3")

                self.h_conv4_1 = tf.nn.relu(
                    conv2d(self.h_pool3, self.W_conv4_1, "conv4_1") +
                    self.B_conv4_1)
                self.h_conv4_2 = tf.nn.relu(
                    conv2d(self.h_conv4_1, self.W_conv4_2, "conv4_2") +
                    self.B_conv4_2)
                self.h_conv4_3 = tf.nn.relu(
                    conv2d(self.h_conv4_2, self.W_conv4_3, "conv4_3") +
                    self.B_conv4_3)
                self.h_pool4 = maxpool_2x2(self.h_conv4_3, "pool4")

            with tf.name_scope("Conv5Ops"):
                self.W_conv5_1 = weight_variable_fromnp(
                    npWeights["conv5_1_w"], "w_conv5_1")
                self.B_conv5_1 = weight_variable_fromnp(
                    npWeights["conv5_1_b"], "b_conv5_1")
                self.W_conv5_2 = weight_variable_fromnp(
                    npWeights["conv5_2_w"], "w_conv5_2")
                self.B_conv5_2 = weight_variable_fromnp(
                    npWeights["conv5_2_b"], "b_conv5_2")
                self.W_conv5_3 = weight_variable_fromnp(
                    npWeights["conv5_3_w"], "w_conv5_3")
                self.B_conv5_3 = weight_variable_fromnp(
                    npWeights["conv5_3_b"], "b_conv5_3")

                self.h_conv5_1 = tf.nn.relu(
                    conv2d(self.h_pool4, self.W_conv5_1, "conv5_1") +
                    self.B_conv5_1)
                self.h_conv5_2 = tf.nn.relu(
                    conv2d(self.h_conv5_1, self.W_conv5_2, "conv5_2") +
                    self.B_conv5_2)
                self.h_conv5_3 = tf.nn.relu(
                    conv2d(self.h_conv5_2, self.W_conv5_3, "conv5_3") +
                    self.B_conv5_3)
                self.h_pool5 = maxpool_2x2(self.h_conv5_3, "pool5")

        #with tf.device('cpu:0'):

            self.keep_prob = tf.placeholder(tf.float32)
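            #Dropout keep probability, fed at run time (typically below 1.0 for
            #training and 1.0 for evaluation)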
            with tf.name_scope("FC6"):
                self.W_fc6 = weight_variable_fromnp(npWeights["fc6_w"],
                                                    "w_fc6")
                self.B_fc6 = weight_variable_fromnp(npWeights["fc6_b"],
                                                    "b_fc6")
                h_pool5_flat = tf.reshape(self.h_pool5,
                                          [self.batchSize, 7 * 7 * 512])
                self.h_fc6 = tf.nn.relu(
                    tf.matmul(h_pool5_flat, self.W_fc6, name="fc6") +
                    self.B_fc6, "fc6_relu")
                self.drop_h_fc6 = tf.nn.dropout(self.h_fc6, self.keep_prob)

        #with tf.device(self.device):
            with tf.name_scope("FC7"):
                self.W_fc7 = weight_variable_fromnp(npWeights["fc7_w"],
                                                    "w_fc7")
                self.B_fc7 = weight_variable_fromnp(npWeights["fc7_b"],
                                                    "b_fc7")
                self.h_fc7 = tf.nn.relu(
                    tf.matmul(self.drop_h_fc6, self.W_fc7, name="fc7") +
                    self.B_fc7, "fc7_relu")
                self.drop_h_fc7 = tf.nn.dropout(self.h_fc7, self.keep_prob)

            with tf.name_scope("FC8"):
                self.W_fc8 = weight_variable_xavier([4096, 20], "w_fc8")
                self.B_fc8 = bias_variable([20], "b_fc8")
                #self.est = tf.nn.softmax(tf.matmul(self.drop_h_fc7, self.W_fc8, name="fc8") + self.B_fc8)
                self.est = tf.matmul(self.drop_h_fc7, self.W_fc8,
                                     name="fc8") + self.B_fc8

            with tf.name_scope("Loss"):
                #Grab positive and negative classes as needed
                self.posArray = tf.expand_dims(self.gt * self.est, dim=1)
                self.negArray = tf.expand_dims(self.notGt * self.est, dim=2)
                #tf broadcasting expands this into a [batch, class, class] matrix
                self.cSum = (1 - self.posArray) + self.negArray
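                #posArray is [batch, 1, class] and negArray is [batch, class, 1], so
                #broadcasting gives cSum[b, i, j] = 1 - pos_score_j + neg_score_i,
                #a hinge-style margin for every (negative i, positive j) class pair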
                self.pairLoss = tf.reduce_mean(self.cSum,
                                               reduction_indices=[1, 2])
                self.loss = tf.reduce_mean(tf.nn.relu(self.pairLoss))

                #self.loss = tf.reduce_mean(-tf.reduce_sum(self.gt * tf.log(self.est+self.epsilon), reduction_indices=[1]))
                #self.regLoss = self.loss + self.regStrength * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
                #self.nan_check_loss = tf.verify_tensor_all_finite(self.loss, msg="check_nan")

            with tf.name_scope("Opt"):
                #Define optimizer
                self.optimizerAll = tf.train.AdamOptimizer(
                    self.learningRate,
                    beta1=self.beta1,
                    beta2=self.beta2,
                    epsilon=self.epsilon).minimize(self.loss)
                #self.optimizerAll = tf.train.MomentumOptimizer(self.learningRate, momentum=self.beta1).minimize(self.loss)
                self.optimizerPre = tf.train.AdamOptimizer(
                    self.learningRate,
                    beta1=self.beta1,
                    beta2=self.beta2,
                    epsilon=self.epsilon).minimize(self.loss,
                                                   var_list=[
                                                       self.W_fc6,
                                                       self.B_fc6,
                                                       self.W_fc7,
                                                       self.B_fc7,
                                                       self.W_fc8,
                                                       self.B_fc8,
                                                   ])

            with tf.name_scope("Metric"):
                self.correct = tf.equal(tf.argmax(self.gt, 1),
                                        tf.argmax(self.est, 1))
                self.accuracy = tf.reduce_mean(
                    tf.cast(self.correct, tf.float32))

        #Cannot be on GPU
        (self.eval_vals, self.eval_idx) = tf.nn.top_k(self.est, k=5)

        #Summaries
        tf.scalar_summary('loss', self.loss, name="lossSum")
        tf.scalar_summary('accuracy', self.accuracy, name="accSum")

        tf.histogram_summary('input', self.inputImage, name="image_vis")
        tf.histogram_summary('gt', self.gt, name="gt_vis")
        tf.histogram_summary('notGt', self.notGt, name="notGt_vis")
        #Conv layer histograms
        tf.histogram_summary('conv1_1', self.h_conv1_1, name="conv1_1_vis")
        tf.histogram_summary('conv1_2', self.h_conv1_2, name="conv1_2_vis")
        tf.histogram_summary('conv2_1', self.h_conv2_1, name="conv2_1_vis")
        tf.histogram_summary('conv2_2', self.h_conv2_2, name="conv2_2_vis")
        tf.histogram_summary('conv3_1', self.h_conv3_1, name="conv3_1_vis")
        tf.histogram_summary('conv3_2', self.h_conv3_2, name="conv3_2_vis")
        tf.histogram_summary('conv3_3', self.h_conv3_3, name="conv3_3_vis")
        tf.histogram_summary('conv4_1', self.h_conv4_1, name="conv4_1_vis")
        tf.histogram_summary('conv4_2', self.h_conv4_2, name="conv4_2_vis")
        tf.histogram_summary('conv4_3', self.h_conv4_3, name="conv4_3_vis")
        tf.histogram_summary('conv5_1', self.h_conv5_1, name="conv5_1_vis")
        tf.histogram_summary('conv5_2', self.h_conv5_2, name="conv5_2_vis")
        tf.histogram_summary('conv5_3', self.h_conv5_3, name="conv5_3_vis")
        tf.histogram_summary('fc6', self.h_fc6, name="fc6_vis")
        tf.histogram_summary('fc7', self.h_fc7, name="fc7_vis")
        tf.histogram_summary('est', self.est, name="est_vis")
        tf.histogram_summary('posArray', self.posArray, name="posArray_vis")
        tf.histogram_summary('negArray', self.negArray, name="negArray_vis")
        tf.histogram_summary('cSum', self.cSum, name="cSum_vis")
        tf.histogram_summary('pairLoss', self.pairLoss, name="pairLoss_vis")
        #Weight and bias hists
        tf.histogram_summary('w_conv1_1', self.W_conv1_1, name="w_conv1_1_vis")
        tf.histogram_summary('b_conv1_1', self.B_conv1_1, name="b_conv1_1_vis")
        tf.histogram_summary('w_conv1_2', self.W_conv1_2, name="w_conv1_2_vis")
        tf.histogram_summary('b_conv1_2', self.B_conv1_2, name="b_conv1_2_vis")
        tf.histogram_summary('w_conv2_1', self.W_conv2_1, name="w_conv2_1_vis")
        tf.histogram_summary('b_conv2_1', self.B_conv2_1, name="b_conv2_1_vis")
        tf.histogram_summary('w_conv2_2', self.W_conv2_2, name="w_conv2_2_vis")
        tf.histogram_summary('b_conv2_2', self.B_conv2_2, name="b_conv2_2_vis")
        tf.histogram_summary('w_conv3_1', self.W_conv3_1, name="w_conv3_1_vis")
        tf.histogram_summary('b_conv3_1', self.B_conv3_1, name="b_conv3_1_vis")
        tf.histogram_summary('w_conv3_2', self.W_conv3_2, name="w_conv3_2_vis")
        tf.histogram_summary('b_conv3_2', self.B_conv3_2, name="b_conv3_2_vis")
        tf.histogram_summary('w_conv3_3', self.W_conv3_3, name="w_conv3_3_vis")
        tf.histogram_summary('b_conv3_3', self.B_conv3_3, name="b_conv3_3_vis")
        tf.histogram_summary('w_conv4_1', self.W_conv4_1, name="w_conv4_1_vis")
        tf.histogram_summary('b_conv4_1', self.B_conv4_1, name="b_conv4_1_vis")
        tf.histogram_summary('w_conv4_2', self.W_conv4_2, name="w_conv4_2_vis")
        tf.histogram_summary('b_conv4_2', self.B_conv4_2, name="b_conv4_2_vis")
        tf.histogram_summary('w_conv4_3', self.W_conv4_3, name="w_conv4_3_vis")
        tf.histogram_summary('b_conv4_3', self.B_conv4_3, name="b_conv4_3_vis")
        tf.histogram_summary('w_conv5_1', self.W_conv5_1, name="w_conv5_1_vis")
        tf.histogram_summary('b_conv5_1', self.B_conv5_1, name="b_conv5_1_vis")
        tf.histogram_summary('w_conv5_2', self.W_conv5_2, name="w_conv5_2_vis")
        tf.histogram_summary('b_conv5_2', self.B_conv5_2, name="b_conv5_2_vis")
        tf.histogram_summary('w_conv5_3', self.W_conv5_3, name="w_conv5_3_vis")
        tf.histogram_summary('b_conv5_3', self.B_conv5_3, name="b_conv5_3_vis")
        tf.histogram_summary('w_fc6', self.W_fc6, name="w_fc6_vis")
        tf.histogram_summary('b_fc6', self.B_fc6, name="b_fc6_vis")
        tf.histogram_summary('w_fc7', self.W_fc7, name="w_fc7_vis")
        tf.histogram_summary('b_fc7', self.B_fc7, name="b_fc7_vis")
        tf.histogram_summary('w_fc8', self.W_fc8, name="w_fc8_vis")
        tf.histogram_summary('b_fc8', self.B_fc8, name="b_fc8_vis")
    def buildModel(self, inputShape, inMatFilename):
        if(inMatFilename):
            npWeights = loadWeights(inMatFilename)

        #Running on GPU
        with tf.device('gpu:0'):
            with tf.name_scope("inputOps"):
                #Get convolution variables as placeholders
                self.inputImage = node_variable([None, inputShape[0], inputShape[1], inputShape[2]], "inputImage")
                self.gt = node_variable([None, 2], "gt")
                #Model variables for convolutions

            with tf.name_scope("Conv1Ops"):
                if(inMatFilename):
                    self.W_conv1 = weight_variable_fromnp(npWeights["conv1_w"], "w_conv1")
                    self.B_conv1 = weight_variable_fromnp(npWeights["conv1_b"], "b_conv1")
                else:
                    self.W_conv1 = weight_variable_fromnp(np.zeros((11, 11, 3, 64), dtype=np.float32), "w_conv1")
                    self.B_conv1 = weight_variable_fromnp(np.zeros((64), dtype=np.float32), "b_conv1")
                    #self.W_conv1 = weight_variable_xavier([11, 11, 3, 64], "w_conv1", conv=True)
                    #self.B_conv1 = bias_variable([64], "b_conv1")
                self.h_conv1 = tf.nn.relu(conv2d(self.inputImage, self.W_conv1, "conv1", stride=[1, 4, 4, 1]) + self.B_conv1)
                self.h_norm1 = tf.nn.local_response_normalization(self.h_conv1, name="LRN1")
                self.h_pool1 = maxpool_2x2(self.h_norm1, "pool1")

            with tf.name_scope("Conv2Ops"):
                if(inMatFilename):
                    self.W_conv2 = weight_variable_fromnp(npWeights["conv2_w"], "w_conv2")
                    self.B_conv2 = weight_variable_fromnp(npWeights["conv2_b"], "b_conv2")
                else:
                    self.W_conv2 = weight_variable_fromnp(np.zeros((5, 5, 64, 256), dtype=np.float32), "w_conv2")
                    self.B_conv2 = weight_variable_fromnp(np.zeros((256), dtype=np.float32), "b_conv2")
                    #self.W_conv2 = weight_variable_xavier([5, 5, 64, 256], "w_conv2", conv=True)
                    #self.B_conv2 = bias_variable([256], "b_conv2")
                self.h_conv2 = tf.nn.relu(conv2d(self.h_pool1, self.W_conv2, "conv2") + self.B_conv2)
                self.h_norm2 = tf.nn.local_response_normalization(self.h_conv2, name="LRN2")
                self.h_pool2 = maxpool_2x2(self.h_norm2, "pool2")

            with tf.name_scope("Conv3Ops"):
                if(inMatFilename):
                    self.W_conv3 = weight_variable_fromnp(npWeights["conv3_w"], "w_conv3")
                    self.B_conv3 = weight_variable_fromnp(npWeights["conv3_b"], "b_conv3")
                else:
                    self.W_conv3 = weight_variable_fromnp(np.zeros((3, 3, 256, 256), dtype=np.float32), "w_conv3")
                    self.B_conv3 = weight_variable_fromnp(np.zeros((256), dtype=np.float32), "b_conv3")
                    #self.W_conv3 = weight_variable_xavier([3, 3, 256, 256], "w_conv3", conv=True)
                    #self.B_conv3 = bias_variable([256], "b_conv3")
                self.h_conv3 = tf.nn.relu(conv2d(self.h_pool2, self.W_conv3, "conv3") + self.B_conv3, name="relu3")

            with tf.name_scope("Conv4Ops"):
                if(inMatFilename):
                    self.W_conv4 = weight_variable_fromnp(npWeights["conv4_w"], "w_conv4")
                    self.B_conv4 = weight_variable_fromnp(npWeights["conv4_b"], "b_conv4")
                else:
                    self.W_conv4 = weight_variable_fromnp(np.zeros((3, 3, 256, 256), dtype=np.float32), "w_conv4")
                    self.B_conv4 = weight_variable_fromnp(np.zeros((256), dtype=np.float32), "b_conv4")
                    #self.W_conv4 = weight_variable_xavier([3, 3, 256, 256], "w_conv4", conv=True)
                    #self.B_conv4 = bias_variable([256], "b_conv4")
                self.h_conv4 = tf.nn.relu(conv2d(self.h_conv3, self.W_conv4, "conv4") + self.B_conv4, name="relu4")

            with tf.name_scope("Conv5Ops"):
                if(inMatFilename):
                    self.W_conv5 = weight_variable_fromnp(npWeights["conv5_w"], "w_conv5")
                    self.B_conv5 = weight_variable_fromnp(npWeights["conv5_b"], "b_conv5")
                else:
                    self.W_conv5 = weight_variable_fromnp(np.zeros((3, 3, 256, 256), dtype=np.float32), "w_conv5")
                    self.B_conv5 = weight_variable_fromnp(np.zeros((256), dtype = np.float32), "b_conv5")
                    #self.W_conv5 = weight_variable_xavier([3, 3, 256, 256], "w_conv5", conv=True)
                    #self.B_conv5 = bias_variable([256], "b_conv5")
                self.h_conv5 = tf.nn.relu(conv2d(self.h_conv4, self.W_conv5, "conv5") + self.B_conv5)
                self.h_pool5 = maxpool_2x2(self.h_conv5, "pool5")

            #placeholder for specifying dropout
            self.keep_prob = tf.placeholder(tf.float32)

            #Factor of 32 comes from stride 4 in conv1 and stride 2 in each of pool1, pool2, and pool5
            numInputs = (inputShape[0]/32) * (inputShape[1]/32) * 256
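            #e.g. for an assumed 224x224 input: (224/32) * (224/32) * 256 = 7*7*256 = 12544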
            with tf.name_scope("FC1"):
                #if(inMatFilename):
                #    self.W_conv5 = weight_variable_fromnp(npWeights["fc1_w"], "w_fc1")
                #    self.B_conv5 = weight_variable_fromnp(npWeights["fc1_b"], "b_fc1")
                #else:
                #    self.W_conv5 = weight_variable_fromnp(np.zeros((6*6*256, 4096), dtype=np.float32), "w_fc1")
                #    self.B_conv5 = weight_variable_fromnp(np.zeros((4096), dtype = np.float32), "b_fc1")
                self.W_fc1 = weight_variable_xavier([numInputs, 4096], "w_fc1")
                self.B_fc1 = bias_variable([4096], "b_fc1")
                h_pool5_flat = tf.reshape(self.h_pool5, [-1, numInputs], name="pool5_flat")
                self.h_fc1 = tf.nn.relu(tf.matmul(h_pool5_flat, self.W_fc1, name="fc1") + self.B_fc1, "fc1_relu")
                self.h_fc1_drop = tf.nn.dropout(self.h_fc1, self.keep_prob)

            with tf.name_scope("FC2"):
                #if(inMatFilename):
                #    self.W_conv5 = weight_variable_fromnp(npWeights["fc2_w"], "w_fc2")
                #    self.B_conv5 = weight_variable_fromnp(npWeights["fc2_b"], "b_fc2")
                #else:
                #    self.W_conv5 = weight_variable_fromnp(np.zeros((4096, 4096), dtype=np.float32), "w_fc2")
                #    self.B_conv5 = weight_variable_fromnp(np.zeros((4096), dtype = np.float32), "b_fc2")
                self.W_fc2 = weight_variable_xavier([4096, 4096], "w_fc2")
                self.B_fc2 = bias_variable([4096], "b_fc2")
                self.h_fc2 = tf.nn.relu(tf.matmul(self.h_fc1_drop, self.W_fc2, name="fc2") + self.B_fc2, "fc2_relu")
                self.h_fc2_drop = tf.nn.dropout(self.h_fc2, self.keep_prob)

            #In the reference network, fc3 has 16 channels and a sigmoid activation
            #Here it is reduced to 2 output channels with a softmax
            with tf.name_scope("FC3"):
                #if(inMatFilename):
                #    self.W_conv5 = weight_variable_fromnp(npWeights["fc3_w"], "w_fc3")
                #    self.B_conv5 = weight_variable_fromnp(npWeights["fc3_b"], "b_fc3")
                #else:
                #    self.W_conv5 = weight_variable_fromnp(np.zeros((4096, 2), dtype=np.float32), "w_fc3")
                #    self.B_conv5 = weight_variable_fromnp(np.zeros((2), dtype = np.float32), "b_fc3")
                self.W_fc3 = weight_variable_xavier([4096, 2], "w_fc3")
                self.B_fc3 = bias_variable([2], "b_fc3")
                self.est = tf.nn.softmax(tf.matmul(self.h_fc2_drop, self.W_fc3, name="fc3") + self.B_fc3, "fc3_softmax")

            with tf.name_scope("Loss"):
                #Define loss
                #self.loss = tf.reduce_mean(-tf.reduce_sum(self.gt * tf.log(self.est), reduction_indices=[1]))
                self.loss = tf.reduce_mean(-(self.gt[:, 1]*.8* tf.log(self.est[:, 1]) + self.gt[:, 0]*.2*tf.log(self.est[:, 0])))
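                #Class-weighted cross-entropy: the positive class (index 1) is
                #weighted by 0.8 and the negative class (index 0) by 0.2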

            with tf.name_scope("Opt"):
                #Define optimizer
                #self.optimizerAll = tf.train.AdagradOptimizer(self.learningRate).minimize(self.loss)
                #self.optimizerFC = tf.train.AdagradOptimizer(self.learningRate).minimize(self.loss,
                self.optimizerAll = tf.train.AdamOptimizer(self.learningRate).minimize(self.loss)
                self.optimizerFC = tf.train.AdamOptimizer(self.learningRate).minimize(self.loss,
                        var_list=[
                            self.W_fc1,
                            self.B_fc1,
                            self.W_fc2,
                            self.B_fc2,
                            self.W_fc3,
                            self.B_fc3]
                        )

            with tf.name_scope("Metric"):
                self.gtIdx = tf.argmax(self.gt, 1)
                self.estIdx = tf.argmax(self.est, 1)
                boolGtIdx = tf.cast(self.gtIdx, tf.bool)
                boolEstIdx = tf.cast(self.estIdx, tf.bool)

                #Logical and for true positive
                lAnd = tf.logical_and(boolGtIdx, boolEstIdx)
                self.tp = tf.reduce_sum(tf.cast(lAnd, tf.float32))
                #Logical nor for true negatives
                lNor = tf.logical_not(tf.logical_or(boolGtIdx, boolEstIdx))
                self.tn = tf.reduce_sum(tf.cast(lNor, tf.float32))

                #Subtraction and comparison for others
                lSub = self.gtIdx - self.estIdx
                Ones = tf.cast(tf.ones(tf.shape(lSub)), tf.int64)
                self.fn = tf.reduce_sum(tf.cast(tf.equal(lSub, Ones), tf.float32))
                self.fp = tf.reduce_sum(tf.cast(tf.equal(lSub, -Ones), tf.float32))

                #Accuracy, precision, and recall calculations
                self.accuracy = (self.tp + self.tn)/(self.tp+self.tn+self.fp+self.fn)
                self.precision = self.tp/(self.tp+self.fp)
                self.recall = self.tp/(self.tp+self.fn)

        #Summaries
        tf.scalar_summary('loss', self.loss, name="lossSum")
        tf.scalar_summary('accuracy', self.accuracy, name="accSum")
        tf.scalar_summary('precision', self.precision, name="precSum")
        tf.scalar_summary('recall', self.recall, name="recallSum")
        tf.scalar_summary('tp', self.tp, name="tp")
        tf.scalar_summary('fp', self.fp, name="fp")
        tf.scalar_summary('tn', self.tn, name="tn")
        tf.scalar_summary('fn', self.fn, name="fn")

        tf.histogram_summary('input', self.inputImage, name="image")
        tf.histogram_summary('gt', self.gt, name="gt")
        tf.histogram_summary('conv1', self.h_pool1, name="conv1")
        tf.histogram_summary('conv2', self.h_pool2, name="conv2")
        tf.histogram_summary('conv3', self.h_conv3, name="conv3")
        tf.histogram_summary('conv4', self.h_conv4, name="conv4")
        tf.histogram_summary('conv5', self.h_pool5, name="conv5")
        tf.histogram_summary('fc1', self.h_fc1, name="fc1")
        tf.histogram_summary('fc2', self.h_fc2, name="fc2")
        tf.histogram_summary('est', self.est, name="fc3")
        tf.histogram_summary('w_conv1', self.W_conv1, name="w_conv1")
        tf.histogram_summary('b_conv1', self.B_conv1, name="b_conv1")
        tf.histogram_summary('w_conv2', self.W_conv2, name="w_conv2")
        tf.histogram_summary('b_conv2', self.B_conv2, name="b_conv2")
        tf.histogram_summary('w_conv3', self.W_conv3, name="w_conv3")
        tf.histogram_summary('b_conv3', self.B_conv3, name="b_conv3")
        tf.histogram_summary('w_conv4', self.W_conv4, name="w_conv4")
        tf.histogram_summary('b_conv4', self.B_conv4, name="b_conv4")
        tf.histogram_summary('w_conv5', self.W_conv5, name="w_conv5")
        tf.histogram_summary('b_conv5', self.B_conv5, name="b_conv5")
        tf.histogram_summary('w_fc1', self.W_fc1, name="w_fc1")
        tf.histogram_summary('b_fc1', self.B_fc1, name="b_fc1")
        tf.histogram_summary('w_fc2', self.W_fc2, name="w_fc2")
        tf.histogram_summary('b_fc2', self.B_fc2, name="b_fc2")
        tf.histogram_summary('w_fc3', self.W_fc3, name="w_fc3")
        tf.histogram_summary('b_fc3', self.B_fc3, name="b_fc3")

        #Define saver
        self.saver = tf.train.Saver()
Example #5
    def buildModel(self, inputShape):
        if self.vggFile:
            npWeights = loadWeights(self.vggFile)
        else:
            print("Must load from weights")
            assert(0)

        #Running on GPU
        with tf.device(self.device):
            with tf.name_scope("inputOps"):
                #Get convolution variables as placeholders
                self.inputImage = node_variable([self.batchSize, inputShape[0], inputShape[1], inputShape[2]], "inputImage")
                self.gt = node_variable([self.batchSize, self.numClasses], "gt")
                self.notGt = 1-self.gt

            with tf.name_scope("Conv1Ops"):
                self.W_conv1_1 = weight_variable_fromnp(npWeights["conv1_1_w"], "w_conv1_1")
                self.B_conv1_1 = weight_variable_fromnp(npWeights["conv1_1_b"], "b_conv1_1")
                self.W_conv1_2 = weight_variable_fromnp(npWeights["conv1_2_w"], "w_conv1_2")
                self.B_conv1_2 = weight_variable_fromnp(npWeights["conv1_2_b"], "b_conv1_2")

                self.h_conv1_1 = tf.nn.relu(conv2d(self.inputImage, self.W_conv1_1, "conv1_1", stride=[1, 1, 1, 1]) + self.B_conv1_1)
                self.h_conv1_2 = tf.nn.relu(conv2d(self.h_conv1_1, self.W_conv1_2, "conv1_2", stride=[1, 1, 1, 1]) + self.B_conv1_2)
                self.h_pool1 = maxpool_2x2(self.h_conv1_2, "pool1")

            with tf.name_scope("Conv2Ops"):
                self.W_conv2_1 = weight_variable_fromnp(npWeights["conv2_1_w"], "w_conv2_1")
                self.B_conv2_1 = weight_variable_fromnp(npWeights["conv2_1_b"], "b_conv2_1")
                self.W_conv2_2 = weight_variable_fromnp(npWeights["conv2_2_w"], "w_conv2_2")
                self.B_conv2_2 = weight_variable_fromnp(npWeights["conv2_2_b"], "b_conv2_2")

                self.h_conv2_1 = tf.nn.relu(conv2d(self.h_pool1, self.W_conv2_1, "conv2_1") + self.B_conv2_1)
                self.h_conv2_2 = tf.nn.relu(conv2d(self.h_conv2_1, self.W_conv2_2, "conv2_2") + self.B_conv2_2)
                self.h_pool2 = maxpool_2x2(self.h_conv2_2, "pool2")

            with tf.name_scope("Conv3Ops"):
                self.W_conv3_1 = weight_variable_fromnp(npWeights["conv3_1_w"], "w_conv3_1")
                self.B_conv3_1 = weight_variable_fromnp(npWeights["conv3_1_b"], "b_conv3_1")
                self.W_conv3_2 = weight_variable_fromnp(npWeights["conv3_2_w"], "w_conv3_2")
                self.B_conv3_2 = weight_variable_fromnp(npWeights["conv3_2_b"], "b_conv3_2")
                self.W_conv3_3 = weight_variable_fromnp(npWeights["conv3_3_w"], "w_conv3_3")
                self.B_conv3_3 = weight_variable_fromnp(npWeights["conv3_3_b"], "b_conv3_3")

                self.h_conv3_1 = tf.nn.relu(conv2d(self.h_pool2, self.W_conv3_1, "conv3_1") + self.B_conv3_1)
                self.h_conv3_2 = tf.nn.relu(conv2d(self.h_conv3_1, self.W_conv3_2, "conv3_2") + self.B_conv3_2)
                self.h_conv3_3 = tf.nn.relu(conv2d(self.h_conv3_2, self.W_conv3_3, "conv3_3") + self.B_conv3_3)
                self.h_pool3 = maxpool_2x2(self.h_conv3_3, "pool3")

            with tf.name_scope("Conv4Ops"):
                self.W_conv4_1 = weight_variable_fromnp(npWeights["conv4_1_w"], "w_conv4_1")
                self.B_conv4_1 = weight_variable_fromnp(npWeights["conv4_1_b"], "b_conv4_1")
                self.W_conv4_2 = weight_variable_fromnp(npWeights["conv4_2_w"], "w_conv4_2")
                self.B_conv4_2 = weight_variable_fromnp(npWeights["conv4_2_b"], "b_conv4_2")
                self.W_conv4_3 = weight_variable_fromnp(npWeights["conv4_3_w"], "w_conv4_3")
                self.B_conv4_3 = weight_variable_fromnp(npWeights["conv4_3_b"], "b_conv4_3")

                self.h_conv4_1 = tf.nn.relu(conv2d(self.h_pool3, self.W_conv4_1, "conv4_1") + self.B_conv4_1)
                self.h_conv4_2 = tf.nn.relu(conv2d(self.h_conv4_1, self.W_conv4_2, "conv4_2") + self.B_conv4_2)
                self.h_conv4_3 = tf.nn.relu(conv2d(self.h_conv4_2, self.W_conv4_3, "conv4_3") + self.B_conv4_3)
                self.h_pool4 = maxpool_2x2(self.h_conv4_3, "pool4")


            with tf.name_scope("Conv5Ops"):
                self.W_conv5_1 = weight_variable_fromnp(npWeights["conv5_1_w"], "w_conv5_1")
                self.B_conv5_1 = weight_variable_fromnp(npWeights["conv5_1_b"], "b_conv5_1")
                self.W_conv5_2 = weight_variable_fromnp(npWeights["conv5_2_w"], "w_conv5_2")
                self.B_conv5_2 = weight_variable_fromnp(npWeights["conv5_2_b"], "b_conv5_2")
                self.W_conv5_3 = weight_variable_fromnp(npWeights["conv5_3_w"], "w_conv5_3")
                self.B_conv5_3 = weight_variable_fromnp(npWeights["conv5_3_b"], "b_conv5_3")

                self.h_conv5_1 = tf.nn.relu(conv2d(self.h_pool4, self.W_conv5_1, "conv5_1") + self.B_conv5_1)
                self.h_conv5_2 = tf.nn.relu(conv2d(self.h_conv5_1, self.W_conv5_2, "conv5_2") + self.B_conv5_2)
                self.h_conv5_3 = tf.nn.relu(conv2d(self.h_conv5_2, self.W_conv5_3, "conv5_3") + self.B_conv5_3)
                self.h_pool5 = maxpool_2x2(self.h_conv5_3, "pool5")

        #with tf.device('cpu:0'):

            self.keep_prob = tf.placeholder(tf.float32)
            with tf.name_scope("FC6"):
                self.W_fc6 = weight_variable_fromnp(npWeights["fc6_w"], "w_fc6")
                self.B_fc6 = weight_variable_fromnp(npWeights["fc6_b"], "b_fc6")
                h_pool5_flat = tf.reshape(self.h_pool5, [self.batchSize, 7*7*512])
                self.h_fc6 = tf.nn.relu(tf.matmul(h_pool5_flat, self.W_fc6, name="fc6") + self.B_fc6, "fc6_relu")
                self.drop_h_fc6 = tf.nn.dropout(self.h_fc6, self.keep_prob)

        #with tf.device(self.device):
            with tf.name_scope("FC7"):
                self.W_fc7 = weight_variable_fromnp(npWeights["fc7_w"], "w_fc7")
                self.B_fc7 = weight_variable_fromnp(npWeights["fc7_b"], "b_fc7")
                self.h_fc7 = tf.nn.relu(tf.matmul(self.drop_h_fc6, self.W_fc7, name="fc7") + self.B_fc7, "fc7_relu")
                self.drop_h_fc7 = tf.nn.dropout(self.h_fc7, self.keep_prob)

            with tf.name_scope("FC8"):
                self.W_fc8 = weight_variable_xavier([4096, 20], "w_fc8")
                self.B_fc8 = bias_variable([20], "b_fc8")
                #self.est = tf.nn.softmax(tf.matmul(self.drop_h_fc7, self.W_fc8, name="fc8") + self.B_fc8)
                self.est = tf.matmul(self.drop_h_fc7, self.W_fc8, name="fc8") + self.B_fc8

            with tf.name_scope("Loss"):
                #Grab positive and negative classes as needed
                self.posArray = tf.expand_dims(self.gt*self.est, dim=1)
                self.negArray = tf.expand_dims(self.notGt*self.est, dim=2)
                #tf broadcasting expands this into a [batch, class, class] matrix
                self.cSum = (1 - self.posArray) + self.negArray
                self.pairLoss = tf.reduce_mean(self.cSum, reduction_indices=[1, 2])
                self.loss = tf.reduce_mean(tf.nn.relu(self.pairLoss))

                #self.loss = tf.reduce_mean(-tf.reduce_sum(self.gt * tf.log(self.est+self.epsilon), reduction_indices=[1]))
                #self.regLoss = self.loss + self.regStrength * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
                #self.nan_check_loss = tf.verify_tensor_all_finite(self.loss, msg="check_nan")

            with tf.name_scope("Opt"):
                #Define optimizer
                self.optimizerAll = tf.train.AdamOptimizer(self.learningRate, beta1=self.beta1, beta2=self.beta2, epsilon=self.epsilon).minimize(self.loss)
                #self.optimizerAll = tf.train.MomentumOptimizer(self.learningRate, momentum=self.beta1).minimize(self.loss)
                self.optimizerPre = tf.train.AdamOptimizer(self.learningRate, beta1=self.beta1, beta2=self.beta2, epsilon=self.epsilon).minimize(self.loss,
                        var_list=[
                            self.W_fc6,
                            self.B_fc6,
                            self.W_fc7,
                            self.B_fc7,
                            self.W_fc8,
                            self.B_fc8,
                            ]
                        )

            with tf.name_scope("Metric"):
                self.correct = tf.equal(tf.argmax(self.gt, 1), tf.argmax(self.est, 1))
                self.accuracy = tf.reduce_mean(tf.cast(self.correct, tf.float32))

        #Cannot be on GPU
        (self.eval_vals, self.eval_idx) = tf.nn.top_k(self.est, k=5)

        #Summaries
        tf.scalar_summary('loss', self.loss, name="lossSum")
        tf.scalar_summary('accuracy', self.accuracy, name="accSum")

        tf.histogram_summary('input', self.inputImage, name="image_vis")
        tf.histogram_summary('gt', self.gt, name="gt_vis")
        tf.histogram_summary('notGt', self.notGt, name="notGt_vis")
        #Conv layer histograms
        tf.histogram_summary('conv1_1', self.h_conv1_1, name="conv1_1_vis")
        tf.histogram_summary('conv1_2', self.h_conv1_2, name="conv1_2_vis")
        tf.histogram_summary('conv2_1', self.h_conv2_1, name="conv2_1_vis")
        tf.histogram_summary('conv2_2', self.h_conv2_2, name="conv2_2_vis")
        tf.histogram_summary('conv3_1', self.h_conv3_1, name="conv3_1_vis")
        tf.histogram_summary('conv3_2', self.h_conv3_2, name="conv3_2_vis")
        tf.histogram_summary('conv3_3', self.h_conv3_3, name="conv3_3_vis")
        tf.histogram_summary('conv4_1', self.h_conv4_1, name="conv4_1_vis")
        tf.histogram_summary('conv4_2', self.h_conv4_2, name="conv4_2_vis")
        tf.histogram_summary('conv4_3', self.h_conv4_3, name="conv4_3_vis")
        tf.histogram_summary('conv5_1', self.h_conv5_1, name="conv5_1_vis")
        tf.histogram_summary('conv5_2', self.h_conv5_2, name="conv5_2_vis")
        tf.histogram_summary('conv5_3', self.h_conv5_3, name="conv5_3_vis")
        tf.histogram_summary('fc6', self.h_fc6, name="fc6_vis")
        tf.histogram_summary('fc7', self.h_fc7, name="fc7_vis")
        tf.histogram_summary('est', self.est, name="est_vis")
        tf.histogram_summary('posArray', self.posArray, name="posArray_vis")
        tf.histogram_summary('negArray', self.negArray, name="negArray_vis")
        tf.histogram_summary('cSum', self.cSum, name="cSum_vis")
        tf.histogram_summary('pairLoss', self.pairLoss, name="pairLoss_vis")
        #Weight and bias hists
        tf.histogram_summary('w_conv1_1', self.W_conv1_1, name="w_conv1_1_vis")
        tf.histogram_summary('b_conv1_1', self.B_conv1_1, name="b_conv1_1_vis")
        tf.histogram_summary('w_conv1_2', self.W_conv1_2, name="w_conv1_2_vis")
        tf.histogram_summary('b_conv1_2', self.B_conv1_2, name="b_conv1_2_vis")
        tf.histogram_summary('w_conv2_1', self.W_conv2_1, name="w_conv2_1_vis")
        tf.histogram_summary('b_conv2_1', self.B_conv2_1, name="b_conv2_1_vis")
        tf.histogram_summary('w_conv2_2', self.W_conv2_2, name="w_conv2_2_vis")
        tf.histogram_summary('b_conv2_2', self.B_conv2_2, name="b_conv2_2_vis")
        tf.histogram_summary('w_conv3_1', self.W_conv3_1, name="w_conv3_1_vis")
        tf.histogram_summary('b_conv3_1', self.B_conv3_1, name="b_conv3_1_vis")
        tf.histogram_summary('w_conv3_2', self.W_conv3_2, name="w_conv3_2_vis")
        tf.histogram_summary('b_conv3_2', self.B_conv3_2, name="b_conv3_2_vis")
        tf.histogram_summary('w_conv3_3', self.W_conv3_3, name="w_conv3_3_vis")
        tf.histogram_summary('b_conv3_3', self.B_conv3_3, name="b_conv3_3_vis")
        tf.histogram_summary('w_conv4_1', self.W_conv4_1, name="w_conv4_1_vis")
        tf.histogram_summary('b_conv4_1', self.B_conv4_1, name="b_conv4_1_vis")
        tf.histogram_summary('w_conv4_2', self.W_conv4_2, name="w_conv4_2_vis")
        tf.histogram_summary('b_conv4_2', self.B_conv4_2, name="b_conv4_2_vis")
        tf.histogram_summary('w_conv4_3', self.W_conv4_3, name="w_conv4_3_vis")
        tf.histogram_summary('b_conv4_3', self.B_conv4_3, name="b_conv4_3_vis")
        tf.histogram_summary('w_conv5_1', self.W_conv5_1, name="w_conv5_1_vis")
        tf.histogram_summary('b_conv5_1', self.B_conv5_1, name="b_conv5_1_vis")
        tf.histogram_summary('w_conv5_2', self.W_conv5_2, name="w_conv5_2_vis")
        tf.histogram_summary('b_conv5_2', self.B_conv5_2, name="b_conv5_2_vis")
        tf.histogram_summary('w_conv5_3', self.W_conv5_3, name="w_conv5_3_vis")
        tf.histogram_summary('b_conv5_3', self.B_conv5_3, name="b_conv5_3_vis")
        tf.histogram_summary('w_fc6', self.W_fc6, name="w_fc6_vis")
        tf.histogram_summary('b_fc6', self.B_fc6, name="b_fc6_vis")
        tf.histogram_summary('w_fc7', self.W_fc7, name="w_fc7_vis")
        tf.histogram_summary('b_fc7', self.B_fc7, name="b_fc7_vis")
        tf.histogram_summary('w_fc8', self.W_fc8, name="w_fc8_vis")
        tf.histogram_summary('b_fc8', self.B_fc8, name="b_fc8_vis")
Example #6
    def buildModel(self, inputShape):
        if self.vggFile:
            npWeights = loadWeights(self.vggFile)
        else:
            print("Must load from weights")
            assert(0)

        #Running on GPU
        with tf.device(self.device):
            with tf.name_scope("inputOps"):
                #Get convolution variables as placeholders
                self.inputImage = node_variable([
                    self.batchSize, inputShape[0], inputShape[1], inputShape[2]
                ], "inputImage")
                self.gt = node_variable(
                    [self.batchSize, 7, 7, self.numClasses], "gt")
                self.norm_gt = self.gt / tf.reduce_sum(
                    self.gt, reduction_indices=3, keep_dims=True)
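                #gt is a spatial [batch, 7, 7, numClasses] label map; norm_gt rescales
                #it so the class weights at each spatial location sum to 1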
                #Model variables for convolutions

            with tf.name_scope("Conv1Ops"):
                self.W_conv1_1 = weight_variable_fromnp(
                    npWeights["conv1_1_w"], "w_conv1_1")
                self.B_conv1_1 = weight_variable_fromnp(
                    npWeights["conv1_1_b"], "b_conv1_1")
                self.W_conv1_2 = weight_variable_fromnp(
                    npWeights["conv1_2_w"], "w_conv1_2")
                self.B_conv1_2 = weight_variable_fromnp(
                    npWeights["conv1_2_b"], "b_conv1_2")

                self.h_conv1_1 = tf.nn.relu(
                    conv2d(self.inputImage,
                           self.W_conv1_1,
                           "conv1_1",
                           stride=[1, 1, 1, 1]) + self.B_conv1_1)
                self.h_conv1_2 = tf.nn.relu(
                    conv2d(self.h_conv1_1,
                           self.W_conv1_2,
                           "conv1_1",
                           stride=[1, 1, 1, 1]) + self.B_conv1_2)
                self.h_pool1 = maxpool_2x2(self.h_conv1_2, "pool1")

            with tf.name_scope("Conv2Ops"):
                self.W_conv2_1 = weight_variable_fromnp(
                    npWeights["conv2_1_w"], "w_conv2_1")
                self.B_conv2_1 = weight_variable_fromnp(
                    npWeights["conv2_1_b"], "b_conv2_1")
                self.W_conv2_2 = weight_variable_fromnp(
                    npWeights["conv2_2_w"], "w_conv2_2")
                self.B_conv2_2 = weight_variable_fromnp(
                    npWeights["conv2_2_b"], "b_conv2_2")

                self.h_conv2_1 = tf.nn.relu(
                    conv2d(self.h_pool1, self.W_conv2_1, "conv2_1") +
                    self.B_conv2_1)
                self.h_conv2_2 = tf.nn.relu(
                    conv2d(self.h_conv2_1, self.W_conv2_2, "conv2_2") +
                    self.B_conv2_2)
                self.h_pool2 = maxpool_2x2(self.h_conv2_2, "pool2")

            with tf.name_scope("Conv3Ops"):
                self.W_conv3_1 = weight_variable_fromnp(
                    npWeights["conv3_1_w"], "w_conv3_1")
                self.B_conv3_1 = weight_variable_fromnp(
                    npWeights["conv3_1_b"], "b_conv3_1")
                self.W_conv3_2 = weight_variable_fromnp(
                    npWeights["conv3_2_w"], "w_conv3_2")
                self.B_conv3_2 = weight_variable_fromnp(
                    npWeights["conv3_2_b"], "b_conv3_2")
                self.W_conv3_3 = weight_variable_fromnp(
                    npWeights["conv3_3_w"], "w_conv3_3")
                self.B_conv3_3 = weight_variable_fromnp(
                    npWeights["conv3_3_b"], "b_conv3_3")

                self.h_conv3_1 = tf.nn.relu(
                    conv2d(self.h_pool2, self.W_conv3_1, "conv3_1") +
                    self.B_conv3_1)
                self.h_conv3_2 = tf.nn.relu(
                    conv2d(self.h_conv3_1, self.W_conv3_2, "conv3_2") +
                    self.B_conv3_2)
                self.h_conv3_3 = tf.nn.relu(
                    conv2d(self.h_conv3_2, self.W_conv3_3, "conv3_3") +
                    self.B_conv3_3)
                self.h_pool3 = maxpool_2x2(self.h_conv3_3, "pool3")

            with tf.name_scope("Conv4Ops"):
                self.W_conv4_1 = weight_variable_fromnp(
                    npWeights["conv4_1_w"], "w_conv4_1")
                self.B_conv4_1 = weight_variable_fromnp(
                    npWeights["conv4_1_b"], "b_conv4_1")
                self.W_conv4_2 = weight_variable_fromnp(
                    npWeights["conv4_2_w"], "w_conv4_2")
                self.B_conv4_2 = weight_variable_fromnp(
                    npWeights["conv4_2_b"], "b_conv4_2")
                self.W_conv4_3 = weight_variable_fromnp(
                    npWeights["conv4_3_w"], "w_conv4_3")
                self.B_conv4_3 = weight_variable_fromnp(
                    npWeights["conv4_3_b"], "b_conv4_3")

                self.h_conv4_1 = tf.nn.relu(
                    conv2d(self.h_pool3, self.W_conv4_1, "conv4_1") +
                    self.B_conv4_1)
                self.h_conv4_2 = tf.nn.relu(
                    conv2d(self.h_conv4_1, self.W_conv4_2, "conv4_2") +
                    self.B_conv4_2)
                self.h_conv4_3 = tf.nn.relu(
                    conv2d(self.h_conv4_2, self.W_conv4_3, "conv4_3") +
                    self.B_conv4_3)
                self.h_pool4 = maxpool_2x2(self.h_conv4_3, "pool4")

            with tf.name_scope("Conv5Ops"):
                self.W_conv5_1 = weight_variable_fromnp(
                    npWeights["conv5_1_w"], "w_conv5_1")
                self.B_conv5_1 = weight_variable_fromnp(
                    npWeights["conv5_1_b"], "b_conv5_1")
                self.W_conv5_2 = weight_variable_fromnp(
                    npWeights["conv5_2_w"], "w_conv5_2")
                self.B_conv5_2 = weight_variable_fromnp(
                    npWeights["conv5_2_b"], "b_conv5_2")
                self.W_conv5_3 = weight_variable_fromnp(
                    npWeights["conv5_3_w"], "w_conv5_3")
                self.B_conv5_3 = weight_variable_fromnp(
                    npWeights["conv5_3_b"], "b_conv5_3")

                self.h_conv5_1 = tf.nn.relu(
                    conv2d(self.h_pool4, self.W_conv5_1, "conv5_1") +
                    self.B_conv5_1)
                self.h_conv5_2 = tf.nn.relu(
                    conv2d(self.h_conv5_1, self.W_conv5_2, "conv5_2") +
                    self.B_conv5_2)
                self.h_conv5_3 = tf.nn.relu(
                    conv2d(self.h_conv5_2, self.W_conv5_3, "conv5_3") +
                    self.B_conv5_3)
                self.h_pool5 = tf.nn.avg_pool(self.h_conv5_3,
                                              ksize=[1, 2, 2, 1],
                                              strides=[1, 2, 2, 1],
                                              padding='SAME',
                                              name="pool5")
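                #Note (not in the original example): unlike standard VGG, pool5 is an
                #average pool rather than a max pool, presumably so the 7x7 features
                #feed more smoothly into the global-average-pooling (GAP) and
                #class-activation-map (CAM) heads defined below.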

            #Downsampled by 32 at this point (five 2x2 poolings), e.g. 7x7 for a 224x224 input
            #self.h_conv5_shape = [self.batchSize, inputShape[0]/32, inputShape[1]/32, 512]
            self.h_conv5_shape = [self.batchSize, 7, 7, 512]
            with tf.name_scope("GAP"):
                self.h_gap = tf.reduce_mean(self.h_pool5,
                                            reduction_indices=[1, 2])
                self.W_gap = weight_variable_xavier([512, self.numClasses],
                                                    "w_gap",
                                                    conv=False)
                self.B_gap = bias_variable([self.numClasses], "b_gap")
                self.est = tf.nn.softmax(
                    tf.matmul(self.h_gap, self.W_gap) + self.B_gap)

            with tf.name_scope("CAM"):
                self.h_reshape_gap = tf.reshape(self.h_pool5, [
                    self.batchSize * self.h_conv5_shape[1] *
                    self.h_conv5_shape[2], -1
                ])
                self.flat_cam = tf.matmul(self.h_reshape_gap,
                                          self.W_gap) + self.B_gap
                self.reshape_cam = tf.reshape(self.flat_cam, [
                    self.batchSize, self.h_conv5_shape[1],
                    self.h_conv5_shape[2], -1
                ])
                self.softmax_cam = pixelSoftmax(self.reshape_cam)
                self.vis_cam = tf.transpose(self.softmax_cam, [0, 3, 1, 2])
                self.classRank = tf.reduce_mean(self.vis_cam,
                                                reduction_indices=[2, 3])
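                #Note (assumption, not in the original example): pixelSoftmax is a
                #project helper that is not shown here. From its use above it
                #presumably normalizes the class scores independently at every spatial
                #position, i.e. roughly
                #  e = tf.exp(x - tf.reduce_max(x, reduction_indices=3, keep_dims=True))
                #  e / tf.reduce_sum(e, reduction_indices=3, keep_dims=True)
                #so that softmax_cam sums to 1 over classes at each of the 7x7 locations.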

            with tf.name_scope("Loss"):
                #Define loss
                #self.loss = tf.reduce_mean(-tf.reduce_sum(self.gt * tf.log(self.est+self.epsilon), reduction_indices=[1]))
                self.loss = tf.reduce_mean(-tf.reduce_sum(
                    self.norm_gt * tf.log(self.softmax_cam + self.epsilon),
                    reduction_indices=[3]))
                self.regLoss = self.loss + self.regStrength * tf.add_n(
                    [tf.nn.l2_loss(v) for v in tf.trainable_variables()])
                self.nan_check_loss = tf.verify_tensor_all_finite(
                    self.loss, msg="check_nan")
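                #The loss is a per-pixel cross entropy: the 7x7 ground-truth map is
                #normalized over classes (norm_gt) and compared with the CAM softmax at
                #every spatial position, then averaged over batch and positions.
                #Note that regLoss and nan_check_loss are defined but the optimizers
                #below minimize self.loss directly.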

            with tf.name_scope("Opt"):
                #Define optimizer
                self.optimizerAll = tf.train.AdamOptimizer(
                    self.learningRate,
                    beta1=self.beta1,
                    beta2=self.beta2,
                    epsilon=self.epsilon).minimize(self.loss)
                #self.optimizerAll = tf.train.MomentumOptimizer(self.learningRate, momentum=self.beta1).minimize(self.loss)
                self.optimizerPre = tf.train.AdamOptimizer(
                    self.learningRate,
                    beta1=self.beta1,
                    beta2=self.beta2,
                    epsilon=self.epsilon).minimize(self.loss,
                                                   var_list=[
                                                       self.W_gap,
                                                       self.B_gap,
                                                   ])

            with tf.name_scope("Metric"):
                self.correct = tf.equal(
                    tf.argmax(
                        tf.reduce_mean(self.gt, reduction_indices=[1, 2]), 1),
                    tf.argmax(self.est, 1))
                self.accuracy = tf.reduce_mean(
                    tf.cast(self.correct, tf.float32))
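                #Accuracy compares the class with the largest spatially averaged
                #ground-truth weight against the GAP classifier prediction (est), so it
                #measures image-level classification rather than localization quality.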

        #Cannot be on GPU
        #Remove distractor class
        rankNoDist = self.classRank[:, :self.numClasses - 1]
        (self.eval_vals, self.eval_idx) = tf.nn.top_k(rankNoDist, k=5)

        #Summaries
        tf.scalar_summary('loss', self.loss, name="lossSum")
        tf.scalar_summary('accuracy', self.accuracy, name="accSum")

        tf.histogram_summary('input', self.inputImage, name="image_vis")
        tf.histogram_summary('gt', self.gt, name="gt_vis")
        tf.histogram_summary('norm_gt', self.norm_gt, name="norm_gt_vis")
        #Conv layer histograms
        tf.histogram_summary('conv1_1', self.h_conv1_1, name="conv1_1_vis")
        tf.histogram_summary('conv1_2', self.h_conv1_2, name="conv1_2_vis")
        tf.histogram_summary('conv2_1', self.h_conv2_1, name="conv2_1_vis")
        tf.histogram_summary('conv2_2', self.h_conv2_2, name="conv2_2_vis")
        tf.histogram_summary('conv3_1', self.h_conv3_1, name="conv3_1_vis")
        tf.histogram_summary('conv3_2', self.h_conv3_2, name="conv3_2_vis")
        tf.histogram_summary('conv3_3', self.h_conv3_3, name="conv3_3_vis")
        tf.histogram_summary('conv4_1', self.h_conv4_1, name="conv4_1_vis")
        tf.histogram_summary('conv4_2', self.h_conv4_2, name="conv4_2_vis")
        tf.histogram_summary('conv4_3', self.h_conv4_3, name="conv4_3_vis")
        tf.histogram_summary('conv5_1', self.h_conv5_1, name="conv5_1_vis")
        tf.histogram_summary('conv5_2', self.h_conv5_2, name="conv5_2_vis")
        tf.histogram_summary('conv5_3', self.h_conv5_3, name="conv5_3_vis")
        tf.histogram_summary('cam', self.flat_cam, name="flat_cam")
        tf.histogram_summary('softmax_cam',
                             self.softmax_cam,
                             name="softmax_cam")
        tf.histogram_summary('gap', self.h_gap, name="gap_vis")
        tf.histogram_summary('est', self.est, name="est_vis")
        #Weight and bias hists
        tf.histogram_summary('w_conv1_1', self.W_conv1_1, name="w_conv1_1_vis")
        tf.histogram_summary('b_conv1_1', self.B_conv1_1, name="b_conv1_1_vis")
        tf.histogram_summary('w_conv1_2', self.W_conv1_2, name="w_conv1_2_vis")
        tf.histogram_summary('b_conv1_2', self.B_conv1_2, name="b_conv1_2_vis")
        tf.histogram_summary('w_conv2_1', self.W_conv2_1, name="w_conv2_1_vis")
        tf.histogram_summary('b_conv2_1', self.B_conv2_1, name="b_conv2_1_vis")
        tf.histogram_summary('w_conv2_2', self.W_conv2_2, name="w_conv2_2_vis")
        tf.histogram_summary('b_conv2_2', self.B_conv2_2, name="b_conv2_2_vis")
        tf.histogram_summary('w_conv3_1', self.W_conv3_1, name="w_conv3_1_vis")
        tf.histogram_summary('b_conv3_1', self.B_conv3_1, name="b_conv3_1_vis")
        tf.histogram_summary('w_conv3_2', self.W_conv3_2, name="w_conv3_2_vis")
        tf.histogram_summary('b_conv3_2', self.B_conv3_2, name="b_conv3_2_vis")
        tf.histogram_summary('w_conv3_3', self.W_conv3_3, name="w_conv3_3_vis")
        tf.histogram_summary('b_conv3_3', self.B_conv3_3, name="b_conv3_3_vis")
        tf.histogram_summary('w_conv4_1', self.W_conv4_1, name="w_conv4_1_vis")
        tf.histogram_summary('b_conv4_1', self.B_conv4_1, name="b_conv4_1_vis")
        tf.histogram_summary('w_conv4_2', self.W_conv4_2, name="w_conv4_2_vis")
        tf.histogram_summary('b_conv4_2', self.B_conv4_2, name="b_conv4_2_vis")
        tf.histogram_summary('w_conv4_3', self.W_conv4_3, name="w_conv4_3_vis")
        tf.histogram_summary('b_conv4_3', self.B_conv4_3, name="b_conv4_3_vis")
        tf.histogram_summary('w_conv5_1', self.W_conv5_1, name="w_conv5_1_vis")
        tf.histogram_summary('b_conv5_1', self.B_conv5_1, name="b_conv5_1_vis")
        tf.histogram_summary('w_conv5_2', self.W_conv5_2, name="w_conv5_2_vis")
        tf.histogram_summary('b_conv5_2', self.B_conv5_2, name="b_conv5_2_vis")
        tf.histogram_summary('w_conv5_3', self.W_conv5_3, name="w_conv5_3_vis")
        tf.histogram_summary('b_conv5_3', self.B_conv5_3, name="b_conv5_3_vis")
        tf.histogram_summary('w_gap', self.W_gap, name="w_gap_vis")
        tf.histogram_summary('b_gap', self.B_gap, name="b_gap_vis")
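
The snippet below is not part of the example above; it is a minimal usage sketch of how such a model is typically driven, first pretraining only the GAP head with optimizerPre and then fine-tuning everything with optimizerAll. The CAMModel wrapper class, the getBatch helper, the weight file path, and the step counts are all assumptions made for illustration.

#Hypothetical driver for the CAM model above (pre-1.0 TensorFlow API, matching the example).
import tensorflow as tf

model = CAMModel(vggFile="vgg16_weights.npz", device="/gpu:0")  #assumed wrapper that calls buildModel
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    #Phase 1: train only the GAP weights (W_gap, B_gap) on top of the VGG features
    for step in range(1000):
        images, heatmaps = getBatch(model.batchSize)  #heatmaps shaped [batch, 7, 7, numClasses]
        sess.run(model.optimizerPre, feed_dict={model.inputImage: images, model.gt: heatmaps})
    #Phase 2: fine-tune the whole network
    for step in range(10000):
        images, heatmaps = getBatch(model.batchSize)
        sess.run(model.optimizerAll, feed_dict={model.inputImage: images, model.gt: heatmaps})
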
Exemple #7
0
    def buildModel(self, inMatFilename = None):
        if(inMatFilename):
            npWeights = loadWeights(inMatFilename)

        #Put all conv layers on gpu
        with tf.device('gpu:0'):
            with tf.name_scope("inputOps"):
                inputShape = self.dataObj.inputShape
                #Get convolution variables as placeholders
                self.inputImage = node_variable([None, inputShape[0], inputShape[1], inputShape[2]], "inputImage")
                self.gt = node_variable([None, 1], "gt")
                #Model variables for convolutions

            with tf.name_scope("Conv1Ops"):
                if(inMatFilename):
                    self.W_conv1 = weight_variable_fromnp(npWeights["conv1_w"], "w_conv1")
                    self.B_conv1 = weight_variable_fromnp(npWeights["conv1_b"], "b_conv1")
                else:
                    self.W_conv1 = weight_variable_fromnp(np.zeros((11, 11, 3, 64), dtype=np.float32), "w_conv1")
                    self.B_conv1 = weight_variable_fromnp(np.zeros((64), dtype=np.float32), "b_conv1")
                    ##First conv layer is 11x11, 3 input channels into 64 output channels
                    #self.W_conv1 = weight_variable_xavier([11, 11, 3, 64], "w_conv1", conv=True)
                    #self.B_conv1 = bias_variable([64], "b_conv1")
                self.h_conv1 = tf.nn.relu(conv2d(self.inputImage, self.W_conv1, "conv1") + self.B_conv1)
                self.h_norm1 = tf.nn.local_response_normalization(self.h_conv1, name="LRN1")
                #relu commutes with max pooling, so relu could be applied after pooling for efficiency
                self.h_pool1 = maxpool_2x2(self.h_norm1, "pool1")

            with tf.name_scope("Conv2Ops"):
                #Second conv layer is 5x5 conv, into 256 output channels
                if(inMatFilename):
                    self.W_conv2 = weight_variable_fromnp(npWeights["conv2_w"], "w_conv2")
                    self.B_conv2 = weight_variable_fromnp(npWeights["conv2_b"], "b_conv2")
                else:
                    self.W_conv2 = weight_variable_fromnp(np.zeros((5, 5, 64, 256), dtype=np.float32), "w_conv2")
                    self.B_conv2 = weight_variable_fromnp(np.zeros((256), dtype=np.float32), "b_conv2")
                    #self.W_conv2 = weight_variable_xavier([5, 5, 64, 256], "w_conv2", conv=True)
                    #self.B_conv2 = bias_variable([256], "b_conv2")
                self.h_conv2 = tf.nn.relu(conv2d(self.h_pool1, self.W_conv2, "conv2") + self.B_conv2)
                self.h_norm2 = tf.nn.local_response_normalization(self.h_conv2, name="LRN2")
                self.h_pool2 = maxpool_2x2(self.h_norm2, "pool2")

            #Third layer is 3x3 conv into 256 output channels
            #No pooling
            with tf.name_scope("Conv3Ops"):
                #Third conv layer is 3x3 conv, into 256 output channels
                if(inMatFilename):
                    self.W_conv3 = weight_variable_fromnp(npWeights["conv3_w"], "w_conv3")
                    self.B_conv3 = weight_variable_fromnp(npWeights["conv3_b"], "b_conv3")
                else:
                    self.W_conv3 = weight_variable_fromnp(np.zeros((3, 3, 256, 256), dtype=np.float32), "w_conv3")
                    self.B_conv3 = weight_variable_fromnp(np.zeros((256), dtype=np.float32), "b_conv3")
                    #self.W_conv3 = weight_variable_xavier([3, 3, 256, 256], "w_conv3", conv=True)
                    #self.B_conv3 = bias_variable([256], "b_conv3")
                self.h_conv3 = tf.nn.relu(conv2d(self.h_pool2, self.W_conv3, "conv3") + self.B_conv3, name="relu3")

            #Fourth layer is 3x3 conv into 256 output channels
            #No pooling
            with tf.name_scope("Conv4Ops"):
                #Fourth conv layer is 3x3 conv, into 256 output channels
                if(inMatFilename):
                    self.W_conv4 = weight_variable_fromnp(npWeights["conv4_w"], "w_conv4")
                    self.B_conv4 = weight_variable_fromnp(npWeights["conv4_b"], "b_conv4")
                else:
                    self.W_conv4 = weight_variable_fromnp(np.zeros((3, 3, 256, 256), dtype=np.float32), "w_conv4")
                    self.B_conv4 = weight_variable_fromnp(np.zeros((256), dtype=np.float32), "b_conv4")
                    #self.W_conv4 = weight_variable_xavier([3, 3, 256, 256], "w_conv4", conv=True)
                    #self.B_conv4 = bias_variable([256], "b_conv4")
                self.h_conv4 = tf.nn.relu(conv2d(self.h_conv3, self.W_conv4, "conv4") + self.B_conv4, name="relu4")

            #Fifth layer is 3x3 conv into 256 output channels
            #with pooling
            with tf.name_scope("Conv5Ops"):
                #Fifth conv layer is 3x3 conv, into 256 output channels
                if(inMatFilename):
                    self.W_conv5 = weight_variable_fromnp(npWeights["conv5_w"], "w_conv5")
                    self.B_conv5 = weight_variable_fromnp(npWeights["conv5_b"], "b_conv5")
                else:
                    self.W_conv5 = weight_variable_fromnp(np.zeros((3, 3, 256, 256), dtype=np.float32), "w_conv5")
                    self.B_conv5 = weight_variable_fromnp(np.zeros((256), dtype = np.float32), "b_conv5")
                    #self.W_conv5 = weight_variable_xavier([3, 3, 256, 256], "w_conv5", conv=True)
                    #self.B_conv5 = bias_variable([256], "b_conv5")
                self.h_conv5 = tf.nn.relu(conv2d(self.h_conv4, self.W_conv5, "conv5") + self.B_conv5)
                self.h_norm5 = tf.nn.local_response_normalization(self.h_conv5, name="LRN5")
                self.h_pool5 = maxpool_2x2(self.h_norm5, "pool5")

            #6th layer (not in paper) is 3x3 conv into 256 output channels
            #with pooling
            with tf.name_scope("Conv6Ops"):
                self.W_conv6 = weight_variable_xavier([3, 3, 256, 256], "w_conv6", conv=True)
                self.B_conv6 = bias_variable([256], "b_conv6")
                self.h_conv6 = conv2d(self.h_pool5, self.W_conv6, "conv6") + self.B_conv6
                self.h_pool6 = tf.nn.relu(maxpool_2x2(self.h_conv6, "pool6"), name="relu6")

            self.keep_prob = tf.placeholder(tf.float32)

            #Next are the fully connected layers
            #We have downsampled by 16 at this point (pool1, pool2, pool5, pool6)
            #fc1 has 2048 channels here
            numInputs = (inputShape[0]/16) * (inputShape[1]/16) * 256
            with tf.name_scope("FC1"):
                self.W_fc1 = weight_variable([numInputs, 2048], "w_fc1", 1e-6)
                self.B_fc1 = bias_variable([2048], "b_fc1")
                h_pool6_flat = tf.reshape(self.h_pool6, [-1, numInputs], name="pool6_flat")
                self.h_fc1 = tf.nn.relu(tf.matmul(h_pool6_flat, self.W_fc1, name="fc1") + self.B_fc1, "fc1_relu")
                self.h_fc1_drop = tf.nn.dropout(self.h_fc1, self.keep_prob)

        #Put all opt layers on cpu
        with tf.device('/cpu:0'):

            #fc2 should have 128 channels
            with tf.name_scope("FC2"):
                self.W_fc2 = weight_variable_xavier([2048, 128], "w_fc2", conv=False)
                self.B_fc2 = bias_variable([128], "b_fc2")
                self.h_fc2 = tf.nn.relu(tf.matmul(self.h_fc1_drop, self.W_fc2, name="fc2") + self.B_fc2, "fc2_relu")
                self.h_fc2_drop = tf.nn.dropout(self.h_fc2, self.keep_prob)

            #fc3 should have 16 channels
            #fc3 also uses a sigmoid function
            #We change it to tanh
            with tf.name_scope("FC3"):
                self.W_fc3 = weight_variable_xavier([128, 16], "w_fc3", conv=False)
                self.B_fc3 = bias_variable([16], "b_fc3")
                self.h_fc3 = tf.tanh(tf.matmul(self.h_fc2_drop, self.W_fc3, name="fc3") + self.B_fc3, "fc3_tanh")


            #Finally, fc4 condenses into 1 output value
            with tf.name_scope("FC4"):
                self.W_fc4 = weight_variable_xavier([16, 1], "w_fc4", conv=False)
                self.B_fc4 = bias_variable([1], "b_fc4")
                self.est = tf.matmul(self.h_fc3, self.W_fc4, name="est") + self.B_fc4

            with tf.name_scope("Loss"):
                #Define loss
                self.loss = tf.reduce_mean(tf.square(self.gt - self.est))/2

            with tf.name_scope("Opt"):
                #Define optimizer
                #self.optimizerAll = tf.train.AdagradOptimizer(self.learningRate).minimize(self.loss)
                #self.optimizerFC = tf.train.AdagradOptimizer(self.learningRate).minimize(self.loss,
                self.optimizerAll = tf.train.AdamOptimizer(self.learningRate).minimize(self.loss)
                self.optimizerFC = tf.train.AdamOptimizer(self.learningRate).minimize(self.loss,
                        var_list=[self.W_conv6,
                            self.B_conv6,
                            self.W_fc1,
                            self.B_fc1,
                            self.W_fc2,
                            self.B_fc2,
                            self.W_fc3,
                            self.B_fc3,
                            self.W_fc4,
                            self.B_fc4]
                        )
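                #Note: optimizerFC only updates conv6 and the fully connected layers,
                #i.e. the layers that are never loaded from the .mat weights, while
                #optimizerAll fine-tunes every trainable variable.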

        #Summaries
        tf.scalar_summary('l2 loss', self.loss)
        tf.histogram_summary('input', self.inputImage)
        tf.histogram_summary('gt', self.gt)
        tf.histogram_summary('conv1', self.h_pool1)
        tf.histogram_summary('conv2', self.h_pool2)
        tf.histogram_summary('conv3', self.h_conv3)
        tf.histogram_summary('conv4', self.h_conv4)
        tf.histogram_summary('conv5', self.h_pool5)
        tf.histogram_summary('conv6', self.h_pool6)
        tf.histogram_summary('fc1', self.h_fc1)
        tf.histogram_summary('fc2', self.h_fc2)
        tf.histogram_summary('fc3', self.h_fc3)
        tf.histogram_summary('est', self.est)
        tf.histogram_summary('w_conv1', self.W_conv1)
        tf.histogram_summary('b_conv1', self.B_conv1)
        tf.histogram_summary('w_conv2', self.W_conv2)
        tf.histogram_summary('b_conv2', self.B_conv2)
        tf.histogram_summary('w_conv3', self.W_conv3)
        tf.histogram_summary('b_conv3', self.B_conv3)
        tf.histogram_summary('w_conv4', self.W_conv4)
        tf.histogram_summary('b_conv4', self.B_conv4)
        tf.histogram_summary('w_conv5', self.W_conv5)
        tf.histogram_summary('b_conv5', self.B_conv5)
        tf.histogram_summary('w_conv6', self.W_conv6)
        tf.histogram_summary('b_conv6', self.B_conv6)
        tf.histogram_summary('w_fc1', self.W_fc1)
        tf.histogram_summary('b_fc1', self.B_fc1)
        tf.histogram_summary('w_fc2', self.W_fc2)
        tf.histogram_summary('b_fc2', self.B_fc2)
        tf.histogram_summary('w_fc3', self.W_fc3)
        tf.histogram_summary('b_fc3', self.B_fc3)
        tf.histogram_summary('w_fc4', self.W_fc4)
        tf.histogram_summary('b_fc4', self.B_fc4)

        #Define saver
        self.saver = tf.train.Saver()
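
A short usage note, not part of the example: keep_prob is a placeholder, so the dropout rate is chosen at run time. The sketch below assumes a hypothetical RegressionModel wrapper and getBatch helper; it feeds keep_prob below 1.0 for training steps and 1.0 when evaluating.

#Hypothetical driver illustrating the keep_prob placeholder (pre-1.0 TensorFlow API).
import tensorflow as tf

model = RegressionModel()  #assumed wrapper that calls buildModel()
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    images, targets = getBatch()  #assumed data helper; targets shaped [batch, 1]
    #Training step: apply dropout by keeping 50% of the FC activations
    sess.run(model.optimizerAll,
             feed_dict={model.inputImage: images, model.gt: targets, model.keep_prob: 0.5})
    #Evaluation: disable dropout by keeping everything
    est = sess.run(model.est,
                   feed_dict={model.inputImage: images, model.keep_prob: 1.0})
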
Exemple #8
0
    def buildModel(self, inputShape, inMatFilename):
        if (inMatFilename):
            npWeights = loadWeights(inMatFilename)

        #Running on GPU
        with tf.device('gpu:0'):
            with tf.name_scope("inputOps"):
                #Get convolution variables as placeholders
                self.inputImage = node_variable(
                    [None, inputShape[0], inputShape[1], inputShape[2]],
                    "inputImage")
                self.gt = node_variable([None, 2], "gt")
                #Model variables for convolutions

            with tf.name_scope("Conv1Ops"):
                if (inMatFilename):
                    self.W_conv1 = weight_variable_fromnp(
                        npWeights["conv1_w"], "w_conv1")
                    self.B_conv1 = weight_variable_fromnp(
                        npWeights["conv1_b"], "b_conv1")
                else:
                    self.W_conv1 = weight_variable_fromnp(
                        np.zeros((11, 11, 3, 64), dtype=np.float32), "w_conv1")
                    self.B_conv1 = weight_variable_fromnp(
                        np.zeros((64), dtype=np.float32), "b_conv1")
                    #self.W_conv1 = weight_variable_xavier([11, 11, 3, 64], "w_conv1", conv=True)
                    #self.B_conv1 = bias_variable([64], "b_conv1")
                self.h_conv1 = tf.nn.relu(
                    conv2d(self.inputImage,
                           self.W_conv1,
                           "conv1",
                           stride=[1, 4, 4, 1]) + self.B_conv1)
                self.h_norm1 = tf.nn.local_response_normalization(self.h_conv1,
                                                                  name="LRN1")
                self.h_pool1 = maxpool_2x2(self.h_norm1, "pool1")

            with tf.name_scope("Conv2Ops"):
                if (inMatFilename):
                    self.W_conv2 = weight_variable_fromnp(
                        npWeights["conv2_w"], "w_conv2")
                    self.B_conv2 = weight_variable_fromnp(
                        npWeights["conv2_b"], "b_conv2")
                else:
                    self.W_conv2 = weight_variable_fromnp(
                        np.zeros((5, 5, 64, 256), dtype=np.float32), "w_conv2")
                    self.B_conv2 = weight_variable_fromnp(
                        np.zeros((256), dtype=np.float32), "b_conv2")
                    #self.W_conv2 = weight_variable_xavier([5, 5, 64, 256], "w_conv2", conv=True)
                    #self.B_conv2 = bias_variable([256], "b_conv2")
                self.h_conv2 = tf.nn.relu(
                    conv2d(self.h_pool1, self.W_conv2, "conv2") + self.B_conv2)
                self.h_norm2 = tf.nn.local_response_normalization(self.h_conv2,
                                                                  name="LRN2")
                self.h_pool2 = maxpool_2x2(self.h_norm2, "pool2")

            with tf.name_scope("Conv3Ops"):
                if (inMatFilename):
                    self.W_conv3 = weight_variable_fromnp(
                        npWeights["conv3_w"], "w_conv3")
                    self.B_conv3 = weight_variable_fromnp(
                        npWeights["conv3_b"], "b_conv3")
                else:
                    self.W_conv3 = weight_variable_fromnp(
                        np.zeros((3, 3, 256, 256), dtype=np.float32),
                        "w_conv3")
                    self.B_conv3 = weight_variable_fromnp(
                        np.zeros((256), dtype=np.float32), "b_conv3")
                    #self.W_conv3 = weight_variable_xavier([3, 3, 256, 256], "w_conv3", conv=True)
                    #self.B_conv3 = bias_variable([256], "b_conv3")
                self.h_conv3 = tf.nn.relu(
                    conv2d(self.h_pool2, self.W_conv3, "conv3") + self.B_conv3,
                    name="relu3")

            with tf.name_scope("Conv4Ops"):
                if (inMatFilename):
                    self.W_conv4 = weight_variable_fromnp(
                        npWeights["conv4_w"], "w_conv4")
                    self.B_conv4 = weight_variable_fromnp(
                        npWeights["conv4_b"], "b_conv4")
                else:
                    self.W_conv4 = weight_variable_fromnp(
                        np.zeros((3, 3, 256, 256), dtype=np.float32),
                        "w_conv4")
                    self.B_conv4 = weight_variable_fromnp(
                        np.zeros((256), dtype=np.float32), "b_conv4")
                    #self.W_conv4 = weight_variable_xavier([3, 3, 256, 256], "w_conv4", conv=True)
                    #self.B_conv4 = bias_variable([256], "b_conv4")
                self.h_conv4 = tf.nn.relu(
                    conv2d(self.h_conv3, self.W_conv4, "conv4") + self.B_conv4,
                    name="relu4")

            with tf.name_scope("Conv5Ops"):
                if (inMatFilename):
                    self.W_conv5 = weight_variable_fromnp(
                        npWeights["conv5_w"], "w_conv5")
                    self.B_conv5 = weight_variable_fromnp(
                        npWeights["conv5_b"], "b_conv5")
                else:
                    self.W_conv5 = weight_variable_fromnp(
                        np.zeros((3, 3, 256, 256), dtype=np.float32),
                        "w_conv5")
                    self.B_conv5 = weight_variable_fromnp(
                        np.zeros((256), dtype=np.float32), "b_conv5")
                    #self.W_conv5 = weight_variable_xavier([3, 3, 256, 256], "w_conv5", conv=True)
                    #self.B_conv5 = bias_variable([256], "b_conv5")
                self.h_conv5 = tf.nn.relu(
                    conv2d(self.h_conv4, self.W_conv5, "conv5") + self.B_conv5)
                self.h_pool5 = maxpool_2x2(self.h_conv5, "pool5")

            #placeholder for specifying dropout
            self.keep_prob = tf.placeholder(tf.float32)

            #32 comes from 4 stride in conv1, 2 stride in pool1, 2 stride in pool2, 2 stride in pool5
            numInputs = (inputShape[0] / 32) * (inputShape[1] / 32) * 256
            with tf.name_scope("FC1"):
                #if(inMatFilename):
                #    self.W_conv5 = weight_variable_fromnp(npWeights["fc1_w"], "w_fc1")
                #    self.B_conv5 = weight_variable_fromnp(npWeights["fc1_b"], "b_fc1")
                #else:
                #    self.W_conv5 = weight_variable_fromnp(np.zeros((6*6*256, 4096), dtype=np.float32), "w_fc1")
                #    self.B_conv5 = weight_variable_fromnp(np.zeros((4096), dtype = np.float32), "b_fc1")
                self.W_fc1 = weight_variable_xavier([numInputs, 4096], "w_fc1")
                self.B_fc1 = bias_variable([4096], "b_fc1")
                h_pool5_flat = tf.reshape(self.h_pool5, [-1, numInputs],
                                          name="pool5_flat")
                self.h_fc1 = tf.nn.relu(
                    tf.matmul(h_pool5_flat, self.W_fc1, name="fc1") +
                    self.B_fc1, "fc1_relu")
                self.h_fc1_drop = tf.nn.dropout(self.h_fc1, self.keep_prob)

            with tf.name_scope("FC2"):
                #if(inMatFilename):
                #    self.W_conv5 = weight_variable_fromnp(npWeights["fc2_w"], "w_fc2")
                #    self.B_conv5 = weight_variable_fromnp(npWeights["fc2_b"], "b_fc2")
                #else:
                #    self.W_conv5 = weight_variable_fromnp(np.zeros((4096, 4096), dtype=np.float32), "w_fc2")
                #    self.B_conv5 = weight_variable_fromnp(np.zeros((4096), dtype = np.float32), "b_fc2")
                self.W_fc2 = weight_variable_xavier([4096, 4096], "w_fc2")
                self.B_fc2 = bias_variable([4096], "b_fc2")
                self.h_fc2 = tf.nn.relu(
                    tf.matmul(self.h_fc1_drop, self.W_fc2, name="fc2") +
                    self.B_fc2, "fc2_relu")
                self.h_fc2_drop = tf.nn.dropout(self.h_fc2, self.keep_prob)

            #fc3 maps down to the 2 output classes
            #and uses a softmax activation
            with tf.name_scope("FC3"):
                #if(inMatFilename):
                #    self.W_conv5 = weight_variable_fromnp(npWeights["fc3_w"], "w_fc3")
                #    self.B_conv5 = weight_variable_fromnp(npWeights["fc3_b"], "b_fc3")
                #else:
                #    self.W_conv5 = weight_variable_fromnp(np.zeros((4096, 2), dtype=np.float32), "w_fc3")
                #    self.B_conv5 = weight_variable_fromnp(np.zeros((2), dtype = np.float32), "b_fc3")
                self.W_fc3 = weight_variable_xavier([4096, 2], "w_fc3")
                self.B_fc3 = bias_variable([2], "b_fc3")
                self.est = tf.nn.softmax(
                    tf.matmul(self.h_fc2_drop, self.W_fc3, name="fc3") +
                    self.B_fc3, "fc3_softmax")

            with tf.name_scope("Loss"):
                #Define loss
                #self.loss = tf.reduce_mean(-tf.reduce_sum(self.gt * tf.log(self.est), reduction_indices=[1]))
                self.loss = tf.reduce_mean(
                    -(self.gt[:, 1] * .8 * tf.log(self.est[:, 1]) +
                      self.gt[:, 0] * .2 * tf.log(self.est[:, 0])))
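                #This is a class-weighted cross entropy for the 2-way softmax: errors on
                #class 1 are weighted 0.8 and errors on class 0 are weighted 0.2,
                #presumably to compensate for an imbalanced training set.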

            with tf.name_scope("Opt"):
                #Define optimizer
                #self.optimizerAll = tf.train.AdagradOptimizer(self.learningRate).minimize(self.loss)
                #self.optimizerFC = tf.train.AdagradOptimizer(self.learningRate).minimize(self.loss,
                self.optimizerAll = tf.train.AdamOptimizer(
                    self.learningRate).minimize(self.loss)
                self.optimizerFC = tf.train.AdamOptimizer(
                    self.learningRate).minimize(self.loss,
                                                var_list=[
                                                    self.W_fc1, self.B_fc1,
                                                    self.W_fc2, self.B_fc2,
                                                    self.W_fc3, self.B_fc3
                                                ])

            with tf.name_scope("Metric"):
                self.gtIdx = tf.argmax(self.gt, 1)
                self.estIdx = tf.argmax(self.est, 1)
                boolGtIdx = tf.cast(self.gtIdx, tf.bool)
                boolEstIdx = tf.cast(self.estIdx, tf.bool)

                #Logical and for true positive
                lAnd = tf.logical_and(boolGtIdx, boolEstIdx)
                self.tp = tf.reduce_sum(tf.cast(lAnd, tf.float32))
                #Logical nor for true negatives
                lNor = tf.logical_not(tf.logical_or(boolGtIdx, boolEstIdx))
                self.tn = tf.reduce_sum(tf.cast(lNor, tf.float32))

                #Subtraction and comparison for others
                lSub = self.gtIdx - self.estIdx
                Ones = tf.cast(tf.ones(tf.shape(lSub)), tf.int64)
                self.fn = tf.reduce_sum(
                    tf.cast(tf.equal(lSub, Ones), tf.float32))
                self.fp = tf.reduce_sum(
                    tf.cast(tf.equal(lSub, -Ones), tf.float32))

                #Accuracy, precision, and recall calculations
                self.accuracy = (self.tp + self.tn) / (self.tp + self.tn +
                                                       self.fp + self.fn)
                self.precision = self.tp / (self.tp + self.fp)
                self.recall = self.tp / (self.tp + self.fn)
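                #How lSub encodes the confusion matrix (class index 1 is the positive class):
                #  gtIdx=1, estIdx=0  ->  lSub =  1  -> false negative
                #  gtIdx=0, estIdx=1  ->  lSub = -1  -> false positive
                #True positives/negatives are counted directly from the logical and/nor above.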

        #Summaries
        tf.scalar_summary('loss', self.loss, name="lossSum")
        tf.scalar_summary('accuracy', self.accuracy, name="accSum")
        tf.scalar_summary('precision', self.precision, name="precSum")
        tf.scalar_summary('recall', self.recall, name="recallSum")
        tf.scalar_summary('tp', self.tp, name="tp")
        tf.scalar_summary('fp', self.fp, name="fp")
        tf.scalar_summary('tn', self.tn, name="tn")
        tf.scalar_summary('fn', self.fn, name="fn")

        tf.histogram_summary('input', self.inputImage, name="image")
        tf.histogram_summary('gt', self.gt, name="gt")
        tf.histogram_summary('conv1', self.h_pool1, name="conv1")
        tf.histogram_summary('conv2', self.h_pool2, name="conv2")
        tf.histogram_summary('conv3', self.h_conv3, name="conv3")
        tf.histogram_summary('conv4', self.h_conv4, name="conv4")
        tf.histogram_summary('conv5', self.h_pool5, name="conv5")
        tf.histogram_summary('fc1', self.h_fc1, name="fc1")
        tf.histogram_summary('fc2', self.h_fc2, name="fc2")
        tf.histogram_summary('est', self.est, name="fc3")
        tf.histogram_summary('w_conv1', self.W_conv1, name="w_conv1")
        tf.histogram_summary('b_conv1', self.B_conv1, name="b_conv1")
        tf.histogram_summary('w_conv2', self.W_conv2, name="w_conv2")
        tf.histogram_summary('b_conv2', self.B_conv2, name="b_conv2")
        tf.histogram_summary('w_conv3', self.W_conv3, name="w_conv3")
        tf.histogram_summary('b_conv3', self.B_conv3, name="b_conv3")
        tf.histogram_summary('w_conv4', self.W_conv4, name="w_conv4")
        tf.histogram_summary('b_conv4', self.B_conv4, name="b_conv4")
        tf.histogram_summary('w_conv5', self.W_conv5, name="w_conv5")
        tf.histogram_summary('b_conv5', self.B_conv5, name="b_conv5")
        tf.histogram_summary('w_fc1', self.W_fc1, name="w_fc1")
        tf.histogram_summary('b_fc1', self.B_fc1, name="b_fc1")
        tf.histogram_summary('w_fc2', self.W_fc2, name="w_fc2")
        tf.histogram_summary('b_fc2', self.B_fc2, name="b_fc2")
        tf.histogram_summary('w_fc3', self.W_fc3, name="w_fc3")
        tf.histogram_summary('b_fc3', self.B_fc3, name="b_fc3")

        #Define saver
        self.saver = tf.train.Saver()
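
Since this example defines both summaries and a Saver, a minimal training loop for it might look like the sketch below. The ClassifierModel wrapper, the getBatch helper, the file paths, and the step counts are assumptions; the summary calls use the same pre-1.0 TensorFlow API as the example itself.

#Hypothetical driver for the classifier above, showing summary logging and checkpointing.
import tensorflow as tf

model = ClassifierModel(inputShape=[224, 224, 3], inMatFilename="alexnet_weights.mat")  #assumed wrapper
mergedSummaries = tf.merge_all_summaries()
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    writer = tf.train.SummaryWriter("/tmp/tfout")
    for step in range(10000):
        images, labels = getBatch()  #labels are one-hot, shaped [batch, 2]
        feed = {model.inputImage: images, model.gt: labels, model.keep_prob: 0.5}
        _, summaryStr = sess.run([model.optimizerAll, mergedSummaries], feed_dict=feed)
        writer.add_summary(summaryStr, step)
        if step % 1000 == 0:
            model.saver.save(sess, "/tmp/tfout/model.ckpt", global_step=step)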