Example #1
    def _create_network(self):
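        # Two conv -> batch-norm -> ReLU -> 2x2 max-pool stages, a 7x7 average
        # pool, dropout, and a linear readout, all pinned to the CPU.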
        with tf.device('/cpu:0'):
            with tf.name_scope('Conv1'):
                self.conv1_bn = batch_norm(conv2d(self.x_image, self.W_conv1),
                                           self.beta_conv1, self.gamma_conv1,
                                           self.phase_train)
                self.h_conv1 = tf.nn.relu(self.conv1_bn)
                self.h_pool1 = max_pool_2x2(self.h_conv1)

            with tf.name_scope('Conv2'):
                self.conv2_bn = batch_norm(conv2d(self.h_pool1, self.W_conv2),
                                           self.beta_conv2, self.gamma_conv2,
                                           self.phase_train)

                self.h_conv2 = tf.nn.relu(self.conv2_bn)
                self.h_pool2 = max_pool_2x2(self.h_conv2)

            with tf.name_scope('Avg_pool'):
                self.h_avg_pool = tf.nn.avg_pool(self.h_pool2,
                                                 ksize=[1, 7, 7, 1],
                                                 strides=[1, 1, 1, 1],
                                                 padding='VALID',
                                                 name='Avg_pool')
                self.h_drop = tf.nn.dropout(self.h_avg_pool,
                                            keep_prob=self.keep_prob,
                                            name='Dropout')

            with tf.name_scope('Readout'):
                self.h_drop_flat = tf.reshape(self.h_drop, [-1, 64])
                self.y_conv = tf.matmul(self.h_drop_flat,
                                        self.W_fc) + self.b_fc
Example #2
    def _create_network(self):

        with tf.variable_scope(self.name):
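            # Dueling DQN head: three conv layers feed separate 512-unit value
            # and advantage streams, combined with the mean-advantage baseline;
            # training minimizes an MSE loss against Q_target with Adam.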
            self.state = tf.placeholder(
                shape=[None, self.height, self.width, 1], dtype=tf.float32)

            conv1, w1, b1 = conv2d(self.state, 32, [8, 8, 1, 32], [4, 4],
                                   "conv1")
            conv2, w2, b2 = conv2d(conv1, 64, [4, 4, 32, 64], [2, 2], "conv2")
            conv3, w3, b3 = conv2d(conv2, 64, [3, 3, 64, 64], [1, 1], "conv3")
            self.vars += [w1, b1, w2, b2, w3, b3]

            shape = conv3.get_shape().as_list()
            conv3_flat = tf.reshape(
                conv3, [-1, reduce(lambda x, y: x * y, shape[1:])])
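            # `reduce` is a builtin only in Python 2; under Python 3 this
            # snippet additionally needs `from functools import reduce`.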

            # Dueling
            value_hid, w4, b4 = linear(conv3_flat, 512, "value_hid")
            adv_hid, w5, b5 = linear(conv3_flat, 512, "adv_hid")

            value, w6, b6 = linear(value_hid, 1, "value", activation_fn=None)
            advantage, w7, b7 = linear(adv_hid,
                                       self.num_actions,
                                       "advantage",
                                       activation_fn=None)
            self.vars += [w4, b4, w5, b5, w6, b6, w7, b7]

            # Average Dueling
            self.Qs = value + (
                advantage - tf.reduce_mean(advantage, axis=1, keep_dims=True))

            # action with highest Q values
            self.a = tf.argmax(self.Qs, 1)
            # Q value belonging to selected action
            self.Q = tf.reduce_max(self.Qs, 1)

            # For training
            self.Q_target = tf.placeholder(shape=[None], dtype=tf.float32)
            self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
            actions_onehot = tf.one_hot(self.actions,
                                        self.num_actions,
                                        on_value=1.,
                                        off_value=0.,
                                        axis=1,
                                        dtype=tf.float32)

            Q_tmp = tf.reduce_sum(tf.multiply(self.Qs, actions_onehot), axis=1)
            loss = tf.reduce_mean(tf.square(self.Q_target - Q_tmp))
            optimizer = tf.train.AdamOptimizer()
            self.minimize = optimizer.minimize(loss)
Example #3
    def create_tf_training_model(self):
        self.logger.info("Creating tf training model")
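        # 28x28 image classifier: two 5x5 conv + 2x2 max-pool stages, a
        # 1024-unit fully connected layer with dropout, and a 47-way softmax
        # readout trained with Adam; `self.features` keeps the pooled maps
        # reshaped to [batch, 49, 64].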
        self.x = tf.placeholder(tf.float32, [None, 784], "x")
        x_image = tf.reshape(self.x, [-1, 28, 28, 1])

        W_conv1 = tools.weight_variable([5, 5, 1, 32])
        b_conv1 = tools.bias_variable([32])
        h_conv1 = tf.nn.relu(tools.conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = tools.max_pool_2x2(h_conv1)

        W_conv2 = tools.weight_variable([5, 5, 32, 64])
        b_conv2 = tools.bias_variable([64])
        h_conv2 = tf.nn.relu(tools.conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = tools.max_pool_2x2(h_conv2)
        self.features = tf.reshape(h_pool2, [-1, 49, 64])

        self.h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

        W_fc1 = tools.weight_variable([7 * 7 * 64, 1024])
        b_fc1 = tools.bias_variable([1024])
        h_fc1 = tf.nn.relu(tf.matmul(self.h_pool2_flat, W_fc1) + b_fc1)
        self.keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
        W_fc2 = tools.weight_variable([1024, 47])
        b_fc2 = tools.bias_variable([47])
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

        y_ = tf.placeholder(tf.float32, [None, 47])

        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
        self.tf_training_model = tf.train.AdamOptimizer(1e-4).minimize(
            cross_entropy)
        self.tf_test_model = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        self.tf_accuracy_model = tf.reduce_mean(
            tf.cast(self.tf_test_model, tf.float32))
Example #4
def ResNet(x, _dropout, is_training):
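    # Stage configuration table: `depth` holds the three bottleneck channel
    # counts of a residual block and `num_class` is the number of residual
    # blocks in that stage (despite its name).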
    ResNet_demo = {
        "layer_41": [{
            "depth": [64, 64, 256],
            "num_class": 3
        }, {
            "depth": [128, 128, 512],
            "num_class": 4
        }, {
            "depth": [256, 256, 1024],
            "num_class": 6
        }],
        "layer_50": [{
            "depth": [64, 64, 256],
            "num_class": 3
        }, {
            "depth": [128, 128, 512],
            "num_class": 4
        }, {
            "depth": [256, 256, 1024],
            "num_class": 6
        }, {
            "depth": [512, 512, 2048],
            "num_class": 3
        }],
        "layer_101": [{
            "depth": [64, 64, 256],
            "num_class": 3
        }, {
            "depth": [128, 128, 512],
            "num_class": 4
        }, {
            "depth": [256, 256, 1024],
            "num_class": 23
        }, {
            "depth": [512, 512, 2048],
            "num_class": 3
        }],
        "layer_152": [{
            "depth": [64, 64, 256],
            "num_class": 3
        }, {
            "depth": [128, 128, 512],
            "num_class": 8
        }, {
            "depth": [256, 256, 1024],
            "num_class": 36
        }, {
            "depth": [512, 512, 2048],
            "num_class": 3
        }]
    }
    Res_demo = ResNet_demo["layer_41"]
    layers = []

    # scale1
    with tf.variable_scope('scale1'):
        conv1 = tools.conv2d(x, [7, 7], 64, 2, is_training, False, True, True)
    with tf.variable_scope('pool1'):
        pool1 = tf.nn.max_pool(conv1,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding="SAME")
    layers.append(pool1)

    # scale2,scale3,scale4,scale5
    for k in range(3):
        with tf.variable_scope('scale{}'.format(k + 2)):
            for i in range(Res_demo[k]["num_class"]):
                with tf.variable_scope('block{}'.format(i + 1)):
                    conv_layer = tools.residual_block(layers[-1],
                                                      Res_demo[k]["depth"],
                                                      is_training,
                                                      first_block=(i == 0),
                                                      first_stage=(k == 0))
                layers.append(conv_layer)

    fea = tf.reduce_mean(layers[-1], [1, 2])
    '''
    output = tf.layers.dense(inputs=fc, units=4, activation=None, name='class_map')
    output = tf.stop_gradient(output)
    '''
    '''
    in_dim = fc.get_shape().as_list()[-1]
    fc1 = tf.layers.dense(inputs=fc, units=in_dim, activation=tf.nn.sigmoid, name='m1')
    y = tf.layers.dense(inputs=fc1, units=4, activation=None, name='m2')
    '''
    fea_con = head_mapping(fea)

    return fea, fea_con
Example #5
#from tensorflow.python import debug as tf_debug
import tensorflow as tf
import tools as tools

d = 784
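# Sketch: the left singular vectors of a 784x784 input are reshaped into 784
# MNIST-sized (28x28) images and pushed through two conv + max-pool stages.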
x = tf.placeholder(tf.float32, [d, d], "x")
x2 = tf.placeholder(tf.float32, [d, d], "x2")
# Define loss and optimizer

s, u, v = tf.svd(x, full_matrices=True, compute_uv=True, name="svd")

W_conv1 = tools.weight_variable([5, 5, 1, 32], "w1")
b_conv1 = tools.bias_variable([32], "b1")
#
x_image = tf.reshape(u, [-1, 28, 28, 1])
#
h_conv1 = tf.nn.relu(tools.conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = tools.max_pool_2x2(h_conv1)

W_conv2 = tools.weight_variable([5, 5, 32, 16], "w2")
b_conv2 = tools.bias_variable([16], "b2")
#
h_conv2 = tf.nn.relu(tools.conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = tools.max_pool_2x2(h_conv2)

print("hconv1", h_pool2.shape)
h_pool2_flat = tf.reshape(h_pool2, [d, 7 * 7 * 16])

print("hflat1", h_pool2_flat.shape)
# h_pool1 = tools.max_pool_2x2(h_conv1)
#
# W_conv2 = tools.weight_variable([5, 5, 32, 64], "w2")
Example #6
def vgg16(x, CLASS_NUM, _dropout, is_training):
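    # VGG-16-style trunk; 1x1 side branches from conv3_3, conv4_3 and conv5_3
    # are resized to 56x56 and concatenated, refined by parallel dilated
    # convolutions, pooled globally, and classified by a CAM-style linear
    # layer. Returns the predicted class, the logits and a 224x224 feature map.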
    with tf.variable_scope('layer1_1'):
        conv1_1 = tools.conv2d(x, [3, 3], 64, 1, is_training, True, True, True)
    with tf.variable_scope('layer1_2'):
        conv1_2 = tools.conv2d(conv1_1, [3, 3], 64, 1, is_training, True, True,
                               True)
    with tf.variable_scope('pool1'):
        pool1 = tf.nn.max_pool(conv1_2,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding="SAME")

    with tf.variable_scope('layer2_1'):
        conv2_1 = tools.conv2d(pool1, [3, 3], 128, 1, is_training, True, True,
                               True)
    with tf.variable_scope('layer2_2'):
        conv2_2 = tools.conv2d(conv2_1, [3, 3], 128, 1, is_training, True,
                               True, True)
    with tf.variable_scope('pool2'):
        pool2 = tf.nn.max_pool(conv2_2,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding="SAME")

    with tf.variable_scope('layer3_1'):
        conv3_1 = tools.conv2d(pool2, [3, 3], 256, 1, is_training, True, True,
                               True)
    with tf.variable_scope('layer3_2'):
        conv3_2 = tools.conv2d(conv3_1, [3, 3], 256, 1, is_training, True,
                               True, True)
    with tf.variable_scope('layer3_3'):
        conv3_3 = tools.conv2d(conv3_2, [3, 3], 256, 1, is_training, True,
                               True, True)
    with tf.variable_scope('pool3'):
        pool3 = tf.nn.max_pool(conv3_3,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding="SAME")

    with tf.variable_scope('layer4_1'):
        conv4_1 = tools.conv2d(pool3, [3, 3], 512, 1, is_training, True, True,
                               True)
    with tf.variable_scope('layer4_2'):
        conv4_2 = tools.conv2d(conv4_1, [3, 3], 512, 1, is_training, True,
                               True, True)
    with tf.variable_scope('layer4_3'):
        conv4_3 = tools.conv2d(conv4_2, [3, 3], 512, 1, is_training, True,
                               True, True)
    with tf.variable_scope('pool4'):
        pool4 = tf.nn.max_pool(conv4_3,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding="SAME")

    with tf.variable_scope('layer5_1'):
        conv5_1 = tools.conv2d(pool4, [3, 3], 512, 1, is_training, True, False,
                               True)
    with tf.variable_scope('layer5_2'):
        conv5_2 = tools.conv2d(conv5_1, [3, 3], 512, 1, is_training, True,
                               False, True)
    with tf.variable_scope('layer5_3'):
        conv5_3 = tools.conv2d(conv5_2, [3, 3], 512, 1, is_training, True,
                               False, True)

    fmp_3 = conv(conv3_3,
                 kernel_size=[1, 1],
                 out_channels=256,
                 stride=[1, 1, 1, 1],
                 is_pretrain=is_training,
                 bias=False,
                 bn=False,
                 layer_name='conv_3')
    fmp_3 = tf.image.resize_bilinear(fmp_3, [56, 56])
    fmp_4 = conv(conv4_3,
                 kernel_size=[1, 1],
                 out_channels=256,
                 stride=[1, 1, 1, 1],
                 is_pretrain=is_training,
                 bias=False,
                 bn=False,
                 layer_name='conv_4')
    fmp_4 = tf.image.resize_bilinear(fmp_4, [56, 56])
    fmp_5 = conv(conv5_3,
                 kernel_size=[1, 1],
                 out_channels=256,
                 stride=[1, 1, 1, 1],
                 is_pretrain=is_training,
                 bias=False,
                 bn=False,
                 layer_name='conv_5')
    fmp_5 = tf.image.resize_bilinear(fmp_5, [56, 56])
    fmp = tf.concat([fmp_3, fmp_4, fmp_5], -1)
    with tf.variable_scope('dilation'):
        fmp_dil_1 = dil_conv(fmp,
                             kernel_size=[3, 3],
                             out_channels=256,
                             rate=1,
                             is_pretrain=is_training,
                             bias=False,
                             bn=False,
                             layer_name='dilation1')
        fmp_dil_2 = dil_conv(fmp,
                             kernel_size=[3, 3],
                             out_channels=256,
                             rate=2,
                             is_pretrain=is_training,
                             bias=False,
                             bn=False,
                             layer_name='dilation2')
        fmp_dil_3 = dil_conv(fmp,
                             kernel_size=[3, 3],
                             out_channels=256,
                             rate=4,
                             is_pretrain=is_training,
                             bias=False,
                             bn=False,
                             layer_name='dilation3')
        fmp_dil_4 = dil_conv(fmp,
                             kernel_size=[3, 3],
                             out_channels=256,
                             rate=8,
                             is_pretrain=is_training,
                             bias=False,
                             bn=False,
                             layer_name='dilation4')
        fmp_dilation = tf.concat([fmp_dil_1, fmp_dil_2, fmp_dil_3, fmp_dil_4],
                                 -1)
        fmp = tools.conv(fmp_dilation,
                         kernel_size=[1, 1],
                         out_channels=512,
                         stride=[1, 1, 1, 1],
                         is_pretrain=is_training,
                         bias=False,
                         bn=False,
                         layer_name='conv_dilation')

    gap = tf.reduce_mean(fmp, [1, 2])

    with tf.variable_scope('CAM_fc'):
        cam_w = tf.get_variable(
            'CAM_W',
            shape=[512, CLASS_NUM],
            initializer=tf.contrib.layers.xavier_initializer(0.0))

    output = tf.matmul(gap, cam_w)

    annotation_pred = tf.argmax(output, axis=-1)

    fmp = tf.image.resize_bilinear(fmp, [224, 224])

    return annotation_pred, output, fmp
Example #7
def ResNet(x, _dropout, is_training):
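    # Same ResNet trunk as Example #4, but the globally pooled features feed a
    # CAM-style 2-way linear classifier and only the logits are returned.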
    ResNet_demo = {
        "layer_41": [{
            "depth": [64, 64, 256],
            "num_class": 3
        }, {
            "depth": [128, 128, 512],
            "num_class": 4
        }, {
            "depth": [256, 256, 1024],
            "num_class": 6
        }],
        "layer_50": [{
            "depth": [64, 64, 256],
            "num_class": 3
        }, {
            "depth": [128, 128, 512],
            "num_class": 4
        }, {
            "depth": [256, 256, 1024],
            "num_class": 6
        }, {
            "depth": [512, 512, 2048],
            "num_class": 3
        }],
        "layer_101": [{
            "depth": [64, 64, 256],
            "num_class": 3
        }, {
            "depth": [128, 128, 512],
            "num_class": 4
        }, {
            "depth": [256, 256, 1024],
            "num_class": 23
        }, {
            "depth": [512, 512, 2048],
            "num_class": 3
        }],
        "layer_152": [{
            "depth": [64, 64, 256],
            "num_class": 3
        }, {
            "depth": [128, 128, 512],
            "num_class": 8
        }, {
            "depth": [256, 256, 1024],
            "num_class": 36
        }, {
            "depth": [512, 512, 2048],
            "num_class": 3
        }]
    }
    Res_demo = ResNet_demo["layer_41"]
    layers = []

    # scale1
    with tf.variable_scope('scale1'):
        conv1 = tools.conv2d(x, [7, 7], 64, 2, is_training, False, True, True)
    with tf.variable_scope('pool1'):
        pool1 = tf.nn.max_pool(conv1,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding="SAME")
    layers.append(pool1)

    # scale2,scale3,scale4,scale5
    for k in range(3):
        with tf.variable_scope('scale{}'.format(k + 2)):
            for i in range(Res_demo[k]["num_class"]):
                with tf.variable_scope('block{}'.format(i + 1)):
                    conv_layer = tools.residual_block(layers[-1],
                                                      Res_demo[k]["depth"],
                                                      is_training,
                                                      first_block=(i == 0),
                                                      first_stage=(k == 0))
                layers.append(conv_layer)

    fc = tf.reduce_mean(layers[-1], [1, 2])

    with tf.variable_scope('CAM_fc'):
        cam_w = tf.get_variable(
            'CAM_W',
            shape=[fc.get_shape().as_list()[-1], 2],
            initializer=tf.contrib.layers.xavier_initializer(0.0))
    output = tf.matmul(fc, cam_w)

    return output
Example #8
    def train_one_lda_norm(self):
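        # One-vs-rest scoring sketch: conv features are L2-normalized, the
        # component lying in a rank-`rank` subspace of class-0 features
        # (obtained from a truncated SVD) is projected out, and a small FC
        # layer on the residual is trained so that its summed activation is
        # small for class 0 and large for every other class.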
        def weight_variable(shape, name=None):
            initial = tf.truncated_normal(shape, stddev=0.1)
            return tf.Variable(initial, name=name)

        def bias_variable(shape, name=None):
            initial = tf.constant(0.1, shape=shape)
            return tf.Variable(initial, name=name)

        feature_size = 7 * 7 * 64
        batch_size = 60
        rank = 100
        class_size = 10

        # NOTE: this Saver is never used below; if no variables have been
        # created yet, constructing it here also raises
        # "ValueError: No variables to save".
        # saver = tf.train.Saver()

        self.logger.info("Creating tf training model with angle separation")

        x = tf.placeholder(tf.float32, [None, 784], "x")
        wrong_norm_ref = tf.placeholder(tf.float32)
        keep_prob = tf.placeholder(tf.float32)
        y_ = tf.placeholder(tf.int32, [None])
        pro_input = tf.placeholder(tf.float32, [None, feature_size])

        x_image = tf.reshape(x, [-1, 28, 28, 1])

        W_conv1 = tools.weight_variable([3, 3, 1, 32])
        b_conv1 = tools.bias_variable([32])

        h_conv1 = tf.nn.relu(tools.conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = tools.max_pool(h_conv1)

        W_conv2 = tools.weight_variable([3, 3, 32, 64])
        b_conv2 = tools.bias_variable([64])

        h_conv2 = tf.nn.relu(tools.conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = tools.max_pool(h_conv2)

        features = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

        #features = deeplda_mnist.create_norm_network(x, keep_prob)

        features_norm_tra = tf.transpose(tf.nn.l2_normalize(features, axis=1))

        norm = tf.transpose(features_norm_tra -
                            tf.matmul(pro_input, features_norm_tra))

        fcc_weights = {
            'W_fc1': weight_variable([feature_size, 256]),
            'b_fc1': bias_variable([256]),
            'W_fc2': weight_variable([256, 1]),
            'b_fc2': bias_variable([1])
        }
        norm_weighted = tf.nn.relu(
            tf.matmul(norm, fcc_weights['W_fc1']) + fcc_weights['b_fc1'])
        norm_weighted_mean = tf.reduce_sum(norm_weighted, axis=1)
        #norm_summed = tf.nn.relu(tf.matmul(norm_weighted, fcc_weights['W_fc2']) + fcc_weights['b_fc2'])
        #
        # #norm_minimized = norm
        #

        #wrong_norm = wrong_norm_ref-norm_weighted_mean
        wrong_norm = 1 / norm_weighted_mean
        #
        norm_minimize = tf.where(tf.greater(y_, 0), norm_weighted_mean,
                                 wrong_norm)

        tf_training_model = tf.train.AdamOptimizer(1e-4).minimize(
            norm_minimize)

        input_set = np.concatenate([
            np.concatenate(
                (self.image_clustered_with_gt[number_to_class[data]],
                 self.clustered_test[data])) for data in range(class_size)
        ], axis=0)
        labels = [np.ones(2800), np.zeros(2800 * 9)]
        label_set = np.concatenate(labels)

        # input_test = np.concatenate([self.clustered_test[data] for data in range(class_size)], axis=0)
        # label_test_list = [np.zeros((len(self.clustered_test[0]), class_size)) for i in range(class_size)]
        # for i in range(class_size):
        #     label_test_list[i][:, i] = 1
        # label_test = np.concatenate(label_test_list, axis=0)

        dataset = tf.data.Dataset.from_tensor_slices((input_set, label_set))
        dataset = dataset.repeat(20000)
        dataset = dataset.shuffle(buffer_size=10000)
        batched_dataset = dataset.batch(batch_size)
        iterator = batched_dataset.make_initializable_iterator()
        next_element = iterator.get_next()

        with tf.Session() as sess:

            sess.run(tf.global_variables_initializer())
            sess.run(iterator.initializer)
            wrong_norm_value = 1

            for m in range(10000):

                #print("first norm max: {}".format(norm_value[0]))

                # for _ in range(10):
                #     norm_value = sess.run(norm,
                #                    feed_dict={x: self.image_clustered_with_gt[number_to_class[0]][0:2400], pro_input: pro, keep_prob: 1.0})
                #     print("norm value shape {}".format(norm_value.shape))
                #     #print("first norm max: {}".format(norm_value[0]))
                #
                #     norm_value_index = sess.run(norm,
                #                           feed_dict={x: self.image_clustered_with_gt[number_to_class[0]][0:1],
                #                                      pro_input: pro,
                #                                      keep_prob: 1.0})
                #
                #     print("second norm shape: {}".format(norm_value_index.shape))
                #     diff = norm_value[0] - norm_value_index[0]
                #     print("diff: {}".format(diff))
                #     print("diff sum: {}".format(np.sum(diff)))

                max_norm_ref = 1
                # while max_norm_ref > 0.05:

                feature_matrix = np.transpose(
                    sess.run(
                        features,
                        feed_dict={
                            x:
                            self.image_clustered_with_gt[number_to_class[0]],
                            keep_prob: 1.0
                        }))

                feature_matrix = feature_matrix / np.linalg.norm(
                    feature_matrix)
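                # `svds` is assumed to be scipy.sparse.linalg.svds, i.e. a
                # rank-`rank` truncated SVD of the normalized feature matrix.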
                u, s, v = svds(feature_matrix, rank)
                pro = np.matmul(u, np.transpose(u))

                # for k in range(40):
                #     sess.run(tf_training_model,
                #              feed_dict={x: self.image_clustered_with_gt[number_to_class[0]][k * 60:60 * (k + 1)],
                #                         y_: np.ones(60), pro_input: pro,
                #                         keep_prob: 1.0, wrong_norm_ref: wrong_norm_value})

                for p in range(10):
                    print("traning {}".format(p))
                    for k in range(400):
                        batch_xs, batch_ys = sess.run(next_element)
                        sess.run(tf_training_model,
                                 feed_dict={
                                     x: batch_xs,
                                     y_: batch_ys,
                                     pro_input: pro,
                                     keep_prob: 1.0,
                                     wrong_norm_ref: wrong_norm_value
                                 })
                norm_value = sess.run(norm_weighted_mean,
                                      feed_dict={
                                          x:
                                          self.image_clustered_with_gt[
                                              number_to_class[0]][0:2800],
                                          pro_input:
                                          pro,
                                          keep_prob:
                                          1.0
                                      })
                max_norm_ref = np.max(norm_value)
                print("max {}".format(max_norm_ref))

                print("testing")
                norm_value = sess.run(norm_weighted_mean,
                                      feed_dict={
                                          x:
                                          self.image_clustered_with_gt[
                                              number_to_class[0]][0:2800],
                                          pro_input:
                                          pro,
                                          keep_prob:
                                          1.0
                                      })
                if np.max(norm_value) != 0:
                    max_norm_ref = np.max(norm_value)
                print("True norm max {}".format(max_norm_ref))
                wrong_norm_value = 0
                wrong_min = 1
                total_norm_diff = 0
                achieved_count = 0
                for j in range(0, 10):
                    if j != 0:
                        norm_value = sess.run(norm_weighted_mean,
                                              feed_dict={
                                                  x:
                                                  self.image_clustered_with_gt[
                                                      number_to_class[j]],
                                                  pro_input:
                                                  pro,
                                                  keep_prob:
                                                  1.0
                                              })

                        wrong_norm_value = max(wrong_norm_value,
                                               np.max(norm_value))
                        wrong_min = min(wrong_min, np.min(norm_value))
                        diff_value = np.min(norm_value) - max_norm_ref
                        if (diff_value >= 0.0):
                            achieved_count += 1
                        total_norm_diff += diff_value
                        print("{} norm min {}".format(j, diff_value))

                #wrong_norm_value *= 1.5
                #print("wrong norm ref: {}".format(wrong_norm_value))
                norm_ref = (max_norm_ref + wrong_min) / 2
                print("norm ref: {}".format(norm_ref))
                print("total norm min: {}".format(total_norm_diff))
                print("achieved count : {}".format(achieved_count))
                if achieved_count > 7:
                    correct = 0
                    incorrect = 0
                    for q in range(10):
                        test_max = 0
                        for t in range(400):
                            norm_value = sess.run(
                                norm_weighted_mean,
                                feed_dict={
                                    x: self.clustered_test[q][t:t + 1],
                                    pro_input: pro,
                                    keep_prob: 1.0
                                })
                            if q == 0:
                                test_max = max(test_max, norm_value)
                                if norm_value < norm_ref:
                                    correct += 1
                                else:
                                    incorrect += 1
                            else:
                                if norm_value > norm_ref:
                                    correct += 1
                                else:
                                    incorrect += 1

                        print("checking {}, correct {}, incorrect {}".format(
                            q, correct, incorrect))
                        print(test_max)
                    print("accucacy: {}".format(correct /
                                                (correct + incorrect)))