Example #1
def siamese_vlad(x_sat, x_grd, keep_prob):
    """Siamese VGG16 + NetVLAD encoder over satellite and ground views."""
    with tf.device('/gpu:0'):
        with tf.variable_scope('vgg_grd'):
            vgg_grd = Vgg16()
            grd_local = vgg_grd.build(x_grd)

        with tf.variable_scope('netvlad_grd'):
            netvlad_grd = lp.NetVLAD(feature_size=512,
                                     max_samples=tf.shape(grd_local)[1] *
                                     tf.shape(grd_local)[2],
                                     cluster_size=64,
                                     output_dim=4096,
                                     gating=True,
                                     add_batch_norm=False,
                                     is_training=True)
            grd_vlad = netvlad_grd.forward(grd_local)

        with tf.variable_scope('vgg_sat'):
            vgg_sat = Vgg16()
            sat_local = vgg_sat.build(x_sat)

        with tf.variable_scope('netvlad_sat'):
            netvlad_sat = lp.NetVLAD(feature_size=512,
                                     max_samples=tf.shape(sat_local)[1] *
                                     tf.shape(sat_local)[2],
                                     cluster_size=64,
                                     output_dim=4096,
                                     gating=True,
                                     add_batch_norm=False,
                                     is_training=True)
            sat_vlad = netvlad_sat.forward(sat_local)

    with tf.device('/gpu:1'):
        sat_global, grd_global = siamese_fc(sat_vlad, grd_vlad,
                                            'dim_reduction', keep_prob)
    return sat_global, grd_global, sat_local, grd_local
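
All of these examples use lp.NetVLAD from the loupe pooling layers as a black box. As rough orientation only, a stripped-down sketch of the aggregation it performs might look as follows (a simplification assumed from the usage above; the real class additionally offers batch norm, context gating and a learned projection to output_dim):

import tensorflow as tf

def netvlad_sketch(x, num_samples, feature_size, cluster_size):
    """Minimal NetVLAD aggregation; x is a flat (batch*num_samples, feature_size) tensor."""
    w = tf.get_variable('assignment', [feature_size, cluster_size])
    c = tf.get_variable('clusters', [feature_size, cluster_size])
    a = tf.nn.softmax(tf.matmul(x, w))                        # soft cluster assignment
    a = tf.reshape(a, [-1, num_samples, cluster_size])
    x = tf.reshape(x, [-1, num_samples, feature_size])
    vlad = tf.matmul(a, x, transpose_a=True)                  # (B, K, D) weighted sums
    vlad -= tf.expand_dims(tf.reduce_sum(a, 1), -1) * tf.transpose(c)  # subtract cluster centers
    vlad = tf.nn.l2_normalize(vlad, 2)                        # intra-normalization per cluster
    vlad = tf.reshape(vlad, [-1, cluster_size * feature_size])
    return tf.nn.l2_normalize(vlad, 1)                        # final (B, K*D) descriptor
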
Example #2
def pointnetvlad(point_cloud, trans_mat, is_training, bn_decay=None):
    """PointNetVLAD.

    INPUT:  (batch_num_queries * num_pointclouds_per_query) x num_points_per_pointcloud x 3
            (the leading query/cloud dimensions are assumed to be flattened by the
            caller, since dim 1 is read as num_points below)
    OUTPUT: (batch_num_queries * num_pointclouds_per_query) x output_dim
    """
    num_points = point_cloud.get_shape()[1].value
    CLUSTER_SIZE = 64
    OUTPUT_DIM = 1000

    with tf.variable_scope('transform_net1') as sc:
        input_transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, input_transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        feature_transform = feature_transform_net(net, is_training, bn_decay, K=64)
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), feature_transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    NetVLAD = lp.NetVLAD(feature_size=1024, max_samples=num_points,
                         cluster_size=CLUSTER_SIZE, output_dim=OUTPUT_DIM,
                         gating=True, add_batch_norm=True,
                         is_training=is_training)

    # debug prints: inspect tensor shapes while building the graph
    print(trans_mat)
    print(tf.reshape(net, [-1, num_points, 1024]))
    pc_trans_feat = tf.matmul(trans_mat, tf.reshape(net, [-1, num_points, 1024]))
    print(pc_trans_feat)
    # NOTE: hard-coded layout of 8 x 10 point clouds per batch
    pc_trans_feat = tf.reshape(pc_trans_feat, [-1, 8, 10, 1024])
    print(pc_trans_feat)
    net = tf.reshape(net, [-1, 1024])

    net = tf.nn.l2_normalize(net, 1)
    output = NetVLAD.forward(net)
    print(output)

    # normalize to have unit norm
    output = tf.nn.l2_normalize(output, 1)
    output = tf.reshape(output, [-1, OUTPUT_DIM])

    return output, pc_trans_feat
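
A hedged call sketch for the function above; every size below is an illustrative assumption, and the 8 x 10 layout must match the hard-coded reshape:

pc = tf.placeholder(tf.float32, [80, 4096, 3])      # 8 queries x 10 clouds, pre-flattened
trans = tf.placeholder(tf.float32, [80, 1, 4096])   # assumed per-cloud weighting matrix
is_training = tf.placeholder(tf.bool, name='is_training')
desc, trans_feat = pointnetvlad(pc, trans, is_training)
# desc: (80, 1000), trans_feat: (1, 8, 10, 1024)
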
Example #3
def cvm_net_I(x_sat, x_grd, coords_geo, keep_prob, trainable):
    with tf.device('/gpu:1'):
        # local feature extraction: use only the convolutional layers (up to conv5) of VGG
        vgg_grd = VGG16()
        # grd_local is the conv5 feature map (512 channels)
        grd_local = vgg_grd.VGG16_conv(x_grd, keep_prob, False, 'VGG_grd')
        with tf.variable_scope('netvlad_grd', reuse=tf.AUTO_REUSE):
            # NetVLAD on top of each CNN branch turns the local features into
            # a global descriptor; feature_size is the channel depth of the
            # final conv layer
            netvlad_grd = lp.NetVLAD(feature_size=512,
                                     max_samples=tf.shape(grd_local)[1] *
                                     tf.shape(grd_local)[2],
                                     cluster_size=64,
                                     output_dim=4096,
                                     gating=True,
                                     add_batch_norm=False,
                                     is_training=trainable)
            # forward pass over the conv5 features; each image yields a
            # (K*D) = (64*512) VLAD vector, projected down to output_dim
            grd_vlad = netvlad_grd.forward(grd_local)

        vgg_sat = VGG16()
        # local satellite descriptors
        sat_local = vgg_sat.VGG16_conv(x_sat, keep_prob, False, 'VGG_sat')
        with tf.variable_scope('netvlad_sat', reuse=tf.AUTO_REUSE):
            # sat_local has shape batch x H x W x 512, so max_samples = H*W is
            # the number N of local descriptors; cluster_size is the number of
            # VLAD clusters K
            netvlad_sat = lp.NetVLAD(feature_size=512,
                                     max_samples=tf.shape(sat_local)[1] *
                                     tf.shape(sat_local)[2],
                                     cluster_size=64,
                                     output_dim=4096,
                                     gating=True,
                                     add_batch_norm=False,
                                     is_training=trainable)
            sat_vlad = netvlad_sat.forward(sat_local)

    with tf.device('/gpu:0'):
        fc = Siamese_FC()
        sat_global, grd_global = fc.siamese_fc(sat_vlad, grd_vlad, trainable,
                                               'dim_reduction')
        # grd_global = fc.new_fc_layer(sat_global, coords_geo)
    return sat_global, grd_global
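
The paired descriptors are typically trained with a ranking objective. This is only a sketch of one such loss (a weighted soft-margin triplet over an exhaustive mini-batch, with an assumed scale alpha; not the verbatim CVM-Net training code):

def soft_margin_triplet_loss(sat_global, grd_global, alpha=10.0):
    # squared distances between every satellite/ground pair (assumes unit-norm inputs)
    dists = 2.0 - 2.0 * tf.matmul(sat_global, grd_global, transpose_b=True)
    pos = tf.linalg.diag_part(dists)              # matched pairs sit on the diagonal
    n = tf.cast(tf.shape(sat_global)[0], tf.float32)
    # rank each matched pair above all mismatched ones, in both directions
    loss_s2g = tf.reduce_sum(tf.log(1.0 + tf.exp(alpha * (tf.expand_dims(pos, 1) - dists))))
    loss_g2s = tf.reduce_sum(tf.log(1.0 + tf.exp(alpha * (tf.expand_dims(pos, 0) - dists))))
    return (loss_s2g + loss_g2s) / (2.0 * n * (n - 1.0))
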
Example #4
def cvm_net_II(x_sat, x_grd, keep_prob, trainable):
    with tf.device('/gpu:1'):
        vgg_grd = VGG16()
        grd_local = vgg_grd.VGG16_conv(x_grd, keep_prob, trainable, 'VGG_grd')

        vgg_sat = VGG16()
        sat_local = vgg_sat.VGG16_conv(x_sat, keep_prob, trainable, 'VGG_sat')

        transnet = TransNet()
        trans_sat, trans_grd = transnet.transform(sat_local, grd_local,
                                                  keep_prob, trainable,
                                                  'transformation')

        with tf.variable_scope('netvlad') as scope:
            netvlad_sat = lp.NetVLAD(feature_size=512,
                                     max_samples=tf.shape(trans_sat)[1] *
                                     tf.shape(trans_sat)[2],
                                     cluster_size=64,
                                     output_dim=4096,
                                     gating=True,
                                     add_batch_norm=False,
                                     is_training=trainable)
            sat_global = netvlad_sat.forward(trans_sat, True)

            # reuse the same NetVLAD variables for the ground branch, so both
            # modalities share one aggregation module
            scope.reuse_variables()

            netvlad_grd = lp.NetVLAD(feature_size=512,
                                     max_samples=tf.shape(trans_grd)[1] *
                                     tf.shape(trans_grd)[2],
                                     cluster_size=64,
                                     output_dim=4096,
                                     gating=True,
                                     add_batch_norm=False,
                                     is_training=trainable)
            grd_global = netvlad_grd.forward(trans_grd, True)

    return sat_global, grd_global
Example #5
def RNN(tensor_input, weights, biases):
    # pick the pooling module from the (module-level) opt configuration
    if opt.model == 'NetVLAD':
        pooling = lp.NetVLAD(feature_size=opt.in_features,
                             max_samples=opt.seq_len,
                             cluster_size=opt.cluster_size,
                             output_dim=opt.output_dim,
                             gating=True,
                             add_batch_norm=True,
                             is_training=True)
    elif opt.model == 'NetRVLAD':
        pooling = lp.NetRVLAD(feature_size=opt.in_features,
                              max_samples=opt.seq_len,
                              cluster_size=opt.cluster_size,
                              output_dim=opt.output_dim,
                              gating=True,
                              add_batch_norm=True,
                              is_training=True)
    elif opt.model == 'SoftDBoW':
        pooling = lp.SoftDBoW(feature_size=opt.in_features,
                              max_samples=opt.seq_len,
                              cluster_size=opt.cluster_size,
                              output_dim=opt.output_dim,
                              gating=True,
                              add_batch_norm=True,
                              is_training=True)
    elif opt.model == 'NetFV':
        pooling = lp.NetFV(feature_size=opt.in_features,
                           max_samples=opt.seq_len,
                           cluster_size=opt.cluster_size,
                           output_dim=opt.output_dim,
                           gating=True,
                           add_batch_norm=True,
                           is_training=True)
    else:
        raise ValueError('unknown pooling model: %s' % opt.model)
    reshaped_input = tf.reshape(tensor_input, [-1, opt.in_features])
    tensor_output = pooling.forward(reshaped_input)
    results = tf.matmul(
        tensor_output,
        weights['out']) + biases['out']  # [batch_size, n_classes]
    return results
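
A hedged usage sketch; the opt fields, n_classes and shapes are assumptions consistent with the code above (opt is read as a module-level config):

from argparse import Namespace
import tensorflow as tf

opt = Namespace(model='NetVLAD', in_features=512, seq_len=30,
                cluster_size=64, output_dim=1024)
n_classes = 10
weights = {'out': tf.get_variable('w_out', [opt.output_dim, n_classes])}
biases = {'out': tf.get_variable('b_out', [n_classes])}
x = tf.placeholder(tf.float32, [None, opt.seq_len, opt.in_features])
logits = RNN(x, weights, biases)    # (batch_size, n_classes)
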
Example #6
def forward(point_cloud, is_training, bn_decay=None):
    """LPD-Net:FNSF,    INPUT is batch_num_queries X num_pointclouds_per_query X num_points_per_pointcloud X 13,
                        OUTPUT batch_num_queries X num_pointclouds_per_query X output_dim """
    batch_num_queries = point_cloud.get_shape()[0].value
    num_pointclouds_per_query = point_cloud.get_shape()[1].value
    num_points = point_cloud.get_shape()[2].value
    CLUSTER_SIZE = 64
    OUTPUT_DIM = 256
    k = 20
    point_cloud = tf.reshape(point_cloud,
                             [batch_num_queries * num_pointclouds_per_query, num_points, 13])

    # split the xyz coordinates (3) from the hand-crafted point features (10)
    point_cloud, feature_cloud = tf.split(point_cloud, [3, 10], 2)

    with tf.variable_scope('transform_net1') as sc:
        input_transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, input_transform)

    # Neural Network to learn neighborhood features
    # feature_cloud = neural_feature_net(point_cloud, is_training, bn_decay, knn_k=20, F=10)

    point_cloud_input = tf.concat([point_cloud_transformed, feature_cloud], 2)

    point_cloud_input = tf.expand_dims(point_cloud_input, -1)

    net = tf_util.conv2d(point_cloud_input, 64, [1, 13],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        feature_transform = feature_transform_net(net, is_training, bn_decay, K=64)
    feature_transform = tf.matmul(tf.squeeze(net, axis=[2]), feature_transform)

    # Serial structure
    # Dynamic Graph CNN over the feature space
    with tf.variable_scope('DGfeature') as sc:
        adj_matrix = tf_util.pairwise_distance(feature_transform)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

        net = tf_util.conv2d(edge_feature, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='dgmlp1', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='dgmlp2', bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keepdims=True)

    # Spatial Neighborhood fusion for cartesian space
    with tf.variable_scope('SNfeature') as sc:
        adj_matrix = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)

        idx_ = tf.range(batch_num_queries*num_pointclouds_per_query) * num_points
        idx_ = tf.reshape(idx_, [batch_num_queries*num_pointclouds_per_query, 1, 1])

        feature_cloud = tf.reshape(net, [-1, 64])
        edge_feature = tf.gather(feature_cloud, nn_idx+idx_)

        net = tf_util.conv2d(edge_feature, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='snmlp1', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='snmlp2', bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keepdims=True)

    # MLP for fusion
    net = tf_util.conv2d(net, 64, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    point_wise_feature = net  # per-point features (kept for reference, not returned)

    NetVLAD = lp.NetVLAD(feature_size=1024, max_samples=num_points,
                         cluster_size=CLUSTER_SIZE, output_dim=OUTPUT_DIM,
                         gating=True, add_batch_norm=True,
                         is_training=is_training)

    net = tf.reshape(net, [-1, 1024])
    net = tf.nn.l2_normalize(net, 1)
    output = NetVLAD.forward(net)
    print(output)  # debug: inspect the aggregated descriptor

    # normalize to have unit norm
    output = tf.nn.l2_normalize(output, 1)
    output = tf.reshape(output, [batch_num_queries, num_pointclouds_per_query, OUTPUT_DIM])

    return output
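
Again a hedged call sketch with illustrative sizes (static shapes are required, since the code reads them via get_shape()):

pc = tf.placeholder(tf.float32, [2, 3, 4096, 13])   # 2 queries x 3 clouds x 4096 points x 13 dims
is_training = tf.placeholder(tf.bool, name='is_training')
desc = forward(pc, is_training)                     # (2, 3, 256)
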
Example #7
    def __init__(self, dataset, network_type="CNN", VLAD_k=64, VLAD_gating=True, VLAD_batch_norm=True):
        tf.set_random_seed(1234)

        self.network_type = network_type
        self.VLAD_k = VLAD_k
        self.VLAD_gating = VLAD_gating
        self.VLAD_batch_norm = VLAD_batch_norm
        # define placeholders
        self.input = tf.placeholder(tf.float32, shape=(None, dataset.number_frames_in_window, 512), name="x")
        self.keep_prob = tf.placeholder(tf.float32, name="keep_prob")
        self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
        self.weights = tf.placeholder(tf.float32, shape=(4,), name="weights")
        
        with tf.name_scope('network'):

            x = self.input

            # CNN
            if ("CNN" in network_type.upper()):
                print("Using Convolutional Neural Network")
                # NOTE: tf.contrib.layers.conv2d expects a 4-D input; with a
                # (batch, frames, 512) tensor a channel axis would need to be
                # added first, e.g. x = tf.expand_dims(x, -1)
                x = tf.contrib.layers.conv2d(x, num_outputs=128, kernel_size=9, stride=1, padding='SAME')
                x = tf.contrib.layers.flatten(x)
            

            elif ("FC" in network_type.upper()):
                print("Using Fully Connected")
                x = tf.contrib.layers.flatten(x)
                x = tf.contrib.layers.fully_connected(x, dataset.number_frames_in_window*512)


            elif ("MAX" in network_type.upper()):
                print("Using MaxPooling")
                x = tf.layers.max_pooling1d(x, pool_size=120, strides=1, padding='SAME', name="MaxPooling")
                x = tf.contrib.layers.flatten(x)


            elif ("AVERAGE" in network_type.upper()):
                print("Using AveragePooling")
                x = tf.layers.average_pooling1d(x, pool_size=120, strides=1, padding='SAME', name="AveragePooling")
                x = tf.contrib.layers.flatten(x)

            elif ("RVLAD" in network_type.upper()):
                print("Using NetRVLAD")
                NetRVLAD = lp.NetRVLAD(feature_size=512, max_samples=dataset.number_frames_in_window, cluster_size=int(VLAD_k), 
                         output_dim=512, gating=VLAD_gating, add_batch_norm=VLAD_batch_norm,
                         is_training=True)
                x = tf.reshape(x, [-1, 512])
                x = NetRVLAD.forward(x)

            elif ("VLAD" in network_type.upper()):
                print("Using NetVLAD")
                NetVLAD = lp.NetVLAD(feature_size=512, max_samples=dataset.number_frames_in_window, cluster_size=int(VLAD_k), 
                         output_dim=512, gating=VLAD_gating, add_batch_norm=VLAD_batch_norm,
                         is_training=True)
                x = tf.reshape(x, [-1, 512])
                x = NetVLAD.forward(x)

            elif ("SOFTDBOW" in network_type.upper()):
                print("Using SOFTDBOW")
                SOFTDBOW = lp.SoftDBoW(feature_size=512, max_samples=dataset.number_frames_in_window, cluster_size=int(VLAD_k), 
                         output_dim=512, gating=VLAD_gating, add_batch_norm=VLAD_batch_norm,
                         is_training=True)
                x = tf.reshape(x, [-1, 512])
                x = SOFTDBOW.forward(x)

            elif ("NETFV" in network_type.upper()):
                print("Using NETFV")
                NETFV = lp.NetFV(feature_size=512, max_samples=dataset.number_frames_in_window, cluster_size=int(VLAD_k), 
                         output_dim=512, gating=VLAD_gating, add_batch_norm=VLAD_batch_norm,
                         is_training=True)
                x = tf.reshape(x, [-1, 512])
                x = NETFV.forward(x)

            x = tf.nn.dropout(x, self.keep_prob)
            x_output = tf.contrib.layers.fully_connected(x, dataset.num_classes, activation_fn=None)


        with tf.name_scope('logits'):
            self.logits = tf.identity(x_output, name='logits')

        with tf.name_scope('predictions'):
            self.predictions = tf.nn.sigmoid(self.logits, name='predictions')
            self.predictions_0 = tf.transpose(tf.transpose(self.predictions)[0])
            self.predictions_1 = tf.transpose(tf.transpose(self.predictions)[1])
            self.predictions_2 = tf.transpose(tf.transpose(self.predictions)[2])
            self.predictions_3 = tf.transpose(tf.transpose(self.predictions)[3])

        with tf.name_scope('labels'):
            self.labels = tf.placeholder(tf.float32, shape=(None, 4), name="y")
            self.labels_0 = tf.transpose(tf.transpose(self.labels)[0])
            self.labels_1 = tf.transpose(tf.transpose(self.labels)[1])
            self.labels_2 = tf.transpose(tf.transpose(self.labels)[2])
            self.labels_3 = tf.transpose(tf.transpose(self.labels)[3])



        with tf.name_scope('cost'):
            # weighting scheme: https://stackoverflow.com/questions/35155655/loss-function-for-class-imbalanced-binary-classifier-in-tensor-flow
            self.cross_entropies = tf.nn.weighted_cross_entropy_with_logits(logits=self.logits,
                                                                            targets=self.labels,
                                                                            pos_weight=self.weights)
            self.cross_entropy = tf.reduce_sum(self.cross_entropies, axis=1)


            self._batch_loss = tf.reduce_mean(self.cross_entropy, name='batch_loss')
            self._loss = tf.Variable(0.0, trainable=False, name='loss')
            self._loss_update = tf.assign(self._loss, self._loss + self._batch_loss, name='loss_update')
            self._reset_loss_op = tf.assign(self._loss, 0.0, name='reset_loss_op')

            self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self._batch_loss)



        with tf.name_scope('metrics'):


            with tf.name_scope("mAP"):
                # AUC PR = mAP
                self._auc_PR_0, self._auc_PR_update_0 = tf.metrics.auc(labels=self.labels_0, predictions=self.predictions_0, num_thresholds=200, curve='PR', name='auc_PR_0', )
                # self._reset_auc_PR_op_0 = tf.variables_initializer(var_list=tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='auc_PR_0'))

                self._auc_PR_1, self._auc_PR_update_1 = tf.metrics.auc(labels=self.labels_1, predictions=self.predictions_1, num_thresholds=200, curve='PR', name='auc_PR_1', )
                # self._reset_auc_PR_op_1 = tf.variables_initializer(var_list=tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='auc_PR_1'))

                self._auc_PR_2, self._auc_PR_update_2 = tf.metrics.auc(labels=self.labels_2, predictions=self.predictions_2, num_thresholds=200, curve='PR', name='auc_PR_2', )
                # self._reset_auc_PR_op_2 = tf.variables_initializer(var_list=tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='auc_PR_2'))

                self._auc_PR_3, self._auc_PR_update_3 = tf.metrics.auc(labels=self.labels_3, predictions=self.predictions_3, num_thresholds=200, curve='PR', name='auc_PR_3', )
                # self._reset_auc_PR_op_3 = tf.variables_initializer(var_list=tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='auc_PR_3'))

                self._batch_auc_PR = tf.reduce_mean([self._auc_PR_update_1, self._auc_PR_update_2, self._auc_PR_update_3], name='batch_auc_PR')
                self._auc_PR = tf.Variable(0.0, trainable=False, name='auc_PR')
                self._auc_PR_update = tf.assign(self._auc_PR, self._batch_auc_PR, name='auc_PR_update' )
                # self._reset_auc_PR_op = tf.assign(self._auc_PR, 0.0, name='reset_auc_PR_op')





            # CONFUSION MATRIX
            self._batch_confusion_matrix = tf.contrib.metrics.confusion_matrix(tf.argmax(self.logits, 1), tf.argmax(self.labels, 1), num_classes=dataset.num_classes, name='batch_confusion_matrix')
            self._confusion_matrix = tf.Variable(np.zeros((dataset.num_classes, dataset.num_classes), dtype=np.int32), trainable=False, name='confusion_matrix')
            self._confusion_matrix_update = tf.assign(self._confusion_matrix, self._confusion_matrix + self._batch_confusion_matrix, name='confusion_matrix_update')
            self._reset_confusion_matrix_op = tf.assign(self._confusion_matrix, np.zeros((dataset.num_classes, dataset.num_classes), dtype=np.int32), name='reset_confusion_matrix_op')
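
A hedged training-step sketch for this graph; the enclosing class name (Network here), the dataset interface and the batch arrays (batch_x, batch_y, pos_class_weights) are all assumptions:

net = Network(dataset, network_type='VLAD')
with tf.Session() as sess:
    # tf.metrics.* create local variables, so initialize both collections
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    feed = {net.input: batch_x,              # (batch, frames, 512) frame features
            net.labels: batch_y,             # (batch, 4) multi-hot labels
            net.keep_prob: 0.5,
            net.learning_rate: 1e-4,
            net.weights: pos_class_weights}  # (4,) positive-class weights
    _, loss, mAP = sess.run([net.optimizer, net._batch_loss, net._auc_PR_update],
                            feed_dict=feed)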