Code example #1
0
    def __init__(self, multi_label, nbChannels, nbCategories):
        """Build the TF1 graph for a classifier and open a session.

        Args:
            multi_label: if True, the loss/accuracy are computed for the
                multi-label case (predictions thresholded per class);
                otherwise for the single-label case (argmax comparison).
            nbChannels: number of input channels, forwarded to the network.
            nbCategories: number of output categories.
        """
        self._X = tf.placeholder(name="X", dtype=tf.float32)
        self._Y = tf.placeholder(name="Y", dtype=tf.float32)

        # Non-trainable scalars: hyper-parameters kept in the graph so they
        # can be changed at run time without rebuilding it.
        self.learning_rate = tf.get_variable("learning_rate",
                                             initializer=1e-4,
                                             trainable=False)
        self.keep_proba = tf.get_variable("keep_proba",
                                          initializer=1.,
                                          trainable=False)
        self.threshold = tf.get_variable("threshold",
                                         initializer=0.5,
                                         trainable=False)

        self.hat = Hat_multi_vs_mono(self._X, multi_label, nbChannels,
                                     nbCategories, self.keep_proba)

        # The loss and the accuracy are computed very differently in the
        # multi-label case.
        if multi_label:
            self._loss = ing.crossEntropy_multiLabel(self._Y, self.hat.Y)
            Y_hat_binary = tf.cast(
                tf.greater(self.hat.Y, self.threshold), tf.float32
            )  #ing.make_binary_with_threshold(self._Y_hat,self.threshold)
            # The accuracy depends on a threshold.  Beware: an accuracy close
            # to 1 is not necessarily good.  E.g.
            #     Y_hat_binary = [0,0,0,0,0,0,0,0,0,1]
            #     Y            = [1,0,0,0,0,0,0,0,0,0]
            #     ---> accuracy = 80 %
            # even though the model has understood nothing.
            self._accuracy = tf.reduce_mean(
                tf.cast(tf.equal(Y_hat_binary, self._Y), tf.float32))
        else:
            self._loss = ing.crossEntropy(self._Y, self.hat.Y)
            # BUGFIX: `dimension=` was the deprecated alias of `axis=`
            # (removed in later TF releases); behavior is identical.
            Y_cat = tf.argmax(self._Y, axis=1)
            Y_cat_hat = tf.argmax(self.hat.Y, axis=1)
            self._accuracy = tf.reduce_mean(
                tf.cast(tf.equal(Y_cat, Y_cat_hat), tf.float32))

        self._minimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(
            self._loss)

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        self.verbose = True
Code example #2
0
File: model.py  Project: vincentvigon/neural
    def __init__(self, h_img: int, w_img: int, nbChannels: int, nbCategories,
                 favoritism, depth0, depth1):
        """Build the TF1 graph for a per-pixel segmentation model.

        Args:
            h_img: input image height.
            w_img: input image width.
            nbChannels: number of input channels.
            nbCategories: number of segmentation categories.
            favoritism: forwarded to the Decoder (semantics defined there).
            depth0: decoder depth parameter, forwarded to the Decoder.
            depth1: decoder depth parameter, forwarded to the Decoder.
        """
        self.nbConsecutiveOptForOneFit = 1
        # 0 => summarize category strates as a colorized image;
        # anything else => one probability image per category.
        self.summaryEither_cat_proba = 0

        (self.batch_size, self.h_img, self.w_img,
         self.nbChannels) = (None, h_img, w_img, nbChannels)

        self.nbCategories = nbCategories

        # --- placeholders ---
        self._X = tf.placeholder(name="X",
                                 dtype=tf.float32,
                                 shape=(None, h_img, w_img, nbChannels))
        # The annotations: per-pixel category planes (one channel per
        # category).
        self._Y_cat = tf.placeholder(dtype=tf.float32,
                                     shape=[None, h_img, w_img, nbCategories],
                                     name="Y_cat")
        self._Y_background = tf.placeholder(dtype=tf.float32,
                                            shape=[None, h_img, w_img, 2],
                                            name="Y_background")

        self._itr = tf.placeholder(name="itr", dtype=tf.int32)

        # Non-trainable scalars so they can be tuned at run time.
        self.keep_proba = tf.get_variable("keep_proba",
                                          initializer=1.,
                                          trainable=False)
        self.learning_rate = tf.get_variable("learning_rate",
                                             initializer=1e-2,
                                             trainable=False)

        # The encoder output is a 7*7*64 volume.
        encoder = Encoder(self._X, nbChannels)

        self.hat = Decoder(encoder, nbCategories, self.keep_proba, favoritism,
                           depth0, depth1, True)

        self.hat_background = Decoder(encoder, 2, self.keep_proba, favoritism,
                                      depth0, depth1, False)

        # --- losses tracked over the long run; the coefficients are only
        # there to keep the magnitudes easy to read ---

        # BUGFIX: the original used the Python `==` operator on a tensor,
        # which compares object identity and always yields the constant
        # False instead of an element-wise comparison.  (`where` is not
        # used further down, but the mask is now actually a mask.)
        where = tf.cast(tf.equal(self._Y_background[:, :, :, 1], 1),
                        dtype=tf.float32)
        self._loss_background = -tf.reduce_mean(
            self._Y_background * tf.log(self.hat_background.Y_proba + 1e-10))
        self._loss_cat = ing.crossEntropy_multiLabel(self._Y_cat,
                                                     self.hat.Y_proba)

        self._penalty = 10 * sobel_penalty(self.hat.Y_proba, self.nbCategories)

        # If the coefficient in front of _loss_background is too large, the
        # instance loss stays stuck at 0; but if it is too small, the
        # background turns into a checkerboard!
        self._loss = self._loss_cat  #+self._loss_background

        tf.summary.scalar("loss", self._loss)
        tf.summary.scalar("loss cat", self._loss_cat)
        tf.summary.scalar("loss background", self._loss_background)
        tf.summary.scalar("penalty", self._penalty)

        tf.summary.histogram("hat_Y_cat", self.hat.Y_proba)
        shape = self.hat.Y_proba[0, :, :, :].get_shape().as_list()
        tf.summary.scalar(
            "zero of Y_hat_proba",
            tf.count_nonzero(self.hat.Y_proba[0, :, :, :]) -
            shape[0] * shape[1] * shape[2])

        # --- optimizer; gradients are computed explicitly so they could be
        # monitored with per-variable histograms if needed ---
        adam_opt = tf.train.AdamOptimizer(self.learning_rate)
        _grads_vars = adam_opt.compute_gradients(self._loss)

        # The minimization is performed through this op:
        self.step_op = adam_opt.apply_gradients(_grads_vars)

        self.rien = tf.ones(1)

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        self.verbose = True

        max_outputs = 8
        tf.summary.image("input_image", self._X, max_outputs=max_outputs)

        self._Y_cat_sum = tf.reduce_sum(self._Y_cat, axis=3)

        if self.summaryEither_cat_proba == 0:
            output = tf.expand_dims(
                tf.cast(self.hat.Y_cat_sum, dtype=tf.float32), 3)
            output_color = ing.colorize(output,
                                        vmin=0.0,
                                        vmax=self.nbCategories,
                                        cmap='plasma')
            tf.summary.image("Y hat strates",
                             output_color,
                             max_outputs=max_outputs)
        else:
            for cat in range(0, self.nbCategories):
                tf.summary.image("hat_proba cat" + str(cat),
                                 tf.expand_dims(self.hat.Y_proba[:, :, :, cat],
                                                3),
                                 max_outputs=max_outputs)

        # BUGFIX: merge exactly once, after every summary has been declared.
        # The original also called merge_all() earlier, before the image
        # summaries existed; that first merged op was dead (immediately
        # overwritten here) and, had it been used, incomplete.
        self._summary = tf.summary.merge_all()