Example #1
    def __init__(self, h_img: int, w_img: int, nbChannels: int, nbCategories,
                 nbRegressor, favoritism, depth0, depth1):

        self.nbConsecutiveOptForOneFit = 1
        self.summaryEither_cat_proba = 0
        self.nbRegressor = nbRegressor

        (self.batch_size, self.h_img, self.w_img,
         self.nbChannels) = (None, h_img, w_img, nbChannels)

        self.nbCategories = nbCategories

        """ PLACEHOLDER """
        self._X = tf.placeholder(name="X", dtype=tf.float32,shape=(None,h_img,w_img,nbChannels))
        """les annotations : une image d'entier, chaque entier correspond à une catégorie"""
        self._Y_cat = tf.placeholder(dtype=tf.int32, shape=[None, h_img, w_img], name="Y_cat" )
        self._Y_reg = tf.placeholder(dtype=tf.float32, shape=[None, h_img, w_img,nbRegressor], name="Y_cat" )

        self._itr = tf.placeholder(name="itr", dtype=tf.float32)


        self.keep_proba = tf.get_variable("keep_proba", initializer=1., trainable=False)
        self.learning_rate = tf.get_variable("learning_rate", initializer=1e-2, trainable=False)



        """la sorties est un volume 7*7*64.  """
        encoder = Encoder(self._X, nbChannels)

        self.hat = Decoder(encoder, nbCategories, self.keep_proba, favoritism, depth0, depth1)
        self.hat_reg = Decoder(encoder, self.nbRegressor, self.keep_proba, favoritism, depth0, depth1)



        """ les loss qu'on suivra sur le long terme. Les coef, c'est juste pour avoir des grandeurs faciles à lire  """
        self.where=tf.cast((self._Y_cat!=0),dtype=tf.float32)
        self._loss_reg = 0.1 * tf.reduce_mean(self.where*(self._Y_reg-self.hat_reg.Y_logits)**2)

        self._loss_cat = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.hat.Y_logits, labels=self._Y_cat))

        self._penalty = 10 * sobel_penalty(self.hat.Y_proba, self.nbCategories)

        """ si le coef devant la _loss_background est trop grand, la loss_instance reste bloquée à 0.
            mais s'il est trop petit le background se transforme en damier !"""


        self._loss = self._loss_cat + self._loss_reg


        tf.summary.scalar("log loss", tf.log(self._loss))
        tf.summary.scalar("log loss reg", tf.log(self._loss_reg))
        tf.summary.scalar("log loss cat", tf.log(self._loss_cat))
        tf.summary.scalar("log penalty", tf.log(self._penalty))


        """ optimizer, monitoring des gradients """
        adam_opt = tf.train.AdamOptimizer(self.learning_rate)
        _grads_vars = adam_opt.compute_gradients(self._loss)
        # for index, grad in enumerate(_grads_vars):
        #     tf.summary.histogram("{}-grad".format(_grads_vars[index][0].name), _grads_vars[index][0])
        #     tf.summary.histogram("{}-var".format(_grads_vars[index][1].name), _grads_vars[index][1])
        #     if len(_grads_vars[index][0].get_shape().as_list())==4:
        #         ing.summarizeW_asImage(_grads_vars[index][0])


        # note: superseded by the final merge_all() below, once the image summaries exist
        self._summary = tf.summary.merge_all()

        """ la minimisation est faite via cette op:  """
        self.step_op = adam_opt.apply_gradients(_grads_vars)




        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        self.verbose=True


        max_outputs=8
        tf.summary.image("input_image", self._X, max_outputs=max_outputs)


        if self.summaryEither_cat_proba == 0:
            output = tf.expand_dims(tf.cast(self.hat.Y_cat, dtype=tf.float32), 3)
            output_color = ing.colorize(output, vmin=0.0, vmax=self.nbCategories,
                                        cmap='plasma')  # 'viridis', 'plasma', 'inferno', 'magma'
            tf.summary.image("Y_hat", output_color)


            for i in range(self.nbRegressor):
                output2 = tf.expand_dims(tf.cast(self.hat_reg.Y_logits[:, :, :, i], dtype=tf.float32), 3)
                """ maxNbStrate depends on the cell size. """
                output_color2 = ing.colorize(output2, vmin=None, vmax=None,
                                             cmap='plasma')  # 'viridis', 'plasma', 'inferno', 'magma'
                tf.summary.image("Y_reg_hat" + str(i), output_color2)

        else:
            for cat in range(0, self.nbCategories):
                tf.summary.image("hat_proba cat" + str(cat),
                                 tf.expand_dims(self.hat.Y_proba[:, :, :, cat], 3),
                                 max_outputs=max_outputs)


        self._summary = tf.summary.merge_all()
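
A minimal usage sketch for this first example. The class name `SegRegModel` and the `favoritism=None` argument are hypothetical (the listing only shows `__init__`, and `favoritism` is passed straight to `Decoder`); the snippet just illustrates the feed contract of the placeholders defined above.

import numpy as np

# hypothetical class name; only its __init__ is shown above
model = SegRegModel(h_img=64, w_img=64, nbChannels=3, nbCategories=5,
                    nbRegressor=2, favoritism=None, depth0=32, depth1=64)

X = np.random.rand(4, 64, 64, 3).astype(np.float32)           # input batch
Y_cat = np.random.randint(0, 5, (4, 64, 64)).astype(np.int32)  # integer label map
Y_reg = np.random.rand(4, 64, 64, 2).astype(np.float32)        # regression targets

# one optimization step: run step_op and fetch the combined loss
_, loss = model.sess.run(
    [model.step_op, model._loss],
    feed_dict={model._X: X, model._Y_cat: Y_cat,
               model._Y_reg: Y_reg, model._itr: 0.0})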
Example #2
    def __init__(self, h_img: int, w_img: int, nbChannels: int, nbCategories,
                 favoritism, depth0, depth1):

        self.nbConsecutiveOptForOneFit = 1
        self.summaryEither_cat_proba = 0

        (self.batch_size, self.h_img, self.w_img,
         self.nbChannels) = (None, h_img, w_img, nbChannels)

        self.nbCategories = nbCategories
        """ PLACEHOLDER """
        self._X = tf.placeholder(name="X",
                                 dtype=tf.float32,
                                 shape=(None, h_img, w_img, nbChannels))
        """les annotations : une image d'entier, chaque entier correspond à une catégorie"""
        self._Y_proba = tf.placeholder(
            dtype=tf.float32,
            shape=[None, h_img, w_img, nbCategories],
            name="Y")
        self._itr = tf.placeholder(name="itr", dtype=tf.float32)

        self.keep_proba = tf.get_variable("keep_proba",
                                          initializer=1.,
                                          trainable=False)
        self.learning_rate = tf.get_variable("learning_rate",
                                             initializer=1e-2,
                                             trainable=False)

        self.hat = Hat_fullyConv(self._X, nbChannels, nbCategories,
                                 self.keep_proba, favoritism, depth0, depth1)
        """ les loss qu'on suivra sur le long terme. Le *10 c'est juste pour mieux interpréter  """
        self._loss_instances = -10 * matching_IoU_batch(
            self._Y_proba[:, :, :, 1:], self.hat.Y_proba[:, :, :, 1:])
        self._loss_background = -10 * just_IoU_batch(
            self._Y_proba[:, :, :, 0], self.hat.Y_proba[:, :, :, 0])
        self._penalty = 10 * sobel_penalty(self.hat.Y_proba, self.nbCategories)
        """ si le coef devant la _loss_background est trop grand, la loss_instance reste bloquée à 0.
            mais s'il est trop petit le background se transforme en damier !"""

        self._loss = self._loss_instances + tf.nn.sigmoid(
            self._itr - 5) * self._loss_background + 5. * self._penalty

        tf.summary.scalar("loss", self._loss)
        tf.summary.scalar("loss instances", self._loss_instances)
        tf.summary.scalar("loss background", self._loss_background)
        tf.summary.scalar("penalty", self._penalty)
        """ optimizer, monitoring des gradients """
        adam_opt = tf.train.AdamOptimizer(self.learning_rate)
        _grads_vars = adam_opt.compute_gradients(self._loss)
        for index, grad in enumerate(_grads_vars):
            tf.summary.histogram("{}-grad".format(_grads_vars[index][0].name),
                                 _grads_vars[index][0])
            tf.summary.histogram("{}-var".format(_grads_vars[index][1].name),
                                 _grads_vars[index][1])
            if len(_grads_vars[index][0].get_shape().as_list()) == 4:
                ing.summarizeW_asImage(_grads_vars[index][0])

        # note: superseded by the final merge_all() below, once the image summaries exist
        self._summary = tf.summary.merge_all()
        """ la minimisation est faite via cette op:  """
        self.step_op = adam_opt.apply_gradients(_grads_vars)

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        self.verbose = True

        max_outputs = 4
        tf.summary.image("input_image", self._X, max_outputs=max_outputs)

        if self.summaryEither_cat_proba == 0:
            output = tf.expand_dims(tf.cast(self.hat.Y_cat, dtype=tf.float32),
                                    3)
            output_color = ing.colorize(
                output, vmin=0.0, vmax=self.nbCategories,
                cmap='plasma')  #'viridis', 'plasma', 'inferno', 'magma'
            tf.summary.image("Y_hat", output_color)
        else:
            for cat in range(0, self.nbCategories):
                tf.summary.image("hat_proba cat" + str(cat),
                                 tf.expand_dims(self.hat.Y_proba[:, :, :, cat],
                                                3),
                                 max_outputs=max_outputs)

        self._summary = tf.summary.merge_all()
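
The `tf.nn.sigmoid(self._itr - 5)` factor in the combined loss ramps `_loss_background` in gradually, so `_loss_instances` dominates early training. A standalone NumPy sketch of that schedule (nothing project-specific assumed):

import numpy as np

def background_weight(itr):
    """Weight applied to _loss_background at iteration itr:
    ~0.007 at itr=0, 0.5 at itr=5, ~0.993 at itr=10."""
    return 1.0 / (1.0 + np.exp(-(itr - 5.0)))

for itr in (0, 2, 5, 8, 10):
    print(itr, round(float(background_weight(itr)), 3))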
Example #3
    def __init__(self, h_img: int, w_img: int, nbChannels: int, nbCategories,
                 favoritism, depth0, depth1):

        self.nbConsecutiveOptForOneFit = 1
        self.summaryEither_cat_proba = 0

        (self.batch_size, self.h_img, self.w_img,
         self.nbChannels) = (None, h_img, w_img, nbChannels)

        self.nbCategories = nbCategories
        """ PLACEHOLDER """
        self._X = tf.placeholder(name="X",
                                 dtype=tf.float32,
                                 shape=(None, h_img, w_img, nbChannels))
        """les annotations : une image d'entier, chaque entier correspond à une catégorie"""
        self._Y_cat = tf.placeholder(dtype=tf.float32,
                                     shape=[None, h_img, w_img, nbCategories],
                                     name="Y_cat")
        self._Y_background = tf.placeholder(dtype=tf.float32,
                                            shape=[None, h_img, w_img, 2],
                                            name="Y_background")

        self._itr = tf.placeholder(name="itr", dtype=tf.int32)

        self.keep_proba = tf.get_variable("keep_proba",
                                          initializer=1.,
                                          trainable=False)
        self.learning_rate = tf.get_variable("learning_rate",
                                             initializer=1e-2,
                                             trainable=False)
        """la sorties est un volume 7*7*64.  """
        encoder = Encoder(self._X, nbChannels)

        self.hat = Decoder(encoder, nbCategories, self.keep_proba, favoritism,
                           depth0, depth1, True)

        self.hat_background = Decoder(encoder, 2, self.keep_proba, favoritism,
                                      depth0, depth1, False)
        """ les loss qu'on suivra sur le long terme. Les coef, c'est juste pour avoir des grandeurs faciles à lire  """

        # tf.equal replaces the `==` operator (not element-wise on TF 1.x tensors);
        # note this mask is currently unused below
        where = tf.cast(tf.equal(self._Y_background[:, :, :, 1], 1),
                        dtype=tf.float32)
        self._loss_background = -tf.reduce_mean(
            self._Y_background * tf.log(self.hat_background.Y_proba + 1e-10))
        self._loss_cat = ing.crossEntropy_multiLabel(self._Y_cat,
                                                     self.hat.Y_proba)

        self._penalty = 10 * sobel_penalty(self.hat.Y_proba, self.nbCategories)
        """ si le coef devant la _loss_background est trop grand, la loss_instance reste bloquée à 0.
            mais s'il est trop petit le background se transforme en damier !"""

        self._loss = self._loss_cat  #+self._loss_background

        tf.summary.scalar("loss", self._loss)
        tf.summary.scalar("loss cat", self._loss_cat)
        tf.summary.scalar("loss background", self._loss_background)
        tf.summary.scalar("penalty", self._penalty)

        tf.summary.histogram("hat_Y_cat", self.hat.Y_proba)
        shape = self.hat.Y_proba[0, :, :, :].get_shape().as_list()
        # count_nonzero minus the element count, i.e. minus the number of zeros
        tf.summary.scalar(
            "zeros of Y_hat_proba",
            tf.count_nonzero(self.hat.Y_proba[0, :, :, :]) -
            shape[0] * shape[1] * shape[2])
        """ optimizer, monitoring des gradients """
        adam_opt = tf.train.AdamOptimizer(self.learning_rate)
        _grads_vars = adam_opt.compute_gradients(self._loss)
        # for index, grad in enumerate(_grads_vars):
        #     tf.summary.histogram("{}-grad".format(_grads_vars[index][0].name), _grads_vars[index][0])
        #     tf.summary.histogram("{}-var".format(_grads_vars[index][1].name), _grads_vars[index][1])
        #     if len(_grads_vars[index][0].get_shape().as_list())==4:
        #         ing.summarizeW_asImage(_grads_vars[index][0])

        # note: superseded by the final merge_all() below, once the image summaries exist
        self._summary = tf.summary.merge_all()
        """ la minimisation est faite via cette op:  """
        self.step_op = adam_opt.apply_gradients(_grads_vars)

        self.rien = tf.ones(1)  # dummy tensor ("rien" is French for "nothing")

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        self.verbose = True

        max_outputs = 8
        tf.summary.image("input_image", self._X, max_outputs=max_outputs)

        self._Y_cat_sum = tf.reduce_sum(self._Y_cat, axis=3)

        if self.summaryEither_cat_proba == 0:

            output = tf.expand_dims(
                tf.cast(self.hat.Y_cat_sum, dtype=tf.float32), 3)
            output_color = ing.colorize(output,
                                        vmin=0.0,
                                        vmax=self.nbCategories,
                                        cmap='plasma')
            tf.summary.image("Y hat strates",
                             output_color,
                             max_outputs=max_outputs)

            # output = tf.expand_dims(tf.cast(self._Y_cat_sum,dtype=tf.float32),3)
            # output_color = ing.colorize(output, vmin=0.0, vmax=self.nbCategories, cmap='plasma') #'viridis', 'plasma', 'inferno', 'magma'
            # tf.summary.image("ground truth",output_color)
            #
            # output = tf.expand_dims(tf.cast(self.hat.Y_cat_sum, dtype=tf.float32), 3)
            # output_color = ing.colorize(output, vmin=None, vmax=None,
            #                             cmap='plasma')  # 'viridis', 'plasma', 'inferno', 'magma'
            # tf.summary.image("hat strates", output_color)
            #
            #
            # output = tf.expand_dims(tf.cast(self.hat_background.Y_proba[:,:,:,0], dtype=tf.float32), 3)
            # output_color = ing.colorize(output, vmin=0.0, vmax=self.nbCategories,
            #                             cmap='plasma')  # 'viridis', 'plasma', 'inferno', 'magma'
            # tf.summary.image("hat background", output_color)

        else:
            for cat in range(0, self.nbCategories):
                tf.summary.image("hat_proba cat" + str(cat),
                                 tf.expand_dims(self.hat.Y_proba[:, :, :, cat],
                                                3),
                                 max_outputs=max_outputs)

        self._summary = tf.summary.merge_all()
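
For reference, `_loss_background` in this third example is a pixel-wise cross-entropy with a 1e-10 floor inside the log to avoid log(0). A self-contained NumPy sketch of the same computation (shapes follow the `_Y_background` placeholder; no project helpers needed):

import numpy as np

def background_xent(y_true, y_prob, eps=1e-10):
    """Mirrors -reduce_mean(Y_background * log(Y_proba + eps));
    y_true and y_prob have shape (batch, h, w, 2)."""
    return -np.mean(y_true * np.log(y_prob + eps))

y_true = np.zeros((1, 4, 4, 2), np.float32)
y_true[..., 1] = 1.0                        # every pixel labelled "background"
y_prob = np.full((1, 4, 4, 2), 0.5, np.float32)
print(background_xent(y_true, y_prob))      # ~0.347 (mean over both channels)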