Example #1
    def __init__(self,
                 channels=3,
                 n_class=2,
                 cost="cross_entropy",
                 cost_kwargs={},
                 **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class

        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(
            tf.float32)  #dropout (keep probability)

        logits, self.variables, self.offset = create_conv_net(
            self.x, self.keep_prob, channels, n_class, **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        self.cross_entropy = tf.reduce_mean(
            cross_entropy(
                tf.reshape(self.y, [-1, n_class]),
                tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))

        self.predicter = pixel_wise_softmax_2(logits)
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3),
                                     tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
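A minimal usage sketch for this constructor (hypothetical: it assumes the enclosing class is tf_unet's `Unet` and that TensorFlow 1.x is in use):

import numpy as np
import tensorflow as tf

net = Unet(channels=1, n_class=2, cost="cross_entropy")  # Unet: the class this __init__ belongs to

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch_x = np.random.rand(1, 572, 572, 1).astype(np.float32)  # one grayscale tile
    # Inference only needs the image and keep_prob=1.0 (no dropout at test time).
    prediction = sess.run(net.predicter, feed_dict={net.x: batch_x, net.keep_prob: 1.0})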
Example #2
    def __init__(self, nx=None, ny=None, channels=3, n_class=2, add_regularizers=True, class_weights=None, **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, nx, ny, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(tf.float32)  #dropout (keep probability)

        logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)

        if class_weights is not None:
            class_weights = tf.constant(np.array(class_weights, dtype=np.float32))
            weighted_logits = tf.multiply(tf.reshape(logits, [-1, n_class]), class_weights)
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=weighted_logits,
                                                                          labels=tf.reshape(self.y, [-1, n_class])))

        else:
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=tf.reshape(logits, [-1, n_class]),
                                                                          labels=tf.reshape(self.y, [-1, n_class])))
        self.gradients_node = tf.gradients(loss, self.variables)

        self.cross_entropy = tf.reduce_mean(cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                                          tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))
        self.cost = loss

        if add_regularizers:
            reg_constant = 0.001  # Choose an appropriate value.
            regularizers = sum([tf.nn.l2_loss(variable) for variable in self.variables])
            self.cost += (reg_constant * regularizers)

        self.predicter = pixel_wise_softmax_2(logits)
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
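Note: unlike the later examples, this variant scales the logits inside the loss's softmax by the class weights rather than weighting the per-pixel loss map; self.predicter still uses the unweighted logits. A hedged construction sketch (assuming the enclosing tf_unet-style `Unet` class):

net = Unet(nx=572, ny=572, channels=1, n_class=2,
           add_regularizers=True, class_weights=[0.3, 0.7])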
Example #3
    def __init__(self, channels=3, n_class=2, height=None, width=None, cost="cross_entropy", cost_kwargs={}, **kwargs):
        tf.reset_default_graph()
        
        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)
        
        self.x = tf.placeholder("float", shape=[None, height, width, channels], name='images')
        self.keep_prob = tf.placeholder(tf.float32, shape=(), name='keep_prob') #dropout (keep probability)
        
        logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, nx=height, ny=width, **kwargs)
        
        pred_shape = logits.get_shape().as_list()
 
        self.y = tf.placeholder("float", shape=[None, pred_shape[1], pred_shape[2], n_class], name='labels')

        self.cost = self._get_cost(logits, cost, cost_kwargs)
        
        self.gradients_node = tf.gradients(self.cost, self.variables)
         
        self.predicter = pixel_wise_softmax_2(logits)

        self.cross_entropy = tf.reduce_mean(cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                                          tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))
        
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
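Note that this variant sizes self.y to the network's output shape (pred_shape) rather than the input shape: with valid (unpadded) convolutions the logits are spatially smaller than self.x, so label batches fed at training time must be cropped to match.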
Example #4
    def __init__(self,
                 channels=3,
                 n_class=2,
                 cost="cross_entropy",
                 cost_kwargs={},
                 **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(
            tf.float32)  #dropout (keep probability)

        logits, self.variables, self.offset = create_conv_net(
            self.x, self.keep_prob, channels, n_class, **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        self.cross_entropy = tf.reduce_mean(
            cross_entropy(
                tf.reshape(self.y, [-1, n_class]),
                tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))

        self.predicter = pixel_wise_softmax_2(logits)
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3),
                                     tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))

        #showing images in tensorboard
        self.sum_img_y = tf.reshape(
            self.y[0, :, :, 0],
            shape=[1, tf.shape(self.y)[1],
                   tf.shape(self.y)[2], 1])
        self.sum_img_y2 = tf.reshape(
            self.y[0, :, :, 1],
            shape=[1, tf.shape(self.y)[1],
                   tf.shape(self.y)[2], 1])
        self.sum_img_pred = tf.reshape(
            self.predicter[0, :, :, 0],
            shape=[1, tf.shape(self.y)[1],
                   tf.shape(self.y)[2], 1])
        self.sum_img_pred2 = tf.reshape(
            self.predicter[0, :, :, 1],
            shape=[1, tf.shape(self.y)[1],
                   tf.shape(self.y)[2], 1])

        eps = 1e-5
        intersection = tf.reduce_sum(self.predicter[:, :, :, 1] *
                                     self.y[:, :, :, 1])
        union = eps + tf.reduce_sum(
            self.predicter[:, :, :, 1]) + tf.reduce_sum(self.y[:, :, :, 1])
        self.dice_score = (2 * intersection / (union))
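A quick NumPy sanity check of the soft Dice score computed above (illustrative values only, not part of the original code):

import numpy as np

eps = 1e-5
pred = np.array([0.9, 0.8, 0.1, 0.7])    # predicted foreground probabilities
truth = np.array([1.0, 1.0, 0.0, 1.0])   # binary foreground labels

intersection = np.sum(pred * truth)           # 0.9 + 0.8 + 0.7 = 2.4
union = eps + np.sum(pred) + np.sum(truth)    # ~2.5 + 3.0 = 5.5
dice = 2 * intersection / union               # ~0.873
print(dice)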
Example #5
    def __init__(self,
                 channels=3,
                 n_class=2,
                 cost="cross_entropy",
                 cost_kwargs={},
                 **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)
        with tf.name_scope('inputs'):
            self.x = tf.placeholder("float",
                                    shape=[None, None, None, channels],
                                    name='image')
            self.y = tf.placeholder("float",
                                    shape=[None, None, None, n_class],
                                    name='label')
            tf.summary.histogram('image', self.x)
            tf.summary.histogram('label', self.y)
            tf.summary.image('image_sum', get_image_summary(self.x))
            tf.summary.image('label_sum', get_image_summary(self.y, idx=1))
            self.keep_prob = tf.placeholder(
                tf.float32, name='drop')  #dropout (keep probability)
            self.trainphase = tf.placeholder(tf.bool, name='trainphase')

        logits, self.variables, self.offset = create_conv_net(
            self.x, self.keep_prob, self.trainphase, channels, n_class,
            **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        with tf.name_scope('CrossEntropyforScale'):
            self.cross_entropy = tf.reduce_mean(
                cross_entropy(
                    tf.reshape(self.y, [-1, n_class]),
                    tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))

        with tf.name_scope('Predict'):
            self.predicter = pixel_wise_softmax_2(logits)
            tf.summary.image('predict1',
                             get_image_summary(self.predicter, idx=0))
            tf.summary.image('predict2',
                             get_image_summary(self.predicter, idx=1))
        with tf.name_scope('Accuracy'):
            self.correct_pred = tf.equal(tf.argmax(self.predicter, 3),
                                         tf.argmax(self.y, 3))
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_pred, tf.float32))
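Because this variant registers tf.summary ops, the usual TF 1.x pattern is to merge them once and write them from the training loop (a sketch; the session and feed dict are assumed):

merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs", tf.get_default_graph())
# Inside the training loop:
#   summary_str = sess.run(merged, feed_dict={...})
#   writer.add_summary(summary_str, step)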
Example #6
File: unet.py Project: Zach-ER/tf_unet
    def _get_cost(self, logits, cost_name, cost_kwargs):
        """
        Constructs the cost function, either cross_entropy, weighted cross_entropy or dice_coefficient.
        Optional arguments are: 
        class_weights: weights for the different classes in case of multi-class imbalance
        regularizer: power of the L2 regularizers added to the loss function
        """
        
        flat_logits = tf.reshape(logits, [-1, self.n_class])
        flat_labels = tf.reshape(self.y, [-1, self.n_class])
        if cost_name == "cross_entropy":
            class_weights = cost_kwargs.pop("class_weights", None)
            
            if class_weights is not None:
                class_weights = tf.constant(np.array(class_weights, dtype=np.float32))
        
                weight_map = tf.multiply(flat_labels, class_weights)
                weight_map = tf.reduce_sum(weight_map, axis=1)
        
                loss_map = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits,
                                                                   labels=flat_labels)
                weighted_loss = tf.multiply(loss_map, weight_map)
        
                loss = tf.reduce_mean(weighted_loss)
                
            else:
                loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits,
                                                                              labels=flat_labels))
        elif cost_name == "dice_coefficient":
            predictions = pixel_wise_softmax_2(logits)
            flat_predictions = tf.reshape(predictions, [-1, self.n_class])
            intersection = tf.reduce_sum(tf.multiply(flat_predictions, flat_labels))
            union = tf.reduce_sum(tf.multiply(flat_predictions, flat_predictions)) + tf.reduce_sum(
                tf.multiply(flat_labels, flat_labels))
            loss = 1 - (2 * intersection / union)

        else:
            raise ValueError("Unknown cost function: %s" % cost_name)

        regularizer = cost_kwargs.pop("regularizer", None)
        if regularizer is not None:
            regularizers = sum([tf.nn.l2_loss(variable) for variable in self.variables])
            loss += (regularizer * regularizers)

        return loss
Example #7
    def _get_cost(self, logits, cost_name, cost_kwargs):
        """
        Constructs the cost function, either cross_entropy, weighted cross_entropy or dice_coefficient.
        Optional arguments are: 
        class_weights: weights for the different classes in case of multi-class imbalance
        regularizer: power of the L2 regularizers added to the loss function
        """
        
        flat_logits = tf.reshape(logits, [-1, self.n_class])
        flat_labels = tf.reshape(self.y, [-1, self.n_class])
        if cost_name == "cross_entropy":
            fore_weights = cost_kwargs.pop("fore_weights", None)
            back_weights = cost_kwargs.pop("back_weights", None)
            
            if fore_weights is not None:

                # By XY

                logits_softmax = tf.nn.softmax(flat_logits)
                weight_fore = tf.constant(np.array(fore_weights, dtype=np.float32))
                weight_back = tf.constant(np.array(back_weights, dtype=np.float32))
                weight_map_fore = tf.multiply(flat_labels, weight_fore)
                weight_map_back = tf.multiply(flat_labels, weight_back)

                # Weighted loss - use this loss at the first-step training
                weight_loss = -weight_map_fore[..., 0] * tf.log(logits_softmax[..., 0])
                for i_map in range(1, self.n_class):
                    weight_loss = weight_loss-weight_map_back[..., i_map]*tf.log(logits_softmax[..., i_map])
                loss = tf.reduce_mean(weight_loss)

                # # Focal loss - use this loss at the second-step training
                # focal_map = tf.ones(tf.shape(logits_softmax), tf.float32) - logits_softmax
                # focal_map_2 = tf.multiply(focal_map, focal_map)
                # focal_loss = -weight_map_fore[..., 0]*focal_map_2[..., 0]*tf.log(logits_softmax[..., 0])# weighted background
                # for i_map in range(1, self.n_class):
                #     focal_loss = focal_loss-weight_map_back[..., i_map]*focal_map_2[..., i_map]*tf.log(logits_softmax[..., i_map])
                # loss = tf.reduce_mean(focal_loss)

                # By XY
                
            else:
                loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits, 
                                                                              labels=flat_labels))
        elif cost_name == "dice_coefficient":
            eps = 1e-5
            prediction = pixel_wise_softmax_2(logits)
            intersection = tf.reduce_sum(prediction * self.y)
            union =  eps + tf.reduce_sum(prediction) + tf.reduce_sum(self.y)
            loss = -(2 * intersection/ (union))
            
        else:
            raise ValueError("Unknown cost function: "%cost_name)

        regularizer = cost_kwargs.pop("regularizer", None)
        if regularizer is not None:
            regularizers = sum([tf.nn.l2_loss(variable) for variable in self.variables])
            loss += (regularizer * regularizers)
            
        return loss
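A hedged call sketch for this weighted cross-entropy (the weight values are illustrative; it assumes the constructor forwards cost_kwargs to _get_cost, as in the other examples):

net = Unet(cost="cross_entropy",
           cost_kwargs={"fore_weights": 2.0,   # applied to the class-0 term
                        "back_weights": 1.0,   # applied to classes 1..n_class-1
                        "regularizer": 1e-4})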
Example #8
File: unet.py Project: dvidmar/tf_unet
    def _get_cost(self, logits, cost_name, cost_kwargs):
        """
        Constructs the cost function, either cross_entropy, weighted cross_entropy or dice_coefficient.
        Optional arguments are: 
        class_weights: weights for the different classes in case of multi-class imbalance
        regularizer: power of the L2 regularizers added to the loss function
        """

        flat_logits = tf.reshape(logits, [-1, self.n_class])
        flat_labels = tf.reshape(self.y, [-1, self.n_class])
        if cost_name == "cross_entropy":
            class_weights = cost_kwargs.pop("class_weights", None)

            if class_weights is not None:
                class_weights = tf.constant(
                    np.array(class_weights, dtype=np.float32))

                weight_map = tf.multiply(flat_labels, class_weights)
                weight_map = tf.reduce_sum(weight_map, axis=1)

                loss_map = tf.nn.softmax_cross_entropy_with_logits(
                    logits=flat_logits, labels=flat_labels)
                weighted_loss = tf.multiply(loss_map, weight_map)

                #weight each pixel by value specified in weight map (added 1/10/2018)
                loss = tf.reduce_mean(
                    tf.multiply(weighted_loss, tf.reshape(self.w, [-1])))

            else:
                #weight each pixel by value specified in weight map (added 1/10/2018)
                weight_map = tf.reshape(self.w, [-1])

                loss_map = tf.nn.softmax_cross_entropy_with_logits(
                    logits=flat_logits, labels=flat_labels)
                weighted_loss = tf.multiply(loss_map, weight_map)

                loss = tf.reduce_mean(weighted_loss)

        elif cost_name == "dice_coefficient":
            eps = 1e-5
            prediction = pixel_wise_softmax_2(logits)
            intersection = tf.reduce_sum(prediction * self.y)
            union = eps + tf.reduce_sum(prediction) + tf.reduce_sum(self.y)
            loss = -(2 * intersection / (union))

        else:
            raise ValueError("Unknown cost function: " % cost_name)

        regularizer = cost_kwargs.pop("regularizer", None)
        if regularizer is not None:
            regularizers = sum(
                [tf.nn.l2_loss(variable) for variable in self.variables])
            loss += (regularizer * regularizers)

        return loss
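This variant additionally multiplies every pixel's loss by self.w, a per-pixel weight map that is not defined in this snippet. Presumably it is a placeholder created in __init__ alongside self.x and self.y, for example (an assumption, not shown in the source):

# Hypothetical declaration of the per-pixel weight map used above
self.w = tf.placeholder("float", shape=[None, None, None], name="weight_map")

It would then be fed with a weight image of the same spatial size as the labels.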
Example #9
    def _get_cost(self, logits, cost_name, cost_kwargs):
        """
        Constructs the cost function, either cross_entropy, weighted cross_entropy,
        dice_coefficient, or iou (intersection over union).
        Optional arguments are:
        class_weights: weights for the different classes in case of multi-class imbalance
        regularizer: power of the L2 regularizers added to the loss function
        """

        flat_logits = tf.reshape(logits, [-1, self.n_class])
        flat_labels = tf.reshape(self.y, [-1, self.n_class])
        if cost_name == "cross_entropy":
            class_weights = cost_kwargs.pop("class_weights", None)

            if class_weights is not None:
                class_weights = tf.constant(
                    np.array(class_weights, dtype=np.float32))

                weight_map = tf.multiply(flat_labels, class_weights)
                weight_map = tf.reduce_sum(weight_map, axis=1)

                loss_map = tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=flat_logits, labels=flat_labels)
                weighted_loss = tf.multiply(loss_map, weight_map)

                loss = tf.reduce_mean(weighted_loss)

            else:
                loss = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits_v2(
                        logits=flat_logits, labels=flat_labels))
        elif cost_name == "dice_coefficient" or cost_name == "iou":
            eps = 1e-5
            prediction = pixel_wise_softmax_2(logits)
            A_intersect_B = tf.reduce_sum(prediction * self.y, axis=[0, 1, 2])
            A_plus_B = tf.reduce_sum(
                prediction, axis=[0, 1, 2]) + tf.reduce_sum(self.y,
                                                            axis=[0, 1, 2])
            if cost_name == "dice_coefficient":
                denominator = A_plus_B
            else:  # intersection over union
                A_union_B = A_plus_B - A_intersect_B
                denominator = A_union_B
            loss = tf.reduce_sum(-(2 * A_intersect_B / (eps + denominator)))

        else:
            raise ValueError("Unknown cost function: " % cost_name)

        regularizer = cost_kwargs.pop("regularizer", None)
        if regularizer is not None:
            regularizers = sum(
                [tf.nn.l2_loss(variable) for variable in self.variables])
            loss += (regularizer * regularizers)

        return loss
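A NumPy sketch of the Dice/IoU quantities this cost is built from (illustrative values; note the snippet above keeps the factor of 2 in both branches, so its "iou" loss is -2*IoU rather than -IoU):

import numpy as np

pred = np.array([1.0, 1.0, 0.0, 1.0])
truth = np.array([1.0, 0.0, 0.0, 1.0])

intersect = np.sum(pred * truth)            # |A intersect B| = 2
a_plus_b = np.sum(pred) + np.sum(truth)     # |A| + |B| = 5
union = a_plus_b - intersect                # |A union B| = 3

dice = 2 * intersect / a_plus_b             # 0.8
iou = intersect / union                     # ~0.667
print(dice, iou)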
Example #10
    def __init__(self,
                 channels=3,
                 n_class=2,
                 cost="cross_entropy",
                 cost_kwargs={},
                 **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        with tf.name_scope('inputs') as scope:
            self.x = tf.placeholder("float",
                                    shape=[None, None, None, channels])
            self.y = tf.placeholder("float", shape=[None, None, None, n_class])
            self.keep_prob = tf.placeholder(
                tf.float32)  #dropout (keep probability)

        with tf.name_scope('CONVNET') as scope:
            logits, self.variables, self.offset = create_conv_net(
                self.x, self.keep_prob, channels, n_class, **kwargs)
        with tf.name_scope('COST') as scope:
            self.cost = self._get_cost(logits, cost, cost_kwargs)
        with tf.name_scope('GRADIENT') as scope:
            self.gradients_node = tf.gradients(self.cost, self.variables)
        with tf.name_scope('CROSSENTROPY') as scope:
            self.cross_entropy = tf.reduce_mean(
                cross_entropy(
                    tf.reshape(self.y, [-1, n_class]),
                    tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))
        with tf.name_scope('PREDICTER') as scope:
            self.predicter = pixel_wise_softmax_2(logits)
            self.correct_pred = tf.equal(tf.argmax(self.predicter, 3),
                                         tf.argmax(self.y, 3))
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_pred, tf.float32))
Example #11
File: unet.py Project: mayurdb/U-Net
    def _get_cost(self, logits, cost_name, cost_kwargs):

        flat_logits = tf.reshape(logits, [-1, self.n_class])
        flat_labels = tf.reshape(self.y, [-1, self.n_class])
        if cost_name == "cross_entropy":
            class_weights = cost_kwargs.pop("class_weights", None)

            if class_weights is not None:
                class_weights = tf.constant(
                    np.array(class_weights, dtype=np.float32))

                weight_map = tf.multiply(flat_labels, class_weights)
                weight_map = tf.reduce_sum(weight_map, axis=1)

                loss_map = tf.nn.softmax_cross_entropy_with_logits(
                    logits=flat_logits, labels=flat_labels)
                weighted_loss = tf.multiply(loss_map, weight_map)

                loss = tf.reduce_mean(weighted_loss)

            else:
                loss = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(
                        logits=flat_logits, labels=flat_labels))
        elif cost_name == "dice_coefficient":
            eps = 1e-5
            prediction = pixel_wise_softmax_2(logits)
            intersection = tf.reduce_sum(prediction * self.y)
            union = eps + tf.reduce_sum(prediction) + tf.reduce_sum(self.y)
            loss = -(2 * intersection / (union))

        else:
            raise ValueError("Unknown cost function: " % cost_name)

        regularizer = cost_kwargs.pop("regularizer", None)
        if regularizer is not None:
            regularizers = sum(
                [tf.nn.l2_loss(variable) for variable in self.variables])
            loss += (regularizer * regularizers)

        return loss
Example #12
    def _get_cost(self, logits, cost_name, cost_kwargs):
        """
        Constructs the cost function, either cross_entropy, weighted cross_entropy or dice_coefficient.
        Optional arguments are:
        class_weights: weights for the different classes in case of multi-class imbalance
        regularizer: power of the L2 regularizers added to the loss function
        """

        flat_logits = tf.reshape(logits, [-1, self.n_class])
        flat_labels = tf.reshape(self.y, [-1, self.n_class])
        if cost_name == "cross_entropy":
            class_weights = cost_kwargs.pop("class_weights", None)

            if class_weights is not None:
                class_weights = tf.constant(
                    np.array(class_weights, dtype=np.float32))

                weight_map = tf.multiply(flat_labels, class_weights)
                weight_map = tf.reduce_sum(weight_map, axis=1)

                loss_map = tf.nn.softmax_cross_entropy_with_logits(
                    logits=flat_logits, labels=flat_labels)
                weighted_loss = tf.multiply(loss_map, weight_map)

                loss = tf.reduce_mean(weighted_loss)

            else:
                loss = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(
                        logits=flat_logits, labels=flat_labels))
                # loss_map = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits,
                #                                                                labels=flat_labels)
                # weight = (1000000.0-tf.reduce_sum(loss_map[..., 1]))/tf.reduce_sum(loss_map[..., 1])
                # loss = (tf.reduce_sum(loss_map[..., 0])+tf.reduce_sum(loss_map[..., 1]*weight))/1000000.0
        # elif cost_name == "sigmoid_cross_entropy"

        elif cost_name == "dice_coefficient":
            eps = 1e-5
            prediction = pixel_wise_softmax_2(logits)
            intersection = tf.reduce_sum(prediction * self.y)
            union = eps + tf.reduce_sum(prediction) + tf.reduce_sum(self.y)
            loss = -(2 * intersection / (union))

        elif cost_name == 'IoU':
            eps = 1e-5
            logits = pixel_wise_softmax_2(logits)
            inter_ground = tf.reduce_sum(logits[..., 0] * self.y[..., 0])
            inter_pred = tf.reduce_sum(logits[..., 1] * self.y[..., 1])
            ground_loss = -tf.div(
                inter_ground,
                tf.reduce_sum(logits[..., 0]) + tf.reduce_sum(self.y[..., 0]) -
                inter_ground + eps)
            pred_loss = -tf.div(
                inter_pred,
                tf.reduce_sum(logits[..., 1]) + tf.reduce_sum(self.y[..., 1]) -
                inter_pred + eps)
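            # Note: ground_loss is computed above but unused; only pred_loss enters the final loss.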
            loss = 1 + pred_loss
            # loss = tf.cond(sum_labels_map > 0, lambda: 1.0 - tf.div(inter,union + eps), lambda: 0.0)

        else:
            raise ValueError("Unknown cost function: " % cost_name)

        regularizer = cost_kwargs.pop("regularizer", None)
        if regularizer is not None:
            regularizers = sum([
                tf.nn.l2_loss(variable)
                for variable in self.generator_variables
            ])
            loss += (regularizer * regularizers)

        return loss, logits
Example #13
    def __init__(self,
                 channels=3,
                 n_class=2,
                 cost="cross_entropy",
                 adversarial=True,
                 border_addition=0,
                 patch_size=1000,
                 summaries=True,
                 cost_kwargs={},
                 unet_kwargs={},
                 resnet_kwargs={}):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = summaries

        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(
            tf.float32)  #dropout (keep probability)
        self.is_training = tf.placeholder(tf.bool)

        generator_logits, self.generator_variables, self.offset = create_conv_net(
            self.x, self.keep_prob, channels, n_class, **unet_kwargs)

        self.border_addition = border_addition
        if border_addition != 0:
            generator_logits = generator_logits[:, border_addition:
                                                -border_addition,
                                                border_addition:
                                                -border_addition, ...]

        self.predicter = pixel_wise_softmax_2(generator_logits)
        self.bce_loss, self.pred = self._get_cost(generator_logits, cost,
                                                  cost_kwargs)

        self.cross_entropy = tf.reduce_mean(
            cross_entropy(
                tf.reshape(self.y, [-1, n_class]),
                tf.reshape(pixel_wise_softmax_2(generator_logits),
                           [-1, n_class])))

        self.argmax = tf.argmax(self.predicter, 3)
        self.correct_pred = tf.equal(self.argmax, tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))

        self.tp = tf.reduce_sum(
            tf.cast(tf.argmax(self.predicter, 3), tf.float32) * self.y[..., 1])
        self.fp = tf.reduce_sum(
            tf.cast(tf.argmax(self.predicter, 3), tf.float32)) - self.tp
        self.fn = tf.reduce_sum(self.y[..., 1]) - self.tp

        self.precision = self.tp / (self.tp + self.fp)
        self.recall = self.tp / (self.tp + self.fn)
        self.f1 = 2 * self.recall * self.precision / (self.recall +
                                                      self.precision)

        # smooth_labels = smooth(self.y, 2, 0.1)*np.random.normal(0.95, 0.5)
        # print(smooth_labels.shape)
        # smooth_labels = tf.reshape(self.y[:,:,:,1]*np.random.normal(0.85, 0.15), (1, patch_size, patch_size, 1))
        # smooth_labels[...,0] = 1.0 - smooth_labels[...,1]
        # smooth_labels = tf.reshape(smooth_labels, (1, patch_size, patch_size, n_class))
        # smooth_labels = tf.concat([1.0 -smooth_labels, smooth_labels], axis=3)
        #prediction = tf.cast(tf.stack([1 - self.argmax, self.argmax], axis=3), tf.float32)
        # image_patches = tf.extract_image_patches(
        #     image, PATCH_SIZE, PATCH_SIZE, [1, 1, 1, 1], 'VALID')

        self.generator_cost = self.bce_loss
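A quick sanity check of the precision/recall/F1 arithmetic above (illustrative confusion counts only):

tp, fp, fn = 80.0, 20.0, 10.0                         # illustrative counts
precision = tp / (tp + fp)                            # 0.8
recall = tp / (tp + fn)                               # ~0.889
f1 = 2 * recall * precision / (recall + precision)    # ~0.842
print(precision, recall, f1)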
Example #14
    def _get_cost(self, logits, cost_name, cost_kwargs):
        """
        Constructs the cost function, either cross_entropy, weighted cross_entropy or dice_coefficient.
        Optional arguments are: 
        class_weights: weights for the different classes in case of multi-class imbalance
        regularizer: power of the L2 regularizers added to the loss function
        """

        class_weights = cost_kwargs.pop("class_weights", None)

        flat_logits = tf.reshape(logits, [-1, self.n_class])
        flat_labels = tf.reshape(self.y, [-1, self.n_class])
        if cost_name == "cross_entropy":
            if class_weights is None:
                loss = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(
                        logits=flat_logits, labels=flat_labels))
            else:
                class_weights = tf.constant(
                    np.array(class_weights, dtype=np.float32))
                weight_map = tf.multiply(flat_labels, class_weights)
                weight_map = tf.reduce_sum(weight_map, axis=1)

                loss_map = tf.nn.softmax_cross_entropy_with_logits(
                    logits=flat_logits, labels=flat_labels)
                weighted_loss = tf.multiply(loss_map, weight_map)

                loss = tf.reduce_mean(weighted_loss)

        elif cost_name == "dice_coefficient":
            # eps = 1e-5
            eps = tf.constant(value=(2**-14))
            prediction = pixel_wise_softmax_2(logits)
            # prediction[tf.is_nan(prediction)] = 0
            prediction = tf.where(tf.is_nan(prediction),
                                  tf.zeros_like(prediction), prediction)
            if class_weights is None:
                intersection = tf.reduce_sum(prediction * self.y)
            else:
                class_weights = tf.constant(
                    np.array(class_weights, dtype=np.float32))
                intersection = tf.reduce_sum(prediction * self.y *
                                             class_weights)
            union = eps + tf.reduce_sum(prediction) + tf.reduce_sum(self.y)
            loss = -(2 * intersection / union)
            loss = tf.where(tf.is_nan(loss), tf.zeros_like(loss),
                            loss)  # Note: loss is only a scalar value

        else:
            raise ValueError("Unknown cost function: " % cost_name)

        # regularizer_l1 = cost_kwargs.pop("regularizer_l1", None)
        # if regularizer_l1 is not None:
        #     regularizers = sum([tf.nn.l1_loss(variable) for variable in self.variables])
        #     loss += (regularizer_l1 * regularizers)

        regularizer_l2 = cost_kwargs.pop("regularizer", None)
        if regularizer_l2 is not None:
            regularizers = sum(
                [tf.nn.l2_loss(variable) for variable in self.variables])
            loss += (regularizer_l2 * regularizers)

        return loss
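The tf.where/tf.is_nan guards above replace NaNs with zeros; a NumPy analogue of the same pattern (a sketch, not part of the original code):

import numpy as np

prediction = np.array([0.7, np.nan, 0.3])
prediction = np.where(np.isnan(prediction), np.zeros_like(prediction), prediction)
print(prediction)  # [0.7 0.  0.3]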