Example #1
    def __init__(self,
                 channels=3,
                 n_class=2,
                 cost="cross_entropy",
                 cost_kwargs={},
                 **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class

        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(
            tf.float32)  #dropout (keep probability)

        logits, self.variables, self.offset = create_conv_net(
            self.x, self.keep_prob, channels, n_class, **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        self.cross_entropy = tf.reduce_mean(
            cross_entropy(
                tf.reshape(self.y, [-1, n_class]),
                tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))

        self.predicter = pixel_wise_softmax_2(logits)
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3),
                                     tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
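This example (and most of the ones that follow) relies on two helpers that are not shown: pixel_wise_softmax_2, a per-pixel softmax over the class axis, and cross_entropy, the mean negative log-likelihood of that softmax against the one-hot labels. A minimal sketch of what they typically look like in tf_unet-style code (the exact definitions vary between repositories, so treat this as an assumption):

import tensorflow as tf

def pixel_wise_softmax_2(output_map):
    # Softmax over the last (class) axis of an NHWC tensor, computed per pixel.
    exponential_map = tf.exp(output_map)
    normalizer = tf.reduce_sum(exponential_map, axis=3, keepdims=True)
    return exponential_map / normalizer

def cross_entropy(y_, output_map):
    # Mean negative log-likelihood; clipping keeps log() away from zero.
    return -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(output_map, 1e-10, 1.0)))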
Example #2
    def __init__(self, channels=3, n_class=2, height=None, width=None, cost="cross_entropy", cost_kwargs={}, **kwargs):
        tf.reset_default_graph()
        
        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)
        
        self.x = tf.placeholder("float", shape=[None, height, width, channels], name='images')
        self.keep_prob = tf.placeholder(tf.float32, shape=(), name='keep_prob') #dropout (keep probability)
        
        logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, nx=height, ny=width, **kwargs)
        
        pred_shape = logits.get_shape().as_list()
 
        self.y = tf.placeholder("float", shape=[None, pred_shape[1], pred_shape[2], n_class], name='labels')

        self.cost = self._get_cost(logits, cost, cost_kwargs)
        
        self.gradients_node = tf.gradients(self.cost, self.variables)
         
        self.predicter = pixel_wise_softmax_2(logits)

        self.cross_entropy = tf.reduce_mean(cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                                          tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))
        
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
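Example #2 sizes the label placeholder from logits.get_shape() because a U-Net built from unpadded ("valid") convolutions produces an output that is spatially smaller than its input. The ground-truth masks therefore have to be center-cropped to the prediction shape before they are fed; a minimal sketch of such a crop, assuming NumPy arrays in NHWC layout (the helper name is illustrative, not taken from the example):

def crop_to_shape(data, shape):
    # Center-crop a batch of masks/images to the spatial size of the network output.
    offset_h = (data.shape[1] - shape[1]) // 2
    offset_w = (data.shape[2] - shape[2]) // 2
    return data[:, offset_h:offset_h + shape[1], offset_w:offset_w + shape[2], :]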
Example #3
 def __init__(self, nx=None, ny=None, channels=3, n_class=2, add_regularizers=True, class_weights=None, **kwargs):
     tf.reset_default_graph()
     
     self.n_class = n_class
     self.summaries = kwargs.get("summaries", True)
     
     self.x = tf.placeholder("float", shape=[None, nx, ny, channels])
     self.y = tf.placeholder("float", shape=[None, None, None, n_class])
     self.keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
     
     logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)
     
     if class_weights is not None:
         class_weights = tf.constant(np.array(class_weights, dtype=np.float32))
         # Scale each class's logits before the softmax cross entropy.
         weighted_logits = tf.multiply(tf.reshape(logits, [-1, n_class]), class_weights)
         loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
             logits=weighted_logits, labels=tf.reshape(self.y, [-1, n_class])))

     else:
         loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
             logits=tf.reshape(logits, [-1, n_class]), labels=tf.reshape(self.y, [-1, n_class])))
     self.gradients_node = tf.gradients(loss, self.variables)
      
     self.cross_entropy = tf.reduce_mean(cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                         tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class]), 
                                         ))
     self.cost = loss
     
     if add_regularizers:
         reg_constant = 0.001  # Choose an appropriate one.
         regularizers = sum([tf.nn.l2_loss(variable) for variable in self.variables])
         self.cost += (reg_constant * regularizers)
     
     self.predicter = pixel_wise_softmax_2(logits)
     self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
     self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
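Example #3 applies the class weights to the logits themselves (tf.multiply before the softmax cross entropy). A more common alternative is to leave the logits untouched and weight each pixel's loss by the weight of its true class; a rough sketch of that variant, reusing logits, self.y, and n_class from the example and assuming class_weights is a plain list of length n_class:

flat_logits = tf.reshape(logits, [-1, n_class])
flat_labels = tf.reshape(self.y, [-1, n_class])
# Pick the weight of each pixel's ground-truth class via the one-hot labels.
weight_map = tf.reduce_sum(flat_labels * tf.constant(class_weights, dtype=tf.float32), axis=1)
pixel_loss = tf.nn.softmax_cross_entropy_with_logits(labels=flat_labels, logits=flat_logits)
loss = tf.reduce_mean(pixel_loss * weight_map)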
Example #4
    def __init__(self, channels, n_class, cost="cross_entropy", cost_kwargs={}, **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, None, None, channels], name="x")
        self.y = tf.placeholder("float", shape=[None, None, None, n_class], name="y")
        self.keep_prob = tf.placeholder(tf.float32, name="dropout_probability")  # dropout (keep probability)

        # samarinm adjusted:
        logits, self.variables, self.offset, self.convs_RELU = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)
        # logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        with tf.name_scope("cross_entropy"):
            self.cross_entropy = cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                               tf.reshape(pixel_wise_softmax(logits), [-1, n_class]))

        with tf.name_scope("results"):
            self.predicter = pixel_wise_softmax(logits)
            self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
            self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
Example #5
    def __init__(self,
                 channels=3,
                 n_class=2,
                 cost="cross_entropy",
                 cost_kwargs={},
                 **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(
            tf.float32)  #dropout (keep probability)

        logits, self.variables, self.offset = create_conv_net(
            self.x, self.keep_prob, channels, n_class, **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        self.cross_entropy = tf.reduce_mean(
            cross_entropy(
                tf.reshape(self.y, [-1, n_class]),
                tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))

        self.predicter = pixel_wise_softmax_2(logits)
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3),
                                     tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))

        #showing images in tensorboard
        self.sum_img_y = tf.reshape(
            self.y[0, :, :, 0],
            shape=[1, tf.shape(self.y)[1],
                   tf.shape(self.y)[2], 1])
        self.sum_img_y2 = tf.reshape(
            self.y[0, :, :, 1],
            shape=[1, tf.shape(self.y)[1],
                   tf.shape(self.y)[2], 1])
        self.sum_img_pred = tf.reshape(
            self.predicter[0, :, :, 0],
            shape=[1, tf.shape(self.y)[1],
                   tf.shape(self.y)[2], 1])
        self.sum_img_pred2 = tf.reshape(
            self.predicter[0, :, :, 1],
            shape=[1, tf.shape(self.y)[1],
                   tf.shape(self.y)[2], 1])

        eps = 1e-5
        intersection = tf.reduce_sum(self.predicter[:, :, :, 1] *
                                     self.y[:, :, :, 1])
        union = eps + tf.reduce_sum(
            self.predicter[:, :, :, 1]) + tf.reduce_sum(self.y[:, :, :, 1])
        self.dice_score = (2 * intersection / (union))
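The dice_score above is the (soft) Dice coefficient of the foreground channel, 2 * |P ∩ Y| / (|P| + |Y|), with eps guarding against an empty union. A small NumPy check of the same formula on hypothetical values:

import numpy as np

pred = np.array([[0.9, 0.8], [0.1, 0.0]])   # soft foreground probabilities
label = np.array([[1.0, 1.0], [0.0, 0.0]])  # binary foreground mask
eps = 1e-5
dice = 2 * (pred * label).sum() / (eps + pred.sum() + label.sum())
# 2 * 1.7 / (1.8 + 2.0) ≈ 0.89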
Example #6
    def __init__(self,
                 channels=1,
                 n_class=2,
                 cost="cross_entropy",
                 cost_kwargs={},
                 **kwargs):
        tf.reset_default_graph()
        print(tf.__version__)
        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)
        print('Track_1')
        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])

        # Binarize the labels at a fixed foreground threshold.
        self.y = tf.cast(self.y > 0.15, tf.float32)
        self.keep_prob = tf.placeholder(
            tf.float32)  #dropout (keep probability)

        logits = make_unet(self.x)

        self.variables = [v for v in tf.trainable_variables()]
        #        logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)
        #resize --- halving the images

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        self.cross_entropy = tf.reduce_mean(
            cross_entropy(tf.reshape(self.y, [-1, n_class]),
                          tf.reshape(logits, [-1, n_class])))

        self.predicter = logits
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3),
                                     tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))

        #showing images in tensorboard
        self.sum_img_y = tf.reshape(
            self.y, shape=[1, tf.shape(self.y)[1],
                           tf.shape(self.y)[2], 1])
        self.sum_img_pred = tf.reshape(
            self.predicter,
            shape=[1, tf.shape(self.y)[1],
                   tf.shape(self.y)[2], 1])

        eps = 1e-5
        intersection = tf.reduce_sum(self.predicter * self.y)
        union = eps + tf.reduce_sum(self.predicter) + tf.reduce_sum(self.y)
        self.dice_score = (2 * intersection / (union))
Example #7
    def __init__(self,
                 channels=3,
                 n_class=2,
                 cost="cross_entropy",
                 cost_kwargs={},
                 **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)
        with tf.name_scope('inputs'):
            self.x = tf.placeholder("float",
                                    shape=[None, None, None, channels],
                                    name='image')
            self.y = tf.placeholder("float",
                                    shape=[None, None, None, n_class],
                                    name='label')
            tf.summary.histogram('image', self.x)
            tf.summary.histogram('label', self.y)
            tf.summary.image('image_sum', get_image_summary(self.x))
            tf.summary.image('label_sum', get_image_summary(self.y, idx=1))
            self.keep_prob = tf.placeholder(
                tf.float32, name='drop')  #dropout (keep probability)
            self.trainphase = tf.placeholder(tf.bool, name='trainphase')

        logits, self.variables, self.offset = create_conv_net(
            self.x, self.keep_prob, self.trainphase, channels, n_class,
            **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        with tf.name_scope('CrossEntropyforScale'):
            self.cross_entropy = tf.reduce_mean(
                cross_entropy(
                    tf.reshape(self.y, [-1, n_class]),
                    tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))

        with tf.name_scope('Predict'):
            self.predicter = pixel_wise_softmax_2(logits)
            tf.summary.image('predict1',
                             get_image_summary(self.predicter, idx=0))
            tf.summary.image('predict2',
                             get_image_summary(self.predicter, idx=1))
        with tf.name_scope('Accuracy'):
            self.correct_pred = tf.equal(tf.argmax(self.predicter, 3),
                                         tf.argmax(self.y, 3))
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_pred, tf.float32))
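Example #7 writes image summaries through a get_image_summary helper that is not shown. In tf_unet-style code it usually slices one channel of the first batch element and rescales it so tf.summary.image can render it; a rough sketch under that assumption (the exact helper may differ):

def get_image_summary(img, idx=0):
    # Take channel `idx` of the first batch element and rescale it to [0, 255].
    v = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))
    v = v - tf.reduce_min(v)
    v = v / (tf.reduce_max(v) + 1e-12) * 255
    return v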
Example #8
    def __init__(self, channels=3, n_class=2, cost="cross_entropy", cost_kwargs={}, **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, None, None, channels], name="x")
        self.y = tf.placeholder("float", shape=[None, None, None, n_class], name="y")
        self.keep_prob = tf.placeholder(tf.float32, name="dropout_probability")  # dropout (keep probability)

        logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        with tf.name_scope("cross_entropy"):
            self.cross_entropy = cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                               tf.reshape(pixel_wise_softmax(logits), [-1, n_class]))

        with tf.name_scope("results"):
            self.predicter = pixel_wise_softmax(logits)
            self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
            self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
Example #9
    def __init__(self,
                 channels=3,
                 n_class=2,
                 cost="cross_entropy",
                 cost_kwargs={},
                 **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        with tf.name_scope('inputs') as scope:
            self.x = tf.placeholder("float",
                                    shape=[None, None, None, channels])
            self.y = tf.placeholder("float", shape=[None, None, None, n_class])
            self.keep_prob = tf.placeholder(
                tf.float32)  #dropout (keep probability)

        with tf.name_scope('CONVNET') as scope:
            logits, self.variables, self.offset = create_conv_net(
                self.x, self.keep_prob, channels, n_class, **kwargs)
        with tf.name_scope('COST') as scope:
            self.cost = self._get_cost(logits, cost, cost_kwargs)
        with tf.name_scope('GRADIENT') as scope:
            self.gradients_node = tf.gradients(self.cost, self.variables)
        with tf.name_scope('CROSSENTROPY') as scope:
            self.cross_entropy = tf.reduce_mean(
                cross_entropy(
                    tf.reshape(self.y, [-1, n_class]),
                    tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))
        with tf.name_scope('PREDICTER') as scope:
            self.predicter = pixel_wise_softmax_2(logits)
            self.correct_pred = tf.equal(tf.argmax(self.predicter, 3),
                                         tf.argmax(self.y, 3))
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_pred, tf.float32))
Example #10
    def __init__(self,
                 channels=3,
                 n_class=2,
                 cost="cross_entropy",
                 adversarial=True,
                 border_addition=0,
                 patch_size=1000,
                 summaries=True,
                 cost_kwargs={},
                 unet_kwargs={},
                 resnet_kwargs={}):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = summaries

        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(
            tf.float32)  #dropout (keep probability)
        self.is_training = tf.placeholder(tf.bool)

        generator_logits, self.generator_variables, self.offset = create_conv_net(
            self.x, self.keep_prob, channels, n_class, **unet_kwargs)

        self.border_addition = border_addition
        if border_addition != 0:
            generator_logits = generator_logits[:, border_addition:-border_addition,
                                                border_addition:-border_addition, ...]

        self.predicter = pixel_wise_softmax_2(generator_logits)
        self.bce_loss, self.pred = self._get_cost(generator_logits, cost,
                                                  cost_kwargs)

        self.cross_entropy = tf.reduce_mean(
            cross_entropy(
                tf.reshape(self.y, [-1, n_class]),
                tf.reshape(pixel_wise_softmax_2(generator_logits),
                           [-1, n_class])))

        self.argmax = tf.argmax(self.predicter, 3)
        self.correct_pred = tf.equal(self.argmax, tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))

        self.tp = tf.reduce_sum(
            tf.cast(tf.argmax(self.predicter, 3), tf.float32) * self.y[..., 1])
        self.fp = tf.reduce_sum(
            tf.cast(tf.argmax(self.predicter, 3), tf.float32)) - self.tp
        self.fn = tf.reduce_sum(self.y[..., 1]) - self.tp

        self.precision = self.tp / (self.tp + self.fp)
        self.recall = self.tp / (self.tp + self.fn)
        self.f1 = 2 * self.recall * self.precision / (self.recall +
                                                      self.precision)

        # smooth_labels = smooth(self.y, 2, 0.1)*np.random.normal(0.95, 0.5)
        # print(smooth_labels.shape)
        # smooth_labels = tf.reshape(self.y[:,:,:,1]*np.random.normal(0.85, 0.15), (1, patch_size, patch_size, 1))
        # smooth_labels[...,0] = 1.0 - smooth_labels[...,1]
        # smooth_labels = tf.reshape(smooth_labels, (1, patch_size, patch_size, n_class))
        # smooth_labels = tf.concat([1.0 -smooth_labels, smooth_labels], axis=3)
        #prediction = tf.cast(tf.stack([1 - self.argmax, self.argmax], axis=3), tf.float32)
        # image_patches = tf.extract_image_patches(
        #     image, PATCH_SIZE, PATCH_SIZE, [1, 1, 1, 1], 'VALID')

        self.generator_cost = self.bce_loss
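Example #10 derives precision, recall, and F1 from hard (argmax) foreground predictions: precision = TP / (TP + FP), recall = TP / (TP + FN), F1 = 2 * P * R / (P + R). As written the divisions are unguarded, so an image with no predicted or no true foreground yields NaN; a small sketch that adds a smoothing term (the epsilon is an assumption, not in the original):

eps = 1e-7  # guards against empty predictions or labels
self.precision = self.tp / (self.tp + self.fp + eps)
self.recall = self.tp / (self.tp + self.fn + eps)
self.f1 = 2 * self.recall * self.precision / (self.recall + self.precision + eps)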