Example #1
0
    def __init__(self,
                 img_rows=224,
                 img_cols=224,
                 weights="imagenet",
                 classes=1000,
                 **kwargs):
        """Build the ResNet50 graph: 7x7 conv stem, four residual stages,
        then a dense head trained with softmax cross-entropy.

        Parameters
        ----------
        img_rows, img_cols: int
          spatial size of the (3-channel) input images.
        weights: str
          stored on the instance; not consumed in this constructor --
          presumably used for weight loading elsewhere, TODO confirm.
        classes: int
          number of output classes.
        """
        super(ResNet50, self).__init__(use_queue=False, **kwargs)
        self.img_cols = img_cols
        self.img_rows = img_rows
        self.weights = weights
        self.classes = classes

        features = Feature(shape=(None, self.img_rows, self.img_cols, 3))
        labels = Label(shape=(None, self.classes))

        # Stem: strided 7x7 conv -> batch norm -> ReLU -> 3x3 max pool.
        conv1 = Conv2D(num_outputs=64,
                       kernel_size=7,
                       stride=2,
                       activation='linear',
                       padding='same',
                       in_layers=[features])
        bn1 = BatchNorm(in_layers=[conv1])
        ac1 = ReLU(bn1)
        # BUG FIX: pool the ReLU output; the original pooled bn1 directly,
        # leaving ac1 dangling and silently dropping the stem activation.
        pool1 = MaxPool2D(ksize=[1, 3, 3, 1], in_layers=[ac1])

        # Stage 1: conv block (stride 1) + 2 identity blocks.
        cb1 = self.conv_block(pool1, 3, [64, 64, 256], 1)
        id1 = self.identity_block(cb1, 3, [64, 64, 256])
        id1 = self.identity_block(id1, 3, [64, 64, 256])

        # Stage 2: conv block + 3 identity blocks.
        cb2 = self.conv_block(id1, 3, [128, 128, 512])
        id2 = self.identity_block(cb2, 3, [128, 128, 512])
        id2 = self.identity_block(id2, 3, [128, 128, 512])
        id2 = self.identity_block(id2, 3, [128, 128, 512])

        # Stage 3: conv block + 5 identity blocks.
        cb3 = self.conv_block(id2, 3, [256, 256, 1024])
        id3 = self.identity_block(cb3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])
        # BUG FIX: the original restarted this chain from cb3 here, which
        # discarded the three identity blocks built just above.
        id3 = self.identity_block(id3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])

        # Stage 4: conv block + 2 identity blocks.
        cb4 = self.conv_block(id3, 3, [512, 512, 2048])
        id4 = self.identity_block(cb4, 3, [512, 512, 2048])
        id4 = self.identity_block(id4, 3, [512, 512, 2048])

        # Global 7x7 pooling.  NOTE(review): canonical ResNet50 uses average
        # pooling here; max pooling is kept to preserve this model's behavior.
        pool2 = MaxPool2D(ksize=[1, 7, 7, 1], in_layers=[id4])

        flatten = Flatten(in_layers=[pool2])
        dense = Dense(classes, in_layers=[flatten])

        loss = SoftMaxCrossEntropy(in_layers=[labels, dense])
        loss = ReduceMean(in_layers=[loss])
        self.set_loss(loss)
        self.add_output(dense)
Example #2
0
def test_MaxPool2D_pickle():
  """Smoke test: a graph containing a MaxPool2D layer builds and saves."""
  graph = TensorGraph()
  pixels = Feature(shape=(graph.batch_size, 10, 10, 10))
  pooled = MaxPool2D(in_layers=pixels)
  graph.add_output(pooled)
  graph.set_loss(pooled)
  graph.build()
  graph.save()
Example #3
0
 def test_maxpool2D(self):
     """MaxPool2D should collapse a 2x2 spatial map to 1x1 per channel."""
     n_samples = 20
     rows, cols, channels = 2, 2, 2
     raw = np.random.rand(n_samples, rows, cols, channels)
     with self.session() as sess:
         pooled = MaxPool2D()(tf.convert_to_tensor(raw, dtype=tf.float32))
         sess.run(tf.global_variables_initializer())
         result = pooled.eval()
         assert result.shape == (n_samples, 1, 1, channels)
Example #4
0
    def __init__(self,
                 img_rows=512,
                 img_cols=512,
                 filters=None,
                 model=None,
                 **kwargs):
        """Build a U-Net: 5-depth contracting path, 4-depth expanding path
        with skip connections, and a 1x1 sigmoid mask head.

        Parameters
        ----------
        img_rows, img_cols: int
          spatial size of the input images.
        filters: list of int, optional
          channel counts for the five encoder depths
          (defaults to [64, 128, 256, 512, 1024]).
        model: dc.models.TensorGraph, optional
          graph the output layer is attached to; a fresh TensorGraph is
          created when omitted.
        """
        super(UNet, self).__init__(use_queue=False, **kwargs)
        self.img_cols = img_cols
        self.img_rows = img_rows
        # BUG FIX: the original used mutable default arguments (a list and a
        # TensorGraph instance), both evaluated once at def-time and shared
        # by every construction of this class.
        self.filters = [64, 128, 256, 512, 1024] if filters is None else filters
        # BUG FIX: the original ignored the `model` argument (always built a
        # fresh TensorGraph) while still calling add_output on `model`.
        self.model = dc.models.TensorGraph() if model is None else model

        # NOTE(review): Conv2D needs a channel axis; the original declared a
        # rank-3 feature.  Assuming 3-channel images -- confirm with callers.
        inputs = Feature(shape=(None, self.img_rows, self.img_cols, 3))

        def _double_conv(layer, num_filters):
            # Two stacked 3x3 same-padded ReLU convolutions.
            for _ in range(2):
                layer = Conv2D(num_outputs=num_filters,
                               kernel_size=3,
                               activation='relu',
                               padding='same',
                               in_layers=[layer])
            return layer

        # Contracting path; keep each depth's output for skip connections.
        skips = []
        layer = inputs
        for depth in range(5):
            layer = _double_conv(layer, self.filters[depth])
            skips.append(layer)
            if depth < 4:
                layer = MaxPool2D(ksize=2, in_layers=[layer])

        # Expanding path.  BUG FIX: the transposed convolutions need stride=2
        # to actually upsample, and the skip concatenation must join on the
        # channel axis (3 for NHWC); the original used axis=1 (height) and no
        # stride, so decoder shapes could never line up with the skips.
        for depth in range(3, -1, -1):
            up = Conv2DTranspose(num_outputs=self.filters[depth],
                                 kernel_size=2,
                                 stride=2,
                                 in_layers=[layer])
            layer = Concat(in_layers=[skips[depth], up], axis=3)
            layer = _double_conv(layer, self.filters[depth])

        # 1x1 sigmoid head producing a per-pixel mask.
        conv10 = Conv2D(num_outputs=1,
                        kernel_size=1,
                        activation='sigmoid',
                        in_layers=[layer])

        self.model.add_output(conv10)
    def build_graph(self):
        """Assemble the classification graph: augmentation -> 7x7 conv stem
        -> residual downsampling modules -> global max pool -> dense layers
        -> per-task softmax, with a sample-weighted cross-entropy loss.

        Reads self.image_size, self.augment, self.batch_size,
        self.n_init_kernel, self.n_downsample, self.n_fully_connected,
        self.n_tasks and self.n_classes; registers the output and loss on
        this model via add_output / set_loss.
        """
        # inputs placeholder
        self.inputs = Feature(shape=(None, self.image_size, self.image_size,
                                     3),
                              dtype=tf.float32)
        # data preprocessing and augmentation
        in_layer = DRAugment(self.augment,
                             self.batch_size,
                             size=(self.image_size, self.image_size),
                             in_layers=[self.inputs])
        # first conv layer (linear; BN + ReLU applied separately below)
        in_layer = Conv2D(int(self.n_init_kernel),
                          kernel_size=7,
                          activation_fn=None,
                          in_layers=[in_layer])
        in_layer = BatchNorm(in_layers=[in_layer])
        in_layer = ReLU(in_layers=[in_layer])

        # downsample by max pooling
        res_in = MaxPool2D(ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           in_layers=[in_layer])

        for ct_module in range(self.n_downsample - 1):
            # each module is a residual convolutional block
            # followed by a convolutional downsample layer
            # NOTE(review): for ct_module == 0 the 2**(ct_module - 1) factor
            # HALVES n_init_kernel -- presumably a deliberate bottleneck
            # reduction, but verify against the intended channel schedule.
            in_layer = Conv2D(int(self.n_init_kernel * 2**(ct_module - 1)),
                              kernel_size=1,
                              activation_fn=None,
                              in_layers=[res_in])
            in_layer = BatchNorm(in_layers=[in_layer])
            in_layer = ReLU(in_layers=[in_layer])
            in_layer = Conv2D(int(self.n_init_kernel * 2**(ct_module - 1)),
                              kernel_size=3,
                              activation_fn=None,
                              in_layers=[in_layer])
            in_layer = BatchNorm(in_layers=[in_layer])
            in_layer = ReLU(in_layers=[in_layer])
            # 1x1 projection back up to int(n_init_kernel * 2**ct_module)
            in_layer = Conv2D(int(self.n_init_kernel * 2**ct_module),
                              kernel_size=1,
                              activation_fn=None,
                              in_layers=[in_layer])
            res_a = BatchNorm(in_layers=[in_layer])

            # identity shortcut; assumes res_in already has
            # int(n_init_kernel * 2**ct_module) channels -- TODO confirm
            res_out = res_in + res_a
            # strided conv halves the spatial size and doubles channels
            res_in = Conv2D(int(self.n_init_kernel * 2**(ct_module + 1)),
                            kernel_size=3,
                            stride=2,
                            in_layers=[res_out])
            res_in = BatchNorm(in_layers=[res_in])

        # max pooling over the final outcome (global, over both spatial axes)
        in_layer = ReduceMax(axis=(1, 2), in_layers=[res_in])

        for layer_size in self.n_fully_connected:
            # fully connected layers
            in_layer = Dense(layer_size,
                             activation_fn=tf.nn.relu,
                             in_layers=[in_layer])
            # dropout for dense layers
            #in_layer = Dropout(0.25, in_layers=[in_layer])

        # one logit per (task, class); reshaped for per-task softmax
        logit_pred = Dense(self.n_tasks * self.n_classes,
                           activation_fn=None,
                           in_layers=[in_layer])
        logit_pred = Reshape(shape=(None, self.n_tasks, self.n_classes),
                             in_layers=[logit_pred])

        weights = Weights(shape=(None, self.n_tasks))
        labels = Label(shape=(None, self.n_tasks), dtype=tf.int32)

        output = SoftMax(logit_pred)
        self.add_output(output)
        # loss on raw logits (not the softmaxed output), weighted per sample
        loss = SparseSoftMaxCrossEntropy(in_layers=[labels, logit_pred])
        weighted_loss = WeightedError(in_layers=[loss, weights])

        # weight decay regularizer
        # weighted_loss = WeightDecay(0.1, 'l2', in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
Example #6
0
    def __init__(self,
                 img_rows=512,
                 img_cols=512,
                 filters=[64, 128, 256, 512, 1024],
                 **kwargs):
        """Build a U-Net segmentation graph: a 5-depth contracting path of
        double 3x3 ReLU convolutions with 2x2 max pooling, a 4-depth
        expanding path of stride-2 transposed convolutions with channel-axis
        skip concatenations, a 1x1 sigmoid head, and a mean
        softmax-cross-entropy loss against flattened per-pixel labels.
        """
        super(UNet, self).__init__(use_queue=False, **kwargs)
        self.img_cols = img_cols
        self.img_rows = img_rows
        self.filters = filters

        image_in = Feature(shape=(None, self.img_rows, self.img_cols, 3))
        labels = Label(shape=(None, self.img_rows * self.img_cols))

        def _double_conv(layer, num_filters):
            # Two stacked 3x3 same-padded ReLU convolutions.
            for _ in range(2):
                layer = Conv2D(num_outputs=num_filters,
                               kernel_size=3,
                               activation='relu',
                               padding='same',
                               in_layers=[layer])
            return layer

        # Contracting path: remember each depth's output for the skips;
        # pool after every depth except the bottleneck.
        encoder_out = []
        layer = image_in
        for depth in range(5):
            layer = _double_conv(layer, self.filters[depth])
            encoder_out.append(layer)
            if depth < 4:
                layer = MaxPool2D(ksize=[1, 2, 2, 1], in_layers=[layer])

        # Expanding path: upsample, concatenate the matching encoder output
        # on the channel axis, then run another double convolution.
        for depth in range(3, -1, -1):
            upsampled = Conv2DTranspose(num_outputs=self.filters[depth],
                                        kernel_size=2,
                                        stride=2,
                                        in_layers=[layer])
            layer = Concat(in_layers=[encoder_out[depth], upsampled], axis=3)
            layer = _double_conv(layer, self.filters[depth])

        # 1x1 sigmoid head producing a single-channel mask.
        conv10 = Conv2D(num_outputs=1,
                        kernel_size=1,
                        activation='sigmoid',
                        in_layers=[layer])

        loss = SoftMaxCrossEntropy(in_layers=[labels, conv10])
        loss = ReduceMean(in_layers=[loss])
        self.set_loss(loss)
        self.add_output(conv10)