Example #1
    def test_conv_module(self):
        model = ConvModule(64, kernel_size=(3, 3))
        self.y = tf.random_normal((1, 300, 400, 64))
        print("ConvModule (3,3)x64 Average Time: {:.3f}".format(
            model_perf(model, self.x)))
        print("ConvModule (3,3)x64 Average Train Time: {:.3f}".format(
            model_train_perf(model, self.x, self.y)))
        print("ConvModule (3,3)x64 Parameters: {:,d}".format(
            model_params(model)))

        model = ConvModule(64, kernel_size=(1, 1))
        print("ConvModule (1,1)x64 Average Time: {:.3f}".format(
            model_perf(model, self.x)))
        print("ConvModule (1,1)x64 Average Train Time: {:.3f}".format(
            model_train_perf(model, self.x, self.y)))
        print("ConvModule (1,1)x64 Parameters: {:,d}".format(
            model_params(model)))

        model = ConvModule(128, kernel_size=(3, 3))
        print("ConvModule (3,3)x128 Average Time: {:.3f}".format(
            model_perf(model, self.x)))
        print("ConvModule (3,3)128 Average Train Time: {:.3f}".format(
            model_train_perf(model, self.x, self.y)))
        print("ConvModule (3,3)x128 Parameters: {:,d}".format(
            model_params(model)))

        model = ConvModule(128, kernel_size=(1, 1))
        print("ConvModule (1,1)x128 Average Time: {:.3f}".format(
            model_perf(model, self.x)))
        print("ConvModule (1,1)128 Average Train Time: {:.3f}".format(
            model_train_perf(model, self.x, self.y)))
        print("ConvModule (1,1)x128 Parameters: {:,d}".format(
            model_params(model)))
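
This test assumes helper functions `model_perf`, `model_train_perf`, and `model_params` that are not part of the excerpt. A minimal sketch of what they might look like, assuming TF 1.x eager execution to match the rest of these examples; the names, signatures, and iteration counts are inferred from the calls above, not taken from the source:

import time
import numpy as np
import tensorflow as tf

def model_perf(model, x, num_iters=100):
    # Hypothetical: average forward-pass time in seconds over num_iters runs.
    model(x)  # untimed warm-up; also builds the layer's variables
    start = time.time()
    for _ in range(num_iters):
        model(x)
    return (time.time() - start) / num_iters

def model_train_perf(model, x, y, num_iters=10):
    # Hypothetical: average time per training step (MSE loss + Adam).
    optimizer = tf.train.AdamOptimizer()
    model(x)  # build variables before timing
    start = time.time()
    for _ in range(num_iters):
        with tf.GradientTape() as tape:
            loss = tf.losses.mean_squared_error(y, model(x))
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return (time.time() - start) / num_iters

def model_params(model):
    # Hypothetical: total trainable parameter count.
    return int(sum(np.prod(v.shape.as_list())
                   for v in model.trainable_variables))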
Example #2
    def __init__(self, start_filter=64, *args, **kwargs):
        super().__init__(*args, **kwargs)
        filters = start_filter
        self.down1 = UNetConvAndDownModule(filters=filters)
        self.pool1 = tf.keras.layers.MaxPool2D()
        self.down2 = UNetConvAndDownModule(filters=filters * 2)
        self.pool2 = tf.keras.layers.MaxPool2D()
        self.down3 = UNetConvAndDownModule(filters=filters * 4)
        self.pool3 = tf.keras.layers.MaxPool2D()
        self.down4 = UNetConvAndDownModule(filters=filters * 8)
        self.pool4 = tf.keras.layers.MaxPool2D()

        self.up5 = UNetConvAndUpModule(filters=filters * 16)

        self.concat4 = tf.keras.layers.Concatenate()
        self.up4 = UNetConvAndUpModule(filters=filters * 8)
        self.concat3 = tf.keras.layers.Concatenate()
        self.up3 = UNetConvAndUpModule(filters=filters * 4)
        self.concat2 = tf.keras.layers.Concatenate()
        self.up2 = UNetConvAndUpModule(filters=filters * 2)
        self.concat1 = tf.keras.layers.Concatenate()

        self.up1_conv0 = ConvModule(filters=filters)
        self.up1_conv1 = ConvModule(filters=filters)
        self.up1_conv2 = tf.keras.layers.Conv2D(3,
                                                kernel_size=(3, 3),
                                                padding='same',
                                                activation=tf.nn.softmax)
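
Only the constructor is shown. Given the companion modules in Examples #13 and #14 (the down module is two convs with no internal pooling; the up module ends in an UpSampling2D), one plausible `call()` wiring, offered as a sketch rather than the author's actual method:

    def call(self, inputs, training=False):
        # Encoder: conv blocks, keeping pre-pool activations as skips.
        d1 = self.down1(inputs)
        d2 = self.down2(self.pool1(d1))
        d3 = self.down3(self.pool2(d2))
        d4 = self.down4(self.pool3(d3))

        # Bottleneck (up5 upsamples back to d4's resolution), then the
        # decoder concatenates each skip before the next up module.
        u5 = self.up5(self.pool4(d4))
        u4 = self.up4(self.concat4([d4, u5]))
        u3 = self.up3(self.concat3([d3, u4]))
        u2 = self.up2(self.concat2([d2, u3]))

        x = self.concat1([d1, u2])
        x = self.up1_conv0(x)
        x = self.up1_conv1(x)
        return self.up1_conv2(x)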
Example #3
  def __init__(self, reduce_depth, expand_depth, dropout_rate, dilation_rate=(2, 2)):
    super(DilatedModule, self).__init__()
    self.conv1 = ConvModule(reduce_depth, kernel_size=(1, 1))
    self.conv2 = ConvModule(expand_depth, kernel_size=(3, 3), dilation_rate=dilation_rate)
    self.conv3 = ConvModule(expand_depth, kernel_size=(1, 1))

    self.regularizer = tf.keras.layers.SpatialDropout2D(dropout_rate)
    self.prelu_last = tf.keras.layers.PReLU()
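
The declared layers (1x1 reduce, dilated 3x3, 1x1 expand, spatial dropout, trailing PReLU) match an ENet-style dilated bottleneck, so a residual `call()` along these lines is a reasonable guess; this is an assumption, the actual method is not in the excerpt:

  def call(self, inputs, training=False):
    # Bottleneck branch: 1x1 reduce -> dilated 3x3 -> 1x1 expand.
    x = self.conv1(inputs)
    x = self.conv2(x)
    x = self.conv3(x)
    x = self.regularizer(x, training=training)
    # Residual add (assumes expand_depth matches the input channels).
    return self.prelu_last(inputs + x)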
Example #4
  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.dice_loss = True
    self.initial = InitialModule(filters=29)

    self.down1_1 = ConvolutionModule(reduce_depth=64, expand_depth=64, dropout_rate=0.01, down_sample=True)
    self.down1_2 = ConvolutionModule(reduce_depth=64, expand_depth=64, dropout_rate=0.01, down_sample=False)
    self.down1_3 = ConvolutionModule(reduce_depth=64, expand_depth=64, dropout_rate=0.01, down_sample=False)
    self.down1_4 = ConvolutionModule(reduce_depth=64, expand_depth=64, dropout_rate=0.01, down_sample=False)
    self.down1_5 = ConvolutionModule(reduce_depth=64, expand_depth=64, dropout_rate=0.01, down_sample=False)

    self.down2_0 = ConvolutionModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, down_sample=True)
    self.down2_1 = ConvolutionModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, down_sample=False)
    self.down2_2 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(2, 2))
    self.down2_3 = ConvolutionModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, down_sample=False, factorize=True)
    self.down2_4 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(4, 4))
    self.down2_5 = ConvolutionModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, down_sample=False)
    self.down2_6 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(8, 8))
    self.down2_7 = ConvolutionModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, down_sample=False, factorize=True)
    self.down2_8 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(16, 16))
    self.down2_9 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(8, 8))
    self.down2_10 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(4, 4))
    self.down2_11 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(2, 2))
    self.down2_12 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(1, 1))

    # self.down3_0 = ConvolutionModule(reduce_depth=32, expand_depth=128, dropout_rate=0.1, down_sample=True)
    self.down3_1 = ConvolutionModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, down_sample=False)
    self.down3_2 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(2, 2))
    self.down3_3 = ConvolutionModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, down_sample=False, factorize=True)
    self.down3_4 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(4, 4))
    self.down3_5 = ConvolutionModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, down_sample=False)
    self.down3_6 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(8, 8))
    self.down3_7 = ConvolutionModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, down_sample=False, factorize=True)
    self.down3_8 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(16, 16))
    self.down3_9 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(8, 8))
    self.down3_10 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(4, 4))
    self.down3_11 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(2, 2))
    self.down3_12 = DilatedModule(reduce_depth=128, expand_depth=128, dropout_rate=0.1, dilation_rate=(1, 1))

    # should we repeat section 2 without down2_0?

    self.up1_concat = tf.keras.layers.Concatenate()
    self.up1_0_1x1 = ConvModule(64, kernel_size=(1, 1))
    self.up1_0 = UpSampleModule(reduce_depth=64, expand_depth=64, dropout_rate=0.1)
    self.up1_1 = ConvolutionModule(reduce_depth=64, expand_depth=64, dropout_rate=0.1, down_sample=False)
    self.up1_2 = ConvolutionModule(reduce_depth=64, expand_depth=64, dropout_rate=0.1, down_sample=False)

    self.up2_concat = tf.keras.layers.Concatenate()
    self.up2_0_1x1 = ConvModule(32, kernel_size=(1, 1))
    self.up2_0 = UpSampleModule(reduce_depth=32, expand_depth=32, dropout_rate=0.1)
    self.up2_1 = ConvolutionModule(reduce_depth=32, expand_depth=32, dropout_rate=0.1, down_sample=False)

    self.up3_0 = UpSampleModule(reduce_depth=32, expand_depth=32, dropout_rate=0.1)
    self.final_0 = tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
    self.final_1 = tf.keras.layers.Conv2D(3, kernel_size=(3, 3), padding='same', activation=tf.nn.softmax)
    self.final_concat = tf.keras.layers.Concatenate()
Example #5
    def test_conv_module(self):
        model = ConvModule(64, kernel_size=(3, 3))
        print("ConvModule (3,3)x64 Average Time: {:.3f}".format(
            model_perf(model, self.x)))

        model = ConvModule(64, kernel_size=(1, 1))
        print("ConvModule (1,1)x64 Average Time: {:.3f}".format(
            model_perf(model, self.x)))

        model = ConvModule(128, kernel_size=(3, 3))
        print("ConvModule (3,3)x128 Average Time: {:.3f}".format(
            model_perf(model, self.x)))

        model = ConvModule(128, kernel_size=(1, 1))
        print("ConvModule (1,1)x128 Average Time: {:.3f}".format(
            model_perf(model, self.x)))
Example #6
  def __init__(self, reduce_depth, expand_depth, dropout_rate):
    super(UpSampleModule, self).__init__()
    # self.upconv1 = tf.keras.layers.Conv2D(expand_depth, kernel_size=(1, 1), padding='same')
    self.upbn1 = tf.keras.layers.BatchNormalization()
    self.upsample = tf.keras.layers.Conv2DTranspose(expand_depth, kernel_size=(3, 3), strides=(2, 2), padding='same')
    self.prelu0 = tf.keras.layers.PReLU()

    self.conv1 = ConvModule(reduce_depth, kernel_size=(1, 1))

    self.conv2 = tf.keras.layers.Conv2DTranspose(expand_depth, kernel_size=(3, 3), strides=(2, 2), padding='same')
    self.prelu2 = tf.keras.layers.PReLU()
    self.bn2 = tf.keras.layers.BatchNormalization()

    self.conv3 = ConvModule(expand_depth, kernel_size=(1, 1))

    self.regularizer = tf.keras.layers.SpatialDropout2D(dropout_rate)
    self.prelu_last = tf.keras.layers.PReLU()
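
As with the other bottlenecks, only `__init__` is shown. The layer pairs suggest an ENet-style upsampling bottleneck with a transpose-conv skip branch (the commented-out `upconv1` hints that branch has been reworked at some point). Purely as a sketch under that assumption:

  def call(self, inputs, training=False):
    # Skip branch: BN, then a strided transpose conv to double H and W.
    skip = self.prelu0(self.upsample(self.upbn1(inputs)))
    # Main branch: 1x1 reduce -> transpose conv -> 1x1 expand -> dropout.
    x = self.conv1(inputs)
    x = self.prelu2(self.bn2(self.conv2(x)))
    x = self.conv3(x)
    x = self.regularizer(x, training=training)
    # Merge the branches and apply the final PReLU.
    return self.prelu_last(skip + x)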
Example #7
    def test_keras_model(self):
        inputs = tf.keras.layers.Input(shape=(300, 400, 3))
        out = ConvModule(64, kernel_size=(3, 3))(inputs)
        # out = ConvModule(64, kernel_size=(3, 3))(out)

        model = tf.keras.Model(inputs=inputs, outputs=out)
        optimizer = tf.keras.optimizers.Adam()

        model.compile(optimizer, loss='mean_squared_error')
        # model.summary()

        num_iters = 100  # renamed from `iter` to avoid shadowing the builtin
        now = time.time()
        for _ in range(num_iters):
            y = model.predict_on_batch(self.x)
        duration = (time.time() - now) / num_iters
        print("Time taken on keras model:", duration)
Example #8
def freeze_model():
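    # Assumptions for this excerpt: `tfe` is tf.contrib.eager (TF 1.x eager
    # execution) and `flags.model_dir` is defined elsewhere in the module.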
    x = tf.random_normal((1, 300, 400, 3))
    model = ConvModule(64, kernel_size=(3, 3))
    adam = tf.train.AdamOptimizer()
    checkpoint_prefix = os.path.join(flags.model_dir, 'ckpt')
    global_step = tf.train.get_or_create_global_step()

    y = model(x)

    print("y:", y.shape)

    checkpoint = tfe.Checkpoint(model=model,
                                optimizer=adam,
                                step_counter=global_step)
    checkpoint.restore(tf.train.latest_checkpoint(flags.model_dir))

    print("Global_step:", global_step)

    checkpoint.save(checkpoint_prefix)
Example #9
    def _add_conv_fc_branch(self,
                            num_branch_convs,
                            num_branch_fcs,
                            in_channels,
                            is_shared=False):
        """Add shared or separable branch

        convs -> avg pool (optional) -> fcs
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if num_branch_convs > 0:
            for i in range(num_branch_convs):
                conv_in_channels = (last_layer_dim
                                    if i == 0 else self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        normalize=self.normalize,
                        bias=self.with_bias))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if num_branch_fcs > 0:
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if (is_shared
                    or self.num_shared_fcs == 0) and not self.with_avg_pool:
                last_layer_dim *= (self.roi_feat_size * self.roi_feat_size)
            for i in range(num_branch_fcs):
                fc_in_channels = (last_layer_dim
                                  if i == 0 else self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return branch_convs, branch_fcs, last_layer_dim
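
In mmdetection this helper is called from the `ConvFCBBoxHead` constructor to build the shared tower and the per-branch (cls/reg) towers. An illustrative call site, hedged since it is not part of this excerpt and the attribute names are only the usual mmdetection ones:

        # Illustrative, following mmdetection's ConvFCBBoxHead pattern:
        self.shared_convs, self.shared_fcs, last_dim = \
            self._add_conv_fc_branch(self.num_shared_convs,
                                     self.num_shared_fcs,
                                     self.in_channels,
                                     is_shared=True)
        self.cls_convs, self.cls_fcs, self.cls_last_dim = \
            self._add_conv_fc_branch(self.num_cls_convs,
                                     self.num_cls_fcs, last_dim)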
Example #10
  def __init__(self, reduce_depth, expand_depth, dropout_rate=0., down_sample=False, factorize=False):
    super(ConvolutionModule, self).__init__()
    self.down_sample = down_sample
    self.factorize = factorize
    strides = (1, 1)
    if down_sample:
      strides = (2, 2)
      self.maxpool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=strides, padding='same')

    self.conv1 = ConvModule(reduce_depth, kernel_size=(1, 1))

    if factorize:
      self.conv21 = ConvModule(expand_depth, kernel_size=(5, 1), strides=strides)
      self.conv22 = ConvModule(expand_depth, kernel_size=(1, 5), strides=strides)
    else:
      self.conv2 = ConvModule(expand_depth, kernel_size=(3, 3), strides=strides)

    self.conv3 = ConvModule(expand_depth, kernel_size=(1, 1))

    self.regularizer = tf.keras.layers.SpatialDropout2D(dropout_rate)
    self.prelu_last = tf.keras.layers.PReLU()

    self.concat = tf.keras.layers.Concatenate()
    self.conv4 = ConvModule(expand_depth, kernel_size=(1, 1))
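
The constructor declares an optional max-pool skip path plus an unconditional concat/1x1 merge, so one plausible `call()`, offered only as a sketch, is below. Note that `factorize=True` together with `down_sample=True` would apply the stride in both `conv21` and `conv22`, so the branches would no longer line up; presumably that is why Example #4 only ever combines `factorize` with `down_sample=False`.

  def call(self, inputs, training=False):
    # Main branch: 1x1 reduce, (optionally factorized) 3x3, 1x1 expand.
    x = self.conv1(inputs)
    if self.factorize:
      x = self.conv22(self.conv21(x))
    else:
      x = self.conv2(x)
    x = self.conv3(x)
    x = self.regularizer(x, training=training)

    # Skip branch: max-pool when downsampling so shapes still match.
    skip = self.maxpool(inputs) if self.down_sample else inputs
    x = self.conv4(self.concat([x, skip]))
    return self.prelu_last(x)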
Example #11
  def __init__(self, filters):
    super(InitialModule, self).__init__()
    self.conv = ConvModule(filters=filters, kernel_size=(3, 3), strides=(2, 2))
    self.maxpool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), padding='same')
    self.concat = tf.keras.layers.Concatenate()
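
This is the ENet initial block: a strided 3x3 conv concatenated with a max-pooled copy of the input, so the output carries filters + input_channels channels. That explains `InitialModule(filters=29)` in Example #4: 29 conv channels plus 3 RGB channels gives 32. A one-line sketch of the corresponding `call()`, assuming this standard wiring:

  def call(self, inputs):
    # Concatenate the strided conv features with the pooled input.
    return self.concat([self.conv(inputs), self.maxpool(inputs)])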
Example #12
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 normalize=None,
                 activation=None):
        super(FPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.activation = activation
        self.with_bias = normalize is None

        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < inputs, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs

        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()

        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(in_channels[i],
                                out_channels,
                                1,
                                normalize=normalize,
                                bias=self.with_bias,
                                activation=self.activation,
                                inplace=False)
            fpn_conv = ConvModule(out_channels,
                                  out_channels,
                                  3,
                                  padding=1,
                                  normalize=normalize,
                                  bias=self.with_bias,
                                  activation=self.activation,
                                  inplace=False)

            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

            # lvl_id = i - self.start_level
            # setattr(self, 'lateral_conv{}'.format(lvl_id), l_conv)
            # setattr(self, 'fpn_conv{}'.format(lvl_id), fpn_conv)

        # add extra conv layers (e.g., RetinaNet)
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        if add_extra_convs and extra_levels >= 1:
            for i in range(extra_levels):
                in_channels = (self.in_channels[self.backbone_end_level -
                                                1] if i == 0 else out_channels)
                extra_fpn_conv = ConvModule(in_channels,
                                            out_channels,
                                            3,
                                            stride=2,
                                            padding=1,
                                            normalize=normalize,
                                            bias=self.with_bias,
                                            activation=self.activation,
                                            inplace=False)
                self.fpn_convs.append(extra_fpn_conv)
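
The constructor above follows mmdetection's FPN. For reference, the matching forward pass builds laterals, runs a top-down pathway, and smooths each merged map with the 3x3 fpn_convs. A condensed sketch (it assumes `import torch.nn.functional as F` and omits the extra-level handling):

    def forward(self, inputs):
        # Lateral 1x1 convs on the selected backbone levels.
        laterals = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        # Top-down pathway: upsample coarser maps and add them in.
        for i in range(len(laterals) - 1, 0, -1):
            laterals[i - 1] = laterals[i - 1] + F.interpolate(
                laterals[i], scale_factor=2, mode='nearest')
        # 3x3 convs smooth each merged map.
        return tuple(self.fpn_convs[i](laterals[i])
                     for i in range(len(laterals)))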
Example #13
    def __init__(self, filters=64, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # self.conv0 = SpatialConvModule(filters)
        # self.conv1 = SpatialConvModule(filters)
        self.conv0 = ConvModule(filters)
        self.conv1 = ConvModule(filters, dilation_rate=(2, 2))
Example #14
    def __init__(self, filters=64, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.conv0 = ConvModule(filters)
        self.conv1 = ConvModule(filters, dilation_rate=(2, 2))
        self.upsample = tf.keras.layers.UpSampling2D()