Code example #1
File: densenet.py  Project: NobleKennamer/DeepPoseKit
    def __init__(self, compression_factor=0.5, **kwargs):
        super().__init__(**kwargs)
        self.concat = Concatenate()
        self.compression_factor = compression_factor
        # SubPixelUpscaling is DeepPoseKit's pixel-shuffle upsampling layer;
        # layers.UpSampling2D(interpolation='bilinear') is the alternative
        # noted in the original comment.
        self.upsample = SubPixelUpscaling()
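
The snippet above is only the constructor of a custom Keras layer, so the surrounding class is missing. Below is a minimal stand-alone sketch of how such a layer could be completed; the class name, the `call` signature taking `[features, skip]`, and the use of `UpSampling2D` in place of DeepPoseKit's `SubPixelUpscaling` are assumptions for illustration, not code from densenet.py.

import tensorflow as tf
from tensorflow.keras.layers import Concatenate, UpSampling2D


class TransitionUpSketch(tf.keras.layers.Layer):
    """Hypothetical transition layer: upsample features, then concatenate a skip tensor."""

    def __init__(self, compression_factor=0.5, **kwargs):
        super().__init__(**kwargs)
        self.concat = Concatenate()
        # In DenseNet-style transitions, compression_factor usually controls how
        # much the channel count is reduced; it is only stored in this sketch.
        self.compression_factor = compression_factor
        # Bilinear upsampling stands in for SubPixelUpscaling here.
        self.upsample = UpSampling2D(interpolation="bilinear")

    def call(self, inputs):
        # inputs: [low-resolution features, same-resolution skip tensor]
        features, skip = inputs
        return self.concat([self.upsample(features), skip])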
Code example #2
    def __init_model__(self):

        batch_shape = (
            None,
            self.train_generator.height,
            self.train_generator.width,
            self.train_generator.n_channels,
        )

        input_layer = Input(batch_shape=batch_shape, dtype="uint8")
        to_float = Float()(input_layer)
        if batch_shape[-1] == 1:
            # Grayscale input: replicate the single channel to three for the
            # ImageNet-pretrained backbone.
            to_float = Concatenate()([to_float] * 3)
        if self.backbone in MODELS:
            normalized = ImageNetPreprocess(self.backbone)(to_float)
        else:
            raise ValueError(
                "backbone model {} is not supported. Must be one of {}".format(
                    self.backbone, list(MODELS.keys())))
        backbone = MODELS[self.backbone]
        # The backbone name was validated above, so the ImageNet input shape
        # can be set directly.
        input_shape = (self.train_generator.height,
                       self.train_generator.width, 3)
        if self.backbone.startswith("mobile"):
            # MobileNet variants infer their input shape and take a width
            # multiplier alpha.
            input_shape = None
            backbone = partial(backbone, alpha=self.alpha)
        pretrained_model = backbone(include_top=False,
                                    weights=self.weights,
                                    input_shape=input_shape)
        pretrained_features = pretrained_model(normalized)
        if self.train_generator.downsample_factor == 4:
            # No upsampling: predict directly from the backbone features.
            x = pretrained_features
            x_out = Conv2D(self.train_generator.n_output_channels, (1, 1))(x)
        elif self.train_generator.downsample_factor == 3:
            # Upsample 2x with one transposed convolution.
            x = pretrained_features
            x_out = Conv2DTranspose(
                self.train_generator.n_output_channels,
                (3, 3),
                strides=(2, 2),
                padding="same",
            )(x)
        elif self.train_generator.downsample_factor == 2:
            # Upsample 4x: subpixel upscaling (2x), then a transposed convolution (2x).
            x = pretrained_features
            x = SubPixelUpscaling()(x)
            x_out = Conv2DTranspose(
                self.train_generator.n_output_channels,
                (3, 3),
                strides=(2, 2),
                padding="same",
            )(x)
        else:
            raise ValueError(
                "`downsample_factor={}` is not supported for DeepLabCut. Adjust your TrainingGenerator"
                .format(self.train_generator.downsample_factor))

        self.train_model = Model(input_layer,
                                 x_out,
                                 name=self.__class__.__name__)
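
To read the head-construction logic above in isolation, here is a hedged stand-alone sketch of the `downsample_factor` branching. The helper name `build_output_head` is illustrative rather than part of DeepPoseKit, and plain `UpSampling2D` stands in for the project's `SubPixelUpscaling` layer:

from tensorflow.keras.layers import Conv2D, Conv2DTranspose, UpSampling2D


def build_output_head(features, n_output_channels, downsample_factor):
    """Illustrative version of the branching in __init_model__ above."""
    if downsample_factor == 4:
        # No upsampling: predict directly from the backbone features.
        return Conv2D(n_output_channels, (1, 1))(features)
    elif downsample_factor == 3:
        # Upsample 2x with one transposed convolution.
        return Conv2DTranspose(n_output_channels, (3, 3),
                               strides=(2, 2), padding="same")(features)
    elif downsample_factor == 2:
        # Upsample 4x: 2x upsampling, then a 2x transposed convolution.
        x = UpSampling2D()(features)
        return Conv2DTranspose(n_output_channels, (3, 3),
                               strides=(2, 2), padding="same")(x)
    raise ValueError(
        "unsupported downsample_factor: {}".format(downsample_factor))

Each step down in `downsample_factor` doubles the amount of upsampling applied to the backbone features before the output convolution.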