def build_saveuav_large(self):
        """Build the large SaveUAV U-Net-style segmentation model.

        Three encoder blocks (64/128/256 filters), a dilated bottleneck,
        and a mirrored decoder with skip connections.

        Returns:
            A keras Model mapping (chip_size, chip_size, 3) images to
            per-pixel softmax scores over 6 classes.
        """
        num_filters = 64
        size = self.chip_size
        inputs = layers.Input((size, size, 3))  # renamed: don't shadow builtin `input`

        # --- Encoder: (conv, conv, maxpool) x 3, doubling filters each level ---
        down1 = layers.Conv2D(filters=num_filters,
                              kernel_size=3,
                              activation="relu",
                              padding="same")(inputs)
        down1 = layers.Conv2D(filters=num_filters,
                              kernel_size=3,
                              activation="relu",
                              padding="same")(down1)
        down1pool = layers.MaxPool2D(padding='same')(down1)

        down2 = layers.Conv2D(filters=num_filters * 2,
                              kernel_size=3,
                              activation="relu",
                              padding="same")(down1pool)
        # BUGFIX: the second conv previously dropped back to `num_filters`;
        # keep the whole block at num_filters * 2 like a standard U-Net.
        down2 = layers.Conv2D(filters=num_filters * 2,
                              kernel_size=3,
                              activation="relu",
                              padding="same")(down2)
        down2pool = layers.MaxPool2D(padding='same')(down2)

        down3 = layers.Conv2D(filters=num_filters * 4,
                              kernel_size=3,
                              activation="relu",
                              padding="same")(down2pool)
        # BUGFIX: likewise keep this block at num_filters * 4.
        down3 = layers.Conv2D(filters=num_filters * 4,
                              kernel_size=3,
                              activation="relu",
                              padding="same")(down3)
        down3pool = layers.MaxPool2D(padding='same')(down3)

        # Dilated bottleneck between encoder and decoder (project helper).
        bottleneck = self.build_bottleneck_block(num_filters, down3pool)

        # --- Decoder: transpose-conv upsample, skip-concat, then 3 convs ---
        up3 = layers.Conv2DTranspose(filters=num_filters * 4,
                                     kernel_size=3,
                                     strides=(2, 2),
                                     activation="relu",
                                     padding="same")(bottleneck)
        up3 = layers.Concatenate()([up3, down3])
        for _ in range(3):
            up3 = layers.Conv2D(filters=num_filters * 4,
                                kernel_size=3,
                                activation="relu",
                                padding="same")(up3)

        up2 = layers.Conv2DTranspose(filters=num_filters * 2,
                                     kernel_size=3,
                                     strides=(2, 2),
                                     activation="relu",
                                     padding="same")(up3)
        up2 = layers.Concatenate()([up2, down2])
        for _ in range(3):
            up2 = layers.Conv2D(filters=num_filters * 2,
                                kernel_size=3,
                                activation="relu",
                                padding="same")(up2)

        up1 = layers.Conv2DTranspose(filters=num_filters,
                                     kernel_size=3,
                                     strides=(2, 2),
                                     activation="relu",
                                     padding="same")(up2)
        up1 = layers.Concatenate()([up1, down1])
        for _ in range(3):
            up1 = layers.Conv2D(filters=num_filters,
                                kernel_size=3,
                                activation="relu",
                                padding="same")(up1)

        # 1x1 conv to 6 class channels, softmax over the channel axis.
        final_conv = layers.Conv2D(filters=6, kernel_size=1)(up1)
        outputs = layers.Activation("softmax")(final_conv)

        return models.Model(inputs, outputs)
Пример #2
0
def train_m():
    """Run one wandb sweep trial: read the sweep config and train MyCNN."""
    run = wandb.init()
    cfg = run.config
    cnn = MyCNN(cfg.Dropout, cfg.Layer1, cfg.Layer2, cfg.Layer3,
                cfg.Layer4, cfg.Layer5, cfg.Batch_Normalisation,
                cfg.Augmentation)
    cnn.train()
    
  

# Launch the sweep agent: wandb calls train_m once per hyperparameter trial.
wandb.agent(sweep_id,train_m)

# Standalone Sequential CNN definition for 224x224 RGB input.
input_shape=(224,224,3)
# Per-stage conv filter counts; Layer[4] is not used in the visible code.
Layer=[16,256,32,16,128]
model =models.Sequential()
# Stage 1: conv -> relu -> 2x2 max-pool -> batch norm
model.add(layers.Conv2D(Layer[0], (3, 3), input_shape=input_shape))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.BatchNormalization())


# Stage 2
model.add(layers.Conv2D(Layer[1], (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.BatchNormalization())


# Stage 3
model.add(layers.Conv2D(Layer[2], (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.BatchNormalization())


# Stage 4
model.add(layers.Conv2D(Layer[3], (3, 3), activation='relu'))
Пример #3
0
def generator_model(nclasses=101, gen_arch='p4_food101', latent_dim=64):
    """
    Return a tf image generator model.

    Args:
        nclasses: int
            Number of categories in the image set.
        gen_arch: str
            Generator arch in format "x_y" where x in {"z2", "p4", "p4m"}
            and y in {"anhir", "lysto", "rotmnist", "cifar10", "food101"}
        latent_dim: int
            Dimensionality of Gaussian latents.

    Returns:
        A keras Model mapping [latent noise, one-hot label] to a
        tanh-activated image batch.

    Raises:
        ValueError: if ``gen_arch`` is not one of the supported options.
    """

    # Get latent vector:
    latent_vec = KL.Input(shape=(latent_dim, ))
    label_vec = KL.Input(shape=(nclasses, ))

    sc, proj_dim, proj_shape, labelemb_dim = generator_dimensionality(gen_arch)
    label_proj = KL.Dense(  # Using SN here seems to lead to collapse
        labelemb_dim,
        use_bias=False,
    )(label_vec)

    # Concatenate noise and condition feature maps to modulate the generator:
    cla = KL.concatenate([latent_vec, label_proj])

    # Project and reshape to spatial feature maps:
    gen = SN(KL.Dense(proj_dim))(cla)
    gen = KL.Reshape(proj_shape)(gen)

    # Architecture dispatch: each branch below builds `op`, the pre-tanh
    # output feature map. "p4"/"p4m" branches use group-equivariant convs
    # (C4/D4) followed by a group max-pool; "z2" branches are ordinary convs.
    if gen_arch == 'p4m_anhir':
        # (Residual) Convolutions + Upsampling:
        fea = ResBlockG_film(
            gen,
            cla,
            int(512 // sc),
            h_input='Z2',
            h_output='D4',
            pad='same',
        )
        for ch in [256, 128, 64, 32]:
            fea = ResBlockG_film(
                fea,
                cla,
                int(ch // sc),
                h_input='D4',
                h_output='D4',
                pad='same',
            )

        fea = GBatchNorm(h='D4', momentum=0.1)(fea)
        fea = KL.Activation('relu')(fea)

        fea = SN(
            GConv2D(3,
                    kernel_size=3,
                    h_input='D4',
                    h_output='D4',
                    padding='same',
                    kernel_initializer='orthogonal'))(fea)
        op = GroupMaxPool('D4')(fea)

    elif gen_arch == 'z2_anhir':
        # (Residual) Convolutions + Upsampling:
        fea = ResBlockG_film(
            gen,
            cla,
            512,
            h_input='Z2',
            h_output='Z2',
            pad='same',
            stride=1,
            group_equiv=False,
        )
        for ch in [256, 128, 64, 32]:
            fea = ResBlockG_film(
                fea,
                cla,
                ch,
                h_input='Z2',
                h_output='Z2',
                pad='same',
                group_equiv=False,
            )

        fea = KL.BatchNormalization(
            momentum=0.1,
            center=False,
            scale=False,
        )(fea)
        # Class-conditional batch norm supplies the scale/shift dropped above.
        fea = CCBN(fea, cla, 'Z2')
        fea = KL.Activation('relu')(fea)

        op = SN(KL.Conv2D(
            3,
            kernel_size=3,
            padding='same',
            use_bias=False,
        ))(fea)

    elif gen_arch == 'p4m_lysto':
        # (Residual) Convolutions + Upsampling:
        fea = ResBlockG_film(
            gen,
            cla,
            int(512 // sc),
            h_input='Z2',
            h_output='D4',
            pad='same',
        )
        for ch in [256, 128, 64, 32, 16]:
            fea = ResBlockG_film(
                fea,
                cla,
                int(ch // sc),
                h_input='D4',
                h_output='D4',
                pad='same',
            )

        fea = GBatchNorm(h='D4', momentum=0.1)(fea)
        fea = KL.Activation('relu')(fea)

        fea = SN(
            GConv2D(3,
                    kernel_size=3,
                    h_input='D4',
                    h_output='D4',
                    padding='same',
                    kernel_initializer='orthogonal'))(fea)
        op = GroupMaxPool('D4')(fea)

    elif gen_arch == 'z2_lysto':
        # (Residual) Convolutions + Upsampling:
        fea = ResBlockG_film(
            gen,
            cla,
            512,
            h_input='Z2',
            h_output='Z2',
            pad='same',
            group_equiv=False,
        )
        for ch in [256, 128, 64, 32, 16]:
            fea = ResBlockG_film(
                fea,
                cla,
                ch,
                h_input='Z2',
                h_output='Z2',
                pad='same',
                group_equiv=False,
            )

        fea = KL.BatchNormalization(momentum=0.1)(fea)
        fea = KL.Activation('relu')(fea)

        op = SN(KL.Conv2D(
            3,
            kernel_size=3,
            padding='same',
            use_bias=False,
        ))(fea)

    elif gen_arch == 'p4_rotmnist':
        # Convolutions + Upsampling (plain, non-residual stack):
        fea = SN(
            GConv2D(256,
                    kernel_size=3,
                    h_input='Z2',
                    h_output='C4',
                    padding='same'))(gen)
        fea = GShift(h='C4')(fea)
        fea = KL.Activation('relu')(fea)

        fea = KL.UpSampling2D()(fea)
        fea = SN(
            GConv2D(128,
                    kernel_size=3,
                    h_input='C4',
                    h_output='C4',
                    padding='same'))(fea)
        fea = GBatchNorm(h='C4', momentum=0.1, center=False, scale=False)(fea)
        fea = CCBN(fea, cla, 'C4')
        fea = KL.Activation('relu')(fea)

        fea = KL.UpSampling2D()(fea)
        fea = SN(
            GConv2D(64,
                    kernel_size=3,
                    h_input='C4',
                    h_output='C4',
                    padding='same'))(fea)
        fea = GBatchNorm(h='C4', momentum=0.1, center=False, scale=False)(fea)
        fea = CCBN(fea, cla, 'C4')
        fea = KL.Activation('relu')(fea)

        fea = SN(
            GConv2D(1,
                    kernel_size=3,
                    h_input='C4',
                    h_output='C4',
                    padding='same'))(fea)
        op = GroupMaxPool('C4')(fea)

    elif gen_arch == 'z2_rotmnist':
        # Convolutions + Upsampling (plain, non-residual stack):
        fea = SN(
            KL.Conv2D(512,
                      kernel_size=3,
                      padding='same',
                      use_bias=True,
                      kernel_initializer='orthogonal'))(gen)
        fea = KL.Activation('relu')(fea)

        fea = KL.UpSampling2D()(fea)
        fea = SN(
            KL.Conv2D(256,
                      kernel_size=3,
                      padding='same',
                      use_bias=False,
                      kernel_initializer='orthogonal'))(fea)
        fea = KL.BatchNormalization(
            momentum=0.1,
            center=False,
            scale=False,
        )(fea)
        fea = CCBN(fea, cla, 'Z2')
        fea = KL.Activation('relu')(fea)

        fea = KL.UpSampling2D()(fea)
        fea = SN(
            KL.Conv2D(128,
                      kernel_size=3,
                      padding='same',
                      use_bias=False,
                      kernel_initializer='orthogonal'))(fea)
        fea = KL.BatchNormalization(
            momentum=0.1,
            center=False,
            scale=False,
        )(fea)
        fea = CCBN(fea, cla, 'Z2')
        fea = KL.Activation('relu')(fea)

        op = SN(
            KL.Conv2D(1,
                      kernel_size=3,
                      padding='same',
                      use_bias=False,
                      kernel_initializer='orthogonal'))(fea)

    elif gen_arch == 'p4_food101':
        # (Residual) Convolutions + Upsampling:
        fea = ResBlockG_film(
            gen,
            cla,
            int(512 // sc),
            h_input='Z2',
            h_output='C4',
            pad='same',
        )
        for ch in [384, 256, 192]:
            fea = ResBlockG_film(
                fea,
                cla,
                int(ch // sc),
                h_input='C4',
                h_output='C4',
                pad='same',
            )

        fea = GBatchNorm(h='C4', momentum=0.1)(fea)
        fea = KL.Activation('relu')(fea)

        fea = SN(
            GConv2D(3,
                    kernel_size=3,
                    h_input='C4',
                    h_output='C4',
                    padding='same',
                    kernel_initializer='orthogonal'))(fea)
        op = GroupMaxPool('C4')(fea)

    elif gen_arch == 'z2_food101':
        # (Residual) Convolutions + Upsampling:
        fea = ResBlockG_film(gen,
                             cla,
                             512,
                             h_input='Z2',
                             h_output='Z2',
                             pad='same',
                             group_equiv=False)
        fea = ResBlockG_film(fea,
                             cla,
                             384,
                             h_input='Z2',
                             h_output='Z2',
                             pad='same',
                             group_equiv=False)
        fea = ResBlockG_film(fea,
                             cla,
                             256,
                             h_input='Z2',
                             h_output='Z2',
                             pad='same',
                             group_equiv=False)
        fea = ResBlockG_film(fea,
                             cla,
                             192,
                             h_input='Z2',
                             h_output='Z2',
                             pad='same',
                             group_equiv=False)

        fea = KL.BatchNormalization(momentum=0.1)(fea)
        fea = KL.Activation('relu')(fea)

        op = SN(
            KL.Conv2D(3,
                      kernel_size=3,
                      padding='same',
                      kernel_initializer='orthogonal'))(fea)

    elif gen_arch == 'p4_cifar10':
        # (Residual) Convolutions + Upsampling:
        fea = ResBlockG_film(
            gen,
            cla,
            int(256 // sc),
            h_input='Z2',
            h_output='C4',
            pad='same',
        )
        fea = ResBlockG_film(
            fea,
            cla,
            int(256 // sc),
            h_input='C4',
            h_output='C4',
            pad='same',
        )
        fea = ResBlockG_film(
            fea,
            cla,
            int(256 // sc),
            h_input='C4',
            h_output='C4',
            pad='same',
        )

        fea = GBatchNorm(h='C4', momentum=0.1)(fea)
        fea = KL.Activation('relu')(fea)

        fea = SN(
            GConv2D(3,
                    kernel_size=3,
                    h_input='C4',
                    h_output='C4',
                    padding='same',
                    kernel_initializer='orthogonal'))(fea)
        op = GroupMaxPool('C4')(fea)

    elif gen_arch == 'z2_cifar10':
        # (Residual) Convolutions + Upsampling:
        fea = ResBlockG_film(
            gen,
            cla,
            256,
            h_input='Z2',
            h_output='Z2',
            pad='same',
            group_equiv=False,
        )
        fea = ResBlockG_film(
            fea,
            cla,
            256,
            h_input='Z2',
            h_output='Z2',
            pad='same',
            stride=1,
            group_equiv=False,
        )
        fea = ResBlockG_film(
            fea,
            cla,
            256,
            h_input='Z2',
            h_output='Z2',
            pad='same',
            stride=1,
            group_equiv=False,
        )

        fea = KL.BatchNormalization(momentum=0.1)(fea)
        fea = KL.Activation('relu')(fea)

        op = SN(KL.Conv2D(
            3,
            kernel_size=3,
            padding='same',
            use_bias=False,
        ))(fea)

    else:
        raise ValueError('Generator Architecture Unrecognized')

    gen_img = KL.Activation('tanh')(op)  # Get final synthesized image batch

    # Generator model:
    generator = Model([latent_vec, label_vec], gen_img)

    return generator
Пример #4
0
def discriminator(input_size=256,
                  A_channel=3,
                  B_channel=3,
                  n_layers=0,
                  name="Discriminator"):
    """Build a pix2pix-style discriminator over concatenated A/B images.

    Args:
        input_size: spatial size of the square input images.
        A_channel: channel count of image A.
        B_channel: channel count of image B.
        n_layers: 0 builds the all-1x1 (PixelGAN) variant; otherwise a
            PatchGAN stack of strided 4x4 encoding blocks.
        name: prefix used for every layer name.

    Returns:
        A keras Model taking [image_A, image_B] and producing a sigmoid
        map of real/fake scores.
    """
    # Reference configurations (receptive field -> conv widths):
    # D1: C64-C128
    # D16: C64-C128
    # D70: C64-C128-C256-C512
    # D286: C64-C128-C256-C512-C512-C512

    def encoding_block(x,
                       filters=32,
                       ksize=(4, 4),
                       strides=(2, 2),
                       padding="valid",
                       use_act=True,
                       use_bn=True,
                       name="Encoding"):
        # Conv -> (BatchNorm) -> (LeakyReLU 0.2)
        x = layers.Conv2D(filters,
                          ksize,
                          strides,
                          padding,
                          name=name + "_Conv")(x)
        if use_bn:
            x = layers.BatchNormalization(name=name + "_BN")(x)
        if use_act:
            x = layers.LeakyReLU(0.2, name=name + "_Act")(x)
        return x

    input_layer_A = layers.Input(shape=(input_size, input_size, A_channel),
                                 name=name + "_Input_A")
    input_layer_B = layers.Input(shape=(input_size, input_size, B_channel),
                                 name=name + "_Input_B")

    input_layer = layers.Concatenate(name=name + "_Input_Combin")(
        [input_layer_A, input_layer_B])

    if n_layers == 0:
        # PixelGAN: all 1x1 convs, an independent decision per pixel.
        x = encoding_block(input_layer,
                           64,
                           ksize=(1, 1),
                           strides=(1, 1),
                           use_bn=False,
                           name=name + "_En1")
        # BUGFIX: these two block names were missing the underscore
        # ("En2"/"En3"), breaking the "_EnN" convention used elsewhere.
        x = encoding_block(x,
                           128,
                           ksize=(1, 1),
                           strides=(1, 1),
                           name=name + "_En2")
        x = encoding_block(x,
                           1,
                           ksize=(1, 1),
                           strides=(1, 1),
                           use_act=False,
                           use_bn=False,
                           name=name + "_En3")
        # BUGFIX: was `layers.Avtivation` (typo -> AttributeError at runtime),
        # and the Model was built from the Concatenate output rather than the
        # actual Input layers, which Keras rejects.
        output = layers.Activation("sigmoid", name=name + "_Output")(x)
        return models.Model(inputs=[input_layer_A, input_layer_B],
                            outputs=output,
                            name=name)
    else:
        # PatchGAN: stack of strided 4x4 encoding blocks.
        x = encoding_block(input_layer,
                           64,
                           padding="same",
                           use_bn=False,
                           name=name + "_En1")

        for i in range(1, n_layers - 1):
            mul_fact = min(2**i, 8)  # cap the width at 512 channels
            x = encoding_block(x,
                               mul_fact * 64,
                               padding="same",
                               name=name + f"_En{i+1}")

        # Final two blocks run at stride 1 to grow the receptive field
        # without further downsampling.
        mul_fact = min(2**(n_layers - 1), 8)
        x = encoding_block(x,
                           mul_fact * 64,
                           ksize=(4, 4),
                           strides=(1, 1),
                           padding="same",
                           name=name + f"_En{n_layers+1}")
        x = encoding_block(x,
                           1,
                           ksize=(4, 4),
                           strides=(1, 1),
                           padding="same",
                           use_act=False,
                           use_bn=False,
                           name=name + f"_En{n_layers+2}")
        x = layers.Activation("sigmoid", name=name + "_Output")(x)
        return models.Model(inputs=[input_layer_A, input_layer_B],
                            outputs=x,
                            name=name)
#%% Perpare U-Net Xception-style model

from tensorflow.keras import layers
from tensorflow import keras
import tensorflow as tf

img_size = (448, 448)

#Input da rede
layer_in = layers.Input(shape=img_size + (3, ))

#########################################   Encoding:

x = layers.Conv2D(32, kernel_size=3, padding='same', strides=(2, 2))(layer_in)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)

x = layers.DepthwiseConv2D(kernel_size=3, padding="same", strides=(1, 1))(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)

x = layers.Conv2D(64, kernel_size=1, padding='same', strides=(1, 1))(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)

x = layers.DepthwiseConv2D(kernel_size=3, padding="same", strides=(2, 2))(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)

for filters in [128, 256]:
    x = layers.Conv2D(filters, kernel_size=1, padding='same',
Пример #6
0
def conv_unit(inputs,
              num_filters=64,
              kernel_size=3,
              activation='relu',
              batch_normalization=True,
              use3D=False):
    """Inception-style unit: four parallel paths concatenated at the end.

    Path 1 goes through three convs (last one strided), path 2 through two
    (last one strided), path 3 through one strided conv, and path 4 through
    a strided max-pool; all four are concatenated.

    NOTE(review): ``kernel_size`` is accepted but never forwarded to
    ``conv`` here — the helper's own default applies; confirm intent.
    """
    if use3D:
        max_pool = layers.MaxPooling3D(pool_size=3,
                                       strides=2,
                                       padding='same',
                                       data_format=None)
    else:
        max_pool = layers.MaxPooling2D(pool_size=3,
                                       strides=2,
                                       padding='same',
                                       data_format=None)

    def make_conv(strides):
        # All convs share width/dimensionality; bias only when no batch norm.
        return conv(strides=strides,
                    use3D=use3D,
                    useBias=(not batch_normalization),
                    num_filters=num_filters)

    def normalize_activate(paths):
        # Optional BN then optional activation, applied path-by-path in order.
        if batch_normalization:
            paths = [layers.BatchNormalization()(p) for p in paths]
        if activation is not None:
            paths = [layers.Activation(activation)(p) for p in paths]
        return paths

    x = inputs
    path1 = make_conv(1)(x)
    path2 = make_conv(1)(x)
    path3 = make_conv(2)(x)
    path4 = max_pool(x)

    path1, path2, path3 = normalize_activate([path1, path2, path3])

    path1 = make_conv(1)(path1)
    path2 = make_conv(2)(path2)
    path1, path2 = normalize_activate([path1, path2])

    path1 = make_conv(2)(path1)
    [path1] = normalize_activate([path1])

    return layers.Concatenate()([path1, path2, path3, path4])
Пример #7
0
# Inspection snippet: take the first sample of each ground-truth batch and
# manually replay the Mask R-CNN mask head, layer by layer.
gt_class_ids = input_gt_class_ids[0]
gt_boxes = gt_boxes[0]
gt_masks = input_gt_masks[0]

inputs_fpn = [rois, input_train[0][1], *mrcnn_feature_maps]
# class, bbox
mrcnn_class_logits, mrcnn_class, mrcnn_bbox = model.fpn_clf(inputs_fpn)

mrcnn_mask = model.fpn_mask(inputs_fpn)

# ROI Align to a fixed 14x14 feature map per ROI.
x = mutil.PyramidROIAlign([14, 14])(inputs_fpn)

# Conv layers
x = model.fpn_mask.mask_conv1(x)
x = model.fpn_mask.bn1(x, model.fpn_mask.train_bn)
x = KL.Activation('relu')(x)

x = model.fpn_mask.mask_conv2(x)
x = model.fpn_mask.bn2(x, model.fpn_mask.train_bn)
x = KL.Activation('relu')(x)

x = model.fpn_mask.mask_conv3(x)
x = model.fpn_mask.bn3(x, model.fpn_mask.train_bn)
x = KL.Activation('relu')(x)

x = model.fpn_mask.mask_conv4(x)
x = model.fpn_mask.bn4(x, model.fpn_mask.train_bn)
x = KL.Activation('relu')(x)

# Upsample and produce the per-class mask predictions.
x = model.fpn_mask.deconv(x)
x = model.fpn_mask.mrcnn_mask(x)
def VGG19(num_classes,
          input_shape=(48, 48, 3),
          dropout=None,
          block5=True,
          batch_norm=True):
    """VGG-19-style image classifier.

    Args:
        num_classes: size of the final softmax layer.
        input_shape: input image shape, default (48, 48, 3).
        dropout: optional dropout rate applied before the classifier head.
        block5: include the fifth conv block when True.
        batch_norm: insert BatchNormalization after every conv when True.

    Returns:
        A keras Model named 'vgg19'.
    """
    img_input = layers.Input(shape=input_shape)

    #Block1
    x = layers.Conv2D(64, (3, 3), padding='same',
                      name='block1_conv1')(img_input)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(64, (3, 3), padding='same', name='block1_conv2')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    #Block2
    x = layers.Conv2D(128, (3, 3), padding='same', name='block2_conv1')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(128, (3, 3), padding='same', name='block2_conv2')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    #Block3
    x = layers.Conv2D(256, (3, 3), padding='same', name='block3_conv1')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(256, (3, 3), padding='same', name='block3_conv2')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(256, (3, 3), padding='same', name='block3_conv3')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(256, (3, 3), padding='same', name='block3_conv4')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    #Block4
    x = layers.Conv2D(512, (3, 3), padding='same', name='block4_conv1')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(512, (3, 3), padding='same', name='block4_conv2')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    # BUGFIX: block4_conv3 (and block5_conv3 below) previously passed
    # activation='relu' inside Conv2D AND applied a second ReLU after BN;
    # dropped the inner activation so every conv follows the same
    # conv -> BN -> ReLU pattern as the rest of the network.
    x = layers.Conv2D(512, (3, 3),
                      padding='same',
                      name='block4_conv3')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(512, (3, 3), padding='same', name='block4_conv4')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    #Block5
    if block5:
        x = layers.Conv2D(512, (3, 3), padding='same', name='block5_conv1')(x)
        if batch_norm:
            x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)

        x = layers.Conv2D(512, (3, 3), padding='same', name='block5_conv2')(x)
        if batch_norm:
            x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)

        x = layers.Conv2D(512, (3, 3),
                          padding='same',
                          name='block5_conv3')(x)
        if batch_norm:
            x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)

        x = layers.Conv2D(512, (3, 3), padding='same', name='block5_conv4')(x)
        if batch_norm:
            x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)

        x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    # NOTE(review): 1x1 average pooling with stride 1 is a spatial no-op;
    # kept for layer-name/shape compatibility with existing checkpoints.
    x = layers.AveragePooling2D((1, 1), strides=(1, 1), name='block6_pool')(x)
    x = layers.Flatten()(x)
    if dropout:
        x = layers.Dropout(dropout)(x)
    x = layers.Dense(num_classes, activation='softmax', name='predictions')(x)
    model = models.Model(img_input, x, name='vgg19')
    return model
Пример #9
0
def block2(x,
           filters,
           kernel_size=3,
           stride=1,
           dilate=False,
           conv_shortcut=False,
           name=None):
    """A residual block.

    # Arguments
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the first layer.
        dilate: default False, if True the middle conv uses dilation_rate=2
            (and no spatial stride), keeping the output resolution.
        conv_shortcut: default False, use convolution shortcut if True,
            otherwise identity shortcut.
        name: string, block label.

    # Returns
        Output tensor for the residual block.
    """
    bn_axis = 3  # channels-last

    # Pre-activation (ResNet v2 style): BN + ReLU before the convs.
    preact = SyncBatchNormalization(axis=bn_axis,
                                    epsilon=1.001e-5,
                                    name=name + '_preact_bn')(x)
    preact = layers.Activation('relu', name=name + '_preact_relu')(preact)

    if conv_shortcut is True:
        # Projection shortcut to 4*filters channels.
        shortcut = layers.Conv2D(4 * filters, 1, name=name + '_0_conv')(preact)
    else:
        if not dilate:
            # Identity shortcut; downsample via 1x1 max-pool when strided.
            shortcut = layers.MaxPooling2D(
                1, strides=stride)(x) if stride > 1 else x
        else:
            shortcut = x

    x = layers.Conv2D(filters,
                      1,
                      strides=1,
                      use_bias=False,
                      name=name + '_1_conv')(preact)
    x = SyncBatchNormalization(axis=bn_axis,
                               epsilon=1.001e-5,
                               name=name + '_1_bn')(x)
    x = layers.Activation('relu', name=name + '_1_relu')(x)

    if dilate:
        # Dilated middle conv: larger receptive field, resolution preserved.
        x = layers.Conv2D(filters,
                          kernel_size,
                          dilation_rate=2,
                          padding='SAME',
                          use_bias=False,
                          name=name + '_2_conv')(x)
    else:
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)),
                                 name=name + '_2_pad')(x)
        x = layers.Conv2D(filters,
                          kernel_size,
                          strides=stride,
                          use_bias=False,
                          name=name + '_2_conv')(x)
    x = SyncBatchNormalization(axis=bn_axis,
                               epsilon=1.001e-5,
                               name=name + '_2_bn')(x)
    x = layers.Activation('relu', name=name + '_2_relu')(x)
    # Expand back to 4*filters and add the shortcut.
    x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
    x = layers.Add(name=name + '_out')([shortcut, x])
    return x
Пример #10
0
# Instantiate the stack of residual units (ResNet v1); `x`, `num_filters`,
# and `num_res_blocks` come from earlier in the script.
for stack in range(3):
    for res_block in range(num_res_blocks):
        strides = 1
        if stack > 0 and res_block == 0: # first layer but not first stack
            strides = 2 # downsample
        y = resnet_layer(x, num_filters, strides=strides)
        y = resnet_layer(y, num_filters, activation=None)

        if stack > 0 and res_block == 0: # first layer but not first stack
            # linear projection residual shortcut connection to match
            # change dims
            x = resnet_layer(x, num_filters, kernel_size=1, strides=strides,
                             activation=None, batch_normalization=False)
        x = layers.add([x, y])
        x = layers.Activation('relu')(x)
    num_filters *= 2

# Add classifier on top.
# v1 does not use BN after last shortcut connection-ReLU
x = layers.GlobalAveragePooling2D()(x)

y_predict = layers.Dense(num_classes, activation='softmax',
                       kernel_initializer='he_normal')(x)

# Take the first cross-validation fold and one-hot encode the 7 classes.
X_train, X_test = X_train_Kf[0], X_test_Kf[0]
y_train, y_test = y_train_Kf[0], y_test_Kf[0]
y_train = to_categorical(y_train, 7)
y_test = to_categorical(y_test, 7)

# Number of batches per epoch.
total_batch = np.ceil(num_samples/batch_size)
Пример #11
0
def SVBRDF(num_classes):
    """U-Net-style encoder/decoder with a parallel global-feature (GF)
    track that is fused into the spatial track at every scale; ends in a
    per-pixel softmax over ``num_classes`` channels.

    :param num_classes: number of output channels of the final conv
    :return: a keras.Model taking a (256, 256, 3) image
    """
    #=============== first layer ==================

    inputs = keras.Input(shape=(256, 256) + (3, ))

    #GF = layers.LeakyReLU()(inputs)
    # Global-feature track: pool the full spatial extent down to 1x1.
    GF = layers.AveragePooling2D((inputs.shape[1], inputs.shape[1]))(inputs)
    GF = layers.Dense(128)(GF)
    GF = layers.Activation('selu')(GF)

    x = layers.SeparableConv2D(128, 4, 2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    #previous_block_activation = x  # Set aside residual

    #========== define filters for unet ===================

    downfilters = np.array([128, 256, 512, 512, 512, 512, 512])
    Upfilters = np.flip(np.copy(downfilters))
    downfilters = np.delete(downfilters, 0)
    prefilter = 128

    #===================== downsampling =======================
    # NOTE(review): the original label here said "upsampling", but this
    # loop uses stride-2 SeparableConv2D, which halves the resolution --
    # it is the encoder path.

    for filters in downfilters:
        #print(x.shape)
        #print(filters)
        # Fold the current spatial features into the global track...
        GFdown = layers.AveragePooling2D((x.shape[1], x.shape[1]))(x)
        GFup = layers.Dense(prefilter)(GF)
        GF = layers.Concatenate()([GF, GFdown])
        GF = layers.Dense(filters)(GF)
        GF = layers.Activation('selu')(GF)

        # ...and add the projected global features back into the spatial map.
        x = layers.Add()([x, GFup])
        x = layers.LeakyReLU()(x)
        x = layers.SeparableConv2D(filters, 4, 2, padding="same")(x)
        x = layers.BatchNormalization()(x)
        prefilter = filters

    #====================== upsampling ============================
    # NOTE(review): the original label here said "downsampling";
    # stride-2 Conv2DTranspose doubles the resolution -- decoder path.

    for filters in Upfilters:

        GFdown = layers.AveragePooling2D((x.shape[1], x.shape[1]))(x)
        GFup = layers.Dense(prefilter)(GF)
        GF = layers.Concatenate()([GF, GFdown])
        GF = layers.Dense(filters)(GF)
        GF = layers.Activation('selu')(GF)

        x = layers.Add()([x, GFup])
        x = layers.LeakyReLU()(x)
        x = layers.Conv2DTranspose(filters, 4, 2, padding="same")(x)
        x = layers.BatchNormalization()(x)
        prefilter = filters

    #====================== last connection =====================

    # NOTE(review): unlike the loops above, Dense is applied to the
    # spatial map `x` here, not to GF -- possibly intended
    # Dense(prefilter)(GF); confirm against the original design.
    GFup = layers.Dense(prefilter)(x)
    x = layers.Add()([x, GFup])
    outputs = layers.Conv2D(num_classes,
                            3,
                            activation="softmax",
                            padding="same")(x)
    model = keras.Model(inputs, outputs)
    return model
Пример #12
0
def makeModel(input_shape, learning_rate):
    """Build and compile an Xception-style CNN classifier.

    :param input_shape: shape of the input images, e.g. (H, W, C)
    :param learning_rate: learning rate for the SGD optimizer
    :return: tuple of (compiled keras.Model, list of training callbacks)
    """
    inputs = keras.Input(shape=input_shape)

    # Augment the image
    x = data_augmentation(inputs)

    # Enter the network: rescale to [0, 1] then two plain conv blocks.
    x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(x)
    x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    # Xception-style middle flow: separable convs with residual shortcuts.
    for size in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual with a strided 1x1 conv to match the new shape.
        residual = layers.Conv2D(size, 1, strides=2, padding="same")(
            previous_block_activation
        )
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    x = layers.SeparableConv2D(1024, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.GlobalAveragePooling2D()(x)
    activation = "softmax"
    units = 3  # number of classes

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)

    model = keras.Model(inputs, outputs)
    model.compile(
        # BUG FIX: learning_rate was accepted but never used; pass it to SGD.
        optimizer=keras.optimizers.SGD(learning_rate=learning_rate,
                                       momentum=0.01,
                                       nesterov=True),
        loss="categorical_crossentropy",
        metrics=[
            keras.metrics.categorical_accuracy
        ]
    )

    callbacks = [
        keras.callbacks.ModelCheckpoint("./train_checkpoints/save_at_{epoch}.h5")
    ]

    return model, callbacks
Пример #13
0
def build_model(nx: Optional[int] = None,
                ny: Optional[int] = None,
                channels: int = 1,
                num_classes: int = 2,
                layer_depth: int = 5,
                filters_root: int = 64,
                kernel_size: int = 3,
                pool_size: int = 2,
                dropout_rate: float = 0.5,
                padding: str = "valid",
                activation: Union[str, Callable] = "relu",
                last_bias_initializer=None,
                last_activation=None,
                prelu=True) -> Union[Model, None]:
    """
    Constructs a U-Net model

    :param nx: (Optional) image size on x-axis
    :param ny: (Optional) image size on y-axis
    :param channels: number of channels of the input tensors
    :param num_classes: number of classes
    :param layer_depth: total depth of unet
    :param filters_root: number of filters in top unet layer
    :param kernel_size: size of convolutional layers
    :param pool_size: size of maxpool layers
    :param dropout_rate: rate of dropout
    :param padding: padding to be used in convolutions
    :param activation: activation to be used
    :param last_bias_initializer: optional bias initializer for the final 1x1 conv
    :param last_activation: required output activation, 'softmax' or 'sigmoid'
    :param prelu: whether the conv blocks use PReLU

    :return: A TF Keras model, or None if last_activation was not supplied
    """
    # FIX: validate up front instead of after building the whole graph.
    # Externally identical (same message, same None return), just cheaper.
    if last_activation is None:
        print(
            "Wrong Usage: Build model must include a last_activation parameter that can be either 'softmax' or 'sigmoid'. The model has not been built."
        )
        return None

    inputs = Input(shape=(nx, ny, channels), name="inputs")

    x = inputs
    contracting_layers = {}

    conv_params = dict(filters_root=filters_root,
                       kernel_size=kernel_size,
                       dropout_rate=dropout_rate,
                       padding=padding,
                       activation=activation,
                       prelu=prelu)

    # Contracting path: conv block then downsample, saving each level's
    # activations for the skip connections.
    for layer_idx in range(0, layer_depth - 1):
        x = ConvBlock(layer_idx, **conv_params)(x)
        contracting_layers[layer_idx] = x
        x = layers.MaxPooling2D((pool_size, pool_size))(x)

    # Bottom of the U.
    x = ConvBlock(layer_idx + 1, **conv_params)(x)

    # Expanding path: upsample, crop+concat the saved skip, conv block.
    for layer_idx in range(layer_idx, -1, -1):
        x = UpconvBlock(layer_idx,
                        filters_root,
                        kernel_size,
                        pool_size,
                        padding,
                        activation,
                        prelu=prelu)(x)
        x = CropConcatBlock()(x, contracting_layers[layer_idx])
        x = ConvBlock(layer_idx, **conv_params)(x)

    # Final 1x1 conv mapping to class logits.  The only difference between
    # the original two branches was the optional bias initializer, so the
    # shared kwargs are built once (FIX: removes the duplicated call).
    last_conv_kwargs = dict(filters=num_classes,
                            kernel_size=(1, 1),
                            kernel_initializer=_get_kernel_initializer(
                                filters_root, kernel_size),
                            strides=1,
                            padding=padding)
    if last_bias_initializer is not None:
        last_conv_kwargs["bias_initializer"] = last_bias_initializer
    x = layers.Conv2D(**last_conv_kwargs)(x)

    if last_activation == "softmax":
        # NOTE(review): this applies the *hidden-layer* activation (e.g.
        # relu) immediately before the softmax output -- looks suspicious
        # but is preserved as-is; confirm intent before changing.
        x = layers.Activation(activation)(x)

    outputs = layers.Activation(last_activation, name="outputs")(x)
    model = Model(inputs, outputs, name="unet")

    return model
Пример #14
0
# Encoder: three stride-2 convs, each halving the spatial resolution.
# The input is scaled from [0, 255] to [0, 1] inline.
conv1 = layers.Conv2D(16, kernel_size=5, strides=2, activation=tf.nn.relu, padding='same')(features/255.0)
conv2 = layers.Conv2D(32, kernel_size=5, strides=2, activation=tf.nn.relu, padding='same')(conv1)
conv3 = layers.Conv2D(64, kernel_size=5, strides=2, activation=tf.nn.relu, padding='same')(conv2)
# Do a 1x1 convolution.
conv4 = layers.Conv2D(64, kernel_size=1, strides=1)(conv3)
# Upsample three times.
# Decoder: each transpose conv doubles resolution; skip connections
# concatenate the matching encoder activations along the channel axis.
concat1 = layers.Concatenate(axis=3)([conv3, conv4])
deconv1 = layers.Conv2DTranspose(32, kernel_size=5, strides=2, activation=tf.nn.relu, padding='same')(concat1)
concat2 = layers.Concatenate(axis=3)([conv2, deconv1])
deconv2 = layers.Conv2DTranspose(16, kernel_size=5, strides=2, activation=tf.nn.relu, padding='same')(concat2)
concat3 = layers.Concatenate(axis=3)([conv1, deconv2])
deconv3 = layers.Conv2DTranspose(1, kernel_size=5, strides=2, activation=tf.nn.relu, padding='same')(concat3)
# Compute the final output.
# Final skip from the raw input, then a 5x5 conv producing per-pixel logits.
concat4 = layers.Concatenate(axis=3)([features, deconv3])
logits = layers.Conv2D(1, kernel_size=5, strides=1, padding='same')(concat4)
output = layers.Activation(tf.math.sigmoid)(logits)
# Expose both the sigmoid prediction and the raw logits (the loss uses logits).
keras_model = tf.keras.Model(inputs=features, outputs=[output, logits])
learning_rate = dc.models.optimizers.ExponentialDecay(0.01, 0.9, 250)
# Wrap in a DeepChem KerasModel with sigmoid cross-entropy on the logits.
model = dc.models.KerasModel(
    keras_model,
    loss=dc.models.losses.SigmoidCrossEntropy(),
    output_types=['prediction', 'loss'],
    learning_rate=learning_rate,
    model_dir='models/segmentation')

# Ensure the checkpoint directory exists before training.
if not os.path.exists('./models'):
  os.mkdir('models')
if not os.path.exists('./models/segmentation'):
  os.mkdir('models/segmentation')
if not RETRAIN:
Пример #15
0
# ('mse' 'mae', 'mape', 'cosine') - regression  
# ('acc') - classification
metrics = ['mse', 'mae', 'mape']

# Best config for...
# ux --> hidden_layers: 2, neurons: profile_points
# uy --> hidden_layers: 2, neurons: profile_points
# f --> hidden_layers: 2, neurons: profile_points

hidden_layers = 2
neurons = profile_points

# Two hidden Dense layers (activation + dropout after each), then a
# linear output layer; BatchNormalization lines left disabled on purpose.
model = tf.keras.Sequential([                             
layers.Dense(profile_points, input_shape=(inputs,), kernel_initializer=kernel_initializer),
#layers.BatchNormalization(),
layers.Activation(activation=activation),
layers.Dropout(rate=dropout),
layers.Dense(profile_points, kernel_initializer=kernel_initializer),
#layers.BatchNormalization(),
layers.Activation(activation=activation),
layers.Dropout(rate=dropout),
layers.Dense(outputs, kernel_initializer=kernel_initializer)])

model.summary()

# NOTE(review): tf.global_variables_initializer / tf.train.Saver /
# tf.get_default_graph are TF1-era APIs; under TF2 these need
# tf.compat.v1 -- confirm which TF version this script targets.
init = tf.global_variables_initializer()
saver_def = tf.train.Saver().as_saver_def()
# Serialize the default graph so it can be loaded outside Python.
with open('graph.pb', 'wb') as f:
  f.write(tf.get_default_graph().as_graph_def().SerializeToString())
if download_files:
Пример #16
0
print(x_test.shape[0], 'test samples')

# One-hot encode the integer class labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Assemble the MLP-with-Antirectifier model in a single constructor call:
# two Dense->Antirectifier->Dropout blocks, then a softmax classifier.
model = Sequential([
    layers.Dense(256, input_shape=(784, )),
    Antirectifier(),
    layers.Dropout(0.1),
    layers.Dense(256),
    Antirectifier(),
    layers.Dropout(0.1),
    layers.Dense(num_classes),
    layers.Activation('softmax'),
])

# Configure loss, optimizer and reported metrics.
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Fit on the training set, validating on the held-out test set each epoch.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

# next, compare with an equivalent network
Пример #17
0
def resnet(input_shape, depth, num_classes, use3D=False, useBatchNorm=True):
    """ResNet Version 1 Model builder [a]

    Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
    Last ReLU is after the shortcut connection.
    At the beginning of each stage, the feature map size is downsampled
    by a convolutional layer with strides=2, while the number of filters is
    doubled. Within each stage, the layers have the same number of filters
    and the same feature map sizes.
    Features maps sizes:
    stage 0: 32x32, 16
    stage 1: 16x16, 32
    stage 2:  8x8,  64
    The Number of parameters is approx the same as Table 6 of [a]:
    ResNet20 0.27M
    ResNet32 0.46M
    ResNet44 0.66M
    ResNet56 0.85M
    ResNet110 1.7M

    # Arguments
        input_shape (tensor): shape of input image tensor
        depth (int): number of core convolutional layers
        num_classes (int): number of classes (CIFAR10 has 10)
        use3D (bool): build with 3D convolution/pooling layers
        useBatchNorm (bool): apply batch normalization inside the residual
            layers (the projection shortcut never uses it)

    # Returns
        model (Model): Keras model instance
    """
    if (depth - 2) % 6 != 0:
        raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
    # Start model definition.
    num_filters = 16
    num_res_blocks = int((depth - 2) / 6)

    inputs = layers.Input(shape=input_shape)
    x = resnet_layer(inputs=inputs, use3D=use3D)
    # Instantiate the stack of residual units
    for stack in range(3):
        for res_block in range(num_res_blocks):
            strides = 1
            if stack > 0 and res_block == 0:  # first layer but not first stack
                strides = 2  # downsample
            y = resnet_layer(inputs=x,
                             num_filters=num_filters,
                             strides=strides,
                             batch_normalization=useBatchNorm,
                             use3D=use3D)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters,
                             activation=None,
                             batch_normalization=useBatchNorm,
                             use3D=use3D)
            if stack > 0 and res_block == 0:  # first layer but not first stack
                # linear projection residual shortcut connection to match
                # changed dims
                x = resnet_layer(inputs=x,
                                 num_filters=num_filters,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=None,
                                 batch_normalization=False,
                                 use3D=use3D)
            x = layers.add([x, y])
            x = layers.Activation('relu')(x)
        num_filters *= 2

    # Add classifier on top.
    # v1 does not use BN after last shortcut connection-ReLU
    if use3D:
        x = layers.AveragePooling3D(pool_size=8)(x)
    else:
        x = layers.AveragePooling2D(pool_size=8)(x)
    y = layers.Flatten()(x)
    outputs = layers.Dense(num_classes,
                           activation='softmax',
                           kernel_initializer='he_normal')(y)

    # Instantiate model.
    model = keras.Model(inputs=inputs, outputs=outputs)
    return model
Пример #18
0
def build_generator():
    """Build the two-branch image generator.

    Branch 1: an Inception-style multi-scale block followed by a
    conv/deconv encoder-decoder.  Branch 2: a ConvLSTM stack treating the
    input channels as time steps.  The two branches are summed and mapped
    to a single tanh output channel.

    Uses module-level L_node, W_node and Time_steps for the input shape.

    :return: tf.keras.Model named 'generator'
    """
    # FIX: renamed locals `input` -> `inp` and `Model` -> `model` so they
    # no longer shadow the `input` builtin / the Keras Model class name.
    inp = tf.keras.Input(shape=(L_node, W_node, Time_steps))  # (None, 256, 256, 5)

    l1 = inp  # (None, 256, 256, 5)

    # Inception-style block: parallel 1x1, 3x3, 5x5 and pooled-1x1 paths.
    l2_1 = layers.Conv2D(64, 1, 1, 'same', activation='relu')(l1)  # (None, 256, 256, 64)
    l2_1 = layers.BatchNormalization()(l2_1)

    l2_2 = layers.Conv2D(48, 1, 1, 'same', activation='relu')(l1)  # (None, 256, 256, 48)
    l2_2 = layers.BatchNormalization()(l2_2)
    l2_2 = layers.Conv2D(64, 3, 1, 'same', activation='relu')(l2_2)  # (None, 256, 256, 64)
    l2_2 = layers.BatchNormalization()(l2_2)

    l2_3 = layers.Conv2D(48, 1, 1, 'same', activation='relu')(l1)  # (None, 256, 256, 48)
    l2_3 = layers.BatchNormalization()(l2_3)
    l2_3 = layers.Conv2D(64, 5, 1, 'same', activation='relu')(l2_3)  # (None, 256, 256, 64)
    l2_3 = layers.BatchNormalization()(l2_3)

    l2_4 = layers.AvgPool2D(3, 1, 'same')(l1)
    l2_4 = layers.Conv2D(64, 1, 1, 'same', activation='relu')(l2_4)  # (None, 256, 256, 64)
    l2_4 = layers.BatchNormalization()(l2_4)

    l2 = layers.concatenate([l2_1, l2_2, l2_3, l2_4], 3)  # (None, 256, 256, 256)

    l3 = layers.Conv2D(64, 3, 1, 'same', activation='relu')(l2)  # (None, 256, 256, 64)

    # Branch 1 encoder: four stride-2 convs, each halving resolution.
    l4_1 = layers.Conv2D(128, 3, 2, 'same')(l3)  # (None, 128, 128, 128)
    l4_1 = layers.BatchNormalization()(l4_1)
    l4_1 = layers.LeakyReLU(0.2)(l4_1)

    l5_1 = layers.Conv2D(256, 3, 2, 'same')(l4_1)  # (None, 64, 64, 256)
    l5_1 = layers.BatchNormalization()(l5_1)
    l5_1 = layers.LeakyReLU(0.2)(l5_1)

    l6_1 = layers.Conv2D(512, 3, 2, 'same')(l5_1)  # (None, 32, 32, 512)
    l6_1 = layers.BatchNormalization()(l6_1)
    l6_1 = layers.LeakyReLU(0.2)(l6_1)

    l7_1 = layers.Conv2D(512, 3, 2, 'same')(l6_1)  # (None, 16, 16, 512)
    l7_1 = layers.BatchNormalization()(l7_1)
    l7_1 = layers.LeakyReLU(0.2)(l7_1)

    # Branch 1 decoder: four stride-2 transpose convs back to full size.
    l8_1 = layers.Conv2DTranspose(512, 3, 2, 'same')(l7_1)  # (None, 32, 32, 512)
    l8_1 = layers.BatchNormalization()(l8_1)
    l8_1 = layers.Dropout(0.3)(l8_1)
    l8_1 = layers.Activation('relu')(l8_1)

    l9_1 = layers.Conv2DTranspose(256, 3, 2, 'same')(l8_1)  # (None, 64, 64, 256)
    l9_1 = layers.BatchNormalization()(l9_1)
    l9_1 = layers.Dropout(0.3)(l9_1)
    l9_1 = layers.Activation('relu')(l9_1)

    l10_1 = layers.Conv2DTranspose(128, 3, 2, 'same')(l9_1)  # (None, 128, 128, 128)
    l10_1 = layers.BatchNormalization()(l10_1)
    l10_1 = layers.Dropout(0.3)(l10_1)
    l10_1 = layers.Activation('relu')(l10_1)

    l11_1 = layers.Conv2DTranspose(64, 3, 2, 'same')(l10_1)  # (None, 256, 256, 64)
    l11_1 = layers.BatchNormalization()(l11_1)
    l11_1 = layers.Dropout(0.3)(l11_1)
    l11_1 = layers.Activation('tanh')(l11_1)

    # Branch 2: downsample, then run a ConvLSTM stack over the 5 channels
    # reinterpreted as a time axis.
    l4_2 = layers.Conv2D(32, 3, 2, 'same')(l3)  # (None, 128, 128, 32)

    l5_2 = layers.Conv2D(5, 3, 2, 'same')(l4_2)  # (None, 64, 64, 5)
    # l5_2_1 = tf.reshape(l5_2(-1, -1, -1, 0), shape=(1, L_node, W_node, Channel))
    # l5_2_2 = tf.reshape(l5_2(-1, -1, -1, 1), shape=(1, L_node, W_node, Channel))
    # l5_2_3 = tf.reshape(l5_2(-1, -1, -1, 2), shape=(1, L_node, W_node, Channel))
    # l5_2_4 = tf.reshape(l5_2(-1, -1, -1, 3), shape=(1, L_node, W_node, Channel))
    # l5_2_5 = tf.reshape(l5_2(-1, -1, -1, 4), shape=(1, L_node, W_node, Channel))
    l5_2 = tf.reshape(l5_2, shape=(-1, 5, 64, 64, 1))  # (None, 5, 64, 64, 1)

    l6_2 = layers.ConvLSTM2D(10, 3, 1, 'same', return_sequences=True)(l5_2)  # (None, 5, 64, 64, 10)

    l7_2 = layers.ConvLSTM2D(20, 3, 1, 'same', return_sequences=True)(l6_2)  # (None, 5, 64, 64, 20)

    l8_2 = layers.ConvLSTM2D(20, 3, 1, 'same', return_sequences=True)(l7_2)  # (None, 5, 64, 64, 20)

    l9_2 = layers.ConvLSTM2D(10, 3, 1, 'same', return_sequences=False)(l8_2)  # (None, 64, 64, 10)

    # Upsample branch 2 back to full resolution.
    l10_2 = layers.Conv2DTranspose(32, 3, 2, 'same')(l9_2)  # (None, 128, 128, 32)
    l10_2 = layers.BatchNormalization()(l10_2)
    l10_2 = layers.Dropout(0.3)(l10_2)
    l10_2 = layers.Activation('relu')(l10_2)

    l11_2 = layers.Conv2DTranspose(64, 3, 2, 'same')(l10_2)  # (None, 256, 256, 64)
    l11_2 = layers.BatchNormalization()(l11_2)
    l11_2 = layers.Dropout(0.3)(l11_2)
    l11_2 = layers.Activation('relu')(l11_2)

    # Fuse the two branches and map to a single tanh output channel.
    l12 = layers.add([l11_1, l11_2])  # (None, 256, 256, 64)

    l13 = layers.Conv2D(1, 3, 1, 'same', activation='tanh')(l12)  # (None, 256, 256, 1)

    model = tf.keras.Model(inp, l13, name='generator')
    return model
Пример #19
0
# Finish and show the loss curves figure.
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()

# Plot accuracy curves for the same training run.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')

plt.show()

# Wrap the trained model with the text vectorizer and a sigmoid so the
# exported model accepts raw strings and emits probabilities.
export_model = tf.keras.Sequential([
  vectorize_layer,
  model,
  layers.Activation('sigmoid')
])

# from_logits=False because the sigmoid is now part of the model.
export_model.compile(
    loss=losses.BinaryCrossentropy(from_logits=False), optimizer="adam", metrics=['accuracy']
)

# Test it with `raw_test_ds`, which yields raw strings
loss, accuracy = export_model.evaluate(raw_test_ds)
print(accuracy)

Пример #20
0
    def _make_block_basic(self,
                          input_tensor,
                          first_block=True,
                          filters=64,
                          stride=2,
                          radix=1,
                          avd=False,
                          avd_first=False,
                          is_first=False):
        '''Conv2d_BN_Relu->Bn_Relu_Conv2d

        Pre-activation basic residual block: BN + activation run before
        each conv, and a (possibly downsampled/projected) shortcut is
        added back at the end.

        :param input_tensor: channels-last input feature map
        :param first_block: not referenced in this body (kept for
            interface compatibility)
        :param filters: number of output filters
        :param stride: spatial stride of the block
        :param radix: if >= 1 the first conv is a split-attention conv,
            otherwise a plain Conv2D
        :param avd: move downsampling into an average pool instead of a
            strided conv
        :param avd_first: apply that average pool before (True) or after
            (False) the first conv
        :param is_first: whether this is the first block of a stage
        :return: output feature map of the block
        '''
        x = input_tensor
        # Pre-activation: BN then nonlinearity before any convolution.
        x = layers.BatchNormalization(axis=self.channel_axis,
                                      epsilon=1.001e-5)(x)
        x = layers.Activation(self.active)(x)

        # Shortcut branch: project/downsample when the output shape would
        # not match the input.
        short_cut = x
        inplanes = input_tensor.shape[-1]
        if stride != 1 or inplanes != filters * self.block_expansion:
            if self.avg_down:
                # Average-pool downsampling followed by a 1x1 projection.
                if self.dilation == 1:
                    short_cut = layers.AveragePooling2D(
                        pool_size=stride,
                        strides=stride,
                        padding='same',
                        data_format='channels_last')(short_cut)
                else:
                    short_cut = layers.AveragePooling2D(
                        pool_size=1,
                        strides=1,
                        padding='same',
                        data_format='channels_last')(short_cut)
                short_cut = layers.Conv2D(
                    filters,
                    kernel_size=1,
                    strides=1,
                    padding='same',
                    kernel_initializer='he_normal',
                    use_bias=False,
                    data_format='channels_last')(short_cut)
            else:
                # Strided 1x1 conv projection.
                short_cut = layers.Conv2D(
                    filters,
                    kernel_size=1,
                    strides=stride,
                    padding='same',
                    kernel_initializer='he_normal',
                    use_bias=False,
                    data_format='channels_last')(short_cut)

        group_width = int(filters *
                          (self.bottleneck_width / 64.)) * self.cardinality
        # avd only takes effect when downsampling or on a stage's first block.
        avd = avd and (stride > 1 or is_first)
        avd_first = avd_first  # NOTE(review): no-op self-assignment

        if avd:
            # Downsample via average pooling; the convs then use stride 1.
            avd_layer = layers.AveragePooling2D(pool_size=3,
                                                strides=stride,
                                                padding='same',
                                                data_format='channels_last')
            stride = 1

        if avd and avd_first:
            x = avd_layer(x)

        # Main branch conv #1: split-attention conv when radix >= 1.
        if radix >= 1:
            x = self._SplAtConv2d(x,
                                  filters=group_width,
                                  kernel_size=3,
                                  stride=stride,
                                  dilation=self.dilation,
                                  groups=self.cardinality,
                                  radix=radix)
        else:
            x = layers.Conv2D(filters,
                              kernel_size=3,
                              strides=stride,
                              padding='same',
                              kernel_initializer='he_normal',
                              dilation_rate=self.dilation,
                              use_bias=False,
                              data_format='channels_last')(x)

        if avd and not avd_first:
            x = avd_layer(x)
            # print('can')

        # Main branch conv #2, again pre-activated (BN + activation first).
        x = layers.BatchNormalization(axis=self.channel_axis,
                                      epsilon=1.001e-5)(x)
        x = layers.Activation(self.active)(x)
        x = layers.Conv2D(filters,
                          kernel_size=3,
                          strides=1,
                          padding='same',
                          kernel_initializer='he_normal',
                          dilation_rate=self.dilation,
                          use_bias=False,
                          data_format='channels_last')(x)
        # Residual addition of main branch and shortcut.
        m2 = layers.Add()([x, short_cut])
        return m2
Пример #21
0
# plt.show()

# plt.plot(epochs, acc, 'bo', label='Training acc')
# plt.plot(epochs, val_acc, 'b', label='Validation acc')
# plt.title('Training and validation accuracy')
# plt.xlabel('Epochs')
# plt.ylabel('Accuracy')
# plt.legend(loc='lower right')

# plt.show()

# creating a new model for brute text
# Chain the text vectorizer, the trained model and a sigmoid so the
# exported model accepts raw strings and emits probabilities directly.
print("Building and compiling export_model..")
export_model = tf.keras.Sequential(
    [vectorize_layer, model,
     layers.Activation("sigmoid")])

# from_logits=False because the sigmoid is now part of the model.
export_model.compile(
    loss=losses.BinaryCrossentropy(from_logits=False),
    optimizer="adam",
    metrics=["accuracy"],
)
print("Compilation ended.")

# Test it with `raw_test_ds`, which yields raw strings
loss, accuracy = export_model.evaluate(raw_test_ds)
print("Accuracy:", accuracy)

# Sanity-check inputs for a quick manual prediction.
examples = [
    "This movie was great!", "The movie is great!", "The movie was terrible!"
]
Пример #22
0
    def build(self):
        """Assemble the full network.

        Stem -> (optional BN+activation) -> max-pool -> four residual
        stages -> global average pool -> dropout -> bias-free Dense
        classifier (-> optional final activation).

        :return: a Keras Model mapping input_sig to fc_out
        """
        input_sig = Input(shape=self.input_shape)
        x = self._make_stem(input_sig,
                            stem_width=self.stem_width,
                            deep_stem=self.deep_stem)

        # Non-preact variants normalize/activate right after the stem.
        if self.preact is False:
            x = layers.BatchNormalization(axis=self.channel_axis,
                                          epsilon=1.001e-5)(x)
            x = layers.Activation(self.active)(x)
        if self.verbose: print('stem_out', x.shape)

        x = MaxPool2D(pool_size=3,
                      strides=2,
                      padding='same',
                      data_format='channels_last')(x)
        if self.verbose: print('MaxPool2D out', x.shape)

        # Preact variants normalize/activate after the pool instead.
        if self.preact is True:
            x = layers.BatchNormalization(axis=self.channel_axis,
                                          epsilon=1.001e-5)(x)
            x = layers.Activation(self.active)(x)

        # Four residual stages: filters double while resolution halves
        # (stage 1 keeps stride 1).
        x = self._make_layer(x,
                             blocks=self.blocks_set[0],
                             filters=64,
                             stride=1,
                             is_first=False)
        if self.verbose: print('-' * 5, 'layer1 out', x.shape, '-' * 5)
        x = self._make_layer(x,
                             blocks=self.blocks_set[1],
                             filters=128,
                             stride=2)
        if self.verbose: print('-' * 5, 'layer2 out', x.shape, '-' * 5)
        x = self._make_layer(x,
                             blocks=self.blocks_set[2],
                             filters=256,
                             stride=2)
        if self.verbose: print('-' * 5, 'layer3 out', x.shape, '-' * 5)
        x = self._make_layer(x,
                             blocks=self.blocks_set[3],
                             filters=512,
                             stride=2)
        if self.verbose: print('-' * 5, 'layer4 out', x.shape, '-' * 5)

        concats = GlobalAveragePooling2D(name='avg_pool')(x)
        if self.verbose: print("pool_out:", concats.shape)

        # BUG FIX: dropout was previously applied to `x` and its result
        # discarded (the classifier consumed `concats`), so it had no
        # effect. Apply it to the pooled features that feed the Dense.
        if self.dropout_rate > 0:
            concats = Dropout(self.dropout_rate, noise_shape=None)(concats)

        fc_out = Dense(self.n_classes,
                       kernel_initializer='he_normal',
                       use_bias=False,
                       name='fc_NObias')(concats)
        if self.verbose: print("fc_out:", fc_out.shape)

        if self.fc_activation:
            fc_out = Activation(self.fc_activation)(fc_out)

        model = models.Model(inputs=input_sig, outputs=fc_out)

        if self.verbose:
            print("Resnest builded with input {}, output{}".format(
                input_sig.shape, fc_out.shape))
        if self.verbose: print('-------------------------------------------')
        if self.verbose: print('')

        return model
Пример #23
0
# Scale pixel values into [0, 1] and preview the first training image.
train_X = train_X /255.0
test_X = test_X / 255.0
plt.imshow(train_X[0])
plt.colorbar()
plt.show()

print(train_X.shape)
print(test_X.shape)

# Add the single grayscale channel dimension expected by Conv2D.
train_X = train_X.reshape(-1,28,28,1)
test_X = test_X.reshape(-1,28,28,1)

input = layers.Input(shape=(28,28,1))
t = layers.Conv2D(filters=16,kernel_size=(5,5),strides=1)(input)
# NOTE(review): data is channels-last (28, 28, 1), so BatchNormalization
# axis=1 normalizes over a spatial axis rather than the channel axis;
# axis=-1 is the usual choice -- confirm whether this is intentional.
t = layers.BatchNormalization(axis=1)(t)
t = layers.Activation('relu')(t)
t = layers.MaxPooling2D(pool_size=2,strides=1)(t)
t = layers.Dropout(0.2)(t)
#f1 = layers.ZeroPadding2D(padding=(1))(t)
# Residual block: 3x3 conv + BN, added back onto its input `t`.
f1 = layers.Conv2D(filters=16,kernel_size=(3,3),strides=1,padding='SAME')(t)
f1 = layers.BatchNormalization(axis=1)(f1)
t = layers.Add()([f1,t])
t = layers.Activation('relu')(t)
t = layers.MaxPooling2D(pool_size=2,strides=1)(t)
t = layers.Dropout(0.2)(t)

# Downsample with a strided conv, then begin the next residual block.
t = layers.Conv2D(filters=32,kernel_size=(3,3),strides=2,padding='SAME')(t)
t = layers.BatchNormalization(axis=1)(t)

f2 = layers.Conv2D(filters=32,kernel_size=(3,3),strides=1,padding='SAME')(t)
f2 = layers.BatchNormalization(axis=1)(f2)
Пример #24
0
def construct_models(feature_extractor,
                     embedding_dim,
                     n_centers_per_class,
                     n_classes,
                     lr,
                     sigma,
                     kernel_type="inverse"):
    ''' Creates an RBF model.
        feature_extractor   -> Feature extractor used ("RESNET" or a CONVNET).
        embedding_dim       -> Size of the output embedding.
        n_centers_per_class -> Number of centers per class.
        n_classes           -> Number of classes.
        lr                  -> Learning rate.
        sigma               -> Regularization parameter.
        kernel_type         -> Kernel used ("inverse", "cosine", "gauss").
        Returns (rbf_model, softmax_model, embeddings).
    '''
    # `input` shadowed the builtin in the original; renamed locally.
    input_tensor = layers.Input(shape=(
        32,
        32,
        3,
    ))

    if feature_extractor == "RESNET":
        # NOTE(review): the backbone is declared with input_shape (200, 200, 3)
        # but receives 32 -> 256 upsampled tensors — confirm this matches the
        # intended ResNet50 input size.
        conv_base = ResNet50(weights='imagenet',
                             include_top=False,
                             input_shape=(200, 200, 3))
        # Upsample 32x32 three times (x8) before the pretrained backbone.
        x = layers.UpSampling2D((2, 2))(input_tensor)
        x = layers.UpSampling2D((2, 2))(x)
        x = layers.UpSampling2D((2, 2))(x)
        x = conv_base(x)
        x = layers.Flatten()(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dense(512, activation='relu')(x)
        x = layers.Dropout(0.5)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dense(embedding_dim, activation='relu')(x)
        x = layers.BatchNormalization()(x)
    else:
        # Small VGG-style convnet trained from scratch.
        layers_dim = [32, 64, 512]
        x = layers.Conv2D(layers_dim[0], (3, 3),
                          padding='same',
                          input_shape=[32, 32, 3])(input_tensor)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(layers_dim[0], (3, 3))(x)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(layers_dim[0], (3, 3))(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPooling2D(pool_size=(2, 2))(x)
        x = layers.Dropout(0.25)(x)

        x = layers.Conv2D(layers_dim[1], (3, 3), padding='same')(x)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(layers_dim[1], (3, 3))(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPooling2D(pool_size=(2, 2))(x)
        x = layers.Dropout(0.25)(x)

        x = layers.Flatten()(x)
        x = layers.Dense(layers_dim[2])(x)
        x = layers.Activation('relu')(x)
        x = layers.Dropout(0.5)(x)
        x = layers.Dense(embedding_dim)(x)
        x = layers.Activation('relu')(x)
        x = layers.BatchNormalization()(x)

    # Shared heads (previously duplicated verbatim in both branches):
    # an RBF head over the embedding, and a plain softmax head.
    varkeys_output = RBF(embedding_dim, n_centers_per_class, n_classes,
                         kernel_type)(x)
    plain_output = layers.Activation('softmax')(layers.Dense(n_classes)(x))

    softmax_model = Model(inputs=input_tensor, outputs=plain_output)
    rbf_model = Model(inputs=input_tensor, outputs=varkeys_output)
    embeddings = Model(inputs=input_tensor, outputs=x)

    # The RBF model uses the custom loss parameterized by its final layer.
    rbf_model.compile(loss=custom_loss(rbf_model.layers[-1], sigma, 1),
                      optimizer=optimizers.RMSprop(lr=lr),
                      metrics=['accuracy'])

    softmax_model.compile(loss=keras.losses.categorical_crossentropy,
                          optimizer=optimizers.RMSprop(lr=lr),
                          metrics=['accuracy'])

    return rbf_model, softmax_model, embeddings
Пример #25
0
    def __init__(self, input_shape):
        """Build a VGG16-style Sequential classifier.

        :param input_shape: input tensor shape, e.g. [32, 32, 3]
        """
        super(VGG16, self).__init__()

        weight_decay = 0.000
        self.num_classes = 10

        # (filters, dropout) per conv layer; 'M' marks a 2x2 max-pool.
        # Each conv is followed by ReLU then BatchNorm (post-activation
        # order, matching the original layer sequence), then an optional
        # Dropout. This table replaces ~120 lines of copy-pasted stanzas.
        cfg = [
            (64, 0.3), (64, None), 'M',
            (128, 0.4), (128, None), 'M',
            (256, 0.4), (256, 0.4), (256, None), 'M',
            (512, 0.4), (512, 0.4), (512, None), 'M',
            (512, 0.4), (512, 0.4), (512, None), 'M',
        ]

        model = models.Sequential()
        first = True
        for item in cfg:
            if item == 'M':
                model.add(layers.MaxPooling2D(pool_size=(2, 2)))
                continue
            filters, drop = item
            conv_kwargs = dict(
                padding='same',
                kernel_regularizer=regularizers.l2(weight_decay))
            if first:
                # Only the first layer declares the input shape.
                conv_kwargs['input_shape'] = input_shape
                first = False
            model.add(layers.Conv2D(filters, (3, 3), **conv_kwargs))
            model.add(layers.Activation('relu'))
            model.add(layers.BatchNormalization())
            if drop is not None:
                model.add(layers.Dropout(drop))

        model.add(layers.Dropout(0.5))

        # Classifier head; final layer emits logits (softmax left to the
        # loss, as in the original).
        model.add(layers.Flatten())
        model.add(
            layers.Dense(512,
                         kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(layers.Activation('relu'))
        model.add(layers.BatchNormalization())

        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(self.num_classes))
        # model.add(layers.Activation('softmax'))

        self.model = model
Пример #26
0
def construct_model_STL(feature_extractor, embedding_dim, n_centers_per_class,
                        n_classes, lr, gamma):
    ''' Creates a Soft Triple Loss model.
        feature_extractor   -> Feature extractor used ("RESNET" or a CONVNET).
        embedding_dim       -> Size of the output embedding.
        n_centers_per_class -> Number of centers per class.
        n_classes           -> Number of classes.
        lr                  -> Learning rate.
        gamma               -> Gamma parameter according to the STL paper.
        Returns a training model.
    '''
    # `input` shadowed the builtin in the original; renamed locally.
    input_tensor = layers.Input(shape=(
        32,
        32,
        3,
    ))

    if feature_extractor == "RESNET":
        # NOTE(review): the backbone is declared with input_shape (200, 200, 3)
        # but receives 32 -> 256 upsampled tensors — confirm this matches the
        # intended ResNet50 input size.
        conv_base = ResNet50(weights='imagenet',
                             include_top=False,
                             input_shape=(200, 200, 3))
        # Upsample 32x32 three times (x8) before the pretrained backbone.
        x = layers.UpSampling2D((2, 2))(input_tensor)
        x = layers.UpSampling2D((2, 2))(x)
        x = layers.UpSampling2D((2, 2))(x)
        x = conv_base(x)
        x = layers.Flatten()(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dense(512, activation='relu')(x)
        x = layers.Dropout(0.5)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dense(embedding_dim, activation='relu')(x)
        x = layers.BatchNormalization()(x)
    else:
        # Small VGG-style convnet trained from scratch.
        layers_dim = [32, 64, 512]
        x = layers.Conv2D(layers_dim[0], (3, 3),
                          padding='same',
                          input_shape=[32, 32, 3])(input_tensor)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(layers_dim[0], (3, 3))(x)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(layers_dim[0], (3, 3))(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPooling2D(pool_size=(2, 2))(x)
        x = layers.Dropout(0.25)(x)

        x = layers.Conv2D(layers_dim[1], (3, 3), padding='same')(x)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(layers_dim[1], (3, 3))(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPooling2D(pool_size=(2, 2))(x)
        x = layers.Dropout(0.25)(x)

        x = layers.Flatten()(x)
        x = layers.Dense(layers_dim[2])(x)
        x = layers.Activation('relu')(x)
        x = layers.Dropout(0.5)(x)
        x = layers.Dense(embedding_dim)(x)
        x = layers.Activation('relu')(x)
        x = layers.BatchNormalization()(x)

    # Shared STL head (previously duplicated verbatim in both branches):
    # relaxed-similarity logits followed by a softmax.
    x = RelaxedSimilarity(embedding_dim, n_centers_per_class, n_classes,
                          gamma)(x)
    output = layers.Softmax()(x)
    model = Model(inputs=input_tensor, outputs=output)

    # The loss is parameterized by the RelaxedSimilarity layer (layers[-2]).
    model.compile(optimizer=optimizers.RMSprop(lr=lr),
                  loss=SoftTripleLoss(model.layers[-2]),
                  metrics=['acc'])

    return model
Пример #27
0
def train():
    """Train, evaluate and export the binary text classifier."""
    max_features = 10000
    sequence_length = 250

    # Load the three raw splits.
    raw_train_ds = load_train_dataset()
    raw_val_ds = load_val_dataset()
    raw_test_ds = load_test_dataset()

    # Create the vectorization layer and fit its vocabulary on the raw
    # training text only (labels are dropped by the lambda).
    global vectorization_layer
    vectorization_layer = TextVectorization(
        standardize='lower_and_strip_punctuation',
        max_tokens=max_features,
        output_mode='int',
        output_sequence_length=sequence_length)
    vectorization_layer.adapt(raw_train_ds.map(lambda x, y: x))

    # Vectorize, cache and prefetch every split.
    auto_tune = tf.data.experimental.AUTOTUNE
    train_ds, val_ds, test_ds = [
        ds.map(text_to_vector).cache().prefetch(buffer_size=auto_tune)
        for ds in (raw_train_ds, raw_val_ds, raw_test_ds)
    ]

    embedding_dim = 32

    # Logit-producing model: mean-pooled embeddings with dropout throughout.
    model = tf.keras.Sequential([
        layers.Embedding(max_features + 1, embedding_dim),
        layers.Dropout(0.2),
        layers.GlobalAveragePooling1D(),
        layers.Dropout(0.2),
        layers.Dense(embedding_dim, activation='relu'),
        layers.Dropout(0.2),
        layers.Dense(1)
    ])

    # from_logits=True because the last layer has no activation.
    model.compile(loss=losses.BinaryCrossentropy(from_logits=True),
                  optimizer='adam',
                  metrics=tf.metrics.BinaryAccuracy(threshold=0.0))

    # Fit for a fixed number of epochs, logging per-epoch metrics to CSV.
    history = model.fit(train_ds,
                        validation_data=val_ds,
                        epochs=10,
                        callbacks=[CSVLogger('training.log')])

    # Evaluate on the held-out split.
    loss, accuracy = model.evaluate(test_ds)
    print("Model loss against test dataset = ", loss)
    print("Model accuracy against test dataset = ", accuracy)

    # Wrap raw-string input, the vectorizer and a sigmoid head around the
    # trained model, then persist it for serving.
    export_model = tf.keras.Sequential([
        tf.keras.Input(shape=(1, ), dtype="string"), vectorization_layer,
        model,
        layers.Activation('sigmoid')
    ])

    export_model.compile(loss=losses.BinaryCrossentropy(from_logits=False),
                         optimizer="adam",
                         metrics=['accuracy'])
    export_model.save('saved_model')
Пример #28
0
    def call(self, inputs):
        """Project *inputs* through the layer's trainable kernel matrix."""
        projected = tf.matmul(inputs, self.kernel)
        return projected

    def compute_output_shape(self, input_shape):
        """Return the input shape with the last axis replaced by output_dim."""
        dims = tf.TensorShape(input_shape).as_list()
        return tf.TensorShape(dims[:-1] + [self.output_dim])

    def get_config(self):
        """Serialize the layer configuration, including output_dim."""
        config = super(MyLayer, self).get_config()
        config.update(output_dim=self.output_dim)
        return config

    @classmethod
    def from_config(cls, config):
        # Rebuild the layer from a config dict produced by get_config().
        return cls(**config)


# Build a tiny classifier from the custom layer defined above.
model = tf.keras.Sequential([MyLayer(10), layers.Activation('softmax')])

# The compile step specifies the training configuration.
# NOTE(review): tf.train.RMSPropOptimizer is the TF1-era optimizer API;
# tf.keras code normally uses tf.keras.optimizers.RMSprop — confirm which
# TensorFlow version this example targets.
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Trains for 5 epochs.
# NOTE(review): mid-script import; conventionally this belongs at the top
# of the file.
import numpy as np
# Random features/targets purely to demonstrate the training loop.
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
model.fit(data, labels, batch_size=32, epochs=5)
    def create_model(self):
        """Build the CTC acoustic model.

        Stacks five conv blocks with interleaved dropout and max-pooling,
        flattens the result over the feature axis, and attaches a CTC loss
        head for training alongside a plain softmax model for inference.

            :return t_model: training model (outputs the CTC loss value)
            :return base_model: prediction/test model (outputs softmax
                posteriors per time step)
        """
        input_data = layers.Input(shape=(self.AUDIO_LENGTH,
                                         self.AUDIO_FEATURE_LENGTH,
                                         1))  # (1600, 200, 1)

        # Block 1: 32 filters (first conv has no bias), pool /2.
        conv1 = self.conv_layer_no_bias(32)(input_data)
        drop1 = layers.Dropout(0.05)(conv1)
        conv2 = self.conv_layer(32)(drop1)
        pool1 = self.maxpooling_layer(2)(conv2)

        # Block 2: 64 filters, pool /2.
        drop2 = layers.Dropout(0.05)(pool1)
        conv3 = self.conv_layer(64)(drop2)
        drop3 = layers.Dropout(0.1)(conv3)
        conv4 = self.conv_layer(64)(drop3)
        pool2 = self.maxpooling_layer(2)(conv4)

        # Block 3: 128 filters, pool /2 (total downsampling now x8).
        drop4 = layers.Dropout(0.1)(pool2)
        conv5 = self.conv_layer(128)(drop4)
        drop5 = layers.Dropout(0.15)(conv5)
        conv6 = self.conv_layer(128)(drop5)
        pool3 = self.maxpooling_layer(2)(conv6)

        # Blocks 4-5: 128 filters, pool size 1 (no further downsampling).
        drop6 = layers.Dropout(0.15)(pool3)
        conv7 = self.conv_layer(128)(drop6)
        drop7 = layers.Dropout(0.2)(conv7)
        conv8 = self.conv_layer(128)(drop7)
        pool4 = self.maxpooling_layer(1)(conv8)

        drop8 = layers.Dropout(0.2)(pool4)
        conv9 = self.conv_layer(128)(drop8)
        drop9 = layers.Dropout(0.2)(conv9)
        conv10 = self.conv_layer(128)(drop9)
        pool5 = self.maxpooling_layer(1)(conv10)

        # Flatten the feature axis per time step:
        # assumes AUDIO_LENGTH=1600 and AUDIO_FEATURE_LENGTH=200, so three
        # /2 poolings give (200, 25, 128) -> (200, 3200) — TODO confirm the
        # attribute values match this hard-coded reshape.
        reshape_layer = layers.Reshape((200, 3200))(pool5)
        drop10 = layers.Dropout(0.3)(reshape_layer)
        dense1 = self.dense_layer(128, activation='relu')(drop10)
        drop11 = layers.Dropout(0.3)(dense1)
        dense2 = self.dense_layer(self.SM_OUTPUT_SIZE)(drop11)

        y_pred = layers.Activation('softmax')(dense2)

        # Model used for prediction/testing (no CTC head).
        base_model = models.Model(inputs=input_data,
                                  outputs=y_pred,
                                  name='base_model')
        #base_model.summary()

        # CTC loss inputs: labels plus per-sample input/label lengths.
        y_true = layers.Input(shape=[self.LABEL_MAX_LENGTH])
        y_pred = y_pred
        input_length = layers.Input(shape=[1], dtype='int64')
        label_length = layers.Input(shape=[1], dtype='int64')

        # The Lambda wraps self.ctc_lambda_func, producing a scalar loss.
        ctc_loss = layers.Lambda(self.ctc_lambda_func,
                                 output_shape=(1, ),
                                 name='ctc_loss')([
                                     y_true, y_pred, input_length, label_length
                                 ])

        # Training model: takes audio + labels + lengths, outputs the loss.
        t_model = models.Model(
            inputs=[input_data, y_true, input_length, label_length],
            outputs=ctc_loss,
            name='t_model')
        t_model.summary()

        # Adam optimizer with explicit hyperparameters.
        opt = tf.keras.optimizers.Adam(learning_rate=0.001,
                                       beta_1=0.9,
                                       beta_2=0.999,
                                       epsilon=1e-07)

        # The model's output already *is* the loss, so the loss function
        # simply passes y_pred through.
        t_model.compile(loss={
            'ctc_loss': lambda y_true, y_pred: y_pred
        },
                        optimizer=opt)
        print('[*Info] Create Model Successful, Compiles Model Successful. ')

        return t_model, base_model
    def build_saveuav_small(self):
        """Build the small SaveUAV segmentation network.

        Encoder: three stages, each a same-size conv followed by a strided
        conv that halves resolution.  Bottleneck: six stacked dilated convs
        (rates 1..32) whose outputs are summed.  Decoder: three transpose
        convs with additive skip connections back to the encoder stages.
        Output: 6-class per-pixel softmax.
        """
        num_filters = 16
        size = self.chip_size
        input = layers.Input((size, size, 3))

        # Encoder: record each stage's pre-downsampling activation as a skip.
        x = input
        skips = []
        for mult in (1, 2, 4):
            f = num_filters * mult
            x = layers.Conv2D(filters=f,
                              kernel_size=3,
                              activation="relu",
                              padding="same")(x)
            skips.append(x)
            # Strided conv in place of pooling halves the resolution.
            x = layers.Conv2D(filters=f,
                              kernel_size=3,
                              strides=(2, 2),
                              activation="relu",
                              padding="same")(x)

        # Bottleneck: cascade of dilated convs; sum every intermediate
        # output to fuse multiple receptive-field sizes.
        dilated_outputs = []
        for rate in (1, 2, 4, 8, 16, 32):
            x = layers.Conv2D(filters=num_filters * 8,
                              kernel_size=3,
                              dilation_rate=rate,
                              padding="same",
                              activation="relu")(x)
            dilated_outputs.append(x)
        x = layers.Add()(dilated_outputs)

        # Decoder: upsample, add the matching encoder skip, refine.
        for mult, skip in zip((4, 2, 1), reversed(skips)):
            f = num_filters * mult
            x = layers.Conv2DTranspose(filters=f,
                                       kernel_size=3,
                                       strides=(2, 2),
                                       activation="relu",
                                       padding="same")(x)
            x = layers.Add()([x, skip])
            x = layers.Conv2D(filters=f,
                              kernel_size=3,
                              activation="relu",
                              padding="same")(x)

        # Per-pixel 6-class prediction.
        logits = layers.Conv2D(filters=6, kernel_size=1)(x)
        probabilities = layers.Activation("softmax")(logits)

        return models.Model(input, probabilities)