Code Example #1
# Assumed imports for this snippet: `layers` from Keras, InstanceNormalization
# from keras-contrib (tensorflow_addons provides an equivalent layer).
from keras import layers
from keras_contrib.layers import InstanceNormalization


def residual_block_m(y, nb_channels, _strides=(1, 1), _project_shortcut=False):
    shortcut = y

    # down-sampling is performed with a stride of 2
    y = InstanceNormalization(axis=-1)(y)
    y = layers.ELU()(y)
    y = layers.Conv2D(nb_channels,
                      kernel_size=(3, 3),
                      strides=_strides,
                      padding='same')(y)

    y = InstanceNormalization(axis=-1)(y)
    y = layers.ELU()(y)
    y = layers.Conv2D(nb_channels,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding='same')(y)

    # identity shortcuts used directly when the input and
    # output are of the same dimensions
    if _project_shortcut or _strides != (1, 1):
        # when the dimensions increase projection shortcut is used to
        # match dimensions (done by 1x1 convolutions)
        # when the shortcuts go across feature maps of two sizes,
        # they are performed with a stride of 2
        shortcut = InstanceNormalization(axis=-1)(shortcut)
        shortcut = layers.Conv2D(nb_channels,
                                 kernel_size=(1, 1),
                                 strides=_strides,
                                 padding='same')(shortcut)

    y = layers.add([shortcut, y])

    return y
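
A minimal usage sketch for the block above; the shapes and model wiring are illustrative, not from the source:

# Hypothetical usage: stack two blocks; the strided second block
# triggers the 1x1 projection shortcut.
from keras.models import Model

inputs = layers.Input(shape=(64, 64, 32))
x = residual_block_m(inputs, nb_channels=32)
x = residual_block_m(x, nb_channels=64, _strides=(2, 2))
model = Model(inputs, x)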
Code Example #2
    # Assumed context: the __init__ of a model-wrapper class; requires `os`,
    # `datetime.datetime`, and Keras `layers`/`optimizers` to be imported.
    def __init__(self,
                 INPUT_DIM=None,
                 OUTPUT_DIM=None,
                 LOSS='mse',
                 HIDDEN_DIM_2=None,
                 HIDDEN_DIM_3=None,
                 HIDDEN_DIM_1=None,
                 BATCH_SIZE=None,
                 EPOCHS=1000,
                 DROPOUT=0.2,
                 ACTIVATION=layers.ELU(alpha=0.1),
                 LEARNING_RATE=0.01,
                 METRICS=['mae', 'mse'],
                 REGULARIZER=None,
                 DATA_USED='',
                 VARIABLES='',
                 GPU=None,
                 PATH='./',
                 RESUME_PATH=None,
                 RESUME_EPOCH=0):

        self.input_dim = INPUT_DIM
        self.output_dim = OUTPUT_DIM
        self.hidden_dim_1 = HIDDEN_DIM_1
        self.hidden_dim_2 = HIDDEN_DIM_2
        self.hidden_dim_3 = HIDDEN_DIM_3

        self.loss = LOSS
        self.batch_size = BATCH_SIZE
        self.epochs = EPOCHS
        self.dropout = DROPOUT
        self.activation = ACTIVATION
        self.lr = LEARNING_RATE
        self.optimizer = optimizers.Adam(lr=self.lr)
        self.metrics = METRICS
        self.model_datetime = datetime.now().strftime('%d-%mT%H-%M-%S')
        self.data_used = DATA_USED
        self.variables = VARIABLES
        self.regularizers = REGULARIZER
        self.gpu = GPU
        self.path = PATH
        self.resume_epoch = RESUME_EPOCH
        self.resume_path = RESUME_PATH

        if PATH is not None:
            self.path = PATH + self.model_datetime
        else:
            self.path = self.model_datetime
        if not os.path.exists(self.path):
            # makedirs creates intermediate directories, so the two leaf
            # paths below also create self.path and its checkpoints dir
            os.makedirs(self.path + '/model')
            os.makedirs(self.path + '/checkpoints/weights')
        self.path += '/'
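
A side note on the setup above: on Python 3, `os.makedirs` accepts `exist_ok=True`, so the existence check can be dropped entirely; a sketch with a hypothetical helper name:

# Equivalent Python 3 directory setup (sketch, not from the source).
import os

def prepare_run_dir(base):
    for sub in ('model', 'checkpoints/weights'):
        os.makedirs(os.path.join(base, sub), exist_ok=True)
    return base + '/'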
Code Example #3
# Legacy Keras 1 API: `l` is keras.layers and `l2` is keras.regularizers.l2.
def add_convolutional_block(model, filters, size, subsample):
    model.add(
        l.Convolution2D(filters,
                        size,
                        size,
                        subsample=(subsample, subsample),
                        border_mode='valid',
                        init='glorot_uniform',
                        W_regularizer=l2(0.01)))
    model.add(l.ELU())
    model.add(l.Dropout(0.2))
    return model
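
For reference, the same block under the Keras 2 API, where several argument names changed (a sketch under that assumption):

# Keras 2 renames: Convolution2D -> Conv2D, subsample -> strides,
# border_mode -> padding, init -> kernel_initializer,
# W_regularizer -> kernel_regularizer.
from keras import layers as l
from keras.regularizers import l2

def add_convolutional_block_v2(model, filters, size, subsample):
    model.add(l.Conv2D(filters, (size, size),
                       strides=(subsample, subsample),
                       padding='valid',
                       kernel_initializer='glorot_uniform',
                       kernel_regularizer=l2(0.01)))
    model.add(l.ELU())
    model.add(l.Dropout(0.2))
    return model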
Code Example #4
# Assumed imports: tensorflow as tf, keras with `layers` and backend `K`;
# `Param` and `CenterLoss` are defined elsewhere in the source project.
def create_model(param: Param) -> keras.Model:
    inputs = keras.Input((28, 28, 1))
    x = inputs
    if param.noise_stddev is not None:
        x = layers.GaussianNoise(param.noise_stddev)(x)
    x = layers.Lambda(lambda z: z - K.mean(z, axis=1, keepdims=True))(x)
    # x = layers.Lambda(lambda z: z / K.sqrt(K.var(z, axis=1, keepdims=True)))(x)

    for i in range(len(param.conv_filters)):
        x = layers.Conv2D(param.conv_filters[i],
                          kernel_size=param.kernel_sizes[i],
                          strides=param.strides[i],
                          padding='same')(x)
        x = layers.BatchNormalization(axis=-1)(x)
        x = layers.ELU()(x)
        if param.pool_sizes[i] is not None:
            x = layers.MaxPooling2D(pool_size=param.pool_sizes[i],
                                    strides=param.pool_strides[i])(x)
        if param.conv_dropout_rates[i] is not None:
            x = layers.Dropout(param.conv_dropout_rates[i])(x)
    x = layers.Flatten()(x)

    for units, dropout_rate in zip(param.dense_units,
                                   param.dense_dropout_rates):
        x = layers.Dense(units, activation='elu')(x)
        if dropout_rate is not None:
            x = layers.Dropout(dropout_rate)(x)

    if param.l2_constrained_scale:
        scale = param.l2_constrained_scale
        x = layers.Lambda(lambda z: K.l2_normalize(z, axis=1) * scale)(x)
        outputs = layers.Dense(10,
                               kernel_constraint=keras.constraints.UnitNorm(),
                               use_bias=False)(x)
    else:
        # no softmax here: the cross-entropy loss below expects raw logits
        outputs = layers.Dense(10)(x)

    model = keras.Model(inputs=inputs, outputs=outputs)
    if param.center_loss_margin:
        loss = CenterLoss(param.center_loss_margin)
    else:
        loss = tf.losses.softmax_cross_entropy
    model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
    return model
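
A hypothetical configuration for the factory above. `Param` lives elsewhere in the source project, so the keyword form and every value here are assumptions, chosen only to exercise the plain path without the L2 constraint or center loss:

# Illustrative configuration -- all values are assumptions.
param = Param(noise_stddev=0.1,
              conv_filters=[32, 64],
              kernel_sizes=[(3, 3), (3, 3)],
              strides=[(1, 1), (1, 1)],
              pool_sizes=[(2, 2), None],
              pool_strides=[(2, 2), None],
              conv_dropout_rates=[0.25, None],
              dense_units=[128],
              dense_dropout_rates=[0.5],
              l2_constrained_scale=None,
              center_loss_margin=None)
model = create_model(param)
model.summary()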
Code Example #5
def create_model():
    """
    Create the convolutional neural network from NVIDIA's end-to-end
    self-driving paper. Assumes Keras `layers`, `regularizers` and
    `Sequential` are imported, plus a user-defined `rgb2yuv` function.
    """
    model = Sequential()

    # The model input is a 160x320 RGB frame; crop away the top 50 and
    # bottom 20 rows before feeding it to the network
    model.add(
        layers.Cropping2D(cropping=((50, 20), (0, 0)),
                          data_format="channels_last",
                          input_shape=(160, 320, 3)))

    # Normalize the images to [0,1], as the RGB-to-YUV conversion below expects
    model.add(layers.Lambda(lambda x: x / 255.0))

    # Convert images to YUV color space
    model.add(layers.Lambda(rgb2yuv))

    # Model in NVIDIA's paper
    # Add three 5x5 convolution layers (output depth 24, 36, and 48), each with 2x2 stride
    model.add(
        layers.Conv2D(24, (5, 5),
                      strides=(2, 2),
                      padding='valid',
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.ELU(alpha=1.0))
    model.add(
        layers.Conv2D(36, (5, 5),
                      strides=(2, 2),
                      padding='valid',
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.ELU(alpha=1.0))
    model.add(
        layers.Conv2D(48, (5, 5),
                      strides=(2, 2),
                      padding='valid',
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.ELU(alpha=1.0))

    # Add two 3x3 convolution layers (output depth 64, and 64)
    model.add(
        layers.Conv2D(64, (3, 3),
                      padding='valid',
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.ELU(alpha=1.0))
    model.add(
        layers.Conv2D(64, (3, 3),
                      padding='valid',
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.ELU(alpha=1.0))

    # Add a flatten layer
    model.add(layers.Flatten())

    # Add three fully connected layers
    model.add(layers.Dense(100, kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.ELU(alpha=1.0))
    model.add(layers.Dense(50, kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.ELU(alpha=1.0))
    model.add(layers.Dense(10, kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.ELU(alpha=1.0))
    model.add(layers.Dense(1))

    return model
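
A minimal training sketch for this network; the MSE objective matches the single steering-angle output, but the optimizer, data names and hyperparameters are assumptions:

# Hypothetical training call: X_train holds 160x320x3 frames,
# y_train the corresponding steering angles.
model = create_model()
model.compile(optimizer='adam', loss='mse')
model.fit(X_train, y_train, validation_split=0.2, epochs=5, batch_size=32)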
Code Example #6
def getTextNet():
    # Assumed context: `kl` is keras.layers and `Model` is keras.models.Model;
    # max_wlen, vocab_size, embedding_size, elu_alpha and dropout_rate are
    # module-level globals, and SelfAttention / ResidualCombine1D are custom
    # layers defined elsewhere in the source project.
    words_input = kl.Input(shape=(max_wlen, ), name='words_input')
    padding_masks = kl.Input(shape=(max_wlen, 1), name='padding_masks')
    x2 = kl.Embedding(vocab_size + 1,
                      embedding_size,
                      mask_zero=False,
                      name='w2v_emb')(words_input)
    xk3 = kl.Conv1D(filters=324, kernel_size=3, strides=1, padding='same')(x2)
    xk3 = kl.ELU(alpha=elu_alpha)(xk3)
    xk5 = kl.Conv1D(filters=324, kernel_size=5, strides=1, padding='same')(x2)
    xk5 = kl.ELU(alpha=elu_alpha)(xk5)
    xk7 = kl.Conv1D(filters=324, kernel_size=7, strides=1, padding='same')(x2)
    xk7 = kl.ELU(alpha=elu_alpha)(xk7)
    xk3d2 = kl.Conv1D(filters=324,
                      kernel_size=3,
                      strides=1,
                      dilation_rate=2,
                      padding='same')(x2)
    xk3d2 = kl.ELU(alpha=elu_alpha)(xk3d2)
    xk5d2 = kl.Conv1D(filters=324,
                      kernel_size=5,
                      strides=1,
                      dilation_rate=2,
                      padding='same')(x2)
    xk5d2 = kl.ELU(alpha=elu_alpha)(xk5d2)
    xk7d2 = kl.Conv1D(filters=324,
                      kernel_size=7,
                      strides=1,
                      dilation_rate=2,
                      padding='same')(x2)
    xk7d2 = kl.ELU(alpha=elu_alpha)(xk7d2)
    x2 = kl.Concatenate()([xk3, xk5, xk7, xk3d2, xk5d2, xk7d2])
    #     x2 = kl.BatchNormalization()(x2)
    #     x2 = kl.ELU(alpha=elu_alpha)(x2)
    x2 = kl.Conv1D(filters=100, kernel_size=1, strides=1, padding='same')(x2)
    x2 = kl.BatchNormalization()(x2)
    x2 = kl.ELU(alpha=elu_alpha)(x2)
    # print('x2.shape:',x2.shape)
    x2 = kl.Dropout(dropout_rate)(x2)
    sa_out_x2_1, s_x2_1 = SelfAttention(ch=int(x2.shape[-1]),
                                        name='sa_2_1')([x2, padding_masks])
    sa_out_x2_2, s_x2_2 = SelfAttention(ch=int(x2.shape[-1]),
                                        name='sa_2_2')([x2, padding_masks])
    sa_out_x2_3, s_x2_3 = SelfAttention(ch=int(x2.shape[-1]),
                                        name='sa_2_3')([x2, padding_masks])
    sa_out_x2_4, s_x2_4 = SelfAttention(ch=int(x2.shape[-1]),
                                        name='sa_2_4')([x2, padding_masks])
    #     print(sa_out_x2_4)
    x3 = kl.Concatenate(name='concat_sa_2')(
        [sa_out_x2_1, sa_out_x2_2, sa_out_x2_3, sa_out_x2_4])
    # x3 = kl.ELU(alpha=elu_alpha,name='act_concat_sa_2')(x3)
    x3comb, x3_g1, x3_g2 = ResidualCombine1D(ch_in=int(x3.shape[-1]),
                                             ch_out=100)([x2, x3])
    x3comb = kl.BatchNormalization()(x3comb)
    # x3comb = kl.ELU(alpha=elu_alpha)(x3comb)
    x3comb = kl.Conv1D(filters=100, kernel_size=1, strides=1,
                       padding='same')(x3comb)
    x3comb = kl.BatchNormalization()(x3comb)
    x3comb = kl.ELU()(x3comb)  # note: default alpha, unlike elu_alpha elsewhere

    x3comb = kl.Dropout(dropout_rate)(x3comb)

    sa_out_x3_1, s_x3_1 = SelfAttention(ch=int(x3comb.shape[-1]),
                                        name='sa_3_1')([x3comb, padding_masks])
    sa_out_x3_2, s_x3_2 = SelfAttention(ch=int(x3comb.shape[-1]),
                                        name='sa_3_2')([x3comb, padding_masks])
    sa_out_x3_3, s_x3_3 = SelfAttention(ch=int(x3comb.shape[-1]),
                                        name='sa_3_3')([x3comb, padding_masks])
    sa_out_x3_4, s_x3_4 = SelfAttention(ch=int(x3comb.shape[-1]),
                                        name='sa_3_4')([x3comb, padding_masks])
    x4 = kl.Concatenate(name='concat_sa_3')(
        [sa_out_x3_1, sa_out_x3_2, sa_out_x3_3, sa_out_x3_4])
    # x4 = kl.ELU(alpha=elu_alpha,name='act_concat_sa_3')(x4)
    x4comb, x4_g1, x4_g2 = ResidualCombine1D(ch_in=int(x4.shape[-1]),
                                             ch_out=100)([x3comb, x4])
    x4comb = kl.BatchNormalization()(x4comb)
    # x4comb = kl.ELU(alpha=elu_alpha)(x4comb)
    x4comb = kl.Conv1D(filters=100, kernel_size=1, strides=1,
                       padding='same')(x4comb)
    x4comb = kl.BatchNormalization()(x4comb)
    x4comb = kl.ELU(alpha=elu_alpha)(x4comb)
    # x4comb = kl.Dropout(dropout_rate)(x4comb)

    # sa_out_x4_1,s_x4_1 = SelfAttention(ch=int(x4comb.shape[-1]),name='sa_4_1')([x4comb,padding_masks])
    # sa_out_x4_2,s_x4_2 = SelfAttention(ch=int(x4comb.shape[-1]),name='sa_4_2')([x4comb,padding_masks])
    # sa_out_x4_3,s_x4_3 = SelfAttention(ch=int(x4comb.shape[-1]),name='sa_4_3')([x4comb,padding_masks])
    # sa_out_x4_4,s_x4_4 = SelfAttention(ch=int(x4comb.shape[-1]),name='sa_4_4')([x4comb,padding_masks])
    # x5 = kl.Concatenate(name='concat_sa_4')([sa_out_x4_1,sa_out_x4_2,sa_out_x4_3,sa_out_x4_4])
    # x5 = kl.ELU(alpha=elu_alpha,name='act_concat_sa_4')(x5)
    # x5comb,x5_g1,x5_g2 = ResidualCombine1D(ch_in=int(x5.shape[-1]),ch_out=256)([x4comb,x5])
    # x5comb = kl.BatchNormalization()(x5comb)
    # x5comb = kl.ELU(alpha=elu_alpha)(x5comb)
    # x5comb = kl.Conv1D(filters=256,kernel_size=1,strides=1,padding='same')(x5comb)
    # x5comb = kl.BatchNormalization()(x5comb)
    # x5comb = kl.ELU(alpha=elu_alpha)(x5comb)

    return Model([words_input, padding_masks], x4comb, name='textModel')
Code Example #7
# Excerpt from a larger script: a_out_im_1..3 come from identical Img2TextCA
# calls above this excerpt; Img2TextCA, Text2ImgCA and ResidualCombine2D are
# custom layers defined elsewhere in the source project.
a_out_im_4, beta_im_4 = Img2TextCA(text_ch=int(text_features.shape[-1]),
                                   img_ch=int(image_features.shape[-1]))([
                                       image_features, text_features,
                                       padding_masks
                                   ])
# a_out_im_4 = kl.ELU(alpha=elu_alpha)(a_out_im_4)
a_conc_im_out = kl.Concatenate(name='img2text_concat')(
    [a_out_im_1, a_out_im_2, a_out_im_3, a_out_im_4])
# print('a_conc_im_out.shape:',a_conc_im_out.shape)

img2text_comb, g1, g2 = ResidualCombine2D(ch_in=int(a_conc_im_out.shape[-1]),
                                          ch_out=512)(
                                              [image_features, a_conc_im_out])
img2text_comb = kl.BatchNormalization(
    name='img2text_comb_batchnorm')(img2text_comb)
img2text_comb = kl.ELU(alpha=elu_alpha)(img2text_comb)
img2text_comb = kl.Dropout(dropout_rate)(img2text_comb)
img2text_pool = kl.GlobalAveragePooling2D(
    name='img2text_global_pool')(img2text_comb)


a_out_1, beta_1 = Text2ImgCA(text_ch=int(text_features.shape[-1]),
                             img_ch=int(image_features.shape[-1]))([
                                 text_features, image_features, padding_masks
                             ])
# a_out_1 = kl.ELU(alpha=elu_alpha)(a_out_1)
# print(a_out_1.shape)
a_out_2, beta_2 = Text2ImgCA(text_ch=int(text_features.shape[-1]),
                             img_ch=int(image_features.shape[-1]))([
                                 text_features, image_features, padding_masks
                             ])
Code Example #8
# Legacy Keras 1 API: `l` is keras.layers; `init` is Keras 1's initializer argument.
def add_dense_block(model, size, dropout):
    model.add(l.Dense(size, init='normal'))
    model.add(l.ELU())
    if dropout:
        model.add(l.Dropout(dropout))
    return model
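
And the same block under the Keras 2 API (a sketch, assuming the 'normal' initializer alias that Keras 2.x keeps for RandomNormal):

def add_dense_block_v2(model, size, dropout):
    # Keras 2 rename: init -> kernel_initializer.
    model.add(l.Dense(size, kernel_initializer='normal'))
    model.add(l.ELU())
    if dropout:
        model.add(l.Dropout(dropout))
    return model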