def build_network(self):
        """Actor network: states -> tanh actions scaled by action_range.

        Returns:
            (model, output): the keras Model and its output tensor.
        """
        states = layers.Input(shape=self.state_size, name='states')

        # Three identical hidden blocks: L2-regularized ReLU Dense
        # followed by Gaussian noise for exploration/regularization.
        h = states
        for _ in range(3):
            h = layers.Dense(units=self.num_nodes,
                             activation='relu',
                             kernel_regularizer=regularizers.l2(
                                 self.reg))(h)
            h = layers.GaussianNoise(.1)(h)

        # tanh head in [-1, 1], initialized small to start near-zero actions,
        # then rescaled to the environment's action range.
        output = layers.Dense(
            self.action_size[0],
            activation='tanh',
            kernel_initializer=keras.initializers.RandomUniform(
                minval=-5e-2, maxval=5e-2))(h)
        output = layers.Lambda(lambda i: i * self.action_range)(output)

        return models.Model(inputs=[states], outputs=[output]), output
def set_VGG16_model():
    """Binary classifier on a pretrained VGG16 base.

    Only the layers from 'block4_conv1' onward remain trainable.
    """
    conv_base = VGG16(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
    conv_base.trainable = True

    # Assemble the network: noisy input, VGG16 base, then a dense head.
    model = models.Sequential()
    for piece in (
            layers.GaussianNoise(0.125, input_shape=(224, 224, 3)),
            conv_base,
            layers.Flatten(),
            layers.GaussianNoise(0.25),
            layers.Dropout(0.5),
            layers.Dense(1024, activation='relu'),
            layers.Dropout(0.5),
            layers.Dense(128, activation='relu'),
            layers.Dense(64, activation='relu'),
            layers.Dropout(0.5),
            layers.Dense(1, activation='sigmoid'),
    ):
        model.add(piece)

    # Freeze everything before 'block4_conv1'; unfreeze from there on.
    unfreeze = False
    for layer in conv_base.layers:
        if layer.name == 'block4_conv1':
            unfreeze = True
        layer.trainable = unfreeze

    return model
# model.load_weights(r'')
# Example #3
    def build_generator(self):
        """Generator: (noise, one-hot class label) -> gesture vector.

        Returns:
            Model taking [noise, label] and emitting a linear-activation
            vector of length self.gesture_size.
        """
        noise = kls.Input(shape=(self.latent_dim, ))
        label = kls.Input(shape=(self.num_classes + 1, ))
        x = kls.concatenate([noise, label])

        # Two identical hidden blocks: Dense -> noise -> ReLU -> BatchNorm.
        for _ in range(2):
            x = kls.Dense(256)(x)
            x = kls.GaussianNoise(.1)(x)
            x = kls.Activation('relu')(x)
            x = kls.BatchNormalization(momentum=0.1)(x)

        # Linear output head.
        x = kls.Dense(self.gesture_size)(x)
        x = kls.Activation('linear')(x)
        return Model([noise, label], x)
# Example #4
    def run(self):
        """Train an MLP on PCA-compressed MNIST and plot a confusion matrix.

        Pipeline: load MNIST -> flatten to 784-dim -> standardize with
        train statistics -> PCA to 100 components -> small dense net with
        PCA-scaled Gaussian noise -> confusion-matrix heatmap on test data.
        """
        (train_images, train_target), (test_images, test_target) = mnist.load_data()

        # One-hot encode the digit labels.
        train_target = np_utils.to_categorical(train_target)
        test_target = np_utils.to_categorical(test_target)

        # Flatten 28x28 uint8 images into float32 vectors.
        train_images = train_images.astype('float32').reshape(60000, 28 * 28)
        test_images = test_images.astype('float32').reshape(10000, 28 * 28)

        # Standardize using train-set statistics only (no test leakage).
        scaler = StandardScaler()
        scaler.fit(train_images)
        train_images_sc = scaler.transform(train_images)
        test_images_sc = scaler.transform(test_images)

        # Reduce to NCOMPONENTS principal components.
        # NOTE: a throwaway 500-component PCA fit was removed — its result
        # was never used and it doubled the preprocessing cost.
        NCOMPONENTS = 100
        pca = PCA(n_components=NCOMPONENTS)
        train_images_pca = pca.fit_transform(train_images_sc)
        test_images_pca = pca.transform(test_images_sc)
        # Noise amplitude tied to the spread of the PCA features.
        pca_std = np.std(train_images_pca)

        model = models.Sequential()
        layer = 1
        units = 128

        model.add(layers.Dense(units, input_dim=NCOMPONENTS, activation='relu'))
        model.add(layers.GaussianNoise(pca_std))
        for i in range(layer):
            model.add(layers.Dense(units, activation='relu'))
            model.add(layers.GaussianNoise(pca_std))
            model.add(layers.Dropout(0.1))
        model.add(layers.Dense(10, activation='softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['categorical_accuracy'])

        model.fit(train_images_pca, train_target, epochs=100, batch_size=256, validation_split=0.15, verbose=2)

        # BUG FIX: Sequential.predict_classes() was removed in TF 2.6;
        # argmax over the softmax probabilities is the equivalent prediction.
        predictions = np.argmax(model.predict(test_images_pca, verbose=0), axis=1)

        y_true = np.argmax(test_target, axis=1)
        cm = confusion_matrix(y_true, predictions)

        print('cm: ', cm)

        # Render the confusion matrix as an annotated heatmap.
        ax = plt.subplot()
        sns.heatmap(cm, annot=True, ax=ax, square=True, fmt='g')

        ax.set_xlabel('Predicted')
        ax.set_ylabel('True')
        ax.set_title('Confusion Matrix')
        digit_labels = [str(d) for d in range(10)]
        ax.xaxis.set_ticklabels(digit_labels)
        ax.yaxis.set_ticklabels(digit_labels)

        plt.show()
# Example #5
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions.

        Also builds self.train_fn, which applies one optimizer step given
        (states, dQ/da from the critic, learning_phase).
        """
        # State input.
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Hidden stack: Dense -> BatchNorm -> ReLU -> GaussianNoise.
        # Noise per https://openai.com/blog/better-exploration-with-parameter-noise/
        x = states
        for width in (32, 64, 32):
            x = layers.Dense(units=width)(x)
            x = layers.BatchNormalization()(x)
            x = layers.Activation('relu')(x)
            x = layers.GaussianNoise(1.0)(x)

        # Sigmoid head yields per-dimension values in [0, 1]...
        raw_actions = layers.Dense(units=self.action_size,
                                   activation='sigmoid',
                                   name='raw_actions')(x)

        # ...rescaled to [action_low, action_low + action_range].
        actions = layers.Lambda(
            lambda r: (r * self.action_range) + self.action_low,
            name='actions')(raw_actions)

        self.model = models.Model(inputs=states, outputs=actions)

        # Policy-gradient loss: ascend Q by following the critic's dQ/da.
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        optimizer = optimizers.Adam(lr=self.lr)

        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        # Callable: [states, action_gradients, learning_phase] -> one update.
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
# Example #6
def mk_conv_32(*, channels):
    """32x32x4 conv classifier with dual global-pooling skip features."""
    x0 = kr.Input((32, 32, 4), name='x0')

    # Per-pixel (1x1) channel mixing down to `channels` feature maps.
    t = apply_layers(x0, [
        kr.GaussianNoise(0.025),
        kr.Conv2D(10, 1, name='cconv_0'),
        kr.LeakyReLU(alpha=0.4),
        kr.Conv2D(channels, 1, name='cconv_1'),
        kr.LeakyReLU(alpha=0.4),
    ])

    # First conv tower; its output also feeds the global-pooling branches.
    t = apply_layers(t, [
        kr.GaussianNoise(0.025),
        kr.Conv2D(64, 3, activation='relu'),
        kr.Conv2D(128, 3, activation='relu'),
        kr.Conv2D(256, 3, activation='relu'),
        kr.Conv2D(256, 3, strides=2, activation='relu'),
    ])
    avg_feat = kr.GlobalAveragePooling2D()(t)
    max_feat = kr.GlobalMaxPooling2D()(t)

    # Second conv tower, flattened.
    t = apply_layers(t, [
        kr.Conv2D(196, 3, activation='relu'),
        kr.Conv2D(196, 3, strides=2, activation='relu'),
        kr.Conv2D(196, 3, activation='relu'),
        kr.Flatten(),
    ])

    # Fuse tower output with the pooled skip features.
    merged = kr.Concatenate()([t, max_feat, avg_feat])

    # Dense head -> per-label sigmoid outputs.
    y = apply_layers(merged, [
        kr.Dropout(0.5),
        kr.Dense(512, activation='elu'),
        kr.Dropout(0.5),
        kr.Dense(256, activation='elu'),
        kr.Dropout(0.5),
        kr.Dense(Y_TRAIN.shape[1], activation='sigmoid', name='labs'),
    ])

    m = krm.Model(inputs=[x0], outputs=[y], name='conv_32')
    m.compile(loss=f2_crossentropy,
              optimizer='adam',
              metrics=['binary_accuracy'])

    return m
# Example #7
    def build_model(self):
        """Build actor (policy) network that maps states -> actions.

        Also builds self.train_fn, wired so DDPG can push the critic's
        Q-value gradients through the actor:
            self.actor_local.train_fn([states, action_gradients, 1])
        """
        # Input layer (states).
        states = layers.Input(shape=(self.state_size,), name='states')

        # Hidden layers: Dense -> ReLU -> GaussianNoise (batch-norm omitted).
        net = layers.Dense(units=128)(states)
        net = layers.Activation('relu')(net)
        net = layers.GaussianNoise(1.0)(net)
        net = layers.Dense(units=256)(net)
        net = layers.Activation('relu')(net)
        net = layers.GaussianNoise(1.0)(net)
        net = layers.Dense(units=128)(net)
        net = layers.Activation('relu')(net)
        net = layers.GaussianNoise(1.0)(net)

        # tanh head gives actions in [-1, 1]...
        raw_actions = layers.Dense(units=self.action_size, activation='tanh', name='raw_actions')(net)
        # ...rescaled to [-2, 2] with a Lambda.
        raw_actions = layers.Lambda(lambda x: x * 2.)(raw_actions)

        # BUG FIX: the Model keyword is `inputs`, not `input`; the old
        # spelling fails on modern Keras.
        self.model = models.Model(inputs=states, outputs=raw_actions)

        self.model.summary()

        # Loss: follow the critic's dQ/da to maximize expected Q.
        # `action_gradients` is a placeholder filled in via train_fn.
        action_gradients = layers.Input(shape=(self.action_size,))
        loss = K.mean(-action_gradients * raw_actions)

        # Optimizer and training function for the local actor.
        optimizer = optimizers.Adam(lr=self.actor_local_lr)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights, loss=loss)
        # Callable from the learn step: [states, action_gradients, phase].
        self.train_fn = K.function(inputs=[self.model.input, action_gradients, K.learning_phase()], \
            outputs=[], updates=updates_op)
    def network(self):
        """Build the policy network (states -> linear actions) and train_fn."""
        states = layers.Input(shape=(self.state_dim, ))

        # Two tanh hidden blocks with batch-norm and Gaussian noise.
        h = states
        for _ in range(2):
            h = layers.Dense(32,
                             activation='tanh',
                             kernel_initializer=keras.initializers.RandomNormal(
                                 mean=0.0, stddev=0.1, seed=None),
                             bias_initializer='zeros')(h)
            h = layers.BatchNormalization()(h)
            h = layers.GaussianNoise(self.gaussian_std)(h)

        # Linear action head with fixed exploration noise on the output.
        actions = layers.Dense(
            self.act_dim,
            activation='linear',
            kernel_initializer=keras.initializers.RandomNormal(mean=0.0,
                                                               stddev=0.1,
                                                               seed=None),
            bias_initializer='zeros')(h)
        actions = layers.GaussianNoise(0.5)(actions)

        self.model = models.Model(inputs=states, outputs=actions)

        # Heavily scaled policy-gradient loss from the critic's dQ/da.
        action_gradients = layers.Input(shape=(self.act_dim, ))
        loss = 0.000001 * K.mean(-action_gradients * actions)

        # Optimizer and training function; train_fn also returns the loss.
        optimizer = optimizers.Adam()
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[loss],
            updates=updates_op)
# Example #9
def set_ResNet_model():
    """Binary classifier on a pretrained ResNet50 base.

    Only the layers from 'conv5_block1_1_conv' onward remain trainable.
    """
    conv_base = ResNet50(include_top=False,
                         weights='imagenet',
                         input_shape=(224, 224, 3))
    conv_base.trainable = True

    # Assemble the network: noisy input, ResNet base, dense head.
    model = models.Sequential()
    for piece in (
            layers.GaussianNoise(0.125, input_shape=(224, 224, 3)),
            conv_base,
            layers.Flatten(),
            layers.Dropout(0.5),
            layers.Dense(256, activation='relu'),
            layers.Dropout(0.5),
            layers.Dense(1, activation='sigmoid'),
    ):
        model.add(piece)

    # Freeze everything before 'conv5_block1_1_conv'; unfreeze from there on.
    unfreeze = False
    for layer in conv_base.layers:
        if layer.name == 'conv5_block1_1_conv':
            unfreeze = True
        layer.trainable = unfreeze

    return model
def setVGG16(dropout_rate, lr):
    """VGG16 (randomly initialized) classifier over single-channel inputs.

    Args:
        dropout_rate: dropout applied after each 2048-wide FC layer.
        lr: Adam learning rate.
    """
    main_input = layers.Input([config.img_size, config.img_size, 1])

    # Normalize and lightly perturb the input before the conv base.
    x = layers.BatchNormalization()(main_input)
    x = layers.GaussianNoise(0.01)(x)

    base_model = VGG16(weights=None, input_tensor=x, include_top=False)

    x = Flatten()(base_model.output)

    # Two L2-regularized fully-connected blocks with dropout.
    for _ in range(2):
        x = Dense(
            2048,
            activation='relu',
            kernel_regularizer=l2(0.001),
            bias_regularizer=l2(0.001),
        )(x)
        x = Dropout(dropout_rate)(x)

    predictions = Dense(config.class_num, activation="softmax")(x)

    model = keras.Model(inputs=main_input, outputs=predictions, name='vgg16')

    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(lr),
                  metrics=['categorical_accuracy'])
    return model
# Example #11
def convo_block(y, filtros, num_conv, res=0):
    """Conv block of num_conv 3x3 convs; the first conv downsamples by 2.

    With `res` truthy, adds a strided 1x1 projection shortcut and a final
    ReLU (residual form); otherwise downsamples with 2x2 max pooling.
    """
    shortcut = y if res else None

    stride = 2
    for i in range(num_conv):
        y = layers.Conv2D(filtros,
                          kernel_size=(3, 3),
                          strides=(stride, stride),
                          padding='same')(y)
        stride = 1  # only the first conv is strided
        y = layers.BatchNormalization()(y)
        y = layers.GaussianNoise(0.3)(y)
        # No activation after the last conv (applied after the merge).
        if i < num_conv - 1:
            y = layers.ReLU()(y)

    if res:
        # 1x1 strided projection so the shortcut matches shape, then add.
        shortcut = layers.Conv2D(filtros,
                                 kernel_size=(1, 1),
                                 strides=(2, 2),
                                 padding='same')(shortcut)
        y = layers.add([shortcut, y])
        y = layers.ReLU()(y)
    else:
        y = layers.MaxPooling2D(pool_size=(2, 2))(y)

    return y
# Example #12 — file: models.py, project: HSIYJND/HRWN
def vgg_like_branch(input_tensor, small_mode=True):
    """VGG-style conv branch ending in a 2048-dim LeakyReLU feature vector.

    Args:
        input_tensor: 4D feature map to process.
        small_mode: selects the smaller of two filter schedules.
    """
    filters = [16, 32, 64, 128] if small_mode else [64, 128, 256, 512, 640]

    def lrelu(t):
        # Fresh LeakyReLU(0.2) layer per call, as in the original wiring.
        return L.advanced_activations.LeakyReLU(alpha=0.2)(t)

    t = L.Conv2D(filters[3], (3, 3), padding='same')(input_tensor)
    t = L.BatchNormalization(axis=-1)(t)  # 9-2
    t = lrelu(t)

    t = L.Conv2D(filters[2], (1, 1), padding='same')(t)
    t = lrelu(t)

    t = L.Conv2D(filters[1], (3, 3), padding='same')(t)
    t = lrelu(t)
    t = L.GaussianNoise(stddev=0.2)(t)  # 7-2

    t = L.MaxPool2D(pool_size=(2, 2), padding='same')(t)
    t = L.Conv2D(filters[2], (3, 3), padding='same')(t)  # 5-2
    t = L.BatchNormalization(axis=-1)(t)
    t = lrelu(t)

    t = L.Conv2D(filters[3], (3, 3), padding='same')(t)  # 3-2
    t = lrelu(t)
    t = L.Flatten()(t)
    t = L.Dense(2048)(t)
    return lrelu(t)
# Example #13
def generate_model(sizeH, sizeW, num_classes, num_conv_blocks, fully_depth):
    """Build a residual CNN classifier from convo_block units.

    Args:
        sizeH, sizeW: input height and width (3-channel input).
        num_classes: width of the softmax output (expects >= 1 conv block).
        num_conv_blocks: total conv blocks; first is plain, rest residual.
        fully_depth: number of 512-wide dense blocks before the classifier.
    """
    shape = (sizeH, sizeW, 3)
    filtros = 32
    n_conv = 1

    # Convolutional part. The input layer is created once up front instead
    # of inside the loop's first iteration — same graph, clearer scoping
    # (previously `input_layer` was only bound when the loop ran with i==0).
    input_layer = layers.Input(shape=shape)
    x = convo_block(input_layer, filtros, n_conv)
    for _ in range(1, num_conv_blocks):
        x = convo_block(x, filtros, n_conv, res=1)
        n_conv = n_conv * 2
        # Cap the filter count at 256.
        if filtros < 256:
            filtros = filtros * 2

    x = layers.Flatten()(x)

    # Fully connected part.
    for _ in range(fully_depth):
        x = layers.Dense(512)(x)
        x = layers.BatchNormalization()(x)
        x = layers.GaussianNoise(0.3)(x)
        x = layers.ReLU()(x)

    x = layers.Dense(num_classes, activation='softmax')(x)
    model = models.Model(inputs=[input_layer], outputs=[x])

    return model
# Example #14
def build_model(shape, vocab_size):
    """Build a GRU text classifier plus an auxiliary attention model.

    Args:
        shape: input sequence shape; shape[0] is the time-step count.
        vocab_size: embedding vocabulary size.

    Returns:
        (model, atten_model): softmax classifier and a model that exposes
        the attention weights for inspection.
    """
    input_layer = layers.Input(shape=shape)

    m = layers.Embedding(vocab_size, 64)(input_layer)
    m = layers.Dropout(0.1)(m)

    # Two stacked GRU encoders returning full sequences.
    for _ in range(2):
        m = layers.GRU(
            32,
            return_sequences=True,
            kernel_regularizer=regularizers.l2(0.001))(m)

    # Soft attention over time steps, broadcast back across feature dims.
    atten = layers.Flatten()(m)
    atten = layers.Dense(shape[0], activation='softmax')(atten)
    atten = layers.RepeatVector(32)(atten)
    atten = layers.Permute((2, 1))(atten)

    m = layers.Multiply()([m, atten])

    m = layers.Flatten()(m)
    m = layers.GaussianNoise(0.01)(m)

    # Two L2-regularized dense blocks with tanh activations and dropout.
    for _ in range(2):
        m = layers.Dense(300,
                         activation='linear',
                         kernel_regularizer=regularizers.l2(0.01))(m)
        m = layers.BatchNormalization()(m)
        m = layers.Activation('tanh')(m)
        m = layers.Dropout(0.4)(m)

    m = layers.Dense(len(LABEL_DICT), activation='softmax')(m)

    atten_model = models.Model(inputs=[input_layer], outputs=atten)
    model = models.Model(inputs=[input_layer], outputs=m)

    optimizer = optimizers.Adam(lr=0.001, clipnorm=5.)
    model.compile(optimizer, 'categorical_crossentropy', metrics=['accuracy'])
    model.summary()

    return model, atten_model
# Example #15
def create_complex_model(param: Param) -> keras.Model:
    """Build a complex-valued-style CNN for 28x28x1 (MNIST-shaped) input.

    Real and imaginary parts are carried as two parallel tensors
    (x_re, x_im) combined through `complexize_kernel`-wrapped layers.

    Args:
        param: hyperparameter bundle (conv_filters, kernel_sizes, strides,
            pool sizes/strides, dropout rates, dense_units, and optional
            l2_constrained_scale / center_loss_margin).

    Returns:
        A compiled keras Model with 10 output units.
    """
    inputs = keras.Input((28, 28, 1))
    x = inputs
    # NOTE(review): `param` is guarded against None here but dereferenced
    # unconditionally below — a None `param` would still crash.
    if param is not None:
        x = layers.GaussianNoise(param.noise_stddev)(x)
    x_re = x
    # NOTE(review): this Lambda ignores its input and is applied to an empty
    # list; it presumably builds a zero imaginary plane, and the hard-coded
    # (1, 28, 28, 1) shape looks batch-size-1 specific — verify.
    x_im = layers.Lambda(lambda z: K.zeros((1, 28, 28, 1)))([])

    # Convolutional stage: complex conv -> per-part batch norm ->
    # optional average pooling -> optional dropout, per configured layer.
    for i in range(len(param.conv_filters)):
        x_re, x_im = complexize_kernel(layers.Conv2D,
                                       param.conv_filters[i],
                                       kernel_size=param.kernel_sizes[i],
                                       strides=param.strides[i],
                                       padding='same',
                                       activation=layers.Activation("tanh"))(
                                           x_re, x_im)
        x_re = layers.BatchNormalization(axis=-1)(x_re)
        x_im = layers.BatchNormalization(axis=-1)(x_im)
        if param.pool_sizes[i] is not None:
            pool_size = param.pool_sizes[i]
            pool_strides = param.pool_strides[i]
            x_re = layers.AveragePooling2D(pool_size=pool_size,
                                           strides=pool_strides)(x_re)
            x_im = layers.AveragePooling2D(pool_size=pool_size,
                                           strides=pool_strides)(x_im)
        if param.conv_dropout_rates[i] is not None:
            dropout_rate = param.conv_dropout_rates[i]
            x_re = layers.Dropout(dropout_rate)(x_re)
            x_im = layers.Dropout(dropout_rate)(x_im)

    # Dense stage: complex dense blocks with optional per-block dropout.
    x_re = layers.Flatten()(x_re)
    x_im = layers.Flatten()(x_im)
    for units, dropout_rate in zip(param.dense_units,
                                   param.dense_dropout_rates):
        x_re, x_im = complexize_kernel(layers.Dense,
                                       units,
                                       activation=layers.Activation("tanh"))(
                                           x_re, x_im)
        if dropout_rate is not None:
            x_re = layers.Dropout(dropout_rate)(x_re)
            x_im = layers.Dropout(dropout_rate)(x_im)

    # x = layers.Lambda(lambda d: K.sqrt(K.square(d[0]) + K.square(d[1])))([x_re, x_im])
    # Output head: only the real part feeds the classifier. Optionally
    # L2-normalize features and use a unit-norm, bias-free final layer.
    if param.l2_constrained_scale:
        x = layers.Lambda(lambda z: K.l2_normalize(z, axis=1) * param.
                          l2_constrained_scale)(x_re)
        outputs = layers.Dense(10,
                               kernel_constraint=keras.constraints.UnitNorm(),
                               use_bias=False)(x)
    else:
        outputs = layers.Dense(10)(x_re)
    model = keras.Model(inputs=inputs, outputs=outputs)

    # Loss: margin-based center loss if configured, else softmax CE.
    if param.center_loss_margin:
        loss = CenterLoss(param.center_loss_margin)
    else:
        loss = tf.losses.softmax_cross_entropy
    model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
    return model
# Example #16
def unet():
    """Build a small U-Net mapping 256x256 RGB images to a 1-channel mask.

    Relies on module-level config: NET_SCALING, GAUSSIAN_NOISE, EDGE_CROP,
    and the `upsample` layer factory.
    """
    input_img = layers.Input((256, 256, 3), name='RGB_Input')
    pp_in_layer = input_img
    # Optionally downscale the input before processing.
    if NET_SCALING is not None:
        pp_in_layer = layers.AvgPool2D(NET_SCALING)(pp_in_layer)

    pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(pp_in_layer)
    pp_in_layer = layers.BatchNormalization()(pp_in_layer)

    # --- Encoder: conv pairs with 2x2 max-pool downsampling ---
    c1 = layers.Conv2D(8, (3, 3), activation='relu',
                       padding='same')(pp_in_layer)
    c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c1)
    p1 = layers.MaxPooling2D((2, 2))(c1)

    c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(p1)
    c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c2)
    p2 = layers.MaxPooling2D((2, 2))(c2)

    c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(p2)
    c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c3)
    p3 = layers.MaxPooling2D((2, 2))(c3)

    c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(p3)
    c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c4)
    p4 = layers.MaxPooling2D(pool_size=(2, 2))(c4)

    # Bottleneck.
    c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(p4)
    c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(c5)

    # --- Decoder: upsample + skip concatenation + conv pairs ---
    u6 = upsample(64, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = layers.concatenate([u6, c4])
    c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(u6)
    c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c6)

    u7 = upsample(32, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = layers.concatenate([u7, c3])
    c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(u7)
    c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c7)

    u8 = upsample(16, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = layers.concatenate([u8, c2])
    c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(u8)
    c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c8)

    u9 = upsample(8, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = layers.concatenate([u9, c1], axis=3)
    c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(u9)
    c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c9)

    # 1x1 sigmoid output; crop then re-pad to suppress edge artifacts.
    d = layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
    d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
    d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)
    if NET_SCALING is not None:
        d = layers.UpSampling2D(NET_SCALING)(d)

    # BUG FIX: was `models.Model(input[input_img], outputs=[d])`, which
    # subscripts the `input` builtin and raises TypeError at runtime;
    # the correct keyword is `inputs`.
    seg_model = models.Model(inputs=[input_img], outputs=[d])
    return seg_model
def res_dense_block_explosive(inputs, dim, activation='linear'):
    """Attention + dense residual block; concatenates `inputs` back in twice."""
    branch = attention_2d_block(inputs)
    branch = layers.BatchNormalization()(branch)
    branch = layers.GaussianNoise(0.3)(branch)
    # First skip-concat, then the dense projection.
    branch = layers.Concatenate()([inputs, branch])
    branch = layers.Dense(dim, activation=activation)(branch)
    branch = layers.BatchNormalization()(branch)
    # Second skip-concat and a final normalization.
    branch = layers.Concatenate()([inputs, branch])
    return layers.BatchNormalization()(branch)
# Example #18
    def build_discriminator(self):
        """Discriminator: gesture vector -> (validity, class label) heads."""
        inputs = kls.Input(shape=(self.gesture_size, ))

        # Two noisy dense blocks.
        x = inputs
        for _ in range(2):
            x = kls.GaussianNoise(.4)(x)
            x = kls.Dense(300)(x)
            x = kls.Activation('relu')(x)
        x = kls.Dropout(0.3)(x)

        # Heads: real/fake validity (sigmoid) and class label
        # (softmax over num_classes + 1, the extra slot for "fake").
        validity = kls.Dense(1, activation="sigmoid",
                             name='d_output_source')(x)
        label = kls.Dense(self.num_classes + 1,
                          activation="softmax",
                          name='d_output_class')(x)

        return Model(inputs, [validity, label])
# Example #19
def bottleneck_Block(input,
                     out_filters,
                     strides=(1, 1),
                     with_conv_shortcut=False):
    """ResNet bottleneck: 1x1 reduce -> 3x3 (strided) -> 1x1 expand + shortcut.

    The parameter keeps its original (builtin-shadowing) name `input`
    for caller compatibility.
    """
    expansion = 4
    de_filters = int(out_filters / expansion)

    def bn_noise(t):
        # BatchNorm + GaussianNoise tail applied after every conv.
        t = BatchNormalization(axis=3)(t)
        return kl.GaussianNoise(GNOISE)(t)

    # 1x1 channel reduction.
    x = Conv2D(de_filters, 1, use_bias=False,
               kernel_initializer='he_normal')(input)
    x = Activation('relu')(bn_noise(x))

    # 3x3 spatial conv carries the stride.
    x = Conv2D(de_filters,
               3,
               strides=strides,
               padding='same',
               use_bias=False,
               kernel_initializer='he_normal')(x)
    x = Activation('relu')(bn_noise(x))

    # 1x1 expansion back to out_filters; no activation before the add.
    x = Conv2D(out_filters, 1, use_bias=False,
               kernel_initializer='he_normal')(x)
    x = bn_noise(x)

    if with_conv_shortcut:
        # Projection shortcut to match filter count and stride.
        residual = Conv2D(out_filters,
                          1,
                          strides=strides,
                          use_bias=False,
                          kernel_initializer='he_normal')(input)
        residual = BatchNormalization(axis=3)(residual)
        x = add([x, residual])
    else:
        x = add([x, input])

    return Activation('relu')(x)
# Example #20
def build_discriminator():
    """Classifier-style discriminator: 16-dim input -> 8-way softmax label."""
    inputs = kls.Input(shape=(16, ))

    # Two noisy dense blocks.
    x = inputs
    for _ in range(2):
        x = kls.GaussianNoise(.4)(x)
        x = kls.Dense(300)(x)
        x = kls.Activation('relu')(x)
    x = kls.Dropout(0.3)(x)

    label = kls.Dense(8, activation="softmax", name='d_output_class')(x)

    discriminator = Model(inputs, label)

    # Adam with a slow decay schedule.
    discriminator.compile(
        loss='categorical_crossentropy',
        optimizer=Adam(0.0002, .5, decay=1e-7),
        metrics=['accuracy'])

    return discriminator
# Example #21
def mk_conv_16(channels, loss_weights=None, mode='joint'):
    """16x16 conv net whose output head depends on `mode`.

    Modes: 'joint' (one multi-label sigmoid head), 'binary' (a 2-way
    softmax head per label), 'indiv' (a single 2-way softmax head).
    """
    x0 = kr.Input((16, 16, channels), name='x0')

    # Conv trunk: stride-1 3x3 stack finishing with a 2x2 conv.
    feats = apply_layers(x0, [
        kr.GaussianNoise(0.05),
        kr.Conv2D(32, 3, strides=1, activation='relu', name='conv16_c0'),
        kr.Conv2D(128, 3, strides=1, activation='relu', name='conv16_c1'),
        kr.Conv2D(196, 3, strides=1, activation='relu', name='conv16_c2'),
        kr.Conv2D(256, 3, strides=1, activation='relu', name='conv16_c3'),
        kr.Conv2D(196, 3, strides=1, activation='relu', name='conv16_c4'),
        kr.Conv2D(196, 3, strides=1, activation='relu', name='conv16_c5'),
        kr.Conv2D(196, 3, strides=1, activation='relu', name='conv16_c6'),
        kr.Conv2D(128, 2, strides=1, activation='relu', name='conv16_c7'),
    ])

    # Dense feature head shared by every output mode.
    feats = apply_layers(feats, [
        kr.Flatten(),
        kr.Dense(128, activation='elu', name='conv16_d0'),
        kr.Dropout(0.5),
        kr.Dense(128, activation='elu', name='conv16_d1'),
    ])

    loss = f2_crossentropy
    if mode == 'joint':
        # One multi-label sigmoid head over all NL labels.
        ys = [kr.Dense(NL, activation='sigmoid', name='labs')(feats)]
        metrics = ['binary_accuracy']
    elif mode == 'binary':
        # A separate 2-way softmax head per label.
        ys = [
            kr.Dense(2, activation='softmax', name=f'lab_{lab}')(feats)
            for lab in LABELS
        ]
        metrics = ['categorical_accuracy']
    elif mode == 'indiv':
        # A single 2-way softmax head.
        ys = [kr.Dense(2, activation='softmax', name='lab')(feats)]
        metrics = ['categorical_accuracy']
    else:
        raise ValueError(f'invalid mode {mode}')

    m = krm.Model(inputs=[x0], outputs=ys, name=f'conv_16_{mode}')
    m.compile(loss=loss, metrics=metrics, optimizer='adam')

    return m
# Example #22
def make_dsae(image_size_x=None, image_size_y=None, n_channels=3):
    """Assemble a small convolutional VAE (encoder + decoder).

    The encoder maps a flattened image to (z_mean, z_log_var, z); the
    decoder maps a latent sample back to a flat sigmoid image.
    NOTE(review): relies on a module-level `sampling` function for the
    reparameterization trick.

    Returns:
        (vae, encoder, decoder, inputs, outputs, [z_mean, z_log_var, z])
    """
    flat_dim = image_size_x * image_size_y * n_channels
    hidden_dim = 250
    latent_dim = 5

    # ---- encoder ----
    inputs = Input(shape=(flat_dim, ), name='encoder_input')
    h = kl.Reshape((image_size_x, image_size_y, n_channels))(inputs)
    h = kl.Conv2D(32, (5, 5), padding="same")(h)
    h = kl.Conv2D(16, (3, 3), padding="same")(h)
    h = kl.GaussianNoise(0.001)(h)
    h = kl.Flatten()(h)
    h = Dense(5)(h)
    h = Dense(latent_dim)(h)
    h = Dense(image_size_x * image_size_y)(h)
    z_mean = Dense(latent_dim, name='z_mean')(h)
    z_log_var = Dense(latent_dim, name='z_log_var')(h)
    # Reparameterization trick; output_shape is not required by the
    # TensorFlow backend but is passed explicitly.
    z = Lambda(sampling, output_shape=(latent_dim, ),
               name='z')([z_mean, z_log_var])
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')

    # ---- decoder ----
    latent_inputs = Input(shape=(latent_dim, ), name='z_sampling')
    d = Dense(hidden_dim, activation='relu')(latent_inputs)
    d = kl.Dropout(0.5)(d)
    d = Dense(flat_dim, activation='sigmoid')(d)
    decoder = Model(latent_inputs, d, name='decoder')

    # ---- end-to-end VAE: decode the sampled latent (index 2 = z) ----
    outputs = decoder(encoder(inputs)[2])
    output_tensors = [z_mean, z_log_var, z]
    vae = Model(inputs, outputs, name='vae_mlp')
    return vae, encoder, decoder, inputs, outputs, output_tensors
# Example #23
# 0
def basic_Block(input, out_filters, strides=(1, 1), with_conv_shortcut=False):
    """Residual block: two 3x3 convs, each followed by BN + Gaussian noise.

    When `with_conv_shortcut` is set, a strided 1x1 projection (BN only,
    no noise) matches the shortcut to the main path; otherwise the
    identity input is added directly.
    """
    def bn_noise(t):
        # Shared post-conv normalization: BN then noise regularizer (#NCFC).
        t = BatchNormalization(axis=3)(t)
        return kl.GaussianNoise(GNOISE)(t)

    out = bn_noise(conv3x3(input, out_filters, strides))
    out = Activation('relu')(out)
    out = bn_noise(conv3x3(out, out_filters))

    if with_conv_shortcut:
        shortcut = Conv2D(out_filters,
                          1,
                          strides=strides,
                          use_bias=False,
                          kernel_initializer='he_normal')(input)
        shortcut = BatchNormalization(axis=3)(shortcut)
        out = add([out, shortcut])
    else:
        out = add([out, input])

    return Activation('relu')(out)
# Example #24
# 0
def tiny_model():
    """Small 1-D convnet over variable-length FEATURE_SIZE frames.

    Ends in a softmax over all allophone classes (max mapped id + 1).
    """
    n_classes = max(allophone_mapping.values()) + 1
    stack = [
        layers.GaussianNoise(0.1, input_shape=(None, FEATURE_SIZE)),
        layers.Conv1D(25, 3, activation="relu"),
        layers.MaxPooling1D(3),
        layers.Conv1D(50, 3, activation="relu"),
        layers.GlobalMaxPooling1D(),
        layers.Dense(50, activation="relu"),
        layers.Dropout(0.5),
        layers.Dense(25, activation="relu"),
        layers.Dropout(0.5),
        layers.Dense(n_classes, activation="softmax"),
    ]
    model = keras.Sequential()
    for layer in stack:
        model.add(layer)
    return model
# Example #25
# 0
def basic_block(y, K, args, ishape=0, residual=0, tlist=None):
    """Stack of `args.autonconv` 3x3 convs with optional residual shortcut.

    Args:
        y: input tensor.
        K: number of filters for every conv in the block.
        args: config namespace; reads autonconv, autocdwise, autonobn,
            da_gauss.
        ishape: unused here; kept for interface compatibility.
        residual: when truthy, the last conv uses stride 2 and a strided
            1x1 projection shortcut is added; otherwise a 2x2 max-pool
            downsamples at the end.
        tlist: optional list that collects post-activation tensors.
            A fresh list is created per call when omitted — the previous
            mutable default `[]` was shared across calls and accumulated
            tensors from earlier model builds.

    Returns:
        The block's output tensor.
    """
    if tlist is None:
        tlist = []

    if (residual):
        x = y

    # Per-conv strides: all 1, except the last conv (stride 2) when
    # building a residual (downsampling) block.  Renamed from `str`,
    # which shadowed the builtin.
    strides = np.ones(args.autonconv)
    if (residual):
        strides[args.autonconv - 1] = 2
    strides = np.int32(strides)

    for i in range(args.autonconv):
        if (args.autocdwise == True):
            y = layers.SeparableConv2D(K,
                                       kernel_size=(3, 3),
                                       strides=(strides[i], strides[i]),
                                       padding='same')(y)
        else:
            y = layers.Conv2D(K,
                              kernel_size=(3, 3),
                              strides=(strides[i], strides[i]),
                              padding='same')(y)

        if (args.autonobn == False):
            y = layers.BatchNormalization()(y)
            if (args.da_gauss != 0.0):
                # NOTE(review): stddev is hard-coded to 0.3 while
                # args.da_gauss only acts as an on/off switch — confirm
                # this is intended (same pattern appears in auto_model).
                y = layers.GaussianNoise(0.3)(y)

        # Residual blocks skip the ReLU on the final conv (applied after
        # the shortcut add instead).
        if (residual == 0) | (i < args.autonconv - 1):
            y = layers.ReLU()(y)
            tlist.append(y)

    if (residual):
        # 1x1 projection shortcut matching the stride-2 downsample.
        if (args.autocdwise == True):
            x = layers.SeparableConv2D(K,
                                       kernel_size=(1, 1),
                                       strides=(2, 2),
                                       padding='same')(x)
        else:
            x = layers.Conv2D(K,
                              kernel_size=(1, 1),
                              strides=(2, 2),
                              padding='same')(x)
        y = layers.add([x, y])
        y = layers.ReLU()(y)
        tlist.append(y)
    else:
        y = layers.MaxPooling2D(pool_size=(2, 2))(y)
    return y
# Example #26
# 0
 def make_model(self, obs_shape, act_shape):
     """Create the regression model and store it in ``self.model``.

     When ``self.model_type == "neural"``, builds and compiles a small
     dense Keras regressor (MSE loss, Adam); otherwise falls back to a
     scikit-learn RandomForestRegressor.

     Args:
         obs_shape: input (observation) shape for the network.
         act_shape: number of output units (action dimension).
     """
     if self.model_type == "neural":
         input_shape = obs_shape
         inputs = kl.Input(shape=input_shape, name='encoder_input')
         x = kl.Dense(16)(inputs)
         x = kl.Dropout(0.5)(x)
         #x = kl.LSTM(10, return_sequences=True, stateful=True)(x)
         # Very small Gaussian noise on activations as a mild regularizer.
         x = kl.GaussianNoise(0.00001)(x)
         x = kl.Dense(8)(x)
         x = kl.Dropout(0.5)(x)
         il_output = kl.Dense(act_shape)(x)
         # NOTE(review): a model name containing a space may be rejected
         # by some Keras/TF versions — confirm 'IL node' is accepted.
         self.model = keras.models.Model(inputs, [il_output],
                                         name='IL node')
         self.model.compile(optimizer='adam', loss="mse")
     else:
         # Random-forest fallback; oob_score enables out-of-bag scoring.
         self.model = RFR(max_depth=20, criterion="mse", oob_score=True)
# Example #27
# 0
def auto_model(args, num_classes):
    """Classification head on top of the FCN backbone.

    Flattens the FCN features, applies `args.autodlayers` dense blocks
    (Dense -> optional BN -> optional Gaussian noise -> ReLU), then a
    softmax over `num_classes`.

    Returns:
        A Keras Model from the FCN input to the softmax output.
    """
    # Renamed from `input`, which shadowed the builtin.
    [inp, x, _] = FCN(args)

    x = layers.Flatten()(x)
    for _ in range(args.autodlayers):
        x = layers.Dense(args.autodsize)(x)
        if (args.autonobn == False):
            x = layers.BatchNormalization()(x)
            if (args.da_gauss != 0.0):
                # NOTE(review): stddev is hard-coded to 0.3 while
                # args.da_gauss only toggles the layer on — confirm this
                # is intended (same pattern as basic_block).
                x = layers.GaussianNoise(0.3)(x)
        x = layers.ReLU()(x)

    x = layers.Dense(num_classes, activation='softmax')(x)
    model = models.Model(inputs=[inp], outputs=[x])

    return model
# Example #28
# 0
def create_model(param: Param) -> keras.Model:
    """Build and compile a small convnet for 28x28x1 inputs.

    Pipeline: optional input Gaussian noise, per-sample mean centering,
    a stack of Conv2D/BN/ELU blocks (each with optional pooling and
    dropout), dense layers, and either a plain 10-way logit head or an
    L2-constrained one.  Loss is softmax cross-entropy unless a
    center-loss margin is configured.
    """
    inputs = keras.Input((28, 28, 1))
    net = inputs
    if param.noise_stddev is not None:
        net = layers.GaussianNoise(param.noise_stddev)(net)
    # Center each sample by subtracting its mean along axis 1.
    net = layers.Lambda(lambda z: z - K.mean(z, axis=1, keepdims=True))(net)
    # x = layers.Lambda(lambda z: z / K.sqrt(K.var(z, axis=1, keepdims=True)))(x)

    for i, filters in enumerate(param.conv_filters):
        net = layers.Conv2D(filters,
                            kernel_size=param.kernel_sizes[i],
                            strides=param.strides[i],
                            padding='same')(net)
        net = layers.BatchNormalization(axis=-1)(net)
        net = layers.ELU()(net)
        # pool_strides is only consulted when a pool size is configured.
        if param.pool_sizes[i] is not None:
            net = layers.MaxPooling2D(pool_size=param.pool_sizes[i],
                                      strides=param.pool_strides[i])(net)
        if param.conv_dropout_rates[i] is not None:
            net = layers.Dropout(param.conv_dropout_rates[i])(net)
    net = layers.Flatten()(net)

    for units, drop in zip(param.dense_units, param.dense_dropout_rates):
        net = layers.Dense(units, activation='elu')(net)
        if drop is not None:
            net = layers.Dropout(drop)(net)

    if param.l2_constrained_scale:
        # L2-constrained softmax: unit-normalize features, scale, and use
        # a bias-free Dense with unit-norm weight columns.
        scale = param.l2_constrained_scale
        net = layers.Lambda(lambda z: K.l2_normalize(z, axis=1) * scale)(net)
        outputs = layers.Dense(10,
                               kernel_constraint=keras.constraints.UnitNorm(),
                               use_bias=False)(net)
    else:
        outputs = layers.Dense(10)(net)

    model = keras.Model(inputs=inputs, outputs=outputs)
    loss = (CenterLoss(param.center_loss_margin)
            if param.center_loss_margin else tf.losses.softmax_cross_entropy)
    model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
    return model
# Example #29
# 0
def get_model0(Input_img_shape=(300, 300, 3)):
    """Densely connected dilated-conv segmentation model.

    Builds BLOCK_COUNT dense blocks; each concatenates all earlier skip
    tensors with multi-scale dilated convolutions of the current features,
    then bottlenecks.  Output is a 1-channel sigmoid mask with an
    EDGE_CROP border cropped and zero-padded back.

    Args:
        Input_img_shape: (H, W, C) of the RGB input.

    Returns:
        A Keras Model mapping the input image to the mask.
    """
    def conv_bn(x, filt, dl_rate=(1, 1), preblock=False):
        # 3x3 linear conv; BN + ELU unless `preblock`, in which case the
        # caller normalizes after concatenation.
        y = layers.Convolution2D(filt, (3, 3),
                                 activation='linear',
                                 padding='same',
                                 dilation_rate=dl_rate,
                                 use_bias=False)(x)
        if preblock:
            return y
        y = layers.BatchNormalization()(y)
        return layers.Activation('elu')(y)

    in_layer = layers.Input(Input_img_shape, name='RGB_Input')
    pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(in_layer)
    pp_in_layer = layers.BatchNormalization()(pp_in_layer)

    c = conv_bn(pp_in_layer, BASE_DEPTH // 2)
    c = conv_bn(c, BASE_DEPTH // 2)
    c = conv_bn(c, BASE_DEPTH)

    # Number of dilation scales; loop-invariant, so hoisted out of the
    # block loop (it was recomputed every iteration).
    depth_steps = int(np.log2(Input_img_shape[0]) - 2)

    skip_layers = [pp_in_layer]
    for j in range(BLOCK_COUNT):
        d = layers.concatenate(skip_layers + [
            conv_bn(c, BASE_DEPTH * 2**j, (2**i, 2**i), preblock=True)
            for i in range(depth_steps)
        ])
        d = layers.SpatialDropout2D(SPATIAL_DROPOUT)(d)
        d = layers.BatchNormalization()(d)
        d = layers.Activation('elu')(d)
        # bottleneck
        d = conv_bn(d, BASE_DEPTH * 2**(j + 1))
        skip_layers += [c]
        c = d

    d = layers.Convolution2D(1, (1, 1), activation='sigmoid',
                             padding='same')(d)
    # Crop then zero-pad the mask border to suppress edge artifacts.
    d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
    d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)
    seg_model = models.Model(inputs=[in_layer], outputs=[d])
    return seg_model
# Example #30
# 0
# File: gan.py  Project: zhf459/GAN-Sandbox
def discriminator_network(x):
    """Map an image tensor to a single real/fake sigmoid probability."""
    def act_and_drop(t):
        # Shared post-layer treatment: LeakyReLU followed by 25% dropout.
        t = layers.advanced_activations.LeakyReLU()(t)
        return layers.Dropout(0.25)(t)

    # Gaussian noise on the discriminator input.
    h = layers.GaussianNoise(stddev=0.2)(x)

    h = layers.Conv2D(64, kernel_size, **conv_layer_keyword_args)(h)
    h = act_and_drop(h)

    h = layers.Conv2D(128, kernel_size, **conv_layer_keyword_args)(h)
    h = act_and_drop(h)

    h = layers.Flatten()(h)

    h = act_and_drop(layers.Dense(1024)(h))

    return layers.Dense(1, activation='sigmoid')(h)