Example #1
def model(is_train, drop_rate):
    # input : 227x227x3
    m = K.Sequential(name="crack_detection")
    m.add(
        layers.Conv2D(24, (20, 20),
                      strides=2,
                      padding="same",
                      activation='relu'))
    m.add(layers.MaxPooling2D((7, 7), strides=2, padding="same"))
    m.add(layers.BatchNormalization(trainable=is_train))

    m.add(
        layers.Conv2D(48, (15, 15),
                      strides=2,
                      padding="valid",
                      activation='relu'))
    m.add(layers.MaxPooling2D((4, 4), strides=2, padding="valid"))
    m.add(layers.BatchNormalization(trainable=is_train))

    m.add(
        layers.Conv2D(96, (10, 10),
                      strides=2,
                      padding="valid",
                      activation='relu'))
    m.add(layers.Flatten())

    m.add(layers.Dropout(drop_rate))
    m.add(layers.Dense(2))
    return m
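A minimal usage sketch for the builder above, assuming the aliases the snippet implies (`from tensorflow import keras as K`, `from tensorflow.keras import layers`); the optimizer, loss and dummy input below are illustrative, not from the original project.

import numpy as np
from tensorflow import keras as K

m = model(is_train=True, drop_rate=0.5)
m.build(input_shape=(None, 227, 227, 3))  # no input_shape on the first layer, so build explicitly
m.compile(optimizer='adam',
          loss=K.losses.SparseCategoricalCrossentropy(from_logits=True))  # Dense(2) returns logits
m.summary()
logits = m.predict(np.zeros((1, 227, 227, 3), dtype='float32'))  # -> shape (1, 2)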
Example #2
def create_disc(Xt,
                Ct,
                img_shape=(28, 28, 1),
                filter_size=3,
                strides=[2, 2],
                filters=[64, 128]):

    with tf.name_scope("Disc"):
        X = kl.Input(img_shape, tensor=Xt, name="X")
        C = kl.Input(img_shape, tensor=Ct, name="C")

        layer = kl.concatenate([X, C], axis=1)
        layer = kl.GaussianNoise(stddev=0.1)(layer)
        # Discriminator

        layer = kl.Conv2D(filters=filters[0],
                          kernel_size=filter_size,
                          padding="same",
                          strides=2)(layer)
        layer = kl.LeakyReLU()(layer)

        for l in range(1, len(filters)):
            conv = kl.Conv2D(filters=filters[l],
                             kernel_size=filter_size,
                             padding="same",
                             strides=strides[l])(layer)
            layer = kl.LeakyReLU()(conv)
            layer = kl.Dropout(0.2)(layer)
            layer = kl.BatchNormalization()(layer)

        layer = kl.Flatten()(layer)
        D_out = kl.Dense(1, activation="sigmoid")(layer)

        model = k.Model(inputs=[X, C], outputs=D_out)
    return model
Example #3
    def testKerasModelHealthyPredictAndFitCalls(self):
        """Test a simple healthy keras model runs fine under the callback."""
        check_numerics_callback.enable_check_numerics()

        model = models.Sequential()
        model.add(
            layers.Dense(units=100,
                         input_shape=(5, ),
                         use_bias=False,
                         activation="relu",
                         kernel_initializer="ones"))
        model.add(layers.BatchNormalization())
        model.add(layers.Dropout(0.5))
        model.add(
            layers.Dense(units=1,
                         activation="linear",
                         kernel_initializer="ones"))

        model.compile(loss="mse",
                      optimizer=optimizer_v2.gradient_descent.SGD(1e-3))

        batch_size = 16
        xs = np.zeros([batch_size, 5])
        ys = np.ones([batch_size, 1])

        outputs = model.predict(xs)
        self.assertEqual(outputs.shape, (batch_size, 1))

        epochs = 100
        history = model.fit(xs, ys, epochs=epochs, verbose=0)
        self.assertEqual(len(history.history["loss"]), epochs)
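The test above calls TensorFlow's internal check_numerics_callback module directly; the same behaviour is exposed publicly as tf.debugging.enable_check_numerics(). A sketch of the equivalent user-facing setup, mirroring the model in the test:

import numpy as np
import tensorflow as tf

tf.debugging.enable_check_numerics()  # raise as soon as any op produces NaN/Inf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, input_shape=(5,), use_bias=False,
                          activation="relu", kernel_initializer="ones"),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1, activation="linear", kernel_initializer="ones"),
])
model.compile(loss="mse", optimizer=tf.keras.optimizers.SGD(1e-3))
model.fit(np.zeros([16, 5]), np.ones([16, 1]), epochs=2, verbose=0)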
Example #4
def create_mem_network():
    sentence = layers.Input(shape=(story_maxlen,), dtype=tf.int32)
    encoded_sentence = layers.Embedding(input_dim=vocab_size, output_dim=50)(sentence)
    encoded_sentence = layers.Dropout(0.3)(encoded_sentence)

    question = layers.Input(shape=(query_maxlen,), dtype=tf.int32)
    encoded_ques = layers.Embedding(input_dim=vocab_size, output_dim=50)(question)
    encoded_ques = layers.Dropout(0.3)(encoded_ques)
    encoded_ques = layers.LSTM(50)(encoded_ques)
    encoded_ques = layers.RepeatVector(story_maxlen)(encoded_ques)

    merged = layers.add([encoded_sentence, encoded_ques])
    merged = layers.LSTM(50)(merged)
    merged = layers.Dropout(0.3)(merged)
    preds = layers.Dense(vocab_size, activation=None)(merged)
    return models.Model(inputs=[sentence, question], outputs=preds)
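The network above emits raw logits (the final Dense has activation=None), so a from_logits loss is the natural pairing. A hedged compile sketch, assuming story_maxlen, query_maxlen and vocab_size are module-level globals as in the snippet; the values below are illustrative only.

import tensorflow as tf

story_maxlen, query_maxlen, vocab_size = 68, 4, 22  # illustrative values

model = create_mem_network()
model.compile(optimizer='rmsprop',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()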
Example #5
File: Alexnet.py  Project: HOJULIN/review
def AlexNet_inference(in_shape):  # shape of the input images
    model = keras.models.Sequential(name="AlexNet")
    model.add(
        layers.Conv2D(
            96,
            (11, 11),
            strides=(2, 2),
            input_shape=(in_shape[1], in_shape[2], in_shape[3]),
            padding='same',
            activation='relu',
        ))
    # what is kernel_initializer='uniform' used for?
    model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(
        layers.Conv2D(256, (5, 5),
                      strides=(1, 1),
                      padding='same',
                      activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(
        layers.Conv2D(384, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      activation='relu'))
    model.add(
        layers.Conv2D(384, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      activation='relu'))
    model.add(
        layers.Conv2D(256, (3, 3),
                      strides=(1, 1),
                      padding='same',
                      activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(2048, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(2048, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',  # don't pass the loss function object directly, otherwise loading the saved model for testing fails!
        metrics=['accuracy'])
    model.summary()
    return model
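in_shape is indexed as in_shape[1], in_shape[2], in_shape[3], so it is evidently the full (N, H, W, C) shape of the training array. An illustrative call with dummy data (not from the original project):

import numpy as np

x_train = np.zeros((8, 227, 227, 3), dtype='float32')  # dummy images
y_train = np.zeros((8,), dtype='int64')                 # sparse labels in [0, 10)
model = AlexNet_inference(x_train.shape)
model.fit(x_train, y_train, epochs=1, batch_size=4)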
Example #6
def Construct3DUnetModel(input_images,
                         nclasses,
                         use_bn=True,
                         use_dropout=True):
    with name_scope("contract1"):
        x, contract1 = CreateConv3DBlock(input_images, (32, 64),
                                         n=2,
                                         use_bn=use_bn,
                                         name='contract1')

    with name_scope("contract2"):
        x, contract2 = CreateConv3DBlock(x, (64, 128),
                                         n=2,
                                         use_bn=use_bn,
                                         name='contract2')

    with name_scope("contract3"):
        x, contract3 = CreateConv3DBlock(x, (128, 256),
                                         n=2,
                                         use_bn=use_bn,
                                         name='contract3')

    with name_scope("contract4"):
        x, _ = CreateConv3DBlock(x, (256, 512),
                                 n=2,
                                 use_bn=use_bn,
                                 apply_pooling=False,
                                 name='contract4')

    with name_scope("dropout"):
        if use_dropout:
            x = klayers.Dropout(0.5, name='dropout')(x)

    with name_scope("expand3"):
        x = CreateUpConv3DBlock(x, [contract3], (256, 256),
                                n=2,
                                use_bn=use_bn,
                                name='expand3')

    with name_scope("expand2"):
        x = CreateUpConv3DBlock(x, [contract2], (128, 128),
                                n=2,
                                use_bn=use_bn,
                                name='expand2')

    with name_scope("expand1"):
        x = CreateUpConv3DBlock(x, [contract1], (64, 64),
                                n=2,
                                use_bn=use_bn,
                                name='expand1')

    with name_scope("segmentation"):
        layername = 'segmentation_{}classes'.format(nclasses)
        x = klayers.Conv3D(nclasses, (1, 1, 1),
                           activation='softmax',
                           padding='same',
                           name=layername)(x)

    return x
Example #7
 def generate_outputs(self):
     conv_l1 = layers.Conv2D(32, (3, 3), padding='same',
                             activation='relu')(self.inputs)
     conv_l1 = layers.MaxPooling2D((2, 2), strides=(2, 2))(conv_l1)
     conv_l2 = layers.Conv2D(64, (3, 3), padding='same',
                             activation='relu')(conv_l1)
     conv_l2 = layers.MaxPooling2D((2, 2), strides=(2, 2))(conv_l2)
     conv_l3 = layers.Conv2D(128, (3, 3), padding='same',
                             activation='relu')(conv_l2)
     conv_l3 = layers.MaxPooling2D((2, 2), strides=(2, 2))(conv_l3)
     l3_dropout = layers.Dropout(rate=0.5)(conv_l3)
     flattened = layers.Flatten()(l3_dropout)
     l4 = layers.Dense(1024, activation='relu')(flattened)
     l4_dropout = layers.Dropout(rate=0.5)(l4)
     outputs = layers.Dense(self.num_classes,
                            activation='softmax')(l4_dropout)
     return outputs
Example #8
    def encoder_block(self, input_tensor, num_filters, dropout_rate):
        encoder = self.conv_block(input_tensor, num_filters)
        encoder_pool = layers.MaxPooling2D(
            (self.pool_size, self.pool_size),
            strides=(self.stride, self.stride))(encoder)
        dropout = layers.Dropout(dropout_rate)(encoder_pool)

        return dropout, encoder
Example #9
def build_model():

    input1 = layers.Input(shape=(X_unstructure.shape[1], ))
    input2 = layers.Input(shape=(X_structure.shape[1], ))

    input1_bn = layers.BatchNormalization()(input1)
    x1_1 = layers.Dense(600, activation='tanh')(input1_bn)
    input2_bn = layers.BatchNormalization()(input2)
    x2_1 = layers.Dense(200, activation='tanh')(input2_bn)

    x_connect = tf.concat([x1_1, x2_1], 1)
    x_connect_1 = layers.Dense(600, activation='tanh')(x_connect)
    x_connect_2 = layers.Dense(200, activation='tanh')(x_connect_1)
    x_connect_2_d = layers.Dropout(0.3)(x_connect_2)
    #x_connect_3 = layers.Dense(1)(x_connect_2)
    # w = tf.reduce_mean(x_connect_3, axis=1)
    # w = tf.reshape(x_connect_3, (batch_size, 1))
    #x1_1 = x_connect_3 * x1_1 + x1_1
    #x2_1 = x_connect_3 * x2_1 + x2_1

    x1_2 = layers.Dense(600,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x_connect_2_d)
    x2_2 = layers.Dense(200,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x_connect_2_d)
    x1_2_d = layers.Dropout(0.3)(x1_2)
    x2_2_d = layers.Dropout(0.3)(x2_2)

    x1_3 = layers.Dense(600,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x1_2_d)
    x2_3 = layers.Dense(200,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x2_2_d)
    x1_3_d = layers.Dropout(0.3)(x1_3)
    x2_3_d = layers.Dropout(0.3)(x2_3)

    # feed the dropped-out activations into the output heads
    y1_ = layers.Dense(2, activation='softmax')(x1_3_d)
    y2_ = layers.Dense(2, activation='softmax')(x2_3_d)

    model = models.Model(inputs=[input1, input2], outputs=[y1_, y2_])
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #10
def create_cnn_model(input_shape, num_classes=10):
    img_input = layers.Input(shape=input_shape)
    x = layers.Conv2D(32, (3, 3), padding='same', activation='relu')(img_input)
    x = layers.Conv2D(32, (3, 3), activation='relu')(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Dropout(0.25)(x)

    x = layers.Conv2D(64, (3, 3), padding='same', activation='relu')(x)
    x = layers.Conv2D(64, (3, 3), activation='relu')(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Dropout(0.25)(x)

    x = layers.Flatten()(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    out_logits = layers.Dense(num_classes)(x)
    return models.Model(inputs=img_input, outputs=out_logits)
Example #11
    def vgg16(num_classes=1000):
        model = Sequential()
        model.add(layers.ZeroPadding2D((1, 1), input_shape=(384, 384, 3)))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(256, (3, 3), activation='relu'))
        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(256, (3, 3), activation='relu'))
        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(256, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(512, (3, 3), activation='relu'))
        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(512, (3, 3), activation='relu'))
        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(512, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(512, (3, 3), activation='relu'))
        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(512, (3, 3), activation='relu'))
        model.add(layers.ZeroPadding2D((1, 1)))
        model.add(layers.Conv2D(512, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(layers.Flatten())
        model.add(layers.Dense(4096, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(1024, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(num_classes, activation='softmax'))

        return model
Example #12
    def __init__(self, cfg=Args, vocab=40558, n_ctx=512):
        super(TransformerModel, self).__init__()

        self.vocab = vocab
        self.embed = layers.Embedding(vocab, cfg.n_embed)
        # construct the positional information for the input embedding
        self.embed.build([1])
        self.drop = layers.Dropout(cfg.embed_pdrop)
        self.h = [Block(n_ctx, cfg, scale=False) for _ in range(cfg.n_layer)]
Example #13
    def __init__(self, n_state=3072, cfg=Args):
        super(FFT, self).__init__()

        nx = cfg.n_embed
        self.c_fc = Conv1D(n_state, 1, nx)
        self.c_proj = Conv1D(nx, 1, n_state)
        # GELU activation function
        self.act = cfg.afn
        self.dropout = layers.Dropout(cfg.resid_pdrop)
Example #14
def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(
        layers.Conv2D(64, (5, 5),
                      strides=(2, 2),
                      padding='same',
                      input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model
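Since the final Dense(1) has no activation, its output is a logit; pairing it with BinaryCrossentropy(from_logits=True) is the usual choice. A minimal sketch (the batch below is random noise, purely illustrative):

import tensorflow as tf

disc = make_discriminator_model()
images = tf.random.normal([8, 28, 28, 1])      # stand-in for a batch of MNIST-sized images
logits = disc(images, training=True)           # shape (8, 1)
bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
real_loss = bce(tf.ones_like(logits), logits)  # loss against "real" labels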
Example #15
def construct_model():
    # line 1: how do we keep all layers of this model ?
    model = EfficientNetB3(weights=None, include_top=False, pooling='avg')
    x = model.output
    x = KL.Dropout(0.1)(x)
    x = KL.Dense(1, kernel_initializer=dense_kernel_initializer)(x)
    new_output = KL.Activation('sigmoid')(x)
    new_model = Model(model.inputs, new_output)
    return new_model
Example #16
def create_classifier():

    with tf.name_scope("Disc"):
        X = kl.Input((32, 32, 3), name="X")

        layer = kl.Conv2D(filters=16,
                          kernel_size=3,
                          padding="same",
                          activation="relu")(X)
        layer = kl.BatchNormalization()(layer)
        layer = kl.Conv2D(filters=32,
                          kernel_size=3,
                          padding="same",
                          activation="relu")(layer)
        layer = kl.BatchNormalization()(layer)
        layer = kl.MaxPool2D()(layer)

        layer = kl.Conv2D(filters=64,
                          kernel_size=4,
                          padding="same",
                          activation="relu")(layer)
        layer = kl.BatchNormalization()(layer)
        layer = kl.MaxPool2D()(layer)

        layer = kl.Conv2D(filters=128,
                          kernel_size=4,
                          padding="same",
                          activation="relu")(layer)
        layer = kl.BatchNormalization()(layer)
        layer = kl.MaxPool2D()(layer)

        layer = kl.Dropout(0.2)(layer)

        layer = kl.Flatten()(layer)
        fidout = layer
        layer = kl.Dense(512, activation="relu")(layer)
        layer = kl.Dropout(0.2)(layer)

        D_out = kl.Dense(10, activation="softmax")(layer)

        model = k.Model(inputs=X, outputs=D_out)
        fidmodel = k.Model(inputs=X, outputs=fidout)
    return model, fidmodel
Example #17
def build_model():

    input1 = layers.Input(shape=(X_unstructure.shape[1], ))
    input2 = layers.Input(shape=(X_structure.shape[1], ))

    input1_bn = layers.BatchNormalization()(input1)
    x1_1 = layers.Dense(600, activation='tanh')(input1_bn)
    input2_bn = layers.BatchNormalization()(input2)
    x2_1 = layers.Dense(200, activation='tanh')(input2_bn)

    x_connect = tf.concat([x1_1, x2_1], 1)
    x_connect_1 = layers.Dense(600, activation='tanh')(x_connect)
    x_connect_2 = layers.Dense(400, activation='tanh')(x_connect_1)
    x_connect_2_d = layers.Dropout(0.3)(x_connect_2)
    y_connect_ = layers.Dense(2, activation='softmax')(x_connect_2_d)

    x1_2 = layers.Dense(600,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x1_1)
    x2_2 = layers.Dense(400,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x2_1)
    x1_2_d = layers.Dropout(0.3)(x1_2)
    x2_2_d = layers.Dropout(0.3)(x2_2)

    x1_3 = layers.Dense(600,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x1_2_d)
    x2_3 = layers.Dense(400,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x2_2_d)
    x1_3_d = layers.Dropout(0.3)(x1_3)
    x2_3_d = layers.Dropout(0.3)(x2_3)

    # feed the dropped-out activations into the per-branch output heads
    y1_ = layers.Dense(2, activation='softmax')(x1_3_d)
    y2_ = layers.Dense(2, activation='softmax')(x2_3_d)

    model = models.Model(inputs=[input1, input2],
                         outputs=[y_connect_, y1_, y2_])
    sgd = optimizers.SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    #sgd = optimizers.SGD()
    model.compile(optimizer=sgd, loss=total_loss, metrics=['accuracy'])
    return model
Example #18
 def __init__(
         self,
         nx=768,
         n_ctx: "context length (size of the causal attention mask)" = 512,
         cfg=Args,
         scale=False):
     super(Attention, self).__init__()
     n_state = nx
     assert n_state % cfg.n_head == 0
     self.b = self.add_weight(shape=[1, 1, n_ctx, n_ctx],
                              initializer=keras.initializers.Zeros())
      # causal attention mask: lower-triangular matrix of ones of shape [1, 1, n_ctx, n_ctx]
      self.b.assign(tf.linalg.band_part(tf.ones([1, 1, n_ctx, n_ctx]), -1, 0))
     self.n_head = cfg.n_head
     self.scale = scale
     # Linear
     self.c_attn = Conv1D(n_state * 3, 1, nx)
     self.c_proj = Conv1D(n_state, 1, nx)
     self.attn_dropout = layers.Dropout(cfg.attn_pdrop)
     self.resid_dropout = layers.Dropout(cfg.resid_pdrop)
Example #19
def transition_block(x, name, dropout_rate, reduction=0.5):
    x = layers.BatchNormalization(name=name + '_bn')(x)
    x = layers.Activation('relu', name=name + '_relu')(x)
    x = layers.Conv2D(int(x.get_shape().as_list()[-1] * reduction),
                      1,
                      padding='same',
                      name=name + '_conv')(x)
    x = layers.Dropout(rate=dropout_rate)(x)
    x = layers.AveragePooling2D(strides=2, name=name + '_pool')(x)
    return x
Example #20
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Add hidden layers
        net = layers.Dense(units=32, activation='relu')(states)
        net = layers.Dropout(0.8)(net)
        net = layers.Dense(units=64, activation='relu')(net)
        net = layers.Dropout(0.8)(net)
        net = layers.Dense(units=32, activation='relu')(net)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Add final output layer with sigmoid activation
        raw_actions = layers.Dense(units=self.action_size,
                                   activation='sigmoid',
                                   name='raw_actions')(net)

        # Scale [0, 1] output for each action dimension to proper range
        actions = layers.Lambda(lambda x:
                                (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=0.0001)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
Example #21
def vgg(layer_cfg,
        input_shape=(img_size, img_size, channel),
        weight_decay=5e-4,
        use_bias=False,
        use_fc=False):

    block_num = 1
    conv_num = 1
    x = layers.Input(input_shape, name='vgg16_bn')
    main_input = x

    for layer in layer_cfg:
        if layer == 'M':
            x = layers.MaxPooling2D((2, 2),
                                    strides=(2, 2),
                                    name='block%d_pool' % block_num)(x)
            block_num += 1
            conv_num = 1
            continue

        x = layers.Conv2D(layer, (3, 3),
                          padding='same',
                          name='block%d_conv%d' % (block_num, conv_num),
                          kernel_regularizer=l2(weight_decay),
                          use_bias=use_bias)(x)
        x = layers.BatchNormalization(name='block%d_bn%d' %
                                      (block_num, conv_num))(x)
        x = layers.Activation('relu',
                              name='block%d_relu%d' % (block_num, conv_num))(x)
        conv_num += 1

    if use_fc:
        x = layers.Dense(4096)(x)
        x = layers.Activation('relu')(x)
        x = layers.Dropout(rate=0.5)(x)
        x = layers.Dense(4096)(x)
        x = layers.Activation('relu')(x)
        x = layers.Dropout(rate=0.5)(x)
    else:
        x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(num_classes, activation='softmax')(x)
    return Model(main_input, x)
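A sketch of how layer_cfg is presumably meant to be used: the classic VGG-16 configuration, with 'M' marking the max-pooling stages. img_size, channel, num_classes, l2 and Model are module-level globals/imports in the snippet above.

vgg16_cfg = [64, 64, 'M',
             128, 128, 'M',
             256, 256, 256, 'M',
             512, 512, 512, 'M',
             512, 512, 512, 'M']

model = vgg(vgg16_cfg)
model.summary()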
Example #22
def bn_relu_conv(add_dropout=False, *args, **kwargs):
    '''Create a batch normalization -> ReLU -> conv block.'''
    if add_dropout:
        return utils.compose(layers.BatchNormalization(),
                             layers.Activation('relu'),
                             layers.Dropout(rate=0.35),
                             ResNetConv2D(*args, **kwargs))
    return utils.compose(layers.BatchNormalization(),
                         layers.Activation('relu'),
                         ResNetConv2D(*args, **kwargs))
Example #23
    def create_model(self):
        model = models.Sequential()
        model.add(
            layers.Conv2D(32, (3, 3),
                          activation='relu',
                          input_shape=(self.img_rows, self.img_cols, 1)))
        model.add(layers.Dropout(0.5))
        model.add(layers.Conv2D(32, (3, 3), strides=2, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Flatten())
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dense(self.num_classes, activation='softmax'))

        model.compile(optimizer='adam',
                      loss=tf.keras.losses.categorical_crossentropy,
                      metrics=['accuracy'])

        return model
Example #24
def create_model():
    # Our input feature map is 150x150x3: 150x150 for the image pixels, and 3 for
    # the three color channels: R, G, and B
    img_input = layers.Input(shape=(150, 150, 3))

    # First convolution extracts 128 filters that are 5x5
    # Convolution is followed by max-pooling layer with a 2x2 window
    x = layers.Conv2D(128, 5, activation='relu')(img_input)
    x = layers.MaxPooling2D(2)(x)

    x = layers.Dropout(0.5)(x)

    # Flatten feature map to a 1-dim tensor so we can add fully connected layers
    x = layers.Flatten()(x)

    x = layers.Dense(96, activation='relu')(x)

    # Add a dropout rate of 0.25
    x = layers.Dropout(0.25)(x)

    x = layers.Dense(54, activation='relu')(x)

    # Add a dropout rate of 0.25
    x = layers.Dropout(0.25)(x)

    # Create output layer with a single node and sigmoid activation
    # (softmax over a single unit always outputs 1.0, which would break the
    # binary_crossentropy training below)
    output = layers.Dense(1, activation='sigmoid')(x)

    # Create model:
    # input = input feature map
    # output = input feature map + stacked convolution/maxpooling layers + fully
    # connected layer + sigmoid output layer
    model = Model(img_input, output)

    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.001),
                  metrics=['acc'])

    # model.summary()

    return model
Example #25
    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size, ), name='states')
        actions = layers.Input(shape=(self.action_size, ), name='actions')

        # Add hidden layer(s) for state pathway
        net_states = layers.Dense(units=32, activation='relu')(states)
        net_states = layers.Dropout(0.8)(net_states)
        net_states = layers.Dense(units=64, activation='relu')(net_states)
        net_states = layers.Dropout(0.8)(net_states)

        # Add hidden layer(s) for action pathway
        net_actions = layers.Dense(units=32, activation='relu')(actions)
        net_actions = layers.Dense(units=64, activation='relu')(net_actions)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Combine state and action pathways
        net = layers.Add()([net_states, net_actions])
        net = layers.Activation('relu')(net)

        # Add more layers to the combined network if needed

        # Add final output layer to produce action values (Q values)
        Q_values = layers.Dense(units=1, name='q_values')(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam(lr=0.0001)
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. to actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
Example #26
    def __init__(self, num_hidden, num_classes, use_bn=False, use_dp=False):
        super(SmallSubclassMLP, self).__init__(name='test_model')
        self.use_bn = use_bn
        self.use_dp = use_dp

        self.layer_a = layers.Dense(num_hidden, activation='relu')
        activation = 'sigmoid' if num_classes == 1 else 'softmax'
        self.layer_b = layers.Dense(num_classes, activation=activation)
        if self.use_dp:
            self.dp = layers.Dropout(0.5)
        if self.use_bn:
            self.bn = layers.BatchNormalization(axis=-1)
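The constructor above only declares the layers; a subclassed model also needs a call() that wires them together. A plausible sketch consistent with the flags above (not necessarily the original test's exact implementation):

    def call(self, inputs, training=None):
        x = self.layer_a(inputs)
        if self.use_dp:
            x = self.dp(x, training=training)
        if self.use_bn:
            x = self.bn(x, training=training)
        return self.layer_b(x)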
Example #27
def vgg16_head(features):
    """

    :param features: [batch_size,rois_num,H,W,C]
    :return:
    """
    fc_layers_size = 4096
    # flatten
    x = TimeDistributed(layers.Flatten())(
        features)  # [batch_size,rois_num,H*W*C]
    # fc6
    x = TimeDistributed(layers.Dense(fc_layers_size),
                        name='fc1')(x)  # becomes (batch_size, roi_num, channels)
    x = layers.Activation(activation='relu')(x)
    x = layers.Dropout(rate=0.5, name='drop_fc6')(x)

    x = TimeDistributed(layers.Dense(fc_layers_size),
                        name='fc2')(x)  # becomes (batch_size, roi_num, channels)
    x = layers.Activation(activation='relu')(x)
    x = layers.Dropout(rate=0.5, name='drop_fc7')(x)
    return x
Example #28
  def mnist(self, inputs):
      """
      Creates and returns the neural net model.
      """
      x = layers.Conv2D(32, kernel_size=(3, 3),
                        activation='relu',
                        padding='valid',
                        data_format=backend.image_data_format(),
                        input_shape=self.dims)(inputs)
      x = layers.Dropout(0.5)(x)
      x = layers.Conv2D(32, kernel_size=(3, 3), activation='relu', padding='valid')(x)
      x = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
      x = layers.Dropout(0.5)(x)
      x = layers.Flatten()(x)
      x = layers.Dense(128, activation='relu')(x)
      x = layers.Dropout(0.5)(x)
      #x = layers.Dense(self.nclasses, activation='sigmoid')(x)
      x = layers.Dense(self.nclasses, activation='softmax')(x)
      return x
Example #29
def create_classifier():
    print("Building model")
    inception = k.applications.inception_resnet_v2.InceptionResNetV2(
        include_top=False, input_shape=(HEIGHT, WIDTH, 3))
    layer = kl.Flatten()(inception.output)
    layer = kl.Dense(1000, activation="relu")(layer)
    fidmodel = k.Model(inception.input, layer)
    layer = kl.Dropout(0.2)(layer)
    D_out = kl.Dense(47, activation="softmax")(layer)

    model = k.models.Model(inception.input, D_out)
    return model, fidmodel
Example #30
 def create_model(self):
     input_img = Input(shape=(self.img_height, self.img_width, self.num_channels))
     
     x = layers.Conv2D(filters=64, kernel_size=(5, 5), strides=(2, 2), padding='same')(input_img)
     x = layers.LeakyReLU()(x)
     x = layers.Dropout(0.3)(x)
     
     x = layers.Conv2D(filters=128, kernel_size=(5, 5), strides=(2, 2), padding='same')(x)
     x = layers.LeakyReLU()(x)
     x = layers.Dropout(rate=0.3)(x)
     
     x = layers.Conv2D(filters=128, kernel_size=(5, 5), strides=(2, 2), padding='same')(x)
     x = layers.LeakyReLU()(x)
     x = layers.Dropout(rate=0.3)(x)
     
     x = layers.Flatten()(x)
     x = layers.Dense(units=1)(x)
     
     model = Model(name='discriminator', inputs=input_img, outputs=x)
     
     return model