Example #1
def nature_dqn(num_actions):

    model = tf.keras.Sequential()
    model.add(
        layers.Conv2D(32, [8, 8],
                      strides=4,
                      input_shape=(84, 84, 4),
                      activation="relu"))
    model.add(layers.Conv2D(64, [4, 4], strides=2, activation="relu"))
    model.add(layers.Conv2D(64, [3, 3], strides=1, activation="relu"))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation="relu"))
    model.add(layers.Dense(num_actions))

    # tf.train.RMSPropOptimizer is the legacy TF1 API; use the tf.keras optimizer instead.
    model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
                  loss="mse",
                  metrics=["mae"])

    model_target = tf.keras.Sequential()
    model_target.add(
        layers.Conv2D(32, [8, 8],
                      strides=4,
                      input_shape=(84, 84, 4),
                      activation="relu"))
    model_target.add(layers.Conv2D(64, [4, 4], strides=2, activation="relu"))
    model_target.add(layers.Conv2D(64, [3, 3], strides=1, activation="relu"))
    model_target.add(layers.Flatten())
    model_target.add(layers.Dense(512, activation="relu"))
    model_target.add(layers.Dense(num_actions))

    return model, model_target
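The function returns an online network and a structurally identical target network but does not link their weights; in the standard DQN recipe the caller periodically copies the online weights into the target. A minimal usage sketch, assuming `layers` refers to `tf.keras.layers` as in the snippet above:

import tensorflow as tf
from tensorflow.keras import layers  # assumed import used by nature_dqn above

# Build both networks, then hard-copy the online weights into the target
# network (the periodic target-update step of DQN training).
model, model_target = nature_dqn(num_actions=4)
model_target.set_weights(model.get_weights())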
Example #2
    def create_model(self):
        input_text = Input(shape=(self.max_sequence_length,))
        input_image = Input(shape=(self.img_height, self.img_width,
                                   self.num_channels))

        embedded_id = layers.Embedding(self.vocab_size,
                                       self.embedding_size)(input_text)
        embedded_id = layers.Flatten()(embedded_id)
        embedded_id = layers.Dense(units=input_image.shape[1] *
                                   input_image.shape[2])(embedded_id)
        embedded_id = layers.Reshape(target_shape=(input_image.shape[1],
                                                   input_image.shape[2],
                                                   1))(embedded_id)

        x = layers.Concatenate(axis=3)([input_image, embedded_id])

        x = layers.Conv2D(filters=64,
                          kernel_size=(3, 3),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(0.3)(x)

        x = layers.Conv2D(filters=64,
                          kernel_size=(3, 3),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(rate=0.3)(x)

        # x = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
        # x = layers.LeakyReLU()(x)
        # x = layers.Dropout(rate=0.3)(x)
        #
        # x = layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
        # x = layers.LeakyReLU()(x)
        # x = layers.Dropout(rate=0.3)(x)

        x = layers.Conv2D(filters=128,
                          kernel_size=(3, 3),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(rate=0.3)(x)

        x = layers.Flatten()(x)
        x = layers.Dense(units=1000)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Dense(units=1)(x)

        model = Model(name='discriminator',
                      inputs=[input_text, input_image],
                      outputs=x)

        return model
Example #3
def make_discriminator_model():
    """ Discriminator network structure.

  Returns:
    Sequential model.

  """
    model = tf.keras.Sequential()
    model.add(layers.InputLayer(input_shape=(28, 28, 1)))
    model.add(
        layers.Conv2D(64,
                      3,
                      strides=(2, 2),
                      padding='same',
                      activation=tf.nn.relu))

    model.add(
        layers.Conv2D(128,
                      3,
                      strides=(2, 2),
                      activation=tf.nn.relu,
                      padding='same'))

    model.add(layers.Flatten())
    model.add(layers.Dense(1, activation=tf.nn.sigmoid))

    return model
Example #4
def define_discriminator(in_shape=(28, 28, 1), nclasses=10):
    img = layers.Input(shape=in_shape)
    x = layers.Conv2D(filters=16, kernel_size=3, strides=2, padding="same")(img)
    x = layers.LeakyReLU(alpha=0.2)(x)
    x = layers.Dropout(0.25)(x)

    x = layers.Conv2D(filters=32, kernel_size=3, strides=2, padding="same")(x)
    x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    x = layers.Dropout(0.25)(x)
    x = layers.BatchNormalization(momentum=0.8)(x)

    x = layers.Conv2D(filters=64, kernel_size=3, strides=2, padding="same")(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    x = layers.Dropout(0.25)(x)
    x = layers.BatchNormalization(momentum=0.8)(x)

    x = layers.Conv2D(filters=128, kernel_size=3, strides=2, padding="same")(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    x = layers.Dropout(0.25)(x)

    x = layers.Flatten()(x)

    out = layers.Dense(1, activation="sigmoid")(x)
    # The class head branches from the shared features, not from the 1-unit real/fake output.
    label = layers.Dense(nclasses + 1, activation="softmax")(x)

    model = tf.keras.Model(img, [out, label])
    return model
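Because the model has two outputs, compiling it takes one loss per head; a minimal sketch, assuming binary cross-entropy for the real/fake output and sparse categorical cross-entropy for the (nclasses + 1)-way class output (the optimizer settings are illustrative):

import tensorflow as tf

disc = define_discriminator()
# One loss per output, in the same order as the outputs list [out, label];
# integer class labels are assumed for the second head.
disc.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
    loss=["binary_crossentropy", "sparse_categorical_crossentropy"],
)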
Example #5
def create_classifier():

    with tf.name_scope("Disc"):
        X = kl.Input((28, 28, 1), name="X")
        layer = X

        for l in range(3):
            layer = kl.Conv2D(filters=64 * (2**l),
                              kernel_size=3,
                              padding="same",
                              use_bias=False,
                              activation="relu",
                              kernel_regularizer=kr.l2())(layer)
            layer = kl.Conv2D(filters=64 * (2**l),
                              kernel_size=3,
                              padding="same",
                              use_bias=False,
                              activation="relu",
                              kernel_regularizer=kr.l2())(layer)
            layer = kl.MaxPool2D()(layer)
            layer = kl.BatchNormalization()(layer)

        layer = kl.Flatten()(layer)
        layer = kl.Dense(256, kernel_regularizer=kr.l2())(layer)
        layer = kl.LeakyReLU()(layer)
        D_out = kl.Dense(10, activation="softmax",
                         kernel_regularizer=kr.l2())(layer)

        model = k.Model(inputs=X, outputs=D_out)
        fidmodel = k.Model(inputs=X, outputs=layer)
    return model, fidmodel
Example #6
def ResNet50(input_tensor=None,
             pooling=None,
             **kwargs):
    """Instantiates the ResNet50 architecture.
    # Arguments       
    # Returns
        A Keras model instance.
    """
    # Input arguments
    include_top = get_varargin(kwargs, 'include_top', True)
    nb_classes = get_varargin(kwargs, 'nb_classes', 1000)
    default_input_shape = _obtain_input_shape(None,
                                      default_size=224,
                                      min_size=197,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)
    input_shape = get_varargin(kwargs, 'input_shape', default_input_shape)
    if input_tensor is None:
        img_input = KL.Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = KL.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
            
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1        

    x = KL.ZeroPadding2D((3, 3))(img_input)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = KL.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = KL.Activation('relu')(x)
    x = KL.MaxPooling2D((3, 3), strides=(2, 2))(x)
    
    for stage, nb_block in zip([2,3,4,5], [3,4,6,3]):
        for blk in range(nb_block):
            conv_block = True if blk == 0 else False
            strides = (2,2) if stage>2 and blk==0 else (1,1)           
            x = identity_block(x, stage = stage, block_id = blk + 1,
                               conv_block = conv_block, strides = strides)
            
    x = KL.AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = KL.Flatten()(x)
        x = KL.Dense(nb_classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = KL.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = KL.GlobalMaxPooling2D()(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
        
    # Create model.
    model = Model(inputs, x, name='resnet50')
    return model
Example #7
def get_convolutional_network(shape, use_periodic_pad=False):
    inputs = Input(shape=(shape, shape, 1))
    x = inputs
    if use_periodic_pad:
        x = layers.Lambda(periodic_pad)(x)
    x = layers.Conv2D(PARAMS["conv_start_filters"], (3, 3),
                      activation="relu")(x)
    x = layers.MaxPooling2D((2, 2))(x)
    for i in range(1, PARAMS["conv_depth"]):
        x_1 = layers.Conv2D(PARAMS["conv_start_filters"] +
                            i * PARAMS["conv_increment"], (3, 3),
                            activation='relu')(x)
        x = layers.add([
            layers.Conv2D(
                PARAMS["conv_start_filters"] + i * PARAMS["conv_increment"],
                (3, 3))(x), x_1
        ])
        x = layers.MaxPooling2D((2, 2))(x)

    x = layers.Flatten()(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Dense(1, activation='sigmoid')(x)
    model = models.Model(inputs=inputs, outputs=x)
    return model
Example #8
def create_model():
    dropout = 0.5
    reg = l2(0.01)
    opt = Adam(learning_rate=LEARN_RATE)

    conv_base = ResNet50(weights='imagenet',
                         include_top=False,
                         input_shape=INPUT_SHAPE)
    conv_base.trainable = False
    model = models.Sequential()
    model.add(conv_base)
    model.add(AveragePooling2D(pool_size=(2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout))
    model.add(
        layers.Dense(512,
                     activation='relu',
                     kernel_initializer='uniform',
                     kernel_regularizer=reg))
    model.add(layers.Dropout(dropout))
    model.add(
        layers.Dense(512,
                     activation='relu',
                     kernel_initializer='uniform',
                     kernel_regularizer=reg))
    model.add(layers.Dropout(dropout))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    model.summary()
    # tf.keras.utils.plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    return model
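With `conv_base.trainable = False` only the dense head is trained at first; a common second phase unfreezes the backbone and recompiles with a much smaller learning rate. A hedged sketch of that step, assuming the same `Adam` import used by `create_model` (the learning rate shown is illustrative):

# Hypothetical fine-tuning phase after the initial training run.
model = create_model()
conv_base = model.layers[0]      # the ResNet50 backbone added first
conv_base.trainable = True       # unfreeze it
model.compile(loss='binary_crossentropy',
              optimizer=Adam(learning_rate=1e-5),   # smaller LR for fine-tuning
              metrics=['accuracy'])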
Example #9
def make_discriminator_model():
    model = tf.keras.Sequential()

    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                            use_bias=False, input_shape=(64, 64, 3), kernel_initializer=weights_initializer))
    assert model.output_shape == (None, 32, 32, 64)
    # model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same',
                            use_bias=False, kernel_initializer=weights_initializer))
    assert model.output_shape == (None, 16, 16, 128)
    # model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding='same',
                            use_bias=False, kernel_initializer=weights_initializer))
    assert model.output_shape == (None, 8, 8, 256)
    # model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(512, (5, 5), strides=(2, 2), padding='same',
                            use_bias=False, kernel_initializer=weights_initializer))
    assert model.output_shape == (None, 4, 4, 512)
    # model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.BatchNormalization())

    model.add(layers.Flatten())
    model.add(layers.Dense(1, kernel_initializer=weights_initializer, activation='sigmoid'))
    return model
Example #10
def make_encoder_model():
    """ Encoder network structure.

  Returns:
    tf.keras.Model.

  """
    model = tf.keras.Sequential()
    model.add(layers.InputLayer(input_shape=(28, 28, 1)))
    model.add(
        layers.Conv2D(32, (3, 3),
                      strides=(2, 2),
                      padding='same',
                      activation=tf.nn.relu))

    model.add(
        layers.Conv2D(64, (3, 3),
                      strides=(2, 2),
                      activation=tf.nn.relu,
                      padding='same'))

    model.add(layers.Flatten())
    model.add(layers.Dense(64))

    return model
Example #11
def fully_connected_net(args):
    window_len = args[0]

    input_state_shape = (10, )
    pre_int_shape = (window_len, 3)
    imu_input_shape = (window_len, 7, 1)

    # Input layers. Don't change names
    imu_in = layers.Input(imu_input_shape, name="imu_input")
    state_in = layers.Input(input_state_shape, name="state_input")

    _, _, dt_vec = custom_layers.PreProcessIMU()(imu_in)

    x = layers.Flatten()(imu_in)
    x = layers.Dense(200)(x)
    x = norm_activate(x, 'relu')
    x = layers.Dense(400)(x)
    x = norm_activate(x, 'relu')
    x = layers.Dense(400)(x)
    feat_vec = norm_activate(x, 'relu')

    r_flat = layers.Dense(tf.reduce_prod(pre_int_shape))(x)
    rot_prior = layers.Reshape(pre_int_shape, name="pre_integrated_R")(r_flat)

    x = layers.Concatenate()([feat_vec, r_flat])
    v_flat = layers.Dense(tf.reduce_prod(pre_int_shape))(x)
    v_prior = layers.Reshape(pre_int_shape, name="pre_integrated_v")(v_flat)

    x = layers.Concatenate()([feat_vec, r_flat, v_flat])
    p_flat = layers.Dense(tf.reduce_prod(pre_int_shape))(x)
    p_prior = layers.Reshape(pre_int_shape, name="pre_integrated_p")(p_flat)

    return Model(inputs=(imu_in, state_in),
                 outputs=(rot_prior, v_prior, p_prior))
Example #12
def vel_cnn(window_len):
    input_s = (window_len, 6, 1)
    inputs = layers.Input(input_s, name="imu_input")
    x = layers.Conv2D(filters=60,
                      kernel_size=(3, 6),
                      padding='same',
                      activation='relu',
                      input_shape=input_s)(inputs)
    x = layers.Conv2D(filters=120,
                      kernel_size=(3, 6),
                      padding='same',
                      activation='relu')(x)
    x = layers.Conv2D(filters=240,
                      kernel_size=(3, 1),
                      padding='valid',
                      activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(10, 1), strides=(6, 1))(x)
    x = layers.Flatten()(x)
    x = layers.Dense(400, activation='relu')(x)
    x = layers.Dense(100, activation='relu')(x)
    x = layers.Dense(1, name="state_output")(x)

    model = Model(inputs, x)

    return model
Example #13
    def __init__(self, height, width, channels, condition_dim):
        # prepare real images
        discriminator_input1 = layers.Input(shape=(height, width, channels))

        x1 = layers.Conv2D(64, 5, padding='same')(discriminator_input1)
        x1 = layers.Activation('tanh')(x1)
        x1 = layers.MaxPooling2D(pool_size=(2, 2))(x1)
        x1 = layers.Conv2D(128, 5)(x1)
        x1 = layers.Activation('tanh')(x1)
        x1 = layers.MaxPooling2D(pool_size=(2, 2))(x1)

        # condition input from generator
        discriminator_input2 = layers.Input(shape=(condition_dim, ))

        x2 = layers.Dense(1024)(discriminator_input2)
        x2 = layers.Activation('tanh')(x2)
        x2 = layers.Dense(5 * 5 * 128)(x2)
        x2 = layers.BatchNormalization()(x2)
        x2 = layers.Activation('tanh')(x2)
        x2 = layers.Reshape((5, 5, 128))(x2)

        # concatenate 2 inputs
        discriminator_input = layers.concatenate([x1, x2])

        x = layers.Flatten()(discriminator_input)
        x = layers.Dense(1024)(x)
        x = layers.Activation('tanh')(x)
        x = layers.Dense(1, activation='sigmoid')(x)

        self.discriminator = tf.keras.models.Model(inputs=[discriminator_input1, discriminator_input2], outputs=x)
Example #14
    def create_layers(self):
        model = Sequential()

        model.add(layers.Conv2D(32,
                                kernel_size=(3, 3),
                                activation='relu',
                                input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1)))

        model.add(layers.MaxPooling2D((2, 2)))

        model.add(layers.Conv2D(64,
                                kernel_size=(3, 3),
                                activation='relu'))

        model.add(layers.MaxPooling2D((2, 2)))

        model.add(layers.Conv2D(128,
                                kernel_size=(3, 3),
                                activation='relu'))

        model.add(layers.Flatten())

        model.add(layers.Dense(256, activation='relu'))

        model.add(layers.Dense(self.number_of_classes, activation='softmax'))

        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer='adam',
                      metrics=['accuracy'])
        debug(model.summary())
        return model
Example #15
def make_model(env):
    num_frames = len(env.observation_space.spaces)
    height, width = env.observation_space.spaces[0].shape
    input_shape = height, width, num_frames

    # input state
    ph_state = layers.Input(shape=input_shape)

    # convolutional layers
    conv1 = layers.Conv2D(32, (8, 8), strides=(4, 4))(ph_state)
    conv1 = layers.Activation('relu')(conv1)
    conv2 = layers.Conv2D(64, (4, 4), strides=(2, 2))(conv1)
    conv2 = layers.Activation('relu')(conv2)
    conv3 = layers.Conv2D(64, (3, 3), strides=(1, 1))(conv2)
    conv3 = layers.Activation('relu')(conv3)
    conv_flat = layers.Flatten()(conv3)
    feature = layers.Dense(512)(conv_flat)
    feature = layers.Activation('relu')(feature)

    # actor (policy) and critic (value) streams
    size_logits = size_value = env.action_space.n
    logits_init = initializers.RandomNormal(stddev=1e-3)
    logits = layers.Dense(size_logits, kernel_initializer=logits_init)(feature)
    value = layers.Dense(size_value)(feature)
    return models.Model(inputs=ph_state, outputs=[logits, value])
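The returned model emits unnormalised policy logits alongside a state value; a minimal sketch of sampling an action from the policy head, where `env` and the dummy observation are illustrative placeholders:

import numpy as np
import tensorflow as tf

# Hypothetical rollout step for the actor-critic model above.
model = make_model(env)                                    # env: gym-style environment (placeholder)
obs = np.zeros(model.input_shape[1:], dtype=np.float32)    # dummy stacked frame
logits, value = model(obs[None, ...])                      # add a batch dimension
action = int(tf.random.categorical(logits, num_samples=1)[0, 0])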
Example #16
def make_model(env):
    num_frames = len(env.observation_space.spaces)
    height, width = env.observation_space.spaces[0].shape
    input_shape = height, width, num_frames

    # input state
    ph_state = layers.Input(shape=input_shape)

    # convolutional layers
    conv1 = layers.Conv2D(32, (8, 8), strides=(4, 4))(ph_state)
    conv1 = layers.Activation('relu')(conv1)
    conv2 = layers.Conv2D(64, (4, 4), strides=(2, 2))(conv1)
    conv2 = layers.Activation('relu')(conv2)
    conv3 = layers.Conv2D(64, (3, 3), strides=(1, 1))(conv2)
    conv3 = layers.Activation('relu')(conv3)
    conv_flat = layers.Flatten()(conv3)
    feature = layers.Dense(512)(conv_flat)
    feature = layers.Activation('relu')(feature)

    # actor (policy) and critic (value) streams
    size_value = env.action_space.n

    value = NoisyDenseFG(size_value)(feature)
    value = layers.Activation('linear')(value)

    return models.Model(inputs=ph_state, outputs=value)
Example #17
def lenet(network_input: NetworkInput) -> KerasModel:
    model = Sequential()

    input_shape = network_input.input_shape
    if len(network_input.input_shape) < 3:
        model.add(
            layers.Lambda(lambda x: tf.expand_dims(x, -1),
                          input_shape=input_shape))
        input_shape = (input_shape[0], input_shape[1], 1)

    if network_input.mean is not None and network_input.std is not None:
        model.add(
            layers.Lambda(
                lambda x: norm(x, network_input.mean, network_input.std),
                input_shape=input_shape))

    model.add(
        layers.Conv2D(32,
                      kernel_size=(3, 3),
                      input_shape=input_shape,
                      activation='relu'))

    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.2))

    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(0.2))

    model.add(
        layers.Dense(network_input.number_of_classes, activation='softmax'))

    return model
Example #18
def TrongNet(input_shape, num_classes, pretrained_weights=None):
    input_image = layers.Input(shape=input_shape, name='input_1')
    x = layers.Conv2D(filters=16,
                      kernel_size=(3, 3),
                      activation=activations.relu)(input_image)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(filters=64,
                      kernel_size=(3, 3),
                      activation=activations.relu)(x)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(filters=64,
                      kernel_size=(3, 3),
                      activation=activations.relu)(x)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(filters=128,
                      kernel_size=(3, 3),
                      activation=activations.relu)(x)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)
    x = layers.BatchNormalization()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(units=256, activation=activations.relu)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(units=64, activation=activations.relu)(x)
    x = layers.Dense(units=num_classes,
                     activation=activations.softmax,
                     name='predictions')(x)
    model = models.Model(input_image, x, name='trongnet')
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
Example #19
def discriminator(input_shape=(28, 28, 1)):
    model = tf.keras.Sequential()
    model.add(
        layers.Conv2D(16, (5, 5),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
                      input_shape=input_shape))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(0.2))
    model.add(
        layers.Conv2D(32, (5, 5),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(0.2))
    model.add(
        layers.Conv2D(64, (5, 5),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(0.2))
    model.add(
        layers.Conv2D(128, (5, 5),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
    model.add(layers.LeakyReLU(0.2))
    model.add(layers.Flatten())
    model.add(layers.Dense(1, activation="sigmoid"))
    return model
Example #20
def LeNet5(input_shape, num_classes, pretrained_weights=None):
    input_image = layers.Input(shape=input_shape, name='input_1')
    x = layers.Conv2D(filters=6,
                      kernel_size=(3, 3),
                      activation=activations.relu)(input_image)
    x = layers.AveragePooling2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(filters=16,
                      kernel_size=(3, 3),
                      activation=activations.relu)(x)
    x = layers.AveragePooling2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(filters=16,
                      kernel_size=(3, 3),
                      activation=activations.relu)(x)
    x = layers.AveragePooling2D(pool_size=(2, 2))(x)
    x = layers.Flatten()(x)
    x = layers.Dense(units=120, activation=activations.relu)(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(units=84, activation=activations.relu)(x)
    x = layers.Dense(units=num_classes,
                     activation=activations.softmax,
                     name='predictions')(x)
    model = models.Model(input_image, x, name='lenet5')
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
Example #21
def flatten(x, reshape=False):
    """
    Flattens the input to two dimensional.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    reshape : bool, default False
        Whether to reshape instead of flatten.
    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulting tensor/variable/symbol.
    """
    if not is_channels_first():

        def channels_last_flatten(z):
            z = K.permute_dimensions(z, pattern=(0, 3, 1, 2))
            z = K.reshape(z, shape=(-1, np.prod(K.int_shape(z)[1:])))
            updateshape(z)
            return z

        return nn.Lambda(channels_last_flatten)(x)
    else:
        if reshape:
            x = nn.Reshape((-1, ))(x)
        else:
            x = nn.Flatten()(x)
        return x
Example #22
def build_cnn_model():
  #Instantiate a Keras tensor
  sequences= layers.Input(shape = (max_length, )) 
  #Turns positive integers (indexes) into dense vectors of fixed size
  embedded = layers.Embedding(12000, 64) (sequences)
  #Convolution kernel is convolved with the layer input to produce a tensor of outputs
  #(output_space, kernel_size, linear_unit_activation_function)
  x = layers.Conv1D(64, 3, activation='relu') (embedded)
  #Normalize and scale inputs or activations
  x = layers.BatchNormalization() (x)
  #Downsamples the input representation by taking the maximum value over the window
  x = layers.MaxPool1D(3) (x)
  x = layers.Conv1D(64, 5, activation='relu') (x)
  x = layers.BatchNormalization() (x)
  x = layers.MaxPool1D(5) (x)
  x = layers.Conv1D(64, 5, activation='relu') (x)
  #Downsamples the input representation by taking the maximum value over the time dimension
  x = layers.GlobalMaxPool1D() (x)
  x = layers.Flatten() (x)
  #First parameter represents the dimension of the output space
  x = layers.Dense(100, activation='relu') (x)

  #Sigmoid function: values <-5 => value close to 0; values >5 => values close to 1
  predictions = layers.Dense(1, activation='sigmoid') (x)

  model = models.Model(inputs = sequences, outputs = predictions)

  model.compile(
      optimizer='rmsprop',
      loss='binary_crossentropy',
      metrics=['binary_accuracy']
  )

  return model
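The network expects integer token IDs padded to `max_length` and a vocabulary of at most 12000 words (the Embedding size above); a hedged preprocessing sketch using the `tf.keras` text utilities, with the sample sentences and the `max_length` value as placeholders:

import tensorflow as tf

max_length = 100  # must match the Input shape used in build_cnn_model
tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=12000)
tokenizer.fit_on_texts(["a placeholder sentence", "another placeholder sentence"])
seqs = tokenizer.texts_to_sequences(["a placeholder sentence"])
padded = tf.keras.preprocessing.sequence.pad_sequences(seqs, maxlen=max_length)
# `padded` has shape (1, max_length) and can be fed to the compiled model.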
Example #23
def my_model():
    # prep layers
    inp = layers.Input(shape=(32, 32, 3))
    x = layers.Conv2D(64, 3, padding='same')(inp)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer1
    x = layers.Conv2D(128, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 128)])
    # layer2
    x = layers.Conv2D(256, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer3
    x = layers.Conv2D(512, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 512)])
    # layer4
    x = layers.GlobalMaxPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(10)(x)
    x = layers.Activation('softmax', dtype='float32')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)

    return model
Example #24
def create_classifier():
    print("Building model")

    inp = kl.Input((32, 32, 3))

    layer = kl.Conv2D(64, (3, 3), padding='same')(inp)
    layer = kl.Activation('relu')(layer)
    layer = kl.Conv2D(64, (3, 3))(layer)
    layer = kl.Activation('relu')(layer)
    layer = kl.MaxPooling2D(pool_size=(2, 2))(layer)
    layer = kl.Dropout(0.25)(layer)

    layer = kl.Conv2D(128, (3, 3), padding='same')(layer)
    layer = kl.Activation('relu')(layer)
    layer = kl.Conv2D(128, (3, 3))(layer)
    layer = kl.Activation('relu')(layer)
    layer = kl.MaxPooling2D(pool_size=(2, 2))(layer)
    layer = kl.Dropout(0.25)(layer)

    layer = kl.Flatten()(layer)
    layer = kl.Dense(512)(layer)
    layer = kl.Activation('relu')(layer)

    fidmodel = k.Model(inp, layer)

    layer = kl.Dropout(0.5)(layer)
    layer = kl.Dense(47)(layer)
    layer = kl.Activation('softmax')(layer)

    model = k.models.Model(inp, layer)

    return model, fidmodel
Example #25
def discriminator():
    """
    Purpose of the Discriminator model is to learn to tell real images
    apart from fakes. During training, the Discriminator progressively
    becomes better at telling fake images from real ones. The process
    reaches equilibrium when the Discriminator can no longer distinguish
    real images from fakes.

    The Discriminator is a simple CNN-based image classifier. It outputs
    positive values for real images, and negative values for fake images.

    :return: The Discriminator model.
    """
    model = keras.Sequential([
        layers.Conv2D(filters=64, kernel_size=(5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]),
        layers.LeakyReLU(),
        layers.Dropout(rate=0.3),

        layers.Conv2D(filters=128, kernel_size=(5, 5), strides=(2, 2), padding='same'),
        layers.LeakyReLU(),
        layers.Dropout(rate=0.3),

        layers.Flatten(),
        layers.Dense(units=1),
    ])

    return model
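The final Dense(1) layer has no activation, so the model returns raw logits, which is what the docstring's positive/negative phrasing refers to; training typically pairs this with a from-logits binary cross-entropy. A minimal sketch of that loss, assuming the usual real = 1 / fake = 0 labelling:

import tensorflow as tf

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
    # Real images should score towards 1, generated images towards 0.
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss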
Example #26
def cnn_mnist(input_shape=(28, 28, 1), nb_classes=10):
    """
    Defines a CNN model using Keras sequential model
    :param input_shape:
    :param nb_classes:
    :return:
    """
    MODEL.set_architecture('cnn')
    img_rows, img_cols, nb_channels = input_shape

    # Define the layers successively (convolution layers are version dependent)
    if tf.keras.backend.image_data_format() == 'channels_first':
        input_shape = (nb_channels, img_rows, img_cols)

    struct = [
        layers.Conv2D(filters=32, kernel_size=(3, 3), input_shape=input_shape),
        layers.Activation('relu'),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(filters=64, kernel_size=(3, 3)),
        layers.Activation('relu'),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dense(64 * 64),
        layers.Dropout(rate=0.4),
        layers.Dense(nb_classes),
        layers.Activation('softmax')
    ]
    # construct the cnn model
    model = models.Sequential()
    for layer in struct:
        model.add(layer)

    if MODE.DEBUG:
        print(model.summary())
    return model
Example #27
def discriminator():
    """
    Purpose of the Discriminator model is to learn to tell real images
    apart from fakes. During training, the Discriminator progressively
    becomes better at telling fake images from real ones. The process
    reaches equilibrium when the Discriminator can no longer distinguish
    real images from fakes.

    The Discriminator is a simple CNN-based image classifier. It outputs
    positive values for real images, and negative values for fake images.

    Returns:
        The Discriminator model.
    """
    start = time.time()

    model = keras.Sequential([
        layers.Conv2D(filters=64, kernel_size=(5, 5), strides=(2, 2), padding='same',
                      input_shape=[IMG_SHAPE[0], IMG_SHAPE[1], N_CHANNELS]),
        layers.LeakyReLU(),
        layers.Dropout(rate=0.3),

        layers.Conv2D(filters=128, kernel_size=(5, 5), strides=(2, 2), padding='same'),
        layers.LeakyReLU(),
        layers.Dropout(rate=0.3),

        layers.Flatten(),
        layers.Dense(units=1),
    ])

    end = time.time()
    if DEBUG_LOG:
        print("Execution time: {:.9f}s (discriminator)".format(end - start))

    return model
Example #28
    def __init__(self):
        super(Network, self).__init__()

        self.mylayers = [
            # unit1
            layers.Conv2D(filters=32,
                          kernel_size=[5, 5],
                          padding='same',
                          activation=nn.relu),
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
            # unit2
            layers.Conv2D(filters=64,
                          kernel_size=[5, 5],
                          padding='same',
                          activation=nn.relu),
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
            # flatten the tensor
            layers.Flatten(),
            # 2 full-connected layers
            layers.Dense(512, activation=nn.relu),
            layers.Dense(10, activation=nn.softmax)
            # layers.Dense(10, activation=None)
        ]
        # Whether the last layer needs activation=nn.softmax depends on the TensorFlow version:
        # If the TF version is below 1.13:
        #     add it,
        #     and set loss=keras.losses.categorical_crossentropy in model.compile during training.
        # If the TF version is 1.13 or higher:
        #     do not add it,
        #     and set loss=keras.losses.CategoricalCrossentropy(from_logits=True) in model.compile during training.

        self.net = Sequential(self.mylayers)
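As the version note above says, whether the network ends with softmax or with raw logits determines which loss goes into model.compile; a small stand-alone sketch of the two pairings (the stand-in model here is illustrative, not the Network class itself):

from tensorflow import keras

# Illustrative stand-in model with a softmax output, mirroring self.mylayers above.
model = keras.Sequential([
    keras.layers.Dense(512, activation='relu', input_shape=(784,)),
    keras.layers.Dense(10, activation='softmax'),
])
# Softmax output -> plain categorical cross-entropy:
model.compile(optimizer='adam', loss=keras.losses.categorical_crossentropy)
# Logits output (final Dense(10) without softmax) -> from_logits=True:
# model.compile(optimizer='adam',
#               loss=keras.losses.CategoricalCrossentropy(from_logits=True))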
Example #29
    def __init__(self, action_size, ip_shape=(84, 84, 3)):
        super(CNN, self).__init__()
        self.action_size = action_size
        self.ip_shape = ip_shape

        self.conv1 = layers.Conv2D(filters=16,
                                   kernel_size=(8, 8),
                                   strides=(4, 4),
                                   activation=tf.keras.activations.relu,
                                   data_format='channels_last',
                                   input_shape=self.ip_shape
                                   )
        
        self.conv2 = layers.Conv2D(filters=32,
                                   kernel_size=(4, 4),
                                   strides=(2, 2),
                                   activation=tf.keras.activations.relu,
                                   data_format='channels_last'
                                   )

        # reshape
        self.flatten = layers.Flatten()
        self.fc1 = layers.Dense(units=256,
                                activation=tf.keras.activations.relu
                                )


        # policy output layer (Actor)
        self.policy_logits = layers.Dense(units=self.action_size, activation=tf.nn.softmax, name='policy_logits')

        # value output layer (Critic)
        self.values = layers.Dense(units=1, name='value')
Example #30
def create_model(input_shape=(28, 28, 1), nb_classes=10):
    MODEL.set_architecture('cnn')

    # define model architecture
    struct = [
        layers.Conv2D(filters=32, kernel_size=(3, 3), input_shape=input_shape),
        layers.Activation('relu'),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(filters=64, kernel_size=(3, 3)),
        layers.Activation('relu'),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dense(64 * 64),
        layers.Dropout(rate=0.4),
        layers.Dense(nb_classes),
        layers.Activation('softmax'),
    ]

    # construct the model
    model = models.Sequential()
    for layer in struct:
        model.add(layer)

    logger.info(model.summary())
    return model