示例#1
0
def onestep_conv_layer():
    """Build a small CNN feature extractor for 50x50 single-channel input:
    two Conv-BN-Pool-Dropout stages followed by a Flatten layer."""
    model = kmodels.Sequential()

    # Stage 1: 32 same-padded 3x3 filters; carries the input shape.
    model.add(klayers.Convolution2D(filters=32,
                                    kernel_size=(3, 3),
                                    padding='same',
                                    data_format='channels_last',
                                    input_shape=(50, 50, 1),
                                    activation='relu'))
    model.add(klayers.BatchNormalization(axis=-1))
    model.add(klayers.MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(klayers.Dropout(0.4))

    # Stage 2: 64 same-padded 3x3 filters.
    model.add(klayers.Convolution2D(filters=64,
                                    kernel_size=(3, 3),
                                    activation='relu',
                                    padding='same'))
    model.add(klayers.BatchNormalization(axis=-1))
    model.add(klayers.MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(klayers.Dropout(0.4))

    model.add(klayers.Flatten())
    return model
示例#2
0
    def resnet_block(input_features, nb_features=64, nb_kernel_rows=3, nb_kernel_cols=3):
        """
        A ResNet block with two `nb_kernel_rows` x `nb_kernel_cols` convolutional layers,
        each with `nb_features` feature maps.

        See Figure 6 in https://arxiv.org/pdf/1612.07828v1.pdf.

        :param input_features: Input tensor to ResNet block.
        :return: Output tensor from ResNet block.
        """
        # BUG FIX: the original left the `with tf.name_scope(...)` body
        # unindented, which is an IndentationError. The block's ops are now
        # properly grouped under the name scope.
        with tf.name_scope('resnet_block'):
            y = layers.Convolution2D(nb_features, nb_kernel_rows, nb_kernel_cols, border_mode='same')(input_features)
            y = layers.Activation('relu')(y)
            y = layers.Convolution2D(nb_features, nb_kernel_rows, nb_kernel_cols, border_mode='same')(y)

            # Identity shortcut: element-wise sum of input and conv output,
            # followed by a ReLU (rectified linear) activation.
            y = layers.merge([input_features, y], mode='sum')
            return layers.Activation('relu')(y)

    # an input image of size w × h is convolved with 3 × 3 filters that output 64 feature maps
    x = layers.Convolution2D(64, 3, 3, border_mode='same', activation='relu')(input_image_tensor)

    # the output is passed through 4 ResNet blocks
    for _ in range(4):
        x = resnet_block(x)

    # the output of the last ResNet block is passed to a 1 × 1 convolutional layer producing 1 feature map
    # corresponding to the refined synthetic image
    return layers.Convolution2D(img_channels, 1, 1, border_mode='same', activation='tanh')(x)
示例#3
0
def identity_block_td(input_tensor, kernel_size, filters, stage, block, trainable=True):
    """Time-distributed ResNet identity block: 1x1 -> kxk -> 1x1 convolutions
    (each wrapped in TimeDistributed with FixedBatchNormalization) summed with
    the block input via a shortcut connection."""
    nb_filter1, nb_filter2, nb_filter3 = filters

    # BatchNorm axis depends on the backend's image dim ordering.
    bn_axis = 3 if K.image_dim_ordering() == 'tf' else 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # (branch suffix, filter count, kernel, extra conv kwargs) per stage.
    stages = (('2a', nb_filter1, (1, 1), {}),
              ('2b', nb_filter2, (kernel_size, kernel_size), {'padding': 'same'}),
              ('2c', nb_filter3, (1, 1), {}))

    x = input_tensor
    for suffix, nb_filter, ksize, extra in stages:
        conv = layers.Convolution2D(nb_filter, ksize, trainable=trainable,
                                    kernel_initializer='normal', **extra)
        x = TimeDistributed(conv, name=conv_name_base + suffix)(x)
        x = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                            name=bn_name_base + suffix)(x)
        # No activation after branch 2c — it comes after the shortcut add.
        if suffix != '2c':
            x = layers.Activation('relu')(x)

    x = Add()([x, input_tensor])
    return layers.Activation('relu')(x)
def get_model(time_len=1, vgg_weights=False):
    """NVIDIA-style steering regression CNN: input BatchNorm, five
    convolutions, then a 100-50-10-1 dense head; compiled with Adam/MSE.

    'time_len' and 'vgg_weights' are accepted for interface compatibility
    but unused here."""
    model = k.models.Sequential()
    model.add(l.BatchNormalization(input_shape=(ch, row, col)))

    # Three strided 5x5 convolutions followed by two unstrided 3x3 ones
    # (old Keras 1 positional-kernel / `subsample` API).
    for n_filters, kernel, stride in ((24, 5, (2, 2)),
                                      (36, 5, (2, 2)),
                                      (48, 5, (2, 2)),
                                      (64, 3, (1, 1)),
                                      (64, 3, (1, 1))):
        model.add(l.Convolution2D(n_filters, kernel, kernel, subsample=stride))

    model.add(l.Flatten())

    # Fully connected head with ReLU activations.
    for units in (100, 50, 10):
        model.add(l.Dense(units))
        model.add(l.Activation('relu'))

    # Single linear output (steering value).
    model.add(l.Dense(1))

    model.compile(optimizer='adam', loss="mse", metrics=['mae'])
    return model
示例#5
0
def simple_cnn_model():
    """VGG-like classifier: five Conv-ReLU-MaxPool stages doubling filters
    from 32 to 512, then a 1024-512 dense head with softmax output;
    compiled with RMSprop and categorical cross-entropy."""
    model = models.Sequential()

    # First convolution carries the input shape; the rest infer it.
    model.add(layers.Convolution2D(32, (3, 3),
                                   input_shape=(input_size, input_size, 3)))
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPool2D((2, 2)))

    # Remaining conv stages with doubling filter counts.
    for n_filters in (64, 128, 256, 512):
        model.add(layers.Convolution2D(n_filters, (3, 3)))
        model.add(layers.Activation('relu'))
        model.add(layers.MaxPool2D((2, 2)))

    # Dense classifier head.
    model.add(layers.Flatten())
    model.add(layers.Dense(1024))
    model.add(layers.Activation('relu'))
    model.add(layers.Dense(512))
    model.add(layers.Activation('relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(output_size))
    model.add(layers.Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(lr=1e-4),
                  metrics=['acc'])
    return model
示例#6
0
def build_model():
    """Two-headed driving model: five-conv trunk over a 120x160x3 image with
    linear steering-angle and throttle outputs, compiled with per-head MSE."""
    img_in = KL.Input(shape=(120, 160, 3), name='img_in')

    # Convolutional trunk (Convolution2D is an alias for Conv2D).
    trunk = ((24, (5, 5), (2, 2)),
             (32, (5, 5), (2, 2)),
             (64, (5, 5), (2, 2)),
             (64, (3, 3), (2, 2)),
             (64, (3, 3), (1, 1)))
    x = img_in
    for n_filters, ksize, stride in trunk:
        x = KL.Convolution2D(filters=n_filters, kernel_size=ksize,
                             strides=stride, activation='relu')(x)

    # Shared dense stem with light dropout.
    x = KL.Flatten(name='flattened')(x)
    x = KL.Dense(units=100, activation='linear')(x)
    x = KL.Dropout(rate=.1)(x)
    x = KL.Dense(units=50, activation='linear')(x)
    x = KL.Dropout(rate=.1)(x)

    # Steering-angle output head.
    angle_out = KL.Dense(units=1, activation='linear', name='angle_out')(x)
    # Throttle output head.
    throttle_out = KL.Dense(units=1, activation='linear', name='throttle_out')(x)

    model = K.Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'mean_squared_error',
                        'throttle_out': 'mean_squared_error'},
                  loss_weights={'angle_out': 0.5, 'throttle_out': .5})
    return model
示例#7
0
    def conv_block(self, inputs, bn=True, k=1):
        """DenseNet-style block: (BN-)ReLU-1x1 conv then (BN-)ReLU-3x3 conv,
        concatenating the result with the block input along the channel axis.

        `bn` toggles the BatchNorm layers; `k` is the width multiplier."""
        channel_axis = 3 if K.image_data_format() == 'channels_last' else 1

        x = inputs
        if bn:
            # Specifying the axis and mode allows for later merging.
            x = layers.BatchNormalization(axis=channel_axis, scale=False)(x)
        x = layers.Activation('relu')(x)
        x = layers.Convolution2D(16 * k, (1, 1),
                                 padding='same',
                                 kernel_initializer='he_normal',
                                 use_bias=False,
                                 kernel_regularizer=regularizers.l2(5e-4))(x)

        if bn:
            x = layers.BatchNormalization(axis=channel_axis, scale=False)(x)
        x = layers.Activation('relu')(x)
        x = layers.Convolution2D(4 * k, (3, 3),
                                 padding='same',
                                 kernel_initializer='he_normal',
                                 use_bias=False,
                                 kernel_regularizer=regularizers.l2(5e-4))(x)

        # Dense connectivity: keep the input alongside the new features.
        return layers.Concatenate(axis=channel_axis)([inputs, x])
示例#8
0
def refiner_network(input_image_tensor):
    """
    The refiner network, Rθ, is a residual network (ResNet). It modifies the
    synthetic image on a pixel level, rather than holistically modifying the
    image content, preserving the global structure and annotations.

    :param input_image_tensor: Input tensor that corresponds to a synthetic image.
    :return: Output tensor that corresponds to a refined synthetic image.
    """
    def resnet_block(input_features, nb_features=64, nb_kernel_rows=3, nb_kernel_cols=3):
        """One residual block: two same-padded convolutions with
        `nb_features` maps plus an identity shortcut and final ReLU.
        See Figure 6 in https://arxiv.org/pdf/1612.07828v1.pdf."""
        out = layers.Convolution2D(nb_features, nb_kernel_rows, nb_kernel_cols, border_mode='same')(input_features)
        out = layers.Activation('relu')(out)
        out = layers.Convolution2D(nb_features, nb_kernel_rows, nb_kernel_cols, border_mode='same')(out)
        out = layers.merge([input_features, out], mode='sum')
        return layers.Activation('relu')(out)

    # A w x h input is convolved with 3x3 filters producing 64 feature maps.
    x = layers.Convolution2D(64, 3, 3, border_mode='same', activation='relu')(input_image_tensor)

    # Pass the features through 4 ResNet blocks.
    for _ in range(4):
        x = resnet_block(x)

    # Final 1x1 convolution producing the single-channel refined image.
    return layers.Convolution2D(1, 1, 1, border_mode='same', activation='tanh')(x)
示例#9
0
    def __init__(self,**kwargs):
        # Axis-separated convolution builder: decomposes a k x k
        # Convolution2D into a (1, k) "x axis" conv followed by a (k, 1)
        # "y axis" conv, plus an Add layer to combine their outputs
        # (the actual wiring happens elsewhere — not visible here).
        self.kwargs = kwargs  # NOTE(review): aliases the dict mutated below
        self.name = kwargs['name']
        self.filters = kwargs['filters']
        self.kernel = kwargs['kernel_size']
        self.activation = 'linear'

        # Remember the requested activation but force the wrapped convs to be
        # linear; presumably the activation is applied after the Add — confirm.
        if 'activation' in kwargs:
            self.activation = kwargs['activation']
            kwargs['activation'] = 'linear'

        # Normalize an int kernel size to a (k, k) tuple.
        if isinstance(self.kernel, int):
            ks = self.kernel
            self.kernel = (ks, ks)

        ks = self.kernel

        # Horizontal (1 x k) convolution built from the shared kwargs.
        kwargs['kernel_size'] = (1, ks[1])
        kwargs['name'] = self.name + '_x_axis'
        kwargs['data_format'] = 'channels_first'
        self.convx = KL.Convolution2D(**kwargs)
        # Vertical (k x 1) convolution reusing the same (mutated) kwargs.
        kwargs['kernel_size'] = (ks[0], 1)
        kwargs['name'] = self.name + '_y_axis'
        kwargs['data_format'] = 'channels_first'
        self.convy = KL.Convolution2D(**kwargs)
        self.addLayer = KL.Add(name=self.name + '_add')
示例#10
0
def buildModel(shape, dr1=0.1, dr2=0.5):
    """Build a Keras model to be trained, using the architecture discussed
    in the lecture that is said to be published by the NVidia Autonomous
    Vehicle Team.

    'shape' is the input shape, assumed to be 3 dimensional.
    'dr1' is the drop out rate for the convolutional layers.
    'dr2' is the drop out rate for the fully connected layers.
    """
    assert len(shape) == 3

    # Import keras lazily so 'show_gui.py' (which imports this script) does
    # not pull in keras and its heavy dependencies.
    import keras.models as _kmod
    import keras.layers as _klay

    model = _kmod.Sequential()

    # Cropping and normalization are part of the model (not the data
    # pipeline) so drive.py gets them for free when invoking the model.
    # Ignore the top 42% and the bottom 15% of each image.
    cropTop = int(shape[0] * 0.42)
    cropBot = int(shape[0] * 0.15)
    model.add(_klay.Cropping2D(cropping=((cropTop, cropBot), (0, 0)),
                               input_shape=shape))

    # Very basic normalization into the [-0.5, 0.5] range.
    model.add(_klay.Lambda(lambda x: x / 255.0 - 0.5))

    # Three 5x5 stride-2 convolutions with spatial dropout between them.
    model.add(_klay.Convolution2D(24, 5, 5, subsample=(2, 2), activation='relu'))
    model.add(_klay.SpatialDropout2D(dr1))
    model.add(_klay.Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))
    model.add(_klay.SpatialDropout2D(dr1))
    model.add(_klay.Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))

    # Two 3x3 stride-1 convolutions, again with spatial dropout.
    model.add(_klay.SpatialDropout2D(dr1))
    model.add(_klay.Convolution2D(64, 3, 3, subsample=(1, 1), activation='relu'))
    model.add(_klay.SpatialDropout2D(dr1))
    model.add(_klay.Convolution2D(64, 3, 3, subsample=(1, 1), activation='relu'))

    # Fully connected head: 100 -> 50 ReLU layers, then the linear output.
    model.add(_klay.Flatten())
    for units in (100, 50):
        model.add(_klay.Dropout(dr2))
        model.add(_klay.Dense(units, activation='relu'))
    model.add(_klay.Dropout(dr2))
    model.add(_klay.Dense(1))

    return model
示例#11
0
def load_model():
    """Build the 28x28x1 CNN classifier, load weights from 'keras.h5',
    and pre-build its predict function."""
    input_shape = [28, 28, 1]

    # Three Conv-Pool-Dropout stages; the first carries the input shape.
    model = keras.Sequential()
    model.add(layers.Convolution2D(16, (3, 3),
                                   padding='same',
                                   input_shape=input_shape,
                                   activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.2))
    for n_filters in (32, 64):
        model.add(layers.Convolution2D(n_filters, (3, 3), padding='same',
                                       activation='relu'))
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Dropout(0.2))

    # Dense head: tanh hidden layer, 100-way softmax output.
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='tanh'))
    model.add(layers.Dense(100, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=tf.train.AdamOptimizer(),
                  metrics=['top_k_categorical_accuracy'])

    model.load_weights('keras.h5')
    # Pre-build the predict function up front (private Keras API).
    model._make_predict_function()
    return model
示例#12
0
def get_cnn(outputs, is_norm, activation):
    """CIFAR-style CNN: two double-conv blocks (32 and 64 filters), a
    512-unit dense layer, and a NormilizedDense output with an optional
    final activation."""
    model = models.Sequential()

    def add_conv_relu(n_filters, **conv_kwargs):
        # One 3x3 Convolution2D followed by its ReLU activation.
        model.add(layers.Convolution2D(n_filters, 3, **conv_kwargs))
        model.add(layers.Activation('relu'))

    # Block 1: 32-filter convolutions on the 32x32x3 input.
    add_conv_relu(32, padding='same', input_shape=(32, 32, 3))
    add_conv_relu(32)
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))

    # Block 2: 64-filter convolutions.
    add_conv_relu(64, padding='same')
    add_conv_relu(64)
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))

    # Classifier head.
    model.add(layers.Flatten())
    model.add(layers.Dense(512))
    model.add(layers.Activation('relu'))
    model.add(normilized_dense.NormilizedDense(outputs, is_norm=is_norm))
    if activation is not None:
        model.add(layers.Activation(activation))
    return model
示例#13
0
    def __init__(self, input_shape, output_labels_size):
        """Assemble and compile a three-stage CNN classifier, store it on
        self.model, and print the model summary."""
        model = keras.Sequential()

        # First convolution carries the input shape; later ones infer it.
        model.add(layers.Convolution2D(16, (3, 3),
                                       padding='same',
                                       input_shape=input_shape,
                                       activation='relu'))
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        for n_filters in (32, 64):
            model.add(layers.Convolution2D(n_filters, (3, 3), padding='same',
                                           activation='relu'))
            model.add(layers.MaxPooling2D(pool_size=(2, 2)))

        # Dense head with softmax over the output labels.
        model.add(layers.Flatten())
        model.add(layers.Dense(128, activation='relu'))
        model.add(layers.Dense(output_labels_size, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=tf.train.AdamOptimizer(),
                      metrics=['top_k_categorical_accuracy'])

        self.model = model
        print(model.summary())
示例#14
0
文件: ops.py 项目: frozoul/SCVGAN
def learnConcatRealImagBlock(I, filter_size, featmaps, stage, block, bnArgs, init_act):
    """Learn initial imaginary component for input.

    Two pre-activation stages (BN -> activation -> same-padded conv) with
    he_normal initialization, no bias, and L2(0.0001) regularization.
    """
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    O = I
    # Branch '2a' uses featmaps[0] feature maps, branch '2b' featmaps[1].
    for branch, featmap_count in zip(('2a', '2b'), featmaps):
        O = KL.BatchNormalization(name=bn_name_base + branch, **bnArgs)(O)
        O = KL.Activation(init_act)(O)
        O = KL.Convolution2D(featmap_count, filter_size,
                             name=conv_name_base + branch,
                             padding='same',
                             kernel_initializer='he_normal',
                             use_bias=False,
                             kernel_regularizer=KR.l2(0.0001))(O)

    return O
示例#15
0
    def convolution_block(self, x, nb_channels, dropout_rate=None, bottleneck=False, weight_decay=1e-4):
        """
        Creates a convolution block consisting of BN-ReLU-Conv.
        Optional: bottleneck, dropout
        """
        if bottleneck:
            # Optional 1x1 bottleneck (BN-ReLU-Conv) widening to 4x channels.
            x = layers.BatchNormalization()(x)
            x = layers.Activation('relu')(x)
            x = layers.Convolution2D(nb_channels * 4, (1, 1),
                                     kernel_regularizer=keras.regularizers.l2(weight_decay))(x)
            if dropout_rate:
                x = layers.Dropout(dropout_rate)(x)

        # Standard BN-ReLU-Conv with a 3x3 same-padded kernel.
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.Convolution2D(nb_channels, (3, 3), padding='same')(x)

        # Optional dropout after the main convolution.
        if dropout_rate:
            x = layers.Dropout(dropout_rate)(x)

        return x
示例#16
0
def refiner_network(input_image_tensor):
    """Refiner network: a residual CNN that adjusts a synthetic image at the
    pixel level while preserving its global structure."""

    def resnet_block(input_features, nb_features=64, nb_kernel_rows=3, nb_kernel_cols=3):
        """One residual block: two same-padded convolutions plus an identity
        shortcut (see Figure 6 in https://arxiv.org/pdf/1612.07828v1.pdf).

        NOTE(review): the convs pass nb_kernel_cols / nb_kernel_rows as the
        single kernel_size argument, so the other dimension is unused —
        confirm this is intended.
        """
        out = layers.Convolution2D(nb_features, nb_kernel_cols, padding='same',data_format = "channels_last")(input_features)
        out = layers.Activation('relu')(out)
        out = layers.Convolution2D(nb_features, nb_kernel_rows, padding='same',data_format = "channels_last")(out)
        out = layers.add([input_features, out])
        return layers.Activation('relu')(out)

    # Initial 3x3 convolution producing 64 feature maps.
    x = layers.Convolution2D(64, (3, 3), padding='same', activation='relu',data_format = "channels_last")(input_image_tensor)

    # Pass the features through 5 ResNet blocks.
    for _ in range(5):
        x = resnet_block(x)

    # 1x1 convolution back to img_channels with a tanh output.
    return layers.Convolution2D(img_channels, (1, 1), padding='same', activation='tanh',data_format = "channels_last")(x)
示例#17
0
    def resnet_block(input_features,
                     nb_features=64,
                     nb_kernel_rows=3,
                     nb_kernel_cols=3):
        """One ResNet block: two `nb_kernel_rows` x `nb_kernel_cols`
        convolutions with `nb_features` maps each, plus an identity shortcut.
        See Figure 6 in https://arxiv.org/pdf/1612.07828v1.pdf.

        :param input_features: Input tensor to ResNet block.
        :return: Output tensor from ResNet block.
        """
        out = input_features
        # Two same-padded convolutions (old positional kernel-size API);
        # only the first is followed by a ReLU.
        for activate in (True, False):
            out = layers.Convolution2D(nb_features,
                                       nb_kernel_rows,
                                       nb_kernel_cols,
                                       border_mode='same')(out)
            if activate:
                out = layers.Activation('relu')(out)

        # Identity shortcut followed by the final activation.
        out = layers.merge.Add()([input_features, out])
        return layers.Activation('relu')(out)
示例#18
0
def build_model():
    """Deep-Q style network: three strided same-padded convolutions over
    stacked frames, a 512-unit dense layer, and one linear output per
    action; compiled with Adam/MSE."""
    print()
    print("Building the model ")

    model = Sequential()
    # First conv carries the input shape (old Keras 1 subsample/border_mode API).
    model.add(layers.Convolution2D(32,
                                   8,
                                   8,
                                   subsample=(4, 4),
                                   border_mode='same',
                                   input_shape=(IMG_ROWS, IMG_COLS, STACK_SIZE)))
    model.add(layers.Activation('relu'))
    for n_filters, kernel, stride in ((64, 4, (2, 2)), (64, 3, (1, 1))):
        model.add(layers.Convolution2D(n_filters, kernel, kernel,
                                       subsample=stride, border_mode='same'))
        model.add(layers.Activation('relu'))

    model.add(layers.Flatten())
    model.add(layers.Dense(512))
    model.add(layers.Activation('relu'))
    model.add(layers.Dense(NUM_ACTIONS))

    model.compile(loss='mse', optimizer=Adam(lr=1e-6))
    print("Finished building the model")
    print(model.summary())
    return model
示例#19
0
 def _build_model(self):
     """Build and compile the Deep-Q network; also caches a backend function
     exposing the first conv layer's output in self.get_conv.

     :return: Compiled Keras model mapping states -> per-action Q values.
     """
     # Define input layer (states).
     # NOTE(review): shape=(self.state_size) has no trailing comma, so it is
     # just self.state_size — assumes state_size is already a shape tuple.
     states = layers.Input(shape=(self.state_size), name='input')
     # Three convolutions with shrinking kernels/strides: 8x8/4, 4x4/2, 3x3/1.
     c1 = layers.Convolution2D(filters=32,
                               kernel_size=8,
                               strides=4,
                               activation='relu')(states)  # edge detection
     c2 = layers.Convolution2D(filters=64,
                               kernel_size=4,
                               strides=2,
                               activation='relu')(c1)
     c3 = layers.Convolution2D(filters=64,
                               kernel_size=3,
                               strides=1,
                               activation='relu')(c2)
     # Dense head producing one linear Q value per action.
     l1 = layers.Flatten()(c3)
     l2 = layers.Dense(256, activation='relu')(l1)
     Q_val = layers.Dense(units=self.action_size,
                          name='Q_Values',
                          activation='linear')(l2)
     # Create Keras model
     model = models.Model(inputs=[states], outputs=Q_val)  #actions
     model.compile(loss='mse',
                   optimizer=optimizers.Adam(lr=self.learning_rate))
     # Backend function exposing the first conv layer's activations
     # (model.layers[1], i.e. the layer after the Input).
     self.get_conv = K.function(inputs=[model.input],
                                outputs=model.layers[1].output)
     return model
示例#20
0
    def build(self,
              rows,
              cols,
              output_dim,
              loss='binary_crossentropy',
              optimizer='adam',
              metrics='accuracy'):
        """Construct and compile the (1 x n)-kernel CNN classifier on self.model.

        :param rows: Input height (channels-first layout (1, rows, cols)).
        :param cols: Input width.
        :param output_dim: Number of sigmoid output units.
        :param loss: Loss name passed to model.compile.
        :param optimizer: Optimizer name passed to model.compile.
        :param metrics: Single metric name; wrapped in a list for compile.
        """
        self.param_new(rows, cols, output_dim)

        self.model = models.Sequential()
        # Four 1x3 convolutions (old positional kernel API), ReLU after each.
        self.model.add(
            layers.Convolution2D(50, 1, 3, input_shape=(1, rows, cols)))
        self.model.add(layers.Activation('relu'))
        self.model.add(layers.Convolution2D(100, 1, 3))
        self.model.add(layers.Activation('relu'))
        self.model.add(layers.Convolution2D(100, 1, 3))
        self.model.add(layers.Activation('relu'))
        self.model.add(layers.Convolution2D(100, 1, 3))
        self.model.add(layers.Activation('relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(1, 2)))
        # Dense head: 1000 -> 100 ReLU units, then the sigmoid output layer.
        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(1000))
        self.model.add(layers.Activation('relu'))
        self.model.add(layers.Dense(100))
        self.model.add(layers.Activation('relu'))
        self.model.add(
            layers.Dense(output_dim=self.output_dim, activation='sigmoid'))

        # BUG FIX: the compile keyword was misspelled 'meteics', which would
        # raise a TypeError at runtime; it must be 'metrics'.
        self.model.compile(loss=loss, optimizer=optimizer, metrics=[metrics])
示例#21
0
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions.

        Side effects: stores the Keras model in self.model and a backend
        training function in self.train_fn that applies one optimizer update
        given (states, action gradients, learning-phase flag).
        """
        # Define input layer (states).
        # NOTE(review): shape=(self.state_size) is not a 1-tuple (no trailing
        # comma) — assumes self.state_size is already a shape tuple.
        states = layers.Input(shape=(self.state_size), name='input')

        # Conv trunk: three strided 5x5 ELU convs, then two 3x3 ELU convs
        # with dropout between them.
        c1 = layers.Convolution2D(filters=24,
                                  kernel_size=5,
                                  strides=2,
                                  activation='elu')(states)
        c2 = layers.Convolution2D(filters=36,
                                  kernel_size=5,
                                  strides=2,
                                  activation='elu')(c1)
        c3 = layers.Convolution2D(filters=48,
                                  kernel_size=5,
                                  strides=2,
                                  activation='elu')(c2)
        c4 = layers.Convolution2D(filters=64, kernel_size=3,
                                  activation='elu')(c3)
        d1 = layers.Dropout(0.2)(c4)
        c5 = layers.Convolution2D(filters=64, kernel_size=3,
                                  activation='elu')(d1)

        # Dense head: flatten + dropout, then 100 -> 50 -> 10 ELU units.
        l1 = layers.Flatten()(c5)
        l2 = layers.Dropout(0.2)(l1)

        l3 = layers.Dense(100, activation='elu')(l2)

        l4 = layers.Dense(50, activation='elu')(l3)
        l5 = layers.Dense(10, activation='elu')(l4)

        # Steering and Throttle:
        # final output layer with sigmoid activation, one unit per action.
        raw_actions = layers.Dense(units=self.action_size,
                                   name='raw_actions',
                                   activation='sigmoid')(l5)

        # Scale [0, 1] output for each action dimension to proper range
        # (multiply by 180, offset 0).
        actions = layers.Lambda(lambda x: (x * 180) + 0,
                                name='actions')(raw_actions)

        # Create Keras model.
        self.model = models.Model(inputs=[states], outputs=actions)  #actions

        # Define loss function using action value (Q value) gradients
        # supplied externally; minimizing -grad * action ascends Q.
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Define optimizer and training function.
        optimizer = optimizers.Adam(self.lr)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)

        # Backend function performing one training step; inputs are states,
        # action gradients, and the Keras learning-phase flag.
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=loss,
            updates=updates_op)
示例#22
0
文件: model.py 项目: OzAAI/OzIP2019
def simple_CNN(input_shape, num_classes):
    """Fully convolutional classifier: four double-conv blocks with
    BatchNorm / ReLU / average pooling / dropout, then a conv +
    global-average-pooling softmax head."""
    model = models.Sequential()

    # Block 1 (16 filters, 7x7 kernels): first conv is named and carries
    # the input shape.
    model.add(layers.Convolution2D(filters=16,
                                   kernel_size=(7, 7),
                                   padding='same',
                                   name='image_array',
                                   input_shape=input_shape))
    model.add(layers.BatchNormalization())
    model.add(layers.Convolution2D(filters=16, kernel_size=(7, 7), padding='same'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.AveragePooling2D(pool_size=(2, 2), padding='same'))
    model.add(layers.Dropout(.5))

    # Blocks 2-4: growing filter counts with shrinking kernels.
    for n_filters, ksize in ((32, (5, 5)), (64, (3, 3)), (128, (3, 3))):
        model.add(layers.Convolution2D(filters=n_filters, kernel_size=ksize,
                                       padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.Convolution2D(filters=n_filters, kernel_size=ksize,
                                       padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.Activation('relu'))
        model.add(layers.AveragePooling2D(pool_size=(2, 2), padding='same'))
        model.add(layers.Dropout(.5))

    # Head: 256-filter conv, then a num_classes-filter conv collapsed by
    # global average pooling into the softmax predictions.
    model.add(layers.Convolution2D(filters=256, kernel_size=(3, 3),
                                   padding='same'))
    model.add(layers.BatchNormalization())
    model.add(layers.Convolution2D(filters=num_classes,
                                   kernel_size=(3, 3),
                                   padding='same'))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Activation('softmax', name='predictions'))
    return model
示例#23
0
def build_conv_model(det_shape=default_det_shape, n_tracks=1):
    """Build current iteration of convolutional tracking model.
        Returns tuple:
          (full_model, track_pred_model, conv_model, pretrain_layers)
          where:
          -full_model is the entire model
          -track_pred_model is the part that predicts track parameters
            (excluding covariances)
          -conv_model is the convolutional part only
          -pretrain_layers is a list of layers for which trainable=False
            should be set after training the track-finding portion 
            of the model (if training that part separately)"""
    # NOTE(review): the entries appended to pretrain_layers below are the
    # OUTPUT TENSORS of layer calls, not Layer objects — confirm that the
    # downstream trainable=False logic expects tensors.
    pretrain_layers = []

    # Channels-first, single-plane detector image input.
    input_layer = layers.Input(shape=(1, det_shape[0], det_shape[1]))
    # Conv trunk: two 8-filter 3x3 convs, 2x2 max-pool, two 32-filter convs
    # (old Keras 1 positional-kernel / border_mode API).
    layer = layers.Convolution2D(8, 3, 3, border_mode='same')(input_layer)
    pretrain_layers.append(layer)
    layer = layers.Activation('relu')(layer)
    layer = layers.Convolution2D(8, 3, 3, border_mode='same')(layer)
    pretrain_layers.append(layer)
    layer = layers.Activation('relu')(layer)
    layer = layers.MaxPooling2D(pool_size=(2, 2))(layer)
    layer = layers.Convolution2D(32, 3, 3, border_mode='same')(layer)
    pretrain_layers.append(layer)
    layer = layers.Activation('relu')(layer)
    layer = layers.Convolution2D(32, 3, 3, border_mode='same')(layer)
    pretrain_layers.append(layer)
    layer = layers.Activation('relu')(layer)
    conv_model = models.Model(input=input_layer, output=layer)
    layer = layers.Flatten()(layer)

    # Track-parameter branch: dense -> repeat per track -> LSTM -> 2
    # parameters per track via a time-distributed dense layer.
    layer_tracks = layers.Dense(400)(layer)
    pretrain_layers.append(layer_tracks)
    layer_tracks = layers.RepeatVector(n_tracks)(layer_tracks)
    layer_tracks = layers.LSTM(400, return_sequences=True)(layer_tracks)
    pretrain_layers.append(layer_tracks)
    output_layer_tracks = layers.TimeDistributed(layers.Dense(2))(
        layer_tracks)  # track parameters
    pretrain_layers.append(output_layer_tracks)
    track_pred_model = models.Model(input=input_layer,
                                    output=output_layer_tracks)

    # Covariance branch: mirrors the track branch but outputs 3 values per
    # track, mapped to covariance matrices by the Lambda layer.
    layer_cov = layers.Dense(400)(layer)
    layer_cov = layers.RepeatVector(n_tracks)(layer_cov)
    layer_cov = layers.LSTM(400, return_sequences=True)(layer_cov)
    layer_cov = layers.TimeDistributed(layers.Dense(3))(
        layer_cov)  # track covariance matrix parameters
    output_layer_cov = layers.Lambda(
        gauss_likelihood_loss.covariance_from_network_outputs)(layer_cov)

    # Concatenate track parameters and covariances along the feature axis
    # (old Keras 1 merge API).
    output_layer = layers.merge([output_layer_tracks, output_layer_cov],
                                mode='concat',
                                concat_axis=2)
    full_model = models.Model(input=input_layer, output=output_layer)

    return full_model, track_pred_model, conv_model, pretrain_layers
示例#24
0
def resblock(input_tensor, num_channels):
    """
    Represent a single residual block in the network flow.

    Two same-padded 3x3 convolutions whose output is summed with the block
    input (identity shortcut), as in standard ResNet blocks.

    :param input_tensor: Input tensor; must already have `num_channels`
        channels so the elementwise sum with the shortcut is valid.
    :param num_channels: Number of feature maps produced by each convolution.
    :return: Tensor with the same shape as `input_tensor`.
    """
    # Keras 2 API: `Conv2D(filters, kernel_size, padding=...)` replaces the
    # removed Keras 1 form `Convolution2D(n, 3, 3, border_mode=...)`.
    conv = klay.Conv2D(num_channels, (3, 3), padding="same")(input_tensor)
    relu = klay.Activation("relu")(conv)
    second_conv = klay.Conv2D(num_channels, (3, 3), padding="same")(relu)
    # `layers.add` replaces the removed `layers.merge([...], mode="sum")`.
    return klay.add([input_tensor, second_conv])
def build_model():
    """
    Build and compile the left/right classifier (based on VGGNet).

    Two Conv -> BatchNorm -> Pool -> Dropout stages followed by two dense
    layers, ending in a 2-unit sigmoid output whose values signify the
    probability of going left/right.

    :return: A compiled keras Sequential model expecting (25, 50, 1) inputs.
    """
    classifier = kmodels.Sequential()

    # Conv -> RELU -> Norm -> Pool -> dropout
    # At the end, we have 25x25
    classifier.add(klayers.Convolution2D(
        filters=32,
        kernel_size=(3, 3),
        padding='same',
        data_format='channels_last',
        input_shape=(25, 50, 1),
        activation='relu'
        ))
    classifier.add(klayers.BatchNormalization(axis=-1))
    classifier.add(klayers.MaxPooling2D(pool_size=(2, 2), strides=2))
    classifier.add(klayers.Dropout(0.2))

    # 1 convolution layer into pool
    classifier.add(klayers.Convolution2D(
        filters=64,
        kernel_size=(3, 3),
        activation='relu',
        padding='same',
        ))
    classifier.add(klayers.BatchNormalization(axis=-1))
    classifier.add(klayers.MaxPooling2D(pool_size=(2, 2), strides=2))
    classifier.add(klayers.Dropout(0.2))

    # flatten to 1d, then into 2 dense layers
    classifier.add(klayers.Flatten())
    classifier.add(klayers.Dense(64, activation='relu'))
    classifier.add(klayers.Dense(64, activation='relu'))
    classifier.add(klayers.Dropout(0.2))

    # output layer, just 2 numbers signifying prob of going left/right
    classifier.add(klayers.Dense(2, activation='sigmoid'))

    classifier.compile(
        optimizer='rmsprop',
        loss='binary_crossentropy',
        metrics=['accuracy']
        )

    return classifier
示例#26
0
def discriminator_network(input_image_tensor):
    """
    Discriminator: a stack of same-padded ReLU convolutions (with one
    max-pool in the middle) reduced to two feature maps per location.

    :param input_image_tensor: Input image tensor (channels-last).
    :return: Tensor reshaped to (-1, 2); one feature map corresponds to
        `is_real` and the other to `is_refined`, and the custom loss function
        is then `tf.nn.sparse_softmax_cross_entropy_with_logits`.
    """
    def conv(tensor, n_filters, kernel, stride):
        # All convolutions share padding/activation/data-format settings.
        return layers.Convolution2D(n_filters, kernel, padding='same',
                                    strides=stride, activation='relu',
                                    data_format="channels_last")(tensor)

    x = conv(input_image_tensor, 96, (3, 3), (2, 2))
    x = conv(x, 64, (3, 3), (2, 2))
    x = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(1, 1),
                            data_format="channels_last")(x)
    x = conv(x, 32, (3, 3), (1, 1))
    x = conv(x, 32, (1, 1), (1, 1))
    x = conv(x, 2, (1, 1), (1, 1))

    return layers.Reshape((-1, 2))(x)
示例#27
0
def build_model_cnn(input_shape, actions):
    """
    Creates a Convolutional Neural Network.

    :param input_shape: Shape of the raw observation; it is permuted to
        channels-last before the convolutions.
    :param actions: Number of output units (one linear value per action).
    :return: An uncompiled keras Sequential model.
    """
    # Assemble the layer stack first, then hand it to Sequential in one go.
    network_layers = [
        layers.Permute((2, 3, 1), input_shape=input_shape),
        layers.Convolution2D(32, (4, 4), strides=(2, 2), activation="relu"),
        layers.Convolution2D(64, (3, 3), strides=(1, 1), activation="relu"),
        layers.Flatten(),
        layers.Dense(32, activation="relu"),
        layers.Dense(actions, activation="linear"),
    ]
    return models.Sequential(network_layers)
示例#28
0
def get_proportions_model(image_shape,
                          bag_size,
                          filter1=5,
                          kernel1=9,
                          filters2=10,
                          kernel2=5):
    """Creates a model for classifying labels by learning from label proportions.

    Keyword arguments:
    image_shape: 2 tuple, the dimensions of each image.
    bag_size: Integer, the number of images in each bag.
    filter1: Integer, the dimensionality of the output space of the first layer (default 5).
    kernel1: A 2 tuple specifying the height and width of the 2D convolutional window of the first layer (default 9).
    filters2: Integer, the dimensionality of the output space of the second layer (default 10).
    kernel2: A 2 tuple specifying the height and width of the 2D convolutional window of the second layer (default 5).
    """

    # Input > 2 conv layers > single output.
    # Takes an image from the dataset and spits out the probability of the
    # image containing the measured label.
    image_input = layers.Input(shape=image_shape)
    # No `input_shape` kwarg needed: the layer is called on `image_input`,
    # whose shape is already fixed by the Input layer above.
    conv1 = layers.Convolution2D(filter1,
                                 kernel1,
                                 activation='relu')(image_input)
    conv2 = layers.Convolution2D(filters2, kernel2, activation='relu')(conv1)
    flat = layers.Flatten()(conv2)
    image_output = layers.Dense(1, activation='sigmoid')(flat)

    conv_model = models.Model(inputs=image_input, outputs=image_output)

    conv_model.compile(optimizer='adam',
                       loss='mean_squared_error',
                       metrics=['accuracy'])

    # input > TimeDistributed > average of the results of each element of the
    # TimeDistributed layer > output, the average as a single number.
    # Takes in a bag of images, processes them through the same neural network
    # (the one we created above), and then averages the outputs for every
    # image. Both the average and the singular outputs will be used in the
    # training.

    set_input = layers.Input(shape=(bag_size, ) + image_shape)
    set_processing = layers.TimeDistributed(conv_model,
                                            name='inter')(set_input)
    set_output = layers.AveragePooling1D(bag_size)(set_processing)
    set_flat_output = layers.Flatten()(set_output)

    set_model = models.Model(inputs=set_input, outputs=set_flat_output)

    # Arbitrary optimizer, I don't know if I should use a different one.
    set_model.compile(optimizer='adam',
                      loss='mean_squared_error',
                      metrics=['accuracy'])

    return set_model
def build_model(input_shape, actions):
    """
    Build an uncompiled convolutional network for value prediction.

    :param input_shape: Shape of the raw observation; it is permuted to
        channels-last before the convolutions.
    :param actions: Number of output units (one linear value per action).
    :return: An uncompiled keras Sequential model.
    """
    net = models.Sequential()
    net.add(layers.Permute((2, 3, 1), input_shape=input_shape))

    # Conv stack described as (filters, kernel, stride) specs.
    for n_filters, kernel, stride in ((32, (8, 8), (4, 4)),
                                      (64, (4, 4), (2, 2)),
                                      (64, (3, 3), (1, 1))):
        net.add(layers.Convolution2D(n_filters, kernel, strides=stride,
                                     activation="relu"))

    net.add(layers.Flatten())
    net.add(layers.Dense(512, activation="relu"))
    net.add(layers.Dense(actions, activation="linear"))
    return net
示例#30
0
def NvidiaModel():
    """
    Creates nvidia model for steering a car.

    Five convolutional feature extractors followed by a fully-connected head
    that shrinks toward a single linear output (the steering value).

    :return: An uncompiled keras Sequential model.
    """
    net = models.Sequential()

    # First convolution carries the fixed input shape (66x200 RGB frames).
    net.add(
        layers.Convolution2D(24,
                             5,
                             5,
                             subsample=(2, 2),
                             border_mode='valid',
                             batch_input_shape=(None, 66, 200, 3)))
    net.add(layers.Activation('relu'))

    # Remaining conv stages as (feature maps, kernel edge, stride) specs.
    for depth, edge, stride in ((36, 5, (2, 2)),
                                (48, 5, (2, 2)),
                                (64, 3, (1, 1)),
                                (64, 3, (1, 1))):
        net.add(
            layers.Convolution2D(depth, edge, edge,
                                 subsample=stride,
                                 border_mode='valid'))
        net.add(layers.Activation('relu'))

    net.add(layers.Flatten())

    # Fully-connected head, each layer followed by a ReLU.
    for width in (1164, 100, 50, 10):
        net.add(layers.Dense(width))
        net.add(layers.Activation('relu'))

    # Single linear output: the steering prediction.
    net.add(layers.Dense(1))

    return net