Example #1
def mk_model_with_bn():
    model = Sequential()
    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=128,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(filters=128,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=256,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(filters=256,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(256, kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM, kernel_initializer='he_normal'))
    model.add(Activation('softmax'))

    return model
Example #2
    def fit(self, eventlog_name):
        import tensorflow as tf
        from tensorflow.contrib.keras.python.keras.engine import Input, Model
        from tensorflow.contrib.keras.python.keras.layers import Dense, GaussianNoise, Dropout

        # load data
        features = self.dataset.load(eventlog_name)

        # parameters
        input_size = features.shape[1]
        hidden_size = int(input_size * 4)  # Dense expects an integer number of units

        # input layer
        input_layer = Input(shape=(input_size,), name='input')

        # hidden layer
        hid = Dense(hidden_size, activation=tf.nn.relu)(GaussianNoise(0.1)(input_layer))
        hid = Dense(hidden_size, activation=tf.nn.relu)(Dropout(0.5)(hid))
        hid = Dense(hidden_size, activation=tf.nn.relu)(Dropout(0.5)(hid))
        hid = Dense(hidden_size, activation=tf.nn.relu)(Dropout(0.5)(hid))
        hid = Dense(hidden_size, activation=tf.nn.relu)(Dropout(0.5)(hid))

        # output layer
        output_layer = Dense(input_size, activation='linear')(Dropout(0.5)(hid))

        # build model
        self.model = Model(inputs=input_layer, outputs=output_layer)

        # compile model
        self.model.compile(
            optimizer=tf.train.AdamOptimizer(learning_rate=0.0001),
            loss=tf.losses.mean_squared_error
        )

        # train model
        self.model.fit(
            features,
            features,
            batch_size=100,
            epochs=100,
            validation_split=0.2,
        )
Example #3
def bidirectional_model():
    inputs = Input(shape=(maxlen, ), dtype='int32')
    x = Embedding(max_features, 128, input_length=maxlen)(inputs)
    x = Bidirectional(LSTM(64))(x)
    x = Dropout(0.5)(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=x)
    # try using different optimizers and different optimizer configs (see the sketch after this function)
    model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
    return model
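As the comment above suggests, the 'adam' string can be swapped for a configured optimizer instance. A minimal sketch, assuming the plain keras.optimizers import path (use the tensorflow.contrib.keras equivalent if that is what the surrounding code imports):

from keras.optimizers import Adam, RMSprop

model = bidirectional_model()
# pass a configured optimizer object instead of the 'adam' string
model.compile(Adam(lr=1e-3, clipnorm=1.0), 'binary_crossentropy', metrics=['accuracy'])
# or, for example, RMSprop with a decaying learning rate
model.compile(RMSprop(lr=5e-4, decay=1e-6), 'binary_crossentropy', metrics=['accuracy'])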
def build_model(num_output_classes):
    conv1size = 32
    conv2size = 64
    convfiltsize = 4
    densesize = 128
    poolsize = (2, 2)
    imgdepth = 3
    dropout = 0.3
    if IMG_COLORMODE == 'grayscale':
        imgdepth = 1
    inpshape = IMG_TGT_SIZE + (imgdepth, )
    inputs = Input(shape=inpshape)
    conv1 = Convolution2D(conv1size,
                          convfiltsize,
                          strides=(1, 1),
                          padding='valid',
                          activation='relu',
                          name='conv1',
                          data_format='channels_last')(inputs)
    pool1 = MaxPooling2D(pool_size=poolsize, name='pool1')(conv1)
    drop1 = Dropout(dropout)(pool1)
    conv2 = Convolution2D(conv2size,
                          convfiltsize,
                          strides=(1, 1),
                          padding='valid',
                          activation='relu',
                          name='conv2',
                          data_format='channels_last')(drop1)
    pool2 = MaxPooling2D(pool_size=poolsize, name='pool2')(conv2)
    drop2 = Dropout(dropout)(pool2)
    flat2 = Flatten()(drop2)
    dense = Dense(densesize, name='dense')(flat2)
    denseact = Activation('relu')(dense)
    output = Dense(num_output_classes, name='output')(denseact)
    outputact = Activation('softmax')(output)

    model = Model(inputs=inputs, outputs=outputact)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
def cnn_model_fn():
    '''
    Define the model using the Keras functional API.
    '''
    # input shape is (img_rows, img_cols, fea_channel)
    inputs = Input(shape=(img_rows, img_cols, 1))
    x = Conv2D(32, kernel_size=(3, 3), activation='relu')(inputs)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    pred = Dense(num_classes, activation='softmax')(x)
    # note: the Model constructor's parameters are now named `inputs` and `outputs`
    model = Model(inputs=inputs, outputs=pred)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model
Example #6
def mk_model():
    model = Sequential()
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=.1))
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=0.1))
    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(MaxoutDense(output_dim=256, nb_feature=4))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM, kernel_initializer='he_uniform'))
    model.add(Activation('softmax'))

    return model
def create_two_stream_classifier(
        num_fc_neurons,
        dropout_rate,
        num_classes=24):  # classifier_weights_path=None
    classifier = Sequential()
    classifier.add(
        Dense(num_fc_neurons, name='fc7', input_shape=(num_fc_neurons * 2, )))
    #classifier.add(BatchNormalization(axis=1, name='fc7_bn'))
    classifier.add(Activation('relu', name='fc7_ac'))
    classifier.add(Dropout(dropout_rate))
    classifier.add(Dense(num_classes, activation='softmax',
                         name='predictions'))
    return classifier
def cnn_model_fn():
    ''' '''
    print('Build model...')
    inputs = Input(shape=(maxlen, ),
                   dtype='int32')  # an index sequence with length = maxlen
    x = Embedding(max_features, embedding_dims, input_length=maxlen)(inputs)
    x = Dropout(0.2)(x)
    x = Conv1D(filters,
               kernel_size,
               padding='valid',
               activation='relu',
               strides=1)(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(hidden_dims)(x)
    x = Dropout(0.2)(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
    def build(self, alpha, img_input, temp_softmax):

        shape = (1, 1, int(1024 * alpha))

        # This looks dangerous: it is not clear how the model is affected by
        # forcing the learning_phase variable to True (see the sketch after
        # this example).
        K.set_learning_phase(True)

        with tf.name_scope('teacher'):

            self.conv1 = Conv2D(
                int(32 * alpha),
                (3, 3),
                padding='same',
                use_bias=False,
                strides=(1, 1),
                name='teacher_conv1', trainable=self.trainable)(img_input)
            self.conv2 = BatchNormalization(axis=-1, name='teacher_conv1_bn', trainable=self.trainable)(self.conv1)
            self.conv3 = Activation(self.relu6, name='teacher_conv1_relu', trainable=self.trainable)(self.conv2)

            self.conv4 = self._depthwise_conv_block(self.conv3, 64, alpha, depth_multiplier, block_id=15)
            self.conv5 = self._depthwise_conv_block(self.conv4, 128, alpha, depth_multiplier, strides=(2, 2), block_id=16)
            self.conv6 = self._depthwise_conv_block(self.conv5, 128, alpha, depth_multiplier, block_id=17)
            self.conv7 = self._depthwise_conv_block(self.conv6, 256, alpha, depth_multiplier, strides=(2, 2), block_id=18)
            self.conv8 = self._depthwise_conv_block(self.conv7, 256, alpha, depth_multiplier, block_id=19)
            self.conv9 = self._depthwise_conv_block(self.conv8, 512, alpha, depth_multiplier, strides=(2, 2), block_id=20)
            self.conv10 = self._depthwise_conv_block(self.conv9, 512, alpha, depth_multiplier, block_id=21)
            self.conv11 = self._depthwise_conv_block(self.conv10, 512, alpha, depth_multiplier, block_id=22)
            self.conv12 = self._depthwise_conv_block(self.conv11, 512, alpha, depth_multiplier, block_id=23)
            self.conv13 = self._depthwise_conv_block(self.conv12, 512, alpha, depth_multiplier, block_id=24)
            self.conv14 = self._depthwise_conv_block(self.conv13, 512, alpha, depth_multiplier, block_id=25)
            self.conv15 = self._depthwise_conv_block(self.conv14, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=26)
            self.conv16 = self._depthwise_conv_block(self.conv15, 1024, alpha, depth_multiplier, block_id=27)

            self.conv17 = GlobalAveragePooling2D()(self.conv16)
            self.conv18 = Reshape(shape, name='teacher_reshape_1', trainable=self.trainable)(self.conv17)

            self.conv19 = Dropout(0.5, name='teacher_dropout', trainable=self.trainable)(self.conv18)
            # note: conv20 is fed conv18, so the Dropout output (conv19) is never used
            self.conv20 = Conv2D(self.num_classes, (1, 1), padding='same', name='teacher_conv_preds', trainable=self.trainable)(self.conv18)
            self.conv21 = Activation('softmax', name='teacher_act_softmax', trainable=self.trainable)(tf.divide(self.conv20, temp_softmax))
            self.conv22 = Reshape((self.num_classes,), name='teacher_reshape_2', trainable=self.trainable)(self.conv21)

        return self
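The concern noted in the comment above can be checked directly. A minimal, self-contained sketch (toy_in and toy_model are illustrative names, and the tensorflow.contrib.keras import path used elsewhere on this page is assumed): once K.set_learning_phase(1) is in effect, a Dropout layer built afterwards keeps dropping units even in predict(), because the training behaviour is baked into the graph at construction time.

import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.layers import Input, Dropout
from tensorflow.contrib.keras.python.keras.models import Model

K.set_learning_phase(1)                      # force training behaviour globally
toy_in = Input(shape=(10,))
toy_model = Model(toy_in, Dropout(0.5)(toy_in))
print(toy_model.predict(np.ones((4, 10))))   # zeros appear: dropout stays active at inference

K.set_learning_phase(0)                      # models built after this line behave normally again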
Example #10
def cnn_lstm_model():
    ''' '''
    print('Build model...')
    inputs = Input(shape=(maxlen, ), dtype='int32')
    x = Embedding(max_features, embedding_size, input_length=maxlen)(inputs)
    x = Dropout(0.25)(x)
    x = Conv1D(filters,
               kernel_size,
               padding='valid',
               activation='relu',
               strides=1)(x)
    x = MaxPooling1D(pool_size=pool_size)(x)
    x = LSTM(lstm_output_size)(x)
    x = Dense(1, activation='sigmoid')(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #11
def mk_model(arch, data_type, classes):
    if data_type == 'mnist':
        channels = 1
    elif data_type == 'aerial':
        channels = 3

    if arch == 'lenet-5':
        model = Sequential()  # initialize the model

        # first convolutional layer
        model.add(Conv2D(
            32, 5, padding='same',
            input_shape=(28, 28, channels)))  #output_shape=(None,28,28,32)
        #filters=32, kernel_size=(5,5), strides=(1,1), use_bias=True
        #dilation_rate=(1,1), kernel_initializer='glorot_uniform', bias_initializer='zeros'
        #padding='same' pads so that the output shape matches the input shape
        #output_shape=(None(60000),28,28,32)
        model.add(Activation('relu'))
        model.add(MaxPooling2D(padding='same'))  #output_shape=(None,14,14,32)
        #pool_size=(2,2), strides=(2,2)

        # second convolutional layer
        model.add(Conv2D(64, 5, padding='same'))  #output_shape=(None,14,14,64)
        model.add(Activation('relu'))
        model.add(MaxPooling2D(padding='same'))  #output_shape=(None,7,7,64)

        # flatten
        model.add(Flatten())  #output_shape=(None,3136(7*7*64))

        # first fully connected layer
        model.add(Dense(1024))  #output_shape=(None,1024)
        model.add(Activation('relu'))
        model.add(Dropout(0.5))  # fraction of units to drop (e.g. 0.2 keeps 80% of the connections)

        # second fully connected layer (output layer)
        model.add(Dense(classes))  #output_shape=(None,classes)
        model.add(Activation('softmax'))

        return model

    elif arch == 'alexnet':
        model = Sequential()  # initialize the model
Example #12
def simple_model():
    model = Sequential()
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(filters=64, kernel_size=(3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM))
    model.add(Activation('softmax'))

    return model
Example #13
 def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss=risk_estimation):
     print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." % (
     lr, n_layers, n_hidden, rate_dropout))
     self.model = Sequential()
     self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
     for i in range(0, n_layers - 1):
         self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                             recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                             recurrent_initializer='orthogonal', bias_initializer='zeros',
                             dropout=rate_dropout, recurrent_dropout=rate_dropout))
     self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                         recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                         recurrent_initializer='orthogonal', bias_initializer='zeros',
                         dropout=rate_dropout, recurrent_dropout=rate_dropout))
     self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
     # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
     #               moving_variance_initializer=Constant(value=0.25)))
     self.model.add(BatchNormalization(axis=-1))
     self.model.add(Activation("relu(alpha=0., max_value=1.0)"))
     opt = RMSprop(lr=lr)
     self.model.compile(loss=loss,
                        optimizer=opt,
                        metrics=['accuracy'])
def create_VGG16(num_fc_neurons,
                 dropout_rate,
                 num_classes=24,
                 top_model_weights_path=None,
                 img_height=224,
                 img_width=224,
                 include_loc='all',
                 activation='softmax'):
    # load pre-trained convolutional model
    base_model = VGG16(weights='imagenet',
                       include_top=False,
                       input_shape=get_input_shape(img_height, img_width))

    # build a classifier model to put on top of the convolutional model
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    for i in range(6, 8):
        top_model.add(Dense(num_fc_neurons, name='fc' + str(i)))
        #top_model.add(BatchNormalization(axis=1, name='fc'+str(i)+'_bn'))
        top_model.add(Activation('relu', name='fc' + str(i) + '_ac'))
        top_model.add(Dropout(dropout_rate))
    top_model.add(Dense(num_classes, activation=activation,
                        name='predictions'))
    if top_model_weights_path is not None:
        top_model.load_weights(top_model_weights_path)

    if include_loc == 'base':
        model = base_model
    elif include_loc == 'top':
        model = top_model
    elif include_loc == 'all':  # add the model on top of the convolutional base
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))
    else:
        raise ValueError('Only "base", "top" and "all" can be included.')
    return model
Example #15
"""
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.python.keras.optimizers import SGD
from tensorflow.contrib.keras.python.keras.utils import to_categorical
import numpy as np

# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 20))
y_test = np.random.randint(2, size=(100, 1))

model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', # binary classification
              optimizer='rmsprop',
              metrics=['accuracy'])

hist = model.fit(x_train, y_train,
          validation_split=0.2,
          epochs=1,
          batch_size=128)

hist.history
score = model.evaluate(x_test, y_test, batch_size=128)
Example #16
def MobileNet(
        input_shape=None,  # pylint: disable=invalid-name
        alpha=1.0,
        depth_multiplier=1,
        dropout=1e-3,
        include_top=True,
        weights='imagenet',
        input_tensor=None,
        pooling=None,
        classes=1000):
    """Instantiates the MobileNet architecture.

  Note that only TensorFlow is supported for now,
  therefore it only works with the data format
  `image_data_format='channels_last'` in your Keras config
  at `~/.keras/keras.json`.

  To load a MobileNet model via `load_model`, import the custom
  objects `relu6` and `DepthwiseConv2D` and pass them to the
  `custom_objects` parameter.
  E.g.
  model = load_model('mobilenet.h5', custom_objects={
                     'relu6': mobilenet.relu6,
                     'DepthwiseConv2D': mobilenet.DepthwiseConv2D})

  Arguments:
      input_shape: optional shape tuple, only to be specified
          if `include_top` is False (otherwise the input shape
          has to be `(224, 224, 3)` (with `channels_last` data format)
          or (3, 224, 224) (with `channels_first` data format).
          It should have exactly 3 input channels,
          and width and height should be no smaller than 32.
          E.g. `(200, 200, 3)` would be one valid value.
      alpha: controls the width of the network.
          - If `alpha` < 1.0, proportionally decreases the number
              of filters in each layer.
          - If `alpha` > 1.0, proportionally increases the number
              of filters in each layer.
          - If `alpha` = 1, default number of filters from the paper
               are used at each layer.
      depth_multiplier: depth multiplier for depthwise convolution
          (also called the resolution multiplier)
      dropout: dropout rate
      include_top: whether to include the fully-connected
          layer at the top of the network.
      weights: `None` (random initialization) or
          `imagenet` (ImageNet weights)
      input_tensor: optional Keras tensor (i.e. output of
          `layers.Input()`)
          to use as image input for the model.
      pooling: Optional pooling mode for feature extraction
          when `include_top` is `False`.
          - `None` means that the output of the model
              will be the 4D tensor output of the
              last convolutional layer.
          - `avg` means that global average pooling
              will be applied to the output of the
              last convolutional layer, and thus
              the output of the model will be a
              2D tensor.
          - `max` means that global max pooling will
              be applied.
      classes: optional number of classes to classify images
          into, only to be specified if `include_top` is True, and
          if no `weights` argument is specified.

  Returns:
      A Keras model instance.

  Raises:
      ValueError: in case of invalid argument for `weights`,
          or invalid input shape.
      RuntimeError: If attempting to run this model with a
          backend that does not support separable convolutions.
  """

    if K.backend() != 'tensorflow':
        raise RuntimeError('Only TensorFlow backend is currently supported, '
                           'as other backends do not support '
                           'depthwise convolution.')

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as true, `classes` should be 1000')

    # Determine proper input shape.
    if input_shape is None:
        default_size = 224
    else:
        if K.image_data_format() == 'channels_first':
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]
        if rows == cols and rows in [128, 160, 192, 224]:
            default_size = rows
        else:
            default_size = 224
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=default_size,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)
    if K.image_data_format() == 'channels_last':
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]

    if weights == 'imagenet':
        if depth_multiplier != 1:
            raise ValueError('If imagenet weights are being loaded, '
                             'depth multiplier must be 1')

        if alpha not in [0.25, 0.50, 0.75, 1.0]:
            raise ValueError('If imagenet weights are being loaded, '
                             'alpha can be one of'
                             '`0.25`, `0.50`, `0.75` or `1.0` only.')

        if rows != cols or rows not in [128, 160, 192, 224]:
            raise ValueError('If imagenet weights are being loaded, '
                             'input must have a static square shape (one of '
                             '(128,128), (160,160), (192,192), or (224, 224)).'
                             ' Input shape provided = %s' % (input_shape, ))

    if K.image_data_format() != 'channels_last':
        warnings.warn('The MobileNet family of models is only available '
                      'for the input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height).'
                      ' You should set `image_data_format="channels_last"` '
                      'in your Keras config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = _conv_block(img_input, 32, alpha, strides=(2, 2))
    x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)

    x = _depthwise_conv_block(x,
                              128,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2),
                              block_id=2)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)

    x = _depthwise_conv_block(x,
                              256,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2),
                              block_id=4)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)

    x = _depthwise_conv_block(x,
                              512,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2),
                              block_id=6)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)

    x = _depthwise_conv_block(x,
                              1024,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2),
                              block_id=12)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)

    if include_top:
        if K.image_data_format() == 'channels_first':
            shape = (int(1024 * alpha), 1, 1)
        else:
            shape = (1, 1, int(1024 * alpha))

        x = GlobalAveragePooling2D()(x)
        x = Reshape(shape, name='reshape_1')(x)
        x = Dropout(dropout, name='dropout')(x)
        x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
        x = Activation('softmax', name='act_softmax')(x)
        x = Reshape((classes, ), name='reshape_2')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))

    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            raise ValueError('Weights for "channels_first" format '
                             'are not available.')
        if alpha == 1.0:
            alpha_text = '1_0'
        elif alpha == 0.75:
            alpha_text = '7_5'
        elif alpha == 0.50:
            alpha_text = '5_0'
        else:
            alpha_text = '2_5'

        if include_top:
            model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
            weigh_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(model_name,
                                    weigh_path,
                                    cache_subdir='models')
        else:
            model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
            weigh_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(model_name,
                                    weigh_path,
                                    cache_subdir='models')
        model.load_weights(weights_path)

    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
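A usage sketch based on the docstring above (the variable names are illustrative; with ImageNet weights, alpha must be one of 0.25/0.50/0.75/1.0 and the input square at 128/160/192/224):

# width-0.50 MobileNet as a feature extractor with global average pooling
feature_extractor = MobileNet(input_shape=(160, 160, 3),
                              alpha=0.50,
                              include_top=False,
                              weights='imagenet',
                              pooling='avg')

# the full 1000-class ImageNet classifier at the default 224x224 resolution
classifier = MobileNet(weights='imagenet', include_top=True)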
Example #17
                    test_label_mat = "label_foot.mat",
                    seq_len=seq_len)

N_train = len(train)
vector_dim = train.shape[2]
train = train.reshape(-1, seq_len, vector_dim, 1)
test = test.reshape(-1, seq_len, vector_dim, 1)
'''
construction model
'''
inputs = Input(shape=(train.shape[1:]))

x = Conv2D(16, (8, 1), padding='same', use_bias=False)(inputs)
x = Conv2D(16, (8, 1), padding='same', use_bias=False, activation='relu')(x)
x = BatchNormalization(axis=1)(x)
x = Dropout(0.3)(x)

x = Conv2D(32, (8, 1), padding='same', use_bias=False)(x)  # chain from the previous block, not from inputs
x = Conv2D(32, (8, 1), padding='same', use_bias=False, activation='relu')(x)
x = BatchNormalization(axis=1)(x)
x = Dropout(0.3)(x)

x = Conv2D(64, (8, 1), padding='same', use_bias=False)(x)  # chain from the previous block, not from inputs
x = Conv2D(64, (8, 1), padding='same', use_bias=False, activation='relu')(x)
x = BatchNormalization(axis=1)(x)
x = Dropout(0.3)(x)

x = Conv2D(64, (1, 5), padding='valid', use_bias=False, activation='relu')(x)

x = Reshape((128, 64))(x)
x = Bidirectional(LSTM(20, dropout=0.2, return_sequences=False))(x)
Example #18
- model.fit(batch_size=...) and model.fit_generator() train on one small batch at a time until the whole dataset has been seen (see the sketch below)

"""

from tensorflow.contrib.keras.python.keras.layers import Dropout, BatchNormalization, Input, Dense
from tensorflow.contrib.keras.python.keras.models import Model
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import losses

x = np.random.random((10, 10)) * 2
y = np.random.randint(2, size=(10, 1))

input_tensor = Input(shape=(10, ))
bn_tensor = BatchNormalization()(input_tensor)
dp_tensor = Dropout(0.7)(bn_tensor)
final_tensor = Dense(1)(dp_tensor)

model = Model(input_tensor, final_tensor)
model.compile(optimizer='SGD', loss='mse')

# x, y won't get into batches for training here
loss_on_batch = model.train_on_batch(x, y)  # dive in for details
"""
('Runs a single gradient update on a single batch of data.\n'
 '\n'
 'Arguments:\n'
 '    x: Numpy array of training data,\n'
 '        or list of Numpy arrays if the model has multiple inputs.\n'
 '        If all inputs in the model are named,\n'
 '        you can also pass a dictionary\n'
- BatchNormalization layer output arrays on test and train mode

- Dropout layer output arrays on test and train mode (a Dropout sketch follows the BatchNormalization code below)
"""

from tensorflow.contrib.keras.python.keras.layers import Dropout, BatchNormalization, Input, Dense
from tensorflow.contrib.keras.python.keras.models import Model, Sequential, load_model
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K

input_array_small = np.random.random((500, 10)) * 2
target_small = np.random.random((500, 1))

input_tensor = Input(shape=(10, ))
bn_tensor = BatchNormalization()(input_tensor)
dp_tensor = Dropout(0.7)(input_tensor)

#### Access BatchNormalization layer's output as arrays in both test, train mode

# test mode from Model method
model_bn = Model(input_tensor, bn_tensor)
bn_array = model_bn.predict(input_array_small)

# test and train mode from K.function method
k_bn = K.function([input_tensor, K.learning_phase()], [bn_tensor])
bn_array_test = k_bn([input_array_small, 0])[0]
bn_array_train = k_bn([input_array_small, 1])[0]

# are the test-mode arrays the same? (the test-mode array should differ from the train-mode one)
(bn_array == bn_array_test).sum()
bn_array.shape  # compare shapes as a sanity check
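The Dropout counterpart promised in the docstring above, reusing dp_tensor from this example (a sketch in the same style as the BatchNormalization code):

#### Access Dropout layer's output as arrays in both test and train mode

k_dp = K.function([input_tensor, K.learning_phase()], [dp_tensor])
dp_array_test = k_dp([input_array_small, 0])[0]   # test mode: Dropout is an identity pass-through
dp_array_train = k_dp([input_array_small, 1])[0]  # train mode: ~70% of units zeroed, the rest rescaled

np.allclose(dp_array_test, input_array_small)    # True: nothing is dropped in test mode
np.allclose(dp_array_train, input_array_small)   # False: units were dropped and rescaled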
Example #20
# test = np.r_[test1, test2]
# test_label = np.r_[test_label1, test_label2]

#train = np.r_[train, test]
#train_label = np.r_[train_label, test_label]

epochs = 100
batch_size = 256
f_dim = train.shape[2]
lstm1_dim = 20
lstm2_dim = 20

model = Sequential()
model.add(Convolution1D(8, 4, input_shape=(seq_len, f_dim)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(MaxPooling1D(3))
model.add(Convolution1D(12, 4, kernel_regularizer=l2(0.05)))
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(MaxPooling1D(3))
model.add(Convolution1D(18, 4, kernel_regularizer=l2(0.04)))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(MaxPooling1D(3))
model.add(Bidirectional(LSTM(lstm1_dim, dropout=0.2, return_sequences=True)))
model.add(Bidirectional(LSTM(lstm2_dim, dropout=0.2)))
model.add(BatchNormalization(axis=1))
model.add(Dense(30, kernel_regularizer=l2(0.02)))
model.add(BatchNormalization())
model.add(Activation('relu'))
Example #21
                 'labels_' + str(lendata - 1) + '_' + str(lendata) + '.npy'),
    labels[lendata - 1:lendata])

tokenizer = None
data = None
labels = None

embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)

sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Dropout(0.5)(embedded_sequences)
x = Conv1D(filters=128, kernel_size=3, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=2)(x)
x = LSTM(100, dropout=0.2, recurrent_dropout=0.2, return_sequences=True)(x)
x = LSTM(100, dropout=0.2, recurrent_dropout=0.2)(x)
x = Dense(32, activation='relu')(x)
preds = Dense(len(labels_index), activation='softmax')(x)

model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])

embedding_matrix = None
embeddings_index = None
def create_AlexNet(num_fc_neurons,
                   dropout_rate,
                   num_classes=24,
                   img_height=224,
                   img_width=224,
                   include_loc='all',
                   activation='softmax'):
    weight_decay = 0.0005
    kernel_regularizer = regularizers.l2(weight_decay)
    bias_regularizer = regularizers.l2(weight_decay)

    # build a convolutional model
    base_model = Sequential()
    base_model.add(
        Conv2D(96, (11, 11),
               strides=(4, 4),
               padding='valid',
               activation='relu',
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               name='conv1',
               input_shape=get_input_shape(img_height, img_width)))
    base_model.add(LRN2D(name='lrn1'))
    base_model.add(MaxPooling2D((3, 3), strides=(2, 2), name='pool1'))

    base_model.add(
        Conv2D(256, (5, 5),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               name='conv2'))
    base_model.add(LRN2D(name='lrn2'))
    base_model.add(MaxPooling2D((3, 3), strides=(2, 2), name='pool2'))

    base_model.add(
        Conv2D(384, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               name='conv3'))
    base_model.add(
        Conv2D(384, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               name='conv4'))
    base_model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               name='conv5'))
    base_model.add(MaxPooling2D((3, 3), strides=(2, 2), name='pool3'))

    # build a classifier model to put on top of the convolutional model
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    for i in range(6, 8):
        top_model.add(
            Dense(num_fc_neurons,
                  kernel_regularizer=kernel_regularizer,
                  bias_regularizer=bias_regularizer,
                  name='fc' + str(i)))
        #top_model.add(BatchNormalization(axis=1, name='fc'+str(i)+'_bn'))
        top_model.add(Activation('relu', name='fc' + str(i) + '_ac'))
        top_model.add(Dropout(dropout_rate))
    top_model.add(
        Dense(num_classes,
              activation=activation,
              kernel_regularizer=kernel_regularizer,
              bias_regularizer=bias_regularizer,
              name='predictions'))

    if include_loc == 'base':
        model = base_model
    elif include_loc == 'top':
        model = top_model
    elif include_loc == 'all':  # add the model on top of the convolutional base
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))
    else:
        raise ValueError('Only "base", "top" and "all" can be included.')
    return model
Example #23
tokenizer = None
data = None
labels = None

embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)

sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Dropout(0.2)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)  # global max pooling
x = Dropout(0.5)(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
preds = Dense(len(labels_index), activation='softmax')(x)

model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
Example #24
def cnn_sentiment_model(inputs,
                        nb_words,
                        embedding_dim=300,
                        static_embedding=True,
                        embedding_weights=None,
                        filter_hs=None,
                        nb_filters=100,
                        emb_size=100,
                        hidden_dropout=0.2,
                        is_training=True,
                        augmentation_function=None,
                        l2_weight=1e-4,
                        img_shape=None,
                        new_shape=None,
                        image_summary=False,
                        batch_norm_decay=0.99,
                        seed=0,
                        embedding_dropout=0.2):
    from tensorflow.contrib.keras.python.keras.layers import Embedding, Input, Convolution1D, MaxPooling1D, Flatten, \
        Dense, Dropout, Activation
    from tensorflow.contrib.keras.python.keras.initializers import glorot_uniform
    from tensorflow.contrib.keras.python.keras.layers.merge import Concatenate

    from tensorflow.contrib.keras.python.keras import backend as K
    K.set_learning_phase(1 if is_training else 0)

    sequence_length = img_shape[0]

    if filter_hs is None:
        filter_hs = [3, 4, 5]

    model = inputs

    def ci(shape, dtype=None, partition_info=None):
        assert shape[0] == embedding_weights.shape[0] and shape[
            1] == embedding_weights.shape[
                1], 'Shapes are not equal required={} init value={}'.format(
                    shape, embedding_weights.shape)
        return embedding_weights

    model = Embedding(nb_words,
                      embedding_dim,
                      input_length=sequence_length,
                      trainable=(not static_embedding),
                      embeddings_initializer='uniform'
                      if embedding_weights is None else ci)(model)
    if embedding_dropout > 0.0:
        model = Dropout(embedding_dropout, seed=seed)(model,
                                                      training=is_training)

    convs = list()
    for fsz in filter_hs:
        conv = Convolution1D(
            filters=nb_filters,
            kernel_size=fsz,
            padding='valid',
            activation='relu',
            kernel_initializer=glorot_uniform(seed=seed))(model)
        pool = MaxPooling1D(pool_size=sequence_length - fsz + 1)(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)

    if len(convs) > 1:  # Concatenate requires at least two inputs
        graph_out = Concatenate()(convs)
    else:
        graph_out = convs[0]

    model = graph_out

    model = Dense(emb_size,
                  kernel_initializer=glorot_uniform(seed=seed))(model)
    model = Dropout(hidden_dropout, seed=seed)(model, training=is_training)
    model = Activation('relu')(model)

    return model
Example #25
    def fit(self, eventlog_name):

        import tensorflow as tf
        from tensorflow.contrib.keras.python.keras.engine import Input, Model
        from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, GRU, Embedding, merge, Masking

        features, targets = self.dataset.load(eventlog_name, train=True)
        inputs = []
        layers = []

        with tf.device('/cpu:0'):
            # split attributes
            features = [features[:, :, i] for i in range(features.shape[2])]

            for i, t in enumerate(features):
                voc_size = np.array(self.dataset.attribute_dims[i]) + 1  # we start at 1, hence +1
                emb_size = np.floor(voc_size / 2.0).astype(int)

                i = Input(shape=(None, *t.shape[2:]))
                x = Embedding(input_dim=voc_size, output_dim=emb_size, input_length=t.shape[1], mask_zero=True)(i)
                inputs.append(i)
                layers.append(x)

            # merge layers
            x = merge.concatenate(layers)

        x = GRU(64, implementation=2)(x)

        # shared hidden layer
        x = Dense(512, activation=tf.nn.relu)(x)
        x = Dense(512, activation=tf.nn.relu)(Dropout(0.5)(x))

        # hidden layers per attribute
        outputs = []
        for i, l in enumerate(targets):
            o = Dense(256, activation=tf.nn.relu)(Dropout(0.5)(x))
            o = Dense(256, activation=tf.nn.relu)(Dropout(0.5)(o))
            o = Dense(l.shape[1], activation=tf.nn.softmax)(Dropout(0.5)(o))
            outputs.append(o)

        self.model = Model(inputs=inputs, outputs=outputs)

        # compile model

        # old setting : optimizers from tensorflow

        # self.model.compile(
        # optimizer=tf.train.AdamOptimizer(learning_rate=0.0001),
        # loss='categorical_crossentropy'
        # )

        # new setting : optimizers from keras

        self.model.compile(
            optimizer='Adadelta',
            loss='categorical_crossentropy'
        )

        # train model
        self.model.fit(
            features,
            targets,
            batch_size=100,
            epochs=100,
            validation_split=0.2,
        )
Example #26
batch_size = 1024
f_dim = train.shape[2]
lstm1_dim = 20
lstm2_dim = 20

model = Sequential()
model.add(
    Bidirectional(LSTM(lstm1_dim, dropout=0.2, return_sequences=True),
                  input_shape=(seq_len, f_dim)))
model.add(Bidirectional(LSTM(lstm2_dim, dropout=0.2)))
model.add(BatchNormalization(axis=1))
model.add(Dense(30, kernel_regularizer=l2(0.02)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Dense(2, kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Activation('softmax'))
model.compile(optimizer='Nadam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

csv_logger = CSVLogger('training.log')
hist = model.fit(train,
                 train_label,
                 epochs=epochs,
                 batch_size=batch_size,
                 validation_split=2 / 7,
                 callbacks=[csv_logger],
Example #27
np.save(os.path.join(SAVE_DIR, 'gens_'+str(i)+'_'+str(lendata)+'.npy'), data_gen[i:lendata])
np.save(os.path.join(SAVE_DIR, 'labels_' + str(i) + '_' + str(lendata) + '.npy'), labels[i:lendata])
np.save(os.path.join(SAVE_DIR, 'data_' + str(lendata-1) + '_' + str(lendata) + '.npy'), data[lendata-1:lendata])
np.save(os.path.join(SAVE_DIR, 'gens_' + str(lendata-1) + '_' + str(lendata) + '.npy'), data_gen[lendata-1:lendata])
np.save(os.path.join(SAVE_DIR, 'labels_' + str(lendata-1) + '_' + str(lendata) + '.npy'), labels[lendata-1:lendata])

tokenizer = None
data = None
labels = None

embedding_layer_gen = Embedding(len(word_index_gen) + 1,
                                300,
                                input_length=2)
sequence_input_gen = Input(shape=(2,), dtype='int32')
embedded_sequences_gen = embedding_layer_gen(sequence_input_gen)
x_gen = Dropout(0.8)(embedded_sequences_gen)
x_gen = Flatten()(x_gen)
x_gen = Dense(512, activation='relu')(x_gen)


embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)

sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 15, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
Example #28
epochs = 100
batch_size = 256
f_dim = train.shape[2]

model = Sequential()

model.add(
    Convolution2D(32, (2, 2),
                  2,
                  padding='same',
                  input_shape=train.shape[1:],
                  kernel_regularizer=l2(0.005),
                  use_bias=False))
model.add(BatchNormalization(axis=3))
model.add(Dropout(0.3))
model.add(
    Convolution2D(64, (2, 2),
                  2,
                  padding='same',
                  kernel_regularizer=l2(0.005),
                  use_bias=False))
model.add(BatchNormalization(axis=3))
model.add(Dropout(0.3))
model.add(Activation("relu"))

model.add(
    Convolution2D(128, (2, 2),
                  2,
                  padding='same',
                  kernel_regularizer=l2(0.005),
Example #29
#hidden_initializer = random_uniform(seed=SEED)
tf.set_random_seed(32)
hidden_initializer = tf.random_uniform([1])
dropout_rate = 0.2

savefile = './models.h5'


# create model
if os.path.isfile(savefile):
     model = load_model(savefile)
else:
    model = Sequential()
    model.add(Dense(128, input_dim=input_dimension, kernel_initializer=hidden_initializer, activation='relu'))
    #model.add(Dense(128, input_dim=input_dimension,  activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(64, kernel_initializer=hidden_initializer, activation='relu'))
    model.add(Dense(2, kernel_initializer=hidden_initializer, activation='softmax'))
    #model.add(Dense(64, activation='relu'))
    #model.add(Dense(2, activation='softmax'))

    sgd = SGD(lr=learning_rate, momentum=momentum)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['acc'])

model.fit(X_train, y_train, epochs=50, batch_size=128)
predictions = model.predict_proba(X_test)

ans = pd.DataFrame(predictions,columns=['target','dmy'])
print(ans)
outdf = pd.concat([outdf,ans['target']],axis=1)
outdf.to_csv('./submit_keras.csv',index=False)
Example #30
from tensorflow.contrib.keras.python.keras.layers import Embedding
from tensorflow.contrib.keras.python.keras.layers import LSTM
from tensorflow.contrib.keras.python.keras import backend as K

model = Sequential()
model.add(Embedding(input_dim=64, output_dim=256, input_length=10))
# input_dim: Size of the vocabulary
# input_length: Length of input sequences, length of each sentences
# output_dim: Dimension of the dense embedding
model.input # (?, 10),
model.output # (?, 10, 256)

model.add(LSTM(128)) # unit=128, dimensionality of the output space
model.output

model.add(Dropout(0.5)) # fraction of units to drop
# model.output # will cause error on Dropout
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

import numpy as np
x_train = np.random.random((1000, 10))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 10))
y_test = np.random.randint(2, size=(100, 1))

hist = model.fit(x_train, y_train, validation_split=0.2, batch_size=16, epochs=1)
hist.history