Code Example #1
File: train.py Project: nikola-j/audio_tag
def define_model_selu():
    inputs = Input((128,))
    x1f = Dense(128, activation='selu')(inputs)
    x1b = BatchNormalization()(x1f)
    x1d = AlphaDropout(0.5)(x1b)
    x1 = keras.layers.add([x1d, inputs])
    x2f = Dense(128, activation='selu')(x1)
    x2b = BatchNormalization()(x2f)
    x2d = AlphaDropout(0.5)(x2b)
    x2 = keras.layers.add([x1, x2d, inputs])
    x3f = Dense(64, activation='selu')(x2)
    x3b = BatchNormalization()(x3f)
    x3d = AlphaDropout(0.5)(x3b)
    x4f = Dense(64, activation='selu')(x3d)
    x4b = BatchNormalization()(x4f)
    x4d = AlphaDropout(0.5)(x4b)
    x4 = keras.layers.add([x3d, x4d])
    x5f = Dense(64, activation='selu')(x4)
    x5b = BatchNormalization()(x5f)
    x6f = Dense(41, activation='softmax')(x5b)

    model = Model(inputs=inputs, outputs=x6f)

    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01),
                  metrics=['accuracy', ])
    return model
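
Note: the snippet above assumes standalone Keras 2.x imports along the following lines (an assumption; the original file's import block is not shown, and under tensorflow.keras the Adam argument is learning_rate rather than lr):

import keras
from keras.layers import Input, Dense, BatchNormalization, AlphaDropout
from keras.models import Model
from keras.optimizers import Adam
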
Code Example #2
    def create_model(self):
        embeddingSize = 128
        maxSeqLength = self.Config["max_chars_seq_len"]
        convLayersData = [[256, 10], [256, 7], [256, 5], [256, 3]]
        dropout_p = 0.1
        optimizer = 'adam'
        inputs = Input(shape=(maxSeqLength, ), dtype='int64')
        x = Embedding(len(arabic_charset()) + 1,
                      embeddingSize,
                      input_length=maxSeqLength)(inputs)
        convolution_output = []
        for num_filters, filter_width in convLayersData:
            conv = Convolution1D(filters=num_filters,
                                 kernel_size=filter_width,
                                 activation='tanh')(x)
            pool = GlobalMaxPooling1D()(conv)
            convolution_output.append(pool)
        x = Concatenate()(convolution_output)
        x = Dense(1024, activation='selu',
                  kernel_initializer='lecun_normal')(x)
        x = AlphaDropout(dropout_p)(x)
        x = Dense(1024, activation='selu',
                  kernel_initializer='lecun_normal')(x)
        x = AlphaDropout(dropout_p)(x)
        predictions = Dense(len(self.Config["predefined_categories"]),
                            activation='sigmoid')(x)
        model = Model(inputs=inputs, outputs=predictions)
        model.compile(optimizer=optimizer,
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        return model
Code Example #3
File: train.py Project: nikola-j/audio_tag
def define_model_snn_cifar10():
    model = Sequential()

    model.add(Conv1D(16, 3, strides=2, padding='same', input_shape=[128, 1],
                     kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('selu'))
    model.add(Conv1D(16, 3, kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('selu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(AlphaDropout(0.1))

    model.add(Conv1D(32, 3, padding='same', kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('selu'))
    model.add(Conv1D(32, 3, kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('selu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(AlphaDropout(0.1))

    model.add(Flatten())
    model.add(Dense(41, kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('selu'))
    model.add(AlphaDropout(0.2))
    model.add(Dense(41, kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('softmax'))

    # initiate RMSprop optimizer
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy', 'top_k_categorical_accuracy'])
    model.summary()
    return model
Code Example #4
def loadModel(vocabularySize, sequenceLength, vecSpaceSize):
    kernelSizes = [1, 2, 3, 4, 5]
    #kernelSizes2 = [3, 4, 5, 6, 7]
    #init = RandomNormal(stddev=0.02)
    inputs = Input(shape=(sequenceLength, ))
    embedding = Embedding(vocabularySize, vecSpaceSize)(inputs)
    #reshape = Reshape((sequenceLength, vecSpaceSize, 1))(embedding)

    conved = [
        Conv1D(filters=32,
               kernel_size=kernelSize,
               activation="selu",
               kernel_initializer="lecun_normal")(embedding)
        for kernelSize in kernelSizes
    ]

    #convedActivated = [LeakyReLU(alpha=0.20)(conv) for conv in conved]
    pooled = [MaxPooling1D(2, strides=2)(conv) for conv in conved]
    #batchNorms1 = [BatchNormalization()(pool) for pool in pooled]
    #dropouts = [Dropout(0.30)(pool) for pool in pooled]

    conved2 = [
        Conv1D(filters=64,
               kernel_size=5,
               strides=3,
               activation="selu",
               kernel_initializer="lecun_normal")(pool) for pool in pooled
    ]
    #convedActivated2 = [LeakyReLU(alpha=0.20)(conv) for conv in conved2]
    pooled2 = [MaxPooling1D(2, strides=2)(conv) for conv in conved2]

    #batchNorms2 = [BatchNormalization()(pool) for pool in pooled2]
    #dropouts = [Dropout(0.30)(pool) for pool in pooled]

    #conved3 = [Conv1D(filters=32, kernel_size=3, strides=2, activation="relu")(batchNorm) for batchNorm in batchNorms2]
    #convedActivated2 = [LeakyReLU(alpha=0.20)(conv) for conv in conved2]
    #pooled3 = [MaxPooling1D(2, strides=2)(conv) for conv in conved3]
    #batchNorms2 = [BatchNormalization()(pool) for pool in pooled2]
    #dropouts2 = [Dropout(0.50)(pool) for pool in pooled2]
    flattened = [Flatten()(pool) for pool in pooled2]  # flatten the second conv/pool stage so it feeds the dense head

    merged = Concatenate()(flattened)
    dropoutDense1 = AlphaDropout(0.30)(merged)

    dense = Dense(10, activation="selu",
                  kernel_initializer="lecun_normal")(dropoutDense1)
    #leakyReLU = LeakyReLU(alpha=0.20)(dense)
    #batchNorm3 = BatchNormalization()(leakyReLU)
    dropoutDense2 = AlphaDropout(0.20)(dense)

    outputs = Dense(1, activation="sigmoid",
                    kernel_initializer="lecun_normal")(dropoutDense2)

    model = Model(inputs=inputs, outputs=outputs, name="sentiment_analysis")
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
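
A hypothetical call (the vocabulary size, sequence length, and embedding width are illustrative values, not taken from the project):

model = loadModel(vocabularySize=20000, sequenceLength=200, vecSpaceSize=128)
model.summary()
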
Code Example #5
def build_snn_model(
    x_shape=(100, 1, 10),
    n_layers=1,
    n_units=64,
    kernel_reg=1e-9,
    activity_reg=1e-9,
    bias_reg=1e-9,
    dropout_rate=0.5,
    optimizer='nadam',
    lr_rate=1e-5,
    gauss_noise_std=1e-3,
    n_gpus=0,
):
    """build snn model"""

    opts_map = {
        'adam': opts.Adam,
        'nadam': opts.Nadam,
        'adamax': opts.Adamax,
        'sgd': opts.SGD,
        'rmsprop': opts.RMSprop
    }

    snn_cfg = {
        'units': int(n_units),
        #'batch_input_shape': (batch_size, x_shape[1], x_shape[2]),
        #'batch_size': batch_size,
        'input_shape': x_shape,
        'kernel_regularizer': regularizers.l2(kernel_reg),
        'activity_regularizer': regularizers.l2(activity_reg),
        'bias_regularizer': regularizers.l2(bias_reg),
        'kernel_initializer': initializers.lecun_normal(seed=cfg.data_cfg['random_seed']),
        'activation': 'selu',
    }

    model = Sequential()
    model.add(Dense(**snn_cfg))
    model.add(GaussianNoise(gauss_noise_std))
    model.add(AlphaDropout(dropout_rate))
    if n_layers > 1:
        for i in range(n_layers - 1):
            snn_cfg.pop('input_shape', None)  # only the first layer specifies the input shape
            model.add(Dense(**snn_cfg))
            model.add(GaussianNoise(gauss_noise_std))
            model.add(AlphaDropout(dropout_rate))
    model.add(Flatten())  # Dense acts on the last axis of the 3D input, so flatten before the output layer
    model.add(Dense(len(cfg.data_cfg['Target_param_names'])))

    opt = opts_map[optimizer](lr=lr_rate)
    model.compile(optimizer=opt, loss='mse')
    return model
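
The snippet above relies on module-level context that is not shown; a minimal stand-in (an assumption about the project's layout, not its actual code) could look like:

import keras.optimizers as opts
from keras import regularizers, initializers

class cfg:  # stand-in for the project's config module
    data_cfg = {'random_seed': 0, 'Target_param_names': ['y1', 'y2']}
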
Code Example #6
File: model.py Project: 0xzayd/CNN_forest
    def build_model(self, img_height=256, img_width=256, activation = 'selu', filter_initializer = 'lecun_normal', blocks = [64, 64, 64, 64] ,use_tfboard = False):
        
        bands = 4
        if K.image_data_format() == 'channels_first':
            ch_axis = 1
            input_shape = (bands, img_height, img_width)
        if K.image_data_format() == 'channels_last':
            ch_axis = 3
            input_shape = (img_height, img_width, bands)
            
        inp = Input(input_shape)
        encoder = inp
        list_encoders = []
        
        print('building 2D Convolution Model ...')
        print(blocks)
        
        # Setting up the filter size
        filter_size = (3,3)
        # Encoding
        for block_id , n_block in enumerate(blocks):
            with K.name_scope('Encoder_block_{0}'.format(block_id)):
                encoder = Conv2D(filters = n_block, kernel_size = filter_size, activation = activation, padding = 'same',
                                kernel_initializer = filter_initializer)(encoder)
                encoder = AlphaDropout(0.1 * block_id)(encoder)
                encoder = Conv2D(filters = n_block, kernel_size = filter_size, dilation_rate = (2,2),
                                 activation = activation, padding='same', kernel_initializer = filter_initializer)(encoder)
                
                list_encoders.append(encoder)
                # maxpooling 'BETWEEN' every 2 blocks
                if block_id < len(blocks)-1:
                    encoder = MaxPooling2D(pool_size = (2,2))(encoder)

        # Decoding
        decoder = encoder
        decoder_blocks = blocks[::-1][1:]
        for block_id, n_block in enumerate(decoder_blocks):
            with K.name_scope('Decoder_block_{0}'.format(block_id)):
                block_id_inv = len(blocks) - 1 - block_id
                decoder = concatenate([decoder, list_encoders[block_id_inv]], axis = ch_axis) # concatenate the first decoder with the last encoder and so on, according to the channell axis
                decoder = Conv2D(filters=n_block, kernel_size = filter_size, activation = activation, padding = 'same',
                                dilation_rate = (2,2), kernel_initializer = filter_initializer)(decoder)
                decoder = AlphaDropout(0.1 * block_id)(decoder)
                decoder = Conv2D(filters=n_block, kernel_size = filter_size, activation = activation, padding = 'same',
                                kernel_initializer = filter_initializer)(decoder)
                decoder = Conv2DTranspose(filters=n_block, kernel_size = filter_size, kernel_initializer = filter_initializer,
                                         padding='same', strides=(2,2))(decoder)
                
        # Last Layer...
        outp = Conv2DTranspose(filters=1, kernel_size = filter_size, activation = 'sigmoid',
                               padding = 'same', kernel_initializer = 'glorot_normal')(decoder)
            
        self.model = Model(inputs=[inp], outputs=[outp])
        
        return
Code Example #7
def selu_base_network(input_shape):
    '''Base network to be shared (eq. to feature extraction).
    '''
    inputs = Input(shape=input_shape)
    x = Dense(128, activation='selu',
              kernel_initializer='lecun_normal')(inputs)
    x = AlphaDropout(0.1)(x)
    x = Dense(128, activation='selu', kernel_initializer='lecun_normal')(x)
    x = AlphaDropout(0.1)(x)
    x = Dense(128, activation='selu', kernel_initializer='lecun_normal')(x)
    return Model(inputs, x)
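
A hypothetical use of this base network as a shared tower, e.g. in a Siamese setup (the 784-dimensional input is an arbitrary choice for illustration):

base = selu_base_network((784,))
input_a, input_b = Input((784,)), Input((784,))
features_a, features_b = base(input_a), base(input_b)
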
Code Example #8
    def __autoencoder(size, encoded_size, l1, activation, dropout_rate,
                      use_batch_norm, coefficients):
        # TODO activation fns: relu, elu, selu (AlphaDropout instead of dropout for selu)
        # TODO dropout rate: 0 (no dropout), 0.1, 0.2, 0.3
        # TODO batch norm: YES/NO
        # TODO l1: 0 (no regularization), 1e-7, 1e-6
        # TODO optimizers: rmsprop, adam, nadam
        # TODO coefficients: [4, 8, 12], [4, 8], [2, 4] etc.
        input_layer = Input(shape=(size, ), name='input_conformation')

        x = input_layer
        for i, c in enumerate(coefficients):
            idx = i + 1
            x = Dense(size // c, activation=activation, name="enc_%d" % idx)(x)

            if use_batch_norm:
                x = BatchNormalization(name="enc_%d_batch_norm" % idx)(x)

            if dropout_rate > 0:
                if activation == 'selu':
                    x = AlphaDropout(dropout_rate,
                                     name="enc_%d_dropout" % idx)(x)
                else:
                    x = Dropout(dropout_rate, name="enc_%d_dropout" % idx)(x)

        x = Dense(encoded_size,
                  activation="linear",
                  name="encoded",
                  activity_regularizer=regularizers.l1(l1))(x)

        for i, c in enumerate(reversed(coefficients)):
            idx = len(coefficients) - i
            x = Dense(size // c, activation=activation, name="dec_%d" % idx)(x)

            if use_batch_norm:
                x = BatchNormalization(name="dec_%d_batch_norm" % idx)(x)

            if dropout_rate > 0:
                if activation == 'selu':
                    x = AlphaDropout(dropout_rate,
                                     name="dec_%d_dropout" % idx)(x)
                else:
                    x = Dropout(dropout_rate, name="dec_%d_dropout" % idx)(x)

        decoded = Dense(size, activation="linear",
                        name="decoded_conformation")(x)

        autoencoder = Model(input_layer, decoded)
        autoencoder.compile(optimizer=Adam(lr=0.001),
                            loss='mse',
                            metrics=['mae'])
        autoencoder.summary()

        return autoencoder
Code Example #9
def getSELUClassifier(nIn, nOut, compileArgs):
  model = Sequential()
  model.add(Dense(16, input_dim=nIn, kernel_initializer='he_normal', activation='selu'))
  model.add(AlphaDropout(0.2))
  model.add(Dense(32, kernel_initializer='he_normal', activation='selu'))
  model.add(AlphaDropout(0.2))
  model.add(Dense(32, kernel_initializer='he_normal', activation='selu'))
  model.add(AlphaDropout(0.2))
  model.add(Dense(32, kernel_initializer='he_normal', activation='selu'))
  model.add(AlphaDropout(0.2))
  model.add(Dense(nOut, activation="sigmoid", kernel_initializer='glorot_normal'))
  model.compile(**compileArgs)
  return model
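
For illustration, a hypothetical call; compileArgs is forwarded verbatim to model.compile(), so any valid compile keywords work (the values below are assumptions):

compile_args = {'loss': 'binary_crossentropy', 'optimizer': 'adam',
                'metrics': ['accuracy']}
model = getSELUClassifier(nIn=30, nOut=1, compileArgs=compile_args)
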
Code Example #10
def cnn_model():
    # create model
    model = Sequential()
    model.add(
        ZeroPadding2D(padding=(2, 2),
                      data_format=None,
                      input_shape=(1, 28, 28)))  # assumes channels_first image data format
    model.add(Conv2D(32, (5, 5), activation='relu', use_bias=False))
    model.add(Conv2D(32, (5, 5), activation='relu', use_bias=False))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))

    model.add(Conv2D(64, (3, 3), activation='relu', use_bias=False))
    model.add(Conv2D(64, (3, 3), activation='relu', use_bias=False))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))

    model.add(Flatten())

    model.add(
        Dense(512,
              activation='selu',
              kernel_initializer=lecun_uniform(seed=None),
              use_bias=True,
              bias_initializer=lecun_uniform(seed=None)))
    model.add(AlphaDropout(0.50))

    model.add(
        Dense(100,
              activation='selu',
              kernel_initializer=lecun_uniform(seed=None),
              use_bias=True,
              bias_initializer=lecun_uniform(seed=None)))
    model.add(AlphaDropout(0.50))

    model.add(Dense(num_classes, activation='softmax'))

    # Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()

    return model
Code Example #11
File: ResNet_SNN.py Project: coronnie/ML
    def f(input_features):
        conv_name_base, bn_name_base = _block_name_base(stage, block)
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            x = Conv2D(filters=filters,
                       kernel_size=(3, 3),
                       strides=transition_strides,
                       dilation_rate=dilation_rate,
                       padding="same",
                       kernel_initializer="lecun_normal",
                       kernel_regularizer=l2(1e-4),
                       name=conv_name_base + '2a')(input_features)
        else:
            x = residual_unit(filters=filters,
                              kernel_size=(3, 3),
                              strides=transition_strides,
                              dilation_rate=dilation_rate,
                              conv_name_base=conv_name_base + '2a',
                              bn_name_base=bn_name_base + '2a')(input_features)

        if dropout is not None:
            x = AlphaDropout(dropout)(x)

        x = residual_unit(filters=filters,
                          kernel_size=(3, 3),
                          conv_name_base=conv_name_base + '2b',
                          bn_name_base=bn_name_base + '2b')(x)

        return _shortcut(input_features, x)
Code Example #12
    def model_design(self):
        convolution_output = []
        inindex = 0
        for filter_width in self.filter_sizes:
            conv = Convolution1D(filters=256,
                                 kernel_size=filter_width,
                                 activation='relu',
                                 name='Conv1D_{}_{}_{}'.format(
                                     256, filter_width, inindex))(self.x)
            pool = GlobalMaxPooling1D(
                name='MaxPoolingOverTime_{}_{}_{}'.format(
                    256, filter_width, inindex))(conv)
            convolution_output.append(pool)
            inindex = inindex + 1

        self.x = Concatenate()(convolution_output)
        for fl in self.fully_connected_layers:
            self.x = Dense(fl,
                           activation='relu',
                           kernel_initializer='lecun_normal')(self.x)
            self.x = AlphaDropout(self.dropout_p)(self.x)

        predictions = Dense(self.label_size, activation='softmax')(self.x)
        self.model = Model(inputs=self.inputs, outputs=predictions)
        self.model.compile(optimizer=self.optimizer, loss=self.loss)
        self.model.summary()
Code Example #13
File: training.py Project: teja-vsd/DSDevOps
def build_model(input_dim, hidden1, hidden2, output_dim, drpout_rate, lr):
    inputs = Input(shape=(input_dim,))
    dense_layer1 = Dense(name='first_dense_layer',
                         units=hidden1,
                         activation='relu')
    dense_layer2 = Dense(name='second_hidden_layer',
                         units=hidden2,
                         activation='relu')
    dense_layer3 = Dense(name='final_layer',
                         units=output_dim,
                         activation='softmax')
    input_bn_layer = BatchNormalization(name='input_batchnorm_layer')
    bn_layer1 = BatchNormalization(name='first_batchnorm_layer')
    bn_layer2 = BatchNormalization(name='second_batchnorm_layer')
    dropout_layer = AlphaDropout(rate=drpout_rate)

    y = input_bn_layer(inputs)
    y = dense_layer1(y)
    y = bn_layer1(y)
    y = dropout_layer(y)
    y = dense_layer2(y)
    y = bn_layer2(y)
    y = dropout_layer(y)
    outputs = dense_layer3(y)

    model = Model(inputs=inputs, outputs=outputs)
    adam = Adam(lr=lr)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    return model
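
A hypothetical call (all sizes are illustrative, not from the project):

model = build_model(input_dim=10, hidden1=64, hidden2=32,
                    output_dim=3, drpout_rate=0.1, lr=1e-3)
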
Code Example #14
def base_cnn():
    x = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    i = Conv2D(64, (3, 3),
               activation='selu',
               kernel_initializer='lecun_normal',
               name='conv1')(x)
    i = MaxPooling2D()(i)
    i = Conv2D(64, (3, 3),
               activation='selu',
               kernel_initializer='lecun_normal',
               name='conv2')(i)
    i = MaxPooling2D()(i)
    i = Conv2D(128, (3, 3),
               activation='selu',
               kernel_initializer='lecun_normal',
               name='conv3')(i)
    i = MaxPooling2D()(i)
    i = Conv2D(128, (3, 3),
               activation='selu',
               kernel_initializer='lecun_normal',
               name='conv4')(i)
    i = MaxPooling2D()(i)
    i = Conv2D(256, (3, 3),
               activation='selu',
               kernel_initializer='lecun_normal',
               name='conv5')(i)
    i = MaxPooling2D()(i)
    i = Conv2D(256, (3, 3),
               activation='selu',
               kernel_initializer='lecun_normal',
               name='conv6')(i)
    # i = Conv2D(512, (3, 3), activation='selu', kernel_initializer='lecun_normal')(i)
    i = AlphaDropout(0.3)(i)
    i = Flatten()(i)
    return x, i
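
A hypothetical way to close the network (IMAGE_SIZE is a module-level constant in the original file; 256 is an assumed value, large enough to survive the six valid-padded conv/pool stages):

IMAGE_SIZE = 256
x, features = base_cnn()
model = Model(x, Dense(10, activation='softmax')(features))
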
Code Example #15
def alpha_dropout(layer, layer_in, layerId, tensor=True):
    rate = layer['params']['rate']
    seed = layer['params']['seed']
    out = {layerId: AlphaDropout(rate=rate, seed=seed)}
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out
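
A hypothetical call, inferred from how the function reads its arguments (the dict shape and layer id below are assumptions):

layer = {'params': {'rate': 0.1, 'seed': 42}}
out = alpha_dropout(layer, [prev_tensor], 'drop1')  # prev_tensor: an existing Keras tensor
dropped = out['drop1']
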
Code Example #16
def _fully_connected_layers(num_layers, num_neurons, x):
    x = _default_dense_layer(num_neurons, x)
    for _ in range(num_layers // 4):
        x = _dense_indentity(4, num_neurons, x)
    x = _dense_indentity(max(num_layers % 4 - 1, 0), num_neurons, x)
    x = AlphaDropout(0.5)(x)
    return x
Code Example #17
File: training.py Project: netrack/dnscnn
    def _build_model(self):
        """
        Build and compile the Character Level CNN model

        Returns: None

        """
        # Input layer
        inputs = Input(shape=(self.input_size,), name='sent_input', dtype='int64')
        # Embedding layers
        x = Embedding(self.alphabet_size + 1, self.embedding_size, input_length=self.input_size)(inputs)
        # Convolution layers
        convolution_output = []
        for num_filters, filter_width in self.conv_layers:
            conv = Convolution1D(filters=num_filters,
                                 kernel_size=filter_width,
                                 activation='tanh',
                                 name='Conv1D_{}_{}'.format(num_filters, filter_width))(x)
            pool = GlobalMaxPooling1D(name='MaxPoolingOverTime_{}_{}'.format(num_filters, filter_width))(conv)
            convolution_output.append(pool)
        x = Concatenate()(convolution_output)
        # Fully connected layers
        for fl in self.fully_connected_layers:
            x = Dense(fl, activation='selu', kernel_initializer='lecun_normal')(x)
            x = AlphaDropout(self.dropout_p)(x)
        # Output layer
        predictions = Dense(self.num_of_classes, activation='softmax')(x)
        # Build and compile model
        model = Model(inputs=inputs, outputs=predictions)
        model.compile(optimizer=self.optimizer, loss=self.loss,
                      metrics=["accuracy"])
        self.model = model
        print("CharCNNKim model built: ")
        self.model.summary()
Code Example #18
    def f(input):
        residuals = []
        for i in range(num_residuals):
            residual = _norm_relu_conv(filters,
                                       kernel_size=1,
                                       subsample=subsample,
                                       normalization=normalization,
                                       weight_norm=weight_norm,
                                       weight_decay=weight_decay,
                                       norm_kwargs=norm_kwargs,
                                       init=init,
                                       nonlinearity=nonlinearity,
                                       ndim=ndim,
                                       name=name)(input)
            residual = _norm_relu_conv(filters,
                                       kernel_size=3,
                                       normalization=normalization,
                                       weight_norm=weight_norm,
                                       weight_decay=weight_decay,
                                       norm_kwargs=norm_kwargs,
                                       init=init,
                                       nonlinearity=nonlinearity,
                                       ndim=ndim,
                                       name=name)(residual)
            residual = _norm_relu_conv(filters * 4,
                                       kernel_size=1,
                                       upsample=upsample,
                                       normalization=normalization,
                                       weight_norm=weight_norm,
                                       weight_decay=weight_decay,
                                       norm_kwargs=norm_kwargs,
                                       init=init,
                                       nonlinearity=nonlinearity,
                                       ndim=ndim,
                                       name=name)(residual)
            if dropout > 0:
                if nonlinearity == 'selu':
                    residual = AlphaDropout(dropout)(residual)
                else:
                    residual = Dropout(dropout)(residual)
            residuals.append(residual)

        if len(residuals) > 1:
            output = merge_add(residuals)
        else:
            output = residuals[0]
        if skip:
            output = _shortcut(input,
                               output,
                               subsample=subsample,
                               upsample=upsample,
                               weight_norm=weight_norm,
                               normalization=normalization,
                               weight_decay=weight_decay,
                               init=init,
                               ndim=ndim,
                               name=name)
        return output
Code Example #19
File: storm_ua_gan.py Project: xwtang/deepsky
def generator_model(input_size=100,
                    filter_width=5,
                    min_data_width=4,
                    min_conv_filters=64,
                    output_size=(32, 32, 1),
                    stride=2,
                    activation="relu",
                    output_activation="linear",
                    dropout_alpha=0):
    """ 
    Creates a generator convolutional neural network for a generative adversarial network set. The keyword arguments
    allow aspects of the structure of the generator to be tuned for optimal performance.

    Args:
        input_size (int): Number of nodes in the input layer.
        filter_width (int): Width of each convolutional filter
        min_data_width (int): Width of the first convolved layer after the input layer
        min_conv_filters (int): Number of convolutional filters in the last convolutional layer
        output_size (tuple of size 3): Dimensions of the output
        stride (int): Number of pixels that the convolution filter shifts between operations.
        activation (str): Type of activation used for convolutional layers. Use "leaky" for Leaky ReLU.
        output_activation (str): Type of activation used on the output layer
        dropout_alpha (float): proportion of nodes dropped out
    Returns:
        Model output graph, model input
    """
    num_layers = int(np.log2(output_size[0]) - np.log2(min_data_width))
    max_conv_filters = int(min_conv_filters * 2**(num_layers - 1))
    curr_conv_filters = max_conv_filters
    vector_input = Input(shape=(input_size, ), name="gen_input")
    model = Dense(units=max_conv_filters * min_data_width * min_data_width,
                  kernel_regularizer=l2())(vector_input)
    model = Reshape((min_data_width, min_data_width, max_conv_filters))(model)
    if activation == "leaky":
        model = LeakyReLU(alpha=0.2)(model)
    else:
        model = Activation(activation)(model)
    for i in range(1, num_layers):
        curr_conv_filters //= 2
        model = Conv2DTranspose(curr_conv_filters,
                                filter_width,
                                strides=(stride, stride),
                                padding="same")(model)
        if activation == "leaky":
            model = LeakyReLU(alpha=0.2)(model)
        else:
            model = Activation(activation)(model)
        if activation == "selu":
            model = AlphaDropout(dropout_alpha)(model)
        else:
            model = Dropout(dropout_alpha)(model)
    model = Conv2DTranspose(output_size[-1],
                            filter_width,
                            strides=(stride, stride),
                            padding="same")(model)
    model = Activation(output_activation)(model)
    return model, vector_input
Code Example #20
File: storm_ua_gan.py Project: xwtang/deepsky
def encoder_disc_model(input_size=(32, 32, 1),
                       filter_width=5,
                       min_data_width=4,
                       min_conv_filters=64,
                       output_size=100,
                       stride=2,
                       activation="relu",
                       encoder_output_activation="linear",
                       dropout_alpha=0):
    """
    Creates an encoder/discriminator convolutional neural network that reproduces the generator input vector.
    The keyword arguments allow aspects of the structure of the encoder/discriminator to be tuned
    for optimal performance.

    Args:
        input_size (tuple of ints): Number of nodes in the input layer.
        filter_width (int): Width of each convolutional filter
        min_data_width (int): Width of the last convolved layer
        min_conv_filters (int): Number of convolutional filters in the first convolutional layer
        output_size (int): Dimensions of the output
        stride (int): Number of pixels that the convolution filter shifts between operations.
        activation (str): Type of activation used for convolutional layers. Use "leaky" for Leaky ReLU.
        encoder_output_activation (str): Type of activation used on the output layer
        dropout_alpha (float): Proportion of nodes dropped out during training.
    Returns:
        discriminator model output, encoder model output, image input
    """
    num_layers = int(np.log2(input_size[0]) - np.log2(min_data_width))
    curr_conv_filters = min_conv_filters
    image_input = Input(shape=input_size, name="enc_input")
    model = image_input
    for c in range(num_layers):
        model = Conv2D(curr_conv_filters,
                       filter_width,
                       strides=(stride, stride),
                       padding="same")(model)
        if activation == "leaky":
            model = LeakyReLU(0.2)(model)
        else:
            model = Activation(activation)(model)
        if activation == "selu":
            model = AlphaDropout(dropout_alpha)(model)
        else:
            model = Dropout(dropout_alpha)(model)
        curr_conv_filters *= 2
    model = Flatten()(model)
    enc_model = Dense(256, kernel_regularizer=l2())(model)
    if activation == "leaky":
        enc_model = LeakyReLU(0.2)(enc_model)
    else:
        enc_model = Activation(activation)(enc_model)
    enc_model = Dense(output_size, kernel_regularizer=l2())(enc_model)
    enc_model = Activation(encoder_output_activation)(enc_model)
    disc_model = Dense(1, kernel_regularizer=l2())(model)
    disc_model = Activation("sigmoid")(disc_model)
    return disc_model, enc_model, image_input
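
A hypothetical wiring of the generator and encoder/discriminator halves into trainable models (illustrative only; the project's actual training loop is not shown):

gen_out, gen_in = generator_model(activation="selu", dropout_alpha=0.1)
disc_out, enc_out, img_in = encoder_disc_model(activation="selu", dropout_alpha=0.1)
generator = Model(gen_in, gen_out)
discriminator = Model(img_in, disc_out)
encoder = Model(img_in, enc_out)
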
Code Example #21
File: ANN.py Project: laurasteinmann/NA_ANN
def add_drops(model, drop_out, k):
    if DG[k].upper() == 'D':
        model.add(Dropout(drop_out[k]))
    elif DG[k].upper() == 'G':
        model.add(GaussianNoise(drop_out[k]))
    elif DG[k].upper() == "A":
        model.add(AlphaDropout(drop_out[k]))
    else:
        pass
    return model
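
A sketch of usage (DG is a module-level schedule in the original file; the values below are assumptions):

DG = ['A', 'G', 'D']        # per-layer choice: AlphaDropout / GaussianNoise / Dropout
drop_out = [0.1, 0.2, 0.3]  # per-layer rates
model = Sequential([Dense(64, activation='selu', input_shape=(10,))])
model = add_drops(model, drop_out, 0)  # k=0 -> 'A' -> AlphaDropout(0.1)
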
Code Example #22
File: caravana.py Project: kirk86/kaggle
def up(input_layer, residual, filters):
    filters = int(filters)
    upsample = UpSampling2D()(input_layer)
    upconv = Conv2D(filters, (2, 2), padding="same")(upsample)
    concat = Concatenate(axis=3)([residual, upconv])
    # drop = Dropout(0.3)(concat)
    drop = AlphaDropout(rate=0.3)(concat)
    conv1 = Conv2D(filters, (3, 3), padding='same', activation='elu')(drop)
    conv2 = Conv2D(filters, (3, 3), padding='same', activation='elu')(conv1)
    return conv2
Code Example #23
def build_sequential_model(input_rate, rate, shape):
    model = Sequential()

    model.add(AlphaDropout(input_rate, input_shape=(shape,)))

    model.add(Dense(6, activation="linear", kernel_initializer="lecun_normal"))
    model.add(Activation('selu'))
    model.add(AlphaDropout(rate))

    model.add(Dense(3, activation="linear", kernel_initializer="lecun_normal"))
    model.add(Activation('selu'))
    model.add(AlphaDropout(rate))

    model.add(Dense(units=1, activation="linear", kernel_initializer="lecun_normal"))

    optim = Adam(lr=0.01, beta_1=0.95)

    model.compile(loss='mean_squared_error',
                    optimizer=optim)
    return model
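
A hypothetical call (8 input features is an arbitrary choice); note that AlphaDropout is applied to the raw inputs as well as to both hidden layers:

model = build_sequential_model(input_rate=0.05, rate=0.1, shape=8)
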
Code Example #24
def double_conv_layer(x, size, dropout, batch_norm):
    conv = Conv2D(size, (3, 3), padding='same')(x)
    if batch_norm is True:
        conv = BatchNormalization()(conv)
    conv = Activation('selu')(conv)
    conv = Conv2D(size, (3, 3), padding='same')(conv)
    if batch_norm is True:
        conv = BatchNormalization()(conv)
    conv = Activation('selu')(conv)
    if dropout > 0:
        conv = AlphaDropout(dropout)(conv)
    return conv
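
A hypothetical call building one encoder block on a 128x128 RGB input (illustrative values):

inputs = Input((128, 128, 3))
block = double_conv_layer(inputs, size=32, dropout=0.1, batch_norm=True)
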
Code Example #25
def get_model_selu(input_size, output, crossentropy=True):
    model = Sequential()
    model.add(
        Dense(500,
              input_shape=(input_size, ),
              bias_initializer='zeros',
              kernel_initializer=keras.initializers.lecun_normal()))
    model.add(Activation('selu'))
    model.add(BatchNormalization(axis=1))
    model.add(AlphaDropout(0.25))

    for index in range(5):
        model.add(
            Dense(500,
                  bias_initializer='zeros',
                  kernel_initializer=keras.initializers.lecun_normal()))
        model.add(Activation('selu'))
        model.add(BatchNormalization(axis=1))
        model.add(AlphaDropout(0.25))

    model.add(
        Dense(500,
              bias_initializer='zeros',
              kernel_initializer=keras.initializers.lecun_normal()))
    model.add(Activation('selu'))
    model.add(BatchNormalization(axis=1))
    model.add(AlphaDropout(0.5))

    model.add(
        Dense(output,
              bias_initializer='zeros',
              kernel_initializer=keras.initializers.lecun_normal()))
    if crossentropy:
        model.add(Activation("softmax"))
    else:
        model.add(Activation("sigmoid"))

    model.summary()

    return model
Code Example #26
def prepare_processor(input_dim, output_dim, hidden_dim, dense_depth,
                      optimizer_type, loss_type) -> Model:
    """Prepares the processing LSTM decoder network

            Args:
                :param input_dim: The dimensions of the input
                :param output_dim: The dimensions of the output
                :param output_timesteps: The number of timesteps to predict for the output
                :param hidden_dim: The dimensionality of internal network layers
                :param dense_depth: The number of dense hidden layers
                :param lstm_depth: The number of LSTM hidden layers
                :param optimizer_type: The type of optimizer to use (eg. "rmsprop")
                :param loss_type: The type of loss to use (eg. "mse")

            Returns:
                Processing decoder model
        """

    model = Sequential()
    model.add(
        Dense(input_dim,
              input_shape=(input_dim, ),
              activation="selu",
              kernel_initializer='lecun_normal'))

    model.add(AlphaDropout(0.2))

    for i in range(dense_depth):
        model.add(
            Dense(hidden_dim,
                  activation="selu",
                  kernel_initializer='lecun_normal'))
        model.add(AlphaDropout(0.2))

    model.add(Dense(output_dim, activation="softmax"))

    model.compile(optimizer=optimizer_type, loss=loss_type, metrics=['acc'])
    model.summary()

    return model
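
A hypothetical call matching the dense (not LSTM) layers the function actually builds (all sizes are assumptions):

model = prepare_processor(input_dim=40, output_dim=5, hidden_dim=128,
                          dense_depth=2, optimizer_type='rmsprop',
                          loss_type='categorical_crossentropy')
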
Code Example #27
def build_sequential_model(input_rate, rate, shape):
    model = Sequential()

    model.add(AlphaDropout(input_rate, input_shape=(shape, )))

    model.add(Dense(6, activation="linear", kernel_initializer="lecun_normal"))
    #model.add(BatchNormalization())
    model.add(Activation('selu'))
    model.add(AlphaDropout(rate))

    model.add(Dense(3, activation="linear", kernel_initializer="lecun_normal"))
    #model.add(BatchNormalization())
    model.add(Activation('selu'))
    model.add(AlphaDropout(rate))

    model.add(
        Dense(activation="sigmoid", units=1,
              kernel_initializer="lecun_normal"))

    optim = Adam(lr=0.01, beta_1=0.95)

    model.compile(loss='binary_crossentropy', optimizer=optim)
    return model
Code Example #28
File: char_cnn.py Project: sethips/nlp_tutorials
def define_model_1(conv_layers,
                   fully_connected_layers,
                   input_size=MAX_INPUT_LEN,
                   embedding_size=32,
                   alphabet_size=ALPHABET_SIZE,
                   num_classes=2,
                   optimizer='adam',
                   dropout_proba=0.5,
                   fl_activation='selu',
                   fl_initializer='lecun_normal',
                   conv_activations='tanh',
                   loss='categorical_crossentropy'):
    """
    Based on: https://arxiv.org/abs/1508.06615
    """
    inputs = Input(shape=(input_size, ), name='input_layer', dtype='int64')
    embeds = Embedding(alphabet_size + 1,
                       embedding_size,
                       input_length=input_size)(inputs)
    convs = list()
    for num_filters, filter_width in conv_layers:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=filter_width,
                             activation=conv_activations,
                             name='ConvLayer{}{}'.format(
                                 num_filters, filter_width))(embeds)
        pool = GlobalMaxPooling1D(
            name='MaxPoolLayer{}{}'.format(num_filters, filter_width))(conv)
        convs.append(pool)

    x = Concatenate()(convs)
    for units in fully_connected_layers:
        x = Dense(units,
                  activation=fl_activation,
                  kernel_initializer=fl_initializer)(x)
        x = AlphaDropout(dropout_proba)(x)

    predictions = Dense(num_classes, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=predictions)

    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    return model
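
A hypothetical call (MAX_INPUT_LEN and ALPHABET_SIZE are module-level constants in the original file; the layer lists below are illustrative):

model = define_model_1(conv_layers=[[256, 7], [256, 5], [256, 3]],
                       fully_connected_layers=[1024, 1024])
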
Code Example #29
    def f(input):
        residuals = []
        for i in range(num_residuals):
            residual = input
            if normalization is not None:
                residual = normalization(name=name + "_norm_" + str(i),
                                         **norm_kwargs)(residual)
            residual = get_nonlinearity(nonlinearity)(residual)
            if subsample:
                residual = MaxPooling(pool_size=2, ndim=ndim)(residual)
            residual = Convolution(filters=filters,
                                   kernel_size=3,
                                   ndim=ndim,
                                   weight_norm=weight_norm,
                                   kernel_initializer=init,
                                   padding='same',
                                   kernel_regularizer=_l2(weight_decay),
                                   name=name + "_conv2d_" + str(i))(residual)
            if dropout > 0:
                if nonlinearity == 'selu':
                    residual = AlphaDropout(dropout)(residual)
                else:
                    residual = Dropout(dropout)(residual)
            if upsample:
                residual = UpSampling(size=2, ndim=ndim)(residual)
            residuals.append(residual)

        if len(residuals) > 1:
            output = merge_add(residuals)
        else:
            output = residuals[0]
        if skip:
            output = _shortcut(input,
                               output,
                               subsample=subsample,
                               upsample=upsample,
                               normalization=normalization,
                               weight_norm=weight_norm,
                               weight_decay=weight_decay,
                               init=init,
                               ndim=ndim,
                               name=name)
        return output
Code Example #30
File: KerasUtilities.py Project: Zeigar/photon
def create_snn_classif(input_size,
                       n_classes,
                       layer_sizes=None,
                       actFunc='selu',
                       learning_rate=0.001,
                       alpha_dropout_rate=0,
                       nb_epoch=200,
                       loss='categorical_crossentropy',
                       metrics=None,
                       optimizer='adam',
                       gpu_device='/gpu:0'):
    if layer_sizes is None:
        layer_sizes = []
    if metrics is None:
        metrics = ['accuracy']
    model = Sequential()
    input_dim = input_size
    for i, dim in enumerate(layer_sizes):
        with tf.device(gpu_device):
            if i == 0:
                model.add(
                    Dense(dim,
                          input_dim=input_dim,
                          kernel_initializer='lecun_normal'))
            else:
                model.add(Dense(dim, kernel_initializer='lecun_normal'))

        with tf.device(gpu_device):
            model.add(Activation(actFunc))

            if alpha_dropout_rate > 0:
                model.add(AlphaDropout(alpha_dropout_rate))

    with tf.device(gpu_device):
        model.add(Dense(n_classes, activation='softmax'))

    # Compile model
    optimizer = define_optimizer(optimizer_type=optimizer, lr=learning_rate)

    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    return model
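
A hypothetical call (define_optimizer and the tf.device placement are project code not shown here; sizes are illustrative): a three-hidden-layer SELU classifier on 20 features with 4 output classes.

clf = create_snn_classif(input_size=20, n_classes=4,
                         layer_sizes=[64, 64, 32],
                         alpha_dropout_rate=0.05)
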