Example #1
    def test_max_norm(self):
        array = get_example_array()
        for m in get_test_values():
            norm_instance = constraints.max_norm(m)
            normed = norm_instance(backend.variable(array))
            assert np.all(backend.eval(normed) < m)

        # a more explicit example
        norm_instance = constraints.max_norm(2.0)
        x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
        x_normed_target = np.array(
            [[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0],
             [2. / np.sqrt(3), 2. / np.sqrt(3), 2. / np.sqrt(3)]]).T
        x_normed_actual = backend.eval(norm_instance(backend.variable(x)))
        self.assertAllClose(x_normed_actual, x_normed_target, rtol=1e-05)
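For reference, the same clipping rule can be written directly in NumPy. This is a sketch of the documented max_norm behaviour (any column whose L2 norm exceeds max_value is rescaled to have norm max_value), not the library implementation itself:

import numpy as np

def max_norm_reference(w, max_value=2.0, axis=0):
    # L2 norm of each column (axis=0), kept as a row for broadcasting
    norms = np.sqrt(np.sum(np.square(w), axis=axis, keepdims=True))
    desired = np.clip(norms, 0, max_value)
    return w * (desired / (1e-7 + norms))

x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
print(max_norm_reference(x, 2.0))   # matches x_normed_target above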
Example #2
def load(architecture,
         activation_fn,
         optimizer,
         learning_rate,
         input_size,
         output_size,
         max_norm_weights=False):
    input_layer = keras.layers.Input((input_size, ))
    clayer = input_layer
    for n in architecture:
        clayer = keras.layers.Dense(
            n,
            activation=activation_fn_map[activation_fn],
            kernel_initializer=keras.initializers.TruncatedNormal(
                mean=0.0, stddev=1 / np.sqrt(float(n)), seed=None),
            bias_initializer='zeros',
            kernel_constraint=(max_norm(max_value=float(max_norm_weights))
                               if max_norm_weights else None))(clayer)
    output_layer = keras.layers.Dense(output_size,
                                      activation='softmax')(clayer)

    model = keras.models.Model(inputs=input_layer, outputs=output_layer)
    optimizer = optimizer_map[optimizer](learning_rate=learning_rate)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
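A hypothetical usage sketch. activation_fn_map and optimizer_map are defined elsewhere in the original module; the minimal versions below are assumptions added only to make the snippet self-contained, as are the concrete sizes:

import numpy as np
from tensorflow import keras
from tensorflow.keras.constraints import max_norm

activation_fn_map = {'relu': 'relu', 'tanh': 'tanh', 'elu': 'elu'}
optimizer_map = {'adam': keras.optimizers.Adam, 'sgd': keras.optimizers.SGD}

model = load(architecture=[128, 64],
             activation_fn='relu',
             optimizer='adam',
             learning_rate=1e-3,
             input_size=20,
             output_size=4,
             max_norm_weights=3.0)   # cap each kernel column's norm at 3
model.summary()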
Example #3
    def _add_generator_block(old_model, config):
        # get the end of the last block
        block_end = old_model.layers[-2].output

        # weights init
        w_init = RandomNormal(stddev=0.02)
        w_const = max_norm(1.0)

        # upsample, and define new block
        upsampling = UpSampling2D()(block_end)

        # conv layers (the second positional argument of Conv2D is the kernel
        # size, so these are two 3x3 convolutions with the default stride of 1)
        x = upsampling
        for kernel_size in [3, 3]:
            x = Conv2D(config['filters'],
                       kernel_size,
                       padding='same',
                       kernel_initializer=w_init,
                       kernel_constraint=w_const)(x)
            x = PixelNormalization()(x)
            x = LeakyReLU(alpha=0.2)(x)

        # add new output layer
        out_image = Conv2D(config['n_channels'], 1, padding='same')(x)
        # define model
        model1 = Model(old_model.input, out_image)
        # get the output layer from old model
        out_old = old_model.layers[-1]
        # connect the upsampling to the old output layer
        out_image2 = out_old(upsampling)
        # define new output image as the weighted sum of the old and new models
        merged = WeightedSum()([out_image2, out_image])
        # define model
        model2 = Model(old_model.input, merged)
        return [model1, model2]
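WeightedSum and PixelNormalization are custom layers imported from elsewhere in this repository. A minimal sketch along the lines of the common ProGAN implementations, offered as an assumption rather than the exact layers used here:

from tensorflow.keras import backend
from tensorflow.keras.layers import Add, Layer

class WeightedSum(Add):
    # fade-in merge: out = (1 - alpha) * old + alpha * new
    def __init__(self, alpha=0.0, **kwargs):
        super(WeightedSum, self).__init__(**kwargs)
        self.alpha = backend.variable(alpha, name='ws_alpha')

    def _merge_function(self, inputs):
        assert len(inputs) == 2
        return (1.0 - self.alpha) * inputs[0] + self.alpha * inputs[1]

class PixelNormalization(Layer):
    # normalize each pixel's channel vector to (roughly) unit length
    def call(self, inputs):
        mean_sq = backend.mean(inputs ** 2.0, axis=-1, keepdims=True)
        return inputs / backend.sqrt(mean_sq + 1.0e-8)

    def compute_output_shape(self, input_shape):
        return input_shape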
Example #4
def EEGNet_SSVEP(nb_classes = 12, Chans = 8, Samples = 256, 
             dropoutRate = 0.5, kernLength = 256, F1 = 96, 
             D = 1, F2 = 96, dropoutType = 'Dropout'):
    """ SSVEP Variant of EEGNet, as used in [1]. 
    Inputs:
        
      nb_classes      : int, number of classes to classify
      Chans, Samples  : number of channels and time points in the EEG data
      dropoutRate     : dropout fraction
      kernLength      : length of temporal convolution in first layer
      F1, F2          : number of temporal filters (F1) and number of pointwise
                        filters (F2) to learn. 
      D               : number of spatial filters to learn within each temporal
                        convolution.
      dropoutType     : Either SpatialDropout2D or Dropout, passed as a string.
      
      
    [1]. Waytowich, N. et al. (2018). Compact Convolutional Neural Networks
    for Classification of Asynchronous Steady-State Visual Evoked Potentials.
    Journal of Neural Engineering vol. 15(6). 
    http://iopscience.iop.org/article/10.1088/1741-2552/aae5d8
    """
    
    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')
    
    input1   = Input(shape = (1, Chans, Samples))

    ##################################################################
    block1       = Conv2D(F1, (1, kernLength), padding = 'same',
                                   input_shape = (1, Chans, Samples),
                                   use_bias = False)(input1)
    block1       = BatchNormalization(axis = 1)(block1)
    block1       = DepthwiseConv2D((Chans, 1), use_bias = False, 
                                   depth_multiplier = D,
                                   depthwise_constraint = max_norm(1.))(block1)
    block1       = BatchNormalization(axis = 1)(block1)
    block1       = Activation('elu')(block1)
    block1       = AveragePooling2D((1, 4))(block1)
    block1       = dropoutType(dropoutRate)(block1)
    
    block2       = SeparableConv2D(F2, (1, 16),
                                   use_bias = False, padding = 'same')(block1)
    block2       = BatchNormalization(axis = 1)(block2)
    block2       = Activation('elu')(block2)
    block2       = AveragePooling2D((1, 8))(block2)
    block2       = dropoutType(dropoutRate)(block2)
        
    flatten      = Flatten(name = 'flatten')(block2)
    
    dense        = Dense(nb_classes, name = 'dense')(flatten)
    softmax      = Activation('softmax', name = 'softmax')(dense)
    
    return Model(inputs=input1, outputs=softmax)
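A short usage sketch. The (1, Chans, Samples) input shape and the BatchNormalization(axis=1) calls assume a channels-first layout, so the backend image data format is set accordingly here; the optimizer choice is an assumption:

from tensorflow.keras import backend as K

K.set_image_data_format('channels_first')

model = EEGNet_SSVEP(nb_classes=12, Chans=8, Samples=256)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
# expects trials of shape (n_trials, 1, 8, 256) and one-hot labels of shape (n_trials, 12)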
Example #5
    def _add_discriminator_block(old_model, config):
        # the new input shape is double the size of the previous one
        old_input_shape = list(old_model.input.shape)
        new_input_shape = (old_input_shape[-2] * 2, old_input_shape[-2] * 2,
                           old_input_shape[-1])
        model_input = Input(shape=new_input_shape, name="doubled_dis_input")

        # weights init
        w_init = RandomNormal(stddev=0.02)
        w_const = max_norm(1.0)

        # conv layers: a 1x1 followed by two 3x3 convolutions (the value is
        # the kernel size, not the stride)
        x = model_input
        for kernel_size in [1, 3, 3]:
            x = Conv2D(config['filters'],
                       kernel_size,
                       padding='same',
                       kernel_initializer=w_init,
                       kernel_constraint=w_const)(x)
            x = LeakyReLU()(x)

        x = AveragePooling2D()(x)

        new_block = x
        # skip the input, 1x1 and activation for the old model
        for i in range(config['num_input_layers'], len(old_model.layers)):
            x = old_model.layers[i](x)

        # define straight-through model
        model1 = Model(model_input, x)

        # compile model
        model1.compile(loss=wasserstein_loss,
                       optimizer=ProGan.get_optimizer(config))

        # downsample the new larger image
        downsample = AveragePooling2D()(model_input)

        # connect old input processing to downsampled new input
        old_block = old_model.layers[1](downsample)
        old_block = old_model.layers[2](old_block)

        # fade in output of old model input layer with new input
        x = WeightedSum()([old_block, new_block])
        # skip the input, 1x1 and activation for the old model
        for i in range(config['num_input_layers'], len(old_model.layers)):
            x = old_model.layers[i](x)

        # define fade-in model
        model2 = Model(model_input, x)

        # compile model
        model2.compile(loss=wasserstein_loss,
                       optimizer=ProGan.get_optimizer(config))

        return [model1, model2]
Example #6
def ShallowConvNet(nb_classes, Chans=64, Samples=128, dropoutRate=0.5):
    """ Keras implementation of the Shallow Convolutional Network as described
    in Schirrmeister et. al. (2017), Human Brain Mapping.
    
    Assumes the input is a 2-second EEG signal sampled at 128Hz. Note that in 
    the original paper, they do temporal convolutions of length 25 for EEG
    data sampled at 250Hz. We instead use length 13 since the sampling rate is 
    roughly half of the 250Hz which the paper used. The pool_size and stride
    in later layers is also approximately half of what is used in the paper.
    
    Note that we use the max_norm constraint on all convolutional layers, as 
    well as the classification layer. We also change the defaults for the
    BatchNormalization layer. We used this based on a personal communication 
    with the original authors.
    
                     ours        original paper
    pool_size        1, 35       1, 75
    strides          1, 7        1, 15
    conv filters     1, 13       1, 25    
    
    Note that this implementation has not been verified by the original 
    authors. We do note that this implementation reproduces the results in the
    original paper with minor deviations. 
    """

    # start the model
    input_main = Input((1, Chans, Samples))
    block1 = Conv2D(40, (1, 13),
                    input_shape=(1, Chans, Samples),
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(input_main)
    block1 = Conv2D(40, (Chans, 1),
                    use_bias=False,
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(block1)
    block1 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block1)
    block1 = Activation(square)(block1)
    block1 = AveragePooling2D(pool_size=(1, 35), strides=(1, 7))(block1)
    block1 = Activation(log)(block1)
    block1 = Dropout(dropoutRate)(block1)
    flatten = Flatten()(block1)
    dense = Dense(nb_classes, kernel_constraint=max_norm(0.5))(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=input_main, outputs=softmax)
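The square and log activations above are custom functions defined elsewhere in the original repository; a minimal sketch of the usual backend-based definitions (the exact clipping values are an assumption):

from tensorflow.keras import backend as K

def square(x):
    # elementwise square, applied after the spatial convolution
    return K.square(x)

def log(x):
    # clipped log so that pooled values of zero do not produce -inf
    return K.log(K.clip(x, min_value=1e-7, max_value=10000))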
Example #7
    def _build_discriminator(config):
        model_input = Input(shape=tuple(config['input_shape']),
                            name="base_dis_input")

        # weights init
        w_init = RandomNormal(stddev=0.02)
        w_const = max_norm(1.0)

        # conv layers: 1x1, 3x3 and 4x4 convolutions (the value is the kernel
        # size, not the stride)
        x = model_input
        for i, kernel_size in enumerate([1, 3, 4]):
            # the minibatch stddev feature map is inserted only after the
            # 1x1 convolution
            if i == 1:
                x = MinibatchStdev()(x)
            x = Conv2D(config['filters'],
                       kernel_size,
                       padding='same',
                       kernel_initializer=w_init,
                       kernel_constraint=w_const)(x)
            x = LeakyReLU()(x)

        # dense output
        features = Flatten()(x)
        model_output = Dense(1)(features)

        # compile model
        model = Model(model_input, model_output)
        model.compile(loss=wasserstein_loss,
                      optimizer=Adam(lr=config['learning_rate'],
                                     beta_1=config['beta_1'],
                                     beta_2=config['beta_2'],
                                     epsilon=config['epsilon']))

        # store model
        model_list = [[model, model]]
        # create submodels
        for i in range(1, config['num_blocks']):
            # get prior model without the fade-in
            old_model = model_list[i - 1][0]
            # create new model for next resolution
            models = ProGan._add_discriminator_block(old_model, config)
            # store model
            model_list.append(models)
        return model_list
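The compile calls reference a wasserstein_loss function and a MinibatchStdev layer that are not shown in this snippet. The loss is commonly defined as below; this is an assumption about what the repository uses, not code taken from it:

from tensorflow.keras import backend as K

def wasserstein_loss(y_true, y_pred):
    # WGAN critic loss: labels of +1/-1 select the sign of the critic score
    return K.mean(y_true * y_pred)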
Example #8
    def _build_generator(init_shape, config):
        latent_vector = Input(config['z_size'], name='generator_input')

        x = Dense(np.prod(init_shape))(latent_vector)
        x = Reshape(init_shape)(x)

        # weights init
        w_init = RandomNormal(stddev=0.02)
        w_const = max_norm(1.0)

        # conv layers: a 4x4 followed by a 3x3 convolution (the value is the
        # kernel size, not the stride)
        for kernel_size in [4, 3]:
            x = Conv2D(config['filters'],
                       kernel_size,
                       padding='same',
                       kernel_initializer=w_init,
                       kernel_constraint=w_const)(x)
            x = PixelNormalization()(x)
            x = LeakyReLU()(x)

        # conv 1x1
        out_image = Conv2D(config['n_channels'], 1, padding='same')(x)

        model = Model(latent_vector, out_image)

        # store model
        model_list = [[model, model]]
        # create submodels
        for i in range(1, config['num_blocks']):
            # get prior model without the fade-in
            old_model = model_list[i - 1][0]
            # create new model for next resolution
            models = ProGan._add_generator_block(old_model, config)
            # store model
            model_list.append(models)
        return model_list
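A hypothetical usage sketch for the generator builder. The config keys mirror the ones read above; the 4x4 base resolution and the concrete values are assumptions for illustration:

config = {
    'z_size': (100,),     # latent vector shape
    'filters': 128,
    'n_channels': 3,
    'num_blocks': 3,      # 4x4 -> 8x8 -> 16x16
}
generators = ProGan._build_generator(init_shape=(4, 4, config['filters']),
                                     config=config)
normal_model, fade_in_model = generators[-1]   # highest-resolution pair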
Example #9
def attention_model_new_arch(src_vocab,
                             target_vocab,
                             src_timesteps,
                             target_timesteps,
                             units,
                             epochs=30):
    encoder_inputs = Input(shape=(src_timesteps, ), name='encoder_inputs')

    decoder_inputs = Input(shape=(target_timesteps - 1, target_vocab),
                           name='decoder_inputs')

    embedding = Embedding(src_vocab,
                          units,
                          input_length=src_timesteps,
                          name='enc_embedding',
                          mask_zero=True)

    embedding2 = Dropout(0.5)(embedding(encoder_inputs))

    encoder_lstm = Bidirectional(LSTM(units,
                                      return_sequences=True,
                                      return_state=True,
                                      kernel_constraint=max_norm(3.0),
                                      recurrent_constraint=max_norm(3.0),
                                      name='encoder_lstm'),
                                 name='bidirectional_encoder')

    encoder_out, forward_h, forward_c, backward_h, backward_c = encoder_lstm(
        embedding2)

    state_h = Concatenate()([forward_h, backward_h])
    state_c = Concatenate()([forward_c, backward_c])
    enc_states = [state_h, state_c]

    # Decoder
    decoder_lstm = LSTM(units * 2,
                        return_sequences=True,
                        return_state=True,
                        name='decoder_lstm')
    decoder_out, _, _ = decoder_lstm(decoder_inputs, initial_state=enc_states)

    # Attention
    attn_layer = AttentionLayer(name='attention_layer')
    attn_out, attn_states = attn_layer([encoder_out, decoder_out])

    # concat attention and decoder output
    decoder_output_concat = Concatenate(axis=-1)([decoder_out, attn_out])

    # FC layer
    tst = Dropout(0.5)(decoder_output_concat)
    decoder_dense = Dense(target_vocab, activation='softmax')
    decoder_pred = decoder_dense(tst)

    model = Model(inputs=[encoder_inputs, decoder_inputs],
                  outputs=decoder_pred)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    # Inference models
    # Encoder Inference model
    encoder_inf_inputs = Input(batch_shape=(1, src_timesteps),
                               name='encoder_inf_inputs')

    encoder_inf_out, encoder_inf_fwd_h, encoder_inf_fwd_c, encoder_inf_back_h, encoder_inf_back_c = encoder_lstm(
        embedding(encoder_inf_inputs))

    encoder_inf_h = Concatenate()([encoder_inf_fwd_h, encoder_inf_back_h])
    encoder_inf_c = Concatenate()([encoder_inf_fwd_c, encoder_inf_back_c])

    encoder_model = Model(
        inputs=encoder_inf_inputs,
        outputs=[encoder_inf_out, encoder_inf_h, encoder_inf_c])

    # Decoder Inference model
    encoder_inf_states = Input(batch_shape=(1, src_timesteps, 2 * units),
                               name='encoder_inf_states')

    decoder_inf_inputs = Input(batch_shape=(1, 1, target_vocab),
                               name='decoder_word_inputs')

    decoder_init_fwd_state = Input(batch_shape=(1, units * 2),
                                   name='decoder_fwd_init')

    decoder_init_back_state = Input(batch_shape=(1, units * 2),
                                    name='decoder_back_init')

    decoder_states_inputs = [decoder_init_fwd_state, decoder_init_back_state]

    decoder_inf_out, decoder_inf_fwd_state, decoder_inf_back_state = decoder_lstm(
        decoder_inf_inputs, initial_state=decoder_states_inputs)

    attn_inf_out, attn_inf_states = attn_layer(
        [encoder_inf_states, decoder_inf_out])

    decoder_inf_concat = Concatenate(
        axis=-1, name='concat')([decoder_inf_out, attn_inf_out])

    decoder_inf_pred = decoder_dense(decoder_inf_concat)

    decoder_model = Model(inputs=[
        encoder_inf_states, decoder_init_fwd_state, decoder_init_back_state,
        decoder_inf_inputs
    ],
                          outputs=[
                              decoder_inf_pred, attn_inf_states,
                              decoder_inf_fwd_state, decoder_inf_back_state
                          ])

    return model, encoder_model, decoder_model
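A hypothetical greedy-decoding sketch for the inference models returned above. src_seq (an integer-encoded source of shape (1, src_timesteps)) and start_id are assumptions; batch size 1 and one-hot decoder inputs match the batch_shape of the inference Inputs:

import numpy as np

def greedy_decode(encoder_model, decoder_model, src_seq, target_vocab,
                  target_timesteps, start_id):
    enc_out, h, c = encoder_model.predict(src_seq)
    step_input = np.zeros((1, 1, target_vocab))
    step_input[0, 0, start_id] = 1.0
    decoded = []
    for _ in range(target_timesteps - 1):
        pred, _, h, c = decoder_model.predict([enc_out, h, c, step_input])
        token = int(np.argmax(pred[0, -1]))
        decoded.append(token)
        step_input = np.zeros((1, 1, target_vocab))
        step_input[0, 0, token] = 1.0
    return decoded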
Example #10
def EEGNet(nb_classes, Chans = 64, Samples = 128, 
             dropoutRate = 0.5, kernLength = 64, F1 = 8, 
             D = 2, F2 = 16, norm_rate = 0.25, dropoutType = 'Dropout'):
    """ Keras Implementation of EEGNet
    http://iopscience.iop.org/article/10.1088/1741-2552/aace8c/meta
    Note that this implements the newest version of EEGNet and NOT the earlier
    versions (v1 and v2 on arXiv). We strongly recommend using this
    architecture as it performs much better and has nicer properties than
    our earlier version. For example:
        
        1. Depthwise Convolutions to learn spatial filters within a 
        temporal convolution. The use of the depth_multiplier option maps 
        exactly to the number of spatial filters learned within a temporal
        filter. This matches the setup of algorithms like FBCSP which learn 
        spatial filters within each filter in a filter-bank. This also limits 
        the number of free parameters to fit when compared to a fully-connected
        convolution. 
        
        2. Separable Convolutions to learn how to optimally combine spatial
        filters across temporal bands. Separable Convolutions are Depthwise
        Convolutions followed by (1x1) Pointwise Convolutions. 
        
    
    While the original paper used Dropout, we found that SpatialDropout2D 
    sometimes produced slightly better results for classification of ERP 
    signals. However, SpatialDropout2D significantly reduced performance 
    on the Oscillatory dataset (SMR, BCI-IV Dataset 2A). We recommend using
    the default Dropout in most cases.
        
    Assumes the input signal is sampled at 128Hz. If you want to use this model
    for any other sampling rate you will need to modify the lengths of temporal
    kernels and average pooling size in blocks 1 and 2 as needed (double the 
    kernel lengths for double the sampling rate, etc). Note that we haven't 
    tested the model performance with this rule so this may not work well. 
    
    The model with default parameters gives the EEGNet-8,2 model as discussed
    in the paper. This model should do pretty well in general, although it is
    advised to do some model searching to get optimal performance on your
    particular dataset.
    We set F2 = F1 * D (number of input filters = number of output filters) for
    the SeparableConv2D layer. We haven't extensively tested other values of this
    parameter (say, F2 < F1 * D for compressed learning, and F2 > F1 * D for
    overcomplete). We believe the main parameters to focus on are F1 and D. 
    Inputs:
        
      nb_classes      : int, number of classes to classify
      Chans, Samples  : number of channels and time points in the EEG data
      dropoutRate     : dropout fraction
      kernLength      : length of temporal convolution in first layer. We found
                        that setting this to be half the sampling rate worked
                        well in practice. For the SMR dataset in particular,
                        since the data was high-passed at 4Hz, we used a kernel
                        length of 32.
      F1, F2          : number of temporal filters (F1) and number of pointwise
                        filters (F2) to learn. Default: F1 = 8, F2 = F1 * D. 
      D               : number of spatial filters to learn within each temporal
                        convolution. Default: D = 2
      dropoutType     : Either SpatialDropout2D or Dropout, passed as a string.
    """
    
    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')
    
    input1   = Input(shape = (1, Chans, Samples))

    ##################################################################
    block1       = Conv2D(F1, (1, kernLength), padding = 'same',
                                   input_shape = (1, Chans, Samples),
                                   use_bias = False)(input1)
    block1       = BatchNormalization(axis = 1)(block1)
    block1       = DepthwiseConv2D((Chans, 1), use_bias = False, 
                                   depth_multiplier = D,
                                   depthwise_constraint = max_norm(1.))(block1)
    block1       = BatchNormalization(axis = 1)(block1)
    block1       = Activation('elu')(block1)
    block1       = AveragePooling2D((1, 4))(block1)
    block1       = dropoutType(dropoutRate)(block1)
    
    block2       = SeparableConv2D(F2, (1, 16),
                                   use_bias = False, padding = 'same')(block1)
    block2       = BatchNormalization(axis = 1)(block2)
    block2       = Activation('elu')(block2)
    block2       = AveragePooling2D((1, 8))(block2)
    block2       = dropoutType(dropoutRate)(block2)
        
    flatten      = Flatten(name = 'flatten')(block2)
    
    dense        = Dense(nb_classes, name = 'dense', 
                         kernel_constraint = max_norm(norm_rate))(flatten)
    softmax      = Activation('softmax', name = 'softmax')(dense)
    
    return Model(inputs=input1, outputs=softmax)
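A brief usage sketch for the default EEGNet-8,2 configuration. The random data, optimizer choice and the channels-first data format setting are assumptions added for illustration:

import numpy as np
from tensorflow.keras import backend as K

K.set_image_data_format('channels_first')   # matches Input((1, Chans, Samples))

model = EEGNet(nb_classes=4, Chans=64, Samples=128, kernLength=64,
               F1=8, D=2, F2=16)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])

# X: (n_trials, 1, 64, 128) EEG trials, y: (n_trials, 4) one-hot labels
X = np.random.randn(32, 1, 64, 128).astype('float32')
y = np.eye(4)[np.random.randint(0, 4, 32)]
model.fit(X, y, batch_size=16, epochs=1)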
Example #11
def DeepConvNet(nb_classes, Chans = 64, Samples = 256,
                dropoutRate = 0.5):
    """ Keras implementation of the Deep Convolutional Network as described in
    Schirrmeister et. al. (2017), Human Brain Mapping.
    
    This implementation assumes the input is a 2-second EEG signal sampled at 
    128Hz, as opposed to signals sampled at 250Hz as described in the original
    paper. We also perform temporal convolutions of length (1, 5) as opposed
    to (1, 10) due to this sampling rate difference. 
    
    Note that we use the max_norm constraint on all convolutional layers, as 
    well as the classification layer. We also change the defaults for the
    BatchNormalization layer. We used this based on a personal communication 
    with the original authors.
    
                      ours        original paper
    pool_size        1, 2        1, 3
    strides          1, 2        1, 3
    conv filters     1, 5        1, 10
    
    Note that this implementation has not been verified by the original 
    authors. 
    
    """

    # start the model
    input_main   = Input((1, Chans, Samples))
    block1       = Conv2D(25, (1, 5), 
                                 input_shape=(1, Chans, Samples),
                                 kernel_constraint = max_norm(2., axis=(0,1,2)))(input_main)
    block1       = Conv2D(25, (Chans, 1),
                                 kernel_constraint = max_norm(2., axis=(0,1,2)))(block1)
    block1       = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block1)
    block1       = Activation('elu')(block1)
    block1       = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block1)
    block1       = Dropout(dropoutRate)(block1)
  
    block2       = Conv2D(50, (1, 5),
                                 kernel_constraint = max_norm(2., axis=(0,1,2)))(block1)
    block2       = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block2)
    block2       = Activation('elu')(block2)
    block2       = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block2)
    block2       = Dropout(dropoutRate)(block2)
    
    block3       = Conv2D(100, (1, 5),
                                 kernel_constraint = max_norm(2., axis=(0,1,2)))(block2)
    block3       = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block3)
    block3       = Activation('elu')(block3)
    block3       = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block3)
    block3       = Dropout(dropoutRate)(block3)
    
    block4       = Conv2D(200, (1, 5),
                                 kernel_constraint = max_norm(2., axis=(0,1,2)))(block3)
    block4       = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block4)
    block4       = Activation('elu')(block4)
    block4       = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block4)
    block4       = Dropout(dropoutRate)(block4)
    
    flatten      = Flatten()(block4)
    
    dense        = Dense(nb_classes, kernel_constraint = max_norm(0.5))(flatten)
    softmax      = Activation('softmax')(dense)
    
    return Model(inputs=input_main, outputs=softmax)
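The EEG model snippets (Examples #4, #6, #10 and #11) use Keras layers and the max_norm constraint without showing their imports. A plausible import block, assuming a tf.keras setup (the original repository may import from standalone Keras instead):

from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, Conv2D, DepthwiseConv2D,
                                     SeparableConv2D, BatchNormalization,
                                     Activation, AveragePooling2D,
                                     MaxPooling2D, Dropout, SpatialDropout2D,
                                     Flatten, Dense)
from tensorflow.keras.constraints import max_norm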