Example #1
# CNN model

tensorboard = TensorBoard(log_dir=f'logs/{NAME}')

model = Sequential()
model.add(Conv2D(30, (5, 5), input_shape=X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(50, (5, 5)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(500))
model.add(Activation("relu"))

model.add(Dense(100))
model.add(Activation("tanh"))

model.add(Dense(27))
model.add(Activation("softmax"))

model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])

model.fit(X,
          y,
          batch_size=32,
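The snippet is cut off inside the fit call. A minimal sketch of how it might be completed, wiring in the TensorBoard callback defined above; the epoch count and validation split are illustrative assumptions, not values from the original:

model.fit(X,
          y,
          batch_size=32,
          epochs=10,                # illustrative value, not from the original
          validation_split=0.1,     # illustrative value
          callbacks=[tensorboard])  # log training to logs/{NAME} for TensorBoard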
Example #2
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")

# split into input (X) and output (Y) variables, splitting csv data
X = dataset[:, 0:8]
Y = dataset[:, 8]

# split X, Y into a train and test set
x_train, x_test, y_train, y_test = train_test_split(X,
                                                    Y,
                                                    test_size=0.2,
                                                    random_state=42)

# create model, add dense layers one by one specifying activation function
model = Sequential()
# input layer requires input_dim param
model.add(Dense(15, input_dim=8, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dropout(.2))
# sigmoid instead of relu for final probability between 0 and 1
model.add(Dense(1, activation='sigmoid'))

# compile the model, adam gradient descent (optimized)
model.compile(loss="binary_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])

# call the function to fit to the data (training the network)
model.fit(x_train,
          y_train,
          epochs=1000,
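This example also ends mid-call. A hedged sketch of finishing training and scoring the held-out test split created above; the batch size and validation split are assumptions:

model.fit(x_train,
          y_train,
          epochs=1000,
          batch_size=32,          # assumed value, not from the original
          validation_split=0.1)   # assumed value

# evaluate on the 20% test split held out earlier
loss, acc = model.evaluate(x_test, y_test, verbose=0)
print("test accuracy: %.3f" % acc)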
Example #3
plt.plot(seq)
plt.show()

validation_data = (np.expand_dims(x_test_scaled, axis=0),
                   np.expand_dims(y_test_scaled, axis=0))

#Create the Recurrent Neural Network
model = Sequential()
model.add(
    GRU(units=512, return_sequences=True, input_shape=(
        None,
        num_x_signals,
    )))

model.add(Dense(num_y_signals, activation='sigmoid'))

if False:
    # Maybe use lower init-ranges.
    init = RandomUniform(minval=-0.05, maxval=0.05)

    model.add(
        Dense(num_y_signals, activation='linear', kernel_initializer=init))

warmup_steps = 20


def loss_mse_warmup(y_true, y_pred):
    """
    Calculate the Mean Squared Error between y_true and y_pred,
    but ignore the beginning "warmup" part of the sequences.
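The function body is truncated here. A minimal sketch of the warmup loss described by the docstring, assuming tensors shaped (batch, time, num_signals) as produced by the expand_dims calls above:

import tensorflow as tf


def loss_mse_warmup(y_true, y_pred):
    """Mean Squared Error that ignores the first `warmup_steps` time-steps."""
    # Slice away the warmup period of every sequence, then average the
    # squared error over what remains (sketch under the shape assumption above).
    y_true_slice = y_true[:, warmup_steps:, :]
    y_pred_slice = y_pred[:, warmup_steps:, :]
    return tf.reduce_mean(tf.square(y_true_slice - y_pred_slice))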
Example #4
def DCN(
    feature_dim_dict,
    embedding_size='auto',
    cross_num=2,
    hidden_size=[
        128,
        128,
    ],
    l2_reg_embedding=1e-5,
    l2_reg_cross=1e-5,
    l2_reg_deep=0,
    init_std=0.0001,
    seed=1024,
    keep_prob=1,
    use_bn=False,
    activation='relu',
    final_activation='sigmoid',
):
    """Instantiates the Deep&Cross Network architecture.

    :param feature_dim_dict: dict, to indicate sparse and dense fields, e.g. {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
    :param embedding_size: positive int or str, sparse feature embedding_size. If set to "auto", it will be 6*pow(cardinality, 0.25)
    :param cross_num: positive integer, number of cross layers
    :param hidden_size: list of positive integers (or an empty list), the number of layers and units per layer in the deep net
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param l2_reg_cross: float. L2 regularizer strength applied to the cross net
    :param l2_reg_deep: float. L2 regularizer strength applied to the deep net
    :param init_std: float, standard deviation used to initialize embedding vectors
    :param seed: integer, to use as random seed.
    :param keep_prob: float in (0,1]. keep_prob used in the deep net
    :param use_bn: bool. Whether to use BatchNormalization before activation in the deep net
    :param activation: activation function to use in the deep net
    :param final_activation: str, output activation, usually ``'sigmoid'`` or ``'linear'``
    :return: A Keras model instance.

    """
    if len(hidden_size) == 0 and cross_num == 0:
        raise ValueError("Either hidden_size or cross_num must be > 0")
    if not isinstance(
            feature_dim_dict, dict
    ) or "sparse" not in feature_dim_dict or "dense" not in feature_dim_dict:
        raise ValueError(
            "feature_dim must be a dict like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_5',]}"
        )

    sparse_input, dense_input = get_input(
        feature_dim_dict,
        None,
    )
    sparse_embedding = get_embeddings(feature_dim_dict, embedding_size,
                                      init_std, seed, l2_reg_embedding)
    embed_list = [
        sparse_embedding[i](sparse_input[i]) for i in range(len(sparse_input))
    ]

    deep_input = Flatten()(Concatenate()(embed_list))
    if len(dense_input) > 0:
        if len(dense_input) == 1:
            continuous_list = dense_input[0]
        else:
            continuous_list = Concatenate()(dense_input)

        deep_input = Concatenate()([deep_input, continuous_list])

    if len(hidden_size) > 0 and cross_num > 0:  # Deep & Cross
        deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob, use_bn,
                       seed)(deep_input)
        cross_out = CrossNet(cross_num, l2_reg=l2_reg_cross)(deep_input)
        stack_out = Concatenate()([cross_out, deep_out])
        final_logit = Dense(1, use_bias=False, activation=None)(stack_out)
    elif len(hidden_size) > 0:  # Only Deep
        deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob, use_bn,
                       seed)(deep_input)
        final_logit = Dense(1, use_bias=False, activation=None)(deep_out)
    elif cross_num > 0:  # Only Cross
        cross_out = CrossNet(cross_num, l2_reg=l2_reg_cross)(deep_input)
        final_logit = Dense(1, use_bias=False, activation=None)(cross_out)
    else:  # Error
        raise NotImplementedError

    # Activation(self.final_activation)(final_logit)
    output = PredictionLayer(final_activation)(final_logit)
    model = Model(inputs=sparse_input + dense_input, outputs=output)

    return model
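A hypothetical usage sketch of DCN. The field names and cardinalities below are invented, and it assumes the surrounding helpers (get_input, get_embeddings, MLP, CrossNet, PredictionLayer) are importable alongside the function:

# Hypothetical feature specification (field names and sizes are invented)
feature_dim_dict = {
    'sparse': {'user_id': 1000, 'item_id': 500, 'category': 20},
    'dense': [],
}
model = DCN(feature_dim_dict, embedding_size=8, cross_num=2,
            hidden_size=[128, 128])
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()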
Example #5
X_train = np.array(X_train)
X_test = np.array(X_test)

# One-Hot encoding for classes
y_train_saved = y_train
y_train = np.array(keras.utils.to_categorical(y_train, 2))
y_test_saved = y_test
y_test = np.array(keras.utils.to_categorical(y_test, 2))

print(X_train.shape)
print(X_test.shape)

# keras.utils.normalize could be used here
model = Sequential()
model.add(TimeDistributed(Dense(128), input_shape=(100, 320)))
model.add(Activation('relu'))
model.add(Dropout(rate=0.5))

model.add(TimeDistributed(Dense(128)))
model.add(Activation('relu'))
model.add(Dropout(rate=0.5))

model.add(TimeDistributed(Dense(128), name='dense_inter'))
model.add(Activation('relu'))
model.add(Dropout(rate=0.5))

model.add(TimeDistributed(Dense(2)))
model.add(Activation('sigmoid'))

time_distributed_merge_layer = tf.keras.layers.Lambda(
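The last line is cut off mid-expression. One common choice for a time-distributed merge layer is to average the per-timestep outputs over the time axis; a sketch assuming that intent:

# Sketch (assumption): average the (batch, 100, 2) per-timestep outputs over time
time_distributed_merge_layer = tf.keras.layers.Lambda(
    lambda x: tf.reduce_mean(x, axis=1), name='merge_over_time')
model.add(time_distributed_merge_layer)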
Example #6
def train_CNN(combined_train, Y_train, combined_test, Y_test, batch_size,
              drop_out, lr, epochs):
    '''
    Input X_train,X_test: shape (number of sequences,4,2,41)
    Y_train,Y_test: (number of sequences,)
    batch_size: batch size for training LSTM
    drop_out: drop_out rate for LSTM layers
    lr: learning rate of Adam optimizer
    epochs: training epochs

    This function defines the structure of the CNN-LSTM model and trains it with the Adam optimizer.
    Output: a real number, the average validation MSE over 3-fold cross-validation.
    '''
    def create_CNN(serie_size, n_features, drop_out):
        inputs = Input(shape=(serie_size, n_features))
        x = (Conv1D(64,
                    kernel_size=2,
                    input_shape=(serie_size, n_features),
                    padding='same',
                    activation='relu'))(inputs)
        x = (Dropout(drop_out))(x)
        x = (Conv1D(32, kernel_size=2, padding='same', activation='relu'))(x)
        x = (MaxPooling1D(pool_size=2))(x)
        x = (Dropout(drop_out))(x)
        model = Model(inputs, x)
        return model

    kf = KFold(n_splits=3)
    score = []
    for train_index, test_index in kf.split(combined_train[0]):
        x_train = [i[train_index] for i in combined_train]
        x_valid = [i[test_index] for i in combined_train]
        print(len(x_train))
        print(len(x_train[0][0]))
        y_train, y_valid = np.array(Y_train)[train_index], np.array(
            Y_train)[test_index]
        temp_1 = create_CNN(2, 41, drop_out)
        temp_3 = create_CNN(2, 41, drop_out)
        temp_2 = create_CNN(2, 41, drop_out)
        temp_4 = create_CNN(2, 41, drop_out)
        optimizer = optimizers.Adam(lr=lr,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    decay=0.01)
        init = glorot_normal(seed=None)
        init1 = RandomUniform(minval=-0.05, maxval=0.05)
        ## 10 * 32
        combinedInput = concatenate(
            [temp_1.output, temp_2.output, temp_3.output, temp_4.output])
        combinedInput = Reshape((4, 32), input_shape=(4 * 32, ))(combinedInput)
        lstm = LSTM(units=16,
                    dropout=drop_out,
                    recurrent_dropout=drop_out,
                    return_sequences=False,
                    kernel_initializer=init)
        output = lstm(combinedInput)
        output = Dense(1, activation='linear',
                       kernel_initializer=init1)(output)
        model = Model(
            inputs=[temp_1.input, temp_2.input, temp_3.input, temp_4.input],
            outputs=output)
        model.compile(loss='mean_squared_error', optimizer=optimizer)
        model.summary()
        train_history = model.fit(x=[i for i in x_train],
                                  y=y_train,
                                  epochs=epochs,
                                  validation_data=(x_valid, y_valid),
                                  batch_size=batch_size)
        val_loss = train_history.history['val_loss'][-1]
        score.append(val_loss)
    return (np.mean(score))
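A hypothetical call, only to illustrate the input layout the function indexes: combined_train as a list of four arrays of shape (n_seq, 2, 41). All data below is random and the hyperparameters are arbitrary:

import numpy as np

n_seq = 90
combined_train = [np.random.rand(n_seq, 2, 41) for _ in range(4)]
Y_train = np.random.rand(n_seq)
# the test-set arguments are passed through but not used inside the CV loop
avg_val_mse = train_CNN(combined_train, Y_train, combined_train, Y_train,
                        batch_size=16, drop_out=0.2, lr=1e-3, epochs=2)
print(avg_val_mse)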
Example #7
def EEGNet_old(nb_classes,
               Chans=64,
               Samples=128,
               regRate=0.0001,
               dropoutRate=0.25,
               kernels=[(2, 32), (8, 4)],
               strides=(2, 4)):
    """ Keras Implementation of EEGNet_v1 (https://arxiv.org/abs/1611.08024v2)

    This model is the original EEGNet model proposed on arxiv
            https://arxiv.org/abs/1611.08024v2
    
    with a few modifications: we use striding instead of max-pooling as this 
    helped slightly in classification performance while also providing a 
    computational speed-up. 
    
    Note that we no longer recommend the use of this architecture, as the new
    version of EEGNet performs much better overall and has nicer properties.
    
    Inputs:
        
        nb_classes     : total number of final categories
        Chans, Samples : number of EEG channels and samples, respectively
        regRate        : regularization rate for L1 and L2 regularizations
        dropoutRate    : dropout fraction
        kernels        : the 2nd and 3rd layer kernel dimensions (default is 
                         the [2, 32] x [8, 4] configuration)
        strides        : the stride size (note that this replaces the max-pool
                         used in the original paper)
    
    """

    # start the model
    input_main = Input((1, Chans, Samples))
    layer1 = Conv2D(16, (Chans, 1),
                    input_shape=(1, Chans, Samples),
                    kernel_regularizer=l1_l2(l1=regRate,
                                             l2=regRate))(input_main)
    layer1 = BatchNormalization(axis=1)(layer1)
    layer1 = Activation('elu')(layer1)
    layer1 = Dropout(dropoutRate)(layer1)

    permute_dims = 2, 1, 3
    permute1 = Permute(permute_dims)(layer1)

    layer2 = Conv2D(4,
                    kernels[0],
                    padding='same',
                    kernel_regularizer=l1_l2(l1=0.0, l2=regRate),
                    strides=strides)(permute1)
    layer2 = BatchNormalization(axis=1)(layer2)
    layer2 = Activation('elu')(layer2)
    layer2 = Dropout(dropoutRate)(layer2)

    layer3 = Conv2D(4,
                    kernels[1],
                    padding='same',
                    kernel_regularizer=l1_l2(l1=0.0, l2=regRate),
                    strides=strides)(layer2)
    layer3 = BatchNormalization(axis=1)(layer3)
    layer3 = Activation('elu')(layer3)
    layer3 = Dropout(dropoutRate)(layer3)

    flatten = Flatten(name='flatten')(layer3)

    dense = Dense(nb_classes, name='dense')(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input_main, outputs=softmax)
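A brief usage sketch. The Input shape (1, Chans, Samples) implies channels-first data, so the sketch sets the backend image data format accordingly; the compile settings are assumptions, not part of the original:

from tensorflow.keras import backend as K

K.set_image_data_format('channels_first')  # model expects (1, Chans, Samples)
model = EEGNet_old(nb_classes=4, Chans=64, Samples=128)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()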
Example #8
def DIN(
    feature_dim_dict,
    seq_feature_list,
    embedding_size=8,
    hist_len_max=16,
    use_din=True,
    use_bn=False,
    hidden_size=(200, 80),
    activation='relu',
    att_hidden_size=(80, 40),
    att_activation=Dice,
    att_weight_normalization=False,
    l2_reg_deep=0,
    l2_reg_embedding=1e-5,
    final_activation='sigmoid',
    keep_prob=1,
    init_std=0.0001,
    seed=1024,
):
    """Instantiates the Deep Interest Network architecture.

    :param feature_dim_dict: dict, to indicate sparse fields (**now only support sparse features**), e.g. {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':[]}
    :param seq_feature_list: list, to indicate sequence sparse fields (**now only support sparse features**), must be a subset of ``feature_dim_dict["sparse"]``
    :param embedding_size: positive integer, sparse feature embedding_size.
    :param hist_len_max: positive int, to indicate the max length of the sequence input
    :param use_din: bool, whether to use DIN pooling or not. If set to ``False``, use **sum pooling**
    :param use_bn: bool. Whether to use BatchNormalization before activation in the deep net
    :param hidden_size: list of positive integers (or an empty list), the number of layers and units per layer in the deep net
    :param activation: activation function to use in the deep net
    :param att_hidden_size: list of positive integers, the number of layers and units per layer in the attention net
    :param att_activation: activation function to use in the attention net
    :param att_weight_normalization: bool. Whether to normalize the attention score of the local activation unit.
    :param l2_reg_deep: float. L2 regularizer strength applied to the deep net
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param final_activation: str, output activation, usually ``'sigmoid'`` or ``'linear'``
    :param keep_prob: float in (0,1]. keep_prob used in the deep net
    :param init_std: float, standard deviation used to initialize embedding vectors
    :param seed: integer, to use as random seed.
    :return: A Keras model instance.

    """
    for feature_dim_dict in [feature_dim_dict]:
        if not isinstance(
                feature_dim_dict, dict
        ) or "sparse" not in feature_dim_dict or "dense" not in feature_dim_dict:
            raise ValueError(
                "feature_dim must be a dict like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_5',]}"
            )
    if len(feature_dim_dict['dense']) > 0:
        raise ValueError('Now DIN only support sparse input')
    sparse_input, user_behavior_input, user_behavior_length = get_input(
        feature_dim_dict, seq_feature_list, hist_len_max)
    sparse_embedding_dict = {
        feat: Embedding(feature_dim_dict["sparse"][feat],
                        embedding_size,
                        embeddings_initializer=RandomNormal(mean=0.0,
                                                            stddev=init_std,
                                                            seed=seed),
                        embeddings_regularizer=l2(l2_reg_embedding),
                        name='sparse_emb_' + str(i) + '-' + feat)
        for i, feat in enumerate(feature_dim_dict["sparse"])
    }
    query_emb_list = [
        sparse_embedding_dict[feat](sparse_input[feat])
        for feat in seq_feature_list
    ]
    keys_emb_list = [
        sparse_embedding_dict[feat](user_behavior_input[feat])
        for feat in seq_feature_list
    ]
    deep_input_emb_list = [
        sparse_embedding_dict[feat](sparse_input[feat])
        for feat in feature_dim_dict["sparse"]
    ]

    query_emb = Concatenate()(
        query_emb_list) if len(query_emb_list) > 1 else query_emb_list[0]
    keys_emb = Concatenate()(
        keys_emb_list) if len(keys_emb_list) > 1 else keys_emb_list[0]
    deep_input_emb = Concatenate()(deep_input_emb_list) if len(
        deep_input_emb_list) > 1 else deep_input_emb_list[0]

    if use_din:
        hist = AttentionSequencePoolingLayer(
            att_hidden_size,
            att_activation,
            weight_normalization=att_weight_normalization)(
                [query_emb, keys_emb, user_behavior_length])
    else:
        hist = SequencePoolingLayer(hist_len_max,
                                    'sum')([keys_emb, user_behavior_length])

    deep_input_emb = Concatenate()([deep_input_emb, hist])
    output = MLP(hidden_size, activation, l2_reg_deep, keep_prob, use_bn,
                 seed)(deep_input_emb)
    output = Dense(1, final_activation)(output)
    output = Reshape([1])(output)
    model_input_list = list(sparse_input.values()) + list(
        user_behavior_input.values()) + [user_behavior_length]

    model = Model(inputs=model_input_list, outputs=output)
    return model
Example #9
def VGG_16(spatial_size,
           classes,
           channels,
           channel_first=True,
           weights_path=None):
    model = Sequential()
    if channel_first:
        model.add(
            ZeroPadding2D((1, 1),
                          input_shape=(channels, spatial_size, spatial_size)))
    else:
        model.add(
            ZeroPadding2D((1, 1),
                          input_shape=(spatial_size, spatial_size, channels)))

    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # 33

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))  # 34
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))  # 35
    model.add(Dropout(0.5))
    model.add(Dense(2622, activation='softmax'))  # Dropped

    if weights_path:
        model.load_weights(weights_path)
    model.pop()
    model.add(Dense(classes, activation='softmax'))  # 36

    return model
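A hypothetical usage sketch of VGG_16. With weights_path=None the pop/re-add above still runs, so the returned model ends in a `classes`-way softmax; the input size and class count are illustrative:

# Hypothetical usage: 224x224 RGB input, channels-last, 5 target classes
model = VGG_16(spatial_size=224, classes=5, channels=3,
               channel_first=False, weights_path=None)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()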
Example #10
    def create_VGG_net(self, raw=120, column=320, channel=1):
        print('create VGG model!!')

        inputShape = (raw, column, channel)

        init = 'he_normal'
        # init = 'glorot_normal'
        activation = 'relu'
        keep_prob_conv = 0.25
        keep_prob_dense = 0.5

        chanDim = -1
        classes = 8

        model = Sequential()

        # CONV => RELU => POOL
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape, kernel_initializer=init))
        model.add(Activation(activation))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(3, 3)))
        model.add(Dropout(keep_prob_conv))

        # (CONV => RELU) * 2 => POOL
        model.add(Conv2D(64, (3, 3), padding="same", kernel_initializer=init))
        model.add(Activation(activation))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), padding="same", kernel_initializer=init))
        model.add(Activation(activation))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(keep_prob_conv))

        # (CONV => RELU) * 2 => POOL
        model.add(Conv2D(128, (3, 3), padding="same", kernel_initializer=init))
        model.add(Activation(activation))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(128, (3, 3), padding="same", kernel_initializer=init))
        model.add(Activation(activation))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(keep_prob_conv))

        # (CONV => RELU) * 2 => POOL
        model.add(Conv2D(128, (3, 3), padding="same", kernel_initializer=init))
        model.add(Activation(activation))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(128, (3, 3), padding="same", kernel_initializer=init))
        model.add(Activation(activation))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(keep_prob_conv))

        # first (and only) set of FC => RELU layers
        model.add(Flatten())
        model.add(Dense(1024, kernel_initializer=init))
        model.add(Activation(activation))
        model.add(BatchNormalization())
        model.add(Dropout(keep_prob_dense))

        # softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        # return the constructed network architecture
        self.model = model
Example #11
                        help="dimension of model ouput")

    args = parser.parse_args()
    dataloader = Dataloader(args)

    p = PointEmbedding(args)
    p_model = p.build()
    loss_func = p.custom_loss()

    x1 = Input((28, 28))
    x2 = Input((28, 28))
    x1_r = K.reshape(x1, (-1, 28, 28, 1))
    x2_r = K.reshape(x2, (-1, 28, 28, 1))
    pair1 = K.concatenate([x1_r, x2_r], axis=2)

    x3 = Input((28, 28))
    x4 = Input((28, 28))
    x3_r = K.reshape(x3, (-1, 28, 28, 1))
    x4_r = K.reshape(x4, (-1, 28, 28, 1))
    pair2 = K.concatenate([x3_r, x4_r], axis=2)

    output = Conv2D(1, (5, 5), (2, 2))(pair1)
    output = MaxPooling2D((2, 2), (1, 1))(output)
    output = Conv2D(1, (5, 5), (2, 2))(output)

    output = MaxPooling2D((2, 2), (1, 1))(output)
    output = Flatten()(output)
    output = Dense(2)(output)
    print(K.square(output))
    print(K.sum(K.square(output), axis=-1))
    print(K.sigmoid(K.sum(K.square(output), axis=-1)))
Example #12
yp = dfprod[:,1:]
xc = dfcont[:,0:-1]
yc = dfcont[:,1:]

z = 1000000

xptrain = xp[0:z, :]
yptrain = yp[0:z, :]
xptest = xp[z:, :]
yptest = yp[z:, :]

vocab = np.max(dfprod) + 1
inp = Input(shape=(19,))
emb = Embedding(input_dim=vocab, mask_zero=True, output_dim=100,
                input_length=len(xp[0]))(inp)
g = GRU(units=100, return_sequences=True)(emb)
d = Dense(vocab, activation='softmax')(g)
model = Model(inputs=inp, outputs=d)
optimizeradam = tf.train.AdamOptimizer()
model.compile(loss='categorical_crossentropy', optimizer=optimizeradam,
              metrics=[tkca])
categorical_labels = to_categorical(yptrain)
categorical_labels2 = to_categorical(yptest)
model.fit(xptrain, categorical_labels, epochs=5, batch_size=1)
print(model.evaluate(xptest, categorical_labels2))

# model = Sequential()
# model.add(Embedding(input_dim=vocab,mask_zero=True,output_dim= 100, input_length=len(xp[0])))
# model.add(GRU(units=100, return_sequences= True))
# model.add(Dense(vocab, activation='softmax'))
# categorical_labels = to_categorical(yp)
# model.compile(loss='categorical_crossentropy', optimizer='adam', metrics= ['acc'])
# model.summary()
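The metric `tkca` used in the compile call above is not defined in the snippet; it presumably refers to a top-k categorical accuracy. One way it might have been defined (an assumption, not from the original):

# Assumption: tkca as top-3 categorical accuracy; the original definition is not shown
from tensorflow.keras.metrics import top_k_categorical_accuracy


def tkca(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=3)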
Example #13
def EEGNet(nb_classes,
           Chans=64,
           Samples=128,
           dropoutRate=0.5,
           kernLength=64,
           F1=8,
           D=2,
           F2=16,
           norm_rate=0.25,
           dropoutType='Dropout'):
    """ Keras Implementation of EEGNet
    http://iopscience.iop.org/article/10.1088/1741-2552/aace8c/meta

    Note that this implements the newest version of EEGNet and NOT the earlier
    version (version v1 and v2 on arxiv). We strongly recommend using this
    architecture as it performs much better and has nicer properties than
    our earlier version. For example:
        
        1. Depthwise Convolutions to learn spatial filters within a 
        temporal convolution. The use of the depth_multiplier option maps 
        exactly to the number of spatial filters learned within a temporal
        filter. This matches the setup of algorithms like FBCSP which learn 
        spatial filters within each filter in a filter-bank. This also limits 
        the number of free parameters to fit when compared to a fully-connected
        convolution. 
        
        2. Separable Convolutions to learn how to optimally combine spatial
        filters across temporal bands. Separable Convolutions are Depthwise
        Convolutions followed by (1x1) Pointwise Convolutions. 
        
    
    While the original paper used Dropout, we found that SpatialDropout2D 
    sometimes produced slightly better results for classification of ERP 
    signals. However, SpatialDropout2D significantly reduced performance 
    on the Oscillatory dataset (SMR, BCI-IV Dataset 2A). We recommend using
    the default Dropout in most cases.
        
    Assumes the input signal is sampled at 128Hz. If you want to use this model
    for any other sampling rate you will need to modify the lengths of temporal
    kernels and average pooling size in blocks 1 and 2 as needed (double the 
    kernel lengths for double the sampling rate, etc). Note that we haven't 
    tested the model performance with this rule so this may not work well. 
    
    The model with default parameters gives the EEGNet-8,2 model as discussed
    in the paper. This model should do pretty well in general, although it is
    advised to do some model searching to get optimal performance on your
    particular dataset.

    We set F2 = F1 * D (number of input filters = number of output filters) for
    the SeparableConv2D layer. We haven't extensively tested other values of this
    parameter (say, F2 < F1 * D for compressed learning, and F2 > F1 * D for
    overcomplete). We believe the main parameters to focus on are F1 and D. 

    Inputs:
        
      nb_classes      : int, number of classes to classify
      Chans, Samples  : number of channels and time points in the EEG data
      dropoutRate     : dropout fraction
      kernLength      : length of temporal convolution in first layer. We found
                        that setting this to be half the sampling rate worked
                        well in practice. For the SMR dataset in particular
                        since the data was high-passed at 4Hz we used a kernel
                        length of 32.     
      F1, F2          : number of temporal filters (F1) and number of pointwise
                        filters (F2) to learn. Default: F1 = 8, F2 = F1 * D. 
      D               : number of spatial filters to learn within each temporal
                        convolution. Default: D = 2
      dropoutType     : Either SpatialDropout2D or Dropout, passed as a string.

    """

    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    input1 = Input(shape=(1, Chans, Samples))

    ##################################################################
    block1 = Conv2D(F1, (1, kernLength), padding='same',
                    use_bias=False)(input1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = DepthwiseConv2D((Chans, 1),
                             use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.))(block1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 4))(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, (1, 16), padding='same',
                             use_bias=False)(block1)
    block2 = BatchNormalization(axis=1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 8))(block2)
    block2 = dropoutType(dropoutRate)(block2)

    flatten = Flatten(name='flatten')(block2)

    dense = Dense(nb_classes,
                  name='dense',
                  kernel_constraint=max_norm(norm_rate))(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
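A brief usage sketch showing the dropoutType switch discussed in the docstring; as with the older model, the (1, Chans, Samples) input implies channels-first data. The compile settings are assumptions:

from tensorflow.keras import backend as K

K.set_image_data_format('channels_first')  # model expects (1, Chans, Samples)

# default EEGNet-8,2 versus a SpatialDropout2D variant
model = EEGNet(nb_classes=4, Chans=64, Samples=128)
model_sd = EEGNet(nb_classes=4, Chans=64, Samples=128,
                  dropoutType='SpatialDropout2D')
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()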
Example #14
def DeepConvNet(nb_classes, Chans=64, Samples=256, dropoutRate=0.5):
    """ Keras implementation of the Deep Convolutional Network as described in
    Schirrmeister et. al. (2017), Human Brain Mapping.
    
    This implementation assumes the input is a 2-second EEG signal sampled at 
    128Hz, as opposed to signals sampled at 250Hz as described in the original
    paper. We also perform temporal convolutions of length (1, 5) as opposed
    to (1, 10) due to this sampling rate difference. 
    
    Note that we use the max_norm constraint on all convolutional layers, as 
    well as the classification layer. We also change the defaults for the
    BatchNormalization layer. We used this based on a personal communication 
    with the original authors.
    
                      ours        original paper
    pool_size        1, 2        1, 3
    strides          1, 2        1, 3
    conv filters     1, 5        1, 10
    
    Note that this implementation has not been verified by the original 
    authors. 
    
    """

    # start the model
    input_main = Input((1, Chans, Samples))
    block1 = Conv2D(25, (1, 5),
                    input_shape=(1, Chans, Samples),
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(input_main)
    block1 = Conv2D(25, (Chans, 1),
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(block1)
    block1 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block1)
    block1 = Activation('elu')(block1)
    block1 = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block1)
    block1 = Dropout(dropoutRate)(block1)

    block2 = Conv2D(50, (1, 5),
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(block1)
    block2 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block2)
    block2 = Activation('elu')(block2)
    block2 = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block2)
    block2 = Dropout(dropoutRate)(block2)

    block3 = Conv2D(100, (1, 5),
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(block2)
    block3 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block3)
    block3 = Activation('elu')(block3)
    block3 = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block3)
    block3 = Dropout(dropoutRate)(block3)

    block4 = Conv2D(200, (1, 5),
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(block3)
    block4 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block4)
    block4 = Activation('elu')(block4)
    block4 = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block4)
    block4 = Dropout(dropoutRate)(block4)

    flatten = Flatten()(block4)

    dense = Dense(nb_classes, kernel_constraint=max_norm(0.5))(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=input_main, outputs=softmax)
Example #15
def train_Combined(combined_train, Y_train, combined_test, Y_test, batch_size,
                   drop_out, lr, epochs):
    '''
    Input X_train,X_test: shape (10,number of sequences,17/24)
    Y_train,Y_test: (number of sequences,)
    batch_size: batch size for training LSTM
    drop_out: drop_out rate for LSTM layers
    lr: learning rate of Adam optimizer
    epochs: training epochs

    This function defines the structure of the MultiInput-LSTM model and trains it with the Adam optimizer.
    Output: a real number, the average validation MSE over 3-fold cross-validation.
    '''
    def create_dense(serie_size, n_features, drop_out):
        inputs = Input(shape=(serie_size, n_features))
        x = Dense(10, activation='relu')(inputs)
        x = Dense(8, activation='relu')(x)
        x = Flatten()(x)
        model = Model(inputs, x)
        return model

    kf = KFold(n_splits=3)
    score = []
    for train_index, test_index in kf.split(combined_train[0]):
        print(train_index)
        x_train = [i[train_index] for i in combined_train]
        x_valid = [i[test_index] for i in combined_train]
        y_train, y_valid = np.array(Y_train)[train_index], np.array(
            Y_train)[test_index]

        serie_size = 1
        n_features_gate = len(combined_train[0][0][0])
        n_features_net = len(combined_train[1][0][0])
        print(n_features_net)
        gate_1 = create_dense(serie_size, n_features_gate, drop_out)
        gate_3 = create_dense(serie_size, n_features_gate, drop_out)
        gate_5 = create_dense(serie_size, n_features_gate, drop_out)
        gate_7 = create_dense(serie_size, n_features_gate, drop_out)
        gate_9 = create_dense(serie_size, n_features_gate, drop_out)
        net_2 = create_dense(serie_size, n_features_net, drop_out)
        net_4 = create_dense(serie_size, n_features_net, drop_out)
        net_6 = create_dense(serie_size, n_features_net, drop_out)
        net_8 = create_dense(serie_size, n_features_net, drop_out)
        net_10 = create_dense(serie_size, n_features_net, drop_out)
        optimizer = optimizers.Adam(lr=lr,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    decay=0.01)
        init = glorot_normal(seed=None)
        init1 = RandomUniform(minval=-0.05, maxval=0.05)
        ## 10 * 32
        print(gate_1.output)
        combinedInput = concatenate([
            gate_1.output, net_2.output, gate_3.output, net_4.output,
            gate_5.output, net_6.output, gate_7.output, net_8.output,
            gate_9.output, net_10.output
        ])
        combinedInput = Reshape((10, 8), input_shape=(8 * 10, ))(combinedInput)
        lstm = LSTM(units=16,
                    dropout=drop_out,
                    recurrent_dropout=drop_out,
                    return_sequences=False,
                    kernel_initializer=init)
        output = lstm(combinedInput)
        output = Dense(1, activation='linear',
                       kernel_initializer=init1)(output)
        model = Model(inputs=[
            gate_1.input, net_2.input, gate_3.input, net_4.input,
            gate_5.input, net_6.input, gate_7.input, net_8.input,
            gate_9.input, net_10.input
        ], outputs=output)
        model.compile(loss='mean_squared_error', optimizer=optimizer)
        model.summary()
        train_history = model.fit(x=[i for i in x_train],
                                  y=y_train,
                                  epochs=epochs,
                                  validation_data=(x_valid, y_valid),
                                  batch_size=batch_size)
        val_loss = train_history.history['val_loss'][-1]
        score.append(val_loss)
    return (np.mean(score))
Example #16
def AFM(
    feature_dim_dict,
    embedding_size=8,
    use_attention=True,
    attention_factor=8,
    l2_reg_linear=1e-5,
    l2_reg_embedding=1e-5,
    l2_reg_att=1e-5,
    keep_prob=1.0,
    init_std=0.0001,
    seed=1024,
    final_activation='sigmoid',
):
    """Instantiates the Attentonal Factorization Machine architecture.

    :param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
    :param embedding_size: positive integer,sparse feature embedding_size
    :param use_attention: bool,whether use attention or not,if set to ``False``.it is the same as **standard Factorization Machine**
    :param attention_factor: positive integer,units in attention net
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_att: float. L2 regularizer strength applied to attention net
    :param keep_prob: float in (0,1]. keep_prob after attention net
    :param init_std: float,to use as the initialize std of embedding vector
    :param seed: integer ,to use as random seed.
    :param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'``
    :return: A Keras model instance.
    """

    if not isinstance(
            feature_dim_dict, dict
    ) or "sparse" not in feature_dim_dict or "dense" not in feature_dim_dict:
        raise ValueError(
            "feature_dim_dict must be a dict like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}"
        )
    if not isinstance(feature_dim_dict["sparse"], dict):
        raise ValueError("feature_dim_dict['sparse'] must be a dict,cur is",
                         type(feature_dim_dict['sparse']))
    if not isinstance(feature_dim_dict["dense"], list):
        raise ValueError("feature_dim_dict['dense'] must be a list,cur is",
                         type(feature_dim_dict['dense']))

    sparse_input, dense_input = get_input(feature_dim_dict, None)
    sparse_embedding, linear_embedding = get_embeddings(
        feature_dim_dict, embedding_size, init_std, seed, l2_reg_embedding,
        l2_reg_linear)

    embed_list = [
        sparse_embedding[i](sparse_input[i]) for i in range(len(sparse_input))
    ]
    linear_term = [
        linear_embedding[i](sparse_input[i]) for i in range(len(sparse_input))
    ]
    if len(linear_term) > 1:
        linear_term = add(linear_term)
    elif len(linear_term) > 0:
        linear_term = linear_term[0]
    else:
        linear_term = 0

    if len(dense_input) > 0:
        continuous_embedding_list = list(
            map(
                Dense(
                    embedding_size,
                    use_bias=False,
                    kernel_regularizer=l2(l2_reg_embedding),
                ), dense_input))
        continuous_embedding_list = list(
            map(Reshape((1, embedding_size)), continuous_embedding_list))
        embed_list += continuous_embedding_list

        dense_input_ = dense_input[0] if len(
            dense_input) == 1 else Concatenate()(dense_input)
        linear_dense_logit = Dense(
            1,
            activation=None,
            use_bias=False,
            kernel_regularizer=l2(l2_reg_linear))(dense_input_)
        linear_term = add([linear_dense_logit, linear_term])

    fm_input = Concatenate(axis=1)(embed_list)
    if use_attention:
        fm_out = AFMLayer(attention_factor, l2_reg_att, keep_prob,
                          seed)(embed_list)
    else:
        fm_out = FM()(fm_input)

    final_logit = add([linear_term, fm_out])
    output = PredictionLayer(final_activation)(final_logit)
    model = Model(inputs=sparse_input + dense_input, outputs=output)
    return model
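A hypothetical usage sketch of AFM; the field names and sizes are invented. As the docstring notes, use_attention=False reduces the model to a standard Factorization Machine:

# Hypothetical feature specification (field names and sizes are invented)
feature_dim_dict = {
    'sparse': {'user_id': 1000, 'item_id': 500, 'category': 20},
    'dense': [],
}
afm = AFM(feature_dim_dict, embedding_size=8, use_attention=True)
fm = AFM(feature_dim_dict, embedding_size=8, use_attention=False)
afm.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])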
Example #17
def train_single_LSTM(X_train, Y_train, X_test, Y_test, batch_size, drop_out,
                      lr, epochs):
    '''
    Input X_train,X_test: shape(number of sequences,4 sliding window,(24+17)*2 features)
    Y_train,Y_test: (number of sequences,1)
    batch_size: batch size for training LSTM
    drop_out: drop_out rate for LSTM layers
    lr: learning rate of Adam optimizer
    epochs: training epochs

    This function defines the structure of the LSTM model and trains it with the Adam optimizer.
    Output: a real number, the average validation MSE over 3-fold cross-validation.

    '''
    serie_size = len(X_train[0])  # 4
    n_features = len(X_train[0][0])  # 82
    #     epochs = parameter_dict['epochs']
    #     batch =  parameter_dict['batch']
    #     lr =  parameter_dict['lr']
    ## input shape : samples, time steps, and features.
    lstm_model = Sequential()
    optimizer = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, decay=0.01)
    #optimizer = RMSprop(lr=0.0005, rho=0.9, epsilon=None, decay=0.0)
    init = glorot_normal(seed=None)
    init1 = RandomUniform(minval=-0.05, maxval=0.05)
    lstm_model.add(
        LSTM(units=128,
             dropout=drop_out,
             recurrent_dropout=drop_out,
             input_shape=(serie_size, n_features),
             return_sequences=True,
             kernel_initializer=init))
    lstm_model.add(
        LSTM(units=64,
             dropout=drop_out,
             recurrent_dropout=drop_out,
             return_sequences=False,
             kernel_initializer=init))
    lstm_model.add(Dense(1, activation='linear', kernel_initializer=init1))
    lstm_model.compile(loss='mean_squared_error', optimizer=optimizer)
    lstm_model.summary()
    kf = KFold(n_splits=3)
    score = []
    for train_index, test_index in kf.split(X_train):
        x_train, x_valid = np.array(X_train)[train_index], np.array(
            X_train)[test_index]
        y_train, y_valid = np.array(Y_train)[train_index], np.array(
            Y_train)[test_index]
        x_train = np.asarray(x_train)
        y_train = np.asarray(y_train)
        x_valid = np.asarray(x_valid)
        y_valid = np.asarray(y_valid)
        x_test = np.asarray(X_test)
        train_history = lstm_model.fit(x_train,
                                       y_train,
                                       epochs=epochs,
                                       batch_size=batch_size,
                                       validation_data=(x_valid, y_valid),
                                       verbose=0,
                                       shuffle=True)
        loss = train_history.history['loss']
        val_loss = train_history.history['val_loss']
        plt.plot(loss)
        plt.plot(val_loss)
        plt.legend(['loss', 'val_loss'])
        plt.show()
        #         lstm_model.reset_states()
        lstm_model.save('my_model.h5')
        model = load_model('my_model.h5')
        x_test = np.asarray(X_test)
        #predicted = model.predict(x_test,batch_size=1)
        score.append(val_loss[-1])
    return np.mean(score)
Example #18
                                                    test_size=0.2,
                                                    random_state=0)
x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                  y_train,
                                                  test_size=0.2,
                                                  random_state=0)

import tensorflow
import keras
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense

classifier = Sequential()

classifier.add(
    Dense(83, kernel_initializer="uniform", activation='relu', input_dim=166))
classifier.add(Dense(83, kernel_initializer="uniform", activation='relu'))
classifier.add(Dense(83, kernel_initializer="uniform", activation='relu'))
classifier.add(
    Dense(1, kernel_initializer="uniform", activation='sigmoid'))
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
r = classifier.fit(x_train,
                   y_train,
                   batch_size=10,
                   epochs=80,
                   validation_data=(x_val, y_val))

print(r.history.keys())
model = Sequential()

model.add(Convolution2D(8, 3, 1, activation='relu',
                        input_shape=(100, 100, 1), padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(16, 3, 1, activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(32, 3, 1, activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))

model.compile(loss='mean_squared_error',
              optimizer='Adam',
              metrics=['accuracy'])

# 9. Fit model on training data
history = model.fit(X_train, Y_train,
                    batch_size=32, epochs=100, verbose=1,
                    validation_data=(X_test, Y_test))
# 10. Evaluate model on test data
score = model.evaluate(X_test, Y_test, verbose=0)

# 11. Our scoring
print('This network scored an average val_acc of ' +
      str(np.mean(history.history['val_acc'][-10:])) +
      ' in the last 10 epochs')
Example #20
def EEGNet_SSVEP(nb_classes=12,
                 Chans=8,
                 Samples=256,
                 dropoutRate=0.5,
                 kernLength=256,
                 F1=96,
                 D=1,
                 F2=96,
                 dropoutType='Dropout'):
    """ SSVEP Variant of EEGNet, as used in [1]. 

    Inputs:
        
      nb_classes      : int, number of classes to classify
      Chans, Samples  : number of channels and time points in the EEG data
      dropoutRate     : dropout fraction
      kernLength      : length of temporal convolution in first layer
      F1, F2          : number of temporal filters (F1) and number of pointwise
                        filters (F2) to learn. 
      D               : number of spatial filters to learn within each temporal
                        convolution.
      dropoutType     : Either SpatialDropout2D or Dropout, passed as a string.
      
      
    [1]. Waytowich, N. et. al. (2018). Compact Convolutional Neural Networks
    for Classification of Asynchronous Steady-State Visual Evoked Potentials.
    Journal of Neural Engineering vol. 15(6). 
    http://iopscience.iop.org/article/10.1088/1741-2552/aae5d8

    """

    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    input1 = Input(shape=(1, Chans, Samples))

    ##################################################################
    block1 = Conv2D(F1, (1, kernLength),
                    padding='same',
                    input_shape=(1, Chans, Samples),
                    use_bias=False)(input1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = DepthwiseConv2D((Chans, 1),
                             use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.))(block1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 4))(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, (1, 16), use_bias=False,
                             padding='same')(block1)
    block2 = BatchNormalization(axis=1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 8))(block2)
    block2 = dropoutType(dropoutRate)(block2)

    flatten = Flatten(name='flatten')(block2)

    dense = Dense(nb_classes, name='dense')(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)