Code Example #1
def conv(model, temp):
    # append a Conv1D block whose filter count is drawn at random from the `filters` list
    model.add(
        layers.Conv1D(filters=random.choice(filters),
                      kernel_size=temp,
                      activation='relu'))
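# Code Example #1 is only a fragment: `filters`, `random`, and the model being
# extended all live outside the function. A minimal usage sketch under assumed
# context (the filter pool, input shape, and kernel sizes below are illustrative
# guesses, not part of the original snippet):
import random
from keras import layers, models

filters = [16, 32, 64]                               # assumed pool of candidate filter counts
model = models.Sequential()
model.add(layers.InputLayer(input_shape=(128, 1)))   # assumed input: 128 timesteps, 1 channel
for kernel_size in (3, 5, 7):                        # assumed candidate kernel sizes
    conv(model, kernel_size)                         # each call appends one Conv1D with a random filter count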
Code Example #2
    def build(self):

        # inputs: exogenous series of shape (time_window, 1, exog_dim) and endogenous series of shape (time_window, 1)
        input_exg = layers.Input(shape=(self.time_window, 1, self.exog_dim))
        input_end = layers.Input(shape=(self.time_window, 1))

        op_end = input_end
        op_exg = self.build_exog_cnn(input_exg)

        print('---> Start of parallel layers')
        # build parallel dilated layers
        for l in range(self.parallel_layers):
            print('--- parallel layer  ', l + 1)
            d = math.pow(2, l)
            print('dilation rate :', d)

            op_end = self.res_block(inp=op_end,
                                    num_filters=4,
                                    dilation=[d],
                                    kernel_size=2)

            op_exg = self.res_block(inp=op_exg,
                                    num_filters=4,
                                    dilation=[d],
                                    kernel_size=2)

        # build combined dilated layers
        print('---> Start of combined layers')
        comb_op = layers.add([op_exg, op_end])
        print(comb_op)

        for l in range(self.parallel_layers,
                       self.parallel_layers + self.comb_layers):

            d = math.pow(2, l)
            print('layer', l + 1, 'dilation rate :', d)

            comb_op = self.res_block(inp=comb_op,
                                     num_filters=4,
                                     dilation=[d],
                                     kernel_size=2)

        network_op = layers.Conv1D(
            filters=1,
            kernel_size=1,
            dilation_rate=[1],
            strides=1,
            kernel_regularizer=keras.regularizers.l2(0.01),
            padding='valid')(comb_op)
        network_op = layers.LeakyReLU()(network_op)

        model = models.Model(inputs=[input_exg, input_end],
                             outputs=[network_op])

        model.compile(optimizer=keras.optimizers.Adam(),
                      loss=keras.losses.MSE,
                      metrics=[keras.metrics.mae, keras.metrics.mse])

        model.summary()
        self.model = model
        return
Code Example #3
from keras.datasets import imdb
from keras.preprocessing import sequence
from keras import layers
from keras.models import Sequential
max_features = 10000
max_len = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), len(x_test))
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)

#training and evaluating a simple 1D convnet on the IMDB data
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Embedding(max_features, 128, input_length=max_len))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer=RMSprop(lr=1e-4),loss='binary_crossentropy',metrics=['acc'])
history = model.fit(x_train, y_train,epochs=10,batch_size=128,validation_split=0.2)


# Because 1D convnets process input patches independently, they are not sensitive to the
# order of timesteps beyond a local scale (the size of the convolution windows).
# To recognize longer-term patterns you can stack many convolution and pooling layers,
# resulting in upper layers that see long chunks of the original data.
# However, this doesn't work very well.

# On the temperature dataset, more recent data points should be interpreted differently
# from older data points, so the convnet fails to produce meaningful results.
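# A common remedy, sketched below, is to use the 1D convnet only as a cheap
# downsampling front end and let a recurrent layer handle order sensitivity.
# This is an illustrative sketch, not code from the snippets above: `float_data`,
# the generators, and the layer sizes are assumptions.
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop

model = Sequential()
# the convolutional stack shortens the long sequence cheaply ...
model.add(layers.Conv1D(32, 5, activation='relu',
                        input_shape=(None, float_data.shape[-1])))  # float_data: assumed temperature array
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
# ... and the GRU then models the order of the downsampled timesteps
model.add(layers.GRU(32, dropout=0.1, recurrent_dropout=0.5))
model.add(layers.Dense(1))  # single regression output (e.g. the next temperature)
model.compile(optimizer=RMSprop(), loss='mae')
# history = model.fit_generator(train_gen, steps_per_epoch=500, epochs=20,
#                               validation_data=val_gen, validation_steps=val_steps)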
Code Example #4
def ieee_net(x_train, y_train, ddg_train, epoch_best):
    row_num, col_num = x_train.shape[1:3]
    verbose = 1
    batch_size = 64
    epochs = epoch_best  #[15, 12, 16, 29, 16, 12, 10, 31, 10, 19]

    metrics = ('mae', pearson_r, rmse)

    def step_decay(epoch):
        # drops as progression proceeds, good for sgd
        if epoch > 0.9 * epochs:
            lr = 0.00001
        elif epoch > 0.75 * epochs:
            lr = 0.0001
        elif epoch > 0.5 * epochs:
            lr = 0.001
        else:
            lr = 0.01
        print('lr: %f' % lr)
        return lr

    lrate = callbacks.LearningRateScheduler(step_decay, verbose=verbose)
    # my_callbacks = [
    #     lrate
    # ]
    my_callbacks = None

    network = models.Sequential()
    network.add(
        layers.Conv1D(filters=16,
                      kernel_size=5,
                      activation='relu',
                      input_shape=(row_num, col_num)))
    network.add(layers.MaxPooling1D(pool_size=2))
    network.add(layers.Conv1D(32, 5, activation='relu'))
    network.add(layers.MaxPooling1D(pool_size=2))
    network.add(layers.Conv1D(64, 3, activation='relu'))
    network.add(layers.MaxPooling1D(pool_size=2))
    network.add(layers.Flatten())
    network.add(layers.Dense(128, activation='relu'))
    network.add(layers.Dropout(0.5))
    network.add(layers.Dense(16, activation='relu'))
    network.add(layers.Dropout(0.3))
    network.add(layers.Dense(1))
    # print(network.summary())
    # rmsp = optimizers.RMSprop(lr=0.0001,  decay=0.1)
    rmsp = optimizers.RMSprop(lr=0.0001)
    network.compile(
        optimizer=rmsp,  #'rmsprop',  # SGD,adam,rmsprop
        loss='mse',
        metrics=list(metrics))  # mae = mean absolute error; accuracy
    result = network.fit(
        x=x_train,
        y=ddg_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=my_callbacks,
        shuffle=True,
    )
    return network, result.history
Code Example #5
def model_creation(input_shape1,
                   input_shape2,
                   model_version,
                   optim,
                   problem_type=0):
    """
    op_sequence : False, by default. If True, then also uses the num_nodes parameter.
    num_nodes   : The number that denotes that how many values to predict at the output layer.
    """
    print("Inside model_creation")

    past_inp = L.Input(shape=(input_shape1))
    fut_inp = L.Input(shape=(input_shape2))

    if (model_version == "M1V1"):
        cnn1 = L.Conv1D(filters=32, kernel_size=5)(past_inp)
        cnn1 = L.Conv1D(filters=32, kernel_size=2)(cnn1)

        cnn2 = L.Conv1D(filters=32, kernel_size=5)(fut_inp)
        cnn2 = L.Conv1D(filters=32, kernel_size=2)(cnn2)

        lstm_inp = L.Average()([cnn1, cnn2])

        lstm_out = L.LSTM(32,
                          recurrent_dropout=0.2,
                          return_sequences=True,
                          bias_initializer='ones')(lstm_inp)

        x1 = L.Average()([lstm_out, lstm_inp])
        x1 = L.Flatten()(x1)

    elif (model_version == "M1V2"):
        cnn1 = L.Conv1D(filters=32, kernel_size=5)(past_inp)
        cnn1 = L.Conv1D(filters=32, kernel_size=3)(cnn1)

        cnn2 = L.Conv1D(filters=32, kernel_size=5)(fut_inp)
        cnn2 = L.Conv1D(filters=32, kernel_size=3)(cnn2)

        x1 = L.Average()([cnn1, cnn2])
        x1 = L.Flatten()(x1)

    elif (model_version == "M1V3"):
        x1 = L.LSTM(32,
                    recurrent_dropout=0.2,
                    return_sequences=True,
                    bias_initializer='ones')(past_inp)
        #x1 = L.LSTM(32, recurrent_dropout=0.2, return_sequences=True, bias_initializer='ones')(x1)
        x1 = L.LSTM(32, recurrent_dropout=0.2, bias_initializer='ones')(x1)

    elif (model_version == "M2V1"):
        cnn1 = L.Conv1D(filters=32, kernel_size=5)(past_inp)
        cnn1 = L.Conv1D(filters=32, kernel_size=2)(cnn1)

        lstm_out1 = L.LSTM(32,
                           recurrent_dropout=0.3,
                           return_sequences=True,
                           bias_initializer='ones')(cnn1)
        lstm_out1 = L.Average()([cnn1, lstm_out1])

        cnn2 = L.Conv1D(filters=32, kernel_size=5)(fut_inp)
        cnn2 = L.Conv1D(filters=32, kernel_size=2)(cnn2)

        lstm_out2 = L.LSTM(32,
                           recurrent_dropout=0.3,
                           return_sequences=True,
                           bias_initializer='ones')(cnn2)
        lstm_out2 = L.Average()([cnn2, lstm_out2])

        x1 = L.Average()([lstm_out1, lstm_out2])
        x1 = L.Flatten()(x1)

    elif (model_version == "M2V2"):
        lstm_out1 = L.LSTM(32,
                           recurrent_dropout=0.3,
                           return_sequences=True,
                           bias_initializer='ones')(past_inp)
        lstm_out2 = L.LSTM(32,
                           recurrent_dropout=0.3,
                           return_sequences=True,
                           bias_initializer='ones')(fut_inp)
        x1 = L.Average()([lstm_out1, lstm_out2])
        x1 = L.Flatten()(x1)

    elif (model_version == "M3V1"):
        #         cnn_inp = L.Concatenate(axis=1)([past_inp,fut_inp])
        cnn = L.Conv1D(filters=32, kernel_size=5)(past_inp)
        cnn = L.Conv1D(filters=32, kernel_size=2)(cnn)

        layer = L.Lambda(lambda cnn: K.reverse(cnn, axes=1))
        cnn2 = layer(cnn)

        lstm1 = L.LSTM(32,
                       recurrent_dropout=0.3,
                       return_sequences=True,
                       bias_initializer='ones')(cnn)
        lstm2 = L.LSTM(32,
                       recurrent_dropout=0.3,
                       return_sequences=True,
                       bias_initializer='ones')(cnn2)
        lstm = L.Average()([lstm1, lstm2])
        x1 = L.Average()([cnn, lstm])
        x1 = L.Flatten()(x1)

    elif (model_version == "M3V2"):
        cnn_inp = L.Concatenate(axis=1)([past_inp, fut_inp])
        cnn = L.Conv1D(filters=32, kernel_size=5)(cnn_inp)
        cnn = L.Conv1D(filters=32, kernel_size=2)(cnn)

        layer = L.Lambda(lambda cnn: K.reverse(cnn, axes=1))
        cnn2 = layer(cnn)

        lstm1 = L.LSTM(32,
                       recurrent_dropout=0.3,
                       return_sequences=True,
                       bias_initializer='ones')(cnn)
        lstm2 = L.LSTM(32,
                       recurrent_dropout=0.3,
                       return_sequences=True,
                       bias_initializer='ones')(cnn2)
        lstm = L.Average()([lstm1, lstm2])
        x1 = L.Average()([cnn, lstm])
        x1 = L.Flatten()(x1)

    elif (model_version == "M4"):
        cnn1 = L.Conv1D(filters=32, kernel_size=5)(past_inp)
        cnn1 = L.Conv1D(filters=32, kernel_size=2)(cnn1)

        cnn2 = L.Conv1D(filters=32, kernel_size=5)(fut_inp)
        cnn2 = L.Conv1D(filters=32, kernel_size=2)(cnn2)

        lstm1 = L.Bidirectional(
            L.LSTM(16,
                   recurrent_dropout=0.3,
                   return_sequences=True,
                   bias_initializer='ones'))(past_inp)
        lstm2 = L.Bidirectional(
            L.LSTM(16,
                   recurrent_dropout=0.3,
                   return_sequences=True,
                   bias_initializer='ones'))(fut_inp)

        out1 = L.Concatenate(axis=1)([cnn1, lstm1])
        out2 = L.Concatenate(axis=1)([cnn2, lstm2])

        #         out1 = L.Average()([cnn1,cnn2])
        #         ousst2 = L.Average()([lstm1,lstm2])

        #         x1 = L.Concatenate(axis=1)([out1,out2])
        x1 = L.Average()([out1, out2])
        x1 = L.Flatten()(x1)

    # six identical Dense(256) + LeakyReLU(0.2) blocks
    for _ in range(6):
        x1 = L.Dense(256)(x1)
        x1 = L.advanced_activations.LeakyReLU(0.2)(x1)

    #Classification Part.
    if (problem_type == 1):
        main_out = L.Dense(2, activation='softmax')(x1)
        model = M.Model(inputs=[past_inp, fut_inp],
                        outputs=[main_out],
                        name=model_version)
        model.summary()
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])

    #Regression Part.
    else:
        x1 = L.Dense(1)(x1)
        main_out = L.advanced_activations.LeakyReLU(0.2)(x1)
        model = M.Model(inputs=[past_inp, fut_inp],
                        outputs=[main_out],
                        name=model_version)
        model.compile(optimizer=optim,
                      loss=tf.losses.huber_loss,
                      metrics=['mae', 'mse', rmse])

    model.summary()

    return model
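# A minimal call sketch for the factory above; the window shapes, optimizer string,
# and hyperparameters are assumptions. The branch-merging variants use Average /
# Concatenate, so the past and future windows are given matching shapes here.
past_shape = (24, 6)   # assumed: 24 timesteps x 6 features
fut_shape = (24, 6)    # assumed: same shape so the merged branches line up

reg_model = model_creation(past_shape, fut_shape,
                           model_version="M1V2",  # any of M1V1 ... M4
                           optim="adam",
                           problem_type=0)        # 0 -> regression head with Huber loss
# reg_model.fit([past_windows, future_windows], targets, epochs=10, batch_size=32)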
Code Example #6
def au_Exp():
    now = datetime.datetime.now()
    now_s = now.strftime("%Y-%m-%d-%H-%M-%S")
    config = tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(
        allow_growth=True))
    sess = tf.compat.v1.Session(config=config)
    ## set up a few parameters for the automated runs below
    epochs_au = 50
    batch_size_au = 1
    jihuo = 'tanh'  # activation function used by the conv layers

    callback_list_test = [
        keras.callbacks.ModelCheckpoint(
            filepath=now_s + '.h5',  ## file path; saved in the current directory so it is easy to find
            monitor='val_loss',  ## metric to monitor
            save_best_only=True  ## keep only the best model
        )
    ]
    # dataspecimen,dataprop = gM.getOdataMilk()
    # test_data,test_lable,train_data,train_lable = sF.getTestDataMilk(dataspecimen,dataprop)
    test_data, test_lable, train_data, train_lable = mD.getOdatebmilkDep()

    model = models.Sequential()
    model.add(layers.Conv1D(64, 7, activation=jihuo,
                            input_shape=(1557, 1)))  # input shape depends on the sample; 1557 points for this milk data
    model.add(layers.MaxPooling1D(2))

    model.add(layers.Conv1D(32, 7, activation=jihuo))
    model.add(layers.MaxPooling1D(2))

    model.add(layers.Conv1D(32, 7, activation=jihuo))
    model.add(layers.MaxPooling1D(2))

    model.add(layers.Conv1D(16, 7, activation=jihuo))
    # model.add(layers.GlobalMaxPooling1D())  ## performed very poorly in practice
    model.add(layers.Flatten())

    model.add(layers.Dense(16))
    model.add(layers.Dense(8))
    # model.add(layers.Dense(4))
    # model.add(layers.Dense(2))
    model.add(layers.Dense(1))

    model.summary()
    model.compile(optimizer=RMSprop(), loss='mse')
    history = model.fit(train_data,
                        train_lable,
                        epochs=epochs_au,
                        batch_size=batch_size_au,
                        validation_data=(test_data, test_lable),
                        callbacks=callback_list_test)

    sF.drawLoss(history)  ## plot the current loss/validation curves

    model = load_model(now_s + '.h5')
    result_trian = model.predict(train_data)
    result_predict = model.predict(test_data)
    rmsec = sF.calculate_RMSE(result_trian, train_lable)  ## RMSE on the training set
    rmsep = sF.calculate_RMSE(result_predict, test_lable)  ## RMSE on the test set
    r_2_t = sF.calculate_R21(result_trian, train_lable)  ## R^2 on the training set
    r_2_p = sF.calculate_R21(result_predict, test_lable)  ## R^2 on the test set
    # print("Root Mean Square Error of Calibrationh is : %g"%(rmsec))
    # print("训练集上得决定系数:%f"%(r_2_t))
    # print("Root Mean Square Error of Prediction is : %g"%(rmsep))
    # print("测试集上得决定系数:%f"%(r_2_p))
    ###### the code below automatically logs the experiment results

    write_data = [(now_s, epochs_au, batch_size_au, rmsec, r_2_t, rmsep, r_2_p)
                  ]  # new row of data to append
    sF.write_To_Csv(write_data)
    return rmsep, r_2_p
Code Example #7
    row_data = data.iloc[i:i+1,0].values[0]
    row_data = preprocessing(row_data)
    row_data = cut_word(row_data)
    input_text.append(row_data)
    print(i)
tf_data = vector.fit_transform(input_text)


# model
from keras import layers , Input
from keras.models import Model
vocabulary_size = len(data)
num_income_groups = 10
posts_input = Input(shape = (None,),name='posts')
embedded_posts = layers.Embedding(vocabulary_size, 256)(posts_input)  # Embedding(input_dim=vocab size, output_dim=256)
x = layers.Conv1D(128,5,activation = 'relu')(embedded_posts)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(256,5,activation = 'relu')(x)
x = layers.Conv1D(256,5,activation = 'relu')(x)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(256,5,activation = 'relu')(x)
x = layers.Conv1D(256,5,activation = 'relu')(x)
x = layers.GlobalMaxPooling1D()(x)  # collapse the time dimension so each head outputs one score per post
x = layers.Dense(128,activation = 'relu')(x)

toxic_prediction = layers.Dense(1,activation = 'sigmoid',name = 'toxic')(x)
severe_toxic_prediction = layers.Dense(1,activation = 'sigmoid',name = 'severe_toxic')(x)
obscene_prediction = layers.Dense(1,activation = 'sigmoid',name = 'obscene')(x)
threat_prediction = layers.Dense(1,activation = 'sigmoid',name = 'threat')(x)
insult_prediction = layers.Dense(1,activation = 'sigmoid',name = 'insult')(x)
identity_hate = layers.Dense(1,activation = 'sigmoid',name = 'identity_hate')(x)
Code Example #8
model = Sequential()

#model.add(layers.Embedding(input_dim=vocab_size,
#                           output_dim=embedding_dim,
#                           embeddings_initializer = glorot_uniform(seed=42),
#                           input_length=maxlen,
#                           trainable=True))
model.add(
    layers.Embedding(vocab_size,
                     embedding_dim,
                     weights=[embedding_matrix],
                     input_length=maxlen,
                     trainable=False))

model.add(layers.Conv1D(70, 2, activation='relu'))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(100, 3, activation='relu'))
model.add(layers.GlobalMaxPool1D())
model.add(layers.Dense(60, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

from keras.optimizers import SGD
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

model.compile(loss='binary_crossentropy',
              optimizer=sgd,  # use the SGD instance configured above (the string 'sgd' would ignore it)
              metrics=[recall, precision, f1, 'accuracy'])
model.summary()

history = model.fit(X_train,
Code Example #9
 def build_model(self):
     """
     Функция, задающая архитектуру нейронной сети
     """
     # symbol_inputs: array, 1D-массив длины m
     symbol_inputs = kl.Input(shape=(None,), dtype='uint8', name="symbol_inputs")
     # symbol_embeddings: array, 2D-массив размера m*self.symbols_number
     if self.use_embeddings:
         symbol_embeddings = kl.Embedding(self.symbols_number_, self.embeddings_size,
                                          name="symbol_embeddings")(symbol_inputs)
     else:
         symbol_embeddings = kl.Lambda(kb.one_hot, output_shape=(None, self.symbols_number_),
                                       arguments={"num_classes": self.symbols_number_},
                                       name="symbol_embeddings")(symbol_inputs)
     inputs = [symbol_inputs]
     if self.to_memorize_morphemes:
         # context_inputs: array, 2D array of size m*15
         context_inputs = kl.Input(shape=(None, self.memory_dim), dtype='float32', name="context_inputs")
         inputs.append(context_inputs)
         if self.context_dropout > 0.0:
             context_inputs = kl.Dropout(self.context_dropout)(context_inputs)
         # the context representation is concatenated to the symbol representation
         symbol_embeddings = kl.Concatenate()([symbol_embeddings, context_inputs])
     conv_inputs = symbol_embeddings
     conv_outputs = []
     for window_size, curr_filters_numbers in zip(self.window_size, self.filters_number):
         # a separate convolution stack for each window width
         curr_conv_input = conv_inputs
         for j, filters_number in enumerate(curr_filters_numbers[:-1]):
             # all convolution layers except the last one (dropout may follow each of them)
             curr_conv_input = kl.Conv1D(filters_number, window_size,
                                         activation="relu", padding="same")(curr_conv_input)
             if self.dropout > 0.0:
                 # dropout is recommended between consecutive layers of the same type
                 curr_conv_input = kl.Dropout(self.dropout)(curr_conv_input)
         if not self.use_lstm:
             curr_conv_output = kl.Conv1D(curr_filters_numbers[-1], window_size,
                                          activation="relu", padding="same")(curr_conv_input)
         else:
             curr_conv_output = curr_conv_input
         conv_outputs.append(curr_conv_output)
     # concatenate the outputs of all convolution stacks into a single vector
     if len(conv_outputs) == 1:
         conv_output = conv_outputs[0]
     else:
         conv_output = kl.Concatenate(name="conv_output")(conv_outputs)
     if self.use_lstm:
         conv_output = kl.Bidirectional(
             kl.LSTM(self.lstm_units, return_sequences=True))(conv_output)
     if self.dense_output_units:
         pre_last_output = kl.TimeDistributed(
             kl.Dense(self.dense_output_units, activation="relu"),
             name="pre_output")(conv_output)
     else:
         pre_last_output = conv_output
     # final layer with softmax activation to obtain a probability distribution
     output = kl.TimeDistributed(
         kl.Dense(self.target_symbols_number_, activation="softmax"), name="output")(pre_last_output)
     model = Model(inputs, [output])
     model.compile(optimizer=Adam(clipnorm=5.0),
                   loss="categorical_crossentropy", metrics=["accuracy"])
     return model
Code Example #10
# compute val_steps: how many batches to draw from val_gen for a full validation pass
val_steps = (valnum - lookback) // batch_size
# number of batches to draw per epoch from the training generator
train_steps = trainnum // batch_size

from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor="val_loss", patience=30)

# build the model
model1 = models.Sequential()
model1.add(
    layers.Dense(512,
                 activation="relu",
                 input_shape=(lookback // step, data.shape[-1])))
model1.add(layers.Conv1D(filters=1024, kernel_size=5, activation="relu"))
model1.add(layers.MaxPooling1D(5))
model1.add(
    layers.Bidirectional(layers.LSTM(32, activation="relu", dropout=0.5)))
model1.add(layers.Dense(8, activation="softmax"))
model1.summary()

model1.compile(optimizer=optimizers.RMSprop(),
               loss="categorical_crossentropy",
               metrics=["acc"])
history1 = model1.fit_generator(train_gen,
                                steps_per_epoch=train_steps,
                                epochs=200,
                                validation_data=val_gen,
                                validation_steps=val_steps,
                                callbacks=[early_stopping])
Code Example #11
#pred_win = 72
#lookback = 144
dense_units = pred_win
cn = 1

for batch_size in batch_size_list:
    for lr in lr_list:
        print('batch_size=', batch_size, 'lr = ', lr, 'cn = ', cn, 'epochs =',
              epochs, 'delay=', delay, 'lookback=', lookback, 'pred_win=',
              pred_win, 'components =', n_comp)

        model = models.Sequential()
        #model.add(layers.Embedding(max_features, 128, input_length=max_len))
        model.add(
            layers.Conv1D(32,
                          7,
                          activation='relu',
                          input_shape=(x_seq_train.shape[1], n_features)))
        #model.add(layers.MaxPooling1D(5))
        #model.add(layers.Conv1D(256, 7, activation='relu'))
        #model.add(layers.GlobalMaxPooling1D())
        model.add(MaxPooling1D(pool_size=7))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(layers.Dense(y_seq_train.shape[1]))
        model.summary()

        model.compile(optimizer=Adam(lr=lr, clipnorm=cn), loss='mse')

        # fit model
        history = model.fit(x_seq_train,
                            y_seq_train,
Code Example #12
    def build_model(self, X_train):
        # input layers
        main_input = layers.Input(shape=(len(self.main_input_features), ),
                                  dtype="float32",
                                  name="main_input")
        lag_player_stats_input = layers.Input(
            shape=X_train["lag_player_stats_input"].shape[1:],
            dtype="float32",
            name="lag_player_stats_input",
        )
        lag_opp_stats_input = layers.Input(
            shape=X_train["lag_opp_stats_input"].shape[1:],
            dtype="float32",
            name="lag_opp_stats_input",
        )
        lag_team_total_stats_input = layers.Input(
            shape=X_train["lag_team_total_stats_input"].shape[1:],
            dtype="float32",
            name="lag_team_total_stats_input",
        )
        lag_opp_total_stats_input = layers.Input(
            shape=X_train["lag_opp_total_stats_input"].shape[1:],
            dtype="float32",
            name="lag_opp_total_stats_input",
        )

        # convolutional layers
        conv_lag_player_stats = layers.Conv1D(
            filters=self.conv_filters,
            kernel_size=self.conv_kernel_size,
            activation="relu",
        )(lag_player_stats_input)
        conv_lag_player_stats = layers.MaxPooling1D(
            pool_size=self.pool_size)(conv_lag_player_stats)
        conv_lag_player_stats = layers.Flatten()(conv_lag_player_stats)
        conv_lag_opp_stats = layers.Conv1D(
            filters=self.conv_filters,
            kernel_size=self.conv_kernel_size,
            activation="relu",
        )(lag_opp_stats_input)
        conv_lag_opp_stats = layers.MaxPooling1D(
            pool_size=self.pool_size)(conv_lag_opp_stats)
        conv_lag_opp_stats = layers.Flatten()(conv_lag_opp_stats)
        conv_lag_team_total_stats = layers.Conv1D(
            filters=self.conv_filters,
            kernel_size=self.conv_kernel_size,
            activation="relu",
        )(lag_team_total_stats_input)
        conv_lag_team_total_stats = layers.MaxPooling1D(
            pool_size=self.pool_size)(conv_lag_team_total_stats)
        conv_lag_team_total_stats = layers.Flatten()(conv_lag_team_total_stats)
        conv_lag_opp_total_stats = layers.Conv1D(
            filters=self.conv_filters,
            kernel_size=self.conv_kernel_size,
            activation="relu",
        )(lag_opp_total_stats_input)
        conv_lag_opp_total_stats = layers.MaxPooling1D(
            pool_size=self.pool_size)(conv_lag_opp_total_stats)
        conv_lag_opp_total_stats = layers.Flatten()(conv_lag_opp_total_stats)

        # main layers
        concat_all = layers.concatenate([
            main_input,
            conv_lag_player_stats,
            conv_lag_opp_stats,
            conv_lag_team_total_stats,
            conv_lag_opp_total_stats,
        ])
        concat_all = layers.Dense(self.normal_layer_size,
                                  activation="relu",
                                  bias_initializer="zeros")(concat_all)
        concat_all = layers.Dropout(0.2)(concat_all)

        # output layers
        main_output = layers.Dense(
            self.upper - self.lower + 1,
            activation="sigmoid",
            kernel_constraint=constraints.unit_norm(axis=0),
            name="main_output",
        )(concat_all)

        # define model
        input_layers = [
            main_input,
            lag_player_stats_input,
            lag_opp_stats_input,
            lag_team_total_stats_input,
            lag_opp_total_stats_input,
        ]
        output_layers = [main_output]
        output_losses = {"main_output": "binary_crossentropy"}
        output_loss_weights = {"main_output": 1}
        opt = optimizers.Adam(lr=self.learning_rate)
        model = models.Model(inputs=input_layers, outputs=output_layers)
        model.compile(optimizer=opt,
                      loss=output_losses,
                      loss_weights=output_loss_weights)
        return model
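# Every input and output layer above is named, so the model can be fed with dicts
# keyed on those names. A hedged usage sketch: `predictor` is a hypothetical instance
# of the enclosing class, the "main_input" array and `y_train` are assumed to exist,
# and the epoch/batch values are illustrative.
model = predictor.build_model(X_train)
model.fit(
    x={
        "main_input": X_train["main_input"],
        "lag_player_stats_input": X_train["lag_player_stats_input"],
        "lag_opp_stats_input": X_train["lag_opp_stats_input"],
        "lag_team_total_stats_input": X_train["lag_team_total_stats_input"],
        "lag_opp_total_stats_input": X_train["lag_opp_total_stats_input"],
    },
    y={"main_output": y_train},
    epochs=20,
    batch_size=64,
)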
Code Example #13
def getTextNet():
    words_input = kl.Input(shape=(max_wlen, ), name='words_input')
    padding_masks = kl.Input(shape=(max_wlen, 1), name='padding_masks')
    x2 = kl.Embedding(vocab_size + 1,
                      embedding_size,
                      mask_zero=False,
                      name='w2v_emb')(words_input)
    xk3 = kl.Conv1D(filters=324, kernel_size=3, strides=1, padding='same')(x2)
    xk3 = kl.ELU(alpha=elu_alpha)(xk3)
    xk5 = kl.Conv1D(filters=324, kernel_size=5, strides=1, padding='same')(x2)
    xk5 = kl.ELU(alpha=elu_alpha)(xk5)
    xk7 = kl.Conv1D(filters=324, kernel_size=7, strides=1, padding='same')(x2)
    xk7 = kl.ELU(alpha=elu_alpha)(xk7)
    xk3d2 = kl.Conv1D(filters=324,
                      kernel_size=3,
                      strides=1,
                      dilation_rate=2,
                      padding='same')(x2)
    xk3d2 = kl.ELU(alpha=elu_alpha)(xk3d2)
    xk5d2 = kl.Conv1D(filters=324,
                      kernel_size=5,
                      strides=1,
                      dilation_rate=2,
                      padding='same')(x2)
    xk5d2 = kl.ELU(alpha=elu_alpha)(xk5d2)
    xk7d2 = kl.Conv1D(filters=324,
                      kernel_size=7,
                      strides=1,
                      dilation_rate=2,
                      padding='same')(x2)
    xk7d2 = kl.ELU(alpha=elu_alpha)(xk7d2)
    x2 = kl.Concatenate()([xk3, xk5, xk7, xk3d2, xk5d2, xk7d2])
    #     x2 = kl.BatchNormalization()(x2)
    #     x2 = kl.ELU(alpha=elu_alpha)(x2)
    x2 = kl.Conv1D(filters=100, kernel_size=1, strides=1, padding='same')(x2)
    x2 = kl.BatchNormalization()(x2)
    x2 = kl.ELU(alpha=elu_alpha)(x2)
    # print('x2.shape:',x2.shape)
    x2 = kl.Dropout(dropout_rate)(x2)
    sa_out_x2_1, s_x2_1 = SelfAttention(ch=int(x2.shape[-1]),
                                        name='sa_2_1')([x2, padding_masks])
    sa_out_x2_2, s_x2_2 = SelfAttention(ch=int(x2.shape[-1]),
                                        name='sa_2_2')([x2, padding_masks])
    sa_out_x2_3, s_x2_3 = SelfAttention(ch=int(x2.shape[-1]),
                                        name='sa_2_3')([x2, padding_masks])
    sa_out_x2_4, s_x2_4 = SelfAttention(ch=int(x2.shape[-1]),
                                        name='sa_2_4')([x2, padding_masks])
    #     print(sa_out_x2_4)
    x3 = kl.Concatenate(name='concat_sa_2')(
        [sa_out_x2_1, sa_out_x2_2, sa_out_x2_3, sa_out_x2_4])
    # x3 = kl.ELU(alpha=elu_alpha,name='act_concat_sa_2')(x3)
    x3comb, x3_g1, x3_g2 = ResidualCombine1D(ch_in=int(x3.shape[-1]),
                                             ch_out=100)([x2, x3])
    x3comb = kl.BatchNormalization()(x3comb)
    # x3comb = kl.ELU(alpha=elu_alpha)(x3comb)
    x3comb = kl.Conv1D(filters=100, kernel_size=1, strides=1,
                       padding='same')(x3comb)
    x3comb = kl.BatchNormalization()(x3comb)
    x3comb = kl.ELU()(x3comb)

    x3comb = kl.Dropout(dropout_rate)(x3comb)

    sa_out_x3_1, s_x3_1 = SelfAttention(ch=int(x3comb.shape[-1]),
                                        name='sa_3_1')([x3comb, padding_masks])
    sa_out_x3_2, s_x3_2 = SelfAttention(ch=int(x3comb.shape[-1]),
                                        name='sa_3_2')([x3comb, padding_masks])
    sa_out_x3_3, s_x3_3 = SelfAttention(ch=int(x3comb.shape[-1]),
                                        name='sa_3_3')([x3comb, padding_masks])
    sa_out_x3_4, s_x3_4 = SelfAttention(ch=int(x3comb.shape[-1]),
                                        name='sa_3_4')([x3comb, padding_masks])
    x4 = kl.Concatenate(name='concat_sa_3')(
        [sa_out_x3_1, sa_out_x3_2, sa_out_x3_3, sa_out_x3_4])
    # x4 = kl.ELU(alpha=elu_alpha,name='act_concat_sa_3')(x4)
    x4comb, x4_g1, x4_g2 = ResidualCombine1D(ch_in=int(x4.shape[-1]),
                                             ch_out=100)([x3comb, x4])
    x4comb = kl.BatchNormalization()(x4comb)
    # x4comb = kl.ELU(alpha=elu_alpha)(x4comb)
    x4comb = kl.Conv1D(filters=100, kernel_size=1, strides=1,
                       padding='same')(x4comb)
    x4comb = kl.BatchNormalization()(x4comb)
    x4comb = kl.ELU(alpha=elu_alpha)(x4comb)
    # x4comb = kl.Dropout(dropout_rate)(x4comb)

    # sa_out_x4_1,s_x4_1 = SelfAttention(ch=int(x4comb.shape[-1]),name='sa_4_1')([x4comb,padding_masks])
    # sa_out_x4_2,s_x4_2 = SelfAttention(ch=int(x4comb.shape[-1]),name='sa_4_2')([x4comb,padding_masks])
    # sa_out_x4_3,s_x4_3 = SelfAttention(ch=int(x4comb.shape[-1]),name='sa_4_3')([x4comb,padding_masks])
    # sa_out_x4_4,s_x4_4 = SelfAttention(ch=int(x4comb.shape[-1]),name='sa_4_4')([x4comb,padding_masks])
    # x5 = kl.Concatenate(name='concat_sa_4')([sa_out_x4_1,sa_out_x4_2,sa_out_x4_3,sa_out_x4_4])
    # x5 = kl.ELU(alpha=elu_alpha,name='act_concat_sa_4')(x5)
    # x5comb,x5_g1,x5_g2 = ResidualCombine1D(ch_in=int(x5.shape[-1]),ch_out=256)([x4comb,x5])
    # x5comb = kl.BatchNormalization()(x5comb)
    # x5comb = kl.ELU(alpha=elu_alpha)(x5comb)
    # x5comb = kl.Conv1D(filters=256,kernel_size=1,strides=1,padding='same')(x5comb)
    # x5comb = kl.BatchNormalization()(x5comb)
    # x5comb = kl.ELU(alpha=elu_alpha)(x5comb)

    return Model([words_input, padding_masks], x4comb, name='textModel')
Code Example #14
                                  axis=1),
                        axis=2)
        train_y = np.concatenate((train_y, n_y))
        train_X = np.vstack((train_X, n_X))

    train_X, train_y = shuffle(train_X, train_y, random_state=0)

    #Resample
    if len(classes) == 2:
        uniques, counts = np.unique(train_y, return_counts=True)
        print(counts)

    if len(classes) != 2:
        train_y = keras.utils.to_categorical(train_y)
    model = keras.Sequential()
    model.add(layers.Conv1D(64, 3, input_shape=(20, 66), activation="relu"))
    model.add(layers.Conv1D(64, 3, activation="relu"))
    model.add(layers.MaxPooling1D(3))
    model.add(layers.Conv1D(128, 3, activation="relu"))
    model.add(layers.GlobalAveragePooling1D())
    model.add(layers.Dropout(rate=0.5))
    adam = keras.optimizers.Adam(lr=0.001,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=None,
                                 decay=0.0,
                                 amsgrad=True)

    if len(classes) == 2:
        model.add(layers.Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
Code Example #15
File: stacking_nn_24.py  Project: yujiye/jingqu_
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=2018)
cvscores_cnn = []
dataset_blend_train_cnn = np.zeros((X_train.shape[0], 1))
preds_test_blend_cnn = np.zeros((X_test.shape[0], 5))
for i, (train_idx, test_idx) in enumerate(kfold.split(X_train, y)):
    X_t, y_t, X_te, y_te = X_train[train_idx], y[train_idx], X_train[
        test_idx], y[test_idx]

    cnn_model0 = Sequential()
    cnn_model0.add(
        Embedding(WORD_IDX_LEN + 1,
                  EMBEDDING_DIM,
                  weights=[embedding_matrix],
                  input_length=MAX_LEN,
                  trainable=False))
    cnn_model0.add(layers.Conv1D(128, 5, activation='relu'))
    cnn_model0.add(layers.GlobalMaxPooling1D())
    cnn_model0.add(layers.Dense(512, activation='relu'))
    cnn_model0.add(layers.Dense(512, activation='relu'))
    cnn_model0.add(Dropout(0.1))
    cnn_model0.add(layers.Dense(512, activation='relu'))
    cnn_model0.add(Dropout(0.1))
    cnn_model0.add(layers.Dense(512, activation='relu'))
    cnn_model0.add(Dropout(0.1))
    cnn_model0.add(layers.Dense(1))
    cnn_model0.compile(optimizer='rmsprop', loss='mse', metrics=[escore])
    hist_cnn = cnn_model0.fit(X_t,
                              y_t,
                              epochs=10,
                              batch_size=64,
                              validation_data=(X_te, y_te),
Code Example #16
############################################################################

results = {}

from keras.models import Sequential
from keras import layers

# Neural network
model = Sequential()
model.add(
    layers.Embedding(vocab_size,
                     embedding_dim,
                     weights=[W2V_matrix],
                     input_length=maxlen,
                     trainable=True))
model.add(layers.Conv1D(128, 5, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()
model.fit(X_train,
          y_train,
          epochs=3,
          verbose=True,
          validation_data=(X_validation, y_validation),
          batch_size=10)

results['word2vec'] = model
Code Example #17
    def build_model(self, embedding_layer):
        """
        Builds the keras model corresponding to specified type of architecture
        :param embedding_layer: preloaded embedding layer for keras model
        :return: keras model
        """

        model = Sequential()
        model.add(embedding_layer)

        #simple feed-forward baseline
        if self.arch_name == "NN":
            model.add(layers.Flatten())
            model.add(layers.Dense(1024, activation='relu'))
            model.add(layers.Dense(2, activation='softmax'))

        elif self.arch_name == "BiLSTM":
            model.add(
                layers.Bidirectional(
                    layers.LSTM(100, return_sequences=True, dropout=0.1)))
            model.add(
                layers.Bidirectional(layers.LSTM(100, return_sequences=True)))
            model.add(layers.Bidirectional(layers.LSTM(100)))
            model.add(layers.Dense(50, activation='relu'))
            model.add(layers.Dense(2, activation='softmax'))

        #two-layered forward LSTM
        elif self.arch_name == "LSTM":
            model.add(layers.LSTM(200, return_sequences=True))
            model.add(layers.LSTM(100, return_sequences=False))
            model.add(layers.Dense(1000, activation='relu'))
            model.add(layers.Dense(2, activation='softmax'))

        #CNN model with attention layer
        elif self.arch_name == "CNN":
            model.add(layers.Conv1D(100, 7, activation='relu', padding='same'))
            model.add(layers.MaxPooling1D(2))
            model.add(layers.Conv1D(64, 7, activation='relu', padding='same'))
            model.add(Attention1())
            model.add(layers.Dense(2, activation='sigmoid'))

        #single-layered bidirectional GRU with attention layer
        elif self.arch_name == "GRU":
            model.add(
                layers.Bidirectional(
                    layers.GRU(200, return_sequences=True, dropout=0.1)))
            model.add(Attention2(self.ln))
            model.add(layers.Dense(200, activation='relu'))
            model.add(layers.Dense(2, activation='softmax'))

        #Stacked CNN and LSTM with attention layer
        elif self.arch_name == "CNN_LSTM":
            model.add(layers.Conv1D(128, 3, activation='relu', padding='valid')
                      )  # filters: 100, kernel_size = 7 -> output = 100
            model.add(layers.MaxPooling1D(2))
            model.add(
                layers.Bidirectional(layers.LSTM(128, return_sequences=True)))
            model.add(Attention1())
            model.add(layers.Dense(64, activation='relu'))
            model.add(layers.Dense(2, activation='softmax'))

        else:
            raise NotImplementedError(
                "Specified architecture is not implemented:", self.arch_name)
        return model
Code Example #18
File: capsulenet.py  Project: zhuyaner/Depicter
def CapsNet_nogradientstop(input_shape, n_class, routings):
    x = layers.Input(shape=input_shape)
    conv1 = layers.Conv1D(filters=200,
                          kernel_size=1,
                          strides=1,
                          padding='valid',
                          kernel_initializer='he_normal',
                          activation='relu',
                          name='conv1')(x)
    # conv1=BatchNormalization()(conv1)
    conv1 = Dropout(0.7)(conv1)
    conv2 = layers.Conv1D(filters=200,
                          kernel_size=9,
                          strides=1,
                          padding='valid',
                          kernel_initializer='he_normal',
                          activation='relu',
                          name='conv2')(conv1)
    # conv1=BatchNormalization()(conv1)
    conv2 = Dropout(0.7)(conv2)  # 0.75 valx loss has 0.1278!
    primarycaps = PrimaryCap(conv2,
                             dim_capsule=8,
                             n_channels=60,
                             kernel_size=20,
                             kernel_initializer='he_normal',
                             strides=1,
                             padding='valid',
                             dropout=0.2)
    dim_capsule_dim2 = 10
    # Capsule layer. Routing algorithm works here.
    digitcaps_c = CapsuleLayer_nogradient_stop(num_capsule=n_class,
                                               dim_capsule=dim_capsule_dim2,
                                               num_routing=routings,
                                               name='digitcaps',
                                               kernel_initializer='he_normal',
                                               dropout=0.1)(primarycaps)
    # digitcaps_c = CapsuleLayer(num_capsule=n_class, dim_capsule=dim_capsule_dim2, num_routing=routings,name='digitcaps',kernel_initializer='he_normal')(primarycaps)
    digitcaps = Extract_outputs(dim_capsule_dim2)(digitcaps_c)
    weight_c = Extract_weight_c(dim_capsule_dim2)(digitcaps_c)
    out_caps = Length(name='capsnet')(digitcaps)
    # Decoder network.
    y = layers.Input(shape=(n_class, ))
    masked_by_y = Mask()(
        [digitcaps, y]
    )  # The true label is used to mask the output of capsule layer. For training
    masked = Mask(
    )(digitcaps)  # Mask using the capsule with maximal length. For prediction

    # Shared Decoder model in training and prediction
    decoder = Sequential(name='decoder')
    decoder.add(
        layers.Dense(512,
                     activation='relu',
                     input_dim=dim_capsule_dim2 * n_class))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))

    # Models for training and evaluation (prediction)
    train_model = Model([x, y], [out_caps, decoder(masked_by_y)])
    eval_model = Model(x, [out_caps, decoder(masked)])
    weight_c_model = Model(x, weight_c)
    # manipulate model
    noise = layers.Input(shape=(n_class, dim_capsule_dim2))
    noised_digitcaps = layers.Add()([digitcaps, noise])
    masked_noised_y = Mask()([noised_digitcaps, y])
    manipulate_model = Model([x, y, noise], decoder(masked_noised_y))
    return train_model, eval_model, manipulate_model, weight_c_model
Code Example #19
    X_test = np.reshape(
        X_test,
        (np.size(X_test, 0), 1, np.size(X_test, 1), np.size(X_test, 2)))
    X_val = np.reshape(
        X_val, (np.size(X_val, 0), 1, np.size(X_val, 1), np.size(X_val, 2)))

    # Choose hyperparameters
    no_epochs = 25
    batch_size = 32
    learning_rate = 0.0100
    dropout_rate = 0.05

    # Design the Network
    model = Sequential()
    model.add(
        layers.Conv1D(64, 6, activation='relu', input_shape=X_train[0].shape)
    )  # Input shape is VERY fiddly. May need to try different things.
    model.add(Dropout(dropout_rate))
    model.add(layers.Conv1D(128, 6, activation='relu'))
    model.add(layers.GlobalMaxPooling1D())
    model.add(Dense(numberTargets, activation='softmax'))
    print(model.summary())
    """
    # Design the Network
    model = Sequential()
    model.add(layers.SeparableConv2D(32, (1, 2), input_shape=X_train[0].shape, activation='relu'))
    model.add(Dropout(dropout_rate))
    #model.add(layers.BatchNormalization()) # BatchNormalization and dropout work poorly together, though - Ioffe & Szegedy 2015; Li et al. 2018 
    model.add(layers.SeparableConv2D(64, (1, 3), activation='sigmoid'))
    model.add(layers.SeparableConv2D(128, (1, 4), activation='sigmoid'))
    model.add(Dropout(dropout_rate))
Code Example #20
print(Counter(target_val))
print(daily_train.shape, daily_val.shape, daily_test.shape, fif_train.shape,
      fif_val.shape, fif_test.shape, target_train.shape, target_val.shape,
      target_test.shape)
# print(daily_tradaily_train.shape,in,daily_test,fif_train,fif_test,target_train,target_test)

# np.set_printoptions(threshold='nan')
# print(daily_train.shape,daily_test.shape,fif_train.shape,fif_test.shape,target_train.shape,target_test.shape)
# print(daily_train,daily_test,fif_train,fif_test,target_train,target_test)

##### 1. Model construction
# 15-minute-frequency input branch (note the row/column order of the convolution filter!)
fif_min_input = Input(shape=(16, 5), dtype='float32', name='fif_min_input')
# fif_min_input=(8,16,4,1)
Conv1D_fif = layers.Conv1D(16, 1, strides=1)(fif_min_input)
LSTM_fif = layers.LSTM(100)(Conv1D_fif)

# daily-frequency input branch
daily_input = Input(shape=(20, 5), dtype='float32', name='daily_input')
# daily_input=(8,16,4,1)
Conv1D_daily = layers.Conv1D(16, 1, strides=1)(daily_input)
LSTM_daily = layers.LSTM(100)(Conv1D_daily)
# merge the 15-minute-frequency branch with the daily-frequency branch
concatenated = layers.concatenate([LSTM_fif, LSTM_daily],
                                  axis=-1)  # axis=-1: join along the last axis

alloy = layers.Dense(20, activation='relu')(concatenated)  # feed the merged features through a dense layer
dropout = layers.Dropout(0.2)(alloy)
output = layers.Dense(1, activation='sigmoid')(dropout)
model = Model([fif_min_input, daily_input], output)  # boilerplate: tie the inputs and the output together into a Model
Code Example #21
     vect_X_train, axis=1
 )  # final dataframe for training fold of cross validation
 X_test_dtm = np.concatenate(
     vect_X_test, axis=1
 )  # final dataframe for testing fold of cross validation
 X_train_dtm = np.expand_dims(
     X_train_dtm, axis=2
 )  # add 1 dimension to fit the convolutional layer
 X_test_dtm = np.expand_dims(
     X_test_dtm, axis=2
 )  # add 1 dimension to fit the convolutional layer
 ###   MODEL   ###
 model = models.Sequential()
 model.add(
     layers.Conv1D(filters=128,
                   kernel_size=(4),
                   activation='relu',
                   input_shape=X_train_dtm[0].shape))
 model.add(layers.GlobalMaxPooling1D())
 model.add(layers.Dropout(rate=.4))
 model.add(
     layers.Dense(units=len(target_names),
                  activation='softmax'))
 model.compile(optimizer="adam",
               loss="sparse_categorical_crossentropy",
               metrics=['accuracy'])
 model.fit(X_train_dtm,
           y_train,
           epochs=epochs,
           batch_size=20,
           class_weight=class_weight,
           validation_data=(X_test_dtm, y_test))
Code Example #22
for train, test in kfold.split(x_train, y_train):
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(x_train)
    encoded_train = tokenizer.texts_to_sequences(x_train[train])
    encoded_test = tokenizer.texts_to_sequences(x_train[test])
    Xtrain = pad_sequences(encoded_train, maxlen=max_length, padding='post')
    Xtest = pad_sequences(encoded_test, maxlen=max_length, padding='post')

    vocab_size = len(tokenizer.word_index) + 1

    # CNN model
    model = keras.Sequential()
    # words will be embedded into 100-long vector (output)
    # turn positive integers (indexes) into dense vectors of fixed size
    model.add(layers.Embedding(vocab_size, 100, input_length=max_length))
    model.add(layers.Conv1D(filters=32, kernel_size=8, activation='relu'))
    model.add(layers.MaxPooling1D(pool_size=2))
    model.add(layers.Flatten())
    model.add(layers.Dense(10, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    print(model.summary())

    Xtrain = Xtrain.astype(np.float32)
    Xtest = Xtest.astype(np.float32)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print(
        '-------------------------------------------------------------------------'
    )
    print(f'Training for fold {fold_no} ...')
Code Example #23
print(train_data_multi.shape)
print(test_data_multi.shape)
print(val_data_multi.shape)

# # Building and Testing the Neural Networks

# In[29]:

# Model 1: Initial model on the simple data set
opt = keras.optimizers.adam(lr=0.01)

CNN_above_in_1 = models.Sequential()
CNN_above_in_1.add(
    layers.Conv1D(filters=6,
                  kernel_size=300,
                  activation='relu',
                  input_shape=(7500, 1)))
CNN_above_in_1.add(layers.Conv1D(filters=4, kernel_size=4, activation='relu'))
CNN_above_in_1.add(layers.Dropout(0.5))
CNN_above_in_1.add(layers.MaxPooling1D(pool_size=2))
CNN_above_in_1.add(layers.Flatten())
CNN_above_in_1.add(layers.Dense(44, activation='relu'))
CNN_above_in_1.add(layers.Dense(22, activation='softmax'))
CNN_above_in_1.compile(loss='binary_crossentropy',
                       optimizer=opt,
                       metrics=['accuracy'])

# In[26]:
'''
#To save the model weights to a hard drive
# compute val_steps: how many batches to draw from val_gen for a full validation pass
val_steps = (valnum - lookback) // batch_size
# number of batches to draw per epoch from the training generator
train_steps = trainnum // batch_size

from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor="val_loss", patience=30)

# build the model
model1 = models.Sequential()
model1.add(
    layers.Dense(512,
                 activation="relu",
                 input_shape=(lookback // step, data.shape[-1])))
model1.add(layers.Conv1D(filters=32, kernel_size=3, activation="relu"))
model1.add(layers.MaxPooling1D(2))
model1.add(layers.Dropout(0.5))

model1.add(layers.Conv1D(filters=32, kernel_size=3, activation="relu"))
model1.add(layers.MaxPooling1D(2))
model1.add(layers.Dropout(0.5))

model1.add(layers.Conv1D(filters=32, kernel_size=3, activation="relu"))
model1.add(layers.GlobalMaxPool1D())
model1.add(layers.Dropout(0.5))

model1.add(layers.Dense(8, activation="softmax"))
model1.summary()

model1.compile(optimizer=optimizers.RMSprop(),
Code Example #25
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""

        if self.layer_type == "Dense":
            # Define input layer (states)
            states = layers.Input(shape=(self.state_size, ), name='states')
            # Add hidden layers
            net = layers.Dense(units=32, activation='relu')(states)
            net = layers.Dropout(0.05)(net)
            net = layers.Dense(units=64,
                               activation='relu',
                               kernel_regularizer=l2(0.01),
                               activity_regularizer=l1(0.01))(net)
            net = layers.Dropout(0.02)(net)
            net = layers.Dense(units=32,
                               activation='relu',
                               kernel_regularizer=l2(0.01),
                               activity_regularizer=l1(0.01))(net)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.
        elif self.layer_type == "Conv":
            # Define input layer (states)
            # state_size is 18 (action_repeat[3] * state dim[6])
            states = layers.Input(shape=(self.state_size, 4), name='states')
            net = layers.Conv1D(filters=32,
                                kernel_size=2,
                                padding='same',
                                activation='relu')(states)
            net = layers.MaxPooling1D(pool_size=2)(net)
            net = layers.Conv1D(filters=64,
                                kernel_size=2,
                                padding='same',
                                activation='relu')(net)
            net = layers.MaxPooling1D(pool_size=2)(net)
            net = layers.GlobalAveragePooling1D()(net)
            net = layers.Dense(units=128, activation='relu')(net)

        else:
            raise ValueError(
                "Specify layer_type with either 'Dense' or 'Conv'")

        # Add final output layer with sigmoid activation
        raw_actions = layers.Dense(units=self.action_size,
                                   activation='sigmoid',
                                   name='raw_actions')(net)

        # Scale [0, 1] output for each action dimension to proper range
        actions = layers.Lambda(lambda x:
                                (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam()
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
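# A hedged sketch of how the custom `train_fn` above is driven in a DDPG-style loop;
# `actor` is a hypothetical instance built with layer_type="Conv", and the arrays are
# placeholders that only illustrate the expected shapes.
import numpy as np

batch = 32                                                    # assumed batch size
states = np.random.rand(batch, actor.state_size, 4)           # states for the 'Conv' branch input
action_gradients = np.random.rand(batch, actor.action_size)   # dQ/da gradients from a critic network
actor.train_fn([states, action_gradients, 1])                 # third element: Keras learning phase (1 = training)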
Code Example #26
    def __init__(self, config, word_vecs, ch_vecs, features, ent_vecs):
        """ Initialize all layers. """
        self._config = config
        self._feature_size = len(features)

        self.emb_word = layers.Embedding(len(word_vecs),
                                         300,
                                         input_length=20,
                                         weights=[word_vecs],
                                         trainable=True,
                                         name='word-emb')
        self.emb_ent = layers.Embedding(len(ent_vecs),
                                        300,
                                        weights=[ent_vecs],
                                        trainable=True,
                                        name='ent-emb')

        # Character CNN over candidate entity title and context
        if self._config['character_cnn']:

            self.window_size = config['ccnn_window_size']

            # universal character embeddings
            self.emb_ch = layers.Embedding(len(ch_vecs),
                                           300,
                                           weights=[ch_vecs],
                                           trainable=True,
                                           name='char-emb')

            self.ent_reduc = layers.Dense(300, name='ent-reduc-layer')

            self.ch_ctx_cnn = layers.Conv1D(100,
                                            self.window_size,
                                            activation='relu',
                                            name='ch-cnn')

            self.ch_title_cnn = layers.Conv1D(100,
                                              self.window_size,
                                              activation='relu',
                                              name='ch-title-cnn')

            self.ch_ctx_pool = layers.MaxPooling1D(pool_size=MAX_CTX_CHAR_LENGTH
                                                   - self.window_size + 1,
                                                   name='ch-pool')

            self.ch_title_pool = layers.MaxPooling1D(pool_size=MAX_TITLE_CHAR_LNGTH
                                                     - self.window_size + 1,
                                                     name='ch-title-pool')
        else:
            self.ch_ctx_cnn = None
            self.ch_ctx_pool = None
            self.ch_title_cnn = None
            self.ch_title_pool = None


        # left and right context encoders w/ attention
        self.cell_size = 300

        self.left_rnn = layers.GRU(self.cell_size, return_sequences=True, name='left-rnn')
        self.right_rnn = layers.GRU(self.cell_size, return_sequences=True, name='right-rnn')

        self.left_attn = layers.Dense(self.cell_size, name='left-attn')
        self.right_attn = layers.Dense(self.cell_size, name='right-attn')

        self.lattn_dist = layers.TimeDistributed(self.left_attn, name='lattn-dist')
        self.rattn_dist = layers.TimeDistributed(self.right_attn, name='rattn-tdist')

        # binary classification layer
        self.reduce_layer = layers.Dense(1, activation='relu', name='final-reduce-layer')
Code Example #27
embedding_matrix = create_embedding_matrix("w2vec_wiki_id_300.txt",
                                           tokenizer.word_index, embedding_dim)
nonzero_elements = np.count_nonzero(np.count_nonzero(embedding_matrix, axis=1))
# print(nonzero_elements / vocab_size)

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import RandomizedSearchCV

# CNN with w2vec
cnn_model = Sequential()
cnn_model.add(
    layers.Embedding(vocab_size,
                     embedding_dim,
                     weights=[embedding_matrix],
                     input_length=maxlen))
cnn_model.add(layers.Conv1D(200, 5, activation="relu"))
cnn_model.add(layers.GlobalMaxPooling1D())
cnn_model.add(layers.Dense(10, activation="relu"))
cnn_model.add(layers.Dense(1, activation="sigmoid"))
cnn_model.compile(optimizer="adam",
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
cnn_model.summary()

history = cnn_model.fit(
    X_train,
    y_train,
    epochs=100,
    verbose=False,
    validation_data=(X_test, y_test),
    batch_size=10,
Code Example #28
yAv_train = y_Av[train_idx]
v_train = v[train_idx]

############## DMP Model Design
in_shp_yAv = np.shape(yAv_train)[1:]
in_shp_v = np.shape(v_train)[1:]
### construct neural network graph
n_channels = 32
input_yAv = layers.Input(shape=(10, ))
input_v = layers.Input(shape=(rows, cols))
yAv = layers.Reshape(in_shp_yAv + (1, ))(input_yAv)
v_in = layers.Reshape(in_shp_v + (1, ))(input_v)
for _ in range(3):
    yAv = layers.Conv1D(rows,
                        3,
                        data_format="channels_first",
                        activation='relu',
                        padding='same')(yAv)
for _ in range(3):
    yAv = layers.Conv1D(cols,
                        3,
                        data_format="channels_last",
                        activation='relu',
                        padding='same')(yAv)
yAv2D = layers.Reshape((rows, cols, 1))(yAv)
H_stack = layers.concatenate([yAv2D, v_in], axis=-1)
H = layers.Conv2D(n_channels, (3, 3), activation='relu',
                  padding='same')(H_stack)

for _ in range(5):
    H = layers.Conv2D(n_channels, (3, 3), activation='relu', padding='same')(H)
Code Example #29
np_load_old = np.load

np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)

max_features = 2000
max_len = 500

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)

model = keras.models.Sequential()

model.add(
    layers.Embedding(max_features, 128, input_length=max_len, name='embed'))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])

callbacks = [
    keras.callbacks.TensorBoard(log_dir='log_tensor', histogram_freq=1)
]

history = model.fit(x_train,
                    y_train,
                    epochs=20,
                    batch_size=128,
Code Example #30
print(input_train)
print(input_train.shape)


input_val=np.zeros((len(X_val),64,3))

input_val[:,:,0]=X_val[:,:64]
input_val[:,:,1]=X_val[:,64:128]
input_val[:,:,2]=X_val[:,128:]
print(input_val)
print(input_val.shape)
model = Sequential()

model.add(layers.Conv1D(512,
                        1,
                        activation='relu',
                        input_shape=(64, 3)))

model.add(Flatten())
 
model.add(layers.Dense(512,
                       activation='relu'
                       )
    )
model.add(layers.Dense(128,
                       activation='relu'
                       )
    )
#This is a multi-class problem, so we use a softmax output layer to produce one probability per class.
x=model.add(layers.Dense(99,
                       activation='softmax'