Code Example #1
File: model.py Project: zhang01GA/cloudml-samples
def sonar_model():
    model = Sequential()
    model.add(Dense(60, input_shape=(60,), activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(30, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    # Use the Binary Cross Entropy loss function for a Binary Classifier.
    # https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#compile
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    return model
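
For context, a minimal training sketch for this model. The X and y arrays below are hypothetical stand-ins for the 60-feature sonar dataset and are not part of the original file:

import numpy as np

# Hypothetical data: 208 samples, 60 features, binary labels (rock vs. mine)
X = np.random.rand(208, 60).astype('float32')
y = np.random.randint(0, 2, size=(208,))

model = sonar_model()
model.fit(X, y, epochs=10, batch_size=8, validation_split=0.2)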
Code Example #2
def siamese_network(input_shape=(105, 105, 1), classes=1):
    """Network Architecture"""
    left_input = layers.Input(shape=input_shape)
    right_input = layers.Input(shape=input_shape)

    # Creating the convnet which shares weights between the left and right legs of Siamese network
    siamese_convnet = Sequential()

    siamese_convnet.add(
        layers.Conv2D(filters=64,
                      kernel_size=10,
                      strides=1,
                      input_shape=input_shape,
                      activation='relu',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.01),
                      kernel_regularizer=l2(1e-2),
                      bias_initializer=RandomNormal(mean=0.5, stddev=0.01)))

    siamese_convnet.add(layers.MaxPooling2D(pool_size=(2, 2)))

    siamese_convnet.add(
        layers.Conv2D(filters=128,
                      kernel_size=7,
                      strides=1,
                      activation='relu',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.01),
                      kernel_regularizer=l2(1e-2),
                      bias_initializer=RandomNormal(mean=0.5, stddev=0.01)))

    siamese_convnet.add(layers.MaxPooling2D(pool_size=(2, 2)))

    siamese_convnet.add(
        layers.Conv2D(filters=128,
                      kernel_size=4,
                      strides=1,
                      activation='relu',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.01),
                      kernel_regularizer=l2(1e-2),
                      bias_initializer=RandomNormal(mean=0.5, stddev=0.01)))

    siamese_convnet.add(layers.MaxPooling2D(pool_size=(2, 2)))

    siamese_convnet.add(
        layers.Conv2D(filters=256,
                      kernel_size=4,
                      strides=1,
                      activation='relu',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.01),
                      kernel_regularizer=l2(1e-2),
                      bias_initializer=RandomNormal(mean=0.5, stddev=0.01)))

    siamese_convnet.add(layers.Flatten())

    siamese_convnet.add(
        layers.Dense(4096,
                     activation='sigmoid',
                     kernel_initializer=RandomNormal(mean=0, stddev=0.2),
                     kernel_regularizer=l2(1e-4),
                     bias_initializer=RandomNormal(mean=0.5, stddev=0.01)))

    encoded_left_input = siamese_convnet(left_input)
    encoded_right_input = siamese_convnet(right_input)

    l1_encoded = layers.Lambda(lambda x: tf.abs(x[0] - x[1]))(
        [encoded_left_input, encoded_right_input])

    output = layers.Dense(classes,
                          activation='sigmoid',
                          kernel_initializer=RandomNormal(mean=0, stddev=0.2),
                          bias_initializer=RandomNormal(
                              mean=0.5, stddev=0.01))(l1_encoded)

    return Model(inputs=[left_input, right_input], outputs=output)
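
A hedged usage sketch: the network consumes two image batches and scores pair similarity. The compile settings and random arrays below are illustrative assumptions, not taken from the original project:

import numpy as np

model = siamese_network()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Placeholder pairs: 16 examples of 105x105 grayscale images
left = np.random.rand(16, 105, 105, 1)
right = np.random.rand(16, 105, 105, 1)
labels = np.random.randint(0, 2, size=(16, 1))  # 1 = same class, 0 = different
model.fit([left, right], labels, epochs=1)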
Code Example #3

for j in [4, 8, 16, 32, 64, 128, 256, 512, 1024]:
    accr = []
    print(j)
    for i in range(1):
        kf = KFold(n_splits=10, shuffle=True, random_state=i + 2)
        for train_index, test_index in kf.split(x):
            x_train, x_test = x[train_index], x[test_index]
            y_train, y_test = y_1h[train_index], y_1h[test_index]

            # Scale features using statistics from the training fold only
            scaler = StandardScaler()
            scaler.fit(x_train)
            x_train = scaler.transform(x_train)
            x_test = scaler.transform(x_test)

            model = Sequential()
            model.add(Flatten())
            model.add(Dense(j, activation='relu'))
            model.add(Dense(j, activation='relu'))
            model.add(Dense(3, activation='softmax'))
            model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

            train = model.fit(x_train, y_train,    # Training
                              epochs=5, batch_size=1,
                              validation_data=(x_test, y_test), verbose=0)

            test = model.evaluate(x_test, y_test)   # Testing

            preds = model.predict(x_test)
            predict_class = np.argmax(preds, axis=1)
            true_class = np.argmax(y_test, axis=1)
Code Example #4
File: LSTM模型.py Project: ron-tsai/-
daily_test = DM.daily_test_data(daily_test_df)
fif_train = DM.fif_train_data(fif_train_df)
fif_test = DM.fif_test_data(fif_test_df)
target_train = DM.target_train_data(target_train_df)
target_test = DM.target_test_data(target_test_df)

wenben_long_term_train = DM.wenben_long_term_train_data(wenben_norm_train_df)
wenben_short_term_train = DM.wenben_short_term_train_data(wenben_norm_train_df)
wenben_long_term_test = DM.wenben_long_term_test_data(wenben_norm_test_df)
wenben_short_term_test = DM.wenben_short_term_test_data(wenben_norm_test_df)

print('Text data:', wenben_long_term_train)
print('Trading data:', daily_train)

model = Sequential()
model.add(LSTM(100, input_shape=(daily_train.shape[1], daily_train.shape[2])))

model.add(Dropout(0.02))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=keras.optimizers.Adam(learning_rate=1e-3),
              metrics=['acc'])
# fit network
history = model.fit(daily_train,
                    target_train,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=validation_split,
                    shuffle=False)

loss, accuracy = model.evaluate(daily_test, y=target_test)
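
The fit call above relies on epochs, batch_size, and validation_split being defined in a part of the file that is cut off here; purely illustrative values for a quick run might be:

# Illustrative hyperparameters; the original values are not shown
epochs = 50
batch_size = 32
validation_split = 0.1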
Code Example #5
    return precision


def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
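
f1_m builds on precision_m and recall_m, whose definitions are truncated above. The widely used Keras-backend versions, given here only as a plausible reconstruction, are:

def recall_m(y_true, y_pred):
    # Fraction of actual positives that were predicted positive
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())


def precision_m(y_true, y_pred):
    # Fraction of predicted positives that were actually positive
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())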


data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)

model = Sequential()
model.add(
    tf.keras.applications.ResNet50(include_top=False,
                                   input_shape=(96, 96, 3),
                                   weights='imagenet',
                                   input_tensor=None,
                                   pooling='avg',
                                   classes=2))
model.add(tf.keras.layers.Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.layers[0].trainable = False
model.compile(optimizer="sgd",
              loss='binary_crossentropy',
              metrics=['accuracy', f1_m, precision_m, recall_m])
model.summary()
train_generator = data_generator.flow_from_directory(
    r'.\dataset\transferlearningdata\train',
    target_size=(96, 96),
    color_mode='rgb',
Code Example #6
def create_classification_model(loss_func, learning_rate, dropout):
    model = Sequential()
    # Convolution and Maxpooling layers
    model.add(layers.Input((256, 256, 3)))
    # Convolution layer has 32 filters of size (3x3) (the filter size should be an odd number)
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))
    # Maxpooling layer has pool size (2, 2), with stride = pool size and padding = 'valid', meaning no zero padding is applied
    model.add(layers.MaxPooling2D((2, 2), strides=(2, 2), padding='valid'))  # down samples the feature map
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))  # output shape after layer: (28,28,32) (size 25088)
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))  # output shape after layer: (12,12,32) (size 4608)
    # Flatten output
    model.add(layers.Flatten())
    # Add dense layers
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dropout(dropout))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dropout(dropout))
    # The last layer is size 4 since we have four classes
    # Softmax activation is a standard for multi-class classification
    model.add(layers.Dense(4, activation='softmax'))
    opt = keras.optimizers.Adam(learning_rate=learning_rate)
    # The loss function is adapted for categorical classification and the accuracy metric is added
    model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])
    return model
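
A hedged call sketch for the builder above; the argument values are illustrative, not taken from the original project:

model = create_classification_model(loss_func='categorical_crossentropy',
                                    learning_rate=1e-3,
                                    dropout=0.3)
model.summary()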
Code Example #7
def convnet_size(optim, loss_func, n_conv_layers):
    """
    The function builds a CNN. The optimizer, loss function and number of convolution layers are specified via the
    parameters. The minimal number of convolution layers is 1; each additional convolution layer is accompanied by a
    MaxPooling layer.
    :param optim: optimizer passed to model.compile
    :param loss_func: loss function passed to model.compile
    :param n_conv_layers: total number of convolution layers (at least 1)
    :return: the compiled Sequential model
    """
    model = Sequential()
    # Convolution and Maxpooling layers
    model.add(layers.Input((256, 256, 3)))
    # Experimenting with the amount of convolution layers
    for i in range(n_conv_layers-1):
        model.add(layers.Conv2D(32, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2), strides=(2, 2), padding='valid'))
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))  # output shape after layer: (12,12,32) (size 4608)
    # Flatten output
    model.add(layers.Flatten())
    # Add dense layers
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer=optim, loss=loss_func, metrics=['MeanSquaredError'])
    return model
Code Example #8
def build_keras_rnn(sampling_rate,
                    feature_num,
                    using_seq_label: bool,
                    rnn_out_dim=128,
                    dropout_rate=0.5,
                    rnn_type=C.LSTM,
                    threshold=0.5) -> Sequential:
    """"""
    model_out = Sequential()

    # add RNN
    if rnn_type == C.LSTM:
        model_out.add(
            keras.layers.LSTM(units=rnn_out_dim,
                              input_shape=[sampling_rate, feature_num],
                              return_sequences=using_seq_label))
    elif rnn_type == C.GRU:
        model_out.add(
            keras.layers.GRU(units=rnn_out_dim,
                             input_shape=[sampling_rate, feature_num],
                             return_sequences=using_seq_label))
    elif rnn_type == C.LSTM_LSTM:
        # tutorial found here: https://machinelearningmastery.com/stacked-long-short-term-memory-networks/
        model_out.add(
            keras.layers.LSTM(rnn_out_dim,
                              return_sequences=True,
                              input_shape=[sampling_rate, feature_num]))
        model_out.add(
            keras.layers.LSTM(rnn_out_dim, return_sequences=using_seq_label))
    elif rnn_type == C.GRU_GRU:
        # tutorial found here: https://machinelearningmastery.com/stacked-long-short-term-memory-networks/
        model_out.add(
            keras.layers.GRU(rnn_out_dim,
                             return_sequences=True,
                             input_shape=[sampling_rate, feature_num]))
        model_out.add(
            keras.layers.GRU(rnn_out_dim, return_sequences=using_seq_label))
    else:
        raise NotImplementedError("RNN does not recognize {}".format(rnn_type))

    # add FFNN
    if using_seq_label:
        # feed hidden state at each time step to the same FFNN
        # tutorial see: https://machinelearningmastery.com/timedistributed-layer-for-long-short-term-memory-networks-in-python/
        model_out.add(
            TimeDistributed(Dropout(rate=dropout_rate, seed=C.RANDOM_SEED)))
        model_out.add(
            TimeDistributed(Dense(units=rnn_out_dim, activation='relu')))
        model_out.add(TimeDistributed(Dense(1, activation='sigmoid')))
    else:
        model_out.add(Dropout(rate=dropout_rate, seed=C.RANDOM_SEED))
        model_out.add(Dense(units=rnn_out_dim, activation='relu'))
        model_out.add(Dense(1, activation='sigmoid'))

    # compile model
    model_out = compile_model(model_out, threshold)
    return model_out
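
compile_model and the C constants come from elsewhere in this project and are not shown here. Purely as an assumption, a threshold-aware compile step might look like this:

from tensorflow import keras

def compile_model(model, threshold):
    # Hypothetical sketch: binary cross-entropy with accuracy measured at the
    # caller-supplied decision threshold
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=[keras.metrics.BinaryAccuracy(threshold=threshold)])
    return model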
Code Example #9

def build_model():
    model = Sequential()

    model.add(
        LSTM(units=60,
             activation='relu',
             return_sequences=True,
             input_shape=(X_train.shape[1], 5)))
    model.add(Dropout(0.2))

    model.add(LSTM(units=60, activation='relu', return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(units=80, activation='relu', return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(units=120, activation='relu'))
    model.add(Dropout(0.2))

    model.add(Dense(units=1))
    model.compile(optimizer='adam', loss='mean_squared_error')
    return model
Code Example #10
def build_alexnet(num_classes, img_size):
    """
    Build an AlexNet-style CNN
    :param num_classes: number of classes
    :param img_size: image size as tuple (width, height, 3) for rgb and (width, height, 1) for grayscale
    :return: model
    """
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
    model = Sequential()
    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               input_shape=img_size,
               activation='relu'))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    return model
Code Example #11
def build_lenet5(num_classes, img_size):
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Conv2D, AveragePooling2D, Flatten, Dense
    model = Sequential()
    model.add(
        Conv2D(filters=6,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=img_size))
    model.add(AveragePooling2D())
    model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
    model.add(AveragePooling2D())
    model.add(Flatten())
    model.add(Dense(120, activation='relu'))
    model.add(Dense(84, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    return model
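
A hedged usage sketch for the two builders above; the class counts and image sizes are chosen for illustration:

alexnet = build_alexnet(num_classes=10, img_size=(32, 32, 3))
lenet = build_lenet5(num_classes=10, img_size=(28, 28, 1))
lenet.summary()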
Code Example #12
def build_NN(train_data, batch_size, dropout):
    '''
    Define Model
    '''

    regressor = Sequential()

    input_shape = (train_data.shape[1], train_data.shape[2])

    regressor.add(
        LSTM(units=1024,
             activation='tanh',
             return_sequences=True,
             input_shape=input_shape))
    regressor.add(Dropout(dropout))

    regressor.add(Dense(units=1024, activation='tanh'))

    regressor.add(LSTM(units=512, activation='tanh', return_sequences=True))
    regressor.add(Dropout(dropout))

    regressor.add(LSTM(units=128, activation='tanh', return_sequences=False))
    regressor.add(Dropout(dropout))

    # regressor.add(Flatten())

    # regressor.add(Dense(units=64, activation='tanh'))

    regressor.add(Dense(units=1, activation='tanh'))

    regressor.summary()

    # plot_model(regressor, "plot_model.png")

    return regressor
Code Example #13
def model_mixer(train_data, batch_size, dropout, activation_case, layer_case,
                units_case):
    '''
    Builds a model from up to three configurable layers (each an LSTM or a
    TimeDistributed Dense, selected via layer_case and followed by Dropout),
    plus a final Dense output layer.
    '''

    model = Sequential()

    input_shape = (train_data.shape[1], train_data.shape[2])

    activation_dict = {0: 'relu', 1: 'tanh', 2: 'softmax', 3: 'exponential'}

    units_dict = {0: 40, 1: 60, 2: 80, 3: 100, 4: 120}

    # The activation placeholder is quoted so that eval() receives a string
    # literal such as 'relu' rather than an undefined bare name
    first_layer_dict = {
        0: 'model.add(LSTM(units={}, activation="{}", return_sequences=True, input_shape=input_shape))',
        1: 'model.add(TimeDistributed(Dense(units={}, activation="{}"), input_shape=input_shape))'
    }

    second_layer_dict = {
        0: 'model.add(LSTM(units={}, activation="{}", return_sequences=True))',
        1: 'model.add(TimeDistributed(Dense(units={}, activation="{}")))'
    }

    third_layer_dict = {
        0: 'model.add(LSTM(units={}, activation="{}", return_sequences=True))',
        1: 'model.add(TimeDistributed(Dense(units={}, activation="{}")))',
        2: None
    }

    # Add first layer
    first_layer = first_layer_dict.get(layer_case[0])
    first_layer = first_layer.format(units_dict.get(units_case[0]),
                                     activation_dict.get(activation_case))
    eval(first_layer)
    model.add(Dropout(dropout))

    # Add second layer
    second_layer = second_layer_dict.get(layer_case[1])
    second_layer = second_layer.format(units_dict.get(units_case[1]),
                                       activation_dict.get(activation_case))
    eval(second_layer)
    model.add(Dropout(dropout))

    # Add third layer
    third_layer = third_layer_dict.get(layer_case[2])
    if third_layer is not None:
        third_layer = third_layer.format(units_dict.get(units_case[2]),
                                         activation_dict.get(activation_case))
        eval(third_layer)
        model.add(Dropout(dropout))

    # Add Flatten and Dense layers to all Models
    # model.add(Flatten())
    model.add(Dense(units=1))

    model.summary()

    return model
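
A hedged call sketch; the case values below are illustrative. Assembling model.add(...) calls as strings works with eval() because a call is an expression, though a dict mapping cases directly to layer constructors would be less fragile:

import numpy as np

# Hypothetical input: 100 samples, 30 timesteps, 8 features
train_data = np.random.rand(100, 30, 8)
model = model_mixer(train_data, batch_size=32, dropout=0.2,
                    activation_case=1, layer_case=[0, 1, 0], units_case=[0, 1, 2])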
Code Example #14
def create_model(MLP_C_layer,
                 MLP_m_layer,
                 low_C,
                 up_C,
                 low_m,
                 up_m,
                 F,
                 a0RNN,
                 batch_input_shape,
                 selectaux,
                 selectdk,
                 myDtype,
                 return_sequences=False,
                 unroll=False):

    batch_adjusted_shape = (batch_input_shape[2] + 1, )  #Adding state
    placeHolder = Input(shape=(batch_input_shape[2] + 1, ))  #Adding state

    filterLayer = inputsSelection(batch_adjusted_shape, selectaux)(placeHolder)

    filterdkLayer = inputsSelection(batch_adjusted_shape,
                                    selectdk)(placeHolder)

    MLP_C_min = low_C
    MLP_C_range = up_C - low_C

    MLP_C_layer = MLP_C_layer(filterLayer)
    C_layer = Lambda(lambda x: ((x * MLP_C_range) + MLP_C_min))(MLP_C_layer)

    MLP_m_min = low_m
    MLP_m_range = up_m - low_m

    MLP_m_layer = MLP_m_layer(filterLayer)
    MLP_scaled_m_layer = Lambda(lambda x: ((x * MLP_m_range) + MLP_m_min))(
        MLP_m_layer)

    dk_input_shape = filterdkLayer.get_shape()

    dkLayer = StressIntensityRange(input_shape=dk_input_shape,
                                   dtype=myDtype,
                                   trainable=False)
    dkLayer.build(input_shape=dk_input_shape)
    dkLayer.set_weights([np.asarray([F], dtype=dkLayer.dtype)])
    dkLayer = dkLayer(filterdkLayer)

    ldK_layer = Lambda(lambda x: tf.math.log(x) /
                       (tf.math.log(tf.constant(10.))))(dkLayer)

    dKm_layer = Multiply()([MLP_scaled_m_layer, ldK_layer])

    aux_layer = Add()([C_layer, dKm_layer])

    da_layer = Lambda(lambda x: 10**(x))(aux_layer)

    functionalModel = Model(inputs=[placeHolder], outputs=[da_layer])
    "-------------------------------------------------------------------------"
    CDMCellHybrid = CumulativeDamageCell(model=functionalModel,
                                         batch_input_shape=batch_input_shape,
                                         dtype=myDtype,
                                         initial_damage=a0RNN)

    CDMRNNhybrid = RNN(cell=CDMCellHybrid,
                       return_sequences=return_sequences,
                       return_state=False,
                       batch_input_shape=batch_input_shape,
                       unroll=unroll)

    model = Sequential()
    model.add(CDMRNNhybrid)
    model.compile(loss='mse',
                  optimizer=RMSprop(learning_rate=1e-6),
                  metrics=['mae'])
    return model
Code Example #15
y = fashion.iloc[:, 0]
y.head()

# input shape for sequential neural networks
X.iloc[0].shape

# code target variable (y) to categorical
y_cat = to_categorical(y, num_classes=10)
y_cat.shape

# ### create a dense neural network with 6 layers

m2 = Sequential()

# add layers
m2.add(Dense(units=50, activation='elu', input_shape=(784, )))
for i in range(4):
    m2.add(Dense(units=50, activation='elu'))

m2.add(Dense(units=10, activation="softmax"))

m2.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
           optimizer='adam',
           metrics=['accuracy'])

m2.summary()

history2 = m2.fit(X, y_cat, batch_size=500, epochs=300, validation_split=0.2)

# save model
m2.save("Dense_layers_300.h5")
Code Example #16

X_train, y_train = np.array(X_train), np.array(y_train)

# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

# Part 2 - Building the RNN

# Importing the Keras libraries and packages
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout

# Initialising the RNN
regressor = Sequential()

# Adding the first LSTM layer and some Dropout regularisation
regressor.add(
    LSTM(units=60, return_sequences=True, input_shape=(X_train.shape[1], 1)))
regressor.add(Dropout(0.2))

# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units=60, return_sequences=True))
regressor.add(Dropout(0.2))

# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units=80, return_sequences=True))
regressor.add(Dropout(0.2))

# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units=120))
regressor.add(Dropout(0.2))

# Adding the output layer
Code Example #17
    # Sample 1000 random row positions (assumes numpy's randint, which takes a
    # size argument; the stdlib random.randint does not)
    rnd = np.random.randint(0, len(df_reg.index), 1000)
    df_reg = df_reg.iloc[rnd, :]

    # split into input and output columns
    X, y = df_reg.values[:, :-1], df_reg.values[:, -1]
    # split into train and test datasets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
    # determine the number of input features
    n_features = X_train.shape[1]

    # define model
    model = Sequential()
    model.add(
        Dense(20,
              activation='relu',
              kernel_initializer='he_normal',
              input_shape=(n_features, )))
    model.add(Dense(16, activation='relu', kernel_initializer='he_normal'))
    model.add(Dense(1))
    # compile the model
    model.compile(optimizer='adam', loss='mse')

    # fit the model
    history = model.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)

    # evaluate the model
    error = model.evaluate(X_test, y_test, verbose=0)
    print('MSE: %.3f, RMSE: %.3f' % (error, sqrt(error)))

    # make a prediction
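
A minimal continuation sketch for the prediction step the snippet ends on; using the first test row as input is purely illustrative:

    yhat = model.predict(X_test[:1])
    print('Predicted: %.3f' % yhat[0][0])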
Code Example #18
print(X_train.shape, y_train.shape)
unique_elements, counts_elements = np.unique(y_train, return_counts=True)
print(np.asarray((unique_elements, counts_elements)))

print('\n Testing Data')
print(X_test.shape, y_test.shape)
unique_elements, counts_elements = np.unique(y_test, return_counts=True)
print(np.asarray((unique_elements, counts_elements)))
"""# **Creating a CNN**"""

#input_shape=input_img

model = Sequential()
model.add(
    Conv2D(64,
           kernel_size=(5, 5),
           strides=(1, 1),
           activation='relu',
           input_shape=(223, 217, 3)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(128, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(7, activation='softmax'))

model.compile(loss=keras.losses.sparse_categorical_crossentropy,
              optimizer=keras.optimizers.SGD(learning_rate=0.01),
              metrics=['accuracy'])
Code Example #19
class CNNModel:
    def __init__(self):
        self.x_train = self.x_test = self.y_train = self.y_test = None
        self.history = None
        self.model = None
        self.model_dir = 'models/'

    def load_data(self):
        IMG = Image_Loader()
        [self.x_train, self.y_train, self.x_test,
         self.y_test] = IMG.load_images()
        print('Training and testing data loaded.')

    def build_model(self):
        x_shape = self.x_train[0].shape
        self.model = Sequential()
        self.model.add(
            layers.Conv2D(32, (3, 3),
                          activation='relu',
                          kernel_initializer='he_uniform',
                          padding='same',
                          input_shape=x_shape))
        self.model.add(layers.MaxPooling2D(2, 2))
        self.model.add(layers.Dropout(0.2))
        self.model.add(
            layers.Conv2D(64, (3, 3),
                          activation='relu',
                          kernel_initializer='he_uniform',
                          padding='same',
                          input_shape=x_shape))
        self.model.add(layers.MaxPooling2D(2, 2))
        self.model.add(layers.Dropout(0.2))
        self.model.add(
            layers.Conv2D(128, (3, 3),
                          activation='relu',
                          kernel_initializer='he_uniform',
                          padding='same',
                          input_shape=x_shape))
        self.model.add(layers.MaxPooling2D(2, 2))
        self.model.add(layers.Dropout(0.2))
        self.model.add(layers.Flatten())
        self.model.add(
            layers.Dense(128,
                         activation='relu',
                         kernel_initializer='he_uniform'))
        self.model.add(layers.Dropout(0.2))
        # Softmax output for 10 mutually exclusive classes; Dropout belongs
        # before the output layer, not after it
        self.model.add(layers.Dense(10, activation='softmax'))
        self.model.compile(loss='categorical_crossentropy',
                           metrics=['accuracy'],
                           optimizer='adam')
        self.model.summary()

    def train(self):
        num_epochs = 100
        num_batches = 64
        self.history = self.model.fit(self.x_train,
                                      self.y_train,
                                      batch_size=num_batches,
                                      epochs=num_epochs,
                                      validation_data=(self.x_test,
                                                       self.y_test),
                                      callbacks=[TqdmCallback()])

    def eval_model(self):
        try:
            score = self.model.evaluate(self.x_train, self.y_train)
            print("Training Loss: ", score[0])
            print("Training Accuracy: ", score[1])
            score = self.model.evaluate(self.x_test, self.y_test)
            print("Testing Loss: ", score[0])
            print("Testing Accuracy: ", score[1])

            if (self.history):
                plt.subplot(1, 2, 1)
                plt.plot(self.history.history['accuracy'], label='accuracy')
                plt.plot(self.history.history['val_accuracy'],
                         label='val_accuracy')
                plt.title('Training and Validation Accuracy')
                plt.xlabel('Epoch')
                plt.ylabel('Accuracy')
                plt.ylim([0.7, 1])
                plt.legend(loc='lower right')

                plt.subplot(1, 2, 2)
                plt.plot(self.history.history['loss'], label='loss')
                plt.plot(self.history.history['val_loss'], label='val_loss')
                plt.title('Training and Validation Loss')
                plt.xlabel('Epoch')
                plt.ylabel('Loss')
                plt.ylim([0.7, 1])
                plt.legend(loc='upper right')
        except Exception:
            if (self.x_train is None):
                print(
                    'Train and test data not loaded. Run CNNModel.load_data().'
                )
            else:
                print(
                    'Please make sure train and test data are loaded correctly.'
                )

    def load_image(self, image, url=0):
        # set url to 1 if image is from internet
        if url:
            resp = get(image)
            img_bytes = BytesIO(resp.content)
            img = load_img(img_bytes, target_size=(32, 32))
        else:
            img = load_img(image, target_size=(32, 32))
        img_pix = asarray(img)
        img_pix = img_pix.reshape(1, 32, 32, 3)
        img_pix = img_pix.astype('float32')
        img_pix = img_pix / 255.0
        print('Image loaded.')
        return img_pix

    def predict(self, img_pix):
        # predict_classes/predict_proba were removed from tf.keras; derive the
        # class and its confidence from the raw probability predictions
        y_prob = self.model.predict(img_pix)
        y_pred = int(y_prob[0].argmax())
        confidence = y_prob[0][y_pred]
        pred_dict = {'prediction': str(y_pred), 'confidence': str(confidence)}
        return pred_dict

    def serialize(self, filename):
        file = self.model_dir + filename
        self.model.save(file, save_format='h5')
        print('Model saved.')

    def deserialize(self, filename):
        file = self.model_dir + filename
        model = tf.keras.models.load_model(file)
        self.model = model
        print('Model loaded.')
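
A hedged end-to-end usage sketch of the class above; the filename is illustrative:

cnn = CNNModel()
cnn.load_data()
cnn.build_model()
cnn.train()
cnn.eval_model()
cnn.serialize('cifar_cnn.h5')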
Code Example #20
def create_model(loss_func, learning_rate, dropout):
    model = Sequential()
    # Convolution and Maxpooling layers
    model.add(layers.Input((256, 256, 3)))
    # Convolution layer has 32 filters of size (3x3) (the filter size should be an odd number)
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))
    # Maxpooling layer has pool size (2, 2), with stride = pool size and padding = 'valid', meaning no zero padding is applied
    model.add(layers.MaxPooling2D((2, 2), strides=(2, 2), padding='valid'))  # down samples the feature map
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))  # output shape after layer: (28,28,32) (size 25088)
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))  # output shape after layer: (12,12,32) (size 4608)
    # Flatten output
    model.add(layers.Flatten())
    # Add dense layers
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dropout(dropout))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dropout(dropout))
    # The last layer is size 1, since the output is a continuous value. Also, we do not specify an activation function
    # since it is a regression task and the y-values are not transformed
    model.add(layers.Dense(1))
    opt = keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(optimizer=opt, loss=loss_func)
    return model
Code Example #21
with open('validation_x.pickle', 'rb') as f:
    validation_x = pickle.load(f)

with open('validation_y.pickle', 'rb') as f:
    validation_y = pickle.load(f)

with open('train_x.pickle', 'rb') as f:
    train_x = pickle.load(f)

with open('train_y.pickle', 'rb') as f:
    train_y = pickle.load(f)

model = Sequential()

model.add(
    CuDNNLSTM(128, input_shape=train_x.shape[1:], return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())

# input_shape is only required on the first layer; Keras infers it afterwards
model.add(CuDNNLSTM(128, return_sequences=True))
model.add(Dropout(0.1))
model.add(BatchNormalization())

model.add(CuDNNLSTM(128))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
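
The snippet cuts off before the output layer and compile step; one plausible completion for a two-class sequence classifier, offered purely as a sketch:

# Hypothetical completion; the original output layer and compile settings are not shown
model.add(Dense(2, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])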
Code Example #22
args = parser.parse_args()

trainX = []
trainY = []
testX = []
testY = []

### Import data
#   Done if the network is told to Train
if args.action.lower() == "train":
    data = pd.read_csv(dataset_path + "trimmed_" + args.type + ".csv", header=0, index_col=0)
    trainX, trainY, testX, testY = split_data(data)   # collect data

    ## build model
    model = Sequential()
    model.add(LSTM(units=100, return_sequences=True, input_shape=(None, 5)))
    #model.add(LSTM(units=30, return_sequences=True))
    model.add(LSTM(units=100))
    model.add(Dense(units=5))
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])

    trained_model = train(model, trainX, trainY, 20)   # train model

    model.save("/Users/samyakovlev/Desktop/"+args.type+"_model.h5")   #save model per vessel type

elif args.action.lower() == "test":
    model = load_model("/Users/samyakovlev/Desktop/"+args.type+"_model.h5")
    model.summary()                                 # Quick output to show the model was loaded

    data = pd.read_csv(dataset_path + "trimmed_" + args.type + ".csv", header=0, index_col=0)
    trainX, trainY, testX, testY = split_data(data)    # collect data
Code Example #23
def buildCnnModel(self, kwargs_list, layer_orders, out_dim):
    """
    Convert kwargs_list into a CNN model.
    kwargs_list and layer_orders should have the same length.
    """
    cnn = Sequential()
    for i, lo in enumerate(layer_orders):
        kwargs = kwargs_list[i]
        if lo == "Dense":
            cnn.add(Dense(**kwargs))
        elif lo == "Conv2D":
            cnn.add(Conv2D(**kwargs))
        elif lo == "MaxPooling2D":
            cnn.add(MaxPooling2D(**kwargs))
        elif lo == "Dropout":
            cnn.add(Dropout(**kwargs))
        elif lo == "Flatten":
            cnn.add(Flatten())
    cnn.add(Dense(out_dim, activation='softmax'))
    kwargs = kwargs_list[-1]
    cnn.compile(metrics=['accuracy'], **kwargs["Compile"])
    return cnn
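
A hedged usage sketch showing the expected shape of kwargs_list and layer_orders (all values illustrative). Because the Flatten branch ignores its kwargs, the final slot can carry the "Compile" settings that the method reads from kwargs_list[-1]:

layer_orders = ["Conv2D", "MaxPooling2D", "Flatten"]
kwargs_list = [
    {"filters": 32, "kernel_size": (3, 3), "activation": "relu",
     "input_shape": (28, 28, 1)},
    {"pool_size": (2, 2)},
    {"Compile": {"loss": "categorical_crossentropy", "optimizer": "adam"}},
]
# builder: an instance of the enclosing (unnamed) class
cnn = builder.buildCnnModel(kwargs_list, layer_orders, out_dim=10)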
Code Example #24
train_correlated = train[col_mask]

#Data samples
inputs = train_correlated.drop('SalePrice', axis='columns')
targets = train["SalePrice"]
train_id = train["Id"]

test_id = test['Id']
test_inputs = test[inputs.columns]

### Model ###

model = Sequential()

model.add(Dense(10, activation='relu'))
model.add(Dense(300, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(100, activation='relu'))

model.add(Dense(1, activation='linear'))

model.compile(loss='MSE',
              optimizer=Adam(learning_rate=LEARN_RATE),
              metrics=['accuracy'])

### Training ##

history = model.fit(inputs, targets, validation_split=VAL_SPLIT, epochs=EPOCHS)

plt.figure()
Code Example #25

        # Note: unlike numpy, we cannot simply assign a zeros array first,
        # because we are not allowed to assign afterwards to the Tensor,
        # so we just initialize by treating j == 0 separately
        for j in range(self.kernel_size):
            if j == 0:
                z = self.w[j] * tf.roll(inputs, shift=j - j0, axis=1)
            else:
                z += self.w[j] * tf.roll(inputs, shift=j - j0, axis=1)
        return z


# In[3]:


Net = Sequential()
Net.add(PeriodicConvolution(kernel_size=3))

Net.compile(loss='mean_squared_error', optimizer='adam')


# In[4]:


y_in=np.array([[0.,0.,3.,0.,0.]])


# In[5]:


y_out=Net.predict_on_batch(y_in)
print(y_out)
Code Example #26
def create_model(training_data, training_labels):
    print('Creating new model ...')
    model = Sequential()
    model.add(
        layers.Conv2D(
            32,
            (2, 2),
            activation='relu',
            #Or training_data.shape[1], training_data.shape[2]
            input_shape=(FREQ_BINS, TIME_PERIODS, 1)))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(64, (2, 2), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(128, (2, 2), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(2, activation='softmax'))
    # model.summary()

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    history = model.fit(training_data, training_labels, epochs=10, verbose=1)

    print('Done training ...')
    model.save('ml/model/trained_model/CNN_COUGH_COVID_DETECTOR_MODEL_tf',
               save_format='tf')
    print('Model saved ...')
    #Add log file containing: Size of training data, accuracy and other metrics
    log.modelLogs(history=history, size=training_data.shape[0])
    print('Log created ...')
Code Example #27

NAME = "{}-conv-{}-nodes-{}-dense-{}-epochs-{}-kernel-{}".format(
    conv_layers,
    layer_sizes,
    dense_layers,
    EPOCHS,
    KERNEL_SIZE,
    int(time.time()))
tboard_dir = os.path.join("logs", NAME)
tensorboard = TensorBoard(log_dir=tboard_dir)
earlystop_callback = EarlyStopping(monitor='val_accuracy', min_delta=0.01,
                                   patience=5)

model = Sequential()

model.add(Convolution2D(layer_sizes, KERNEL_SIZE, input_shape=train_X.shape[1:], activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(layer_sizes, KERNEL_SIZE, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(layer_sizes, KERNEL_SIZE, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(layer_sizes, KERNEL_SIZE, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(256, activation='relu'))
Code Example #28

# You might very well be needing it!
# Remember to save only what is worth it from a validation perspective...
# model_saver = ModelCheckpoint(...)

# If you need it...
#def schedule(epoch, lr):
#    ...
#    return lr

#lr_scheduler = LearningRateScheduler(schedule)

# Build your whole LSTM model here!
model = Sequential()

model.add(CuDNNLSTM(LSTM_CELL_SIZE, input_shape=(TIME_WINDOW, column_count), stateful=False))
model.add(Dense(1, activation="linear"))


# For the shape, remember: we have a variable defining the "window" and the features in the window...

model.compile(loss='mean_squared_error', optimizer='sgd')
# Fit on the train data
# USE the batch size parameter!
# Use validation data - warning, a tuple of stuff!
# Epochs as deemed necessary...
# You should probably avoid shuffling the data.
# You can use the callbacks for LR schedule or model saving as seems fit.
history = model.fit(X_train_rolled, y_train_rolled, batch_size=BATCH_SIZE, epochs=EPOCHS,
                    validation_data=(X_valid_rolled, y_valid_rolled), shuffle=False)
Code Example #29
def model_OrderPrediction_SIAMESE(the_input_shape, units_dense_layers_1,
                                  units_dense_layers_2, dropout_rate_1,
                                  dropout_rate_2, learning_rate, momentum):
    # Define the 4 inputs of the model
    input_1 = Input(shape=the_input_shape)
    input_2 = Input(shape=the_input_shape)
    input_3 = Input(shape=the_input_shape)
    input_4 = Input(shape=the_input_shape)

    #CaffeNet
    base_model = Sequential(name='CaffeNet')

    base_model.add(
        Conv2D(filters=96,
               kernel_size=(11, 11),
               strides=(4, 4),
               padding='valid',
               data_format='channels_last',
               activation='relu',
               input_shape=the_input_shape,
               name='Conv2D_1_CaffeNet'))
    base_model.add(
        MaxPooling2D(pool_size=(3, 3),
                     strides=(2, 2),
                     padding='valid',
                     data_format='channels_last',
                     name='MaxPooling2D_1_CaffeNet'))
    base_model.add(BatchNormalization())

    base_model.add(
        Conv2D(filters=256,
               kernel_size=(5, 5),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu',
               name='Conv2D_2_CaffeNet'))
    base_model.add(
        MaxPooling2D(pool_size=(3, 3),
                     strides=(2, 2),
                     padding='valid',
                     data_format='channels_last',
                     name='MaxPooling2D_2_CaffeNet'))
    base_model.add(BatchNormalization())

    base_model.add(
        Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu',
               name='Conv2D_3_CaffeNet'))

    base_model.add(
        Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu',
               name='Conv2D_4_CaffeNet'))

    base_model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu',
               name='Conv2D_5_CaffeNet'))

    base_model.add(
        MaxPooling2D(pool_size=(3, 3),
                     strides=(2, 2),
                     padding='valid',
                     data_format='channels_last',
                     name='MaxPooling2D_3_CaffeNet'))

    # The 4 inputs are passed through the base model (computing the various convolutions)
    output_1 = base_model(input_1)
    output_2 = base_model(input_2)
    output_3 = base_model(input_3)
    output_4 = base_model(input_4)

    flatten = Flatten(name='Flatten_OrderPrediction')

    # Obtain the feature vectors for the 4 inputs
    features_1 = flatten(output_1)
    features_2 = flatten(output_2)
    features_3 = flatten(output_3)
    features_4 = flatten(output_4)

    # Dense layer used to summarize the features extracted by the convolutional layers for each frame
    dense_1 = Dense(units=units_dense_layers_1,
                    activation='relu',
                    name='FC_1_OrderPrediction')

    features_1 = dense_1(features_1)
    features_2 = dense_1(features_2)
    features_3 = dense_1(features_3)
    features_4 = dense_1(features_4)

    dropout_1 = Dropout(dropout_rate_1, name='Dropout_1_OrderPrediction')

    features_1 = dropout_1(features_1)
    features_2 = dropout_1(features_2)
    features_3 = dropout_1(features_3)
    features_4 = dropout_1(features_4)

    Features_12 = concatenate([features_1, features_2])
    Features_13 = concatenate([features_1, features_3])
    Features_14 = concatenate([features_1, features_4])
    Features_23 = concatenate([features_2, features_3])
    Features_24 = concatenate([features_2, features_4])
    Features_34 = concatenate([features_3, features_4])

    # Dense layer that learns the relationship between the features of the different frames
    dense_2 = Dense(units=units_dense_layers_2,
                    activation='relu',
                    name='FC_2_OrderPrediction')

    RelationShip_1_2 = dense_2(Features_12)
    RelationShip_1_3 = dense_2(Features_13)
    RelationShip_1_4 = dense_2(Features_14)
    RelationShip_2_3 = dense_2(Features_23)
    RelationShip_2_4 = dense_2(Features_24)
    RelationShip_3_4 = dense_2(Features_34)

    dropout_2 = Dropout(dropout_rate_2, name='Dropout_2_OrderPrediction')

    RelationShip_1_2 = dropout_2(RelationShip_1_2)
    RelationShip_1_3 = dropout_2(RelationShip_1_3)
    RelationShip_1_4 = dropout_2(RelationShip_1_4)
    RelationShip_2_3 = dropout_2(RelationShip_2_3)
    RelationShip_2_4 = dropout_2(RelationShip_2_4)
    RelationShip_3_4 = dropout_2(RelationShip_3_4)

    # Concatenation of all the relationships
    Features_Final = concatenate([
        RelationShip_1_2, RelationShip_1_3, RelationShip_1_4, RelationShip_2_3,
        RelationShip_2_4, RelationShip_3_4
    ])

    prediction = Dense(units=12,
                       activation='softmax',
                       name='FC_Final_OrderPrediction')(Features_Final)

    siamese_model = Model(inputs=[input_1, input_2, input_3, input_4],
                          outputs=prediction)

    siamese_model.summary()

    optimizer = SGD(learning_rate=learning_rate, momentum=momentum)

    siamese_model.compile(optimizer=optimizer,
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])

    return siamese_model
Code Example #30
from tensorflow.keras.datasets import imdb
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Embedding, Flatten
from tensorflow.keras import preprocessing
import tensorflow as tf

max_features = 10000
max_len = 20
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=max_len)
x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=max_len)

model = Sequential()
model.add(Embedding(10000, 8, input_length=max_len))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))  # sigmoid: a softmax over a single unit is constantly 1
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()
tf_callback = tf.keras.callbacks.TensorBoard(log_dir='logs')
history = model.fit(x_train,
                    y_train,
                    epochs=10,
                    batch_size=32,
                    validation_split=0.2,
                    callbacks=[tf_callback])
Code Example #31
class ExpertApprenticeAgent(Agent):
    def __init__(
        self,
        max_iteration: int,
        action_space_size: int,
        keep_memory: bool = True,
        apprentice_training_before_takeover: int = 100,
    ):
        self.max_iteration = max_iteration
        self.keep_memory = keep_memory
        self.memory = dict()
        self.brain = Sequential()
        self.brain.add(Dense(64, activation=relu))
        self.brain.add(Dense(64, activation=relu))
        self.brain.add(Dense(64, activation=relu))
        self.brain.add(Dense(action_space_size, activation=softmax))
        self.brain.compile(optimizer=Adam(), loss=mse)
        self.apprentice_training_before_takeover = apprentice_training_before_takeover
        self.apprentice_training_count = 0

        self.states_buffer = []
        self.actions_buffer = []

    @staticmethod
    def create_node_in_memory(memory, node_hash, available_actions, current_player):
        memory[node_hash] = [
            {"r": 0, "n": 0, "np": 0, "a": a, "p": current_player}
            for a in available_actions
        ]

    @staticmethod
    def ucb_1(edge):
        return edge["r"] / edge["n"] + sqrt(2 * log(edge["np"]) / edge["n"])

    def act(self, gs: GameState) -> int:

        if self.apprentice_training_count > self.apprentice_training_before_takeover:
            return gs.get_available_actions(gs.get_active_player())[
                np.argmax(
                    self.brain.predict(np.array([gs.get_vectorized_state()]))[0][
                        gs.get_available_actions(gs.get_active_player())
                    ]
                )
            ]

        root_hash = gs.get_unique_id()
        memory = self.memory if self.keep_memory else dict()

        if root_hash not in memory:
            ExpertApprenticeAgent.create_node_in_memory(
                memory,
                root_hash,
                gs.get_available_actions(gs.get_active_player()),
                gs.get_active_player(),
            )

        for i in range(self.max_iteration):
            gs_copy = gs.clone()
            s = gs_copy.get_unique_id()
            history = []

            # SELECTION
            while not gs_copy.is_game_over() and all(
                (edge["n"] > 0 for edge in memory[s])
            ):
                chosen_edge = max(
                    ((edge, ExpertApprenticeAgent.ucb_1(edge)) for edge in memory[s]),
                    key=lambda kv: kv[1],
                )[0]
                history.append((s, chosen_edge))

                gs_copy.step(gs_copy.get_active_player(), chosen_edge["a"])
                s = gs_copy.get_unique_id()
                if s not in memory:
                    ExpertApprenticeAgent.create_node_in_memory(
                        memory,
                        s,
                        gs_copy.get_available_actions(gs_copy.get_active_player()),
                        gs_copy.get_active_player(),
                    )

            # EXPANSION
            if not gs_copy.is_game_over():
                chosen_edge = choice(
                    list(filter(lambda e: e["n"] == 0, (edge for edge in memory[s])))
                )

                history.append((s, chosen_edge))
                gs_copy.step(gs_copy.get_active_player(), chosen_edge["a"])
                s = gs_copy.get_unique_id()
                if s not in memory:
                    ExpertApprenticeAgent.create_node_in_memory(
                        memory,
                        s,
                        gs_copy.get_available_actions(gs_copy.get_active_player()),
                        gs_copy.get_active_player(),
                    )

            # SIMULATION
            while not gs_copy.is_game_over():
                gs_copy.step(
                    gs_copy.get_active_player(),
                    choice(gs_copy.get_available_actions(gs_copy.get_active_player())),
                )

            scores = gs_copy.get_scores()
            # PROPAGATE THE SCORE BACK UP
            for (s, edge) in history:
                edge["n"] += 1
                edge["r"] += scores[edge["p"]]
                for neighbour_edge in memory[s]:
                    neighbour_edge["np"] += 1

        target = np.zeros(gs.get_action_space_size())

        for edge in memory[root_hash]:
            target[edge["a"]] = edge["n"]

        target /= np.sum(target)

        self.states_buffer.append(gs.get_vectorized_state())
        self.actions_buffer.append(target)

        if len(self.states_buffer) > 200:
            self.apprentice_training_count += 1
            self.brain.fit(
                np.array(self.states_buffer), np.array(self.actions_buffer), verbose=0
            )
            self.states_buffer.clear()
            self.actions_buffer.clear()

        if self.apprentice_training_count > self.apprentice_training_before_takeover:
            print("Apprentice is playing next round")

        return max((edge for edge in memory[root_hash]), key=lambda e: e["n"])["a"]

    def observe(self, r: float, t: bool, player_index: int):
        pass

    def save_model(self, filename: str):
        self.brain.save(f"{filename}.h5")

    def load_model(self, filename: str):
        self.brain = load_model(filename)