Example #1
    def __init__(self, game: GridGame):
        super().__init__(game)
        # game params
        self.board_height = game.board_height
        self.board_width = game.board_width
        example_board = game.create_board()
        self.action_size = len(game.get_valid_moves(example_board))
        self.epochs_completed = 0
        self.epochs_to_train = 100
        args = Namespace(lr=0.001,
                         dropout=0.3,
                         epochs=10,
                         batch_size=64,
                         num_channels=512)
        self.checkpoint_name = 'random weights'
        self.args = args

        num_channels = 512
        kernel_size = [3, 3]
        dropout = 0.3
        model = Sequential()
        # regularizer = regularizers.l2(0.00006)
        regularizer = regularizers.l2(0.0001)
        model.add(Conv2D(num_channels,
                         kernel_size,
                         padding='same',
                         activation='relu',
                         input_shape=(self.board_height, self.board_width, 1),
                         activity_regularizer=regularizer))
        model.add(Conv2D(num_channels,
                         kernel_size,
                         padding='same',
                         activation='relu',
                         activity_regularizer=regularizer))
        model.add(Conv2D(num_channels,
                         kernel_size,
                         activation='relu',
                         activity_regularizer=regularizer))
        model.add(Conv2D(num_channels,
                         kernel_size,
                         activation='relu',
                         activity_regularizer=regularizer))
        model.add(Dropout(dropout))
        model.add(Dropout(dropout))
        model.add(Flatten())
        model.add(Dense(self.action_size + 1))
        model.compile('adam', 'mean_squared_error')
        self.model = model
Example #2
                                                    batch_size=batch_size)

    model = Sequential()
    model.add(
        SeparableConv1D(30,
                        kernel_size_first_layer,
                        input_shape=(peek_interval_days, 1),
                        activation="relu"))
    model.add(AveragePooling1D())
    model.add(SeparableConv1D(10, kernel_size_second_layer, activation="relu"))
    model.add(AveragePooling1D())
    model.add(SeparableConv1D(3, kernel_size_third_layer, activation="relu"))
    model.add(GlobalAveragePooling1D())
    model.add(Dense(1, activation="sigmoid"))
    model.compile(optimizer=Adam(),
                  loss="binary_crossentropy",
                  metrics=["accuracy"])

    model.build()
    model.summary()
    history = model.fit_generator(
        train_generator,
        epochs=1,
        validation_data=validation_generator,
        class_weight={
            0: 0.5,  # false
            1: 0.5  # true
        },
        callbacks=[EarlyStopping(monitor="val_loss", patience=5)])

    def plot_history(history):
Example #3

# Custom loss function
def my_cat_crossentropy(target,output,from_logits=False,axis=-1):
	return tf.nn.softmax_cross_entropy_with_logits_v2(labels=target,logits=output)
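
For reference, tf.nn.softmax_cross_entropy_with_logits_v2 is the TF 1.x name of this op; TF 2.x exposes it without the _v2 suffix. A minimal sketch of the same wrapper under TF 2.x (an assumption about the target environment, not part of the original code):

# Hypothetical TF 2.x port of my_cat_crossentropy: the _v2 suffix was dropped,
# so the same softmax cross-entropy op is called directly.
def my_cat_crossentropy_tf2(target, output, from_logits=False, axis=-1):
    return tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=output)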

my_batch_size = 20

# Model definition:
my_model = Sequential()
my_model.add(Dense(15, input_dim=30, kernel_initializer='glorot_normal'))
my_model.add(Dropout(0.1))
my_model.add(Dense(2, kernel_initializer='glorot_normal',kernel_regularizer=tf.keras.regularizers.l2(l=0.01)))

# Training the model using a multi-stage optimiser:
#Stage 1
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.005,beta1=0.85,beta2=0.95), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=100, verbose=VERBOSE, shuffle=True)

#Stage 2
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.001,beta1=0.9,beta2=0.99), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=150, verbose=VERBOSE, shuffle=True)# new

#Stage 3
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.0005,beta1=0.9,beta2=0.99), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=200, verbose=VERBOSE, shuffle=True)# new

#Stage 4
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.0001,beta1=0.9,beta2=0.99), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=250, verbose=VERBOSE, shuffle=True)# new

#Evaluate model on training data
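
The four compile/fit stages above implement a step-wise learning-rate decay by rebuilding the optimizer each time; note that recompiling also resets Adam's moment estimates and lets the betas change after stage 1. A sketch of a similar schedule in a single fit call, assuming tf.keras, fixed betas, and the cumulative stage boundaries (100/250/450/700 epochs):

from tensorflow.keras.callbacks import LearningRateScheduler

# Hypothetical single-run alternative: unlike the recompile approach above,
# Adam's internal state is NOT reset between stages here.
def staged_lr(epoch, lr):
    if epoch < 100:
        return 0.005
    elif epoch < 250:
        return 0.001
    elif epoch < 450:
        return 0.0005
    return 0.0001

# my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=700,
#              shuffle=True, callbacks=[LearningRateScheduler(staged_lr)])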
Example #4
                end = n_samples
            batch_xs = nextBatch(train_x[start:end, :], n_outputs)
            batch_ys = np.reshape(train_y[start:end], (int(batch_size / n_outputs), n_outputs))

            yield batch_xs, batch_ys

start = datetime.datetime.now()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
KTF.set_session(session)

# mlp.compile(optimizer=Adam(learning_rate=lr),
mlp.compile(optimizer=Adam(lr=lr),
            loss=my_loss,
            metrics=[my_metric])

# file_name_trained = '/home/yezhizi/Documents/TianchiMetro/code/ckpt/lr: 0.0001-batch_size: 93312-l2_param: 0.01-dropout: 0.8-training_epochs: 10000-n_inputs: 239-n_outputs: 3-n_hidden: []-n_mlp: [400, 400, 400]-n_samples2146176-weights[10000, 100, 1]/mlp-ep7600-loss88.766-val_loss98.917-lr: 0.0001-batch_size: 93312-l2_param: 0.01-dropout: 0.8-training_epochs: 10000-n_inputs: 239-n_outputs: 3-n_hidden: []-n_mlp: [400, 400, 400]-n_samples2146176-weights[10000, 100, 1].h5'
# mlp.load_weights(file_name_trained)

if mode == constants.TRAIN:

    steps_per_epoch = int(np.ceil(n_samples / batch_size))
    check_point = ModelCheckpoint(file_name, monitor='val_my_metric', verbose=0,
                                  save_best_only=True,
                                  save_weights_only=False, mode='auto', period=1)
    tensor_board = TensorBoard(log_dir='logs/' + file_name[:-3] + '/', histogram_freq=0, write_graph=True,
                               write_images=False)

    result = mlp.fit_generator(generator=generator(steps_per_epoch), steps_per_epoch=steps_per_epoch,
))

Example #5

model.add(Conv2D(64, 3, strides=2, padding='same', activation='relu'))
model.add(MaxPooling2D(2, 2, 'same'))
model.add(Conv2D(128, 3, strides=2, padding='same', activation='relu'))
model.add(MaxPooling2D(2, 2, 'same'))

# Flatten the output of the second pooling layer into 1 dimension
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))

# Define the optimizer
sgd = SGD(lr=0.01)

# Define the optimizer and loss function; track accuracy during training
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train_data, y_train_data, batch_size=64, epochs=100)

# Evaluate the model
loss, accuracy = model.evaluate(x_test_data, y_test_data)

model_path = "./weights/mnist_cnn.h5"
model.save(model_path, include_optimizer=False, save_format='h5')

print('test loss', loss)
print('test accuracy', accuracy)
Example #6

model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.43404126142165134))

model.add(Flatten())
model.add(Dense(256, activation='sigmoid'))
model.add(Dropout(0.6975075528853121))
model.add(Dense(2, activation='softmax'))

model.summary()

# Model compilation and training

model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=["accuracy"])
hist = model.fit(X_train, y_train, epochs=7, batch_size=256, shuffle=True, validation_data=(X_test, y_test))

# Illustration of loss per epoch

plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper right')
plt.show()

# Illustration of accuracy per epoch

plt.plot(hist.history['accuracy'])
"""### Applying CNN"""

from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.optimizers import Adam

model = Sequential([
    Dense(16, input_shape=(46, ), activation='relu'),
    Dense(32, activation='relu'),
    Dense(3, activation='softmax')
])

model.summary()

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, batch_size=10, epochs=15)

y_pred = model.predict_classes(x_val)

y_pred

np.array(list(y_val['relevance']))

from sklearn.metrics import accuracy_score

accuracy_score(np.array(list(y_val['relevance'])), y_pred)

y_pred_test = model.predict_classes(x_test)
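
Note that Sequential.predict_classes was removed from later tf.keras releases; for a softmax head, the equivalent under newer versions is an argmax over the predicted probabilities (a sketch, assuming the same model and inputs):

# Hypothetical predict_classes replacement for newer tf.keras:
y_pred = np.argmax(model.predict(x_val), axis=-1)
y_pred_test = np.argmax(model.predict(x_test), axis=-1)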
Example #8
import numpy as np
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Dense

# 1.load training/test data
dataset = np.loadtxt('./dataset/pimaindians-diabetes.csv', delimiter=',')
x = np.array(dataset[:, 0:8])
t = np.array(dataset[:, 8])
# print(x.shape, t.shape)

# 2. model frame config
model = Sequential()
model.add(Dense(50, input_dim=x.shape[1], activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# 3. model fitting config
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# 4. model fitting
history = model.fit(x, t, epochs=200, batch_size=10)

# 5. result
loss = history.history['loss']
result = model.evaluate(x, t, verbose=0)
print(f'\n(Loss, Accuracy) = ({result[0]}, {result[1]})')

# 6. predict
data = np.array([[13, 106, 72, 54, 0, 36.6, 0.178, 45]])
predict = model.predict(data)
percentage = float(predict[0] * 100)
print(f'\nProbability of developing diabetes: {percentage:.2f}%')
Example #9
class RegressionSeries:
    def readcsv(self, path):
        df = pd.read_csv(path,
                         sep=';',
                         parse_dates={'dt': ['Date', 'Time']},
                         infer_datetime_format=True,
                         low_memory=False,
                         index_col='dt')
        return df

    def data_collection(self, df):
        active_values = []
        start_index = []
        end_index = []
        rows = df.shape[0]
        for i in range(rows):
            val = df.iloc[i, 0]
            if val == '?' or val == 'nan':
                if len(active_values) - 60 >= 0:
                    start_index.append(i - 60)
                    end_index.append(i - 1)
                else:
                    start_index.append(0)
                    if i == 0:
                        end_index.append(0)
                    else:
                        end_index.append(i - 1)
            else:
                active_values.append(val)
        return active_values, start_index, end_index

    def prepare_data(self, active_values):
        training = []
        labels = []
        for i in range(len(active_values)):
            #             print(i)
            if i + 60 < len(active_values):
                arr = active_values[i:i + 60]
                #                 print(len(arr))
                training.append(arr)
                labels.append(active_values[i + 60])
            else:
                break
        #print("trainging= ", len(training))
        training = np.asarray(training)
        #print("trining np = ", training.shape)
        labels = np.array(labels)
        return training, labels

    def train(self, train_data, labels):

        train_set = train_data
        train_labels = labels
        #         print(train_set.shape)
        #         print(train_labels.shape)
        #         print(type(train_set[0]))
        self.model = Sequential()
        self.model.add(Dense(100, activation="tanh", input_dim=60))
        self.model.add(Dense(70, activation="relu"))
        self.model.add(Dense(30, activation="relu"))
        self.model.add(Dense(1))
        self.model.compile(optimizer="adam",
                           loss="mse",
                           metrics=['mse', 'mae', 'mape'])
        self.model.fit(train_set, train_labels, epochs=20, batch_size=1000)

    def predict_values(self, start_index, end_index, active_values):

        preds = []
        for i in range(len(start_index)):
            #             print(i)
            x = active_values[start_index[i]:end_index[i] + 1]
            x = np.array(x)
            x = x.reshape((1, 60))
            val = self.model.predict(x)
            preds.append(val)

        return preds
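
A hypothetical end-to-end driver for RegressionSeries; the CSV path is an assumption (the ';'-separated Date/Time columns suggest the UCI household power consumption dataset):

rs = RegressionSeries()
df = rs.readcsv('household_power_consumption.txt')       # path is an assumption
active_values, start_index, end_index = rs.data_collection(df)
active_values = [float(v) for v in active_values]        # values are parsed as strings
training, labels = rs.prepare_data(active_values)
rs.train(training, labels)
preds = rs.predict_values(start_index, end_index, active_values)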
Example #10
for layer in vgg16_model.layers[:-1]:
    model.add(layer)

# Freezes the weights and other trainable parameters in each layer.
# They will not be updated when we pass in our images of cats and dogs.
for layer in model.layers:
    layer.trainable = False

# add a layer of 2 outputs
model.add(Dense(2, activation='softmax'))

# print model
model.summary()

# Same compile as in original model
model.compile(optimizer=Adam(learning_rate=.0001), loss='categorical_crossentropy', metrics=['accuracy'])

# Learning on 40 images -> 4 steps and 10 images per step
model.fit_generator(generator=train_batches, steps_per_epoch=4,
    validation_data=valid_batches, validation_steps=4, epochs=5, verbose=2)

test_imgs, test_labels = next(test_batches)
test_labels = test_labels[:,0]

# Taking one batch so will be 10 images
predictions = model.predict_generator(generator=test_batches, steps=1, verbose=0)
print(np.round(predictions[:,0]))

cm = confusion_matrix(y_true=test_labels, y_pred=np.round(predictions[:,0]))
print('Confusion matrix')
print(cm)
Example #11
x_data = np.array(x_data)
y_data = np.array(y_data)

print(x_data.shape)

# if x_data.shape[0] > y_data.shape[0]:
#     x_data = x_data[:y_data.shape[0] - x_data.shape[0]]

split = int(y_data.shape[0] * 0.8)
x_train, x_test = x_data[:split], x_data[split:]
y_train, y_test = y_data[:split], y_data[split:]

model = Sequential()
model.add(GRU(128, kernel_regularizer=regularizers.l2(0.001)))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer="adam",
              loss="binary_crossentropy",
              metrics=['binary_accuracy'])  # accuracy,mse,binary_crossentropy

history = model.fit(x_train, y_train, epochs=10, verbose=2)

y_pred = model.predict(x_test)
model.evaluate(x_test, y_test)
last_data = np.expand_dims(x_test[-1], axis=0)
print(model.predict(last_data))
# print("test AUC", round(roc_auc_score(y_test, y_pred), 4))
Example #12
train = train.map(preprocess_data).batch(BATCH_SIZE)
test = test.map(preprocess_data).batch(BATCH_SIZE)
test_steps = info.splits['test'].num_examples // BATCH_SIZE
train_steps = info.splits['train'].num_examples // BATCH_SIZE

#Define and train your model
model = Sequential([
    Flatten(),
    Dense(784, activation='sigmoid'),
    Dense(800, activation='sigmoid'),
    Dense(800, activation='sigmoid'),
    Dense(10, activation='softmax')
])

model.compile(optimizer=Adam(),
              loss=categorical_crossentropy,
              metrics=[categorical_accuracy])
model.fit(train.repeat(),
          epochs=5,
          steps_per_epoch=train_steps,
          validation_data=test,
          validation_steps=test_steps)

#Define metrics that we want to gather
metrics_accumulator = AttackMetricsAccumulator(
    [Accuracy(), L2_Metrics(), Robustness(), Timing()])
for data_sample in test.take(100):
    image, labels = data_sample
    fgsm = FGSMUntargeted(iter_max=100, eps=0.001)
    adv_image, adv_logits, parameters = fgsm(model,
Example #13
    splitter2.split(best_sarcasm_features_numpy[other_ind],
                    sarcasm_target_numpy[other_ind]))

# Build a multi-layer network.
# 64 input for Dense layers:
# With no dropout layers:
the_layers = [
    layers.Dense(64, activation='sigmoid'),
    layers.Dense(64, activation='sigmoid'),
    layers.Dense(1, activation='sigmoid')
]
networkMLP = Sequential(layers=the_layers)

# Configure the network in preparation for training.
networkMLP.compile(optimizer='adam',
                   metrics=['accuracy'],
                   loss='binary_crossentropy')

# Train the model using the data.
# Results in a history object that contains training and validation loss and metrics values.
# History dictionary keys => ['val_loss', 'val_acc', 'loss', 'acc']
training064 = networkMLP.fit(
    best_sarcasm_features_numpy[train_ind],
    sarcasm_target_numpy[train_ind],
    validation_data=(best_sarcasm_features_numpy[val_ind],
                     sarcasm_target_numpy[val_ind]),
    epochs=10)

# Get the evaluation accuracy value of the network model using the acc_est data chunk.
# Results in a list object that contains loss and accuracy values.
# Metrics names => ['loss', 'acc']
Example #14
class QNetwork:
    def __init__(self, env, parameters):
        self.observations_size = env.observation_space.shape[0]
        self.action_size = env.action_space.n
        self.learning_rate = parameters["learning_rate"]
        self.learning_rate_decay = parameters["learning_rate_decay"]
        self.loss_metric = parameters["loss_metric"]
        self.layers = parameters['layers']
        self.input_shape = parameters.get('input_shape')

    def build_q_dense_network(self):
        self.model = Sequential()
        self.model.add(
            Dense(self.layers[0],
                  input_dim=self.observations_size,
                  activation='relu'))

        self.model.add(Dense(self.layers[1], activation='relu'))
        self.model.add(Dense(self.action_size, activation='linear'))
        self.model.compile(loss=self.loss_metric,
                           optimizer=Adam(lr=self.learning_rate,
                                          decay=self.learning_rate_decay))

        return self.model

    def build_q_dense_from_json(self):
        self.model = Sequential()

        len_layers = len(self.layers)
        assert len_layers >= 2, 'You must have a network of at least 2 layers'

        for i, units in enumerate(self.layers):

            if i == 0:
                self.model.add(
                    Dense(units=units,
                          input_dim=self.observations_size,
                          activation='relu',
                          name='Input_state'))

            else:
                self.model.add(
                    Dense(units=units, activation='relu',
                          name='Hidden_layer_{}'.format(i)))  # unique name per layer

            if i + 1 == len_layers:
                break

        self.model.add(
            Dense(self.action_size,
                  activation='linear',
                  name='Output_Q_action'))

        self.model.compile(loss=self.loss_metric,
                           optimizer=Adam(lr=self.learning_rate,
                                          decay=self.learning_rate_decay))

        return self.model

    def build_q_CNN(self):

        self.model = Sequential()

        self.model.add(
            Conv2D(32,
                   kernel_size=8,
                   activation='relu',
                   strides=4,
                   input_shape=self.input_shape))

        self.model.add(Conv2D(64, kernel_size=4, activation='relu', strides=2))

        self.model.add(Conv2D(64, kernel_size=3, activation='relu', strides=1))

        self.model.add(Flatten())

        self.model.add(Dense(512, activation='relu'))

        self.model.add(Dense(self.action_size, activation='linear'))

        self.model.compile(loss=self.loss_metric,
                           optimizer=Adam(lr=self.learning_rate,
                                          decay=self.learning_rate_decay))

        return self.model
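
A hypothetical way to exercise QNetwork with an OpenAI Gym environment; the parameter values below are assumptions, not part of the original code:

import gym

parameters = {'learning_rate': 0.001,
              'learning_rate_decay': 0.0,
              'loss_metric': 'mse',
              'layers': [64, 64],
              'input_shape': None}
env = gym.make('CartPole-v1')
qnet = QNetwork(env, parameters)
model = qnet.build_q_dense_network()   # Dense(64) -> Dense(64) -> Dense(n_actions)
model.summary()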
Example #15
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Bidirectional, Dense, CuDNNLSTM

batch_size = 1
embedding_dim = 100
max_time = 100
lstm_output_dim = 128
output_dim = 10
sequence_length = [6, 10]

model = Sequential([
    Bidirectional(CuDNNLSTM(lstm_output_dim, return_sequences=True),
                  input_shape=(max_time, embedding_dim)),
    Dense(output_dim, activation="sigmoid")
])

model.compile(loss='categorical_crossentropy', optimizer='adam')

print(model.output_shape)
Example #16

import pickle

import numpy as np
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout

# Load pickled data
with open('small_train_traffic.p', mode='rb') as f:
    data = pickle.load(f)

# split the data
X_train, y_train = data['features'], data['labels']

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(MaxPooling2D())
model.add(Dropout(.5))
model.add(Flatten())
model.add(Dense(5, activation='softmax'))

# preprocess data
X_normalized = np.array(X_train / 255.0 - 0.5)

from sklearn.preprocessing import LabelBinarizer

label_binarizer = LabelBinarizer()
y_one_hot = label_binarizer.fit_transform(y_train)

# compile and fit model
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, epochs=3, validation_split=0.2)
Example #17
class TrainTop:
    """
    Trains the top layer. The class variables includes a parametrized Model created using SuperNet
    Call the train function of class to start training and the test function to test the function. The test function also
    writes the data to the buckets file.
    :param epochs: The number of epochs
    :param batch_size: Model training batch size
    :param loss: Defines the criterion function i.e. Loss function. Default is MSELoss()
    :param lr: The learning rate for the model
    :param verbose: Whether to print the logs
    :param identifier: Used to identify the type of dataset
    :param n1: Number of layers in the first layer
    :param n2: Number of layers in the second layer
    :param bias: Bias used or not for Model: bool
    :param validation_split: Amount of validation used to avoid overfitting
    :param optimizer: Defines the optimizer used
    """

    def __init__(self, identifier, epochs, batch_size, filename, lr=0.01, loss="mse", n1=32, n2=0,
                 bias=True, optimizer='RMSprop', validation_split=0.1, verbose=True):
        self.epochs = epochs
        self.batch_size = batch_size
        self.loss = loss
        self.verbose = verbose
        self.optimizer = optimizer
        self.identifier = identifier
        self.n1 = n1
        self.n2 = n2
        self.bias = bias
        self.lr = lr
        self.validation_split = validation_split
        self.model = None
        self.keys, self.values = None, None
        self.filename = filename

    def train(self):

        self.model = Sequential()
        self.model.add(Dense(self.n1, activation=tf.nn.relu, use_bias=self.bias, input_shape=(1,)))
        if self.n2 != 0:
            self.model.add(Dense(self.n2, activation=tf.nn.relu, use_bias=self.bias))
        self.model.add(Dense(1))
        self.model.compile(optimizer=self.optimizer, loss=self.loss, metrics=['mse'])

        self.keys, self.values = import_data(self.filename)
        self.model.fit(self.keys, self.values, epochs=self.epochs, batch_size=self.batch_size, verbose=self.verbose,
                       validation_split=self.validation_split)

        if not os.path.exists("models_tf/{}".format(self.identifier)):
            os.makedirs("models_tf/{}".format(self.identifier))
        self.model.save("models_tf/{}/super_layer.h5".format(self.identifier))

        converter = tf.lite.TFLiteConverter.from_keras_model(self.model)  # TF 2.0
        # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]

        # The following code further optimizes (quantizes) the weights. Here we noticed a massive drop in performance
        converter.optimizations = [tf.lite.Optimize.DEFAULT]

        sample = tf.cast(self.keys, tf.float32)
        sample = tf.data.Dataset.from_tensor_slices((sample)).batch(1)
        def representative_data_gen():
            for input_value in sample.take(1300000):
                yield [input_value]
        converter.representative_dataset = representative_data_gen

        tflite_model = converter.convert()

        open("models_tf/{}/super_layer.tflite".format(self.identifier), "wb").write(tflite_model)

    def getWeights(self, tflite=False, read_model=False):
        """
        :param read_model: Whether to read the model saved by the train function
        :param tflite: Read from quantized tflite file
        """

        if self.keys is None:
            self.keys, self.values = import_data(self.filename)
        predictions = None

        if read_model:
            if tflite:
                self.model = tf.lite.Interpreter(model_path="models_tf/{}/super_layer.tflite".format(self.identifier))
                self.model.allocate_tensors()
                # Get input and output tensors.
                details = self.model.get_tensor(0)
                print(details)
                details = self.model.get_tensor(1)
                print(details)
                details = self.model.get_tensor(2)
                print(details)
                details = self.model.get_tensor(3)
                print(details)
                # details = self.model.get_tensor(4)
                # print(details)
                # details = self.model.get_tensor(4)
                # print(details)
                # details = self.model.get_tensor(5)
                # print(details)
                # details = self.model.get_tensor(6)
                # print(details)
                input_details = self.model.get_input_details()
                output_details = self.model.get_output_details()

            else:
                if self.model is None:
                    self.model = tf.keras.models.load_model("models_tf/{}/super_layer.h5".format(self.identifier))
                for layer in self.model.layers:
                    print(layer.get_weights())
        else:
            for layer in self.model.layers:
                print(layer.get_weights())

    def test(self, tflite=False, read_model=False, write_buckets=True, total_buckets=100):
        """
        :param read_model: Whether to read the model saved by the train function
        :param total_buckets: Divide the data between the buckets to train the model for next layer
        :param write_buckets: Write the buckets out to disk. Writes the training data for next layer to the buckets directory.
        :param tflite: Read from quantized tflite file
        """
        if self.keys is None:
            self.keys, self.values = import_data(self.filename)
        predictions = None

        if read_model:
            if tflite:
                self.model = tf.lite.Interpreter(model_path="models_tf/{}/super_layer.tflite".format(self.identifier))
                self.model.allocate_tensors()
                # Get input and output tensors.
                input_details = self.model.get_input_details()
                output_details = self.model.get_output_details()
                self.keys = np.reshape(self.keys, (-1,1,1)).astype(np.float32)
                predictions = []
                for i in range(self.keys.shape[0]):
                    self.model.set_tensor(input_details[0]['index'], self.keys[i])
                    self.model.invoke()
                    predictions.append(self.model.get_tensor(output_details[0]['index'])[0])
                predictions = np.asarray(predictions)
            else:
                self.model = tf.keras.models.load_model("models_tf/{}/super_layer.h5".format(self.identifier))
                predictions = self.model.predict(self.keys)
        else:
            predictions = self.model.predict(self.keys)

        if self.verbose:
            print("\n\nEvaluation:\n\n")


        big_bucket = dict()
        self.keys = np.reshape(self.keys, (-1,1))
        predictions = np.concatenate((self.keys, self.values, predictions), axis=1)

        total_length = predictions.shape[0]

        for i in range(total_buckets):
            big_bucket[i] = []

        for i, (k, v, o) in enumerate(predictions):
            k = k.item()
            v = v.item()
            o = o.item()

            if self.verbose and i % 8000 == 0:
                print("Record: ", i+1, "Key: ", k, "Value: ", v, "Model Output: ", o, "Difference: ", o-v)

            mn = (total_buckets * o) / total_length
            model_num = np.clip(np.floor(mn), 0, total_buckets - 1)
            big_bucket[int(model_num)].append([v, k])

        if write_buckets:
            print("\n\nSaving data files for layer 2:\n\n")
            if not os.path.exists("buckets_tf/{}".format(self.identifier)):
                os.makedirs("buckets_tf/{}".format(self.identifier))
            for b in big_bucket:
                np.savetxt(fname="buckets_tf/{}/bucket_{}.txt".format(self.identifier, b), X=np.array(big_bucket[b]), fmt="%u")
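
A hypothetical driver for the class above; the identifier and filename are assumptions:

trainer = TrainTop(identifier='lognormal', epochs=10, batch_size=64,
                   filename='data/keys_and_values.csv')
trainer.train()                 # fits the model, saves the .h5 and quantized .tflite files
trainer.test(read_model=True)   # reloads the .h5 model and writes the layer-2 buckets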
Example #18
p, t = convert_to_matrix(data)
#  print("Input matrix: \n", p)
#  print("Output matrix: \n", t)

print("Pradinis reiksmiu saraso dydis: ", len(data[0]))
print("Ivesties reiksmiu saraso dydis: ", len(p))
print("Isvesties reiksmiu saraso dydis: ", len(t))

Lu = 310  # training data count
Pu, Tu = p[:Lu], t[:Lu]
Pu_test, Tu_test = p[:Lu], t[:Lu]

model = Sequential()
model.add(Dense(1, input_dim=2))
model.compile(optimizer='Adam', loss='mean_squared_error', metrics=['mse'])

weights_before_train = model.get_weights()

history = model.fit(Pu, Tu, epochs=1000, batch_size=10, verbose=1)

weights_after_train = model.get_weights()
print("Weights before train: ", weights_before_train)
print("Weights after train: ", weights_after_train)

model_predictions = model.predict(Pu)
#print(model_predictions)
drawPredictionComparison1(data, model_predictions)

plt.plot(history.history['mse'])
plt.xlabel("Iterations")
Example #19
    def test_no_2_000_then_111_what_is_brilliant_idea(self):
        # We test for a state that appeared once, but also for a very similar one -
        # and we want a similar result, so that there is no overfitting
        act_preds_1_reg = []
        close_preds_1_reg = []
        agents: List[SmartAgent] = get_SmartAgents()
        for agent in agents:
            model = Sequential()
            model.add(
                Dense(13, input_dim=10,
                      activation='relu'))  # 1st hidden layer; states as input
            # model.add(Dense(8, activation='relu'))
            model.add(
                Dense(8,
                      activation='relu',
                      kernel_regularizer=regularizers.l2(0.5)))
            model.add(Dense(3, activation='linear'))
            model.compile(loss='mse', optimizer=Adam())
            agent.model = model

        env = Env(agents)
        env.u = env_settings.u_all_2
        max_time = 90
        Globals().time = 0
        for t in range(max_time):
            # actions = [1, 1, 1] if t < 60 elif 60 == t [2, 2, 2]
            actions = [0, 0, 0]
            if t == 60 or t >= 63:
                actions = [1, 1, 1]
            if t == 61 or t == 62:
                actions = [orange, orange, orange]
            env.step(actions)
        for agent in agents:
            agent.reshape_rewards()
        i = 0
        epochs = 5
        a = True
        while a:
            i += epochs
            agents[0].train(batch_size=40, epochs=epochs, learning_rate=0.001)
            x = [2, 2, 2] + [0.6, 0.6, 28.88] + [0.126, 0.126, 0.18] + [0]
            predictions_actual = agents[0].model.predict(np.array([x]))
            x = [0, 0, 0] + [0, 0, 29] + [0, 0, 0] + [1]
            predictions_close = agents[0].model.predict(np.array([x]))
            act_preds_1_reg.append(predictions_actual[0][1])
            close_preds_1_reg.append(predictions_close[0][1])
            print(
                f'{i},actual: {predictions_actual} close: {predictions_close}')
            if i == 6000:
                a = False
        plt.plot(act_preds_1_reg, color='blue')
        plt.plot(close_preds_1_reg, color='green')
        # plt.ylabel('some numbers')
        # plt.show()

        # # NOT REGULARIZED
        # act_preds_1 = []
        # close_preds_1 = []
        # agents: List[SmartAgent] = get_SmartAgents()
        # for agent in agents:
        #     model = Sequential()
        #     model.add(Dense(13, input_dim=10, activation='relu'))  # 1st hidden layer; states as input
        #     model.add(Dense(8, activation='relu'))
        #     model.add(Dense(3, activation='linear'))
        #     model.compile(loss='mse',
        #                   optimizer=Adam())
        #     agent.model = model
        #
        # Globals().time = 0
        # env = Env(agents)
        # env.u = env_settings.u_all_2
        # max_time = 90
        # for t in range(max_time):
        #     # actions = [1, 1, 1] if t < 60 elif 60 == t [2, 2, 2]
        #     actions = [0, 0, 0]
        #     if t == 60 or t >= 63:
        #         actions = [1, 1, 1]
        #     if t == 61 or t == 62:
        #         actions = [orange, orange, orange]
        #     env.step(actions)
        # for agent in agents:
        #     agent.reshape_rewards()
        # i = 0
        # epochs = 20
        # a = True
        # while a:
        #     i += epochs
        #     agents[0].train(batch_size=40, epochs=epochs, learning_rate=0.001)
        #     x = [2, 2, 2] + [0.6, 0.6, 28.88] + [0.126, 0.126, 0.18] + [0]
        #     predictions_actual = agents[0].model.predict(np.array([x]))
        #     x = [0, 0, 0] + [0, 0, 29] + [0, 0, 0] + [1]
        #     predictions_close = agents[0].model.predict(np.array([x]))
        #     act_preds_1.append(predictions_actual[0][1])
        #     close_preds_1.append(predictions_close[0][1])
        #     print(f'{i},actual: {predictions_actual} close: {predictions_close}')
        #     if i == 40000:
        #         a = False
        # plt.plot(close_preds_1,color='yellow')
        # plt.plot(act_preds_1,color='orange')
        plt.show()
Example #20
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting
Example #21
    def test_constrained_viterbi_tags(self):
        constraints = {(0, 0), (0, 1),
                       (1, 1), (1, 2),
                       (2, 2), (2, 3),
                       (3, 3), (3, 4),
                       (4, 4), (4, 0)}

        # Add the transitions to the end tag
        # and from the start tag.
        for i in range(5):
            constraints.add((5, i))
            constraints.add((i, 6))

        mask = np.array([
                [1, 1, 1],
                [1, 1, 0]
        ])

        crf = CRF(
            units=5,
            use_kernel=False,  # disable kernel transform
            chain_initializer=initializers.Constant(self.transitions),
            use_boundary=True,
            # left_boundary_initializer=initializers.Constant(self.transitions_from_start),
            # right_boundary_initializer=initializers.Constant(self.transitions_to_end),
            transition_constraint=constraints,
            name="crf_layer"
        )
        crf.left_boundary = crf.add_weight(
            shape=(5,),
            name="left_boundary",
            initializer=initializers.Constant(self.transitions_from_start),
        )
        crf.right_boundary = crf.add_weight(
            shape=(5,),
            name="right_boundary",
            initializer=initializers.Constant(self.transitions_to_end),
        )


        crf_loss_instance = ConditionalRandomFieldLoss()

        model = Sequential()
        model.add(layers.Input(shape=(3, 5)))
        model.add(MockMasking(mask_shape=(2, 3), mask_value=mask))
        model.add(crf)
        model.compile('adam', loss={"crf_layer": crf_loss_instance})

        for layer in model.layers:
            print(layer.get_config())
            print(dict(zip(layer.weights, layer.get_weights())))

        # Get just the tags from each tuple of (tags, score).
        viterbi_tags = model.predict(self.logits)

        # Now the tags should respect the constraints
        expected_tags = [
            [2, 3, 3],
            [2, 3, 0]
        ]

        # if the constraints did not work, it would be:
        # [
        #     [2, 4, 3],
        #     [2, 3, 0]
        # ]

        # test assert
        np.testing.assert_equal(viterbi_tags, expected_tags)
Example #22
gmm_llk = gmm.score(x)
gmm_mean = gmm.means_.ravel().astype('float32')


# ====== mdn ====== #
def fn_loss(y_true, y_pred):
    # negative log-likelihood
    nllk = tf.reduce_mean(-y_pred.log_prob(y_true))
    return nllk


mdn = MixtureDensityNetwork(1,
                            n_components=n_components,
                            covariance_type='none')
model = Sequential([mdn])
model.compile(optimizer='adam', loss=fn_loss)
model.fit(x=x, y=x, epochs=48, batch_size=32, verbose=True)

y = model(x)
mdn_llk = tf.reduce_mean(y.log_prob(x)).numpy()
mdn_mean = tf.reduce_mean(y.components_distribution.mean(),
                          axis=(0, -1)).numpy()

# ====== visualizing ====== #
fig = plt.figure()
sns.distplot(x, bins=80)
plt.title('Data')

fig = plt.figure()
sns.distplot(gmm.sample(n * n_components)[0], bins=80)
plt.title('GMM - llk: %.2f' % gmm_llk)
Example #23
    def test_unmasked_constrained_viterbi_tags(self):
        # TODO: use the BILUO tag scheme instead of BIO,
        #       so that transitions from tags to the end can be tested.

        raw_constraints = np.array([
            #     O     B-X    I-X    B-Y    I-Y  start   end
            [     1,     1,     0,     1,     0,    0,     1],  # O
            [     1,     1,     1,     1,     0,    0,     1],  # B-X
            [     1,     1,     1,     1,     0,    0,     1],  # I-X
            [     1,     1,     0,     1,     1,    0,     1],  # B-Y
            [     1,     1,     0,     1,     1,    0,     1],  # I-Y
            [     1,     1,     0,     1,     0,    0,     0],  # start
            [     0,     0,     0,     0,     0,    0,     0],  # end
        ])

        constraints = np.argwhere(raw_constraints > 0).tolist()

        # transitions = np.array([
        #     #     O     B-X    I-X    B-Y    I-Y
        #     [    0.1,   0.2,   0.3,   0.4,   0.5],  # O
        #     [    0.8,   0.3,   0.1,   0.7,   0.9],  # B-X
        #     [   -0.3,   2.1,  -5.6,   3.4,   4.0],  # I-X
        #     [    0.2,   0.4,   0.6,  -0.3,  -0.4],  # B-Y
        #     [    1.0,   1.0,   1.0,   1.0,   1.0]   # I-Y
        # ])

        transitions = np.ones([5, 5])

        # transitions_from_start = np.array(
        #     #     O     B-X    I-X    B-Y    I-Y
        #     [    0.1,   0.2,   0.3,   0.4,   0.6]  # start
        # )

        transitions_from_start = np.ones(5)

        # transitions_to_end = np.array(
        #     [
        #     #    end
        #         -0.1,  # O
        #         -0.2,  # B-X
        #          0.3,  # I-X
        #         -0.4,  # B-Y
        #         -0.4   # I-Y
        #     ]
        # )

        transitions_to_end = np.ones(5)

        logits = np.array([
            [
            # constraint transition from start to tags
            #     O     B-X    I-X    B-Y    I-Y
                [ 0.,    .1,   1.,     0.,   0.],
                [ 0.,    0.,   1.,     0.,   0.],
                [ 0.,    0.,   1.,     0.,   0.]
            ],
            [
            # constraint transition from tags to tags
            #     O     B-X    I-X    B-Y    I-Y
                [ 0.,    1.,   0.,     0.,   0.],
                [ 0.,    0.,   .1,     1.,   0.],
                [ 0.,    0.,   1.,     0.,   0.]
            ]
        ])

        crf = CRF(
            units=5,
            use_kernel=False,  # disable kernel transform
            chain_initializer=initializers.Constant(transitions),
            use_boundary=True,
            # left_boundary_initializer=initializers.Constant(transitions_from_start),
            # right_boundary_initializer=initializers.Constant(transitions_to_end),
            transition_constraint=constraints,
            name="crf_layer"
        )
        crf.left_boundary = crf.add_weight(
            shape=(5,),
            name="left_boundary",
            initializer=initializers.Constant(transitions_from_start),
        )
        crf.right_boundary = crf.add_weight(
            shape=(5,),
            name="right_boundary",
            initializer=initializers.Constant(transitions_to_end),
        )

        crf_loss_instance = ConditionalRandomFieldLoss()

        model = Sequential()
        model.add(layers.Input(shape=(3, 5)))
        model.add(crf)
        model.compile('adam', loss={"crf_layer": crf_loss_instance})

        for layer in model.layers:
            print(layer.get_config())
            print(dict(zip(layer.weights, layer.get_weights())))

        # Get just the tags from each tuple of (tags, score).
        viterbi_tags = model.predict(logits)

        # Now the tags should respect the constraints
        expected_tags = [
            [1, 2, 2],  # B-X  I-X  I-X
            [1, 2, 2]   # B-X  I-X  I-X
        ]

        # if the constraints did not work, it would be:
        # [
        #     [2, 4, 3],
        #     [2, 3, 0]
        # ]

        # test assert
        np.testing.assert_equal(viterbi_tags, expected_tags)
Example #24

    def _fit(self, X, y, step_score=None):
        # Parameter step_score controls the calculation of self.scores_
        # step_score is not exposed to users
        # and is used when implementing RFECV
        # self.scores_ will not be calculated when calling _fit through fit

        X, y = check_X_y(X, y, "csc")
        X = pd.DataFrame(X)

        n_samples, n_features = X.shape
        if self.n_features_to_select is None:
            n_features_to_select = n_features // 2
        else:
            n_features_to_select = self.n_features_to_select

        support_ = np.ones(n_features, dtype=bool)
        ranking_ = np.ones(n_features, dtype=int)

        if step_score:
            self.scores_ = []

        worst_feature = 0

        # Recursive elimination
        i = 1
        while np.sum(support_) > n_features_to_select:

            if worst_feature == n_features:
                break

            z = (np.cov(X.T)**2)[:, support_].sum(axis=1)
            z[support_ == False] = 0
            z_max = np.argmax(z)
            z_2 = z
            z_2[z_max] = 0
            z_2 = z_2 / np.max(z_2)
            a = z_2 > 0.5

            support_[z_max] = False
            X_worse = X.iloc[:, z_max]
            X_reduced = X.iloc[:, a]

            skf = KFold(n_splits=self.n_splits,
                        shuffle=True,
                        random_state=self.random_state)
            train_index, val_index = [split for split in skf.split(X_worse)][0]
            X_train, X_val = X_reduced.iloc[train_index], X_reduced.iloc[
                val_index]
            y_train, y_val = X_worse[train_index], X_worse[val_index]

            # Eliminate predictable features
            if self.verbose > 0:
                print("Fitting estimator with %d features (%d/%d)" %
                      (np.sum(support_), i, n_features))
                i += 1

            # estimator = clone(self.estimator)
            # estimator.fit(X_train, y_train)
            # score = estimator.score(X_val, y_val)
            # define model
            input_shape = X_train.shape[1]
            model = Sequential()
            model.add(
                Dense(10,
                      activation='relu',
                      kernel_initializer='he_normal',
                      input_shape=(input_shape, )))
            model.add(
                Dense(8, activation='relu', kernel_initializer='he_normal'))
            model.add(Dense(1))
            # compile the model
            model.compile(optimizer='adam', loss='mse')
            # fit the model
            model.fit(X_train, y_train, epochs=150, batch_size=32, verbose=0)
            # evaluate the model
            yhat = model.predict(X_val)
            score = r2_score(y_val, yhat)

            if score >= self.base_score:

                # Compute step score on the previous selection iteration
                # because 'estimator' must use features
                # that have not been eliminated yet
                ranking_[np.logical_not(support_)] += 1

            else:
                support_[z_max] = True

            worst_feature += 1

        self.n_features_ = support_.sum()
        self.support_ = support_
        self.ranking_ = ranking_

        return self
Example #25
def test_masked_viterbi_decode():
    transitions = np.ones([5, 5])
    transitions_from_start = np.ones(5)
    transitions_to_end = np.ones(5)

    logits = np.array([
        [
        #     O     B-X    I-X    B-Y    I-Y
            [ 0.,    1.,   0.,     0.,   0.],
            [ 0.,    0.,   1.,     0.,   0.],
            [ 0.,    0.,   1.,     0.,   0.]
        ],
        [
        #     O     B-X    I-X    B-Y    I-Y
            [ 0.,    1.,   0.,     0.,   0.],
            [ 0.,    1.,   0.,     0.,   0.],
            [ 0.,    1.,   0.,     0.,   0.]
        ]
    ])

    # TODO: this test case covers right-padding masks only,
    #       because the underlying CRF function only supports sequence lengths
    mask = np.array([
            [1, 1, 0],
            [1, 1, 0]
    ])

    crf = CRF(
        units=5,
        use_kernel=False,  # disable kernel transform
        chain_initializer=initializers.Constant(transitions),
        use_boundary=True,
        # left_boundary_initializer=initializers.Constant(transitions_from_start),
        # right_boundary_initializer=initializers.Constant(transitions_to_end),
        name="crf_layer"
    )

    crf_loss_instance = ConditionalRandomFieldLoss()

    model = Sequential()
    model.add(layers.Input(shape=(3, 5)))
    model.add(MockMasking(mask_shape=(2, 3), mask_value=mask))
    model.add(crf)
    model.compile('adam', loss={"crf_layer": crf_loss_instance})

    # for layer in model.layers:
    #     print(layer.get_config())
    #     print(dict(zip(layer.weights, layer.get_weights())))

    # Get just the tags from each tuple of (tags, score).
    result = model.predict(logits)

    # Now the tags should respect the constraints
    expected = [
        [1, 2, 0],  # B-X  I-X  NA
        [1, 1, 0]   # B-X  B-X  NA
    ]

    # if the constraints did not work, it would be:
    # [
    #     [2, 4, 3],
    #     [2, 3, 0]
    # ]

    # test assert
    np.testing.assert_equal(result, expected)
Example #26
classifier.add(Dropout(rate=0.5))  # previously 0.25

# Step 3 - Flattening
classifier.add(Flatten())

# Step 4 - Full connection
classifier.add(Dense(units=512, activation='relu'))
# classifier.add(Dropout(rate=0.5))
classifier.add(Dense(units=4, activation='softmax'))

# show classifier detail
classifier.summary()

# Compiling the CNN
classifier.compile(optimizer='rmsprop',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

# Using ImageDataGenerator to read images from directories
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)

training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size=(28, 28),
                                                 batch_size=16,
                                                 class_mode='categorical')

test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(28, 28),
                                            batch_size=16,
                                            class_mode='categorical')
Example #27
def train_task(data: InputBinaryFile(str), epochs: int, batch_size: int,
               model_path: OutputBinaryFile(str)):
    """Train CNN model on MNIST dataset."""

    from tensorflow.python import keras
    from tensorflow.python.keras import Sequential, backend as K
    from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
    import numpy as np

    mnistdata = np.load(data)

    train_x = mnistdata['train_x']
    train_y = mnistdata['train_y']
    test_x = mnistdata['test_x']
    test_y = mnistdata['test_y']

    num_classes = 10
    img_w = 28
    img_h = 28

    if K.image_data_format() == 'channels_first':
        train_x.shape = (-1, 1, img_h, img_w)
        test_x.shape = (-1, 1, img_h, img_w)
        input_shape = (1, img_h, img_w)
    else:
        train_x.shape = (-1, img_h, img_w, 1)
        test_x.shape = (-1, img_h, img_w, 1)
        input_shape = (img_h, img_w, 1)

    model = Sequential([
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax'),
    ])

    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy'],
    )

    model.fit(
        train_x,
        train_y,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(test_x, test_y),
    )

    score = model.evaluate(test_x, test_y)
    print('Test loss & accuracy: %s' % (score, ))

    model.save(model_path)
Example #28
# 80% train and 20% evaluation
input_data = iris_data[:,0:4] # Columns 0 to 3 are extracted: 'sepal length' 'sepal width' 'petal length' 'petal width'
output_data = iris_data[:,4].reshape(-1, 1) # The 4th column is extracted and reshaped into a column vector
output_data = to_categorical(output_data)

iris_train_input, iris_test_input, iris_train_output, iris_test_output = train_test_split(input_data, output_data, test_size=0.20)

# Building the model with Keras
iris_model = Sequential()
iris_model.add(Dense(5,input_shape=(4,),activation="relu"))
iris_model.add(Dense(24,activation="relu"))
iris_model.add(Dense(3,activation="softmax"))

sgd = SGD(lr=0.001)

iris_model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy",metrics.mae])
iris_model.fit(x=iris_train_input, y=iris_train_output, batch_size=10, epochs=500, verbose=1)


# Evaluation on test data
evaluation_results = iris_model.evaluate(iris_test_input, iris_test_output)

print("Loss: {}".format(evaluation_results[0]))
print("Accuracy: {}".format(evaluation_results[1]))
print("Mean Absolute Error: {}".format(evaluation_results[2]))

# Test 
test = np.array([[5.1,3.5,1.4,0.2], [5.9,3.,5.1,1.8], [4.9,3.,1.4,0.2], [5.8,2.7,4.1,1.]])
predictions = iris_model.predict(test)
index_max_predictions = np.argmax(predictions,axis=1)
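
A short sketch mapping the argmax indices back to species names; the 0/1/2 ordering below is an assumption about how the labels were encoded:

# Hypothetical index-to-species mapping (ordering assumed).
species = ['setosa', 'versicolor', 'virginica']
for sample, idx in zip(test, index_max_predictions):
    print(sample, '->', species[idx])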
Example #29
# define the keras model
#from keras.layers import LSTM
#stadard
model = Sequential()
# input of 17 features, first hidden layer of 12
model.add(Dense(12, input_dim=17, activation='relu'))
# second hidden layer of 24
model.add(Dense(24, activation='relu'))
#final output layer
model.add(Dense(1, activation='relu'))
# compile the keras model
# just a test to change the learning rate
# we tried SGD; it didn't work
model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=[
                  'mse', 'mean_absolute_error',
                  'mean_absolute_percentage_error', 'cosine_proximity'
              ])
# fit the keras model on the dataset
#use first one for final model for production
#model.fit(X_train, y_train, epochs=50, batch_size=12)
model.fit(X_train, y_train, epochs=2000, batch_size=12)
#score
score = model.evaluate(X_test, y_test, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
#make a prediction
prediction1 = model.predict(XAle)

print(prediction1)

predplusnormalized = np.column_stack([XAle, prediction1[:, -1]])
Example #30
def test_crf_config(get_random_data):
    nb_samples = 2
    timesteps = 10
    embedding_dim = 4
    output_dim = 5
    embedding_num = 12

    x, y = get_random_data(
        nb_samples, timesteps, x_high=embedding_num, y_high=output_dim
    )
    # right padding; left padding is not supported due to the tf.contrib.crf
    x[0, -4:] = 0

    crf_loss_instance = ConditionalRandomFieldLoss()

    # test with masking, fix length
    model = Sequential()
    model.add(
        Embedding(embedding_num, embedding_dim, input_length=timesteps, mask_zero=True)
    )
    model.add(CRF(output_dim, name="crf_layer"))
    model.compile(optimizer="rmsprop", loss={"crf_layer": crf_loss_instance})

    model.fit(x, y, epochs=1, batch_size=10)

    # test config
    result = model.get_config()

    expected = {
        "name": "sequential",
        "layers": [
            {
                "class_name": "Embedding",
                "config": {
                    "name": "embedding",
                    "trainable": True,
                    "batch_input_shape": (None, 10),
                    "dtype": "float32",
                    "input_dim": 12,
                    "output_dim": 4,
                    "embeddings_initializer": {
                        "class_name": "RandomUniform",
                        "config": {
                            "minval": -0.05,
                            "maxval": 0.05,
                            "seed": None,
                            "dtype": "float32",
                        },
                    },
                    "embeddings_regularizer": None,
                    "activity_regularizer": None,
                    "embeddings_constraint": None,
                    "mask_zero": True,
                    "input_length": 10,
                },
            },
            {
                "class_name": "CRF",
                "config": {
                    "name": "crf_layer",
                    "trainable": True,
                    "dtype": "float32",
                    "units": 5,
                    "use_boundary": True,
                    "use_bias": True,
                    "use_kernel": True,
                    "kernel_initializer": {
                        "class_name": "GlorotUniform",
                        "config": {"seed": None, "dtype": "float32"},
                    },
                    "chain_initializer": {
                        "class_name": "Orthogonal",
                        "config": {"gain": 1.0, "seed": None, "dtype": "float32"},
                    },
                    "boundary_initializer": {
                        "class_name": "Zeros",
                        "config": {"dtype": "float32"},
                    },
                    "bias_initializer": {
                        "class_name": "Zeros",
                        "config": {"dtype": "float32"},
                    },
                    "activation": "linear",
                    "kernel_regularizer": None,
                    "chain_regularizer": None,
                    "boundary_regularizer": None,
                    "bias_regularizer": None,
                    "kernel_constraint": None,
                    "chain_constraint": None,
                    "boundary_constraint": None,
                    "bias_constraint": None,
                },
            },
        ],
    }

    assert result == expected
Example #31
class KerasNetwork:
    def __init__(self,
                 discount_factor,
                 name='DQNetwork',
                 model=None,
                 state_size=None,
                 action_size=None,
                 hidden_sizes=None,
                 hidden_activation_functions=None,
                 output_activation_function=None):
        self.name = name
        self.discount_factor = discount_factor
        if model is not None:
            self.model = load_model(model)
        else:
            try:
                assert state_size is not None
                assert action_size is not None
                assert output_activation_function is not None
            except AssertionError:
                print(
                    "Network needs to know at least state_size, action_size, and have an output_activation_function"
                )
            try:
                if hidden_activation_functions is not None or hidden_sizes is not None:
                    assert len(hidden_activation_functions) == len(
                        hidden_sizes)
            except AssertionError:
                if len(hidden_activation_functions) > len(hidden_sizes):
                    print(
                        "Too many hidden activation functions, must have length(hidden sizes) "
                    )
                else:
                    print(
                        "Too few hidden activation functions, must have length(hidden sizes) "
                    )
                exit(1)
            except TypeError:
                print(
                    "Either hidden_activation_functions or hidden_sizes is None; Make both None or neither."
                )

            self.model = Sequential()
            self.model.add(InputLayer(input_shape=(state_size, )))
            if hidden_activation_functions is not None and hidden_sizes is not None:
                hidden_inputs = np.roll(hidden_sizes, 1)
                hidden_inputs[0] = state_size
                for s, i, a in zip(hidden_sizes, hidden_inputs,
                                   hidden_activation_functions):
                    self.model.add(Dense(s, input_dim=i, activation=a))
                    self.model.add(Dropout(0.1))
            self.model.add(
                Dense(action_size,
                      input_dim=state_size,
                      activation=output_activation_function))
            self.model.compile(loss='mse', optimizer='adam', metrics=['mse'])

    def summary(self):
        return self.model.summary()

    def save(self, path):
        save_model(self.model, path)

    def load(self, path):
        self.model = load_model(path)

    def generate_target_vecs(self, states, actions, rewards, next_states,
                             final_states):
        target_vecs = [
            self.model.predict(np.array([state]))[0] for state in states
        ]
        for vec, action, reward, next_state, final_state in zip(
                target_vecs, actions, rewards, next_states, final_states):
            if final_state or self.discount_factor == 0:
                vec[action] = reward
            else:
                vec[action] = reward + self.discount_factor * np.min(
                    self.model.predict(np.array([next_state]))[0])
        return target_vecs
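
generate_target_vecs returns one Q-target row per state; a hypothetical replay-batch update (the constructor arguments and batch arrays below are assumptions) could feed those rows straight back into fit:

# Hypothetical usage; states/actions/rewards/next_states/final_states are
# assumed to come from a replay buffer.
net = KerasNetwork(discount_factor=0.95, state_size=4, action_size=2,
                   output_activation_function='linear')
targets = np.array(net.generate_target_vecs(states, actions, rewards,
                                            next_states, final_states))
net.model.fit(np.array(states), targets, epochs=1, verbose=0)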