Example No. 1
    def Train(self, input, target):
        X_train, X_test, Y_train, Y_test = train_test_split(input, target, train_size=0.75)
        Y_train = np.asarray(Y_train)
        Y_test = np.asarray(Y_test)
        X_train = np.reshape(X_train, [-1, X_train[0].shape[0], X_train[0].shape[1]])
        X_test = np.reshape(X_test, [-1, X_test[0].shape[0], X_test[0].shape[1]])

        model = Sequential()
        model.add(Conv1D(16, 3, padding='same', input_shape=input[0].shape))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization())
        model.add(GRU(16, return_sequences=True))
        # model.add(Activation("sigmoid"))
        # model.add(LSTM(lstm_out))

        model.add(Flatten())
        model.add(Dense(8, activity_regularizer=l2(0.001)))
        # model.add(GRU(lstm_out, return_sequences=True))
        # model.add(LSTM(lstm_out))
        # model.add(Dense(20, activity_regularizer=l2(0.001)))
        model.add(Activation("relu"))
        model.add(Dense(2))

        model.compile(loss=mean_absolute_error, optimizer='nadam',
                      metrics=[RootMeanSquaredError(), MAE])
        model.summary()  # summary() prints directly and returns None

        batch_size = 12
        epochs = 100
        # Reduce the learning rate when val_loss stops decreasing (mode='min')
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=epochs // 10,
                                      verbose=1, min_delta=1e-4, mode='min')
        model.fit(X_train, Y_train,
                  epochs=epochs,
                  batch_size=batch_size, validation_data=(X_test, Y_test), callbacks=[reduce_lr])
        model.save("PositionEstimation.h5", overwrite=True)
        # acc = model.evaluate(X_test,
        #                      Y_test,
        #                      batch_size=batch_size,
        #                      verbose=0)

        predicted = model.predict(X_test, batch_size=batch_size)
        # predicted = out.ravel()

        res = pd.DataFrame({"predicted_x": predicted[:, 0],
                            "predicted_y": predicted[:, 1],
                            "original_x": Y_test[:, 0],
                            "original_y": Y_test[:, 1]})
        res.to_excel("res.xlsx")
Example No. 2
def free_attn_lstm(dataset_object: LSTM_data):
    X_train, X_test, Y_train, Y_test = dataset_object.get_memory()
    # Drop the last 12 feature columns (the sentiment features, per the "no_senti" naming)
    X_train, X_test = X_train[:, :, :-12], X_test[:, :, :-12]
    regressor = Sequential()
    # Adding the first LSTM layer and some Dropout regularisation
    regressor.add(LSTM(units=NEURONS,
                       return_sequences=True,
                       activation=ACTIVATION,
                       recurrent_activation="sigmoid",
                       input_shape=(X_train.shape[1], X_train.shape[2]),
                       bias_regularizer=regularizers.l2(BIAIS_REG),
                       activity_regularizer=regularizers.l2(L2)
                       ))
    regressor.add(Dropout(DROPOUT))
    regressor.add(LSTM(units=NEURONS,
                       activation=ACTIVATION,
                       recurrent_activation="sigmoid",
                       return_sequences=True,
                       bias_regularizer=regularizers.l2(BIAIS_REG),
                       activity_regularizer=regularizers.l2(L2)
                       ))
    regressor.add(Dropout(DROPOUT))
    # Adding the third and final LSTM layer and some Dropout regularisation
    regressor.add(LSTM(units=NEURONS,
                       activation=ACTIVATION,
                       recurrent_activation="sigmoid",
                       bias_regularizer=regularizers.l2(BIAIS_REG),
                       activity_regularizer=regularizers.l2(L2)
                  ))
    regressor.add(Dropout(DROPOUT))
    # Adding the output layer
    regressor.add(Dense(units=1,
                        activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)
                        ))
    optim = Adam()
    # Compiling the RNN
    regressor.compile(optimizer=optim, loss='mean_squared_error')

    # Fitting the RNN to the Training set
    history = regressor.fit(X_train,
                           Y_train,
                           epochs=EPOCHS,
                           batch_size=BATCH_SIZE,
                           validation_data=(X_test, Y_test),
                           callbacks=[REDUCE_LR, EARLY_STOP]
                           )
    regressor.save("data/weights/free_attn_lstm_no_senti")
    plot_train_loss(history)
    evaluate(regressor, X_test, Y_test, dataset_object, name="free_attn_lstm", senti="no")
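
free_attn_lstm relies on module-level constants, callbacks, and project helpers (LSTM_data, plot_train_loss, evaluate) that are not shown. A hypothetical configuration block with illustrative values only; the original settings may differ:

# Hypothetical constants and callbacks assumed by free_attn_lstm
# (and by dense_net in Example No. 4, which appears to share them).
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping

NEURONS = 64          # illustrative; the original value is not shown
ACTIVATION = "tanh"
BIAIS_REG = 1e-4      # bias-regularizer strength (identifier spelled as in the original)
L2 = 1e-4             # activity-regularizer strength
DROPOUT = 0.2
EPOCHS = 100
BATCH_SIZE = 32
REDUCE_LR = ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=5)
EARLY_STOP = EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True)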
Example No. 3
    def test_seq_to_seq(self):
        #print (self.get_random_states())
        # Data size: 10 x (image + 2 actions) x board/action size
        train_x = np.random.randint(0, 2, size=(10, 3, 9))

        # train_x = [
        #     [
        #     [0.1, 1.0],
        #     [0.1, 1.0],
        #     [0.1, 1.0],
        #     [0.1, 1.0],
        #     [0.1, 1.0],
        # ]]
        # 1 being the batch size
        # 10 being the length
        #train_x = np.random.randint(low=0, high=2, size=(1, 10, 9))

        #train_y = [ 0.11 ]
        # Targets have shape (10, 3); Keras expands them to (10, 3, 1) to
        # match the model output when computing the loss.
        train_y = np.array([[0.11, 0.11, 0.11]] * 10)

        model = Sequential()
        #model.add(layers.Flatten(input_shape=(3, 9))),
        #model.add(layers.Embedding(input_shape=(10, 9), ))
        model.add(
            layers.LSTM(units=100, input_shape=(3, 9), return_sequences=True))
        model.add(layers.Dropout(rate=0.25))
        model.add(layers.Dense(50, activation='relu'))
        model.add(layers.Dense(1, activation=None))
        model.compile(optimizer='adam', loss=tf.losses.MSE, metrics=['mae'])
        model.summary()  # summary() prints directly and returns None
        model.fit(x=train_x, y=train_y, epochs=100, verbose=0)
        loss = model.evaluate(train_x, train_y, verbose=2)
        self.assertLess(loss[0], 1e-04)
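
A sketch of the imports the test method assumes (the surrounding TestCase class and its imports are not shown):

# Import sketch for the test above.
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential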
Example No. 4
def dense_net(dataset_object:LSTM_data):
    X_train, X_test, Y_train, Y_test = dataset_object.get_memory()
    print(X_test.shape, X_train.shape)
    # Collapse the timestep axis (valid only when each memory window has length 1)
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[2])
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[2])
    # Drop the last 12 (sentiment) feature columns
    X_train, X_test = X_train[:, :-12], X_test[:, :-12]
    print(X_test.shape, X_train.shape)
    regressor = Sequential()

    # NOTE: the layer width reuses the EPOCHS constant as the unit count
    regressor.add(Dense(units=EPOCHS,
                        activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)
                        ))

    regressor.add(Dense(units=EPOCHS,
                        activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)
                        ))
    regressor.add(Dense(units=EPOCHS,
                        activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)
                        ))
    regressor.add(Dropout(DROPOUT))
    regressor.add(Dense(units=1,
                        activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)
                        ))
    optim = Adam()
    # Compiling the model
    regressor.compile(optimizer=optim, loss='mean_squared_error')

    # Fitting the model to the training set
    history = regressor.fit(X_train,
                           Y_train,
                           epochs=EPOCHS,
                           batch_size=BATCH_SIZE,
                           validation_data=(X_test, Y_test),
                           callbacks=[EARLY_STOP, REDUCE_LR])
    regressor.save("data/weights/dense_no_senti")
    plot_train_loss(history)
    evaluate(regressor, X_test, Y_test, dataset_object, name="dense", senti="no")
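
The two reshape calls above only succeed when the timestep axis has length 1. A minimal standalone sketch of that flattening step, with made-up shapes:

# Standalone sketch of the flattening step (shapes are illustrative).
import numpy as np

X = np.zeros((100, 1, 40))                # (samples, timesteps=1, features)
X = X.reshape(X.shape[0], X.shape[2])     # -> (100, 40); raises if timesteps > 1
X = X[:, :-12]                            # drop the trailing 12 columns
print(X.shape)                            # (100, 28)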
Example No. 5
Lm1 = Sequential()
#q1. try without input?
Lm1.add(Input(shape=(784, )))
Lm1.add(Dense(num_classes, activation='softmax'))

#q2.try SparseCategoricalCrossentropy without one-hot
loss_object = tf.keras.losses.categorical_crossentropy

optimizer = tf.keras.optimizers.SGD(0.01)

#
train_loss = tf.keras.metrics.Mean(name='train_loss')
#try SparseCategoricalAccuracy
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

checkpoint_path = "./checkpoints/"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                 verbose=1,
                                                 save_freq='epoch')  # 'period=1' is deprecated in tf.keras
#q3 metrics=xxx without []?
Lm1.compile(optimizer=optimizer, loss=loss_object, metrics=[train_accuracy])
#q4 train_ds?
Lm1.fit(train_ds, epochs=3, callbacks=[cp_callback])
loss, acc = Lm1.evaluate(train_ds)
print("saved model, loss: {:5.2f}, acc: {:5.2f}".format(loss, acc))
Example No. 6
class MyAutoEncoder(object):
    # archType - 1 => 300|256|300 : archType - 2 => 300|128|300
    # archType - 3 => 300|64|300  : archType - 4 => 300|32|300
    # archType - 5 => 300|16|300  : archType - 6 => 300|128|64|128|300
    # archType - 7 => 300|256|128|128|256|300           : archType - 8 => 300|128|64|32|64|128|300
    # archType - 9 => 300||256|128|64|128|256|300       : archType - 10 => 300|128|64|32|16|32|64|128|300
    # archType - 11 => 300|256|128|64|32|64|128|256|300 : archType - 12 => 300|256|128|64|32|16|32|64|128|256|300
    def __init__(self, logFilePath, inputDim=0, archType=0):
        self.logFilePath = logFilePath
        if archType == 0: return  # We are loading a saved model

        # Create auto encoder+decoder
        self.autoEncoderModel = Sequential()
        self.autoEncoderModel.add(
            Dense(inputDim, input_shape=(inputDim, ),
                  activation='relu'))  # Input layer

        if archType == 1:
            self.autoEncoderModel.add(Dense(256, activation='relu'))
        elif archType == 2:
            self.autoEncoderModel.add(Dense(128, activation='relu'))
        elif archType == 3:
            self.autoEncoderModel.add(Dense(64, activation='relu'))
        elif archType == 4:
            self.autoEncoderModel.add(Dense(32, activation='relu'))
        elif archType == 5:
            self.autoEncoderModel.add(Dense(16, activation='relu'))
        elif archType == 6:
            self.autoEncoderModel.add(Dense(128, activation='relu'))
            self.autoEncoderModel.add(Dense(64, activation='relu'))
            self.autoEncoderModel.add(Dense(128, activation='relu'))
        elif archType == 7:
            self.autoEncoderModel.add(Dense(256, activation='relu'))
            self.autoEncoderModel.add(Dense(128, activation='relu'))
            self.autoEncoderModel.add(Dense(256, activation='relu'))
        elif archType == 8:
            self.autoEncoderModel.add(Dense(128, activation='relu'))
            self.autoEncoderModel.add(Dense(64, activation='relu'))
            self.autoEncoderModel.add(Dense(32, activation='relu'))
            self.autoEncoderModel.add(Dense(64, activation='relu'))
            self.autoEncoderModel.add(Dense(128, activation='relu'))
        elif archType == 9:
            self.autoEncoderModel.add(Dense(256, activation='relu'))
            self.autoEncoderModel.add(Dense(128, activation='relu'))
            self.autoEncoderModel.add(Dense(64, activation='relu'))
            self.autoEncoderModel.add(Dense(128, activation='relu'))
            self.autoEncoderModel.add(Dense(256, activation='relu'))
        elif archType == 10:
            self.autoEncoderModel.add(Dense(128, activation='relu'))
            self.autoEncoderModel.add(Dense(64, activation='relu'))
            self.autoEncoderModel.add(Dense(32, activation='relu'))
            self.autoEncoderModel.add(Dense(16, activation='relu'))
            self.autoEncoderModel.add(Dense(32, activation='relu'))
            self.autoEncoderModel.add(Dense(64, activation='relu'))
            self.autoEncoderModel.add(Dense(128, activation='relu'))
        elif archType == 11:
            self.autoEncoderModel.add(Dense(256, activation='relu'))
            self.autoEncoderModel.add(Dense(128, activation='relu'))
            self.autoEncoderModel.add(Dense(64, activation='relu'))
            self.autoEncoderModel.add(Dense(32, activation='relu'))
            self.autoEncoderModel.add(Dense(64, activation='relu'))
            self.autoEncoderModel.add(Dense(128, activation='relu'))
            self.autoEncoderModel.add(Dense(256, activation='relu'))
        elif archType == 12:
            self.autoEncoderModel.add(Dense(256, activation='relu'))
            self.autoEncoderModel.add(Dense(128, activation='relu'))
            self.autoEncoderModel.add(Dense(64, activation='relu'))
            self.autoEncoderModel.add(Dense(32, activation='relu'))
            self.autoEncoderModel.add(Dense(16, activation='relu'))
            self.autoEncoderModel.add(Dense(32, activation='relu'))
            self.autoEncoderModel.add(Dense(64, activation='relu'))
            self.autoEncoderModel.add(Dense(128, activation='relu'))
            self.autoEncoderModel.add(Dense(256, activation='relu'))
        else:
            raise ValueError("Incorrect architecture type given.")

        self.autoEncoderModel.add(Dense(inputDim,
                                        activation='relu'))  # Output layer
        self.autoEncoderModel.compile(optimizer='adam', loss=losses.MSE)
        self.autoEncoderModel.summary()

        # Create encoder
        inputSample = Input(shape=(inputDim, ))
        inputLayer = self.autoEncoderModel.layers[0]
        if 0 < archType < 6:
            layerTwo = self.autoEncoderModel.layers[1]
            self.encoderModel = Model(inputSample,
                                      layerTwo(inputLayer(inputSample)))
        elif archType < 8:
            layerTwo = self.autoEncoderModel.layers[1]
            layerThree = self.autoEncoderModel.layers[2]
            self.encoderModel = Model(
                inputSample, layerThree(layerTwo(inputLayer(inputSample))))
        elif archType < 10:
            layerTwo = self.autoEncoderModel.layers[1]
            layerThree = self.autoEncoderModel.layers[2]
            layerFour = self.autoEncoderModel.layers[3]
            self.encoderModel = Model(
                inputSample,
                layerFour(layerThree(layerTwo(inputLayer(inputSample)))))
        elif archType < 12:
            layerTwo = self.autoEncoderModel.layers[1]
            layerThree = self.autoEncoderModel.layers[2]
            layerFour = self.autoEncoderModel.layers[3]
            layerFive = self.autoEncoderModel.layers[4]
            self.encoderModel = Model(
                inputSample,
                layerFive(
                    layerFour(layerThree(layerTwo(inputLayer(inputSample))))))
        elif archType == 12:
            layerTwo = self.autoEncoderModel.layers[1]
            layerThree = self.autoEncoderModel.layers[2]
            layerFour = self.autoEncoderModel.layers[3]
            layerFive = self.autoEncoderModel.layers[4]
            layerSix = self.autoEncoderModel.layers[5]
            self.encoderModel = Model(
                inputSample,
                layerSix(
                    layerFive(
                        layerFour(layerThree(layerTwo(
                            inputLayer(inputSample)))))))

        self.encoderModel.summary()

    def train(self, trainX, batchSize, epochs, isDenoising=False):
        tic = time.perf_counter()
        noisyX = trainX
        if isDenoising:  # add Gaussian noise (mean 0, std 0.5) to the inputs
            noisyX = trainX + np.random.normal(0, 1, trainX.shape) / 2

        self.autoEncoderModel.fit(noisyX,
                                  trainX,
                                  epochs=epochs,
                                  batch_size=batchSize,
                                  shuffle=True,
                                  validation_split=0.2)
        toc = time.perf_counter()

        with open(self.logFilePath, "a") as resultsWriter:
            resultsWriter.write(
                f"AutoEncoder training time: {toc - tic:0.4f} seconds\n")

        return toc - tic

    def encode(self, dataX, isTrainData):
        tic = time.perf_counter()
        encodedDataX = self.encoderModel.predict(dataX)
        toc = time.perf_counter()
        label = "training" if isTrainData else "testing"
        suffix = "\n" if isTrainData else "\n\n"
        with open(self.logFilePath, "a") as resultsWriter:
            resultsWriter.write(
                f"AutoEncoder {label} encoding time: {toc - tic:0.4f} seconds{suffix}")

        return encodedDataX, toc - tic
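
The class assumes the imports sketched below; the usage that follows is hypothetical, with random placeholder arrays of width 300:

# Import sketch for MyAutoEncoder (tf.keras spellings; an assumption).
import time
import numpy as np
from tensorflow.keras import losses
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input

# Hypothetical usage with placeholder data:
trainX = np.random.rand(1000, 300)
testX = np.random.rand(200, 300)
ae = MyAutoEncoder("results.log", inputDim=300, archType=8)
ae.train(trainX, batchSize=64, epochs=10, isDenoising=True)
encodedTrain, _ = ae.encode(trainX, isTrainData=True)
encodedTest, _ = ae.encode(testX, isTrainData=False)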