def experiment(repeats):
    """Run the LSTM training experiment on synthetic position/acceleration data.

    :param repeats: Number of times to repeat the experiment. When trying to
        create a good network, it is recommended to use 1.
        NOTE(review): currently unused — the experiment runs exactly once.
    :return: Error scores for each repeat (currently always an empty list).
    """
    # Build synthetic signals and make them stationary via first differencing.
    raw_pos = [fake_position(i / 30) for i in range(-300, 300)]
    diff_pos = difference(raw_pos, 1)
    raw_accel = [fake_acceleration(i / 30) for i in range(-300, 300)]
    diff_accel = difference(raw_accel, 1)

    # TODO: also try a BIDIRECTIONAL LSTM.
    model = LSTM(input_size=1, hidden_layer_size=40, n_lstm_units=3,
                 bidirectional=False, output_size=1,
                 training_batch_size=60, epochs=2000, device=device)

    # Differencing consumes the first sample; drop it from the raw series
    # too so every array stays aligned.
    raw_pos = raw_pos[1:]
    raw_accel = raw_accel[1:]
    raw_accel = array(raw_accel)
    raw_pos = array(raw_pos)
    diff_accel = array(diff_accel)
    diff_pos = array(diff_pos)

    # Scale inputs to zero mean / unit variance and targets into [-1, 1].
    X_scaler = StandardScaler()
    raw_accel = X_scaler.fit_transform(raw_accel.reshape(-1, 1))
    y_scaler = MinMaxScaler(feature_range=(-1, 1))
    diff_pos = y_scaler.fit_transform(diff_pos.reshape(-1, 1))
    raw_accel = raw_accel.reshape(-1)
    diff_pos = diff_pos.reshape(-1)

    X, y = timeseries_dataloader(data_x=raw_accel, data_y=diff_pos,
                                 enable_asymetrical=True)

    # Enabling CUDA (no-op when device is the CPU).
    model.to(device)
    # Fit the network.
    model.fit(X, y)

    X_graphic = torch.from_numpy(raw_accel.astype("float32")).to(device)
    y_graphic = torch.from_numpy(diff_pos.astype("float32")).to(device)

    # Reload the best checkpoint saved during training.
    model = torch.load("best_model.pth")
    model.to(device)
    yhat = []
    # Reset the hidden state for single-sequence (batch size 1) evaluation.
    model.hidden_cell = (
        torch.zeros(model.num_directions * model.n_lstm_units, 1,
                    model.hidden_layer_size).to(model.device),
        torch.zeros(model.num_directions * model.n_lstm_units, 1,
                    model.hidden_layer_size).to(model.device))
    model.eval()
    for X in X_graphic:
        # BUG FIX: detach and move predictions to host memory so matplotlib
        # can plot them; plotting live (possibly CUDA) tensors fails. This
        # matches the evaluation code of the second experiment() version.
        yhat.append(model(X.view(1, -1, 1)).detach().cpu().numpy())
    yhat = array(yhat).reshape(-1)

    # Report performance visually.
    plt.close()
    plt.plot(range(len(yhat)), yhat, range(len(y)), y)
    plt.savefig("output_train.png", dpi=800)
    plt.show()

    error_scores = []
    return error_scores
def experiment(repeats):
    """Run the full training + trajectory-reconstruction experiment.

    :param repeats: Number of times to repeat the experiment. When trying to
        create a good network, it is recommended to use 1.
        NOTE(review): currently unused — the experiment runs exactly once.
    :return: Error scores for each repeat (currently always an empty list).
    """
    # Create synthetic position/acceleration data and first-difference the
    # position so the target series is stationary.
    raw_pos = [fake_position(i / 30) for i in range(-300, 300)]
    raw_pos = array(raw_pos)
    diff_pos = difference(raw_pos, 1)
    diff_pos = array(diff_pos)
    raw_accel = [fake_acceleration(i / 30) for i in range(-300, 300)]
    raw_accel = array(raw_accel)
    diff_accel = difference(raw_accel, 1)
    diff_accel = array(diff_accel)

    # Scaling data: inputs to zero mean / unit variance, targets to [-1, 1].
    X_scaler = StandardScaler()
    raw_accel = X_scaler.fit_transform(raw_accel.reshape(-1, 1))
    y_scaler = MinMaxScaler(feature_range=(-1, 1))
    diff_pos = y_scaler.fit_transform(diff_pos.reshape(-1, 1))

    # Differencing consumed the first sample; drop it to keep arrays aligned.
    raw_pos = raw_pos[1:]
    raw_accel = raw_accel[1:]
    raw_accel = raw_accel.reshape(-1)
    diff_pos = diff_pos.reshape(-1)

    X, y = timeseries_dataloader(data_x=raw_accel, data_y=diff_pos,
                                 enable_asymetrical=True)

    model = LSTM(input_size=1, hidden_layer_size=80, n_lstm_units=3,
                 bidirectional=False, output_size=1,
                 training_batch_size=60, epochs=20000, device=device)

    # Enabling CUDA (no-op when device is the CPU).
    model.to(device)

    # Let's go fit! Comment if only loading pretrained model.
    model.fit(X, y)

    X_graphic = torch.from_numpy(raw_accel.astype("float32")).to(device)
    y_graphic = diff_pos.astype("float32")

    # =====================TEST=================================================
    # Reload the best checkpoint saved during training.
    model = torch.load("best_model.pth")
    model.to(device)
    yhat = []
    # Reset the hidden state for single-sequence (batch size 1) evaluation.
    model.hidden_cell = (
        torch.zeros(model.num_directions * model.n_lstm_units, 1,
                    model.hidden_layer_size).to(model.device),
        torch.zeros(model.num_directions * model.n_lstm_units, 1,
                    model.hidden_layer_size).to(model.device))
    model.eval()
    for X in X_graphic:
        yhat.append(model(X.view(1, -1, 1)).detach().cpu().numpy())

    # From list of per-step predictions to a flat numpy array.
    yhat = array(yhat).reshape(-1)

    # ======================PLOT================================================
    plt.close()
    plt.plot(range(yhat.shape[0]), yhat, range(y_graphic.shape[0]), y_graphic)
    plt.savefig("output_reconstruction.png", dpi=800)
    # plt.show()

    # BUG FIX: "** 1 / 2" binds as (mse ** 1) / 2 == mse / 2, which is NOT
    # the root-mean-squared error. Raise to the 0.5 power for the true RMSE.
    rmse = mean_squared_error(yhat, y_graphic) ** 0.5
    print("RMSE trajetoria inteira: ", rmse)

    error_scores = []
    return error_scores
def buildModelStack(self, X, Y, convModel=[], auto=[], mem=[], dense=[], order=[]):
    """Build and train a stack of band-specific Conv1D classifiers, then feed
    their stacked predictions, as 10x10 sliding windows, into an LSTM
    classifier, and print confusion-matrix-derived metrics.

    :param X: Input samples; ``X.shape[1:]`` is used as the Keras Input shape.
    :param Y: Labels with 2 columns (softmax + categorical cross-entropy),
        presumably one-hot encoded — verify against caller.
    :param convModel: Unused in this method.
    :param auto: Unused in this method.
    :param mem: Unused in this method.
    :param dense: Unused in this method.
    :param order: Unused in this method.

    NOTE(review): the mutable default arguments (``[]``) are shared across
    calls; since they are unused here this is harmless today, but they should
    become ``None`` defaults if ever mutated.
    """
    #X = np.array(X)
    #Y = np.array(Y)
    Xtensor = Input(shape=X.shape[1:])
    print("Xtensor")
    print(X.shape)
    num_filters = 1
    # One Conv1D branch per EEG-style frequency band, all reading from the
    # same input tensor. Kernel sizes presumably approximate band-pass
    # filters at the sampling rate — TODO confirm the sample rate assumption.
    convLayers = []
    #Delta Frequencies < 3Hz
    #high pass (no low pass needed)
    convLayers.append(
        Conv1D(filters=1, kernel_size=33, padding='same',
               activation='relu')(Xtensor))
    # Filter to find good wavelets
    convLayers[-1] = Conv1D(filters=num_filters, kernel_size=66,
                            padding='same', activation='relu')(convLayers[-1])
    #Theta Frequencies 3.5-7.5 Hz
    #Low pass
    convLayers.append(
        Conv1D(filters=1, kernel_size=33, padding='same',
               activation='relu')(Xtensor))
    #High pass
    convLayers[-1] = Conv1D(filters=1, kernel_size=13, padding='same',
                            activation='relu')(convLayers[-1])
    convLayers[-1] = Conv1D(filters=num_filters, kernel_size=20,
                            padding='same', activation='relu')(convLayers[-1])
    #Alpha Frequencies 7.5-13 Hz
    #Low pass
    convLayers.append(
        Conv1D(filters=1, kernel_size=13, padding='same',
               activation='relu')(Xtensor))
    #High pass
    convLayers[-1] = Conv1D(filters=1, kernel_size=8, padding='same',
                            activation='relu')(convLayers[-1])
    convLayers[-1] = Conv1D(filters=num_filters, kernel_size=10,
                            padding='same', activation='relu')(convLayers[-1])
    #Beta(1) Frequencies 13-25 Hz
    #Low pass
    convLayers.append(
        Conv1D(filters=1, kernel_size=8, padding='same',
               activation='relu')(Xtensor))
    #high pass
    convLayers[-1] = Conv1D(filters=1, kernel_size=4, padding='same',
                            activation='relu')(convLayers[-1])
    convLayers[-1] = Conv1D(filters=num_filters, kernel_size=5,
                            padding='same', activation='relu')(convLayers[-1])
    #Beta(2) Frequencies > 25 Hz
    #Low pass
    convLayers.append(
        Conv1D(filters=1, kernel_size=4, padding='same',
               activation='relu')(Xtensor))
    convLayers[-1] = Conv1D(filters=num_filters, kernel_size=3,
                            padding='same', activation='relu')(convLayers[-1])
    print("ConvLayers")
    # Flatten each band branch and wrap it in its own Model (flattenModels is
    # built but only used by the commented-out autoencoder section below).
    flattenLayers = []
    flattenModels = []
    for convLayer in convLayers:
        flattenLayers.append(Flatten()(convLayer))
        flattenModels.append(Model(Xtensor, flattenLayers[-1]))
    print("flattenLayers")
    # A 2-way softmax head per band.
    denseLayers = []
    for flattenLayer in flattenLayers:
        denseLayers.append(Dense(2, activation='softmax')(flattenLayer))
    print("denseLayers")
    # Train each band classifier independently on the full (X, Y) set.
    convTrainModels = []
    for denseLayer in denseLayers:
        convTrainModels.append(Model(Xtensor, denseLayer))
        convTrainModels[-1].compile(optimizer="adam",
                                    loss="categorical_crossentropy")
        convTrainModels[-1].fit(X, Y)
    print("convTrainModels")
    #encodeLayers = []
    #for flattenLayer in flattenLayers:
    #    encodeLayers.append(Dense(3000)(flattenLayer))
    #    print(flattenLayer.shape)
    #print("ensoderLayers")
    #decodeLayers = []
    #for encodeLayer in encodeLayers:
    #    decodeLayers.append(Dense(num_filters*3000)(encodeLayer))
    #print("decodeLayers")
    #decoderModels = []
    #for decodeLayer,flattenModel in zip(decodeLayers, flattenModels):
    #    decoderModels.append(Model(Xtensor,decodeLayer))
    #    decoderModels[-1].compile(optimizer="adam",loss="mse")
    #    decoderModels[-1].fit(X, flattenModel.predict(X))
    #print("decodeModels")
    #print(convTrainModels)
    #classModel = Concatenate()([flattenLayer for flattenLayer in flattenLayers])
    #classModel = Embedding(
    #classModel = Dense(100, activation="relu")(classModel)
    #classModel = Dense(100, activation="relu")(classModel)
    # Stack the 5 per-band predictions (2 columns each) side by side:
    # newData has 10 feature columns per sample.
    newData = np.hstack(
        [convTrainModel.predict(X) for convTrainModel in convTrainModels])
    # Build sliding windows of the 10 most recent prediction rows.
    # NOTE(review): Data is allocated with newData.shape[0] - 2 rows, but the
    # loop below (n starting at 9) only fills newData.shape[0] - 9 of them,
    # leaving the last 7 windows all-zero; fit() then pairs Data with Y[2:].
    # The "-2" offset looks like a leftover from the commented-out 3-sample
    # window variant — confirm the intended window size/label alignment.
    Data = np.zeros(shape=(newData.shape[0] - 2, 10, 10))
    i = 0
    for n in np.arange(9, newData.shape[0], 1):
        for j in np.arange(10):
            for k in np.arange(10):
                Data[i][j][k] = newData[n - j][k]
        i += 1
    #Data = [[newData[n-2],newData[n-1],newData[n]] for n in np.arange(2,newData.shape[0],1)]
    #newData = np.ndarray([convTrainModel.predict(X).flatten() for convTrainModel in convTrainModels]).flatten('F')
    #print(Data.shape)
    print(Data[0])
    #return
    # Final classifier: LSTM over the (10 timesteps x 10 features) windows.
    timeTensor = Input(shape=[10, 10])
    classModel = LSTM(1000)(timeTensor)
    #classModel = Flatten()(classModel)
    classModel = Dense(2, activation="softmax")(classModel)
    classModel = Model(timeTensor, classModel)
    classModel.compile(optimizer="adam", loss="categorical_crossentropy")
    classModel.fit(Data, Y[2:])
    print("ClassModel")
    # One-hot encode the argmax predictions.
    # NOTE(review): Ypred has X.shape[0] rows but only Data.shape[0]
    # (= X.shape[0] - 2) of them get set; the trailing rows stay zero.
    Ypred = np.zeros((X.shape[0], Y.shape[1]))
    print("zeros")
    Yi = 0
    for pred in np.argmax(classModel.predict(Data, batch_size=None), axis=1):
        Ypred[Yi][pred] = 1
        Yi += 1
    print("prediction")
    # Accumulate the confusion matrix, treating column 0 as the positive
    # class and column 1 as the negative class.
    tp = tn = fn = fp = 0
    Yi = 0
    for y in Y[2:]:
        tp += Ypred[Yi][0] * y[0]
        fp += max(Ypred[Yi][0] - y[0], 0)
        tn += Ypred[Yi][1] * y[1]
        fn += max(Ypred[Yi][1] - y[1], 0)
        Yi += 1
    print("tp,tn,fp,fn")
    # Derived metrics; guards avoid division by zero (sens and rec are the
    # same quantity computed twice).
    acc = sens = spec = prec = rec = f1 = 0
    acc = (tp + tn) / (tp + tn + fp + fn)
    if (tp + fn > 0):
        sens = tp / (tp + fn)
    if (tn + fp > 0):
        spec = tn / (tn + fp)
    if (tp + fp > 0):
        prec = tp / (tp + fp)
    if (tp + fn > 0):
        rec = tp / (tp + fn)
    if (prec + rec > 0):
        f1 = 2 * ((prec * rec) / (prec + rec))
    modelNum = 1
    print(
        f"{'modelNum':8s}|{'tp':8s}|{'fp':8s}|{'fn':8s}|{'tn':8s}|{'acc':8s}|{'sens':8s}|{'spec':8s}|{'rec':8s}|{'prec':8s}|{'f1':8s}\n"
    )
    print(
        f"{modelNum:8d}|{tp:8.3f}|{fp:8.3f}|{fn:8.3f}|{tn:8.3f}|{acc:8.3f}|{sens:8.3f}|{spec:8.3f}|{rec:8.3f}|{prec:8.3f}|{f1:8.3f}\n"
    )