# Cross-subject run: retrain the compiled model, persist the learning curves,
# evaluate on the held-out test subject, save model + misclass, and plot.
# NOTE(review): relies on names defined earlier in the file (model, train_set,
# valid_set, singleTestData, singleTestLabels, epoches, batch_size, model_type,
# train_type, SignalAndTarget, plot) — confirm they are in scope here.
print("INFO : Epochs: {}".format(epoches))
print("INFO : Batch Size: {}".format(batch_size))

# Fit model exactly the same way as when you trained it
# (omit any optional params though)
if train_type == 'trialwise':
    print(model.fit(train_set.X, train_set.y, epochs=epoches,
                    batch_size=batch_size, scheduler='cosine',
                    validation_data=(valid_set.X, valid_set.y)))
else:  # cropped: decoding on fixed-length crops of the trial
    input_time_length = 450
    print(model.fit(train_set.X, train_set.y, epochs=epoches,
                    batch_size=batch_size, scheduler='cosine',
                    input_time_length=input_time_length,
                    validation_data=(valid_set.X, valid_set.y)))
print(model.epochs_df)
# Persist the per-epoch metrics table so the run can be restored/re-plotted.
np.save("DataForRestoration\\CrossSubject\\{}-{}-{}epoches".format(
    model_type, train_type, epoches), model.epochs_df.iloc[:])

# Evaluation on the held-out subject's data.
test_set = SignalAndTarget(singleTestData, y=singleTestLabels)
# BUGFIX: renamed local 'eval' -> 'eval_result' so the builtin eval() is no
# longer shadowed for the rest of the module.
eval_result = model.evaluate(test_set.X, test_set.y)
print(eval_result)
print(eval_result['misclass'])
torch.save(model, "crossModels\\{}-{}-cross-{}epoches-torch-model".format(
    model_type, train_type, epoches))
np.save("DataForRestoration\\CrossSubject\\{}-{}-{}epoches-testSetMisclass".format(
    model_type, train_type, epoches), eval_result['misclass'])
y_pred = model.predict_classes(test_set.X)
plot('accuracy', 'Plots\\CrossSubject\\', model, test_set, y_pred,
     model_type, train_type, epoches, 0)
plot('confusionMatrix', 'Plots\\CrossSubject\\', model, test_set, y_pred,
     model_type, train_type, epoches, 0)
# Tail of a model.compile(...) call whose opening line is outside this view —
# these keyword arguments seed the batch iterator and enable cropped decoding.
iterator_seed=1, cropped=True)
print('compiled')
# Train in cropped mode for a fixed number of epochs with a cosine LR schedule.
num_epochs = 30
model.fit(train_set.X, train_set.y, epochs=num_epochs, batch_size=64,
          scheduler='cosine', input_time_length=input_time_length)
print(model.epochs_df)
# Put the underlying torch network into inference mode before predicting.
model.network.eval()
print(model.predict(test_set.X))
scores = model.evaluate(test_set.X, test_set.y)
# evaluate() reports misclassification rate; accuracy is its complement.
Accuracy = 1 - scores['misclass']
print('Accuracy (%) :', Accuracy)
# save key values
# NOTE(review): count, CV, i and the Accuracies/Losses arrays come from an
# enclosing loop outside this view — presumably one cross-validation fold per
# iteration over subjects; confirm against the surrounding code.
Accuracies[count, CV] = Accuracy
Losses[count, :] = model.epochs_df['train_loss']
#th.save({'Acc':Accuracies,'Losses': Losses}, var_save_path)
print('Overall Acc Subject {}: {}'.format(i, np.mean(Accuracies[count])))
count += 1
print('last_step')