import pickle
import random

import numpy as np

# GammaGWR, Utilities, and the configuration constants used below
# (FILES_FOLDER, DATA_DIMENSION, INSTANCE_COLUMN, CONTEXT_BETA,
# LEARNING_RATE_B, LEARNING_RATE_N) are assumed to be defined or
# imported elsewhere in this module.


def batch_learning(train_data, test_data, args):
    rgwr = GammaGWR()
    utils = Utilities()
    utils.remove_files(FILES_FOLDER)  # Clear the directory for new data.

    # Learn the model.
    train_dataset, train_dimension, train_labelSet = utils.describe_data(
        train_data, args.dataset_name)
    rgwr.initNetwork(train_dimension, numClasses=args.num_classes,
                     numWeights=args.num_weights)
    rgwr.train(train_dataset, train_labelSet, maxEpochs=args.epochs,
               insertionT=args.threshold, beta=CONTEXT_BETA,
               epsilon_b=LEARNING_RATE_B, epsilon_n=LEARNING_RATE_N)

    # Test the model.
    test_dataset, test_dimension, test_labelSet = utils.describe_data(
        test_data, args.dataset_name)
    rgwr.evaluate_model(rgwr, train_dataset, train_labelSet, mode='train')
    rgwr.evaluate_model(rgwr, test_dataset, test_labelSet, mode='test')
    utils.save_predicted_metrics(rgwr, '')
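
def _example_batch_run():
    """Usage sketch for batch_learning (illustrative, not part of the
    original pipeline): builds a hypothetical args namespace carrying the
    fields the function reads (dataset_name, num_classes, num_weights,
    epochs, threshold) and feeds it pre-loaded train/test arrays. The
    file names and default values below are assumptions."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', type=str, default='my_dataset')
    parser.add_argument('--num_classes', type=int, default=10)
    parser.add_argument('--num_weights', type=int, default=3)
    parser.add_argument('--epochs', type=int, default=25)
    parser.add_argument('--threshold', type=float, default=0.85)
    args = parser.parse_args([])  # Use defaults; pass real CLI args in practice.

    train_data = np.load('train_data.npy')  # Hypothetical input files.
    test_data = np.load('test_data.npy')
    batch_learning(train_data, test_data, args)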

def iterative_learning(train_data, test_data, args, category_column):
    rgwr = GammaGWR()
    utils = Utilities()
    utils.remove_files(FILES_FOLDER)  # Clear the directory for new data.

    train_accuracies = []
    test_accuracies = []
    mini_batch_size = 5
    iterations = 10

    all_object_classes = np.unique(train_data[:, category_column])
    random.shuffle(all_object_classes)

    rgwr.initNetwork(DATA_DIMENSION, numClasses=args.num_classes,
                     numWeights=args.num_weights)

    for iteration in range(iterations):
        # Select the next mini-batch of object classes to learn.
        objects_to_learn = all_object_classes[
            mini_batch_size * iteration:mini_batch_size * (iteration + 1)]

        # Learn the model on the current mini-batch only.
        train_data_prepared = train_data[np.in1d(
            train_data[:, category_column], objects_to_learn)]
        train_dataset, train_dimension, train_labelSet = utils.describe_data(
            train_data_prepared, args.dataset_name)
        rgwr.train(train_dataset, train_labelSet, maxEpochs=args.epochs,
                   insertionT=args.threshold, beta=CONTEXT_BETA,
                   epsilon_b=LEARNING_RATE_B, epsilon_n=LEARNING_RATE_N)

        # Test the model on the full test set after each mini-batch.
        test_dataset, test_dimension, test_labelSet = utils.describe_data(
            test_data, args.dataset_name)
        train_accuracy = rgwr.evaluate_model(rgwr, train_dataset,
                                             train_labelSet, mode='train')
        test_accuracy = rgwr.evaluate_model(rgwr, test_dataset,
                                            test_labelSet, mode='test')
        train_accuracies.append(train_accuracy)
        test_accuracies.append(test_accuracy)
        utils.save_predicted_metrics(rgwr, iteration)

        # Accumulate the per-iteration neuron counts written by
        # save_predicted_metrics into a single array.
        with open("./saved_data/num_neurons" + str(iteration) + '.file',
                  "rb") as f:
            iteration_neurons = pickle.load(f)
        if iteration == 0:
            number_neurons = iteration_neurons
        else:
            number_neurons = np.append(number_neurons, iteration_neurons)

    with open('./saved_data/test_accuracies.file', "wb") as f:
        pickle.dump(test_accuracies, f, pickle.HIGHEST_PROTOCOL)
    with open('./saved_data/train_accuracies.file', "wb") as f:
        pickle.dump(train_accuracies, f, pickle.HIGHEST_PROTOCOL)
    with open("./saved_data/num_neurons.file", "wb") as f:
        pickle.dump(number_neurons, f, pickle.HIGHEST_PROTOCOL)

    print("Object classes order: ", all_object_classes)
    print("Train accuracies: ", train_accuracies)
    print("Test accuracies: ", test_accuracies)
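
def _example_plot_learning_curves():
    """Sketch (assumption): reload the accuracy curves that
    iterative_learning pickles under ./saved_data/ and plot accuracy per
    mini-batch iteration. Only the file names used above are relied on;
    the matplotlib plotting itself is illustrative."""
    import matplotlib.pyplot as plt

    with open('./saved_data/train_accuracies.file', 'rb') as f:
        train_accuracies = pickle.load(f)
    with open('./saved_data/test_accuracies.file', 'rb') as f:
        test_accuracies = pickle.load(f)

    iterations = range(len(train_accuracies))
    plt.plot(iterations, train_accuracies, label='train')
    plt.plot(iterations, test_accuracies, label='test')
    plt.xlabel('Mini-batch iteration')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.show()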
            else:
                fp.append(object_to_learn)  # Known object wrongly flagged as novel.
            novel_objects_detected += 1
        else:
            if object_to_learn not in learnt_objects:
                fn.append(object_to_learn)  # Novel object missed by the detector.
            else:
                tn.append(object_to_learn)  # Known object correctly recognised.

            # Test the model.
            print("Assumption. Object is not novel: ", object_to_learn)
            test_data_prepared = test_data[np.in1d(
                test_data[:, INSTANCE_COLUMN], object_to_learn)]
            test_dataSet, test_dimension, test_labelSet = utils.describe_data(
                test_data_prepared, args.dataset_name)
            test_accuracy = rgwr.evaluate_model(rgwr, test_dataSet,
                                                test_labelSet, mode='test')
            test_accuracies.append(test_accuracy)
            learnt_objects_detected += 1

        iteration += 1
        # utils.save_predicted_metrics(rgwr, iteration)

    print("Test accuracies: ", test_accuracies)
    print("Test accuracy: ", sum(test_accuracies) / len(test_accuracies))
    print("Probable novel objects: ", novel_objects_detected)
    print("Probable known objects: ", learnt_objects_detected)
    print("FP: ", fp)
    print("TP: ", tp)
    print("FN: ", fn)
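
def _novelty_detection_metrics(tp, fp, fn):
    """Sketch (assumption): summarise the novelty detector's confusion
    lists printed above as precision/recall. tp, fp, and fn are the
    lists of object identifiers collected in the detection loop, so the
    counts are their lengths; the helper itself is illustrative."""
    precision = len(tp) / (len(tp) + len(fp)) if (tp or fp) else 0.0
    recall = len(tp) / (len(tp) + len(fn)) if (tp or fn) else 0.0
    return precision, recall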