def driver(q, ds: str, data_package: list, regression: bool, perf: Performance, hidden_layers: list, hyper_params: dict, count: int, total_counter: int, total: int):
    '''
    Train a neural network with a genetic algorithm and report test-set performance.

    q: a multiprocessing manager Queue object to pass the finished CSV row to
    ds: string. name of the data set
    data_package: list. [test_data, test_labels, training_data, training_labels,
                  output_size, input_size] — all the data needed for the experiment
    regression: boolean. Is this a regression task?
    perf: Performance type object, used to process estimates vs ground truth
    hidden_layers: list. node counts of the hidden layers in the NN architecture
    hyper_params: dict of GA hyperparameters. Keys read here: "maxGen", "pop_size",
                  "mutation_rate", "mutation_range", "crossover_rate"
    count: int. the job number for this data set
    total_counter: int. the job number w.r.t. the total run
    total: int. the total number of jobs across all data sets. used for progress printing.

    NOTE(review): a second function named `driver` (PSO variant) exists in this
    file; if both live in the same module the later definition shadows this one.
    Presumably they belong to separate experiment scripts — confirm.
    '''
    print("Job ", ds, count, "started")
    # multiprocessing suppresses exceptions when workers fail; this try/except
    # forces a visible stack trace before re-raising for debugging.
    try:
        # init all test data values
        test_data, test_labels, training_data, training_labels, output_size, input_size = data_package
        layers = [input_size] + hidden_layers + [output_size]

        # init neural network
        nn = NeuralNetwork(input_size, hidden_layers, regression, output_size)
        nn.set_input_data(training_data, training_labels)

        # total weight count = sum of products of adjacent layer sizes
        total_weights = sum(a * b for a, b in zip(layers, layers[1:]))

        # GA(hyperparameters: dict, total_weights: int, nn)
        ga = GA(hyper_params, total_weights, nn)

        # evolve the population for the configured number of generations
        for gen in range(ga.maxGen):
            ga.fitness()
            ga.selection()
            ga.crossover()

        # get best overall solution and set the NN weights
        bestSolution = ga.bestChromie.getChromie()
        bestWeights = ga.nn.weight_transform(bestSolution)
        ga.nn.weights = bestWeights

        # pass the test data through the trained NN
        results = classify(test_data, test_labels, regression, ga, perf)

        # headers = ["Data set", "layers", "maxGen", "pop_size", "mutation_rate",
        #            "mutation_range", "crossover_rate", "loss1", "loss2"]
        Meta = [
            ds,
            len(hidden_layers),
            hyper_params["maxGen"],
            hyper_params["pop_size"],
            hyper_params["mutation_rate"],
            hyper_params["mutation_range"],
            hyper_params["crossover_rate"],
        ]
        # get the performance of the network w.r.t. the ground truth
        results_performance = perf.LossFunctionPerformance(regression, results)
        # construct the data point to be written to disk via csv file
        data_point = Meta + results_performance
        data_point_string = ','.join(str(x) for x in data_point)
        # put the result on the multiprocessing queue
        q.put(data_point_string)
        # status update
        print(f"{ds} {count}/{int(total/6)}. {total_counter}/{total}")
    except Exception:
        print('Caught exception in worker thread')
        # This prints the type, value, and stack trace of the
        # current exception being handled.
        traceback.print_exc()
        print()
        # bare raise preserves the original traceback for the parent process
        raise
def driver(q, maxIter: int, ds: str, data_package: list, regression: bool, perf: Performance, hidden_layers: list, hyper_params: dict, count: int, total_counter: int, total: int):
    '''
    Train a neural network via particle swarm optimization and report test performance.

    q: a multiprocessing manager Queue object to pass finished data to
    maxIter: int. number of epochs to run PSO for
    ds: string. name of the data set
    data_package: list. provides all the data needed to run the experiment
    regression: boolean. Is this a regression task?
    perf: Performance type object, used to process estimates vs ground truth
    hidden_layers: list. tells the algorithm how many layers there are in the NN architecture
    hyper_params: dictionary of hyperparameters. useful for tuning.
    count: int. the job number for this data set
    total_counter: int. the job number wrt the total run
    total: int. the total number of jobs across all data sets. used for progress printing.
    '''
    print("Job ", ds, count, "started")
    # multiprocessing suppresses exceptions when workers fail; this try/except
    # forces an exception and stack trace to surface for debugging.
    try:
        # unpack the experiment data
        (test_data, test_labels,
         training_data, training_labels,
         output_size, input_size) = data_package
        layers = [input_size] + hidden_layers + [output_size]

        # build the network and attach the training set
        net = NeuralNetwork(input_size, hidden_layers, regression, output_size)
        net.set_input_data(training_data, training_labels)

        # initialize PSO and run the training epochs
        pso = PSO(layers, hyper_params, net, maxIter)
        for t in range(pso.max_t):
            print("job", count, "generation", t)
            pso.update_fitness()
            pso.update_position_and_velocity()

        # install the swarm's global-best position as the network weights
        best_position = pso.gbest_position
        pso.NN.weights = pso.NN.weight_transform(best_position)

        # run the held-out test set through the trained network
        results = classify(test_data, test_labels, regression, pso, perf)

        # headers = ["Data set", "layers", "omega", "c1", "c2", "vmax", "pop_size", "maxIter", "loss1", "loss2"]
        meta = [
            ds,
            len(hidden_layers),
            hyper_params["omega"],
            hyper_params["c1"],
            hyper_params["c2"],
            hyper_params["vmax"],
            hyper_params["pop_size"],
            hyper_params["max_iter"],
        ]
        # score the network against ground truth and assemble the CSV row
        losses = perf.LossFunctionPerformance(regression, results)
        row = meta + losses
        row_csv = ','.join(map(str, row))
        # hand the finished row to the writer via the multiprocessing queue
        q.put(row_csv)
        # status update
        print(f"{ds} {count}/{int(total/6)}. {total_counter}/{total}")
    except Exception as e:
        print('Caught exception in worker thread')
        # This prints the type, value, and stack trace of the
        # current exception being handled.
        traceback.print_exc()
        print()
        raise e