def main(): """Run this experiment""" training_ints = initialize_instances(TRAIN_DATA_FILE) testing_ints = initialize_instances(TEST_DATA_FILE) validation_ints = initialize_instances(VALIDATE_DATA_FILE) factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) relu = RELU() # 50 and 0.000001 are the defaults from RPROPUpdateRule.java rule = RPROPUpdateRule(0.064, 50, 0.000001) oa_names = ["RHC"] classification_network = factory.createClassificationNetwork( [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu) nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure) oa = RandomizedHillClimbing(nnop) train( oa, classification_network, "RHC", training_ints, validation_ints, testing_ints, measure, TRAINING_ITERATIONS, OUTFILE.format("RHC"), )
def main(): """Run this experiment""" with open( "/home/ec2-user/CS-7641-assignments/my_assignment2/I_am_NN_RHC_Starting_AT", 'w') as f: f.write('{}'.format(datetime.datetime.now())) training_ints = initialize_instances(TRAIN_DATA_FILE) testing_ints = initialize_instances(TEST_DATA_FILE) validation_ints = initialize_instances(VALIDATE_DATA_FILE) factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) relu = RELU() # 50 and 0.000001 are the defaults from RPROPUpdateRule.java rule = RPROPUpdateRule(0.064, 50, 0.000001) oa_names = ["RHC"] classification_network = factory.createClassificationNetwork( [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu) nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure) oa = RandomizedHillClimbing(nnop) train(oa, classification_network, 'RHC', training_ints, validation_ints, testing_ints, measure, TRAINING_ITERATIONS, OUTFILE.format('RHC')) with open( "/home/ec2-user/CS-7641-assignments/my_assignment2/I_am_NN_RHC_ending_AT", 'w') as f: f.write('{}'.format(datetime.datetime.now()))
def main(): """Run algorithms on the abalone dataset.""" instances = initialize_instances() factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(instances) relu = RectifiedLinearUnitActivationFunction() networks = [] # BackPropagationNetwork nnop = [] # NeuralNetworkOptimizationProblem oa = [] # OptimizationAlgorithm oa_names = ["RHC"] results = "" for name in oa_names: classification_network = factory.createClassificationNetwork( [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], relu) networks.append(classification_network) nnop.append( NeuralNetworkOptimizationProblem(data_set, classification_network, measure)) oa.append(RandomizedHillClimbing(nnop[0])) for i, name in enumerate(oa_names): start = time.time() correct = 0 incorrect = 0 train(oa[i], networks[i], oa_names[i], instances, measure) end = time.time() training_time = end - start optimal_instance = oa[i].getOptimal() networks[i].setWeights(optimal_instance.getData()) start = time.time() for instance in instances: networks[i].setInputValues(instance.getData()) networks[i].run() predicted = instance.getLabel().getContinuous() actual = networks[i].getOutputValues().get(0) if abs(predicted - actual) < 0.5: correct += 1 else: incorrect += 1 end = time.time() testing_time = end - start results += "\nResults for %s: \nCorrectly classified %d instances." % ( name, correct) results += "\nIncorrectly classified %d instances.\nPercent correctly classified: %0.03f%%" % ( incorrect, float(correct) / (correct + incorrect) * 100.0) results += "\nTraining time: %0.03f seconds" % (training_time, ) results += "\nTesting time: %0.03f seconds\n" % (testing_time, ) print results
def train(oa, network, oaName, instances, measure=SumOfSquaresError(),
          suppress_output=False, training_iterations=1500):
    """Train a given network on a set of instances.

    :param OptimizationAlgorithm oa:
    :param BackPropagationNetwork network:
    :param str oaName:
    :param list[Instance] instances:
    :param AbstractErrorMeasure measure:
    """
    if not suppress_output:
        print "\nError results for %s every 100 " \
              "iterations\n---------------------------" % (oaName,)
    for i in xrange(training_iterations):
        oa.train()
        error = 0.00
        for instance in instances:
            network.setInputValues(instance.getData())
            network.run()
            output = instance.getLabel()
            output_values = network.getOutputValues()
            example = Instance(output_values, Instance(output_values.get(0)))
            error += measure.value(output, example)
        if not suppress_output and i % 100 == 0:
            print "%0.03f" % error

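# Nearly every main() in this collection calls an initialize_instances
# helper that is not shown. Below is a minimal sketch (an assumption, not
# taken from any of these scripts) of such a loader for ABAGAIL/Jython,
# assuming a purely numeric CSV whose last column is a 0/1 class label.
import csv

from shared import Instance  # ABAGAIL class, available on the Jython classpath


def initialize_instances(infile):
    """Read a numeric CSV into a list of labeled ABAGAIL Instances."""
    instances = []
    with open(infile, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            # Assumption: every column except the last is a feature.
            instance = Instance([float(value) for value in row[:-1]])
            # Assumption: the last column holds a 0/1 class label.
            instance.setLabel(Instance(int(float(row[-1]))))
            instances.append(instance)
    return instances
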
def main(CE): """Run this experiment""" training_ints = initialize_instances('SeasonsStats_trg.csv') testing_ints = initialize_instances('SeasonsStats_test.csv') validation_ints = initialize_instances('SeasonsStats_val.csv') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) relu = RELU() rule = RPROPUpdateRule() oa_name = "SA{}".format(CE) with open(OUTFILE.replace('XXX', oa_name), 'w') as f: f.write('{},{},{},{},{},{},{},{}\n'.format('iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg', 'acc_val', 'acc_tst', 'elapsed')) classification_network = factory.createClassificationNetwork([ INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER ], relu) nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure) for trial in range(TRIALS): oa = SimulatedAnnealing(1E10, CE, nnop) train(oa, classification_network, oa_name, training_ints, validation_ints, testing_ints, measure)
def main(output_filename):
    """Run this experiment"""
    training_ints = initialize_instances('out_digits_train.csv')
    testing_ints = initialize_instances('out_digits_test.csv')
    # Note: the validation split reuses the test file here.
    validation_ints = initialize_instances('out_digits_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    rule = RPROPUpdateRule()
    with open(output_filename, 'w') as f:
        f.write('{},{},{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst',
            'acc_trg', 'acc_val', 'acc_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3,
         OUTPUT_LAYER], relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    # oa = SimulatedAnnealing(1E10, 0.95, nnop)
    # oa = RandomizedHillClimbing(nnop)
    P = 50
    mate = 20
    mutate = 20
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    train(oa, classification_network, output_filename, training_ints,
          validation_ints, testing_ints, measure)

def main(P, mate, mutate):
    # training_ints = initialize_instances('bCancer_trg.csv')
    # testing_ints = initialize_instances('bCancer_test.csv')
    # validation_ints = initialize_instances('bCancer_val.csv')
    training_ints = initialize_instances('winequality_trg.csv')
    testing_ints = initialize_instances('winequality_test.csv')
    validation_ints = initialize_instances('winequality_val.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    hts = HyperbolicTangentSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "GA_{}_{}_{}".format(P, mate, mutate)
    with open(OUTFILE.replace('XXX', oa_name), 'w') as f:
        f.write('{},{},{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst',
            'acc_trg', 'acc_val', 'acc_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], hts)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    train(oa, classification_network, oa_name, training_ints,
          validation_ints, testing_ints, measure)

def main(P, mate, mutate): """Run this experiment""" training_ints = initialize_instances('./../data/x_train_val.csv') testing_ints = initialize_instances('./../data/x_test.csv') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) acti = HyperbolicTangentSigmoid() rule = RPROPUpdateRule() oa_name = "GA_{}_{}_{}".format(P, mate, mutate) FILE = OUTFILE.replace('XXX', oa_name) with open(FILE, 'w') as f: f.write('{},{},{},{},{},{},{},{},{}\n'.format('iteration', 'MSE_trg', 'MSE_tst', 'acc_trg', 'acc_tst', 'f1_trg', 'f1_tst', 'train_time', 'pred_time')) classification_network = factory.createClassificationNetwork([ INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, HIDDEN_LAYER4, OUTPUT_LAYER ], acti) nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure) oa = StandardGeneticAlgorithm(P, mate, mutate, nnop) train(oa, classification_network, oa_name, training_ints, testing_ints, measure, TRAINING_ITERATIONS, FILE)
def __init__(self, iLayer, hLayer_one, hLayer_two, oLayer, iterations,
             oaName, SA_T=1E10, SA_C=0.10, GA_P=50, GA_MA=10, GA_MU=10):
    self.input_layer = iLayer
    self.hidden_layer_one = hLayer_one
    self.hidden_layer_two = hLayer_two
    self.output_layer = oLayer
    self.iterations = iterations
    self.oaName = oaName
    self.measure = SumOfSquaresError()
    self.network = None
    self.SA_T = SA_T
    self.SA_C = SA_C
    self.GA_P = GA_P
    self.GA_MA = GA_MA
    self.GA_MU = GA_MU

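# A hedged sketch (not shown in the source) of how a wrapper class like the
# one above might map its stored hyperparameters onto an ABAGAIL optimizer.
# The method name build_optimizer and its nnop argument are invented for
# illustration; only the imports and constructor signatures are ABAGAIL's.
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm


def build_optimizer(self, nnop):
    """Construct the optimizer selected by self.oaName for the given problem."""
    if self.oaName == 'RHC':
        return RandomizedHillClimbing(nnop)
    if self.oaName == 'SA':
        return SimulatedAnnealing(self.SA_T, self.SA_C, nnop)
    if self.oaName == 'GA':
        return StandardGeneticAlgorithm(self.GA_P, self.GA_MA, self.GA_MU,
                                        nnop)
    raise ValueError('unknown algorithm: %s' % self.oaName)
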
def main(CE):
    oa_name = "SA{}".format(CE)
    with open(OUTFILE.replace('XXX', oa_name), 'w') as f:
        f.write('{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_train', 'MSE_test',
            'acc_train', 'acc_test', 'elapsed'))
    training_data = initialize_instances('../data/Pima-train.csv')
    testing_data = initialize_instances('../data/Pima-test.csv')
    print(len(training_data))
    # testing_ints = initialize_instances('m_test.csv')
    # validation_ints = initialize_instances('m_val.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_data)
    relu = RELU()
    rule = RPROPUpdateRule()
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = SimulatedAnnealing(1E10, CE, nnop)
    train(oa, classification_network, oa_name, training_data, testing_data,
          measure)

def main(P, mate, mutate): """Run this experiment""" training_ints = initialize_instances( '/Users/Sean/School/GeorgiaTech/CS7641/Assignment2/s_trg.csv') testing_ints = initialize_instances( '/Users/Sean/School/GeorgiaTech/CS7641/Assignment2/s_test.csv') validation_ints = initialize_instances( '/Users/Sean/School/GeorgiaTech/CS7641/Assignment2/s_val.csv') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) sig = LogisticSigmoid() rule = RPROPUpdateRule() oa_name = "GA_%s_%s_%s" % (P, mate, mutate) with open(OUTFILE.replace('XXX', oa_name), 'w') as f: f.write('%s,%s,%s,%s,%s,%s,%s,%s\n' % ('iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg', 'acc_val', 'acc_tst', 'elapsed')) classification_network = factory.createClassificationNetwork([ INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER ], sig) nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure) for trial in xrange(TRIALS): oa = StandardGeneticAlgorithm(P, mate, mutate, nnop) train(oa, classification_network, oa_name, training_ints, validation_ints, testing_ints, measure)
def main(CE, layers, training_iterations, test_data_file, train_data_file,
         validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_name = "SA_{}".format(CE)
    classification_network = factory.createClassificationNetwork(layers, relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = SimulatedAnnealing(1E10, CE, nnop)
    with open(OUTFILE.format(data_name, oa_name), 'a+') as f:
        # 'a+' may leave the read pointer at EOF on some platforms; rewind so
        # the duplicate-header check actually sees the file contents.
        f.seek(0)
        content = f.read()
        if "MSE_trg" not in content:
            f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format(
                'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst',
                'acc_trg', 'acc_val', 'acc_tst',
                'f1_trg', 'f1_val', 'f1_tst', 'elapsed'))
    base.train(oa, classification_network, oa_name, training_ints,
               validation_ints, testing_ints, measure, training_iterations,
               OUTFILE.format(data_name, oa_name))

def main(P, mate, mutate, layers, training_iterations, test_data_file,
         train_data_file, validate_data_file):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    # relu = RELU()
    relu = LogisticSigmoid()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_name = "GA_{}_{}_{}".format(P, mate, mutate)
    with open(OUTFILE.format(oa_name), 'w') as f:
        f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst',
            'acc_trg', 'acc_val', 'acc_tst',
            'f1_trg', 'f1_val', 'f1_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork(layers, relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    base.train(oa, classification_network, oa_name, training_ints,
               validation_ints, testing_ints, measure, training_iterations,
               OUTFILE.format(oa_name))

def main(CE): """Run this experiment""" training_ints = initialize_instances(TRAIN_DATA_FILE) testing_ints = initialize_instances(TEST_DATA_FILE) validation_ints = initialize_instances(VALIDATE_DATA_FILE) factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) relu = RELU() # 50 and 0.000001 are the defaults from RPROPUpdateRule.java rule = RPROPUpdateRule(0.064, 50, 0.000001) oa_name = "SA_{}".format(CE) with open(OUTFILE.format(oa_name), 'w') as f: f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format( 'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg', 'acc_val', 'acc_tst', 'f1_trg', 'f1_val', 'f1_tst', 'elapsed')) classification_network = factory.createClassificationNetwork( [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu) nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure) print(CE) print(nnop) oa = SimulatedAnnealing(1E10, CE, nnop) train(oa, classification_network, oa_name, training_ints, validation_ints, testing_ints, measure, TRAINING_ITERATIONS, OUTFILE.format(oa_name))
def nn_back(name):
    instances = read_dataset('./spambase.csv', [1])
    train_set, test_set = train_test_split(instances, 0.3)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], HyperbolicTangentSigmoid())
    bp_instance = BatchBackPropagationTrainer(train_set,
                                              classification_network,
                                              measure, RPROPUpdateRule())
    nn_state = {'network': classification_network, 'trainer': bp_instance}
    wrapper_nn = AlgoWrapper(
        nn_state,
        lambda state: state['trainer'].train(),
        lambda state: classification_error_acc(train_set, state['network'],
                                               measure),
        lambda state: classification_error_acc(test_set, state['network'],
                                               measure))
    # create name and invalidate if super empty
    decorated_name = name
    timed_trainer = TimedTrainer(decorated_name, wrapper_nn, 200, 4000, 1,
                                 _param_dict={'name': name})
    timed_trainer.run()
    print "NNBP bp done"

def main():
    training_ints = initialize_instances('data/bank_train.csv')
    testing_ints = initialize_instances('data/bank_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = LogisticSigmoid()
    rule = RPROPUpdateRule()

    ######################### back prop #####################
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    train(BatchBackPropagationTrainer(data_set, classification_network,
                                      measure, rule),
          classification_network, 'Backprop', training_ints, testing_ints,
          measure, './ANN/BP/BACKPROP_LOG.csv', 2000)

    ######################### simulated annealing #################
    for CE in [0.15, 0.35, 0.55, 0.75, 0.95]:
        for T in [1e8, 1e10, 1e12]:
            classification_network = factory.createClassificationNetwork(
                [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
            nnop = NeuralNetworkOptimizationProblem(
                data_set, classification_network, measure)
            oFile = "./ANN/SA/%s_%s_LOG.csv" % (CE, T)
            train(SimulatedAnnealing(T, CE, nnop), classification_network,
                  'simulated annealing', training_ints, testing_ints, measure,
                  oFile, 2000)

    ######################### random hill climbing #################
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    train(RandomizedHillClimbing(nnop), classification_network, 'RHC',
          training_ints, testing_ints, measure, './ANN/RHC/RHC_LOG.csv', 2000)

    ######################### genetic algorithm #################
    for P in [100]:
        for mate in [5, 15, 30]:
            for mutate in [5, 15, 30]:
                classification_network = factory.createClassificationNetwork(
                    [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
                nnop = NeuralNetworkOptimizationProblem(
                    data_set, classification_network, measure)
                oFile = "./ANN/GA/%s_%s_%s_LOG.csv" % (P, mate, mutate)
                train(StandardGeneticAlgorithm(P, mate, mutate, nnop),
                      classification_network, 'GA', training_ints,
                      testing_ints, measure, oFile, 2000)

def main(): """Run algorithms on the abalone dataset.""" instances = initialize_instances() measure = SumOfSquaresError() INPUT_LAYER = 7 HIDDEN_LAYER = 5 OUTPUT_LAYER = 1 #OUTPUT_LAYER must match dataset (e.g., 1 binary, >1 multiclass) results = "" start = time.time() correct = 0 incorrect = 0 #BACKPROP NETWORK # network = BackpropNetworkBuilder()\ # .withLayers([INPUT_LAYER,HIDDEN_LAYER,OUTPUT_LAYER])\ # .withDataSet(DataSet(instances))\ # .withIterations(1000)\ # .withUpdateRule(ADAM())\ # .withActivationFunction(RELU())\ # .withErrorMeasure(SumOfSquaresError())\ # .train() #OPT NETWORK - can also use withRHC() or withGA(popSize, toMate, toMutate) network = OptNetworkBuilder()\ .withLayers([INPUT_LAYER,HIDDEN_LAYER,OUTPUT_LAYER])\ .withDataSet(DataSet(instances))\ .withSA(15000, .95)\ .withIterations(1000)\ .train() end = time.time() training_time = end - start start = time.time() for instance in instances: network.setInputValues(instance.getData()) network.run() actual = instance.getLabel().getContinuous() predicted = network.getOutputValues().get(0) if abs(predicted - actual) < 0.5: correct += 1 else: incorrect += 1 end = time.time() testing_time = end - start results += "\nResults: \nCorrectly classified %d instances." % (correct) results += "\nIncorrectly classified %d instances.\nPercent correctly classified: %0.03f%%" % (incorrect, float(correct)/(correct+incorrect)*100.0) results += "\nTraining time: %0.03f seconds" % (training_time,) results += "\nTesting time: %0.03f seconds\n" % (testing_time,) print results
def main(): """Run this experiment""" training_ints = initialize_instances('titanic_train.csv') testing_ints = initialize_instances('titanic_test.csv') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) acti = LogisticSigmoid() rule = RPROPUpdateRule() oa_names = ["Backprop"] classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER],acti) train(BatchBackPropagationTrainer(data_set,classification_network,measure,rule), classification_network, 'Backprop', training_ints,testing_ints, measure)
def nn_other():
    instances = read_dataset('./digits.csv', [5, 6, 7, 8, 9])
    train_set, test_set = train_test_split(instances, 0.3)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    network_rhc = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], HyperbolicTangentSigmoid())
    network_sa = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], HyperbolicTangentSigmoid())
    network_ga = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], HyperbolicTangentSigmoid())
    rhc_network("digitsNNrhc", network_rhc, measure, train_set, test_set,
                classification_error_acc, 300.0, 3000, 1, 3)
    sa_network("digitsNNsa", network_sa, measure, train_set, test_set,
               classification_error_acc, 300.0, 1000, 1, 3,
               ([1E12, 1E6], [0.90, 0.95, 0.99]))
    ga_network("digitsNNga", network_ga, measure, train_set, test_set,
               classification_error_acc, 3000.0, 1000, 1, 1,
               ([40, 20], [0.4, 0.2], [0.1, 0.05]))

def main(): """Run this experiment""" training_ints = initialize_instances('./../data/x_train_val.csv') testing_ints = initialize_instances('./../data/x_test.csv') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) acti = HyperbolicTangentSigmoid() rule = RPROPUpdateRule() classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1,HIDDEN_LAYER2,HIDDEN_LAYER3,HIDDEN_LAYER4, OUTPUT_LAYER],acti) nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure) oa = RandomizedHillClimbing(nnop) train(oa, classification_network, 'RHC', training_ints, testing_ints, measure, TRAINING_ITERATIONS, OUTFILE)
def main(): """Run this experiment""" training_ints = initialize_instances('Cryo_train.csv') testing_ints = initialize_instances('Cryo_test.csv') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) acti = LogisticSigmoid() rule = RPROPUpdateRule() classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER],acti) nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure) oa = RandomizedHillClimbing(nnop) train(oa, classification_network, 'RHC', training_ints, testing_ints, measure)
def main(): """Run algorithms on the adult dataset.""" train_instances = initialize_instances(trainX) test_instances = initialize_instances(testX) factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(train_instances) networks = [] # BackPropagationNetwork nnop = [] # NeuralNetworkOptimizationProblem oa = [] # OptimizationAlgorithm oa_names = ["RHC", "SA", "GA", "BP"] print(sys.argv) if len(sys.argv) > 1: oa_names = [sys.argv[1]] set_num = sys.argv[2] # results = "" for name in oa_names: classification_network = factory.createClassificationNetwork( [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], RELU()) networks.append(classification_network) if name != "BP": nnop.append( NeuralNetworkOptimizationProblem(data_set, classification_network, measure)) else: print("adding backprop") rule = RPROPUpdateRule() nnop.append( BatchBackPropagationTrainer(data_set, classification_network, measure, rule)) if "RHC" in oa_names: rhc_index = oa_names.index("RHC") oa.append(RandomizedHillClimbing(nnop[rhc_index])) if "SA" in oa_names: sa_index = oa_names.index("SA") oa.append(SimulatedAnnealing(1E11, .95, nnop[sa_index])) if "GA" in oa_names: ga_index = oa_names.index("GA") oa.append(StandardGeneticAlgorithm(100, 50, 10, nnop[ga_index])) if "BP" in oa_names: rule = RPROPUpdateRule() bp_index = oa_names.index("BP") oa.append(nnop[bp_index]) for i, name in enumerate(oa_names): train(oa[i], networks[i], oa_names[i], train_instances, test_instances, measure)
def main():
    # Get data
    train_data = initialize_instances(TRAIN_FILE)
    test_data = initialize_instances(TEST_FILE)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_data)
    activation = RELU()
    rule = RPROPUpdateRule()
    oa_name = 'Backprop'
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], activation)
    oa = BatchBackPropagationTrainer(data_set, classification_network,
                                     measure, rule)
    train(oa, classification_network, oa_name, train_data, test_data, measure)

def main(): """Run algorithms on the abalone dataset.""" train_instances = initialize_instances(TRAIN_FILE) test_instances = initialize_instances(TEST_FILE) factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(train_instances) networks = [] # BackPropagationNetwork nnop = [] # NeuralNetworkOptimizationProblem oa = [] # OptimizationAlgorithm oa_names = ["trial_1", "trial_2", "trial_3", "trail_4", "trial_5"] results = "" for name in oa_names: classification_network = factory.createClassificationNetwork( [INPUT_LAYER, HIDDEN_LAYER_1, HIDDEN_LAYER_1, OUTPUT_LAYER]) networks.append(classification_network) nnop.append( NeuralNetworkOptimizationProblem(data_set, classification_network, measure)) for num, params in enumerate(oa_names): oa.append(RandomizedHillClimbing(nnop[num])) result_file = open(WRITE_DIR, "w") for i, name in enumerate(oa_names): start = time.time() train(oa[i], networks[i], oa_names[i], train_instances, measure, result_file) end = time.time() training_time = end - start optimal_instance = oa[i].getOptimal() networks[i].setWeights(optimal_instance.getData()) result_file.write("finished_training " + name + " in " + str(training_time)) test(test_instances, networks[i], name, result_file) print "finished training, " + name result_file.close()
def main(): """Run this experiment""" training_ints = initialize_instances('SeasonsStats_trg.csv') testing_ints = initialize_instances('SeasonsStats_test.csv') validation_ints = initialize_instances('SeasonsStats_val.csv') for t in range(TRIALS): factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) relu = RELU() rule = RPROPUpdateRule() oa_names = ["Backprop"] classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1,HIDDEN_LAYER2,HIDDEN_LAYER3, OUTPUT_LAYER],relu) train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule), classification_network,'Backprop', training_ints, validation_ints, testing_ints, measure)
def main(): """Run this experiment""" training_ints = initialize_instances('spam_trg.csv') testing_ints = initialize_instances('spam_test.csv') validation_ints = initialize_instances('spam_val.csv') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) relu = RELU() rule = RPROPUpdateRule() oa_names = ["RHC"] classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1,HIDDEN_LAYER2, OUTPUT_LAYER],relu) for trial in xrange(TRIALS): oa = RandomizedHillClimbing(NeuralNetworkOptimizationProblem(data_set, classification_network, measure)) train(oa, classification_network, 'RHC', training_ints,validation_ints,testing_ints, measure)
def main(T, CE):
    """Run this experiment"""
    training_ints = initialize_instances('./../data/wine_train.csv')
    testing_ints = initialize_instances('./../data/wine_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "SA_{}_{}".format(T, CE)
    with open(OUTFILE, 'w') as f:
        f.write('{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_trg', 'MSE_tst', 'acc_trg', 'acc_tst',
            'elapsed'))
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = SimulatedAnnealing(T, CE, nnop)
    train(oa, classification_network, oa_name, training_ints, testing_ints,
          measure, TRAINING_ITERATIONS, OUTFILE)

def main(P, mate, mutate):
    """Run this experiment"""
    training_ints = initialize_instances('spambase_train.csv')
    testing_ints = initialize_instances('spambase_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "GA_{}_{}_{}".format(P, mate, mutate)
    with open(OUTFILE.replace('XXX', oa_name), 'w') as f:
        f.write('{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_trg', 'MSE_tst', 'acc_trg', 'acc_tst',
            'elapsed'))
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    train(oa, classification_network, oa_name, training_ints, testing_ints,
          measure)

def main(P, mate, mutate): """Run this experiment""" training_ints = initialize_instances('./clean_data/adult_train.txt') testing_ints = initialize_instances('./clean_data/adult_test.txt') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) logunit = LogisticSigmoid() rule = RPROPUpdateRule() oa_name = "\nGA_tuning: %d , %d, %d\n" % (P, mate, mutate) classification_network = factory.createClassificationNetwork( [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER]) nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure) oa = StandardGeneticAlgorithm(P, mate, mutate, nnop) train(oa, classification_network, oa_name, training_ints, testing_ints, measure)
def main(CE): """Run this experiment""" training_ints = initialize_instances('./clean_data/adult_train.txt') testing_ints = initialize_instances('./clean_data/adult_test.txt') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) logunit = LogisticSigmoid() rule = RPROPUpdateRule() oa_name = "\nSA_cooling: %0.02f\n" % (CE) classification_network = factory.createClassificationNetwork( [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER]) nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure) oa = SimulatedAnnealing(1E10, CE, nnop) train(oa, classification_network, oa_name, training_ints, testing_ints, measure)