def classification_genetic(data_set, data_set_name, classes, population_size,
                           crossover_prob, creep_variance, mutation_prob,
                           tournament_size, convergence_size):
    """Run 10-fold cross-validated GA training for a classification task.

    For every candidate network layout, trains a fresh network per fold with
    the genetic algorithm and prints per-fold and averaged accuracy.
    """
    print("Running classification on: {}".format(data_set_name))
    layouts = get_network_layouts(data_set.num_cols, len(classes))
    folds = data_set.validation_folds(10)
    for layout in layouts:
        running_accuracy = 0
        print("--Testing network layout: {}".format(layout))
        for fold_i, fold in enumerate(folds):
            net = Network(fold['train'], fold['test'], layout, classes)
            Genetic(net,
                    population_size=population_size,
                    crossover_prob=crossover_prob,
                    creep_variance=creep_variance,
                    mutation_prob=mutation_prob,
                    tournament_size=tournament_size,
                    convergence_size=convergence_size).train()
            fold_accuracy = net.get_accuracy(fold['test'])
            # each fold contributes 1/10th to the final average
            running_accuracy += fold_accuracy / 10
            print("----Accuracy of fold {}: {:.2f}".format(fold_i, fold_accuracy))
        print("--Final accuracy: {:.2f}".format(running_accuracy))
def test_network(self):
    """Load the neural network and feed it the 4 images produced by the
    previous (scaling) stage; each prediction must have shape (1, 512, 512, 1).

    Fixes: the original iterated with `enumerate` but ignored the loop item
    and re-indexed `img_test_list[idx]`, and bound the unused first return
    value to `X_test` — now iterates directly and discards it with `_`.
    """
    model = Network('./docs/aerial_model.h5')
    # Read the four grayscale fixtures written by the scaling stage.
    test_images = [
        cv2.imread('test/img/test_scale_image_{}.png'.format(i),
                   cv2.IMREAD_GRAYSCALE)
        for i in range(1, 5)
    ]
    outputs = []
    for image in test_images:
        _, y_pred = model.predict(image)  # first return value is unused here
        outputs.append(y_pred)
    for output in outputs:
        self.assertEqual(np.shape(output), (1, 512, 512, 1))
def test_network():
    """Build a 2-host / 2-router topology and verify membership, adjacency,
    and the resulting forwarding tables."""
    net = Network()
    host_addrs = ["192.168.0.1", "192.168.0.2"]
    router_addrs = ["192.168.0.3", "192.168.0.4"]
    for addr in host_addrs:
        net.add_host(addr)
    for addr in router_addrs:
        net.add_router(addr)
    # host1 -- r3 -- r4 -- host2
    net.link("192.168.0.1", "192.168.0.3")
    net.link("192.168.0.2", "192.168.0.4")
    net.link("192.168.0.3", "192.168.0.4")

    for addr in host_addrs:
        assert addr in net.hosts
    for addr in router_addrs:
        assert addr in net.routers

    r3 = net.routers["192.168.0.3"]
    r4 = net.routers["192.168.0.4"]
    assert r3.connected_devices[0] == "192.168.0.1"
    assert r3.connected_devices[1] == "192.168.0.4"
    assert r4.connected_devices[0] == "192.168.0.2"
    assert r4.connected_devices[1] == "192.168.0.3"
    # directly-connected entries forward straight to the destination
    assert r3.forwarding_table["192.168.0.1"] == "192.168.0.1"
    assert r3.forwarding_table["192.168.0.4"] == "192.168.0.4"
    assert r4.forwarding_table["192.168.0.2"] == "192.168.0.2"
    assert r4.forwarding_table["192.168.0.3"] == "192.168.0.3"
def regression_genetic(data_set, data_set_name, population_size, crossover_prob,
                       creep_variance, mutation_prob, tournament_size,
                       convergence_size):
    """Run 10-fold cross-validated GA training for a regression task.

    For every candidate network layout, trains a fresh network per fold with
    the genetic algorithm and prints per-fold and averaged error.
    """
    print("Running regression on: {}".format(data_set_name))
    layouts = get_network_layouts(data_set.num_cols, 1)
    folds = data_set.validation_folds(10)
    for layout in layouts:
        running_error = 0
        print("--Testing network layout: {}".format(layout))
        for fold_i, fold in enumerate(folds):
            net = Network(fold['train'], fold['test'], layout)
            Genetic(net, population_size, crossover_prob, creep_variance,
                    mutation_prob, tournament_size, convergence_size).train()
            fold_error = net.get_error(fold['test'])
            # each fold contributes 1/10th to the final average
            running_error += fold_error / 10
            print("----Error of fold {}: {:.2f}".format(fold_i, fold_error))
        print("--Final error: {:.2f}".format(running_error))
def train_regression(net: Network, dataset: pd.DataFrame, max_epochs: int,
                     learning_rate: float, batch_size: int = 1):
    """Train `net` for a regression task.

    The dataset consists of features with the regression target in the last
    column. Returns (train_losses, validation_losses): per-epoch summed loss
    divided by the split length.

    Fixes: the batch loops used `range(0, len(df) - batch_size, batch_size)`,
    which always dropped the final complete batch (e.g. len=100, batch=10
    trained on only 9 batches). The bound is now `len(df) - batch_size + 1`;
    a trailing *partial* batch is still skipped, as before.
    """
    train_df, valid_df = split_dataset(dataset, 0.2)
    train_losses = []
    validation_losses = []

    def _batches(df):
        # Yield (x, y) full batches of `batch_size` rows.
        for i in range(0, len(df) - batch_size + 1, batch_size):
            x = np.array(df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(df.iloc[i:i + batch_size, -1]),
                           (batch_size, 1))
            yield x, y

    for _ in range(max_epochs):
        train_loss = 0
        validation_loss = 0
        for x, y in _batches(train_df):
            train_loss += net.fit(x, y, learning_rate, batch_size)
        for x, y in _batches(valid_df):
            validation_loss += net.validate(x, y, learning_rate, batch_size)
        train_losses.append(train_loss / len(train_df))
        validation_losses.append(validation_loss / len(valid_df))
    return train_losses, validation_losses
def main():
    """Train a 784-15-10 network on the wrapped MNIST data via SGD."""
    training_data, validation_data, test_data = load_data_wrapper()
    net = Network([784, 15, 10])
    net.SGD(training_data=training_data,
            epochs=30,
            mini_batch_size=64,
            eta=0.5,
            test_data=test_data)
def main(_):
    """Build a NAF agent (environment, exploration strategy, prediction and
    target networks, statistics) from `conf` and run it.

    Fixes: typo in the raised error message ("Unkown" -> "Unknown").
    """
    model_dir = get_model_dir(
        conf, ['is_train', 'random_seed', 'monitor', 'display', 'log_level'])
    preprocess_conf(conf)

    with tf.Session() as sess:
        # environment — NAF requires continuous (Box) state and action spaces
        env = gym.make(conf.env_name)
        env.seed(conf.random_seed)

        assert isinstance(env.observation_space, gym.spaces.Box), \
            "observation space must be continuous"
        assert isinstance(env.action_space, gym.spaces.Box), \
            "action space must be continuous"

        # exploration strategy
        if conf.noise == 'ou':
            strategy = OUExploration(env, sigma=conf.noise_scale)
        elif conf.noise == 'brownian':
            strategy = BrownianExploration(env, conf.noise_scale)
        elif conf.noise == 'linear_decay':
            strategy = LinearDecayExploration(env)
        else:
            raise ValueError('Unknown exploration strategy: %s' % conf.noise)

        # networks — prediction and target share the same constructor args
        shared_args = {
            'sess': sess,
            'input_shape': env.observation_space.shape,
            'action_size': env.action_space.shape[0],
            'hidden_dims': conf.hidden_dims,
            'use_batch_norm': conf.use_batch_norm,
            'use_seperate_networks': conf.use_seperate_networks,
            'hidden_w': conf.hidden_w,
            'action_w': conf.action_w,
            'hidden_fn': conf.hidden_fn,
            'action_fn': conf.action_fn,
            'w_reg': conf.w_reg,
        }

        logger.info("Creating prediction network...")
        pred_network = Network(scope='pred_network', **shared_args)

        logger.info("Creating target network...")
        target_network = Network(scope='target_network', **shared_args)
        # the target network tracks the prediction network via soft updates
        target_network.make_soft_update_from(pred_network, conf.tau)

        # statistic
        stat = Statistic(sess, conf.env_name, model_dir,
                         pred_network.variables, conf.update_repeat)

        agent = NAF(sess, env, strategy, pred_network, target_network, stat,
                    conf.discount, conf.batch_size, conf.learning_rate,
                    conf.max_steps, conf.update_repeat, conf.max_episodes)
        agent.run(conf.monitor, conf.display, conf.is_train)
def test_networks():
    """Compare the hand-rolled Network against an equivalent Keras model:
    outputs, loss, and weight/bias gradients must agree to 4 decimals."""
    input_size = 20
    output_size = 3
    batch_size = 5
    eps_decimal = 4
    seed = 1

    layers = [(10, SigmoidActivation(), True),
              (output_size, SigmoidActivation(), True)]
    network = Network(input_size, layers, LogError(), seed)

    # Mirror the same architecture in Keras.
    model = Sequential()
    model.add(Dense(10, use_bias=True, input_shape=(input_size, ),
                    activation="sigmoid"))
    model.add(Dense(output_size, use_bias=True, input_shape=(input_size, ),
                    activation="sigmoid"))
    model.compile(optimizer="sgd", loss="binary_crossentropy")

    # Copy our network's parameters into the Keras layers.
    for idx in (0, 1):
        model.layers[idx].set_weights(
            [network.layers[idx].w, network.layers[idx].b.flatten()])

    x = np.random.rand(batch_size, input_size)
    y = np.random.rand(batch_size, output_size)

    loss = model.evaluate(x, y, verbose=2)
    output = model.predict(x)
    output2 = network.predict(x)
    loss2 = network.evaluate(x, y)

    # equal outputs
    np.testing.assert_almost_equal(output, output2, decimal=eps_decimal)
    # equal loss
    np.testing.assert_almost_equal(loss, loss2, decimal=eps_decimal)

    # equal weights and biases derivatives, layer by layer
    [dw0, db0, dw1, db1] = get_weight_grad(model, x, y)
    pairs = ((db1, network.layers[1].db),
             (dw1, network.layers[1].dw),
             (db0, network.layers[0].db),
             (dw0, network.layers[0].dw))
    for keras_grad, our_grad in pairs:
        np.testing.assert_almost_equal(keras_grad, our_grad,
                                       decimal=eps_decimal)
def main(_):
    """Build and run a NAF agent from `conf`.

    NOTE(review): unlike the sibling `main`, this variant calls the private
    `env._seed` and hard-codes `is_train=True` in the final `agent.run` call
    — presumably a training-only entry point; confirm before unifying.
    """
    model_dir = get_model_dir(
        conf, ['is_train', 'random_seed', 'monitor', 'display', 'log_level'])
    preprocess_conf(conf)

    with tf.Session() as sess:
        # environment
        env = gym.make(conf.env_name)
        env._seed(conf.random_seed)  # private gym API — confirm gym version

        assert isinstance(env.observation_space, gym.spaces.Box), \
            "observation space must be continuous"
        assert isinstance(env.action_space, gym.spaces.Box), \
            "action space must be continuous"

        # exploration strategy
        if conf.noise == 'ou':
            strategy = OUExploration(env, sigma=conf.noise_scale)
        elif conf.noise == 'brownian':
            strategy = BrownianExploration(env, conf.noise_scale)
        elif conf.noise == 'linear_decay':
            strategy = LinearDecayExploration(env)
        else:
            raise ValueError('Unkown exploration strategy: %s' % conf.noise)

        # networks — both built from the same constructor arguments
        shared_args = {
            'sess': sess,
            'input_shape': env.observation_space.shape,
            'action_size': env.action_space.shape[0],
            'hidden_dims': conf.hidden_dims,
            'use_batch_norm': conf.use_batch_norm,
            'use_seperate_networks': conf.use_seperate_networks,
            'hidden_w': conf.hidden_w,
            'action_w': conf.action_w,
            'hidden_fn': conf.hidden_fn,
            'action_fn': conf.action_fn,
            'w_reg': conf.w_reg,
        }

        logger.info("Creating prediction network...")
        pred_network = Network(
            scope='pred_network', **shared_args
        )

        logger.info("Creating target network...")
        target_network = Network(
            scope='target_network', **shared_args
        )
        # target network tracks the prediction network via soft updates
        target_network.make_soft_update_from(pred_network, conf.tau)

        # statistic
        stat = Statistic(sess, conf.env_name, model_dir,
                         pred_network.variables, conf.update_repeat)

        agent = NAF(sess, env, strategy, pred_network, target_network, stat,
                    conf.discount, conf.batch_size, conf.learning_rate,
                    conf.max_steps, conf.update_repeat, conf.max_episodes)

        #agent.run(conf.monitor, conf.display, conf.is_train)
        agent.run(conf.monitor, conf.display, True)  # training forced on
def run(training_data, epochs, mini_batch_size, eta, test_data=None):
    """Train a 30-30-1 network with SGD and report wall-clock runtime."""
    print('Initializing neural network')
    net = Network([30, 30, 1])
    print('Training network')
    started = time.time()
    net.SGD(training_data, epochs, mini_batch_size, eta, test_data=test_data)
    print('Runtime: {}s'.format(time.time() - started))
def test_diff_evolution_image():
    """Train a 19-13-7 classifier on the image segmentation data with
    differential evolution and print the test-set accuracy."""
    image_data = data.get_segmentation_data("../../data/segmentation.data")
    train, test = image_data.partition(.8)
    classes = ["BRICKFACE", "SKY", "FOLIAGE", "CEMENT", "WINDOW", "PATH",
               "GRASS"]
    net = Network(train, test, [19, 13, 7], classes)
    DiffEvolution(net, mutation_f=.1, recombination_c=.9, pop_size=20).run()
    print("\n\nAccuracy on test set: {}%".format(net.get_accuracy(test) * 100))
def test_particle_swarm_image():
    """Train a 19-13-7 classifier on the image segmentation data with
    particle swarm optimization and print the test-set accuracy."""
    image_data = data.get_segmentation_data("../../data/segmentation.data")
    train, test = image_data.partition(.8)
    classes = ["BRICKFACE", "SKY", "FOLIAGE", "CEMENT", "WINDOW", "PATH",
               "GRASS"]
    net = Network(train, test, [19, 13, 7], classes)
    ParticleSwarm(net,
                  pop_size=20,
                  cog_factor=1.0,
                  soc_factor=2.0,
                  inertia=0.05,
                  max_velocity=100000,
                  convergence_size=50).train()
    print("\n\nAccuracy on test set: {}%".format(net.get_accuracy(test) * 100))
def __init__(self, network: Network, max_velocity: float):
    """Initialize a particle: random position within the search bounds,
    random velocity scaled to exactly `max_velocity`, and the starting
    position recorded as the personal best.

    Fixes: the original set `self.p_best = self.state`, aliasing the position
    array — if the position is later updated in place, the recorded personal
    best would be silently corrupted. `p_best` is now a snapshot copy.
    """
    self.network = network
    self.max_velocity = max_velocity
    # one dimension per network weight
    self.state = np.random.uniform(low=LOWER_BOUND, high=UPPER_BOUND,
                                   size=network.get_num_weights())
    # random direction, rescaled so its norm equals max_velocity
    self.velocity = np.random.uniform(low=-1, high=1,
                                      size=network.get_num_weights())
    self.velocity = self.velocity * (max_velocity / np.linalg.norm(self.velocity))
    self.p_best = self.state.copy()
    self.best_fitness = self.get_fitness()
def test_6_2_2_1(self):
    """Fit a 784-30-10 all-linear network with cross-entropy cost."""
    layers = [
        Layer(LinearUnit(), 784),
        Layer(LinearUnit(), 30),
        Layer(LinearUnit(), 10),
    ]
    nn = Network(layer=layers, cost=CrossEntropy(), utmode=True)
    train_x = self.training_data[0]
    train_y = self.training_data[1]
    test_x = self.test_data[0]
    test_y = self.test_data[1]
    nn.fit(train_x, train_y, test_x, test_y, learning_rate=7 * 1e-7)
def test_6_2_1_1(self):
    """Fit default-architecture networks under both cost functions,
    quadratic first, then cross-entropy, on the same data."""
    for cost in (QuadraticCost(), CrossEntropy()):
        net = Network(cost=cost, utmode=True)
        net.fit(self.training_data[0], self.training_data[1],
                self.test_data[0], self.test_data[1])
def run(self):
    """Simulate transferring `self.filename` from node n1 to n2 over TCP
    through a two-node network with the configured loss rate and window."""
    # reset the simulator and enable debug tracing
    Sim.scheduler.reset()
    Sim.set_debug('AppHandler')
    Sim.set_debug('TCP')
    # Sim.set_debug('Link')

    # application handler delivers received data into out_directory
    app = AppHandler(self.filename, self.out_directory)

    # network topology with the configured loss rate
    net = Network('../networks/setup.txt')
    net.loss(self.loss)

    # static forwarding entries in both directions
    n1 = net.get_node('n1')
    n2 = net.get_node('n2')
    n1.add_forwarding_entry(address=n2.get_address('n1'), link=n1.links[0])
    n2.add_forwarding_entry(address=n1.get_address('n2'), link=n2.links[0])

    # transport layer plus one TCP endpoint on each node
    t1 = Transport(n1)
    t2 = Transport(n2)
    c1 = TCP(t1, n1.get_address('n2'), 1, n2.get_address('n1'), 1, app,
             window=self.window)
    c2 = TCP(t2, n2.get_address('n1'), 1, n1.get_address('n2'), 1, app,
             window=self.window)

    # queue the file contents on the sender in 10 KB chunks
    with open(self.in_directory + '/' + self.filename, 'r') as source:
        while True:
            chunk = source.read(10000)
            if not chunk:
                break
            Sim.scheduler.add(delay=0, event=chunk, handler=c1.send)

    # run the simulation
    Sim.scheduler.run()
def test_genetic_car():
    """GA-train a 6-5-4 classifier on the car data and print test accuracy."""
    car_data = data.get_car_data("../../data/car.data")
    train, test = car_data.partition(0.8)
    net = Network(train, test, [6, 5, 4], ["acc", "unacc", "good", "vgood"])
    ga = Genetic(net,
                 population_size=20,
                 crossover_prob=0.5,
                 creep_variance=20,
                 mutation_prob=0.05,
                 tournament_size=2,
                 convergence_size=50)
    ga.train()
    print("\n\nAccuracy on test set: {}%".format(net.get_accuracy(test) * 100))
def add_host(self, user_supplied):
    """Parse `user_supplied` into a Host or Network target.

    Tries, in order: a literal IP, a network/CIDR range, then a DNS name.
    The first successful parse is added to self.targets; if every attempt
    raises ValueError the raw string is recorded in self.bad_targets.
    """
    # Literal IP address?
    try:
        self.targets.add(Host(ips=[user_supplied]))
        return
    except ValueError:
        pass
    # Network / CIDR range?
    try:
        self.targets.add(Network(user_supplied))
        return
    except ValueError:
        pass
    # Resolvable DNS name? The strict flag is needed — otherwise Host
    # raises no ValueError (per the original author's note).
    try:
        self.targets.add(Host(domain=user_supplied, strict=False))
        return
    except ValueError:
        logging.critical("Couldn't resolve or understand " + user_supplied)
    self.bad_targets.add(user_supplied)
def load_data():
    """Parse the SNDlib janos-us topology XML into a Network of City nodes.

    Returns (Network, index_to_name) where index_to_name maps a node index to
    its city name. Assumes <link> elements are grouped by their source city.

    Fixes: the per-link reverse lookup
    `next(key for key, value in index_to_name.items() if value == target)`
    was O(n) per link; the mapping is now inverted once up front (keeping the
    *first* index for any repeated name, matching `next`'s semantics).
    """
    import xml.etree.ElementTree as ET
    root = ET.parse('data/janos-us--D-D-M-N-S-A-N-S/janos-us.xml').getroot()
    prefix = "{http://sndlib.zib.de/network}"
    link_path = f"{prefix}networkStructure/{prefix}links/{prefix}link"

    # Pass 1: number each distinct link source in order of first appearance.
    index_to_name = dict()
    city_name = ""
    index = 0
    for type_tag in root.findall(link_path):
        source = type_tag.find(f"{prefix}source").text
        if source != city_name:
            index_to_name[index] = source
            city_name = source
            index += 1

    # One-time inversion for O(1) target lookups below.
    name_to_index = dict()
    for idx, name in index_to_name.items():
        name_to_index.setdefault(name, idx)

    # Pass 2: accumulate each source city's adjacency (target index -> cost).
    # NOTE(review): city_name starts as "Seattle" with index 0, so the first
    # appended City carries that name — presumably matching the dataset's
    # node ordering; confirm against the XML.
    city_list = []
    adjacency_dict = dict()
    city_name = "Seattle"
    index = 0
    for type_tag in root.findall(link_path):
        source = type_tag.find(f"{prefix}source").text
        target = type_tag.find(f"{prefix}target").text
        cost = type_tag.find(
            f"{prefix}additionalModules/{prefix}addModule/{prefix}cost").text
        if source != city_name:
            city_list.append(City(index, city_name, adjacency_dict))
            adjacency_dict = dict()
            index += 1
            city_name = source
        adjacency_dict[name_to_index[target]] = cost
    city_list.append(City(index, city_name, adjacency_dict))
    return Network(city_list), index_to_name
def test_genetic_image():
    """GA-train a 19-13-7 classifier on the image segmentation data and
    print the test-set accuracy."""
    image_data = data.get_segmentation_data("../../data/segmentation.data")
    train, test = image_data.partition(0.8)
    classes = ["BRICKFACE", "SKY", "FOLIAGE", "CEMENT", "WINDOW", "PATH",
               "GRASS"]
    net = Network(train, test, [19, 13, 7], classes)
    ga = Genetic(net,
                 population_size=20,
                 crossover_prob=0.5,
                 creep_variance=20,
                 mutation_prob=0.05,
                 tournament_size=2,
                 convergence_size=50)
    ga.train()
    print("\n\nAccuracy on test set: {}%".format(net.get_accuracy(test) * 100))
def test_genetic_wine():
    """GA-train an 11-6-1 regression network on the wine quality data and
    print the test-set error."""
    wine_data = data.get_wine_data("../../data/winequality.data")
    train, test = wine_data.partition(.9)
    net = Network(train, test, [11, 6, 1])
    ga = Genetic(net,
                 population_size=20,
                 crossover_prob=0.5,
                 creep_variance=1,
                 mutation_prob=0.05,
                 tournament_size=2,
                 convergence_size=100)
    ga.train()
    print("\n\nError on test set: {}%".format(net.get_error(test) * 100))
async def main():
    """Train a 2-input / 1-output network on XOR through stream operators,
    then pipe the four test inputs through `print_result`.

    NOTE(review): relies on the `operator` decorator's stream semantics
    (aiostream-style) — `train(data) | print_result.pipe(test_data)` builds
    a pipeline that is awaited at the end; confirm against the operator
    library's documentation.
    """
    network = Network(2, 1)
    # XOR truth table: [inputs, expected output]
    data = [
        [[0, 0], [0]],
        [[0, 1], [1]],
        [[1, 0], [1]],
        [[1, 1], [0]],
    ]

    @operator
    def train(data_set, epochs=10000):
        # one full pass over the data set per epoch
        for i in range(0, epochs):
            network.train_once(data_set)

    @operator(pipable=True)
    def print_result(t_d):
        # NOTE(review): each element of t_d is unpacked as (i, index) and
        # `i[0]`/`i[1]` are indexed below — so `i` is presumably an input
        # pair delivered by the pipe, not a scalar; confirm the stream's
        # element shape.
        for (i, index) in t_d:
            network.input(i)
            print(f"{i[0]} XOR {i[1]} = {network.prediction}")

    test_data = [
        [0, 0],
        [0, 1],
        [1, 0],
        [1, 1],
    ]

    xs = (train(data) | print_result.pipe(test_data))
    await xs
def test_genetic_abalone():
    """GA-train a 7-4-1 network on the abalone data (ring classes 1..29)
    and print the test-set accuracy."""
    abalone_data = data.get_abalone_data("../../data/abalone.data")
    train, test = abalone_data.partition(0.8)
    net = Network(train, test, [7, 4, 1], list(range(1, 30)))
    ga = Genetic(net,
                 population_size=20,
                 crossover_prob=0.5,
                 creep_variance=1,
                 mutation_prob=0.05,
                 tournament_size=2,
                 convergence_size=100)
    ga.train()
    print("\n\nAccuracy on test set: {}%".format(net.get_accuracy(test) * 100))
def cli(ip_cidr, ipclass, is_private, machines, mask, binary, parts, subnetworks): """ Get ip adresses info Can deal with subnetworks """ ip, mask = ip_cidr.split('/') ip = IPAddress(ip) network = Network(ip, mask) print 'IP Address:', ip if ipclass: print 'Class: {}'.format(ip.ipclass) if is_private: print 'Private: {}'.format('Yes' if ip.is_private else 'No') if mask: print 'Mask: {}'.format(network.mask) if binary: print 'Binary: {}'.format(ip.binary) if subnetworks: print 'Number of subnetworks: {}'.format(network.subnetworks) if machines: print 'Number of machines: {}'.format(network.machines) if parts: print 'Machine part from mask {mask}: {part}'.format( mask=ip.mask, part=ip.machine_part)
class NeuralNetworkLibrary:
    """Keyword library wrapping a single Neuron / Network under test.

    Methods parse their string arguments with `ast.literal_eval` (or `eval`)
    and assert on the wrapped object's state. NOTE(review): the methods are
    order-dependent — `create_neuron` / `initialize_network` must run before
    the keywords that use `self._neuron` / `self._network`.
    """

    def create_neuron(self, numberOfInputs, activationFuncion):
        # Build the neuron under test from stringly-typed keyword arguments.
        self._neuron = Neuron(int(numberOfInputs), str(activationFuncion))

    def set_neuron_inputs(self, val):
        """Assign parsed inputs and verify they were stored.

        NOTE(review): the check compares `inputs[:-1]` — presumably the
        neuron appends a bias term to its inputs; confirm in Neuron.
        """
        value = ast.literal_eval(val)
        self._neuron.inputs = value
        assert self._neuron.inputs[:-1] == value, "Neuron's inputs was not set: "

    def randomize_neuron_weights(self):
        # Weights must be non-empty (truthy) after randomization.
        self._neuron.randomizeWeights()
        assert self._neuron.weights, "Weights were not initialized"

    def calculate_neuron_output(self):
        # Identity check (`is not`): calcOutput must rebind the output object.
        beforeOutput = self._neuron.output
        self._neuron.calcOutput()
        afterOutput = self._neuron.output
        assert beforeOutput is not afterOutput, "Neuron's aoutput was not updated"

    def update_neuron_weights(self):
        # Identity check (`is not`): updateWeights must rebind the weights.
        beforeWeights = self._neuron.weights
        self._neuron.updateWeights()
        afterWeights = self._neuron.weights
        assert beforeWeights is not afterWeights, "Weights were not updated"

    def initialize_network(self, data, target, numberOfNeurons, activationFunctions):
        # All four arguments arrive as literal strings and are parsed here.
        self._network = Network(ast.literal_eval(data),
                                ast.literal_eval(target),
                                ast.literal_eval(numberOfNeurons),
                                ast.literal_eval(activationFunctions))

    def initialize_random_weights(self):
        self._network.initWeights()

    def are_weights_initialized(self):
        # Every neuron in every layer must hold truthy (non-empty) weights.
        for layer in self._network.net:
            for neuron in layer:
                if not neuron.weights:
                    raise AssertionError("Network weights were not initialized")

    def train_until_error(self, error):
        # HACK: `eval` on a keyword argument — safe only for trusted test
        # data; ast.literal_eval would be preferable.
        self._network.train(error=eval(error))
        assert self._network.totalError <= eval(error), "Desired error was not achieved"

    def train_until_epoch(self, epoch):
        # HACK: `eval` on a keyword argument — see train_until_error.
        self._network.train(epochs=eval(epoch))
        assert self._network.learningEpochs == eval(epoch), "Desired error was not achieved"

    def run_network(self, inputs):
        self._network.run(ast.literal_eval(inputs))

    def is_net_output_within_deviation(self, target, permissibleDeviation):
        """Check every network output lies within +/- permissibleDeviation
        of the corresponding target output."""
        permDev = float(permissibleDeviation)
        targetSet = ast.literal_eval(target)
        for targetOutputs, netOutputs in zip(targetSet, self._network.netOutputs):
            for targetOutput, netOutput in zip(targetOutputs, netOutputs):
                assert targetOutput-permDev < netOutput < targetOutput+permDev, \
                    "Net output is : " + str(netOutput) + " but should be: " + str(targetOutput)
def main():
    """Train a 20→50→10→3 network on random data for 1000 iterations,
    printing the loss each step.

    Fixes: the output layer width hard-coded the literal 3, silently
    duplicating `output_size`; it now derives from the variable.
    """
    batch_size: int = 10
    input_size: int = 20
    output_size: int = 3
    alpha: float = 0.01
    seed: int = 5

    # (number of neurons, activation function, use bias)
    layers = [(50, SigmoidActivation(), True),
              (10, SigmoidActivation(), True),
              (output_size, LinearActivation(), True)]
    error = MeanAbsoluteError()
    network = Network(input_size, layers, error, seed)

    x = np.random.rand(batch_size, input_size)
    y = np.random.rand(batch_size, output_size)
    for i in range(1000):
        loss = network.fit(x, y, alpha)
        print("{0} iteration, loss = {1}.".format(i, loss))
def train_classification(net: Network, dataset: pd.DataFrame, max_epochs: int,
                         learning_rate: float, batch_size: int = 1,
                         multiclass: bool = False):
    """Train `net` for a classification task.

    The dataset consists of features and a class label in the 'cls' (last)
    column. With multiclass=True the labels are one-hot encoded; otherwise
    they are shifted down by 1. Returns (train_losses, validation_losses):
    per-epoch summed loss divided by the split length.

    Fixes: the batch loops used `range(0, len(df) - batch_size, batch_size)`,
    which always dropped the final complete batch. The bound is now
    `len(df) - batch_size + 1`; a trailing *partial* batch is still skipped,
    as before.
    """
    train_df, valid_df = split_dataset(dataset, 0.2)
    train_y_df = pd.get_dummies(
        train_df['cls'],
        dtype=float) if multiclass else train_df['cls'] - 1.0
    valid_y_df = pd.get_dummies(
        valid_df['cls'],
        dtype=float) if multiclass else valid_df['cls'] - 1.0
    y_dim = train_y_df.shape[1] if multiclass else 1

    train_losses = []
    validation_losses = []
    for epoch in range(max_epochs):
        train_loss = 0
        validation_loss = 0
        for i in range(0, len(train_df) - batch_size + 1, batch_size):
            x = np.array(train_df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(train_y_df.iloc[i:i + batch_size]),
                           (batch_size, y_dim))
            train_loss += net.fit(x, y, learning_rate, batch_size)
        for i in range(0, len(valid_df) - batch_size + 1, batch_size):
            x = np.array(valid_df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(valid_y_df.iloc[i:i + batch_size]),
                           (batch_size, y_dim))
            validation_loss += net.validate(x, y, learning_rate, batch_size)
        train_losses.append(train_loss / len(train_df))
        validation_losses.append(validation_loss / len(valid_df))
    return train_losses, validation_losses
def __init__(self, network: Network, population_size: int,
             crossover_prob: float, creep_variance: float,
             mutation_prob: float, tournament_size: int,
             convergence_size: int):
    """Store the GA hyperparameters and seed a random initial population,
    one Individual per population slot with uniformly sampled weights."""
    self.network = network
    self.population_size = population_size
    self.crossover_prob = crossover_prob
    self.creep_variance = creep_variance
    self.mutation_prob = mutation_prob
    self.tournament_size = tournament_size
    self.convergence_size = convergence_size
    # one gene per network weight, sampled within the search bounds
    num_weights = network.get_num_weights()
    self.population = [
        Individual(network,
                   np.random.uniform(low=LOWER_BOUND, high=UPPER_BOUND,
                                     size=num_weights))
        for _ in range(population_size)
    ]
def regression_diff_evolution(data_set, data_set_name, mutation_f,
                              recombination_c, pop_size):
    """Run 10-fold cross-validated differential-evolution training for a
    regression task, printing per-fold and averaged error per layout."""
    print("Running regression on: {}".format(data_set_name))
    layouts = get_network_layouts(data_set.num_cols, 1)
    folds = data_set.validation_folds(10)
    for layout in layouts:
        running_error = 0
        print("--Testing network layout: {}".format(layout))
        for fold_i, fold in enumerate(folds):
            net = Network(fold['train'], fold['test'], layout)
            DiffEvolution(net, mutation_f, recombination_c, pop_size).run()
            fold_error = net.get_error(fold['test'])
            # each fold contributes 1/10th to the final average
            running_error += fold_error / 10
            print("----Error of fold {}: {:.2f}".format(fold_i, fold_error))
        print("--Final error: {:.2f}".format(running_error))
def classification_diff_evolution(data_set, data_set_name, classes, mutation_f,
                                  recombination_c, pop_size):
    """Run 10-fold cross-validated differential-evolution training for a
    classification task, printing per-fold and averaged accuracy per layout."""
    print("Running classification on: {}".format(data_set_name))
    layouts = get_network_layouts(data_set.num_cols, len(classes))
    folds = data_set.validation_folds(10)
    for layout in layouts:
        running_accuracy = 0
        print("--Testing network layout: {}".format(layout))
        for fold_i, fold in enumerate(folds):
            net = Network(fold['train'], fold['test'], layout, classes)
            DiffEvolution(net, mutation_f, recombination_c, pop_size).run()
            fold_accuracy = net.get_accuracy(fold['test'])
            # each fold contributes 1/10th to the final average
            running_accuracy += fold_accuracy / 10
            print("----Accuracy of fold {}: {:.2f}".format(fold_i, fold_accuracy))
        print("--Final accuracy: {:.2f}".format(running_accuracy))
def network__get_identifier(self, args):
    """Test that Network.get_identifier returns the id it was built with.

    args: [environment_directory, identifier, log_directory].

    Fixes: dropped the unused function-scope imports from the original
    (os, Bank, Household, Firm, Transaction) — none were referenced.
    """
    from src.environment import Environment
    from src.network import Network

    text = "This test checks network.get_identifier \n"
    self.print_info(text)

    #
    # INITIALIZATION
    #
    environment_directory = str(args[0])
    identifier = str(args[1])
    log_directory = str(args[2])

    # Configure logging parameters so we get output while the program runs
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        filename=log_directory + identifier + ".log",
                        level=logging.INFO)
    logging.info('START logging for test network__get_identifier in run: %s',
                 environment_directory + identifier + ".xml")

    # Construct the environment the test runs against (kept for its
    # construction side effects even though the variable is not read)
    environment = Environment(environment_directory, identifier)

    #
    # TESTING
    #
    print("Creating a network \n")
    network = Network("test")
    print("Network ID: ")
    print(network.get_identifier())
def regression_particle_swarm(data_set, data_set_name, pop_size, cog_factor,
                              soc_factor, inertia, max_velocity,
                              convergence_size):
    """Run 10-fold cross-validated particle-swarm training for a regression
    task, printing per-fold and averaged error per layout."""
    print("Running regression on: {}".format(data_set_name))
    layouts = get_network_layouts(data_set.num_cols, 1)
    folds = data_set.validation_folds(10)
    for layout in layouts:
        running_error = 0
        print("--Testing network layout: {}".format(layout))
        for fold_i, fold in enumerate(folds):
            net = Network(fold['train'], fold['test'], layout)
            ParticleSwarm(net, pop_size, cog_factor, soc_factor, inertia,
                          max_velocity, convergence_size).train()
            fold_error = net.get_error(fold['test'])
            # each fold contributes 1/10th to the final average
            running_error += fold_error / 10
            print("----Error of fold {}: {:.2f}".format(fold_i, fold_error))
        print("--Final error: {:.2f}".format(running_error))
from src.network import Network

# Train a 10-5-1 all-sigmoid network on the XOR truth table, then probe it
# with slightly noisy versions of the inputs.
xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
xor_targets = [[0], [1], [1], [0]]
net = Network(xor_inputs, xor_targets, [10, 5, 1], ['sigmoid'] * 3)
net.initWeights()
net.train(error=pow(10, -5), epochs=pow(10, 3), eta=0.7, repeatSet=10,
          plot=True, plotInterval=5)
net.run([[0.1, 0.1], [0.1, 0.9], [1, 0], [1, 1]])
def initialize_network(self, data, target, numberOfNeurons, activationFunctions):
    """Parse the four literal-string arguments and build the Network under
    test, storing it on the instance."""
    parsed = [ast.literal_eval(arg)
              for arg in (data, target, numberOfNeurons, activationFunctions)]
    self._network = Network(*parsed)