def __init__(self):
    """Set up an empty chain, the mempool, the peer network and the genesis block."""
    self.chain = []
    # Pool of pending transactions waiting to be mined into a block.
    self.mempool = Mempool()
    # The network of nodes which are connected to this node.
    self.network = Network()
    # Create the genesis block with a fixed, well-known previous-hash value.
    self.create_block(
        nonce=0,
        previous_block_hash='86a4be451d0e4ae83bcd72e1eb5308b19a4b270f95c25d752927341f7632a1cc')
def run(self):
    """Drive one simulated TCP file transfer between two network nodes."""
    # --- simulator configuration ---
    Sim.scheduler.reset()
    Sim.set_debug('AppHandler')
    Sim.set_debug('TCP')
    # Sim.set_debug('Link')

    # Application handler that receives the transferred file.
    a = AppHandler(self.filename, self.out_directory)

    # Build the two-node network and apply the configured loss rate.
    net = Network('../networks/setup.txt')
    net.loss(self.loss)

    # Static forwarding entries so n1 and n2 can reach each other.
    n1 = net.get_node('n1')
    n2 = net.get_node('n2')
    n1.add_forwarding_entry(address=n2.get_address('n1'), link=n1.links[0])
    n2.add_forwarding_entry(address=n1.get_address('n2'), link=n2.links[0])

    # Transport layer on each node.
    t1 = Transport(n1)
    t2 = Transport(n2)

    # One TCP connection endpoint on each side, sharing the app handler.
    c1 = TCP(t1, n1.get_address('n2'), 1, n2.get_address('n1'), 1, a,
             window=self.window)
    c2 = TCP(t2, n2.get_address('n1'), 1, n1.get_address('n2'), 1, a,
             window=self.window)

    # Queue the whole file for sending in 10 kB chunks at time 0.
    with open(self.in_directory + '/' + self.filename, 'r') as f:
        while True:
            chunk = f.read(10000)
            if not chunk:
                break
            Sim.scheduler.add(delay=0, event=chunk, handler=c1.send)

    # Run the simulation to completion.
    Sim.scheduler.run()
def test_genetic_car():
    """Train a classifier on the car data set via a genetic algorithm and report accuracy."""
    # GA hyper-parameters.
    population_size = 20
    crossover_prob = 0.5
    creep = 20
    mutation_prob = 0.05
    tournament_k = 2
    convergence_size = 50

    # 80/20 train/test split of the car data.
    car_data = data.get_car_data("../../data/car.data")
    training_data, testing_data = car_data.partition(0.8)

    network = Network(training_data, testing_data, [6, 5, 4],
                      ["acc", "unacc", "good", "vgood"])
    ga = Genetic(network, population_size, crossover_prob, creep,
                 mutation_prob, tournament_k, convergence_size)
    ga.train()

    accuracy = network.get_accuracy(testing_data) * 100
    print("\n\nAccuracy on test set: {}%".format(accuracy))
def test_genetic_image():
    """Train a classifier on the image-segmentation data set via a genetic algorithm."""
    # GA hyper-parameters.
    population_size = 20
    crossover_prob = 0.5
    creep = 20
    mutation_prob = 0.05
    tournament_k = 2
    convergence_size = 50

    # 80/20 train/test split of the segmentation data.
    image_data = data.get_segmentation_data("../../data/segmentation.data")
    training_data, testing_data = image_data.partition(0.8)

    network = Network(training_data, testing_data, [19, 13, 7],
                      ["BRICKFACE", "SKY", "FOLIAGE", "CEMENT",
                       "WINDOW", "PATH", "GRASS"])
    ga = Genetic(network, population_size, crossover_prob, creep,
                 mutation_prob, tournament_k, convergence_size)
    ga.train()

    accuracy = network.get_accuracy(testing_data) * 100
    print("\n\nAccuracy on test set: {}%".format(accuracy))
def test_genetic_wine():
    """Train a regressor on the wine-quality data set via a genetic algorithm."""
    # GA hyper-parameters (regression uses smaller creep, longer convergence).
    population_size = 20
    crossover_prob = 0.5
    creep = 1
    mutation_prob = 0.05
    tournament_k = 2
    convergence_size = 100

    # 90/10 train/test split of the wine data.
    wine_data = data.get_wine_data("../../data/winequality.data")
    training, test = wine_data.partition(.9)

    network = Network(training, test, [11, 6, 1])
    ga = Genetic(network, population_size, crossover_prob, creep,
                 mutation_prob, tournament_k, convergence_size)
    ga.train()

    error = network.get_error(test) * 100
    print("\n\nError on test set: {}%".format(error))
def test_genetic_abalone():
    """Train a classifier on the abalone data set via a genetic algorithm."""
    # GA hyper-parameters.
    population_size = 20
    crossover_prob = 0.5
    creep = 1
    mutation_prob = 0.05
    tournament_k = 2
    convergence_size = 100

    # 80/20 train/test split of the abalone data.
    abalone_data = data.get_abalone_data("../../data/abalone.data")
    training_data, testing_data = abalone_data.partition(0.8)

    # Class labels are the ring counts 1..29.
    network = Network(training_data, testing_data, [7, 4, 1],
                      list(range(1, 30)))
    ga = Genetic(network, population_size, crossover_prob, creep,
                 mutation_prob, tournament_k, convergence_size)
    ga.train()

    accuracy = network.get_accuracy(testing_data) * 100
    print("\n\nAccuracy on test set: {}%".format(accuracy))
def main():
    """Train a small feed-forward network on random data and print the loss per step."""
    batch_size: int = 10
    input_size: int = 20
    output_size: int = 3
    alpha: float = 0.01   # learning rate
    seed: int = 5

    # Each layer is (number of neurons, activation function, use bias).
    layers = [(50, SigmoidActivation(), True),
              (10, SigmoidActivation(), True),
              (3, LinearActivation(), True)]
    error = MeanAbsoluteError()
    network = Network(input_size, layers, error, seed)

    # Random input batch and random targets.
    x = np.random.rand(batch_size, input_size)
    y = np.random.rand(batch_size, output_size)

    for step in range(1000):
        loss = network.fit(x, y, alpha)
        print("{0} iteration, loss = {1}.".format(step, loss))
def network__set_identifier(self, args):
    """Check that Network.set_identifier updates the network's identifier.

    args: [environment_directory, identifier, log_directory]
    """
    import os
    from src.bank import Bank
    from src.household import Household
    from src.firm import Firm
    from src.environment import Environment
    from src.transaction import Transaction
    from src.network import Network

    text = "This test checks network.set_identifier \n"
    self.print_info(text)

    #
    # INITIALIZATION
    #
    environment_directory = str(args[0])
    identifier = str(args[1])
    log_directory = str(args[2])

    # Configure logging parameters so we get output while the program runs.
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        filename=log_directory + identifier + ".log",
                        level=logging.INFO)
    logging.info('START logging for test network__set_identifier in run: %s',
                 environment_directory + identifier + ".xml")

    # Build the environment from its configuration file.
    environment = Environment(environment_directory, identifier)

    #
    # TESTING
    #
    print("Creating a network \n")
    network = Network("test")
    print("Network ID: ")
    print(network.get_identifier())
    print("Changing network ID")
    network.set_identifier("test2")
    print("Network ID: ")
    print(network.get_identifier())
def add_network():
    """Grab IP and port from the textboxes and attempt a network connection.

    On invalid input, reports the error in the text widget and returns
    without registering a connection.  (The previous version used a bare
    ``except`` and, on failure, still appended a ``None`` connection and
    called ``attempt_connection()``.)
    """
    host = host_textbox.get()
    port = port_textbox.get()

    try:
        # int() raises ValueError for a non-numeric port; Network raises
        # on unreachable / malformed addresses.
        network = Network(host, int(port))
    except Exception:
        print("[Error]: Invalid ip address or port number")
        T.delete(1.0, tkinter.END)
        T.insert(tkinter.END, "Invalid ip address or port number")
        return  # do not append a dead (None) connection

    connection.append(network)
    attempt_connection()
def classification_diff_evolution(data_set, data_set_name, classes,
                                  mutation_f, recombination_c, pop_size):
    """10-fold cross-validated differential-evolution training for each network layout.

    Prints the per-fold and average classification accuracy.
    """
    print("Running classification on: {}".format(data_set_name))
    network_layouts = get_network_layouts(data_set.num_cols, len(classes))
    folds = data_set.validation_folds(10)

    for layer_sizes in network_layouts:
        average_accuracy = 0
        print("--Testing network layout: {}".format(layer_sizes))
        for fold_i, fold in enumerate(folds):
            network = Network(fold['train'], fold['test'], layer_sizes, classes)
            diff_evolution = DiffEvolution(network, mutation_f,
                                           recombination_c, pop_size)
            diff_evolution.run()
            accuracy = network.get_accuracy(fold['test'])
            average_accuracy += accuracy / 10   # running mean over 10 folds
            print("----Accuracy of fold {}: {:.2f}".format(fold_i, accuracy))
        print("--Final accuracy: {:.2f}".format(average_accuracy))
def regression_diff_evolution(data_set, data_set_name,
                              mutation_f, recombination_c, pop_size):
    """10-fold cross-validated differential-evolution training for each network layout.

    Prints the per-fold and average regression error.
    """
    print("Running regression on: {}".format(data_set_name))
    network_layouts = get_network_layouts(data_set.num_cols, 1)
    folds = data_set.validation_folds(10)

    for layer_sizes in network_layouts:
        average_error = 0
        print("--Testing network layout: {}".format(layer_sizes))
        for fold_i, fold in enumerate(folds):
            network = Network(fold['train'], fold['test'], layer_sizes)
            diff_evolution = DiffEvolution(network, mutation_f,
                                           recombination_c, pop_size)
            diff_evolution.run()
            error = network.get_error(fold['test'])
            average_error += error / 10   # running mean over 10 folds
            print("----Error of fold {}: {:.2f}".format(fold_i, error))
        print("--Final error: {:.2f}".format(average_error))
def main():
    """Train a 784-30-10 sigmoid network on MNIST and evaluate on the test set."""
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

    architecture = [
        {'size': 784, 'activation': 'sigmoid'},
        {'size': 30, 'activation': 'sigmoid'},
        {'size': 10, 'activation': 'sigmoid'},
    ]
    net = Network(architecture, 'mse', seed=1)

    # Flatten the (input, label) pairs into dense arrays.
    train_X = np.array([pair[0] for pair in training_data]).reshape(50000, 784)
    train_Y = np.array([pair[1] for pair in training_data]).reshape(50000, 10)
    test_X = np.array([pair[0] for pair in test_data]).reshape(10000, 784)
    test_y = np.array([pair[1] for pair in test_data]).reshape(10000)

    # 30 epochs, mini-batch size 30, learning rate 80.0.
    net.fit(train_X, train_Y, 30, 30, 80.0, test_X, test_y)
def regression_particle_swarm(data_set, data_set_name, pop_size, cog_factor,
                              soc_factor, inertia, max_velocity,
                              convergence_size):
    """10-fold cross-validated particle-swarm training for each network layout.

    Prints the per-fold and average regression error.
    """
    print("Running regression on: {}".format(data_set_name))
    network_layouts = get_network_layouts(data_set.num_cols, 1)
    folds = data_set.validation_folds(10)

    for layer_sizes in network_layouts:
        average_error = 0
        print("--Testing network layout: {}".format(layer_sizes))
        for fold_i, fold in enumerate(folds):
            network = Network(fold['train'], fold['test'], layer_sizes)
            pso = ParticleSwarm(network, pop_size, cog_factor, soc_factor,
                                inertia, max_velocity, convergence_size)
            pso.train()
            error = network.get_error(fold['test'])
            average_error += error / 10   # running mean over 10 folds
            print("----Error of fold {}: {:.2f}".format(fold_i, error))
        print("--Final error: {:.2f}".format(average_error))
def classification_particle_swarm(data_set, data_set_name, classes, pop_size,
                                  cog_factor, soc_factor, inertia,
                                  max_velocity, convergence_size):
    """10-fold cross-validated particle-swarm training for each network layout.

    Prints the per-fold and average classification accuracy.
    """
    print("Running classification on: {}".format(data_set_name))
    network_layouts = get_network_layouts(data_set.num_cols, len(classes))
    folds = data_set.validation_folds(10)

    for layer_sizes in network_layouts:
        average_accuracy = 0
        print("--Testing network layout: {}".format(layer_sizes))
        for fold_i, fold in enumerate(folds):
            network = Network(fold['train'], fold['test'], layer_sizes, classes)
            pso = ParticleSwarm(network, pop_size, cog_factor, soc_factor,
                                inertia, max_velocity, convergence_size)
            pso.train()
            accuracy = network.get_accuracy(fold['test'])
            average_accuracy += accuracy / 10   # running mean over 10 folds
            print("----Accuracy of fold {}: {:.2f}".format(fold_i, accuracy))
        print("--Final accuracy: {:.2f}".format(average_accuracy))
from src.mnist_loader import load_data_wrapper
from src.network import Network

if __name__ == '__main__':
    # Load MNIST and train with SGD: 30 epochs, mini-batch size 10, eta 3.0.
    train, valid, test = load_data_wrapper()
    net = Network([784, 30, 10])
    net.SGD(train, 30, 10, 3.0, test_data=test)
from src.network import Network
from src.layer import Layer

if __name__ == '__main__':
    # First, build the network: 64 pixel inputs down to 10 digit outputs.
    network = Network([
        Layer(64, 128),
        Layer(128, 64),
        Layer(64, 64),
        Layer(64, 16),
        Layer(16, 10)
    ])

    def generate_inputs(file):
        """Yield the 64 pixel values of each sample as a list of ints."""
        # 'with' ensures the handle is closed when the generator finishes
        # (the previous version opened the file and never closed it).
        with open(file) as training:
            for line in training:
                yield [int(value) for value in line.split(',')[:64]]

    def generate_outputs(file):
        """Yield a one-hot length-10 target vector for each sample's label."""
        with open(file) as training:
            for line in training:
                label = int(line.split(',')[64])
                yield [0 if i != label else 1 for i in range(10)]

    print("Training...", end="\r")
    # Train the network on the optdigits training set.
    network.train(generate_inputs("optdigits.tra"),
                  generate_outputs("optdigits.tra"))
    print("Testing...", end="\r")
@ray.remote
class ray_network:
    """Ray actor wrapping a Network so remote workers can query it."""

    def __init__(self, network):
        self.network = network

    def get_groups(self):
        return self.network.groups

    def get_name(self):
        return self.network.name


ray.init()

# Build the XOR topology: 2 inputs -> 2 hidden sigmoid units -> 1 sigmoid output.
xor_net = Network(name="xor")
xor_net.add_group(name="first", num_units=2, group_type="input",
                  input_transforms=[], output_transforms=[])
xor_net.add_group(name="second", num_units=2, group_type="hidden",
                  input_transforms=["dot"], output_transforms=["sigmoid"])
xor_net.add_group(name="third", num_units=1, group_type="output",
                  input_transforms=["dot"], output_transforms=["sigmoid"])
class Environment(object):
    """Simulation environment: parameters, state, banks and the interbank network."""

    #
    # VARIABLES
    #
    identifier = ""            # identifier of the environment
    parameters = Parameters()  # static and time-varying simulation parameters
    state = State()            # parameter set valid at a given time
    banks = []                 # all bank agents
    network = Network("")      # interbank network (banks are its nodes)

    #
    # METHODS
    #

    # -------------------------------------------------------------------------
    # __init__
    # -------------------------------------------------------------------------
    def __init__(self):
        pass

    # -------------------------------------------------------------------------
    # initialize
    # -------------------------------------------------------------------------
    def initialize(self, environment_directory, identifier):
        """Read the environment file, create the banks and build the network."""
        self.identifier = identifier

        # first, read in the environment file
        environment_filename = environment_directory + identifier + ".xml"
        self.read_environment_file(environment_filename)
        logging.info(" environment file read: %s", environment_filename)

        # then read in all the banks
        if (self.parameters.bankDirectory != ""):
            if (self.parameters.bankDirectory != "none"):  # none is used for tests only
                self.initialize_banks_from_files(self.parameters.bankDirectory,
                                                 self.get_state(0), 0)
                logging.info(" banks read from directory: %s",
                             self.parameters.bankDirectory)
        else:
            logging.error("ERROR: no bankDirectory given in %s\n",
                          environment_filename)

        # the initial assets are needed to determine the fire-sale price
        # in bank.liquidate_assets
        self.initial_assets = 0.0
        for bank in self.banks:
            self.initial_assets += bank.get_account("I")

        # finally, create the network
        # note: this has to be done after creating the banks, as they are
        # passed to the network as node objects
        self.network.identifier = self.identifier
        self.network.initialize_networks(self)

        # when there is a SIFI surcharge, implement it now on the banking capital
        self.apply_sifi_surcharge()
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # read_environment_file
    # -------------------------------------------------------------------------
    def read_environment_file(self, environmentFilename):
        """Parse the XML environment file into self.parameters."""
        from xml.etree import ElementTree
        xmlText = open(environmentFilename).read()
        element = ElementTree.XML(xmlText)
        self.identifier = element.attrib['title']
        self.parameters.identifier = self.identifier

        # loop over all entries in the xml file
        for subelement in element:
            entry_type = subelement.attrib['type']
            raw_value = subelement.attrib['value']
            # the first set of parameters is valid for the whole simulation
            if entry_type == 'numSweeps':
                self.parameters.numSweeps = int(raw_value)
            elif entry_type == 'numSimulations':
                self.parameters.numSimulations = int(raw_value)
            elif entry_type == 'numBanks':
                self.parameters.numBanks = int(raw_value)
            elif entry_type == 'bankDirectory':
                self.parameters.bankDirectory = str(raw_value)
            elif entry_type == 'graphType':
                self.parameters.graphType = str(raw_value)
            elif entry_type == 'graphParameter1':
                self.parameters.graphParameter1 = float(raw_value)
            elif entry_type == 'graphParameter2':
                self.parameters.graphParameter2 = float(raw_value)
            elif entry_type == 'contractsNetworkFile':
                self.parameters.contractsNetworkFile = str(raw_value)
            # now also the parameters that can change during the simulation;
            # validity is encoded as "<from>-<to>"
            elif entry_type == 'changing':
                name = subelement.attrib['name']
                value = float(raw_value)
                validity = subelement.attrib['validity'].rsplit("-")
                self.parameters.add_parameter(name, value,
                                              validity[0], validity[1])
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # write_environment_file(file_name)
    # -------------------------------------------------------------------------
    def write_environment_file(self, file_name):
        """Write the current parameter set back out as <file_name>-check.xml."""
        out_file = open(file_name + "-check.xml", 'w')

        pieces = ["<environment title='" + self.identifier + "'>\n"]
        pieces.append(" <parameter type='numSweeps' value='" +
                      str(self.parameters.numSweeps) + "'></parameter>\n")
        pieces.append(" <parameter type='numSimulations' value='" +
                      str(self.parameters.numSimulations) + "'></parameter>\n")
        pieces.append(" <parameter type='numBanks' value='" +
                      str(self.parameters.numBanks) + "'></parameter>\n")
        pieces.append(" <parameter type='bankDirectory' value='" +
                      str(self.parameters.bankDirectory) + "'></parameter>\n")
        pieces.append(" <parameter type='graphType' value='" +
                      str(self.parameters.graphType) + "'></parameter>\n")
        pieces.append(" <parameter type='contractsNetworkFile' value='" +
                      str(self.parameters.contractsNetworkFile) + "'></parameter>\n")
        for entry in self.parameters.parameters:
            pieces.append(" <parameter type='changing' name='" +
                          str(entry['type']) + "' value='" +
                          str(entry['value']) + "' validity='" +
                          str(entry['validity'][0]) + "-" +
                          str(entry['validity'][1]) + "'></parameter>\n")
        pieces.append("</environment>\n")

        out_file.write("".join(pieces))
        out_file.close()
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # initialize_banks_from_files
    # banks have to be initialized for each simulation as a number of banks
    # might become inactive in the previous simulation
    # -------------------------------------------------------------------------
    def initialize_banks_from_files(self, bankDirectory, state, time):
        """Create one Bank per configuration file found in bankDirectory."""
        # this routine is called more than once, so we have to reset the list
        # of banks each time
        self.banks = []
        listing = os.listdir(bankDirectory)
        if len(listing) != self.parameters.numBanks:
            logging.error(" ERROR: number of configuration files in %s (=%s) does not match numBanks (=%s)",
                          bankDirectory, str(len(listing)),
                          str(self.parameters.numBanks))
        for infile in listing:
            bank = Bank()
            bank.get_parameters_from_file(bankDirectory + infile,
                                          self.get_state(0),
                                          self.parameters.numBanks, time)
            self.banks.append(bank)
            bank.__del__()  # TODO not sure if this is really safe, but it is better than doing nothing about all those created instances...
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # get_state
    # -------------------------------------------------------------------------
    def get_state(self, time):
        """For a time t in the simulation, return the valid set of parameters."""
        # TODO bring parameters in same order as in environment file and in
        # state.__str__()
        # All state attributes read from 'changing' parameters are floats,
        # with the single exception of shockType which is an int.
        float_attributes = (
            'rb', 'rd', 'r', 'collateralQuality', 'successProbabilityFirms',
            'positiveReturnFirms', 'scaleFactorHouseholds', 'dividendLevel',
            'pFinancial', 'rhoFinancial', 'pReal', 'rhoReal', 'xiBank',
            'thetaBank', 'rhoBank', 'gammaBank', 'assetNumber',
            'liquidationDiscountFactor', 'riskAversionDiscountFactor',
            'riskAversionAmplificationFactor', 'interbankLoanMaturity',
            'firmLoanMaturity', 'sifiSurchargeFactor', 'requiredCapitalRatio',
            'liquidityCoverageRatio', 'netStableFundingRatio', 'leverageRatio')

        for parameter in self.parameters.parameters:
            validFrom = int(parameter['validity'][0])
            validTo = int(parameter['validity'][1])
            if not (validFrom <= int(time) <= validTo):
                continue  # parameter not valid at this time
            name = parameter['type']
            if name == 'shockType':
                self.state.shockType = int(parameter['value'])
            elif name in float_attributes:
                setattr(self.state, name, float(parameter['value']))

        # at this point we have all the variables from the parameters[] list;
        # now we need to update them to incorporate past defaults to calculate
        # new return and volatility for real and financial assets
        self.state.update_state(time)
        return self.state
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # apply_sifi_surcharge
    # -------------------------------------------------------------------------
    def apply_sifi_surcharge(self):
        """Scale each bank's capital requirement by its relative connectedness."""
        degrees = nx.degree(self.network.contracts)
        degree_sum = 0
        for bank in self.network.contracts:
            degree_sum += float(degrees[bank])
        average_degree = float(degree_sum / len(self.network.contracts.nodes()))

        for bank in self.network.contracts:
            # the sifi surcharge is the product of the sifiSurchargeFactor and
            # the connectedness as measured by degree/average_degree; the
            # maximum ensures that no bank has to hold less than 1.0 times
            # their banking capital
            sifiSurcharge = max(
                self.get_state(0).sifiSurchargeFactor *
                (float(degrees[bank]) / average_degree),
                1.0)
            bank.apply_sifi_surcharge(sifiSurcharge)
#!/usr/bin/env python3

from src.hue import Hue
from src.network import Network
from src.utils import setup_logger


def version():
    """Log the version string stored in the VERSION file."""
    import logging
    logger = logging.getLogger("main")
    # Use a context manager so the file handle is closed (the previous
    # version opened VERSION inline and never closed it).
    with open("VERSION", "r") as version_file:
        logger.info(f'Hue geofencing version {version_file.read()}')


if __name__ == "__main__":
    setup_logger()
    version()
    hue = Hue()
    # Watch the network; toggle home/away state as devices join or leave.
    network = Network(callback_leave=hue.set_leave_home,
                      callback_join=hue.set_arrive)
class Environment(BaseConfig):
    """Simulation environment for funds, firms, government and households.

    Most methods are thin delegations to BaseConfig; the interesting logic
    is in read_xml_config_file / initialize / count_all_agents.
    """
    from state import State
    from parameters import Parameters

    #
    # VARIABLES
    #
    identifier = ""          # identifier of the environment
    static_parameters = {}   # a dictionary containing all environment parameters
    agents = []              # funds, firms, government and household agents
    variable_parameters = {}
    prices = []
    network = Network("")    # network of transactions
    # parameters = Parameters()
    # state = State()

    #
    # CODE
    #

    def __getattr__(self, attr):
        return super(Environment, self).__getattr__(attr)

    def get_identifier(self):
        return self.identifier

    def set_identifier(self, value):
        super(Environment, self).set_identifier(value)

    def __str__(self):
        return super(Environment, self).__str__()

    def accrue_interests(self):
        super(Environment, self).accrue_interests()

    def add_shock(self, shock):
        # BUGFIX: the shock argument was previously dropped; forward it.
        super(Environment, self).add_shock(shock)

    def add_static_parameter(self, params):
        super(Environment, self).add_static_parameters(params)

    def get_static_parameters(self):
        return self.static_parameters

    def set_static_parameters(self, params):
        super(Environment, self).set_static_parameters(params)

    def add_variable_parameter(self, params):
        # NOTE(review): delegates to add_static_parameters, identically to
        # add_static_parameter above — looks like a copy-paste slip; confirm
        # whether BaseConfig provides an add_variable_parameters method.
        super(Environment, self).add_static_parameters(params)

    def get_variable_parameters(self):
        return self.variable_parameters

    def set_variable_parameters(self, params):
        super(Environment, self).set_variable_parameters(params)

    def get_assets(self):
        return self.assets

    def set_assets(self, params):
        super(Environment, self).set_assets(params)

    def get_shocks(self):
        return self.shocks

    def set_shocks(self, params):
        super(Environment, self).set_shocks(params)

    def check_global_transaction_balance(self, _type):
        super(Environment, self).check_global_transaction_balance(_type)

    def write_environment_file(self, file_name):
        # BUGFIX: the file_name argument was previously dropped; forward it.
        super(Environment, self).write_environment_file(file_name)

    def print_parameters(self):
        super(Environment, self).print_parameters()

    def update_asset_returns(self):
        super(Environment, self).update_asset_returns()

    def new_transaction(self, type_, asset, from_id, to_id, amount, interest,
                        maturity, time_of_default, environment):
        """Create and register a new Transaction with the given terms."""
        from src.transaction import Transaction
        transaction = Transaction()
        # transaction.this_transaction(type_, asset, from_id, to_id, amount, interest, maturity, time_of_default)
        transaction.add_transaction(type_, asset, from_id, to_id, amount,
                                    interest, maturity, time_of_default,
                                    environment)
    # -------------------------------------------------------------------------

    def __init__(self, environment_directory, identifier):
        self.initialize(environment_directory, identifier)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # read_xml_config_file(self, config_file_name)
    # reads an xml file with config and sets identifier and parameters
    # -------------------------------------------------------------------------
    def read_xml_config_file(self, env_filename):
        """Parse the XML config file into identifier and parameter dicts."""
        try:
            xmlText = open(env_filename).read()
            element = ElementTree.XML(xmlText)  # we tell python it's an xml
            self.identifier = element.attrib['identifier']

            # loop over all entries in the xml file
            for subelement in element:
                try:
                    # first see whether the value parses as a number
                    if subelement.attrib['type'] == 'variable_parameters':
                        value = float(subelement.attrib['value'])
                        name = subelement.attrib['name']
                        self.variable_parameters[name] = value
                    else:
                        value = int(subelement.attrib['value'])
                        type_ = subelement.attrib['type']
                        self.static_parameters[type_] = value
                except (ValueError, KeyError):
                    # not a number (or missing 'name'): store as a string
                    # (narrowed from a bare except)
                    value = str(subelement.attrib['value'])
                    type_ = subelement.attrib['type']
                    self.static_parameters[type_] = value
        except Exception:
            logging.error(" ERROR: %s could not be parsed", env_filename)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # initialize
    # initializes the environment, reading the env_config file from the
    # supplied environment_directory and identifier, and initializes all
    # agents from the directories supplied in the main config file
    # -------------------------------------------------------------------------
    def initialize(self, environment_directory, identifier):
        self.identifier = identifier
        self.funds = []
        self.firms = []
        self.assets = []

        # first, read in the environment file
        environment_filename = environment_directory + identifier + ".xml"
        self.read_xml_config_file(environment_filename)
        logging.info(" Environment file read: %s", environment_filename)

        # then read in all the agents
        init_firms(self, self.static_parameters['firm_directory'], 0)
        init_funds(self, self.static_parameters['fund_directory'], 0)
        government = Government()
        household = Household()
        # That's our government agent
        self.agents = [self.funds, self.firms, government, household]

        for i in self.firms:
            i.endow_firms_with_equity(self, 10000)

        # Functions called from initialisation.py
        init_profits(self, 0)
        init_assets(self, self.static_parameters['asset_directory'], 0)

        # Now we determine the amount of fundamentalists and chartists.
        # count_all_agents() is idempotent, so call it once and reuse.
        sum_a, sum_b = self.count_all_agents()
        total_funds = sum_a + sum_b
        self.variable_parameters['amount_fundamentalists'] = int(
            total_funds * self.variable_parameters['fundamentalists'])
        self.variable_parameters['amount_chartist'] = int(
            total_funds * self.variable_parameters['chartists'])

        logging.info(" Initialized %s A funds and %s B funds and stored in environment.funds",
                     self.sum_a_funds, self.sum_b_funds)
        logging.info(" Initialized %s A firms and %s B firms and stored in environment.firms",
                     self.sum_a_firms, self.sum_b_firms)
        logging.info(" Global assets under management are %s currency units; A assets are %s currency units; B assets are %s currency units",
                     self.global_assets_under_management, self.ame_market_cap,
                     self.eme_market_cap)
        logging.info(" We are looking for the price of the A and B equity assets (given an additional risk-free bond asset), introduce QE and look for spillover effects")
        logging.info(" *******Environment initialisation completed*******")

    def agents_generator(self):
        """Yield every individual agent, flattening list-valued entries."""
        if self.agents is None:
            raise LookupError('There are no agents to iterate over.')
        for agent_type in self.agents:
            if isinstance(agent_type, list):
                for agent in agent_type:
                    yield agent
            else:
                yield agent_type

    def get_agent_by_id(self, ident):
        """Return the unique agent whose identifier equals ident.

        Raises LookupError if no agent, or more than one, has that ID.
        """
        to_return = None
        for agent in self.agents_generator():
            if agent.identifier == ident:
                if to_return is None:
                    to_return = agent
                else:
                    # IDs are not unique, so we raise an error
                    raise LookupError('At least two agents have the same ID.')
        if to_return is None:
            raise LookupError('No agents have the provided ID.')
        return to_return
    # -------------------------------------------------------------------------

    def count_all_agents(self):
        """Count funds and firms per domicile; return (sum_a_funds, sum_b_funds).

        Also stores all four counts in variable_parameters. (Rewritten to
        avoid shadowing the builtin `sum`.)
        """
        self.variable_parameters['sum_a_funds'] = sum(
            1 for fund in self.funds if fund.parameters['domicile'] == 0)
        self.variable_parameters['sum_b_funds'] = sum(
            1 for fund in self.funds if fund.parameters['domicile'] == 1)
        ################## FIRMS
        self.variable_parameters['sum_a_firms'] = sum(
            1 for firm in self.firms if firm.parameters['domicile'] == 0)
        self.variable_parameters['sum_b_firms'] = sum(
            1 for firm in self.firms if firm.parameters['domicile'] == 1)
        return (self.variable_parameters['sum_a_funds'],
                self.variable_parameters['sum_b_funds'])
Rotate the surface around the pivot point :param surface: pygame.Surface :param angle: float :param pivot: list :param offset: pygame.math.Vector2 :return: IMG, rect """ rotated_image = pygame.transform.rotozoom(surface, -angle, 1) # Rotate the image. rotated_offset = offset.rotate(angle) # Rotate the offset vector. # Add the offset vector to the center/pivot point to shift the rect. rect = rotated_image.get_rect(center=pivot + rotated_offset) return rotated_image, rect # Return the rotated image and shifted rect.` n = Network(os.getenv("IP"), int(os.getenv("PORT"))) def main(): global players global n data = n.get_idx() if data is None: print("Error connecting to server...") exit(1) idx = data["idx"] # send username to server n.send({"username": username}) while True:
def train_model(model_name='resnet18',
                num_epochs=1,
                hidden_sizes=[256],
                learning_rate=0.003,
                model_path=None,
                data_dir='flowers',
                use_gpu=False,
                save_dir='checkpoints'):
    """Train (or resume training of) a flower classifier and checkpoint it.

    Starting fresh (model_path is None), builds a pretrained vgg13/resnet18
    backbone with a custom `Network` head; otherwise restores the model,
    optimizer and loss history from the checkpoint at model_path and extends
    training by num_epochs.  After each epoch the model is validated and the
    best-accuracy weights are kept; at the end those weights are restored,
    a checkpoint dict is saved, and the trained model is returned.

    NOTE(review): hidden_sizes=[256] is a mutable default argument; harmless
    only because it is never mutated here.
    NOTE(review): if model_path is None and model_name is neither 'vgg13' nor
    'resnet18', `model` stays None and model.class_to_idx below raises
    AttributeError — no explicit error for unknown model names.
    """
    train, trainloader, validloader = load_data(data_dir)
    # 102 output classes (flower categories).
    output_size = 102
    device = torch.device('cuda' if use_gpu else 'cpu')
    if model_path is None:
        # Fresh run: counters and loss histories start empty.
        start = 0
        iterations = num_epochs
        train_losses, valid_losses = [], []
        model = None
    else:
        # Resume from checkpoint; total iteration count extends by num_epochs.
        # model, optimizer, iterations, train_losses, valid_losses =load_checkpoint(model_path)
        model_dict = load_checkpoint(model_path)
        model = model_dict["model"]
        model = model.to(device)
        optimizer = model_dict["optimizer"]
        model_name = model_dict["model_name"]
        start = model_dict["iterations"]
        iterations = num_epochs + start
        train_losses, valid_losses = model_dict["train_losses"], model_dict[
            "valid_losses"]
        print('starting from {} epoch and training {} epoch(s) now'.format(
            start, num_epochs))
    #CHECK: also in load_checkpoint, maybe refactor
    if model is None and model_name == 'vgg13':
        model = models.vgg13(pretrained=True)
        #turn off gradients for the model
        for param in model.parameters():
            param.requires_grad = False
        input_size = 25088
        # Replace the classifier head; only its parameters are optimised.
        model.classifier = Network(input_size, output_size, hidden_sizes)
        optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    elif model is None and model_name == 'resnet18':
        model = models.resnet18(pretrained=True)
        #turn off gradients for the model
        for param in model.parameters():
            param.requires_grad = False
        input_size = 512
        # resnet's final layer is .fc rather than .classifier.
        model.fc = Network(input_size, output_size, hidden_sizes)
        optimizer = optim.Adam(model.fc.parameters(), lr=learning_rate)
    print('-' * 20)
    print(f"Model name: {model_name}")
    print(f"Learning_rate: {learning_rate}")
    print(f"Hidden_units: {hidden_sizes}\n")
    model.class_to_idx = train.class_to_idx
    # The head presumably emits log-probabilities (NLLLoss expects log_ps).
    criterion = nn.NLLLoss()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    since = time.time()
    steps = 0
    model.to(device)
    for epoch in range(start, iterations):
        print('Epoch {}/{}'.format(epoch + 1, iterations))
        print('-' * 10)
        print("Train losses: {}".format(train_losses))
        print("Valid losses: {}".format(valid_losses))
        running_loss = 0
        model.train()
        for images, labels in trainloader:
            since_train_step = time.time()
            steps += 1
            # Move input and label tensors to the GPU
            images, labels = images.to(device), labels.to(device)
            model.train()
            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                log_ps = model(images)
                loss = criterion(log_ps, labels)
                loss.backward()
                optimizer.step()
            running_loss += loss.item()
            print("Time per train step {}/{}: {}".format(
                steps, len(trainloader), time.time() - since_train_step))
        else:
            # for/else: the loop has no break, so this runs once after every
            # epoch's training pass — i.e. the per-epoch validation step.
            # Model in inference mode, dropout is off
            model.eval()
            # Turn off gradients for validation, will speed up inference
            with torch.no_grad():
                valid_loss, accuracy = validate_model(model, validloader,
                                                      criterion, device)
            train_losses.append(round(running_loss / len(trainloader), 3))
            valid_losses.append(round(valid_loss / len(validloader), 3))
            # Keep the best-performing weights seen so far.
            if accuracy > best_acc:
                best_acc = accuracy
                best_model_wts = copy.deepcopy(model.state_dict())
            # NOTE(review): the printed accuracy divides by len(validloader)
            # while the best_acc comparison uses the raw value — presumably
            # `accuracy` is a sum over validation batches; verify in
            # validate_model.
            print(
                "Epoch: {}/{}.. ".format(epoch + 1, iterations),
                "Training Loss: {:.3f}.. ".format(running_loss /
                                                  len(trainloader)),
                "Test Loss: {:.3f}.. ".format(valid_loss / len(validloader)),
                "Test Accuracy: {:.3f}..".format(accuracy / len(validloader)))
            running_loss = 0
            steps = 0
            # Make sure dropout and grads are on for training
            model.train()
    # load best model weights
    model.load_state_dict(best_model_wts)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    # Save the model to checkpoint
    checkpoint = {
        'hidden_sizes': hidden_sizes,
        'model': model,
        'state_dict': model.state_dict(),
        'optimizer': optimizer,
        'optimizer_dict': optimizer.state_dict(),
        'class_to_idx': model.class_to_idx,
        'iterations': iterations,
        'learning_rate': learning_rate,
        'train_losses': train_losses,
        'valid_losses': valid_losses,
        'model_name': model_name
    }
    checkpoint_filename = "".join(
        ["checkpoint_", model_name, "_", str(iterations), "epochs.pth"])
    if save_dir is not None:
        torch.save(checkpoint, '{}/{}'.format(save_dir, checkpoint_filename))
    else:
        torch.save(checkpoint, checkpoint_filename)
    return model
def setUpClass(cls):
    """Create the shared Network fixture once for the whole test class."""
    cls.network = Network('8.8.8.0/27')
    # FIX: was a Python 2 print statement; the parenthesized call form
    # behaves identically under Python 2 (single argument) and is valid
    # Python 3, matching the Py3 variant of this fixture elsewhere.
    print('\n# Testing Network {}'.format(str(cls.network)))
def __init__(self):
    """Initialise empty frame state and a weight-loaded Network."""
    # No origin / car count until the first input is processed.
    self.origin = None
    self.car_numbers = None
    # Build the 972-250-100-22 network and restore its trained weights
    # from the same parameter file.
    params_file = 'network_parameters.json'
    net = Network([972, 250, 100, 22], params_file)
    net.load_weights(params_file)
    self.network = net
def setUpClass(cls):
    """Build the shared Network fixture and announce which net is under test."""
    net = Network("8.8.8.0/27")
    cls.network = net
    banner = "\n# Testing Network {}".format(str(net))
    print(banner)
save_graphs=False, device_target="Ascend")

if __name__ == '__main__':
    # get input data: the .npz bundle holds coordinates, neighbour lists,
    # normalisation statistics (avg/std) and atom types for evaluation.
    r = np.load(args_opt.dataset_path)
    d_coord, d_nlist, avg, std, atype, nlist = r['d_coord'], r['d_nlist'], r[
        'avg'], r['std'], r['atype'], r['nlist']
    # Evaluation is hard-coded to a single frame.
    batch_size = 1
    atype_tensor = Tensor(atype)
    avg_tensor = Tensor(avg)
    std_tensor = Tensor(std)
    nlist_tensor = Tensor(nlist)
    # Reshape flat coordinates into one frame of shape (1, n_atoms, 3).
    d_coord_tensor = Tensor(np.reshape(d_coord, (1, -1, 3)))
    d_nlist_tensor = Tensor(d_nlist)
    # Per-frame offsets; presumably 1536 is the per-frame atom-coordinate
    # stride — TODO confirm against the dataset layout.
    frames = []
    for i in range(batch_size):
        frames.append(i * 1536)
    frames = Tensor(frames)
    # evaluation: restore trained weights and run one forward pass.
    net = Network()
    param_dict = load_checkpoint(args_opt.checkpoint_path)
    load_param_into_net(net, param_dict)
    net.to_float(mstype.float32)
    energy, atom_ener, force, virial = \
        net(d_coord_tensor, d_nlist_tensor, frames, avg_tensor, std_tensor,
            atype_tensor, nlist_tensor)
    print('energy:', energy)
    print('atom_energy:', atom_ener)
    print('force:', force)
    print('virial:', virial)
def trainNetwork():
    """Train a 784-60-30-10 network with SGD and return it.

    Data comes from load_data_wrapper() (presumably MNIST-style 784-pixel
    inputs); the validation split is loaded but not used here.
    """
    datasets = load_data_wrapper()
    training_data, validation_data, test_data = datasets
    layer_sizes = [784, 60, 30, 10]
    network = Network(layer_sizes)
    # 50 epochs, mini-batches of 10, learning rate 3.0; evaluated on
    # test_data after each epoch.
    network.SGD(training_data, 50, 10, 3.0, test_data=test_data)
    return network
import unittest
from src.network import Network
from src.layer import Layer
from random import random
from math import sin

# Module-level fixtures shared by the tests below.
singleNeuronNetwork = Network([Layer(5, 1)])
multipleLayerNetwork = Network(
    [Layer(10, 3), Layer(3, 5), Layer(5, 2), Layer(2, 10)])
# NOTE(review): not referenced in the tests visible here — presumably used
# by a test further down the file.
sineWaveNetwork = Network([Layer(2, 1)])


class NetworkTest(unittest.TestCase):
    """Structural and forward-pass tests for Network/Layer composition."""

    def testSingleNeuronNetwork(self):
        # Layer(5, 1): a single neuron taking five inputs.
        self.assertEqual(1, len(singleNeuronNetwork.first_layer))
        self.assertEqual(5, singleNeuronNetwork.first_layer.inputs)
        # Check if it can process correctly
        res = singleNeuronNetwork.process([1, 2, 3, 4, 5])
        self.assertEqual(1, len(res))

    def testMultipleLayerNetwork(self):
        # First layer: 3 neurons over 10 inputs; final layer widens back
        # to 10 outputs.
        self.assertEqual(3, len(multipleLayerNetwork.first_layer))
        self.assertEqual(10, multipleLayerNetwork.first_layer.inputs)
        # Check if it can process correctly
        res = multipleLayerNetwork.process([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
        self.assertEqual(10, len(res))
class Test(keras.callbacks.Callback):
    """No-op Keras callback placeholder; hook methods can be added later."""

    def __init__(self):
        # FIX: initialise base-class state instead of returning immediately;
        # Keras Callback.__init__ sets attributes the framework relies on.
        super(Test, self).__init__()


master_tool = Tools()
X, y = master_tool.load(fill=False)
print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(
    y.shape, y.min(), y.max()))
earl = EarlyStopping(monitor="val_loss", min_delta=0, patience=10)
model = Sequential()
temp = Network(model=model, no=3)
model = temp.get_model()
#model.load_weights("model.h5")
model.compile(optimizer="adagrad",
              loss="mean_squared_error",
              metrics=["accuracy"])
# FIX: Keras expects callback *instances*; the original passed the Test class
# object itself.  Also wire in the EarlyStopping callback (`earl`) that was
# constructed above but never used.
model.fit(X,
          y,
          callbacks=[Test(), earl],
          epochs=100,
          verbose=1,
          validation_split=0.1,
          shuffle=True)
def run(self):
    """Run five concurrent TCP transfers of self.filename between two nodes.

    Each transfer uses its own port pair (1-5) and its own AppHandler, so
    the five received copies land in distinct output files (suffixes 1-5).
    The start of each connection is staggered by 0.1s.
    """
    # parameters
    Sim.scheduler.reset()
    Sim.set_debug('AppHandler')
    Sim.set_debug('TCP')
    # Sim.set_debug('Link')
    # setup application: one receive-side handler per transfer, each
    # writing to filename + "1" .. filename + "5" in out_directory
    a1 = AppHandler(self.filename + str(1), self.out_directory)
    a2 = AppHandler(self.filename + str(2), self.out_directory)
    a3 = AppHandler(self.filename + str(3), self.out_directory)
    a4 = AppHandler(self.filename + str(4), self.out_directory)
    a5 = AppHandler(self.filename + str(5), self.out_directory)
    # setup network
    net = Network('../networks/setup.txt')
    net.loss(self.loss)
    # setup routes: a single bidirectional link between n1 and n2
    n1 = net.get_node('n1')
    n2 = net.get_node('n2')
    n1.add_forwarding_entry(address=n2.get_address('n1'), link=n1.links[0])
    n2.add_forwarding_entry(address=n1.get_address('n2'), link=n2.links[0])
    # setup transport
    t1 = Transport(n1)
    t2 = Transport(n2)
    # setup connection 1 (ports 1/1).  The n2-side endpoints (c2, c4, c6,
    # c8, c0) are never referenced again; presumably constructing a TCP
    # object registers the receive side with its transport — TODO confirm.
    c1 = TCP(t1, n1.get_address('n2'), 1, n2.get_address('n1'), 1, a1, window=self.window)
    c2 = TCP(t2, n2.get_address('n1'), 1, n1.get_address('n2'), 1, a1, window=self.window)
    # setup connection 2 (ports 2/2)
    c3 = TCP(t1, n1.get_address('n2'), 2, n2.get_address('n1'), 2, a2, window=self.window)
    c4 = TCP(t2, n2.get_address('n1'), 2, n1.get_address('n2'), 2, a2, window=self.window)
    # setup connection 3 (ports 3/3)
    c5 = TCP(t1, n1.get_address('n2'), 3, n2.get_address('n1'), 3, a3, window=self.window)
    c6 = TCP(t2, n2.get_address('n1'), 3, n1.get_address('n2'), 3, a3, window=self.window)
    # setup connection 4 (ports 4/4)
    c7 = TCP(t1, n1.get_address('n2'), 4, n2.get_address('n1'), 4, a4, window=self.window)
    c8 = TCP(t2, n2.get_address('n1'), 4, n1.get_address('n2'), 4, a4, window=self.window)
    # setup connection 5 (ports 5/5)
    c9 = TCP(t1, n1.get_address('n2'), 5, n2.get_address('n1'), 5, a5, window=self.window)
    c0 = TCP(t2, n2.get_address('n1'), 5, n1.get_address('n2'), 5, a5, window=self.window)
    # send a file: queue every 10000-char chunk on all five sending
    # endpoints before the simulation starts
    with open(self.in_directory + '/' + self.filename, 'r') as f:
        while True:
            data = f.read(10000)
            if not data:
                break
            Sim.scheduler.add(delay=0.0, event=data, handler=c1.send)
            Sim.scheduler.add(delay=0.1, event=data, handler=c3.send)
            Sim.scheduler.add(delay=0.2, event=data, handler=c5.send)
            Sim.scheduler.add(delay=0.3, event=data, handler=c7.send)
            Sim.scheduler.add(delay=0.4, event=data, handler=c9.send)
    # run the simulation
    Sim.scheduler.run()