def test(rep=10):
    '''A small test function'''
    global trainingSet
    topology = [2, 5, 5, 1]
    net = Network(topology)
    net.Gradients = [None, None]
    for i in range(rep + 1):
        error = 0.0
        for I, P in trainingSet:
            print("Weights:")
            print(net.weights[0])
            print("Gradients:")
            print(net.Gradients[0])
            #print("Previous change:")
            #print(net.last_change[0])
            print()
            print(net.weights[1])
            print("Gradients:")
            print(net.Gradients[1])
            #print("Previous Change:")
            #print(net.last_change[1])
            print()
            error += net.backprop(I, P)
            print("Activations:")
            print("L=0 : ", net.netOuts[0])
            print("L=1 : ", net.netOuts[1])
            print("L=2 : ", net.out)
            print()
        print("-----------------------------------")
        print("ERROR: ", error, "EPOCH: ", i)
        print("-----------------------------------")
        print()
def main():
    train_loader, test_loader = create_loaders()
    model = Network()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
    summary(model, (1, 784))

    for epoch in range(1, NUM_EPOCHS + 1):
        train_model(model, train_loader, optimizer, criterion, epoch)
        acc = test_model(model, test_loader)

    torch.save(model, "mnist.pt")
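# create_loaders(), train_model(), and test_model() are defined elsewhere in the project.
# The sketch below is only an assumption of what create_loaders() might look like given the
# (1, 784) input shape used above: a standard torchvision MNIST pipeline that flattens each
# image, with a hypothetical BATCH_SIZE constant.
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

BATCH_SIZE = 64  # assumed value; the project's real constant is not shown here


def create_loaders():
    # Flatten each 28x28 image into a 784-length vector to match summary(model, (1, 784)).
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.view(-1)),
    ])
    train_set = datasets.MNIST("data", train=True, download=True, transform=transform)
    test_set = datasets.MNIST("data", train=False, download=True, transform=transform)
    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=False)
    return train_loader, test_loader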
class Agent:
    def __init__(self, trading_fee: float, shares: int, cash: int, id: int):
        self.brain = Network()
        self.id = id
        # input layer: 1. self.cash, 2. current market price, 3. self.shares (position), 4. supply
        self.brain.add_layer(4)
        self.brain.add_layer(4)
        self.brain.add_layer(4)
        # output layer: 1. how much to buy or sell as a percent of total cash/shares, 2. buy, 3. sell, 4. price
        self.brain.add_layer(4)
        self.cash = cash
        self.shares = shares
        self.trading = True
        self.account_values = []
        self.trading_Fee = trading_fee

    def order_expired(self, shares, price, buy: bool):
        # return the cash or shares reserved for the order, plus the reserved fee
        if buy:
            self.cash += shares * price
        else:
            self.shares += shares
        self.cash += self.trading_Fee * shares * price

    def partial_fill(self, shares, price, buy: bool):
        # credit the portion of the order that was actually executed
        if buy:
            self.shares += shares
        else:
            self.cash += shares * price

    def trade(self, supply: int, order_book: OrderBook, price_history: list):
        trade = self.brain.fire_network([self.cash, order_book.price, self.shares, supply])
        buy = trade[1] > trade[2]
        # quote one cent above the book price if the price output is >= .5, one cent below otherwise
        price = order_book.price + .01 if trade[3] >= .5 else order_book.price - .01
        shares = int((trade[0] * self.cash) / price) if buy else int(trade[0] * self.shares)
        if not buy and shares > self.shares:
            shares = self.shares
        # subtract the trading fee and reserve the cash or shares for the order
        fee = self.trading_Fee * shares * price
        if fee < self.cash:
            if buy:
                self.cash -= shares * price
            else:
                self.shares -= shares
            self.cash -= fee
            return Order(self.id, price, shares, buy)
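# Order and OrderBook come from the order-matching module and are not shown here. The call
# Order(self.id, price, shares, buy) above implies a small container roughly like the sketch
# below; the field names are assumptions, not the project's actual definition.
from dataclasses import dataclass


@dataclass
class Order:
    agent_id: int  # id of the Agent that placed the order
    price: float   # limit price quoted by the agent
    shares: int    # number of shares to buy or sell
    buy: bool      # True for a buy order, False for a sell order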
trainingTargets = [
    np.array([-0.5]),
    np.array([0.5]),
    np.array([0.5]),
    np.array([-0.5])
]
'''
trainingTargets = [np.array([0]), np.array([1]), np.array([1]), np.array([0])]
'''

topology = [2, 10, 10, 1]
net = Network(topology, 0.1, 0.1)
net.save("recog_number.csv", transpose=True, keep_bias=False)  # saving a file with the initial weights
#net.outActiv_fun = sigmoid

trainingSet = list(zip(trainingInputs, trainingTargets))
epochs = 10000
tolerance = 1E-10

print("Initial Weights:")
for W in net.weights:
    print(W)
print()

net.train(trainingSet, epochs, tolerance)
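# trainingInputs is defined elsewhere. Given the -0.5 / 0.5 target pattern above and the
# two-input topology, it presumably holds the four XOR input pairs; the sketch below is an
# assumption about its contents, not the file's actual definition.
import numpy as np

trainingInputs = [
    np.array([0, 0]),
    np.array([0, 1]),
    np.array([1, 0]),
    np.array([1, 1]),
]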
    if wait:
        barrier.wait()


if __name__ == "__main__":
    if switch:
        # get the CPU count for the machine this runs on
        cpu_count = mp.cpu_count()
        # barrier object shared by the worker processes
        barrier = mp.Barrier(cpu_count)
        # pool object, initialised with the shared barrier
        pool = mp.Pool(cpu_count, initializer, (barrier, ))
    else:
        pool = None

    # First we need to define our population of networks, let's go with 200 per generation:
    networks = [Network([10, 8, 6, 4]) for _ in range(200)]
    for net in networks:
        net.import_data(random_gen())
        # We suppress this typing error later on.
        # This way we don't actually have to mess with the weights definition in the NeuralNet file.

    generation = 0
    best_network = None
    max_generation = 200

    while True:
        generation += 1
        # evaluate networks for fitness
        evaluate_networks(pool, networks)
        # sort networks with the fittest at the very top (descending list of network fitnesses)
        networks.sort(key=lambda n: n.fitness, reverse=True)
        # save the best network (optional)
        if not best_network or best_network.fitness < networks[0].fitness:
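# evaluate_networks() is defined elsewhere. A minimal sketch of what it might do, assuming a
# hypothetical fitness_function(net) -> float: score every network, in parallel when a
# multiprocessing pool is available, and store the result on net.fitness so the sort above works.
def evaluate_networks(pool, networks):
    if pool is not None:
        fitnesses = pool.map(fitness_function, networks)  # parallel evaluation across the pool
    else:
        fitnesses = [fitness_function(net) for net in networks]
    for net, fit in zip(networks, fitnesses):
        net.fitness = fit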