def main():
    net = NN(7, 52, 26)
    sys.stderr.write('Loading neural net\n')
    # Restore the trained weights dumped by the training script.
    network = json.load(file('network2.json', 'r'))
    for key in network:
        setattr(net, key, network[key])
    for filename in listdir('test'):
        print filename, ': ',
        proc = Popen(['./split', 'test/' + filename, 'out.jpg', '2', '0', '210'],
                     stdout=PIPE)
        proc.wait()
        for line in proc.stdout.read().split('\n'):
            if not line:
                sys.stdout.write('\n')
                continue
            pattern = [int(x) for x in line.split(' ')]
            outputs = net.update(pattern)
            # Pick the letter whose output node fired most strongly.
            high_value = 0
            high_letter = 0
            for i in xrange(len(outputs)):
                if outputs[i] > high_value:
                    high_value = outputs[i]
                    high_letter = i
            sys.stdout.write(chr(ord('a') + high_letter))
class NeuralComputerPlayer(ComputerPlayer):

    def __init__(self, name, search_depth=2):
        self.name = name
        self.depth = search_depth
        self.net = NN(len(heuristic.fs) + len(heuristic.gs), 2, 1)
        self.inputs_buffer = []
        self.N = 0.05  # learning rate
        self.M = 0.01  # momentum factor

    def get_score(self, game_state):
        effs = [f_i(game_state, self.name) for f_i in heuristic.fs]
        gees = [heuristic.g_1(game_state, self.name)]
        inputs = effs + gees
        self.inputs_buffer.append(inputs)
        output = self.net.score(inputs)
        return output
        # Unreachable alternative that also mixes in the g_0 heuristic:
        # return output + 1. * heuristic.g_0(game_state, self.name)

    def update_weights(self, expected_score, actual_score, f_scores):
        """Compare the predicted game state against the current game state
        and update the weights.  The update formula is:

            new_weight = current_weight + n * (desired_score - actual_score) * f_score
        """
        self.net.update(self.inputs_buffer.pop(0))
        return self.net.backPropagate(actual_score, self.N, self.M)
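# A minimal standalone sketch of the plain update rule described in the
# update_weights docstring, new_weight = current_weight +
# n * (desired_score - actual_score) * f_score.  The names `adjust_weights`,
# `weights`, `f_scores`, and `learning_rate` are illustrative assumptions,
# not part of the class above.
def adjust_weights(weights, f_scores, desired_score, actual_score,
                   learning_rate=0.05):
    error = desired_score - actual_score
    return [w + learning_rate * error * f for w, f in zip(weights, f_scores)]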
def find_a_network():
    # TODO notes from thesis update
    #
    # Look at and implement the quasi-random initialization stuff
    # from pga's paper.
    #
    # Try training networks on other chaotic functions.
    # In particular try mu * ( x * (1 - x) )^(1/2)
    # pga -- it looks like the function that my network learned.
    #
    # Try finding chaotic autonomous RNNs using my GA.
    K = 80
    pat = generate_pattern(200)
    network1 = NN(2, K, 1)

    d = shelve.open('shelved.networks.db')
    key = str(int(time.time()))
    d[key] = network1
    d.close()
    print "Shelved pre-training under the key", key

    network1.train(pat, True, 0.010)
    print "Done training."

    d = shelve.open('shelved.networks.db')
    key = str(int(time.time()))
    d[key] = network1
    d.close()
    print "Shelved post-training under the key", key

    all_diagrams(lambda x: network1.update([x, 1]), f)
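# The snippet above relies on generate_pattern(), which is not shown.  A
# minimal sketch, assuming it iterates a logistic-style chaotic map
# mu * x * (1 - x) and pairs each value with its successor as
# (inputs, targets) training patterns; the constant bias input of 1 matches
# network1.update([x, 1]) above.  The parameters mu and x are assumptions.
def generate_pattern(n, mu=3.9, x=0.5):
    pat = []
    for _ in xrange(n):
        x_next = mu * x * (1 - x)
        pat.append(([x, 1], [x_next]))
        x = x_next
    return pat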
def main():
    print 'Building neural network...'
    net = NN(4, 52, 26)

    print 'Training neural network'
    count = 0
    traindata = []
    for filename in listdir(CAPTCHA_DIR):
        if count % 100 == 0:
            print count
        count += 1
        chdir(CAPTCHA_DIR)
        proc = Popen(['../split', filename, '../out.jpg', '1', '0', '210'],
                     stdout=PIPE)
        vectors = proc.stdout.read()
        proc.wait()
        if proc.returncode != 0:
            continue
        for line in vectors.split('\n'):
            line = [x for x in line.split(' ') if x]
            if not line:
                continue
            # First field is the letter label; the rest is the input vector.
            letter = ord(line[0].lower()) - ord('a')
            line = line[1:]
            line = [int(x) for x in line]
            # One-hot target: 100 for the correct letter, 0 elsewhere.
            letters = []
            for i in xrange(26):
                if i == letter:
                    letters.append(100)
                else:
                    letters.append(0)
            traindata.append((line, letters))

    net.train(traindata, iterations=10)
    print 'Finished training network... Saving'
    network = {
        'ni': net.ni, 'nh': net.nh, 'no': net.no,
        'ai': net.ai, 'ah': net.ah, 'ao': net.ao,
        'wi': net.wi, 'wo': net.wo,
        'ci': net.ci, 'co': net.co,
    }
    json.dump(network, file('../network2.json', 'w'))
    print 'Finished dumping to network2.json'
def train(self, session, doc):
    # modified bpnn to accept dict as sparse input
    (labels, vectors) = doc.get_raw(session)
    (nattrs, vectors) = self.renumber_train(session, vectors)
    labelSet = set(labels)
    lls = len(labelSet)
    if lls == 2:
        patts = [(vectors[x], [labels[x]]) for x in xrange(len(labels))]
        noutput = 1
    else:
        if lls < 5:
            templ = ((0, 0), (1, 0), (0, 1), (1, 1))
        elif lls < 9:
            templ = ((0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0),
                     (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1))
        else:
            # hopefully not more than 16 classes!
            templ = ((0, 0, 0, 0), (1, 0, 0, 0), (0, 1, 0, 0), (1, 1, 0, 0),
                     (0, 0, 1, 0), (1, 0, 1, 0), (0, 1, 1, 0), (1, 1, 1, 0),
                     (0, 0, 0, 1), (1, 0, 0, 1), (0, 1, 0, 1), (1, 1, 0, 1),
                     (0, 0, 1, 1), (1, 0, 1, 1), (0, 1, 1, 1), (1, 1, 1, 1))
        rll = range(len(labels))
        patts = [(vectors[x], templ[labels[x]]) for x in rll]
        noutput = len(templ[0])
    # shuffle to ensure not all of class together
    r = random.Random()
    r.shuffle(patts)
    # only way this is at all usable is with small datasets run in psyco
    # but is at least fun to play with...
    if maxattr * len(labels) > 2000000:
        print "Training NN is going to take a LONG time..."
        print "Make sure you have psyco enabled..."
    n = NN(maxattr, self.hidden, noutput)
    self.model = n
    n.train(patts, self.iterations, self.learning, self.momentum)
    modStr = cPickle.dumps(n)
    f = file(mp, 'w')
    f.write(modStr)
    f.close()
    self.predicting = 1
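# The hard-coded templ tuples above are just the integers 0..15 written in
# binary, least-significant bit first.  A minimal sketch that generates the
# same codes for any class count; `encode_label` and `nbits` are illustrative
# names, not part of the original classifier.
def encode_label(label, nbits):
    return tuple((label >> bit) & 1 for bit in xrange(nbits))

# e.g. with 3 output bits, label 5 becomes (1, 0, 1), matching templ[5] above.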
def load_weights(self):
    ''' Loads the stored data from previous sessions, if possible.'''
    valid = False
    try:
        fp = open(self.filename, 'r')
    except IOError:
        self.log_debug(11, "Couldn't read stats file '%s'", self.filename)
    else:
        self.log_debug(11, "Loading stats file '%s'", self.filename)
        try:
            pickler = Unpickler(fp)
            self.input_headers = pickler.load()
            wi = pickler.load()
            self.output_headers = pickler.load()
            wo = pickler.load()
            #self.seasons = pickler.load()
            #self.powers = pickler.load()
            #self.locs = pickler.load()
            #self.provinces = pickler.load()
            #self.centers = pickler.load()
            #self.coastals = pickler.load()
            #self.coastlines = pickler.load()
            #self.borders = pickler.load()
        finally:
            fp.close()
        ni = len(self.input_headers)
        no = len(self.output_headers)
        nh = len(wo)
        self.log_debug(7, "%d inputs => %d hidden => %d outputs", ni, nh, no)
        self.net = NN(ni, nh, no, wi, wo)
        valid = True
    return valid
def __init__(self, name, search_depth=2):
    self.name = name
    self.depth = search_depth
    self.net = NN(len(heuristic.fs) + len(heuristic.gs), 2, 1)
    self.inputs_buffer = []
    self.N = 0.05
    self.M = 0.01
def __init__(self):
    # Inheritance
    NN.__init__(self)
    # Initial hero is antimage
    self.heroid = 1
    # Open sample data, see gather.py; the with blocks close the files.
    try:
        with open("data/sampledata.json", "r") as f:
            self.data = json.load(f)
        with open("data/heroes.json", "r") as f:
            self.heroes = json.load(f)
    except FileNotFoundError as e:
        print("[WARNING] %s not found." % e.filename)
def create_net(self, board):
    # Inputs: Seasons, Coasts*Players, Centers*Players
    # Outputs: Provinces*(Provinces+Self+Adjacents+Coastals)
    # Consider: Player*(Player-1)
    self.input_headers = map(str, self.seasons)
    for coast in self.locs:
        if coast[2]:
            self.input_headers.extend(
                "%s %s %s: %s" % (coast[0], coast[1], coast[2], power)
                for power in self.powers)
        else:
            self.input_headers.extend(
                "%s %s: %s" % (coast[0], coast[1], power)
                for power in self.powers)
    for center in self.centers:
        self.input_headers.extend("%s: %s" % (center, power)
                                  for power in self.powers)

    def dest(key):
        result = "%s %s" % (key[0], key[1])
        if key[2]:
            result += " " + str(key[2])
        return result

    self.output_headers = []
    for token in self.provinces:
        self.output_headers.extend("%s: From %s" % (token, prov)
                                   for prov in self.provinces)
        self.output_headers.extend("%s: To %s" % (token, dest(coast))
                                   for coast in self.coastlines[token])
        self.output_headers.extend("%s: To %s" % (token, dest(coast))
                                   for prov in self.borders[token]
                                   for coast in self.coastlines[prov])
        self.output_headers.extend("%s: CTO %s" % (token, dest(coast))
                                   for coast in self.coastals)

    ni = len(self.input_headers)
    no = len(self.output_headers)
    nh = int(round(sqrt(ni * no)))
    self.log_debug(15, 'Inputs: %s', self.input_headers)
    self.log_debug(15, 'Outputs: %s', self.output_headers)
    self.log_debug(7, "%d inputs => %d hidden => %d outputs", ni, nh, no)
    self.net = NN(ni, nh, no)
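# create_net() sizes the hidden layer as the geometric mean of the input and
# output counts, nh = int(round(sqrt(ni * no))).  A quick illustration with
# made-up layer sizes (the real ni and no depend on the map):
from math import sqrt

ni, no = 100, 400
nh = int(round(sqrt(ni * no)))  # -> 200 hidden nodes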
class Brain(VerboseObject):
    ''' Stores the neural networks for Neurotic bots.
        Designed to potentially require only one Brain instance per map,
        without letting bots step on each others' toes.
    '''#'''

    def __init__(self, board):
        self.__super.__init__()
        self.filename = 'log/bots/neurotic_%s.pkl' % (board.name,)
        self.sort_lists(board)
        if not self.load_weights():
            self.create_net(board)

    def sort_lists(self, board):
        # Originally only in create_net()
        # All this sorting is probably unnecessary, but maintains consistency.
        self.seasons = board.current_turn.seasons[:]
        self.powers = sorted(board.powers.iterkeys())
        self.locs = sorted(board.locs.iterkeys())
        self.provinces = sorted(board.spaces.iterkeys())
        self.centers = [token for token in self.provinces if token.is_supply()]
        self.coastals = [board.spaces[token].is_coastal()
                         for token in self.provinces if token.is_coastal()]
        self.coastlines = dict((token,
                                [coast.key for coast in sorted(province.locations)])
                               for token, province in board.spaces.iteritems())
        self.borders = dict((token, sorted(province.borders_out))
                            for token, province in board.spaces.iteritems())

    def create_net(self, board):
        # Inputs: Seasons, Coasts*Players, Centers*Players
        # Outputs: Provinces*(Provinces+Self+Adjacents+Coastals)
        # Consider: Player*(Player-1)
        self.input_headers = map(str, self.seasons)
        for coast in self.locs:
            if coast[2]:
                self.input_headers.extend(
                    "%s %s %s: %s" % (coast[0], coast[1], coast[2], power)
                    for power in self.powers)
            else:
                self.input_headers.extend(
                    "%s %s: %s" % (coast[0], coast[1], power)
                    for power in self.powers)
        for center in self.centers:
            self.input_headers.extend("%s: %s" % (center, power)
                                      for power in self.powers)

        def dest(key):
            result = "%s %s" % (key[0], key[1])
            if key[2]:
                result += " " + str(key[2])
            return result

        self.output_headers = []
        for token in self.provinces:
            self.output_headers.extend("%s: From %s" % (token, prov)
                                       for prov in self.provinces)
            self.output_headers.extend("%s: To %s" % (token, dest(coast))
                                       for coast in self.coastlines[token])
            self.output_headers.extend("%s: To %s" % (token, dest(coast))
                                       for prov in self.borders[token]
                                       for coast in self.coastlines[prov])
            self.output_headers.extend("%s: CTO %s" % (token, dest(coast))
                                       for coast in self.coastals)

        ni = len(self.input_headers)
        no = len(self.output_headers)
        nh = int(round(sqrt(ni * no)))
        self.log_debug(15, 'Inputs: %s', self.input_headers)
        self.log_debug(15, 'Outputs: %s', self.output_headers)
        self.log_debug(7, "%d inputs => %d hidden => %d outputs", ni, nh, no)
        self.net = NN(ni, nh, no)

    def collect_values(self, inputs, board):
        turn = board.current_turn
        self.log_debug(11, 'Inputs: %s', inputs)
        outputs = self.net.update(inputs)
        self.log_debug(11, 'Outputs: %s', outputs)
        orders = self.parse_outputs(board, outputs)
        return orders

    def collect_inputs(self, board):
        ''' Converts a board state into a list of inputs for the neural net.'''
        inputs = [0] * len(self.input_headers)
        inputs[board.current_turn.index] = 1
        index = len(self.seasons)
        for coast in self.locs:
            units = board.locs[coast].province.units
            for power in self.powers:
                inputs[index] = len([unit for unit in units
                                     if unit.location.key == coast
                                     and unit.nation == power])
                index += 1
        for center in self.centers:
            owner = board.spaces[center].owner
            for power in self.powers:
                inputs[index] = int(owner == power)
                index += 1
        return inputs

    def parse_outputs(self, board, outputs):
        ''' Converts a neural net output list into potential orders.'''
        order_values = []
        index = 0
        for token in self.provinces:
            units = board.spaces[token].units
            if units:
                starting = dict()
                for prov in self.provinces:
                    if prov == token:
                        unit_value = outputs[index]
                        order_values.extend((unit_value,
                                             (unit.dislodged and DisbandOrder
                                              or RemoveOrder)(unit))
                                            for unit in units)
                    elif board.spaces[prov].units:
                        starting[prov] = outputs[index]
                    index += 1
                for coast in self.coastlines[token]:
                    hold_value = unit_value + outputs[index]
                    order_values.extend((hold_value, HoldOrder(unit))
                                        for unit in units
                                        if coast == unit.location.key)
                    index += 1
                for prov in self.borders[token]:
                    for key in self.coastlines[prov]:
                        dest = board.locs[key]
                        value = unit_value + outputs[index]
                        order_values.extend((value,
                                             (unit.dislodged and RetreatOrder
                                              or MoveOrder)(unit, dest))
                                            for unit in units
                                            if unit.can_move_to(dest))
                        for start, start_value in starting.iteritems():
                            for mover in board.spaces[start].units:
                                if mover.can_move_to(dest):
                                    value = start_value + outputs[index]
                                    order_values.extend(
                                        (value, SupportMoveOrder(unit, mover, dest))
                                        for unit in units)
                        index += 1
                for key in self.coastals:
                    dest = board.locs[key]
                    for unit in units:
                        if unit.can_be_convoyed():
                            order_values.append((unit_value + outputs[index],
                                                 ConvoyedOrder(unit, dest)))
                        elif unit.can_convoy():
                            for start, start_value in starting.iteritems():
                                for mover in board.spaces[start].units:
                                    if mover.can_be_convoyed():
                                        value = start_value + outputs[index]
                                        order_values.append(
                                            (value, ConvoyingOrder(unit, mover, dest)))
                    index += 1
            else:
                index += len(self.provinces)
                space = board.spaces[token]
                if space.owner:
                    for key in self.coastlines[token]:
                        coast = board.locs[key]
                        value = outputs[index]
                        order_values.append((value,
                                             BuildOrder(Unit(space.owner, coast))))
                        index += 1
                else:
                    index += len(self.coastlines[token])
                index += sum(len(self.coastlines[prov])
                             for prov in self.borders[token])
                index += len(self.coastals)
        for nation in board.powers.itervalues():
            waives = -nation.surplus()
            while waives > 0:
                order_values.append((0.5, WaiveOrder(nation)))
                waives -= 1
        self.log_debug(7, "Outputs: %d; Index: %d; Orders: %d",
                       len(outputs), index, len(order_values))
        for value, order in order_values:
            self.log_debug(15, "%s: %s", value, order)
        return order_values

    def learn(self, inputs, orders, board):
        ''' Trains the neural net to produce the given orders.'''
        outputs = self.parse_orders(orders, board)
        ins = [x for i, x in enumerate(self.input_headers) if inputs[i]]
        outs = [x for i, x in enumerate(self.output_headers) if outputs[i]]
        self.log_debug(1, 'Training %s to produce %s.', ins, outs)
        self.net.learn(inputs, outputs, iterations=1)

    def parse_orders(self, orders, board):
        ''' Converts an order set into a neural net output list.'''
        outputs = [0] * len(self.output_headers)
        index = 0
        prov_orders = {}
        for order in orders:
            if order.unit:
                prov_orders[order.unit.location.province.key] = order
        for token in self.provinces:
            if prov_orders.has_key(token):
                order = prov_orders[token]
                for prov in self.provinces:
                    if prov == token:
                        if isinstance(order, (DisbandOrder, RemoveOrder,
                                              HoldOrder, RetreatOrder, MoveOrder)):
                            outputs[index] = 1
                    elif board.spaces[prov].unit:
                        if (isinstance(order, (SupportOrder, ConvoyingOrder))
                                and order.supported.location.province == token):
                            outputs[index] = 1
                    index += 1
                for coast in self.coastlines[token]:
                    if (isinstance(order, HoldOrder)
                            and coast == order.unit.location.key):
                        outputs[index] = 1
                    index += 1
                for prov in self.borders[token]:
                    for key in self.coastlines[prov]:
                        if (isinstance(order, (RetreatOrder, MoveOrder,
                                               SupportMoveOrder))
                                and order.destination.key == key):
                            outputs[index] = 1
                        index += 1
                for key in self.coastals:
                    if (isinstance(order, (ConvoyedOrder, ConvoyingOrder))
                            and order.destination.key == key):
                        outputs[index] = 1
                    index += 1
            else:
                index += len(self.provinces)
                index += len(self.coastlines[token])
                index += sum(len(self.coastlines[prov])
                             for prov in self.borders[token])
                index += len(self.coastals)
        return outputs

    def load_weights(self):
        ''' Loads the stored data from previous sessions, if possible.'''
        valid = False
        try:
            fp = open(self.filename, 'r')
        except IOError:
            self.log_debug(11, "Couldn't read stats file '%s'", self.filename)
        else:
            self.log_debug(11, "Loading stats file '%s'", self.filename)
            try:
                pickler = Unpickler(fp)
                self.input_headers = pickler.load()
                wi = pickler.load()
                self.output_headers = pickler.load()
                wo = pickler.load()
                #self.seasons = pickler.load()
                #self.powers = pickler.load()
                #self.locs = pickler.load()
                #self.provinces = pickler.load()
                #self.centers = pickler.load()
                #self.coastals = pickler.load()
                #self.coastlines = pickler.load()
                #self.borders = pickler.load()
            finally:
                fp.close()
            ni = len(self.input_headers)
            no = len(self.output_headers)
            nh = len(wo)
            self.log_debug(7, "%d inputs => %d hidden => %d outputs", ni, nh, no)
            self.net = NN(ni, nh, no, wi, wo)
            valid = True
        return valid

    def store_weights(self):
        ''' Stores the weights used for the map.'''
        try:
            fp = open(self.filename, 'w')
        except IOError:
            self.log_debug(1, "Couldn't write stats file '%s'", self.filename)
        else:
            self.log_debug(11, "Writing stats file '%s'", self.filename)
            try:
                pickler = Pickler(fp, -1)
                pickler.dump(self.input_headers)
                pickler.dump(self.net.wi)
                pickler.dump(self.output_headers)
                pickler.dump(self.net.wo)
                #pickler.dump(self.seasons)
                #pickler.dump(self.powers)
                #pickler.dump(self.locs)
                #pickler.dump(self.provinces)
                #pickler.dump(self.centers)
                #pickler.dump(self.coastals)
                #pickler.dump(self.coastlines)
                #pickler.dump(self.borders)
            finally:
                fp.close()

    def close(self):
        ''' Called whenever a Neurotic closes.'''
        self.store_weights()
def load(self, N=.03, M=.007, iterations=5000):
    ''' Trains the neural network with match statistics for the current heroid '''
    patterns = self.data[self.heroid][:300]
    print("n = %d" % len(patterns))
    NN.train(self, patterns, iterations, N, M)