def __unary(self, tcontext, funcname):
    """unary = ("&" | "*") unary | ("+" | "-")? term

    Prefix operators recurse back into unary; unary plus is a no-op and
    unary minus is lowered to the subtraction ``0 - term``.
    """
    if tcontext.consume_symbol('&'):
        # Address-of: &unary
        return NodeFactory.create_address_node(
            self.__unary(tcontext, funcname))
    if tcontext.consume_symbol('*'):
        # Dereference: *unary
        return NodeFactory.create_dereference_node(
            self.__unary(tcontext, funcname))
    if tcontext.consume_symbol('+'):
        # Unary plus has no effect; just parse the bare term.
        return self.__term(tcontext, funcname)
    if tcontext.consume_symbol('-'):
        # Unary minus becomes (0 - term).
        zero = NodeFactory.create_num_node(0)
        return NodeFactory.create_ope_node(
            NodeTypes.SUB, zero, self.__term(tcontext, funcname))
    return self.__term(tcontext, funcname)
def solve(self, problem, all_solutions=False):
    """Breadth-first search over ``problem``'s state space.

    :param problem: search problem exposing ``initial_state``, ``is_goal``
        and ``pretty_print``; expanded via ``node_factory.expand``.
    :param all_solutions: when True, print every goal state found and keep
        searching; when False, return the first goal node immediately.
    :return: a goal node, or None when the space is exhausted without one.
    """
    self.reset()
    self.problem = problem
    # Generate the initial (root) node.
    node_factory = NodeFactory(False, True)
    self.max_frontier_node_count = 0
    node = node_factory.make_node(problem.initial_state)
    # For efficiency, test the root BEFORE putting it on the queue.
    if problem.is_goal(node.state):
        self.solution = node
        self.total_node_count = 1
        if not all_solutions:
            return node
        # BUGFIX: the original incremented an undefined `counter` here,
        # raising UnboundLocalError whenever the root was already a goal
        # and all_solutions was requested.
        self.problem.pretty_print(self.solution.state)
    # Start the frontier queue with the root.
    frontier = deque()
    frontier.append(node)
    # Search until nothing is left to explore (frontier empty) or, when
    # all_solutions is False, until a solution is found.
    while len(frontier) > 0:
        node = frontier.popleft()
        # POTENTIAL IMPROVEMENT: use a generator for expansion.
        for child in node_factory.expand(node, problem):
            if problem.is_goal(child.state):
                if self.verbose:
                    print("Max Frontier Count: ", self.max_frontier_node_count)
                self.solution = child
                if all_solutions:
                    # Print each solution as found and keep exploring.
                    self.problem.pretty_print(self.solution.state)
                else:
                    self.total_node_count = node_factory.node_count
                    return child
            frontier.append(child)
            if len(frontier) > self.max_frontier_node_count:
                self.max_frontier_node_count = len(frontier)
    if all_solutions:
        self.total_node_count = node_factory.node_count
        # BUGFIX: return the last recorded solution; the original returned
        # the loop variable `child`, which is unbound when the root has no
        # children.
        return self.solution
    self.solution = None
    self.total_node_count = node_factory.node_count
    return None
def solve(self, problem, path=True, all_solutions=False):
    """Search ``problem`` with the configured strategy (BFS or DFS).

    The frontier deque is used as a queue for BFS (pop from the left)
    and as a stack for DFS (pop from the right). Returns the first goal
    node found, the accumulated solution list when ``all_solutions`` is
    set, or None when the space is exhausted with no goal.
    """
    self.reset()
    self.problem = problem
    # Root node; record_parent lets the path be rebuilt afterwards.
    node_factory = NodeFactory(verbose=self.verbose, record_parent=path)
    self.max_frontier_node_count = 0
    root = node_factory.make_node(problem.initial_state)
    # Cheap win: test the root before it ever reaches the frontier.
    if problem.is_goal(root.state):
        self.solution.append(root)
        self.total_node_count = 1
        if not all_solutions:
            return self.solution
    frontier = deque((root,))
    self.visited.append(root.state)
    # Explore until the frontier is exhausted (or a goal short-circuits).
    while frontier:
        if self.strategy == "BFS":
            current = frontier.popleft()   # queue behaviour
        elif self.strategy == "DFS":
            current = frontier.pop()       # stack behaviour
        for child in self.valid_children(current, problem, node_factory):
            if child.depth > self.max_depth:
                self.max_depth = child.depth
            if problem.is_goal(child.state):
                if self.verbose:
                    print("Max Frontier Count: ", self.max_frontier_node_count)
                self.solution.append(child)
                self.total_node_count = node_factory.node_count
                if not all_solutions:
                    return child
            # Goal children are still enqueued so their subtrees can yield
            # further solutions when all_solutions is requested.
            frontier.append(child)
            if len(frontier) > self.max_frontier_node_count:
                self.max_frontier_node_count = len(frontier)
    self.total_node_count = node_factory.node_count
    if self.solution == []:
        self.solution = None
    return self.solution
def BuildSampleXorNetwork(self):
    """Build a minimal 2-input / 2-hidden / 1-output network for XOR.

    This is the "simplest" net that can do something "useful"; it exists
    so training can be tested and debugged end to end. The random seed is
    fixed so unit tests see the same weight sequence every run.
    """
    layer_specs = [
        {'transfer_function': 'linear',    'number_of_nodes': 2, 'layerName': 'input'},
        {'transfer_function': 'sigmoidal', 'number_of_nodes': 2, 'layerName': 'hidden layer'},
        {'transfer_function': 'linear',    'number_of_nodes': 1, 'layerName': 'output'},
    ]
    # NodeFactory builds one Node per requested transfer function, so each
    # loop pass yields a Layer holding a list of freshly made Nodes.
    factory = NodeFactory()
    for spec in layer_specs:
        nodes = [factory.makeNode(spec['transfer_function'])
                 for _ in range(spec['number_of_nodes'])]
        self.layerList.append(Layer(spec['layerName'], 1, nodes, True))
    # Fixed seed keeps the random weight matrices reproducible in tests.
    np.random.seed(42)
    for idx, layer in enumerate(self.layerList[1:]):
        # Weight matrix between layer idx and this layer, sized by the two
        # layers' node counts.
        rows = len(self.layerList[idx].NodeList)
        cols = len(layer.NodeList)
        self.weightList.append(Weight(rows, cols))
def __init__(self, layer_name, layer_number, node_list, has_bias):
    """Create a layer of nodes, optionally appending a bias node.

    The output row spans every node (bias included), while the gradient
    and output-derivative rows exclude the bias node, which contributes
    neither.
    """
    self.layerName = layer_name
    self.layerNumber = layer_number
    self.NodeList = node_list
    self.hasBias = has_bias
    if has_bias:
        # The bias node is appended to the caller-supplied list in place.
        self.NodeList.append(NodeFactory().makeNode('bias'))
    width = len(self.NodeList)
    self.output = matrix(zeros((1, width)))
    # Bias node has no gradient/derivative entry, so shrink by one if present.
    grad_width = width - 1 if has_bias else width
    self.gradient = matrix(zeros((1, grad_width)))
    self.output_derivative = matrix(zeros((1, grad_width)))
def __func(self, tcontext):
    """func = "int" ident "(" ("int" ident)* ")" "{" stmt* "}"

    Parses one function definition: return type, name, the parameter list
    (registering each parameter as a local of the function), and the body.
    """
    tcontext.expect_type()
    funcname = tcontext.expect_ident().text
    tcontext.expect_symbol('(')
    params = []
    while not tcontext.consume_symbol(')'):
        # Parameter type, with any number of '*' giving pointer depth.
        base = tcontext.expect_type()
        depth = 0
        while tcontext.consume_symbol('*'):
            depth += 1
        typeinfo = TypeInfo(self.__get_type_from_typename(base.text), depth)
        # Register the parameter name in this function's scope; the
        # returned slot (order) identifies its stack position.
        ident = tcontext.expect_ident()
        slot = self.__regist_varname(ident.text, funcname, typeinfo)
        params.append((slot, typeinfo))
        if not tcontext.consume_symbol(','):
            # No comma after a parameter: the list must close right here.
            tcontext.expect_symbol(')')
            break
    if tcontext.current.text != '{':
        error('関数の"{"がありません')
    return NodeFactory.create_func_node(
        funcname, params, self.__stmt(tcontext, funcname))
def __process(self, queue=None, **kwargs):
    """Convert one (key, val) pair into a node and process its children.

    :param queue: breadth-first work queue; children are appended to it
        instead of being recursed into when traversal_mode is 'breadth'.
    Keyword args: key, val, path, parent.
    :return: the node's processed value, or None when the node's visit
        limit has been exceeded.
    """
    # Build the node for this value.
    key = kwargs.pop('key', None)
    val = kwargs.pop('val', None)
    path = kwargs.pop('path', '') or ''
    converted = NodeFactory.convert(key, val, path)
    node = converted.__node__
    # Link to the parent according to the configured structure.
    # BUGFIX: the original compared strings with `is`, which tests object
    # identity and only works by accident of string interning; use `==`.
    parent = kwargs.pop('parent', None)
    if parent:
        if self.config.structure == 'Tree':
            node.add_parent(parent)
        elif self.config.structure == 'Graph':
            node.add_neighbor(parent)
    # Exit if we've already visited this node as often as allowed.
    if self.config.node_visit_limit != -1 and node.encountered > self.config.node_visit_limit:
        return
    node.processed += 1
    # Pre-order callbacks.
    self.__exec_callbacks(node, 0)
    # Dict values: recurse into (or enqueue) each entry.
    if node.container == ContainerType.dict:
        # BUGFIX: iteritems() is Python-2-only; items() behaves the same
        # here (values are reassigned in place, no keys added or removed).
        for k, v in node.val.items():
            process_kwargs = {
                'key': k,
                'val': v,
                'path': path + '.' + k,
                'parent': node
            }
            if self.config.traversal_mode == 'breadth':
                # NOTE(review): queue.append(**kwargs) assumes a custom
                # queue type — a plain deque would reject kwargs; confirm.
                queue.append(**process_kwargs)
            else:
                node.val[k] = self.__process(**process_kwargs)
    # List items: same treatment, with index-based paths.
    elif node.container == ContainerType.list:
        for i, item in enumerate(node.val):
            process_kwargs = {
                'key': None,
                'val': item,
                'path': path + '.' + str(i),
                'parent': node
            }
            if self.config.traversal_mode == 'breadth':
                queue.append(**process_kwargs)
            else:
                node.val[i] = self.__process(**process_kwargs)
    # Post-order callbacks.
    self.__exec_callbacks(node, 1)
    return node.val
def solve(self, problem):
    """Simulated annealing over ``problem``'s objective function.

    Tracks the best node seen so far in ``self.solution`` (a one-element
    list) and returns it when the temperature schedule ends or a node
    with objective value 0 is found.
    """
    self.problem = problem
    # Not a great name for it, but Node is a useful structure for annealing.
    node_factory = NodeFactory(verbose=self.verbose)
    node = node_factory.make_node(problem.get_initial_state())
    node.value = problem.apply_objective_function(node.state)
    # At each iteration, self.solution contains the best seen so far.
    self.solution = [node]
    if self.verbose:
        print("Initial state: ", problem.pretty_print(node))
        print("Evaluation: ", node.value)
    if node.value == 0:
        self.total_node_count = 1
        return self.solution
    while self.temperature > self.end_temp:
        self.steps += 1
        # Sample a random neighbor and decide whether to move there.
        next_node = node_factory.make_node(
            problem.get_random_neighbor(node.state))
        next_node.value = problem.apply_objective_function(next_node.state)
        self.value_data.append(next_node.value)
        if next_node.value == 0:
            # BUGFIX: store the goal node itself; the original stored the
            # previous `node`, returning a non-goal state as the solution.
            self.solution = [next_node]
            self.total_node_count = node_factory.node_count
            return self.solution
        if next_node.value <= node.value:
            # Accept improving (or equal) moves unconditionally.
            node = next_node
            self.moves_to_better += 1
            if node.value < self.solution[0].value:
                self.solution = [node]
        elif random.uniform(0, 1) <= self.calculate_probability(
                node.value - next_node.value):
            # Occasionally accept a worsening move to escape local minima.
            node = next_node
            self.moves_to_worse += 1
        self.adjust_temperature()
    self.total_node_count = node_factory.node_count
    self.elapsed_time = self.calculate_elapsed_time()
    if self.verbose:
        print("Elapsed Time: %sms" % (str(self.elapsed_time)))
    return self.solution
def __assign(self, tcontext, funcname):
    """assign = equality ("=" assign)?

    Assignment is right-associative, so the right-hand side recurses back
    into assign.
    """
    lhs = self.__equality(tcontext, funcname)
    if not tcontext.consume_symbol('='):
        return lhs
    rhs = self.__assign(tcontext, funcname)
    return NodeFactory.create_assign_node(lhs, rhs)
def run(self):
    """Audit node records against the files on disk, optionally repairing.

    For every node under the configured starting identifier, checks that
    the backing file exists and that its size and modification date match
    the stored metadata; when ``self.fix`` is set, repairs the store.
    """
    sync = Sync([])
    nf = NodeFactory()
    store = LocalStoreManager()
    # Both '~' and '!' are allowed characters; normalize for the query.
    start = self.start.value.replace('~', '!')
    # NOTE(review): identifier is interpolated into the SQL string; if it
    # can come from an untrusted source, switch to a parameterized query.
    query = "select identifier, location, node from nodes where identifier like '%s%%' order by identifier" % start
    res = store.query(query)
    # Check through nodes under the starting point.
    for record in res:
        vosid = record['identifier']
        location = record['location']
        # Get file metadata; strip the 'file://' scheme for the path.
        meta = sync.getMetadata(location[7:])
        # File existence: optionally delete the orphaned record.
        if len(meta) == 0:
            print("The file %s is missing" % location)
            if self.fix.value:
                store.delete_node(vosid)
            continue
        node = nf.get_node(etree.fromstring(record['node']))
        # File size check.
        if cfg.LENGTH not in node.properties:
            print("The file size is missing for: %s" % vosid)
        elif node.properties[cfg.LENGTH] != meta['size']:
            print("The sizes for %s and %s do not match" % (vosid, location))
            if self.fix.value:
                node.properties[cfg.LENGTH] = meta['size']
                store.update_node(vosid, node, vosid)
        # Date check; current tolerance is 1 second.
        # NOTE(review): relativedelta() is given a timedelta here rather
        # than two datetimes — confirm against the dateutil API in use.
        if relativedelta(
                parser.parse(node.properties[cfg.DATE]) -
                utc.localize(parser.parse(meta['date']))).seconds > 1:
            print(
                "The dates for %s and %s do not match: %s %s" %
                (vosid, location, node.properties[cfg.DATE], meta['date']))
            if self.fix.value:
                # BUGFIX: repair the date with the file's date; the original
                # copy-pasted meta['size'] into the DATE property.
                node.properties[cfg.DATE] = meta['date']
                store.update_node(vosid, node, vosid)
def __parse_common_func(self, tcontext, funcname, map_, next_func):
    """Shared left-associative binary-operator parser.

    ``map_`` maps operator symbols to node types; ``next_func`` parses the
    next-higher-precedence level. Operators are folded left-to-right until
    no symbol in ``map_`` matches, then the accumulated node is returned.
    """
    node = next_func(tcontext, funcname)
    while True:
        matched = False
        for symbol, op_type in map_.items():
            if tcontext.consume_symbol(symbol):
                node = NodeFactory.create_ope_node(
                    op_type, node, next_func(tcontext, funcname))
                matched = True
                break
        if not matched:
            # No operator consumed this round: the chain is complete.
            return node
def __term(self, tcontext, funcname):
    ''' term = "(" expr ")" | "int" "*"* ident | ident ("(" expr* ")")? | num

    Parses the highest-precedence level: a parenthesized expression, a
    variable declaration, an identifier (call or variable reference), or
    a numeric literal as the final fallback.
    '''
    # Parenthesized sub-expression.
    token = tcontext.consume_symbol('(')
    if token:
        node = self.__expr(tcontext, funcname)
        tcontext.expect_symbol(')')
        return node
    # Declaration: a type name, "*"s giving pointer depth, then the name.
    token = tcontext.consume_type()
    if token:
        ptr_level = 0
        while tcontext.consume_symbol('*'):
            ptr_level += 1
        vtype = self.__get_type_from_typename(token.text)
        typeinfo = TypeInfo(vtype, ptr_level)
        name = tcontext.expect_ident().text
        # Registering assigns the variable its slot (order) in this scope.
        order = self.__regist_varname(name, funcname, typeinfo)
        node = NodeFactory.create_ident_node(order, typeinfo)
        return node
    # Identifier: either a call "name(args...)" or a plain variable use.
    token = tcontext.consume_ident()
    if token:
        name = token.text
        if tcontext.consume_symbol('('):
            args = []
            while not tcontext.consume_symbol(')'):
                args.append(self.__expr(tcontext, funcname))
                if not tcontext.consume_symbol(','):
                    # No comma after an argument: ")" must follow.
                    tcontext.expect_symbol(')')
                    break
            node = NodeFactory.create_call_node(name, args)
        else:
            # Plain reference: look up the slot registered at declaration.
            order, typeinfo = self.__get_order_and_type_from_varname(
                name, funcname)
            node = NodeFactory.create_ident_node(order, typeinfo)
        return node
    # Fallback: must be a number (expect_num errors otherwise).
    token_num = tcontext.expect_num()
    return NodeFactory.create_num_node(token_num.value)
def BuildNetworkFromJSON(self, inputFile):
    """Build the network described by a JSON definition file.

    A Neural Network here is nothing more than a list of Layers (each a
    list of Nodes) plus a list of Weight matrices between them.

    :param inputFile: path to a JSON file with a 'layers' list; each entry
        supplies transfer_function, number_of_nodes, layerName, has_bias.
    """
    # BUGFIX: stray `self.weight_update()` text/calls had leaked into the
    # original docstring and the layer-building loop; updating weights
    # while the network is still being constructed is wrong, so they are
    # removed.
    with open(inputFile) as nd:
        nn = nd.read()
    struct = json.loads(nn)
    for idx, x in enumerate(struct['layers']):
        # Build one Layer per JSON entry: NodeFactory makes one Node per
        # requested node, all sharing the layer's transfer function.
        # @todo allow for heterogeneous lists of nodes, i.e. not all nodes
        # need to have the same transfer function.
        nf = NodeFactory()
        node_list = [nf.makeNode(x['transfer_function'])
                     for y in range(0, x['number_of_nodes'])]
        self.layerList.append(Layer(
            x['layerName'], idx, node_list,
            True if x['has_bias'] == 1 else False))
    # Create random weight matrices between consecutive layers. Rows come
    # from the previous layer's node count; a bias node in the next layer
    # receives no incoming weights, hence the -1 when it has one.
    x_minus_one = len(self.layerList[0].NodeList)
    for x in self.layerList[1:]:
        self.weightList.append(
            Weight(x_minus_one, len(x.NodeList) - (1 if x.hasBias else 0)))
        x_minus_one = len(x.NodeList)
def __stmt(self, tcontext, funcname):
    ''' stmt = "{" stmt* "}" | "if" "(" expr ")" stmt ("else" stmt)? | "while" "(" expr ")" stmt | "for" "(" expr? ";" expr? ";" expr? ")" stmt | "return" expr? ";" | expr ";" '''
    if tcontext.consume_symbol('{'):
        # Compound statement: gather statements until the closing '}'.
        stmts = []
        while not tcontext.consume_symbol('}'):
            if tcontext.is_empty():
                # Error text (Japanese): the block is missing its '}'.
                error('ブロックの"}"がありません')
            stmts.append(self.__stmt(tcontext, funcname))
        node = NodeFactory.create_block_node(stmts)
    elif tcontext.consume_if():
        tcontext.expect_symbol('(')
        expr = self.__expr(tcontext, funcname)
        tcontext.expect_symbol(')')
        stmt = self.__stmt(tcontext, funcname)
        # The condition consume_else() is evaluated first, so the extra
        # __stmt call only happens when an 'else' keyword is present.
        else_stmt = self.__stmt(
            tcontext, funcname) if tcontext.consume_else() else None
        if else_stmt:
            node = NodeFactory.create_if_else_node(expr, stmt, else_stmt)
        else:
            node = NodeFactory.create_if_node(expr, stmt)
    elif tcontext.consume_while():
        tcontext.expect_symbol('(')
        expr = self.__expr(tcontext, funcname)
        tcontext.expect_symbol(')')
        stmt = self.__stmt(tcontext, funcname)
        node = NodeFactory.create_while_node(expr, stmt)
    elif tcontext.consume_for():
        # for "(" init? ";" cond? ";" step? ")" body — every clause optional.
        tcontext.expect_symbol('(')
        expr1 = None if tcontext.consume_symbol(';') else self.__expr(
            tcontext, funcname)
        if expr1:
            tcontext.expect_symbol(';')
        expr2 = None if tcontext.consume_symbol(';') else self.__expr(
            tcontext, funcname)
        if expr2:
            tcontext.expect_symbol(';')
        else:
            # A missing condition means "loop forever": use a dummy node.
            expr2 = NodeFactory.create_for_infinite_dummy_node()
        expr3 = None if tcontext.consume_symbol(')') else self.__expr(
            tcontext, funcname)
        if expr3:
            tcontext.expect_symbol(')')
        stmt = self.__stmt(tcontext, funcname)
        node = NodeFactory.create_for_node(expr1, expr2, expr3, stmt)
    elif tcontext.consume_return():
        # 'return;' carries no expression; 'return expr;' carries one.
        if tcontext.consume_symbol(';'):
            expr = None
        else:
            expr = self.__expr(tcontext, funcname)
            tcontext.expect_symbol(';')
        node = NodeFactory.create_return_node(expr)
    else:
        # Plain expression statement.
        node = self.__expr(tcontext, funcname)
        tcontext.expect_symbol(';')
    return node
def solve(self, problem, path=True, all_solutions=False):
    # Search with the configured strategy: BFS, DFS, or IDDFS (iterative
    # deepening). The outer while loop exists solely for IDDFS: each time
    # the inner search exhausts the frontier within the current depth
    # limit, the limit is raised and the search restarts from the root.
    self.reset()
    self.problem = problem
    # Generate the initial (root) node.
    node_factory = NodeFactory(verbose=self.verbose, record_parent=path)
    self.max_frontier_node_count = 0
    node = node_factory.make_node(problem.initial_state)
    # For efficiency, check if node is goal state BEFORE putting on Q.
    if problem.is_goal(node.state):
        self.solution.append(node)
        self.total_node_count = 1
        if not all_solutions:
            return self.solution
    # Start the frontier Q by adding the root.
    frontier = deque()
    frontier.append(node)
    # if self.dupstrat == "simple_list":
    self.visited.append(node.state)
    if self.dupstrat == "advanced_list":
        # advanced_list tracks whole nodes rather than bare states.
        self.visited.append(node)
    # Outer loop: repeats the inner search for each IDDFS depth limit;
    # for BFS/DFS it effectively runs once.
    while len(frontier) > 0:
        while len(frontier) > 0:
            self.steps = self.steps + 1
            # BFS pops the front (queue); DFS and IDDFS pop the back (stack).
            if self.strategy == "BFS":
                node = frontier.popleft()
            elif self.strategy == "DFS":
                node = frontier.pop()
            elif self.strategy == "IDDFS":
                node = frontier.pop()
            for child in self.valid_children(node, problem, node_factory):
                if self.strategy == "IDDFS":
                    if child.depth > self.max_depth:
                        self.max_depth = child.depth
                    if problem.is_goal(child.state):
                        if self.verbose:
                            print("Max Frontier Count: ", self.max_frontier_node_count)
                        self.solution.append(child)
                        self.total_node_count = node_factory.node_count
                        if not all_solutions:
                            return child
                    # IDDFS only expands below the current depth limit.
                    if child.depth <= self.id_depth:
                        frontier.append(child)
                        if len(frontier) > self.max_frontier_node_count:
                            self.max_frontier_node_count = len(frontier)
                else:
                    if child.depth > self.max_depth:
                        self.max_depth = child.depth
                    if problem.is_goal(child.state):
                        if self.verbose:
                            print("Max Frontier Count: ", self.max_frontier_node_count)
                        self.solution.append(child)
                        self.total_node_count = node_factory.node_count
                        if not all_solutions:
                            return child
                    frontier.append(child)
                    if len(frontier) > self.max_frontier_node_count:
                        self.max_frontier_node_count = len(frontier)
        if self.strategy == "IDDFS" and self.id_depth < self.max_id_depth:
            # Frontier exhausted under the current limit: deepen and
            # restart from a fresh root, clearing the visited list.
            self.id_depth = self.id_depth + 1
            node = node_factory.make_node(problem.initial_state)
            frontier.append(node)
            self.visited = []
        else:
            # Non-IDDFS strategies (or depth cap reached): finish up.
            self.total_node_count = node_factory.node_count
            if self.solution == []:
                self.solution = None
            return self.solution
# Import the python ledger object; data type to be updated to allow easier
# modification.
# NOTE(review): pickle.load on external data is unsafe — only load ledger
# and seed files this node itself created.
with open(ledger_dir, "rb") as ledger_file:
    ledger = pickle.load(ledger_file)
# Import the secret key seed and derive the signing/verify key pair.
with open(seed_dir, "rb") as seed_file:
    seed = pickle.load(seed_file)
signing_key = nacl.signing.SigningKey(seed.encode("ascii"))
verify_key = signing_key.verify_key
pubkey = verify_key.encode(encoder=nacl.encoding.HexEncoder)
print(myIP)
print(pubkey)
# Enter address for node block rewards.
my_address = pubkey
# BUGFIX: pass the myIP variable (printed above) rather than the literal
# string "myIP", which would have been used as this node's address.
factory = NodeFactory(reactor, ledger, my_address, signing_key, PEER_PORT,
                      myIP, ns)
reactor.callLater(5, factory.startPOW)
stdio.StandardIO(factory.buildCommandProtocol())
if args.peer:
    reactor.connectTCP(BOOTSTRAP_ADDRESS, PEER_PORT, factory)

# def maintainPeerList(factory):
#     """ Looping call function for maintaining a list of peers """
#     if factory.peerListSize() < ns.PEER_LIST_SIZE:
#         factory.requestPeers()
#         print("maintain")
# lc = LoopingCall(maintainPeerList, factory)
# # reactor.callLater(5, lc)
def solve(self, problem, path=True, all_solutions=False):
    # Search with the configured strategy; supports BFS, DFS, and a
    # depth-limited DFS (DL_DFS) handled by depth_limited_search().
    self.reset()
    self.problem = problem
    # Generate the initial (root) node.
    node_factory = NodeFactory(verbose=self.verbose, record_parent=path)
    self.max_frontier_node_count = 0
    if self.strategy == "DL_DFS":
        print("Depth limit: ", self.max_depth)
        result = self.depth_limited_search(problem, node_factory,
                                           self.max_depth)
        if result == 'cutoff':
            print("No solution found")
            return result
        # NOTE(review): a non-'cutoff' result falls through into the
        # generic BFS/DFS search below instead of being returned — confirm
        # this is intentional.
    if self.verbose:
        print("Searching nodes")
    node = node_factory.make_node(problem.initial_state)
    # if self.strategy == "BFS" and self.tree == False:
    #     return BFS_Graph.breadth_first_graph_search(self, problem, node_factory, all_solutions)
    # For efficiency, check if node is goal state BEFORE putting on Q.
    if problem.is_goal(node.state):
        self.solution.append(node)
        self.total_node_count = 1
        if not all_solutions:
            return self.solution
    # Start the frontier Q by adding the root.
    frontier = deque()
    frontier.append(node)
    self.visited.append(node.state)
    # Search until nothing is left to explore (frontier empty) or a
    # solution is found (when all_solutions is False).
    while len(frontier) > 0:
        # BFS pops the front (queue); DFS pops the back (stack).
        if self.strategy == "BFS":
            node = frontier.popleft()
        elif self.strategy == "DFS":
            node = frontier.pop()
        for child in self.valid_children(node, problem, node_factory):
            # NOTE(review): self.max_depth is both the DL_DFS limit above
            # and a deepest-seen tracker here — the two uses conflict.
            if child.depth > self.max_depth:
                self.max_depth = child.depth
            if problem.is_goal(child.state):
                if self.verbose:
                    print("")
                    print("")
                    print("Max Frontier Count: ", self.max_frontier_node_count)
                    print("Visited: ", len(self.visited))
                self.solution.append(child)
                self.total_node_count = node_factory.node_count
                if not all_solutions:
                    return child
            frontier.append(child)
            if len(frontier) > self.max_frontier_node_count:
                self.max_frontier_node_count = len(frontier)
    self.total_node_count = node_factory.node_count
    if self.solution == []:
        self.solution = None
    return self.solution