def on_mouse_move(self, event, position):
    """Dispatch mouse movement to one of three modes: viewport scrolling,
    item dragging, or hover/highlight tracking.

    position -- current mouse position in canvas coordinates
    """
    if self.scroll_point:
        # Scrolling mode: shift the viewport by the mouse delta.
        change = utils.make_vector(position, self.scroll_point)
        self.canvas.set_viewport(
            utils.vector_add(self.canvas.get_viewport(), change))
        return
    if self.drag_items:
        # Dragging mode: move every dragged item by the same delta from
        # its recorded origin, snapped to the grid.
        change = utils.make_vector(self.drag_mouse_origin, position)
        for i, item in enumerate(self.drag_items):
            new_position = utils.vector_add(self.drag_items_origin[i], change)
            new_position = utils.snap_to_grid(new_position, self.grid_size)
            item.set_position(new_position)
        self.canvas.redraw()
        return
    # Hover mode: update cursor and mouseover highlight.
    item = self.get_item_at_position(position)
    if item:
        self.canvas.set_cursor(get_cursor(item.action))
        if self.selection:
            self.mouseover_items = item.get_group()
            self.set_highlight()
    elif self.mouseover_items:
        # Mouse left a previously hovered item: clear cursor and highlight.
        self.canvas.set_cursor(None)
        self.mouseover_items = []
        self.set_highlight()
    else:
        self.canvas.set_cursor(None)
def add_extra_simrun_items(self, items):
    """Append SimRun substitution labels (time/clock per transition,
    size per edge) to *items*."""
    def text_fn(text, item, attr):
        # Returns a closure producing a one-element tuple "label: value",
        # read lazily from the item's attribute at draw time.
        return lambda: ("{0}: {1}".format(text, getattr(item, attr)),)
    for item in self.net.transitions():
        box_position = item.box.get_position()
        if item.get_time_substitution():
            size = item.box.size
            position = utils.vector_add_t(box_position, size, 0.5)
            i = citems.SimRunLabel(
                None, "simrunbox",
                citems.RelativePlacement(item.box, position))
            i.text_fn = text_fn("time", item, "time_substitution_code")
            items.append(i)
            # Shift down so a following clock label does not overlap.
            box_position = utils.vector_add(box_position, (0, 20))
        if item.get_clock_substitution():
            size = item.box.size
            position = utils.vector_add_t(box_position, size, 0.5)
            i = citems.SimRunLabel(
                None, "simrunbox",
                citems.RelativePlacement(item.box, position))
            i.text_fn = text_fn("clock", item, "clock_substitution_code")
            items.append(i)
    for item in self.net.edges():
        if item.get_size_substitution():
            # Place the size label just below the edge inscription.
            position = utils.vector_add(item.inscription.get_position(), (0, 10))
            i = citems.SimRunLabel(
                None, "simrunbox",
                citems.RelativePlacement(item.inscription, position))
            i.text_fn = text_fn("size", item, "size_substitution_code")
            items.append(i)
def get_packet_items(self):
    """Build canvas items showing the packets queued on each outgoing edge.

    Returns a list of a Box per edge (cached in self.packet_boxes) plus one
    Text item per packet inside that box.
    """
    def get_size(self, cr):
        # size_fn callback: box wide enough for the longest text, 13px per
        # line plus padding; None (implicit) when there are no texts.
        if not self.texts:
            return
        tx = max(utils.text_size(cr, t)[0] for t in self.texts)
        tx += 20
        ty = 13 * len(self.texts) + 4
        return (tx, ty)
    result = []
    color = (0.8, 0.3, 0.1, 0.85)
    color_active = (1, 1, 1)
    color_inactive = (0.8, 0.8, 0.8)
    for edge in self.perspective.runinstance.net.edges_out():
        packets = self.perspective.get_packets_info(edge.id)
        packet_box = self.packet_boxes.get(edge.id)
        if packet_box is None:
            # First time we see this edge: create and cache its box.
            position = utils.vector_add(edge.inscription.get_position(), (0, 15))
            placement = citems.AbsPlacement(position)
            packet_box = citems.Box(None, "packetbox", placement)
            packet_box.size_fn = get_size
            packet_box.background = color
            packet_box.radius = 5
            self.packet_boxes[edge.id] = packet_box
        result.append(packet_box)
        # Packet tuples are (process_id, origin_id, top, text).
        packet_box.texts = [p[3] for p in packets]
        for i, (process_id, origin_id, top, text) in enumerate(packets):
            position = utils.vector_add(packet_box.get_position(), (10, 13 * i))
            t = citems.Text(
                None, "packet",
                packet_box.get_relative_placement(position),
                text)
            if top:
                # Only the head-of-queue packet is active/clickable data.
                t.color = color_active
                t.packet_data = (process_id, origin_id)
            else:
                t.color = color_inactive
                t.packet_data = None
            t.padding_y = 4
            t.z_level = 15
            t.action = None
            result.append(t)
    return result
def is_legal_move(self, board, start, steps, player):
    """Move is a tuple which contains starting points of checkers to be
    moved during a player's turn. An on-board move is legal if both the
    destinations are open. A bear-off move is the one where a checker is
    moved off-board. It is legal only after a player has moved all his
    checkers to his home."""
    # dest1/dest2: landing points of the two sub-moves.
    dest1, dest2 = vector_add(start, steps)
    dest_range = range(0, 24)
    move1_legal = move2_legal = False
    if dest1 in dest_range:
        if self.is_point_open(player, board[dest1]):
            # NOTE(review): the first checker is actually moved here so the
            # second sub-move is validated against the updated board —
            # presumably *board* is a scratch copy; confirm at call sites.
            self.move_checker(board, start[0], steps[0], player)
            move1_legal = True
    else:
        # Off-board destination: legal only when bearing off is allowed.
        if self.allow_bear_off[player]:
            self.move_checker(board, start[0], steps[0], player)
            move1_legal = True
    if not move1_legal:
        return False
    if dest2 in dest_range:
        if self.is_point_open(player, board[dest2]):
            move2_legal = True
    else:
        if self.allow_bear_off[player]:
            move2_legal = True
    return move1_legal and move2_legal
def solve_move(self, ntuple):
    """Mock solver for a 'move' n-tuple: prints the planned move and
    teleports the instance via setpos (no real motion)."""
    print('mock solver: begin move')
    world = self.world
    home_pos = world.robot1_instance.pos
    for parameters in ntuple.parameters:
        acted_upon = parameters.acted_upon
        heading = parameters.heading
        direction = parameters.direction
        goal = parameters.goal
        inst = getattr(world, acted_upon)
        if goal:
            # TODO: super/subtype relations missing!
            if goal.ontological_category.type() == 'location':
                print('| move(x={x}, y={y}, z=0.0)'.format(x=goal.xCoord, y=goal.yCoord))
                self.setpos(inst, (float(goal.xCoord), float(goal.yCoord), 0.0))
            else:
                # We assume it's an object, like a box or a block
                obj = getattr(world, goal.referent.type())
                print('| move(x={x}, y={y}, z={z})'.format(x=obj.pos.x, y=obj.pos.y, z=obj.pos.z))
                self.setpos(inst, (obj.pos.x, obj.pos.y, obj.pos.z))
        elif direction == 'home':
            # NOTE(review): the message prints z=0.0 but setpos uses
            # home_pos.z — confirm which is intended.
            print('| move(x={x}, y={y}, z=0.0)'.format(x=home_pos.x, y=home_pos.y))
            self.setpos(inst, (home_pos.x, home_pos.y, home_pos.z))
        elif heading:
            # Relative move: distance n along the named heading vector.
            n = float(parameters.distance.value)
            pos = self.getpos(inst)
            newpos = vector_add(pos, vector_mul(n, self.headings[heading]))
            print('| move(x={0[0]}, y={0[1]}, z={0[2]})'.format(newpos))
            self.setpos(inst, newpos)
    print('mock solver: end move')
def get_canvas_items(self):
    """Return this element's canvas items: box, guard, and — when a clock
    is present — a clock icon anchored near the box's top-left corner."""
    result = [self.box, self.guard]
    if not self.clock:
        return result
    icon_pos = utils.vector_add(self.box.get_position(), (-9, 7))
    placement = self.box.get_relative_placement(icon_pos)
    result.append(citems.ClockIcon(self, "clock", placement))
    return result
def __init__(self, net, id, position, size):
    """Create a rectangular net item with two resize handles.

    position -- top-left corner; size -- (width, height) offset to the
    bottom-right corner.
    """
    NetItem.__init__(self, net, id)
    # point1 = top-left handle, point2 = bottom-right handle.
    self.point1 = citems.Point(self, "point1", citems.AbsPlacement(position))
    self.point1.action = "resize_ltop"
    self.point2 = citems.Point(self, "point2", citems.AbsPlacement(
        utils.vector_add(position, size)))
    self.point2.owner = self
    self.point2.action = "resize_rbottom"
def backward(HMM, b, ev):
    """One backward-message step of the forward-backward algorithm for a
    two-state HMM: fold the evidence into b, then push it back through the
    transition model and normalize."""
    evidence_weighted = element_wise_product(HMM.sensor_dist(ev), b)
    from_state0 = scalar_vector_product(evidence_weighted[0], HMM.transition_model[0])
    from_state1 = scalar_vector_product(evidence_weighted[1], HMM.transition_model[1])
    return normalize(vector_add(from_state0, from_state1))
def actions(self, state):
    """Returns the list of actions which are allowed to be taken from the
    given state (those whose successor stays inside the n x m grid)."""
    def stays_on_grid(action):
        nxt = vector_add(state, self.defined_actions[action])
        return 0 <= nxt[0] <= self.n - 1 and 0 <= nxt[1] <= self.m - 1
    return [action for action in self.defined_actions if stays_on_grid(action)]
def on_mouse_move(self, event, position):
    """While resizing, grow/shrink the resized item by the grid-snapped
    mouse delta (clamped to non-negative size); otherwise defer to the
    base canvas-config handler."""
    if self.resize_item:
        change = utils.make_vector(self.initial_mouse, position)
        change = utils.snap_to_grid(change, self.grid_size)
        new_size = utils.vector_add(self.initial_size, change)
        # Never let the item collapse to a negative size.
        new_size = utils.vector_at_least(new_size, 0, 0)
        self.resize_item.size = new_size
        self.canvas.redraw()
    else:
        NetEditCanvasConfig.on_mouse_move(self, event, position)
def result(self, state, action):
    """Return the state that results from executing the given action in
    the given state.

    :param state: current state
    :param action: action to take
    :return: The state that results from executing the given action.
    """
    # Component-wise addition of state and action vectors.
    return vector_add(state, tuple(action))
def __init__(self, net, id, position, size):
    """Create a rectangular net item with two resize handles.

    position -- top-left corner; size -- (width, height) offset to the
    bottom-right corner.
    """
    NetItem.__init__(self, net, id)
    # point1 = top-left handle, point2 = bottom-right handle.
    self.point1 = citems.Point(self, "point1", citems.AbsPlacement(position))
    self.point1.action = "resize_ltop"
    self.point2 = citems.Point(
        self, "point2",
        citems.AbsPlacement(utils.vector_add(position, size)))
    self.point2.owner = self
    self.point2.action = "resize_rbottom"
def create_activations(self, values):
    """Create TransitionActivation items laid out left-to-right in rows of
    six, starting 12px right of this item's position.

    values -- iterable of (text, color, data) triples
    """
    results = []
    position = self.get_position()
    start = utils.vector_add(position, (12, 0))
    position = start
    count = 0
    for text, color, data in values:
        activation = TransitionActivation(data, "activation",
                                          self.get_relative_placement(position),
                                          text, color)
        results.append(activation)
        # Advance horizontally past the activation just placed.
        position = utils.vector_add(position, (self.space_x + activation.size[0], 0))
        count += 1
        if count == 6:
            # Row full: wrap to the next row below the row start.
            count = 0
            start = utils.vector_add(start, (0, self.space_y + activation.size[1]))
            position = start
    return results
def create_activations(self, values):
    """Create TransitionActivation items laid out left-to-right in rows of
    six, starting 12px right of this item's position.

    values -- iterable of (text, color, data) triples
    """
    results = []
    position = self.get_position()
    start = utils.vector_add(position, (12, 0))
    position = start
    count = 0
    for text, color, data in values:
        activation = TransitionActivation(
            data, "activation", self.get_relative_placement(position), text, color)
        results.append(activation)
        # Advance horizontally past the activation just placed.
        position = utils.vector_add(position, (self.space_x + activation.size[0], 0))
        count += 1
        if count == 6:
            # Row full: wrap to the next row below the row start.
            count = 0
            start = utils.vector_add(
                start, (0, self.space_y + activation.size[1]))
            position = start
    return results
def get_canvas_items(self, view_mode):
    """Collect this transition's canvas items: the base element items, the
    guard, the collective root when present, and an optional clock icon."""
    items = NetElement.get_canvas_items(self, view_mode)
    items.append(self.guard)
    if self.collective:
        items.append(self.root)
    if not self.clock:
        return items
    icon_pos = utils.vector_add(self.box.get_position(), (-9, 7))
    placement = self.box.get_relative_placement(icon_pos)
    items.append(citems.ClockIcon(self, "clock", placement))
    return items
def solve_move(self, parameters):
    """Solve a move_to_destination action for the described protagonist.

    Dispatches on the goal descriptor (location / referent / part /
    object / location descriptor) or, failing a goal, on a heading plus
    distance. Fixes applied: the closing log string was split across lines
    (syntax error) and the bare ``goal == 'home'`` branch referenced an
    undefined ``home_pos`` where the sibling branch uses ``self.home_pos``.
    """
    color = None
    size = None
    world = self.world
    print('solver: begin move_to_destination')
    protagonist = self.get_described_obj(parameters.protagonist['objectDescriptor'])
    speed = parameters.speed * 6
    heading = parameters.heading
    goal = parameters.goal
    direction = parameters.direction
    inst = protagonist
    if goal:
        # TODO: super/subtype relations missing!
        if 'location' in goal:
            if goal['location'] == 'home':
                self.move(inst, self.home_pos.x, self.home_pos.y, self.home_pos.z,
                          tolerance=2, speed=speed)
            else:
                self.move(inst, float(goal['location'][0]),
                          float(goal['location'][1]), 0.0, speed=speed)
        elif goal == 'home':
            # BUGFIX: was the undefined name `home_pos`.
            self.move(inst, self.home_pos.x, self.home_pos.y, self.home_pos.z,
                      speed=speed)
        elif 'referent' in goal:
            obj = getattr(self.world, goal['referent'])
            self.move(inst, obj.pos.x, obj.pos.y, obj.pos.z, speed=speed)
        elif 'partDescriptor' in goal:
            if goal['partDescriptor']['relation']['type'] == 'side':
                loc = self.get_described_part_pos(goal['partDescriptor'], inst)
                if loc:
                    self.move(inst, loc[0], loc[1], tolerance=2, speed=speed)
        else:
            if 'objectDescriptor' in goal:
                properties = goal['objectDescriptor']
                obj = self.get_described_obj(properties, multiple=True)
                if obj:
                    self.move(inst, obj.pos.x, obj.pos.y, obj.pos.z, speed=speed)
            elif 'locationDescriptor' in goal:
                properties = goal['locationDescriptor']
                loc = self.get_described_loc_pos(properties, getattr(self.world, inst))
                if loc:
                    self.move(inst, loc[0], loc[1], speed=speed)
    elif heading:
        # Relative move: distance n along the named heading vector.
        n = float(parameters.distance.value)
        print(inst)
        name = getattr(inst, 'name')
        pos = self.getpos(name)
        newpos = vector_add(pos, vector_mul(n, self.headings[heading]))
        self.move(inst, newpos[0], newpos[1], newpos[2], speed=speed)
    print('solver: end move_to_destination')
def get_canvas_items(self, view_mode):
    """Collect this transition's canvas items: the base element items, the
    guard, the collective root when present, and an optional clock icon."""
    items = NetElement.get_canvas_items(self, view_mode)
    items.append(self.guard)
    if self.collective:
        items.append(self.root)
    if self.clock:
        # Clock icon sits just outside the box's top-left corner.
        p = utils.vector_add(self.box.get_position(), (-9, 7))
        items.append(
            citems.ClockIcon(self, "clock", self.box.get_relative_placement(p)))
    return items
def get_activation_items(self):
    """Return canvas items for currently active transitions: a cached
    TransitionActivations container per transition plus its per-value
    activation items (only for transitions with non-empty values)."""
    result = []
    for transition in self.perspective.runinstance.net.transitions():
        activations = self.activations.get(transition.id)
        if activations is None:
            # First time: place the container just below the transition box.
            position = utils.vector_add(transition.box.get_position(),
                                        (0, transition.box.size[1] + 10))
            activations = citems.TransitionActivations(None, "activations",
                                                       citems.AbsPlacement(position))
            self.activations[transition.id] = activations
        values = self.perspective.get_activations_values(transition)
        if values:
            result.append(activations)
            result += activations.create_activations(values)
    return result
def is_corner(self, pos):
    """Return True when *pos* has two adjacent walls forming a corner
    (two walls that are rotationally consecutive around the cell)."""
    # todo: improve to any kind of deadlock (boxes around etc)
    # todo: add inspection for borders that box will get stuck there
    steps = [(0, 1), (1, 0), (0, -1), (-1, 0)]  # order is: R,D,L,U
    neighbor_cells = [vector_add(pos, step) for step in steps]
    # Seed with the last neighbour (U) so the R/U pair is also checked.
    is_last_cell_wall = self._grid[neighbor_cells[3]] == WALL
    for cell in neighbor_cells:
        is_curr_cell_wall = self._grid[cell] == WALL
        # Two consecutive walls around the cell => corner.
        if is_last_cell_wall and is_curr_cell_wall:
            return True
        is_last_cell_wall = is_curr_cell_wall
    return False
def montavetor_b(self, b, bstares):
    """For each state in *bstares*, store in b[s] a list of weighted
    states: the state itself with weight .85 plus each in-grid adjacent
    state with weight .15."""
    for s in bstares:
        nearstates = []
        nearstates.append({s: .85})
        for act in utils.orientations:
            near = utils.vector_add(s, act)
            # Only add adjacent states bounded by the grid.
            if (near[0] >= 0 and near[1] >= 0) and (near[0] < self.dims[0] and near[1] < self.dims[1]):
                nearstates.append({near: .15})
        b[s] = nearstates
    return [None]
def get_error_items(self):
    """Build red error-label canvas items for this element's error messages.

    Returns an empty list when the project has no messages for this
    element. Each label is anchored at the bottom-right corner of the
    canvas item named by the message key (falling back to the first canvas
    item) and delegates selection back to that item.
    """
    result = []
    messages = self.net.project.get_error_messages(self)
    if not messages:
        return result
    items = self.get_canvas_items_dict(None)
    for name in messages:
        item = items.get(name)
        if item is None:
            # Key was not found, take first item
            # For transition/place it is expected that "box" is returned
            item = self.get_canvas_items()[0]
        # Anchor at the item's bottom-right corner. (A redundant
        # vector_add(position, (0, 0)) no-op was removed here.)
        position = utils.vector_add(item.get_position(), item.size)
        placement = item.get_relative_placement(position)
        error_item = citems.Text(None, "error", placement)
        error_item.delegate_selection = item
        error_item.background_color = (255, 0, 0)
        error_item.border_color = (0, 0, 0)
        error_item.align_y = 0
        error_item.z_level = 20
        # Show only the first message for this key.
        error_item.text = messages[name][0]
        result.append(error_item)
    return result
def get_activation_items(self):
    """Return canvas items for currently active transitions: a cached
    TransitionActivations container per transition plus its per-value
    activation items (only for transitions with non-empty values)."""
    result = []
    for transition in self.perspective.runinstance.net.transitions():
        activations = self.activations.get(transition.id)
        if activations is None:
            # First time: place the container just below the transition box.
            position = utils.vector_add(
                transition.box.get_position(),
                (0, transition.box.size[1] + 10))
            activations = citems.TransitionActivations(
                None, "activations", citems.AbsPlacement(position))
            self.activations[transition.id] = activations
        values = self.perspective.get_activations_values(transition)
        if values:
            result.append(activations)
            result += activations.create_activations(values)
    return result
def get_token_items(self):
    """Return a TokenBox per place (cached in self.token_boxes), refreshed
    with the perspective's current/new/removed tokens."""
    places = self.perspective.runinstance.net.places()
    result = []
    for place in places:
        token_box = self.token_boxes.get(place.id)
        if token_box is None:
            # First time: place the box to the right of the place,
            # vertically centered on it.
            sx, sy = place.box.size
            position = utils.vector_add(place.box.get_position(), (sx + 20, sy / 2))
            token_box = citems.TokenBox(None, "tokenbox", citems.AbsPlacement(position))
            self.token_boxes[place.id] = token_box
        token_box.set_tokens(self.perspective.get_tokens(place),
                             self.perspective.get_new_tokens(place),
                             self.perspective.get_removed_tokens(place))
        result.append(token_box)
    return result
def neighbors(self, coordinate, tile_size):
    """Return the 4-neighbours of *coordinate* that stay inside a grid of
    dimensions *tile_size* (offsets in order left, right, down, up)."""
    x, y = coordinate[0], coordinate[1]
    width, height = tile_size[0], tile_size[1]
    offsets = []
    if x != 0:
        offsets.append((-1, 0))
    if x != width - 1:
        offsets.append((1, 0))
    if y != height - 1:
        offsets.append((0, 1))
    if y != 0:
        offsets.append((0, -1))
    return [vector_add(coordinate, offset) for offset in offsets]
def ray_cast(self, sensor_num, kin_state):
    """Returns distace to nearest obstacle or map boundary in the
    direction of sensor"""
    pos = kin_state[:2]
    orient = kin_state[2]
    # sensor layout when orientation is 0 (towards North)
    #  0
    # 3R1
    #  2
    # Unit step for this sensor in the robot's local frame.
    delta = ((sensor_num%2 == 0)*(sensor_num - 1), (sensor_num%2 == 1)*(2 - sensor_num))
    # sensor direction changes based on orientation (90-degree rotations)
    for _ in range(orient):
        delta = (delta[1], -delta[0])
    range_count = 0
    # NOTE(review): both coordinates are bounded by self.nrows; for a
    # non-square map the second bound presumably should be the column
    # count — confirm against the map's dimensions.
    while (0 <= pos[0] < self.nrows) and (0 <= pos[1] < self.nrows) and (not self.m[pos[0]][pos[1]]):
        pos = vector_add(pos, delta)
        range_count += 1
    return range_count
def ray_cast(self, sensor_num, kin_state):
    """Returns distace to nearest obstacle or map boundary in the
    direction of sensor"""
    pos = kin_state[:2]
    orient = kin_state[2]
    # sensor layout when orientation is 0 (towards North)
    #  0
    # 3R1
    #  2
    # Unit step for this sensor in the robot's local frame.
    delta = ((sensor_num % 2 == 0) * (sensor_num - 1),
             (sensor_num % 2 == 1) * (2 - sensor_num))
    # sensor direction changes based on orientation (90-degree rotations)
    for _ in range(orient):
        delta = (delta[1], -delta[0])
    range_count = 0
    # NOTE(review): both coordinates are bounded by self.nrows; for a
    # non-square map the second bound presumably should be the column
    # count — confirm against the map's dimensions.
    while (0 <= pos[0] < self.nrows) and (0 <= pos[1] < self.nrows) and (not self.m[pos[0]][pos[1]]):
        pos = vector_add(pos, delta)
        range_count += 1
    return range_count
def solve_move(self, ntuple):
    """Execute a move_to_destination n-tuple: for each parameter set, move
    the acted-upon instance to a goal (location or referent object), home,
    or a relative offset along a heading."""
    world = self.world
    home_pos = world.robot1_instance.pos
    print('solver: begin move_to_destination')
    if debug: print(len(ntuple.parameters))
    for parameters in ntuple.parameters:
        if debug: print(parameters)
        acted_upon = parameters.acted_upon
        heading = parameters.heading
        goal = parameters.goal
        direction = parameters.direction
        inst = getattr(self.world, acted_upon)
        if goal:
            # TODO: super/subtype relations missing!
            if debug: print("goal is")
            if debug: print(goal)
            if goal.ontological_category.type() == 'location':
                inst.move(x=float(goal.xCoord), y=float(goal.yCoord), z=0.0)
            else:
                # We assume it's an object, like a box or a block
                if debug: print("self.world")
                if debug: print(self.world)
                if debug: print("goal.referent.type()")
                if debug: print(goal.referent.type())
                obj = getattr(self.world, goal.referent.type())
                if debug: print("obj is")
                if debug: print(obj)
                if debug: print("color type is")
                if debug: print(goal.extensions.boundedObject)
                #if debug: print (goal.extensions.property.ontological_category.type())
                #color = getattr(self.world, goal.extensions.property.ontological_category.type())
                inst.move(x=obj.pos.x, y=obj.pos.y, z=obj.pos.z)
        elif direction == 'home':
            ## print('| move(x={x}, y={y}, z=0.0)'.format(x=home_pos.x, y=home_pos.y))
            inst.move(x=home_pos.x, y=home_pos.y, z=home_pos.z)
        elif heading:
            # Relative move: distance n along the named heading vector.
            n = float(parameters.distance.value)
            pos = self.getpos(inst)
            newpos = vector_add(pos, vector_mul(n, self.headings[heading]))
            inst.move(x=newpos[0], y=newpos[1], z=newpos[2])
    print('solver: end move_to_destination')
def solve_move(self, ntuple):
    """Execute a move_to_destination n-tuple: for each parameter set, move
    the acted-upon instance to a goal (location or referent object), home,
    or a relative offset along a heading."""
    world = self.world
    home_pos = world.robot1_instance.pos
    print('solver: begin move_to_destination')
    if debug: print(len(ntuple.parameters))
    for parameters in ntuple.parameters:
        if debug: print(parameters)
        acted_upon = parameters.acted_upon
        heading = parameters.heading
        goal = parameters.goal
        direction = parameters.direction
        inst = getattr(self.world, acted_upon)
        if goal:
            # TODO: super/subtype relations missing!
            if debug: print("goal is")
            if debug: print(goal)
            if goal.ontological_category.type() == 'location':
                inst.move(x=float(goal.xCoord), y=float(goal.yCoord), z=0.0)
            else:
                # We assume it's an object, like a box or a block
                if debug: print("self.world")
                if debug: print(self.world)
                if debug: print("goal.referent.type()")
                if debug: print(goal.referent.type())
                obj = getattr(self.world, goal.referent.type())
                if debug: print("obj is")
                if debug: print(obj)
                if debug: print("color type is")
                if debug: print (goal.extensions.boundedObject)
                #if debug: print (goal.extensions.property.ontological_category.type())
                #color = getattr(self.world, goal.extensions.property.ontological_category.type())
                inst.move(x=obj.pos.x, y=obj.pos.y, z=obj.pos.z)
        elif direction == 'home':
            ## print('| move(x={x}, y={y}, z=0.0)'.format(x=home_pos.x, y=home_pos.y))
            inst.move(x=home_pos.x, y=home_pos.y, z=home_pos.z)
        elif heading:
            # Relative move: distance n along the named heading vector.
            n = float(parameters.distance.value)
            pos = self.getpos(inst)
            newpos = vector_add(pos, vector_mul(n, self.headings[heading]))
            inst.move(x=newpos[0], y=newpos[1], z=newpos[2])
    print('solver: end move_to_destination')
def actions(self, state):
    """State will show the position we are currently in the grid.
    Will return all the possible action agent can execute in the given state.

    :param state: current state
    :return: all possible action from the current state in a list of tuples
    """
    # Candidate moves: four orthogonal offsets followed by four diagonals
    # (row, col): down, up, left, right, down-left, up-left, down-right,
    # up-right.
    all_moves = [(1, 0), (-1, 0), (0, -1), (0, 1),
                 (1, -1), (-1, -1), (1, 1), (-1, 1)]
    # Keep only moves whose destination stays on the grid and is not a wall.
    return [move for move in all_moves
            if valid_position(self.grid, vector_add(state, move))]
def MDPExplore(self, b0, s0):
    """Follow the MDP's optimal policy from s0, recording the visited path
    and accumulated utility for belief point b0. Returns False when s0 has
    no utility entry (it is a wall), so FSVI can resample b0."""
    # If there is no utility value for s0 it is a wall:
    # return False so FSVI generates a new sample of b0.
    if s0 not in self.U:
        return False
    else:
        # Record/extend the path defined by the policy for b0.
        if b0 in self.path:
            self.path[b0].append(s0)
        else:
            self.path[b0] = [s0]
        # Record/accumulate the utility value V[b0].
        if b0 in self.V:
            self.V[b0] += self.U[s0]
        else:
            self.V[b0] = self.U[s0]
        if not (s0 in self.recompensa):
            # Look up the MDP's optimal policy action for s0.
            a = self.pi[s0]
            # Apply the policy action to s0 to get the next state s1.
            s1 = utils.vector_add(s0, a)
            self.MDPExplore(b0, s1)
        return True
def particle_filtering(e, N, HMM):
    """Particle filtering considering two states variables."""
    s = []
    dist = [0.5, 0.5]
    # State Initialization
    s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
    # Weight Initialization
    w = [0 for i in range(N)]
    # STEP 1
    # Propagate one step using transition model given prior state
    dist = vector_add(scalar_vector_product(dist[0], HMM.transition_model[0]),
                      scalar_vector_product(dist[1], HMM.transition_model[1]))
    # Assign state according to probability
    s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
    w_tot = 0
    # Calculate importance weight given evidence e
    for i in range(N):
        if s[i] == 'A':
            # P(U|A)*P(A)
            w_i = HMM.sensor_dist(e)[0]*dist[0]
        if s[i] == 'B':
            # P(U|B)*P(B)
            w_i = HMM.sensor_dist(e)[1]*dist[1]
        w[i] = w_i
        w_tot += w_i
    # Normalize all the weights
    for i in range(N):
        w[i] = w[i]/w_tot
    # Limit weights to 4 digits
    for i in range(N):
        w[i] = float("{0:.4f}".format(w[i]))
    # STEP 2
    # NOTE(review): argument order (N, s, w) differs from the (s, w, N)
    # order used by the other particle_filtering variants in this file —
    # confirm against the weighted_sample_with_replacement signature in use.
    s = weighted_sample_with_replacement(N, s, w)
    return s
def get_next_action(self, observed_map):
    """Return the next move ("U", "D", "L" or "R") for the observed map.

    While unexplored ice remains suspected, delegates to explore_map;
    otherwise follows (and lazily repairs) a best-first-search solution,
    detecting ice when the actual player position diverges from the
    expected one.
    """
    # Should output one of the following: "U", "D", "L", "R"
    # Timeout: 5 seconds
    new_state = State(grid_wrap(observed_map))
    self._exploring = len(self._ice_suspected) > 0
    # Re-apply all ice discovered so far onto the fresh observation.
    for ice_pos in self._discovered_ice:
        new_state.grid[ice_pos] += 20  # to ice
    if self._exploring:
        return self.explore_map(new_state)
    else:
        if self._solution is None:
            Problem.__init__(self, new_state)
            # path()[1:] drops the initial node (the current state).
            self._solution = best_first_graph_search(self, self.h).path()[1:]
        if new_state.player != self._expected_state.player:
            # we encountered ice, save it.
            player_pos = self._expected_state.player
            # Mark every cell we slid over as ice, then replan.
            while player_pos != new_state.player:
                self._discovered_ice.add(player_pos)
                new_state.grid[player_pos] += 20  # add ice
                player_pos = vector_add(player_pos, DIRECTIONS[self._last_action])
            Problem.__init__(self, new_state)
            self._solution = best_first_graph_search(self, self.h).path()[1:]
        self._visited_states.add(new_state)
        self._expected_state = self._solution[0].state
        action = self._solution[0].action
        self._last_action = action
        # Consume the step we are about to take.
        self._solution = self._solution[1:]
        return action
def particle_filtering(e, N, HMM):
    """Particle filtering considering two states variables."""
    s = []
    dist = [0.5, 0.5]
    # State Initialization
    s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
    # Weight Initialization
    w = [0 for i in range(N)]
    # STEP 1
    # Propagate one step using transition model given prior state
    dist = vector_add(scalar_vector_product(dist[0], HMM.transition_model[0]),
                      scalar_vector_product(dist[1], HMM.transition_model[1]))
    # Assign state according to probability
    s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
    w_tot = 0
    # Calculate importance weight given evidence e
    for i in range(N):
        if s[i] == 'A':
            # P(U|A)*P(A)
            w_i = HMM.sensor_dist(e)[0] * dist[0]
        if s[i] == 'B':
            # P(U|B)*P(B)
            w_i = HMM.sensor_dist(e)[1] * dist[1]
        w[i] = w_i
        w_tot += w_i
    # Normalize all the weights
    for i in range(N):
        w[i] = w[i] / w_tot
    # Limit weights to 4 digits
    for i in range(N):
        w[i] = float("{0:.4f}".format(w[i]))
    # STEP 2: resample particles in proportion to their weights.
    s = weighted_sample_with_replacement(s, w, N)
    return s
def particle_filtering(e, N, HMM):
    """Particle filtering considering two state variables."""
    s = []
    dist = [0.5, 0.5]
    # State initialization
    s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
    # Weight initialization
    w = [0 for i in range(N)]
    # STEP 1 - Propagate one step using the transition model given the prior state
    dist = vector_add(scalar_vector_product(dist[0], HMM.transition_model[0]),
                      scalar_vector_product(dist[1], HMM.transition_model[1]))
    # Assign the state according to the probability
    s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
    w_tot = 0
    # Compute the importance weight given evidence e
    for i in range(N):
        if s[i] == 'A':
            # P(U|A)*P(A)
            w_i = HMM.sensor_dist(e)[0] * dist[0]
        if s[i] == 'B':
            # P(U|B)*P(B)
            w_i = HMM.sensor_dist(e)[1] * dist[1]
        w[i] = w_i
        w_tot += w_i
    # Normalize all the weights
    for i in range(N):
        w[i] = w[i] / w_tot
    # Limit weights to 4 digits
    for i in range(N):
        w[i] = float("{0:.4f}".format(w[i]))
    # STEP 2: resample particles in proportion to their weights.
    s = weighted_sample_with_replacement(s, w, N)
    return s
def go(self, state, direction):
    """Return the state that results from going in this direction."""
    candidate = vector_add(state, direction)
    if candidate in self.states:
        return candidate
    return state
def go(self, state, direction):
    """Return the successor of *state* in *direction*, staying put when
    the move would leave the set of valid states."""
    moved = vector_add(state, direction)
    return moved if moved in self.states else state
def get_bounding_box(self):
    """Return the (top-left, bottom-right) corners of this item."""
    top_left = self.get_position()
    bottom_right = utils.vector_add(top_left, self.size)
    return (top_left, bottom_right)
def get_position(self):
    """Absolute position: the parent placement offset by our relative
    position."""
    base = self.parent_placement.get_position()
    return utils.vector_add(base, self.position)
def get_position(self):
    """Position on the multiline: the anchor point on the polyline plus
    this item's offset."""
    anchor = self.compute_point_on_multiline(self.multiline.get_points())
    return utils.vector_add(anchor, self.offset)
def forward(HMM, fv, ev):
    """One forward-message step for a two-state HMM: predict from the
    prior fv through the transition model, weight by the evidence ev,
    and normalize."""
    predicted = vector_add(scalar_vector_product(fv[0], HMM.transition_model[0]),
                           scalar_vector_product(fv[1], HMM.transition_model[1]))
    likelihood = HMM.sensor_dist(ev)
    weighted = element_wise_product(likelihood, predicted)
    return normalize(weighted)
def BackPropagationLearner(dataset, net, learning_rate, epochs): """[Figure 18.23] The back-propagation algorithm for multilayer network""" # Initialise weights for layer in net: for node in layer: node.weights = random_weights(min_value=-0.5, max_value=0.5, num_weights=len(node.weights)) examples = dataset.examples ''' As of now dataset.target gives an int instead of list, Changing dataset class will have effect on all the learners. Will be taken care of later ''' o_nodes = net[-1] i_nodes = net[0] o_units = len(o_nodes) idx_t = dataset.target idx_i = dataset.inputs n_layers = len(net) inputs, targets = init_examples(examples, idx_i, idx_t, o_units) for epoch in range(epochs): # Iterate over each example for e in range(len(examples)): i_val = inputs[e] t_val = targets[e] # Activate input layer for v, n in zip(i_val, i_nodes): n.value = v # Forward pass for layer in net[1:]: for node in layer: inc = [n.value for n in node.inputs] in_val = dotproduct(inc, node.weights) node.value = node.activation(in_val) # Initialize delta delta = [[] for i in range(n_layers)] # Compute outer layer delta # Error for the MSE cost function err = [t_val[i] - o_nodes[i].value for i in range(o_units)] # The activation function used is the sigmoid function delta[-1] = [sigmoid_derivative(o_nodes[i].value) * err[i] for i in range(o_units)] # Backward pass h_layers = n_layers - 2 for i in range(h_layers, 0, -1): layer = net[i] h_units = len(layer) nx_layer = net[i+1] # weights from each ith layer node to each i + 1th layer node w = [[node.weights[k] for node in nx_layer] for k in range(h_units)] delta[i] = [sigmoid_derivative(layer[j].value) * dotproduct(w[j], delta[i+1]) for j in range(h_units)] # Update weights for i in range(1, n_layers): layer = net[i] inc = [node.value for node in net[i-1]] units = len(layer) for j in range(units): layer[j].weights = vector_add(layer[j].weights, scalar_vector_product( learning_rate * delta[i][j], inc)) return net
def BackPropagationLearner(dataset, net, learning_rate, epoches): "[Figure 18.23] The back-propagation algorithm for multilayer network" # Initialise weights for layer in net: for node in layer: node.weights = [ random.uniform(-0.5, 0.5) for i in range(len(node.weights)) ] examples = dataset.examples ''' As of now dataset.target gives an int instead of list, Changing dataset class will have effect on all the learners. Will be taken care of later ''' idx_t = [dataset.target] idx_i = dataset.inputs n_layers = len(net) o_nodes = net[-1] i_nodes = net[0] for epoch in range(epoches): # Iterate over each example for e in examples: i_val = [e[i] for i in idx_i] t_val = [e[i] for i in idx_t] # Activate input layer for v, n in zip(i_val, i_nodes): n.value = v # Forward pass for layer in net[1:]: for node in layer: inc = [n.value for n in node.inputs] in_val = dotproduct(inc, node.weights) node.value = node.activation(in_val) # Initialize delta delta = [[] for i in range(n_layers)] # Compute outer layer delta o_units = len(o_nodes) err = [t_val[i] - o_nodes[i].value for i in range(o_units)] delta[-1] = [(o_nodes[i].value) * (1 - o_nodes[i].value) * (err[i]) for i in range(o_units)] # Backward pass h_layers = n_layers - 2 for i in range(h_layers, 0, -1): layer = net[i] h_units = len(layer) nx_layer = net[i + 1] # weights from each ith layer node to each i + 1th layer node w = [[node.weights[k] for node in nx_layer] for k in range(h_units)] delta[i] = [(layer[j].value) * (1 - layer[j].value) * dotproduct(w[j], delta[i + 1]) for j in range(h_units)] # Update weights for i in range(1, n_layers): layer = net[i] inc = [node.value for node in net[i - 1]] units = len(layer) for j in range(units): layer[j].weights = vector_add( layer[j].weights, scalar_vector_product(learning_rate * delta[i][j], inc)) return net
def get_relative_placement(self, position, absolute=True):
    """Return a RelativePlacement anchored to this item's placement.

    When *absolute* is False, *position* is treated as relative and is
    first shifted by the placement's own position.
    """
    if absolute:
        anchor = position
    else:
        anchor = utils.vector_add(position, self.placement.get_position())
    return RelativePlacement(self.placement, anchor)
def forward(HMM, fv, ev):
    """One forward-message step for a two-state HMM.

    fv -- prior forward message; ev -- current evidence.
    Predicts through the transition model, weights by the sensor model and
    normalizes. (Idiom fix: dropped the non-idiomatic parenthesized
    ``return(...)`` call-style syntax.)
    """
    prediction = vector_add(scalar_vector_product(fv[0], HMM.transition_model[0]),
                            scalar_vector_product(fv[1], HMM.transition_model[1]))
    sensor_dist = HMM.sensor_dist(ev)
    return normalize(element_wise_product(sensor_dist, prediction))
def backward(HMM, b, ev):
    """One backward-message step for a two-state HMM.

    b -- backward message from the future; ev -- current evidence.
    Folds the evidence into b, pushes the result back through the
    transition model and normalizes. (Idiom fix: dropped the non-idiomatic
    parenthesized ``return(...)`` call-style syntax.)
    """
    sensor_dist = HMM.sensor_dist(ev)
    prediction = element_wise_product(sensor_dist, b)
    return normalize(vector_add(scalar_vector_product(prediction[0], HMM.transition_model[0]),
                                scalar_vector_product(prediction[1], HMM.transition_model[1])))
def movedown(self):
    """Shift this object's location two units downward (positive y)."""
    self.location = vector_add(self.location, (0, 2))
def __init__(self, net, id, position, size):
    """Create an area net item: the rectangle handles from RectItem plus
    an Area canvas item and an "init" text label above the top-left
    corner."""
    RectItem.__init__(self, net, id, position, size)
    self.area = citems.Area(self, "area", self.point1, self.point2)
    # Label sits 15px above the top-left handle.
    position = utils.vector_add(self.point1.get_position(), (0, -15))
    self.init = citems.Text(self, "init", self.point1.get_relative_placement(position))
def BackPropagationLearner(dataset, net, learning_rate, epoches): "[Figure 18.23] The back-propagation algorithm for multilayer network" # Initialise weights for layer in net: for node in layer: node.weights = [random.uniform(-0.5, 0.5) for i in range(len(node.weights))] examples = dataset.examples ''' As of now dataset.target gives an int instead of list, Changing dataset class will have effect on all the learners. Will be taken care of later ''' idx_t = [dataset.target] idx_i = dataset.inputs n_layers = len(net) o_nodes = net[-1] i_nodes = net[0] for epoch in range(epoches): # Iterate over each example for e in examples: i_val = [e[i] for i in idx_i] t_val = [e[i] for i in idx_t] # Activate input layer for v, n in zip(i_val, i_nodes): n.value = v # Forward pass for layer in net[1:]: for node in layer: inc = [n.value for n in node.inputs] in_val = dotproduct(inc, node.weights) node.value = node.activation(in_val) # Initialize delta delta = [[] for i in range(n_layers)] # Compute outer layer delta o_units = len(o_nodes) err = [t_val[i] - o_nodes[i].value for i in range(o_units)] delta[-1] = [(o_nodes[i].value)*(1 - o_nodes[i].value) * (err[i]) for i in range(o_units)] # Backward pass h_layers = n_layers - 2 for i in range(h_layers, 0, -1): layer = net[i] h_units = len(layer) nx_layer = net[i+1] # weights from each ith layer node to each i + 1th layer node w = [[node.weights[k] for node in nx_layer] for k in range(h_units)] delta[i] = [(layer[j].value) * (1 - layer[j].value) * dotproduct(w[j], delta[i+1]) for j in range(h_units)] # Update weights for i in range(1, n_layers): layer = net[i] inc = [node.value for node in net[i-1]] units = len(layer) for j in range(units): layer[j].weights = vector_add(layer[j].weights, scalar_vector_product( learning_rate * delta[i][j], inc)) return net
def go(self, state, direction):
    """Return the state that results from going in this direction."""
    destination = vector_add(state, direction)
    if destination not in self.states:
        return state
    return destination
def BackPropagationLearner(dataset, net, learning_rate, epochs): """[Figure 18.23] The back-propagation algorithm for multilayer network""" # Initialise weights for layer in net: for node in layer: node.weights = random_weights(min_value=-0.5, max_value=0.5, num_weights=len(node.weights)) examples = dataset.examples ''' As of now dataset.target gives an int instead of list, Changing dataset class will have effect on all the learners. Will be taken care of later ''' o_nodes = net[-1] i_nodes = net[0] o_units = len(o_nodes) idx_t = dataset.target idx_i = dataset.inputs n_layers = len(net) inputs, targets = init_examples(examples, idx_i, idx_t, o_units) for epoch in range(epochs): # Iterate over each example for e in range(len(examples)): i_val = inputs[e] t_val = targets[e] # Activate input layer for v, n in zip(i_val, i_nodes): n.value = v # Forward pass for layer in net[1:]: for node in layer: inc = [n.value for n in node.inputs] in_val = dotproduct(inc, node.weights) node.value = node.activation(in_val) # Initialize delta delta = [[] for i in range(n_layers)] # Compute outer layer delta # Error for the MSE cost function err = [t_val[i] - o_nodes[i].value for i in range(o_units)] # The activation function used is the sigmoid function delta[-1] = [ sigmoid_derivative(o_nodes[i].value) * err[i] for i in range(o_units) ] # Backward pass h_layers = n_layers - 2 for i in range(h_layers, 0, -1): layer = net[i] h_units = len(layer) nx_layer = net[i + 1] # weights from each ith layer node to each i + 1th layer node w = [[node.weights[k] for node in nx_layer] for k in range(h_units)] delta[i] = [ sigmoid_derivative(layer[j].value) * dotproduct(w[j], delta[i + 1]) for j in range(h_units) ] # Update weights for i in range(1, n_layers): layer = net[i] inc = [node.value for node in net[i - 1]] units = len(layer) for j in range(units): layer[j].weights = vector_add( layer[j].weights, scalar_vector_product(learning_rate * delta[i][j], inc)) return net
def get_position(self, position):
    """Derive this item's position from *position* and the element's
    default size via interpolation with factor -0.5."""
    opposite_corner = utils.vector_add(position, self.element.default_size)
    return utils.interpolate(position, opposite_corner, -0.5)