def paned_window(self):
    """Build the three-column main layout and instantiate the major UI parts.

    Layout: a horizontal paned window with left / middle / right panes; the
    left pane splits vertically into projects and actions, the middle pane
    splits vertically into the drawing canvas and a resources strip.  The
    menubar, properties, canvas, toolbar, project explorer, actions and
    resources widgets are then created inside those panes.
    """
    # NOTE(review): screen metrics come from a module-level `root`, not
    # self.parent -- confirm that is intentional.
    # Hoisted: the same usable-height value was recomputed five times.
    pane_height = root.winfo_screenheight() - 140

    self.panedwindow = ttk.Panedwindow(self.parent, orient=tk.HORIZONTAL)
    self.panedwindow.pack(expand=True, fill=tk.BOTH)
    self.left_pane = ttk.Frame(self.panedwindow, height=pane_height, relief=tk.SUNKEN)
    self.middle_pane = ttk.Frame(self.panedwindow, height=pane_height, relief=tk.SUNKEN)
    self.right_pane = ttk.Frame(self.panedwindow, height=pane_height, relief=tk.SUNKEN)
    self.panedwindow.add(self.left_pane, weight=1)
    self.panedwindow.add(self.middle_pane, weight=1)
    self.panedwindow.add(self.right_pane, weight=10)

    # Left column: projects on top, actions below, split evenly.
    self.panedwindow_left = ttk.Panedwindow(self.left_pane, orient=tk.VERTICAL)
    self.panedwindow_left.pack(expand=True, fill=tk.BOTH)
    self.pane_projects = ttk.Frame(self.panedwindow_left, height=pane_height / 2, relief=tk.SUNKEN)
    self.pane_actions = ttk.Frame(self.panedwindow_left, height=pane_height / 2, relief=tk.SUNKEN)
    self.panedwindow_left.add(self.pane_projects, weight=1)
    self.panedwindow_left.add(self.pane_actions, weight=1)

    # Middle column: canvas dominates (weight 5), resources strip below.
    self.panewindow_middle = ttk.PanedWindow(self.middle_pane, orient=tk.VERTICAL)
    self.panewindow_middle.pack(expand=True, fill=tk.BOTH)
    self.pane_canvas = ttk.Frame(self.panewindow_middle, relief=tk.SUNKEN)
    self.pane_resources = ttk.Frame(self.panewindow_middle, width=100, relief=tk.SUNKEN)
    self.panewindow_middle.add(self.pane_canvas, weight=5)
    self.panewindow_middle.add(self.pane_resources, weight=1)

    # Widget construction order matters: later widgets receive earlier ones
    # as collaborators (e.g. Canvas needs Properties, Toolbar needs Canvas).
    self.menubar = Menubar(self.parent)
    self.properties = Properties(self.right_pane)
    self.canvas = Canvas(self.properties)
    self.toolbar = Toolbar(self.pane_canvas, self.canvas)
    self.project_explorer = ProjectExplorer(self.pane_projects)
    self.canvas.create_Ui(self.pane_canvas)
    self.actions = Actions(self.pane_actions, self.canvas, self.properties)
    self.resources = Resources(self.pane_resources)
def __init__(self):
    """Set up the drone core: initial state, configuration, data store, and
    the movement / action / planner collaborators that all share this core.
    """
    self.state = State.Init
    self.config = {
        # Target altitude in meters to hold while flying.
        'hover_height': 1.0,
        # States for which movement.fix_hover() will NOT be called (to make sure the drone is at `hover_height`)
        'exclude_from_fix_hover': [
            State.Init,
            State.Takeoff,
            State.Land,
            State.Done,
        ],
        # Radius in meters around a blacklisted goal that the robot will ignore
        'blacklisted_goal_radius': 2.0
    }
    self.store = DataStore()
    # All collaborators share this core object and the same data store.
    self.movement = MovementHandler(core=self, datastore=self.store)
    self.actions = Actions(core=self, datastore=self.store, movement_handler=self.movement)
    self.planner = Planner(core=self, datastore=self.store, movement_handler=self.movement)
    # Aux files
    self.temp_data = {}  # scratch storage shared across components
    self.last_goal = None  # most recent goal; None until the first goal is set
def main(phrase="Tell EVE something!"):
    """Run one conversation round of the EVE chatbot in Streamlit.

    Args:
        phrase: Opening prompt handed to the Actions object for this round.
    """
    # Instantiating class object for this conversation
    a = Actions(phrase)
    # st.text(respond(a.utter_greet()))
    intents, user_input, history_df, end = conversation(a)
    print(end)  # NOTE(review): looks like leftover debug output -- confirm
    if st.sidebar.button("Show backend"):
        backend_dash(intents, user_input, history_df)
    if end == False:
        # Round did not finish cleanly: clear Streamlit's cache and retry
        # with a clarification prompt.
        # NOTE(review): `caching.clear_cache()` is a deprecated Streamlit
        # API in newer releases -- verify against the pinned version.
        caching.clear_cache()
        conversation(Actions("Could you please rephrase?"))
def compute_strategy(self, state, id_team, id_player):
    """Return the SoccerAction for this player for the current tick.

    In possession: shoot toward a fixed point in front of the opposing goal
    (side depends on the team).  Otherwise: hold a line near midfield while
    the ball is in our half, or chase the ball when it is close enough.

    Fix: removed the unused local `s = Strategies(...)` that was constructed
    and never read.
    """
    v = SuperState(state, id_team, id_player)
    a = Actions(state, id_team, id_player)
    if v.player.distance(v.ball) < PLAYER_RADIUS + BALL_RADIUS:
        # Possession: shoot toward the team-dependent target point.
        if id_team == 1:
            tir = (Vector2D(GAME_WIDTH * 3 / 8, GAME_HEIGHT / 2)) - v.player
            return SoccerAction(shoot=tir.normalize() * 3)
        else:
            tir = (Vector2D(GAME_WIDTH * 5 / 8, GAME_HEIGHT / 2)) - v.player
            return SoccerAction(shoot=tir.normalize() * 3)
    else:
        if v.ballecampadverse == 0:
            # Ball in our half: hold slightly on our side of midfield while
            # tracking the ball's y coordinate.
            if id_team == 1:
                return a.deplacement(
                    Vector2D((GAME_WIDTH / 2) - 5, v.ball.y))
            else:
                return a.deplacement(
                    Vector2D((GAME_WIDTH / 2) + 5, v.ball.y))
        elif v.player.distance(v.ball) < PLAYER_RADIUS * 10:
            # Ball in the opposing half and nearby: chase it.
            return a.deplacement(v.ball)
        # NOTE(review): implicit None when the ball is in the opposing half
        # but far away -- confirm the caller tolerates that.
def get_legal_actions(self, wrld, curr_pos):
    """Iterate through all actions from a position and return only the
    possible ones.

    Args:
        wrld: World object exposing width()/height(), the bombs/explosions
            lists and the wall_at/bomb_at/explosion_at cell queries.
        curr_pos: (x, y) tuple with the current position.

    Returns:
        list[str]: Names of the actions legal from curr_pos.
    """
    x, y = (curr_pos[0], curr_pos[1])
    actions = []
    for i in range(10):
        a = Actions(i).name
        dx = Pos[a].value[0]
        dy = Pos[a].value[1]
        if a == "BOMB":
            # Placing a bomb is only legal while no bomb or explosion is
            # active anywhere in the world.
            if len(wrld.bombs) > 0 or len(wrld.explosions) > 0:
                continue
            else:
                actions.append(a)
                continue
        # Movement action: target cell must lie inside the grid and be free
        # of walls, bombs and explosions.
        if (x + dx >= 0) and (x + dx < wrld.width()):
            if (y + dy >= 0) and (y + dy < wrld.height()):
                if not wrld.wall_at(x + dx, y + dy) and not wrld.bomb_at(
                        x + dx, y + dy) and not wrld.explosion_at(
                            x + dx, y + dy):
                    # Fix: removed the dead `if a == "BOMB" and ...` check
                    # here -- BOMB is fully handled (and `continue`d) above,
                    # so this branch could never be reached.
                    actions.append(a)
    return actions
def __init__(self):
    """Build the calculator window: display on top, number pad on the left
    and operator/action columns on the right.
    """
    Gtk.Window.__init__(self, title='Calculator')
    self.set_resizable(False)
    # creates box which handles all childs
    self.box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
    self.add(self.box)
    self.buttons_box = Gtk.Box()
    self.operators_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
    # objects to be added to main box
    self.numbers_grid = NumbersGrid()
    self.display = Display()
    self.calc_system = CalculatorSystem(self.display)
    self.operators_grid = OperatorsGrid(self.display, self.calc_system)
    self.actions = Actions(self.display, self.calc_system)
    # add objects to box
    self.box.add(self.display)
    self.box.add(self.buttons_box)
    self.buttons_box.pack_start(self.numbers_grid, False, True, 0)
    self.buttons_box.pack_start(self.operators_box, False, True, 0)
    self.operators_box.pack_start(self.actions, False, True, 0)
    self.operators_box.add(self.operators_grid)
    # connect numbers buttons to display
    self.numbers_grid.connect_to_display(self.display)
    # Keyboard input is handled at the window level.
    self.connect('key-release-event', self.on_key_release)
def __init__(self):
    """Initialize the learning tables.

    Q-values start at an optimistic constant (roughly the cost of moving
    every stack across the grid once); visit counts start at zero.  The
    same_locs table starts as all zeros except a first column of ones: that
    column corresponds to the all-'O' action tuple, which never moves any
    robot or stack.  In practice these tables are often overwritten from
    CSVs saved during earlier training runs.

    Returns
    -------
    None.
    """
    num_states = (nCr(N_ROWS * N_COLS + 1, N_ROBOTS)
                  * nCr(N_ROWS * N_COLS + 1, N_STACKS)
                  * (N_ITEMS + 1) ** N_STACKS)
    num_actions = len(Actions().valid_actions) ** N_ROBOTS

    # Optimistic initial q-value, kept as float to match np.ones * scalar.
    initial_q = float((N_ROWS + N_COLS - 1) * N_STACKS)
    self.qvals = np.full((num_states, num_actions), initial_q)
    self.visits = np.zeros((num_states, num_actions))

    # (num_states, num_actions) matrix: ones in column 0, zeros elsewhere.
    same_locs = np.zeros((num_states, num_actions))
    same_locs[:, 0] = 1.0
    self.same_locs = same_locs

    self.performance = pd.DataFrame([], columns=['iters', 'score'])
    return
def __init__(self, c_instance):
    """Wire the DATA Looper control surface into the Live song: register
    song listeners, build the action/track handlers, and announce readiness
    over sysex.
    """
    super(DataLooper, self).__init__(c_instance)
    self.__c_instance = c_instance
    # shows in Ableton footer
    self.show_message("Powered by DATA Looper")
    # listens to time changes
    self.song().add_current_song_time_listener(self.on_time_change)
    # listens to track add/remove
    self.song().add_tracks_listener(self.on_track_added_or_removed)
    self.song().add_signature_numerator_listener(self.on_signature_numerator_change)
    self.tracks = defaultdict(list)
    self.state = State(self.song())
    # creates obj to handle actions
    self.__action_handler = Actions(self, self.tracks, self.song(), self.state)
    # creates obj to handle tracks
    self.__track_handler = TrackHandler(self, self.song(), self.tracks, self.state, self.__action_handler)
    self.song().add_is_playing_listener(self.on_is_playing)
    self.__track_handler.scan_tracks(True)
    # initializes base obj
    self.live = Live.Application.get_application()
    self.song().add_metronome_listener(self.on_metro_change)
    # Handshake/identification message to the hardware.
    self.send_sysex(0x00, 0x01)
def __init__(self, app):
    """Load every view declared in views.json and expose each one as an
    attribute named after its JSON key."""
    self.app = app
    self.state = app.state
    self.actions = Actions(self.app)
    for view_name, view_spec in Config.load('views.json').items():
        setattr(self, view_name, View(app, self, view_name, view_spec))
def __init__(self, log, settings):
    """Bootstrap the urwid application: temp storage, root frame, main
    loop, and the state/menus/views/actions collaborators; then activate
    the home view.

    Args:
        log: Logger used across the app.
        settings: Settings object providing the display palette.
    """
    self.log = log
    self.settings = settings
    # log.debug("App Class Initializing")
    # Setup home_dir and temp_dir
    self.home_dir = os.path.expanduser("~")
    # Create temp directories under the home dir rather than the system
    # default location.
    tempfile.tempdir = self.home_dir
    self.temp_dir = tempfile.TemporaryDirectory()
    # log.debug('temp_dir: %s', self.temp_dir)
    # Initialize Frame
    self.frame = Urwid.Frame(
        Urwid.Filler(W.get_text('body', 'Loading...Please Wait', 'center')))
    # Initialize MainLoop
    self.loop = Urwid.MainLoop(self.frame,
                               self.settings.display['palette'],
                               unhandled_input=self.unhandled_input,
                               handle_mouse=False)
    self.errors = 0
    self.state = State(self, self.log)
    self.menus = Menus(self)
    self.views = Views(self)
    self.actions = Actions(self)
    # Show the home view immediately.
    self.views.activate(self, {'view': 'home'})
    self.action_pipes = []  # pipes for in-flight background actions
def attaque1(self):
    """Attacking behaviour for this player.

    If the teammate has the ball, camp at the left camping point.  If this
    player has the ball, either pass, shoot, dribble or advance depending
    on teammate distance / goal distance / nearest opponent.  Otherwise
    chase the ball.
    """
    s = SuperState(self.state, self.id_team, self.id_player)
    a = Actions(self.state, self.id_team, self.id_player)
    if s.poscoequippier.distance(
            s.ball) < PLAYER_RADIUS + BALL_RADIUS:  # If the teammate has the ball
        return a.deplacement(s.pointcampeurgauche)
    elif s.player.distance(s.ball) < PLAYER_RADIUS + BALL_RADIUS:
        # This player has the ball.
        if s.player.distance(s.poscoequippier) < PLAYER_RADIUS * 25:
            if s.coepdevant == 1:
                # Teammate ahead: pass to them.
                return a.tircoequippier
            elif s.player.distance(s.goaladverse) < (
                    PLAYER_RADIUS * 15):  # Inside shooting range: shoot
                return a.shootbut
            elif s.playeradverse.distance(s.player) < (PLAYER_RADIUS * 8):
                # Opponent close: dribble.
                return a.dr2
            else:
                return a.avanceravecballe
        # NOTE(review): when holding the ball with the teammate far away
        # (>= 25 player radii) this falls through to an implicit None --
        # confirm that is intended.
    else:
        # No one on our side has the ball: chase it.
        return a.deplacement(s.ball)
def choose_discard(self, drawn_card, game_state):
    """Given the state of the game and the card that has been drawn, decide
    which card to discard.

    The player finds the exchange that produces the highest state
    evaluation.  If the drawn card does not improve the hand it is
    discarded outright (index 8), unless it was drawn from the discard
    pile, in which case it must be exchanged.

    Args:
        drawn_card (Deck.Card): The card drawn.
        game_state ([int]): The current game state.

    Returns:
        Actions: The discard location as an Action.
    """
    # Hoisted: the card's index is needed both for the exchange search and
    # for the drawn-from-discard-pile check below.
    card_index = Golf.get_card_index(drawn_card)
    val, ind = self.max_val_ind_exchange(card_index, game_state)
    # Prevent player from discarding a card drawn from the discard pile
    # (marked -2 in the state).
    if game_state[card_index] == -2:
        index = ind
    else:
        # Check if the card improves upon the current hand; 8 means
        # "discard the drawn card".
        max_val = self.function_approximator.value_of_state(game_state)
        index = ind if val > max_val else 8
    return Actions(index)
def __init__(self):
    """Read config.ini (located next to this module), connect to InfluxDB
    with it, and prepare the query API, target bucket and actions helper.
    """
    config = configparser.ConfigParser()
    # config.ini lives alongside this source file.
    config_file = os.path.join(os.path.dirname(__file__), 'config.ini')
    config.read(config_file)
    # The same file doubles as the InfluxDB client configuration.
    client = InfluxDBClient.from_config_file(config_file)
    self.query_api = client.query_api()
    self.bucket = config['ha']['influx_bucket']
    self.actions = Actions()
def addAction(self, code, amount):
    """Create an Actions entry with the given code and amount and append
    it to this object's actions list."""
    new_action = Actions()
    new_action.code = code
    new_action.amount = amount
    self.actions.append(new_action)
def predict():
    """Run model prediction and collect loss / accuracy / dice metrics.

    NOTE(review): the collected lists are neither returned nor printed, so
    the results are currently discarded -- confirm whether this should
    return or log them (compare the sibling predict() that prints its
    metrics).
    """
    predict_loss = []
    predict_accuracy = []
    predict_dice = []
    # Relies on module-level `sess` and `configure` -- presumably a
    # TensorFlow session and a config factory; verify at module scope.
    model = Actions(sess, configure())
    loss, acc, dice = model.predict()
    predict_loss.append(loss)
    predict_accuracy.append(acc)
    predict_dice.append(dice)
def __init__(self, catalog=None):
    """Start the Book sub-CLI for the given catalog.

    Args:
        catalog (dict): Catalog record with at least 'name' and 'id' keys.

    Raises:
        ValueError: If no catalog is supplied.  The previous default of
            None crashed with an opaque TypeError on `catalog["name"]`;
            fail fast with a clear message instead.
    """
    if catalog is None:
        raise ValueError("catalog with 'name' and 'id' keys is required")
    Cmd.__init__(self)
    self.intro = f'\n [{catalog["name"]}] Work with book CLI'
    self.prompt = "Book > "
    self.doc_header = "Book commands (type help <topic>):"
    print(catalog['id'])  # NOTE(review): debug output -- consider removing
    self.actions = Actions(catalog['id'])
    self.search = Search(catalog['id'])
def start():
    """Q-learning loop for a ball-seeking robot.

    State 0 = red (ball) pixels visible, state 1 = not visible.  Actions:
    0 rotate right, 1 rotate left, 2 walk straight.  Rewards favour moves
    that keep the ball centered and growing in the frame.

    Fixes: the original used two different spellings (`ballvisible` at init,
    `ballVisible` in the loop) so the reward check read a variable that was
    never updated; unified to `ball_visible`.  Unused locals `states` and
    `actions` removed.
    """
    a = Actions()
    c = ColorDetector()
    action = 0
    state = 0
    nextState = 0
    ball_visible = True
    q = QLearningAgent(3, 2)  # 3 actions, 2 states
    # allow the camera to warmup
    time.sleep(0.1)
    # grab an image from the camera
    camera.capture(rawCapture, format="bgr")
    image = rawCapture.array
    res = c.prepareImage(image)
    # isCenter() returns (centered?, red-pixel count); keep the count as a
    # baseline to detect whether the ball got bigger after the next move.
    redPixel = c.isCenter(res)[1]
    if redPixel > 0:
        state = 0
    else:
        state = 1
    while True:
        action = q.chooseAction(state)
        if action == 0:
            a.rotateRight()
        elif action == 1:
            a.rotateLeft()
        else:
            a.walkStraight()
        time.sleep(0.1)
        rawCapture.truncate(0)
        camera.capture(rawCapture, format="bgr")
        image = rawCapture.array
        res = c.prepareImage(image)
        isCenter, pixelCount = c.isCenter(res)
        if pixelCount > 0:
            nextState = 0
            ball_visible = True
        else:
            nextState = 1
            ball_visible = False
        # Reward shaping: centered & closer > centered > merely visible > lost.
        if isCenter and pixelCount > redPixel:
            q.learn(state, nextState, action, 3)
        elif isCenter:
            q.learn(state, nextState, action, 2)
        elif ball_visible:
            q.learn(state, nextState, action, 1)
        else:
            q.learn(state, nextState, action, -1)
        state = nextState
        redPixel = pixelCount
def __init__(self, icon=None, tunableDict=None, title="Window"):
    """Initializing the UI for Forge.

    Args:
        icon (string, optional): File path to the icon (.ico file) for the
            top left of the window. Defaults to None.
        tunableDict (dict, optional): Tunable variables for saving the
            window's position on close. Defaults to {"windowX": 20,
            "windowY": 50, "windowWidth": 500, "windowHeight": 500,
            "volume": 50}.
        title (str, optional): The name of the window. Defaults to "Window".
    """
    # Fix: the old default was a dict built at definition time (a mutable
    # default argument, shared across calls); build a fresh one per call.
    if tunableDict is None:
        tunableDict = {
            "windowX": 20,
            "windowY": 50,
            "windowWidth": 500,
            "windowHeight": 500,
            "volume": 50
        }
    # Fix: use a context manager so the stylesheet handle is closed even if
    # read() raises.
    with open(utilities.resource_path("QTPie Style\\style.css"), "r") as stylesheet:
        styling = stylesheet.read()
    self.actions = Actions()
    self.tunableDict = tunableDict
    self.app = QtWidgets.QApplication(sys.argv)
    self.app.setStyleSheet(styling)
    # Persist window geometry when the application quits.
    self.app.aboutToQuit.connect(
        lambda: self.actions.onWindowClose(self.mainWindow))
    if icon:
        appIcon = PyQt5.QtGui.QIcon()
        appIcon.addFile(utilities.resource_path("icon.png"))
        self.app.setWindowIcon(appIcon)
    self.grid = QtWidgets.QGridLayout()
    self.gridCount = 0
    self.grid.setSpacing(0)
    self.grid.setContentsMargins(0, 0, 0, 0)
    self.space = self.addLabel([1, 1, 1, 1], txt="", name="Spacer")
    self.window = QTPieWidget()
    self.window.setLayout(self.grid)
    self.mainWindow = QTPieWindow()
    # Restore the last saved geometry.
    self.mainWindow.setGeometry(self.tunableDict["windowX"],
                                self.tunableDict["windowY"],
                                self.tunableDict["windowWidth"],
                                self.tunableDict["windowHeight"])
    self.mainWindow.setWindowTitle(title)
    self.mainWindow.setCentralWidget(self.window)
def close_position(self):
    """Close the next queued position and return its realised return.

    Returns 0 when there is no position to close or it was a HOLD, the raw
    fractional return for a BUY, and the negated return for a SELL.

    NOTE(review): the original began with an unconditional
    `return random.uniform(-.0003, 0.003)`, which made the entire P&L
    computation below unreachable -- almost certainly a leftover test stub.
    Removed so the real logic runs; restore the stub only if random returns
    were intentionally desired.
    """
    position = self.pq.remove()
    if position is None:
        return 0
    open_ = position.open_price
    close = position.close_price
    # Fractional return relative to the open price.
    return_ = ((close - open_) / open_)
    if Actions(position.type) == Actions.HOLD:
        return 0
    elif Actions(position.type) == Actions.BUY:
        return return_
    elif Actions(position.type) == Actions.SELL:
        # Short position: profit when the price falls.
        return -1 * return_
    else:
        return 0
def __init__(self):
    """Wire up the assistant's NLG, speech and action components, and make
    sure the unknown-commands CSV exists with a header row.
    """
    self.nlg = NLG(user_name=self.my_name)
    self.speech = Speech(launch_phrase=self.launch_phrase,
                         debugger_enabled=self.debugger_enabled)
    self.actions = Actions(self.location)
    # Create the log file with its header only on first run.
    # Fix: `if os.path.isfile(...) == False` replaced with the idiomatic
    # `if not os.path.isfile(...)` (PEP 8).
    if not os.path.isfile('unknown_commands.csv'):
        with open('unknown_commands.csv', 'w') as csvfile:
            writer = csv.DictWriter(csvfile,
                                    fieldnames=self.unknown_fieldnames)
            writer.writeheader()
def __init__(self, players_turn, feature_length, label_length):
    """Set up a new game: both players, the shared actions helper, and one
    training-data buffer per player.

    Args:
        players_turn: Whose turn it is at game start.
        feature_length: Length of a training feature vector.
        label_length: Length of a training label vector.
    """
    self.players_turn = players_turn
    self.game_over = False
    self.user = Player('user')
    self.opponent = Player('opponent')
    self.game_actions = Actions()
    # Separate data buffers so each side's experience can be trained on
    # independently.
    self.player_training_data = Data(feature_length, label_length)
    self.opponent_training_data = Data(feature_length, label_length)
def __init__(self, config, encoder_output_dim, action_dict, ent_dict, tri_dict, arg_dict):
    """Transition-based parser state for event extraction.

    Builds the stack-LSTMs, embedding tables and output layers used by the
    parser.  The *_dict arguments map actions, entity labels, trigger
    labels and argument roles to integer ids.
    """
    self.config = config
    self.model = pm.global_collection()
    self.multi_task = MultiTask(config, encoder_output_dim, action_dict, ent_dict, tri_dict, arg_dict)
    self.arg_null_id = arg_dict[Vocab.NULL]
    bi_rnn_dim = encoder_output_dim  # config['rnn_dim'] * 2 #+ config['edge_embed_dim']
    lmda_dim = config['lmda_rnn_dim']
    part_ent_dim = config['part_ent_rnn_dim']
    self.lmda_dim = lmda_dim
    self.bi_rnn_dim = bi_rnn_dim
    self.lambda_var = nn.LambdaVar(lmda_dim)
    # Dropout rates for stack-LSTM state / hidden state.
    dp_state = config['dp_state']
    dp_state_h = config['dp_state_h']
    self.sigma_rnn = nn.StackLSTM(lmda_dim, lmda_dim, dp_state, dp_state_h)  # stack
    self.delta_rnn = nn.StackLSTM(lmda_dim, lmda_dim, dp_state, dp_state_h)  # will be pushed back
    self.part_ent_rnn = nn.StackLSTM(bi_rnn_dim, part_ent_dim, dp_state, dp_state_h)
    #self.beta = []  # buffer, unprocessed words
    self.actions_rnn = nn.StackLSTM(config['action_embed_dim'], config['action_rnn_dim'], dp_state, dp_state_h)
    self.out_rnn = nn.StackLSTM(bi_rnn_dim, config['out_rnn_dim'], dp_state, dp_state_h)
    self.act_table = nn.Embedding(len(action_dict), config['action_embed_dim'])
    self.ent_table = nn.Embedding(len(ent_dict), config['entity_embed_dim'])
    self.tri_table = nn.Embedding(len(tri_dict), config['trigger_embed_dim'])
    self.act = Actions(action_dict, ent_dict, tri_dict, arg_dict)
    # Classifier input: buffer repr + three lambda slots + partial-entity
    # repr + action history + output history.
    hidden_input_dim = bi_rnn_dim + lmda_dim * 3 + part_ent_dim \
        + config['action_rnn_dim'] + config['out_rnn_dim']
    self.hidden_linear = nn.Linear(hidden_input_dim, config['output_hidden_dim'], activation='tanh')
    self.output_linear = nn.Linear(config['output_hidden_dim'], len(action_dict))
    entity_embed_dim = config['entity_embed_dim']
    trigger_embed_dim = config['trigger_embed_dim']
    ent_to_lmda_dim = config['part_ent_rnn_dim'] + entity_embed_dim  #+ config['sent_vec_dim'] * 4
    self.ent_to_lmda = nn.Linear(ent_to_lmda_dim, lmda_dim, activation='tanh')
    tri_to_lmda_dim = bi_rnn_dim + trigger_embed_dim  #+ config['sent_vec_dim']
    self.tri_to_lmda = nn.Linear(tri_to_lmda_dim, lmda_dim, activation='tanh')
    self.hidden_arg = nn.Linear(lmda_dim * 2 + self.bi_rnn_dim, config['output_hidden_dim'], activation='tanh')
    self.output_arg = nn.Linear(config['output_hidden_dim'], len(arg_dict))
    # Learned embedding standing in for an empty buffer.
    self.empty_buffer_emb = self.model.add_parameters((bi_rnn_dim,), name='bufferGuardEmb')
    self.event_cons = EventConstraint(ent_dict, tri_dict, arg_dict)
    #self.cached_valid_args = self.cache_valid_args(ent_dict, tri_dict)
    self.empty_times = 0
def __init__(self, username=None, password=None):
    """Build the API client with its endpoint groups; log in immediately
    when both credentials are supplied.

    Args:
        username (str, optional): Account username.
        password (str, optional): Account password.
    """
    self.general = General(self)
    self.model = Model(self)
    self.actions = Actions(self)
    self.groups = Groups(self)
    self.customers = Customers(self)
    self.segments = Segments(self)
    self.integrations = Integrations(self)
    # Only authenticate when both pieces are present; otherwise the client
    # stays unauthenticated until general.login() is called explicitly.
    if username and password:
        self.general.login(username, password)
def __init__(self, config, gestures, el, *args, **kwarg):
    """
    :param config: string containing the path to the action configuration
    :param gestures: string containing the path to the gestures configuration
    :param el: event loop used to dispatch recognized gestures
    """
    super(EventDispatcher, self).__init__(*args, **kwarg)
    self._event_loop = el
    self._gdb = GestureDatabase()
    self._actions = Actions(config, gestures)
    # Discover currently attached input devices; multitouch state starts
    # empty until events arrive.
    self.update_devices()
    self._multitouches = []
def compute_strategy(self, state, id_team, id_player):
    """Return the SoccerAction for this player: when touching the ball,
    shoot along the direction of the opposing player; otherwise chase the
    ball.

    Fix: removed the unused local `s = Strategies(...)` that was
    constructed and never read.
    """
    v = SuperState(state, id_team, id_player)
    a = Actions(state, id_team, id_player)
    if v.player.distance(v.ball) < PLAYER_RADIUS + BALL_RADIUS:
        # Possession: shoot toward the opposing player's direction.
        shoot = v.playeradverse - v.player
        return SoccerAction(shoot=shoot.normalize() * 3)
    else:
        return a.deplacement(v.ball)
def predict():
    """Run model prediction and print the loss, accuracy and mean IoU."""
    # `sess` and `configure` are module-level names.
    model = Actions(sess, configure())
    loss, acc, m_iou = model.predict()
    # Metrics kept as single-element lists to match the printed format.
    predict_loss = [loss]
    predict_accuracy = [acc]
    predict_m_iou = [m_iou]
    print('predict_loss', predict_loss)
    print('predict_accuracy', predict_accuracy)
    print('predict_m_iou', predict_m_iou)
def milieu(self):
    """Midfielder behaviour: shoot toward the left-attacker point when in
    possession, chase the ball when it is near, otherwise slide laterally.

    Fix: the possession branch (`< PLAYER_RADIUS + BALL_RADIUS`) was
    checked AFTER the looser `< PLAYER_RADIUS * 10` radius, so it could
    never fire (assuming BALL_RADIUS < 9 * PLAYER_RADIUS -- confirm the
    constants).  The tighter check now comes first so the shoot action is
    reachable.
    """
    s = SuperState(self.state, self.id_team, self.id_player)
    a = Actions(self.state, self.id_team, self.id_player)
    if s.player.distance(s.ball) < PLAYER_RADIUS + BALL_RADIUS:
        # In possession: shoot toward the left-attacker point.
        return a.shoot(s.pointattaquantgauche)
    elif s.player.distance(s.ball) < PLAYER_RADIUS * 10:
        # Ball nearby: go get it.
        return a.deplacement(s.ball)
    else:
        return a.deplacementlateralmilieu
def fonceur(self):
    """Rusher behaviour: pass to the teammate when in possession,
    otherwise run straight at the ball."""
    # id_team is 1 or 2; id_player starts at 0.
    game = SuperState(self.state, self.id_team, self.id_player)
    act = Actions(self.state, self.id_team, self.id_player)
    has_ball = game.player.distance(game.ball) < PLAYER_RADIUS + BALL_RADIUS
    return act.tircoequippier if has_ball else act.deplacement(game.ball)
def __init__(self, mockOutput=False):
    """Set up the reasoning engine: input/output graphs, the actions
    executor, and an initial load of the input file data.

    Args:
        mockOutput (bool): When True, Actions suppresses real outputs
            (passed straight through to the Actions helper).
    """
    self.prevGraph = None
    self.rulesN3 = "(not read yet)"
    self.inferred = Graph()  # gets replaced in each graphChanged call
    self.outputGraph = PatchableGraph()  # copy of inferred, for now
    # graphChanged is invoked whenever the input graph's data changes.
    self.inputGraph = InputGraph([], self.graphChanged)
    self.actions = Actions(self.inputGraph,
                           sendToLiveClients,
                           mockOutput=mockOutput)
    # Trigger the first graphChanged pass by loading the file data now.
    self.inputGraph.updateFileData()
def random_policy(self, current_state):
    """Determine an action to take given the current state.

    Returns
    -------
    Actions
        The actions that should be taken if following this policy.
    """
    # NOTE(review): despite the name, nothing here randomises -- any
    # randomness must live inside Actions() itself; confirm.
    return Actions()