def actions(self):
    """
    Returns a list of possible actions on this item.
    The first one is autoselected by pressing SELECT.
    """
    return [Action(self.name, self.select)]
def get_next_action(self):
    if len(self.plan) < 1:
        return Action(ActionType.NoOp, None, None)
    else:
        return self.plan.popleft()
def act(self, policy_step):
    if policy_step == "toggle shield":
        self.states["shield"] = 2 - self.states["shield"]  # toggles between 0 and 2
        if self.states["shield"]:
            self.stats["AC"] += 2
        else:
            self.stats["AC"] -= 2  # remove the same +2 bonus when the shield drops
        action = Action(self, target_id="self")
    elif policy_step == "absorb":
        self.states["absorb"] = 1
        self.states["spell slots"] -= 1
        action = Action(self, target_id="self")
    elif policy_step == "cool breath":
        self.states["spell slots"] -= 1
        action = Action(self, attack_roll=20, attack_modifier="wis",
                        target_roll=20, save_modifier="wis",
                        effect="radiant cooldown", effect_roll=4, effect_modifier=-1)
    elif policy_step == "heal":
        self.states["spell slots"] -= 1
        action = Action(self, target_id="self", effect="hp",
                        effect_roll=18, effect_modifier=-1)
    elif policy_step == "moonbeam":
        self.states["spell slots"] -= 1
        action = Action(self, attack_roll=20, attack_modifier="wis",
                        target_roll=20, save_modifier="con",
                        effect="hp", effect_roll=12, effect_modifier=2)
    elif policy_step == "harder hit":
        action = Action(self, attack_roll=20, attack_modifier="str",
                        target_roll=1, save_modifier="AC",
                        effect="hp", effect_roll=8, effect_modifier=2)
    elif policy_step == "hit":
        action = Action(self, attack_roll=20, attack_modifier="str",
                        target_roll=1, save_modifier="AC",
                        effect="hp", effect_roll=4, effect_modifier=0)
    else:
        action = Action(self, target_id="self")  # default, empty action
    return action
def set_answer(self, answer):
    key = next(iter(answer))
    action = Action(key, answer[key])
    return self.sprint.apply_action_to_sprint(action)
def one():
    view.menuView()
    action = Action()
def stop(markers, state, zone):
    return Result(state.setCommands([stop]), Action("stop", 0))
from vision import Vision
from action import Action
from debug import Debugger
from astar2 import Astar2
from zero_blue_action import zero_blue_action
import time
import math

if __name__ == '__main__':
    vision = Vision()
    action = Action()
    debugger = Debugger()
    zero = zero_blue_action()
    #astar = Astar()
    start_x = 2400
    start_y = 1500
    goal_x = -2400
    goal_y = -1500
    while True:
        time.sleep(1)
        start_x, start_y = 2400, 1500
        #start_x, start_y = vision.my_robot.x, vision.my_robot.y
        goal_x, goal_y = -2400, -1500
        # goal_x.append = [-2400]
        # goal_y.append = [-1500]
        astar2 = Astar2(start_x, start_y, goal_x, goal_y)
        path_x, path_y = astar2.plan(vision)
        path_x, path_y = zero.simplify_path(path_x=path_x, path_y=path_y)
def parseActionsAndPropositions(self):
    propositions = []
    with open(self.domainFile, 'r') as f:
        line = f.readline()
        propositionLine = f.readline()
        words = [word.rstrip() for word in propositionLine.split(" ") if len(word.rstrip()) > 0]
        for i in range(0, len(words)):
            propositions.append(Proposition(words[i]))
    actions = []
    with open(self.domainFile, 'r') as f:
        line = f.readline()
        while line != '':
            words = [word.rstrip() for word in line.split(" ") if len(word.rstrip()) > 0]
            if words[0] == 'Name:':
                name = words[1]
                line = f.readline()
                precond = []
                add = []
                delete = []
                words = [word.rstrip() for word in line.split(" ") if len(word.rstrip()) > 0]
                for i in range(1, len(words)):
                    precond.append(Proposition(words[i]))
                line = f.readline()
                words = [word.rstrip() for word in line.split(" ") if len(word.rstrip()) > 0]
                for i in range(1, len(words)):
                    add.append(Proposition(words[i]))
                line = f.readline()
                words = [word.rstrip() for word in line.split(" ") if len(word.rstrip()) > 0]
                for i in range(1, len(words)):
                    delete.append(Proposition(words[i]))
                act = Action(name, precond, add, delete)
                for prop in add:
                    self.findPropByName(prop, propositions).addProducer(act)
                actions.append(act)
            line = f.readline()
    # Re-point each action's propositions at the canonical objects parsed above
    for a in actions:
        new_pre = [p for p in propositions if p.name in [q.name for q in a.pre]]
        new_add = [p for p in propositions if p.name in [q.name for q in a.add]]
        new_delete = [p for p in propositions if p.name in [q.name for q in a.delete]]
        a.pre = new_pre
        a.add = new_add
        a.delete = new_delete
    return [actions, propositions]
    else:
        return language_inquiry(False)


"""Actual Home"""
if __name__ == "__main__":
    if check_internet_connection():
        try:
            language = language_selection(sys.argv)
            print("Press 's' key to make an order")
            while True:
                key = getKey()
                if key == 's':
                    master = Listen().listen()[language]
                    mode_number = universal_mode_selection(master, language)
                    Action(language, master).universal_action(mode_number)
                elif key == 'e':
                    break
        except KeyboardInterrupt:
            print('Keyboard interrupt abort')
        finally:
            """Add code here if you want to order any final tasks"""
            gc.collect(generation=2)
            try:
                os.remove('sample_1.mp3')
            except OSError:
                pass
            print('Goodbye.')
def __init__(self):
    from fife import fife
    self.editor = scripts.editor.getEditor()
    self.engine = self.editor.getEngine()
    self._map = None
    self._layer = None
    self._mapdlg = None
    self._layerdlg = None
    self._cameradlg = None
    self._filebrowser = None
    self._importbrowser = None
    self._savebrowser = None

    newAction = Action(u"New map", "gui/icons/new_map.png")
    loadAction = Action(u"Open", "gui/icons/load_map.png")
    closeAction = Action(u"Close", "gui/icons/close_map.png")
    saveAction = Action(u"Save", "gui/icons/save_map.png")
    saveAsAction = Action(u"Save as", "gui/icons/save_mapas.png")
    saveAllAction = Action(u"Save all", "gui/icons/save_allmaps.png")
    importFileAction = Action(u"Import file", "gui/icons/import_file.png")
    importDirAction = Action(u"Import directory", "gui/icons/import_dir.png")

    newAction.helptext = u"Create new map"
    loadAction.helptext = u"Open existing map"
    closeAction.helptext = u"Close map"
    saveAction.helptext = u"Save map"
    saveAsAction.helptext = u"Save map as"
    saveAllAction.helptext = u"Save all opened maps"
    importFileAction.helptext = u"Imports an object file"
    importDirAction.helptext = u"Recursively imports all objects from a directory"

    action.activated.connect(self.showMapWizard, sender=newAction)
    action.activated.connect(self.showLoadDialog, sender=loadAction)
    action.activated.connect(self.closeCurrentMap, sender=closeAction)
    action.activated.connect(self.save, sender=saveAction)
    action.activated.connect(self.saveAs, sender=saveAsAction)
    action.activated.connect(self.editor.saveAll, sender=saveAllAction)

    self._importFileCallback = cbwa(self.showImportDialog, self.importFile, False)
    self._importDirCallback = cbwa(self.showImportDialog, self.importDir, True)
    action.activated.connect(self._importFileCallback, sender=importFileAction)
    action.activated.connect(self._importDirCallback, sender=importDirAction)

    eventlistener = self.editor.getEventListener()
    eventlistener.getKeySequenceSignal(fife.Key.N, ["ctrl"]).connect(self.showMapWizard)
    eventlistener.getKeySequenceSignal(fife.Key.O, ["ctrl"]).connect(self.showLoadDialog)
    eventlistener.getKeySequenceSignal(fife.Key.W, ["ctrl"]).connect(self.closeCurrentMap)
    eventlistener.getKeySequenceSignal(fife.Key.S, ["ctrl"]).connect(self.save)
    eventlistener.getKeySequenceSignal(fife.Key.S, ["ctrl", "shift"]).connect(self.editor.saveAll)

    fileGroup = ActionGroup()
    fileGroup.addAction(newAction)
    fileGroup.addAction(loadAction)
    fileGroup.addSeparator()
    fileGroup.addAction(closeAction)
    fileGroup.addSeparator()
    fileGroup.addAction(saveAction)
    fileGroup.addAction(saveAsAction)
    fileGroup.addAction(saveAllAction)
    fileGroup.addSeparator()
    fileGroup.addAction(importFileAction)
    fileGroup.addAction(importDirAction)

    self.editor.getToolBar().insertAction(fileGroup, 0)
    self.editor.getToolBar().insertSeparator(None, 1)
    self.editor._file_menu.insertAction(fileGroup, 0)
    self.editor._file_menu.insertSeparator(None, 1)
            else:
                print('Error Loading MCP3xxx library. Did you pip3 install -r requirements.txt?')
        else:
            raise Exception("Unknown Node Type: " + node['type'])
        t = t.run()
        if t is not None:
            threads.append(t)
except KeyError:
    print('Invalid or no Nodes found to Load')

# Load in Actions
try:
    for action in CONFIGS["actions"]:
        a = Action(action)
        a.init_action()
        actions[a.key] = a
except KeyError:
    print('No Actions Found to Load')

# Worker for Triggers
try:
    t = TriggerWorker(CONFIGS['triggers'], main_thread_running, system_ready, actions)
    print('Loading Triggers...')
    t = t.run()
    threads.append(t)
except KeyError:
    print('No Triggers Found to Load')
def enumerate_possible_actions(state, group, specie, number_my_groups, max_split_rate):
    groups_human = state.getMembers(Species.human)
    groups_enemy = state.getMembers(specie.inverse())
    actions_total = []
    len_group_me = group.eff
    actions_simple_per_group = []
    actions_split_per_group = []
    doublets = []
    groups_targets = []

    # Prune the human groups, keeping only the closest ones
    humanDistances = []
    for humangroup in groups_human:
        humanDistances.append(utils.getDistance(group, humangroup))
    groups_human.sort(key=dict(zip(groups_human, humanDistances)).get, reverse=False)
    groups_human = groups_human[:len_group_me + 1]

    # Likewise for the enemy groups
    enemyDistances = []
    for enemy in groups_enemy:
        enemyDistances.append(utils.getDistance(group, enemy))
    groups_enemy.sort(key=dict(zip(groups_enemy, enemyDistances)).get, reverse=False)
    groups_enemy = groups_enemy[:len_group_me + 1]

    # Actions without a split
    for group_human in groups_human:
        action = Action(ActionType.attackHuman, group_human, group)
        action.calc_mark(state)
        actions_simple_per_group.append(action)
        groups_targets.append(group_human)
    for group_enemy in groups_enemy:
        action = Action(ActionType.attackEnemy, group_enemy, group)
        action.calc_mark(state)
        actions_simple_per_group.append(action)
        groups_targets.append(group_enemy)

    # Actions with splits
    if number_my_groups <= max_split_rate:  # avoid splitting too much
        for i in range(1, int(len_group_me / 2) + 1):
            doublets.append([i, len_group_me - i])
        for doublet in doublets:
            group1 = Group(group.x, group.y, doublet[0], specie)
            group2 = Group(group.x, group.y, doublet[1], specie)
            for target_group_1 in groups_targets:
                action_type_1 = specie.determine_action_type(target_group_1.species)
                for target_group_2 in groups_targets:
                    action_type_2 = specie.determine_action_type(target_group_2.species)
                    # If the two targets are different:
                    if (target_group_1.x != target_group_2.x) or (target_group_1.y != target_group_2.y):
                        action1 = Action(action_type_1, target_group_1, group1)
                        action2 = Action(action_type_2, target_group_2, group2)
                        action1.calc_mark(state)
                        action2.calc_mark(state)
                        action1.parent_group = group
                        action2.parent_group = group
                        actions_split_per_group.append([action1, action2])

    actions_total.append(actions_simple_per_group)
    actions_total.append(actions_split_per_group)
    return actions_total
def getPossibleMoves(self):
    actions = []

    # 1. Find all possible moves between play piles
    for pile1 in self.playPiles:
        pile1_flipped_cards = pile1.getFlippedCards()
        # If a pile is empty and another pile has a king
        if len(pile1.cards) == 0:  # pile has no cards
            for pile2 in self.playPiles:
                if len(pile2.cards) > 1 and pile2.cards[0].value == "K":
                    # If there are unflipped cards left, give flip bonus +5 reward
                    if len(pile2.getFlippedCards()) < len(pile2.cards):
                        actions.append(Action(pile2.getFlippedCards(), pile2, pile1, 1, flipBonus=True))
                    elif len(pile2.getFlippedCards()) == len(pile2.cards):
                        actions.append(Action(pile2.getFlippedCards(), pile2, pile1, 1))
        # Iterate through every other pile
        if len(pile1_flipped_cards) > 0:
            for pile2 in self.playPiles:
                pile2_flipped_cards = pile2.getFlippedCards()
                # If they're different piles and pile2 has any face-up cards
                if pile2 is not pile1 and len(pile2_flipped_cards) > 0:
                    # Iterate through every possible upward-facing stack in pile1
                    for transfer_cards_size in range(1, len(pile1_flipped_cards) + 1):
                        cards_to_transfer = pile1_flipped_cards[:transfer_cards_size]
                        # If the end of pile2 can be extended by the top of a pile1 stack, add it to actions
                        if self.checkCardOrder(pile2.cards[0], cards_to_transfer[-1]):
                            # If the move opens a card to be flipped, give flip bonus +5 reward
                            if (len(cards_to_transfer) == len(pile1_flipped_cards)) and \
                                    not len(pile1_flipped_cards) == len(pile1.cards):
                                actions.append(Action(reversed(cards_to_transfer), pile1, pile2, 1, flipBonus=True))
                            else:
                                actions.append(Action(reversed(cards_to_transfer), pile1, pile2, 1))
                            ############### Why??
                            # pile1_downcard_count = len(pile1.cards) - len(pile1_flipped_cards)
                            # pile2_downcard_count = len(pile2.cards) - len(pile2_flipped_cards)
                            # if pile2_downcard_count < pile1_downcard_count:
                            #     actions.append(Action(reversed(cards_to_transfer), pile1, pile2))
                            # elif pile1_downcard_count == 0 and len(cards_to_transfer) == len(pile1.cards):
                            #     actions.append(Action(reversed(cards_to_transfer), pile1, pile2))

    # 2. Find all moves from play piles to blocks
    for pile in self.playPiles:
        if len(pile.cards) > 0:
            add = self.canAddToBlock(pile.cards[0])
            if add:
                # If the move opens a card to be flipped, give flip bonus +5 reward
                if len(pile.getFlippedCards()) == 1 and len(pile.cards) > 1:
                    actions.append(Action(pile.cards[0], pile, add, 2, flipBonus=True))
                else:
                    actions.append(Action(pile.cards[0], pile, add, 2))

    # 3. Find all moves from blocks to play piles (negative reward)
    for suit in self.suits:
        if len(self.blockPiles[suit].cards) > 0:
            add = self.canMoveBlockToPile(self.blockPiles[suit].cards[0])
            if add:
                for dest in add:
                    actions.append(Action(self.blockPiles.get(suit).cards[0], self.blockPiles[suit], dest, 3))
                # actions.append(Action(self.blockPiles.get(suit).cards[0], self.blockPiles[suit], i.cards, 3) for i in add)
    # for block in self.blockPiles:
    #     add = self.canMoveBlockToPile(block.cards[0])
    #     if len(block.cards) > 0 and add:
    #         actions.extend([Action([block.cards[0]], block, add[i]) for i in add])

    # 4. Check if we can draw a card from the waste pile
    if len(self.trashPileDown) > 0:
        actions.append(Action(self.trashPileDown[0], self.trashPileDown, self.trashPileUp, 4))

    # 5. Check if we can recycle the waste pile
    if len(self.trashPileDown) < 1:
        # For now, we represent this action as (None, self.trashPileUp, self.trashPileDown)
        actions.append(Action(None, self.trashPileUp, self.trashPileDown, 5))

    # 6. Find all moves from trash to play piles
    for pile in self.playPiles:
        if len(self.trashPileUp) > 0:
            if len(pile.cards) == 0 and self.trashPileUp[-1].value == 'K':
                actions.append(Action(self.trashPileUp[-1], self.trashPileUp, pile, 6))
            if len(pile.cards) > 0:
                add = self.checkCardOrder(pile.cards[0], self.trashPileUp[-1])
                if add:
                    actions.append(Action(self.trashPileUp[-1], self.trashPileUp, pile, 6))

    # 7. Find all moves from trash to blocks
    if len(self.trashPileUp) > 0:
        add = self.canAddToBlock(self.trashPileUp[-1])
        if add:
            actions.append(Action(self.trashPileUp[-1], self.trashPileUp, add, 7))

    return actions
def p_action_def(p):
    '''action_def : LPAREN ACTION_KEY NAME parameters_def action_def_body RPAREN'''
    p[0] = Action(p[3], p[4], p[5][0], p[5][1])
def decide(markers, state, zone):
    # Store only the arena markers in a constant (materialized as lists so they
    # can be reused and len()-checked under Python 3)
    ARENA_MARKERS = [x for x in markers if x.info.marker_type in ["arena", "MARKER_ARENA"]]
    # Store only the token markers in a constant
    TOKEN_MARKERS = [x for x in markers if "token" in x.info.marker_type]
    NOT_CAPTURED = [x.info.code for x in TOKEN_MARKERS]
    CAPTURED = [x for x in state.tokens if x.info.code not in NOT_CAPTURED]
    if len(CAPTURED) != len(state.tokens):
        return decide(markers, state.setTokens(CAPTURED), zone)

    # Store only the markers that are in the robot's zone
    ZONE_MARKERS = getZoneMarkers(zone, ARENA_MARKERS)

    # If there is a command, execute it
    if state.turnCounter >= 30:
        COMMANDS = [moveForward for x in range(3)]
        return Result(state.setCommands(COMMANDS).resetTurnCounter(),
                      Action("move", 0, 200))
    if len(state.commands) > 0:
        return state.runCommand()(markers, state.dropCommand(), zone)

    # Run this if no commands are available
    TIME_UP = (time.time() - state.startTime) > 120
    if len(state.tokens) >= 3 or TIME_UP:
        print("RETURNING!!!")
        if len(ZONE_MARKERS) > 0:
            ZONE = ZONE_MARKERS[0]
            if ZONE.dist < FINALMOVEDIST and -40 < ZONE.rot_y < 40:
                COMMANDS = [moveTowardsZoneMarker for x in range(3)] + [stop]
                return Result(state.setCommands(COMMANDS).resetTurnCounter(),
                              Action("move", ZONE.rot_y, ZONE.dist * 100))
            return Result(state.resetTurnCounter(),
                          Action("move", ZONE.rot_y, ZONE.dist * 100))
        elif len(ARENA_MARKERS) > 0:
            print("No zone markers")
            ROBOT = getRobotEntity(ARENA_MARKERS)
            """
            if isInOwnZone(zone, ROBOT):
                print("I'm home")
                return Result(state.setCommands([stop]).resetTurnCounter(),
                              Action("stop", 0))
            """
            print("ROBOT ANGLE : " + str(ROBOT.angle))
            return Result(state.resetTurnCounter(), getZoneAction(zone, ROBOT))
        else:
            DIRECTION = forwardsOrBack(markers, state, zone)
            COMMANDS = [turnUntilArena for i in range(TURNING_ITER)] + \
                       [lambda markers, state, zone: DIRECTION for i in range(5)]
            return Result(state.setCommands(COMMANDS), Action("turn", TURN_ANGLE))

    # To go to a token marker
    if len(TOKEN_MARKERS) > 0:
        print("GET MARKERS")
        CAPTURED_TYPES = [x.info.marker_type for x in state.tokens]
        if time.time() - state.startTime > 60:
            AVAILABLE = TOKEN_MARKERS
        else:
            AVAILABLE = [x for x in TOKEN_MARKERS if x.info.marker_type not in CAPTURED_TYPES]
        TOKEN = findClosestCube(AVAILABLE)
        # If a marker is close enough that it can be grabbed
        if TOKEN.dist < FINALMOVEDIST and -40 < TOKEN.rot_y < 40:
            # Move forward 3 times commands
            COMMANDS = [(lambda markers, state, zone:
                         moveIfTokenGone(markers, state.resetTurnCounter(), zone, TOKEN))
                        for i in range(4)] + \
                       [lambda markers, state, zone:
                        moveAndGetToken(markers, state.resetTurnCounter(), zone, TOKEN)]
            # Move forward for 3 iterations
            return Result(state.setCommands(COMMANDS).resetTurnCounter(),
                          Action("move", TOKEN.rot_y, TOKEN.dist * 100))
        else:
            return Result(state.resetTurnCounter(),
                          Action("move", TOKEN.rot_y, TOKEN.dist * 100))
    else:
        DIRECTION = forwardsOrBack(markers, state, zone)
        COMMANDS = [turnUntilAvailableToken for i in range(TURNING_ITER)] + \
                   [lambda markers, state, zone: DIRECTION for i in range(5)]
        return Result(state.setCommands(COMMANDS), Action("turn", TURN_ANGLE))
import time

from action import Action


def test_print():
    print('trevor')


test = Action(test_print, 3)
test.disable(10)
while True:
    test.fire()
    time.sleep(1)
def moveAndGetToken(markers, state, zone, token):
    print("Getting token")
    return Result(state.addToken(token), Action("move", 0, 100))
def getAction(self):
    return Action(damage=self.getDamage(4), weak=1)
def turn(markers, state, zone):
    return Result(state, Action("turn", TURN_ANGLE))
def getAction(self):
    if self.turns < 3:
        return Action()
    elif self.turns == 3:
        self.turns = 0
        return Action(damage=self.getDamage(25))
import sys

# Assumed import: pdr is conventionally pandas_datareader; get_unit, init_data,
# get_shape, Action, and Agent are taken to come from this project's own modules.
import pandas_datareader as pdr

ticker, window_size = 'TSLA', 20
init_cash = 1000000
commission = 0.003  # 0.3% transaction fee
stop_pct = 0.1      # stop-loss percentage
safe_pct = 0.8      # safe cash level

# Path for the checkpoint
c_path = "models/{}/training.ckpt".format(ticker)

# Fetch the historical data
df = pdr.DataReader('{}'.format(ticker), 'yahoo', start='2018-1-1', end='2019-8-1')
unit = get_unit(df['Close'].mean(), init_cash)  # currently always trades a fixed unit size
trading = Action(unit)

# Consolidate and transform the data
data = init_data(df)

# Input dimensions used to initialize the agent
input_shape, neurons = get_shape(data[:window_size + 1], window_size)
agent = Agent(ticker, input_shape, neurons, c_path, is_eval=True)

l = len(data) - 1
n_close = 0
total_profit = 0
inventory = []
cash = init_cash
max_drawdown = 0
e, episode_count = 1, 1
def getAction(self):
    return Action(damage=self.getDamage(9))
def initUI(self):
    # Build and position the on-screen widgets: age, name, current activity,
    # facial expression, buttons (feed, wash, sleep, study, play), and gauges
    # (overall, fullness, cleanliness, fatigue, stress)
    self.setGeometry(300, 200, 1340, 700)  # place the window at (300, 200), 1340 wide by 700 tall
    self.setWindowTitle('Tamagotchi')  # set the window title

    self.vbox = QVBoxLayout()  # vertical box layout

    self.hbox0 = QHBoxLayout()  # horizontal box layout #0
    self.hbox0.addStretch(1)  # stretch before the widgets to reserve space
    self.age_output = QLineEdit(self)  # field (QLineEdit) that displays the age
    self.age_output.setReadOnly(True)  # read-only: only program output appears here
    self.age_output.setFixedWidth(20)  # fix the age field at 20 pixels
    self.age_output.setText(str(self.age))  # show the age value
    self.age_label2 = QLabel(' 살', self)  # age-unit label ("years old")
    self.hbox0.addWidget(self.age_output)  # place the age field in hbox0
    self.hbox0.addWidget(self.age_label2)  # place the age unit next to the age field
    self.name_text = QLineEdit(self)  # field (QLineEdit) that displays the name
    self.name_text.setReadOnly(True)  # read-only: shows only the name entered at startup
    self.name_text.setFixedWidth(55)  # fix the name field at 55 pixels
    self.hbox0.addWidget(self.name_text)  # place the name field next to the age unit
    self.hbox0.addWidget(QLabel(' 은/는 '))  # topic-particle label after the name field
    self.status_text = QLineEdit()  # field (QLineEdit) that displays the status
    self.status_text.setReadOnly(True)  # read-only: only program output appears here
    self.status_text.setFixedWidth(80)  # fix the status field at 80 pixels
    self.hbox0.addWidget(self.status_text)  # place the status field after the particle
    self.hbox0.addStretch(1)  # stretch after the widgets to center them

    self.hbox1 = QHBoxLayout()  # horizontal box layout #1
    self.hbox1.addStretch(1)  # stretch before the widget to reserve space
    self.character_text = QLabel()  # space reserved for the Tamagotchi's face
    self.hbox1.addWidget(self.character_text)  # place the face display in hbox1
    self.hbox1.addStretch(1)  # stretch after the widget to center it
    self.character_text.setPixmap(QPixmap(self.face))  # show the face image

    self.hbox2 = QHBoxLayout()  # horizontal box layout #2
    self.hbox2.addStretch(1)  # stretch before the widgets to reserve space
    self.hbox2.addWidget(QLabel(' 밥을 '))  # "food" label
    self.feed_edit = QLineEdit()  # editable field: how much food to give
    self.feed_edit.setFixedWidth(100)  # fix the food-amount field at 100 pixels
    self.hbox2.addWidget(self.feed_edit)  # place the food-amount field next to the label

    # Group the buttons to avoid repeating the creation code below
    buttonGroups = ['밥 주기', '씻기기', '재우기', '공부시키기', '놀아주기']  # feed, wash, sleep, study, play
    for btnText in buttonGroups:  # create the identical buttons in a loop instead of copy-pasting
        button = Button(btnText, self.button_clicked)
        button.setFixedWidth(100)  # fix each button at 100 pixels
        self.hbox2.addWidget(button)  # add the new button to hbox2
    self.hbox2.addStretch(1)  # stretch after the buttons to center them

    self.hbox3 = QHBoxLayout()  # horizontal box layout #3
    self.hbox4 = QHBoxLayout()  # horizontal box layout #4
    self.hbox5 = QHBoxLayout()  # horizontal box layout #5
    self.hbox6 = QHBoxLayout()  # horizontal box layout #6
    self.hbox7 = QHBoxLayout()  # horizontal box layout #7
    self.all_text = QLineEdit()     # field for the overall gauge
    self.hunger_text = QLineEdit()  # field for the fullness gauge
    self.clean_text = QLineEdit()   # field for the cleanliness gauge
    self.tired_text = QLineEdit()   # field for the fatigue gauge
    self.stress_text = QLineEdit()  # field for the stress gauge
    self.action = Action()  # create the Action object
    self.tamagotchi = Tamagotchi()  # create the Tamagotchi object

    # Group the layouts, labels, fields, and values to avoid repeating the gauge code below
    layoutGroups = [self.hbox0, self.hbox1, self.hbox2, self.hbox3,
                    self.hbox4, self.hbox5, self.hbox6, self.hbox7]
    gaugeGroups = ['종 합 ', '배 부 름 ', '청 결 ', '피 로 ', '스트레스']  # overall, fullness, cleanliness, fatigue, stress
    textGroups = [self.all_text, self.hunger_text, self.clean_text,
                  self.tired_text, self.stress_text]
    valueGroups = [self.action.currentAll, self.action.currentHunger,
                   self.action.currentClean, self.action.currentTired,
                   self.action.currentStress]
    for i in range(len(gaugeGroups)):  # lay out each gauge (label + read-only field) in a loop
        layoutGroups[i + 3].addWidget(QLabel(gaugeGroups[i]))
        textGroups[i].setReadOnly(True)
        layoutGroups[i + 3].addWidget(textGroups[i])
        textGroups[i].setText(valueGroups[i])

    for hbox in layoutGroups:  # nest the horizontal boxes inside the vertical box
        self.vbox.addLayout(hbox)
    self.setLayout(self.vbox)  # make the vertical box the window's main layout

    if self.startGame == True:  # work to perform once the game has started
        self.nameInput()  # ask for the Tamagotchi's name
    self.show()
def getZoneAction(zone, robot):
    ZONES = [Point(50, 50), Point(50, 750), Point(750, 750), Point(750, 50)]
    ZONE = ZONES[zone]
    DISTANCE = math.sqrt((robot.x - ZONE.x)**2 + (robot.y - ZONE.y)**2)
    ANGLE = getAngleFromNorth(ZONE.x - robot.x, ZONE.y - robot.y) - robot.angle
    return Action("move", ANGLE, DISTANCE)
def act(self, state, test=False):
    return Action(random.randint(0, self._num_actions - 1))
def turnSideways(state):
    return Result(state, Action("turn", TURN_ANGLE))
def make_scenarios(self, n: int = 10, threshold=MINUS_INF, log_printing=False):
    #tle_cnt = 0
    #scenario = []
    #task_no = 1
    #death_cnt = 0
    print(f'{self.id} - initialized to make scenarios')
    print(f'Max time step={self.MAX_timestep}')
    print(f'objective: find {n} paths')
    # while complete < n:
    # initialize
    scene = dict()
    self.reset()
    # state: the local object currently in view
    # self.state: the state whose status must be preserved to calculate several things
    state = State.from_state(self.initial_state)
    action = Action(action_id=self.action_ids['Wait'], velocity=np.array([0., 0., 0.]))
    self.agent.action.Update(action)
    scene['observations'] = Stack()
    scene['actions'] = Stack()
    scene['rewards'] = Stack()
    scene['timesteps'] = Stack()
    #time_out = False
    #next_key_input, next_action_id = 'Wait', 0

    def _save_scene_(scene, state_id, postfix):
        time_t = time.strftime('%Y%m%d_%H-%M-%S', time.localtime(time.time()))
        path = f'pkl/scenario/{state_id}/env_{self.id}/'
        if not os.path.exists(path):
            os.makedirs(path)
        scene_filename = path + f'{time_t}_{postfix}.scn'
        save_scene = {}
        for K, V in scene.items():  # K: observations, actions, rewards, timesteps
            save_scene[K] = np.array(V.getTotal())  # list of numpy arrays
            scene[K].pop()  # roll back the last step
        with open(scene_filename, 'wb') as f:
            pickle.dump(save_scene, f)
        return

    visit = np.zeros((25, 101, 101), dtype=bool)

    def isVisited(pos) -> bool:
        x, y, z = int(pos[0]), int(pos[1]), int(pos[2])
        if y > 20:
            return True
        return visit[y, x, z]

    def check_visit(pos, check=True) -> None:
        x, y, z = int(pos[0]), int(pos[1]), int(pos[2])
        visit[y, x, z] = check
        return

    def stepDFS(timestep, state: State, action: Action):
        # Assume the start is never already the goal before generating data
        if self.goal_cnt >= n:
            return
        if timestep > self.MAX_timestep:  # time over
            # It works well even though the scene is empty
            print('Time over')
            scene['observations'].pop()
            state.id = 'death'  # fixed-step timeout
            scene['observations'].push(state.get_state_vector())
            scene['rewards'].push(MINUS_INF)
            scene['timesteps'].push(timestep)
            if 'terminals' not in scene:
                scene['terminals'] = Stack()
            scene['terminals'].push(MINUS_INF)
            # save point
            if self.TL_cnt < 10:
                self.TL_cnt += 1
                _save_scene_(scene, state.id, self.TL_cnt)
                logging(self.logs, self.agent.pos, state, action, timestep, MINUS_INF, self.agent.pos)
                save_log(logger=self.logs, id=self.id, goal_position=self.goal_position,
                         state_id='TL', cnt=self.TL_cnt)
                delogging(self.logs)
            for K in scene.keys():
                scene[K].pop()
            check_visit(self.agent.pos, check=False)
            return
        if timestep > 1 and action.input_key == 'Wait' and \
                (self.agent.stamina >= self.MAX_stamina or state.id == 'wall'):
            # an action that gains nothing
            return

        scene['observations'].push(state.get_state_vector())
        scene['actions'].push(action.get_action_vector())
        action = action.get_action_vector()

        # step
        ns, r, d, agent = self.step(action)
        # npos is used to update the agent and to decide whether the parachute can unfold
        ns = cnv_state_vec2obj(ns)
        action = cnv_action_vec2obj(action)
        scene['rewards'].push(r)
        scene['timesteps'].push(timestep)
        if r < threshold:
            for K in scene.keys():
                scene[K].pop()
            return
        else:
            check_visit(agent.pos)
        logging(self.logs, self.agent.pos, state, action, timestep, r, agent.pos)

        if d == True:  # same as reaching the goal
            # save point
            self.goal_cnt += 1
            if 'dones' not in scene:
                scene['dones'] = Stack()
            scene['dones'].push(r)
            _save_scene_(scene, 'goal', self.goal_cnt)
            print(f'env{self.id} found out {self.goal_cnt} path(s)!')
            save_log(logger=self.logs, id=self.id, goal_position=self.goal_position,
                     state_id='G', cnt=self.goal_cnt)
            delogging(self.logs)
            for K in scene.keys():
                scene[K].pop()
            check_visit(agent.pos, check=False)
            return
        elif ns.id == 'death':
            # save point
            if 'terminals' not in scene:
                scene['terminals'] = Stack()
            scene['terminals'].push(MINUS_INF)
            if self.death_cnt < 50:
                self.death_cnt += 1
                print(f'You Died - {self.id}')
                _save_scene_(scene, 'death', self.death_cnt)
                save_log(logger=self.logs, id=self.id, goal_position=self.goal_position,
                         state_id='D', cnt=self.death_cnt)
                delogging(self.logs)
            for K in scene.keys():
                scene[K].pop()
            check_visit(agent.pos, check=False)
            return

        action_list = self.get_valid_action_list(ns.id, agent.stamina)
        np.random.shuffle(action_list)
        # PRINT_DEBUG
        if log_printing == True:
            print(f'state: {state.id}->{ns.id}')
            print(f'action: {action.input_key}')
            print(f'agent: {self.agent.pos}->{agent.pos}')
            print(f'valid key list: {action_list}')
        for next_action_key_input in action_list:
            passing_agent = Agent.from_agent(agent)
            if ns.id == 'air' and 'j' in next_action_key_input:
                if passing_agent.stamina <= 0 or self.canParachute(passing_agent.pos) == False:
                    continue
            elif ns.id == 'wall' and state.id != 'wall':
                passing_agent.update_direction(action.velocity)
            velocity, stamina_consume, acting_time, given = get_next_action(
                ns.id, next_action_key_input, self.action_ids[next_action_key_input],
                prev_velocity=action.velocity)
            next_action = Action.from_action(action)
            next_action.action_update(self.action_ids[next_action_key_input],
                                      next_action_key_input, stamina_consume,
                                      acting_time, passing_agent.dir, velocity, given)
            self.agent.Update(passing_agent)
            self.agent.update_action(next_action)
            self.state.Update(ns)
            stepDFS(timestep + 1, state=ns, action=next_action)
            self.agent.Update(agent)
            self.state.Update(state)
            if self.goal_cnt >= n:
                break
        for K in scene.keys():
            scene[K].pop()
        delogging(self.logs)
        check_visit(self.agent.pos, check=False)
        return

    stepDFS(timestep=1, state=state, action=action)

    """
    for t in range(self.MAX_timestep):
        # PRINT_DEBUG
        if log_printing == True:
            print(f'before: s_id={self.state.id}, pos={self.agent.pos}')
        # step
        action = action.get_action_vector()
        ns, r, done, next_agent = self.step(action)
        action = cnv_action_vec2obj(action)
        next_pos = next_agent.get_current_pos()
        logging(self.logs, self.agent.pos, self.state, action, timestep=t+1, reward=r, next_pos=next_pos)
        # PRINT_DEBUG
        if log_printing == True:
            print(f'after : s_id={ns.id}, pos={next_pos}, action={action.input_key}')
            print('='*50)
        scene['rewards'].push(r)
        scene['timesteps'].push(ns.spend_time)
        if done == True:
            if 'dones' not in scene:
                scene['dones'] = Stack()
            scene['dones'].push(r)
        elif t == self.MAX_timestep - 1:
            tle_cnt += 1
            time_out = True
            print(f'Time over. - {task_no}')
            #print('failed:agent({}) / goal({})'.format(self.agent.get_current_position(), self.goal_position))
            '''if 'terminals' not in scene:
                scene['terminals'] = []
            scene['terminals'].append(1)'''
            break
        # calculate the next situation
        state = ns  # ok
        if ns.id == 'death' or ns.id == 'goal':
            #scenario.append(scene)
            if ns.id == 'death':
                death_cnt += 1
                #print(f'You Died. - {task_no}')
                scene['terminals'] = [1]
                scene['rewards'][-1] = r = -999999
            break
        #scenario.append(scene)
        # Randomly generate the next action and reset the basic parameters.
        next_key_input, next_action_id = self.get_softmax_action(before_key_input=next_key_input)
        stamina_consume = base_stamina_consume  # recovery value, -4.8
        acting_time = base_acting_time  # 1.3 sec
        # Adjust the parameter values case by case.
        velocity = None
        given = 'None'
        if ns.id == 'air':
            stamina_consume = 0  # no recover, no consume
            if self.canParachute(next_pos) == True:
                next_key_input, next_action_id = self.get_softmax_action(before_key_input=next_key_input, only=['Wait', 'j'])
            else:
                next_key_input, next_action_id = 'Wait', self.action_ids['Wait']
                self.update_softmax_prob(idx=next_action_id)
        elif ns.id == 'field':
            if 's' in next_key_input:  # sprint
                stamina_consume = 20
                acting_time = 1
            if 'j' in next_key_input:
                stamina_consume = 1 if stamina_consume == base_stamina_consume else stamina_consume + 1
        elif ns.id == 'wall':
            stamina_consume = 10
            if self.state.id != 'wall':
                self.agent.update_direction(action.velocity)  # switch direction in the x-z plane
            # Can only be W, S, and Wj
            next_key_input, next_action_id = self.get_softmax_action_vWall()
            given = 'wall'
            if 'W' in next_key_input:
                velocity = np.array([0., 1., 0.])
            else:  # 'S'
                velocity = np.array([0., -1., 0.])
            if 'j' in next_key_input:
                stamina_consume = 25
                velocity *= 2
        elif ns.id == 'parachute':
            next_key_input, next_action_id = self.get_softmax_action(next_key_input, only=['W', 'A', 'S', 'D', 'WA', 'WD', 'SA', 'SD', 'j'])
            stamina_consume = 2
            given = 'parachute'
        # Note: see the parameter table for each concrete value
        self.state.Update(ns)
        self.agent.update_position(next_pos)
        # The return value of action_update is newly constructed, so this is okay.
        action.action_update(next_action_id, next_key_input, stamina_consume, acting_time, self.agent.dir, velocity=velocity, given=given)
        self.agent.action.Update(action)
        scene['observations'].append(self.state.get_state_vector())
        scene['actions'].append(action.get_action_vector())

    # steps ended.
    if log_printing == True:
        print_log()
    for key in scene.keys():
        if key != 'observations' and key != 'actions':
            scene[key] = np.array(scene[key])  # make {key: np.array(), ...}
    #scenario.append(scene)
    # Save each scene to its own file instead of keeping the scenes in a scenario array
    if not time_out and (ns.id == 'goal' or death_cnt <= 95):
        time_t = time.strftime('%Y%m%d_%H-%M-%S', time.localtime(time.time()))
        path = f'pkl/scenario/{ns.id}/env_{self.id}/'
        if not os.path.exists(path):
            os.makedirs(path)
        scene_filename = path + f'{time_t}.scn'
        for scene_key in scene:
            scene[scene_key] = np.array(scene[scene_key])
        with open(scene_filename, 'wb') as f:
            pickle.dump(scene, f)
        if ns.id == 'goal':
            complete += 1
            print(f'complete - {complete} / {n}')
            save_log(self.logs, self.id, self.goal_position, task_no)
    if log_printing == True:
        print_log()
    """

    # self.dataset.append(scenario)  # probably unused
    print(f'env{self.id} succeeded with {self.goal_cnt}.')
    self.logs.clear()
    for K in scene.keys():
        scene[K].clear()
    return self.goal_cnt
def moveBackward(markers, state, zone):
    print("Backwards")
    return Result(state, Action("move", 0, -100))
def process(self):
    for _, game_map in self.world.get_component(Map):
        for _, (player, player_position) in self.world.get_components(Player, Position):
            entities = []
            for entity, (monster, _) in self.world.get_components(Monster, Adjacent):
                entities.append((monster.threat, entity, monster))
            if entities:
                entities.sort(reverse=True)
                name = None
                for _, entity, monster in entities[:player.number_of_attacks]:
                    self.world.add_component(entity, AttackTarget())
                    if name is None:
                        name = monster.name
                    else:
                        name = "multiple enemies"
                player.attack_action = Action(
                    action_type=ActionType.ATTACK,
                    rage=+2,
                    nice_name=f"Attack {name}",
                )
                return
            target = move_dijkstra(self.world, game_map, player_position, DijkstraMap.MONSTER)
            if target:
                player.attack_action = Action(
                    action_type=ActionType.MOVE,
                    rage=+1,
                    target=target,
                    nice_name="Charge",
                )
                return
            for _, (_, monster) in self.world.get_components(Boss, Monster):
                target = move_dijkstra(self.world, game_map, player_position, DijkstraMap.EXPLORE)
                if target:
                    player.attack_action = Action(
                        action_type=ActionType.MOVE,
                        rage=-1,
                        target=target,
                        nice_name=f"Find {monster.name}",
                    )
                    return
            for entity, (position, _, _) in self.world.get_components(Position, Stairs, Coincident):
                player.attack_action = Action(
                    action_type=ActionType.USE_STAIRS,
                    rage=-1,
                    target=target,
                    nice_name="Use stairs",
                )
                return
            target = move_dijkstra(self.world, game_map, player_position, DijkstraMap.STAIRS)
            if target:
                player.attack_action = Action(
                    action_type=ActionType.MOVE,
                    rage=-1,
                    target=target,
                    nice_name="Find stairs",
                )
                return
            target = move_dijkstra(self.world, game_map, player_position, DijkstraMap.EXPLORE)
            if target:
                player.attack_action = Action(
                    action_type=ActionType.MOVE,
                    rage=-1,
                    target=target,
                    nice_name="Explore",
                )
                return
            player.attack_action = Action(
                action_type=ActionType.WAIT,
                rage=-1,
                nice_name="Wait",
            )
def generate_stats(self):
    tfold_blob = None
    if self.is_remote:
        self.get_remote_model(self.input_path, self.remote_path, self.model_id, self.model_suffix)

    # Make directory for resulting data and graphs
    os.makedirs(self.output_path, exist_ok=True)

    # History graphs
    try:
        history = pickle.load(open(os.path.join(self.input_path, self.model_id + "-history.p"), "rb"))
        self.generate_history_graphs(history)
    except FileNotFoundError:
        print("File not found; skipping history graphs")

    # Rank graphs
    try:
        tfold_blob = pickle.load(open(os.path.join(self.input_path, self.model_id + "-t-ranks.p"), "rb"))
        self.generate_ranks_graphs(tfold_blob)
    except FileNotFoundError:
        print("File not found; skipping rank graphs")

    # Testrank graphs
    try:
        tfold_blob = pickle.load(open(os.path.join(self.input_path, self.model_id + "-bestrank-testrank.p"), "rb"))
        self.generate_testrank_graphs(tfold_blob)
    except FileNotFoundError:
        print("File not found; skipping testrank graphs")

    # Model graphs
    try:
        if tfold_blob is not None and "conf" in tfold_blob:
            # TODO hack because some old blobs don't have use_bias
            insert_attribute_if_absent(tfold_blob["conf"], "use_bias", True)
            insert_attribute_if_absent(tfold_blob["conf"], "batch_norm", True)
            insert_attribute_if_absent(tfold_blob["conf"], "cnn", False)
            insert_attribute_if_absent(tfold_blob["conf"], "metric_freq", 10)
            insert_attribute_if_absent(tfold_blob["conf"], "regularizer", None)
            insert_attribute_if_absent(tfold_blob["conf"], "reglambda", 0.001)
            insert_attribute_if_absent(tfold_blob["conf"], "key_low", 2)
            insert_attribute_if_absent(tfold_blob["conf"], "key_high", 3)
            insert_attribute_if_absent(tfold_blob["conf"], "loss_type", "correlation")
            insert_attribute_if_absent(tfold_blob["conf"], "leakage_model",
                                       LeakageModelType.HAMMING_WEIGHT_SBOX)
            insert_attribute_if_absent(tfold_blob["conf"], "input_type", AIInputType.SIGNAL)
            insert_attribute_if_absent(tfold_blob["conf"], "n_hidden_nodes", 256)
            insert_attribute_if_absent(tfold_blob["conf"], "n_hidden_layers", 1)
            insert_attribute_if_absent(tfold_blob["conf"], "lr", 0.0001)
            insert_attribute_if_absent(tfold_blob["conf"], "activation", "leakyrelu")

            # Coerce any string actions into Action objects
            actions = []
            for action in tfold_blob["conf"].actions:
                if isinstance(action, str):
                    actions.append(Action(action))
                else:
                    actions.append(action)
            tfold_blob["conf"].actions = actions

            model = ai.AI(model_type=self.model_id, conf=tfold_blob["conf"])
            model.load()
            self.generate_model_graphs(model.model)
        else:
            print("No tfold blob containing conf. Skipping model graphs.")
    except OSError:
        print("File not found; skipping model graphs")