def __init__(self, **kw):
    super(Object, self).__init__()
    # Simulation parameters taken from the keyword arguments.
    self.step = kw['step']
    self.pos_x = kw['pos'][0]
    self.pos_y = kw['pos'][1]
    self.vel_vx = kw['vel'][0]
    self.vel_vy = kw['vel'][1]
    self.count = kw['count']
    self.color = kw['color']
    # Per-step coordinates for every particle.
    self.COORDS_X = np.ndarray((self.step, self.count))
    self.COORDS_Y = np.ndarray((self.step, self.count))
    self.interect = inter.Interaction(x0=self.pos_x, vx0=self.vel_vx,
                                      y0=self.pos_y, vy0=self.vel_vy)
    print('Particle number: ', self.count)
    print('solve_func', self.interect.solve_func(self.count)[0])
    print('solve_func[0][0]', self.interect.solve_func(self.count)[0][0])
    for j in range(self.step):
        for k in range(self.count):
            self.COORDS_X[j, k] = float(
                self.interect.solve_func(self.count)[0][k])
            self.COORDS_Y[j, k] = float(
                self.interect.solve_func(self.count)[1][k])
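# --- Usage sketch (not part of the snippet above) ---
# A minimal, hypothetical construction of the class whose __init__ is shown
# above; the class name `Object` is taken from the super() call, and every
# keyword value below is an illustrative assumption, not a project default.
particles = Object(
    step=100,            # number of simulation steps to record
    pos=(0.0, 0.0),      # initial (x, y) position
    vel=(1.0, -0.5),     # initial (vx, vy) velocity
    count=10,            # number of particles
    color='blue',        # drawing color
)
print(particles.COORDS_X.shape)   # expected: (100, 10)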
def __init__(self):
    super().__init__()
    self.mPath = ""
    self.sPath = ""
    self.interaction = interaction.Interaction()
    self.createmap_bt = tkinter.Button(
        self, text=u"Create maps", state='disabled',
        command=lambda: mapping.create_maps(self.sPath, self.mPath))
    self.run_bt = tkinter.Button(
        self, text=u"Start mapping", state='disabled',
        command=lambda: interaction.background(
            self.interaction.start_interactive_mapping,
            (self.sPath, self.mPath)))
    self.start_bt = tkinter.Button(
        self, text=u"Start recording", state='normal',
        command=lambda: self.interaction.start_recording())
    self.stop_bt = tkinter.Button(
        self, text=u"Stop recording", state='normal',
        command=lambda: self.interaction.stop_recording())
    self.close_app = tkinter.Button(
        self, text=u"Close APP", state='normal',
        command=lambda: self.destroy())
    self.parent = None
    self.initialize()
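# --- Layout sketch (not part of the snippet above) ---
# The initialize() call at the end presumably lays out the widgets; a minimal,
# assumed version using the standard tkinter grid geometry manager could look
# like this (the row/column placement is purely illustrative).
def initialize(self):
    for row, button in enumerate((self.createmap_bt, self.run_bt,
                                  self.start_bt, self.stop_bt,
                                  self.close_app)):
        button.grid(row=row, column=0, sticky='ew', padx=4, pady=2)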
def initialize(pid, device, flags, comm, share_comm):
    message = 'initialize process: {:d} with GPU: {} game: {}'.format(
        pid, device, flags.rom)
    comm.send([-1, 'print', message], dest=flags.threads)

    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = device[-1]

    np.random.seed(flags.seed)
    tf.set_random_seed(flags.seed)

    try:
        import ale_python_interface
    except ImportError:
        import atari_py.ale_python_interface as ale_python_interface

    # initialize ALE environment
    if flags.rom.endswith('.bin'):
        rom = flags.rom
    else:
        rom = "%s.bin" % flags.rom
    full_rom_path = os.path.join(flags.roms_path, rom)

    ale = ale_python_interface.ALEInterface()
    ale.setInt('random_seed', flags.seed)
    ale.setBool('sound', False)
    ale.setBool('display_screen', False)
    ale.setFloat('repeat_action_probability',
                 flags.repeat_action_probability)
    ale.loadROM(full_rom_path)
    num_actions = len(ale.getMinimalActionSet())

    # adjust flags
    flags.num_actions = num_actions
    flags.logs_path = os.path.join(flags.logs_path,
                                   '#' + str(pid) + '_' + flags.rom)
    tf.gfile.MakeDirs(flags.logs_path)

    # print settings
    setting_file = open(os.path.join(flags.logs_path, 'flags.txt'), mode='w+')
    for key, item in flags.__flags.items():
        setting_file.write(key + ' : ' + str(item) + '\n')

    # initialize agent
    if flags.ot:
        network = neural_networks.OptimalityTighteningNetwork(
            pid, flags, device, share_comm)
    else:
        network = neural_networks.DeepQNetwork(pid, flags, device, share_comm)
    setting_file.write(network.nn_structure_file)
    setting_file.close()

    if flags.ot:
        agent = agents.OptimalityTigheningAgent(pid, network, flags, comm,
                                                share_comm)
    else:
        agent = agents.QLearning(pid, network, flags, comm, share_comm)

    interaction.Interaction(pid, ale, agent, flags, comm).start()
def start(environment_parameter, agent_parameter, showMessage):
    """Open an Interaction interface with the given parameters, simulate the
    experiment, and return the name of the saved results file.
    """
    agent_ = agn.Agent(agent_parameter)
    agent = agent_.agent
    environment = env.Environment(environment_parameter)
    interaction = intrc.Interaction(agent, environment, agent_parameter,
                                    environment_parameter)
    interaction.run_save(showMessage)
    file_name = interaction.file_name
    print(file_name)
    return file_name
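# --- Usage sketch (not part of the function above) ---
# A hypothetical call to start(); the parameter structures are assumed to match
# what initialization.config() returns in the script further below, where the
# per-agent dict is pulled out of agent_parameter[1] by agent type.
environment_parameter, agent_parameter = initialization.config()
agent_conf = agent_parameter[1]['viterbi']      # assumed structure, see below
agent_conf['agent_type'] = 'viterbi'
saved_file = start(environment_parameter, agent_conf, showMessage="")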
def main():
    global driver
    print("processing")
    driver = webdriver.Chrome("D:/chromedriver.exe")
    log = login.Login(driver, username, password)
    log.signin()
    # link for the channel to get data of followers from
    driver.get('https://mobile.twitter.com/unity3d')
    time.sleep(3)
    interact = interaction.Interaction(driver)
    interact.clicker()
    interact.follow()
    time.sleep(5)
    print('kana kata')
def move(self, action: str) -> None:
    """Interprets the order given by get_action and moves the character."""
    # Arrow characters map to (d_x, d_y) offsets; digit characters select
    # which entry of character_list the following moves apply to, so an
    # action string is expected to start with a digit.
    move_dict = {'^': (0, 1), '<': (-1, 0), 'v': (0, -1), '>': (1, 0)}
    for i in range(self.nb_player):
        move_dict[str(i + 1)] = i + 1
    for char in action:
        if isinstance(move_dict[char], tuple):
            (d_x, d_y) = move_dict[char]
            inter = it.Interaction(self, index_character, moving_character,
                                   d_x, d_y)
            inter.interaction()
        if isinstance(move_dict[char], int):
            moving_character = self.character_list[move_dict[char]]
            index_character = move_dict[char]
    self.wincheck()
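# --- Usage sketch (not part of the method above) ---
# Illustrative only: based on how move_dict is built above, a digit in the
# action string selects a character (by its 1-based key) and the arrow
# characters then apply (d_x, d_y) offsets to it. `board` is a hypothetical
# instance of the enclosing class.
board.move("1^^>")   # move character 1 by (0, 1) twice, then by (1, 0)
board.move("2v")     # move character 2 by (0, -1)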
def main():
    global driver
    print("processing!")
    driver = webdriver.Chrome("D:/chromedriver.exe")
    l = login.Login(driver, username, password)
    l.signin()
    driver.get('https://www.instagram.com/unitytechnologies/')
    col = collector.Collector(driver)
    col.get_followers()
    print("followers: ", col.get_num_of_followers())
    print("post: ", col.get_num_of_post())
    print("following: ", col.get_num_of_following())
    followButton = interaction.Interaction(driver)
    followButton.follow()
    time.sleep(60)
#!/usr/bin/env python
'''This runs the PbD system (i.e. the backend).'''

# Core ROS imports come first.
import roslib
roslib.load_manifest('pr2_pbd_interaction')
import rospy

if __name__ == '__main__':
    # Check whether we want code coverage, and start if so.
    use_coverage = rospy.get_param('/pr2_pbd_interaction/coverage',
                                   default=False)
    if use_coverage:
        from coverage import coverage
        cov = coverage(
            include="*/pr2_pbd_interaction/src/*.py",  # source files
            omit="*/src/pr2_pbd_interaction/*"  # generated files
        )
        cov.start()

    # Run the system
    import interaction
    interaction_ = interaction.Interaction()
    rospy.spin()

    # System execution finished; generate coverage report if enabled.
    if use_coverage:
        cov.stop()
        cov.save()
        cov.html_report(title='PR2 PbD code coverage')
import initialization
import initialization_detail
import environment as env
import agent as agn
import interaction as intrc

environment_detail = initialization_detail.environment_details()
environment_parameter, agent_parameter = initialization.config()

# Give the file_name to just plot a previously saved simulation
file_name = None

if file_name is None:
    agent_types = ('positive_h', 'negative_h', 'viterbi', 'absorbing')
    agent_ID = 0
    agent_parameter[1][
        agent_types[agent_ID]]['agent_type'] = agent_types[agent_ID]
    agent_parameter_ = agent_parameter[1][agent_types[agent_ID]]
    agent_ = agn.Agent(agent_parameter_)
    agent = agent_.agent
    environment = env.Environment(environment_parameter)
    interaction = intrc.Interaction(agent, environment, agent_parameter_,
                                    environment_parameter)
    interaction.run_save("")
    file_name = interaction.file_name
    print(file_name)

plt_ = intrc.Plot_results(file_name)
plt_.showResults()
plt_.print_setting()