Example #1
  def launch(self, zh, workLogNode, actionNode, statusNode):
    state = {}
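    # zookeeper.get returns (payload, stat); the payload is the JSON-encoded action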
    data = zookeeper.get(zh, actionNode, 0)
    jsonp = simplejson.loads(data[0])
    state['cmdPath'] = jsonp['cmdPath']
    state['actionPath'] = actionNode
    state['actionId'] = jsonp['actionId']
    state['host'] = self.znode
    state['status'] = 'STARTING'
    self.update(zh, workLogNode, state)

    logger.info("Launch: "+simplejson.dumps(jsonp))
    dispatcher = Runner()
    try:
      result = dispatcher.run(jsonp)
      logger.info("Result: "+simplejson.dumps(result))
      if "exit_code" in result and result['exit_code']==0:
        state['status']='SUCCEEDED'
      else:
        state['status']='FAILED'
    except:
      logger.exception('Execution error: '+actionNode)
      state['status']='FAILED'
    self.update(zh, workLogNode, state)
    self.enqueue(zh, statusNode, state)
Example #2
 def __init__(
         self,
         max_itr,
         max_r_norm,
         max_diff_norm,
         la=-1,
         warm_start='off',  # ||f(x+r) - f(x) + p||_{2}^{2} 
         warm_start_factor=0,
         perp_start='rand',
         perp_start_factor=1,
         optimizer='SGA',
         momentum=0.9,
         smoothing_eps=1e-8,
         learning_rate=0.01,
         verbose=True,
         mask=None,
         coil_sens=None):
     Runner.__init__(
         self,
         max_itr,
         max_r_norm,
         max_diff_norm,
         la,
         warm_start,  # ||f(x+r) - f(x) + p||_{2}^{2} 
         warm_start_factor,
         perp_start,
         perp_start_factor,
         optimizer,
         momentum,
         smoothing_eps,
         learning_rate,
         verbose,
         mask)
     self.coil_sens = coil_sens
Example #3
 def objective(self, trial):
     hyper_params = self.get_hyperparameters(trial=trial)
     runner = Runner(device=self.device,
                     hyper_params=hyper_params,
                     study_name=self.study_name)
     score = runner.run()
     return score
Example #4
def run_training(maze_size=(6, 6),
                 trap_number=1,
                 epoch=20,
                 epsilon0=0.3,
                 alpha=0.3,
                 gamma=0.9):

    # # Optional parameters:
    # epoch = 20

    # # Initial probability of random exploration
    # epsilon0 = 0.3

    # # Learning rate
    # alpha = 0.3

    # # Discount factor
    # gamma = 0.9

    # # Maze size
    # maze_size = (6, 6)

    # # Number of traps
    # trap_number = 1

    g = Maze(maze_size=maze_size, trap_number=trap_number)
    r = Robot(g, alpha=alpha, epsilon0=epsilon0, gamma=gamma)
    r.set_status(learning=True)

    runner = Runner(r, g)
    runner.run_training(epoch, display_direction=True)
    # runner.generate_movie(filename = "final1.mp4")  # You can comment out this line to speed up execution, but then you won't be able to watch the video.

    # runner.plot_results()
    return runner
Example #5
    def test_run_search(self):
        # are results tracked?
        # does it run the right number of iterations?
        # does pause continue work?

        parameter_space_path = os.path.splitext(
            self.parameter_space_file_path)[0]

        results_log_file_path = os.path.join(parameter_space_path,
                                             'result_log.json')

        self.assertFalse(os.path.isfile(results_log_file_path))

        self.run_search.run()
        self.assertTrue(os.path.isfile(results_log_file_path))

        with open(results_log_file_path) as f:
            probe_data = json.load(f)

        self.assertEqual(probe_data["random_count"], 3)
        self.assertEqual(len(probe_data["probe_set"]), 2)

        argv_search2 = ["theprogram", self.parameter_space_file_path, "6"]

        self.run_search = Runner(argv_search2, self.learning_dict)
        self.run_search.run()

        with open(results_log_file_path) as f:
            probe_data = json.load(f)

        self.assertEqual(probe_data["random_count"], 0)
        self.assertEqual(len(probe_data["probe_set"]), 6)
Example #6
 def POST(self, cmd):
     web.header("Content-Type", "application/json")
     data = web.data()
     jsonp = simplejson.loads(data)
     jsonp["cmd"] = cmd
     dispatcher = Runner()
     return dispatcher.run(jsonp)
Example #7
  def launch(self, zh, workLogNode, actionNode, statusNode):
    state = {}
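    # zookeeper.get returns (payload, stat); the payload is the JSON-encoded action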
    data = zookeeper.get(zh, actionNode, 0)
    jsonp = simplejson.loads(data[0])
    state['cmdPath'] = jsonp['cmdPath']
    state['actionPath'] = actionNode
    state['actionId'] = jsonp['actionId']
    state['host'] = self.znode
    state['status'] = 'STARTING'
    self.update(zh, workLogNode, state)

    logger.info("Launch: "+simplejson.dumps(jsonp))
    dispatcher = Runner()
    try:
      result = dispatcher.run(jsonp)
      logger.info("Result: "+simplejson.dumps(result))
      if "exit_code" in result and result['exit_code']==0:
        state['status']='SUCCEEDED'
      else:
        state['status']='FAILED'
    except:
      logger.exception('Execution error: '+actionNode)
      state['status']='FAILED'
    self.update(zh, workLogNode, state)
    self.enqueue(zh, statusNode, state)
Example #8
def test_input(text: str):
    lexer = Lexer()
    tokens = lexer.tokenize(text)

    print('''
The lexer input was:
{}

The Tokenized output from it was:
{}
'''.format(text, tokens))

    parser = Parser(tokens)
    node_tree = parser.parse()

    print('''
The Parser then created this Node Tree:
{}
'''.format(node_tree))

    runner = Runner(node_tree, infoLevel)
    print('''
The Runner ran the node tree, and came up with this result:
{}
'''.format(runner.run()))
Example #9
    def find_adversarial_perturbation(self, f, dQ, batch, only_real=False):
        """ Search for adversarial perturbation.
        
        An extension of Runner.find_adversarial_perturbation(...) with the 
        additional parameter ``only_real``, which makes the algorithm only 
        search for real adversarial perturbations. 
    
        :param only_real: Search only for real perturbations.

        """
        if only_real:
            r_is_empty = not self.r
            if r_is_empty:
                ps_factor = self.perp_start_factor
                if self.perp_start == 'rand':
                    rr = ps_factor * np.random.rand(
                        *batch.shape).astype('float32')
                elif self.perp_start == 'randn':
                    rr = ps_factor * np.random.randn(
                        *batch.shape).astype('float32')
                elif self.perp_start == 'ones':
                    rr = ps_factor * np.ones(batch.shape).astype('float32')
                else:  # "off"
                    rr = ps_factor * np.zeros(batch.shape).astype('float32')

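                # keep only the real part of the initial perturbation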
                rr = to_lasagne_format(np.real(from_lasagne_format(rr)))
                self.r.append(rr)

        Runner.find_adversarial_perturbation(self, f, dQ, batch)
Example #10
 def POST(self, cmd):
     web.header('Content-Type', 'application/json')
     data = web.data()
     jsonp = simplejson.loads(data)
     jsonp['cmd'] = cmd
     dispatcher = Runner()
     return dispatcher.run(jsonp)
Example #11
 def POST(self, cmd):
     web.header('Content-Type', 'application/json')
     data = web.data()
     jsonp = simplejson.loads(data)
     jsonp['cmd'] = cmd
     dispatcher = Runner()
     return dispatcher.run(jsonp)
Example #12
class TestAgent(unittest.TestCase):
    def setUp(self):
        """
        The case file is a representation of the IEEE case14 power grid.
        :return:
        """
        self.tolvect = 1e-2
        self.tol_one = 1e-5

        self.init_grid_path = os.path.join(PATH_DATA_TEST_PP, "test_case14.json")  # full path to the grid state, e.g. "./data/test_Pandapower/case14.json"
        self.path_chron = PATH_ADN_CHRONICS_FOLDER
        self.parameters_path = None
        self.names_chronics_to_backend = {"loads": {"2_C-10.61": 'load_1_0', "3_C151.15": 'load_2_1',
                                                    "14_C63.6": 'load_13_2', "4_C-9.47": 'load_3_3',
                                                    "5_C201.84": 'load_4_4',
                                                    "6_C-6.27": 'load_5_5', "9_C130.49": 'load_8_6',
                                                    "10_C228.66": 'load_9_7',
                                                    "11_C-138.89": 'load_10_8', "12_C-27.88": 'load_11_9',
                                                    "13_C-13.33": 'load_12_10'},
                                          "lines": {'1_2_1': '0_1_0', '1_5_2': '0_4_1', '9_10_16': '8_9_2',
                                                    '9_14_17': '8_13_3',
                                                    '10_11_18': '9_10_4', '12_13_19': '11_12_5', '13_14_20': '12_13_6',
                                                    '2_3_3': '1_2_7', '2_4_4': '1_3_8', '2_5_5': '1_4_9',
                                                    '3_4_6': '2_3_10',
                                                    '4_5_7': '3_4_11', '6_11_11': '5_10_12', '6_12_12': '5_11_13',
                                                    '6_13_13': '5_12_14', '4_7_8': '3_6_15', '4_9_9': '3_8_16',
                                                    '5_6_10': '4_5_17',
                                                    '7_8_14': '6_7_18', '7_9_15': '6_8_19'},
                                          "prods": {"1_G137.1": 'gen_0_4', "3_G36.31": "gen_2_1", "6_G63.29": "gen_5_2",
                                                    "2_G-56.47": "gen_1_0", "8_G40.43": "gen_7_3"},
                                          }
        self.gridStateclass = Multifolder
        self.backendClass = PandaPowerBackend
        self.runner = Runner(init_grid_path=self.init_grid_path,
                             path_chron=self.path_chron,
                             parameters_path=self.parameters_path,
                             names_chronics_to_backend=self.names_chronics_to_backend,
                             gridStateclass=self.gridStateclass,
                             backendClass=self.backendClass,
                             rewardClass=L2RPNReward)

    def test_one_episode(self):
        cum_reward, timestep = self.runner.run_one_episode()
        assert int(timestep) == 287
        assert np.abs(cum_reward - 5739.951023) <= self.tol_one

    def test_3_episode(self):
        res = self.runner.run_sequential(nb_episode=3)
        assert len(res) == 3
        for i, cum_reward, timestep, total_ts in res:
            assert int(timestep) == 287
            assert np.abs(cum_reward - 5739.951023) <= self.tol_one

    def test_3_episode_3process(self):
        res = self.runner.run_parrallel(nb_episode=3, nb_process=3)
        assert len(res) == 3
        for i, cum_reward, timestep, total_ts in res:
            assert int(timestep) == 287
            assert np.abs(cum_reward - 5739.951023) <= self.tol_one
Example #13
def main():
    while True:
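        # fetch the next input, run the level, and send back the answer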
        param = get_data()
        data = process_data(param)
        runner = Runner(data)
        print("Current Level: ", runner.level)
        ans = runner.run()
        send_data(ans)
Example #14
def run_code(code: str):
    lexer = Lexer()
    tokens = lexer.tokenize(code)

    parser = Parser(tokens)
    node_tree = parser.parse()

    runner = Runner(node_tree)
    return runner.run()
Example #15
def main():
    runner = Runner('tritype')
    t = Tritype(runner)
    results = t.runAll()
    print "GA complete on all paths, running all inputs..."
    for result in results:
        runner.runInput(result)
    print "Generating coverage report..."
    runner.dumpCoverageReporting()
    print "Done."
Example #16
 def single_run(self, lock=None, hyper_params=None):
     # avoid the mutable-default-argument pitfall
     if hyper_params is None:
         hyper_params = {}
     if lock is not None:
         lock.acquire()
     device = self.get_device()
     if lock is not None:
         lock.release()
     runner = Runner(device=device,
                     hyper_params=hyper_params,
                     study_name=self.study_name)
     # runner = TestModel(device=device, hyper_params=hyper_params, study_name=self.study_name)
     runner.run()
     self.release_device(device)
Example #17
    def setUp(self):

        # mock parameter space
        self.parameter_space_file_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'resources', 'runner',
            'test_parameter_space.json')
        self.result_store_file_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'resources', 'runner',
            'test_parameter_space')
        self.parameter_space_result_path = os.path.splitext(
            self.parameter_space_file_path)[0]

        self.tearDown()

        # mock argvs

        self.argv_search = ["theprogram", self.parameter_space_file_path, "2"]

        self.argv_probe = [
            "theprogram", self.parameter_space_file_path,
            "--shallow_parameter", "1", "--deep_parameter/parameter", "4",
            "--deep_complex_parameter/parameter", "7",
            "--deep_complex_parameter/parameter2", "10"
        ]

        # mock learning_dictionary

        self.param_trace = []

        trace_counter = 0

        # constructing like this will capture self in closure
        def trace_params(params):
            self.param_trace.append(params)
            nonlocal trace_counter
            trace_counter = trace_counter + 1
            return trace_counter, test_Runner.mock_results

        self.trace_params = trace_params

        self.learning_dict = {"test_learning": trace_params}

        # runner

        self.run_probe = Runner(self.argv_probe, self.learning_dict)
        self.run_search = Runner(self.argv_search, self.learning_dict)
Example #18
    def initEnvironment(self):
        Logger.log("Resetting Environment...", "warning")
        self.resetEnvironment()
        Logger.log("Initing Environment...", "warning")
        ##### COULD PASS THE FLAG INTO THE CONTAINER AS A PARAM
        splitPorts = self.ports.split()
        imageNames = ["one", "two", "three"]
        sshPorts = ["2222", "2223", "2224"]
        flags = []
        print(splitPorts)
        # create docker containers for game pieces
        if (self.mode == "docker"):
            #Runner.Run(["bash","./buildContainers.sh", str( " ".join(splitPorts) )] )
            print("------going into buildCons " + str(self.ports) + " " +
                  str(self.units))
            os.system("bash ./buildContainers.sh " + str(self.ports) + " " +
                      str(self.units))
            for idx, port in enumerate(splitPorts):
                os.system("touch ./SystemAction/ports")
                Runner.Run([
                    "docker", "run", "-d",
                    "--cidfile=./units/" + imageNames[idx] + ".id", "-p",
                    port + ":" + port, "hc/" + imageNames[idx]
                ])
                os.system("echo " + str(port) + " > ./SystemAction/port_" +
                          str(port))
                # there is an issue with running a copy command upon running container
                # will look into other solutions
        elif (self.mode == "vagrant"):
            Runner.Run(["pwd"])
            #Runner.Run(["bash","./buildVMs.sh"])
            os.system("bash ./buildVMs.sh " + str(self.ports) + " " +
                      str(self.units))
            for idx, port in enumerate(splitPorts):
                Logger.log("Vagrant: per port - " + port, "okblue")

                os.system("touch ./SystemAction/ports")
                getHostPortFromVagrant = "vagrant port $(vagrant global-status | grep virtualbox | awk '{print $1}' | sed -n " + str(
                    idx + 1) + "p) | tail -n1 | awk '{print $4}'"
                Logger.log(getHostPortFromVagrant, "okblue")
                os.system(
                    str(getHostPortFromVagrant) + " > ./SystemAction/port_" +
                    str(port))
                # there is an issue with running a copy command upon running container
                # will look into other solutions
        SystemAction.ActionsForSetup(imageNames, self.mode, splitPorts)
Example #19
def main():
    env = VecFrameStack(make_sf2_env(), 1)
    obs = env.reset()
    n_steps = 128  # 5 * FPS
    options = {
        'network': 'mlp',  # 'impala_cnn'
        'env': env,
        'total_timesteps': 40000000,
        'nsteps': n_steps,  # 5 * FPS,  # TODO: Do we still need to pass nsteps here?
        'q_coef': 1.0,
        'ent_coef': 0.001,
        'max_grad_norm': 10,
        'lr': 7e-4,
        'lrschedule': 'linear',
        'rprop_epsilon': 1e-5,
        'rprop_alpha': 0.99,
        'gamma': 0.99,
        'log_interval': 1000,
        'buffer_size': 50000,
        'replay_ratio': 4,
        'replay_start': 10000,
        'c': 10.0,
        'trust_region': True,
        'delta': 1,
        'alpha': 0.99,
        # 'load_path': MODEL_PATH,
        'save_interval': 1000,
        # neuronal network parameters
        'activation': tf.nn.relu,
        'num_layers': 2,  # 4, 2
        'num_hidden': 48,  # 64, 64
        'layer_norm': False,
    }
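    # two ACER models, presumably one per player (the commented-out step() below samples two actions)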
    models = (
        Acer(**options),
        Acer(**options)
    )
    runner = Runner(env, models, n_steps)
    while True:
        runner.run()
        # obs, rew, done, info = env.step((
        #     env.action_space.sample(),
        #     env.action_space.sample()
        # ))
        # env.render()
        # if done:
        #     obs = env.reset()
    env.close()
Example #20
 def TakeAction(unitName, flag, idx):
     print "taking action for " + unitName
     #docker exec -i -t $(cat $(ls System/units/one.id) ) touch ./file ;
     #docker exec -i -t $(cat $(ls System/units/one.id) ) ls
     #WRITE IT TO SystemAction unitInfo, so javascript can pick it up
     ## {A: {containerID: "dsds", flag: "6gffdsfgf"}, B: {containerID: "dsds", flag: "6gffdsfgf"}}
     # read the ID file for the unit, and assign it to a string for use in this call
     #os.system(serviceCMD)
     try:
         if (SystemAction.mode == "docker"):
             Logger.log("Flag Placer for docker", "okblue")
             with open('units/' + unitName + '.id', 'r') as myfile:
                 unitID = myfile.read().replace('\n', '')
                 Runner.Run([
                     "docker", "exec", "-i", "-t", unitID, "touch",
                     "/" + flag
                 ])
         elif (SystemAction.mode == "vagrant"):
             Logger.log("Flag Placer for Vagrant", "okblue")
             #vboxmanage --nologo guestcontrol "unit_1" --username vagrant --password vagrant run --exe /bin/sh --wait-stdout --wait-stderr -- sh/arg0 -c "echo this is new line"
             flagPlaceCMD = "vboxmanage --nologo guestcontrol 'unit_" + str(
                 idx + 1
             ) + "' --username vagrant --password vagrant run --exe /bin/sh -- sh/arg0 -c 'sudo touch /" + flag + " ' "
             Logger.log(flagPlaceCMD, "okblue")
             os.system(flagPlaceCMD)
     except Exception as inst:
         print(type(inst))
         print(inst.args)
         print(inst)
Example #21
def main():
    workPath = '.'
    binaryName = 'test'
    coverageToolPath = '/Users/zmay/Projects/clang-dev/build/bin/llvm-cov'
    runner = Runner(workPath, binaryName, coverageToolPath)
    t = Test(runner)
    t.runAll()
Example #22
def run(args=sys.argv[1:]):
	global total_res
	parser = optparse.OptionParser()
	parser.add_option("-t", "--tag",
						dest="tags",
						default=None,
						action='append',
						help='Filter cases by tag, e.g. -t smoke -t module1: run cases whose tags contain smoke or module1')

	parser.add_option("-f","--failfast",
						dest="failfast",
						default=False,
						action="store_true",
						help='Stop the run immediately when a case fails.')

	parser.add_option("-r","--rerun",
						dest="rerun",
						default=False,
						action="store_true",
						help='Rerun the cases that failed in the last run')

	parser.add_option("-o","--out_type",
						dest="out_type",
						default="cmd",
						type="string",
						help='Type of output result')

	options, args = parser.parse_args(args)

	if (not TestSuit.test_suit_arr) or (not TestSuit.test_suit_arr[0].test_case_arr):
		sys.exit("no test suit and case!")

	if options.tags:
		for test_case in TestSuit.get_all_case():
			if 'tags' not in test_case.case_dict:
				sys.exit("caseId:[{0}] test case have no tags!".format(test_case.id))

	for test_case in TestSuit.get_all_case():
		if not test_case.case_run_method:
			sys.exit("caseId:[{0}] has no test case run method!".format(test_case.id))

	runner = Runner(TestSuit.test_suit_arr, options.tags, options.failfast, options.rerun, options.out_type)
	total_res = runner.run()

	report_obj = ReportFactory.create_report(options.out_type, total_res)
	report_obj.out_put_report()
Example #23
    def __init__(self, check_round, web_root):
        self.chart_gen = ChartGenerator(web_root)

        # Check runner object - runs checks for services with checkService() method
        # Returns integer 1/0 for check pass/fail
        # Default return is 9001 - if we see this in the database there is an issue with the checks
        self.runner = Runner(check_round)
        self.check()
Example #24
 def __init__(self, game):
     self.game = game
     self.screen = game.get_screen()
     self.screen_width = game.SCREEN_WIDTH
     self.screen_height = game.SCREEN_HEIGHT
     self.background_1 = pygame.image.load('city_final.jpg')
     self.background_2 = pygame.image.load('city_final.jpg')
     self.background_1_x_pos = 0
     self.background_2_x_pos = self.screen_width
     self.music = pygame.mixer.music.load(os.path.abspath('The Sound of Silence.mp3'))
     self.continue_game = True
     self.font = pygame.font.SysFont('monospace', 20, (0, 0, 0))
     self.runner = Runner()
     self.scores = self.runner.get_scores()
     self.chaser = Chaser()
     self.obstacle = Obstacle(self.TRASH_CAN, self.runner.GROUND_LEVEL, self.runner.FIXED_X_POSITION,
             self.screen_width, self.BACKGROUND_SPEED)
     self.__obstacle_cooldown_count = 0
     self.__reset_countdown = 0
Example #25
def train_by_dqn_robot(times, maze_size=5):
    print("start times:", times)

    maze = Maze(maze_size=maze_size)
    """choose Keras or Torch version"""
    robot = KerasRobot(maze=maze)
    # robot = TorchRobot(maze=maze)
    robot.memory.build_full_view(maze=maze)
    """training by runner"""
    runner = Runner(robot=robot)
    runner.run_training(15, 75)
    """Test Robot"""
    robot.reset()
    for _ in range(25):
        a, r = robot.test_update()
        if r < -20:
            print(
                "SUCCESSFUL!",
                "| TIMES:",
                times,
            )
            break
Example #26
    def setUp(self):
        """
        The case file is a representation of the IEEE case14 power grid.
        :return:
        """
        self.tolvect = 1e-2
        self.tol_one = 1e-5

        self.init_grid_path = os.path.join(PATH_DATA_TEST_PP, "test_case14.json")  # full path to the grid state, e.g. "./data/test_Pandapower/case14.json"
        self.path_chron = PATH_ADN_CHRONICS_FOLDER
        self.parameters_path = None
        self.names_chronics_to_backend = {"loads": {"2_C-10.61": 'load_1_0', "3_C151.15": 'load_2_1',
                                                    "14_C63.6": 'load_13_2', "4_C-9.47": 'load_3_3',
                                                    "5_C201.84": 'load_4_4',
                                                    "6_C-6.27": 'load_5_5', "9_C130.49": 'load_8_6',
                                                    "10_C228.66": 'load_9_7',
                                                    "11_C-138.89": 'load_10_8', "12_C-27.88": 'load_11_9',
                                                    "13_C-13.33": 'load_12_10'},
                                          "lines": {'1_2_1': '0_1_0', '1_5_2': '0_4_1', '9_10_16': '8_9_2',
                                                    '9_14_17': '8_13_3',
                                                    '10_11_18': '9_10_4', '12_13_19': '11_12_5', '13_14_20': '12_13_6',
                                                    '2_3_3': '1_2_7', '2_4_4': '1_3_8', '2_5_5': '1_4_9',
                                                    '3_4_6': '2_3_10',
                                                    '4_5_7': '3_4_11', '6_11_11': '5_10_12', '6_12_12': '5_11_13',
                                                    '6_13_13': '5_12_14', '4_7_8': '3_6_15', '4_9_9': '3_8_16',
                                                    '5_6_10': '4_5_17',
                                                    '7_8_14': '6_7_18', '7_9_15': '6_8_19'},
                                          "prods": {"1_G137.1": 'gen_0_4', "3_G36.31": "gen_2_1", "6_G63.29": "gen_5_2",
                                                    "2_G-56.47": "gen_1_0", "8_G40.43": "gen_7_3"},
                                          }
        self.gridStateclass = Multifolder
        self.backendClass = PandaPowerBackend
        self.runner = Runner(init_grid_path=self.init_grid_path,
                             path_chron=self.path_chron,
                             parameters_path=self.parameters_path,
                             names_chronics_to_backend=self.names_chronics_to_backend,
                             gridStateclass=self.gridStateclass,
                             backendClass=self.backendClass,
                             rewardClass=L2RPNReward)
Example #27
def main():
    tsp = TSPRunner()
    knap = KnapsackRunner()
    four = FourPeaksRunner()
    nn = NNRunner()
    runner = Runner()
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h", ["baseline"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit(1)
        elif opt == '--baseline':
            tsp.baseLine()

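    # sweep the hyperparameter settings over a 10x10 grid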
    for i in range(1, 11):
        for j in range(1, 11):
            algs = ('alg', 'all')
            iters = ('iterations', j * 1000)
            runs = ('run', j)
            temp = ('saTemp', i * 10**11)
            cooling = ('saCooling', i * 100)
            pop = ('gaPop', j * 10)
            mate = ('gaMate', j * 5)
            mute = ('gaMutate', j * 2)
            samples = ('mimicSamples', 200)
            tokeep = ('mimicToKeep', 20)
            package = [iters, runs, algs, temp, cooling, pop, mate, mute, samples, tokeep]
            runner.packageBuilder(package)
            #tsp.setPackage(runner.package)
            knap.setPackage(runner.package)
            #four.setPackage(runner.package)
            #tsp.tsp()
            knap.knap()
Example #28
class KnackeredRunner:

    def __init__(self, check_round, web_root):
        self.chart_gen = ChartGenerator(web_root)

        # Check runner object - runs checks for services with checkService() method
        # Returns integer 1/0 for check pass/fail
        # Default return is 9001 - if we see this in the database there is an issue with the checks
        self.runner = Runner(check_round)
        self.check()

    # We want the checks to run once per minute for each team.
    # Using a threaded timer to accomplish this
    def check(self):
        threading.Timer(60, self.check).start()

        # start the new round of checks
        self.runner.increment_round()

        # take note of the round now
        # if the current round does not finish before next one starts, we will enter the wrong round
        current_round = self.runner.round

        database = DataAccess()

        # Get teams from DB, loop through and check each service
        teams = database.get_teams()
        for team in teams:
            for service in team.services:
                self.runner.check_service(service)

        # after all teams are checked, deposit into DB (whole round at once)
        database.add_check_round(teams, current_round)

        # generate chart for web interface
        teams = database.get_scores()
        self.chart_gen.generate_chart(current_round, teams)
Example #29
 def __init__(self, config_id, appium_port='4723', udid=None):
     self.config = Configuration(config_id)
     self.runner = Runner(self.config.pkg_to, self.config.act_to,
                          self.config.no_reset, appium_port, udid)
     self.src_events = Util.load_events(self.config.id, 'base_from')
     self.tid = self.config.id
     self.current_src_index = 0
     self.tgt_events = []
     self.f_target = 0
     self.prev_tgt_events = []
     self.f_prev_target = -1
     # self.is_rerun_required = True
     self.rp = ResourceParser(
         os.path.join(SA_INFO_FOLDER,
                      self.config.id.split('-')[1]))
     self.widget_db = self.generate_widget_db()
     self.cgp = CallGraphParser(
         os.path.join(SA_INFO_FOLDER,
                      self.config.id.split('-')[1]))
     self.invalid_events = defaultdict(list)
     self.nearest_button_to_text = None
     self.idx_src_to_tgt = {}
     self.skipped_match = defaultdict(list)
     self.consider_naf_only_widget = False
Example #30
def main():
    runner = Runner('tritype')
    t = Tritype(runner)
    results = t.runAll()
    print "GA complete on all paths, running all inputs..."
    for result in results:
        runner.runInput(result)
    print "Generating coverage report..."
    runner.dumpCoverageReporting()
    print "Done."
Example #31
def test_different_parameter(alpha_test, gamma_test, epsilon_test, epoch_test):
    g = Maze(maze_size=maze_size, trap_number=trap_number)
    r = Robot(g, alpha=alpha_test, epsilon0=epsilon_test, gamma=gamma_test)
    r.set_status(learning=True)

    runner = Runner(r, g)
    runner.run_training(epoch_test, display_direction=True)
    print("alpha: {}, gamma: {}, epsilon: {}, epoch: {}".format(
        alpha_test, gamma_test, epsilon_test, epoch_test))
    runner.plot_results()
Example #32
def test_run_example():
    runner = Runner()
    runner.remote_process_client.login("Test_Conway")
    runner.remote_process_client.map(0)
    runner.remote_process_client.turn()                 # reset timer on server
    runner.remote_process_client.move(Move(1, 1, 0))
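    # poll the map once per turn; after 11 turns the train should reach position 10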
    for i in range(11):
        response = runner.remote_process_client.map(1)
        print("Position - ", response[1]["train"][0]["position"])
        runner.remote_process_client.turn()
    assert response[1]["train"][0]["position"] == 10
    runner.remote_process_client.move(Move(1, 1, 0))
    for i in range(11):
        response = runner.remote_process_client.map(1)
        print("Position - ", response[1]["train"][0]["position"])
        runner.remote_process_client.turn()
    assert response[1]["train"][0]["position"] == 10
    runner.remote_process_client.logout()
    runner.remote_process_client.close()
Example #33
 def PlaceAllFlags(unitNames):
     # iterate over
     print "Placing All Flags"
     for idx, port in enumerate(unitNames):
         flag = unitNames[idx] + "__" + SystemAction.rndm()
         Runner.Run(["touch", "./flags/" + flag])
         SystemAction.TakeAction(unitNames[idx], flag, idx)
         print "Placing flag for " + unitNames[idx]
     Logger.log(type(SystemAction.ports), "okblue")
     if (SystemAction.mode == "docker"):
         Logger.log("Service started for docker", "okblue")
     elif (SystemAction.mode == "vagrant"):
         Logger.log("Service started for docker", "okblue")
         for idx, port in enumerate(SystemAction.ports):
             #serviceCMD = "vboxmanage --nologo guestcontrol 'unit_"+str(idx+1)+"' --username vagrant --password vagrant run --exe /bin/sh --no-wait-stdout --no-wait-stderr -- sh/arg0 -c 'node /vagrant/server "+str(port)+" &' "
             serviceCMD = "vboxmanage --nologo guestcontrol 'unit_" + str(
                 idx + 1
             ) + "' --username vagrant --password vagrant run --exe /bin/sh --no-wait-stdout --no-wait-stderr -- sh/arg0 -c 'bash /vagrant/testing.sh " + str(
                 port) + " &' "
             #--wait-stdout
             Logger.log(serviceCMD, "okblue")
             os.system(serviceCMD)
Example #34
    def run(self):
        client = Runner()
        done = False
        try:
            status, start_data = client.remote_process_client.login(
                client.name)
            self.map = client.remote_process_client.read_map()
            while not done:
                self.update()

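                # handle window close (QUIT) and the S key to stop the viewer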
                for event in pg.event.get():
                    if event.type == pg.QUIT:
                        done = True
                    if event.type == KEYDOWN:
                        if event.key == K_s:
                            done = True

                pg.display.flip()
                self.clock.tick(self.fps)
        finally:
            client.remote_process_client.logout()
            client.remote_process_client.close()
            pg.quit()
Example #35
def L_AND_S(rules, text):
    try:
        lex = Lexer(text)
        lex.run(show_states=False, show_spaces=False)
        #lex.show()

        table = SLR_Table(rules)
        #Print_2D_Table(table)
        CheckIfItSLR(table)

        lexer_list = [
            i for i in lex.list if i[1] != "new_line" and i[1] != "Comment"
        ]
        lexer_list.append(["true_end", "$", "end_end"])

        Runner(table, lexer_list, rules)

    except Exception as e:
        print("Не подходит")
        print(e)
        return False
    else:
        print("Подходит")
        return True
Example #36
    def main(self) -> None:
        """Start the bot."""
        self.updater = Updater(TELEGRAM_TOKEN, use_context=True)
        self.bot = self.updater.bot
        self.Runner = Runner(self.bot)

        # Get the dispatcher to register handlers
        dp = self.updater.dispatcher

        # Register handlers
        dp.add_handler(CommandHandler("paste", self.receive_paste))
        dp.add_handler(CommandHandler("author", self.author_command))
        dp.add_handler(
            CommandHandler("start",
                           self.start_command,
                           filters=Filters.chat_type.private))
        dp.add_handler(
            CommandHandler("help",
                           self.help_command,
                           filters=Filters.chat_type.private))
        dp.add_handler(
            MessageHandler(
                Filters.text & (~Filters.command) & Filters.chat_type.private,
                self.receive_paste))

        # Starting the webhook.
        self.updater.start_webhook(listen=HOST,
                                   port=PORT,
                                   url_path=TELEGRAM_TOKEN)
        self.updater.bot.set_webhook(f"{WEBHOOK_URL}/{TELEGRAM_TOKEN}")
        logging.info(
            f"Server started on {HOST}:{PORT}. Listening publicily on {WEBHOOK_URL}/<token>"
        )

        # Wait for a signal
        self.updater.idle()
Example #37
import connexion

from flask import Flask, render_template, request
from flask_restx import Resource, Api
from flask_cors import CORS, cross_origin
from Runner import Runner

app = Flask(__name__)
api = Api(app)

runner = Runner()

CORS(app, supports_credentials=True)


@app.route('/search', methods=['GET'])
@cross_origin(supports_credentials=True)
def search():
    term = request.args.get('term')
    return runner.search(term)


@app.route('/results', methods=['GET'])
def results():
    searchId = request.args.get('searchId')
    return runner.getResults(searchId)


@app.route('/mostSearched', methods=['GET'])
def mostSearched():
Example #38
def main(environment, file_out, weight_file, action_value, f_duration, watch,
         save):
    use_CNN = True
    env = gym.make(environment)
    if use_CNN is True:
        state_size = (88, 80, 1)
    else:
        state_size = env.observation_space.shape[0]

    action_size = env.action_space.n

    # Stack group_size number of atari images
    group_size = 4

    # The following are hard-coded for now, but original image
    # is scaled by preprocessing down to 88, 80, 1 and we combine
    # 4 of them to get a batch of images
    # Note that the "1" argument is the number of copies of environment to train simultaneously
    runner = Runner(environment, 1, group_size)

    online_dqn = DQAgent(state_size,
                         action_size,
                         loss="huber_loss",
                         action=action_value,
                         use_CNN=True)
    target_dqn = DQAgent(state_size,
                         action_size,
                         loss="huber_loss",
                         action=action_value,
                         use_CNN=True)
    online_dqn.model.load_weights(weight_file)
    target_dqn.update_target_weights(online_dqn.model)

    print("Playing {} using weights {} and action {}").format(
        environment, weight_file, action_value)

    epsilon_max = .1
    online_dqn.epsilon = epsilon_max
    done = False

    done_flags = True
    lives = 5

    state = runner.reset_all()
    cumulative_reward = 0
    global_step = 0
    if save is True:
        images = []
    while not done:
        global_step += 1

        q_values = online_dqn.model.predict(state)[0]

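        # After a life is lost, take a few random FIRE actions to restart play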
        if done_flags is False:
            action = online_dqn.action(q_values, online_dqn.epsilon)
        else:
            random_fire_actions = np.random.randint(1, 3)
            for i in range(random_fire_actions):
                action = 1
                next_state, reward, done, info = runner.step([action])
            state = next_state
            done_flags = False
            continue

        next_state, reward, done, info = runner.step([action])
        if watch is True:
            runner.render()
            sleep(.05)
        if save is True:
            images.append(runner.render(mode="rgb_array"))
        cumulative_reward += reward

        # Losing a life is bad, so say so
        remaining_lives = info[0]["ale.lives"]
        life_lost_flag = bool(lives - remaining_lives)
        lives = remaining_lives

        done_flags = False
        if life_lost_flag or done:
            done_flags = True

        state = next_state

        if done:
            print("Score {}, Total steps {}").format(cumulative_reward,
                                                     global_step)
            break
    if save is True:
        imageio.mimsave(file_out, images, duration=f_duration)
    return 0
Example #39
def main(environment, loss_function, action_value, use_CNN, total_games,
         burn_in, training_interval, target_update_interval, save_interval,
         num_epochs, batch_size, learning_rate, epsilon_max, epsilon_min,
         epsilon_decay_steps, gamma, memory_size, log_interval):
    # Set up logging
    start_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    log_dir, parameter_file, score_file = setup_logs(environment, start_time)

    ################################
    # Save our training parameters #
    line = "loss_function: {}\nactionvalue: {}\ntotal_games: {}\ntraining_interval: {}\ntarget_update_interval: {}\nsave_interval: {}\nnum_epochs: {}\nbatch_size: {}\nlearning_rate: {}\nepsilon_max: {}\nepsilon_min: {}\nepsilon_decay_steps: {}\ngamma: {}\nmemory_size: {}\nlog_interval: {}\n".format(
        loss_function, action_value, total_games, training_interval,
        target_update_interval, save_interval, num_epochs, batch_size,
        learning_rate, epsilon_max, epsilon_min, epsilon_decay_steps, gamma,
        memory_size, log_interval)
    os.write(parameter_file, line)
    ################################

    # Set up our environment
    env = gym.make(environment)
    if use_CNN is True:
        state_size = (88, 80, 1)
    else:
        state_size = env.observation_space.shape[0]

    action_size = env.action_space.n

    # Stack group_size number of atari images
    group_size = 4

    # The following are hard-coded for now, but original image
    # is scaled by preprocessing down to 88, 80, 1 and we combine
    # 4 of them to get a batch of images
    # Note that the "1" argument is the number of copies of environment to train simultaneously
    runner = Runner(environment, 1, group_size)

    # Note that if use_CNN = True, then the state_size is ignored!
    online_dqn = DQAgent(state_size,
                         action_size,
                         loss=loss_function,
                         action=action_value,
                         learning_rate=learning_rate,
                         epsilon=epsilon_max,
                         gamma=gamma,
                         memory_size=memory_size,
                         use_CNN=use_CNN)
    target_dqn = DQAgent(state_size,
                         action_size,
                         loss=loss_function,
                         action=action_value,
                         learning_rate=learning_rate,
                         epsilon=epsilon_max,
                         gamma=gamma,
                         memory_size=memory_size,
                         use_CNN=use_CNN)

    target_dqn.update_target_weights(online_dqn.model)

    # Include a threshold value to stop training
    solved_thresh = 500

    print("Playing {} using loss {} and action {}").format(
        environment, loss_function, action_value)

    done = False
    score_history = deque([], maxlen=log_interval)
    max_score = 0
    global_step = 0
    game_num = 1

    state = runner.reset_all()
    cumulative_reward = 0
    lives = 5
    done_flags = True

    while game_num < total_games:
        # Use target_dqn to make Q-values
        # online_dqn then takes epsilon-greedy action
        global_step += 1

        q_values = online_dqn.model.predict(state)[0]

        # If we lose a life, start with a few FIRE actions
        # to get started again. Random to avoid learning
        # fixed sequence of actions
        if done_flags is False:
            action = online_dqn.action(q_values, online_dqn.epsilon)
        else:
            random_fire_actions = np.random.randint(1, 3)
            for i in range(random_fire_actions):
                action = FIRE_ACTION_NUMBER
                next_state, reward, done, info = runner.step([action])
            state = next_state
            done_flags = False
            continue

        next_state, reward, done, info = runner.step([action])
        cumulative_reward += reward[0]

        # Losing a life is bad, so say so
        remaining_lives = info[0]["ale.lives"]
        life_lost_flag = bool(lives - remaining_lives)
        lives = remaining_lives

        done_flags = False
        if life_lost_flag or done:
            done_flags = True

        # Store the result in memory so we can replay later
        online_dqn.remember(state, action, reward, next_state, done_flags)
        state = next_state

        if done:
            score_history.append(cumulative_reward)

            if cumulative_reward > max_score:
                max_score = cumulative_reward

            if game_num % log_interval == 0:
                os.write(score_file, str(list(score_history)) + '\n')
                print(
                    "Completed game {}/{}, global step {}, last {} games average: {:.3f}, max: {}, min: {}. Best so far {}. Epsilon: {:.3f}"
                    .format(game_num, total_games, global_step, log_interval,
                            np.average(score_history), np.max(score_history),
                            np.min(score_history), max_score,
                            online_dqn.epsilon))

            game_num += 1
            cumulative_reward = 0
            lives = 5
            state = runner.reset_all()

            # If the average score over the last 100 games exceeds solved_thresh, training is done
            if game_num > 100:
                avg_last_100 = np.average(score_history)

                if avg_last_100 > solved_thresh:
                    stop_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
                    print("Congratulations! {} has been solved after {} games."
                          ).format(environment, game_num)
                    online_dqn.model.save(
                        os.path.join(
                            log_dir,
                            "online_dqn_{}_solved.h5".format(environment)))
                    line = "Training start: {}\nTraining ends:  {}\n".format(
                        start_time, stop_time)
                    os.write(parameter_file, line)
                    os.write(score_file, str(list(score_history)) + '\n')
                    os.close(parameter_file)
                    os.close(score_file)
                    return 0

        # For the first burn_in number of rounds, just populate memory
        if global_step < burn_in:
            continue
        # Once we are past the burn_in exploration period, we start to train
        # This is a linear decay that goes from epsilon_max to epsilon_min in epsilon_decay_steps
        online_dqn.epsilon = max(
            epsilon_max +
            ((global_step - burn_in) / float(epsilon_decay_steps)) *
            (epsilon_min - epsilon_max), epsilon_min)

        if (global_step % training_interval == 0):
            replay_from_memory(online_dqn, target_dqn, batch_size, num_epochs)

        if (global_step % target_update_interval == 0):
            target_dqn.update_target_weights(online_dqn.model)

        if global_step % save_interval == 0:
            online_dqn.model.save(os.path.join(log_dir, "online_dqn" + ".h5"))

    ###############################################################
    # If we're here, then we finished training without a solution #
    # Let's save the most recent models and make the plots anyway #
    ###############################################################
    stop_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    online_dqn.model.save(
        os.path.join(log_dir, "online_dqn_" + str(global_step) + ".h5"))

    print("Done! Completed game {}/{}, global_step {}".format(
        game_num, total_games, global_step))
    line = "\n \nTraining start: {}\nTraining ends:  {}\n \n".format(
        start_time, stop_time)
    os.write(parameter_file, line)
    if game_num % log_interval != 0:
        os.write(score_file,
                 str(list(score_history)[:game_num % log_interval]) + '\n')
    os.close(parameter_file)
    os.close(score_file)
    return 0
Example #40
 def GET(self, cmd, daemonName):
     data = {}
     data['cmd'] = cmd
     data['daemonName'] = daemonName
     dispatcher = Runner()
     return dispatcher.run(data)
Example #41
 def GET(self, cmd, packageName):
     data = {}
     data["cmd"] = cmd
     data["package"] = {"name": packageName}
     dispatcher = Runner()
     return dispatcher.run(data)