Example #1
def testAStar():
    myenv = env.Env('tbl_TrueData', 5)
    first = [True for x in range(10)]
    while not myenv.end():
        for i in range(10):
            if not myenv.done[i]:
                loc = myenv.locs[i]
                if first[i] and myenv.windspeed_at(loc) >= 15.0:
                    # do not take off while the wind speed is too high
                    continue
                else:
                    first[i] = False
                target = myenv.targets[i]
                x, y = (0, 0)
                if loc[0] > target[0]:
                    x = -1
                elif loc[1] > target[1]:
                    y = -1
                elif loc[0] < target[0]:
                    x = 1
                elif loc[1] < target[1]:
                    y = 1

                myenv.set_move(i, (x, y))
        myenv.tick()
    print('score %d time %d remain_task %d' %
          (myenv.score, myenv.time, myenv.remain_task))
Example #2
    def __init__(self, s_start, s_goal, step_len, goal_sample_rate,
                 waypoint_sample_rate, iter_max):
        self.s_start = Node(s_start)
        self.s_goal = Node(s_goal)
        self.step_len = step_len
        self.goal_sample_rate = goal_sample_rate
        self.waypoint_sample_rate = waypoint_sample_rate
        self.iter_max = iter_max
        self.vertex = [self.s_start]
        self.vertex_old = []
        self.vertex_new = []
        self.edges = []

        self.env = env.Env()
        self.plotting = plotting.Plotting(s_start, s_goal)
        self.utils = utils.Utils()
        self.fig, self.ax = plt.subplots()

        self.x_range = self.env.x_range
        self.y_range = self.env.y_range
        self.obs_circle = self.env.obs_circle
        self.obs_rectangle = self.env.obs_rectangle
        self.obs_boundary = self.env.obs_boundary
        self.obs_add = [0, 0, 0]  # [x,y,r]

        self.path = []
        self.waypoint = []
Example #3
 def make_env(
     rank: int,
     seed: int,
     env_id=None,
     **kwargs,
 ):
     kwargs.update(rank=rank, random_seed=seed + rank)
     return env.Env(**kwargs)
Example #4
 def __init__(self, xI, xG, obs):
     """ 
     xI: start point
     xG: end point 
     """
     self.xI, self.xG = xI, xG
     self.env = env.Env(obs)
     self.obs = self.env.obs_map_mod(obs)
Example #5
def EVAL(ast, _env):
    if type(ast) is not list:
        return eval_ast(ast, _env)
    elif len(ast) == 0:  # empty list just return it
        return ast
    else:  # list, evaluate elements
        func = ast[0]

        if func == "def!":  # define a new symbol in environment
            return _env.set(ast[1], EVAL(ast[2], _env))

        elif func == "let*":
            newEnv = env.Env(_env, None, None)
            bindings, val = ast[1], ast[2]
            for i in range(0, len(bindings), 2):
                newEnv.set(
                    bindings[i],
                    EVAL(bindings[i + 1], newEnv),
                )
            retVal = EVAL(val, newEnv)
            return retVal

        elif func == "do":  # evaluate each element of ast (besides the first one)
            ast = ast[1:]
            evaluated = eval_ast(ast, _env)
            return evaluated[-1]

        elif func == "if":
            ast = ast[1:]
            _bool = EVAL(ast[0], _env)
            if _bool is None or _bool is False:
                # nil or false: evaluate the third element,
                # or return None if there is no third element
                if len(ast) < 3:  # no third parameter to evaluate
                    return None
                return EVAL(ast[2], _env)
            else:  # anything else is truthy: evaluate the second element
                return EVAL(ast[1], _env)

        elif func == "fn*":
            ast = ast[1:]

            def funcClosure(*args):
                # Return a new function closure: it creates an environment
                # that binds the parameter names to the supplied arguments.
                newEnv = env.Env(_env, ast[0], args)
                return EVAL(ast[1], newEnv)

            return funcClosure
        else:  # if it's a symbol, evaluate the list and apply the first element
            ast = eval_ast(ast, _env)
            func = ast[0]
            # call the function with the rest of the list as its arguments
            return func(*ast[1:])
Example #6
File: util.py Project: nrc/N
def fv(var, symbols):
    result = FVTable(var)
    for s in symbols:
        for c in s.cases:
            cc = idClone(c, symbols)
            cc.case = c
            cc.caseMatch = cc.isSuperType(c, symbols)
            result.addCase(s, cc, cc.fv(var, env.Env()))

    return result
Example #7
 def __init__(self):
     self.env = env.Env()
     self.actor = actor()
     self.autoencoder_critic, self.encoder, self.critic = CAE_critic()
     self.critic_t = clone_model(self.critic)
     self.critic_t.set_weights(self.critic.get_weights())
     self.actor_t = clone_model(self.actor)
     self.actor_t.set_weights(self.actor.get_weights())
     self.memory = deque(maxlen=int(1e+5))
     self.goals = np.loadtxt("goals")  # list of random goal positions
     self.D_MAX = 6
     self.map = Map()
Example #8
    def __init__(self):
        self.env = env.Env()

        self.delta = 0.00  # to be tuned: originally 0.5, which was too large
        self.obs_circle = self.env.obs_circle
        self.obs_rectangle = self.env.obs_rectangle
        self.obs_boundary = self.env.obs_boundary
        self.obs_vertex = []
        self.get_obs_vertex()
        self.step = 1
        self.keypoints = []
Example #9
 def type(self, symbols):
     envt = env.Env()
     lt = self.lhs.type(envt)
     rt = self.rhs.type(envt)
     llt = symbol.List(None)
     llt.arg = lt
     lrt = symbol.List(None)
     lrt.arg = rt
     # This is a bit of a hack: what we really want is to substitute at all
     # levels of list nesting, but we only do one depth here because only one
     # depth is supported in constraints etc.
     return self.body.type(symbols).subst(lt, rt, envt, symbols).subst(
         llt, lrt, envt, symbols)
Example #10
def test_sameenv():
    """
    Same env
    """
    env0 = env.Env()
    gc = env.GC(env0)
    s = Scope.root_scope()
    def _fun(e, y):
        val, err, _gc = interp0(parse(y)[0], e, s, s.extend())
        gc.extend(_gc)
        return val
    unittest(lambda: env0, _fun, test_suite)
Example #11
    def __init__(self, s_start, s_goal, res=0.5):
        self.res = res
        self.s_start = self.pos2ind(s_start)
        self.s_goal = self.pos2ind(s_goal)
        self.Env = env.Env()  # class Env
        self.utils = utils.Utils()
        self.u_set = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1),
                      (0, -1), (-1, -1)]

        self.OPEN = my_queue.QueuePrior()  # priority queue / OPEN set
        self.CLOSED = []  # CLOSED set / VISITED order
        self.PARENT = dict()  # recorded parent
        self.g = dict()  # cost to come
Example #12
 def __init__(self):
     self.env = env.Env()
     self.actor = actor()
     self.autoencoder_critic, self.encoder, self.critic = CAE_critic_latent()
     self.critic_t = clone_model(self.critic)
     self.critic_t.set_weights(self.critic.get_weights())
     self.actor_t = clone_model(self.actor)
     self.actor_t.set_weights(self.actor.get_weights())
     self.B_pixel = deque(maxlen=int(6e+4))  # pixel-space replay buffer
     self.B_latent = deque(maxlen=int(2e+5))  # latent-space replay buffer
     self.goals = np.loadtxt("goals")  # list of random goal positions
     self.D_MAX = 6
     self.map = Map()
Example #13
def write_seq2(epochs, filename):
    f = open(filename, 'wb+')

    # origin_cards = ['3', '3', '3', '3', '4', '4', '4', '4', '5', '5', '5', '5',
    #     '6', '6', '6', '6', '7', '7', '7', '7', '8', '8', '8', '8',
    #     '9', '9', '9', '9', '10', '10', '10', '10', 'J', 'J', 'J', 'J',
    #     'Q', 'Q', 'Q', 'Q', 'K', 'K', 'K', 'K', 'A', 'A', 'A', 'A',
    #     '2', '2', '2', '2', '*', '$']
    origin_cards = [
        '3', '3', '3', '3', '4', '4', '4', '4', '5', '5', '5', '5', '6', '6',
        '6', '6', '7', '7', '7', '7', '8', '8', '8', '8', '9', '9', '9', '9',
        '10', '10', '10', '10', 'J', 'J', 'J', 'J'
    ]
    f.write(len(origin_cards).to_bytes(2, byteorder='little', signed=False))
    for i in range(epochs):
        cards = origin_cards.copy()
        enrivon = env.Env()
        random.shuffle(cards)
        for c in cards:
            if c == '10':
                c = '1'
            f.write(ord(c).to_bytes(1, byteorder='little', signed=False))
        # print(cards)
        handcards = [cards[:int(len(cards) / 2)], cards[int(len(cards) / 2):]]
        enrivon.reset()
        enrivon.prepare2_manual(Card.char2color(cards))
        end = False
        ind = 0
        while not end:
            intention, end = enrivon.step2_auto()
            put_list = Card.to_cards_from_3_17(intention)

            try:
                a = next(i for i, v in enumerate(action_space)
                         if v == put_list)
            except StopIteration as e:
                print(put_list)

            f.write(a.to_bytes(2, byteorder='little', signed=False))
            # assert(action_space[a] == put_list)
            for c in put_list:
                handcards[ind].remove(c)
            ind = 1 - ind
        # assert((not handcards[0]) or (not handcards[1]))
        if i % 1000 == 0:
            print("writing %d..." % i)
            sys.stdout.flush()
    f.close()
    print("write completed with %d epochs" % epochs)
Example #14
File: util.py Project: nrc/N
def subst(var, symbols):
    st = SubstTable(var)
    for s in symbols:
        for c in s.cases:
            if var in c.deepVars():
                cc = renameForFresh(c, var.variable)
                cc.case = c
                cc.caseMatch = cc.isSuperType(c, symbols)
                vs = varInBinding(var, c.bindings)
                if vs:
                    for b in vs:
                        e = env.Env()
                        cnstr = judge.InTest(None)
                        cnstr.nt = False
                        cnstr.lhs = var
                        cnstr.rhs = b[0]
                        e[var] = cnstr
                        st.addCase(s, cc,
                                   cc.subst(var.variable, var, e, symbols),
                                   "where " + str(var) + " in " + str(b[0]))
                        cnstr.nt = True
                        st.addCase(
                            s, cc, cc.subst(var.variable, var, e, symbols),
                            "where " + str(var) + " not in " + str(b[0]))
                else:
                    st.addCase(s, cc,
                               cc.subst(var.variable, var, env.Env(), symbols))
            if var == c:
                fast = ast.Ast(ast.ID, None)
                fast.val = var.name
                fast.mod = "'"
                fresh = symbol.Id(fast, c.parent)
                fresh.repr = None
                st.addCase(s, fresh, fresh)

    return st
Example #15
def write_seq3(epochs, filename):
    f = open(filename, 'wb+')

    origin_cards = [
        '3', '3', '3', '3', '4', '4', '4', '4', '5', '5', '5', '5', '6', '6',
        '6', '6', '7', '7', '7', '7', '8', '8', '8', '8', '9', '9', '9', '9',
        '10', '10', '10', '10', 'J', 'J', 'J', 'J', 'Q', 'Q', 'Q', 'Q', 'K',
        'K', 'K', 'K', 'A', 'A', 'A', 'A', '2', '2', '2', '2', '*', '$'
    ]
    for i in range(epochs):
        cards = origin_cards.copy()
        enrivon = env.Env()
        lord_id = -1
        while lord_id == -1:
            random.shuffle(cards)
            enrivon.reset()
            lord_id = enrivon.prepare_manual(Card.char2color(cards))
        for c in cards:
            if c == '10':
                c = '1'
            f.write(ord(c).to_bytes(1, byteorder='little', signed=False))
        f.write(lord_id.to_bytes(2, byteorder='little', signed=False))
        handcards = [cards[:17], cards[17:34], cards[34:51]]
        extra_cards = cards[51:]
        handcards[lord_id] += extra_cards
        r = 0
        ind = lord_id
        while r == 0:
            intention, r = enrivon.step_auto()
            put_list = Card.to_cards_from_3_17(intention)
            # print(put_list)

            try:
                a = next(i for i, v in enumerate(action_space)
                         if v == put_list)
            except StopIteration as e:
                print(put_list)
                # raise Exception('cards error')

            f.write(a.to_bytes(2, byteorder='little', signed=False))

            for c in put_list:
                handcards[ind].remove(c)
            ind = int(ind + 1) % 3
        f.write(r.to_bytes(2, byteorder='little', signed=True))

    f.close()
    print("write completed with %d epochs" % epochs)
Example #16
    def __init__(self, s_start, s_goal, step_len, goal_sample_rate, iter_max):
        self.s_start = Node(s_start)
        self.s_goal = Node(s_goal)
        self.step_len = step_len
        self.goal_sample_rate = goal_sample_rate
        self.iter_max = iter_max
        self.vertex = [self.s_start]

        self.env = env.Env()
        self.plotting = plotting.Plotting(s_start, s_goal)
        self.utils = utils.Utils()

        self.x_range = self.env.x_range
        self.y_range = self.env.y_range
        self.obs_circle = self.env.obs_circle
        self.obs_rectangle = self.env.obs_rectangle
        self.obs_boundary = self.env.obs_boundary
Example #17
    def __init__(self, s_start, s_goal, e, heuristic_type):
        self.s_start, self.s_goal = s_start, s_goal
        self.heuristic_type = heuristic_type

        self.Env = env.Env()  # class Env

        self.u_set = self.Env.motions  # feasible input set
        self.obs = self.Env.obs  # position of obstacles
        self.e = e  # weight

        self.g = dict()  # Cost to come
        self.OPEN = dict()  # priority queue / OPEN set
        self.CLOSED = set()  # CLOSED set
        self.INCONS = {}  # INCONSISTENT set
        self.PARENT = dict()  # relations
        self.path = []  # planning path
        self.visited = []  # order of visited nodes
Example #18
            def fn(*args, params=params, ast=ast):
                try:
                    args = mal_types.List(args)
                    vargs_index = params.index('&')
                    params = mal_types.List(params[:vargs_index] + params[vargs_index + 1:])
                    rest = args[vargs_index:]
                    args = mal_types.List(args[:vargs_index])
                    args.append(mal_types.List(rest))
                except ValueError:
                    pass

                if_env = env_module.Env(
                    outer=env,
                    binds=params,
                    exprs=args,
                )
                return EVAL(ast, if_env)
Example #19
    def test_get_attribute(self):
        visualiser.VisualiserSwitcher.choose_visualiser("pygame")
        args = generate_args(
            planner_id="rrt",
            map_fname="maps/test.png",
            start_pt=np.array([25, 123]),
            goal_pt=np.array([225, 42]),
        )
        args.no_display = True

        e = env.Env(args, fixed_seed=0)

        # test get planner
        assert isinstance(e.planner, Planner)

        # test get sampler
        assert isinstance(e.sampler, Sampler)
Example #20
    def __init__(self, s_start, s_goal, heuristic_type, eps, res=0.5):
        self.res = res
        self.s_start = self.pos2ind(s_start)
        self.s_goal = self.pos2ind(s_goal)
        self.heuristic_type = heuristic_type
        self.eps = eps

        self.Env = env.Env()  # class Env
        self.utils = utils.Utils()
        self.u_set = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1),
                      (1, 0), (1, 1)]

        self.OPEN = my_queue.HeapDict()  # priority queue / OPEN set
        self.CLOSED = []  # CLOSED set / VISITED order
        self.PARENT = {}  # recorded parent
        self.INCONS = []  # overconsistent node
        self.g = {}  # cost to come
Example #21
def main():
    x_start = (2, 2)  # Starting node
    x_goal = (49, 24)  # Goal node
    environment = env.Env()

    y1_plot = []
    y2_plot = []

    step_len_list = [1, 2, 4, 8]
    for step_len in step_len_list:
        # reset the accumulators so the reported means are per step length
        mean_iter = []
        mean_length = []
        for i in range(100):
            rrt = Rrt(environment, x_start, x_goal, step_len, 0.10, 10000)
            path, nb_iter = rrt.planning()

            mean_iter.append(nb_iter)
            mean_length.append(get_path_length(path))
            if path:
                # print('Found path in ' + str(nb_iter) + ' iterations, length : ' + str(get_path_length(path)))
                if showAnimation:
                    rrt.plotting.animation(rrt.vertex, path, "RRT", True)
                    plotting.plt.show()
            else:
                print("No Path Found in " + str(nb_iter) + " iterations!")
                if showAnimation:
                    rrt.plotting.animation(rrt.vertex, [], "RRT", True)
                    plotting.plt.show()
        y1_plot.append(sum(mean_iter) / len(mean_iter))
        y2_plot.append(sum(mean_length) / len(mean_length))
        print("step_len={},mean iter={},mean length={}".format(
            step_len,
            sum(mean_iter) / len(mean_iter),
            sum(mean_length) / len(mean_length)))

    plt.subplot(121)
    plt.plot(step_len_list, y1_plot, "r-o", label="mean_iter")

    plt.xlabel("step len")
    plt.ylabel("iter number")
    plt.subplot(122)
    plt.plot(step_len_list, y2_plot, "b-o", label="mean_length")
    plt.xlabel("step len")
    plt.ylabel("path length")
    plt.show()
Example #22
    def main(self):
        awrReportFile = str(
            input("Enter the AWR report file name to be analyzed: "))

        try:
            fName = open(awrReportFile)
        except IOError:
            print(
                "The entered file is not accessible. Please provide a correct AWR report."
            )
            exit()
        else:
            # close only when the open succeeded; with 'finally', a failed
            # open would leave fName unbound and raise NameError here
            fName.close()

        htmlFile = open(awrReportFile, 'r', encoding='utf-8')
        sourceCode = htmlFile.read()
        soup = BeautifulSoup(sourceCode, "lxml")

        "Call DB, host and snap detail fetch class by defining an object"
        envDetail = env.Env()
        envDetail.extractenvinfo(soup)
        "Call ADDM and Foreground detail fetch class by defining an object"
        addmEventDetails = addmevent.AddmEvent()
        addmEventDetails.Addm(soup)
        "Call IO profile detail fetch class by defining an object"
        ioProfileDetails = ioprofile.IOProfile()
        ioProfileDetails.ioprofile(soup)
        "Call SQL Order by CPU detail fetch class by defining an object"
        cpuOrderByDetails = sqlstats.SqlStats()
        cpuOrderByDetails.sqlstatistics(soup)
        "Call SQL Order by Elapsed Time detail fetch class by defining an object"
        elapsedTimeOrderByDetails = sqlstats.SqlStats()
        elapsedTimeOrderByDetails.elapsedsqlstatistics(soup)
        "Call SQL Order by User IO detail fetch class by defining an object"
        userIOOrderByDetails = sqlstats.SqlStats()
        userIOOrderByDetails.iowaitsqlstatistics(soup)
        "Call SQL Order by Gets detail fetch class by defining an object"
        getsOrderByDetails = sqlstats.SqlStats()
        getsOrderByDetails.getssqlstatistics(soup)
        "Call SQL Order by Physical Reads detail fetch class by defining an object"
        pReadsOrderByDetails = sqlstats.SqlStats()
        pReadsOrderByDetails.preadssqlstatistics(soup)
        "Call SGA Target Advisory method to conclude"
        sgaTargetAdvise = sgatarget.SgaAdvisryTarget()
        sgaTargetAdvise.sgatargetadvisory(soup)
Example #23
 def __init__(self):
     self.env = env.Env()
     self.actor = actor()
     self.autoencoder_critic, self.encoder, self.critic = CAE_critic()
     self.critic_t = clone_model(self.critic)
     self.critic_t.set_weights(self.critic.get_weights())
     self.actor_t = clone_model(self.actor)
     self.actor_t.set_weights(self.actor.get_weights())
     self.memory = deque(maxlen=int(1e+5))
     self.goals = np.loadtxt("goals")  # list of random goal positions
     self.D_MAX = 6
     self.map = Map()
     
     # gradient of the policy
     val = self.critic.output
     act = self.critic.input[1]
     grad_val2act = tf.gradients(val, act)
     outputTensor_a = self.actor.output
     grad_val2param = tf.gradients(outputTensor_a[0],
                                   self.actor.trainable_weights,
                                   grad_val2act[0][0])
     self.grad_func = K.function(
         [self.critic.input[0], self.critic.input[1], self.actor.input,
          K.learning_phase()],
         grad_val2param)
Example #24
def test_do_env():
    """
    Quote by (do )
    """
    env0 = env.Env()
    gc = env.GC(env0)
    s = Scope.root_scope()
    def _fun(e, y):
        val, err, _gc = interp0(parse(f"(do {y})")[0], e, s, s.extend())
        gc.extend(_gc)
        return val
    unittest(lambda: env0, _fun, test_suite + \
    ["(do (def i 100)\
          (while (> i 0)\
          (do (set i (- i 1))\
              (((do ((do (fn (x) (fn (y) (fn (z) (+ x y z))))) 1)) 2) 3)))\
          (. (env) __len__))", len(env.Env.buintin_func)])
    print(env0.counter)
Example #25
    def __init__(self, step_len, goal_sample_rate, iter_max):

        self.step_len = step_len
        self.goal_sample_rate = goal_sample_rate
        self.iter_max = iter_max

        self.env = env.Env()
        self.plotting = plotting.Plotting("RRT_CONNECT")
        self.utils = utils.Utils()

        self.x_range = self.env.x_range
        self.y_range = self.env.y_range
        self.obs_circle = self.env.obs_circle
        self.obs_rectangle = self.env.obs_rectangle
        self.obs_boundary = self.env.obs_boundary
        self.s_start = Node((0, 0))
        self.s_goal = Node((0, 0))
        self.path = []
        self.key_points = []
Example #26
def main():
    # Config params
    world = 1  # two worlds available: 1 and 2

    if world == 1:
        s_start = (10, 10)
        s_goal = (70, 50)
    elif world == 2:
        s_start = (5, 5)
        s_goal = (35, 25)
    label = "2D Dijkstra"

    environment = env.Env(world_id=world)

    dijkstra = Dijkstra(environment, "euclidean")
    plot = Plotter(environment, s_start, s_goal)

    path, visited_nodes = dijkstra.get_path(s_start, s_goal)
    plot.animate_path_and_visited(label, path, visited_nodes)
Example #27
    def __init__(self, x_start, x_goal):
        self.xI, self.xG = x_start, x_goal
        self.e = 0.001  # threshold for convergence
        self.gamma = 0.9  # discount factor

        self.env = env.Env(self.xI, self.xG)
        self.motion = motion_model.Motion_model(self.xI, self.xG)
        self.plotting = plotting.Plotting(self.xI, self.xG)

        self.u_set = self.env.motions  # feasible input set
        self.stateSpace = self.env.stateSpace  # state space
        self.obs = self.env.obs_map()  # position of obstacles
        self.lose = self.env.lose_map()  # position of lose states

        self.name1 = "policy_iteration, gamma=" + str(self.gamma)

        [self.value, self.policy] = self.iteration()
        self.path = self.extract_path(self.xI, self.xG, self.policy)
        self.plotting.animation(self.path, self.name1)
Example #28
    def __init__(self,
                 s_start,
                 s_goal,
                 obs,
                 bot_size=[0, 0, 0, 0],
                 ratio=1,
                 heuristic_type='manhattan'):
        self.s_start = s_start
        self.s_goal = s_goal
        self.heuristic_type = heuristic_type
        self.Env = env.Env(obs, bot_size, ratio)  # class env
        self.u_set = self.Env.motions
        self.obs = self.Env.obs

        self.Dstop = 30  # the terminal distance

        self.OPEN = []  # priority queue / OPEN set
        self.ClOSED = []  # visited points
        self.PARENT = dict()  # the recorded parent
        self.g = dict()  # cost to come
Example #29
    def __init__(self, x_start, x_goal):
        self.xI, self.xG = x_start, x_goal
        self.M = 500  # iteration numbers
        self.gamma = 0.9  # discount factor
        self.alpha = 0.5
        self.epsilon = 0.1

        self.env = env.Env(self.xI, self.xG)
        self.motion = motion_model.Motion_model(self.xI, self.xG)
        self.plotting = plotting.Plotting(self.xI, self.xG)

        self.u_set = self.env.motions  # feasible input set
        self.stateSpace = self.env.stateSpace  # state space
        self.obs = self.env.obs_map()  # position of obstacles
        self.lose = self.env.lose_map()  # position of lose states

        self.name1 = "SARSA, M=" + str(self.M)

        [self.value, self.policy] = self.Monte_Carlo(self.xI, self.xG)
        self.path = self.extract_path(self.xI, self.xG, self.policy)
        self.plotting.animation(self.path, self.name1)
Example #30
def run():
    pygame.init()
    pygame.display.gl_set_attribute(pygame.GL_CONTEXT_MAJOR_VERSION, 4)
    pygame.display.gl_set_attribute(pygame.GL_CONTEXT_MINOR_VERSION, 1)
    pygame.display.gl_set_attribute(pygame.GL_CONTEXT_PROFILE_MASK,
                                    pygame.GL_CONTEXT_PROFILE_CORE)

    pygame.display.set_mode((1280, 720), pygame.DOUBLEBUF | pygame.OPENGL)

    program = get_program()

    groups = obj_loader.load('shangwu', 'part1.obj')

    env_obj = env.Env()
    arrow_obj = arrow.Arrow()
    coord_obj = coord.Coord()

    uniform.get_locs(program)

    camera_obj = camera.Camera()

    running = True

    clock = pygame.time.Clock()

    while running:
        clock.tick(50)

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            camera_obj.process_event(event)

        uniform.set_view(camera_obj.view)

        draw(program, groups)
        env_obj.draw()
        coord_obj.draw()

        pygame.display.flip()