Example #1
def precompute_and_save_intersections(t, enf_locs, trace):
		print "Precomputing intersections..."
		intersection_cache = []
		for i in xrange(t+1):
			next_enf_loc = scale_up(enf_locs[i])
			# note: when i == 0 this indexes enf_locs[-1], i.e. the last recorded location
			cur_enf_loc = scale_up(enf_locs[i-1])
			fv = direction(next_enf_loc, cur_enf_loc)
			intersections = trace.model.isovist.GetIsovistIntersections(cur_enf_loc, fv)
			intersection_cache.append(intersections)

		cPickle.dump( intersection_cache, open("./enf_int_cache.cp","wb") )
		print "done! Intersection count:", len(intersection_cache)
		return intersection_cache
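
Example #1 writes one intersection list per time step to ./enf_int_cache.cp. A minimal sketch of reading that cache back, assuming the file was produced by the function above (cPickle, binary mode, same path):

import cPickle

# Sketch: load the precomputed isovist intersections, one entry per time step.
with open("./enf_int_cache.cp", "rb") as f:
    intersection_cache = cPickle.load(f)

for step, intersections in enumerate(intersection_cache):
    print "t =", step, "intersection count:", len(intersections)
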
Example #2
	def run_smart(self, Q):
		t = Q.choice( p=1.0/40*np.ones((1,40)), name="t" )

		# current location of chaser (at time 't')
		curr_loc = [Q.get_obs("enf_x_"+str(t)), Q.get_obs("enf_y_"+str(t))]

		# randomly choose a start and goal for runner
		cnt = len(self.locs)
		int_start_i = Q.choice( p=1.0/cnt*np.ones((1,cnt)), name="int_start" )
		int_goal_i = Q.choice( p=1.0/cnt*np.ones((1,cnt)), name="int_goal" )

		# create empty trace using model
		q = p.ProgramTrace(self.runner_model)
		# condition q on Q 
		q = self.condition_q(Q, q)

		# run inference to get intruder's expected next step
		post_sample_traces = run_inference(q, post_samples=6, samples=5) # 10, 5
		runner_exp_next_step, runner_exp_path = rand_expected_future_step(post_sample_traces, "int_plan")

		#if point_in_obstacle(runner_exp_next_step, self.epolys):
		#	runner_exp_next_step = get_clear_goal(curr_loc, runner_exp_next_step, self.polys)

		Q.keep("q", q.trace)
		Q.keep("runner_post_sample_traces", post_sample_traces)
		Q.keep("runner_exp_next_step", runner_exp_next_step)

		# plan path toward anticipated location (t+1) for runner
		#enf_plan = self.plan_path(np.atleast_2d( curr_loc), np.atleast_2d( runner_exp_next_step))
		rx1,ry1,rx2,ry2 = self.seg_map
		enf_plan = planner.run_rrt_opt( np.atleast_2d( curr_loc), np.atleast_2d( runner_exp_next_step), rx1,ry1,rx2,ry2)
		Q.keep("enf_plan", enf_plan)

		# set up the enforcer view (forward vector, fv) for the next step
		cur_enf_loc = scale_up(curr_loc)
		next_enf_loc = scale_up(enf_plan[1])
		next_int_loc = scale_up(runner_exp_path[t+1])

		fv = direction(next_int_loc, cur_enf_loc)

		intersections = self.isovist.GetIsovistIntersections(next_enf_loc, fv)

		# does the enforcer see me at time 't'
		#runner_next_loc = scale_up(runner_exp_next_step)
		will_runner_be_seen = self.isovist.FindIntruderAtPoint( next_int_loc, intersections )
		detected_prob = 0.999*will_runner_be_seen + 0.001*(1-will_runner_be_seen) # ~ flip(seen*0.999 + (1-seen)*0.001)
		
		# XXX should consider checking whether the enforcer will see the intruder
		# before reaching its simulated goal location, not just at the next step.
		# Downside: it would need more isovist calculations at each step.
		runner_detected = Q.flip( p=detected_prob, name="int_detected" )
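
The detected_prob line above is the likelihood term that ties detection to the isovist check: roughly flip(0.999) when the runner falls inside the enforcer's view and flip(0.001) otherwise. A self-contained sketch of that mapping (detection_prob is our name, not part of the original code):

def detection_prob(seen, eps=0.001):
	# ~ flip(seen*0.999 + (1-seen)*0.001), as in the models above
	seen = 1 if seen else 0
	return (1.0 - eps) * seen + eps * (1 - seen)

print detection_prob(True)   # 0.999
print detection_prob(False)  # 0.001
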
Example #3
	def run_naive(self, Q):
		t = Q.choice( p=1.0/40*np.ones((1,40)), name="t" )

		# current location of chaser (at time 't')
		curr_loc = [Q.get_obs("enf_x_"+str(t)), Q.get_obs("enf_y_"+str(t))]

		# randomly choose a start and goal for runner
		cnt = len(self.locs)
		int_start_i = Q.choice( p=1.0/cnt*np.ones((1,cnt)), name="int_start" )
		int_goal_i = Q.choice( p=1.0/cnt*np.ones((1,cnt)), name="int_goal" )

		rx1,ry1,rx2,ry2 = self.seg_map
		int_plan = planner.run_rrt_opt( np.atleast_2d(self.locs[int_start_i]), 
			np.atleast_2d( self.locs[int_goal_i]), rx1,ry1,rx2,ry2, slow=True)
		
		# plan path toward the runner's anticipated location (about 12 steps ahead)
		#enf_plan = self.plan_path(np.atleast_2d( curr_loc), np.atleast_2d( runner_exp_next_step))
		t_ = min(t+12, len(int_plan)-1)
		enf_plan = planner.run_rrt_opt( np.atleast_2d( curr_loc), np.atleast_2d( int_plan[t_]), rx1,ry1,rx2,ry2)
		Q.keep("enf_plan", enf_plan)
		Q.keep("int_plan", int_plan)

		# set up the enforcer view (forward vector, fv) for the next step
		cur_enf_loc = scale_up(curr_loc)
		next_enf_loc = scale_up(enf_plan[1])
		fv = direction(next_enf_loc, cur_enf_loc)

		intersections = self.isovist.GetIsovistIntersections(next_enf_loc, fv)

		# does the enforcer see me at time 't'
		runner_next_loc = scale_up(int_plan[t+1])
		will_runner_be_seen = self.isovist.FindIntruderAtPoint( runner_next_loc, intersections )
		detected_prob = 0.999*will_runner_be_seen + 0.001*(1-will_runner_be_seen) # ~ flip(seen*0.999 + (1-seen)*0.001)
		
		# XXX should consider checking whether the enforcer will see the intruder
		# before reaching its simulated goal location, not just at the next step.
		# Downside: it would need more isovist calculations at each step.
		runner_detected = Q.flip( p=detected_prob, name="int_detected" )
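
run_naive aims the chaser at the runner's planned position 12 steps past t, clamped to the end of the plan. A tiny sketch of that index clamp (lookahead_index is a hypothetical helper name):

def lookahead_index(t, plan, horizon=12):
	# never index past the last waypoint
	return min(t + horizon, len(plan) - 1)

plan = [[0.1 * i, 0.1 * i] for i in xrange(20)]
print lookahead_index(3, plan)   # 15
print lookahead_index(15, plan)  # 19 (clamped)
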
Example #4
    def run(self, Q):
        #----------------------------------------------------------
        #				simplified enforcer model
        #----------------------------------------------------------

        t = Q.choice(p=1.0 / 40 * np.ones((1, 40)), name="t")

        cnt = len(self.locs)
        enf_start_i = Q.choice(p=1.0 / cnt * np.ones((1, cnt)),
                               name="enf_start")
        enf_goal_i = Q.choice(p=1.0 / cnt * np.ones((1, cnt)), name="enf_goal")

        enf_start = np.atleast_2d(self.locs[enf_start_i])
        enf_goal = np.atleast_2d(self.locs[enf_goal_i])
        path_noise = .003
        #enf_plan = self.plan_path(enf_start, enf_goal)
        rx1, ry1, rx2, ry2 = self.seg_map
        enf_plan = planner.run_rrt_opt(np.atleast_2d(enf_start),
                                       np.atleast_2d(enf_goal), rx1, ry1, rx2,
                                       ry2)

        enf_noisy_plan = [enf_plan[0]]
        # loc_t = np.random.multivariate_normal(enf_plan[i], [[path_noise, 0], [0, path_noise]]) # name 't_i' i.e. t_1, t_2,...t_n
        for i in xrange(1, len(enf_plan) - 1):
            loc_x = Q.randn(mu=enf_plan[i][0],
                            sigma=path_noise,
                            name="enf_x_" + str(i))
            loc_y = Q.randn(mu=enf_plan[i][1],
                            sigma=path_noise,
                            name="enf_y_" + str(i))
            loc_t = [loc_x, loc_y]
            enf_noisy_plan.append(loc_t)

        enf_noisy_plan.append(enf_plan[-1])
        enf_loc = np.atleast_2d(enf_noisy_plan[t])

        #----------------- end of enforcer model ------------------

        # XXX the runner wants to do goal inference to figure out the next step of the enf
        # TWO desires:
        # 1) high likelihood for an enf path conditioned on past locations
        # 2) high likelihood for his own path conditioned on not being detected at the next step
        #    and conditioned on past enf locations!

        #----------------------------------------------------------
        #				runner (intruder) model
        #----------------------------------------------------------

        start_i = Q.choice(p=1.0 / cnt * np.ones((1, cnt)), name="int_start")
        goal_i = Q.choice(p=1.0 / cnt * np.ones((1, cnt)), name="int_goal")

        start = np.atleast_2d(self.locs[start_i])
        goal = np.atleast_2d(self.locs[goal_i])

        #my_plan = self.plan_path(start, goal)
        my_plan = planner.run_rrt_opt(np.atleast_2d(start),
                                      np.atleast_2d(goal), rx1, ry1, rx2, ry2)
        my_loc = np.atleast_2d(my_plan[t])
        my_noisy_plan = [my_plan[0]]
        # loc_t = np.random.multivariate_normal(my_plan[i], [[path_noise, 0], [0, path_noise]]) # name 't_i' i.e. t_1, t_2,...t_n
        for i in xrange(1, len(my_plan) - 1):
            loc_x = Q.randn(mu=my_plan[i][0],
                            sigma=path_noise,
                            name="int_x_" + str(i))
            loc_y = Q.randn(mu=my_plan[i][1],
                            sigma=path_noise,
                            name="int_y_" + str(i))
            loc_t = [loc_x, loc_y]
            my_noisy_plan.append(loc_t)
        my_noisy_plan.append(my_plan[-1])

        # XXX Need to make sure the runner wasn't seen in any of the previous time steps
        i_already_seen = 0
        for i in xrange(t):
            intersections = Q.cache["enf_intersections_t_" + str(i)]  # enforcer's isovist intersections at time i
            i_already_seen = self.isovist.FindIntruderAtPoint(
                my_noisy_plan[i], intersections)
            if (i_already_seen):
                detected_prob = 0.999 * i_already_seen + 0.001 * (
                    1 - i_already_seen)

        if not i_already_seen:
            # set up the enforcer view (forward vector, fv) for the next step
            cur_enf_loc = scale_up(enf_noisy_plan[t])
            next_enf_loc = scale_up(enf_noisy_plan[t + 1])
            fv = direction(next_enf_loc, cur_enf_loc)
            intersections = self.isovist.GetIsovistIntersections(
                next_enf_loc, fv)

            # does the enforcer see me at time 't'
            my_next_loc = scale_up(my_noisy_plan[t + 1])
            will_i_be_seen = self.isovist.FindIntruderAtPoint(
                my_next_loc, intersections)
            detected_prob = 0.999 * will_i_be_seen + 0.001 * (
                1 - will_i_be_seen)  # ~ flip(seen*0.999 + (1-seen)*0.001)

        future_detection = Q.flip(p=detected_prob, name="int_detected")

        # for rendering purposes
        Q.keep("int_plan", my_noisy_plan)
        Q.keep("enf_plan", enf_noisy_plan)
Example #5
    def run_basic_partial(self, Q, path_noise=0.003):
        rx1, ry1, rx2, ry2 = self.seg_map
        t = Q.choice(p=1.0 / 40 * np.ones((1, 40)), name="t")

        # #------------- model other agent movements (past and future) -------------- XXX need to change RV names
        o_start_i = Q.choice(p=1.0 / self.cnt * np.ones((1, self.cnt)),
                             name="other_run_start")
        o_goal_i = Q.choice(p=1.0 / self.cnt * np.ones((1, self.cnt)),
                            name="other_run_goal")
        o_start = np.atleast_2d(self.locs[o_start_i])
        o_goal = np.atleast_2d(self.locs[o_goal_i])

        # plan using the latent variables of start and goal
        other_plan = planner.run_rrt_opt(np.atleast_2d(o_start),
                                         np.atleast_2d(o_goal), rx1, ry1, rx2,
                                         ry2)

        # add noise to the plan
        other_noisy_plan = [other_plan[0]]
        # loc_t = np.random.multivariate_normal(my_plan[i], [[path_noise, 0], [0, path_noise]]) # name 't_i' i.e. t_1, t_2,...t_n
        for i in xrange(1, len(other_plan) - 1):
            loc_x = Q.randn(mu=other_plan[i][0],
                            sigma=path_noise,
                            name="other_run_x_" + str(i))
            loc_y = Q.randn(mu=other_plan[i][1],
                            sigma=path_noise,
                            name="other_run_y_" + str(i))
            loc_t = [loc_x, loc_y]
            other_noisy_plan.append(loc_t)
        other_noisy_plan.append(other_plan[-1])

        #------------- model agent's own movements (past and future) --------------
        start_i = Q.choice(p=1.0 / self.cnt * np.ones((1, self.cnt)),
                           name="run_start")
        goal_i = Q.choice(p=1.0 / self.cnt * np.ones((1, self.cnt)),
                          name="run_goal")
        start = np.atleast_2d(self.locs[start_i])
        goal = np.atleast_2d(self.locs[goal_i])

        # plan using the latent variables of start and goal
        my_plan = planner.run_rrt_opt(np.atleast_2d(start),
                                      np.atleast_2d(goal), rx1, ry1, rx2, ry2)

        # add noise to the plan
        my_noisy_plan = [my_plan[0]]
        # loc_t = np.random.multivariate_normal(my_plan[i], [[path_noise, 0], [0, path_noise]]) # name 't_i' i.e. t_1, t_2,...t_n
        for i in xrange(1, len(my_plan) - 1):
            loc_x = Q.randn(mu=my_plan[i][0],
                            sigma=path_noise,
                            name="run_x_" + str(i))
            loc_y = Q.randn(mu=my_plan[i][1],
                            sigma=path_noise,
                            name="run_y_" + str(i))
            loc_t = [loc_x, loc_y]
            my_noisy_plan.append(loc_t)
        my_noisy_plan.append(my_plan[-1])

        my_loc = my_noisy_plan[t]

        #---------------- need to add RV of detection for each time step ----------
        t_detected = []
        PATH_LIMIT = 40
        for i in xrange(0, PATH_LIMIT):
            cur_loc = scale_up(my_noisy_plan[i])
            intersections = None
            detection_prob = 0.001
            # face the runner if within certain radius
            if dist(my_noisy_plan[i], other_noisy_plan[i]) <= .4:  #.35:
                fv = direction(scale_up(other_noisy_plan[i]), cur_loc)
                intersections = self.isovist.GetIsovistIntersections(
                    cur_loc, fv)

                # does the agent see other at time 't'
                other_loc = scale_up(other_noisy_plan[i])
                will_other_be_seen = self.isovist.FindIntruderAtPoint(
                    other_loc, intersections)
                if will_other_be_seen:
                    detection_prob = 0.999
                    t_detected.append(i)

            future_detection = Q.flip(p=detection_prob,
                                      name="detected_t_" + str(i))

            Q.keep("intersections-t-" + str(i), intersections)

        same_goal = 0
        if goal_i == o_goal_i:
            same_goal = 1
        same_goal_prob = 0.999 * same_goal + 0.001 * (1 - same_goal)

        runners_same_goal = Q.flip(p=same_goal_prob, name="same_goal")

        Q.keep("t_detected", t_detected)
        Q.keep("my_plan", my_noisy_plan)
        Q.keep("other_plan", other_noisy_plan)
Example #6
def run_simulation(sim_id, locs, seg_map, isovist, polys, epolys):
    # unpack
    rx1, ry1, rx2, ry2 = seg_map

    intersection_cache = []
    # create model of chaser
    chaser_model = create_chaser_model(seg_map=seg_map,
                                       locs=locs,
                                       isovist=isovist)
    # get evenly spread out starts and goals

    int_start = 0
    int_goal = 8

    #chas_start = 1

    # int_start = randint(0, len(locs)-1)
    # int_goal = randint(0, len(locs)-1)
    # while(dist(locs[int_start], locs[int_goal]) <= 0.5):
    # 	int_goal = randint(0, len(locs)-1)
    chas_start = randint(0, len(locs) - 1)
    while (dist(locs[int_start], locs[chas_start]) <= 0.5):
        chas_start = randint(0, len(locs) - 1)

    runner_start_i = int_start
    runner_start = locs[runner_start_i]
    # get runner's random goal location (far enough from its start)
    runner_goal_i = int_goal
    runner_goal = locs[runner_goal_i]
    # store history of runner's locations
    runner_true_locs = [runner_start]
    # get random start location
    start_index = chas_start
    chaser_start = locs[start_index]
    # begin tracking history of movements
    chaser_locs = [chaser_start]

    #TEMP FOR TESTING (NAIVE RUNNER)
    runner_path = planner.run_rrt_opt(np.atleast_2d(runner_start),
                                      np.atleast_2d(runner_goal),
                                      rx1,
                                      ry1,
                                      rx2,
                                      ry2,
                                      slow=True)

    if not naive_runner:
        # XXX smart runner
        runner_path = [[0.10000000000000001, 0.099999999999999978],
                       [0.14370786380607686, 0.12428214655893158],
                       [0.18741572761215378, 0.14856429311786318],
                       [0.23112359141823066, 0.17284643967679481],
                       [0.27483145522430752, 0.19712858623572641],
                       [0.25854686086588807, 0.23830235506087813],
                       [0.25097278902994408, 0.28619750551848688],
                       [0.24540487293466112, 0.31648310191196649],
                       [0.19679616003865907, 0.30477015904546001],
                       [0.16866150321818352, 0.32844002322014876],
                       [0.15786536899041551, 0.37717509770481417],
                       [0.14948852463283413, 0.44046838810633692],
                       [0.14111168027525278, 0.47576167850785966],
                       [0.13273483591767141, 0.52505496890938252],
                       [0.12435799156009002, 0.56434825931090527],
                       [0.11598114720250866, 0.62364154971242802],
                       [0.13916939803284978, 0.66269402652378151],
                       [0.15039748869499225, 0.70969377654572163],
                       [0.15619668605039488, 0.75935633095075694],
                       [0.16199588340579754, 0.80901888535579236],
                       [0.16779508076120014, 0.85868143976082756],
                       [0.20837094819244148, 0.87335725908849149],
                       [0.25828353694770523, 0.87588922948695491],
                       [0.30825277792753181, 0.87764278470422896],
                       [0.35822201890735811, 0.87939633992150279],
                       [0.40819125988718469, 0.88114989513877684],
                       [0.45816050086701121, 0.88290345035605078],
                       [0.48130593126207499, 0.91889699806900549],
                       [0.53128113014992562, 0.92047163965355017],
                       [0.58125632903777602, 0.92204628123809473]]
        for i in xrange(10):
            runner_path.append(runner_path[len(runner_path) - 1])
        runner_goal = runner_path[-1]
        #--end of smart runner

    assert len(runner_path) == 40
    runner_locs = runner_path

    Q_history = []
    Q_history_ts = []

    # begin timer
    TIME_LIMIT = 35
    for t in xrange(TIME_LIMIT):

        # check if the runner has reached its goal
        try:
            if runner_locs[t + 1] == runner_goal:
                print "Failed: Runner Reached Goal"
                return False, Q_history_ts
        except:
            # any failure here (e.g. indexing past the end of the runner's path) is treated as a miss
            print "FAILED"
            return False, Q_history_ts

        # get true runner's location at time t + 1
        runner_true_loc = runner_locs[t + 1]
        runner_true_locs.append(runner_true_loc)

        #---------------------------------

        # create empty trace
        Q = p.ProgramTrace(chaser_model)
        # condition the trace
        Q.condition("t", t)
        Q.set_obs("enf_start", chas_start)
        Q.condition("int_detected", True)
        Q.condition("int_start", runner_start_i)

        for pre_t in xrange(t + 1):
            Q.set_obs("enf_x_" + str(pre_t), chaser_locs[pre_t][0])
            Q.set_obs("enf_y_" + str(pre_t), chaser_locs[pre_t][1])
            if pre_t < t:
                Q.cache["enf_intersections_t_" +
                        str(pre_t)] = intersection_cache[pre_t]

        # run inference
        if naive_chaser:
            post_sample_traces = run_inference(Q, post_samples=64, samples=32)
        else:
            post_sample_traces = run_inference(Q, post_samples=64,
                                               samples=32)  # nested inference

        exp_next_step, exp_enf_path = rand_expected_future_step(
            post_sample_traces, "enf_plan")

        #XXX not working
        #if point_in_obstacle(chaser_locs[t], epolys):
        #	print "chaser went inside building"
        #return False

        Q.keep("post_sample_traces", post_sample_traces)
        # replan to its expected next step
        #t_ = min(t, len())
        # enf_plan = planner.run_rrt_opt( np.atleast_2d(chaser_locs[t]),
        # 	np.atleast_2d(exp_next_step), rx1,ry1,rx2,ry2)

        enf_plan = exp_enf_path

        # ------------------------------------

        if enf_plan is None:
            print "trying again"
            # note: decrementing t has no effect on the xrange loop; the next iteration still advances t
            t -= 1
            continue

        enf_next_step = enf_plan[1]
        # store step made
        chaser_locs.append(enf_next_step)

        inters = None
        # check to see if the runner was detected
        runner_detected = False
        # only if he is close
        if dist(enf_next_step, runner_true_loc) <= .4:
            # add a bit of hearing by facing the chaser towards the runner
            fv = direction(scale_up(runner_true_loc), scale_up(enf_next_step))
            # check if runner can be detected
            runner_true_next_loc = scale_up(runner_true_loc)
            intersections = isovist.GetIsovistIntersections(
                scale_up(enf_next_step), fv)
            runner_detected = isovist.FindIntruderAtPoint(
                scale_up(runner_true_loc), intersections)
            inters = np.asarray(intersections)

        # store intersections at time 't' facing in the direction it stepped
        fv = direction(scale_up(chaser_locs[t + 1]), scale_up(chaser_locs[t]))
        intersections = isovist.GetIsovistIntersections(
            scale_up(chaser_locs[t + 1]), fv)
        if not runner_detected:
            runner_detected = isovist.FindIntruderAtPoint(
                scale_up(runner_true_loc), intersections)
        intersection_cache.append(intersections)
        if inters is None:
            inters = np.asarray(intersections)
        inters /= 500.0

        # plot behavior
        plot_name = None
        if runner_detected:
            print "Success: Runner Detected Before Goal"
            plot_name = str(sim_id) + "_t-" + str(t) + "_s-" + "T" + ".eps"
            if naive_chaser:
                save_chaser_post_traces_naive(
                    post_sample_traces,
                    plot_name=plot_name,
                    runner_true_locs=runner_true_locs,
                    chaser_true_locs=chaser_locs,
                    intersections=inters,
                    exp_enf_path=exp_enf_path)
            else:
                save_chaser_post_traces_nested(
                    post_sample_traces,
                    plot_name=plot_name,
                    runner_true_locs=runner_true_locs,
                    chaser_true_locs=chaser_locs,
                    intersections=inters,
                    exp_enf_path=exp_enf_path)
            Q.keep("real_world_detection", True)
            Q_history.append(Q)
            Q_history_ts.append(Q.trace)
            return True, Q_history_ts
        else:
            plot_name = str(sim_id) + "_t-" + str(t) + "_s-" + "F" + ".eps"
            Q.keep("real_world_detection", False)
            Q_history.append(Q)
            Q_history_ts.append(Q.trace)
            print "searching..."

        if naive_chaser:
            save_chaser_post_traces_naive(post_sample_traces,
                                          plot_name=plot_name,
                                          runner_true_locs=runner_true_locs,
                                          chaser_true_locs=chaser_locs,
                                          intersections=inters,
                                          exp_enf_path=exp_enf_path)
        else:
            save_chaser_post_traces_nested(post_sample_traces,
                                           plot_name=plot_name,
                                           runner_true_locs=runner_true_locs,
                                           chaser_true_locs=chaser_locs,
                                           intersections=inters,
                                           exp_enf_path=exp_enf_path)

    print "Failed: Runner Reached Goal"
    return False, Q_history_ts
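
run_simulation returns a (success, trace history) pair. A hypothetical driver loop, assuming locs, seg_map, isovist, polys, and epolys are built by setup code that these examples do not show:

# Hypothetical usage sketch; the setup objects come from code not shown here.
results = []
for sim_id in xrange(10):
    caught, trace_history = run_simulation(sim_id, locs, seg_map, isovist, polys, epolys)
    results.append(caught)
print "detection rate:", sum(results) / float(len(results))
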
Example #7
	def run_tom_partial(self, Q, path_noise=0.003):
		rx1,ry1,rx2,ry2 = self.seg_map
		t = Q.choice( p=1.0/40*np.ones((1,40)), name="t" )

		#------------- model agent's movements (past and future) --------------
		start_i = Q.choice( p=1.0/self.cnt*np.ones((1,self.cnt)), name="init_run_start" )
		goal_i = Q.choice( p=1.0/self.cnt*np.ones((1,self.cnt)), name="init_run_goal" )
		start = np.atleast_2d( self.locs[start_i] )
		goal = np.atleast_2d( self.locs[goal_i] )

		# plan using the latent variables of start and goal
		my_plan = planner.run_rrt_opt( np.atleast_2d(start), 
		np.atleast_2d(goal), rx1,ry1,rx2,ry2 )
		
		# add noise to the plan
		my_noisy_plan = [my_plan[0]]
		# loc_t = np.random.multivariate_normal(my_plan[i], [[path_noise, 0], [0, path_noise]]) # name 't_i' i.e. t_1, t_2,...t_n
		for i in xrange(1, len(my_plan)-1):
			loc_x = Q.randn( mu=my_plan[i][0], sigma=path_noise, name="init_run_x_"+str(i) )
			loc_y = Q.randn( mu=my_plan[i][1], sigma=path_noise, name="init_run_y_"+str(i) )
			loc_t = [loc_x, loc_y]
			my_noisy_plan.append(loc_t)
		my_noisy_plan.append(my_plan[-1])
		my_loc = my_noisy_plan[t]

		#---------------- do inference --------------------------------------------
		other_noisy_plan = None
		other_inferred_trace = None

		if self.mode == "collab":
			post_sample_traces, other_inferred_goal = self.collaborative_nested_inference(Q)
			for trace in post_sample_traces:
				if trace["run_goal"] == other_inferred_goal:
					other_inferred_trace = trace
					other_noisy_plan = trace["my_plan"]
					break
		if self.mode == "advers":
			# return the trace with the fewest detections
			nested_post_sample_traces, other_inferred_trace = self.adversarial_nested_inference(Q)
			other_noisy_plan = other_inferred_trace["my_plan"]


		#---------------- need to add RV of detection for each time step ----------
		t_detected = []
		PATH_LIMIT = 40
		for i in xrange(0, PATH_LIMIT):
			cur_loc = scale_up(my_noisy_plan[i])
			intersections = None
			detection_prob = -10000.0
			# face the runner if within certain radius
			if dist(my_noisy_plan[i], other_noisy_plan[i]) <= .4: #.35:
				fv = direction(scale_up(other_noisy_plan[i]), cur_loc)
				intersections = self.isovist.GetIsovistIntersections(cur_loc, fv)
			
				# does the agent see other at time 't'
				other_loc = scale_up(other_noisy_plan[i])
				will_other_be_seen = self.isovist.FindIntruderAtPoint( other_loc, intersections )
				if will_other_be_seen:
					detection_prob = -.01
					t_detected.append(i)

			future_detection = Q.lflip( lp=detection_prob, name="detected_t_"+str(i) )
			
			Q.keep("intersections-t-"+str(i), intersections)

		if self.mode == "collab":
			same_goal = 0
			if goal_i == other_inferred_goal:
				same_goal = 1
			same_goal_prob = 0.999*same_goal + 0.001*(1-same_goal)

			runners_same_goal = Q.flip( p=same_goal_prob, name="same_goal" ) #TODO: update this lflip 1.19.2018

		#print t_detected
		Q.keep("t_detected", t_detected)
		Q.keep("my_plan", my_noisy_plan)
		Q.keep("other_plan", other_noisy_plan)
		Q.keep("other_run_start", other_inferred_trace["run_start"])
		Q.keep("other_run_goal", other_inferred_trace["run_goal"])
		Q.keep("nested_post_samples", nested_post_sample_traces)
Example #8
	def run_basic_partial(self, Q, path_noise=0.000):
		rx1,ry1,rx2,ry2 = self.seg_map
		t = Q.choice( p=1.0/40*np.ones((1,40)), name="t" )

		# #------------- model other agent movements (past and future) -------------- 
		o_start_i = Q.choice( p=1.0/self.cnt*np.ones((1,self.cnt)), name="other_run_start" )
		o_goal_i = Q.choice( p=1.0/self.cnt*np.ones((1,self.cnt)), name="other_run_goal" )

		o_start = np.atleast_2d( self.locs[o_start_i] )
		o_goal = np.atleast_2d( self.locs[o_goal_i] )

		# plan using the latent variables of start and goal
		# other_plan = planner.run_rrt_opt( np.atleast_2d(o_start), 
		# np.atleast_2d(o_goal), rx1,ry1,rx2,ry2 )

		#print Q.fetch_obs()
		prev_true_loc =  np.atleast_2d([Q.get_obs("other_run_x_"+str(t-1)), Q.get_obs("other_run_y_"+str(t-1))])
		#print "prev_true_loc", prev_true_loc
		other_plan = planner.run_rrt_opt( prev_true_loc, np.atleast_2d(o_goal), rx1,ry1,rx2,ry2 )
		u = 1

		# add noise to the plan
		other_noisy_plan = [self.locs[o_start_i]]
		# loc_t = np.random.multivariate_normal(my_plan[i], [[path_noise, 0], [0, path_noise]]) # name 't_i' i.e. t_1, t_2,...t_n
		for i in xrange(1, len(other_plan)-1):
			
			if i < t:
				# do not sample if already known to be true:
				loc_t = [Q.get_obs(name="other_run_x_"+str(i)), Q.get_obs(name="other_run_y_"+str(i))]
			else:
				loc_x = Q.randn( mu=other_plan[u][0], sigma=path_noise, name="other_run_x_"+str(i) )
				loc_y = Q.randn( mu=other_plan[u][1], sigma=path_noise, name="other_run_y_"+str(i) )
				loc_t = [loc_x, loc_y]
				u+=1
			other_noisy_plan.append(loc_t)
		other_noisy_plan.append(other_plan[-1])
		

		#------------- model agent's own movements (past and future) --------------
		start_i = Q.choice( p=1.0/self.cnt*np.ones((1,self.cnt)), name="run_start" )
		goal_i = Q.choice( p=1.0/self.cnt*np.ones((1,self.cnt)), name="run_goal" )

		# inserted so the sampled goal is not too close to the start
		goal_i, Q = get_goal(start_i, Q)

		start = np.atleast_2d( self.locs[start_i] )
		goal = np.atleast_2d( self.locs[goal_i] )

		# plan using the latent variables of start and goal
		my_plan = planner.run_rrt_opt( np.atleast_2d(start), 
		np.atleast_2d(goal), rx1,ry1,rx2,ry2 )
		

		# add noise to the plan
		my_noisy_plan = [my_plan[0]]
		# loc_t = np.random.multivariate_normal(my_plan[i], [[path_noise, 0], [0, path_noise]]) # name 't_i' i.e. t_1, t_2,...t_n
		for i in xrange(1, len(my_plan)-1):
			loc_x = Q.randn( mu=my_plan[i][0], sigma=path_noise, name="run_x_"+str(i) )
			loc_y = Q.randn( mu=my_plan[i][1], sigma=path_noise, name="run_y_"+str(i) )
			loc_t = [loc_x, loc_y]
			my_noisy_plan.append(loc_t)
		my_noisy_plan.append(my_plan[-1])

		my_loc = my_noisy_plan[t]

		#-------------------------- detections ---------------------
		if self.mode == "collab":
			t_detected = []
			PATH_LIMIT = 30
			for i in xrange(0, PATH_LIMIT):
				cur_loc = scale_up(my_noisy_plan[i])
				intersections = None
				detection_prob = 0.1
				# face the runner if within certain radius
				if dist(my_noisy_plan[i], other_noisy_plan[i]) <= .4: #.35:
					fv = direction(scale_up(other_noisy_plan[i]), cur_loc)
					intersections = self.isovist.GetIsovistIntersections(cur_loc, fv)
				
					# does the agent see other at time 't'
					other_loc = scale_up(other_noisy_plan[i])
					will_other_be_seen = self.isovist.FindIntruderAtPoint( other_loc, intersections )
					if will_other_be_seen:
						detection_prob = 0.9
						t_detected.append(i)

				future_detection = Q.flip( p=detection_prob, name="detected_t_"+str(i) )
				# ^ might want to change this to lflip when you do experiments next
				# you wrote this on 1.19.2018 - iris
				
				Q.keep("intersections-t-"+str(i), intersections)

			same_goal = 0
			if goal_i == o_goal_i:
				same_goal = 1

			same_goal_prob = .999*same_goal + .001*(1-same_goal)

			runners_same_goal = Q.lflip( lp=same_goal_prob, name="same_goal" ) 

			Q.keep("t_detected", t_detected)
			Q.keep("my_plan", my_noisy_plan)
			Q.keep("other_plan", other_noisy_plan)

		if self.mode == "advers":
			t_detected = []
			PATH_LIMIT = 30
			for i in xrange(0, PATH_LIMIT):
				other_loc = scale_up(other_noisy_plan[i])
				intersections = None
				detection_prob = -0.0001
				# face the runner if within certain radius
				if dist(my_noisy_plan[i], other_noisy_plan[i]) <= .4: #.35:
					# -----if other is looking for me
					#print ("fv:", scale_up(my_noisy_plan[i]), other_loc)
					fv = direction(scale_up(my_noisy_plan[i]), other_loc)
					intersections = self.isovist.GetIsovistIntersections(other_loc, fv)
					#print intersections
				
					# does the chaser see other at time 'i'
					curr_loc = scale_up(my_noisy_plan[i])
					#if other is looking for me
					will_I_be_seen = self.isovist.FindIntruderAtPoint( curr_loc, intersections )

					#print will_I_be_seen
					if will_I_be_seen:
						detection_prob = -1.0
						t_detected.append(i)

				
				future_detection = Q.clflip( lp=detection_prob, name="detected_t_"+str(i) )

				#future_detection = Q.flip( p=detection_prob, name="detected_t_"+str(i) )
				
				Q.keep("intersections-t-"+str(i), intersections)

			Q.keep("t_detected", t_detected)
			Q.keep("my_plan", my_noisy_plan)
			Q.keep("other_plan", other_noisy_plan)
Example #9
	def run_tom_partial(self, Q, path_noise=0.000):
		
		rx1,ry1,rx2,ry2 = self.seg_map
		t = Q.choice( p=1.0/40*np.ones((1,40)), name="t" )

		#------------- model agent's movements (past and future) --------------
		start_i = Q.choice( p=1.0/self.cnt*np.ones((1,self.cnt)), name="init_run_start" )
		goal_i = Q.choice( p=1.0/self.cnt*np.ones((1,self.cnt)), name="init_run_goal" )
		start = np.atleast_2d( self.locs[start_i] )
		goal = np.atleast_2d( self.locs[goal_i] )
		#print "HERE:", goal_i, goal
		
		# plan using the latent variables of start and goal
		
		prev_true_loc =  np.atleast_2d([Q.get_obs("init_run_x_"+str(t-1)), Q.get_obs("init_run_y_"+str(t-1))])

		my_plan = planner.run_rrt_opt( prev_true_loc, goal, rx1,ry1,rx2,ry2 )
		u = 1
		my_noisy_plan = [self.locs[start_i]]

		
		# loc_t = np.random.multivariate_normal(my_plan[i], [[path_noise, 0], [0, path_noise]]) # name 't_i' i.e. t_1, t_2,...t_n
		for i in xrange(1, len(my_plan)-1):
			
			if i < t:
				# do not sample if already known to be true
				loc_t = [Q.get_obs("init_run_x_"+str(i)), Q.get_obs("init_run_y_"+str(i))]
			else:
				loc_x = Q.randn( mu=my_plan[u][0], sigma=path_noise, name="init_run_x_"+str(i) )
				loc_y = Q.randn( mu=my_plan[u][1], sigma=path_noise, name="init_run_y_"+str(i) )
				loc_t = [loc_x, loc_y]
				u += 1
			my_noisy_plan.append(loc_t)
		my_noisy_plan.append(my_plan[-1])
		my_loc = my_noisy_plan[t]


		
		#assert (len(my_noisy_plan) == 30)

		#---------------- do inference --------------------------------------------
		other_noisy_plan = None
		other_inferred_trace = None

		if self.mode == "collab":
			post_sample_traces, other_inferred_goal = self.collaborative_nested_inference(Q)
			for trace in post_sample_traces:
				if trace["run_goal"] == other_inferred_goal:
					other_inferred_trace = trace
					other_noisy_plan = trace["my_plan"]
					break

		
		if self.mode == "advers":
			all_t_detected = []
			other_plans = []
			L = self.L
			all_Qls_scores = []
			all_Qls_traces = []
			all_Qls_obs = []
			all_ql_scores = []
			#print "init Q score:", Q.get_score()

			for l in xrange(L):
				q = self.condition_middle_model(Q)
				# print ("Q obs:", Q.fetch_obs())
				# print ("q obs:", q.fetch_obs())
				# raw_input()
				Q_l = Q.get_copy()
				#print "---------running runner model--------------"
				q_score_l, q_l = q.run_model()
				#print '----------running rest of chaser model-----------'
				#plot_middlemost_sample(q_l, q_score_l, self.directory)
				other_noisy_plan = q_l["my_plan"]

				#---------------- need to add RV of detection for each time step ----------
				t_detected = []
				PATH_LIMIT = 30
				will_other_be_seen = False
				for i in xrange(0, PATH_LIMIT):
					cur_loc = scale_up(my_noisy_plan[i])
					intersections = None
					detection_prob = -.0001
					# face the runner if within certain radius
					if dist(my_noisy_plan[i], other_noisy_plan[i]) <= .4: #.35:
						fv = direction(scale_up(other_noisy_plan[i]), cur_loc)
						intersections = self.isovist.GetIsovistIntersections(cur_loc, fv)
					
						# does the agent see other at time 't'
						other_loc = scale_up(other_noisy_plan[i])
						will_other_be_seen = self.isovist.FindIntruderAtPoint( other_loc, intersections )
						if will_other_be_seen:
							detection_prob = -1
							t_detected.append(i)
					
					#future_detection = Q_l.flip( p=detection_prob, name="detected_t_"+str(i) )
					
					future_detection = Q_l.lflip( lp=detection_prob, name="detected_t_"+str(i))
					
				Q_l.add_trace(name="q_trace", trace=q_l, score=q_score_l)
				all_ql_scores.append(q_score_l)
				#print "list:", all_ql_scores

				# Q_l.keep("t_detected", t_detected)
				all_t_detected.append(t_detected)
				# Q_l.keep("my_plan", my_noisy_plan)
				# Q_l.keep("other_plan", other_noisy_plan)
				other_plans.append(other_noisy_plan)
				
				# Q_l.keep("other_run_start", q_l["run_start"])
				# Q_l.keep("other_run_goal", q_l["run_goal"])
				all_Qls_scores.append(Q_l.get_score())
				all_Qls_traces.append(Q_l.get_trace())
				all_Qls_obs.append(Q_l.get_obs())

				

		#print ("all ql scores:", all_ql_scores)
		#print ("all_Qls_scores:", all_Qls_scores)
		Q.keep("all_ql_scores", np.array(all_ql_scores))
		Q.keep("all_Qls_obs", all_Qls_obs)
		Q.keep("all_Qls_traces", all_Qls_traces)
		Q.keep("all_Qls_scores", np.array(all_Qls_scores))
		Q.keep("mean", np.mean(all_Qls_scores))
		Q.keep("my_plan", my_noisy_plan)
		Q.keep("t_detected", all_t_detected)
		Q.keep("other_plan", other_plans)

		#update actual score of Q.
		Q.cur_trace_score = np.mean(all_Qls_scores)