Example #1
def test_opposing(lr=0.5):
    initial_state = torch.tensor([
        [0.0, 0.0, 0.0, 1.0, 0.0, 10.0],
        [-0.3, 10.0, 0.0, -1.0, -0.3, 0.0],
    ])
    generator = socialforce.Simulator(initial_state)
    truth = torch.stack(
        [generator.step().state[:, 0:2].clone() for _ in range(21)]).detach()

    print('============DONE WITH GENERATION===============')

    v0 = torch.tensor(1.2, requires_grad=True)
    sigma_v = torch.tensor(0.1, requires_grad=True)
    V = socialforce.PedPedPotential(0.4, v0, sigma_v)

    # training
    for _ in range(100):
        print('loop')
        s = socialforce.Simulator(initial_state, None, V)
        g = torch.stack([s.step().state[:, 0:2].clone() for _ in range(21)])
        loss = (g - truth).pow(2).sum()

        v0_grad, sigma_grad = torch.autograd.grad(loss, [v0, sigma_v])
        print('v0', v0, v0_grad)
        print('sigma', sigma_v, sigma_grad)

        with torch.no_grad():
            v0 -= lr * v0_grad
            sigma_v -= lr * sigma_grad

    assert v0.item() == pytest.approx(2.1, abs=0.01)
    assert sigma_v.item() == pytest.approx(0.3, abs=0.01)
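A note on Example #1: the in-place updates under torch.no_grad() are hand-rolled gradient descent. A minimal sketch of the same fit expressed with torch.optim.SGD (a stylistic alternative only, reusing initial_state and truth from the example above):

# sketch: Example #1's manual update loop, rewritten with torch.optim.SGD
v0 = torch.tensor(1.2, requires_grad=True)
sigma_v = torch.tensor(0.1, requires_grad=True)
V = socialforce.PedPedPotential(0.4, v0, sigma_v)
optimizer = torch.optim.SGD([v0, sigma_v], lr=0.5)
for _ in range(100):
    s = socialforce.Simulator(initial_state, None, V)
    g = torch.stack([s.step().state[:, 0:2].clone() for _ in range(21)])
    loss = (g - truth).pow(2).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()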
Example #2
    def predict(self, state):
        """

        :param state:
        :return:
        """
        sf_state = []
        self_state = state.self_state
        sf_state.append((self_state.px, self_state.py, self_state.vx,
                         self_state.vy, self_state.gx, self_state.gy))
        for human_state in state.human_states:
            # approximate desired direction with current velocity
            if human_state.vx == 0 and human_state.vy == 0:
                gx = np.random.random()
                gy = np.random.random()
            else:
                gx = human_state.px + human_state.vx
                gy = human_state.py + human_state.vy
            sf_state.append((human_state.px, human_state.py, human_state.vx,
                             human_state.vy, gx, gy))
        sim = socialforce.Simulator(np.array(sf_state),
                                    delta_t=self.time_step,
                                    initial_speed=self.initial_speed,
                                    v0=self.v0,
                                    sigma=self.sigma)
        sim.step()
        action = ActionXY(sim.state[0, 2], sim.state[0, 3])

        self.last_state = state

        return action
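Throughout these examples, socialforce.Simulator expects one row per agent in the column order [px, py, vx, vy, gx, gy] (position, velocity, goal); the predict method above builds exactly such rows, approximating each human's goal as one velocity step ahead. A minimal self-contained run with that layout (toy values, default simulator parameters):

# sketch: the 6-column state convention used by all examples on this page
import numpy as np
import socialforce

initial_state = np.array([
    # px    py   vx    vy   gx    gy
    [0.0,  0.0,  0.5,  0.0, 10.0, 0.0],   # agent walking right
    [10.0, 0.3, -0.5,  0.0,  0.0, 0.0],   # agent walking left
])
sim = socialforce.Simulator(initial_state)
sim.step()
print(sim.state[:, 0:2])  # positions after one step (delta_t defaults to 0.4 s)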
Example #3
def test_opposing_scipy():
    initial_state = torch.tensor([
        [0.0, 0.0, 0.0, 1.0, 0.0, 10.0],
        [-0.3, 10.0, 0.0, -1.0, -0.3, 0.0],
    ])
    generator = socialforce.Simulator(initial_state)
    truth = torch.stack(
        [generator.step().state[:, 0:2].clone() for _ in range(21)]).detach()

    print('============DONE WITH GENERATION===============')

    # training
    def f(x):
        v0 = torch.tensor(x[0], requires_grad=True)
        sigma_v = float(x[1])
        V = socialforce.PedPedPotential(0.4, v0, sigma_v)
        s = socialforce.Simulator(initial_state, None, V)
        g = torch.stack([s.step().state[:, 0:2].clone() for _ in range(21)])

        # sum of squared position errors over all steps and both pedestrians
        loss = (g - truth).pow(2).sum()
        return float(loss)  # plain Python float for scipy

    parameters = np.array([1.2, 0.1])
    res = scipy.optimize.minimize(f,
                                  parameters,
                                  method='L-BFGS-B',
                                  options=OPTIMIZER_OPT)
    print(res)
    assert res.x == pytest.approx(np.array([2.1, 0.3]), abs=0.01)
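Because f above returns only the loss, L-BFGS-B estimates gradients by finite differences and the requires_grad flag on v0 goes unused. A sketch of feeding exact autograd gradients to scipy instead (it assumes the initial_state, truth, and parameters of the test above; jac=True tells scipy that the objective returns a (loss, gradient) pair):

# sketch: exact gradients for scipy via torch.autograd
def f_with_grad(x):
    v0 = torch.tensor(x[0], requires_grad=True)
    sigma_v = torch.tensor(x[1], requires_grad=True)
    V = socialforce.PedPedPotential(0.4, v0, sigma_v)
    s = socialforce.Simulator(initial_state, None, V)
    g = torch.stack([s.step().state[:, 0:2].clone() for _ in range(21)])
    loss = (g - truth).pow(2).sum()
    grads = torch.autograd.grad(loss, [v0, sigma_v])
    return float(loss), np.array([float(gr) for gr in grads])

res = scipy.optimize.minimize(f_with_grad, parameters, jac=True, method='L-BFGS-B')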
Example #4
    def predict(self, state):
        sf_state = []
        for agent_state in state:
            sf_state.append((
                agent_state.px,
                agent_state.py,
                agent_state.vx,
                agent_state.vy,
                agent_state.gx,
                agent_state.gy,
            ))

        sim = socialforce.Simulator(
            np.array(sf_state),
            delta_t=self.time_step,
            initial_speed=self.initial_speed,
            v0=self.v0,
            sigma=self.sigma,
        )
        sim.step()
        actions = [
            ActionXY(sim.state[i, 2], sim.state[i, 3])
            for i in range(len(state))
        ]
        del sim

        return actions
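Example #5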
def test_walkway(n):
    pos_left = ((np.random.random((n, 2)) - 0.5) * 2.0) * np.array([25.0, 5.0])
    pos_right = ((np.random.random((n, 2)) - 0.5) * 2.0) * np.array([25.0, 5.0])

    x_vel_left = np.random.normal(1.34, 0.26, size=(n, 1))
    x_vel_right = np.random.normal(-1.34, 0.26, size=(n, 1))
    x_destination_left = 100.0 * np.ones((n, 1))
    x_destination_right = -100.0 * np.ones((n, 1))

    zeros = np.zeros((n, 1))

    state_left = np.concatenate(
        (pos_left, x_vel_left, zeros, x_destination_left, zeros), axis=-1)
    state_right = np.concatenate(
        (pos_right, x_vel_right, zeros, x_destination_right, zeros), axis=-1)
    initial_state = np.concatenate((state_left, state_right))

    space = [
        np.array([(x, 5) for x in np.linspace(-25, 25, num=5000)]),
        np.array([(x, -5) for x in np.linspace(-25, 25, num=5000)]),
    ]
    s = socialforce.Simulator(initial_state, socialforce.PedSpacePotential(space))
    states = []
    for _ in range(250):
        state = s.step().state
        # periodic boundary conditions
        state[state[:, 0] > 25, 0] -= 50
        state[state[:, 0] < -25, 0] += 50

        states.append(state.copy())
    states = np.stack(states)

    with visualize(states, space, 'docs/walkway_{}.gif'.format(n)) as _:
        pass
Example #6
    def f(x):
        V.set_parameters(torch.from_numpy(x))
        s = socialforce.Simulator(initial_state, None, V)
        g = torch.stack([s.step().state[:, 0:2].clone() for _ in range(21)])

        # mean squared Euclidean distance between simulated and true positions
        diff = g - truth
        loss = torch.mul(diff, diff).sum(dim=-1).mean()

        return float(loss)
Example #7
    def f(x):
        v0 = torch.tensor(x[0], requires_grad=True)
        sigma_v = float(x[1])
        V = socialforce.PedPedPotential(0.4, v0, sigma_v)
        s = socialforce.Simulator(initial_state, None, V)
        g = torch.stack([s.step().state[:, 0:2].clone() for _ in range(21)])

        # sum of squared position errors over all steps and both pedestrians
        loss = (g - truth).pow(2).sum()
        return float(loss)  # plain Python float for scipy
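Example #8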
def test_separator():
    initial_state = np.array([
        [-10.0, -0.0, 1.0, 0.0, 10.0, 0.0],
    ])
    space = [
        np.array([(i, i) for i in np.linspace(-1, 4.0)]),
    ]
    s = socialforce.Simulator(initial_state, socialforce.PedSpacePotential(space))
    states = np.stack([s.step().state.copy() for _ in range(80)])

    # visualize
    with visualize(states, space, 'docs/separator.gif') as ax:
        ax.set_xlim(-10, 10)
Example #9
def generate_sf_trajectory(sim_scene,
                           num_ped,
                           sf_params=[0.5, 2.1, 0.3],
                           end_range=0.2):
    """ Simulating Scenario using SF """
    ## Default: (0.5, 2.1, 0.3)
    sampling_rate = 1

    ## Circle Crossing
    if sim_scene == 'circle_crossing':
        fps = 10
        sampling_rate = fps / 2.5
        trajectories, positions, goals, speed = generate_circle_crossing(
            num_ped)
    else:
        raise NotImplementedError

    initial_state = np.array([[
        positions[i][0], positions[i][1], speed[i][0], speed[i][1],
        goals[i][0], goals[i][1]
    ] for i in range(num_ped)])

    ped_ped = PedPedPotential(1. / fps, v0=sf_params[1], sigma=sf_params[2])
    field_of_view = FieldOfView()
    s = socialforce.Simulator(initial_state,
                              ped_ped=ped_ped,
                              field_of_view=field_of_view,
                              delta_t=1. / fps,
                              tau=sf_params[0])

    # run
    reaching_goal = [False] * num_ped
    done = False
    count = 0

    # simulate the scene
    while not done and count < 500:
        count += 1
        position = np.stack(s.step().state.copy())
        for i in range(len(initial_state)):
            if count % sampling_rate == 0:
                trajectories[i].append((position[i, 0], position[i, 1]))
            # check if this agent reaches the goal
            if np.linalg.norm(position[i, :2] -
                              np.array(goals[i])) < end_range:
                reaching_goal[i] = True
            else:
                s.state[i, :4] = position[i, :4]
        done = all(reaching_goal)

    return trajectories, count
Example #10
def test_crossing():
    log = logging.getLogger('test_crossing')
    agentNum = 30
    for episode in range(10):
        # random velocities in [-0.5, 0.5] and positions/goals in [0, 10]
        rows = []
        for num in range(agentNum):
            v1 = random.uniform(-0.5, 0.5)
            v2 = random.uniform(-0.5, 0.5)

            p1 = random.uniform(0, 10)
            p2 = random.uniform(0, 10)
            p3 = random.uniform(0, 10)
            p4 = random.uniform(0, 10)

            rows.append([p1, p2, v1, v2, p3, p4])
        state = np.array(rows)

        log.debug(state)

        initial_state2 = np.array([
            [0.0, 0.0, 0.5, 0.5, 10.0, 10.0],
            [10.0, 0.3, 0.5, 0.5, 0.0, 10.0],
        ])

        initial_state = np.array(state)
        print('state:', state)
        print('initial state:', initial_state)
        print('initial state2:', initial_state2)
        s = socialforce.Simulator(initial_state)
        states = np.stack([s.step().state.copy() for _ in range(50)])

        # visualize
        print('')
        filename = 'crossing' + str(episode) + '.png'
        with socialforce.show.canvas('docs/cross/' + filename) as ax:
            ax.set_xlabel('x [m]')
            ax.set_ylabel('y [m]')

            for ped in range(agentNum):
                x = states[:, ped, 0]
                y = states[:, ped, 1]
                # no per-pedestrian labels: 30 legend entries would not be readable
                ax.plot(x, y, '-o', markersize=2.5)
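Example #11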
def test_f_ab():
    s = socialforce.Simulator()
    initial_state = s.normalize_state([
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
        [1.0, 0.0, 0.0, 0.0, 1.0, 1.0],
    ])
    force_at_unit_distance = 0.25  # confirmed below
    assert s.f_ab(initial_state).detach().numpy() == pytest.approx(np.array([[
        [0.0, 0.0],
        [-force_at_unit_distance, 0.0],
    ], [
        [force_at_unit_distance, 0.0],
        [0.0, 0.0],
    ]]),
                                                                   abs=0.05)
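Example #12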
def test_fab():
    initial_state = np.array([
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
        [1.0, 0.0, 0.0, 0.0, 1.0, 1.0],
    ])
    s = socialforce.Simulator(initial_state)
    force_at_unit_distance = 0.25  # TODO confirm
    assert s.f_ab() == pytest.approx(np.array([[
        [0.0, 0.0],
        [-force_at_unit_distance, 0.0],
    ], [
        [force_at_unit_distance, 0.0],
        [0.0, 0.0],
    ]]),
                                     abs=0.05)
Example #13
def test_opposing():
    initial_state = np.array([
        [0.0, 0.0, 1.0, 0.0, 0.0, 10.0],
        [-0.3, 10.0, -1.0, 0.0, -0.3, 0.0],
    ])
    s = socialforce.Simulator(initial_state)
    states = np.stack([s.step().state.copy() for _ in range(21)])

    # visualize
    print('')
    with socialforce.show.canvas('docs/opposing.png') as ax:
        ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')

        for ped in range(2):
            x = states[:, ped, 0]
            y = states[:, ped, 1]
            ax.plot(x, y, '-o', label='ped {}'.format(ped), markersize=2.5)
        ax.legend()
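Example #14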
def test_gate():
    initial_state = np.array([
        [-9.0, -0.0, 1.0, 0.0, 10.0, 0.0],
        [-10.0, -1.5, 1.0, 0.0, 10.0, 0.0],
        [-10.0, -2.0, 1.0, 0.0, 10.0, 0.0],
        [-10.0, -2.5, 1.0, 0.0, 10.0, 0.0],
        [-10.0, -3.0, 1.0, 0.0, 10.0, 0.0],
        [10.0, 1.0, -1.0, 0.0, -10.0, 0.0],
        [10.0, 2.0, -1.0, 0.0, -10.0, 0.0],
        [10.0, 3.0, -1.0, 0.0, -10.0, 0.0],
        [10.0, 4.0, -1.0, 0.0, -10.0, 0.0],
        [10.0, 5.0, -1.0, 0.0, -10.0, 0.0],
    ])
    space = [
        np.array([(0.0, y) for y in np.linspace(-10, -0.7, 1000)]),
        np.array([(0.0, y) for y in np.linspace(0.7, 10, 1000)]),
    ]
    s = socialforce.Simulator(initial_state, socialforce.PedSpacePotential(space))
    states = np.stack([s.step().state.copy() for _ in range(150)])

    with visualize(states, space, 'docs/gate.gif') as _:
        pass
Example #15
def test_opposing_mlp_scipy():
    torch.manual_seed(42)
    np.random.seed(42)

    initial_state = torch.tensor([
        [0.0, 0.0, 0.0, 1.0, 0.0, 10.0],
        [-0.3, 10.0, 0.0, -1.0, -0.3, 0.0],
    ])
    generator = socialforce.Simulator(initial_state)
    truth = torch.stack(
        [generator.step().state[:, 0:2].clone() for _ in range(21)]).detach()

    print('============DONE WITH GENERATION===============')

    V = socialforce.PedPedPotentialMLP(0.4)
    parameters = V.get_parameters().clone().detach().numpy()
    initial_parameters = parameters.copy()

    # training
    def f(x):
        V.set_parameters(torch.from_numpy(x))
        s = socialforce.Simulator(initial_state, None, V)
        g = torch.stack([s.step().state[:, 0:2].clone() for _ in range(21)])

        # mean squared Euclidean distance between simulated and true positions
        diff = g - truth
        loss = torch.mul(diff, diff).sum(dim=-1).mean()

        return float(loss)

    res = scipy.optimize.minimize(f,
                                  parameters,
                                  method='L-BFGS-B',
                                  options=OPTIMIZER_OPT)
    print(res)

    # make plots of result
    visualize('docs/mlp_scipy_', V, initial_parameters, res.x)
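The get_parameters/set_parameters round-trip above is what lets scipy treat the MLP weights as one flat numpy vector. For an arbitrary torch module, the same flattening can be sketched with the stock torch.nn.utils helpers (an alternative shown for illustration, not the socialforce API; module and compute_loss are placeholders):

# sketch: flat-vector view of module parameters for scipy optimizers
import torch
from torch.nn.utils import parameters_to_vector, vector_to_parameters

def module_loss_for_scipy(x, module, compute_loss):
    # write the flat numpy vector back into the module's parameters
    vector_to_parameters(torch.from_numpy(x).float(), module.parameters())
    return float(compute_loss())

x0 = parameters_to_vector(V.parameters()).detach().numpy()  # initial guess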
Example #16
def gate(n, door_width, velocity, printGif, printPng):
    door_x = 0.0  # x-coordinate of the door
    destination = [3.0, 0.0]  # goal position
    range_x = [-10, door_x]  # initial x-range of the crowd
    range_y = [-6.0, 6.0]  # initial y-range of the crowd
    vel_x = velocity  # initial velocity in x
    vel_y = velocity  # initial velocity in y

    x_pos = np.random.random((n, 1)) * np.array([range_x[0]])
    y_pos = ((np.random.random((n, 1)) - 0.5) * 2.0) * np.array([range_y[1]])
    x_vel = np.full((n, 1), vel_x)
    y_vel = np.full((n, 1), vel_y)
    x_dest = np.full((n, 1), destination[0])
    y_dest = np.full((n, 1), destination[1])
    initial_state = np.concatenate(
        (x_pos, y_pos, x_vel, y_vel, x_dest, y_dest), axis=-1)
    print(initial_state)

    space = [
        np.array([(door_x, y)
                  for y in np.linspace(-10, -door_width / 2, 1000)]),
        np.array([(door_x, y) for y in np.linspace(door_width / 2, 10, 1000)]),
    ]
    s = socialforce.Simulator(initial_state,
                              socialforce.PedSpacePotential(space))

    # step until everyone has passed through the door
    states = []
    i = 0
    while True:
        i += 1
        state = s.step().state.copy()
        states.append(state)
        end = True
        for sta in state:
            if sta[0] < 0:
                end = False
            else:
                # shift agents that already passed the door 5 m further along x
                sta[0] += 5.0
        if end:
            # print("terminate when time is ", i)
            break

    states = np.stack(states)

    if printGif:
        # render the animated GIF (slow)
        with visualize(states, space,
                       'out/gate{}-{}.gif'.format(n, door_width)) as _:
            pass
    if printPng:
        # render the trajectory plot
        with socialforce.show.canvas('out/gate{}-{}.png'.format(
                n, door_width)) as ax:
            ax.set_xlabel('x [m]')
            ax.set_ylabel('y [m]')
            for ped in range(len(initial_state)):
                x = states[:, ped, 0]
                y = states[:, ped, 1]
                ax.plot(x, y, '-o')
            ax.set_title("evacuate time {}".format(i))
    return i
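Example #17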
    def get_scene(self,
                  nr_agents,
                  nr_scenes,
                  agent_states,
                  threshold,
                  setting="random"):
        """
        Function to generate each scene of the dataset.
        The agents enter the scenario at one side of the setting and leave it at another side. An implementation of the Social Force Model is used to
        describe the agents motion behavior including the interactions between the agents.
        :param nr_agents: Number of agents in each scene
        :param nr_scenes: Number of scenes of the dataset
        :param agent_states: Numpy array of shape N x 6 for N initial agents in the dataset. Determines the initial states of the agents in the dataset.
        The row of the i-th agent needs to be of the form: [x, y, v_x, v_y, d_x, d_y]. If the states should be initialized randomly, use agent_states = [] as input.
        :param threshold: Specifies threshold for agents getting out of the scene (in meters)
        :param setting: Either random or meeting
        """
        print("Start generating scenes for dataset %s..." % (self.scenario))

        # Initialize scene
        if setting == "meeting":
            self.initialize = meeting(self.scenario, self.v_max, self.v_min,
                                      self.a, self.b, self.tau)
        else:
            self.initialize = random_initialization(self.scenario, self.v_max,
                                                    self.v_min, self.a, self.b,
                                                    self.tau)

        self.initial_state = self.initialize_agents(self.v_max, self.v_min,
                                                    nr_agents, agent_states,
                                                    self.a, self.b, setting)

        # Initialize simulator
        self.s = socialforce.Simulator(self.initial_state,
                                       self.V0,
                                       self.sigma,
                                       self.delta_t,
                                       self.tau,
                                       beta=self.beta)

        # Get Array for scene numbers and agents
        scene = np.zeros(nr_agents)
        agents = np.arange(nr_agents) + 1

        # Concatenate arrays with respective x-y-coordinates
        dataset = np.concatenate((scene.reshape(
            nr_agents, 1), agents.reshape(nr_agents, 1), self.s.state[:, 0:2]),
                                 axis=1)

        # Specify file location
        root_path_experiments = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "..", "..", "Experiments"))
        if not os.path.exists(root_path_experiments):
            raise FileNotFoundError(errno.ENOENT, strerror(errno.ENOENT),
                                    "Can not find Experiments folder!")

        root_path_dataset = os.path.abspath(
            os.path.join(
                root_path_experiments, "datasets",
                str(self.scenario) + "simulated" + "_" +
                str(self.parameterinfo), str(self.phase)))

        if not os.path.exists(root_path_dataset):
            os.makedirs(root_path_dataset)

        # Create background image
        fig, ax = plt.subplots()
        ax.grid(linestyle='dotted')
        ax.set_aspect(1.0, 'datalim')
        ax.set_axisbelow(True)
        ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')
        ax.set_ylim(
            [self.initialize.total_y_min - 2, self.initialize.total_y_max + 2])
        ax.set_xlim(
            [self.initialize.total_x_min - 2, self.initialize.total_x_max + 2])
        ax.set_title(self.scenario + " V0: " + str(self.V0) + " - sigma: " +
                     str(self.sigma))
        fig.set_tight_layout(True)
        fig.savefig(root_path_dataset + "//" + str(self.scenario) +
                    "simulated.jpg")
        plt.close()

        # Create txt-file for dataset
        txt_file = os.path.join(
            root_path_dataset,
            str(self.phase) + "_" + str(self.scenario) + "simulated.txt")
        print("Create dataset as txt-file for scenario: " + str(self.scenario))
        file = open(txt_file, "w")
        np.savetxt(file, dataset, fmt="%.2f", delimiter="\t")
        file.close()
        file = open(txt_file, "a")

        # Calculate states of each agent for every scene in the dataset
        states = []
        states.append(self.s.state.copy())
        agent_list = []

        for _ in range(nr_scenes):
            state = self.s.step().state
            agent_list.append(agents.copy())
            scene += 1

            # Avoid oscillations: agents that have stayed in roughly the same place for too
            # long leave the scene and are replaced by new agents
            oscillation_length = 20  # corresponds to: 20 * 0.4 = 8 seconds

            if _ >= oscillation_length:
                dist = np.linalg.norm(states[_][:, 0:2] -
                                      states[_ - oscillation_length][:, 0:2],
                                      axis=1)
                agent_cond = agent_list[_] == agent_list[_ -
                                                         oscillation_length]
                add_cond = (dist[:] < 0.8) & agent_cond
            else:
                add_cond = np.ones((state.shape[0])) > 1

            # Boundary conditions - Determine when agents leave the scene
            condition = (
                state[:, 0] > self.initialize.total_x_max + threshold) | (
                    state[:, 0] < self.initialize.total_x_min - threshold) | (
                        state[:, 1] > self.initialize.total_y_max + threshold
                    ) | (state[:, 1] <
                         self.initialize.total_y_min - threshold) | add_cond

            agents[np.argwhere(condition)] = (
                max(agents) + np.arange(len(np.argwhere(condition))) +
                1).reshape(-1, 1)

            # Generate new Agents when old Agents left the scene
            if len(state[condition]) != 0:
                state[condition] = np.stack([
                    self.initialize.initialize_state(
                        agents[np.argwhere(condition)][i, :])
                    for i in range(len(state[condition]))
                ]).squeeze()

            dataset = np.concatenate((scene.reshape(
                nr_agents, 1), agents.reshape(nr_agents, 1), state[:, 0:2]),
                                     axis=1)

            # Write states to txt-file
            np.savetxt(file, dataset, fmt="%.2f", delimiter="\t")

            states.append(state.copy())

        file.close()
        states = np.stack(states)

        return states
Example #18
def predict(input_paths,
            dest_dict=None,
            dest_type='interp',
            sf_params=[0.5, 2.1, 0.3],
            predict_all=True,
            n_predict=12,
            obs_length=9):

    pred_length = n_predict

    def init_states(input_paths, start_frame, dest_dict, dest_type):
        initial_state = []
        for i, _ in enumerate(input_paths):
            path = input_paths[i]
            ped_id = path[0].pedestrian
            past_path = [t for t in path if t.frame <= start_frame]
            past_frames = [t.frame for t in path if t.frame <= start_frame]
            future_path = [t for t in path if t.frame > start_frame]
            len_path = len(past_path)

            ## include this agent only if it is present at start_frame
            if start_frame in past_frames:
                curr = past_path[-1]

                ## Velocity
                if len_path >= 4:
                    stride = 3
                    prev = past_path[-4]
                else:
                    stride = len_path - 1
                    prev = past_path[-len_path]
                [v_x, v_y] = vel_state(prev, curr, stride)

                ## Destination
                if dest_type == 'true':
                    if dest_dict is not None:
                        [d_x, d_y] = dest_dict[ped_id]
                    else:
                        raise ValueError
                elif dest_type == 'interp':
                    [d_x, d_y] = dest_state(past_path, len_path)
                elif dest_type == 'vel':
                    [d_x, d_y] = [pred_length * v_x, pred_length * v_y]
                elif dest_type == 'pred_end':
                    [d_x, d_y] = [future_path[-1].x, future_path[-1].y]
                else:
                    raise NotImplementedError

                ## Initialize State
                initial_state.append([curr.x, curr.y, v_x, v_y, d_x, d_y])
        return np.array(initial_state)

    def vel_state(prev, curr, stride):
        if stride == 0:
            return [0, 0]
        diff = np.array([curr.x - prev.x, curr.y - prev.y])
        theta = np.arctan2(diff[1], diff[0])
        speed = np.linalg.norm(diff) / (stride * 0.4)
        return [speed * np.cos(theta), speed * np.sin(theta)]

    def dest_state(path, length):
        if length == 1:
            return [path[-1].x, path[-1].y]
        x = [t.x for t in path]
        y = [t.y for t in path]
        time = list(range(length))
        f = interp1d(x=time, y=[x, y], fill_value='extrapolate')
        return f(time[-1] + pred_length)

    multimodal_outputs = {}
    primary = input_paths[0]
    neighbours_tracks = []
    frame_diff = primary[1].frame - primary[0].frame
    start_frame = primary[obs_length - 1].frame
    first_frame = primary[obs_length - 1].frame + frame_diff

    # initialize
    initial_state = init_states(input_paths, start_frame, dest_dict, dest_type)

    fps = 20
    sampling_rate = int(fps / 2.5)

    if len(initial_state) != 0:
        # run
        ped_ped = PedPedPotential(1. / fps,
                                  v0=sf_params[1],
                                  sigma=sf_params[2])
        field_of_view = FieldOfView()
        s = socialforce.Simulator(initial_state,
                                  ped_ped=ped_ped,
                                  field_of_view=field_of_view,
                                  delta_t=1. / fps,
                                  tau=sf_params[0])
        states = np.stack([
            s.step().state.copy() for _ in range(pred_length * sampling_rate)
        ])
        ## states : pred_length x num_ped x 7
        states = np.array(
            [s for num, s in enumerate(states) if num % sampling_rate == 0])
    else:
        ## Stationary
        past_path = [t for t in input_paths[0] if t.frame == start_frame]
        states = np.stack([[[past_path[0].x, past_path[0].y]]
                           for _ in range(pred_length)])

    # predictions
    primary_track = states[:, 0, 0:2]
    neighbours_tracks = states[:, 1:, 0:2]

    ## Primary Prediction Only
    if not predict_all:
        neighbours_tracks = []

    # Unimodal Prediction
    multimodal_outputs[0] = primary_track, neighbours_tracks
    return multimodal_outputs
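The resampling bookkeeping above, spelled out: with fps = 20 and the 2.5 Hz frame rate implied by int(fps / 2.5), every output frame corresponds to 8 simulator steps, so 12 predicted frames need 96 steps of delta_t = 1/20 s. A plain-arithmetic sanity check:

# sketch: the step/frame accounting used by predict() above
fps = 20
sampling_rate = int(fps / 2.5)           # 8 simulator steps per output frame
n_predict = 12
total_steps = n_predict * sampling_rate  # 96 steps of 0.05 s each
assert total_steps / fps == n_predict / 2.5  # both equal the 4.8 s horizon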
Example #19
def test_circle_mlp(n, lr=0.1, delta_t=0.2):
    torch.manual_seed(42)
    np.random.seed(42)

    # ped0 always left to right
    ped0 = np.array([-5.0, 0.0, 1.0, 0.0, 5.0, 0.0])

    generator_initial_states = []
    for theta in np.random.rand(n) * 2.0 * math.pi:
        # ped1 at a random angle with +/-20% speed variation
        c, s = np.cos(theta), np.sin(theta)
        r = np.array([[c, -s], [s, c]])
        ped1 = np.concatenate((
            np.matmul(r, ped0[0:2]),
            np.matmul(r, ped0[2:4]),  # * (0.8 + random.random() * 0.4)),
            np.matmul(r, ped0[4:6]),
        ))
        generator_initial_states.append(
            torch.tensor(np.stack((ped0, ped1))).float())
    if n == 1:  # override for n=1
        generator_initial_states = [
            torch.tensor([
                [0.0, 0.0, 0.0, 1.0, 0.0, 10.0],
                [-0.3, 10.0, 0.0, -1.0, -0.3, 0.0],
            ])
        ]

    # generator_v0 = 1.5 if n != 1 else 2.1
    generator_ped_ped = socialforce.PedPedPotential(delta_t, 2.1)
    true_scenes = []
    for initial_state in generator_initial_states:
        generator = socialforce.Simulator(initial_state,
                                          ped_ped=generator_ped_ped,
                                          delta_t=delta_t)
        true_scenes.append(
            [generator.step().state.clone().detach() for _ in range(42)])
    true_experience = socialforce.Optimizer.scenes_to_experience(true_scenes)

    print('============DONE WITH GENERATION===============')

    V = socialforce.PedPedPotentialMLP(delta_t)
    initial_parameters = V.get_parameters().clone().detach().numpy()

    def simulator_factory(initial_state):
        s = socialforce.Simulator(initial_state, ped_ped=V, delta_t=delta_t)
        s.desired_speeds = generator.desired_speeds
        s.max_speeds = generator.max_speeds
        return s

    opt = socialforce.Optimizer(simulator_factory, V.parameters(),
                                true_experience)
    for i in range(100 // n):
        loss = opt.epoch()
        print('epoch {}: {}'.format(i, loss))

    # make plots of result
    visualize('docs/mlp_circle_n{}_'.format(n),
              V,
              initial_parameters,
              V.get_parameters().clone(),
              V_gen=generator_ped_ped)
Example #20
def predict(input_paths, predict_all=False):

    def init_states(input_paths, start_frame):
        initial_state = []
        for i in range(len(input_paths)):
            path = input_paths[i] 
            past_path = [t for t in path if t.frame <= start_frame]
            past_frames = [t.frame for t in path if t.frame <= start_frame]
            l = len(past_path)

            ## include this agent only if it is present at start_frame with at least 4 past steps
            if (start_frame in past_frames) and l >= 4:
                curr = past_path[-1]
                prev = past_path[-4]
                [v_x, v_y] = vel_state(prev, curr, 3) 
                if np.linalg.norm([v_x, v_y]) < 1e-6:
                    continue   
                [d_x, d_y] = dest_state(past_path, l-1)
                if np.linalg.norm([curr.x - d_x, curr.y - d_y]) < 1e-6:
                    continue   
                    
                initial_state.append([curr.x, curr.y, v_x, v_y, d_x, d_y])
        return np.array(initial_state)

    def vel_state(prev, curr, l):
        diff = np.array([curr.x - prev.x, curr.y - prev.y])
        theta = np.arctan2(diff[1], diff[0])
        speed = np.linalg.norm(diff) / (l * 0.4)
        return [speed*np.cos(theta), speed*np.sin(theta)]

    def dest_state(path, l):
        x = [t.x for t in path]
        y = [t.y for t in path]
        time = list(range(l + 1))
        f = interp1d(x=time, y=[x, y], fill_value='extrapolate')
        return f(time[-1] + 12)

    multimodal_outputs = {}
    primary = input_paths[0]
    neighbours_tracks = []
    frame_diff = primary[1].frame - primary[0].frame
    start_frame = primary[8].frame
    first_frame = primary[8].frame + frame_diff

    # initialize
    initial_state = init_states(input_paths, start_frame)

    # run
    s = socialforce.Simulator(initial_state)
    states = np.stack([s.step().state.copy() for _ in range(12)])
    ## states : 12 x num_ped x 7

    # predictions
    for i in range(states.shape[1]):
        ped_id = input_paths[i][0].pedestrian
        if i == 0:
            primary_track = [
                trajnettools.TrackRow(first_frame + j * frame_diff, ped_id, x, y)
                for j, (x, y) in enumerate(states[:, i, 0:2])
            ]
        else:
            neighbours_tracks.append([
                trajnettools.TrackRow(first_frame + j * frame_diff, ped_id, x, y)
                for j, (x, y) in enumerate(states[:, i, 0:2])
            ])

    ## Primary Prediction
    if not predict_all:
        neighbours_tracks = []

    # Unimodal Prediction
    multimodal_outputs[0] = primary_track, neighbours_tracks
    return multimodal_outputs
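dest_state above infers a goal by linear extrapolation: fit (x(t), y(t)) over the observed time steps and evaluate 12 steps past the last one. A self-contained sketch of that call with toy numbers:

# sketch: interp1d-based destination extrapolation, as in dest_state
import numpy as np
from scipy.interpolate import interp1d

x = [0.0, 0.4, 0.8, 1.2]     # observed x positions
y = [0.0, 0.1, 0.2, 0.3]     # observed y positions
time = list(range(len(x)))   # 0, 1, 2, 3
f = interp1d(x=time, y=[x, y], fill_value='extrapolate')
d_x, d_y = f(time[-1] + 12)  # destination 12 steps ahead: (6.0, 1.5)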
Example #21
    def get_scene(self, agent_states):
        """
        Generate Sequence of Scene with obstacle-setting of the desired Input-Scene. For this, the input-scene-op.jpg image is read in an the obstacles are approximated accordingly.
        :param scene: Name of input-scene
        :param nr_agents: Number of agents that will be constantly in scene. Whenever an agent is leaving the scene, a new agent will spawn
        :param nr_scenes: Number of Frames/Scenes in Sequence
        :param image_path: Path to input-image for obstacle approximation
        :param dataset_path: Path to output-txt-file (generated dataset)
        :param create_background: Boolean - if True, create background image for sequence
        :param create_video: Boolean - if True, create .mp4-file which visualizes the sequence
        :return: Dataset (txt-file) and if chosen video and background image
        """

        # agents have state vectors of the form (x, y, v_x, v_y, d_x, d_y, [tau])

        print("Start generating scenes for dataset %s..." % (self.scenario))

        # Initialize state for each agent
        self.initial_state = self.initialize_agents(self.v_max, self.v_min, self.nr_agents, agent_states, self.a, self.b, setting="random")

        # Get boundaries
        initialize = random_initialization(self.scenario, self.v_max, self.v_min, a=self.a, b=self.b, tau=0.5)
        self.total_y_min = initialize.total_y_min
        self.total_y_max = initialize.total_y_max
        self.total_x_min = initialize.total_x_min
        self.total_x_max = initialize.total_x_max


        # Initialize space according to input image
        space = self.initialize_space(self.image_path, self.create_background, self.scaling)

        # Get Socialforce Simulator
        s = socialforce.Simulator(self.initial_state, ped_space=socialforce.PedSpacePotential(space, u0=self.U0, r=self.r), v0=self.V0, sigma=self.sigma)

        # Get Array for scene numbers and agents
        scene = np.zeros(self.nr_agents)
        agents = np.arange(self.nr_agents) + 1

        # Concatenate arrays with respective x-y-coordinates
        dataset = np.concatenate((scene.reshape(self.nr_agents, 1), agents.reshape(self.nr_agents, 1), s.state[:, 0:2]), axis=1)

        # Create txt-file for dataset
        print("Create dataset as txt-file for scenario: %s" % (self.scenario))
        file = open(self.dataset_path, "w")
        np.savetxt(file, dataset, fmt="%.2f", delimiter="\t")
        file.close()

        file = open(self.dataset_path, "a")

        # Calculate states of each agent for every scene in the dataset
        states = []
        agent_list = []
        states.append(s.state.copy())

        for _ in range(self.nr_scenes):
            state = s.step().state
            agent_list.append(agents.copy())
            scene += 1

            # Avoid oscillations: agents that have stayed in roughly the same place for too
            # long leave the scene and are replaced by new agents
            oscillation_length = 20  # corresponds to: 20 * 0.4 = 8 seconds

            if _ >= oscillation_length:
                dist = np.linalg.norm(states[_][:, 0:2] - states[_ - oscillation_length][:, 0:2], axis=1)
                agent_cond = agent_list[_] == agent_list[_ - oscillation_length]
                add_cond = (dist[:] < 0.8) & agent_cond
            else:
                add_cond = np.ones((state.shape[0])) > 1

            # Boundary conditions - Determine when agents leave the scene
            condition = (state[:, 0] > self.total_x_max + self.threshold) | (state[:, 0] < self.total_x_min - self.threshold) | (state[:, 1] > self.total_y_max + self.threshold) | (state[:, 1] < self.total_y_min - self.threshold) | add_cond

            agents[np.argwhere(condition)] = (max(agents) + np.arange(len(np.argwhere(condition))) + 1).reshape(-1, 1)

            # Generate new Agents when old Agents left the scene
            if len(state[condition]) != 0:
                state[condition] = np.stack([
                    initialize.initialize_state(agents[np.argwhere(condition)][i, :])
                    for i in range(len(state[condition]))
                ]).squeeze()

            dataset = np.concatenate((scene.reshape(self.nr_agents, 1), agents.reshape(self.nr_agents, 1), state[:, 0:2]), axis=1)
            np.savetxt(file, dataset, fmt="%.2f", delimiter="\t")

            states.append(state.copy())

        file.close()
        states = np.stack(states)
        print("Dataset created.")

        # Show animation of simulation
        if self.show_animation:
            print("Start creating video for visualization of simulated scenario " + str(self.scenario) + "...")
            with self.animate_scenes(states=states, space=space, sim=s) as _:
                pass

            print("Video created.")
Example #22
    def reset(self, phase='test', test_case=None):
        """
        Set px, py, gx, gy, vx, vy, theta for robot and humans
        :return:
        """
        if self.robot is None:
            raise AttributeError('robot has to be set!')
        assert phase in ['train', 'val', 'test']
        if test_case is not None:
            self.case_counter[phase] = test_case
        self.global_time = 0
        if phase == 'test':
            self.human_times = [0] * self.human_num
        else:
            self.human_times = [0] * (
                self.human_num if self.robot.policy.multiagent_training else 1)
        #if not self.robot.policy.multiagent_training:
        #    self.train_val_sim = 'circle_crossing'

        if self.config.get('humans', 'policy') == 'trajnet':
            raise NotImplementedError
        else:
            counter_offset = {
                'train':
                self.case_capacity['val'] + self.case_capacity['test'],
                'val': 0,
                'test': self.case_capacity['val']
            }
            self.robot.set(0, -self.circle_radius, 0, self.circle_radius, 0, 0,
                           np.pi / 2)
            if self.case_counter[phase] >= 0:
                #np.random.seed(counter_offset[phase] + self.case_counter[phase])
                if phase in ['train', 'val']:
                    human_num = self.human_num if self.robot.policy.multiagent_training else 1
                    self.generate_random_human_position(
                        human_num=human_num, rule=self.train_val_sim)
                else:
                    self.generate_random_human_position(
                        human_num=self.human_num, rule=self.test_sim)
                # case_counter is always between 0 and case_size[phase]
                self.case_counter[phase] = (self.case_counter[phase] +
                                            1) % self.case_size[phase]
            else:
                assert phase == 'test'
                if self.case_counter[phase] == -1:
                    # for debugging purposes
                    self.human_num = 3
                    self.humans = [
                        Human(self.config, 'humans')
                        for _ in range(self.human_num)
                    ]
                    self.humans[0].set(0, -6, 0, 5, 0, 0, np.pi / 2)
                    self.humans[1].set(-5, -5, -5, 5, 0, 0, np.pi / 2)
                    self.humans[2].set(5, -5, 5, 5, 0, 0, np.pi / 2)
                else:
                    raise NotImplementedError
        ###
        if self.domain_settings is not None:
            for human in self.humans:
                human.modify_policy(self.domain_settings)

        for agent in [self.robot] + self.humans:
            agent.time_step = self.time_step
            agent.policy.time_step = self.time_step

        self.states = list()
        if hasattr(self.robot.policy, 'action_values'):
            self.action_values = list()
        if hasattr(self.robot.policy, 'get_attention_weights'):
            self.attention_weights = list()

        # get current observation
        if self.robot.sensor == 'coordinates':
            ob = [human.get_observable_state() for human in self.humans]
        elif self.robot.sensor == 'RGB':
            raise NotImplementedError

        self.simulator = socialforce.Simulator(
            self.humans_to_sf_state(self.humans, init=True),
            ped_ped=socialforce.PedPedPotential(self.delta_t, self.v0,
                                                self.sigma),
            delta_t=self.delta_t,
            tau=self.tau)

        return ob
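Example #23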
    def get_scene(self,
                  nr_agents,
                  nr_scenes,
                  agent_states,
                  threshold,
                  setting="random"):
        """
        Method overriding. In this case agents do not leave the scenario at a specific side of the scene, but remain seq_len time steps in the scene.
        After seq_len time steps, all agents are replaced by an equal number of new agents. An implementation of the Social Force Model is used to
        describe the agents' motion behavior, including the interactions between the agents.
        :param nr_agents: Number of agents in each scene
        :param nr_scenes: Number of scenes of the dataset
        :param agent_states: Numpy array of shape N x 6 for N initial agents in the dataset. Determines the initial states of the agents in the dataset.
        The row of the i-th agent needs to be of the form: [x, y, v_x, v_y, d_x, d_y]. If the states should be initialized randomly, use agent_states = [] as input.
        :param threshold: Specifies the threshold for agents getting out of the scene (in meters)
        :param setting: Either random or meeting
        """

        if setting == "meeting":
            self.initialize = meeting(self.scenario, self.v_max, self.v_min,
                                      self.a, self.b, self.tau)
        else:
            self.initialize = random_initialization(self.scenario, self.v_max,
                                                    self.v_min, self.a, self.b,
                                                    self.tau)

        self.initial_state = self.initialize_agents(self.v_max, self.v_min,
                                                    nr_agents, agent_states,
                                                    self.a, self.b, setting)

        self.s = socialforce.Simulator(self.initial_state,
                                       self.V0,
                                       self.sigma,
                                       self.delta_t,
                                       self.tau,
                                       beta=self.beta)

        # Get Array for scene numbers and agents
        scene = np.zeros(nr_agents)
        agents = np.arange(nr_agents) + 1

        # Concatenate arrays with respective x-y-coordinates
        dataset = np.concatenate((scene.reshape(
            nr_agents, 1), agents.reshape(nr_agents, 1), self.s.state[:, 0:2]),
                                 axis=1)

        # Specify file location
        root_path_experiments = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "..", "..", "Experiments"))
        if not os.path.exists(root_path_experiments):
            raise FileNotFoundError(errno.ENOENT, strerror(errno.ENOENT),
                                    "Can not find Experiments folder!")

        root_path_dataset = os.path.abspath(
            os.path.join(
                root_path_experiments, "datasets",
                str(self.scenario) + "simulated" + "_" +
                str(self.parameterinfo), str(self.phase)))

        if not os.path.exists(root_path_dataset):
            os.makedirs(root_path_dataset)

        # Create background image
        fig, ax = plt.subplots()
        ax.grid(linestyle='dotted')
        ax.set_aspect(1.0, 'datalim')
        ax.set_axisbelow(True)
        ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')
        ax.set_ylim(
            [self.initialize.total_y_min - 2, self.initialize.total_y_max + 2])
        ax.set_xlim(
            [self.initialize.total_x_min - 2, self.initialize.total_x_max + 2])
        ax.set_title(self.scenario + " V0: " + str(self.V0) + " - sigma: " +
                     str(self.sigma))
        fig.set_tight_layout(True)
        fig.savefig(root_path_dataset + "//" + str(self.scenario) +
                    "simulated.jpg")
        plt.close()

        txt_file = os.path.join(
            root_path_dataset,
            str(self.phase) + "_" + str(self.scenario) + "simulated.txt")
        print("Create dataset as txt-file for scene " + str(self.scenario))
        file = open(txt_file, "w")
        np.savetxt(file, dataset, fmt="%.2f", delimiter="\t")
        file.close()
        file = open(txt_file, "a")

        # Repeat for each scene and update according steps of agents
        states = []
        states.append(self.s.state.copy())

        for _ in range(nr_scenes):
            state = self.s.step().state
            scene += 1

            # Periodic Boundary condition
            if _ != 0 and (_ + 1) % self.seq_len == 0:
                # Generate new Agents after seq_len timesteps
                state[:] = np.stack([
                    self.initialize.initialize_state(i)
                    for i in range(len(state))
                ]).squeeze()
                # Update IDs
                agents += len(agents)
            dataset = np.concatenate((scene.reshape(
                nr_agents, 1), agents.reshape(nr_agents, 1), state[:, 0:2]),
                                     axis=1)
            np.savetxt(file, dataset, fmt="%.2f", delimiter="\t")

            states.append(state.copy())

        file.close()
        states = np.stack(states)

        return states