Example #1
0
def generate_dataset(size, statebatchsize, shape, grid_type="free", verbose=False):
    """
    Generate a fixed-size supervised dataset of grid images and action labels.

    Arguments
    ---------
    size : number of grid samples generated
    statebatchsize : number of (position, action) pairs stored per sample
    shape : the grid shape (m, n)
    grid_type : the type of grid ("free", "obstacle", "maze")
    verbose : if True, print a progress index every 100 samples

    Return
    ------
    return images, S1s, S2s, labels

    images : (size, m, n, 2) array; channel 0 is the grid, channel 1 holds
        the value 100 at the goal position
    S1s : (size, statebatchsize) vertical player positions
    S2s : (size, statebatchsize) horizontal player positions
    labels : (size, statebatchsize) actions made
    """
    m, n = shape[0], shape[1]
    images = np.zeros((size, m, n, 2), 'float32')
    S1s = np.zeros((size, statebatchsize), 'int32')
    S2s = np.zeros((size, statebatchsize), 'int32')
    labels = np.zeros((size, statebatchsize), 'int32')

    for i in range(size):
        grid, start, goal = generate_grid(shape, grid_type=grid_type)
        images[i, :, :, 0] = grid
        images[i, goal[0], goal[1], 1] = 100

        # Fix: gate progress output on the previously-unused verbose flag.
        if verbose and i % 100 == 0:
            print(i)

        # Keep re-sampling start positions and planning until we have
        # collected statebatchsize (position, action) pairs for this grid.
        count = 0
        while count < statebatchsize:
            # Rejection-sample a free (value 0) interior cell as the start.
            while True:
                start_x = np.random.randint(m - 2) + 1
                start_y = np.random.randint(n - 2) + 1
                if grid[start_x, start_y] == 0:
                    start = (start_x, start_y)
                    break

            path, action_planning = compute_action_planning(grid, start, goal)

            # compute_action_planning signals "no route" with False —
            # use an identity test rather than `!= False`.
            if path is not False:
                for action, position in zip(action_planning, path):
                    if count >= statebatchsize:
                        # Batch full: stop consuming this plan immediately.
                        break
                    S1s[i, count] = position[0]
                    S2s[i, count] = position[1]
                    labels[i, count] = action
                    count += 1

    return images, S1s, S2s, labels
def generate_dataset(size,
                     shape,
                     *,
                     timesteps=10,
                     grid_type="free",
                     observable_depth=5):
    """
    Arguments
    ---------
    size : number of episodes generated
    shape : the grid shape
    timesteps : fixed episode length; shorter plans are padded with the
        last image/action pair
    grid_type : the type of grid ("free", "obstacle", "maze")
    observable_depth : visibility radius used to build the partial grid

    Return
    ------
    return episodes, a list of tuple (images, labels)

    each episode contains a list of :
    image : (m, n, 2) grid with state and goal on the 3rd axis
        state = (m, n) grid with 1 (wall), 0 (free) and -1 (unseen) ;
        goal = (m, n) grid with 10 at goal position
    label : the action made

    """
    episodes = []
    for _ in tqdm(range(size)):
        # Fix: regenerate until the planner yields at least one action.
        # With an empty plan, the padding below would reference `image`
        # and `action` before assignment (UnboundLocalError).
        while True:
            grid, start, goal = generate_grid(shape, grid_type=grid_type)
            path, action_planning = compute_action_planning(grid, start, goal)
            if len(action_planning) > 0:
                break

        grid[goal] = GOAL_VALUE

        images = []
        labels = []

        for timestep in range(timesteps):
            # at the end, pad the episode with the last image/action
            if timestep < len(action_planning):
                action = action_planning[timestep]
                position = path[timestep]

                # Compute the partial grid as seen from the current position
                _partial_grid = partial_grid(grid, position, observable_depth)
                _partial_grid = grid_with_start(_partial_grid, position)

                # Goal grid contains something only if the goal is visible
                where_is_goal = _partial_grid == GOAL_VALUE
                goal_grid = create_goal_grid(grid.shape, where_is_goal)

                # Stack partial and goal grid along a new channel axis
                image = np.stack([_partial_grid, goal_grid], axis=2)

            images.append(image)
            labels.append(action)

        episodes.append((images, labels))
    return episodes
Example #3
0
    def reset(self):
        """Start a new episode: rebuild the grid and return the first state."""
        # A freshly reset episode is never terminal.
        self.terminal = False

        fresh = generate_grid(self.shape,
                              grid_type=self.grid_type,
                              generation_seed=self.generation_seed,
                              spawn_seed=self.spawn_seed)
        self.grid, self.player, self.target = fresh

        return self.get_state()
Example #4
0
    def reset(self):
        """Start a new episode: rebuild the grid, bake in direction signs,
        and return the initial state."""
        self.terminal = False

        fresh = generate_grid(self.shape,
                              grid_type=self.grid_type,
                              generation_seed=self.generation_seed,
                              spawn_seed=self.spawn_seed)
        self.grid, self.player, self.target = fresh

        # Plan a route from player to target, then write the matching
        # sign markers onto the grid itself.
        path, plan = self.compute_action_planning(self.grid, self.player,
                                                  self.target)
        signs = self.build_sign(plan)
        self.grid = self.add_sign(plan, path, self.grid, signs)

        return self.get_state()
def generate_dataset(size, shape, *, grid_type="free", verbose=False):
    """
    Arguments
    ---------
    size : number of training set generated
    shape : the grid shape
    grid_type : the type of grid ("free", "obstacle", "maze")
    verbose : if True, display a tqdm progress bar

    Return
    ------
    return images, S1s, S2s, labels

    image : (m, n, 2) grid with state and goal on the 3rd axis
        state = (m, n) grid with 1 and 0 ;
        goal = (m, n) grid with 10 at goal position

    S1 : vertical position of the player
    S2 : horizontal position of the player
    label : the action made
    """
    # Fix: the original stop-check ran only after the first append, so a
    # non-positive size still produced one spurious sample.
    if size <= 0:
        return [], [], [], []

    progress_bar = tqdm(total=size) if verbose else None

    images = []
    S1s = []
    S2s = []
    labels = []

    n = 0

    while True:

        grid, start, goal = generate_grid(shape, grid_type=grid_type)
        path, action_planning = compute_action_planning(grid, start, goal)

        goal_grid = create_goal_grid(grid.shape, goal)
        image = np.stack([grid, goal_grid], axis=2)

        # Every step of the plan becomes one (image, position, action) sample.
        for action, position in zip(action_planning, path):
            images.append(image)
            S1s.append(position[0])
            S2s.append(position[1])
            labels.append(action)

            if progress_bar is not None:
                progress_bar.update(1)

            n += 1
            if n >= size:
                if progress_bar is not None:
                    progress_bar.close()
                return images, S1s, S2s, labels
Example #6
0
def generate_dataset(size, shape, *, grid_type="free", verbose=False):
    """
    Arguments
    ---------
    size : number of training set generated
    shape : the grid shape
    grid_type : the type of grid ("free", "obstacle", "maze")
    verbose : if True, display a tqdm progress bar

    Return
    ------
    return states, goals, starts, actions

    state : grid like shape with 1 and 0
    goal : grid like shape with 1 at goal position
    start : (1, 1) player position
    action : (4) the action (in one hot shape)
    """
    # Fix: the original stop-check ran only after the first append, so a
    # non-positive size still produced one spurious sample.
    if size <= 0:
        return [], [], [], []

    progress_bar = tqdm(total=size) if verbose else None

    states = []
    goals = []
    starts = []
    actions = []
    n = 0

    while True:

        grid, start, goal = generate_grid(shape, grid_type=grid_type)
        path, action_planning = compute_action_planning(grid, start, goal)

        goal_grid = create_goal_grid(grid.shape, goal)

        # Every step of the plan becomes one (state, goal, start, action) sample.
        for action, position in zip(action_planning, path):
            states.append(grid)
            goals.append(goal_grid)
            starts.append(position)
            actions.append(one_hot_value(ACTION_SIZE, action))

            if progress_bar is not None:
                progress_bar.update(1)

            n += 1
            if n >= size:
                if progress_bar is not None:
                    progress_bar.close()
                return states, goals, starts, actions
    def generate_dataset(self, size):
        """
        Return
        ------
        return episodes, a list of tuple (images, labels)

        each episode contains a list of :
        image : (m, n, 2) grid with state and goal on the 3rd axis
            state = (m, n) grid with 1 (wall), 0 (free) and -1 (unseen) ;
            goal = (m, n) grid with 10 at goal position
        label : the action made
        """
        episodes = []
        for _ in tqdm(range(size)):
            # Build a fresh grid and plan a route from start to goal.
            grid, start, goal = generate_grid(self.shape,
                                              grid_type=self.grid_type)
            path, plan = compute_action_planning(grid, start, goal)

            # Roll out the episode, then split step tuples into parallel
            # image and label sequences.
            rollout = self.generate_episode(grid, goal, plan, path)
            step_images, step_labels = zip(*rollout)

            episodes.append((step_images, step_labels))
        return episodes