class SceneDataset(InMemoryDataset):
    """In-memory dataset built from pickled scene graphs.

    Every raw pickle file holds a list of graph objects; each graph is
    converted to one ``Data`` point whose node and edge features come
    from the attribute ``Encoder``.  The collated result is cached at
    ``processed_paths[0]`` and reloaded on construction.
    """

    def __init__(self, root, config, transform=None, pre_transform=None):
        self.config = config
        self.attr_encoder = Encoder(config)

        super().__init__(root, transform, pre_transform)
        # Load the tensors that process() collated and saved.
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_file_names(self):
        # Raw pickle name comes from the global command-line configuration.
        return [cmd_args.graph_file_name]

    @property
    def processed_file_names(self):
        # Processed .pt file name, also taken from the CLI configuration.
        return [cmd_args.dataset_name]

    def download(self):
        # Raw files are expected to already exist locally; nothing to fetch.
        pass

    def process(self):
        """Convert every pickled graph into a ``Data`` point and save the batch."""
        points = []

        for path in self.raw_paths:
            with open(path, 'rb') as fh:
                graphs = pickle.load(fh)

            for idx, g in enumerate(graphs):
                node_feats = self.attr_encoder.get_embedding(
                    [node.name for node in g.nodes])
                edge_index, edge_types = g.get_edge_info()
                # Edge types are looked up under an "edge_" key prefix.
                edge_feats = torch.tensor(
                    self.attr_encoder.get_embedding(
                        [f"edge_{tp}" for tp in edge_types]))

                point = Data(torch.tensor(node_feats),
                             torch.tensor(edge_index),
                             edge_feats, g.target_id)
                point.obj_num = len(g.scene["objects"])
                point.graph_id = idx
                points.append(point)

        collated, slices = self.collate(points)
        torch.save((collated, slices), self.processed_paths[0])
# Example #2

if __name__ == "__main__":
    # Smoke-test: build one Data point from the first scene and wrap it in
    # an Env, mirroring what SceneDataset.process() does per graph.

    # Fix: `__file__ + "../../../data"` only resolved correctly because ".."
    # happened to swallow the bogus "file.py.." path component.  Spell the
    # same resolution out explicitly.
    data_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", "..", "data"))
    root = os.path.abspath(os.path.join(data_dir, "processed_dataset"))

    config = get_config()
    attr_encoder = Encoder(config)

    scenes_path = os.path.join(root, "raw", cmd_args.scene_file_name)
    with open(scenes_path, 'r') as scenes_file:
        scenes = json.load(scenes_file)

    # construct a mini example graph around the first scene
    target_id = 0
    graph = Graph(config, scenes[0], target_id)

    x = attr_encoder.get_embedding([node.name for node in graph.nodes])
    edge_index, edge_types = graph.get_edge_info()
    # Fix: encode edge types under the same "edge_" key prefix that
    # SceneDataset.process() uses; the raw type names would look up
    # different (or missing) entries in the encoder's vocabulary.
    edge_attrs = torch.tensor(
        attr_encoder.get_embedding([f"edge_{tp}" for tp in edge_types]))
    # Fix: wrap x / edge_index in tensors, consistent with how the
    # dataset stores them.
    data_point = Data(x=torch.tensor(x),
                      edge_index=torch.tensor(edge_index),
                      edge_attr=edge_attrs,
                      y=target_id)

    # construct an env
    env = Env(data_point, graph, config, attr_encoder)
if __name__ == "__main__":
    # NOTE(review): this duplicates the __main__ block above — both guards
    # are true, so both run back to back.  This variant also resolves
    # data_dir one directory higher than the other ("../../../../" vs
    # "../../../"); confirm which layout is intended and delete the other.

    # Fix: `__file__ + "../../../../data"` only resolved correctly because
    # ".." happened to swallow the bogus "file.py.." path component.  The
    # explicit join below resolves to the identical directory.
    data_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", "..", "..", "data"))
    root = os.path.abspath(os.path.join(data_dir, "processed_dataset"))

    config = get_config()

    attr_encoder = Encoder(config)

    scenes_path = os.path.join(root, "raw", cmd_args.scene_file_name)
    with open(scenes_path, 'r') as scenes_file:
        scenes = json.load(scenes_file)

    # construct a mini example graph around the first scene
    target_id = 0
    graph = Graph(config, scenes[0], target_id)

    x = attr_encoder.get_embedding(graph.get_nodes())
    edge_index, edge_types = graph.get_edge_info()
    # Same "edge_" key prefix that SceneDataset.process() uses.
    edge_attrs = attr_encoder.get_embedding(
        [f"edge_{tp}" for tp in edge_types])
    data_point = Data(torch.tensor(x), torch.tensor(edge_index),
                      torch.tensor(edge_attrs), graph.target_id)

    # construct an env
    env = Env(data_point, graph, config, attr_encoder)
    # env.reset(graph)