Code example #1
0
def create_dataset(data_dir, scenes_path, graphs_path, config=None):
    """Build one Graph per (scene, target object) pair and persist everything.

    Reads the scene list from *scenes_path* (JSON), constructs a Graph for
    every object in every scene, pickles the resulting list to *graphs_path*,
    and builds a SceneDataset rooted under ``<data_dir>/processed_dataset``.

    Args:
        data_dir: Base data directory for the processed dataset root.
        scenes_path: Path to the JSON file containing the list of scenes.
        graphs_path: Destination path for the pickled list of graphs.
        config: Optional configuration; obtained via get_config() when None.

    Returns:
        Tuple ``(graphs, scene_dataset)``.
    """
    # Close the scenes file as soon as the JSON is parsed — the original
    # kept everything (including the pickle write) inside this with block.
    with open(scenes_path, 'r') as scenes_file:
        scenes = json.load(scenes_file)

    # Idiomatic None check (was: type(config) == type(None)).
    if config is None:
        config = get_config()

    # One graph per target object in every scene.
    graphs = [Graph(config, scene, target_id)
              for scene in scenes
              for target_id in range(len(scene["objects"]))]

    with open(graphs_path, 'wb') as graphs_file:
        pickle.dump(graphs, graphs_file)

    root = os.path.join(data_dir, "./processed_dataset")
    scene_dataset = SceneDataset(root, config)
    return graphs, scene_dataset
Code example #2
0

if __name__ == "__main__":
    # Resolve the data directory relative to this file's location.
    base_dir = os.path.abspath(__file__ + "../../../data")
    dataset_root = os.path.abspath(os.path.join(base_dir, "./processed_dataset"))

    cfg = get_config()
    encoder = Encoder(cfg)

    # Load the raw scene descriptions from the processed dataset's raw dir.
    scene_json_path = os.path.abspath(
        os.path.join(base_dir,
                     f"./processed_dataset/raw/{cmd_args.scene_file_name}"))
    with open(scene_json_path, 'r') as fh:
        scene_list = json.load(fh)

    # Build a single mini example: first scene, object 0 as the target.
    tgt = 0
    g = Graph(cfg, scene_list[0], tgt)

    # Node features come from node names; edge features from edge types.
    node_feats = encoder.get_embedding([node.name for node in g.nodes])
    e_index, e_types = g.get_edge_info()
    e_attrs = torch.tensor(encoder.get_embedding(e_types))
    sample = Data(x=node_feats,
                  edge_index=e_index,
                  edge_attr=e_attrs,
                  y=tgt)

    # Wrap the mini example in an environment.
    environment = Env(sample, g, cfg, encoder)
Code example #3
0
    # Destination file for the pickled list of Graph objects
    # (raw_path is defined earlier in this scope, outside this fragment).
    graphs_path = os.path.join(raw_path, cmd_args.graph_file_name)

    # In the pytorch geometry package, only int and tensor seems to be allowed to save
    # we process all the graphs and save them to a file.
    
    with open(scenes_path, 'r') as scenes_file:
        scenes = json.load(scenes_file)

    config = get_config()

    graphs = []
    # NOTE(review): attr_encoder is not used within this fragment;
    # presumably consumed further down — confirm before removing.
    attr_encoder = Encoder(config)

    # Build one Graph per target object in every scene.
    for scene in scenes:
        for target_id in range(len(scene["objects"])):
            graph = Graph(config, scene, target_id)
            graphs.append(graph)
    
    # Persist all graphs so later runs can skip the rebuild.
    with open(graphs_path, 'wb') as graphs_file:
        pickle.dump(graphs, graphs_file) 

    root = os.path.join(data_dir, "./processed_dataset")
    scene_dataset = SceneDataset(root, config)

    # Reuse a previously saved model when a non-empty checkpoint exists,
    # otherwise construct a fresh RefRL.
    # SECURITY: torch.load unpickles arbitrary objects — only load model
    # files from trusted sources.
    if os.path.exists(cmd_args.model_path) and os.path.getsize(cmd_args.model_path) > 0:
        refrl = torch.load(cmd_args.model_path)
        logging.info("Loaded refrl model")
    else:
        refrl = RefRL(scene_dataset, config, graphs)
        logging.info("Constructed refrl model")
Code example #4
0
if __name__ == "__main__":
    # Locate the data directory relative to this file.
    base_dir = os.path.abspath(__file__ + "../../../../data")
    dataset_root = os.path.abspath(os.path.join(base_dir, "./processed_dataset"))

    cfg = get_config()

    encoder = Encoder(cfg)

    # Load the raw scene descriptions.
    scene_json = os.path.abspath(
        os.path.join(base_dir,
                     f"./processed_dataset/raw/{cmd_args.scene_file_name}"))
    with open(scene_json, 'r') as fh:
        scene_list = json.load(fh)

    # Build a single mini example: first scene, object 0 as the target.
    tgt = 0
    g = Graph(cfg, scene_list[0], tgt)

    # Node embeddings from the graph's nodes; edge embeddings from the
    # prefixed edge-type names.
    node_feats = encoder.get_embedding(g.get_nodes())
    e_index, e_types = g.get_edge_info()
    e_attrs = encoder.get_embedding(
        [f"edge_{tp}" for tp in e_types])
    sample = Data(torch.tensor(node_feats), torch.tensor(e_index),
                  torch.tensor(e_attrs), g.target_id)

    # Wrap the mini example in an environment.
    environment = Env(sample, g, cfg, encoder)
    # env.reset(graph)