Пример #1
0
    def pre_process(self):
        """Download (if needed), preprocess and cache the node-property
        dataset, populating ``self.graph`` and ``self.labels``.

        On the first call the raw CSVs are downloaded/extracted, parsed
        into a DGLGraph plus a node-label tensor, and serialized to
        ``<root>/processed/dgl_data_processed``; subsequent calls simply
        reload that cache.
        """
        processed_dir = osp.join(self.root, 'processed')
        pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')

        if osp.exists(pre_processed_file_path):
            # Cache hit: reload the previously serialized graph and labels.
            self.graph, label_dict = load_graphs(pre_processed_file_path)
            self.labels = label_dict['labels']

        else:
            ### check download: edge.csv.gz marks already-extracted raw data
            if not osp.exists(osp.join(self.root, "raw", "edge.csv.gz")):
                url = self.meta_info[self.name]["url"]
                if decide_download(url):
                    path = download_url(url, self.original_root)
                    extract_zip(path, self.original_root)
                    os.unlink(path)
                    # Remove any stale dataset folder before moving the
                    # freshly extracted one into place.
                    try:
                        shutil.rmtree(self.root)
                    except FileNotFoundError:
                        # Nothing to remove on a fresh install; any other
                        # error (e.g. permissions) should surface.
                        pass
                    shutil.move(osp.join(self.original_root, self.download_name), self.root)
                else:
                    print("Stop download.")
                    exit(-1)

            raw_dir = osp.join(self.root, "raw")

            ### pre-process and save
            add_inverse_edge = self.meta_info[self.name]["add_inverse_edge"] == "True"

            # 'None' in the meta table means "no extra files".
            if self.meta_info[self.name]["additional node files"] == 'None':
                additional_node_files = []
            else:
                additional_node_files = self.meta_info[self.name]["additional node files"].split(',')

            if self.meta_info[self.name]["additional edge files"] == 'None':
                additional_edge_files = []
            else:
                additional_edge_files = self.meta_info[self.name]["additional edge files"].split(',')

            graph = read_csv_graph_dgl(raw_dir, add_inverse_edge = add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files)[0]

            ### adding prediction target
            node_label = pd.read_csv(osp.join(raw_dir, 'node-label.csv.gz'), compression="gzip", header = None).values
            if "classification" in self.task_type:
                # NaN (missing) labels cannot be represented as integers;
                # fall back to float32 in that case, matching the NaN-aware
                # variants elsewhere in this file.  (NaN != NaN is an
                # import-free NaN probe.)
                if (node_label != node_label).any():
                    node_label = torch.tensor(node_label, dtype = torch.float32)
                else:
                    node_label = torch.tensor(node_label, dtype = torch.long)
            else:
                node_label = torch.tensor(node_label, dtype = torch.float32)

            label_dict = {"labels": node_label}

            print('Saving...')
            save_graphs(pre_processed_file_path, graph, label_dict)

            # Reload from disk so in-memory state matches a cache hit exactly.
            self.graph, label_dict = load_graphs(pre_processed_file_path)
            self.labels = label_dict['labels']
Пример #2
0
    def pre_process(self):
        """Download (if needed), preprocess and cache the dataset,
        populating ``self.graph``.

        This variant carries no prediction target: the graph is saved
        with an empty label dict and labels are ignored on reload.
        """
        processed_dir = osp.join(self.root, 'processed')
        pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')

        if osp.exists(pre_processed_file_path):
            # Cache hit: reload the serialized graph(s); labels are unused.
            self.graph, _ = load_graphs(pre_processed_file_path)

        else:
            ### check download: edge.csv.gz marks already-extracted raw data
            if not osp.exists(osp.join(self.root, "raw", "edge.csv.gz")):
                url = self.meta_info[self.name]["url"]
                if decide_download(url):
                    path = download_url(url, self.original_root)
                    extract_zip(path, self.original_root)
                    os.unlink(path)
                    # Remove any stale dataset folder before moving the
                    # freshly extracted one into place.
                    try:
                        shutil.rmtree(self.root)
                    except FileNotFoundError:
                        # Nothing to remove on a fresh install; any other
                        # error (e.g. permissions) should surface.
                        pass
                    shutil.move(
                        osp.join(self.original_root, self.download_name),
                        self.root)
                else:
                    print("Stop download.")
                    exit(-1)

            raw_dir = osp.join(self.root, "raw")

            add_inverse_edge = self.meta_info[
                self.name]["add_inverse_edge"] == "True"

            ### pre-process and save
            # 'None' in the meta table means "no extra files".
            if self.meta_info[self.name]["additional node files"] == 'None':
                additional_node_files = []
            else:
                additional_node_files = self.meta_info[
                    self.name]["additional node files"].split(',')

            if self.meta_info[self.name]["additional edge files"] == 'None':
                additional_edge_files = []
            else:
                additional_edge_files = self.meta_info[
                    self.name]["additional edge files"].split(',')

            graph = read_csv_graph_dgl(
                raw_dir,
                add_inverse_edge=add_inverse_edge,
                additional_node_files=additional_node_files,
                additional_edge_files=additional_edge_files)[0]

            print('Saving...')
            # No labels for this dataset — persist with an empty label dict.
            save_graphs(pre_processed_file_path, graph, {})

            # Reload from disk so in-memory state matches a cache hit exactly.
            self.graph, _ = load_graphs(pre_processed_file_path)
Пример #3
0
    def pre_process(self):
        """Download and preprocess the pcba molecular graphs, populating
        ``self.graphs`` and ``self.labels``.

        Unlike the other dataset variants, the result is NOT cached to
        disk: the raw CSVs are re-read (and re-downloaded) on every call.
        """
        raw_dir = osp.join(self.root, 'raw')

        ### download (no cache check — always fetched)
        url = 'https://snap.stanford.edu/ogb/data/graphproppred/csv_mol_download/pcba.zip'
        if decide_download(url):
            path = download_url(url, self.original_root)
            extract_zip(path, self.original_root)
            os.unlink(path)
            # Remove any stale dataset folder before moving the freshly
            # extracted one into place.
            try:
                shutil.rmtree(self.root)
            except FileNotFoundError:
                # Nothing to remove on a fresh install; any other error
                # (e.g. permissions) should surface.
                pass
            shutil.move(osp.join(self.original_root, self.download_name),
                        self.root)
        else:
            print("Stop download.")
            exit(-1)

        ### preprocess
        add_inverse_edge = True
        additional_node_files = []
        additional_edge_files = []

        graphs = read_csv_graph_dgl(
            raw_dir,
            add_inverse_edge=add_inverse_edge,
            additional_node_files=additional_node_files,
            additional_edge_files=additional_edge_files)

        labels = pd.read_csv(osp.join(raw_dir, "graph-label.csv.gz"),
                             compression="gzip",
                             header=None).values

        has_nan = np.isnan(labels).any()

        if "classification" in self.task_type:
            if has_nan:
                # NaN (missing) labels cannot live in an integer tensor;
                # keep the float dtype produced by from_numpy.
                # NOTE(review): this keeps float64, while sibling variants
                # cast to float32 — confirm downstream consumers accept it.
                labels = torch.from_numpy(labels)
            else:
                labels = torch.from_numpy(labels).to(torch.long)
        else:
            labels = torch.from_numpy(labels)

        # Intentionally not persisted to disk (see docstring).
        print('Not Saving...')

        ### expose the in-memory results
        self.graphs = graphs
        self.labels = labels
Пример #4
0
    def pre_process(self):
        """Download (if needed), preprocess and cache the dataset,
        populating ``self.graph``.

        This variant builds an explicit list of raw CSV file names (edge
        list plus optional node/edge features) and carries no labels.
        """
        processed_dir = osp.join(self.root, 'processed')
        pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')

        if osp.exists(pre_processed_file_path):
            # Cache hit: reload the serialized graph(s); labels are unused.
            self.graph, _ = load_graphs(pre_processed_file_path)

        else:
            ### check download: edge.csv.gz marks already-extracted raw data
            if not osp.exists(osp.join(self.root, "raw", "edge.csv.gz")):
                url = self.meta_info[self.name]["url"]
                if decide_download(url):
                    path = download_url(url, self.original_root)
                    extract_zip(path, self.original_root)
                    os.unlink(path)
                    # Remove any stale dataset folder before moving the
                    # freshly extracted one into place.
                    try:
                        shutil.rmtree(self.root)
                    except FileNotFoundError:
                        # Nothing to remove on a fresh install; any other
                        # error (e.g. permissions) should surface.
                        pass
                    shutil.move(
                        osp.join(self.original_root, self.download_name),
                        self.root)
                else:
                    print("Stop download.")
                    exit(-1)

            raw_dir = osp.join(self.root, "raw")

            # Edge list is mandatory; node/edge feature files are optional
            # per the dataset's meta table.
            file_names = ["edge"]
            if self.meta_info[self.name]["has_node_attr"] == "True":
                file_names.append("node-feat")
            if self.meta_info[self.name]["has_edge_attr"] == "True":
                file_names.append("edge-feat")
            raw_file_names = [
                file_name + ".csv.gz" for file_name in file_names
            ]

            ### pre-process and save
            add_inverse_edge = self.meta_info[
                self.name]["add_inverse_edge"] == "True"
            graph = read_csv_graph_dgl(raw_dir,
                                       raw_file_names,
                                       add_inverse_edge=add_inverse_edge)

            # No labels for this dataset — persist with an empty label dict.
            save_graphs(pre_processed_file_path, graph, {})

            # Reload from disk so in-memory state matches a cache hit exactly.
            self.graph, _ = load_graphs(pre_processed_file_path)
Пример #5
0
    def pre_process(self):
        """Download (if needed), preprocess and cache the graph-property
        dataset, populating ``self.graphs`` and ``self.labels``.

        Graph-level labels are read from ``graph-label.csv.gz`` and stored
        alongside the serialized graphs.
        """
        processed_dir = osp.join(self.root, 'processed')
        raw_dir = osp.join(self.root, 'raw')
        pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')

        if os.path.exists(pre_processed_file_path):
            # Cache hit: reload the serialized graphs and labels.
            self.graphs, label_dict = load_graphs(pre_processed_file_path)
            self.labels = label_dict['labels']

        else:
            ### download
            url = self.meta_info[self.name]["url"]
            if decide_download(url):
                path = download_url(url, self.original_root)
                extract_zip(path, self.original_root)
                os.unlink(path)
                # Remove any stale dataset folder before moving the
                # freshly extracted one into place.
                try:
                    shutil.rmtree(self.root)
                except FileNotFoundError:
                    # Nothing to remove on a fresh install; any other
                    # error (e.g. permissions) should surface.
                    pass
                shutil.move(osp.join(self.original_root, self.download_name),
                            self.root)
            else:
                print("Stop download.")
                exit(-1)

            ### preprocess
            add_inverse_edge = self.meta_info[
                self.name]["add_inverse_edge"] == "True"
            graphs = read_csv_graph_dgl(raw_dir,
                                        add_inverse_edge=add_inverse_edge)
            labels = torch.tensor(
                pd.read_csv(osp.join(raw_dir, "graph-label.csv.gz"),
                            compression="gzip",
                            header=None).values)

            print('Saving...')
            save_graphs(pre_processed_file_path,
                        graphs,
                        labels={'labels': labels})

            # Reload from disk so in-memory state matches a cache hit exactly.
            self.graphs, label_dict = load_graphs(pre_processed_file_path)
            self.labels = label_dict['labels']
Пример #6
0
    def pre_process(self):
        """Download (if needed), preprocess and cache the graph-property
        dataset, populating ``self.graphs`` and ``self.labels``.

        Two label layouts are supported:

        * ``sequence prediction`` — each graph's target is a list of
          subtokens; stored via ``torch.save`` in a separate
          ``target_sequence`` file because ``save_graphs`` only handles
          tensor labels.
        * anything else — a single label tensor stored alongside the
          serialized graphs.
        """
        processed_dir = osp.join(self.root, 'processed')
        raw_dir = osp.join(self.root, 'raw')
        pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')

        if self.task_type == 'sequence prediction':
            target_sequence_file_path = osp.join(processed_dir,
                                                 'target_sequence')

        if os.path.exists(pre_processed_file_path):

            if self.task_type == "sequence prediction":
                # Graphs and (non-tensor) sequence labels are cached in
                # separate files.
                self.graphs, _ = load_graphs(pre_processed_file_path)
                self.labels = torch.load(target_sequence_file_path)

            else:
                self.graphs, label_dict = load_graphs(pre_processed_file_path)
                self.labels = label_dict['labels']

        else:
            ### download
            url = self.meta_info[self.name]["url"]
            if decide_download(url):
                path = download_url(url, self.original_root)
                extract_zip(path, self.original_root)
                os.unlink(path)
                # Remove any stale dataset folder before moving the
                # freshly extracted one into place.
                try:
                    shutil.rmtree(self.root)
                except FileNotFoundError:
                    # Nothing to remove on a fresh install; any other
                    # error (e.g. permissions) should surface.
                    pass
                shutil.move(osp.join(self.original_root, self.download_name),
                            self.root)
            else:
                print("Stop download.")
                exit(-1)

            ### preprocess
            add_inverse_edge = self.meta_info[
                self.name]["add_inverse_edge"] == "True"

            # 'None' in the meta table means "no extra files".
            if self.meta_info[self.name]["additional node files"] == 'None':
                additional_node_files = []
            else:
                additional_node_files = self.meta_info[
                    self.name]["additional node files"].split(',')

            if self.meta_info[self.name]["additional edge files"] == 'None':
                additional_edge_files = []
            else:
                additional_edge_files = self.meta_info[
                    self.name]["additional edge files"].split(',')

            graphs = read_csv_graph_dgl(
                raw_dir,
                add_inverse_edge=add_inverse_edge,
                additional_node_files=additional_node_files,
                additional_edge_files=additional_edge_files)

            if self.task_type == "sequence prediction":
                # the downloaded labels are initially joined by ' '
                labels_joined = pd.read_csv(osp.join(raw_dir,
                                                     "graph-label.csv.gz"),
                                            compression="gzip",
                                            header=None).values
                # need to split each element into subtokens
                labels = [
                    str(labels_joined[i][0]).split(' ')
                    for i in range(len(labels_joined))
                ]

                print('Saving...')
                save_graphs(pre_processed_file_path, graphs)
                torch.save(labels, target_sequence_file_path)

                # Reload from disk so in-memory state matches a cache hit.
                self.graphs, _ = load_graphs(pre_processed_file_path)
                self.labels = torch.load(target_sequence_file_path)

            else:
                labels = torch.tensor(
                    pd.read_csv(osp.join(raw_dir, "graph-label.csv.gz"),
                                compression="gzip",
                                header=None).values)

                print('Saving...')
                save_graphs(pre_processed_file_path,
                            graphs,
                            labels={'labels': labels})

                # Reload from disk so in-memory state matches a cache hit.
                self.graphs, label_dict = load_graphs(pre_processed_file_path)
                self.labels = label_dict['labels']
Пример #7
0
    def pre_process(self):
        """Download (if needed), preprocess and cache the node-property
        dataset, populating ``self.graph`` and ``self.labels``.

        Homogeneous graphs go through ``save_graphs``/``load_graphs``;
        heterogeneous graphs carry per-nodetype label dicts that
        ``save_graphs`` cannot store, so they are pickled instead.
        """
        processed_dir = osp.join(self.root, 'processed')
        pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')

        if osp.exists(pre_processed_file_path):

            if not self.is_hetero:
                self.graph, label_dict = load_graphs(pre_processed_file_path)
                self.labels = label_dict['labels']
            else:
                # Hetero cache is a pickled ([graph], label_dict) pair.
                with open(pre_processed_file_path, 'rb') as f:
                    self.graph, self.labels = pickle.load(f)

        else:
            ### check if the downloaded file exists
            # The marker file differs between the homogeneous and the
            # heterogeneous raw layouts.
            has_necessary_file_simple = osp.exists(
                osp.join(self.root, "raw",
                         "edge.csv.gz")) and (not self.is_hetero)
            has_necessary_file_hetero = osp.exists(
                osp.join(self.root, "raw",
                         "triplet-type-list.csv.gz")) and self.is_hetero

            has_necessary_file = has_necessary_file_simple or has_necessary_file_hetero
            if not has_necessary_file:
                url = self.meta_info[self.name]["url"]
                if decide_download(url):
                    path = download_url(url, self.original_root)
                    extract_zip(path, self.original_root)
                    os.unlink(path)
                    # Remove any stale dataset folder before moving the
                    # freshly extracted one into place.
                    try:
                        shutil.rmtree(self.root)
                    except FileNotFoundError:
                        # Nothing to remove on a fresh install; any other
                        # error (e.g. permissions) should surface.
                        pass
                    shutil.move(
                        osp.join(self.original_root, self.download_name),
                        self.root)
                else:
                    print("Stop download.")
                    exit(-1)

            raw_dir = osp.join(self.root, "raw")

            ### pre-process and save
            add_inverse_edge = self.meta_info[
                self.name]["add_inverse_edge"] == "True"

            # 'None' in the meta table means "no extra files".
            if self.meta_info[self.name]["additional node files"] == 'None':
                additional_node_files = []
            else:
                additional_node_files = self.meta_info[
                    self.name]["additional node files"].split(',')

            if self.meta_info[self.name]["additional edge files"] == 'None':
                additional_edge_files = []
            else:
                additional_edge_files = self.meta_info[
                    self.name]["additional edge files"].split(',')

            if self.is_hetero:
                graph = read_csv_heterograph_dgl(
                    raw_dir,
                    add_inverse_edge=add_inverse_edge,
                    additional_node_files=additional_node_files,
                    additional_edge_files=additional_edge_files)[0]

                label_dict = read_node_label_hetero(raw_dir)

                # convert into torch tensor
                if "classification" in self.task_type:
                    for nodetype in label_dict.keys():
                        # NaN (missing) labels cannot live in an integer
                        # tensor — keep float32 when any are present.
                        node_label = label_dict[nodetype]
                        if np.isnan(node_label).any():
                            label_dict[nodetype] = torch.from_numpy(
                                node_label).to(torch.float32)
                        else:
                            label_dict[nodetype] = torch.from_numpy(
                                node_label).to(torch.long)
                else:
                    for nodetype in label_dict.keys():
                        node_label = label_dict[nodetype]
                        label_dict[nodetype] = torch.from_numpy(node_label).to(
                            torch.float32)

                # Pickle because save_graphs cannot store per-nodetype dicts.
                with open(pre_processed_file_path, 'wb') as f:
                    pickle.dump(([graph], label_dict), f)

                # Reload from disk so in-memory state matches a cache hit.
                with open(pre_processed_file_path, 'rb') as f:
                    self.graph, self.labels = pickle.load(f)

            else:
                graph = read_csv_graph_dgl(
                    raw_dir,
                    add_inverse_edge=add_inverse_edge,
                    additional_node_files=additional_node_files,
                    additional_edge_files=additional_edge_files)[0]

                ### adding prediction target
                node_label = pd.read_csv(osp.join(raw_dir,
                                                  'node-label.csv.gz'),
                                         compression="gzip",
                                         header=None).values

                if "classification" in self.task_type:
                    # NaN (missing) labels cannot live in an integer
                    # tensor — keep float32 when any are present.
                    if np.isnan(node_label).any():
                        node_label = torch.from_numpy(node_label).to(
                            torch.float32)
                    else:
                        node_label = torch.from_numpy(node_label).to(
                            torch.long)
                else:
                    node_label = torch.from_numpy(node_label).to(torch.float32)

                label_dict = {"labels": node_label}

                save_graphs(pre_processed_file_path, graph, label_dict)

                # Reload from disk so in-memory state matches a cache hit.
                self.graph, label_dict = load_graphs(pre_processed_file_path)
                self.labels = label_dict['labels']
Пример #8
0
    def pre_process(self):
        """Download (if needed), preprocess and cache the dataset,
        populating ``self.graph``.

        This variant carries no labels. Homogeneous graphs go through
        ``save_graphs``/``load_graphs``; heterogeneous graphs are pickled
        as a one-element list.
        """
        processed_dir = osp.join(self.root, 'processed')
        pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')

        if osp.exists(pre_processed_file_path):

            if not self.is_hetero:
                self.graph, _ = load_graphs(pre_processed_file_path)
            else:
                # Hetero cache is a pickled one-element graph list.
                with open(pre_processed_file_path, 'rb') as f:
                    self.graph = pickle.load(f)

        else:
            ### check if the downloaded file exists
            # The marker file differs between the homogeneous and the
            # heterogeneous raw layouts.
            has_necessary_file_simple = osp.exists(
                osp.join(self.root, "raw",
                         "edge.csv.gz")) and (not self.is_hetero)
            has_necessary_file_hetero = osp.exists(
                osp.join(self.root, "raw",
                         "triplet-type-list.csv.gz")) and self.is_hetero

            has_necessary_file = has_necessary_file_simple or has_necessary_file_hetero
            if not has_necessary_file:
                url = self.meta_info[self.name]["url"]
                if decide_download(url):
                    path = download_url(url, self.original_root)
                    extract_zip(path, self.original_root)
                    os.unlink(path)
                    # Remove any stale dataset folder before moving the
                    # freshly extracted one into place.
                    try:
                        shutil.rmtree(self.root)
                    except FileNotFoundError:
                        # Nothing to remove on a fresh install; any other
                        # error (e.g. permissions) should surface.
                        pass
                    shutil.move(
                        osp.join(self.original_root, self.download_name),
                        self.root)
                else:
                    print("Stop download.")
                    exit(-1)

            raw_dir = osp.join(self.root, "raw")

            add_inverse_edge = self.meta_info[
                self.name]["add_inverse_edge"] == "True"

            ### pre-process and save
            # 'None' in the meta table means "no extra files".
            if self.meta_info[self.name]["additional node files"] == 'None':
                additional_node_files = []
            else:
                additional_node_files = self.meta_info[
                    self.name]["additional node files"].split(',')

            if self.meta_info[self.name]["additional edge files"] == 'None':
                additional_edge_files = []
            else:
                additional_edge_files = self.meta_info[
                    self.name]["additional edge files"].split(',')

            if self.is_hetero:
                graph = read_csv_heterograph_dgl(
                    raw_dir,
                    add_inverse_edge=add_inverse_edge,
                    additional_node_files=additional_node_files,
                    additional_edge_files=additional_edge_files)[0]

                # Pickle because save_graphs does not handle heterographs
                # in this code path.
                with open(pre_processed_file_path, 'wb') as f:
                    pickle.dump([graph], f)

                # Reload from disk so in-memory state matches a cache hit.
                with open(pre_processed_file_path, 'rb') as f:
                    self.graph = pickle.load(f)

            else:
                graph = read_csv_graph_dgl(
                    raw_dir,
                    add_inverse_edge=add_inverse_edge,
                    additional_node_files=additional_node_files,
                    additional_edge_files=additional_edge_files)[0]

                print('Saving...')
                # No labels for this dataset — persist with an empty dict.
                save_graphs(pre_processed_file_path, graph, {})

                # Reload from disk so in-memory state matches a cache hit.
                self.graph, _ = load_graphs(pre_processed_file_path)