Example #1
    else:
        train_percent = 20
    ablation = "all"
    shuffle = False
    metapath_length = 3
    mlp_settings = {'layer_list': [256], 'dropout_list': [0.5], 'activation': 'sigmoid'}
    info_section = 40  # total embedding dim = info_section * 3 = 120
    learning_rate = 0.01
    select_method = "all_node"  # Only used in end-node study
    single_path_limit = 5  # lambda = 5

    # Automatically calculated parameters
    num_batch_per_epoch = 5  # number of batches per epoch
    batch_size = train_percent // 20 * 96
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    data = HeteData(dataset=dataset, train_percent=train_percent, shuffle=shuffle)
    graph_list = data.get_dict_of_list()
    homo_graph = nx.to_dict_of_lists(data.homo_graph)
    input_dim = data.x.shape[1]

    pre_embed_dim = data.type_num * info_section
    output_dim = max(data.train_list[:, 1].tolist()) + 1  # number of classes (largest training label + 1)

    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"  # 获取预处理数据

    assert select_method in ["end_node", "all_node"]

    x = data.x
    train_list = data.train_list  # training nodes and their corresponding labels
    test_list = data.test_list  # indices of the test nodes
    val_list = data.val_list  # indices of the validation nodes
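
Both examples are fragments of a larger configuration block and rely on names defined outside the snippet (torch, nx, HeteData, dataset). A minimal sketch of the assumed surrounding setup, purely for orientation; the import path for HeteData and the dataset value are guesses, not the project's actual layout:

    import torch
    import networkx as nx

    # HeteData is the project's heterogeneous-graph loader; the module path
    # here is hypothetical and only marks where it would be imported from.
    from hete_data import HeteData

    dataset = "ACM"  # placeholder; the snippets only pass it through to HeteData
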
Example #2
        'activation': 'sigmoid'
    }
    info_section = 40  # total embedding dim = info_section * 3 = 120
    learning_rate = 0.01
    select_method = "all_node"  # Only used in end-node study
    single_path_limit = 8  # lambda = 8
    metapath_name = {'P': ['PA', 'PS']}
    metapath_list = [[0], [2]]
    print("Selected Path: PA, PS")

    # Automatically calculated parameters
    num_batch_per_epoch = 5  # number of batches per epoch
    batch_size = train_percent // 20 * 96
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    data = HeteData(dataset=dataset,
                    train_percent=train_percent,
                    shuffle=shuffle)
    graph_list = data.get_dict_of_list()
    homo_graph = nx.to_dict_of_lists(data.homo_graph)
    input_dim = data.x.shape[1]

    pre_embed_dim = data.type_num * info_section
    output_dim = max(data.train_list[:, 1].tolist()) + 1  # number of classes (largest training label + 1)

    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"  # 获取预处理数据

    assert select_method in ["end_node", "all_node"]

    x = data.x
    train_list = data.train_list  # training nodes and their corresponding labels
    test_list = data.test_list  # indices of the test nodes
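
The "automatically calculated parameters" in both examples follow directly from the values set above. A small standalone illustration of that arithmetic, with the number of node types assumed to be three per the "info_section * 3 = 120" comment:

    train_percent = 20
    info_section = 40
    type_num = 3  # assumed number of node types (matches the *3 in the comment)

    batch_size = train_percent // 20 * 96    # 96 here; 192 when train_percent = 40
    pre_embed_dim = type_num * info_section  # 120, the total pre-embedding dimension
    print(batch_size, pre_embed_dim)         # -> 96 120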