Example 1
File: GNN.py Project: li-study/BiGI
    def __init__(self, opt):
        super(DGCNLayer, self).__init__()
        self.opt = opt
        self.dropout = opt["dropout"]
        self.gc1 = GCN(nfeat=opt["feature_dim"],
                       nhid=opt["hidden_dim"],
                       dropout=opt["dropout"],
                       alpha=opt["leakey"])

        self.gc2 = GCN(nfeat=opt["feature_dim"],
                       nhid=opt["hidden_dim"],
                       dropout=opt["dropout"],
                       alpha=opt["leakey"])
        self.gc3 = GCN(
            nfeat=opt["hidden_dim"],  # note: nfeat/nhid are swapped relative to gc1/gc2,
            nhid=opt["feature_dim"],  # mapping hidden_dim back to feature_dim
            dropout=opt["dropout"],
            alpha=opt["leakey"])

        self.gc4 = GCN(
            nfeat=opt["hidden_dim"],  # same swap as gc3
            nhid=opt["feature_dim"],
            dropout=opt["dropout"],
            alpha=opt["leakey"])
        self.user_union = nn.Linear(opt["feature_dim"] + opt["feature_dim"],
                                    opt["feature_dim"])
        self.item_union = nn.Linear(opt["feature_dim"] + opt["feature_dim"],
                                    opt["feature_dim"])
Example 2
    def __init__(self, opt):
        super(DGCNLayer, self).__init__()
        self.opt = opt
        self.gc1 = GCN(
            nfeat=opt["feature_dim"],
            nhid=opt["hidden_dim"],
            dropout=opt["dropout"],
            alpha=opt["leakey"]
        )

        self.gc2 = GCN(
            nfeat=opt["feature_dim"],
            nhid=opt["hidden_dim"],
            dropout=opt["dropout"],
            alpha=opt["leakey"]
        )
        self.gc3 = GCN(
            nfeat=opt["feature_dim"],
            nhid=opt["hidden_dim"],
            dropout=opt["dropout"],
            alpha=opt["leakey"]
        )

        self.gc4 = GCN(
            nfeat=opt["feature_dim"],
            nhid=opt["hidden_dim"],
            dropout=opt["dropout"],
            alpha=opt["leakey"]
        )

        self.Union = nn.Linear(opt["hidden_dim"] + opt["feature_dim"], opt["hidden_dim"])

        self.user_index = torch.arange(0, self.opt["number_user"], 1)
        self.item_index = torch.arange(self.opt["number_user"],
                                       self.opt["number_user"] + self.opt["number_item"], 1)
        if self.opt["cuda"]:
            self.user_index = self.user_index.cuda()
            self.item_index = self.item_index.cuda()
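A hypothetical illustration of what the two index tensors are for: with user and item nodes stacked in one embedding matrix, they slice out each block (the names and shapes here are assumptions, not the project's actual forward pass):

all_emb = torch.randn(opt["number_user"] + opt["number_item"], opt["hidden_dim"])
user_emb = all_emb.index_select(0, self.user_index)  # rows 0 .. number_user - 1
item_emb = all_emb.index_select(0, self.item_index)  # the remaining item rows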
Example 3
    def __init__(self, adj_node, adj_edge, dim_in_node, dim_out_node, dim_out_edge, M, range_K, device, in_drop=0.0,
                 gcn_drop=0.0, residual=False):
        '''
        :param adj_node: (V, V) node adjacency matrix
        :param adj_edge: (E, E) edge adjacency matrix
        :param dim_in_node: int, number of channels in the input sequence
        :param dim_out_node: int, number of node channels in the output sequence
        :param dim_out_edge: int, number of edge channels in the output sequence
        :param M: (V, E) node-edge incidence matrix with M_{i,(i->j)} = M_{j,(i->j)} = 1
        :param range_K: number of k-hop GCN blocks
        (V is the number of nodes, E the number of edges)
        '''
        super(BGCN, self).__init__()
        self.DEVICE = device
        self.K = range_K
        self._M = M
        GCN_khops_node = []

        for k in range(self.K):
            if k == 0:
                GCN_khops_node.append(
                    GCN(adj_node, dim_in_node, dim_out_node, k + 1, device, in_drop=in_drop, gcn_drop=gcn_drop,
                        residual=residual))
            else:
                GCN_khops_node.append(
                    GCN(adj_node, dim_out_node + dim_out_edge, dim_out_node, k + 1, device, in_drop=in_drop,
                        gcn_drop=gcn_drop,
                        residual=residual))
        self.GCN_khops_node = nn.ModuleList(GCN_khops_node)
        self.GCN_khops_edge = nn.ModuleList([
            GCN(adj_edge, dim_out_edge, dim_out_edge, k + 1, device,
                in_drop=in_drop, gcn_drop=gcn_drop, residual=residual)
            for k in range(self.K)])

        self.W_b = nn.Parameter(torch.FloatTensor(dim_in_node, dim_out_edge))
        # note: torch.FloatTensor leaves this parameter uninitialized; the project
        # presumably initializes W_b elsewhere (e.g. in a reset_parameters step)
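Given the docstring, M couples the node and edge streams; a small hedged sketch of the shapes involved (the matmul is an assumption based on the documented shapes, not the project's actual forward pass):

V, E = 4, 6
M = torch.zeros(V, E)        # incidence: M[i, e] = M[j, e] = 1 for edge e = (i -> j)
H_edge = torch.randn(E, 8)   # edge features with dim_out_edge = 8
node_from_edge = M @ H_edge  # (V, 8): sums incident-edge features for each node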
Example 4
def build_model(inputs, num_classes, segmentation_model, is_training):
	if segmentation_model == "FCN8":
		print("segmentation_model: FCN8")
		return FCN8(inputs, num_classes)

	elif segmentation_model == "U_Net":
		print("segmentation_model: U_Net")
		return U_Net(inputs, num_classes)

	elif segmentation_model == "Seg_Net":
		print("segmentation_model: Seg_Net")
		return Seg_Net(inputs, num_classes)

	elif segmentation_model == "Deeplab_v1":
		print("segmentation_model: Deeplab_v1")
		return Deeplab_v1(inputs, num_classes)

	elif segmentation_model == "Deeplab_v2":
		print("segmentation_model: Deeplab_v2")
		return Deeplab_v2(inputs, num_classes, is_training)

	elif segmentation_model == "Deeplab_v3":
		print("segmentation_model: Deeplab_v3")
		return Deeplab_v3(inputs, num_classes, is_training)

	elif segmentation_model == "PSPNet":
		print("segmentation_model: PSPNet")
		return PSPNet(inputs, num_classes, is_training)

	elif segmentation_model == "GCN":
		print("segmentation_model: GCN")
		return GCN(inputs, num_classes, is_training)

	elif segmentation_model == "ENet":
		print("segmentation_model: ENet")
		return ENet(inputs, num_classes, is_training)

	elif segmentation_model == "ICNet":
		print("segmentation_model: ICNet")
		return ICNet(inputs, num_classes, is_training)

	else:
		# fail loudly instead of silently returning None
		raise ValueError("unknown segmentation_model: %s" % segmentation_model)
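A hypothetical call (the model names come from the dispatch chain above; inputs and num_classes are illustrative):

net = build_model(inputs, num_classes=21, segmentation_model="PSPNet", is_training=True)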
Example 5
def main():
    parse = argparse.ArgumentParser()
    # ---------- environment setting: which gpu -------
    parse.add_argument('-gpu',
                       '--gpu',
                       type=str,
                       default='0',
                       help='which gpu to use: 0 or 1')
    parse.add_argument('-folder_name',
                       '--folder_name',
                       type=str,
                       default='datasets/citibike-data/data/')
    parse.add_argument('-output_folder_name',
                       '--output_folder_name',
                       type=str,
                       default='output/citibike-data/data/')
    # ---------- input/output settings -------
    parse.add_argument('-input_steps',
                       '--input_steps',
                       type=int,
                       default=6,
                       help='number of input steps')
    # ---------- model ----------
    parse.add_argument('-model',
                       '--model',
                       type=str,
                       default='GCN',
                       help='model: FC_LSTM, FC_GRU, GCN, flow_GCN, Coupled_GCN')
    parse.add_argument('-num_layers',
                       '--num_layers',
                       type=int,
                       default=2,
                       help='number of layers in model')
    parse.add_argument('-num_units',
                       '--num_units',
                       type=int,
                       default=64,
                       help='dim of hidden states')
    parse.add_argument('-trained_adj_mx',
                       '--trained_adj_mx',
                       type=int,
                       default=0,
                       help='whether to train the adjacency matrix')
    parse.add_argument('-filter_type',
                       '--filter_type',
                       type=str,
                       default='dual_random_walk',
                       help='laplacian, random_walk, or dual_random_walk')
    parse.add_argument('-delta',
                       '--delta',
                       type=float,
                       default=1e7,
                       help='delta for computing the rescaled weight matrix')
    parse.add_argument('-epsilon',
                       '--epsilon',
                       type=float,
                       default=0.8,
                       help='epsilon for computing the rescaled weight matrix')
    #
    parse.add_argument(
        '-dy_temporal',
        '--dy_temporal',
        type=int,
        default=0,
        help='whether to use temporal attention module before output layer')
    parse.add_argument(
        '-multi_loss',
        '--multi_loss',
        type=int,
        default=0,
        help='whether to include only the last prediction in the loss function')
    parse.add_argument('-att_units',
                       '--att_units',
                       type=int,
                       default=64,
                       help='dim of attention hidden states')
    #
    parse.add_argument(
        '-dy_adj',
        '--dy_adj',
        type=int,
        default=1,
        help='whether to use a dynamic adjacency matrix in the lower feature extraction layer')
    parse.add_argument(
        '-dy_filter',
        '--dy_filter',
        type=int,
        default=0,
        help='whether to use a dynamic filter to generate region-specific filters')
    #parse.add_argument('-att_dynamic_adj', '--att_dynamic_adj', type=int, default=1, help='whether to use dynamic adjacent matrix in attention parts')
    parse.add_argument('-model_save',
                       '--model_save',
                       type=str,
                       default='gcn',
                       help='folder name to save model')
    parse.add_argument('-pretrained_model',
                       '--pretrained_model_path',
                       type=str,
                       default=None,
                       help='path to the pretrained model')
    # ---------- params for CNN ------------
    parse.add_argument('-num_filters',
                       '--num_filters',
                       type=int,
                       default=32,
                       help='number of filters in CNN')
    parse.add_argument('-pooling_units',
                       '--pooling_units',
                       type=int,
                       default=64,
                       help='number of pooling units')
    parse.add_argument('-dropout_keep_prob',
                       '--dropout_keep_prob',
                       type=float,
                       default=0.5,
                       help='keep probability in dropout layer')
    # ---------- training parameters --------
    parse.add_argument('-n_epochs',
                       '--n_epochs',
                       type=int,
                       default=20,
                       help='number of epochs')
    parse.add_argument('-batch_size',
                       '--batch_size',
                       type=int,
                       default=8,
                       help='batch size for training')
    parse.add_argument('-show_batches',
                       '--show_batches',
                       type=int,
                       default=100,
                       help='print progress every this many batches')
    parse.add_argument('-lr',
                       '--learning_rate',
                       type=float,
                       default=0.0002,
                       help='learning rate')
    parse.add_argument('-update_rule',
                       '--update_rule',
                       type=str,
                       default='adam',
                       help='update rule')
    # ---------- train or predict -------
    parse.add_argument('-train',
                       '--train',
                       type=int,
                       default=1,
                       help='whether to train')
    parse.add_argument('-test', '--test', type=int, default=0, help='whether to test')
    #
    args = parse.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    print('load train, test data...')
    # train: 20140401 - 20140831
    # validate: 20140901 - 20140910
    # test: 20140911 - 20140930
    split = [3672, 240, 480]
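    # Sanity check on the split sizes (inferred from the date ranges above,
    # assuming hourly timesteps): 153 train days x 24 = 3672,
    # 10 validation days x 24 = 240, and 20 test days x 24 = 480.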
    #split = [3912, 480]
    data, train_data, val_data, test_data = load_npy_data(
        filename=[args.folder_name + 'd_station.npy',
                  args.folder_name + 'p_station.npy'],
        split=split)
    # data: [num, station_num, 2]
    #f_data, train_f_data, val_f_data, test_f_data = load_pkl_data(args.folder_name + 'f_data_list.pkl', split=split)
    f_data, train_f_data, val_f_data, test_f_data = load_npy_data(
        filename=[args.folder_name + 'citibike_flow_data.npy'], split=split)
    print(len(f_data))
    print('preprocess train/val/test flow data...')
    #f_preprocessing = StandardScaler()
    f_preprocessing = MinMaxNormalization01()
    f_preprocessing.fit(train_f_data)
    train_f_data = f_preprocessing.transform(train_f_data)
    if val_f_data is not None:
        val_f_data = f_preprocessing.transform(val_f_data)
    test_f_data = f_preprocessing.transform(test_f_data)
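    # MinMaxNormalization01 presumably rescales values to [0, 1] via
    # x' = (x - min) / (max - min), with min/max fit on the training split only
    # so the validation/test sets see no leakage (hedged: the class is project code).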
    print('preprocess train/val/test data...')
    pre_process = MinMaxNormalization01()
    #pre_process = StandardScaler()
    pre_process.fit(train_data)
    train_data = pre_process.transform(train_data)
    if val_data is not None:
        val_data = pre_process.transform(val_data)
    test_data = pre_process.transform(test_data)
    #
    num_station = data.shape[1]
    print('number of stations: %d' % num_station)
    #
    train_loader = DataLoader_graph(train_data,
                                    train_f_data,
                                    args.input_steps,
                                    flow_format='identity')
    if val_data is not None:
        val_loader = DataLoader_graph(val_data,
                                      val_f_data,
                                      args.input_steps,
                                      flow_format='identity')
    else:
        val_loader = None
    test_loader = DataLoader_graph(test_data,
                                   test_f_data,
                                   args.input_steps,
                                   flow_format='identity')
    # f_adj_mx = None
    if os.path.isfile(args.folder_name + 'f_adj_mx.npy'):
        f_adj_mx = np.load(args.folder_name + 'f_adj_mx.npy')
    else:
        f_adj_mx = train_loader.get_flow_adj_mx()
        np.save(args.folder_name + 'f_adj_mx.npy', f_adj_mx)
    #
    #
    if args.filter_type == 'laplacian':
        w = np.load(args.folder_name + 'w.npy')
        # w = np.array(w, dtype=np.float32)
        W = get_rescaled_W(w, delta=args.delta, epsilon=args.epsilon)
        # Calculate graph kernel
        L = scaled_laplacian(W)
        #
        f_adj_mx = L

    if args.model == 'FC_LSTM':
        model = FC_LSTM(num_station,
                        args.input_steps,
                        num_layers=args.num_layers,
                        num_units=args.num_units,
                        batch_size=args.batch_size)
    elif args.model == 'FC_GRU':
        model = FC_GRU(num_station,
                       args.input_steps,
                       num_layers=args.num_layers,
                       num_units=args.num_units,
                       batch_size=args.batch_size)
    elif args.model == 'GCN':
        model = GCN(num_station,
                    args.input_steps,
                    num_layers=args.num_layers,
                    num_units=args.num_units,
                    dy_adj=args.dy_adj,
                    dy_filter=args.dy_filter,
                    f_adj_mx=f_adj_mx,
                    trained_adj_mx=args.trained_adj_mx,
                    filter_type=args.filter_type,
                    batch_size=args.batch_size)
    elif args.model == 'flow_GCN':
        model = flow_GCN(num_station,
                         args.input_steps,
                         num_layers=args.num_layers,
                         num_units=args.num_units,
                         f_adj_mx=f_adj_mx,
                         trained_adj_mx=args.trained_adj_mx,
                         filter_type=args.filter_type,
                         batch_size=args.batch_size)
    elif args.model == 'Coupled_GCN':
        model = Coupled_GCN(num_station,
                            args.input_steps,
                            num_layers=args.num_layers,
                            num_units=args.num_units,
                            f_adj_mx=f_adj_mx,
                            trained_adj_mx=args.trained_adj_mx,
                            filter_type=args.filter_type,
                            dy_temporal=args.dy_temporal,
                            att_units=args.att_units,
                            multi_loss=args.multi_loss,
                            batch_size=args.batch_size)
    else:
        # fail loudly if args.model did not match any branch
        raise ValueError('unknown model: %s' % args.model)
    #
    model_path = os.path.join(args.output_folder_name, 'model_save',
                              args.model_save)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    #model_path = os.path.join(args.folder_name, 'model_save', args.model_save)
    solver = ModelSolver(
        model,
        train_loader,
        val_loader,
        test_loader,
        pre_process,
        batch_size=args.batch_size,
        show_batches=args.show_batches,
        n_epochs=args.n_epochs,
        pretrained_model=args.pretrained_model_path,
        update_rule=args.update_rule,
        learning_rate=args.learning_rate,
        model_path=model_path,
    )
    results_path = os.path.join(model_path, 'results')
    if not os.path.exists(results_path):
        os.makedirs(results_path)
    if args.train:
        print('==================== begin training ======================')
        test_target, test_prediction = solver.train(
            os.path.join(model_path, 'out'))
        np.save(os.path.join(results_path, 'test_target.npy'), test_target)
        np.save(os.path.join(results_path, 'test_prediction.npy'),
                test_prediction)
    if args.test:
        print('==================== begin test ==========================')
        test_target, test_prediction = solver.test()
        np.save(os.path.join(results_path, 'test_target.npy'), test_target)
        np.save(os.path.join(results_path, 'test_prediction.npy'),
                test_prediction)
Example 6
def main():
    parse = argparse.ArgumentParser()
    # ---------- environment setting: which gpu -------
    parse.add_argument('-gpu',
                       '--gpu',
                       type=str,
                       default='0',
                       help='which gpu to use: 0 or 1')
    parse.add_argument('-folder_name',
                       '--folder_name',
                       type=str,
                       default='datasets/didi-data/data/')
    parse.add_argument('-output_folder_name',
                       '--output_folder_name',
                       type=str,
                       default='output/didi-data/data/')
    # ---------- input/output settings -------
    parse.add_argument('-input_steps',
                       '--input_steps',
                       type=int,
                       default=6,
                       help='number of input steps')
    # ---------- model ----------
    parse.add_argument('-model',
                       '--model',
                       type=str,
                       default='GCN',
                       help='model: FC_GRU, GCN, ConvGRU, Coupled_ConvGRU, flow_ConvGRU_2, Stack_ConvGRU')
    parse.add_argument('-num_layers',
                       '--num_layers',
                       type=int,
                       default=2,
                       help='number of layers in model')
    parse.add_argument('-num_units',
                       '--num_units',
                       type=int,
                       default=64,
                       help='dim of hidden states')
    parse.add_argument('-kernel_size',
                       '--kernel_size',
                       type=int,
                       default=3,
                       help='kernel size in convolutional operations')
    #
    parse.add_argument(
        '-dy_adj',
        '--dy_adj',
        type=int,
        default=1,
        help='whether to use a dynamic adjacency matrix in the lower feature extraction layer')
    parse.add_argument(
        '-dy_filter',
        '--dy_filter',
        type=int,
        default=0,
        help='whether to use a dynamic filter to generate region-specific filters')
    parse.add_argument(
        '-att_dynamic_adj',
        '--att_dynamic_adj',
        type=int,
        default=0,
        help='whether to use a dynamic adjacency matrix in the attention parts')
    #
    parse.add_argument('-model_save',
                       '--model_save',
                       type=str,
                       default='gcn',
                       help='folder name to save model')
    parse.add_argument('-pretrained_model',
                       '--pretrained_model_path',
                       type=str,
                       default=None,
                       help='path to the pretrained model')
    # ---------- params for CNN ------------
    parse.add_argument('-num_filters',
                       '--num_filters',
                       type=int,
                       default=32,
                       help='number of filters in CNN')
    parse.add_argument('-pooling_units',
                       '--pooling_units',
                       type=int,
                       default=64,
                       help='number of pooling units')
    parse.add_argument('-dropout_keep_prob',
                       '--dropout_keep_prob',
                       type=float,
                       default=0.5,
                       help='keep probability in dropout layer')
    # ---------- training parameters --------
    parse.add_argument('-n_epochs',
                       '--n_epochs',
                       type=int,
                       default=20,
                       help='number of epochs')
    parse.add_argument('-batch_size',
                       '--batch_size',
                       type=int,
                       default=8,
                       help='batch size for training')
    parse.add_argument('-show_batches',
                       '--show_batches',
                       type=int,
                       default=100,
                       help='print progress every this many batches')
    parse.add_argument('-lr',
                       '--learning_rate',
                       type=float,
                       default=0.0002,
                       help='learning rate')
    parse.add_argument('-update_rule',
                       '--update_rule',
                       type=str,
                       default='adam',
                       help='update rule')
    # ---------- train or predict -------
    parse.add_argument('-train',
                       '--train',
                       type=int,
                       default=1,
                       help='whether to train')
    parse.add_argument('-test', '--test', type=int, default=0, help='whether to test')
    #
    args = parse.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    print('load train, test data...')
    #
    # train: 20161101 - 20161125
    # validate: 20161126 - 20161127
    # test: 20161128 - 20161130
    split = [2400, 192, 288]
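    # Sanity check on the split sizes (inferred from the date ranges above,
    # assuming 15-minute timesteps): 25 train days x 96 = 2400,
    # 2 validation days x 96 = 192, and 3 test days x 96 = 288.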
    data, train_data, val_data, test_data = load_npy_data(
        filename=[args.folder_name + 'cd_didi_data.npy'], split=split)
    # data: [num, station_num, 2]
    print(data.shape)
    #
    if 'GCN' in args.model or 'FC' in args.model:
        dataloader = DataLoader_graph
    else:
        data = np.reshape(data, (-1, 20, 20, 2))
        train_data = np.reshape(train_data, (-1, 20, 20, 2))
        val_data = np.reshape(val_data, (-1, 20, 20, 2))
        test_data = np.reshape(test_data, (-1, 20, 20, 2))
        # data: [num, height, width, 2]
        print(data.shape)
        #
        dataloader = DataLoader_map
    #
    map_size = data.shape[1:-1]
    input_dim = data.shape[-1]
    num_station = np.prod(data.shape[1:-1])
    #
    f_data, train_f_data, val_f_data, test_f_data = load_npy_data(
        [args.folder_name + 'cd_didi_flow_in.npy'], split=split)
    print(len(f_data))
    print('preprocess train/val/test flow data...')
    #f_preprocessing = StandardScaler()
    f_preprocessing = MinMaxNormalization01()
    f_preprocessing.fit(train_f_data)
    train_f_data = f_preprocessing.transform(train_f_data)
    val_f_data = f_preprocessing.transform(val_f_data)
    test_f_data = f_preprocessing.transform(test_f_data)
    print('preprocess train/val/test data...')
    # pre_process = StandardScaler()
    pre_process = MinMaxNormalization01()
    pre_process.fit(train_data)
    train_data = pre_process.transform(train_data)
    val_data = pre_process.transform(val_data)
    test_data = pre_process.transform(test_data)
    #

    print('number of stations: %d' % num_station)
    #
    train_loader = dataloader(train_data,
                              train_f_data,
                              args.input_steps,
                              flow_format='identity')
    val_loader = dataloader(val_data,
                            val_f_data,
                            args.input_steps,
                            flow_format='identity')
    test_loader = dataloader(test_data,
                             test_f_data,
                             args.input_steps,
                             flow_format='identity')
    # f_adj_mx = None
    if os.path.isfile(args.folder_name + 'f_adj_mx.npy'):
        f_adj_mx = np.load(args.folder_name + 'f_adj_mx.npy')
    else:
        f_adj_mx = train_loader.get_flow_adj_mx()
        np.save(args.folder_name + 'f_adj_mx.npy', f_adj_mx)
    #
    # if args.model == 'FC_LSTM':
    #     model = FC_LSTM(num_station, args.input_steps,
    #                     num_layers=args.num_layers, num_units=args.num_units,
    #                     batch_size=args.batch_size)
    if args.model == 'FC_GRU':
        model = FC_GRU(num_station,
                       args.input_steps,
                       num_layers=args.num_layers,
                       num_units=args.num_units,
                       batch_size=args.batch_size)
    elif args.model == 'GCN':
        model = GCN(num_station,
                    args.input_steps,
                    num_layers=args.num_layers,
                    num_units=args.num_units,
                    dy_adj=args.dy_adj,
                    dy_filter=args.dy_filter,
                    f_adj_mx=f_adj_mx,
                    batch_size=args.batch_size)
    elif args.model == 'ConvGRU':
        model = ConvGRU(input_shape=[map_size[0], map_size[1], input_dim],
                        input_steps=args.input_steps,
                        num_layers=args.num_layers,
                        num_units=args.num_units,
                        kernel_shape=[args.kernel_size, args.kernel_size],
                        batch_size=args.batch_size)
    # if args.model == 'flow_ConvGRU':
    #     model = flow_ConvGRU(input_shape=[20, 20, input_dim], input_steps=args.input_steps,
    #                           num_layers=args.num_layers, num_units=args.num_units, kernel_shape=[args.kernel_size, args.kernel_size],
    #                           f_adj_mx=f_adj_mx,
    #                           batch_size=args.batch_size)
    elif args.model == 'Coupled_ConvGRU':
        model = CoupledConvGRU(
            input_shape=[20, 20, input_dim],
            input_steps=args.input_steps,
            num_layers=args.num_layers,
            num_units=args.num_units,
            kernel_shape=[args.kernel_size, args.kernel_size],
            batch_size=args.batch_size)

    # flow_ConvGRU_2 is Stack_ConvGRU with 2 conv layers.
    elif args.model == 'flow_ConvGRU_2':
        model = flow_ConvGRU_2(
            input_shape=[20, 20, input_dim],
            input_steps=args.input_steps,
            num_layers=args.num_layers,
            num_units=args.num_units,
            kernel_shape=[args.kernel_size, args.kernel_size],
            f_adj_mx=f_adj_mx,
            batch_size=args.batch_size)

    elif args.model == 'Stack_ConvGRU':
        model = Stack_ConvGRU(
            input_shape=[20, 20, input_dim],
            input_steps=args.input_steps,
            num_layers=args.num_layers,
            num_units=args.num_units,
            kernel_shape=[args.kernel_size, args.kernel_size],
            f_adj_mx=f_adj_mx,
            batch_size=args.batch_size)
    else:
        # fail loudly if args.model did not match any branch
        raise ValueError('unknown model: %s' % args.model)
    #
    model_path = os.path.join(args.output_folder_name, 'model_save',
                              args.model_save)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    #model_path = os.path.join(args.folder_name, 'model_save', args.model_save)
    solver = ModelSolver(
        model,
        train_loader,
        val_loader,
        test_loader,
        pre_process,
        batch_size=args.batch_size,
        show_batches=args.show_batches,
        n_epochs=args.n_epochs,
        pretrained_model=args.pretrained_model_path,
        update_rule=args.update_rule,
        learning_rate=args.learning_rate,
        model_path=model_path,
    )
    results_path = os.path.join(model_path, 'results')
    if not os.path.exists(results_path):
        os.makedirs(results_path)
    if args.train:
        print('==================== begin training ======================')
        test_target, test_prediction = solver.train(
            os.path.join(model_path, 'out'))
        np.save(os.path.join(results_path, 'test_target.npy'), test_target)
        np.save(os.path.join(results_path, 'test_prediction.npy'),
                test_prediction)
    if args.test:
        print('==================== begin test ==========================')
        test_target, test_prediction = solver.test()
        np.save(os.path.join(results_path, 'test_target.npy'), test_target)
        np.save(os.path.join(results_path, 'test_prediction.npy'),
                test_prediction)
Example 7
    def __init__(self, args, args_SG, multiG):
        super(mymodel, self).__init__()
        self.args = args
        self.cuda_avl = args["cuda"]
        self.dim = args["dim"]
        self.num_ent1 = args["num_ent1"]
        self.num_ent2 = args["num_ent2"]

        # initialize embedding
        self.num_rels1, self.num_rels2 = args["num_rels1"], args["num_rels2"]
        if args["random_initialize"]:
            self.emb_ew1 = nn.Embedding(args["num_ew1"], self.dim)
            self.emb_ew2 = nn.Embedding(args["num_ew2"], self.dim)
            nn.init.xavier_normal_(self.emb_ew1.weight)  # in-place variant; plain xavier_normal is deprecated
            nn.init.xavier_normal_(self.emb_ew2.weight)
        else:
            emb1, _, _ = load_vec(args["emb_ew1"])
            emb2, _, _ = load_vec(args["emb_ew2"])
            # self.embed1_w, self.embed2_w = nn.Parameter(torch.from_numpy(emb1[self.args["num_ent1"]:,:]).float()), nn.Parameter(torch.from_numpy(emb2[self.args["num_ent2"]:,:]).float())
            # self.embed1_e, self.embed2_e = nn.Parameter(torch.from_numpy(emb1[:self.args["num_ent1"],:]).float()), nn.Parameter(torch.from_numpy(emb2[:self.args["num_ent2"],:]).float())
            self.emb_ew1, self.num_ew1, self.dim = create_emb_layer(
                weights_matrix=emb1, max_norm=args["max_norm"])
            self.emb_ew2, self.num_ew2, self.dim = create_emb_layer(
                weights_matrix=emb2, max_norm=args["max_norm"])
            # self.emb_ew1, self.emb_ew2 = torch.from_numpy(emb1).float(), torch.from_numpy(emb2).float()
            # self.num_ew1, self.dim = self.emb_ew1.shape[0], self.emb_ew1.shape[1]
            # self.num_ew2, self.dim = self.emb_ew2.shape[0], self.emb_ew2.shape[1]
            del emb1, emb2

        if args["GCN"] is True:
            self.emb_rels1 = nn.Embedding(self.num_rels1, self.dim * 2)
            self.emb_rels2 = nn.Embedding(self.num_rels2, self.dim * 2)
        else:
            self.emb_rels1 = nn.Embedding(self.num_rels1, self.dim)
            self.emb_rels2 = nn.Embedding(self.num_rels2, self.dim)
        nn.init.xavier_normal_(self.emb_rels1.weight)  # in-place variant; plain xavier_normal is deprecated
        nn.init.xavier_normal_(self.emb_rels2.weight)

        # GCN
        if args["GCN"] is True:
            self.GCN_model1 = GCN(nfeat=multiG.KG1.attr.shape[1],
                                  nhid=args["dim"],
                                  nclass=args["dim"],
                                  dropout=args["dropout"])
            self.GCN_model2 = GCN(nfeat=multiG.KG2.attr.shape[1],
                                  nhid=args["dim"],
                                  nclass=args["dim"],
                                  dropout=args["dropout"])
        self.emb_ew1_GCN = None
        self.emb_ew2_GCN = None

        # mapping
        if args["GCN"] is True:
            self.mapping = nn.Linear(self.dim * 2, self.dim * 2, bias=False)
            self.mapping.weight.data.copy_(torch.diag(torch.ones(self.dim *
                                                                 2)))
        else:
            self.mapping = nn.Linear(self.dim, self.dim, bias=False)
            self.mapping.weight.data.copy_(torch.diag(torch.ones(self.dim)))

            self.mapping_rel = nn.Linear(self.dim, self.dim, bias=False)
            self.mapping_rel.weight.data.copy_(torch.diag(torch.ones(
                self.dim)))

            for param in self.mapping_rel.parameters():
                param.requires_grad = False

            for param in self.mapping.parameters():
                param.requires_grad = False
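            # Side note (not the project's code): nn.init.eye_(self.mapping.weight)
            # would perform the same in-place identity initialization as the
            # torch.diag(torch.ones(...)) copies above.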

        # model
        self.MtransE = MTransE(args)
        self.SkipGram = SkipGram()
        self.MUSE = MUSE(args)
        self.DistMult = DistMult(args)

        # data
        self.multiG = multiG
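For reference, a hedged sketch of the args dictionary this constructor reads (the keys are taken from the snippet; the values are illustrative, not from the project):

args = {
    "cuda": False, "dim": 100,
    "num_ent1": 10000, "num_ent2": 12000,  # entity counts for the two KGs
    "num_rels1": 30, "num_rels2": 40,      # relation counts for the two KGs
    "random_initialize": True,             # if False, emb_ew1/emb_ew2 paths are needed instead
    "num_ew1": 15000, "num_ew2": 18000,    # entity+word vocabulary sizes
    "max_norm": None, "GCN": True, "dropout": 0.5,
}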