Code Example #1
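This and the following snippets are __init__ methods excerpted from larger dataset classes; they assume module-level imports of json, os, numpy (as np), nltk, sklearn, torch, from os.path import join, plus the project helpers settings, data_utils, feature_utils, and a configured logger.
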
    def __init__(self, file_dir, matrix_size1, matrix_size2, seed, shuffle):

        self.file_dir = file_dir

        self.matrix_size_1_long = matrix_size1
        self.matrix_size_2_short = matrix_size2

        # each record in train.txt is [label, mag_venue_name, aminer_venue_name]
        self.train_data = json.load(
            open(join(settings.VENUE_DATA_DIR, 'train.txt'), 'r'))
        self.mag = [nltk.word_tokenize(p[1]) for p in self.train_data]
        self.aminer = [nltk.word_tokenize(p[2]) for p in self.train_data]
        self.labels = [p[0] for p in self.train_data]

        self.calc_keyword_seqs()

        # one "long" (full venue name) and one "short" (keyword-only)
        # similarity matrix per pair, plus an integer label vector
        n_matrix = len(self.labels)
        self.X_long = np.zeros(
            (n_matrix, self.matrix_size_1_long, self.matrix_size_1_long))
        self.X_short = np.zeros(
            (n_matrix, self.matrix_size_2_short, self.matrix_size_2_short))
        # int64 labels (np.long is no longer available in recent NumPy)
        self.Y = np.zeros(n_matrix, dtype=np.int64)
        count = 0
        for i, cur_y in enumerate(self.labels):
            if i % 100 == 0:
                logger.info('pairs to matrices %d', i)
            v1 = self.mag[i]
            v1 = " ".join([str(v) for v in v1])
            v2 = self.aminer[i]
            v2 = " ".join([str(v) for v in v2])
            v1_key = self.mag_venue_keywords[i]
            v1_key = " ".join([str(v) for v in v1_key])
            v2_key = self.aminer_venue_keywords[i]
            v2_key = " ".join([str(v) for v in v2_key])
            matrix1 = self.sentences_long_to_matrix(v1, v2)
            self.X_long[count] = feature_utils.scale_matrix(matrix1)
            matrix2 = self.sentences_short_to_matrix(v1_key, v2_key)
            self.X_short[count] = feature_utils.scale_matrix(matrix2)
            self.Y[count] = cur_y
            count += 1

        print("shuffle", shuffle)
        if shuffle:
            self.X_long, self.X_short, self.Y = sklearn.utils.shuffle(
                self.X_long, self.X_short, self.Y, random_state=seed)

        self.labels_one_hot = data_utils.one_hot_encoding(self.Y)

        self.N = len(self.Y)
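
A minimal usage sketch for this constructor; the enclosing class name (VenueCNNMatchDataset) and the concrete matrix sizes are illustrative assumptions, not taken from the listing:

    # hypothetical call; the class name and sizes are assumed for illustration
    dataset = VenueCNNMatchDataset(
        file_dir=settings.VENUE_DATA_DIR,
        matrix_size1=7,   # side of the word-level (long) similarity matrix
        matrix_size2=4,   # side of the keyword-level (short) similarity matrix
        seed=42,
        shuffle=True)
    print(dataset.N, dataset.X_long.shape, dataset.X_short.shape)
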
Code Example #2
    def __init__(self,
                 file_dir,
                 matrix_size1,
                 matrix_size2,
                 seed,
                 shuffle,
                 args,
                 use_emb=True):
        self.file_dir = file_dir

        self.matrix_size_1_long = matrix_size1
        self.matrix_size_2_short = matrix_size2

        self.use_emb = use_emb
        if self.use_emb:
            self.pretrain_emb = torch.load(
                os.path.join(settings.OUT_DIR, "rnn_init_word_emb.emb"))
        self.tokenizer = data_utils.load_large_obj(settings.OUT_DIR,
                                                   "tokenizer_all_domain.pkl")

        self.train_data = json.load(
            open(join(settings.VENUE_DATA_DIR, 'train_filter.txt'), 'r'))

        # keep a class-balanced subset: the last n_pos_set positive pairs and
        # an equal number of negative pairs
        n_pos_set = int((args.train_num + 2 * args.test_num) / 2)

        neg_pairs = [p for p in self.train_data if p[0] == 0]
        pos_pairs = [p for p in self.train_data if p[0] == 1][-n_pos_set:]
        n_pos = len(pos_pairs)
        neg_pairs = neg_pairs[-n_pos:]
        self.train_data = pos_pairs + neg_pairs

        self.train_data = sklearn.utils.shuffle(self.train_data,
                                                random_state=37)

        self.mag = [nltk.word_tokenize(p[1]) for p in self.train_data]
        self.aminer = [nltk.word_tokenize(p[2]) for p in self.train_data]
        self.labels = [p[0] for p in self.train_data]

        self.calc_keyword_seqs()

        n_matrix = len(self.labels)
        self.X_long = np.zeros(
            (n_matrix, self.matrix_size_1_long, self.matrix_size_1_long))
        self.X_short = np.zeros(
            (n_matrix, self.matrix_size_2_short, self.matrix_size_2_short))
        self.Y = np.zeros(n_matrix, dtype=np.int64)
        count = 0
        for i, cur_y in enumerate(self.labels):
            if i % 100 == 0:
                print('pairs to matrices', i)
            v1 = self.mag[i]
            v1 = " ".join([str(v) for v in v1])
            v2 = self.aminer[i]
            v2 = " ".join([str(v) for v in v2])
            v1_key = self.mag_venue_keywords[i]
            v1_key = " ".join([str(v) for v in v1_key])
            v2_key = self.aminer_venue_keywords[i]
            v2_key = " ".join([str(v) for v in v2_key])
            matrix1 = self.sentences_long_to_matrix(v1, v2)
            # print("mat1", matrix1)
            self.X_long[count] = feature_utils.scale_matrix(matrix1)
            matrix2 = self.sentences_short_to_matrix(v1_key, v2_key)
            # print("mat2", matrix2)
            self.X_short[count] = feature_utils.scale_matrix(matrix2)
            self.Y[count] = cur_y
            count += 1

        self.N = len(self.Y)

        n_train = args.train_num
        n_test = args.test_num
        # n_train = self.N - 2*n_test

        train_data = {}
        train_data["x1"] = self.X_long[:n_train]
        train_data["x2"] = self.X_short[:n_train]
        train_data["y"] = self.Y[:n_train]
        print("train labels", len(train_data["y"]))

        test_data = {}
        test_data["x1"] = self.X_long[n_train:(n_train + n_test)]
        test_data["x2"] = self.X_short[n_train:(n_train + n_test)]
        test_data["y"] = self.Y[n_train:(n_train + n_test)]
        print("test labels", len(test_data["y"]))

        valid_data = {}
        valid_data["x1"] = self.X_long[n_train + n_test:(n_train + n_test * 2)]
        valid_data["x2"] = self.X_short[n_train + n_test:(n_train +
                                                          n_test * 2)]
        valid_data["y"] = self.Y[n_train + n_test:(n_train + n_test * 2)]
        print("valid labels", len(valid_data["y"]))

        out_dir = join(settings.DATA_DIR, "dom-adpt")
        os.makedirs(out_dir, exist_ok=True)
        data_utils.dump_large_obj(train_data, out_dir, "venue_train.pkl")
        data_utils.dump_large_obj(test_data, out_dir, "venue_test.pkl")
        data_utils.dump_large_obj(valid_data, out_dir, "venue_valid.pkl")
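
This variant ends by persisting the three splits with data_utils.dump_large_obj; a downstream script can presumably read them back with the data_utils.load_large_obj helper already used above for the tokenizer (a sketch, assuming the dump/load pair is symmetric):

    out_dir = join(settings.DATA_DIR, "dom-adpt")
    train_split = data_utils.load_large_obj(out_dir, "venue_train.pkl")
    x1, x2, y = train_split["x1"], train_split["x2"], train_split["y"]
    print(x1.shape, x2.shape, y.shape)
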
Code Example #3
    def __init__(self,
                 file_dir,
                 matrix_size1,
                 matrix_size2,
                 seed,
                 shuffle,
                 args,
                 use_emb=True):

        self.file_dir = file_dir

        self.matrix_size_1_long = matrix_size1
        self.matrix_size_2_short = matrix_size2

        self.use_emb = use_emb
        if self.use_emb:
            self.pretrain_emb = torch.load(
                os.path.join(settings.OUT_DIR, "rnn_init_word_emb.emb"))
        self.tokenizer = data_utils.load_large_obj(settings.OUT_DIR,
                                                   "tokenizer_all_domain.pkl")

        # load training pairs
        # pos_pairs = data_utils.load_json(file_dir, 'train_positive_affi.json')
        # pos_pairs = [(p['aminer_affi'], p['mag_affi']) for p in pos_pairs]
        pos_pairs = data_utils.load_json(file_dir,
                                         "label_data_aff_zhoushao.json")[:600]
        pos_pairs = [({"name": p["affiliation"]}, {"DisplayName": p["label"]})
                     for p in pos_pairs if p["label"] != "[NIF]"]
        # neg_pairs = data_utils.load_json(file_dir, 'train_negative_affi.json')
        neg_pairs = data_utils.load_json(
            file_dir, 'train_negative_affi_clean.json')[:600]
        neg_pairs = [(p['aminer_affi'], p['mag_affi']) for p in neg_pairs]
        pairs_add = data_utils.load_json(
            file_dir, "mag_aminer_hard_correct_zfj_copy.json")
        print("add pairs", len(pairs_add))
        pos_pairs += [(p['aminer_affi'], p['mag_affi']) for p in pairs_add
                      if p["label_zfj"] == "1"]
        neg_pairs += [(p['aminer_affi'], p['mag_affi']) for p in pairs_add
                      if p["label_zfj"] == "0"]
        n_pos = len(pos_pairs)
        # labels = [1] * len(pos_pairs) + [0] * len(pos_pairs)
        labels = [1] * len(pos_pairs) + [0] * len(neg_pairs)
        # pairs = pos_pairs + [neg_pairs[x] for x in range(n_pos)]  # label balanced is important
        pairs = pos_pairs + neg_pairs  # label balanced is important

        n_matrix = len(pairs)
        self.X_long = np.zeros(
            (n_matrix, self.matrix_size_1_long, self.matrix_size_1_long))
        self.X_short = np.zeros(
            (n_matrix, self.matrix_size_2_short, self.matrix_size_2_short))
        self.Y = np.zeros(n_matrix, dtype=np.int64)
        count = 0
        for i, pair in enumerate(pairs):
            if i % 100 == 0:
                print('pairs to matrices', i)
            item_a, item_m = pair
            cur_y = labels[i]
            matrix1 = self.sentences_long_to_matrix(item_a['name'],
                                                    item_m['DisplayName'])
            # print("matrix1", matrix1)
            # print(item_a['name'])
            # print(item_m['DisplayName'])
            self.X_long[count] = feature_utils.scale_matrix(matrix1)
            # matrix2 = self.sentences_short_to_matrix(item_a['main_body'], item_m['NormalizedName'])
            matrix2 = self.sentences_short_to_matrix_2(item_a['name'],
                                                       item_m['DisplayName'])
            # print("matrix2", matrix2)
            self.X_short[count] = feature_utils.scale_matrix(matrix2)
            self.Y[count] = cur_y
            count += 1

            # # transpose
            # self.X_long[count] = feature_utils.scale_matrix(matrix1.transpose())
            # self.X_short[count] = feature_utils.scale_matrix(matrix2.transpose())
            # self.Y[count] = cur_y
            # count += 1

        print("shuffle", shuffle)
        if shuffle:
            self.X_long, self.X_short, self.Y = sklearn.utils.shuffle(
                self.X_long, self.X_short, self.Y, random_state=seed)

        self.N = len(self.Y)

        n_train = int(self.N * 0.6)
        n_test = int(self.N * 0.2)

        train_data = {}
        train_data["x1"] = self.X_long[:n_train]
        train_data["x2"] = self.X_short[:n_train]
        train_data["y"] = self.Y[:n_train]
        print("train labels", len(train_data["y"]))

        test_data = {}
        test_data["x1"] = self.X_long[n_train:(n_train + n_test)]
        test_data["x2"] = self.X_short[n_train:(n_train + n_test)]
        test_data["y"] = self.Y[n_train:(n_train + n_test)]
        print("test labels", len(test_data["y"]), test_data["y"])

        valid_data = {}
        valid_data["x1"] = self.X_long[n_train + n_test:(n_train + n_test * 2)]
        valid_data["x2"] = self.X_short[n_train + n_test:(n_train +
                                                          n_test * 2)]
        valid_data["y"] = self.Y[n_train + n_test:(n_train + n_test * 2)]
        print("valid labels", len(valid_data["y"]), valid_data["y"])

        out_dir = join(settings.DATA_DIR, "dom-adpt")
        os.makedirs(out_dir, exist_ok=True)
        data_utils.dump_large_obj(train_data, out_dir, "aff_train.pkl")
        data_utils.dump_large_obj(test_data, out_dir, "aff_test.pkl")
        data_utils.dump_large_obj(valid_data, out_dir, "aff_valid.pkl")
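
Because positives and negatives are concatenated before the optional shuffle, the positional 60/20/20 split only stays label-balanced when shuffle=True; a small sanity check that could be appended inside the constructor (a sketch; np.bincount assumes non-negative integer labels, which holds here):

    # sanity check (sketch): every split should contain both classes,
    # which only holds if shuffle=True was passed
    for name, split in [("train", train_data), ("test", test_data),
                        ("valid", valid_data)]:
        print(name, np.bincount(split["y"]))
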
Code Example #4
    def __init__(self, file_dir, matrix_size1, matrix_size2, build_index_window, seed, shuffle, args, use_emb=True):
        self.file_dir = file_dir
        self.build_index_window = build_index_window

        self.matrix_title_size = matrix_size1
        self.matrix_author_size = matrix_size2

        self.use_emb = use_emb
        if self.use_emb:
            self.pretrain_emb = torch.load(os.path.join(settings.OUT_DIR, "rnn_init_word_emb.emb"))
        self.tokenizer = data_utils.load_large_obj(settings.OUT_DIR, "tokenizer_all_domain.pkl")

        # load training pairs
        pos_pairs = data_utils.load_json(file_dir, 'pos-pairs-train.json')
        pos_pairs = [(p['c'], p['n']) for p in pos_pairs]
        neg_pairs = data_utils.load_json(file_dir, 'neg-pairs-train.json')
        neg_pairs = [(p['c'], p['n']) for p in neg_pairs]
        labels = [1] * len(pos_pairs) + [0] * len(neg_pairs)
        pairs = pos_pairs + neg_pairs

        n_matrix = len(pairs)
        self.X_title = np.zeros((n_matrix, self.matrix_title_size, self.matrix_title_size))
        self.X_author = np.zeros((n_matrix, self.matrix_author_size, self.matrix_author_size))
        self.Y = np.zeros(n_matrix, dtype=np.int64)
        count = 0
        for i, pair in enumerate(pairs):
            if i % 100 == 0:
                print('pairs to matrices', i)
            cpaper, npaper = pair
            cur_y = labels[i]
            matrix1 = self.titles_to_matrix(cpaper['title'], npaper['title'])
            self.X_title[count] = feature_utils.scale_matrix(matrix1)
            matrix2 = self.authors_to_matrix(cpaper['authors'], npaper['authors'])
            self.X_author[count] = feature_utils.scale_matrix(matrix2)
            self.Y[count] = cur_y
            count += 1

        print("shuffle", shuffle)
        if shuffle:
            self.X_title, self.X_author, self.Y = sklearn.utils.shuffle(
                self.X_title, self.X_author, self.Y,
                random_state=seed
            )

        self.N = len(self.Y)

        # valid_start = int(self.N * args.train_ratio / 100)
        # test_start = int(self.N * (args.train_ratio + args.valid_ratio) / 100)
        valid_start = 800
        test_start = 200 + valid_start
        end_point = 200 + test_start

        train_data = {}
        train_data["x1"] = self.X_title[:valid_start]
        train_data["x2"] = self.X_author[:valid_start]
        train_data["y"] = self.Y[:valid_start]
        print("train labels", len(train_data["y"]))

        test_data = {}
        test_data["x1"] = self.X_title[test_start: end_point]
        test_data["x2"] = self.X_author[test_start: end_point]
        test_data["y"] = self.Y[test_start: end_point]
        print("test labels", len(test_data["y"]))

        valid_data = {}
        valid_data["x1"] = self.X_title[valid_start:test_start]
        valid_data["x2"] = self.X_author[valid_start:test_start]
        valid_data["y"] = self.Y[valid_start:test_start]
        print("valid labels", len(valid_data["y"]))

        out_dir = join(settings.DATA_DIR, "dom-adpt")
        os.makedirs(out_dir, exist_ok=True)
        data_utils.dump_large_obj(train_data, out_dir, "paper_train.pkl")
        data_utils.dump_large_obj(test_data, out_dir, "paper_test.pkl")
        data_utils.dump_large_obj(valid_data, out_dir, "paper_valid.pkl")
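
A usage sketch for the paper-pair variant; the class name (PaperCNNMatchDataset), the data path, and the window size are assumptions, and args is only referenced in the commented-out ratio-based split, so None suffices here:

    # hypothetical call; names and values below are assumed for illustration
    dataset = PaperCNNMatchDataset(
        file_dir="data/paper-pairs",   # directory holding pos/neg-pairs-train.json
        matrix_size1=7,                # title-matrix side
        matrix_size2=4,                # author-matrix side
        build_index_window=5,          # stored on the instance, unused in __init__
        seed=42,
        shuffle=True,
        args=None)
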
Code Example #5
    def __init__(self,
                 file_dir,
                 matrix_size1,
                 matrix_size2,
                 seed,
                 shuffle,
                 args,
                 use_emb=True,
                 all_train=False):
        self.file_dir = file_dir
        self.matrix_title_size = matrix_size1
        self.matrix_author_size = matrix_size2

        # load training pairs
        pos_pairs = data_utils.load_json(file_dir, 'pos_person_pairs.json')
        neg_pairs = data_utils.load_json(file_dir, 'neg_person_pairs.json')
        pairs = pos_pairs + neg_pairs
        labels = [1] * len(pos_pairs) + [0] * len(neg_pairs)

        self.person_dict = data_utils.load_json(file_dir,
                                                "ego_person_dict.json")

        self.use_emb = use_emb
        if self.use_emb:
            self.pretrain_emb = torch.load(
                os.path.join(settings.OUT_DIR, "rnn_init_word_emb.emb"))
        self.tokenizer = data_utils.load_large_obj(settings.OUT_DIR,
                                                   "tokenizer_all_domain.pkl")

        X_long = []
        X_short = []
        nn_pos = 0  # note: logged below but never incremented in this loop
        nn_neg = 0
        for i, pair in enumerate(pairs):
            if i % 100 == 0:
                logger.info('pairs to matrices %d %d %d', i, nn_pos, nn_neg)
            aid, mid = pair['aid'], pair['mid']
            aperson = self.person_dict.get(aid, {})
            mperson = self.person_dict.get(mid, {})
            # matrix1, nn1 = self.org_to_matrix(aperson.get('org', ''), mperson.get('org', ''), matrix_size1)
            matrix1, nn1 = self.paper_to_matrix(aperson.get('pubs', []),
                                                mperson.get('pubs', []),
                                                matrix_size1)
            matrix1 = feature_utils.scale_matrix(matrix1)
            X_long.append(matrix1)
            matrix2, nn2 = self.venue_to_matrix(aperson.get('venue', ''),
                                                mperson.get('venue', ''),
                                                matrix_size2)
            # print("matrix2", matrix2)
            matrix2 = feature_utils.scale_matrix(matrix2)
            X_short.append(matrix2)

        self.X_long = X_long
        self.X_short = X_short
        self.Y = labels

        print("shuffle", shuffle)
        if shuffle:
            self.X_long, self.X_short, self.Y = sklearn.utils.shuffle(
                self.X_long, self.X_short, self.Y, random_state=seed)

        self.N = len(self.Y)

        # valid_start = int(self.N * args.train_ratio / 100)
        # test_start = int(self.N * (args.train_ratio + args.valid_ratio) / 100)
        if all_train:
            valid_start = 10000
            test_start = 5000 + valid_start
            end_point = 5000 + test_start
        else:
            valid_start = 800
            test_start = 200 + valid_start
            end_point = 200 + test_start

        train_data = {}
        train_data["x1"] = self.X_long[:valid_start]
        train_data["x2"] = self.X_short[:valid_start]
        train_data["y"] = self.Y[:valid_start]
        print("train labels", len(train_data["y"]))

        test_data = {}
        test_data["x1"] = self.X_long[test_start:end_point]
        test_data["x2"] = self.X_short[test_start:end_point]
        test_data["y"] = self.Y[test_start:end_point]
        print("test labels", len(test_data["y"]))

        valid_data = {}
        valid_data["x1"] = self.X_long[valid_start:test_start]
        valid_data["x2"] = self.X_short[valid_start:test_start]
        valid_data["y"] = self.Y[valid_start:test_start]
        print("valid labels", len(valid_data["y"]))

        print("train positive samples", sum(train_data["y"]))
        print("test positive samples", sum(test_data["y"]))

        out_dir = join(settings.DATA_DIR, "dom-adpt")
        os.makedirs(out_dir, exist_ok=True)
        data_utils.dump_large_obj(train_data, out_dir, "author_train.pkl")
        data_utils.dump_large_obj(test_data, out_dir, "author_test.pkl")
        data_utils.dump_large_obj(valid_data, out_dir, "author_valid.pkl")
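
The all_train flag only changes the positional split bounds; a small helper mirroring that logic (a sketch for clarity, not part of the original class):

    def split_bounds(all_train):
        # mirrors the constructor's hard-coded positional split
        valid_start = 10000 if all_train else 800
        test_start = valid_start + (5000 if all_train else 200)
        end_point = test_start + (5000 if all_train else 200)
        return valid_start, test_start, end_point

    print(split_bounds(False))  # (800, 1000, 1200)
    print(split_bounds(True))   # (10000, 15000, 20000)
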