def predict_level(self, level, test_x, k, labels_num):
    data_cnf, model_cnf = self.data_cnf, self.model_cnf
    model = self.models.get(level, None)
    if level == 0:
        logger.info(F'Predicting Level-{level}, Top: {k}')
        if model is None:
            model = Model(AttentionRNN, labels_num=labels_num,
                          model_path=F'{self.model_path}-Level-{level}',
                          emb_init=self.emb_init,
                          **data_cnf['model'], **model_cnf['model'])
        test_loader = DataLoader(MultiLabelDataset(test_x),
                                 model_cnf['predict']['batch_size'],
                                 num_workers=4)
        return model.predict(test_loader, k=k)
    else:
        if level == self.level - 1:
            groups = np.load(F'{self.groups_path}-Level-{level-1}.npy')
        else:
            groups = self.get_inter_groups(labels_num)
        group_scores, group_labels = self.predict_level(level - 1, test_x,
                                                        self.top, len(groups))
        torch.cuda.empty_cache()
        logger.info(F'Predicting Level-{level}, Top: {k}')
        if model is None:
            model = XMLModel(network=FastAttentionRNN, labels_num=labels_num,
                             model_path=F'{self.model_path}-Level-{level}',
                             emb_init=self.emb_init,
                             **data_cnf['model'], **model_cnf['model'])
        test_loader = DataLoader(XMLDataset(test_x, labels_num=labels_num,
                                            groups=groups,
                                            group_labels=group_labels,
                                            group_scores=group_scores),
                                 model_cnf['predict']['batch_size'],
                                 num_workers=4)
        return model.predict(test_loader, k=k)
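# The recursion above feeds each level's top-k group predictions into the next
# level. A minimal, self-contained sketch (toy data, not the original API) of
# how `groups`, `group_labels`, and `group_scores` expand into label candidates
# inside XMLDataset:
def _demo_group_candidates():
    import numpy as np
    groups = [np.array([0, 1]), np.array([2, 3]), np.array([4, 5])]  # label ids per group
    group_labels = np.array([[2, 0], [1, 2]])                        # top-2 groups per sample
    group_scores = np.array([[0.9, 0.4], [0.7, 0.6]])                # matching scores
    for sample, (gl, gs) in enumerate(zip(group_labels, group_scores)):
        candidates = np.concatenate([groups[g] for g in gl])
        print(sample, candidates, gs)  # sample 0 -> candidate labels [4 5 0 1]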
def default_eval(
    data_cnf,
    model_cnf,
    data_name,
    model_name,
    model_path,
    emb_init,
    tree_id,
    output_suffix,
    dry_run,
):
    logger.info('Loading Test Set')
    mlb = get_mlb(data_cnf['labels_binarizer'])
    labels_num = len(mlb.classes_)
    test_x, _ = get_data(data_cnf['test']['texts'], None)
    logger.info(F'Size of Test Set: {len(test_x):,}')

    logger.info('Predicting')
    # Drop any stale 'load_model' entry so it cannot clash with the explicit
    # load_model=True keyword below.
    model_cnf['model'].pop('load_model', None)
    if 'cluster' not in model_cnf:
        test_loader = DataLoader(MultiLabelDataset(test_x),
                                 model_cnf['predict']['batch_size'],
                                 num_workers=4)
        if 'loss' in model_cnf:
            gamma = model_cnf['loss'].get('gamma', 1.0)
            loss_name = model_cnf['loss']['name']
        else:
            gamma = None
            loss_name = 'bce'
        model = Model(network=AttentionRNN, labels_num=labels_num,
                      model_path=model_path, emb_init=emb_init,
                      load_model=True, loss_name=loss_name, gamma=gamma,
                      **data_cnf['model'], **model_cnf['model'])
        scores, labels = model.predict(test_loader,
                                       k=model_cnf['predict'].get('k', 100))
        labels = mlb.classes_[labels]
    else:
        model = FastAttentionXML(labels_num, data_cnf, model_cnf, tree_id,
                                 output_suffix)
        scores, labels = model.predict(test_x, model_cnf['predict'].get('k', 100))
        labels = mlb.classes_[labels]
    logger.info('Finish Predicting')

    score_path, label_path = output_res(data_cnf['output']['res'],
                                        f'{model_name}-{data_name}{tree_id}',
                                        scores, labels, output_suffix)
    log_results(score_path, label_path, dry_run)
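# A small sketch of the `labels = mlb.classes_[labels]` step above: predict
# returns integer label indices, and NumPy fancy indexing into the binarizer's
# classes_ array maps every entry of the index matrix to its label string.
def _demo_index_to_label():
    import numpy as np
    from sklearn.preprocessing import MultiLabelBinarizer
    mlb = MultiLabelBinarizer().fit([['cat', 'dog', 'fish']])
    pred_indices = np.array([[2, 0], [1, 2]])  # top-2 label ids per sample
    print(mlb.classes_[pred_indices])          # [['fish' 'cat'] ['dog' 'fish']]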
def main(data_cnf, model_cnf, mode, reg):
    yaml = YAML(typ='safe')
    data_cnf, model_cnf = yaml.load(Path(data_cnf)), yaml.load(Path(model_cnf))
    model, model_name, data_name = None, model_cnf['name'], data_cnf['name']
    model_path = os.path.join(model_cnf['path'], F'{model_name}-{data_name}')
    emb_init = get_word_emb(data_cnf['embedding']['emb_init'])
    logger.info(F'Model Name: {model_name}')

    if mode is None or mode == 'train':
        logger.info('Loading Training and Validation Set')
        train_x, train_labels = get_data(data_cnf['train']['texts'],
                                         data_cnf['train']['labels'])
        if 'size' in data_cnf['valid']:
            random_state = data_cnf['valid'].get('random_state', 1240)
            train_x, valid_x, train_labels, valid_labels = train_test_split(
                train_x, train_labels, test_size=data_cnf['valid']['size'],
                random_state=random_state)
        else:
            valid_x, valid_labels = get_data(data_cnf['valid']['texts'],
                                             data_cnf['valid']['labels'])
        mlb = get_mlb(data_cnf['labels_binarizer'],
                      np.hstack((train_labels, valid_labels)))
        train_y, valid_y = mlb.transform(train_labels), mlb.transform(valid_labels)
        labels_num = len(mlb.classes_)
        logger.info(F'Number of Labels: {labels_num}')
        logger.info(F'Size of Training Set: {len(train_x)}')
        logger.info(F'Size of Validation Set: {len(valid_x)}')

        edges = set()
        if reg:
            classes = mlb.classes_.tolist()
            with open(data_cnf['hierarchy']) as fin:
                for line in fin:
                    data = line.strip().split()
                    p = data[0]
                    if p not in classes:
                        continue
                    p_id = classes.index(p)
                    for c in data[1:]:
                        if c not in classes:
                            continue
                        c_id = classes.index(c)
                        edges.add((p_id, c_id))
            logger.info(F'Number of Edges: {len(edges)}')

        logger.info('Training')
        train_loader = DataLoader(MultiLabelDataset(train_x, train_y),
                                  model_cnf['train']['batch_size'],
                                  shuffle=True, num_workers=4)
        valid_loader = DataLoader(MultiLabelDataset(valid_x, valid_y, training=True),
                                  model_cnf['valid']['batch_size'], num_workers=4)
        model = Model(network=MATCH, labels_num=labels_num, model_path=model_path,
                      emb_init=emb_init, mode='train', reg=reg, hierarchy=edges,
                      **data_cnf['model'], **model_cnf['model'])
        opt_params = {
            'lr': model_cnf['train']['learning_rate'],
            'betas': (model_cnf['train']['beta1'], model_cnf['train']['beta2']),
            'weight_decay': model_cnf['train']['weight_decay'],
        }
        model.train(train_loader, valid_loader, opt_params=opt_params,
                    **model_cnf['train'])  # CHANGE: inserted opt_params
        logger.info('Finish Training')

    if mode is None or mode == 'eval':
        logger.info('Loading Test Set')
        mlb = get_mlb(data_cnf['labels_binarizer'])
        labels_num = len(mlb.classes_)
        test_x, _ = get_data(data_cnf['test']['texts'], None)
        logger.info(F'Size of Test Set: {len(test_x)}')

        logger.info('Predicting')
        test_loader = DataLoader(MultiLabelDataset(test_x),
                                 model_cnf['predict']['batch_size'], num_workers=4)
        if model is None:
            model = Model(network=MATCH, labels_num=labels_num, model_path=model_path,
                          emb_init=emb_init, mode='eval',
                          **data_cnf['model'], **model_cnf['model'])
        scores, labels = model.predict(test_loader,
                                       k=model_cnf['predict'].get('k', 100))
        logger.info('Finish Predicting')
        labels = mlb.classes_[labels]
        output_res(data_cnf['output']['res'], F'{model_name}-{data_name}',
                   scores, labels)
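# Note on the hierarchy parsing above: `classes.index(...)` rescans the label
# list for every token. A hypothetical equivalent (not in the original) using a
# dict gives O(1) lookups, which matters with hundreds of thousands of labels:
def _demo_hierarchy_edges():
    classes = ['A', 'B', 'C']
    class_id = {c: i for i, c in enumerate(classes)}
    edges = set()
    for line in ['A B C', 'B C']:  # stand-in for lines of the hierarchy file
        parent, *children = line.split()
        if parent not in class_id:
            continue
        edges.update((class_id[parent], class_id[child])
                     for child in children if child in class_id)
    print(sorted(edges))  # [(0, 1), (0, 2), (1, 2)]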
def train_level(self, level, train_x, train_y, valid_x, valid_y):
    model_cnf, data_cnf = self.model_cnf, self.data_cnf
    if level == 0:
        while not os.path.exists(F'{self.groups_path}-Level-{level}.npy'):
            time.sleep(30)
        groups = np.load(F'{self.groups_path}-Level-{level}.npy')
        train_y, valid_y = self.get_mapping_y(groups, self.labels_num,
                                              train_y, valid_y)
        labels_num = len(groups)
        train_loader = DataLoader(MultiLabelDataset(train_x, train_y),
                                  model_cnf['train'][level]['batch_size'],
                                  num_workers=4, shuffle=True)
        valid_loader = DataLoader(MultiLabelDataset(valid_x, valid_y, training=False),
                                  model_cnf['valid']['batch_size'], num_workers=4)
        model = Model(AttentionRNN, labels_num=labels_num,
                      model_path=F'{self.model_path}-Level-{level}',
                      emb_init=self.emb_init,
                      **data_cnf['model'], **model_cnf['model'])
        if not os.path.exists(model.model_path):
            logger.info(F'Training Level-{level}, Number of Labels: {labels_num}')
            model.train(train_loader, valid_loader, **model_cnf['train'][level])
            model.optimizer = None
            logger.info(F'Finish Training Level-{level}')
        self.models[level] = model
        logger.info(F'Generating Candidates for Level-{level+1}, '
                    F'Number of Labels: {labels_num}, Top: {self.top}')
        train_loader = DataLoader(MultiLabelDataset(train_x),
                                  model_cnf['valid']['batch_size'], num_workers=4)
        return train_y, model.predict(train_loader, k=self.top), \
            model.predict(valid_loader, k=self.top)
    else:
        train_group_y, train_group, valid_group = self.train_level(
            level - 1, train_x, train_y, valid_x, valid_y)
        torch.cuda.empty_cache()

        logger.info('Getting Candidates')
        _, group_labels = train_group
        # np.int was removed in NumPy 1.24; plain int keeps the same behaviour.
        group_candidates = np.empty((len(train_x), self.top), dtype=int)
        for i, labels in tqdm(enumerate(group_labels), leave=False, desc='Parents'):
            ys, ye = train_group_y.indptr[i], train_group_y.indptr[i + 1]
            positive = set(train_group_y.indices[ys:ye])
            if self.top >= len(positive):
                candidates = positive
                for la in labels:
                    if len(candidates) == self.top:
                        break
                    if la not in candidates:
                        candidates.add(la)
            else:
                candidates = set()
                for la in labels:
                    if la in positive:
                        candidates.add(la)
                    if len(candidates) == self.top:
                        break
                if len(candidates) < self.top:
                    candidates = (list(candidates) +
                                  list(positive - candidates))[:self.top]
            group_candidates[i] = np.asarray(list(candidates))

        if level < self.level - 1:
            while not os.path.exists(F'{self.groups_path}-Level-{level}.npy'):
                time.sleep(30)
            groups = np.load(F'{self.groups_path}-Level-{level}.npy')
            train_y, valid_y = self.get_mapping_y(groups, self.labels_num,
                                                  train_y, valid_y)
            labels_num, last_groups = len(groups), self.get_inter_groups(len(groups))
        else:
            groups, labels_num = None, train_y.shape[1]
            last_groups = np.load(F'{self.groups_path}-Level-{level-1}.npy')

        train_loader = DataLoader(XMLDataset(train_x, train_y,
                                             labels_num=labels_num,
                                             groups=last_groups,
                                             group_labels=group_candidates),
                                  model_cnf['train'][level]['batch_size'],
                                  num_workers=4, shuffle=True)
        group_scores, group_labels = valid_group
        valid_loader = DataLoader(XMLDataset(valid_x, valid_y, training=False,
                                             labels_num=labels_num,
                                             groups=last_groups,
                                             group_labels=group_labels,
                                             group_scores=group_scores),
                                  model_cnf['valid']['batch_size'], num_workers=4)
        model = XMLModel(network=FastAttentionRNN, labels_num=labels_num,
                         emb_init=self.emb_init,
                         model_path=F'{self.model_path}-Level-{level}',
                         **data_cnf['model'], **model_cnf['model'])
        if not os.path.exists(model.model_path):
            logger.info(F'Loading parameters of Level-{level} from Level-{level-1}')
            last_model = self.get_last_models(level - 1)
            model.network.module.emb.load_state_dict(
                last_model.module.emb.state_dict())
            model.network.module.lstm.load_state_dict(
                last_model.module.lstm.state_dict())
            model.network.module.linear.load_state_dict(
                last_model.module.linear.state_dict())
            logger.info(F'Training Level-{level}, '
                        F'Number of Labels: {labels_num}, '
                        F'Number of Candidates: {train_loader.dataset.candidates_num}')
            model.train(train_loader, valid_loader, **model_cnf['train'][level])
            model.optimizer = model.state = None
            logger.info(F'Finish Training Level-{level}')
        self.models[level] = model
        if level == self.level - 1:
            return
        logger.info(F'Generating Candidates for Level-{level+1}, '
                    F'Number of Labels: {labels_num}, Top: {self.top}')
        group_scores, group_labels = train_group
        train_loader = DataLoader(XMLDataset(train_x, labels_num=labels_num,
                                             groups=last_groups,
                                             group_labels=group_labels,
                                             group_scores=group_scores),
                                  model_cnf['valid']['batch_size'], num_workers=4)
        return train_y, model.predict(train_loader, k=self.top), \
            model.predict(valid_loader, k=self.top)
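# Worked toy example of the candidate rule in train_level: when `top` covers
# all gold parent groups, keep every positive group and pad with the
# highest-ranked predicted groups until `top` candidates are collected.
# The values below are invented for illustration.
def _demo_candidate_rule():
    top = 4
    positive = {1, 7}               # gold parent groups for one sample
    predicted = [3, 1, 9, 7, 2, 5]  # groups ranked by predicted score
    candidates = set(positive)
    for g in predicted:
        if len(candidates) == top:
            break
        candidates.add(g)
    print(sorted(candidates))       # [1, 3, 7, 9]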
def main(data_cnf, model_cnf, mode, tree_id):
    tree_id = F'-Tree-{tree_id}' if tree_id is not None else ''
    yaml = YAML(typ='safe')
    data_cnf, model_cnf = yaml.load(Path(data_cnf)), yaml.load(Path(model_cnf))
    model, model_name, data_name = None, model_cnf['name'], data_cnf['name']
    model_path = os.path.join(model_cnf['path'],
                              F'{model_name}-{data_name}{tree_id}')
    emb_init = get_word_emb(data_cnf['embedding']['emb_init'])
    logger.info(F'Model Name: {model_name}')

    if mode is None or mode == 'train':
        logger.info('Loading Training and Validation Set')
        train_x, train_labels = get_data(data_cnf['train']['texts'],
                                         data_cnf['train']['labels'])
        if 'size' in data_cnf['valid']:
            random_state = data_cnf['valid'].get('random_state', 1240)
            train_x, valid_x, train_labels, valid_labels = train_test_split(
                train_x, train_labels, test_size=data_cnf['valid']['size'],
                random_state=random_state)
        else:
            valid_x, valid_labels = get_data(data_cnf['valid']['texts'],
                                             data_cnf['valid']['labels'])
        mlb = get_mlb(data_cnf['labels_binarizer'],
                      np.hstack((train_labels, valid_labels)))
        train_y, valid_y = mlb.transform(train_labels), mlb.transform(valid_labels)
        labels_num = len(mlb.classes_)
        logger.info(F'Number of Labels: {labels_num}')
        logger.info(F'Size of Training Set: {len(train_x)}')
        logger.info(F'Size of Validation Set: {len(valid_x)}')

        logger.info('Training')
        if 'cluster' not in model_cnf:
            train_loader = DataLoader(MultiLabelDataset(train_x, train_y),
                                      model_cnf['train']['batch_size'],
                                      shuffle=True, num_workers=4)
            valid_loader = DataLoader(MultiLabelDataset(valid_x, valid_y, training=False),
                                      model_cnf['valid']['batch_size'], num_workers=4)
            model = Model(network=AttentionRNN, labels_num=labels_num,
                          model_path=model_path, emb_init=emb_init,
                          **data_cnf['model'], **model_cnf['model'])
            model.train(train_loader, valid_loader, **model_cnf['train'])
        else:
            model = FastAttentionXML(labels_num, data_cnf, model_cnf, tree_id)
            model.train(train_x, train_y, valid_x, valid_y, mlb)
        logger.info('Finish Training')

    if mode is None or mode == 'eval':
        logger.info('Loading Test Set')
        mlb = get_mlb(data_cnf['labels_binarizer'])
        labels_num = len(mlb.classes_)
        test_x, _ = get_data(data_cnf['test']['texts'], None)
        logger.info(F'Size of Test Set: {len(test_x)}')

        logger.info('Predicting')
        if 'cluster' not in model_cnf:
            test_loader = DataLoader(MultiLabelDataset(test_x),
                                     model_cnf['predict']['batch_size'],
                                     num_workers=4)
            if model is None:
                model = Model(network=AttentionRNN, labels_num=labels_num,
                              model_path=model_path, emb_init=emb_init,
                              **data_cnf['model'], **model_cnf['model'])
            scores, labels = model.predict(test_loader,
                                           k=model_cnf['predict'].get('k', 100))
        else:
            if model is None:
                model = FastAttentionXML(labels_num, data_cnf, model_cnf, tree_id)
            scores, labels = model.predict(test_x)
        logger.info('Finish Predicting')
        labels = mlb.classes_[labels]
        output_res(data_cnf['output']['res'],
                   F'{model_name}-{data_name}{tree_id}', scores, labels)
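# Hypothetical minimal configs (key names follow their usage above; the real
# files carry many more fields) showing the branch main takes: a plain
# AttentionRNN run has no 'cluster' section, while its presence selects the
# FastAttentionXML path.
def _demo_cluster_toggle():
    flat_cnf = {'name': 'AttentionXML', 'path': 'models', 'model': {},
                'train': {'batch_size': 40}, 'valid': {'batch_size': 40},
                'predict': {'batch_size': 40, 'k': 100}}
    tree_cnf = {**flat_cnf, 'cluster': {}}  # contents of 'cluster' elided here
    for cnf in (flat_cnf, tree_cnf):
        print('FastAttentionXML' if 'cluster' in cnf else 'AttentionRNN')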
def spectral_clustering_train(
    data_cnf,
    data_cnf_path,
    model_cnf,
    model_cnf_path,
    emb_init,
    model_path,
    tree_id,
    output_suffix,
    dry_run,
):
    train_xs = []
    valid_xs = []
    train_labels_list = []
    valid_labels_list = []
    train_ys = []
    valid_ys = []
    mlb_list = []
    indices_list = []

    n_clusters = model_cnf['spectral_clustering']['num_clusters']
    n_components = model_cnf['spectral_clustering']['n_components']
    alg = model_cnf['spectral_clustering']['alg']
    size_min = model_cnf['spectral_clustering']['size_min']
    size_max = model_cnf['spectral_clustering']['size_max']

    train_x, train_labels = load_dataset(data_cnf)

    if 'cluster' not in model_cnf:
        mlb = get_mlb(data_cnf['labels_binarizer'], train_labels)
        train_y = mlb.transform(train_labels)

        logger.info('Build label adjacency matrix')
        adj = train_y.T @ train_y
        adj.setdiag(0)
        adj.eliminate_zeros()
        logger.info(f"Sparsity: {adj.count_nonzero() / adj.shape[0] ** 2}")

        clustering = MySpectralClustering(n_clusters=n_clusters,
                                          affinity='precomputed',
                                          n_components=n_components,
                                          n_init=1,
                                          size_min=size_min,
                                          size_max=size_max,
                                          assign_labels=alg,
                                          n_jobs=-1)
        logger.info('Start Spectral Clustering')
        clustering.fit(adj)
        logger.info('Finish Spectral Clustering')

        groups = [[] for _ in range(n_clusters)]
        for i, group in enumerate(clustering.labels_):
            groups[group].append(i)

        splitted_labels = []
        for indices in groups:
            splitted_labels.append(mlb.classes_[indices])

        for labels in splitted_labels:
            indices = get_splitted_samples(labels, train_labels)
            indices_list.append(indices)
            train_xs.append(train_x[indices])
            train_labels_list.append(train_labels[indices])

        if 'size' in data_cnf['valid']:
            for i, (train_x, train_labels) in enumerate(zip(train_xs,
                                                            train_labels_list)):
                valid_size = data_cnf['valid']['size']
                # Fall back to a 20% split when the cluster is too small for a
                # fixed-size validation set.
                if len(train_x) * 0.8 > len(train_x) - valid_size:
                    valid_size = 0.2
                train_x, valid_x, train_labels, valid_labels = train_test_split(
                    train_x, train_labels, test_size=valid_size,
                )
                train_xs[i] = train_x
                train_labels_list[i] = train_labels
                valid_xs.append(valid_x)
                valid_labels_list.append(valid_labels)
        else:
            raise Exception("Setting the validation set explicitly is not "
                            "supported in spectral clustering mode.")

        labels_binarizer_path = data_cnf['labels_binarizer']
        suffix = output_suffix.upper().replace('-', '_')
        for i, labels in enumerate(splitted_labels):
            filename = f"{labels_binarizer_path}_{suffix}_{i}"
            mlb_tree = get_mlb(filename, labels[None, ...], force=True)
            mlb_list.append(mlb_tree)
            logger.info(f"Number of labels of cluster {i}: {len(labels):,}")
            logger.info(f"Number of Training Set of cluster {i}: {len(train_xs[i]):,}")
            logger.info(f"Number of Validation Set of cluster {i}: {len(valid_xs[i]):,}")

            with redirect_stderr(None):
                train_y = mlb_tree.transform(train_labels_list[i])
                valid_y = mlb_tree.transform(valid_labels_list[i])
            train_ys.append(train_y)
            valid_ys.append(valid_y)
    else:
        if 'size' in data_cnf['valid']:
            train_x, valid_x, train_labels, valid_labels = train_test_split(
                train_x, train_labels, test_size=data_cnf['valid']['size'],
            )
        else:
            valid_x, valid_labels = get_data(data_cnf['valid']['texts'],
                                             data_cnf['valid']['labels'])
        mlb = get_mlb(data_cnf['labels_binarizer'], np.hstack((
            train_labels,
            valid_labels,
        )))
        train_y, valid_y = mlb.transform(train_labels), mlb.transform(valid_labels)

    logger.info('Training')
    if 'cluster' not in model_cnf:
        for i, (train_x, train_y, valid_x, valid_y) in enumerate(zip(
                train_xs, train_ys, valid_xs, valid_ys,
        )):
            train_loader = DataLoader(
                MultiLabelDataset(train_x, train_y),
                model_cnf['train']['batch_size'], shuffle=True, num_workers=4)
            valid_loader = DataLoader(
                MultiLabelDataset(valid_x, valid_y, training=False),
                model_cnf['valid']['batch_size'], num_workers=4)
            model = Model(
                network=AttentionRNN, labels_num=len(mlb_list[i].classes_),
                model_path=f'{model_path}-{i}', emb_init=emb_init,
                **data_cnf['model'], **model_cnf['model'])
            if not dry_run:
                logger.info(f"Start Training Cluster {i}")
                model.train(train_loader, valid_loader, **model_cnf['train'])
                logger.info(f"Finish Training Cluster {i}")
            else:
                model.save_model()
    else:
        model = FastAttentionXML(
            len(mlb.classes_), data_cnf, model_cnf, tree_id, output_suffix,
        )
        if not dry_run:
            model.train(train_x, train_y, valid_x, valid_y, mlb)

    log_config(data_cnf_path, model_cnf_path, dry_run)
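# Runnable sketch of the adjacency construction above: with a sparse label
# matrix Y (samples x labels), Y.T @ Y counts label co-occurrences; zeroing the
# diagonal drops self-loops before the size-constrained spectral clustering.
def _demo_label_adjacency():
    import scipy.sparse as sp
    train_y = sp.csr_matrix([[1, 1, 0],
                             [1, 0, 1],
                             [0, 1, 1]])
    adj = (train_y.T @ train_y).tolil()  # LIL avoids a CSR efficiency warning
    adj.setdiag(0)
    adj = adj.tocsr()
    adj.eliminate_zeros()
    print(adj.toarray())  # off-diagonal co-occurrence counts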
def spectral_clustering_eval(
    data_cnf,
    model_cnf,
    data_name,
    model_name,
    model_path,
    emb_init,
    tree_id,
    output_suffix,
    dry_run,
):
    mlb_list = []
    n_clusters = model_cnf['spectral_clustering']['num_clusters']
    labels_binarizer_path = data_cnf['labels_binarizer']
    scores_list = []
    labels_list = []

    logger.info('Loading Test Set')
    test_x, _ = get_data(data_cnf['test']['texts'], None)
    logger.info(F'Size of Test Set: {len(test_x):,}')

    logger.info('Predicting')
    if 'cluster' not in model_cnf:
        suffix = output_suffix.upper().replace('-', '_')
        for i in range(n_clusters):
            filename = f"{labels_binarizer_path}_{suffix}_{i}"
            mlb_tree = get_mlb(filename)
            mlb_list.append(mlb_tree)

        test_loader = DataLoader(
            MultiLabelDataset(test_x),
            model_cnf['predict']['batch_size'], num_workers=4)
        for i, mlb in enumerate(mlb_list):
            logger.info(f"Predicting Cluster {i}")
            labels_num = len(mlb.classes_)
            k = model_cnf['predict'].get('k', 100) // n_clusters
            model = Model(
                network=AttentionRNN, labels_num=labels_num,
                model_path=f'{model_path}-{i}', emb_init=emb_init,
                load_model=True, **data_cnf['model'], **model_cnf['model'])
            scores, labels = model.predict(test_loader, k=k)
            scores_list.append(scores)
            labels_list.append(mlb.classes_[labels])
            logger.info(f"Finish Predicting Cluster {i}")

        # Merge per-cluster predictions, then re-rank each row by score.
        scores = np.hstack(scores_list)
        labels = np.hstack(labels_list)
        i = np.arange(len(scores))[:, None]
        j = np.argsort(scores)[:, ::-1]
        scores = scores[i, j]
        labels = labels[i, j]
    else:
        mlb = get_mlb(data_cnf['labels_binarizer'])
        model = FastAttentionXML(len(mlb.classes_), data_cnf, model_cnf,
                                 tree_id, output_suffix)
        scores, labels = model.predict(test_x, model_cnf['predict'].get('k', 100))
        labels = mlb.classes_[labels]
    logger.info('Finish Predicting')

    score_path, label_path = output_res(data_cnf['output']['res'],
                                        f'{model_name}-{data_name}{tree_id}',
                                        scores, labels, output_suffix)
    log_results(score_path, label_path, dry_run)
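# Standalone demo of the merge step above: per-cluster scores and labels are
# concatenated column-wise, then each row is re-sorted by descending score so
# the combined ranking reads as if a single model had produced it.
def _demo_merge_cluster_predictions():
    import numpy as np
    scores = np.array([[0.9, 0.2, 0.8, 0.5]])
    labels = np.array([['a', 'b', 'c', 'd']])
    i = np.arange(len(scores))[:, None]
    j = np.argsort(scores)[:, ::-1]
    print(scores[i, j], labels[i, j])  # [[0.9 0.8 0.5 0.2]] [['a' 'c' 'd' 'b']]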
def default_train(
    data_cnf,
    data_cnf_path,
    model_cnf,
    model_cnf_path,
    emb_init,
    model_path,
    tree_id,
    output_suffix,
    dry_run,
):
    train_x, train_labels = load_dataset(data_cnf)
    if 'size' in data_cnf['valid']:
        train_x, valid_x, train_labels, valid_labels = train_test_split(
            train_x, train_labels, test_size=data_cnf['valid']['size'],
        )
    else:
        valid_x, valid_labels = get_data(data_cnf['valid']['texts'],
                                         data_cnf['valid']['labels'])
    mlb = get_mlb(data_cnf['labels_binarizer'], np.hstack((
        train_labels,
        valid_labels,
    )))
    freq = mlb.transform(np.hstack([train_labels, valid_labels])).sum(axis=0).A1
    train_y, valid_y = mlb.transform(train_labels), mlb.transform(valid_labels)
    labels_num = len(mlb.classes_)
    logger.info(F'Number of Labels: {labels_num}')
    logger.info(F'Size of Training Set: {len(train_x):,}')
    logger.info(F'Size of Validation Set: {len(valid_x):,}')

    logger.info('Training')
    if 'cluster' not in model_cnf:
        if 'propensity' in data_cnf:
            a = data_cnf['propensity']['a']
            b = data_cnf['propensity']['b']
            pos_weight = get_inv_propensity(train_y, a, b)
        else:
            pos_weight = None
        train_loader = DataLoader(
            MultiLabelDataset(train_x, train_y),
            model_cnf['train']['batch_size'], shuffle=True, num_workers=4)
        valid_loader = DataLoader(
            MultiLabelDataset(valid_x, valid_y, training=False),
            model_cnf['valid']['batch_size'], num_workers=4)
        if 'loss' in model_cnf:
            gamma = model_cnf['loss'].get('gamma', 2.0)
            loss_name = model_cnf['loss']['name']
        else:
            gamma = None
            loss_name = 'bce'
        model = Model(
            network=AttentionRNN, labels_num=labels_num, model_path=model_path,
            emb_init=emb_init, pos_weight=pos_weight, loss_name=loss_name,
            gamma=gamma, freq=freq, **data_cnf['model'], **model_cnf['model'])
        if not dry_run:
            model.train(train_loader, valid_loader, mlb=mlb, **model_cnf['train'])
        else:
            model.save_model()
    else:
        model = FastAttentionXML(labels_num, data_cnf, model_cnf, tree_id,
                                 output_suffix)
        if not dry_run:
            model.train(train_x, train_y, valid_x, valid_y, mlb)

    log_config(data_cnf_path, model_cnf_path, dry_run)
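# get_inv_propensity is defined elsewhere; a common formulation (Jain et al.,
# 2016) is sketched here as an assumption about what it computes: label l is
# weighted 1 + C * (n_l + B)^(-A) with C = (log N - 1) * (B + 1)^A, so rarer
# labels receive larger positive-class weights.
def _demo_inv_propensity(a=0.55, b=1.5):
    import numpy as np
    import scipy.sparse as sp
    train_y = sp.csr_matrix([[1, 0], [1, 1], [1, 0]])
    n = train_y.shape[0]
    freqs = np.asarray(train_y.sum(axis=0)).ravel()
    c = (np.log(n) - 1) * (b + 1) ** a
    print(1 + c * (freqs + b) ** (-a))  # the rarer second label gets more weight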
def main(data_cnf, model_cnf, mode):
    model_name = os.path.split(model_cnf)[1].split(".")[0]
    yaml = YAML(typ='safe')
    data_cnf, model_cnf = yaml.load(Path(data_cnf)), yaml.load(Path(model_cnf))
    # Set the log file location.
    logfile("./logs/logfile_{0}_cornet_{1}_cornet_dim_{2}.log".format(
        model_name, model_cnf['model']['n_cornet_blocks'],
        model_cnf['model']['cornet_dim']))
    model, model_name, data_name = None, model_cnf['name'], data_cnf['name']
    model_path = os.path.join(
        model_cnf['path'],
        F'{model_name}-{data_name}-{model_cnf["model"]["n_cornet_blocks"]}-{model_cnf["model"]["cornet_dim"]}'
    )
    emb_init = get_word_emb(data_cnf['embedding']['emb_init'])
    logger.info(F'Model Name: {model_name}')
    # summary(model_dict[model_name])

    if mode is None or mode == 'train':
        logger.info('Loading Training and Validation Set')
        train_x, train_labels = get_data(data_cnf['train']['texts'],
                                         data_cnf['train']['labels'])
        if 'size' in data_cnf['valid']:
            random_state = data_cnf['valid'].get('random_state', 1240)
            train_x, valid_x, train_labels, valid_labels = train_test_split(
                train_x, train_labels, test_size=data_cnf['valid']['size'],
                random_state=random_state)
        else:
            valid_x, valid_labels = get_data(data_cnf['valid']['texts'],
                                             data_cnf['valid']['labels'])
        mlb = get_mlb(data_cnf['labels_binarizer'],
                      np.hstack((train_labels, valid_labels)))
        train_y, valid_y = mlb.transform(train_labels), mlb.transform(valid_labels)
        labels_num = len(mlb.classes_)
        logger.info(F'Number of Labels: {labels_num}')
        logger.info(F'Size of Training Set: {len(train_x)}')
        logger.info(F'Size of Validation Set: {len(valid_x)}')

        logger.info('Training')
        train_loader = DataLoader(MultiLabelDataset(train_x, train_y),
                                  model_cnf['train']['batch_size'],
                                  shuffle=True, num_workers=4)
        valid_loader = DataLoader(MultiLabelDataset(valid_x, valid_y, training=True),
                                  model_cnf['valid']['batch_size'], num_workers=4)
        if 'gpipe' not in model_cnf:
            model = Model(network=model_dict[model_name], labels_num=labels_num,
                          model_path=model_path, emb_init=emb_init,
                          **data_cnf['model'], **model_cnf['model'])
        else:
            model = GPipeModel(model_name, labels_num=labels_num,
                               model_path=model_path, emb_init=emb_init,
                               **data_cnf['model'], **model_cnf['model'])
        loss, p1, p5 = model.train(train_loader, valid_loader, **model_cnf['train'])
        np.save(
            model_cnf['np_loss'] + "{0}_cornet_{1}_cornet_dim_{2}.npy".format(
                model_name, model_cnf['model']['n_cornet_blocks'],
                model_cnf['model']['cornet_dim']), loss)
        np.save(
            model_cnf['np_p1'] + "{0}_cornet_{1}_cornet_dim_{2}.npy".format(
                model_name, model_cnf['model']['n_cornet_blocks'],
                model_cnf['model']['cornet_dim']), p1)
        np.save(
            model_cnf['np_p5'] + "{0}_cornet_{1}_cornet_dim_{2}.npy".format(
                model_name, model_cnf['model']['n_cornet_blocks'],
                model_cnf['model']['cornet_dim']), p5)
        logger.info('Finish Training')

    if mode is None or mode == 'eval':
        logger.info('Loading Test Set')
        # logger.info takes a single message string, not print-style arguments.
        logger.info(F'Model Path: {model_path}')
        mlb = get_mlb(data_cnf['labels_binarizer'])
        labels_num = len(mlb.classes_)
        test_x, _ = get_data(data_cnf['test']['texts'], None)
        logger.info(F'Size of Test Set: {len(test_x)}')

        logger.info('Predicting')
        test_loader = DataLoader(MultiLabelDataset(test_x),
                                 model_cnf['predict']['batch_size'], num_workers=4)
        if 'gpipe' not in model_cnf:
            if model is None:
                model = Model(network=model_dict[model_name], labels_num=labels_num,
                              model_path=model_path, emb_init=emb_init,
                              **data_cnf['model'], **model_cnf['model'])
        else:
            if model is None:
                model = GPipeModel(model_name, labels_num=labels_num,
                                   model_path=model_path, emb_init=emb_init,
                                   **data_cnf['model'], **model_cnf['model'])
        scores, labels = model.predict(test_loader,
                                       k=model_cnf['predict'].get('k', 3801))
        logger.info('Finish Predicting')
        labels = mlb.classes_[labels]
        output_res(data_cnf['output']['res'], F'{model_name}-{data_name}',
                   scores, labels)
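# The model_dict registry used above maps a config name to a network class; a
# stand-in (the class names here are placeholders) of the same lookup pattern:
def _demo_model_registry():
    class AttentionRNN: ...
    class CorNetAttentionRNN: ...
    model_dict = {'AttentionRNN': AttentionRNN,
                  'CorNetAttentionRNN': CorNetAttentionRNN}
    network = model_dict['CorNetAttentionRNN']
    print(network.__name__)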
def splitting_head_tail_train(
    data_cnf,
    data_cnf_path,
    model_cnf,
    model_cnf_path,
    emb_init,
    model_path,
    tree_id,
    output_suffix,
    dry_run,
    split_ratio,
):
    train_x, train_labels = load_dataset(data_cnf)

    logger.info(f'Split head and tail labels: {split_ratio}')
    head_labels, head_labels_i, tail_labels, tail_labels_i = get_head_tail_labels(
        train_labels,
        split_ratio,
    )
    train_h_x = train_x[head_labels_i]
    train_h_labels = train_labels[head_labels_i]
    train_t_x = train_x[tail_labels_i]
    train_t_labels = train_labels[tail_labels_i]

    if 'size' in data_cnf['valid']:
        valid_size = data_cnf['valid']['size']
        train_h_x, valid_h_x, train_h_labels, valid_h_labels = train_test_split(
            train_h_x, train_h_labels,
            test_size=valid_size if len(train_h_x) > 2 * valid_size else 0.1,
        )
        train_t_x, valid_t_x, train_t_labels, valid_t_labels = train_test_split(
            train_t_x, train_t_labels,
            test_size=valid_size if len(train_t_x) > 2 * valid_size else 0.1,
        )
    else:
        valid_x, valid_labels = get_data(data_cnf['valid']['texts'],
                                         data_cnf['valid']['labels'])
        valid_h_labels_i, valid_t_labels_i = get_head_tail_samples(
            head_labels,
            tail_labels,
            valid_labels,
        )
        # Keep each split aligned with its own indices, and take labels from
        # the label array rather than the text array.
        valid_h_x = valid_x[valid_h_labels_i]
        valid_t_x = valid_x[valid_t_labels_i]
        valid_h_labels = valid_labels[valid_h_labels_i]
        valid_t_labels = valid_labels[valid_t_labels_i]

    labels_binarizer_path = data_cnf['labels_binarizer']
    mlb_h = get_mlb(f"{labels_binarizer_path}_h_{split_ratio}", head_labels[None, ...])
    mlb_t = get_mlb(f"{labels_binarizer_path}_t_{split_ratio}", tail_labels[None, ...])

    with redirect_stderr(None):
        train_h_y = mlb_h.transform(train_h_labels)
        valid_h_y = mlb_h.transform(valid_h_labels)
        train_t_y = mlb_t.transform(train_t_labels)
        valid_t_y = mlb_t.transform(valid_t_labels)

    logger.info(f'Number of Head Labels: {len(head_labels):,}')
    logger.info(f'Number of Tail Labels: {len(tail_labels):,}')
    logger.info(f'Size of Head Training Set: {len(train_h_x):,}')
    logger.info(f'Size of Head Validation Set: {len(valid_h_x):,}')
    logger.info(f'Size of Tail Training Set: {len(train_t_x):,}')
    logger.info(f'Size of Tail Validation Set: {len(valid_t_x):,}')

    logger.info('Training')
    if 'cluster' not in model_cnf:
        train_h_loader = DataLoader(MultiLabelDataset(train_h_x, train_h_y),
                                    model_cnf['train']['batch_size'],
                                    shuffle=True, num_workers=4)
        valid_h_loader = DataLoader(MultiLabelDataset(valid_h_x, valid_h_y, training=False),
                                    model_cnf['valid']['batch_size'], num_workers=4)
        head_model = Model(network=AttentionRNN, labels_num=len(head_labels),
                           model_path=f'{model_path}-head', emb_init=emb_init,
                           **data_cnf['model'], **model_cnf['model'])
        if not dry_run:
            logger.info('Training Head Model')
            head_model.train(train_h_loader, valid_h_loader, **model_cnf['train'])
            logger.info('Finish Training Head Model')
        else:
            head_model.save_model()

        train_t_loader = DataLoader(MultiLabelDataset(train_t_x, train_t_y),
                                    model_cnf['train']['batch_size'],
                                    shuffle=True, num_workers=4)
        valid_t_loader = DataLoader(MultiLabelDataset(valid_t_x, valid_t_y, training=False),
                                    model_cnf['valid']['batch_size'], num_workers=4)
        tail_model = Model(network=AttentionRNN, labels_num=len(tail_labels),
                           model_path=f'{model_path}-tail', emb_init=emb_init,
                           **data_cnf['model'], **model_cnf['model'])
        if not dry_run:
            logger.info('Training Tail Model')
            tail_model.train(train_t_loader, valid_t_loader, **model_cnf['train'])
            logger.info('Finish Training Tail Model')
        else:
            tail_model.save_model()
    else:
        raise Exception("FastAttention is not currently supported for "
                        "the split head and tail dataset")

    log_config(data_cnf_path, model_cnf_path, dry_run)
    return head_model, tail_model, head_labels, tail_labels
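# get_head_tail_labels is defined elsewhere; this is only a hypothetical sketch
# consistent with how it is used above: the most frequent labels (a
# `split_ratio` share of the total) form the head, the rest the tail, and each
# side reports the samples containing at least one of its labels.
def _demo_head_tail_split(split_ratio=0.34):
    import numpy as np
    from collections import Counter
    train_labels = [['a', 'b'], ['a'], ['c']]
    freq = Counter(l for labels in train_labels for l in labels)
    ranked = [l for l, _ in freq.most_common()]
    n_head = max(1, int(len(ranked) * split_ratio))
    head, tail = set(ranked[:n_head]), set(ranked[n_head:])
    head_i = np.array([i for i, ls in enumerate(train_labels) if head & set(ls)])
    tail_i = np.array([i for i, ls in enumerate(train_labels) if tail & set(ls)])
    print(sorted(head), head_i, sorted(tail), tail_i)  # ['a'] [0 1] ['b', 'c'] [0 2]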
def splitting_head_tail_eval(
    data_cnf,
    model_cnf,
    data_name,
    model_name,
    model_path,
    emb_init,
    tree_id,
    output_suffix,
    dry_run,
    split_ratio,
    head_labels,
    tail_labels,
    head_model,
    tail_model,
):
    logger.info('Loading Test Set')
    mlb = get_mlb(data_cnf['labels_binarizer'])
    labels_num = len(mlb.classes_)
    test_x, _ = get_data(data_cnf['test']['texts'], None)
    logger.info(F'Size of Test Set: {len(test_x):,}')

    labels_binarizer_path = data_cnf['labels_binarizer']
    mlb_h = get_mlb(f"{labels_binarizer_path}_h_{split_ratio}")
    mlb_t = get_mlb(f"{labels_binarizer_path}_t_{split_ratio}")

    if head_labels is None:
        train_x, train_labels = get_data(data_cnf['train']['texts'],
                                         data_cnf['train']['labels'])
        head_labels, _, tail_labels, _ = get_head_tail_labels(
            train_labels,
            split_ratio,
        )

    # Column indices of the head/tail labels within the full binarizer.
    h_labels_i = np.nonzero(mlb.transform(head_labels[None, ...]).toarray().ravel())[0]
    t_labels_i = np.nonzero(mlb.transform(tail_labels[None, ...]).toarray().ravel())[0]

    logger.info('Predicting')
    if 'cluster' not in model_cnf:
        test_loader = DataLoader(MultiLabelDataset(test_x),
                                 model_cnf['predict']['batch_size'], num_workers=4)
        if head_model is None:
            head_model = Model(network=AttentionRNN, labels_num=len(head_labels),
                               model_path=f'{model_path}-head', emb_init=emb_init,
                               load_model=True,
                               **data_cnf['model'], **model_cnf['model'])
        logger.info('Predicting Head Model')
        h_k = model_cnf['predict'].get('top_head_k', 30)
        scores_h, labels_h = head_model.predict(test_loader, k=h_k)
        labels_h = mlb_h.classes_[labels_h]
        logger.info('Finish Predicting Head Model')

        if tail_model is None:
            tail_model = Model(network=AttentionRNN, labels_num=len(tail_labels),
                               model_path=f'{model_path}-tail', emb_init=emb_init,
                               load_model=True,
                               **data_cnf['model'], **model_cnf['model'])
        logger.info('Predicting Tail Model')
        t_k = model_cnf['predict'].get('top_tail_k', 70)
        scores_t, labels_t = tail_model.predict(test_loader, k=t_k)
        labels_t = mlb_t.classes_[labels_t]
        logger.info('Finish Predicting Tail Model')

        # Concatenate head and tail predictions, then re-rank each row by score.
        scores = np.c_[scores_h, scores_t]
        labels = np.c_[labels_h, labels_t]
        i = np.arange(len(scores))[:, None]
        j = np.argsort(scores)[:, ::-1]
        scores = scores[i, j]
        labels = labels[i, j]
    else:
        raise Exception("FastAttention is not currently supported for "
                        "the split head and tail dataset")
    logger.info('Finish Predicting')

    score_path, label_path = output_res(data_cnf['output']['res'],
                                        f'{model_name}-{data_name}{tree_id}',
                                        scores, labels, output_suffix)
    log_results(score_path, label_path, dry_run)