def train_embed(data_dir, params, model_name):
    # Load hyperparameters
    embedding_dim = params['embedding_dim']
    batch_size = params['batch_size']
    lr = params['lr']
    weight_decay = params['weight_decay']
    # warmup and lr_decay_every are fixed here instead of using tuned values
    warmup = 350
    lr_decay_every = 2
    lr_decay_rate = params['lr_decay_rate']
    if model_name == 'SparseTransE':
        alpha = params['alpha']

    # Load data
    dataset = AmazonDataset(data_dir, model_name=model_name)
    relation_size = len(set(list(dataset.triplet_df['relation'].values)))
    entity_size = len(dataset.entity_list)

    if model_name == 'TransE':
        model = TransE(int(embedding_dim), relation_size, entity_size).to(device)
    elif model_name == 'SparseTransE':
        model = SparseTransE(int(embedding_dim), relation_size, entity_size,
                             alpha=alpha).to(device)

    iterater = TrainIterater(batch_size=int(batch_size), data_dir=data_dir,
                             model_name=model_name)
    iterater.iterate_epoch(model, lr=lr, epoch=3000, weight_decay=weight_decay,
                           warmup=warmup, lr_decay_rate=lr_decay_rate,
                           lr_decay_every=lr_decay_every, eval_every=1e+5,
                           early_stop=True)

    return model
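# For reference, TransE scores a triplet (head, relation, tail) by the distance
# ||h + r - t||: a valid triplet should have a small score. A minimal scoring
# sketch follows; it is NOT the project's TransE class, just an illustration of
# the idea, assuming entity/relation embeddings of the same dimension.
import torch
import torch.nn as nn

class TransESketch(nn.Module):
    def __init__(self, embedding_dim, relation_size, entity_size):
        super().__init__()
        self.entity_embed = nn.Embedding(entity_size, embedding_dim)
        self.relation_embed = nn.Embedding(relation_size, embedding_dim)

    def forward(self, head, relation, tail):
        # Score = L2 distance between (h + r) and t; lower is better.
        h = self.entity_embed(head)
        r = self.relation_embed(relation)
        t = self.entity_embed(tail)
        return torch.norm(h + r - t, p=2, dim=-1)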
def objective(trial):
    start = time.time()

    import gc
    gc.collect()

    # Two validation splits; the final objective is their average score
    data_dir = ['../' + data_path + '/valid1', '../' + data_path + '/valid2']
    score_sum = 0

    # Hyperparameter search space
    embedding_dim = trial.suggest_discrete_uniform('embedding_dim', 16, 128, 16)
    alpha = trial.suggest_loguniform('alpha', 1e-6, 1e-2)  # SparseTransE only
    batch_size = trial.suggest_int('batch_size', 128, 512, 128)
    lr = trial.suggest_loguniform('lr', 1e-4, 1e-2)
    weight_decay = trial.suggest_loguniform('weight_decay', 1e-6, 1e-2)
    warmup = trial.suggest_int('warmup', 10, 100)
    # lr_decay_every is fixed rather than tuned
    lr_decay_every = 2
    lr_decay_rate = trial.suggest_uniform('lr_decay_rate', 0.5, 1)

    for dir_path in data_dir:
        # Load data
        dataset = AmazonDataset(dir_path, model_name='SparseTransE')
        relation_size = len(set(list(dataset.triplet_df['relation'].values)))
        entity_size = len(dataset.entity_list)

        model = SparseTransE(int(embedding_dim), relation_size, entity_size,
                             alpha=alpha).to(device)

        iterater = TrainIterater(batch_size=int(batch_size), data_dir=dir_path,
                                 model_name='SparseTransE')
        score = iterater.iterate_epoch(model, lr=lr, epoch=3000,
                                       weight_decay=weight_decay, warmup=warmup,
                                       lr_decay_rate=lr_decay_rate,
                                       lr_decay_every=lr_decay_every,
                                       eval_every=1e+5, early_stop=False)

        score_sum += score
        torch.cuda.empty_cache()

    mi, sec = time_since(time.time() - start)
    print('{}m{}sec'.format(mi, sec))

    # Negate so that minimizing the objective maximizes the average score
    return -1 * score_sum / 2
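# Since objective() returns the negated average validation score, a study that
# minimizes (Optuna's default direction) effectively maximizes the score.
# A minimal sketch of running the search; n_trials=50 is an assumed budget,
# and load_params() below presumably retrieves the tuned values.
import optuna

study = optuna.create_study()
study.optimize(objective, n_trials=50)
print(study.best_params)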
params = load_params()
print(params)

import gc
gc.collect()

# Load data
data_dir = '../' + data_path + '/test/'
dataset = AmazonDataset(data_dir, model_name='SparseTransE')
relation_size = len(set(list(dataset.triplet_df['relation'].values)))
entity_size = len(dataset.entity_list)

embedding_dim = params['embedding_dim']
alpha = params['alpha']
model = SparseTransE(int(embedding_dim), relation_size, entity_size,
                     alpha=alpha).to(device)

batch_size = params['batch_size']
iterater = TrainIterater(batch_size=int(batch_size), data_dir=data_dir,
                         model_name='SparseTransE')

lr = params['lr']
weight_decay = params['weight_decay']
warmup = 350
lr_decay_every = 2
lr_decay_rate = params['lr_decay_rate']
# Final training on the test split, with the same settings as train_embed
score = iterater.iterate_epoch(model, lr=lr, epoch=3000,
                               weight_decay=weight_decay, warmup=warmup,
                               lr_decay_rate=lr_decay_rate,
                               lr_decay_every=lr_decay_every, eval_every=1e+5,
                               early_stop=True)
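# A sketch of persisting the trained embeddings for later reuse; the filename
# is an assumption, not part of the original pipeline.
torch.save(model.state_dict(), 'sparse_transe.pt')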