def main(args):
    datamodule = LanguageDataModule(root=args.dataset_path,
                                    languages=args.languages,
                                    batch_size=args.batch_size,
                                    num_workers=args.num_workers)
    model = LanguageModel(
        # reduced channel counts give a small, fast network for local
        # debugging; the full-size values are in the trailing comments
        # layers=14,  # 10
        # blocks=1,  # 4
        skip_channels=32,  # 256
        end_channels=32,  # 256
    )
    ckpt = torch.load(args.ckpt_path)
    model.load_state_dict(ckpt['state_dict'])
    trainer = pl.Trainer(
        # comment out the GPU options to run on CPU for local testing
        gpus=args.gpus,
        auto_select_gpus=True,
        # distributed_backend='ddp',
        benchmark=True,
        terminate_on_nan=True,
    )
    datamodule.setup()
    # trainer.fit(model, datamodule)
    results = trainer.test(model, datamodule.test_dataloader())
    return results
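# A minimal command-line entry point for main() above, as a sketch and
# assuming main() lives in its own script: the flag names simply mirror the
# args attributes main() reads (dataset_path, languages, batch_size,
# num_workers, ckpt_path, gpus); the defaults are illustrative assumptions,
# not values taken from the repo.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='Evaluate a LanguageModel checkpoint on the test set')
    parser.add_argument('--dataset_path', required=True)
    parser.add_argument('--ckpt_path', required=True)
    parser.add_argument('--languages', nargs='+', default=['en'])  # assumed
    parser.add_argument('--batch_size', type=int, default=32)  # assumed
    parser.add_argument('--num_workers', type=int, default=4)  # assumed
    parser.add_argument('--gpus', type=int, default=1)  # assumed
    main(parser.parse_args())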
def model_load(path, model=None, optimizer=None):
    config = LMConfig(os.path.join(path, 'config.json'))
    if model is None:
        model_to_load = LanguageModel(config)
    else:
        # re-initialise the supplied model with the saved config
        model_to_load = get_model(model)
        model_to_load.__init__(config)
    # map_location returns the storage unchanged, so tensors load onto CPU
    model_state_dict = torch.load(open(os.path.join(path, 'model.pt'), 'rb'),
                                  map_location=lambda s, l: s)
    model_to_load.load_state_dict(model_state_dict)
    if optimizer:
        optimizer_state_dict = torch.load(
            open(os.path.join(path, 'optimizer.pt'), 'rb'),
            map_location=lambda s, l: s)
        optimizer.load_state_dict(optimizer_state_dict)
    return model_to_load
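# Example use of model_load(), as a sketch: 'runs/lm-base' is an illustrative
# checkpoint directory with the config.json / model.pt / optimizer.pt layout
# the function expects. Passing an optimizer additionally restores
# optimizer.pt into it in place, which is useful when resuming training.
model = model_load('runs/lm-base')
model.eval()  # inference mode for evaluation or generation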
def freestyle(loc):  # TODO
    # load settings
    model_dir = Path(loc)
    settings = pickle.load(open(model_dir / 'settings.pkl', 'rb'))
    print(settings)
    cell = settings['cell']
    hidden_size = settings['hidden_size']
    token = settings['token']
    small = settings['small']
    how_many = 100  # tokens to generate per prompt

    # vocabulary and, for word tokens, the embedding matrix
    vocab = generate.get_vocab(token, small)
    if token == 'word':
        emb = generate.get_embedding('word2vec')
        input_size = emb.vectors.shape[1]
        output_size = emb.vectors.shape[0]
    elif token == 'character':
        emb = None
        input_size = vocab.size
        output_size = vocab.size

    # load the latest checkpoint (sorted so that [-1] is the newest)
    fnames = sorted(os.listdir(model_dir / 'checkpoints'))
    fname = fnames[-1]
    model = LanguageModel(cell, input_size, hidden_size, output_size)
    model.load_state_dict(torch.load(model_dir / 'checkpoints' / fname))
    model.eval()

    # generate completions for a few seed sentences over a temperature sweep
    sents = [
        'The Standard ', 'non-abelian', 'silicon pixel detector',
        'estimate the', '[23] ATLAS'
    ]
    temperatures = [0.01 + 0.1 * i for i in range(11)]
    eval_stream = model_dir / 'evaluate_stream.txt'
    for temperature in temperatures:
        txt = '\nTemperature = {}'.format(temperature)
        utils.report(txt, eval_stream)
        for sent in sents:
            txt = generate.compose(model, vocab, emb, sent, temperature,
                                   how_many)
            utils.report(txt, eval_stream)
def plot_switch_prob(loc):
    # load settings
    model_dir = Path(loc)
    settings = pickle.load(open(model_dir / 'settings.pkl', 'rb'))
    cell = settings['cell']
    hidden_size = settings['hidden_size']
    token = settings['token']
    small = settings['small']
    max_len = settings['max_len']

    # vocabulary and, for word tokens, the embedding matrix
    vocab = generate.get_vocab(token, small)
    if token == 'word':
        emb = generate.get_embedding('word2vec')
        input_size = emb.vectors.shape[1]
        output_size = emb.vectors.shape[0]
    elif token == 'character':
        emb = None
        input_size = vocab.size
        output_size = vocab.size

    # load the final model (sorted so that [-1] is the newest checkpoint)
    fnames = sorted(os.listdir(model_dir / 'checkpoints'))
    fname = fnames[-1]
    model = LanguageModel(cell, input_size, hidden_size, output_size)
    model.load_state_dict(torch.load(model_dir / 'checkpoints' / fname))
    model.eval()

    # prepare the base and replacement batches
    N = 100
    gen = generate.generate('valid', token=token, max_len=max_len,
                            small=small, batch_size=N)
    base_batch, _ = next(gen)
    repl_batch, _ = next(gen)

    # average switch probability over the batch at each keep-depth
    depths = [i for i in range(max_len)]
    switch_probs = [
        compute_switch_prob(model, base_batch, repl_batch, keep_depth, vocab,
                            emb) for keep_depth in depths
    ]

    # make the plot, with a 1% reference line
    fig, ax = plt.subplots()
    ax.plot(depths, switch_probs, 'tomato')
    ax.plot(depths, [0.01] * len(depths), 'k')
    ax.set_yscale('log')
    ax.set_ylim(0.001, 1)
    ax.set_xlim(0, max_len)
    ax.set_title('Probability of switching predicted character\n{}'.format(
        model_dir.name), fontsize=7)
    ax.set_xlabel('sequence keep-depth')
    ax.set_ylabel('Probability')
    ax.grid()
    plt.savefig(model_dir / 'SwitchProbability.pdf')
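# compute_switch_prob() is called above but not defined in this excerpt. The
# sketch below is one plausible reading, inferred from the axis labels: keep
# the last keep_depth tokens of each base sequence, take the earlier tokens
# from the replacement batch, and report the fraction of sequences whose
# predicted next character changes. The splice direction and the encoding
# helpers reused from plot_losses() below are assumptions, not repo code.
def compute_switch_prob(model, base_batch, repl_batch, keep_depth, vocab, emb):
    cut = base_batch.shape[1] - keep_depth
    spliced = np.concatenate(
        [repl_batch[:, :cut], base_batch[:, cut:]], axis=1)
    preds = []
    for batch in (base_batch, spliced):
        # encode exactly as in plot_losses()
        if emb is None:
            x = generate.one_hot_encode(batch, vocab)
        else:
            x = generate.w2v_encode(batch, emb, vocab)
        with torch.no_grad():
            outputs = model(torch.Tensor(x))
        preds.append(torch.max(outputs, 1)[1])
    # fraction of sequences whose argmax prediction switched
    return float((preds[0] != preds[1]).float().mean())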
def plot_losses(loc):
    # load settings
    model_dir = Path(loc)
    settings = pickle.load(open(model_dir / 'settings.pkl', 'rb'))
    cell = settings['cell']
    hidden_size = settings['hidden_size']
    token = settings['token']
    small = settings['small']
    max_len = settings['max_len']
    n_epochs = settings['n_epochs']
    n_saves = settings['n_saves']
    criterion = nn.CrossEntropyLoss()

    # vocabulary and, for word tokens, the embedding matrix
    vocab = generate.get_vocab(token, small)
    if token == 'word':
        emb = generate.get_embedding('word2vec')
        input_size = emb.vectors.shape[1]
        output_size = emb.vectors.shape[0]
    elif token == 'character':
        emb = None
        input_size = vocab.size
        output_size = vocab.size

    # load every checkpoint, in training order
    models = []
    for fname in sorted(os.listdir(model_dir / 'checkpoints')):
        model = LanguageModel(cell, input_size, hidden_size, output_size)
        model.load_state_dict(torch.load(model_dir / 'checkpoints' / fname))
        model.eval()
        models.append(model)

    # prepare one large training batch and one large validation batch
    N = 10000
    splits = ['train', 'valid']
    gens = {
        split: generate.generate(split, token=token, max_len=max_len,
                                 small=small, batch_size=N)
        for split in splits
    }
    batch, labels = {}, {}
    for split in splits:
        b, l = next(gens[split])
        # one-hot encode characters, or embed words
        if token == 'character':
            b = generate.one_hot_encode(b, vocab)
        elif token == 'word':
            b = generate.w2v_encode(b, emb, vocab)
        batch[split], labels[split] = torch.Tensor(b), torch.Tensor(l).long()

    # evaluate every checkpoint on both splits
    loss = {split: [] for split in splits}
    acc = {split: [] for split in splits}
    for i, model in enumerate(models):
        t0 = time.time()
        print(i)
        for split in splits:
            with torch.no_grad():
                outputs = model(batch[split])
            # loss
            l = criterion(outputs, labels[split])
            loss[split].append(float(l))
            # accuracy
            _, preds = torch.max(outputs, 1)
            a = (preds == labels[split]).sum() / float(N)
            acc[split].append(float(a))
        print('{:2.2f}s'.format(time.time() - t0))

    # record the best accuracy per split
    for split in splits:
        with open(model_dir / 'best_{}_acc.txt'.format(split), 'w') as handle:
            best = max(acc[split])
            handle.write('{}\n'.format(best))

    # plot both quantities
    for quantity, description in zip([loss, acc], ['Loss', 'Accuracy']):
        fig, ax = plt.subplots()
        for split in splits:
            # checkpoints are saved n_saves times per epoch
            xs = (1 + np.arange(len(quantity[split]))) / n_saves
            ax.plot(xs, quantity[split], label=split)
        ax.set_xlabel('Training epoch')
        if n_epochs > 1:
            ax.set_xlabel('Epoch')
        ax.set_ylabel(description)
        upper = ax.get_ylim()[1] if description == 'Loss' else 1
        ax.set_ylim(0, upper)
        ax.set_xlim(0, ax.get_xlim()[1])
        ax.set_title(model_dir.name, fontsize=7)
        ax.legend()
        ax.grid(alpha=0.5, which='both')
        plt.savefig(model_dir / '{}.pdf'.format(description))
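# A small driver tying the evaluation helpers together, as a sketch and
# assuming these functions live in one module: the default run directory is
# illustrative, and the directory is expected to hold the settings.pkl and
# checkpoints/ layout the functions above rely on.
if __name__ == '__main__':
    import sys

    run_dir = sys.argv[1] if len(sys.argv) > 1 else 'runs/char-lstm'  # assumed
    plot_losses(run_dir)
    plot_switch_prob(run_dir)
    freestyle(run_dir)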