def save_results(model, train_loss, dev_loss, train_size, dev_size, results_fname):
    """Write one CSV with model hyper-parameters, losses, and dataset sizes.

    Args:
        model: trained model exposing alphabet_size, embedding_size,
            hidden_size, nlayers, and dropout_p attributes.
        train_loss: final loss on the training set.
        dev_loss: final loss on the dev set.
        train_size: number of training examples.
        dev_size: number of dev examples.
        results_fname: path of the CSV file to write.
    """
    header = ['alphabet_size', 'embedding_size', 'hidden_size', 'nlayers',
              'dropout_p', 'train_loss', 'dev_loss',
              'train_size', 'dev_size']
    row = [model.alphabet_size, model.embedding_size, model.hidden_size,
           model.nlayers, model.dropout_p, train_loss, dev_loss,
           train_size, dev_size]
    util.write_csv(results_fname, [header, row])
def save_results(model, train_loss, dev_loss, test_loss, results_fname):
    """Write one CSV with the model name, losses, alphabet size, and args.

    Args:
        model: trained model exposing name, alphabet_size, and get_args().
        train_loss: final loss on the training set.
        dev_loss: final loss on the dev set.
        test_loss: final loss on the test set.
        results_fname: path of the CSV file to write.
    """
    # Copy before deleting: the original mutated the dict returned by
    # model.get_args() in place, which may be the model's internal state.
    args = dict(model.get_args())
    del args['alphabet']
    header = ['name', 'train_loss', 'dev_loss', 'test_loss',
              'alphabet_size'] + list(args.keys())
    row = [model.name, train_loss, dev_loss, test_loss,
           model.alphabet_size] + list(args.values())
    util.write_csv(results_fname, [header, row])
def get_p_values(keep_eos, args, model):
    """Run the permutation analysis for one model and dump p-values to TSV.

    Args:
        keep_eos: whether end-of-sequence tokens were kept; also used in the
            output file name.
        args: namespace with checkpoints_path, n_permutations, and dataset.
        model: name/identifier of the model to analyse.
    """
    print('\nRunning model: %s - %s' % (model, str(keep_eos)))
    losses, y_values, lengths = get_results(
        args.checkpoints_path, keep_eos=keep_eos, models=[model])
    results = analyse_languages(
        losses, y_values, lengths,
        model_type=model, n_permutations=args.n_permutations)
    fname = '%s_%s__%s--%d.tsv' % (
        args.dataset, model, str(keep_eos), args.n_permutations)
    util.write_csv('results/p_values/bin--%s' % fname, results)
def save_results(model, train_results, dev_results, test_results, results_fname):
    """Write one CSV with model parameters and loss/acc/norm per split.

    Args:
        model: trained model exposing print_param_names() and print_params().
        train_results: dict with 'loss', 'acc', and 'norm' for the train set.
        dev_results: dict with 'loss', 'acc', and 'norm' for the dev set.
        test_results: dict with 'loss', 'acc', and 'norm' for the test set.
        results_fname: path of the CSV file to write.
    """
    split_results = [('train', train_results), ('dev', dev_results),
                     ('test', test_results)]
    # Column order: all losses, then all accuracies, then all norms,
    # each in train/dev/test order.
    metrics = ('loss', 'acc', 'norm')
    header = model.print_param_names() + [
        '%s_%s' % (split, metric)
        for metric in metrics for split, _ in split_results]
    row = model.print_params() + [
        results[metric]
        for metric in metrics for _, results in split_results]
    util.write_csv(results_fname, [header, row])
def save_results(model, train_results, dev_results, test_results, results_fname):
    """Write one CSV with model hyper-parameters and loss/acc per split.

    Args:
        model: trained model exposing n_classes, embedding_size, hidden_size,
            nlayers, and dropout_p attributes.
        train_results: dict with 'loss' and 'acc' for the train set.
        dev_results: dict with 'loss' and 'acc' for the dev set.
        test_results: dict with 'loss' and 'acc' for the test set.
        results_fname: path of the CSV file to write.
    """
    hyper_names = ['n_classes', 'embedding_size', 'hidden_size',
                   'nlayers', 'dropout_p']
    hyper_values = [getattr(model, name) for name in hyper_names]
    split_results = [('train', train_results), ('dev', dev_results),
                     ('test', test_results)]
    # Column order: all losses first, then all accuracies (train/dev/test).
    metric_names = ['%s_%s' % (split, metric)
                    for metric in ('loss', 'acc') for split, _ in split_results]
    metric_values = [results[metric]
                     for metric in ('loss', 'acc') for _, results in split_results]
    util.write_csv(results_fname,
                   [hyper_names + metric_names, hyper_values + metric_values])
def save_results(model, atural_code_avg, permuted_natural_avg, two_stage_avg,
                 natural_correlation, permuted_correlation, two_stage_correlation,
                 alphabet_size, sentences, results_fname, test):
    """Write one row of code-length averages and correlations to a CSV file,
    emitting a header row first when the file is missing or empty.

    NOTE(review): `atural_code_avg` looks like a typo for `natural_code_avg`
    (the header column is spelled 'natural_code_avg'); kept as-is because
    renaming the parameter would break any keyword-argument callers — confirm
    and fix together with the call sites.

    NOTE(review): writing the header only when the file is empty suggests
    util.write_csv appends rather than overwrites — verify.
    """
    print('Saving to', results_fname)
    file_is_empty = (not os.path.exists(results_fname)
                     or os.path.getsize(results_fname) == 0)
    results = []
    if file_is_empty:
        results.append(
            ['model', 'natural_code_avg', 'permuted_natural_code_avg',
             'two_stage_code_avg', 'natural_correlation', 'permuted_correlation',
             'two_stage_correlation', 'alphabet_size', 'sentences', 'test'])
    results.append(
        [model, atural_code_avg, permuted_natural_avg, two_stage_avg,
         natural_correlation, permuted_correlation, two_stage_correlation,
         alphabet_size, sentences, test])
    util.write_csv(results_fname, results)
def save_two_stage_training_results(model, args, train_loss, dev_loss, generator_dev_loss,
                                    training_time, train_size, dev_size):
    """Write two-stage (adaptor) training results to args.adaptor_results_file,
    emitting a header row first when the file is missing or empty.

    Args:
        model: trained model exposing alphabet_size, embedding_size,
            hidden_size, nlayers, and dropout_p attributes.
        args: namespace with adaptor_results_file, alpha, beta, epochs,
            and adaptor_iterations.
        train_loss: final loss on the training set.
        dev_loss: final loss on the dev set.
        generator_dev_loss: generator's loss on the dev set.
        training_time: wall-clock training time.
        train_size: number of training examples.
        dev_size: number of dev examples.
    """
    results_fname = args.adaptor_results_file
    print('Saving to', results_fname)
    results = []
    # Header only when the file is new or empty (presumably util.write_csv
    # appends — verify).
    file_size = os.path.getsize(results_fname) if os.path.exists(
        results_fname) else 0
    if file_size == 0:
        # Fixed header typo: this column was previously 'generator_dev_losss'.
        results = [['alphabet_size', 'embedding_size', 'hidden_size', 'nlayers',
                    'dropout_p', 'alpha', 'beta', 'train_loss', 'dev_loss',
                    'generator_dev_loss', 'total_epochs', 'adaptor_iterations',
                    'training_time', 'train_size', 'dev_size']]
    results += [[model.alphabet_size, model.embedding_size, model.hidden_size,
                 model.nlayers, model.dropout_p, args.alpha, args.beta,
                 train_loss, dev_loss, generator_dev_loss, args.epochs,
                 args.adaptor_iterations, training_time, train_size, dev_size]]
    util.write_csv(results_fname, results)
def main():
    # pylint: disable=all
    """Load token data, tune alpha/beta over the train/dev split, and write
    the tuning results to args.results_file."""
    args = get_args()
    folds = util.get_folds()
    trainloader, devloader, _, alphabet = get_data_loaders_with_folds(
        'tokens', args.data_file, folds, args.batch_size,
        max_train_tokens=args.max_train_tokens)
    print('Train size: %d Dev size %d' %
          (len(trainloader.dataset), len(devloader.dataset)))
    # Default the beta search limit to twice the training-set size unless
    # explicitly overridden on the command line.
    beta_limit = (args.beta_limit if args.beta_limit is not None
                  else len(trainloader.dataset) * 2)
    print('Tuning alpha and beta')
    tuning_results = tune_alpha_and_beta(
        trainloader, devloader, alphabet, args, args.no_iterations, beta_limit)
    print('Writing tuning results to', args.results_file)
    util.write_csv(args.results_file, tuning_results)