net.debug = args.debug_mode
else:
  raise NotImplementedError()

if not os.path.exists(args.save_path):
  os.makedirs(args.save_path)
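# Build the snapshot prefix from the config: each key (except those starting with 'f') is uppercased and concatenated with its value, joined by '.'.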
snapshot_pfx = 'snapshot.' + ".".join([key.upper() + str(config[key]) for key in config.keys() if key[0] != 'f'])
if args.out_file == '':
  out_file = os.path.join(args.save_path, snapshot_pfx + '.tst-eval.json')
else:
  print("will dump prediction to {}".format(args.out_file))
  out_file = args.out_file

print("="*20)
print("Starting testing {} model".format(config['model']))
print("Snapshots {}.*\nDetails:".format(os.path.join(args.save_path, snapshot_pfx)))
for key in config:
  print("{} : {}".format(key,config[key]))
print("="*20)


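# get_n_params presumably returns the number of model parameters (e.g. sum(p.numel() for p in net.parameters()) for a torch.nn.Module); it is not shown in this fragment.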
start_time = time.time()
print("startup time for {} model: {:5.3f} for {} parameters".format(config['model'],start_time - start, get_n_params(net)))

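# Evaluate the model on the dev and test splits; the test call additionally writes predictions to out_file.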
best_val, val_rate = evaluate(net, dev, CNN, config, '', verbose=True, no_lang=args.no_lang)
tst_score, tst_rate = evaluate(net, tst, CNN, config, '', verbose=True,
                               tst_json=tst_json,
                               out_file=out_file,
                               no_lang=args.no_lang)
print("model scores based on best validation accuracy\nval_acc:{:5.3f} test_acc: {:5.3f} test speed {:5.1f} inst/sec\n".format(best_val, tst_score, tst_rate))
if config['loss'] == 'nll':
    criterion = nn.NLLLoss()
elif config['loss'] == 'smargin':
    criterion = nn.MultiLabelSoftMarginLoss()
elif config['loss'] == 'lamm':
    criterion = lamm
elif config['loss'] == 'mbr':
    criterion = mbr
else:
    raise NotImplementedError()

start_time = time.time()
if args.verbose:
    print(
        "startup time for {} model: {:5.3f} for {} instances for {} parameters"
        .format(config['model'], start_time - start, len(indexes),
                get_n_params(net)))

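# Training loop: best_val tracks the best validation score seen so far; each epoch shuffles the instance indexes and accumulates running loss (closs), instance count (cinst), and correct predictions (correct).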
best_val = 0
timeout = False
for ITER in range(args.epochs):
    net.train()
    random.shuffle(indexes)
    closs = 0.0
    cinst = 0
    correct = 0.0
    trn_start = time.time()

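    # Show a tqdm progress bar over the training indexes when verbose and not in debug mode.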
    if args.verbose and not args.debug_mode:
        pbar = tqdm(indexes, desc='trn_loss')
    else:
        pbar = indexes
Example #3
optimizer.zero_grad()

if config['loss'] == 'nll':
  criterion = nn.NLLLoss()
elif config['loss'] == 'smargin':
  criterion = nn.MultiLabelSoftMarginLoss()
elif config['loss'] == 'lamm':
  criterion = lamm
elif config['loss'] == 'mbr':
  criterion = mbr
else:
  raise NotImplementedError()

start_time = time.time()
if args.verbose:
  print("startup time for {} model: {:5.3f} for {} instances for {} parameters".format(config['model'],start_time - start, len(indexes), get_n_params(net)))

best_val = 0
timeout = False
for ITER in range(args.epochs):
  net.train()
  random.shuffle(indexes)
  closs = 0.0
  cinst = 0
  correct = 0.0
  trn_start = time.time()

  if args.verbose and not args.debug_mode:
    pbar = tqdm(indexes, desc='trn_loss')
  else:
    pbar = indexes
Example #4
if args.out_file == '':
    out_file = os.path.join(args.save_path, snapshot_pfx + '.tst-eval.json')
else:
    print("will dump prediction to {}".format(args.out_file))
    out_file = args.out_file

print("=" * 20)
print("Starting testing {} model".format(config['model']))
print("Snapshots {}.*\nDetails:".format(
    os.path.join(args.save_path, snapshot_pfx)))
for key in config:
    print("{} : {}".format(key, config[key]))
print("=" * 20)

start_time = time.time()
print("startup time for {} model: {:5.3f} for {} parameters".format(
    config['model'], start_time - start, get_n_params(net)))

best_val, val_rate = evaluate(net, dev, CNN, config, '', verbose=True)
tst_score, tst_rate = evaluate(net,
                               tst,
                               CNN,
                               config,
                               '',
                               verbose=True,
                               tst_json=tst_json,
                               out_file=out_file)
print(
    "model scores based on best validation accuracy\nval_acc:{:5.3f} test_acc: {:5.3f} test speed {:5.1f} inst/sec\n"
    .format(best_val, tst_score, tst_rate))