Exemple #1
0
def batchify(batch):
    """Collapse a list of jobs into one set of call arguments.

    Tensor-valued entries are concatenated across the batch with
    ``torch.cat``; every other entry is taken from the first job
    (assumed identical across the batch — TODO confirm with callers).

    Returns a ``(positional, kwargs)`` pair: the positional args are read
    from keys ``arg0..argN-1`` (N from ``batch[0].meta.num_args``) and
    kwargs from the optional ``'kwargs'`` entry.
    """
    first = batch[0]
    merged = DictTree()
    for key, value in first.args.allitems():
        # Concatenate tensors along dim 0; pass everything else through.
        merged[key] = (
            torch.cat([job.args[key] for job in batch])
            if isinstance(value, torch.Tensor)
            else value
        )
    positional = [merged[f'arg{i}'] for i in range(first.meta.num_args)]
    return positional, merged.get('kwargs', {})
Exemple #2
0
def _evaluate(test_data, agent, test_str, stats_writer, step):
    """Run the agent over all test batches, log averaged stats, return a scalar.

    Accumulates the stats emitted by ``_step`` and ``agent.evaluate`` across
    batches (relying on DictTree's ``+=``/``|=``/``/`` overloads), averages
    the ``'per_trace'`` subtree by trace count and the ``'per_step'`` subtree
    by step count, then prints each averaged metric and records it via
    ``stats_writer``.

    NOTE(review): if ``test_data.batches()`` yields nothing, the averaging
    divides by zero — presumably callers guarantee non-empty test data.

    Returns ``avg_stats.loss`` when present, otherwise ``avg_stats.score``.
    """
    trace_total = 0
    step_total = 0
    accumulated = DictTree()
    for batch in test_data.batches():
        trace_total += len(batch)
        step_total += sum(trace.metadata.length for trace in batch)
        batch_stats = _step(agent, batch, train_mode=False)
        batch_stats |= agent.evaluate(batch)
        accumulated += batch_stats
    averaged = accumulated.get('per_trace', DictTree()) / trace_total
    averaged |= accumulated.get('per_step', DictTree()) / step_total
    stamp = datetime.now().replace(microsecond=0).isoformat()
    for key_path, value in averaged.allitems():
        metric = '/'.join(key_path)
        print(f"[{stamp}] Step {step} {test_str} {metric}: {value}")
        stats_writer.add(step, f'{test_str}/{metric}', value)
    stats_writer.flush()
    return averaged.loss if 'loss' in averaged else averaged.score