    # Body of the train() function: iterate over epochs and minibatches,
    # accumulating running loss and accuracy statistics.
    for i in range(epochs):
        for j in range(nbatch):
            # Slice out the current minibatch.
            start = j * batch_size
            end = start + batch_size
            inp = train_data[start:end]
            tgt = train_targets[start:end]
            res = my_step(inp, tgt)
            running_losses.append(res.value)
            running_hits.append(int(sum(res.hits)) / batch_size)
            loss = sum(running_losses) / len(running_losses)
            accuracy = sum(running_hits) / len(running_hits)
            stats = [
                f"E: {i + 1}/{epochs}",
                f"B: {j + 1}/{nbatch}",
                f"L: {loss:2.5f}",
                f"A: {accuracy:.0%}",
            ]
            if weight_stats:
                # res.wstat holds per-weight (max, mean, min) triples;
                # transpose to aggregate each statistic across weights.
                data = tuple(zip(*res.wstat))
                mx = max(data[0])
                avg = sum(data[1]) / len(data[1])
                mn = min(data[2])
                stats.append(f"W: {mx:.4f} > {avg:.4f} > {mn:.4f}")
            print(" -- ".join(stats))


if __name__ == "__main__":
    auto_cli(train, category=cat.CliArgument, eval_env=globals())
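# Hypothetical invocation sketch: assuming hyperparameters such as `epochs`
# and `batch_size` are tagged as cat.CliArgument inside train() (in the part
# of the function not shown here), auto_cli exposes them as command-line
# flags, so the script could be driven along the lines of:
#
#     python train.py --epochs 10 --batch-size 32
#
# (the exact flag spelling is an assumption; it depends on how auto_cli
# renders the variable names)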
import pytest

# The helpers below are assumed to be in scope for these tests, e.g.:
#     from ptera import auto_cli, cat, ConflictError
# (exact import paths are assumptions; `stout` is the fixture under test,
# defined elsewhere in the suite)


def test_conflict():
    # --q triggers a ConflictError in this fixture (presumably it resolves
    # to more than one tagged variable).
    with pytest.raises(ConflictError):
        auto_cli(stout, (3,), category=cat.Argument, argv="--z=3 --q=10".split())


def test_cli():
    assert auto_cli(stout, (3,), category=cat.Argument, argv="--z=3".split()) == (7, 8)
    assert auto_cli(stout, (3,), category=cat.Argument, argv="--z=3 --w=10".split()) == (16, 8)


def test_unknown_argument():
    # --x does not correspond to any tagged variable, so argument parsing
    # fails and argparse exits the process.
    with pytest.raises(SystemExit):
        auto_cli(stout, (3,), category=cat.Argument, argv="--x=4".split())
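# The fixture `stout` is not shown in this excerpt. Below is a minimal
# hypothetical definition consistent with the assertions above (the names
# and arithmetic are assumptions, not the real fixture): `z` and `w` are
# tagged variables settable as --z and --w, so --z=3 with w at its default
# of 1 yields (7, 8), and --w=10 yields (16, 8); the plain parameter `x` is
# not tagged, which is why --x=4 is rejected with a SystemExit. The
# ConflictError case would additionally require two distinct tagged
# variables that both answer to --q, which this sketch does not model.
@ptera
def stout(x):
    z: cat.Argument = default(0)  # supplied on the CLI as --z
    w: cat.Argument = default(1)  # optional override as --w
    return x + z + w, x + z + 2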
    # Body of the guess() function: pick a random target, then read guesses
    # from stdin until the player wins or runs out of tries.
    target: cat.CliArgument = default(random.randint(minimum, maximum))
    assert minimum <= target <= maximum
    print(f"> Please guess a number between {minimum} and {maximum}")
    for i in range(maxtries):
        guess = float(input())
        if guess == target:
            print("Yes! :D")
            return True
        elif i == maxtries - 1:
            print("You failed :(")
            return False
        elif guess < target:
            print("> Too low. Guess again.")
        elif guess > target:
            print("> Too high. Guess again.")


@ptera
def main():
    # Number of rounds of guessing
    rounds: cat.CliArgument = default(1)
    for _ in range(rounds):
        guess()


if __name__ == "__main__":
    auto_cli(main, description="Guessing game", category=cat.CliArgument)
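# Hypothetical run of the guessing game: `rounds` is tagged cat.CliArgument
# above, so auto_cli should accept it as a flag; `minimum`, `maximum`, and
# `maxtries` would need the same tag in the part of guess() not shown here:
#
#     python guess.py --rounds 3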