Example #1
def guess():
    # Minimum possible number
    minimum: cat.CliArgument = default(0)
    # Maximum possible number
    maximum: cat.CliArgument = default(100)
    # Maximum number of tries
    maxtries: cat.CliArgument = default(10)

    # Force the number to guess (defaults to random)
    target: cat.CliArgument = default(random.randint(minimum, maximum))

    assert minimum <= target <= maximum

    print(f"> Please guess a number between {minimum} and {maximum}")
    for i in range(maxtries):
        guess = float(input())
        if guess == target:
            print("Yes! :D")
            return True
        elif i == maxtries - 1:
            print("You failed :(")
            return False
        elif guess < target:
            print("> Too low. Guess again.")
        elif guess > target:
            print("> Too high. Guess again.")
Example #2
def make_network(layer_sizes):
    # Activation function for the network
    actfn: cat.CliArgument = default(torch.relu)

    layers = [
        layer.new(
            W=param(nin, nout),
            b=param(nin, nout, bias=True),
            actfn=actfn,
        ) for nin, nout in zip(layer_sizes[:-1], layer_sizes[1:])
    ]
    layers[-1] = layers[-1].new(actfn=torch.nn.LogSoftmax(dim=1))
    network = step.new(lossfn=torch.nn.NLLLoss(),
                       model=sequential.new(layers=layers))
    return network
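
For reference, here is a rough plain torch.nn sketch of the kind of architecture make_network builds, assuming layer, param, step and sequential wrap ordinary linear layers. This is an illustrative approximation, not the library's own construction; the loss function is kept separate here instead of being bundled into the step.

from torch import nn

def make_network_plain(layer_sizes, actfn=nn.ReLU()):
    # One Linear + activation pair per consecutive pair of sizes
    modules = []
    for nin, nout in zip(layer_sizes[:-1], layer_sizes[1:]):
        modules.append(nn.Linear(nin, nout))
        modules.append(actfn)
    # Replace the final activation with LogSoftmax, to be paired with NLLLoss
    modules[-1] = nn.LogSoftmax(dim=1)
    return nn.Sequential(*modules)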
Example #3
def step(inp, target):
    # How much to regularize the magnitude of the weights
    weight_reg: cat.CliArgument = default(0)

    model: cat.Model
    lossfn: cat.LossFunction

    if weight_reg:
        results = model.using(weights="$param:WeightMatrix")(inp)
        output = results.value
        reg = sum(results.weights.map(lambda param: param.abs().sum()))
        loss = lossfn(output, target) + weight_reg * reg
    else:
        output = model(inp)
        loss = lossfn(output, target)

    return loss
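
For comparison, here is a minimal sketch of the same L1 weight penalty written against a plain torch.nn.Module, without the selector mechanism used above; model and lossfn are assumed to be an ordinary module and loss function, and the name-based filter only approximates the WeightMatrix selection.

def step_plain(model, lossfn, inp, target, weight_reg=0.0):
    output = model(inp)
    loss = lossfn(output, target)
    if weight_reg:
        # L1 penalty over parameters whose name contains "weight"
        # (approximating the $param:WeightMatrix selection above)
        reg = sum(p.abs().sum() for name, p in model.named_parameters()
                  if "weight" in name)
        loss = loss + weight_reg * reg
    return loss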
Example #4
def train():

    # Sizes of the hidden layers
    hidden: cat.CliArgument = default(1000)
    if isinstance(hidden, int):
        hidden = (hidden, )

    # Number of epochs
    epochs: cat.CliArgument & int = default(10)

    # Batch size
    batch_size: cat.CliArgument & int = default(32)

    # Learning rate
    lr: cat.CliArgument & float = default(0.1)

    # Seed
    seed: cat.CliArgument & int = default(1234)

    # Display weight statistics
    weight_stats: cat.CliArgument & bool = default(False)

    torch.random.manual_seed(seed)

    mn = mnist()
    train_data = mn.data
    train_targets = mn.targets
    train_data = train_data.reshape((-1, 784)) * (2.0 / 255) - 1.0

    nbatch = len(train_data) // batch_size
    running_losses = deque(maxlen=100)
    running_hits = deque(maxlen=100)
    layer_sizes = (784, *hidden, 10)

    my_step = make_network(layer_sizes).clone(return_object=True)

    # Count how many predictions in the batch match the targets
    @my_step.on("step{target} > output")
    def hits(output, target):
        return sum(output.max(dim=1).indices == target)

    # Manual SGD update: subtract lr times the gradient from each parameter, in place
    @my_step.on(Grad("step{!!loss} >> $param:Parameter"))
    def update(param):
        param_value, param_grad = param
        param_value.data.sub_(lr * param_grad)

    if weight_stats:

        # Report the max, mean and min absolute value of each weight matrix
        @my_step.on("$param:WeightMatrix")
        def wstat(param):
            absw = param.abs()
            return absw.max(), absw.mean(), absw.min()

    for i in range(epochs):
        for j in range(nbatch):
            start = j * batch_size
            end = start + batch_size

            inp = train_data[start:end]
            tgt = train_targets[start:end]

            res = my_step(inp, tgt)
            running_losses.append(res.value)
            running_hits.append(int(sum(res.hits)) / batch_size)
            loss = sum(running_losses) / len(running_losses)
            accuracy = sum(running_hits) / len(running_hits)
            stats = [
                f"E: {i + 1}/{epochs}",
                f"B: {j + 1}/{nbatch}",
                f"L: {loss:2.5f}",
                f"A: {accuracy:.0%}",
            ]
            if weight_stats:
                # Aggregate the per-matrix (max, mean, min) statistics returned by wstat
                data = tuple(zip(*res.wstat))
                wmax = max(data[0])
                wavg = sum(data[1]) / len(data[1])
                wmin = min(data[2])
                stats.append(f"W: {wmax:.4f} > {wavg:.4f} > {wmin:.4f}")
            print(" -- ".join(stats))
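
The update handler above performs a manual SGD step. For reference, the same update written against plain PyTorch parameters with populated .grad fields would look roughly like this (params is assumed to be any iterable of parameter tensors):

import torch

def sgd_update(params, lr):
    # In-place gradient step: w <- w - lr * dL/dw
    with torch.no_grad():
        for p in params:
            if p.grad is not None:
                p.sub_(lr * p.grad)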
Example #5
def stout(v):
    w: cat.Argument = default(1)
    q: cat.Argument = 2
    a = lager(v, w)
    b = lager(v, q)
    return a, b
Example #6
def main():
    # Number of rounds of guessing
    rounds: cat.CliArgument = default(1)

    for i in range(rounds):
        guess()
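
For comparison, here is the same entry point written with a plain argparse parser, under the assumption that the cat.CliArgument annotation is meant to expose rounds as a command-line flag; the flag name below is illustrative only.

import argparse

def main_plain():
    parser = argparse.ArgumentParser()
    # Number of rounds of guessing
    parser.add_argument("--rounds", type=int, default=1)
    args = parser.parse_args()
    for _ in range(args.rounds):
        guess()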