def loop(model: Layer,
         images: List[Tensor],
         labels: List[Tensor],
         loss: Loss,
         optimizer: Optimizer = None) -> None:
    """Run one full pass over the dataset, showing live metrics in a tqdm bar.

    When an optimizer is supplied the pass also trains the model
    (backpropagates the loss gradient and steps the optimizer);
    with no optimizer it is evaluation-only.
    """
    num_correct = 0     # running count of correct predictions
    running_loss = 0.0  # running sum of per-example losses

    with tqdm.trange(len(images)) as progress:
        for idx in progress:
            output = model.forward(images[idx])
            # A hit is when the predicted class matches the label's class.
            num_correct += int(argmax(output) == argmax(labels[idx]))
            running_loss += loss.loss(output, labels[idx])

            # Training mode: push the gradient back and update the weights.
            if optimizer is not None:
                grad = loss.gradient(output, labels[idx])
                model.backward(grad)
                optimizer.step(model)

            # Refresh the running metrics shown on the progress bar.
            seen = idx + 1
            progress.set_description(
                f"mnist loss: {running_loss / seen:.3f} "
                f"acc: {num_correct / seen:.3f}")
    def loop(model: Layer,
             images: List[Tensor],
             labels: List[Tensor],
             loss: Loss,
             optimizer: Optimizer = None) -> None:
        """Evaluate (and, if *optimizer* is given, train) *model* on the data.

        Tracks accuracy and average loss and reports both on a tqdm
        progress bar as the pass proceeds.
        """
        hits = 0        # how many predictions matched their label
        loss_sum = 0.0  # accumulated loss over examples seen so far

        with tqdm.trange(len(images)) as bar:
            for step in bar:
                image, label = images[step], labels[step]
                prediction = model.forward(image)

                if argmax(prediction) == argmax(label):
                    hits += 1
                loss_sum += loss.loss(prediction, label)

                # Only backpropagate and step when we're training.
                if optimizer is not None:
                    model.backward(loss.gradient(prediction, label))
                    optimizer.step(model)

                # Show the running average loss and accuracy.
                bar.set_description(
                    f"mnist loss: {loss_sum / (step + 1):.3f} "
                    f"acc: {hits / (step + 1):.3f}")
Example #3
0
    def loop(model: Layer,
             images: List[Tensor],
             labels: List[Tensor],
             loss: Loss,
             optimizer: Optimizer = None) -> None:
        """Single pass over (images, labels) with a live tqdm metrics readout.

        Passing an optimizer makes this a training pass; omitting it
        makes it a pure evaluation pass.
        """
        total_correct = 0       # number of correct predictions so far
        cumulative_loss = 0.0   # total loss accumulated so far

        with tqdm.trange(len(images)) as pbar:
            for n in pbar:
                x, y = images[n], labels[n]
                y_hat = model.forward(x)

                # Correct when the predicted class equals the true class.
                if argmax(y_hat) == argmax(y):
                    total_correct += 1
                cumulative_loss += loss.loss(y_hat, y)

                # During training, backpropagate the gradient and update weights.
                if optimizer is not None:
                    gradient = loss.gradient(y_hat, y)
                    model.backward(gradient)
                    optimizer.step(model)

                # Update the metrics on the progress bar.
                count = n + 1
                pbar.set_description(
                    f"mnist loss: {cumulative_loss / count:.3f} "
                    f"acc: {total_correct / count:.3f}")
 def fizzbuzz_accuracy(low: int, hi: int, net: Layer) -> float:
     """Fraction of integers in [low, hi) that *net* classifies correctly.

     Each n is binary-encoded, fed through the network, and the arg-max
     of the output is compared against the arg-max of the true
     fizzbuzz encoding.
     """
     hits = sum(
         1
         for n in range(low, hi)
         if argmax(net.forward(binary_encode(n))) == argmax(fizz_buzz_encode(n))
     )
     return hits / (hi - low)
    def fizzbuzz_accuracy(low: int, hi: int, net: Layer) -> float:
        """Return the share of n in [low, hi) that the network gets right."""
        hits = 0
        for n in range(low, hi):
            # Compare the network's top class with the true fizzbuzz class.
            guess = argmax(net.forward(binary_encode(n)))
            truth = argmax(fizz_buzz_encode(n))
            hits += guess == truth  # bool counts as 0 or 1
        return hits / (hi - low)