Code Example #1
import minitorch
import numba
import pytest
from hypothesis.strategies import integers, lists, data, permutations
from .strategies import (
    tensors,
    shaped_tensors,
    assert_close,
    assert_close_tensor,
    small_floats,
)
from minitorch import MathTestVariable

one_arg, two_arg, red_arg = MathTestVariable._tests()

# The tests in this file exercise the same core mathematical functions;
# they differ only in which tensor ops backend they run against.

TensorBackend = minitorch.make_tensor_backend(minitorch.TensorOps)
FastTensorBackend = minitorch.make_tensor_backend(minitorch.FastOps)
shared = {"fast": FastTensorBackend}

# ## Task 3.1
backend_tests = [pytest.param("fast", marks=pytest.mark.task3_1)]

# ## Task 3.2
matmul_tests = [pytest.param("fast", marks=pytest.mark.task3_2)]

if numba.cuda.is_available():
    # ## Task 3.3
    backend_tests.append(pytest.param("cuda", marks=pytest.mark.task3_3))

    # ## Task 3.4
    matmul_tests.append(pytest.param("cuda", marks=pytest.mark.task3_4))
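
For context, a minimal sketch of how `backend_tests` and `shared` are typically consumed further down in such a test file; the test name and body here are illustrative assumptions, not the project's actual tests:

@pytest.mark.parametrize("backend", backend_tests)
def test_create_and_index(backend):
    # Hypothetical test: `shared[backend]` resolves the param string
    # ("fast", plus "cuda" when registered) to a TensorBackend, so the
    # same assertions run against every available backend.
    t = minitorch.tensor([1.0, 2.0, 3.0], backend=shared[backend])
    assert t[0] == 1.0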
Code Example #2
File: run_sentiment.py  Project: minitorch/Module-4
import minitorch
from datasets import load_dataset
import embeddings
import random

BACKEND = minitorch.make_tensor_backend(minitorch.FastOps)


def RParam(*shape):
    # Uniform init in [-0.05, 0.05): rand() is uniform in [0, 1).
    r = 0.1 * (minitorch.rand(shape, backend=BACKEND) - 0.5)
    return minitorch.Parameter(r)


class Linear(minitorch.Module):
    def __init__(self, in_size, out_size):
        super().__init__()
        self.weights = RParam(in_size, out_size)
        self.bias = RParam(out_size)
        self.out_size = out_size

    def forward(self, x):
        # Batched matrix multiply followed by a broadcast bias add.
        batch, in_size = x.shape
        out = x.view(batch, in_size) @ self.weights.value.view(in_size, self.out_size)
        return out.view(batch, self.out_size) + self.bias.value


class Conv1d(minitorch.Module):
    def __init__(self, in_channels, out_channels, kernel_width):
        super().__init__()
        self.weights = RParam(out_channels, in_channels, kernel_width)
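
The Conv1d listing above is cut off mid-module. A minimal sketch of how it is commonly completed, assuming a `minitorch.conv1d` operator and a `(1, out_channels, 1)` bias shape; both are assumptions, since the excerpt ends before them:

class Conv1dSketch(minitorch.Module):
    # Illustrative completion of the truncated Conv1d (assumed API).
    def __init__(self, in_channels, out_channels, kernel_width):
        super().__init__()
        self.weights = RParam(out_channels, in_channels, kernel_width)
        self.bias = RParam(1, out_channels, 1)  # broadcasts over batch and width

    def forward(self, input):
        # (batch, in_channels, width) -> (batch, out_channels, width)
        return minitorch.conv1d(input, self.weights.value) + self.bias.value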
Code Example #3
import minitorch
import datasets
import numba
import random

FastTensorBackend = minitorch.make_tensor_backend(minitorch.FastOps)
if numba.cuda.is_available():
    GPUBackend = minitorch.make_tensor_backend(minitorch.CudaOps, is_cuda=True)


def default_log_fn(epoch, total_loss, correct, losses):
    print(f"Epoch {epoch}  loss {total_loss}  correct {correct}")


def RParam(*shape, backend):
    # Uniform init in [-0.5, 0.5).
    r = minitorch.rand(shape, backend=backend) - 0.5
    return minitorch.Parameter(r)


class Network(minitorch.Module):
    def __init__(self, hidden, backend):
        super().__init__()

        # Submodules
        self.layer1 = Linear(2, hidden, backend)
        self.layer2 = Linear(hidden, hidden, backend)
        self.layer3 = Linear(hidden, 1, backend)

    def forward(self, x):
        raise NotImplementedError("Need to include this file from past assignment.")
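
The scaffold above intentionally raises until the file from the earlier assignment is dropped in. A minimal sketch of the forward pass the three-layer layout suggests, assuming ReLU hidden activations, a sigmoid output, and the backend-aware Linear defined later in the file (truncated here); this is the usual shape of the binary classifier, but an assumption, not the assignment's actual solution:

# Hypothetical Network.forward (replaces the NotImplementedError above):
def forward(self, x):
    h = self.layer1.forward(x).relu()
    h = self.layer2.forward(h).relu()
    return self.layer3.forward(h).sigmoid()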