Example #1
# Imports assumed for this snippet (wasabi for logging, thinc for the GPU helpers):
from wasabi import msg
from thinc.api import require_gpu
from thinc.util import gpu_is_available, has_cupy


def setup_gpu(use_gpu: int) -> None:
    """Configure the GPU and log info."""
    if use_gpu >= 0:
        msg.info(f"Using GPU: {use_gpu}")
        require_gpu(use_gpu)
    else:
        msg.info("Using CPU")
        if has_cupy and gpu_is_available():
            msg.info("To switch to GPU 0, use the option: --gpu-id 0")
Example #2
# Imports assumed for this snippet; Printer lets the caller silence the log output.
from wasabi import Printer
from thinc.api import require_gpu
from thinc.util import gpu_is_available


def setup_gpu(use_gpu: int, silent=None) -> None:
    """Configure the GPU and log info."""
    if silent is None:
        local_msg = Printer()
    else:
        local_msg = Printer(no_print=silent, pretty=not silent)
    if use_gpu >= 0:
        local_msg.info(f"Using GPU: {use_gpu}")
        require_gpu(use_gpu)
    else:
        local_msg.info("Using CPU")
        if gpu_is_available():
            local_msg.info("To switch to GPU 0, use the option: --gpu-id 0")
Example #3
# Imports assumed for this snippet (thinc's layer/optimizer API plus GPU helpers):
import pytest
from thinc.api import Adam, Relu, Softmax, chain, use_ops
from thinc.util import gpu_is_available, has_cupy


def test_model_gpu():
    pytest.importorskip("ml_datasets")
    import ml_datasets

    ops = "cpu"
    if has_cupy and gpu_is_available():
        ops = "cupy"

    with use_ops(ops):
        n_hidden = 32
        dropout = 0.2
        (train_X, train_Y), (dev_X, dev_Y) = ml_datasets.mnist()
        model = chain(
            Relu(nO=n_hidden, dropout=dropout),
            Relu(nO=n_hidden, dropout=dropout),
            Softmax(),
        )
        # make sure the data is on the right device
        train_X = model.ops.asarray(train_X)
        train_Y = model.ops.asarray(train_Y)
        dev_X = model.ops.asarray(dev_X)
        dev_Y = model.ops.asarray(dev_Y)

        model.initialize(X=train_X[:5], Y=train_Y[:5])
        optimizer = Adam(0.001)
        batch_size = 128

        for i in range(2):
            batches = model.ops.multibatch(batch_size,
                                           train_X,
                                           train_Y,
                                           shuffle=True)
            for X, Y in batches:
                Yh, backprop = model.begin_update(X)
                backprop(Yh - Y)
                model.finish_update(optimizer)
            # Evaluate and print progress
            correct = 0
            total = 0
            for X, Y in model.ops.multibatch(batch_size, dev_X, dev_Y):
                Yh = model.predict(X)
                correct += (Yh.argmax(axis=1) == Y.argmax(axis=1)).sum()
                total += Yh.shape[0]
            # Assumed completion (the listing appears cut off here): report dev
            # accuracy so the loop matches the "print progress" comment above.
            score = correct / total
            print(f"Epoch {i}: dev accuracy {score:.3f}")