Code Example #1
import os
import numpy as np
from tqdm import trange

# Tensor, get_parameters, X_train and Y_train are provided by the surrounding
# test module (a tinygrad-style autograd library and its MNIST data loader).

def train(model, optim, steps, BS=128, gpu=False):
    # move every model and optimizer tensor onto the GPU if requested
    if gpu:
        for p in get_parameters([model, optim]):
            p.cuda_()
    losses, accuracies = [], []
    for i in (t := trange(steps, disable=os.getenv('CI') is not None)):
        # sample a random minibatch of BS training examples
        samp = np.random.randint(0, X_train.shape[0], size=(BS))

        x = Tensor(X_train[samp].reshape((-1, 28 * 28)).astype(np.float32),
                   gpu=gpu)
        Y = Y_train[samp]
        # build scaled one-hot targets: -10.0 at the true class, 0 elsewhere.
        # Multiplying these with the network's log-probabilities and taking the
        # mean gives an NLL-style loss (torch's NLL loss returns one value per row).
        y = np.zeros((len(samp), 10), np.float32)
        y[range(y.shape[0]), Y] = -10.0
        y = Tensor(y, gpu=gpu)

        # network
        out = model.forward(x)

        # NLL loss function
        loss = out.mul(y).mean()
        optim.zero_grad()
        loss.backward()
        optim.step()

        cat = np.argmax(out.cpu().data, axis=1)
        accuracy = (cat == Y).mean()

        # printing
        loss = loss.cpu().data
        losses.append(loss)
        accuracies.append(accuracy)
        t.set_description("loss %.2f accuracy %.2f" % (loss, accuracy))
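
Neither example shows how train() is invoked. Below is a minimal driver sketch, assuming a tinygrad-style API: the TinyNet class, its dot/relu/logsoftmax methods, the tinygrad.optim.SGD optimizer, and the weight initialisation are illustrative assumptions, not taken from the examples above.

# Hypothetical driver for the train() function above (a sketch, not part of
# the original example). Assumes a tinygrad-style Tensor with dot/relu/
# logsoftmax methods and an SGD optimizer in tinygrad.optim.
import numpy as np
from tinygrad.tensor import Tensor
import tinygrad.optim as optim_lib

class TinyNet:
    def __init__(self):
        # two fully connected layers, 784 -> 128 -> 10
        self.l1 = Tensor(np.random.uniform(-1., 1., (784, 128)).astype(np.float32) / np.sqrt(784))
        self.l2 = Tensor(np.random.uniform(-1., 1., (128, 10)).astype(np.float32) / np.sqrt(128))

    def forward(self, x):
        # log-probabilities, as expected by the -10.0 one-hot targets in train()
        return x.dot(self.l1).relu().dot(self.l2).logsoftmax()

model = TinyNet()
opt = optim_lib.SGD([model.l1, model.l2], lr=0.001)
train(model, opt, steps=1000)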
Code Example #2
File: test_mnist.py  Project: EmersonMax139/DumbTorch
def train(model, optim, steps, BS=128):
  losses, accuracies = [], []
  for i in (t := trange(steps)):
    samp = np.random.randint(0, X_train.shape[0], size=(BS))
    
    x = Tensor(X_train[samp].reshape((-1, 28*28)).astype(np.float32))
    Y = Y_train[samp]
    # build scaled one-hot targets: -10.0 at the true class, 0 elsewhere.
    # Multiplying these with the network's log-probabilities and taking the
    # mean gives an NLL-style loss (torch's NLL loss returns one value per row).
    y = np.zeros((len(samp),10), np.float32)
    y[range(y.shape[0]),Y] = -10.0
    y = Tensor(y)
    
    # network
    out = model.forward(x)

    # NLL loss function
    loss = out.mul(y).mean()
    loss.backward()
    optim.step()
    
    cat = np.argmax(out.data, axis=1)
    accuracy = (cat == Y).mean()
    
    # printing
    loss = loss.data
    losses.append(loss)
    accuracies.append(accuracy)
    t.set_description("loss %.2f accuracy %.2f" % (loss, accuracy))
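
The loss construction in both examples can be sanity-checked independently of the library: with a mask that is -10.0 at the true class, out.mul(y).mean() is just the usual mean negative log-likelihood scaled by 10 divided by the number of classes. A plain-numpy sketch (standalone, not from the original files):

# Standalone numpy check of the -10.0 one-hot loss trick used above.
import numpy as np

BS, classes = 4, 10
logits = np.random.randn(BS, classes).astype(np.float32)
# log-softmax of the logits (what the model's forward pass is assumed to output)
logp = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
Y = np.random.randint(0, classes, size=BS)

# mask with -10.0 at the true class, exactly as built in train()
y = np.zeros((BS, classes), np.float32)
y[range(BS), Y] = -10.0
trick_loss = (logp * y).mean()

# standard mean NLL: -mean(log p(true class))
nll = -logp[range(BS), Y].mean()

# the trick equals the mean NLL scaled by 10/classes
assert np.allclose(trick_loss, nll * 10.0 / classes, atol=1e-5)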