Example #1
from numpy.random import random

from labml import tracker, experiment


def main():
    conf = {'batch_size': 20}

    with experiment.record(name='sample',
                           exp_conf=conf,
                           writers={'web_api', 'screen'}):
        for i in range(10_000):
            values = {'loss': random()}
            # if i > 1000:
            #     raise RuntimeError('Testing error')
            # for j in range(0, 100):
            #     values[f'grad.fc.{j}.l1'] = random()
            #     values[f'grad.fc.{j}.l2'] = random()
            #     values[f'grad.fc.{j}.mean'] = random()
            #
            #     # values[f'param.fc.{j}.l1'] = random()
            #     # values[f'param.fc.{j}.l2'] = random()
            #     # values[f'param.fc.{j}.mean'] = random()
            #     #
            #     # values[f'module.fc.{j}.l1'] = random()
            #     # values[f'module.fc.{j}.l2'] = random()
            #     # values[f'module.fc.{j}.mean'] = random()
            #     #
            #     # values[f'time.fc.{j}.l1'] = random()
            #     # values[f'time.fc.{j}.l2'] = random()
            #     # values[f'time.fc.{j}.mean'] = random()
            tracker.save(i, values)

            if i % 1000 == 0:
                tracker.new_line()
Example #2
from numpy.random import random

from labml import tracker, experiment


def main():
    conf = {'batch_size': 20}

    for i in range(2):
        with experiment.record(name=f'sample_{i}',
                               exp_conf=conf,
                               writers={'screen'}):
            for epoch in range(100):
                tracker.save(epoch, loss=random())
            tracker.new_line()
Example #3
from labml import tracker, experiment


def repeat_values():
    conf = {'batch_size': 20}

    with experiment.record(name='sample',
                           exp_conf=conf,
                           writers={'web_api', 'screen'}):
        for i in range(10):
            tracker.add_global_step(1)
            tracker.save('loss', 1)
            tracker.save('loss', 5)
            # tracker.save()

            if i % 1000 == 0:
                tracker.new_line()
Example #4
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST

from labml import lab, experiment
# Assuming labml's PyTorch Lightning integration lives here; the import path may differ by version
from labml.utils.lightning import LabMLLightningLogger


def main():
    # Init our model
    mnist_model = MNISTModel()

    # Init DataLoader from MNIST Dataset
    train_ds = MNIST(str(lab.get_data_path()), train=True, download=True, transform=transforms.ToTensor())
    train_loader = DataLoader(train_ds, batch_size=32)

    # Initialize a trainer
    trainer = pl.Trainer(gpus=1, max_epochs=3, progress_bar_refresh_rate=20, logger=LabMLLightningLogger())

    # Train the model ⚡
    with experiment.record(name='mnist_lightening', disable_screen=True):
        trainer.fit(mnist_model, train_loader)
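
# `MNISTModel` is defined elsewhere in the original project. The sketch below is an
# illustrative stand-in (not the original implementation): a minimal LightningModule
# that would work with the trainer above.
import torch
import torch.nn.functional as F


class MNISTModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        # Flatten 28x28 images and map them to 10 class logits
        self.l1 = torch.nn.Linear(28 * 28, 10)

    def forward(self, x):
        return self.l1(x.view(x.size(0), -1))

    def training_step(self, batch, batch_idx):
        x, y = batch
        # Cross-entropy on the logits; Lightning optimizes the returned loss
        return F.cross_entropy(self(x), y)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)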
Example #5
from numpy.random import random

from labml import tracker, experiment


def main():
    conf = {'batch_size': 20}

    with experiment.record(name='sample', exp_conf=conf, writers={'web_api'}):
        for i in range(10000000):
            values = {'loss': random()}
            for j in range(0, 100):
                values[f'grad.fc.{j}.l1'] = random()
                values[f'grad.fc.{j}.l2'] = random()
                values[f'grad.fc.{j}.mean'] = random()

                # values[f'param.fc.{j}.l1'] = random()
                # values[f'param.fc.{j}.l2'] = random()
                # values[f'param.fc.{j}.mean'] = random()
                #
                # values[f'module.fc.{j}.l1'] = random()
                # values[f'module.fc.{j}.l2'] = random()
                # values[f'module.fc.{j}.mean'] = random()
                #
                # values[f'time.fc.{j}.l1'] = random()
                # values[f'time.fc.{j}.l2'] = random()
                # values[f'time.fc.{j}.mean'] = random()
            tracker.save(i, values)
Example #6
import time

from numpy.random import random

from labml import tracker, experiment

conf = {'batch_size': 20}


def train(n: int):
    return 0.999**n + random() / 10, 1 - .999**n + random() / 10


with experiment.record(name='sample', exp_conf=conf):
    for i in range(100000):
        time.sleep(0.2)
        loss, accuracy = train(i)
        tracker.save(i, {'loss': loss, 'accuracy': accuracy})
Example #7
File: amsgrad.py Project: wx-b/nn
def _synthetic_experiment(is_adam: bool):
    """
    ## Synthetic Experiment

    This is the synthetic experiment described in the paper,
    which shows a scenario where *Adam* fails.

    The paper (and Adam) formulates the optimization problem as
    minimizing the expected value of a function, $\mathbb{E}[f(\theta)]$,
    with respect to the parameters $\theta$.
    In the stochastic training setting we do not get hold of the function $f$
    itself; that is, when you are optimizing a neural network, $f$ would be
    the function on the entire batch of data.
    What we actually evaluate is a mini-batch, so the actual function is a
    realization of the stochastic $f$.
    This is why we talk about an expected value.
    So let the function realizations be $f_1, f_2, ..., f_T$ for each time step
    of training.

    We measure the performance of the optimizer as the regret,
    $$R(T) = \sum_{t=1}^T \big[ f_t(\theta_t) - f_t(\theta^*) \big]$$
    where $\theta_t$ are the parameters at time step $t$, and $\theta^*$ are the
    optimal parameters that minimize $\mathbb{E}[f(\theta)]$.

    Now let's define the synthetic problem,
    \begin{align}
    f_t(x) =
    \begin{cases}
    1010 x,  & \text{for $t \mod 101 = 1$} \\
    -10  x, & \text{otherwise}
    \end{cases}
    \end{align}
    where $-1 \le x \le +1$.
    The optimal solution is $x = -1$: over each cycle of 101 steps the realizations
    sum to $1010x - 100 \cdot 10x = 10x$, so the expected value increases with $x$
    and is minimized at the left end of the interval.

    This code will try running *Adam* and *AMSGrad* on this problem.
    """

    # Define $x$ parameter
    x = nn.Parameter(torch.tensor([.0]))
    # Optimal, $x^* = -1$
    x_star = nn.Parameter(torch.tensor([-1.]), requires_grad=False)

    def func(t: int, x_: nn.Parameter):
        """
        ### $f_t(x)$
        """
        if t % 101 == 1:
            return (1010 * x_).sum()
        else:
            return (-10 * x_).sum()

    # Initialize the relevant optimizer
    if is_adam:
        optimizer = Adam([x], lr=1e-2, betas=(0.9, 0.99))
    else:
        optimizer = AMSGrad([x], lr=1e-2, betas=(0.9, 0.99))
    # $R(T)$
    total_regret = 0

    from labml import monit, tracker, experiment

    # Create experiment to record results
    with experiment.record(name='synthetic',
                           comment='Adam' if is_adam else 'AMSGrad'):
        # Run for $10^7$ steps
        for step in monit.loop(10_000_000):
            # $f_t(\theta_t) - f_t(\theta^*)$
            regret = func(step, x) - func(step, x_star)
            # $R(T) = \sum_{t=1}^T \big[ f_t(\theta_t) - f_t(\theta^*) \big]$
            total_regret += regret.item()
            # Track results every 1,000 steps
            if (step + 1) % 1000 == 0:
                tracker.save(loss=regret,
                             x=x,
                             regret=total_regret / (step + 1))
            # Calculate gradients
            regret.backward()
            # Optimize
            optimizer.step()
            # Clear gradients
            optimizer.zero_grad()

            # Make sure $-1 \le x \le +1$
            x.data.clamp_(-1., +1.)
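
# The function above records one run per optimizer. A minimal entry point for
# comparing the two (the actual `main` in the source project may differ) could be:
def main():
    _synthetic_experiment(is_adam=True)
    _synthetic_experiment(is_adam=False)


if __name__ == '__main__':
    main()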
Example #8
from labml import tracker, experiment
from numpy.random import random

conf = {'batch_size': 20}

with experiment.record(name='sample', exp_conf=conf, writers={'web_api'}):
    for i in range(10000000):
        values = {'loss': random()}
        for j in range(0, 100):
            values[f'grad.fc.{j}.l1'] = random()
            values[f'grad.fc.{j}.l2'] = random()
            values[f'grad.fc.{j}.mean'] = random()

            # values[f'param.fc.{j}.l1'] = random()
            # values[f'param.fc.{j}.l2'] = random()
            # values[f'param.fc.{j}.mean'] = random()
            #
            # values[f'module.fc.{j}.l1'] = random()
            # values[f'module.fc.{j}.l2'] = random()
            # values[f'module.fc.{j}.mean'] = random()
            #
            # values[f'time.fc.{j}.l1'] = random()
            # values[f'time.fc.{j}.l2'] = random()
            # values[f'time.fc.{j}.mean'] = random()
        tracker.save(i, values)
Example #9
from numpy.random import random

from labml import tracker, experiment


def train(i):
    return 0.999**i + random() / 10, 1 - .999**i + random() / 10


conf = {'batch_size': 20}
with experiment.record(name='sample',
                       exp_conf=conf,
                       token='903c84fba8ca49ca9f215922833e08cf'):
    for i in range(10000):
        loss, accuracy = train(i)
        tracker.save(i, {'loss': loss, 'accuracy': accuracy})
Example #10
from fastai.vision.all import untar_data, URLs, ImageDataLoaders, get_image_files, Resize, error_rate, resnet34, \
    cnn_learner

from labml import lab, experiment
from labml.utils.fastai import LabMLFastAICallback

path = untar_data(
    URLs.PETS,
    dest=lab.get_data_path(),
    fname=lab.get_data_path() / URLs.path(URLs.PETS).name) / 'images'


def is_cat(x):
    return x[0].isupper()


dls = ImageDataLoaders.from_name_func(path,
                                      get_image_files(path),
                                      valid_pct=0.2,
                                      seed=42,
                                      label_func=is_cat,
                                      item_tfms=Resize(224))
# Create the learner with the LabML callback; training happens under experiment.record below ⚡
learn = cnn_learner(dls,
                    resnet34,
                    metrics=error_rate,
                    cbs=LabMLFastAICallback())

with experiment.record(name='pets', exp_conf=learn.labml_configs()):
    learn.fine_tune(5)
Example #11
if mode == 'lr_finder':
    trainer.train_dataloader = data_module.train_dataloader
    # Run the learning rate finder
    lr_finder = trainer.tuner.lr_find(model, train_loader, min_lr=1e-6, max_lr=100, num_training=500)
    # Plot the results
    fig = lr_finder.plot(suggest=True, show=True)
    fig.savefig('lr_finder.png')
    fig.show()
    # Pick a point based on the plot, or use the suggestion
    new_lr = lr_finder.suggestion()
    print(f"Suggested LR: {new_lr}")
    exit()

wandb.log(params)
with experiment.record(name=model_name, exp_conf=dict(params), disable_screen=True,
                       token='ae914b4ab3de48eb84b3a4a757c928b9'):
    trainer.fit(model, datamodule=data_module)

try:
    print(f"Best Model path: {checkpoint_callback1.best_model_path} "
          f"Best Score: {checkpoint_callback1.best_model_score:.4f}")
except Exception:
    pass

chk_path = checkpoint_callback1.best_model_path
model2 = LightningDR.load_from_checkpoint(chk_path, model=base, loss_fn=criterion, optim=optimizer,
                                          plist=plist, batch_size=batch_size,
                                          lr_scheduler=lr_reduce_scheduler, cyclic_scheduler=cyclic_scheduler,
                                          num_class=num_class, target_type=target_type,
                                          learning_rate=learning_rate, random_id=random_id)

trainer.test(model=model2, test_dataloaders=test_loader)

# CAM generation
model2.eval()
Example #12
from numpy.random import random

from labml import tracker, experiment


def train(i):
    return 0.999**i + random() / 10, 1 - .999**i + random() / 10


conf = {'batch_size': 20}
with experiment.record(name='sample',
                       exp_conf=conf,
                       token='49d688f6624d468394ca029ecabc8a84'):
    for i in range(10000):
        loss, accuracy = train(i)
        tracker.save(i, {'loss': loss, 'accuracy': accuracy})