Code Example #1
    def _test(self):
        self.encoder.eval()

        with torch.no_grad():
            macro_f1s = []
            test_losses = []
            for input_tensor, target_tensor in logger.iterate(
                    "Test", self.test_loader):
                encoder_hidden = self.encoder.init_hidden(
                    self.device).double().to(self.device)

                input_tensor = input_tensor.to(self.device).unsqueeze(1)
                target_tensor = target_tensor.to(self.device).double()

                encoder_output, encoder_hidden = self.encoder(
                    input_tensor, encoder_hidden)

                test_loss = self.loss(encoder_output, target_tensor)

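                # Cast outputs to integer labels for f1_score (.to(torch.int32) truncates, not rounds)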
                macro_f1 = f1_score(
                    y_true=target_tensor.cpu().detach().numpy().ravel(),
                    y_pred=encoder_output.cpu().detach().to(
                        torch.int32).numpy().ravel(),
                    average='macro')

                test_losses.append(test_loss.item())
                macro_f1s.append(macro_f1)

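            # Log the epoch means; macro-F1 is reported under the 'accuracy' indicator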
            logger.store(test_loss=np.mean(test_losses))
            logger.store(accuracy=np.mean(macro_f1s))
            logger.write()
Code Example #2
File: a_logger.py Project: ZENGXH/lab
def loop():
    logger.info(a=2, b=1)

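    # 'loss_ma' is aggregated over a queue of the last 10 values, i.e. a moving average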
    logger.add_indicator('loss_ma', IndicatorType.queue,
                         IndicatorOptions(queue_size=10))
    for i in range(10):
        logger.add_global_step(1)
        logger.store(loss=100 / (i + 1), loss_ma=100 / (i + 1))
        logger.write()
        if (i + 1) % 2 == 0:
            logger.new_line()

        time.sleep(2)
Code Example #3
File: cifar.py Project: Kaushalya/lab
    def _test(self):
        self.model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for data, target in logger.iterate("Test", self.test_loader):
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                test_loss += F.cross_entropy(output, target, reduction='sum').item()
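                # The predicted class is the index of the largest logit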
                pred = output.argmax(dim=1, keepdim=True)
                correct += pred.eq(target.view_as(pred)).sum().item()

        logger.store(test_loss=test_loss / len(self.test_loader.dataset))
        logger.store(accuracy=correct / len(self.test_loader.dataset))
Code Example #4
File: mnist_indexed_logs.py Project: tonyle9/lab
    def _test(self):
        self.model.eval()
        test_loss = 0
        correct = 0
        idx = 0
        with torch.no_grad():
            for data, target in logger.iterate("Test", self.test_loader):
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
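                # Per-sample losses (reduction='none'), logged against their dataset indexes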
                loss = F.nll_loss(output, target, reduction='none')
                indexes = [idx + i for i in range(len(data))]
                values = list(loss.cpu().numpy())
                logger.store('test_sample_loss', (indexes, values))

                test_loss += float(np.sum(loss.cpu().numpy()))
                pred = output.argmax(dim=1, keepdim=True)
                values = list(pred.cpu().numpy())
                logger.store('test_sample_pred', (indexes, values))
                correct += pred.eq(target.view_as(pred)).sum().item()

                idx += len(data)

        # Add test loss and accuracy to logger
        logger.store(test_loss=test_loss / len(self.test_loader.dataset))
        logger.store(accuracy=correct / len(self.test_loader.dataset))
Code Example #5
File: cifar.py Project: Kaushalya/lab
    def _train(self):
        self.model.train()
        for i, (data, target) in logger.enum("Train", self.train_loader):
            data, target = data.to(self.device), target.to(self.device)

            self.optimizer.zero_grad()
            output = self.model(data)
            loss = F.cross_entropy(output, target)
            loss.backward()
            self.optimizer.step()

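            # Queue the batch loss; the logger outputs the aggregate on write()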
            logger.store(train_loss=loss.item())
            logger.add_global_step()

            if i % self.train_log_interval == 0:
                logger.write()
Code Example #6
def test(model, device, test_loader):
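    # Track the whole test pass as a monitored section with progress updates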
    with logger.section("Test", total_steps=len(test_loader)):
        model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(test_loader):
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += F.nll_loss(output, target, reduction='sum').item()
                pred = output.argmax(dim=1, keepdim=True)
                correct += pred.eq(target.view_as(pred)).sum().item()
                logger.progress(batch_idx + 1)

        # Add test loss and accuracy to logger
        logger.store(test_loss=test_loss / len(test_loader.dataset))
        logger.store(accuracy=correct / len(test_loader.dataset))
Code Example #7
def test(session: tf.Session, loss_value, accuracy_value, batches):
    with logger.section("Test", total_steps=batches):
        test_loss = 0
        correct = 0
        batch_idx = -1
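        # Consume batches until the TF dataset iterator is exhausted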
        while True:
            batch_idx += 1
            try:
                l, a = session.run([loss_value, accuracy_value])
                test_loss += l
                correct += a
            except tf.errors.OutOfRangeError:
                break
            logger.progress(batch_idx + 1)

        logger.store(test_loss=test_loss / batches)
        logger.store(accuracy=correct / batches)
Code Example #8
    def _train(self):
        self.model.train()
        for i, (data, target) in logger.enumerator("Train", self.train_loader):
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            self.optimizer.step()

            # Add training loss to the logger.
            # The logger will queue the values and output the mean
            logger.store(train_loss=loss.item())
            logger.add_global_step()

            # Print output to the console
            if i % self.train_log_interval == 0:
                # Output the indicators
                logger.write()
Code Example #9
def train(args, session: tf.Session, loss_value, train_op, batches, epoch):
    with logger.section("Train", total_steps=batches):
        batch_idx = -1
        while True:
            batch_idx += 1
            try:
                l, _ = session.run([loss_value, train_op])
            except tf.errors.OutOfRangeError:
                break

            # Add training loss to the logger.
            # The logger will queue the values and output the mean
            logger.store(train_loss=l)
            logger.progress(batch_idx + 1)
            logger.set_global_step(epoch * batches + batch_idx)

            # Print output to the console
            if batch_idx % args.log_interval == 0:
                # Output the indicators
                logger.write()
Code Example #10
def train(args, model, device, train_loader, optimizer, epoch):
    with logger.section("Train", total_steps=len(train_loader)):
        model.train()
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            optimizer.step()

            # Add training loss to the logger.
            # The logger will queue the values and output the mean
            logger.store(train_loss=loss.item())
            logger.progress(batch_idx + 1)
            logger.set_global_step(epoch * len(train_loader) + batch_idx)

            # Print output to the console
            if batch_idx % args.log_interval == 0:
                # Output the indicators
                logger.write()
Code Example #11
    def _train(self):
        for i, (input_tensor,
                target_tensor) in logger.enum("Train", self.train_loader):
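            # Fresh hidden state for every batch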
            encoder_hidden = self.encoder.init_hidden(self.device).double().to(
                self.device)

            input_tensor = input_tensor.to(self.device).unsqueeze(1)
            target_tensor = target_tensor.to(self.device).double()

            self.optimizer.zero_grad()
            encoder_output, encoder_hidden = self.encoder(
                input_tensor, encoder_hidden)

            train_loss = self.loss(encoder_output, target_tensor)

            train_loss.backward()
            self.optimizer.step()

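            # Log the loss, advance the global step, and write to the console every batch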
            logger.store(loss=train_loss.item())
            logger.add_global_step()
            logger.write()
Code Example #12
    def _train(self):
        for i, (images, _) in logger.enum("Train", self.train_loader):
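            # Smoothed targets: real labels sampled from [0.8, 1.0], fake from [0.0, 0.2]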
            targets_real = torch.empty(images.size(0), 1, device=self.device).uniform_(0.8, 1.0)
            targets_fake = torch.empty(images.size(0), 1, device=self.device).uniform_(0.0, 0.2)

            images = images.to(self.device)

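            # Discriminator update: real images vs. detached generator samples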
            self.optimizer_D.zero_grad()
            logits_real = self.discriminator(images)
            fake_images = self.generator(
                noise(self.device, self.batch_size, self.noise_dim)).detach()
            logits_fake = self.discriminator(fake_images)
            discriminator_loss = DLoss(logits_real, logits_fake, targets_real, targets_fake)
            discriminator_loss.backward()
            self.optimizer_D.step()

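            # Generator update: push fresh fakes toward the 'real' targets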
            self.optimizer_G.zero_grad()
            fake_images = self.generator(noise(self.device, self.batch_size, self.noise_dim))
            logits_fake = self.discriminator(fake_images)
            generator_loss = GLoss(logits_fake, targets_real)
            generator_loss.backward()
            self.optimizer_G.step()

            logger.store(G_Loss=generator_loss.item())
            logger.store(D_Loss=discriminator_loss.item())
            logger.add_global_step()

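        # Store a few generated images from the final batch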
        for j in range(1, min(10, fake_images.size(0))):
            img = fake_images[j].squeeze()
            logger.store('generated', img)
Code Example #13
def main():
    args = parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # Loading data
    with logger.section("Loading data"):
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('./data',
                           train=True,
                           download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307, ), (0.3081, ))
                           ])),
            batch_size=args.batch_size,
            shuffle=True,
            **kwargs)
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('./data',
                           train=False,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307, ), (0.3081, ))
                           ])),
            batch_size=args.test_batch_size,
            shuffle=True,
            **kwargs)

    # Model creation
    with logger.section("Create model"):
        model = Net().to(device)
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=args.momentum)

    # Specify indicators
    logger.add_indicator("train_loss", queue_limit=10, is_print=True)
    logger.add_indicator("test_loss", is_histogram=False, is_print=True)
    logger.add_indicator("accuracy", is_histogram=False, is_print=True)
    for name, param in model.named_parameters():
        if param.requires_grad:
            logger.add_indicator(name, is_histogram=True, is_print=False)
            logger.add_indicator(f"{name}_grad",
                                 is_histogram=True,
                                 is_print=False)

    # Start the experiment
    EXPERIMENT.start_train()

    # Loop through the monitored iterator
    for epoch in logger.loop(range(0, args.epochs)):
        # Delayed keyboard interrupt handling to use
        # keyboard interrupts to end the loop.
        # This will capture interrupts and finish
        # the loop at the end of processing the iteration;
        # i.e. the loop won't stop in the middle of an epoch.
        try:
            with logger.delayed_keyboard_interrupt():

                # Training and testing
                train(args, model, device, train_loader, optimizer, epoch)
                test(model, device, test_loader)

                # Add histograms with model parameter values and gradients
                for name, param in model.named_parameters():
                    if param.requires_grad:
                        logger.store(name, param.data.cpu().numpy())
                        logger.store(f"{name}_grad", param.grad.cpu().numpy())

                # Clear line and output to console
                logger.write()

                # Output the progress summaries to `trial.yaml` and
                # to the python file header
                logger.save_progress()

                # Clear line and go to the next line;
                # that is, we add a new line to the output
                # at the end of each epoch
                logger.new_line()

        # Handle the delayed interrupt
        except KeyboardInterrupt:
            logger.finish_loop()
            logger.new_line()
            logger.log("\nKilling loop...")
            break
Code Example #14
File: pytorch.py Project: Kaushalya/lab
def store_model_indicators(model: torch.nn.Module, model_name: str = "model"):
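    # Log each trainable parameter tensor and its gradient as indicators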
    for name, param in model.named_parameters():
        if param.requires_grad:
            logger.store(f"{model_name}.{name}", param)
            logger.store(f"{model_name}.{name}.grad", param.grad)
Code Example #15
File: getting_started.py Project: nmasnadithya/labml
    for global_step in logger.loop(range(50)):
        # You can set the global step explicitly with
        # 'logger.set_global_step(global_step)'

        # Handle Keyboard Interrupts
        try:
            with logger.delayed_keyboard_interrupt():
                # A sample monitored section inside iterator
                with logger.section("sample"):
                    time.sleep(0.5)

                # A silent section is used only to organize code.
                # It produces no output
                with logger.section("logging", is_silent=True):
                    # Store values
                    logger.store(reward=global_step / 3.0, fps=12)
                    # Store more values
                    for i in range(global_step, global_step + 10):
                        logger.store('loss', i)
                        logger.store(advantage_reward=(i, i * 2))

                # Another silent section
                with logger.section("process_samples", is_silent=True):
                    time.sleep(0.5)

                # A third section with an inner loop
                with logger.section("train", total_steps=100):
                    # Let it run for multiple iterations.
                    # We'll track the progress of that too
                    for i in range(100):
                        time.sleep(0.01)