Example 1
    print(">>> [Epoch was updated]")
    for b in range(len(dataset.files) // config.batch_size):
        scheduler.optimizer.zero_grad()  # the scheduler wraps the optimizer (see note below)
        try:
            batch_x, batch_y = dataset.slide_seq2seq_batch(
                config.batch_size, config.max_seq)
            batch_x = torch.from_numpy(batch_x).contiguous().to(
                config.device, non_blocking=True, dtype=torch.int)
            batch_y = torch.from_numpy(batch_y).contiguous().to(
                config.device, non_blocking=True, dtype=torch.int)
        except IndexError:
            # Skip batches the sliding-window sampler cannot fill.
            continue

        start_time = time.time()  # times one training step (not reported in this excerpt)
        mt.train()                # make sure dropout etc. are in training mode
        sample = mt(batch_x)      # call the module directly so forward hooks run
        metrics = metric_set(sample, batch_y)
        loss = metrics['loss']
        loss.backward()
        scheduler.step()  # assumed to update the LR and call optimizer.step() internally
        end_time = time.time()

        if config.debug:
            print("[Loss]: {}".format(loss.item()))  # .item() prints a scalar, not a tensor repr

        # idx is the global step counter maintained by the enclosing epoch loop.
        train_summary_writer.add_scalar('loss',
                                        metrics['loss'],
                                        global_step=idx)
        train_summary_writer.add_scalar('accuracy',
                                        metrics['accuracy'],
                                        global_step=idx)
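Note that the loop never calls optimizer.step() itself: it reaches the optimizer only through scheduler.optimizer, so the scheduler object is assumed to own the optimizer and to step it inside scheduler.step(). A minimal sketch of such a wrapper, assuming the Transformer warm-up ("Noam") schedule; the class name and constructor arguments are illustrative, not this project's actual API:

class WarmupScheduler:
    """Owns an optimizer; step() sets the learning rate, then steps it."""

    def __init__(self, optimizer, d_model, warmup_steps=4000):
        self.optimizer = optimizer
        self.d_model = d_model
        self.warmup_steps = warmup_steps
        self._step = 0

    def step(self):
        self._step += 1
        # Noam schedule: linear warm-up, then inverse-square-root decay.
        lr = self.d_model ** -0.5 * min(self._step ** -0.5,
                                        self._step * self.warmup_steps ** -1.5)
        for group in self.optimizer.param_groups:
            group['lr'] = lr
        self.optimizer.step()

# Usage, mirroring the loop above (hypothetical wiring):
# scheduler = WarmupScheduler(torch.optim.Adam(mt.parameters()), d_model=512)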
Example 2
    print(">>> [Epoch was updated]")
    for b in range(len(dataset.files) // config.batch_size):
        scheduler.optimizer.zero_grad()
        try:
            batch_x, batch_y = dataset.slide_seq2seq_batch(
                config.batch_size, config.max_seq)
            batch_x = torch.from_numpy(batch_x).contiguous().to(
                config.device, non_blocking=True, dtype=torch.int)
            batch_y = torch.from_numpy(batch_y).contiguous().to(
                config.device, non_blocking=True, dtype=torch.int)
        except IndexError:
            continue

        start_time = time.time()
        mt.train()
        sample, _ = mt(batch_x)  # call the module directly; the second return value is unused here
        metrics = metric_set(sample, batch_y)
        loss = metrics['loss']
        # NVIDIA apex mixed precision: scale the loss before backward()
        # so FP16 gradients do not underflow.
        with amp.scale_loss(loss, scheduler.optimizer) as scaled_loss:
            scaled_loss.backward()
        scheduler.step()
        end_time = time.time()

        if config.debug:
            print("[Loss]: {}".format(loss.item()))

        train_summary_writer.add_scalar('loss',
                                        metrics['loss'],
                                        global_step=idx)
        train_summary_writer.add_scalar('accuracy',
                                        metrics['accuracy'],
                                        global_step=idx)
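The only difference from Example 1 is the backward pass: amp here is NVIDIA's apex library, whose amp.scale_loss context manager scales the loss before backward() to keep FP16 gradients from underflowing. apex amp has since been deprecated in favour of the torch.cuda.amp API built into PyTorch; below is a minimal self-contained sketch of the same pattern with GradScaler and autocast. The model, data, and hyperparameters are stand-ins, not this project's, and a CUDA device is assumed:

import torch
from torch import nn
from torch.cuda.amp import autocast, GradScaler

# Stand-ins for the example's model and metrics (illustrative only).
model = nn.Sequential(nn.Linear(16, 64), nn.ReLU(), nn.Linear(64, 4)).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scaler = GradScaler()  # maintains a dynamic loss-scale factor

for _ in range(10):
    x = torch.randn(8, 16, device='cuda')
    y = torch.randint(0, 4, (8,), device='cuda')

    optimizer.zero_grad()
    with autocast():                   # run the forward pass in mixed precision
        loss = criterion(model(x), y)
    scaler.scale(loss).backward()      # scaled backward, the role amp.scale_loss played
    scaler.step(optimizer)             # unscales gradients; skips the step on inf/NaN
    scaler.update()                    # adjust the loss scale for the next iteration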