Example 1
    def fit(self, train_mask, train_y):
        n2v = self.n2v
        svm = self.svm

        n2v.train()
        optimizer = optim.Adam(n2v.parameters(),
                               lr=LR,
                               weight_decay=WEIGHT_DECAY)
        nodes = range(len(X))
        subsample_size = 100

        iterable = tqdm(range(5000))
        loss_avg = deque(maxlen=100)
        for i in iterable:
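            # Assumed node2vec API: n2v.loss(A, batch) scores random-walk
            # co-occurrence on a random node subsample each step.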
            subsample = random.sample(nodes, subsample_size)
            subsample = torch.tensor(subsample)

            loss = n2v.loss(A, subsample)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # .item() detaches the scalar so the graph isn't retained;
            # the deque keeps a 100-step moving average for display.
            loss_avg.append(loss.item())
            iterable.set_description('Loss: %.4f' % np.mean(loss_avg))

        # Indices of the labeled training nodes.
        train_X = torch.tensor(np.where(train_mask)[0], dtype=torch.long)

        n2v.eval()
        with torch.no_grad():
            Z = n2v(train_X).numpy()
        svm.fit(Z, train_y)
        print('Finished Training SVM.')
Example 2
def train(X, A, train_y, train_mask, val_y, val_mask):
    model = GCN(in_feats=X.shape[1])
    model.cuda()
    X, A = X.cuda(), A.cuda()

    train_y = torch.tensor(train_y, dtype=torch.float32)
    val_y = torch.tensor(val_y, dtype=torch.float32)

    optimizer = optim.Adam(model.parameters(),
                           lr=LR,
                           weight_decay=WEIGHT_DECAY)
    loss_fnc = nn.BCEWithLogitsLoss()

    val_acc, val_steps = 0, 250
    train_acc = 0
    iterable = tqdm(range(5000))
    for i in iterable:
        model.train()
        logits = model(X, A)

        idxs = torch.tensor(np.where(train_mask)[0])
        positive_idxs = idxs[train_y == 1]
        negative_idxs = idxs[train_y == 0][:len(positive_idxs)]
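        # Class balancing: truncate the negatives to the number of positives
        # (the commented block below would sample them randomly instead).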

        positives = train_y[train_y == 1]
        negatives = train_y[train_y == 0][:len(positives)]

        #        idxs = range(len(negative_idxs))
        #        sample_idx = random.sample(idxs, len(positive_idxs))
        #        negative_idxs = negative_idxs[sample_idx]
        #        negatives = negatives[sample_idx]

        loss_pos = loss_fnc(logits[positive_idxs].squeeze(), positives.cuda())
        loss_neg = loss_fnc(logits[negative_idxs].squeeze(), negatives.cuda())
        loss = loss_pos + loss_neg

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % val_steps == 0:
            train_acc = evaluate(model, X, A, train_y.cuda(), train_mask)
            val_acc = evaluate(model, X, A, val_y.cuda(), val_mask)
        iterable.set_description(
            'Loss: %.4f. Train Accuracy: %.4f. Validation Accuracy: %.4f' %
            (loss.item(), train_acc, val_acc))
    return model
Example 3
    def evaluate_with_error_rates(self, iterator, tqdm):
        all_orig = []
        all_predicted = []
        results = {}
        self.diacritizer.set_model(self.model)
        evaluated_batches = 0
        tqdm.set_description(f"Calculating DER/WER {self.global_step}: ")
        for batch in iterator:
            if evaluated_batches > int(self.config["error_rates_n_batches"]):
                break
            evaluated_batches += 1

            predicted = self.diacritizer.diacritize_batch(batch)
            all_predicted += predicted
            all_orig += batch["original"]
            tqdm.update()

        summary_texts = []
        orig_path = os.path.join(self.config_manager.prediction_dir,
                                 "original.txt")
        predicted_path = os.path.join(self.config_manager.prediction_dir,
                                      "predicted.txt")

        with open(orig_path, "w", encoding="utf8") as file:
            for sentence in all_orig:
                file.write(f"{sentence}\n")

        with open(predicted_path, "w", encoding="utf8") as file:
            for sentence in all_predicted:
                file.write(f"{sentence}\n")

        for i in range(int(self.config["n_predicted_text_tensorboard"])):
            if i >= len(all_predicted):
                break

            summary_texts.append(
                (f"eval-text/{i}", f"{all_orig[i]} |-> {all_predicted[i]}"))

        results["DER"] = der.calculate_der_from_path(orig_path, predicted_path)
        results["DER*"] = der.calculate_der_from_path(orig_path,
                                                      predicted_path,
                                                      case_ending=False)
        results["WER"] = wer.calculate_wer_from_path(orig_path, predicted_path)
        results["WER*"] = wer.calculate_wer_from_path(orig_path,
                                                      predicted_path,
                                                      case_ending=False)
        tqdm.reset()
        return results, summary_texts
Example 4
    def evaluate(self, iterator, tqdm, use_target=True):
        epoch_loss = 0
        epoch_acc = 0
        self.model.eval()
        tqdm.set_description(f"Eval: {self.global_step}")
        with torch.no_grad():
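            # Inference only: no gradients are tracked during evaluation.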
            for batch_inputs in iterator:
                batch_inputs["src"] = batch_inputs["src"].to(self.device)
                batch_inputs["lengths"] = batch_inputs["lengths"].to("cpu")
                if use_target:
                    batch_inputs["target"] = batch_inputs["target"].to(
                        self.device)
                else:
                    batch_inputs["target"] = None

                outputs = self.model(
                    src=batch_inputs["src"],
                    target=batch_inputs["target"],
                    lengths=batch_inputs["lengths"],
                )

                predictions = outputs["diacritics"]
                predictions = predictions.view(-1, predictions.shape[-1])

                targets = batch_inputs["target"]
                if targets is not None:
                    # Loss/accuracy are only defined when gold targets exist
                    # (use_target=False would otherwise crash on targets.view).
                    targets = targets.view(-1).to(self.device)
                    loss = self.criterion(predictions, targets)
                    acc = categorical_accuracy(predictions, targets,
                                               self.pad_idx)
                    epoch_loss += loss.item()
                    epoch_acc += acc.item()
                tqdm.update()

        tqdm.reset()
        return epoch_loss / len(iterator), epoch_acc / len(iterator)
Example 5
def train(
    params,
    X,
    A,
    edge_weights,
    train_y,
    train_idx,
    val_y,
    val_idx,
    save_best_only=True,
    savepath='',
):

    epochs = 1000

    model = GAT(in_feats=X.shape[1], **params)
    model.to(DEVICE)
    X = X.to(DEVICE)
    A = A.to(DEVICE)
    train_y = train_y.to(DEVICE)
    val_y = val_y.to(DEVICE)
    if edge_weights is not None:
        edge_weights = edge_weights.to(DEVICE)

    optimizer = optim.Adam(model.parameters(),
                           lr=params['lr'],
                           weight_decay=params['weight_decay'])
    loss_fnc = tools.Loss(train_y, train_idx)
    val_loss_fnc = tools.Loss(val_y, val_idx)
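    # tools.Loss is assumed to close over the labels and index subset, so the
    # same full-graph logits can be scored separately for train and validation.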

    iterable = tqdm(range(epochs))
    for i in iterable:
        model.train()
        logits = model(X, A, edge_weights=edge_weights)

        optimizer.zero_grad()
        loss = loss_fnc(logits)
        loss.backward()
        optimizer.step()

        logits = logits.detach()
        val_loss = val_loss_fnc(logits)
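        # evalAUC is assumed to accept precomputed logits as its last argument
        # when the model and feature arguments are dummied out with None/0.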
        train_auc = evalAUC(None, 0, 0, train_y, 0, logits[train_idx])
        val_auc = evalAUC(None, 0, 0, val_y, 0, logits[val_idx])

        iterable.set_description(
            'Loss: %.4f; Val Loss: %.4f; Train AUC: %.4f; Validation AUC: %.4f'
            % (loss.item(), val_loss.item(), train_auc, val_auc))

    score = evalAUC(model, X, A, val_y, val_idx)
    print(f'Last validation AUC: {val_auc}')

    if savepath:
        save = {
            'auc': score,
            'model_params': params,
            'model_state_dict': model.state_dict()
        }
        torch.save(save, savepath)

    return model
Example 6
    def run(self):
        scaler = torch.cuda.amp.GradScaler()
        train_iterator, _, validation_iterator = load_iterators(
            self.config_manager)
        print("data loaded")
        print("----------------------------------------------------------")
        tqdm_eval = trange(0, len(validation_iterator), leave=True)
        tqdm_error_rates = trange(0, len(validation_iterator), leave=True)
        tqdm_eval.set_description("Eval")
        tqdm_error_rates.set_description("WER/DER : ")
        tqdm = trange(self.global_step,
                      self.config["max_steps"] + 1,
                      leave=True)

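        # repeater() is assumed to cycle the train iterator indefinitely;
        # the loop exits via the max_steps check at the bottom.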
        for batch_inputs in repeater(train_iterator):
            tqdm.set_description(f"Global Step {self.global_step}")
            if self.config["use_decay"]:
                self.lr = self.adjust_learning_rate(
                    self.optimizer, global_step=self.global_step)
            self.optimizer.zero_grad()
            if self.device == "cuda" and self.config["use_mixed_precision"]:
                with autocast():
                    step_results = self.run_one_step(batch_inputs)
                    scaler.scale(step_results["loss"]).backward()
                    scaler.unscale_(self.optimizer)
                    if self.config.get("CLIP"):
                        torch.nn.utils.clip_grad_norm_(self.model.parameters(),
                                                       self.config["CLIP"])

                    scaler.step(self.optimizer)

                    scaler.update()
            else:
                step_results = self.run_one_step(batch_inputs)

                loss = step_results["loss"]
                loss.backward()
                if self.config.get("CLIP"):
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(),
                                                   self.config["CLIP"])
                self.optimizer.step()

            self.losses.append(step_results["loss"].item())

            self.print_losses(step_results, tqdm)

            self.summary_manager.add_scalar("meta/learning_rate",
                                            self.lr,
                                            global_step=self.global_step)

            if self.global_step % self.config["model_save_frequency"] == 0:
                torch.save(
                    {
                        "global_step": self.global_step,
                        "model_state_dict": self.model.state_dict(),
                        "optimizer_state_dict": self.optimizer.state_dict(),
                    },
                    os.path.join(
                        self.config_manager.models_dir,
                        f"{self.global_step}-snapshot.pt",
                    ),
                )

            if self.global_step % self.config["evaluate_frequency"] == 0:
                loss, acc = self.evaluate(validation_iterator, tqdm_eval)
                self.summary_manager.add_scalar("evaluate/loss",
                                                loss,
                                                global_step=self.global_step)
                self.summary_manager.add_scalar("evaluate/acc",
                                                acc,
                                                global_step=self.global_step)
                tqdm.display(
                    f"Evaluate {self.global_step}: accuracy, {acc}, loss: {loss}",
                    pos=8)
                self.model.train()

            if (self.global_step %
                    self.config["evaluate_with_error_rates_frequency"] == 0):
                error_rates, summary_texts = self.evaluate_with_error_rates(
                    validation_iterator, tqdm_error_rates)
                if error_rates:
                    WER = error_rates["WER"]
                    DER = error_rates["DER"]
                    DER1 = error_rates["DER*"]
                    WER1 = error_rates["WER*"]

                    self.summary_manager.add_scalar(
                        "error_rates/WER",
                        WER / 100,
                        global_step=self.global_step,
                    )
                    self.summary_manager.add_scalar(
                        "error_rates/DER",
                        DER / 100,
                        global_step=self.global_step,
                    )
                    self.summary_manager.add_scalar(
                        "error_rates/DER*",
                        DER1 / 100,
                        global_step=self.global_step,
                    )
                    self.summary_manager.add_scalar(
                        "error_rates/WER*",
                        WER1 / 100,
                        global_step=self.global_step,
                    )

                    error_rates = f"DER: {DER}, WER: {WER}, DER*: {DER1}, WER*: {WER1}"
                    tqdm.display(f"WER/DER {self.global_step}: {error_rates}",
                                 pos=9)

                    for tag, text in summary_texts:
                        self.summary_manager.add_text(tag, text)

                self.model.train()

            if self.global_step % self.config["train_plotting_frequency"] == 0:
                self.plot_attention(step_results)

            self.report(step_results, tqdm)

            self.global_step += 1
            if self.global_step > self.config["max_steps"]:
                print("Training Done.")
                return

            tqdm.update()
Example 7
def train_tom(opt, train_loader, model, board):
    device = torch.device("cuda:0")
    model.to(device)
    model.train()

    # criterion
    criterionL1 = nn.L1Loss()
    criterionVGG = VGGLoss()
    criterionMask = nn.L1Loss()

    # optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=opt.lr,
                                 betas=(0.5, 0.999))
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer,
        lr_lambda=lambda step: 1.0 - max(0, step - opt.keep_step) / float(
            opt.decay_step + 1),
    )

    pbar = tqdm(range(opt.keep_step + opt.decay_step))
    for step in pbar:
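        # The loader's next_batch() is assumed to cycle through the dataset.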
        inputs = train_loader.next_batch()

        im = inputs["image"].to(device)  #.cuda()
        im_pose = inputs["pose_image"]
        im_h = inputs["head"]
        shape = inputs["shape"]

        agnostic = inputs["agnostic"].to(device)  # .cuda()
        c = inputs["cloth"].to(device)  #.cuda()
        cm = inputs["cloth_mask"].to(device)  #.cuda()

        # Inputs are already on the device, so no extra transfer is needed.
        concat_tensor = torch.cat([agnostic, c], 1)

        outputs = model(concat_tensor)
        p_rendered, m_composite = torch.split(outputs, 3, 1)
        p_rendered = torch.tanh(p_rendered)
        m_composite = torch.sigmoid(m_composite)
        # Composite the warped cloth over the rendered person via the mask.
        p_tryon = c * m_composite + p_rendered * (1 - m_composite)

        visuals = [
            [im_h, shape, im_pose],
            [c, cm * 2 - 1, m_composite * 2 - 1],
            [p_rendered, p_tryon, im],
        ]

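        # TOM objective: L1 + perceptual (VGG) losses on the try-on image,
        # plus L1 on the composition mask.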
        loss_l1 = criterionL1(p_tryon, im)
        loss_vgg = criterionVGG(p_tryon, im)
        loss_mask = criterionMask(m_composite, cm)
        loss = loss_l1 + loss_vgg + loss_mask
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        pbar.set_description(
            f"loss: {loss.item():.4f}, l1: {loss_l1.item():.4f}, "
            f"vgg: {loss_vgg.item():.4f}, mask: {loss_mask.item():.4f}")
        if board and (step + 1) % opt.display_count == 0:
            board_add_images(board, "combine", visuals, step + 1)
            board.add_scalar("metric", loss.item(), step + 1)
            board.add_scalar("L1", loss_l1.item(), step + 1)
            board.add_scalar("VGG", loss_vgg.item(), step + 1)
            board.add_scalar("MaskL1", loss_mask.item(), step + 1)
            print(
                f"step: {step + 1:8d}, loss: {loss.item():.4f}, l1: {loss_l1.item():.4f}, vgg: {loss_vgg.item():.4f}, mask: {loss_mask.item():.4f}",
                flush=True,
            )

        if (step + 1) % opt.save_count == 0:
            save_checkpoint(
                model,
                os.path.join(opt.checkpoint_dir, opt.name,
                             "step_%06d.pth" % (step + 1)),
            )
Example 8
def tprint_rank_0(tqdm, message):
    # In a distributed run, only rank 0 writes to the bar so descriptions
    # from multiple processes do not interleave.
    if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
        tqdm.set_description(message)
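
A minimal usage sketch (the bar and loop below are hypothetical; trange is tqdm's range wrapper): every rank iterates, but only rank 0, or a non-distributed run, updates the bar's description.

from tqdm import trange

pbar = trange(1000)
for step in pbar:
    loss = 1.0 / (step + 1)  # placeholder metric
    tprint_rank_0(pbar, f"loss: {loss:.4f}")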