Example #1
def main(args):

    data_config = load_config_from_json(args.data_config_path)
    model_config = load_config_from_json(
        os.path.join(args.saved_model_path, "config.jsonl"))

    # initialize model
    model = SFNet(model_config["sfnet"])
    model = model.to(device)

    if not os.path.exists(args.saved_model_path):
        raise FileNotFoundError(args.saved_model_path)

    checkpoint = os.path.join(args.saved_model_path, args.checkpoint)
    model.load_state_dict(torch.load(checkpoint, map_location="cpu"))
    print("Model loaded from %s" % (args.saved_model_path))

    # trackers to keep true labels and predicted probabilities
    target_tracker = []
    pred_tracker = []

    print("Preparing test data ...")
    dataset = ModCloth(data_config, split="test")
    data_loader = DataLoader(
        dataset=dataset,
        batch_size=model_config["trainer"]["batch_size"],
        shuffle=False,
    )

    print("Evaluating model on test data ...")
    model.eval()
    with torch.no_grad():

        for iteration, batch in enumerate(data_loader):

            for k, v in batch.items():
                if torch.is_tensor(v):
                    batch[k] = to_var(v)

            # Forward pass
            _, pred_probs = model(batch)

            target_tracker.append(batch["fit"].cpu().numpy())
            pred_tracker.append(pred_probs.cpu().data.numpy())

    # not considering the last (incomplete) batch for metrics
    target_tracker = np.stack(target_tracker[:-1]).reshape(-1)
    pred_tracker = np.stack(pred_tracker[:-1], axis=0).reshape(
        -1, model_config["sfnet"]["num_targets"])
    precision, recall, f1_score, accuracy, auc = compute_metrics(
        target_tracker, pred_tracker)

    print("-" * 50)
    print(
        "Metrics:\n Precision = {:.3f}\n Recall = {:.3f}\n F1-score = {:.3f}\n Accuracy = {:.3f}\n AUC = {:.3f}\n "
        .format(precision, recall, f1_score, accuracy, auc))
    print("-" * 50)
Example #2
print("Instantiate dataloader")
test_dataset = PF_Pascal(args.test_csv_path, args.test_image_path,
                         args.feature_h, args.feature_w, args.eval_type)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=1,
                                          shuffle=False,
                                          num_workers=args.num_workers)

# Instantiate model
print("Instantiate model")
net = SFNet(args.feature_h,
            args.feature_w,
            beta=args.beta,
            kernel_sigma=args.kernel_sigma)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)

# Load weights
print("Load pre-trained weights")
best_weights = torch.load("./weights/best_checkpoint.pt", map_location=device)
adap3_dict = best_weights['state_dict1']
adap4_dict = best_weights['state_dict2']
net.adap_layer_feat3.load_state_dict(adap3_dict, strict=False)
net.adap_layer_feat4.load_state_dict(adap4_dict, strict=False)


# PCK metric from 'https://github.com/ignacio-rocco/weakalign/blob/master/util/eval_util.py'
def correct_keypoints(source_points, warped_points, L_pck, alpha=0.1):
    # compute correct keypoints
    p_src = source_points[0, :]
    p_wrp = warped_points[0, :]
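
The `correct_keypoints` function is cut off at this point. Based on the weakalign `eval_util.py` linked in the comment above, the PCK computation typically ignores keypoints padded with -1 and counts a keypoint as correct when its distance to the warped point is below `alpha * L_pck`. A hedged sketch of that continuation (not the verbatim original):

import torch

def correct_keypoints_sketch(source_points, warped_points, L_pck, alpha=0.1):
    # Sketch of the PCK metric, assuming keypoint tensors of shape [1, 2, N]
    # padded with -1 and L_pck of shape [1].
    p_src = source_points[0, :]
    p_wrp = warped_points[0, :]

    # keep only valid (non-padded) keypoints
    valid = torch.ne(p_src[0, :], -1) & torch.ne(p_src[1, :], -1)
    n_pts = int(valid.sum())

    # per-keypoint Euclidean distance between source and warped points
    dists = torch.norm(p_src[:, :n_pts] - p_wrp[:, :n_pts], dim=0)

    # a keypoint is correct if it lies within alpha * L_pck of its target
    correct = dists <= L_pck[0] * alpha
    return correct.float().mean()
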
Example #3
def main(args):

    ts = time.strftime("%Y-%b-%d-%H-%M-%S", time.gmtime())

    data_config = load_config_from_json(args.data_config_path)
    model_config = load_config_from_json(args.model_config_path)

    splits = ["train", "valid"]

    datasets = OrderedDict()
    for split in splits:
        datasets[split] = ModCloth(data_config, split=split)

    # initialize model
    model = SFNet(model_config["sfnet"])
    model = model.to(device)

    print("-" * 50)
    print(model)
    print("-" * 50)
    print("Number of model parameters: {}".format(
        sum(p.numel() for p in model.parameters())))
    print("-" * 50)

    save_model_path = os.path.join(
        model_config["logging"]["save_model_path"],
        model_config["logging"]["run_name"] + ts,
    )
    os.makedirs(save_model_path)

    if model_config["logging"]["tensorboard"]:
        writer = SummaryWriter(os.path.join(save_model_path, "logs"))
        writer.add_text("model", str(model))
        writer.add_text("args", str(args))

    loss_criterion = torch.nn.CrossEntropyLoss(reduction="mean")

    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=model_config["trainer"]["optimizer"]["lr"],
        weight_decay=model_config["trainer"]["optimizer"]["weight_decay"],
    )

    step = 0
    tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor

    for epoch in range(model_config["trainer"]["num_epochs"]):

        for split in splits:

            data_loader = DataLoader(
                dataset=datasets[split],
                batch_size=model_config["trainer"]["batch_size"],
                shuffle=split == "train",
            )

            loss_tracker = defaultdict(tensor)

            # Enable/Disable Dropout
            if split == "train":
                model.train()
            else:
                model.eval()
                target_tracker = []
                pred_tracker = []

            for iteration, batch in enumerate(data_loader):

                for k, v in batch.items():
                    if torch.is_tensor(v):
                        batch[k] = to_var(v)

                # Forward pass
                logits, pred_probs = model(batch)

                # loss calculation
                loss = loss_criterion(logits, batch["fit"])

                # backward + optimization
                if split == "train":
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    step += 1

                # bookkeeping
                loss_tracker["Total Loss"] = torch.cat(
                    (loss_tracker["Total Loss"], loss.view(1)))

                if model_config["logging"]["tensorboard"]:
                    writer.add_scalar(
                        "%s/Total Loss" % split.upper(),
                        loss.item(),
                        epoch * len(data_loader) + iteration,
                    )

                if (iteration % model_config["logging"]["print_every"] == 0
                        or iteration + 1 == len(data_loader)):
                    print("{} Batch Stats {}/{}, Loss={:.2f}".format(
                        split.upper(), iteration,
                        len(data_loader) - 1, loss.item()))

                if split == "valid":
                    target_tracker.append(batch["fit"].cpu().numpy())
                    pred_tracker.append(pred_probs.cpu().data.numpy())

            print("%s Epoch %02d/%i, Mean Total Loss %9.4f" % (
                split.upper(),
                epoch + 1,
                model_config["trainer"]["num_epochs"],
                torch.mean(loss_tracker["Total Loss"]),
            ))

            if model_config["logging"]["tensorboard"]:
                writer.add_scalar(
                    "%s-Epoch/Total Loss" % split.upper(),
                    torch.mean(loss_tracker["Total Loss"]),
                    epoch,
                )

            # Save checkpoint
            if split == "train":
                checkpoint_path = os.path.join(save_model_path,
                                               "E%i.pytorch" % (epoch + 1))
                torch.save(model.state_dict(), checkpoint_path)
                print("Model saved at %s" % checkpoint_path)

        if split == "valid" and model_config["logging"]["tensorboard"]:
            # not considering the last (incomplete) batch for metrics
            target_tracker = np.stack(target_tracker[:-1]).reshape(-1)
            pred_tracker = np.stack(pred_tracker[:-1], axis=0).reshape(
                -1, model_config["sfnet"]["num_targets"])
            precision, recall, f1_score, accuracy, auc = compute_metrics(
                target_tracker, pred_tracker)

            writer.add_scalar("%s-Epoch/Precision" % split.upper(), precision,
                              epoch)
            writer.add_scalar("%s-Epoch/Recall" % split.upper(), recall, epoch)
            writer.add_scalar("%s-Epoch/F1-Score" % split.upper(), f1_score,
                              epoch)
            writer.add_scalar("%s-Epoch/Accuracy" % split.upper(), accuracy,
                              epoch)
            writer.add_scalar("%s-Epoch/AUC" % split.upper(), auc, epoch)

    # Save Model Config File
    with jsonlines.open(os.path.join(save_model_path, "config.jsonl"),
                        "w") as fout:
        fout.write(model_config)
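
Examples #1 and #3 both rely on a module-level `device` plus `to_var(...)` and `compute_metrics(...)` helpers that are not part of the snippets. A minimal sketch of what such helpers might look like (sklearn-based, with macro averaging as an assumption):

import numpy as np
import torch
from sklearn import metrics

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def to_var(x):
    # Hypothetical helper: move a batch tensor to the active device,
    # matching the to_var(v) calls in the loops above.
    return x.to(device)


def compute_metrics(targets, pred_probs):
    # Hypothetical stand-in for compute_metrics(targets, pred_probs):
    # targets are integer fit labels, pred_probs are per-class probabilities.
    preds = np.argmax(pred_probs, axis=1)
    precision = metrics.precision_score(targets, preds, average="macro")
    recall = metrics.recall_score(targets, preds, average="macro")
    f1_score = metrics.f1_score(targets, preds, average="macro")
    accuracy = metrics.accuracy_score(targets, preds)
    auc = metrics.roc_auc_score(targets, pred_probs, multi_class="ovr")
    return precision, recall, f1_score, accuracy, auc
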