# Example no. 1 (score: 0)
def main():
    """Evaluate a trained CSN compatibility model.

    Loads the best checkpoint, then reports compatibility AUC and
    fill-in-the-blank (FITB) accuracy on the test split.
    """
    # Hyperparameters
    img_size = 112
    emb_size = 64
    device = torch.device("cuda")

    # Dataloader
    transform = torchvision.transforms.Compose([
        # FIX: transforms.Scale was deprecated and removed from torchvision;
        # Resize is the drop-in replacement.
        torchvision.transforms.Resize((img_size, img_size)),
        # No-op given the (112, 112) resize above; kept for parity with the
        # original pipeline.
        torchvision.transforms.CenterCrop(112),
        # NOTE(review): random flipping during evaluation makes the reported
        # metrics slightly non-deterministic — confirm this is intended.
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ToTensor(),
        # ImageNet normalization statistics.
        torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225]),
    ])
    train_dataset = TripletDataset(root_dir="../../data/images/",
                                   data_dir="../../data/",
                                   transform=transform)
    test_auc_dataset = CategoryDataset(
        root_dir="../../data/images/",
        data_dir="../../data/",
        transform=transform,
        use_mean_img=True,
        data_file="test_no_dup_with_category_3more_name.json",
        neg_samples=True,
    )

    # Model
    tnet = CompatModel(
        emb_size,
        n_conditions=len(train_dataset.conditions) // 2,
        learnedmask=True,
        prein=False,
    )
    tnet.load_state_dict(torch.load("./csn_model_best.pth"))
    tnet = tnet.to(device)
    tnet.eval()  # inference mode: fixes dropout/batch-norm behavior
    embeddingnet = tnet.embeddingnet

    # Test
    auc = test_compatibility_auc(test_auc_dataset, embeddingnet)
    print("AUC: {:.4f}".format(auc))
    # NOTE(review): "quesitons" typo is in the callee's name — keep in sync
    # with its definition.
    fitb_accuracy = test_fitb_quesitons(test_auc_dataset, embeddingnet)
    print("Fitb Accuracy: {:.4f}".format(fitb_accuracy))
# Example no. 2 (score: 0)
)
# Evaluation loader: fixed order (shuffle=False) so results are reproducible.
test_loader = DataLoader(test_dataset, 32, shuffle=False, num_workers=4)

# Validation split for the compatibility-AUC check.
# NOTE(review): `use_mean_img`/`neg_samples` semantics inferred from their
# names — confirm against CategoryDataset.
val_auc_dataset = CategoryDataset(
    root_dir="../../data/images/",
    data_dir="../../data/",
    transform=transform,
    use_mean_img=True,
    data_file="valid_no_dup_with_category_3more_name.json",
    neg_samples=True,
)

# Model
# n_conditions is half the dataset's condition count (integer division).
tnet = CompatModel(
    emb_size,
    n_conditions=len(train_dataset.conditions) // 2,
    learnedmask=True,
    prein=False,
)
tnet = tnet.to(device)


def accuracy(dista, distb):
    """Fraction of pairs where `dista` strictly exceeds `distb` (margin 0)."""
    margin = 0
    gap = (dista - distb - margin).cpu().data
    n_correct = (gap > 0).sum().item()
    batch_size = dista.size(0)
    return n_correct / batch_size


# Hyperparameters
# Triplet ranking loss with margin 0.2; Adam optimizes only the parameters
# still requiring gradients (frozen ones are filtered out).
criterion = torch.nn.MarginRankingLoss(margin=0.2)
parameters = filter(lambda p: p.requires_grad, tnet.parameters())
optimizer = torch.optim.Adam(parameters, lr=5e-5)
        print('problem_part: {}'.format(problem_part))
        print('best substitution: {}'.format(best_img_path[problem_part]))
        print('After substitution the score is {:.4f}'.format(best_score))
        # plt.imshow(plt.imread(best_img_path[problem_part]))
        # plt.gca().axis('off')
        # plt.show()

    show_imgs(x[0], select, "revised_outfit.pdf")
    return best_score, best_img_path


if __name__ == "__main__":
    # Load model weights
    from model import CompatModel
    # NOTE(review): `train_dataset` and `device` must already exist at module
    # level for this entry point to run — not visible in this excerpt.
    model = CompatModel(embed_size=1000,
                        need_rep=True,
                        vocabulary=len(train_dataset.vocabulary)).to(device)
    model.load_state_dict(
        torch.load('./model_train_relation_vse_type_cond_scales.pth'))
    model.eval()  # inference mode

    print("=" * 80)
    # Outfit item ids; entries containing 'mean' are padding placeholders
    # (filtered out below for visualization).
    ID = [
        '178118160_1', 'bottom_mean', '199285568_4', '111355382_5',
        '209432387_4'
    ]
    x = loadimg_from_id(ID).to(device)
    # kick out the mean images for padding the sequence when making visualization
    select = [i for i, l in enumerate(ID) if 'mean' not in l]

    print("Step 1: show images in an outfit...")
device = torch.device("cuda")

# Dataloader
# LSTM-style batching: variable-length outfits collated by lstm_collate_fn.
train_dataset, train_loader, val_dataset, val_loader, test_dataset, test_loader = prepare_dataloaders(
    root_dir="../../data/images",
    data_dir="../../data",
    img_size=299,
    batch_size=12,
    use_mean_img=False,
    neg_samples=False,
    collate_fn=lstm_collate_fn,
)

# Model
# NOTE(review): `emb_size` must be defined earlier — not visible in this excerpt.
model = CompatModel(emb_size=emb_size,
                    need_rep=True,
                    vocabulary=len(train_dataset.vocabulary))
# BUG FIX: was `mode = model.to(device)`, which bound the CUDA copy to an
# unused name and left `model` referring to the CPU module. Rebind `model`.
model = model.to(device)


# Train process
def train(model, device, train_loader, val_loader, comment):
    """Training loop for the LSTM-batched model (truncated in this excerpt).

    NOTE(review): `epochs` is a free variable here — it must be defined in an
    enclosing scope before this function runs.
    """
    optimizer = torch.optim.SGD(model.parameters(), lr=2e-1, momentum=0.9)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.5)
    for epoch in range(1, epochs + 1):
        # Train phase
        total_loss = 0
        # NOTE(review): calling scheduler.step() before the epoch's optimizer
        # updates is the pre-1.1 PyTorch ordering; newer releases expect it
        # after them.
        scheduler.step()
        model.train()
        for batch_num, input_data in enumerate(train_loader, 1):
            lengths, images, names, offsets, set_ids, labels, is_compat = input_data
import torchvision
from sklearn import metrics
from torchvision import models

import resnet
from utils import AverageMeter, BestSaver, config_logging, prepare_dataloaders
from model import CompatModel

# Dataloader
train_dataset, train_loader, val_dataset, val_loader, test_dataset, test_loader = (
    prepare_dataloaders())

# Load pretrained weights
device = torch.device("cuda:0")
model = CompatModel(embed_size=1000,
                    need_rep=True,
                    vocabulary=len(train_dataset.vocabulary)).to(device)
model.load_state_dict(
    torch.load("./model_train_relation_vse_type_cond_scales.pth"))
criterion = nn.BCELoss()

# Compatibility AUC test
model.eval()  # inference mode
total_loss = 0
outputs = []  # accumulated model scores
targets = []  # accumulated ground-truth compatibility labels
# Loop body continues beyond this excerpt.
for batch_num, batch in enumerate(test_loader, 1):
    print("\r#{}".format(batch_num), end="", flush=True)  # progress counter
    lengths, images, names, offsets, set_ids, labels, is_compat = batch
    images = images.to(device)
    target = is_compat.float().to(device)
# Example no. 6 (score: 0)
# Logger
# NOTE(review): `comment` must be defined upstream — not visible here.
config_logging(comment)

# Dataloader
train_dataset, train_loader, val_dataset, val_loader, test_dataset, test_loader = (
    prepare_dataloaders(batch_size=16))

# Device
device = torch.device("cuda:0")

# Model
# vse_off / pe_off / mlp_layers / conv_feats are ablation switches parsed
# from CLI arguments earlier (not visible in this excerpt).
model = CompatModel(embed_size=1000,
                    need_rep=True,
                    vocabulary=len(train_dataset.vocabulary),
                    vse_off=vse_off,
                    pe_off=pe_off,
                    mlp_layers=mlp_layers,
                    conv_feats=conv_feats)


# Train process
def train(model, device, train_loader, val_loader, comment):
    """Train for 50 epochs with SGD + step decay (truncated in this excerpt)."""
    model = model.to(device)
    criterion = nn.BCELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
    # Halve the learning rate every 10 epochs.
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
    # Presumably tracks/saves the best checkpoint — verify BestSaver.
    saver = BestSaver(comment)
    epochs = 50
    for epoch in range(1, epochs + 1):
        logging.info("Train Phase, Epoch: {}".format(epoch))
import json
import sys
import torch
import re

sys.path.insert(0, "../mcn")
import torchvision.transforms as transforms
from model import CompatModel
from utils import prepare_dataloaders
from PIL import Image

# Only the train and test datasets are needed here.
train_dataset, _, _, _, test_dataset, _ = prepare_dataloaders(num_workers=1)
# Load pretrained weights
device = torch.device('cpu')
# print(len(.vocabulary)) # 2757
# Vocabulary size hard-coded to 2757 (see the probe comment above).
model = CompatModel(embed_size=1000, need_rep=True, vocabulary=2757).to(device)
model.load_state_dict(
    torch.load("../mcn/model_train_relation_vse_type_cond_scales.pth",
               map_location="cpu"))
model.eval()  # inference mode
# Freeze every parameter whose name does not contain 'fc', so only the
# fully-connected layers can receive gradients.
for name, param in model.named_parameters():
    if 'fc' not in name:
        param.requires_grad = False


def defect_detect(img, model, normalize=True):
    """Detect problematic items in an outfit (truncated in this excerpt).

    NOTE(review): `func_r` has the (module, grad_in, grad_out) signature of a
    backward hook, presumably capturing the comparison/relation matrix into
    the enclosing `relation` — confirm where the hook is registered.
    """
    # Register hook for comparison matrix
    relation = None

    def func_r(module, grad_in, grad_out):
        nonlocal relation
emb_size = 512
device = torch.device("cuda")

# Dataloader
train_dataset, train_loader, val_dataset, val_loader, test_dataset, test_loader = prepare_dataloaders(
    root_dir="../../data/images",
    data_dir="../../data",
    img_size=299,
    use_mean_img=False,
    neg_samples=False,
    collate_fn=lstm_collate_fn,
)

# Restore model parameters
model = CompatModel(emb_size=emb_size,
                    need_rep=False,
                    vocabulary=len(train_dataset.vocabulary))
model.load_state_dict(torch.load('model.pth'))
model.to(device)
model.eval()  # inference mode

# Compute feature or Load extracted feature
# Features are cached to disk so repeated runs skip CNN extraction.
if os.path.exists("test_features.pkl"):
    print("Found test_features.pkl...")
    # BUG FIX: was `pickle.load(open(...))`, which leaked the file handle;
    # the context manager closes it deterministically.
    # NOTE(review): only unpickle caches this script produced itself —
    # pickle.load on untrusted data is unsafe.
    with open('./test_features.pkl', 'rb') as f:
        test_features = pickle.load(f)
else:
    print("Extract cnn features...")
    test_features = {}
    # Extraction loop continues beyond this excerpt.
    for input_data in tqdm(test_loader):
        lengths, images, names, offsets, set_ids, labels, is_compat = input_data
        image_seqs = images.to(device)
from tqdm import tqdm, trange
import dgl

# Dataloader
# Graph batching: outfits collated into DGL graphs by graph_collate_fn.
train_dataset, train_loader, val_dataset, val_loader, test_dataset, test_loader = prepare_dataloaders(
    root_dir="../../data/images",
    data_dir="../../data",
    batch_size=12,
    collate_fn=graph_collate_fn,
    use_mean_img=False,
    num_workers=6)

# Load pretrained weights
device = torch.device("cuda:0")
model = CompatModel(embed_size=512,
                    need_rep=True,
                    vocabulary=len(train_dataset.vocabulary)).to(device)
model.load_state_dict(torch.load("./model_train.pth"))
print("Successfully load model weight...")
model.eval()  # inference mode
criterion = nn.BCELoss()

# Compatibility AUC test
total_loss = 0
outputs = []  # accumulated model scores
targets = []  # accumulated ground-truth compatibility labels
# Loop body continues beyond this excerpt.
for batch in tqdm(test_loader):
    lengths, batch_g, names, offsets, set_ids, labels, is_compat = batch
    target = is_compat.float().to(device)
    with torch.no_grad():
        # NOTE(review): calls a private method of CompatModel; `_compute_score`
        # returns (output, _, _) per this unpacking — confirm its contract.
        output, _, _ = model._compute_score(batch_g)
# Dataloader
# Graph batching: outfits collated into DGL graphs by graph_collate_fn.
train_dataset, train_loader, val_dataset, val_loader, test_dataset, test_loader = prepare_dataloaders(
    root_dir="../../data/images",
    data_dir="../../data",
    batch_size=12,
    collate_fn=graph_collate_fn,
    use_mean_img=False,
    num_workers=6)

# Device
device = torch.device("cuda:0")

# Model
model = CompatModel(embed_size=512,
                    need_rep=True,
                    vocabulary=len(train_dataset.vocabulary))


# Train process
def train(model, device, train_loader, val_loader, comment):
    """Train for 50 epochs with SGD + step decay (truncated in this excerpt)."""
    model = model.to(device)
    criterion = nn.BCELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
    # Halve the learning rate every 10 epochs.
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
    # Presumably tracks/saves the best checkpoint — verify BestSaver.
    saver = BestSaver(comment)
    epochs = 50
    for epoch in range(1, epochs + 1):
        logging.info("Train Phase, Epoch: {}".format(epoch))
        # NOTE(review): scheduler.step() before the epoch's optimizer updates
        # is the pre-1.1 PyTorch ordering; newer releases expect it after.
        scheduler.step()
        total_losses = AverageMeter()
# Example no. 11 (score: 0)
import torchvision
from sklearn import metrics
from torchvision import models

import resnet
from utils import AverageMeter, BestSaver, config_logging, prepare_dataloaders
from model import CompatModel

# Dataloader
train_dataset, train_loader, val_dataset, val_loader, test_dataset, test_loader = prepare_dataloaders(
    root_dir="../../data/images", data_dir="../../data", batch_size=12)

# Load pretrained weights
device = torch.device("cuda:0")
model = CompatModel(embed_size=1000,
                    need_rep=True,
                    vocabulary=len(train_dataset.vocabulary)).to(device)
# NOTE(review): weight loading is commented out, so the model below runs
# with randomly initialized parameters — confirm this is intentional.
# model.load_state_dict(torch.load("./model_train.pth"))
criterion = nn.BCELoss()

# Compatibility AUC test
model.eval()  # inference mode
total_loss = 0
outputs = []  # accumulated model scores
targets = []  # accumulated ground-truth compatibility labels
# Loop body continues beyond this excerpt.
for batch_num, batch in enumerate(test_loader, 1):
    print("\r#{}".format(batch_num), end="", flush=True)  # progress counter
    lengths, images, names, offsets, set_ids, labels, is_compat = batch
    images = images.to(device)
    target = is_compat.float().to(device)
    with torch.no_grad():
# Example no. 12 (score: 0)
# Ablation switches and checkpoint path from CLI arguments (argparse `args`
# is parsed upstream, not visible in this excerpt).
vse_off = args.vse_off
pe_off = args.pe_off
mlp_layers = args.mlp_layers
conv_feats = args.conv_feats
model_path = args.model_path

# Dataloader
train_dataset, train_loader, val_dataset, val_loader, test_dataset, test_loader = (
    prepare_dataloaders())

# Load pretrained weights
device = torch.device("cuda:0")
model = CompatModel(embed_size=1000,
                    need_rep=True,
                    vocabulary=len(train_dataset.vocabulary),
                    vse_off=vse_off,
                    pe_off=pe_off,
                    mlp_layers=mlp_layers,
                    conv_feats=conv_feats).to(device)
model.load_state_dict(torch.load(model_path))
criterion = nn.BCELoss()

# Compatibility AUC test
model.eval()  # inference mode
total_loss = 0
outputs = []  # accumulated model scores
targets = []  # accumulated ground-truth compatibility labels
# Loop body continues beyond this excerpt.
for batch_num, batch in enumerate(test_loader, 1):
    print("\r#{}/{}".format(batch_num, len(test_loader)), end="", flush=True)
    lengths, images, names, offsets, set_ids, labels, is_compat = batch
    images = images.to(device)