def load_test_model():
    """Build a FeatureExtractor around the dumped f_model checkpoint.

    Prints a message and returns None when no trained model file exists
    either at DUMPED_MODEL or under <DATASET_BASE>/models/.
    """
    candidates = (DUMPED_MODEL, os.path.join(DATASET_BASE, "models", DUMPED_MODEL))
    if not any(os.path.isfile(p) for p in candidates):
        print("No trained model file!")
        return
    net = f_model(model_path=DUMPED_MODEL).cuda(GPU_ID)
    net.eval()
    return FeatureExtractor(net)
def load_test_model():
    """Assemble the retrieval FeatureExtractor (main + color + pooling nets).

    Prints a message and returns None when no trained model file is found
    either at DUMPED_MODEL or under <DATASET_BASE>/models/.
    """
    local_path = DUMPED_MODEL
    fallback_path = os.path.join(DATASET_BASE, "models", DUMPED_MODEL)
    if not (os.path.isfile(local_path) or os.path.isfile(fallback_path)):
        print("No trained model file!")
        return
    nets = (
        f_model(model_path=DUMPED_MODEL).cuda(GPU_ID),
        c_model().cuda(GPU_ID),
        p_model().cuda(GPU_ID),
    )
    return FeatureExtractor(*nets)
def load_test_model(with_clsf=False):
    """Load the dumped models and wrap them in a feature extractor.

    When with_clsf is true, a FeatureExtractorWithClassification is built
    instead of the plain FeatureExtractor. Prints a message and returns
    None when no trained model file exists.
    """
    found = os.path.isfile(DUMPED_MODEL) or os.path.isfile(
        os.path.join(DATASET_BASE, "models", DUMPED_MODEL))
    if not found:
        print("No trained model file!")
        return
    main_net = f_model(model_path=DUMPED_MODEL).cuda(GPU_ID)
    color_net = c_model().cuda(GPU_ID)
    pool_net = p_model().cuda(GPU_ID)
    wrapper = FeatureExtractorWithClassification if with_clsf else FeatureExtractor
    return wrapper(main_net, color_net, pool_net)
# NOTE(review): fragment — begins mid-statement; the opening
# `train_loader = torch.utils.data.DataLoader(` lies before the visible region.
Fashion_attr_prediction(type="train", transform=data_transform_train),
    batch_size=TRAIN_BATCH_SIZE, num_workers=NUM_WORKERS, pin_memory=True
)

# Evaluation loader over the attribute-prediction test split.
test_loader = torch.utils.data.DataLoader(
    Fashion_attr_prediction(type="test", transform=data_transform_test),
    batch_size=TEST_BATCH_SIZE, num_workers=NUM_WORKERS, pin_memory=True
)

# Loader that yields triplet samples for metric learning.
triplet_loader = torch.utils.data.DataLoader(
    Fashion_attr_prediction(type="triplet", transform=data_transform_train),
    batch_size=TRIPLET_BATCH_SIZE, num_workers=NUM_WORKERS, pin_memory=True
)

# Backbone initialized from the dumped checkpoint; FREEZE_PARAM controls
# which parameters keep requires_grad.
model = f_model(freeze_param=FREEZE_PARAM, model_path=DUMPED_MODEL).cuda(GPU_ID)
# Optimize only the parameters left trainable after (optional) freezing.
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=LR, momentum=MOMENTUM)


def train(epoch):
    # One training pass combining a classification loss with a triplet loss.
    # NOTE(review): truncated — the loop body continues past the visible region.
    model.train()
    criterion_c = nn.CrossEntropyLoss()
    if ENABLE_TRIPLET_WITH_COSINE:
        criterion_t = TripletMarginLossCosine()
    else:
        criterion_t = nn.TripletMarginLoss()
    triplet_loader_iter = iter(triplet_loader)
    triplet_type = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        # Periodically evaluate on the test split.
        if batch_idx % TEST_INTERVAL == 0:
            test()
# NOTE(review): fragment — begins mid-statement; the enclosing DataLoader
# call opens before the visible region.
batch_size=cfg.TEST_BATCH_SIZE, num_workers=cfg.NUM_WORKERS, pin_memory=True
)

# Triplet loader over the attribute-prediction dataset.
triplet_loader = torch.utils.data.DataLoader(
    Fashion_attr_prediction(type="triplet", transform=data_transform_train),
    batch_size=cfg.TRIPLET_BATCH_SIZE, num_workers=cfg.NUM_WORKERS, pin_memory=True
)

# Optional in-shop triplet source, enabled via config.
if cfg.ENABLE_INSHOP_DATASET:
    triplet_in_shop_loader = torch.utils.data.DataLoader(
        Fashion_inshop(type="train", transform=data_transform_train),
        batch_size=cfg.TRIPLET_BATCH_SIZE, num_workers=cfg.NUM_WORKERS, pin_memory=True
    )

# CPU-only variant: the checkpoint-loading form below was disabled, so the
# backbone starts untrained rather than from cfg.DUMPED_MODEL.
#model = f_model(freeze_param=cfg.FREEZE_PARAM, model_path=cfg.DUMPED_MODEL).cpu()
model = f_model(freeze_param=cfg.FREEZE_PARAM).cpu()
# Optimize only the parameters left trainable after optional freezing.
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.LR, momentum=cfg.MOMENTUM)


def train(epoch):
    # One training pass; NOTE(review): truncated past the visible region.
    print("begin train")
    model.train()
    criterion_c = nn.CrossEntropyLoss()
    criterion_a = nn.MultiLabelSoftMarginLoss()
    # The cosine-margin triplet branch was disabled by the author
    # ("due to a problem I commented out these lines"); the plain
    # TripletMarginLoss below is therefore used unconditionally.
    #if cfg.ENABLE_TRIPLET_WITH_COSINE:
    #criterion_t = cfg.TripletMarginLossCosine()
    #else:
    criterion_t = nn.TripletMarginLoss()
    print("build loss")
    triplet_loader_iter = iter(triplet_loader)
# -*- coding: utf-8 -*- import torch import os from myconfig import cfg #from utils import FeatureExtractor,data_transform_test from utils import * from torch.autograd import Variable from data import Fashion_attr_prediction, Fashion_inshop from net import f_model, c_model, p_model import numpy as np print(cfg.DUMPED_MODEL) main_model = f_model(model_path=cfg.DUMPED_MODEL).cpu() color_model = c_model().cpu() pooling_model = p_model().cpu() extractor = FeatureExtractor(main_model, color_model, pooling_model) def dump_dataset(loader, deep_feats, color_feats, labels): for batch_idx, (data, data_path) in enumerate(loader): data = Variable(data).cpu() deep_feat, color_feat = extractor(data) for i in range(len(data_path)): path = data_path[i] feature_n = deep_feat[i].squeeze() color_feature_n = color_feat[i] # dump_feature(feature, path) deep_feats.append(feature_n) color_feats.append(color_feature_n) labels.append(path)
# NOTE(review): fragment — begins mid-statement (tail of a DataLoader call
# opened before the visible region) and is truncated inside train() at the end.
pin_memory=True)

# Triplet loader over the attribute-prediction dataset.
triplet_loader = torch.utils.data.DataLoader(Fashion_attr_prediction(
    type="triplet", transform=data_transform_train),
    batch_size=cfg.TRIPLET_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
    pin_memory=True)

# Optional in-shop triplet source, enabled via config.
if cfg.ENABLE_INSHOP_DATASET:
    triplet_in_shop_loader = torch.utils.data.DataLoader(
        Fashion_inshop(type="train", transform=data_transform_train),
        batch_size=cfg.TRIPLET_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
        pin_memory=True)

# GPU backbone initialized from the dumped checkpoint.
model = f_model(freeze_param=cfg.FREEZE_PARAM, model_path=cfg.DUMPED_MODEL).cuda(cfg.GPU_ID)
# Optimize only the parameters left trainable after optional freezing.
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.LR, momentum=cfg.MOMENTUM)


def train(epoch):
    # One training pass; NOTE(review): truncated past the visible region.
    model.train()
    criterion_c = nn.CrossEntropyLoss()
    if cfg.ENABLE_TRIPLET_WITH_COSINE:
        # NOTE(review): other variants in this project call
        # TripletMarginLossCosine() directly; accessing it via `cfg.` looks
        # suspicious — confirm it is actually exposed on the config object.
        criterion_t = cfg.TripletMarginLossCosine()
    else:
        criterion_t = nn.TripletMarginLoss()
    triplet_loader_iter = iter(triplet_loader)
    triplet_type = 0
    if cfg.ENABLE_INSHOP_DATASET:
# -*- coding: utf-8 -*- import os from config import * from utils import * from torch.autograd import Variable from data import Fashion_attr_prediction, Fashion_inshop from net import f_model, c_model, p_model main_model = f_model(model_path=DUMPED_MODEL).cuda(GPU_ID) color_model = c_model().cuda(GPU_ID) pooling_model = p_model().cuda(GPU_ID) extractor = FeatureExtractor(main_model, color_model, pooling_model) def dump_dataset(loader, deep_feats, color_feats, labels): for batch_idx, (data, data_path) in enumerate(loader): data = Variable(data).cuda(GPU_ID) deep_feat, color_feat = extractor(data) for i in range(len(data_path)): path = data_path[i] feature_n = deep_feat[i].squeeze() color_feature_n = color_feat[i] # dump_feature(feature, path) deep_feats.append(feature_n) color_feats.append(color_feature_n) labels.append(path) if batch_idx % LOG_INTERVAL == 0: print("{} / {}".format(batch_idx * EXTRACT_BATCH_SIZE,
# Triplet loader over the attribute-prediction dataset.
triplet_loader = torch.utils.data.DataLoader(Fashion_attr_prediction(
    type="triplet", transform=data_transform_train),
    batch_size=cfg.TRIPLET_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
    pin_memory=True)

# Optional in-shop triplet source, enabled via config.
if cfg.ENABLE_INSHOP_DATASET:
    triplet_in_shop_loader = torch.utils.data.DataLoader(
        Fashion_inshop(type="train", transform=data_transform_train),
        batch_size=cfg.TRIPLET_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
        pin_memory=True)

# The checkpoint-loading variant below was disabled, so the backbone starts
# untrained rather than from cfg.DUMPED_MODEL.
#model = f_model(freeze_param=cfg.FREEZE_PARAM, model_path=cfg.DUMPED_MODEL).cuda(cfg.GPU_ID)
model = f_model(freeze_param=cfg.FREEZE_PARAM).cuda(cfg.GPU_ID)
# Optimize only the parameters left trainable after optional freezing.
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.LR, momentum=cfg.MOMENTUM)


def train(epoch):
    # One training pass; NOTE(review): truncated past the visible region.
    print("begin train")
    model.train()
    criterion_c = nn.CrossEntropyLoss()
    criterion_a = nn.MultiLabelSoftMarginLoss()
    if cfg.ENABLE_TRIPLET_WITH_COSINE:
        # NOTE(review): other variants call TripletMarginLossCosine() directly
        # (from utils); accessing it via `cfg.` looks suspicious — confirm it
        # exists on the config object.
        criterion_t = cfg.TripletMarginLossCosine()
    else:
        criterion_t = nn.TripletMarginLoss()
    print("build loss")
# -*- coding: utf-8 -*- import torch import os from myconfig import cfg #from utils import FeatureExtractor,data_transform_test from utils import * from torch.autograd import Variable from data import Fashion_attr_prediction, Fashion_inshop from net import f_model, c_model, p_model import numpy as np print(cfg.DUMPED_MODEL) main_model = f_model(model_path=cfg.DUMPED_MODEL).cuda(cfg.GPU_ID) color_model = c_model().cuda(cfg.GPU_ID) pooling_model = p_model().cuda(cfg.GPU_ID) extractor = FeatureExtractor(main_model, color_model, pooling_model) def dump_dataset(loader, deep_feats, color_feats, labels): for batch_idx, (data, data_path) in enumerate(loader): data = Variable(data).cuda(cfg.GPU_ID) deep_feat, color_feat = extractor(data) for i in range(len(data_path)): path = data_path[i] feature_n = deep_feat[i].squeeze() color_feature_n = color_feat[i] # dump_feature(feature, path) deep_feats.append(feature_n) color_feats.append(color_feature_n) labels.append(path)