def load_test_model():
    if not os.path.isfile(DUMPED_MODEL) and not os.path.isfile(os.path.join(DATASET_BASE, "models", DUMPED_MODEL)):
        print("No trained model file!")
        return
    main_model = f_model(model_path=DUMPED_MODEL).cuda(GPU_ID)
    color_model = c_model().cuda(GPU_ID)
    pooling_model = p_model().cuda(GPU_ID)
    extractor = FeatureExtractor(main_model, color_model, pooling_model)
    return extractor
def load_test_model(with_clsf=False):
    # Require the dumped weights either at DUMPED_MODEL or under DATASET_BASE/models/.
    if not os.path.isfile(DUMPED_MODEL) and not os.path.isfile(
            os.path.join(DATASET_BASE, "models", DUMPED_MODEL)):
        print("No trained model file!")
        return
    main_model = f_model(model_path=DUMPED_MODEL).cuda(GPU_ID)
    color_model = c_model().cuda(GPU_ID)
    pooling_model = p_model().cuda(GPU_ID)
    # Optionally wrap the models in the classification-aware extractor.
    if not with_clsf:
        extractor = FeatureExtractor(main_model, color_model, pooling_model)
    else:
        extractor = FeatureExtractorWithClassification(main_model, color_model, pooling_model)
    return extractor
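# Usage sketch (an assumption, not part of the original code): how the extractor returned by
# load_test_model() might be applied to a single query image. data_transform_test is assumed
# to be the test-time transform exported by utils; the helper name and image path argument
# below are purely illustrative.
from PIL import Image
from torch.autograd import Variable


def extract_single_image(image_path):
    # Hypothetical helper: load one image and run it through the deep + color feature extractor.
    extractor = load_test_model(with_clsf=False)
    if extractor is None:
        return None
    img = Image.open(image_path).convert("RGB")
    x = data_transform_test(img).unsqueeze(0).cuda(GPU_ID)  # 1 x C x H x W batch on the GPU
    deep_feat, color_feat = extractor(Variable(x))
    return deep_feat, color_feat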
# -*- coding: utf-8 -*-
# CPU variant of the feature-dumping script, configured through myconfig.cfg.
import torch
import os
from myconfig import cfg
# from utils import FeatureExtractor, data_transform_test
from utils import *
from torch.autograd import Variable
from data import Fashion_attr_prediction, Fashion_inshop
from net import f_model, c_model, p_model
import numpy as np

print(cfg.DUMPED_MODEL)

main_model = f_model(model_path=cfg.DUMPED_MODEL).cpu()
color_model = c_model().cpu()
pooling_model = p_model().cpu()
extractor = FeatureExtractor(main_model, color_model, pooling_model)


def dump_dataset(loader, deep_feats, color_feats, labels):
    for batch_idx, (data, data_path) in enumerate(loader):
        data = Variable(data).cpu()
        deep_feat, color_feat = extractor(data)
        for i in range(len(data_path)):
            path = data_path[i]
            feature_n = deep_feat[i].squeeze()
            color_feature_n = color_feat[i]
            # dump_feature(feature, path)
            deep_feats.append(feature_n)
            color_feats.append(color_feature_n)
            labels.append(path)
# -*- coding: utf-8 -*-
# GPU variant of the feature-dumping script, configured through the flat config module.
import os
from config import *
from utils import *
from torch.autograd import Variable
from data import Fashion_attr_prediction, Fashion_inshop
from net import f_model, c_model, p_model

main_model = f_model(model_path=DUMPED_MODEL).cuda(GPU_ID)
color_model = c_model().cuda(GPU_ID)
pooling_model = p_model().cuda(GPU_ID)
extractor = FeatureExtractor(main_model, color_model, pooling_model)


def dump_dataset(loader, deep_feats, color_feats, labels):
    for batch_idx, (data, data_path) in enumerate(loader):
        data = Variable(data).cuda(GPU_ID)
        deep_feat, color_feat = extractor(data)
        for i in range(len(data_path)):
            path = data_path[i]
            feature_n = deep_feat[i].squeeze()
            color_feature_n = color_feat[i]
            # dump_feature(feature, path)
            deep_feats.append(feature_n)
            color_feats.append(color_feature_n)
            labels.append(path)
        if batch_idx % LOG_INTERVAL == 0:
            # Progress log: samples processed so far out of the full dataset.
            print("{} / {}".format(batch_idx * EXTRACT_BATCH_SIZE, len(loader.dataset)))
# -*- coding: utf-8 -*-
# GPU variant of the feature-dumping script, configured through myconfig.cfg.
import torch
import os
from myconfig import cfg
# from utils import FeatureExtractor, data_transform_test
from utils import *
from torch.autograd import Variable
from data import Fashion_attr_prediction, Fashion_inshop
from net import f_model, c_model, p_model
import numpy as np

print(cfg.DUMPED_MODEL)

main_model = f_model(model_path=cfg.DUMPED_MODEL).cuda(cfg.GPU_ID)
color_model = c_model().cuda(cfg.GPU_ID)
pooling_model = p_model().cuda(cfg.GPU_ID)
extractor = FeatureExtractor(main_model, color_model, pooling_model)


def dump_dataset(loader, deep_feats, color_feats, labels):
    for batch_idx, (data, data_path) in enumerate(loader):
        data = Variable(data).cuda(cfg.GPU_ID)
        deep_feat, color_feat = extractor(data)
        for i in range(len(data_path)):
            path = data_path[i]
            feature_n = deep_feat[i].squeeze()
            color_feature_n = color_feat[i]
            # dump_feature(feature, path)
            deep_feats.append(feature_n)
            color_feats.append(color_feature_n)
            labels.append(path)
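# Usage sketch (an assumption, not from the original scripts): wiring dump_dataset() to a
# DataLoader and serializing the collected features. The Fashion_attr_prediction constructor
# arguments, the data_transform_test transform, the batch size, the worker count, and the
# output file name are all assumptions made for illustration.
import torch.utils.data


def dump_all_features(batch_size=32, out_path="all_feats.npz"):
    loader = torch.utils.data.DataLoader(
        Fashion_attr_prediction(type="all", transform=data_transform_test),  # assumed dataset signature
        batch_size=batch_size, num_workers=4, shuffle=False)
    deep_feats, color_feats, labels = [], [], []
    dump_dataset(loader, deep_feats, color_feats, labels)

    def to_numpy(x):
        # The extractor may return Variables, tensors, or numpy arrays; convert defensively.
        if isinstance(x, np.ndarray):
            return x
        if hasattr(x, "cpu"):
            x = x.cpu()
        if hasattr(x, "data"):
            x = x.data
        if hasattr(x, "numpy"):
            x = x.numpy()
        return np.asarray(x)

    np.savez(out_path,
             deep=np.stack([to_numpy(f) for f in deep_feats]),
             color=np.stack([to_numpy(c) for c in color_feats]),
             labels=np.array(labels))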