def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.checkpoint is not None:
        cfg.load_from = args.checkpoint

    # init distributed env first
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed test: {}'.format(distributed))

    # data loader
    dataset = build_dataset(cfg.data.test)
    print('dataset loaded')

    # create model and load checkpoint
    model = build_fashion_recommender(cfg.model)
    load_checkpoint(model, cfg.load_from, map_location='cpu')
    print('load checkpoint from: {}'.format(cfg.load_from))

    # pass the computed distributed flag and logger instead of hard-coded values
    test_fashion_recommender(
        model,
        dataset,
        cfg,
        distributed=distributed,
        validate=False,
        logger=logger)
def main():
    seed = 0
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    args = parse_args()
    cfg = Config.fromfile(args.config)

    model = build_retriever(cfg.model)
    load_checkpoint(model, args.checkpoint)
    print('load checkpoint from {}'.format(args.checkpoint))
    if args.use_cuda:
        model.cuda()
    model.eval()

    # embed the query image
    img_tensor = get_img_tensor(args.input, args.use_cuda)
    query_feat = model(img_tensor, landmark=None, return_loss=False)
    query_feat = query_feat.data.cpu().numpy()

    # embed the gallery and retrieve the closest items
    gallery_set = build_dataset(cfg.data.gallery)
    gallery_embeds = _process_embeds(gallery_set, model, cfg)

    retriever = ClothesRetriever(cfg.data.gallery.img_file, cfg.data_root,
                                 cfg.data.gallery.img_path)
    retriever.show_retrieved_images(query_feat, gallery_embeds)
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    # read and resize the query image, then convert it to a tensor
    img = cv2.imread(args.input, -1)
    img = cv2.resize(img, (224, 224))
    img_tensor = img_to_tensor(img, squeeze=True, cuda=args.use_cuda)

    model = build_retriever(cfg.model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.use_cuda:
        model.cuda()
    model.eval()

    query_embed = model(img_tensor, landmark=None, return_loss=False)
    query_embed = query_embed.data.cpu().numpy()

    gallery_set = build_dataset(cfg.data.gallery)
    gallery_embeds = _process_embeds(gallery_set, model, cfg)

    retriever = ClothesRetriever(cfg.data.gallery.img_file, cfg.data_root,
                                 cfg.data.gallery.img_path)
    retriever.show_retrieved_images(query_embed, gallery_embeds)
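# The _process_embeds helper called in the retrieval scripts above is not
# shown here. A minimal sketch of what it could look like, assuming the
# gallery dataset yields dicts with an 'img' tensor (and optionally a
# 'landmark' tensor) and using a plain torch DataLoader rather than whatever
# loader the original helper builds:
import torch
from torch.utils.data import DataLoader


def _process_embeds(dataset, model, cfg, use_cuda=True):
    loader = DataLoader(dataset, batch_size=cfg.data.imgs_per_gpu,
                        shuffle=False)
    embeds = []
    with torch.no_grad():
        for data in loader:
            img = data['img'].cuda() if use_cuda else data['img']
            # same forward signature as the query calls above
            embed = model(img, landmark=data.get('landmark'),
                          return_loss=False)
            embeds.append(embed.data.cpu())
    return torch.cat(embeds).numpy()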
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    if args.data_type == 'train':
        image_set = build_dataset(cfg.data.train)
    elif args.data_type == 'query':
        image_set = build_dataset(cfg.data.query)
    elif args.data_type == 'gallery':
        image_set = build_dataset(cfg.data.gallery)
    else:
        raise ValueError(
            'Only train/query/gallery datasets are supported so far')

    if args.checkpoint is not None:
        cfg.load_from = args.checkpoint

    extract_features(image_set, cfg, args.save_dir)
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir

    # init distributed env first
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    if args.checkpoint is not None:
        cfg.load_from = args.checkpoint

    # init logger
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed test: {}'.format(distributed))

    # data loader
    cfg.data.query.find_three = False
    cfg.data.gallery.find_three = False
    query_set = build_dataset(cfg.data.query)
    gallery_set = build_dataset(cfg.data.gallery)
    print('dataset loaded')

    # build model and load checkpoint
    model = build_retriever(cfg.model)
    print('model built')
    load_checkpoint(model, cfg.load_from)
    print('load checkpoint from: {}'.format(cfg.load_from))

    # test
    test_retriever(
        model,
        query_set,
        gallery_set,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    cfg.load_from = args.checkpoint

    # this demo runs on a single process
    distributed = False

    # init logger
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed test: {}'.format(distributed))

    # create model and load checkpoint
    model = build_fashion_recommender(cfg.model)
    load_checkpoint(model, cfg.load_from, map_location='cpu')
    print('load checkpoint from: {}'.format(cfg.load_from))
    if args.use_cuda:
        model.cuda()
    model.eval()

    # prepare input data: one tensor per image in the input directory
    img_tensors = []
    item_ids = []
    for dirpath, dirnames, fns in os.walk(args.input_dir):
        for imgname in fns:
            item_ids.append(imgname.split('.')[0])
            tensor = get_img_tensor(
                os.path.join(dirpath, imgname), args.use_cuda)
            img_tensors.append(tensor)
    img_tensors = torch.cat(img_tensors)

    # compute embeddings
    embeds = []
    with torch.no_grad():
        embed = model(img_tensors, return_loss=False)
        embeds.append(embed.data.cpu())
    embeds = torch.cat(embeds)

    # the metric branch only exists when the model is wrapped (e.g. DataParallel)
    try:
        metric = model.module.triplet_net.metric_branch
    except AttributeError:
        metric = None

    # get compatibility score; so far only images from the Polyvore dataset
    # are supported
    dataset = build_dataset(cfg.data.test)
    score = dataset.get_single_compatibility_score(embeds, item_ids, metric,
                                                   args.use_cuda)
    print('Compatibility score: {:.3f}'.format(score))
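# get_img_tensor is also assumed above rather than defined. A hypothetical
# sketch: load one image, resize it to the 224x224 input used in these demos,
# normalize with ImageNet statistics, and return a batched tensor. The real
# helper's preprocessing may differ.
from PIL import Image
from torchvision import transforms


def get_img_tensor(img_path, use_cuda):
    img = Image.open(img_path).convert('RGB')
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    tensor = transform(img).unsqueeze(0)  # add batch dimension
    return tensor.cuda() if use_cuda else tensor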
def retrieve():
    file = request.files.get('image')
    img_tensor = get_img_tensor(file, True)

    # embed the uploaded query image
    query_feat = model_ret(img_tensor, landmark=None, return_loss=False)
    query_feat = query_feat.data.cpu().numpy()

    # embed the gallery and retrieve the closest items
    gallery_set = build_dataset(cfg_ret.data.gallery)
    gallery_embeds = _process_embeds(gallery_set, model_ret, cfg_ret)

    retriever = ClothesRetriever(cfg_ret.data.gallery.img_file,
                                 cfg_ret.data_root,
                                 cfg_ret.data.gallery.img_path)

    result = retriever.show_retrieved_images(query_feat, gallery_embeds)
    return jsonify({'paths': result})
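# A hypothetical client for the Flask route above, assuming the handler is
# registered at '/retrieve' and the server listens on localhost:5000; the
# path, port, and query image filename are guesses, not taken from the
# original code.
import requests

with open('demo/query.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/retrieve',
                         files={'image': f})
print(resp.json()['paths'])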
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from

    # init distributed env
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # build retriever to extract embeddings
    model = build_retriever(cfg.model)
    print('model built')
    if cfg.init_weights_from is not None:
        model = init_weights_from(cfg.init_weights_from, model)
        print('Initialize model weights from {}'.format(
            cfg.init_weights_from))

    # data loader
    dataset = build_dataset(cfg.data.train)
    print('dataset loaded')

    # train
    train_retriever(
        model,
        dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
def main():
    seed = 0
    torch.manual_seed(seed)

    args = parse_args()
    if args.use_cuda and torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    cfg = Config.fromfile(args.config)

    # load the checkpoint onto CPU first, then move the model to GPU only
    # when CUDA is requested
    model = build_retriever(cfg.model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    print('load checkpoint from {}'.format(args.checkpoint))
    if args.use_cuda:
        model.cuda()
    model.eval()

    # embed the query image
    img_tensor = get_img_tensor(args.input, args.use_cuda)
    query_feat = model(img_tensor, landmark=None, return_loss=False)
    query_feat = query_feat.data.cpu().numpy()

    # embed the gallery and retrieve the closest items
    gallery_set = build_dataset(cfg.data.gallery)
    gallery_embeds = _process_embeds(gallery_set, model, cfg)

    retriever = ClothesRetriever(cfg.data.gallery.img_file, cfg.data_root,
                                 cfg.data.gallery.img_path)

    results = retriever.show_retrieved_images(query_feat, gallery_embeds)
    for result in results:
        print(result)
import sys

sys.path.insert(0, "/home/grupo08/M5/MetricLearning/bielski")
from networks import TripletNet, Vgg16L2, EmbeddingNet
from losses import TripletLoss
from trainer import fit

# Load the config from the custom file
# cfg_fname = 'configs/retriever_in_shop/triplet_hnm.py'  # Triplet network with hard negative mining
cfg_fname = 'configs/retriever_in_shop/triplet_vanilla.py'  # Triplet network
cfg = Config.fromfile(cfg_fname)

cuda = True

# Data loader
train_set = build_dataset(cfg.data.train)
query_set = build_dataset(cfg.data.query)
print('datasets loaded')

train_loader = torch.utils.data.DataLoader(
    train_set, batch_size=cfg.data.imgs_per_gpu, shuffle=True)
query_loader = torch.utils.data.DataLoader(
    query_set, batch_size=cfg.data.imgs_per_gpu, shuffle=True)
print('dataloader created')

# Build model
backbone_model = Vgg16L2(num_dim=128)
model = TripletNet(backbone_model)
model.cuda()
seed = 0
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)

cfg = Config.fromfile(args.config)

model = build_retriever(cfg.model)
load_checkpoint(model, args.checkpoint)
print('load checkpoint from {}'.format(args.checkpoint))
if args.use_cuda:
    model = model.cuda()
model.eval()

# precompute gallery embeddings before serving requests
gallery_set = build_dataset(cfg.data.gallery)
gallery_embeds = _process_embeds(gallery_set, model, cfg)

retriever = ClothesRetriever(cfg.data.gallery.img_file, cfg.data_root,
                             cfg.data.gallery.img_path)

model = model.cpu()
torch.cuda.empty_cache()

# Flask
# ------------------------------------
print("MMFashion server started!")
app.debug = args.debug
if args.enable_threaded:
    app.run(host=args.host, port=args.port, threaded=True)
else:
    app.run(host=args.host, port=args.port, threaded=False)