def save_state(self, conf, accuracy, to_save_folder=False, extra=None, model_only=False):
    if to_save_folder:
        save_path = conf.save_path
    else:
        save_path = conf.model_path
    lz.mkdir_p(save_path, delete=False)
    torch.save(
        self.model.state_dict(),
        save_path / ('model_{}_accuracy:{}_step:{}_{}.pth'.format(
            get_time(), accuracy, self.step, extra)))
    if not model_only:
        torch.save(
            self.head.state_dict(),
            save_path / ('head_{}_accuracy:{}_step:{}_{}.pth'.format(
                get_time(), accuracy, self.step, extra)))
        torch.save(
            self.optimizer.state_dict(),
            save_path / ('optimizer_{}_accuracy:{}_step:{}_{}.pth'.format(
                get_time(), accuracy, self.step, extra)))
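# Hedged usage sketch (not part of the original file): save_state is typically called
# right after an evaluation pass, e.g.
#   accuracy, best_threshold, roc_curve = learner.evaluate(conf, learner.lfw, learner.lfw_issame)
#   learner.save_state(conf, accuracy, to_save_folder=False, extra='lfw')
# Here `learner` and `evaluate` are assumptions about the surrounding trainer class;
# only the save_state signature above is taken from this code.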
def crop_face(args):
    assert osp.exists(args.data_dir), "The input dir does not exist"
    root_folder_name = args.data_dir.split('/')[-1]
    dst_folder = args.data_dir.replace(root_folder_name, root_folder_name + '_OPPOFaces')
    lz.mkdir_p(dst_folder, delete=False)
    mtcnn = MTCNN()
    ind = 0
    all_img = []
    for imgfn in itertools.chain(
            glob.glob(args.data_dir + '/**/*.jpg', recursive=True),
            glob.glob(args.data_dir + '/**/*.JPEG', recursive=True)):
        ind += 1
        if ind % 10 == 0:
            print(f'proc {ind}, {imgfn}')
        dstimgfn = imgfn.replace(root_folder_name, root_folder_name + '_OPPOFaces')
        # create the per-image subdir without clobbering dst_folder (the root output dir),
        # which is still needed for the final all_imgs.pk dump below
        lz.mkdir_p(osp.dirname(dstimgfn), delete=False)
        img = cvb.read_img(imgfn)  # bgr
        img1 = Image.fromarray(img)
        face = mtcnn.align_best(img1, limit=None, min_face_size=16, imgfn=imgfn)
        face = np.asarray(face)  # bgr
        # face = cvb.bgr2rgb(face)  # rgb
        cvb.write_img(face, dstimgfn)
        all_img.append(dstimgfn)
    logging.info(f'finish crop all {ind} imgs')
    lz.msgpack_dump(all_img, dst_folder + '/' + 'all_imgs.pk')
    del mtcnn
    torch.cuda.empty_cache()
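# Hedged usage sketch (assumption, not part of the original script): this crop_face
# only reads args.data_dir, so it can be driven from a bare namespace:
#   from argparse import Namespace
#   crop_face(Namespace(data_dir='/path/to/DATASET'))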
def consumer(queue, lock):
    while True:
        imgfn, param, roi_box, dst_imgfn = queue.get()
        pts68 = [predict_68pts(param[i], roi_box[i]) for i in range(param.shape[0])]
        for img_fp, pts68_, dst in zip(imgfn, pts68, dst_imgfn):
            try:
                img_ori = cvb.read_img(img_fp)
                pts5 = to_landmark5(pts68_[:2, :].transpose())
                warped = preprocess(img_ori, landmark=pts5)
                # plt_imshow(warped, inp_mode='bgr'); plt.show()
                lz.mkdir_p(osp.dirname(dst), delete=False)
                cvb.write_img(warped, dst)
            except Exception as e:
                logging.warning(f'error occurred: {e}, please check!')
                # write a placeholder image so downstream feature extraction does not break
                cvb.write_img(np.ones((112, 112, 3), np.uint8), dst)
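# Illustrative only: a self-contained sketch of the Queue/Process pattern that
# crop_face (below) uses to feed this consumer; payloads here are dummy integers
# instead of (imgfn, param, roi_box, dst_imgfn), and the worker uses a poison pill
# to stop, which the original daemonized consumers do not need.
from multiprocessing import Process, Queue


def _toy_consumer(q):
    while True:
        item = q.get()
        if item is None:  # poison pill: stop the worker
            break
        print('consumed', item)


if __name__ == '__main__':
    q = Queue()
    workers = [Process(target=_toy_consumer, args=(q,)) for _ in range(2)]
    for w in workers:
        w.daemon = True
        w.start()
    for i in range(4):
        q.put(i)
    for _ in workers:
        q.put(None)
    for w in workers:
        w.join()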
def run(_):
    cfgs = lz.load_cfg('./cfgs/single_ohnm.py')
    procs = []
    for args in cfgs.cfgs:
        if args.loss != 'trivid':
            print(f'skip {args.loss} {args.logs_dir}')
            continue
        if args.log_at is None:
            args.log_at = np.concatenate([
                range(0, 640, 31),
                range(args.epochs - 8, args.epochs, 1)
            ])
        args.logs_dir = lz.work_path + 'reid/work/' + args.logs_dir
        if osp.exists(args.logs_dir) and osp.exists(args.logs_dir + '/checkpoint.64.pth'):
            print(os.listdir(args.logs_dir))
            continue
        if not args.gpu_fix:
            args.gpu = lz.get_dev(n=len(args.gpu),
                                  ok=args.gpu_range,
                                  mem_thresh=[0.09, 0.09],
                                  sleep=32.3)
        lz.logging.info(f'use gpu {args.gpu}')
        # args.batch_size = 16
        # args.gpu = (3, )
        # args.epochs = 1
        # args.logs_dir += '.bak'
        if isinstance(args.gpu, int):
            args.gpu = [args.gpu]
        if not args.evaluate and not args.vis:
            assert args.logs_dir != args.resume
            lz.mkdir_p(args.logs_dir, delete=True)
            lz.pickle_dump(args, args.logs_dir + '/conf.pkl')
        if cfgs.no_proc:
            main(args)
        else:
            proc = mp.Process(target=main, args=(args,))
            proc.start()
            lz.logging.info('next')
            time.sleep(random.randint(39, 90))
            if not cfgs.parallel:
                proc.join()
            else:
                procs.append(proc)
    if cfgs.parallel:
        for proc in procs:
            proc.join()
def run(_):
    cfgs = lz.load_cfg('./cfgs/single_ohnm.py')
    procs = []
    for args in cfgs.cfgs:
        if args.loss != 'tcx':
            print(f'skip {args.loss} {args.logs_dir}')
            continue
        # args.log_at = np.concatenate([
        #     args.log_at,
        #     range(args.epochs - 8, args.epochs, 1)
        # ])
        args.logs_dir = 'work/' + args.logs_dir
        if not args.gpu_fix:
            args.gpu = lz.get_dev(n=len(args.gpu),
                                  ok=args.gpu_range,
                                  mem=[0.12, 0.07],
                                  sleep=32.3)
        lz.logging.info(f'use gpu {args.gpu}')
        # args.batch_size = 16
        # args.gpu = (3, )
        # args.epochs = 1
        # args.logs_dir += '.bak'
        if isinstance(args.gpu, int):
            args.gpu = [args.gpu]
        if not args.evaluate:
            assert args.logs_dir != args.resume
            lz.mkdir_p(args.logs_dir, delete=True)
            lz.pickle_dump(args, args.logs_dir + '/conf.pkl')
        # main(args)
        proc = mp.Process(target=main, args=(args,))
        proc.start()
        lz.logging.info('next')
        time.sleep(random.randint(39, 90))
        procs.append(proc)
    for proc in procs:
        proc.join()
def do_align_by_list(inps):
    from lz import mkdir_p, cvb
    ind, tid, sid, fn, x, y, w, h, mtcnn = inps
    dst_dir = f'{dst}/{sid}/{tid}'
    dst_fn = f'{dst}/{sid}/{tid}/{ind}.png'
    if osp.exists(dst_fn):
        return
    # logging.info(f'{ind} start')
    x, y, w, h = list(map(int, [x, y, w, h]))
    imgp = img_path + fn
    assert osp.exists(imgp), imgp
    img = cvb.read_img(imgp)
    face = img[y:y + h, x:x + w, :]
    face_ali = alignface(face, mtcnn)
    _ = mkdir_p(dst_dir, delete=False)
    _ = cvb.write_img(face_ali, dst_fn)
def do_align_one(ind, val):
    tid = val['TEMPLATE_ID']
    sid = val['SUBJECT_ID']
    fn = val['FILENAME']
    dst_dir = f'{dst}/{sid}/{tid}'
    dst_fn = f'{dst}/{sid}/{tid}/{ind}.png'
    # if osp.exists(dst_fn): return
    x, y, w, h = val.iloc[-4:]
    x, y, w, h = list(map(int, [x, y, w, h]))
    imgp = img_path + fn
    assert osp.exists(imgp), imgp
    img = cvb.read_img(imgp)
    assert img is not None, imgp
    face = img[y:y + h, x:x + w, :]
    face_ali = alignface(face, mtcnn, img)
    _ = mkdir_p(dst_dir, delete=False)
    _ = cvb.write_img(face_ali, dst_fn)
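# Hedged usage sketch: do_align_one expects an index `ind` and a DataFrame row `val`
# whose last four columns are the face bounding box (x, y, w, h). Assuming a pandas
# DataFrame `df` of IJB-style metadata with columns TEMPLATE_ID, SUBJECT_ID, FILENAME
# followed by the box columns (column order is an assumption), it could be driven as:
#   for ind, val in df.iterrows():
#       do_align_one(ind, val)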
def crop_face(args):
    for k, v in default_args.items():
        setattr(args, k, v)
    assert osp.exists(args.data_dir), "The input dir does not exist"
    root_folder_name = args.data_dir.split('/')[-1]
    src_folder = args.data_dir
    dst_folder = args.data_dir.replace(root_folder_name, root_folder_name + '_OPPOFaces')
    lz.mkdir_p(dst_folder, delete=False)
    ds = TestData(src_folder)
    loader = torch.utils.data.DataLoader(ds, batch_size=args.batch_size,
                                         num_workers=args.num_workers,
                                         shuffle=False,
                                         pin_memory=True,
                                         drop_last=False)
    # 1. load pre-trained model
    checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
    arch = 'mobilenet_1'
    checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(mobilenet_v1, arch)(num_classes=62)  # 62 = 12(pose) + 40(shape) + 10(expression)
    model_dict = model.state_dict()
    # because the model was trained on multiple gpus, the 'module.' prefix should be removed
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    if args.mode == 'gpu':
        cudnn.benchmark = True
        model = model.cuda()
    model.eval()
    # 2. start consumer processes for face cropping
    queue = Queue()
    lock = Lock()
    consumers = []
    for i in range(args.num_consumers):
        p = Process(target=consumer, args=(queue, lock))
        p.daemon = True
        consumers.append(p)
    for c in consumers:
        c.start()
    # 3. forward
    ttl_nimgs = 0
    ttl_imgs = []
    data_meter = lz.AverageMeter()
    model_meter = lz.AverageMeter()
    post_meter = lz.AverageMeter()
    lz.timer.since_last_check('start crop face')
    for ind, data in enumerate(loader):
        data_meter.update(lz.timer.since_last_check(verbose=False))
        if (data['finish'] == 1).all().item():
            logging.info('finish')
            break
        if ind % 10 == 0:
            logging.info(
                f'proc batch {ind}, data time: {data_meter.avg:.2f}, model: {model_meter.avg:.2f}, post: {post_meter.avg:.2f}')
        mask = data['finish'] == 0
        input = data['img'][mask]
        input_np = input.numpy()
        roi_box = data['roi_box'][mask].numpy()
        imgfn = np.asarray(data['imgfn'])[mask.numpy().astype(bool)]
        dst_imgfn = [img_fp.replace(root_folder_name, root_folder_name + '_OPPOFaces') for img_fp in imgfn]
        ttl_imgs.extend(dst_imgfn)
        ttl_nimgs += mask.sum().item()
        with torch.no_grad():
            if args.mode == 'gpu':
                input = input.cuda()
            param = model(input)
            param = param.squeeze().cpu().numpy().astype(np.float32)
        model_meter.update(lz.timer.since_last_check(verbose=False))
        queue.put((imgfn, param, roi_box, dst_imgfn))
        # pts68 = [predict_68pts(param[i], roi_box[i]) for i in range(param.shape[0])]
        # pts68_proc = [predict_68pts(param[i], [0, 0, STD_SIZE, STD_SIZE]) for i in range(param.shape[0])]
        # for img_fp, pts68_, pts68_proc_, img_, dst in zip(imgfn, pts68, pts68_proc, input_np, dst_imgfn):
        #     ## this may need opt to async read write
        #     img_ori = cvb.read_img(img_fp)
        #     pts5 = to_landmark5(pts68_[:2, :].transpose())
        #     warped = preprocess(img_ori, landmark=pts5)
        #     # plt_imshow(warped, inp_mode='bgr'); plt.show()
        #     lz.mkdir_p(osp.dirname(dst), delete=False)
        #     cvb.write_img(warped, dst)
        #     ## this may cause black margin
        #     # pts5 = to_landmark5(pts68_proc_[:2, :].transpose())
        #     # warped = preprocess(to_img(img_), landmark=pts5)
        #     # # plt_imshow(warped, inp_mode='bgr'); plt.show()
        #     # dst = img_fp.replace(root_folder_name, root_folder_name + '_OPPOFaces')
        #     # cvb.write_img(warped, dst)
        #     if args.dump_res:
        #         img_ori = cvb.read_img(img_fp)
        #         pts_res = [pts68_]
        #         dst = img_fp.replace(root_folder_name, root_folder_name + '_kpts.demo')
        #         lz.mkdir_p(osp.dirname(dst), delete=False)
        #         draw_landmarks(img_ori, pts_res,
        #                        wfp=dst,
        #                        show_flg=args.show_flg)
        post_meter.update(lz.timer.since_last_check(verbose=False))
    lz.msgpack_dump(ttl_imgs, dst_folder + '/' + 'all_imgs.pk')
    del model, input
    torch.cuda.empty_cache()
    while not queue.empty():
        time.sleep(1)
def __init__(self, conf, inference=False, need_loader=True):
    print(conf)
    if conf.use_mobilfacenet:
        # self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
        self.model = torch.nn.DataParallel(
            MobileFaceNet(conf.embedding_size)).cuda()
        print('MobileFaceNet model generated')
    else:
        # self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
        self.model = torch.nn.DataParallel(
            Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode)).cuda()
        print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
    if not inference:
        self.milestones = conf.milestones
        if need_loader:
            # self.loader, self.class_num = get_train_loader(conf)
            self.dataset = Dataset2()
            self.loader = DataLoader(self.dataset, batch_size=conf.batch_size,
                                     num_workers=conf.num_workers,
                                     shuffle=True, pin_memory=True)
            # self.loader = Loader2(conf)
            self.class_num = 85164
            print(self.class_num, 'classes, load ok')
        else:
            import copy
            conf_t = copy.deepcopy(conf)
            conf_t.data_mode = 'emore'
            self.loader, self.class_num = get_train_loader(conf_t)
            print(self.class_num)
            self.class_num = 85164
        lz.mkdir_p(conf.log_path, delete=True)
        self.writer = SummaryWriter(conf.log_path)
        self.step = 0
        if conf.loss == 'arcface':
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)
        elif conf.loss == 'softmax':
            self.head = MySoftmax(embedding_size=conf.embedding_size,
                                  classnum=self.class_num).to(conf.device)
        else:
            raise ValueError(f'{conf.loss}')
        print('two model heads generated')
        paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)
        if conf.use_mobilfacenet:
            self.optimizer = optim.SGD(
                [{'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                 {'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4},
                 {'params': paras_only_bn}],
                lr=conf.lr, momentum=conf.momentum)
        else:
            self.optimizer = optim.SGD(
                [{'params': paras_wo_bn + [self.head.kernel], 'weight_decay': 5e-4},
                 {'params': paras_only_bn}],
                lr=conf.lr, momentum=conf.momentum)
        print(self.optimizer)
        # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)
        print('optimizers generated')
        self.board_loss_every = 100  # len(self.loader) // 100
        self.evaluate_every = len(self.loader) // 10
        self.save_every = len(self.loader) // 5
        self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
            self.loader.dataset.root_path)
    else:
        self.threshold = conf.threshold
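# Illustrative only: separate_bn_paras (a project helper) presumably splits BatchNorm
# parameters from the rest so that BN weights can skip weight decay in the SGD param
# groups above. A minimal, self-contained sketch of that idea (not the project's own
# implementation) could look like this:
from torch import nn, optim


def split_bn_params(model):
    # collect BatchNorm parameters separately so they can be excluded from weight decay
    bn, other = [], []
    for m in model.modules():
        params = list(m.parameters(recurse=False))
        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            bn.extend(params)
        else:
            other.extend(params)
    return bn, other


if __name__ == '__main__':
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    bn_params, other_params = split_bn_params(net)
    opt = optim.SGD([{'params': other_params, 'weight_decay': 5e-4},
                     {'params': bn_params}],
                    lr=0.1, momentum=0.9)
    print(len(bn_params), len(other_params))  # -> 2 2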
])
draw_ellipse = True
i, j = np.indices(pij2d.shape)
i = i.ravel()
j = j.ravel()
pij = pij2d.ravel().astype('float32')
# Remove self-indices
idx = i != j
i, j, pij = i[idx], j[idx], pij[idx]
n_topics = 2
n_dim = 2
print(n_points, n_dim, n_topics)
lz.mkdir_p(root_path + '/work/' + path, delete=True)
os.chdir(root_path + '/work/' + path)
model = VTSNE(n_points, n_topics, n_dim)
wrap = Wrapper(model, batchsize=4096, epochs=1)
for itr in range(235):
    print(itr, end=' ')
    wrap.fit(pij, i, j)
    # Visualize the results
    embed = model.logits.weight.cpu().data.numpy()
    f = plt.figure()
    if not draw_ellipse:
        plt.scatter(embed[:, 0], embed[:, 1], c=y * 1.0 / y.max())
        plt.axis('off')
        plt.savefig('scatter_{:03d}.png'.format(itr), bbox_inches='tight')
        plt.close(f)
parser.add_argument('--num_consumers', type=int, default=6)
parser.add_argument('--gpus', type=str, default="0")  # todo allow multiple gpu
args = parser.parse_args()
assert osp.exists(args.data_dir), "The input dir does not exist"
root_folder_name = args.data_dir.split('/')[-1]
src_folder = args.data_dir.replace(root_folder_name, root_folder_name + '_OPPOFaces')
if not osp.exists(src_folder):
    logging.info('first crop face; an alternative way is to run python crop_face_oppo.py --data_dir DATASET.')
    from crop_face_oppo_fast import crop_face
    # from crop_face_oppo import crop_face
    crop_face(args)
dst_folder = args.data_dir.replace(root_folder_name, root_folder_name + '_OPPOFeatures')
lz.mkdir_p(dst_folder, delete=False)


class TestData(torch.utils.data.Dataset):
    def __init__(self, imgfn_iter):
        self.imgfn_iter = imgfn_iter
        try:
            self.imgfns = lz.msgpack_load(src_folder + '/all_imgs.pk')
        except Exception:
            logging.info(
                "After crop_face_oppo.py has run, *_OPPOFaces/all_imgs.pk will be generated, which logs the img list. "
                "But all_imgs.pk cannot be loaded, so we are regenerating the img list now ...")
            self.imgfns = list(self.imgfn_iter)
        self.length = len(self.imgfns)
        # self.imgfn_iter is not thread safe
        # self.lock = torch.multiprocessing.Lock()
        # self.length = int(10 * 10 ** 6)  # assume ttl test img less than 10M