Code example #1
def train():

    net = RetinaFace(cfg=cfg)
    logger.info("Printing net...")
    logger.info(net)

    if args.resume_net is not None:
        logger.info('Loading resume network...')
        state_dict = torch.load(args.resume_net)
        # create new OrderedDict that does not contain `module.`
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            head = k[:7]
            if head == 'module.':
                name = k[7:] # remove `module.`
            else:
                name = k
            new_state_dict[name] = v
        net.load_state_dict(new_state_dict)

    if num_gpu > 1 and gpu_train:
        net = torch.nn.DataParallel(net).cuda()
    else:
        net = net.cuda()

    cudnn.benchmark = True

    priorbox = PriorBox(cfg, image_size=(img_dim, img_dim))
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.cuda()

    net.train()
    epoch = args.resume_epoch
    logger.info('Loading Dataset...')

    trainset = WiderFaceDetection(training_dataset, preproc=train_preproc(img_dim, rgb_mean), mode='train')
    validset = WiderFaceDetection(training_dataset, preproc=valid_preproc(img_dim, rgb_mean), mode='valid')
    # trainset = WiderFaceDetection(training_dataset, transformers=train_transformers(img_dim), mode='train')
    # validset = WiderFaceDetection(training_dataset, transformers=valid_transformers(img_dim), mode='valid')
    trainloader = data.DataLoader(trainset, batch_size, shuffle=True, num_workers=num_workers, collate_fn=detection_collate)
    validloader = data.DataLoader(validset, batch_size, shuffle=True, num_workers=num_workers, collate_fn=detection_collate)
    logger.info(f'{len(trainset)} training samples and {len(validset)} validation samples in total.')

    epoch_size = math.ceil(len(trainset) / batch_size)
    max_iter = max_epoch * epoch_size
    logger.info(f'max_epoch: {max_epoch:d} epoch_size: {epoch_size:d}, max_iter: {max_iter:d}')

    # optimizer = optim.SGD(net.parameters(), lr=initial_lr, momentum=momentum, weight_decay=weight_decay)
    optimizer = optim.Adam(net.parameters(), lr=initial_lr, weight_decay=weight_decay)
    scheduler = _utils.get_linear_schedule_with_warmup(optimizer, int(0.1 * max_iter), max_iter)
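    # Note (assumption): _utils.get_linear_schedule_with_warmup is expected to mirror
    # the well-known helper of the same name: the LR ramps up linearly over the first
    # 10% of max_iter, then decays linearly to zero. It is stepped once per iteration
    # (see scheduler.step() in the loop below), not once per epoch.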
    criterion = MultiBoxLoss(num_classes, 0.35, True, 0, True, 7, 0.35, False)

    stepvalues = (cfg['decay1'] * epoch_size, cfg['decay2'] * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    best_loss_val = float('inf')
    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            # batch_iterator = iter(tqdm(trainloader, total=len(trainloader)))
            batch_iterator = iter(trainloader)
            # if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > cfg['decay1']):
            #     torch.save(net.state_dict(), save_folder + cfg['name']+ '_epoch_' + str(epoch) + '.pth')
            epoch += 1
            torch.cuda.empty_cache()

        if (valid_steps > 0) and (iteration > 0) and (iteration % valid_steps == 0):
            net.eval()
            # validation
            loss_l_val = 0.
            loss_c_val = 0.
            loss_landm_val = 0.
            loss_val = 0.
            # for val_no, (images, targets) in tqdm(enumerate(validloader), total=len(validloader)):
            for val_no, (images, targets) in enumerate(validloader):
                # load data
                images = images.cuda()
                targets = [anno.cuda() for anno in targets]
                # forward
                with torch.no_grad():
                    out = net(images)
                    loss_l, loss_c, loss_landm = criterion(out, priors, targets)
                    loss = cfg['loc_weight'] * loss_l + loss_c + loss_landm
                loss_l_val += loss_l.item()
                loss_c_val += loss_c.item()
                loss_landm_val += loss_landm.item()
                loss_val += loss.item()
            loss_l_val /= len(validloader)
            loss_c_val /= len(validloader)
            loss_landm_val /= len(validloader)
            loss_val /= len(validloader)
            logger.info('[Validating] Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || Total: {:.4f} Loc: {:.4f} Cla: {:.4f} Landm: {:.4f}'
                .format(epoch, max_epoch, (iteration % epoch_size) + 1,
                epoch_size, iteration + 1, max_iter, 
                loss_val, loss_l_val, loss_c_val, loss_landm_val))
            if loss_val < best_loss_val:
                best_loss_val = loss_val
                pth = os.path.join(save_folder, cfg['name'] + '_iter_' + str(iteration) + f'_{loss_val:.4f}_' + '.pth')
                torch.save(net.state_dict(), pth)
                logger.info(f'Best validating loss: {best_loss_val:.4f}, model saved as {pth:s}')
            net.train()

        load_t0 = time.time()
        # if iteration in stepvalues:
        #     step_index += 1
        # lr = adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        images = images.cuda()
        targets = [anno.cuda() for anno in targets]

        # forward
        out = net(images)

        # backprop
        optimizer.zero_grad()
        loss_l, loss_c, loss_landm = criterion(out, priors, targets)
        loss = cfg['loc_weight'] * loss_l + loss_c + loss_landm
        loss.backward()
        optimizer.step()
        scheduler.step()
        load_t1 = time.time()
        batch_time = load_t1 - load_t0
        eta = int(batch_time * (max_iter - iteration))
        if iteration % verbose_steps == 0:
            logger.info('[Training] Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || Total: {:.4f} Loc: {:.4f} Cla: {:.4f} Landm: {:.4f} || LR: {:.8f} || Batchtime: {:.4f} s || ETA: {}'
                .format(epoch, max_epoch, (iteration % epoch_size) + 1,
                epoch_size, iteration + 1, max_iter, 
                loss.item(), loss_l.item(), loss_c.item(), loss_landm.item(), 
                scheduler.get_last_lr()[-1], batch_time, str(datetime.timedelta(seconds=eta))))
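
The resume block at the top of train() strips the `module.` prefix that torch.nn.DataParallel prepends to parameter names when a model is saved from a wrapped module. The same idea can be written more compactly; the helper below is an illustrative sketch, not code from the repository:

import torch

def strip_module_prefix(state_dict):
    # return a copy of the checkpoint with any leading 'module.' removed from keys,
    # so weights saved from a DataParallel-wrapped model load into a bare model
    return {(k[len('module.'):] if k.startswith('module.') else k): v
            for k, v in state_dict.items()}

# hypothetical usage:
# net.load_state_dict(strip_module_prefix(torch.load(args.resume_net, map_location='cpu')))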
Code example #2
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    check_keys(model, pretrained_dict)
    model.load_state_dict(pretrained_dict, strict=False)
    return model


if __name__ == '__main__':
    torch.set_grad_enabled(False)  # flag for test (inference) mode

    cfg = None
    if args.network == "mobile0.25":
        cfg = cfg_mnet
    elif args.network == "resnet50":
        cfg = cfg_re50
    # net and model
    net = RetinaFace(cfg=cfg, phase='test')
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()  # test mode; during training this would be net.train()
    print('Finished loading model!')
    print(net)
    cudnn.benchmark = True
    device = torch.device("cpu" if args.cpu else "cuda")
    net = net.to(device)

    # testing dataset
    testset_folder = args.dataset_folder
    testset_list = args.dataset_folder[:-8] + "test_val.txt"

    with open(testset_list, 'r') as fr:
        test_dataset = fr.read().split('\n')
    num_images = len(test_dataset)
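
This excerpt calls remove_prefix and check_keys, which are defined elsewhere in the project. A plausible sketch of the pair, consistent with the prefix handling shown in code example #1 (the repository's exact definitions may differ):

def remove_prefix(state_dict, prefix):
    # drop a shared key prefix (typically 'module.') from every entry in the checkpoint
    strip = lambda k: k[len(prefix):] if k.startswith(prefix) else k
    return {strip(k): v for k, v in state_dict.items()}

def check_keys(model, pretrained_state_dict):
    # report how the checkpoint keys line up with the model's keys before loading
    ckpt_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    print('Missing keys: {}'.format(len(model_keys - ckpt_keys)))
    print('Unused checkpoint keys: {}'.format(len(ckpt_keys - model_keys)))
    assert len(model_keys & ckpt_keys) > 0, 'loaded nothing from the checkpoint'
    return True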
Code example #3
num_gpu = cfg['ngpu']
batch_size = cfg['batch_size']
max_epoch = cfg['epoch']
gpu_train = cfg['gpu_train']
if args.resume_net:
    cfg['pretrain'] = False

num_workers = args.num_workers
momentum = args.momentum
weight_decay = args.weight_decay
initial_lr = args.lr
gamma = args.gamma
training_dataset = args.training_dataset
save_folder = args.save_folder

net = RetinaFace(cfg=cfg)
print("Printing net...")
print(net)

if args.resume_net is not None:
    print('Loading resume network...')
    state_dict = torch.load(args.resume_net)
    # create new OrderedDict that does not contain `module.`
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
Code example #4
    else:
        device = torch.cuda.current_device()
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
    if "state_dict" in pretrained_dict.keys():
        pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
    else:
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    check_keys(model, pretrained_dict)
    model.load_state_dict(pretrained_dict, strict=False)
    return model


if __name__ == '__main__':
    torch.set_grad_enabled(False)
    # net and model
    net = RetinaFace(phase="test")
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()
    print('Finished loading model!')
    print(net)
    cudnn.benchmark = True
    device = torch.device("cpu" if args.cpu else "cuda")
    net = net.to(device)


    # save file
    if not os.path.exists(args.save_folder):
        os.makedirs(args.save_folder)
    fw = open(os.path.join(args.save_folder, args.dataset + '_dets.txt'), 'w')

    # testing dataset
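
The excerpt above opens inside load_model, at the else: of a CPU/GPU branch. For context, the elided CPU path is assumed to follow the standard map_location idiom sketched below (an assumption, not quoted from this file):

import torch

def load_checkpoint_to_cpu(pretrained_path):
    # assumed counterpart of the GPU branch shown above: keep every tensor on the CPU,
    # no matter which device it was saved from
    return torch.load(pretrained_path, map_location=lambda storage, loc: storage)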
Code example #5
File: train.py Project: zyg11/Pytorch_Retinaface
rgb_mean = (104, 117, 123)  # bgr order
num_classes = 2
img_dim = args.img_dim
num_gpu = args.ngpu
num_workers = args.num_workers
batch_size = args.batch_size
momentum = args.momentum
weight_decay = args.weight_decay
initial_lr = args.lr
gamma = args.gamma
max_epoch = args.max_epoch
training_dataset = args.training_dataset
save_folder = args.save_folder
gpu_train = cfg['gpu_train']

net = RetinaFace()
print("Printing net...")
print(net)

if args.resume_net is not None:
    print('Loading resume network...')
    state_dict = torch.load(args.resume_net)
    # create new OrderedDict that does not contain `module.`
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
Code example #6
num_classes = 2
img_dim = cfg['image_size']
num_gpu = cfg['ngpu']
batch_size = cfg['batch_size']
max_epoch = cfg['epoch']
gpu_train = cfg['gpu_train']

num_workers = args.num_workers
momentum = args.momentum
weight_decay = args.weight_decay
initial_lr = args.lr
gamma = args.gamma
training_dataset = args.training_dataset
save_folder = args.save_folder

net = RetinaFace(cfg=cfg)
teacher_net = RetinaFace(cfg=teacher_cfg)
print("Printing net...")
print(net)

if args.resume_net is not None:
    print('Loading resume network...')
    state_dict = torch.load(args.resume_net)
    # create new OrderedDict that does not contain `module.`
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
Code example #7
def detect(img_path):

    torch.set_grad_enabled(False)
    cfg = None
    if args.network == "mobile0.25":
        cfg = cfg_mnet
    elif args.network == "resnet50":
        cfg = cfg_re50
    # net and model
    net = RetinaFace(cfg=cfg, phase='test')
    #net = FaceBoxes(phase='test', size=None, num_classes=2)
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()

    #print('Finished loading model!')

    #print(net)
    cudnn.benchmark = True
    device = torch.device("cpu" if args.cpu else "cuda")
    net = net.to(device)

    _t = {'forward_pass': Timer(), 'misc': Timer()}
    resize = 1
    # testing begin
    # for i, img_name in enumerate(test_dataset):
    #     image_path = testset_folder + img_name + '.jpg'
    #     img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)

    if not isinstance(img_path, np.ndarray):
        img = Image.open(img_path)
        if img.mode == 'L':
            img = img.convert('RGB')
        img_raw = np.array(img)
    else:
        img_raw = img_path

    #img_raw = img_path
    img = np.float32(img_raw)
    if resize != 1:
        img = cv2.resize(img,
                         None,
                         None,
                         fx=resize,
                         fy=resize,
                         interpolation=cv2.INTER_LINEAR)
    im_height, im_width, _ = img.shape
    scale = torch.Tensor(
        [img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
    img -= (104, 117, 123)  # subtract per-channel means (the same (104, 117, 123) used as rgb_mean in code example #5)
    img = img.transpose(2, 0, 1)
    img = torch.from_numpy(img).unsqueeze(0)
    img = img.to(device)
    scale = scale.to(device)

    _t['forward_pass'].tic()
    loc, conf, landms = net(img)  # forward pass
    _t['forward_pass'].toc()
    _t['misc'].tic()
    priorbox = PriorBox(cfg, image_size=(im_height, im_width))
    #priorbox = PriorBox1(cfg, image_size=(im_height, im_width))
    priors = priorbox.forward()
    priors = priors.to(device)
    prior_data = priors.data
    boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
    boxes = boxes * scale / resize
    boxes = boxes.cpu().numpy()
    scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
    landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
    scale1 = torch.Tensor([
        img.shape[3], img.shape[2], img.shape[3], img.shape[2], img.shape[3],
        img.shape[2], img.shape[3], img.shape[2], img.shape[3], img.shape[2]
    ])
    scale1 = scale1.to(device)
    landms = landms * scale1 / resize
    landms = landms.cpu().numpy()

    # ignore low scores
    inds = np.where(scores > args.confidence_threshold)[0]
    boxes = boxes[inds]
    landms = landms[inds]
    scores = scores[inds]

    # keep top-K before NMS
    # order = scores.argsort()[::-1][:args.top_k]
    order = scores.argsort()[::-1]
    boxes = boxes[order]
    landms = landms[order]
    scores = scores[order]

    # do NMS
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32,
                                                            copy=False)
    keep = py_cpu_nms(dets, args.nms_threshold)

    dets = dets[keep, :]
    landms = landms[keep]

    # keep top-K faster NMS
    # dets = dets[:args.keep_top_k, :]
    # landms = landms[:args.keep_top_k, :]

    dets = np.concatenate((dets, landms), axis=1)
    _t['misc'].toc()

    # save dets
    # if args.dataset == "FDDB":
    #     fw.write('{:s}\n'.format(img_name))
    #     fw.write('{:.1f}\n'.format(dets.shape[0]))
    #     for k in range(dets.shape[0]):
    #         xmin = dets[k, 0]
    #         ymin = dets[k, 1]
    #         xmax = dets[k, 2]
    #         ymax = dets[k, 3]
    #         score = dets[k, 4]
    #         w = xmax - xmin + 1
    #         h = ymax - ymin + 1
    #         # fw.write('{:.3f} {:.3f} {:.3f} {:.3f} {:.10f}\n'.format(xmin, ymin, w, h, score))
    #         fw.write('{:d} {:d} {:d} {:d} {:.10f}\n'.format(int(xmin), int(ymin), int(w), int(h), score))
    print('forward_pass_time: {:.4f}s misc: {:.4f}s'.format(
        _t['forward_pass'].average_time, _t['misc'].average_time))

    # if type(img_path) is not np.ndarray:
    #     img_raw = cv2.imread(img_path, cv2.IMREAD_COLOR)
    # else:
    #     img_raw = img_path

    # # show image
    # if args.save_image:
    #     for b in dets:
    #         if b[4] < args.vis_thres:
    #             continue
    #         text = "{:.4f}".format(b[4])
    #         b = list(map(int, b))
    #         cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
    #         cx = b[0]
    #         cy = b[1] + 12
    #         cv2.putText(img_raw, text, (cx, cy),
    #                     cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))

    #         # landms
    #         cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)
    #         cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)
    #         cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)
    #         cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)
    #         cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)
    # save image
    # if not os.path.exists("./results/"):
    #     os.makedirs("./results/")
    # name = "./results/" + str(i) + ".jpg"
    # cv2.imwrite(name, img_raw)
    return dets, img_path
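
A hypothetical call to detect() above; the image path and score threshold are placeholders. Each row of dets holds the box corners, the confidence, and ten landmark coordinates:

dets, _ = detect('sample.jpg')  # placeholder path; an already-loaded ndarray also works
for x1, y1, x2, y2, score, *landmarks in dets:
    if score < 0.5:  # illustrative visualization threshold
        continue
    print(int(x1), int(y1), int(x2), int(y2), round(float(score), 3))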