Code Example #1
File: detect.py  Project: smartape/DFace
def create_mtcnn_net(p_model_path=None,
                     r_model_path=None,
                     o_model_path=None,
                     use_cuda=True):

    pnet, rnet, onet = None, None, None

    if p_model_path is not None:
        pnet = PNet(use_cuda=use_cuda)
        pnet.load_state_dict(torch.load(p_model_path))
        if (use_cuda):
            pnet.cuda()
        pnet.eval()

    if r_model_path is not None:
        rnet = RNet(use_cuda=use_cuda)
        rnet.load_state_dict(torch.load(r_model_path))
        if (use_cuda):
            rnet.cuda()
        rnet.eval()

    if o_model_path is not None:
        onet = ONet(use_cuda=use_cuda)
        onet.load_state_dict(torch.load(o_model_path))
        if (use_cuda):
            onet.cuda()
        onet.eval()

    return pnet, rnet, onet
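
This first example constructs each of the three MTCNN stages (PNet, RNet, ONet) only when its checkpoint path is given, loads the weights with load_state_dict, optionally moves the network to the GPU, and switches it to eval() mode. A minimal usage sketch follows; the import path and checkpoint file names are assumptions for illustration, not taken from the example.

import torch
from detect import create_mtcnn_net  # assumed import path

# Hypothetical checkpoint paths; substitute the real model files.
pnet, rnet, onet = create_mtcnn_net(
    p_model_path="model_store/pnet_epoch.pt",
    r_model_path="model_store/rnet_epoch.pt",
    o_model_path="model_store/onet_epoch.pt",
    use_cuda=torch.cuda.is_available(),
)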
Code Example #2
def create_mtcnn_net(p_model_path=None, r_model_path=None, o_model_path=None, use_cuda=True):

    pnet, rnet, onet = None, None, None

    if p_model_path is not None:
        pnet = PNet(use_cuda=use_cuda)
        if (use_cuda):
            pnet.load_state_dict(torch.load(p_model_path))
            pnet.cuda()
        else:
            # forcing all GPU tensors to be in CPU while loading
            pnet.load_state_dict(torch.load(p_model_path, map_location=lambda storage, loc: storage))
        pnet.eval()

    if r_model_path is not None:
        rnet = RNet(use_cuda=use_cuda)
        if (use_cuda):
            rnet.load_state_dict(torch.load(r_model_path))
            rnet.cuda()
        else:
            rnet.load_state_dict(torch.load(r_model_path, map_location=lambda storage, loc: storage))
        rnet.eval()

    if o_model_path is not None:
        onet = ONet(use_cuda=use_cuda)
        if (use_cuda):
            onet.load_state_dict(torch.load(o_model_path))
            onet.cuda()
        else:
            onet.load_state_dict(torch.load(o_model_path, map_location=lambda storage, loc: storage))
        onet.eval()

    return pnet, rnet, onet
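
Example #2 differs from Example #1 only in the CPU branch: when use_cuda is False, the checkpoint is remapped to CPU at load time via map_location, so weights saved on a GPU machine can still be loaded on a CPU-only one. In current PyTorch the lambda can be replaced by the shorter string form; a small equivalent sketch (the checkpoint path is a placeholder):

import torch

# Equivalent CPU remapping using the string form of map_location
# ("pnet_epoch.pt" is a placeholder path, not from the examples).
state_dict = torch.load("pnet_epoch.pt", map_location="cpu")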
Code Example #3
File: detect.py  Project: Mandy-77/MTCNN_Tucker2
def create_mtcnn_net(p_model_path=None,
                     r_model_path=None,
                     o_model_path=None,
                     use_cuda=True,
                     use_tucker2=False):

    pnet, rnet, onet = None, None, None

    if p_model_path is not None:
        pnet = PNet(use_cuda=use_cuda)
        if (use_cuda):
            pnet.load_state_dict(torch.load(p_model_path))
            pnet.cuda()
        else:
            # forcing all GPU tensors to be in CPU while loading
            pnet.load_state_dict(
                torch.load(p_model_path,
                           map_location=lambda storage, loc: storage))
        pnet.eval()

    if r_model_path is not None:
        rnet = RNet(use_cuda=use_cuda)
        if (use_cuda):
            rnet.load_state_dict(torch.load(r_model_path))
            rnet.cuda()
        else:
            rnet.load_state_dict(
                torch.load(r_model_path,
                           map_location=lambda storage, loc: storage))
        rnet.eval()

    if o_model_path is not None:
        onet = ONet(use_cuda=use_cuda)
        if (use_cuda):
            ckp = torch.load(o_model_path)
            if use_tucker2:
                onet.conv2 = Tkd2Conv(onet.conv2, 21, 25)
                onet.conv3 = Tkd2Conv(onet.conv3, 36, 35)
                onet.conv4 = Tkd2Conv(onet.conv4, 33, 18)

            onet.load_state_dict(ckp)
            onet.cuda()
        else:
            if use_tucker2:
                onet.conv2 = Tkd2Conv(onet.conv2, 21, 25)
                onet.conv3 = Tkd2Conv(onet.conv3, 36, 35)
                onet.conv4 = Tkd2Conv(onet.conv4, 33, 18)
            onet.load_state_dict(
                torch.load(o_model_path,
                           map_location=lambda storage, loc: storage))

        onet.eval()

    return pnet, rnet, onet
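
Example #3 adds a use_tucker2 flag: before the ONet checkpoint is loaded, conv2, conv3, and conv4 are replaced with Tucker-2 decomposed versions (Tkd2Conv) so that the parameter names and shapes match a checkpoint saved from the compressed network. The implementation of Tkd2Conv is not part of the excerpt; the sketch below only illustrates the usual shape of a Tucker-2 factorized convolution (a 1x1 channel reduction, a core convolution at the given ranks, and a 1x1 expansion), and its names and details are assumptions rather than the project's code.

import torch.nn as nn

class Tucker2Conv(nn.Module):
    """Illustrative Tucker-2 style factorization of an existing nn.Conv2d.

    A sketch of the general technique, not the project's Tkd2Conv.
    """

    def __init__(self, conv, rank_in, rank_out):
        super().__init__()
        # 1x1 convolution projecting the input channels down to rank_in
        self.reduce = nn.Conv2d(conv.in_channels, rank_in, kernel_size=1, bias=False)
        # core convolution with the original kernel size, stride, and padding
        self.core = nn.Conv2d(rank_in, rank_out, kernel_size=conv.kernel_size,
                              stride=conv.stride, padding=conv.padding, bias=False)
        # 1x1 convolution expanding back to the original output channels
        self.expand = nn.Conv2d(rank_out, conv.out_channels, kernel_size=1,
                                bias=conv.bias is not None)

    def forward(self, x):
        return self.expand(self.core(self.reduce(x)))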
Code Example #4
File: detect.py  Project: binbinmeng/mtcnn-pytorch
def create_mtcnn_net(p_model_path=None, r_model_path=None, o_model_path=None, use_cuda=True):

    pnet, rnet, onet = None, None, None

    if p_model_path is not None:
        pnet = PNet(use_cuda=use_cuda)
        if (use_cuda):
            pnet = torch.nn.DataParallel(pnet, device_ids=[0])  # solve load pretrained error
            pnet.load_state_dict(torch.load(p_model_path))
            pnet = pnet.cuda()
            cudnn.benchmark = True
        else:
            # forcing all GPU tensors to be in CPU while loading
            pnet.load_state_dict(torch.load(p_model_path, map_location=lambda storage, loc: storage))
        pnet.eval()

    if r_model_path is not None:
        rnet = RNet(use_cuda=use_cuda)
        if (use_cuda):
            rnet = torch.nn.DataParallel(rnet, device_ids=[0])
            rnet.load_state_dict(torch.load(r_model_path))
            rnet = rnet.cuda()
            cudnn.benchmark = True
        else:
            rnet.load_state_dict(torch.load(r_model_path, map_location=lambda storage, loc: storage))
        rnet.eval()

    if o_model_path is not None:
        onet = ONet(use_cuda=use_cuda)
        if (use_cuda):
            onet = torch.nn.DataParallel(onet, device_ids=[0])
            onet.load_state_dict(torch.load(o_model_path))
            onet = onet.cuda()
            cudnn.benchmark = True
        else:
            onet.load_state_dict(torch.load(o_model_path, map_location=lambda storage, loc: storage))
        onet.eval()

    return pnet, rnet, onet
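
Example #4 wraps each network in torch.nn.DataParallel before calling load_state_dict, which makes the model's parameter names carry the "module." prefix that DataParallel adds when a model is saved, and it enables cudnn.benchmark (the cudnn import is presumably at the top of detect.py, outside the excerpt). If instead you want to load such a checkpoint into a plain, unwrapped model, a common alternative is to strip the prefix from the state-dict keys; a minimal sketch, assuming nothing beyond torch itself:

import torch

def load_without_dataparallel(model, checkpoint_path):
    """Load a checkpoint saved from a DataParallel model into a plain model
    by stripping the "module." prefix from every parameter name."""
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = {k[len("module."):] if k.startswith("module.") else k: v
                  for k, v in state_dict.items()}
    model.load_state_dict(state_dict)
    return model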