Example #1
def create_mtcnn_net(p_model_path=None, r_model_path=None, o_model_path=None, use_cuda=True):

    pnet, rnet, onet = None, None, None

    if p_model_path is not None:
        pnet = PNet(use_cuda=use_cuda)
        if use_cuda:
            pnet.load_state_dict(torch.load(p_model_path))
            pnet.cuda()
        else:
            # force all GPU-saved tensors onto the CPU while loading
            pnet.load_state_dict(torch.load(p_model_path, map_location=lambda storage, loc: storage))
        pnet.eval()

    if r_model_path is not None:
        rnet = RNet(use_cuda=use_cuda)
        if use_cuda:
            rnet.load_state_dict(torch.load(r_model_path))
            rnet.cuda()
        else:
            rnet.load_state_dict(torch.load(r_model_path, map_location=lambda storage, loc: storage))
        rnet.eval()

    if o_model_path is not None:
        onet = ONet(use_cuda=use_cuda)
        if use_cuda:
            onet.load_state_dict(torch.load(o_model_path))
            onet.cuda()
        else:
            onet.load_state_dict(torch.load(o_model_path, map_location=lambda storage, loc: storage))
        onet.eval()

    return pnet, rnet, onet
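
A short usage sketch for the loader above; the checkpoint paths are hypothetical placeholders, and any stage whose path is left as None is simply returned as None.

# Usage sketch for create_mtcnn_net above. The checkpoint paths are
# hypothetical placeholders, not files shipped with this page.
import torch

pnet, rnet, onet = create_mtcnn_net(
    p_model_path="./model_store/pnet_epoch.pt",  # hypothetical path
    r_model_path="./model_store/rnet_epoch.pt",  # hypothetical path
    o_model_path="./model_store/onet_epoch.pt",  # hypothetical path
    use_cuda=torch.cuda.is_available())
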
Example #2
def create_mtcnn_net(p_model_path=None,
                     r_model_path=None,
                     o_model_path=None,
                     use_cuda=True,
                     use_tucker2=False):

    pnet, rnet, onet = None, None, None

    if p_model_path is not None:
        pnet = PNet(use_cuda=use_cuda)
        if use_cuda:
            pnet.load_state_dict(torch.load(p_model_path))
            pnet.cuda()
        else:
            # force all GPU-saved tensors onto the CPU while loading
            pnet.load_state_dict(
                torch.load(p_model_path,
                           map_location=lambda storage, loc: storage))
        pnet.eval()

    if r_model_path is not None:
        rnet = RNet(use_cuda=use_cuda)
        if use_cuda:
            rnet.load_state_dict(torch.load(r_model_path))
            rnet.cuda()
        else:
            rnet.load_state_dict(
                torch.load(r_model_path,
                           map_location=lambda storage, loc: storage))
        rnet.eval()

    if o_model_path is not None:
        onet = ONet(use_cuda=use_cuda)
        if use_tucker2:
            # swap in the Tucker-2 factorized convolutions before loading, so the
            # checkpoint keys match the factorized layers
            onet.conv2 = Tkd2Conv(onet.conv2, 21, 25)
            onet.conv3 = Tkd2Conv(onet.conv3, 36, 35)
            onet.conv4 = Tkd2Conv(onet.conv4, 33, 18)
        if use_cuda:
            onet.load_state_dict(torch.load(o_model_path))
            onet.cuda()
        else:
            onet.load_state_dict(
                torch.load(o_model_path,
                           map_location=lambda storage, loc: storage))
        onet.eval()

    return pnet, rnet, onet
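
Example #2 replaces ONet's middle convolutions with Tucker-2 factorized versions via Tkd2Conv, whose implementation is not shown on this page. Below is a minimal sketch of what such a wrapper typically looks like (1x1 compress, core convolution, 1x1 restore); the class name and the from-scratch initialization are assumptions, since a faithful version would initialize its three factors from a Tucker-2 decomposition of the original kernel.

# Hypothetical sketch of a Tucker-2 factorized convolution in the spirit of
# Tkd2Conv above; not the page's actual implementation. A faithful version
# would initialize the three factors from a Tucker-2 decomposition of
# conv.weight (e.g. with tensorly) rather than train them from scratch.
# (assumes the original conv uses default dilation and groups)
import torch.nn as nn

class Tucker2ConvSketch(nn.Module):
    def __init__(self, conv, rank_in, rank_out):
        super(Tucker2ConvSketch, self).__init__()
        # 1x1 conv: compress input channels down to rank_in
        self.compress = nn.Conv2d(conv.in_channels, rank_in, kernel_size=1, bias=False)
        # core conv: keeps the original spatial kernel, stride and padding
        self.core = nn.Conv2d(rank_in, rank_out,
                              kernel_size=conv.kernel_size,
                              stride=conv.stride,
                              padding=conv.padding,
                              bias=False)
        # 1x1 conv: restore the original number of output channels (and bias)
        self.restore = nn.Conv2d(rank_out, conv.out_channels, kernel_size=1,
                                 bias=conv.bias is not None)

    def forward(self, x):
        return self.restore(self.core(self.compress(x)))

In the calls above, the two integers passed after each conv (e.g. 21 and 25 for conv2) presumably play the role of these two ranks.
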
Example #3
def create_mtcnn_net(p_model_path=None,
                     r_model_path=None,
                     o_model_path=None,
                     use_cuda=True):

    pnet, rnet, onet = None, None, None
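    # note: these torch.load calls pass no map_location, so a checkpoint saved
    # on GPU can only be loaded when CUDA is available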

    if p_model_path is not None:
        pnet = PNet(use_cuda=use_cuda)
        pnet.load_state_dict(torch.load(p_model_path))
        if use_cuda:
            pnet.cuda()
        pnet.eval()

    if r_model_path is not None:
        rnet = RNet(use_cuda=use_cuda)
        rnet.load_state_dict(torch.load(r_model_path))
        if use_cuda:
            rnet.cuda()
        rnet.eval()

    if o_model_path is not None:
        onet = ONet(use_cuda=use_cuda)
        onet.load_state_dict(torch.load(o_model_path))
        if use_cuda:
            onet.cuda()
        onet.eval()

    return pnet, rnet, onet
Example #4
def create_mtcnn_net(p_model_path=None, r_model_path=None, o_model_path=None, use_cuda=True):

    pnet, rnet, onet = None, None, None

    if p_model_path is not None:
        pnet = PNet(use_cuda=use_cuda)
        if use_cuda:
            # wrap in DataParallel to fix the pretrained-weights loading error
            # (state-dict keys were saved with a 'module.' prefix)
            pnet = torch.nn.DataParallel(pnet, device_ids=[0])
            pnet.load_state_dict(torch.load(p_model_path))
            pnet = pnet.cuda()
            cudnn.benchmark = True
        else:
            # force all GPU-saved tensors onto the CPU while loading
            pnet.load_state_dict(torch.load(p_model_path, map_location=lambda storage, loc: storage))
        pnet.eval()

    if r_model_path is not None:
        rnet = RNet(use_cuda=use_cuda)
        if use_cuda:
            rnet = torch.nn.DataParallel(rnet, device_ids=[0])
            rnet.load_state_dict(torch.load(r_model_path))
            rnet = rnet.cuda()
            cudnn.benchmark = True
        else:
            rnet.load_state_dict(torch.load(r_model_path, map_location=lambda storage, loc: storage))
        rnet.eval()

    if o_model_path is not None:
        onet = ONet(use_cuda=use_cuda)
        if use_cuda:
            onet = torch.nn.DataParallel(onet, device_ids=[0])
            onet.load_state_dict(torch.load(o_model_path))
            onet = onet.cuda()
            cudnn.benchmark = True
        else:
            onet.load_state_dict(torch.load(o_model_path, map_location=lambda storage, loc: storage))
        onet.eval()
        
    return pnet, rnet, onet
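
Example #4 wraps each fresh network in DataParallel before loading so that the model's state-dict keys match checkpoints saved from a DataParallel model (keys prefixed with 'module.'). A hedged alternative, relying only on standard PyTorch behavior, is to strip that prefix from the checkpoint instead; this also lets the CPU branch load such checkpoints.

# Alternative sketch (not part of the example above): strip the 'module.'
# prefix that torch.nn.DataParallel adds to state-dict keys, so the checkpoint
# loads into a plain PNet/RNet/ONet on CPU or GPU alike.
def strip_module_prefix(state_dict):
    return {k[len('module.'):] if k.startswith('module.') else k: v
            for k, v in state_dict.items()}

# usage sketch with a hypothetical checkpoint path:
# ckpt = torch.load('./model_store/pnet_epoch.pt', map_location='cpu')
# pnet = PNet(use_cuda=False)
# pnet.load_state_dict(strip_module_prefix(ckpt))
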
Example #5
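# (fragment: the closing lines of the torch_summarize helper called further
#  below; it accumulates per-module weight shapes and parameter counts into
#  tmpstr and then closes the summary string)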
            tmpstr += ', weights={}'.format(weights)
        if show_parameters:
            tmpstr +=  ', parameters={}'.format(params)
        tmpstr += '\n'   

    tmpstr = tmpstr + ')'
    return tmpstr

class Model(nn.Module):

    def __init__(self):
        super(Model,self).__init__()

        self.conv0 = nn.Conv2d(1, 16, kernel_size=3, padding=5)
        self.conv1 = nn.Conv2d(16, 32, kernel_size=3)

    def forward(self, x):
        h = self.conv0(x)
        h = self.conv1(h)
        return h

model = RNet(is_train=False, use_cuda=True)

print(torch_summarize(model))

# Summarize Model
from pytorch_modelsummary import ModelSummary

ms = ModelSummary(model, input_size=(1, 3, 12, 12))
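
Since the body of torch_summarize is only partially shown, here is a minimal self-contained sketch of a similar per-module summary, assuming nothing beyond standard PyTorch; it is a stand-in, not the original helper.

# Minimal stand-in sketch for a torch_summarize-style report (an assumption,
# not the original helper): list each direct sub-module with its weight shapes
# and parameter count.
def summarize(model):
    lines = [model.__class__.__name__ + ' (']
    for name, module in model.named_children():
        shapes = tuple(tuple(p.size()) for p in module.parameters())
        n_params = sum(p.numel() for p in module.parameters())
        lines.append('  ({}): {}, weights={}, parameters={}'.format(
            name, module.__class__.__name__, shapes, n_params))
    lines.append(')')
    return '\n'.join(lines)

# e.g. print(summarize(model)) for the RNet instance above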

Example #6
def train_rnet(model_store_path,
               end_epoch,
               imdb,
               batch_size,
               frequent=50,
               base_lr=0.01,
               use_cuda=True):

    if not os.path.exists(model_store_path):
        os.makedirs(model_store_path)

    lossfn = LossFn()
    net = RNet(is_train=True, use_cuda=use_cuda)
    net.train()
    if use_cuda:
        net.cuda()

    optimizer = torch.optim.Adam(net.parameters(), lr=base_lr)

    train_data = TrainImageReader(imdb, 24, batch_size, shuffle=True)  # 24 = RNet input size (24x24 crops)

    for cur_epoch in range(1, end_epoch + 1):
        train_data.reset()
        accuracy_list = []
        cls_loss_list = []
        bbox_loss_list = []
        landmark_loss_list = []

        for batch_idx, (image, (gt_label, gt_bbox,
                                gt_landmark)) in enumerate(train_data):

            im_tensor = [
                image_tools.convert_image_to_tensor(image[i, :, :, :])
                for i in range(image.shape[0])
            ]
            im_tensor = torch.stack(im_tensor)

            im_tensor = Variable(im_tensor)
            gt_label = Variable(torch.from_numpy(gt_label).float())

            gt_bbox = Variable(torch.from_numpy(gt_bbox).float())
            gt_landmark = Variable(torch.from_numpy(gt_landmark).float())

            if use_cuda:
                im_tensor = im_tensor.cuda()
                gt_label = gt_label.cuda()
                gt_bbox = gt_bbox.cuda()
                gt_landmark = gt_landmark.cuda()

            cls_pred, box_offset_pred = net(im_tensor)
            # all_loss, cls_loss, offset_loss = lossfn.loss(gt_label=label_y,gt_offset=bbox_y, pred_label=cls_pred, pred_offset=box_offset_pred)

            cls_loss = lossfn.cls_loss(gt_label, cls_pred)
            box_offset_loss = lossfn.box_loss(gt_label, gt_bbox,
                                              box_offset_pred)
            # landmark_loss = lossfn.landmark_loss(gt_label,gt_landmark,landmark_offset_pred)

            all_loss = cls_loss * 1.0 + box_offset_loss * 0.5

            if batch_idx % frequent == 0:
                accuracy = compute_accuracy(cls_pred, gt_label)

                show1 = accuracy.data.tolist()
                show2 = cls_loss.data.tolist()
                show3 = box_offset_loss.data.tolist()
                # show4 = landmark_loss.data.tolist()
                show5 = all_loss.data.tolist()

                print(
                    "%s : Epoch: %d, Step: %d, accuracy: %s, det loss: %s, bbox loss: %s, all_loss: %s, lr:%s "
                    % (datetime.datetime.now(), cur_epoch, batch_idx, show1,
                       show2, show3, show5, base_lr))
                accuracy_list.append(accuracy)
                cls_loss_list.append(cls_loss)
                bbox_loss_list.append(box_offset_loss)
                # landmark_loss_list.append(landmark_loss)

            optimizer.zero_grad()
            all_loss.backward()
            optimizer.step()

        accuracy_avg = torch.mean(torch.stack(accuracy_list))
        cls_loss_avg = torch.mean(torch.stack(cls_loss_list))
        bbox_loss_avg = torch.mean(torch.stack(bbox_loss_list))
        # landmark_loss_avg = torch.mean(torch.stack(landmark_loss_list))

        show6 = accuracy_avg.data.tolist()
        show7 = cls_loss_avg.data.tolist()
        show8 = bbox_loss_avg.data.tolist()
        # show9 = landmark_loss_avg.data.tolist()

        print("Epoch: %d, accuracy: %s, cls loss: %s, bbox loss: %s" %
              (cur_epoch, show6, show7, show8))
        torch.save(
            net.state_dict(),
            os.path.join(model_store_path, "rnet_epoch_%d.pt" % cur_epoch))
        torch.save(
            net,
            os.path.join(model_store_path,
                         "rnet_epoch_model_%d.pkl" % cur_epoch))