Ejemplo n.º 1
0
def main():
    """Train a pose-estimation model on COCO and periodically snapshot it.

    All configuration is read from the module-level ``opt`` namespace
    (``dataset``, ``expID``, ``loadModel``, ``optMethod``, ``LR``,
    ``nEpochs``, ``snapshot``, ...).  Checkpoints are written under
    ``../exp/<dataset>/<expID>/`` and TensorBoard logs under
    ``.tensorboard/<dataset>/<expID>``.

    Raises:
        ValueError: if ``opt.optMethod`` or ``opt.dataset`` is unsupported.
    """
    # Model Initialize
    m = createModel().cuda()
    if opt.loadModel:
        print('Loading Model from {}'.format(opt.loadModel))
        m.load_state_dict(torch.load(opt.loadModel))
    else:
        print('Create new model')
    # Create the experiment directory tree in one call; this replaces the
    # duplicated try/except os.mkdir ladders, which were race-prone and
    # could still fail when "../exp" itself was missing.
    os.makedirs("../exp/{}/{}".format(opt.dataset, opt.expID), exist_ok=True)

    criterion = torch.nn.MSELoss().cuda()

    if opt.optMethod == 'rmsprop':
        optimizer = torch.optim.RMSprop(m.parameters(),
                                        lr=opt.LR,
                                        momentum=opt.momentum,
                                        weight_decay=opt.weightDecay)
    elif opt.optMethod == 'adam':
        optimizer = torch.optim.Adam(m.parameters(), lr=opt.LR)
    else:
        # Fail with a diagnostic instead of a bare `raise Exception`.
        raise ValueError(
            'Unknown optimization method: {}'.format(opt.optMethod))

    writer = SummaryWriter('.tensorboard/{}/{}'.format(opt.dataset, opt.expID))

    # Prepare Dataset
    # BUG FIX: the original built the COCO datasets unconditionally right
    # after the `if opt.dataset == 'coco'` branch, duplicating the work and
    # making the check meaningless.  Build them once and reject anything else.
    if opt.dataset == 'coco':
        train_dataset = coco.Mscoco(train=True)
        val_dataset = coco.Mscoco(train=False)
    else:
        raise ValueError('Unsupported dataset: {}'.format(opt.dataset))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.trainBatch,
                                               shuffle=True,
                                               num_workers=opt.nThreads,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=opt.validBatch,
                                             shuffle=False,
                                             num_workers=opt.nThreads,
                                             pin_memory=True)

    # Model Transfer: wrap for multi-GPU data parallelism.
    m = torch.nn.DataParallel(m).cuda()

    # Start Training
    for i in range(opt.nEpochs):
        opt.epoch = i

        print('############# Starting Epoch {} #############'.format(
            opt.epoch))
        loss, acc = train(train_loader, m, criterion, optimizer, writer)

        print('Train-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
            idx=opt.epoch, loss=loss, acc=acc))

        opt.acc = acc
        opt.loss = loss
        # Unwrap DataParallel so the saved state_dict has clean key names.
        m_dev = m.module
        if i % opt.snapshot == 0:
            torch.save(
                m_dev.state_dict(),
                '../exp/{}/{}/model_{}.pkl'.format(opt.dataset, opt.expID,
                                                   opt.epoch))
            torch.save(
                opt, '../exp/{}/{}/option.pkl'.format(opt.dataset, opt.expID))
            torch.save(
                optimizer,
                '../exp/{}/{}/optimizer.pkl'.format(opt.dataset, opt.expID))

        loss, acc = valid(val_loader, m, criterion, optimizer, writer)

        print('Valid-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
            idx=i, loss=loss, acc=acc))
    writer.close()
Ejemplo n.º 2
0
def main():
    """Train a pose model on COCO or Human3.6M, with optional fine-tuning.

    All configuration is read from the module-level ``opt`` namespace.
    Supports three optimizers (``rmsprop``, ``adam``, ``rmsprop_refine``
    — the latter uses per-layer learning rates for fine-tuning) and can
    resume a full optimizer state via ``opt.loadOptimizer``.

    Raises:
        ValueError: if ``opt.optMethod`` or ``opt.dataset`` is unsupported.
    """
    print(opt)
    # Model Initialize
    m = createModel().cuda()
    if opt.loadModel:
        print('Loading Model from {}'.format(opt.loadModel))
        m.load_state_dict(torch.load(opt.loadModel))
    else:
        print('Create new model')
    # Create the experiment directory tree in one call; this replaces the
    # duplicated try/except os.mkdir ladders, which were race-prone.
    os.makedirs("../exp/{}/{}".format(opt.dataset, opt.expID), exist_ok=True)

    criterion = torch.nn.MSELoss().cuda()

    if opt.optMethod == 'rmsprop':
        optimizer = torch.optim.RMSprop(m.parameters(),
                                        lr=opt.LR,
                                        momentum=opt.momentum,
                                        weight_decay=opt.weightDecay)
    elif opt.optMethod == 'adam':
        optimizer = torch.optim.Adam(m.parameters(), lr=opt.LR)
    elif opt.optMethod == 'rmsprop_refine':
        # Fine-tuning: small LR on backbone layers, 10x LR on the output head.
        print('opt rmsprop refine')
        optimizer = torch.optim.RMSprop(
            [{"params": m.preact.parameters(), "lr": 1e-5},
             {"params": m.suffle1.parameters(), "lr": 1e-5},
             {"params": m.duc1.parameters(), "lr": 1e-5},
             {"params": m.duc2.parameters(), "lr": 1e-5},
             {"params": m.conv_out.parameters(), "lr": 1e-4}],
            lr=opt.LR,
            momentum=opt.momentum,
            weight_decay=opt.weightDecay)
    else:
        # Fail with a diagnostic instead of a bare `raise Exception`.
        raise ValueError(
            'Unknown optimization method: {}'.format(opt.optMethod))
    if opt.loadOptimizer:
        # NOTE: replaces the optimizer object wholesale with the saved one.
        optimizer = torch.load(opt.loadOptimizer)

    writer = SummaryWriter('.tensorboard/{}/{}'.format(opt.dataset, opt.expID))

    # Prepare Dataset
    if opt.dataset == 'coco':
        train_dataset = coco.Mscoco(train=True)
        val_dataset = coco.Mscoco(train=False)
    elif opt.dataset == 'h36m':
        train_dataset = h36m.H36M(train=True)
        val_dataset = h36m.H36M(train=False)
    else:
        # BUG FIX: an unknown dataset previously fell through and crashed
        # later with NameError on train_dataset; fail early and clearly.
        raise ValueError('Unsupported dataset: {}'.format(opt.dataset))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.trainBatch,
                                               shuffle=True,
                                               num_workers=opt.nThreads,
                                               pin_memory=True)
    # Validate on a random 1/8 subsample (with replacement) each epoch to
    # keep validation cheap.
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=opt.validBatch,
        shuffle=False,
        sampler=RandomSampler(val_dataset,
                              replacement=True,
                              num_samples=len(val_dataset) // 8),
        num_workers=opt.nThreads,
        pin_memory=True)

    # Model Transfer: wrap for multi-GPU data parallelism.
    m = torch.nn.DataParallel(m).cuda()

    # Start Training
    for i in range(opt.nEpochs):
        opt.epoch = i

        print('############# Starting Epoch {} #############'.format(
            opt.epoch))
        loss, acc = train(train_loader, m, criterion, optimizer, writer)

        print('Train-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
            idx=opt.epoch, loss=loss, acc=acc))

        opt.acc = acc
        opt.loss = loss
        # Unwrap DataParallel so the saved state_dict has clean key names.
        m_dev = m.module
        if i % opt.snapshot == 0:
            torch.save(
                m_dev.state_dict(),
                '../exp/{}/{}/model_{}.pkl'.format(opt.dataset, opt.expID,
                                                   opt.epoch))
            torch.save(
                opt, '../exp/{}/{}/option.pkl'.format(opt.dataset, opt.expID))
            torch.save(
                optimizer,
                '../exp/{}/{}/optimizer.pkl'.format(opt.dataset, opt.expID))

        loss, acc = valid(val_loader, m, criterion, optimizer, writer)

        print('Valid-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
            idx=i, loss=loss, acc=acc))
    writer.close()
Ejemplo n.º 3
0
            add_candidate_joints(result,
                                 kp_preds.cpu().numpy(), pt1.numpy(),
                                 pt2.numpy(), opt.inputResH, opt.inputResW,
                                 opt.outputResH, opt.outputResW)

            result = {'imgname': im_name, 'result': result}
        #img = display_frame(orig_img, result, opt.outputpath)
        #ori_inp = np.transpose(
        #    ori_inp[0][:3].clone().numpy(), (1, 2, 0)) * 255
        #img = vis_frame(ori_inp, result)
        #cv2.imwrite(os.path.join(
        #    './val', 'vis', im_name), img)
        final_result.append(result)
    # np.savez('../../examples/coco_val/final_result.npz', result=result)
    select_best_candidate(
        '../../examples/coco_val/person_keypoints_val2017.json', final_result)
    write_json(final_result, '../../examples/coco_val', for_eval=True)
    return getmap()


if __name__ == '__main__':
    # Script entry point: load trained weights and run prediction on the
    # configured input list.
    m = createModel()
    # Explicit check instead of `assert`, which is stripped under `python -O`
    # and would then crash later with a less helpful error from torch.load.
    # Also fixes the "exsit" typo in the message.
    if not os.path.exists(opt.loadModel):
        raise FileNotFoundError(
            'model file {} not exist'.format(opt.loadModel))

    print('Loading Model from {}'.format(opt.loadModel))
    m.load_state_dict(torch.load(opt.loadModel))
    prediction(m, opt.inputpath, opt.boxh5, opt.inputlist)
Ejemplo n.º 4
0
def main():
    """Train a pose model on COCO, loading compatible weights if given.

    When ``opt.loadModel`` is set, all checkpoint weights except the
    ``conv_out`` head (whose shape depends on ``opt.nClasses``) are copied
    into the freshly created model, enabling transfer to a different number
    of output joints.  All other configuration comes from ``opt``.

    Raises:
        ValueError: if ``opt.optMethod`` or ``opt.dataset`` is unsupported.
    """
    # Model Initialize
    m = createModel().cuda()
    if opt.loadModel:
        print('Loading Model from {}'.format(opt.loadModel))
        # Partial load: keep the current model's conv_out head (it may have
        # a different number of joints) and copy everything else over.
        current_model_weight = m.state_dict()
        weight_save = torch.load(opt.loadModel)
        weight_save_changed = {}
        for k in weight_save:
            if 'conv_out.weight' in k or 'conv_out.bias' in k:
                print(k, 'not used')
                continue
            weight_save_changed[k] = weight_save[k]
        current_model_weight.update(weight_save_changed)
        m.load_state_dict(current_model_weight)
    else:
        print('Create new model')
    # Create the experiment directory tree in one call; this replaces the
    # duplicated try/except os.mkdir ladders, which were race-prone.
    os.makedirs("../exp/{}/{}".format(opt.dataset, opt.expID), exist_ok=True)

    criterion = torch.nn.MSELoss().cuda()

    if opt.optMethod == 'rmsprop':
        optimizer = torch.optim.RMSprop(m.parameters(), lr=opt.LR)
    elif opt.optMethod == 'adam':
        optimizer = torch.optim.Adam(m.parameters(), lr=opt.LR)
    else:
        # Fail with a diagnostic instead of a bare `raise Exception`.
        raise ValueError(
            'Unknown optimization method: {}'.format(opt.optMethod))

    writer = SummaryWriter('.tensorboard/{}/{}'.format(opt.dataset, opt.expID))

    # Prepare Dataset
    if opt.dataset == 'coco':
        train_dataset = coco.Mscoco(train=True,
                                    img_folder=opt.img_folder_train,
                                    annot_file=opt.annot_file_train,
                                    nJoints=opt.nClasses)
        val_dataset = coco.Mscoco(train=False,
                                  img_folder=opt.img_folder_val,
                                  annot_file=opt.annot_file_val,
                                  nJoints=opt.nClasses)
    else:
        # BUG FIX: any other dataset previously fell through and crashed
        # later with NameError on train_dataset; fail early and clearly.
        raise ValueError('Unsupported dataset: {}'.format(opt.dataset))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.trainBatch,
                                               shuffle=True,
                                               num_workers=opt.nThreads,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=opt.validBatch,
                                             shuffle=False,
                                             num_workers=opt.nThreads,
                                             pin_memory=True)

    # Model Transfer: wrap for multi-GPU data parallelism.
    m = DataParallel(m).cuda()

    # Start Training: nEpochs + 1 iterations so the final epoch is included
    # in the closing snapshot below.
    for i in range(opt.nEpochs + 1):
        opt.epoch = i

        print('############# Starting Epoch {} #############'.format(
            opt.epoch))
        loss, acc = train(train_loader, m, criterion, optimizer, writer)

        print('Train-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
            idx=opt.epoch, loss=loss, acc=acc))

        opt.acc = acc
        opt.loss = loss
        # Unwrap DataParallel so the saved state_dict has clean key names.
        m_dev = m.module
        # Snapshot every opt.snapshot epochs (skipping epoch 0) and always
        # at the very last epoch.
        if (i % opt.snapshot == 0 and i != 0) or i == opt.nEpochs:
            torch.save(
                m_dev.state_dict(),
                '../exp/{}/{}/model_{}.pkl'.format(opt.dataset, opt.expID,
                                                   opt.epoch))
            torch.save(
                opt, '../exp/{}/{}/option.pkl'.format(opt.dataset, opt.expID))

        loss, acc = valid(val_loader, m, criterion, optimizer, writer)

        print('Valid-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
            idx=i, loss=loss, acc=acc))
    writer.close()