    # full_name = "/home/yuliang/code/MobilePose-pytorch/models/demo/resnet18_227x227-robust.t7" # Rescale Expansion ToTensor
    # full_name = "/home/yuliang/code/MobilePose-pytorch/models/demo/resnet18_227x227.t7" # Rescale Expansion ToTensor

    ROOT_DIR = "../deeppose_tf/datasets/mpii"

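    # pick the checkpoint path and input resolution for the selected backbone;
    # DatasetFactory builds the matching test-set preprocessing (the commented-out
    # PoseDataset/transform pipelines below show the earlier manual setup)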
    if modeltype == 'resnet':
        full_name = "/home/yuliang/code/MobilePose-pytorch/models/demo/resnet18_227x227.t7"  # Rescale Expansion ToTensor
        input_size = 227
        # test_dataset = PoseDataset(csv_file=os.path.join(ROOT_DIR,'test_joints.csv'),
        #                             transform=transforms.Compose([
        #                                         Rescale((input_size, input_size)), # resnet use
        #                                         # Wrap((input_size,input_size)), # mobilenet use
        #                                         Expansion(),
        #                                         ToTensor()
        #                                     ]))
        test_dataset = DatasetFactory.get_test_dataset(modeltype, input_size)

    elif modeltype == 'mobilenet':
        full_name = "/home/yuliang/code/MobilePose-pytorch/models/demo/mobilenetv2_224x224-robust.t7"  # Wrap Expansion ToTensor
        input_size = 224
        # test_dataset = PoseDataset(csv_file=os.path.join(ROOT_DIR,'test_joints.csv'),
        #                             transform=transforms.Compose([
        #                                         # Rescale((input_size, input_size)), # resnet use
        #                                         Wrap((input_size,input_size)), # mobilenet use
        #                                         Expansion(),
        #                                         ToTensor()
        #                                     ]))
        test_dataset = DatasetFactory.get_test_dataset(modeltype, input_size)

    print("Loading testing dataset, wait...")
Example #2
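    # excerpt from a larger script: the imports it relies on (argparse, os,
    # multiprocessing, torch, torch.nn as nn, torch.optim as optim, tqdm,
    # torch.utils.data.DataLoader) plus the project-local DatasetFactory and
    # eval_coco helpers, the argparse.ArgumentParser() construction, and a
    # '--model' argument (read below as args.model) are assumed to appear
    # before these lines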
    parser.add_argument('--t7', type=str, required=True, default="")
    parser.add_argument('--gpu', type=str, required=True, default="")
    args = parser.parse_args()

    modelpath = args.t7

    device = torch.device("cuda" if len(args.gpu) > 0 else "cpu")

    # user defined parameters
    num_threads = multiprocessing.cpu_count()
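    # output prefix derived from the --t7 checkpoint path (text before its first '.');
    # the ground-truth and prediction JSON files passed to eval_coco go under it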
    PATH_PREFIX = "./results/{}".format(modelpath.split(".")[0])

    input_size = 224
    modelname = args.model
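    # note: 'batchsize' and 'learning_rate' used further below are assumed to be
    # defined elsewhere in the full script (they are not set in this excerpt)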

    test_dataset = DatasetFactory.get_test_dataset("resnet", input_size)

    print("Loading testing dataset, wait...")
    bs_test = len(test_dataset)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=bs_test,
                                 shuffle=False,
                                 num_workers=num_threads)

    # get all test data
    all_test_data = {}
    for i_batch, sample_batched in enumerate(tqdm(test_dataloader)):
        all_test_data = sample_batched
        eval_coco(all_test_data, modelname, modelpath,
                  os.path.join(PATH_PREFIX, 'result-gt-json.txt'),
                  os.path.join(PATH_PREFIX, 'result-pred-json.txt'))
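    # load the pretrained checkpoint before further training
    # (assumes 'modeltype' is defined earlier in the full script)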
    net = torch.load('./models/%s/%s' % (modeltype, modelname)).cuda()
    # alog.info(net)
    net = net.train()

    ROOT_DIR = "../deeppose_tf/datasets/mpii"  # root dir to the dataset
    PATH_PREFIX = './models/{}/'.format(modeltype)  # path to save the model

    tmp_modeltype = "resnet"
    train_dataset = DatasetFactory.get_train_dataset(tmp_modeltype, input_size)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batchsize,
                                  shuffle=False,
                                  num_workers=num_threads)

    test_dataset = DatasetFactory.get_test_dataset(tmp_modeltype, input_size)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=batchsize,
                                 shuffle=False,
                                 num_workers=num_threads)

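    # regression loss on the predicted joint coordinates: MSE, optimized with RMSprop
    # (Adam and SGD alternatives are kept commented out)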
    criterion = nn.MSELoss().cuda()
    # optimizer = optim.Adam(net.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08)
    # optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
    optimizer = optim.RMSprop(net.parameters(), lr=learning_rate, momentum=0.9)

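    # helper: mean squared error averaged over every element of the prediction tensor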
    def mse_loss(input, target):
        return torch.sum(torch.pow(input - target, 2)) / input.nelement()

    train_loss_all = []
    valid_loss_all = []