Example No. 1
def main(args):
    transform = getTransforms()

    data_path = args.input_data
    if not os.path.exists(data_path):
        print('ERROR: No dataset named {}'.format(data_path))
        exit(1)

    dataset = EvalDataset(data_path, transform=transform)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1)

    with open(args.class_list, 'r') as class_file:
        class_names = []
        for class_name in class_file.readlines():
            if len(class_name.strip()) > 0:
                class_names.append(class_name.strip())

    model = ResNet(num_layers=18, num_classes=len(class_names)).to(DEVICE)
    model = model.eval()

    output_dir = os.path.join(data_path, 'out')
    os.makedirs(output_dir, exist_ok=True)

    model_file = args.model_file

    if os.path.exists(model_file):
        checkpoint = torch.load(model_file)
        if 'state_dict' in checkpoint.keys():
            model.load_state_dict(checkpoint['state_dict'], strict=False)
        else:
            model.load_state_dict(checkpoint, strict=False)
        print('=> loaded {}'.format(model_file))

    else:
        print('model_file "{}" does not exist.'.format(model_file))
        exit(1)

    font = cv2.FONT_HERSHEY_SIMPLEX

    with torch.no_grad():
        for data, path in dataloader:
            outputs = model(data.to(DEVICE))
            _, predicted = torch.max(outputs.data, 1)
            predicted = predicted.to('cpu')[0].item()
            class_text = class_names[predicted]
            print(class_text, path)

            image = cv2.imread(path[0], cv2.IMREAD_COLOR)
            image = cv2.rectangle(image, (0, 0), (150, 25), (255, 255, 255),
                                  -1)
            image = cv2.rectangle(image, (0, 0), (150, 25), (255, 0, 0), 2)
            cv2.putText(image, class_text, (5, 15), font, 0.5,
                        (255, 0, 0), 1, cv2.LINE_AA)
            cv2.imwrite(os.path.join(output_dir, os.path.basename(path[0])),
                        image)
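
The example above assumes a getTransforms() helper and a module-level DEVICE that are not shown. A minimal sketch of what they might look like, using torchvision; the resize dimensions and normalization constants are assumptions, not values taken from the original project:

import torch
from torchvision import transforms

# Assumed device selection: prefer the GPU when one is available.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def getTransforms():
    # Hypothetical preprocessing pipeline for 3-channel images.
    return transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
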
Example No. 2
def main(args):
    transform = getTransforms()

    data_path = os.path.join('data', args.data)
    if not os.path.exists(data_path):
        print('ERROR: No dataset named {}'.format(args.data))
        exit(1)

    testset = BaseDataset(list_path=os.path.join(data_path, 'val.lst'),
                          transform=transform)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=1)

    class_list = getClassList(data_path)

    model = ResNet(num_layers=18, num_classes=len(class_list)).to(DEVICE)
    model.eval()

    output_dir = os.path.join('outputs', args.data)
    model_state_file = os.path.join(output_dir, 'checkpoint.pth.tar')

    model_file = args.model_file
    if len(model_file) == 0:
        model_file = model_state_file

    if os.path.exists(model_file):
        checkpoint = torch.load(model_file)
        if 'state_dict' in checkpoint.keys():
            model.load_state_dict(checkpoint['state_dict'], strict=False)
        else:
            model.load_state_dict(checkpoint, strict=False)
        print('=> loaded {}'.format(model_file))

    else:
        print('model_file "{}" does not exist.'.format(model_file))
        exit(1)

    accuracy = test(model=model,
                    dataloader=testloader,
                    device=DEVICE,
                    classes=class_list)

    print('Accuracy: {:.2f}%'.format(100 * accuracy))
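
Example No. 2 delegates evaluation to a test() helper that is not defined here. A sketch of what such an accuracy loop could look like, assuming the dataloader yields (input, label) pairs; the real helper may also report per-class statistics for the names in classes:

import torch


def test(model, dataloader, device, classes):
    # Count correct top-1 predictions over the whole loader.
    correct, total = 0, 0
    model.eval()
    with torch.no_grad():
        for inputs, labels in dataloader:
            outputs = model(inputs.to(device))
            _, predicted = torch.max(outputs, 1)
            correct += (predicted == labels.to(device)).sum().item()
            total += labels.size(0)
    return correct / max(total, 1)
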
Example No. 3
def main():
    # for repeatable experiments
    torch.backends.cudnn.enabled = False
    cudnn.benchmark = False
    cudnn.deterministic = True
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)

    # gpus
    gpus = [0]

    noise_h36m = 'result_H36M.pth'
    # --------------------------------------------------------------------
    # test loader for final prediction
    loader_test = torch.utils.data.DataLoader(dataset=H36M17(
        2, 'test', False, False, 2, 0.0, 0.0, noise_h36m),
                                              batch_size=512 * len(gpus),
                                              shuffle=False,
                                              num_workers=conf.num_threads)

    # build models
    #device = torch.device("cuda:1")
    generator = ResNet(3000).cuda()
    generator = nn.DataParallel(generator, device_ids=gpus)
    generator.eval()

    save_dir = '/media/sunwon/Samsung_T5/MeshLifter/demo_meshlifter'  # directory of final model.pth

    file_name = os.path.join(save_dir, 'final_model.pth')
    if os.path.exists(file_name):
        state = torch.load(file_name)
        generator.load_state_dict(state['generator'])
        print('Successfully loaded {}'.format(file_name))
    else:
        print('"{}" does not exist!'.format(file_name))

    # generate final prediction
    with torch.no_grad():
        test('test', 1, loader_test, generator)
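
Example No. 3 expects final_model.pth to be a dict with a 'generator' key. One way such a checkpoint could have been produced on the training side (a sketch; the exact keys stored by the original training script are an assumption):

import torch


def save_checkpoint(generator, save_path):
    # Store the generator weights under the key the loader above expects.
    # The real checkpoint may also hold optimizer state, epoch, etc.
    torch.save({'generator': generator.state_dict()}, save_path)
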
Example No. 4
def get_models(args, train=True, as_ensemble=False, model_file=None, leaky_relu=False):
    models = []
    
    mean = torch.tensor([0.4914, 0.4822, 0.4465], dtype=torch.float32).cuda()
    std = torch.tensor([0.2023, 0.1994, 0.2010], dtype=torch.float32).cuda()
    normalizer = NormalizeByChannelMeanStd(mean=mean, std=std)

    if model_file:
        state_dict = torch.load(model_file)
        if train:
            print('Loading pre-trained models...')
    
    iter_m = state_dict.keys() if model_file else range(args.model_num)

    for i in iter_m:
        if args.arch.lower() == 'resnet':
            model = ResNet(depth=args.depth, leaky_relu=leaky_relu)
        else:
            raise ValueError(
                '[{:s}] architecture is not supported yet...'.format(args.arch))
        # we include input normalization as a part of the model
        model = ModelWrapper(model, normalizer)
        if model_file:
            model.load_state_dict(state_dict[i])
        if train:
            model.train()
        else:
            model.eval()
        model = model.cuda()
        models.append(model)

    if as_ensemble:
        assert not train, 'Must be in eval mode when getting models to form an ensemble'
        ensemble = Ensemble(models)
        ensemble.eval()
        return ensemble
    else:
        return models
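
get_models relies on ModelWrapper and Ensemble classes that are not shown. A plausible sketch of both, assuming the wrapper applies the channel-wise normalizer before the forward pass and the ensemble averages softmax outputs (both behaviors are assumptions about the real classes):

import torch
import torch.nn as nn
import torch.nn.functional as F


class ModelWrapper(nn.Module):
    # Bundles input normalization with the wrapped classifier.
    def __init__(self, model, normalizer):
        super().__init__()
        self.model = model
        self.normalizer = normalizer

    def forward(self, x):
        return self.model(self.normalizer(x))


class Ensemble(nn.Module):
    # Averages the softmax probabilities of the member models.
    def __init__(self, models):
        super().__init__()
        self.models = nn.ModuleList(models)

    def forward(self, x):
        probs = torch.stack([F.softmax(m(x), dim=1) for m in self.models])
        return probs.mean(dim=0)
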
Example No. 5
def loadModel(data_root, file_list, backbone_net, gpus='0,1,2,3', resume=None):

    if backbone_net == 'MobileFace':
        raise NotImplementedError
    elif backbone_net == 'Res50':
        net = ResNet(10575)
    elif backbone_net == 'Res101':
        raise NotImplementedError
    else:
        raise ValueError('{} is not available!'.format(backbone_net))

    # gpu init
    multi_gpus = False
    if len(gpus.split(',')) > 1:
        multi_gpus = True
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(loadStateDict(resume))

    net = net.backbone
    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0, 1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0, 1.0]
    ])
    lfw_dataset = LFW(data_root, file_list, transform=transform)
    lfw_loader = torch.utils.data.DataLoader(lfw_dataset,
                                             batch_size=128,
                                             shuffle=False,
                                             num_workers=2,
                                             drop_last=False)

    return net.eval(), device, lfw_dataset, lfw_loader
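
loadModel calls a loadStateDict(resume) helper that is not defined in this snippet. A common pattern is to load the checkpoint and strip the 'module.' prefix that DataParallel adds to parameter names; the sketch below assumes the file holds either a bare state dict or one nested under a 'state_dict' key:

import torch


def loadStateDict(resume):
    # Hypothetical helper: normalize the checkpoint into a plain state dict.
    checkpoint = torch.load(resume, map_location='cpu')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        checkpoint = checkpoint['state_dict']
    return {(k[len('module.'):] if k.startswith('module.') else k): v
            for k, v in checkpoint.items()}
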
Example No. 6
def main(db):
    if db == 'h36m':
        loader_val = torch.utils.data.DataLoader(dataset=H36M17(
            2, 'test', False, False, 2, 0, 0, noise_h36_val),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=0)
    else:
        raise ValueError('Unsupported dataset: {}'.format(db))
    model = ResNet(3000).cuda()
    model = nn.DataParallel(model, device_ids=[0])

    #last full model

    save_dir = '/media/sunwon/Samsung_T5/MeshLifter/demo_meshlifter/'

    filename = '%s/final_model.pth' % (save_dir)
    state = torch.load(filename)
    model.load_state_dict(state['generator'], strict=False)
    model.eval()

    for i, data in enumerate(loader_val):
        if np.mod(i, 1) == 0:
            pose2d = data['pose2d'].float().to("cuda")
            print(pose2d)
            bbox = data['bbox'].float().to("cuda")
            pose3d = data['pose3d'].float().to("cuda")
            rot = data['rot'].to("cuda").detach()
            rot_inv = data['rot_inv'].to("cuda").detach()
            img = data['img'].detach().cpu().numpy().squeeze()
            #img = cv2.resize(img, (256,256))
            meta2d = pose2d[0].clone()

            faces = model.module.smpl.faces
            pose2d_in = _normalize_pose(pose2d)
            rot = rot[0].detach().cpu().numpy()

            generator_output = model(pose2d_in)
            (thetas_out, verts_out, pose3d_out) = generator_output

            pose3d = pose3d.detach().cpu().numpy().squeeze()
            pose3d_out = pose3d_out.detach().cpu().numpy().squeeze()

            verts = verts_out[0].detach().cpu().numpy()
            pose2d_rot = pose3d_out[:, :2]

            pose2d = pose2d[0].detach().cpu().numpy()

            pose2d, mean, std = normalize_np_pose(pose2d)
            pose2d_rot, a, b = normalize_np_pose(pose2d_rot)
            pose2d_rot = pose2d_rot * std + mean
            vertex_color = np.ones([verts.shape[0], 4]) * [0.8, 0.8, 0.8, 1.0]
            tri_mesh = trimesh.Trimesh(verts,
                                       faces,
                                       vertex_colors=vertex_color)

            pts_color = np.ones([pose3d_out.shape[0], 4])
            pts3d = pyrender.Mesh.from_points(pose3d_out, colors=pts_color)

            mesh = pyrender.Mesh.from_trimesh(tri_mesh)
            scene = pyrender.Scene()
            scene.add(mesh)

            pose2d = pose2d[0]  # batch 14 2  -> 14 2
            num_joints = pose2d.shape[0]  # avoid shadowing the built-in len

            ori_img = img.copy()

            #img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            img_2d = draw_skeleton(img, meta2d)
            print(meta2d)
            ori_img = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
            cv2.imshow('original', img_2d)

            # cv2.imshow('mesh', rn.r)
            k = cv2.waitKey(0)
            if k == 27:
                cv2.destroyAllWindows()
                pdb.set_trace()
                break
            elif k == ord('s'):
                cv2.destroyAllWindows()

            pyrender.Viewer(scene, use_raymond_lighting=True)

            verts = np.expand_dims(verts, axis=0)
            joints = np.expand_dims(pose3d_out, axis=0)
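
Example No. 6 uses a normalize_np_pose helper (and a tensor-side _normalize_pose analogue) that is not shown. A sketch of a zero-mean, unit-scale normalization for a NumPy pose array; the exact scheme used by the project is an assumption, but it must return the statistics so the transform can be undone, as done with pose2d_rot * std + mean above:

import numpy as np


def normalize_np_pose(pose):
    # Center the joints and divide by the overall standard deviation,
    # returning (normalized_pose, mean, std).
    mean = pose.mean(axis=0, keepdims=True)
    std = pose.std() + 1e-8
    return (pose - mean) / std, mean, std
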
Example No. 7
# image transformation function
loader = transforms.Compose([transforms.ToTensor()])

# checking if the GPU is available for inference
is_use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if is_use_cuda else "cpu")

# initializing the model
net = ResNet(depth=14, in_channels=1, output=3)
# moving the net to GPU for testing
if is_use_cuda:
    net.to(device)
    net = nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
# loading the network parameters
net.load_state_dict(torch.load("./checkpoints/model.pth", map_location=device))
net.eval()
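
With the network loaded, a single grayscale image can be run through it as follows. find_circle is a hypothetical wrapper (not part of the original snippet); it assumes the net's three outputs are the predicted (row, col, radius) and reuses the loader, net, and device defined above:

def find_circle(img):
    # Convert the HxW float image to a 1x1xHxW tensor, run the network,
    # and return the three predicted values as a NumPy array.
    with torch.no_grad():
        x = loader(img.astype(np.float32)).unsqueeze(0).to(device)
        out = net(x)
    return out.squeeze(0).cpu().numpy()
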


def draw_circle(img, row, col, rad):
    rr, cc, val = circle_perimeter_aa(row, col, rad)
    valid = ((rr >= 0) & (rr < img.shape[0]) & (cc >= 0) & (cc < img.shape[1]))
    img[rr[valid], cc[valid]] = val[valid]


def noisy_circle(size, radius, noise):
    img = np.zeros((size, size), dtype=float)

    # Circle
    row = np.random.randint(size)
    col = np.random.randint(size)
    rad = np.random.randint(10, max(10, radius))