Example #1
import torch

# setup_logger, the build_* helpers, do_train and Checkpointer are
# project-local imports omitted from this excerpt.
def train(cfg):
    logger = setup_logger(name='Train', level=cfg.LOGGER.LEVEL)
    logger.info(cfg)
    model = build_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)

    criterion = build_loss(cfg)

    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    train_loader = build_data(cfg, is_train=True)
    val_loader = build_data(cfg, is_train=False)

    logger.info(train_loader.dataset)
    logger.info(val_loader.dataset)

    # Mutable training state handed to do_train
    arguments = {'iteration': 0}

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    checkpointer = Checkpointer(model, optimizer, scheduler, cfg.SAVE_DIR)

    do_train(cfg, model, train_loader, val_loader, optimizer, scheduler,
             criterion, checkpointer, device, checkpoint_period, arguments,
             logger)
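
A quick usage sketch for the snippet above, assuming a yacs-style CfgNode config: the field names mirror the ones train() reads, while the concrete values (and the yacs dependency itself) are assumptions.

# Hypothetical config -- keys mirror those accessed in train(); the
# build_* helpers will typically require additional fields.
from yacs.config import CfgNode as CN

cfg = CN()
cfg.LOGGER = CN()
cfg.LOGGER.LEVEL = 'INFO'
cfg.MODEL = CN()
cfg.MODEL.DEVICE = 'cuda:0'
cfg.SOLVER = CN()
cfg.SOLVER.CHECKPOINT_PERIOD = 1000
cfg.SAVE_DIR = './checkpoints'

train(cfg)
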
Example #2
from typing import Dict, Optional

import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from tqdm import tqdm
# build_loss, build_optimizer, minimize, get_variables, get_vertices_per_edge,
# apply_deformation_transfer, summary_closure, build_edge_closure,
# build_vertex_closure, batch_rodrigues and `logger` are project-local.


def run_fitting(
    exp_cfg,
    batch: Dict[str, Tensor],
    body_model: nn.Module,
    def_matrix: Tensor,
    mask_ids: Optional[Tensor] = None
) -> Dict[str, Tensor]:
    '''Runs the fitting and returns the fitted model variables.'''
    vertices = batch['vertices']
    faces = batch['faces']

    batch_size = len(vertices)
    dtype, device = vertices.dtype, vertices.device
    summary_steps = exp_cfg.get('summary_steps')
    interactive = exp_cfg.get('interactive')

    # Get the parameters from the model
    var_dict = get_variables(batch_size, body_model)

    # Optimizer settings shared by all fitting stages below
    optim_cfg = exp_cfg.get('optim', {})

    # Map the input vertices onto the body model's topology
    def_vertices = apply_deformation_transfer(def_matrix, vertices, faces)

    if mask_ids is None:
        # No mask: keep every face of the template mesh
        f_sel = np.ones_like(body_model.faces[:, 0], dtype=np.bool_)
    else:
        # Build a vertex-to-face adjacency list, then select the faces that
        # touch at least one masked vertex
        f_per_v = [[] for _ in range(body_model.get_num_verts())]
        for iff, ff in enumerate(body_model.faces):
            for vv in ff:
                f_per_v[vv].append(iff)
        f_sel = sorted({iff for vv in mask_ids for iff in f_per_v[vv]})
    # Edges of the selected faces, used as targets for the edge-based loss
    vpe = get_vertices_per_edge(
        body_model.v_template.detach().cpu().numpy(), body_model.faces[f_sel])

    def log_closure():
        return summary_closure(def_vertices, var_dict, body_model,
                               mask_ids=mask_ids)

    edge_fitting_cfg = exp_cfg.get('edge_fitting', {})
    edge_loss = build_loss(type='vertex-edge', gt_edges=vpe, est_edges=vpe,
                           **edge_fitting_cfg)
    edge_loss = edge_loss.to(device=device)

    vertex_fitting_cfg = exp_cfg.get('vertex_fitting', {})
    vertex_loss = build_loss(**vertex_fitting_cfg)
    vertex_loss = vertex_loss.to(device=device)

    per_part = edge_fitting_cfg.get('per_part', True)
    logger.info(f'Per-part: {per_part}')
    # Optimize edge-based loss to initialize pose
    if per_part:
        for key, var in tqdm(var_dict.items(), desc='Parts'):
            if 'pose' not in key:
                continue

            for jidx in tqdm(range(var.shape[1]), desc='Joints'):
                part = torch.zeros(
                    [batch_size, 3], dtype=dtype, device=device,
                    requires_grad=True)
                # Build the optimizer for the current part
                optimizer_dict = build_optimizer([part], optim_cfg)
                closure = build_edge_closure(
                    body_model, var_dict, edge_loss, optimizer_dict,
                    def_vertices, per_part=per_part, part_key=key, jidx=jidx,
                    part=part)

                minimize(optimizer_dict['optimizer'], closure,
                         params=[part],
                         summary_closure=log_closure,
                         summary_steps=summary_steps,
                         interactive=interactive,
                         **optim_cfg)
                with torch.no_grad():
                    var[:, jidx] = part
    else:
        optimizer_dict = build_optimizer(list(var_dict.values()), optim_cfg)
        closure = build_edge_closure(
            body_model, var_dict, edge_loss, optimizer_dict,
            def_vertices, per_part=per_part)

        minimize(optimizer_dict['optimizer'], closure,
                 params=list(var_dict.values()),
                 summary_closure=log_closure,
                 summary_steps=summary_steps,
                 interactive=interactive,
                 **optim_cfg)

    if 'translation' in var_dict:
        optimizer_dict = build_optimizer([var_dict['translation']], optim_cfg)
        closure = build_vertex_closure(
            body_model, var_dict,
            optimizer_dict,
            def_vertices,
            vertex_loss=vertex_loss,
            mask_ids=mask_ids,
            per_part=False,
            params_to_opt=[var_dict['translation']],
        )
        # Optimize translation
        minimize(optimizer_dict['optimizer'],
                 closure,
                 params=[var_dict['translation']],
                 summary_closure=log_closure,
                 summary_steps=summary_steps,
                 interactive=interactive,
                 **optim_cfg)

    # Optimize all model parameters with the vertex-based loss
    optimizer_dict = build_optimizer(list(var_dict.values()), optim_cfg)
    closure = build_vertex_closure(
        body_model, var_dict,
        optimizer_dict,
        def_vertices,
        vertex_loss=vertex_loss,
        per_part=False,
        mask_ids=mask_ids)
    minimize(optimizer_dict['optimizer'], closure,
             params=list(var_dict.values()),
             summary_closure=log_closure,
             summary_steps=summary_steps,
             interactive=interactive,
             **optim_cfg)

    param_dict = {}
    for key, var in var_dict.items():
        # Decode the axis-angles
        if 'pose' in key or 'orient' in key:
            param_dict[key] = batch_rodrigues(
                var.reshape(-1, 3)).reshape(len(var), -1, 3, 3)
        else:
            # Simply pass the variable
            param_dict[key] = var

    body_model_output = body_model(
        return_full_pose=True, get_skin=True, **param_dict)
    var_dict.update(body_model_output)
    var_dict['faces'] = body_model.faces

    return var_dict
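
A minimal driver sketch for run_fitting(): the dict and config keys mirror the ones used inside the function, while the tensors, values, and variable names are assumptions.

# Hypothetical usage -- only the dict/config keys come from run_fitting().
batch = {
    'vertices': source_vertices,  # Tensor, shape (batch_size, num_verts, 3)
    'faces': source_faces,        # Tensor, shape (num_faces, 3)
}
exp_cfg = {
    'summary_steps': 50,
    'interactive': False,
    'optim': {},                  # forwarded to build_optimizer / minimize
    'edge_fitting': {'per_part': True},
    'vertex_fitting': {},
}
fitted = run_fitting(exp_cfg, batch, body_model, def_matrix, mask_ids=None)
# fitted holds the optimized variables plus the body model output and faces
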
Example #3
    def init_criterion(self):
        anchor = self.model.get_anchor_box()

        return build_loss(self.args, anchor=anchor)
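
The criterion above is anchor-aware and would normally be built once during trainer setup. A minimal sketch, assuming a conventional trainer layout; everything except init_criterion() itself is an assumption.

# Hypothetical call site -- the method names and the criterion's call
# signature are assumptions, not part of the source.
class Trainer:
    def setup(self):
        # Build the anchor-aware criterion once, after the model exists
        self.criterion = self.init_criterion()

    def compute_loss(self, predictions, targets):
        return self.criterion(predictions, targets)  # assumed signature
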
def loss_fn(batch, model_outputs, viz_extra=False, trainer=None):
    # Number of samples in the batch
    B = batch['vert_pos'].shape[0]

    # Data from the sample
    vert_pos_batch = batch['vert_pos'].to(world.device)
    surf_pos_batch = batch['surf_pos'].to(world.device)
    surf_normal_batch = batch['surf_normal'].to(world.device)

    # Outputs from model
    all_candidates = model_outputs['candidates']
    all_candidate_probs = model_outputs['probs']
    all_proposals = model_outputs['proposals']
    all_proposal_probs = model_outputs['proposal_probs']

    # Accumulate loss
    need_grad = all_candidate_probs.requires_grad
    total_loss = torch.tensor(0.0,
                              dtype=vert_pos_batch.dtype,
                              device=vert_pos_batch.device,
                              requires_grad=need_grad)

    # Evaluate loss one batch entry at a time
    for b in range(B):

        vert_pos = vert_pos_batch[b, :]
        candidates = all_candidates[b, :, :]
        candidate_probs = all_candidate_probs[b, :]
        proposals = all_proposals[b, :, :]
        proposal_probs = all_proposal_probs[b, :]

        surf_pos = surf_pos_batch[b, :]
        surf_normal = surf_normal_batch[b, :]

        # Add all the loss terms (`args` here is a module-level options
        # namespace, not defined in this excerpt)
        loss_terms = losses.build_loss(args,
                                       vert_pos,
                                       candidates,
                                       candidate_probs,
                                       surf_pos=surf_pos,
                                       surf_normal=surf_normal,
                                       n_sample=1000)

        loss_terms["proposal_match"] = losses.match_predictions(
            candidates, candidate_probs.detach(), proposals, proposal_probs)

        this_loss = torch.tensor(0.0,
                                 dtype=vert_pos_batch.dtype,
                                 device=vert_pos_batch.device,
                                 requires_grad=need_grad)
        for t in loss_terms:
            this_loss = this_loss + loss_terms[t]

        # Log some stats
        if trainer is not None:
            if trainer.training:
                prefix = "train_"
                it = trainer.curr_iter + b
            else:
                prefix = "val_"
                it = trainer.eval_iter + b

            # log less
            if it % 10 == 0:

                for t in loss_terms:
                    world.tb_writer.add_scalar(prefix + t,
                                               loss_terms[t].item(), it)

                world.tb_writer.add_scalar(prefix + "sample loss",
                                           this_loss.item(), it)

                if it % 1000 == 0:
                    world.tb_writer.add_histogram(prefix + 'triangle_probs',
                                                  candidate_probs.detach(), it)
                    world.tb_writer.add_histogram(
                        prefix + 'triangle_proposal_probs',
                        proposal_probs.detach(), it)

                world.tb_writer.add_scalar(prefix + "prob mean",
                                           torch.mean(candidate_probs).item(),
                                           it)
                world.tb_writer.add_scalar(prefix + "prob stddev",
                                           torch.std(candidate_probs).item(),
                                           it)

                if not trainer.training:
                    trainer.add_eval_stat_entry(
                        "prob mean",
                        torch.mean(candidate_probs).item())
                    trainer.add_eval_stat_entry(
                        "prob std",
                        torch.std(candidate_probs).item())
                    for t in loss_terms:
                        trainer.add_eval_stat_entry(t, loss_terms[t].item())

        total_loss = total_loss + this_loss

    return total_loss / B
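
A hedged sketch of how loss_fn() slots into a training step; only loss_fn and the output keys it expects ('candidates', 'probs', 'proposals', 'proposal_probs') come from the snippet above, the rest is assumed.

# Hypothetical training step -- model, optimizer and trainer are assumptions.
model_outputs = model(batch)
loss = loss_fn(batch, model_outputs, trainer=trainer)
optimizer.zero_grad()
loss.backward()
optimizer.step()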