Example #1
            _, feas = model(centers, corners, normals, neighbor_index)
            ft_all = feas.cpu().squeeze(0).numpy()
            embed_dict[file_id] = ft_all

    ids = sorted(embed_dict.keys())
    for i in ids:
        embed_npy.append(embed_dict[i])

    print(f'Number of embeddings in testset: {len(ids)}')
    np.save(f'./results/test/embed_{args.fold}.npy', embed_npy)


if __name__ == '__main__':
    if args.task == 'Shape':
        num_classes = 8
    else:
        num_classes = 6

    model = MeshNet(cfg=cfg['MeshNet'],
                    num_classes=num_classes,
                    require_fea=True)
    model.cuda()
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.weight))

    os.makedirs('./results/test', exist_ok=True)
    model.eval()

    inference(model)
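
The embeddings written above can be reused for retrieval. Below is a minimal sketch of loading the file and running a cosine nearest-neighbour query; the fold suffix and the assumption that the file holds one row per mesh are placeholders, not part of the script:

import numpy as np

# load the per-mesh embeddings written by inference()
# (the fold suffix '0' is a placeholder)
embed = np.load('./results/test/embed_0.npy')  # assumed shape: (num_meshes, dim)

# cosine-similarity nearest neighbours for a query mesh
embed = embed / np.linalg.norm(embed, axis=1, keepdims=True)
query_idx = 0                                  # hypothetical query index
sims = embed @ embed[query_idx]
top5 = np.argsort(-sims)[1:6]                  # skip the query itself
print('Nearest neighbours of mesh 0:', top5)
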
Example #2
        running_scale_loss += scale_loss.item()
        running_center_loss += center_loss.item()
        running_rotation_loss += rotation_loss.item()
        running_miou += miou.item()

    epoch_loss = running_loss / len(data_set)
    epoch_l1_loss = running_l1_loss / len(data_set)
    epoch_scale_loss = running_scale_loss / len(data_set)
    epoch_center_loss = running_center_loss / len(data_set)
    epoch_rotation_loss = running_rotation_loss / len(data_set)
    epoch_miou = running_miou / len(data_set)

    print('Loss: {:.4f}'.format(float(epoch_loss)))
    print('L1 Loss: {:.4f}'.format(float(epoch_l1_loss)))
    print('Scale L1 Loss: {:.4f}'.format(float(epoch_scale_loss)))
    print('Center L1 Loss: {:.4f}'.format(float(epoch_center_loss)))
    print('Rotation L1 Loss: {:.4f}'.format(float(epoch_rotation_loss)))
    print('Mean IoU: {:.4f}'.format(float(epoch_miou)))


if __name__ == '__main__':

    model = MeshNet(cfg=cfg['MeshNet'], require_fea=True)
    if use_gpu:
        model.cuda()
    model = nn.DataParallel(model)
    model.load_state_dict(
        torch.load(os.path.join(root_path, cfg['load_model'])))
    model.eval()

    test_model(model)
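
The miou accumulated above is produced earlier in the loop, outside this excerpt. As one plausible formulation, here is a sketch of batch-mean IoU for axis-aligned 3D boxes parameterized by (center, scale) with positive scales; this parameterization is an assumption for illustration only:

import torch

def aabb_miou(pred_center, pred_scale, gt_center, gt_scale):
    """Mean IoU over a batch of axis-aligned 3D boxes.

    All tensors are (B, 3); a box spans center +/- scale / 2.
    The parameterization is assumed, not taken from the script.
    """
    pred_min, pred_max = pred_center - pred_scale / 2, pred_center + pred_scale / 2
    gt_min, gt_max = gt_center - gt_scale / 2, gt_center + gt_scale / 2

    inter = (torch.min(pred_max, gt_max) - torch.max(pred_min, gt_min)).clamp(min=0)
    inter_vol = inter.prod(dim=1)
    union_vol = pred_scale.prod(dim=1) + gt_scale.prod(dim=1) - inter_vol
    return (inter_vol / union_vol.clamp(min=1e-8)).mean()
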
Example #3
    except KeyboardInterrupt:
        return best_values

    return best_values


if __name__ == '__main__':

    if args.task == 'Shape':
        num_classes = 8
    else:
        num_classes = 6

    model = MeshNet(cfg=cfg['MeshNet'],
                    num_classes=num_classes,
                    require_fea=True)
    model = nn.DataParallel(model)

    # optionally warm-start from pretrained weights, tolerating missing keys
    if 'pretrained' in cfg:
        try:
            ret = model.load_state_dict(torch.load(cfg['pretrained']),
                                        strict=False)
            print('Loaded pretrained weights; missing keys: {}'.format(
                ret.missing_keys))
        except RuntimeError as e:
            print('Skipping pretrained weights: {}'.format(e))

    model = model.cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.AdamW(model.parameters(),
                            lr=cfg['lr'],
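
Note that load_state_dict(strict=False) only tolerates missing and unexpected keys; tensors whose shapes disagree still raise RuntimeError, which is what the try/except above guards against. A sketch of a hypothetical helper that instead drops mismatched tensors before loading:

import torch

def load_matching_weights(model, ckpt_path):
    """Load only checkpoint tensors whose name and shape match the model."""
    ckpt = torch.load(ckpt_path, map_location='cpu')
    own = model.state_dict()
    kept = {k: v for k, v in ckpt.items()
            if k in own and v.shape == own[k].shape}
    own.update(kept)
    model.load_state_dict(own)
    print('Loaded {}/{} tensors from {}'.format(len(kept), len(ckpt), ckpt_path))
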
Example #4
                
                if epoch % cfg['save_steps'] == 0:
                    torch.save(model.state_dict(),
                               os.path.join(cfg['ckpt_root'],
                                            '{}.pkl'.format(epoch)))
                
                print(print_info)

    print('Best val acc: {:.4f}'.format(best_acc))
    print('Config: {}'.format(cfg))

    return best_model_wts


if __name__ == '__main__':

    # prepare model
    model = MeshNet(cfg=cfg['MeshNet'], require_fea=True)
    model.cuda()
    model = nn.DataParallel(model)

    # criterion
    criterion = nn.CrossEntropyLoss()

    # optimizer
    if cfg['optimizer'] == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=cfg['lr'],
                              momentum=cfg['momentum'],
                              weight_decay=cfg['weight_decay'])
    else:
        optimizer = optim.AdamW(model.parameters(),
                                lr=cfg['lr'],
                                weight_decay=cfg['weight_decay'])
    
    # scheduler
    if cfg['scheduler'] == 'step':
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg['milestones'])
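
All hyperparameters come from the cfg mapping. A minimal sketch of the keys this excerpt reads, with placeholder values; the real values live in the project's config file:

cfg = {
    'lr': 1e-3,                 # placeholder values throughout
    'momentum': 0.9,
    'weight_decay': 5e-4,
    'optimizer': 'sgd',         # anything else selects AdamW
    'scheduler': 'step',
    'milestones': [30, 60],
    'save_steps': 10,
    'ckpt_root': './ckpt_root',
}
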
Example #5
                    best_loss = epoch_loss
                    best_model_wts = copy.deepcopy(model.state_dict())
                if epoch % 2 == 0:
                    torch.save(model.state_dict(),
                               os.path.join(root_path, 'ckpt_root',
                                            '{}.pkl'.format(epoch)))

                print('{} Loss: {:.4f}'.format(phrase, epoch_loss))

        save_loss_plot(train_losses, val_losses, root_path)

    return best_model_wts


if __name__ == '__main__':

    model = MeshNet(cfg=cfg['MeshNet'], require_fea=True)
    if use_gpu:
        model.cuda()
    model = nn.DataParallel(model)
    #model.load_state_dict(torch.load(os.path.join(root_path, cfg['ckpt_root'], 'MeshNet_best.pkl')))
    criterion = nn.L1Loss()
    optimizer = optim.SGD(model.parameters(),
                          lr=cfg['lr'],
                          momentum=cfg['momentum'],
                          weight_decay=cfg['weight_decay'])
    #optimizer = optim.Adam(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=cfg['milestones'],
                                               gamma=cfg['gamma'])

    for f in os.listdir(root_path + '/ckpt_root/'):
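
save_loss_plot is defined elsewhere in the repo. A plausible minimal implementation matching the call site save_loss_plot(train_losses, val_losses, root_path), offered as an assumption rather than the project's actual helper:

import os
import matplotlib
matplotlib.use('Agg')           # headless-safe backend
import matplotlib.pyplot as plt

def save_loss_plot(train_losses, val_losses, root_path):
    """Plot per-epoch train/val losses and save a PNG under root_path."""
    plt.figure()
    plt.plot(train_losses, label='train')
    plt.plot(val_losses, label='val')
    plt.xlabel('epoch')
    plt.ylabel('L1 loss')
    plt.legend()
    plt.savefig(os.path.join(root_path, 'loss.png'))
    plt.close()
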
Example #6
            corners = corners.cuda()
            normals = normals.cuda()
            neighbor_index = neighbor_index.cuda()
            targets = targets.cuda()

            outputs, feas = model(centers, corners, normals, neighbor_index)
            _, preds = torch.max(outputs, 1)

            correct_num += (preds == targets).float().sum()

            if cfg['retrieval_on']:
                ft_all = append_feature(ft_all, feas.detach().cpu())
                lbl_all = append_feature(lbl_all,
                                         targets.detach().cpu(),
                                         flaten=True)

    print('Accuracy: {:.4f}'.format(float(correct_num) / len(data_set)))
    if cfg['retrieval_on']:
        print('mAP: {:.4f}'.format(calculate_map(ft_all, lbl_all)))


if __name__ == '__main__':

    model = MeshNet(cfg=cfg['MeshNet'], require_fea=True)
    model.cuda()
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(cfg['load_model']))
    model.eval()

    test_model(model)
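
append_feature and calculate_map are repo utilities not shown here. The sketch below is consistent with the call sites above; the flaten spelling mirrors the keyword actually passed, and the mAP definition (leave-one-out average precision under Euclidean distance) is an assumption:

import numpy as np

def append_feature(acc, t, flaten=False):
    """Accumulate a batch of CPU tensors into a numpy array (acc starts as None)."""
    t = t.numpy()
    if flaten:
        t = t.reshape(-1)                    # labels: flatten to 1-D
    else:
        t = t.reshape(t.shape[0], -1)        # features: one row per sample
    return t if acc is None else np.concatenate((acc, t), axis=0)

def calculate_map(ft_all, lbl_all):
    """Leave-one-out retrieval mAP under Euclidean distance."""
    dist = np.linalg.norm(ft_all[:, None] - ft_all[None, :], axis=-1)
    aps = []
    for i in range(len(lbl_all)):
        order = np.argsort(dist[i])
        order = order[order != i]            # drop the query itself
        rel = (lbl_all[order] == lbl_all[i]).astype(np.float64)
        if rel.sum() == 0:
            continue
        prec = np.cumsum(rel) / np.arange(1, len(rel) + 1)
        aps.append((prec * rel).sum() / rel.sum())
    return float(np.mean(aps))
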
Example #7
                print('{} Loss: {:.4f} Acc: {:.4f} mAP: {:.4f}'.format(
                    phrase, epoch_loss, epoch_acc, epoch_map))
                clsf_rpt = sklearn.metrics.classification_report(
                    np.concatenate(labels_list, axis=None),
                    np.concatenate(preds_list, axis=None))
                print(clsf_rpt)
                writer.add_scalar('data/test_loss', epoch_loss, epoch)
                writer.add_scalar('data/test_acc', epoch_acc, epoch)

    writer.close()
    return best_model_wts


if __name__ == '__main__':

    model = MeshNet(cfg=cfg['MeshNet'], require_fea=True)
    model.cuda()
    model = nn.DataParallel(model)
    num_total_params = sum(p.numel() for p in model.parameters())
    num_trainable_params = sum(p.numel() for p in model.parameters()
                               if p.requires_grad)
    print('Number of total parameters: %d, number of trainable parameters: %d' %
          (num_total_params, num_trainable_params))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=cfg['lr'],
                          momentum=cfg['momentum'],
                          weight_decay=cfg['weight_decay'])
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=cfg['milestones'],