Example #1
def save_summaries(writer: SummaryWriter, data: Dict, predicted: List, endpoints: Dict = None,
                   losses: Dict = None, metrics: Dict = None, step: int = 0):
    """Save tensorboard summaries"""

    subset = [0, 1]

    with torch.no_grad():
        # Save clouds
        if 'points_src' in data:

            points_src = data['points_src'][subset, ..., :3]
            points_ref = data['points_ref'][subset, ..., :3]

            colors = torch.from_numpy(
                np.concatenate([np.tile(ORANGE, (*points_src.shape[0:2], 1)),
                                np.tile(BLUE, (*points_ref.shape[0:2], 1))], axis=1))

            iters_to_save = [0, len(predicted) - 1] if len(predicted) > 1 else [0]

            # Save the raw input clouds as iter_0, then the source cloud transformed
            # by the first and last predicted transforms
            concat_cloud_input = torch.cat((points_src, points_ref), dim=1)
            writer.add_mesh('iter_0', vertices=concat_cloud_input, colors=colors, global_step=step)
            for i_iter in iters_to_save:
                src_transformed = se3.transform(predicted[i_iter][subset, ...], points_src)
                concat_cloud = torch.cat((src_transformed, points_ref), dim=1)
                writer.add_mesh('iter_{}'.format(i_iter + 1), vertices=concat_cloud, colors=colors, global_step=step)

            if endpoints is not None and 'perm_matrices' in endpoints:
                # color each reference point by its total match weight (sum over source points)
                color_mapper = colormap.ScalarMappable(norm=None, cmap=colormap.get_cmap('coolwarm'))
                for i_iter in iters_to_save:
                    ref_weights = torch.sum(endpoints['perm_matrices'][i_iter][subset, ...], dim=1)
                    ref_colors = color_mapper.to_rgba(ref_weights.detach().cpu().numpy())[..., :3]
                    writer.add_mesh('ref_weights_{}'.format(i_iter), vertices=points_ref,
                                    colors=torch.from_numpy(ref_colors) * 255, global_step=step)

        if endpoints is not None and 'perm_matrices' in endpoints:
            # histograms of total match weight per source point (sum over ref)
            # and per reference point (sum over src)
            for i_iter in range(len(endpoints['perm_matrices'])):
                src_weights = torch.sum(endpoints['perm_matrices'][i_iter], dim=2)
                ref_weights = torch.sum(endpoints['perm_matrices'][i_iter], dim=1)
                writer.add_histogram('src_weights_{}'.format(i_iter), src_weights, global_step=step)
                writer.add_histogram('ref_weights_{}'.format(i_iter), ref_weights, global_step=step)

        # Write losses and metrics
        if losses is not None:
            for name, value in losses.items():
                writer.add_scalar('losses/{}'.format(name), value, step)
        if metrics is not None:
            for name, value in metrics.items():
                writer.add_scalar('metrics/{}'.format(name), value, step)

        writer.flush()
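A minimal usage sketch (not from the original source), assuming `save_summaries`, its `se3` helper, and the ORANGE/BLUE color constants are importable, and that the model emits one (B, 3, 4) rigid transform per iteration:

import torch
from torch.utils.tensorboard import SummaryWriter  # or tensorboardX, depending on the project

writer = SummaryWriter('runs/registration_demo')
B, N = 2, 1024
data = {'points_src': torch.rand(B, N, 3), 'points_ref': torch.rand(B, N, 3)}
# identity transforms stand in for real model output (one (B, 3, 4) tensor per iteration)
predicted = [torch.eye(3, 4).repeat(B, 1, 1) for _ in range(3)]
save_summaries(writer, data, predicted,
               losses={'total': torch.tensor(0.5)},
               metrics={'rot_err_deg': 1.2}, step=0)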
Example #2
class Writer:
    def __init__(self, opt):
        self.name = opt.name
        self.opt = opt
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        self.log_name = os.path.join(self.save_dir, 'loss_log.txt')
        self.testacc_log = os.path.join(self.save_dir, 'testacc_log.txt')
        self.start_logs()
        self.nexamples = 0
        self.ncorrect = 0
        # optional TensorBoard display
        if opt.is_train and not opt.no_vis and SummaryWriter is not None:
            from datetime import datetime
            current_time = datetime.now().strftime('%b%d_%H-%M-%S')
            log_dir = os.path.join('runs', opt.dataroot.split('/')[-2], current_time + opt.name)
            self.display = SummaryWriter(log_dir)
            print(log_dir)
        else:
            self.display = None

    def start_logs(self):
        """ creates test / train log files """
        if self.opt.is_train:
            with open(self.log_name, "a") as log_file:
                now = time.strftime("%c")
                log_file.write('================ Training Loss (%s) ================\n' % now)
        else:
            with open(self.testacc_log, "a") as log_file:
                now = time.strftime("%c")
                log_file.write('================ Testing Acc (%s) ================\n' % now)

    def print_current_losses(self, epoch, i, losses, t, t_data):
        """ prints train loss to terminal / file """
        message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) loss: %.3f ' \
                  % (epoch, i, t, t_data, losses.item())
        print(message)
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)

    def plot_loss(self, loss, epoch, i, n):
        iters = i + (epoch - 1) * n
        if self.display:
            self.display.add_scalar('data/train_loss', loss, iters)

    def plot_model_wts(self, model, epoch):
        if self.opt.is_train and self.display:
            for name, param in model.net.named_parameters():
                self.display.add_histogram(name, param.clone().cpu().data.numpy(), epoch)

    def print_acc(self, epoch, acc):
        """ prints test accuracy to terminal / file """
        message = 'epoch: {}, TEST ACC: [{:.5} %]\n' \
            .format(epoch, acc * 100)
        print(message)
        with open(self.testacc_log, "a") as log_file:
            log_file.write('%s\n' % message)

    def plot_acc(self, acc, epoch):
        if self.display:
            self.display.add_scalar('data/test_acc', acc, epoch)
            
    def plot_reg_loss(self, reg_loss, iters):
        if self.display:
            self.display.add_scalar('data/regularization_loss', reg_loss, iters)
    
    def plot_train_acc(self, acc, epoch):
        if self.display:
            self.display.add_scalar('data/train_acc', acc, epoch)

    def reset_counter(self):
        """ resets the correct / total example counters """
        self.ncorrect = 0
        self.nexamples = 0

    def update_counter(self, ncorrect, nexamples):
        self.ncorrect += ncorrect
        self.nexamples += nexamples

    @property
    def acc(self):
        # guard against division by zero before any update_counter call
        return float(self.ncorrect) / self.nexamples if self.nexamples else 0.0
    
    def add_figure(self, tag, fig, steps):
        self.display.add_figure(tag, fig, global_step=steps)

    def add_mesh(self, tag, vertices, colors=None, faces=None, config_dict=None, global_step=None, walltime=None):
        # forward the arguments; the original hard-coded them all back to None
        self.display.add_mesh(tag, vertices, colors=colors, faces=faces, config_dict=config_dict,
                              global_step=global_step, walltime=walltime)

    def close(self):
        if self.display is not None:
            self.display.close()
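A usage sketch under stated assumptions: `opt` is any namespace exposing the fields the constructor reads (`name`, `checkpoints_dir`, `dataroot`, `is_train`, `no_vis`); the values below are illustrative only.

import os
from types import SimpleNamespace

opt = SimpleNamespace(name='demo', checkpoints_dir='checkpoints',
                      dataroot='datasets/shrec_16/', is_train=True, no_vis=False)
os.makedirs(os.path.join(opt.checkpoints_dir, opt.name), exist_ok=True)

writer = Writer(opt)
writer.reset_counter()
writer.update_counter(ncorrect=90, nexamples=100)
writer.plot_acc(writer.acc, epoch=1)  # logs 0.9 to data/test_acc
writer.close()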
Example #3
def teapot_smooth_test(loss_lap='loss'):
    assert loss_lap in ('loss', 'lap')
    print(f'Using {loss_lap}')

    from tensorboardX import SummaryWriter
    logger = SummaryWriter(f'temp/{loss_lap}')

    from ..utils import mesh
    vertices, faces = mesh.create_sphere()
    faces = torch.tensor(faces).long()
    vertices = torch.tensor(vertices)
    # vertices = vertices + 0.05*vertices.clone().normal_()

    # verts = np.tile(verts[None, :, :], (3,1,1))
    # faces = np.tile(faces[None, :, :], (3,1,1))
    vertices = vertices[None, :, :]
    faces = faces[None, :, :]

    vertices = vertices.cuda()
    faces = faces.cuda()

    from .loss_utils import LaplacianLoss
    # `Laplacian` (used below) is assumed to be defined in this module's enclosing scope

    class SphereModel_Loss(torch.nn.Module):
        def __init__(self):
            super(SphereModel_Loss, self).__init__()
            self.vertices = torch.nn.Parameter(vertices)
            self.loss = LaplacianLoss(faces, verts=vertices)

        def forward(self):
            return self.loss(self.vertices)

    class SphereModel_Lap(torch.nn.Module):
        def __init__(self):
            super(SphereModel_Lap, self).__init__()
            self.vertices = torch.nn.Parameter(vertices)
            self.laplacian = Laplacian(faces, verts=vertices)

        def forward(self):
            Lx = self.laplacian(self.vertices)
            return torch.norm(Lx.view(-1, Lx.size(2)), p=2, dim=1).mean()

    opt_model = SphereModel_Lap() if loss_lap == 'lap' else SphereModel_Loss()
    optimizer = torch.optim.Adam(opt_model.parameters(),
                                 lr=1e-4,
                                 betas=(0.9, 0.999))

    print('Smoothing Vertices: ')
    for i in range(200):
        optimizer.zero_grad()

        loss = opt_model()

        print(f'loss {loss:.5g}')
        loss.backward()

        # per-vertex coloring is disabled; the commented lines would tint vertices
        # by Laplacian magnitude given an `Lx` tensor
        # red_color = torch.tensor([255., 0., 0.]).double().cuda()
        # weights = torch.norm(Lx, dim=2)
        colors = None  # weights[:, :, None] * red_color[None, None, :] * 10
        config_double = {
            'material': {
                'cls': 'MeshStandardMaterial',
                'side': 2
            },
            'lights': [{
                'cls': 'AmbientLight',
                'color': '#ffffff',
                'intensity': 0.75,
            }, {
                'cls': 'DirectionalLight',
                'color': '#ffffff',
                'intensity': 0.75,
                'position': [0, -1, 2],
            }],
        }
        if i % 10 == 0:
            # log the current (optimized) vertices, detached so they can be converted to numpy
            logger.add_mesh(f'v{i}',
                            opt_model.vertices.detach(),
                            faces=faces,
                            colors=colors,
                            config_dict=config_double)

        optimizer.step()
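Calling the function once per formulation writes two runs under temp/, so TensorBoard can compare the LaplacianLoss objective against the explicit Laplacian + L2-norm variant (a CUDA device is required by the code above):

teapot_smooth_test('loss')  # optimize the LaplacianLoss objective
teapot_smooth_test('lap')   # optimize the mean L2 norm of the explicit Laplacian
# then inspect with: tensorboard --logdir temp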
Example #4
def train(args):
    train_loader = DataLoader(HoleDataset('../data/std_split_data/train', args.tooth_ids),
                              num_workers=0, batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(HoleDataset('../data/std_split_data/valid', args.tooth_ids, train=False),
                             num_workers=0, batch_size=4, shuffle=False, drop_last=False)
    # Try to load models
    device = torch.device("cuda")
    model = get_model(args.model, args).to(device)
    print(str(model))
    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")

    weighted_losses = ('weighted_ce', 'weighted_ce_var_i', 'weighted_ce_var_ii',
                       'weighted_ce_var_iii', 'log_weighted_ce')
    if args.loss in weighted_losses:
        # class-frequency statistics shared by all weighted variants
        with open(args.stat) as json_file:
            stat = json.load(json_file)

    if args.loss == 'ce':
        criterion = nn.CrossEntropyLoss()
    elif args.loss == 'weighted_ce':
        # inverse class frequency
        criterion = nn.CrossEntropyLoss(weight=torch.tensor([1 / stat['neg_rate'], 1 / stat['pos_rate']])).cuda()
    elif args.loss == 'weighted_ce_var_i':
        # complement of class frequency
        criterion = nn.CrossEntropyLoss(weight=torch.tensor([1 - stat['neg_rate'], 1 - stat['pos_rate']])).cuda()
    elif args.loss == 'weighted_ce_var_ii':
        # inverse frequency, normalized so the positive class has weight 1
        # (pos_rate / pos_rate in the original reduces to 1.0)
        criterion = nn.CrossEntropyLoss(
            weight=torch.tensor([stat['pos_rate'] / stat['neg_rate'], 1.0])).cuda()
    elif args.loss == 'weighted_ce_var_iii':
        # inverse frequency, normalized so the negative class has weight 1
        criterion = nn.CrossEntropyLoss(
            weight=torch.tensor([1.0, stat['neg_rate'] / stat['pos_rate']])).cuda()
    elif args.loss == 'log_weighted_ce':
        # log-damped inverse frequency
        criterion = nn.CrossEntropyLoss(weight=torch.tensor([np.log(1.1 + 1 / stat['neg_rate']),
                                                             np.log(1.1 + 1 / stat['pos_rate'])])).cuda()
    elif args.loss == 'focal_loss':
        if args.alpha == 0.5:
            criterion = FocalLoss(gamma=args.gamma)
        else:
            criterion = FocalLoss(gamma=args.gamma, weight=torch.tensor([args.alpha, 1 - args.alpha]).cuda())
    else:
        criterion = get_loss_criterion(name=args.loss, stats=args.stat, beta=args.beta)

    if args.opt == 'sgd':
        print("Use SGD")
        opt = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=1e-4)
    elif args.opt == 'adam':
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
    else:
        raise Exception("Not implemented")
    # note: eta_min=args.lr (as originally written) makes the cosine schedule a constant;
    # a small decaying floor is assumed to be the intent
    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr * 0.01)

    writer = SummaryWriter(os.path.join('runs', args.exp_name))

    best_test_acc = -1
    for epoch in tqdm.tqdm(range(args.epochs)):
        ####################
        # Train
        ####################
        model.train()
        train_metric = Metric()
        for x, y, _ in tqdm.tqdm(train_loader, leave=False):
            x, y = x.to(device).permute(0, 2, 1), y.to(device)

            opt.zero_grad()
            y_ = model(x)
            loss = criterion(y_, y)
            loss.backward()
            opt.step()

            train_metric.update(y, y_, criterion)
        scheduler.step()
        ####################
        # Test
        ####################
        if epoch % 10 == 0:
            model.eval()
            test_metric = Metric()
            with torch.no_grad():
                for idx, (x, y, _) in enumerate(test_loader):
                    x, y = x.to(device).permute(0, 2, 1), y.to(device)

                    y_ = model(x)

                    test_metric.update(y, y_, criterion)
                    if idx == 0:
                        x = x.permute(0, 2, 1)[:1]
                        color = torch.tensor([[[255, 0, 0]]]).to(device, torch.long).repeat(1, x.size(1), 1)
                        writer.add_mesh('y', x, colors=color * y[:1].unsqueeze(-1), global_step=epoch)
                        writer.add_mesh('y_', x, colors=color * y_[:1].argmax(dim=1).unsqueeze(-1), global_step=epoch)

            if test_metric.avg_acc() > best_test_acc:
                best_test_acc = test_metric.avg_acc()
                torch.save(model.state_dict(), os.path.join('checkpoints', args.exp_name, 'models/model.t7'))
        torch.save(model.state_dict(), os.path.join('checkpoints', args.exp_name, 'models/latest.t7'))
        if epoch % 10 == 0:
            torch.save(model.state_dict(), os.path.join('checkpoints', args.exp_name, 'models/' + str(epoch) + '.t7'))
        ####################
        # Writer
        ####################
        writer.add_scalar('avg_train_loss', train_metric.avg_loss(), epoch)
        writer.add_scalar('avg_train_acc', train_metric.avg_acc(), epoch)
        if epoch % 10 == 0:
            # test_metric is only fresh on evaluation epochs; logging it every
            # epoch (as originally written) would record stale values
            writer.add_scalar('avg_test_loss', test_metric.avg_loss(), epoch)
            writer.add_scalar('avg_test_acc', test_metric.avg_acc(), epoch)

        avg_pos_acc, avg_neg_acc, avg_fp_acc, avg_fn_acc = train_metric.avg_stat()
        writer.add_scalar('avg_pos_acc', avg_pos_acc, epoch)
        writer.add_scalar('avg_neg_acc', avg_neg_acc, epoch)
        writer.add_scalar('avg_fp_acc', avg_fp_acc, epoch)
        writer.add_scalar('avg_fn_acc', avg_fn_acc, epoch)

        if epoch % 10 == 0:
            avg_pos_acc_t, avg_neg_acc_t, _, _ = test_metric.avg_stat()
            writer.add_scalar('test_avg_pos_acc', avg_pos_acc_t, epoch)
            writer.add_scalar('test_avg_neg_acc', avg_neg_acc_t, epoch)

        print('Epoch {}: Pos_acc: {}, Neg_acc: {}'.format(epoch, avg_pos_acc, avg_neg_acc))

    writer.close()
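A hypothetical `args` namespace covering every attribute `train` reads; the defaults below are illustrative stand-ins, not values from the original project:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', default='hole_seg_demo')
parser.add_argument('--model', default='pointnet')          # whatever get_model accepts
parser.add_argument('--tooth_ids', type=int, nargs='+', default=[11])
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--loss', default='ce')
parser.add_argument('--stat', default='../data/stat.json')  # read by the weighted_ce variants
parser.add_argument('--alpha', type=float, default=0.5)     # focal loss only
parser.add_argument('--gamma', type=float, default=2.0)     # focal loss only
parser.add_argument('--beta', type=float, default=0.999)    # get_loss_criterion fallback only
parser.add_argument('--opt', default='adam')
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--momentum', type=float, default=0.9)  # sgd only

if __name__ == '__main__':
    train(parser.parse_args())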
Example #5
def test():
    import numpy as np

    from ..utils import mesh
    vertices, faces = mesh.create_sphere()
    faces = faces[None, :, :]
    vertices = vertices[None, :, :]
    laplacian = Laplacian(torch.tensor(faces).long())

    from torch.autograd import gradcheck

    # gradcheck takes a tuple of tensors as input, checks whether the gradients
    # evaluated at these tensors are close enough to numerical approximations,
    # and returns True if they all satisfy this condition.
    inp = torch.randn(vertices.shape, dtype=torch.double, requires_grad=True)
    # test = gradcheck(laplacian, inp, eps=1e-3, atol=1e-3)
    # print(test)

    from .loss_utils import LaplacianLoss
    crit = LaplacianLoss(torch.tensor(faces).long())
    test = gradcheck(crit, inp, eps=1e-4, atol=1e-4)
    print(test)

    from tensorboardX import SummaryWriter
    logger = SummaryWriter('temp/')

    vertices = torch.tensor(vertices)
    vertices = vertices + 0.01 * vertices.clone().normal_()
    vertices = torch.nn.Parameter(vertices)
    # note: a bare `vertices.cuda()` (as originally written) is a no-op since the
    # result was discarded; everything here runs on CPU, matching `crit`
    optimizer = torch.optim.Adam([vertices], lr=0.01)
    for i in range(10):
        loss = crit(vertices)
        weights = crit.visualize(vertices)

        # tint vertices red in proportion to their Laplacian weight
        red_color = np.array([255., 0., 0.])
        colors = weights[None, :, None] * red_color[None, None, :] * 10
        config_double = {
            'material': {
                'cls': 'MeshStandardMaterial',
                'side': 2
            },
            'lights': [
                {
                    'cls': 'AmbientLight',
                    'color': '#ffffff',
                    'intensity': 0.75,
                },
                # {
                # 'cls': 'DirectionalLight',
                # 'color': '#ffffff',
                # 'intensity': 0.75,
                # 'position': [0, -1, 2],
                # }
            ],
        }
        # detach: tensorboardX cannot convert a grad-requiring Parameter to numpy
        logger.add_mesh(f'v{i}',
                        vertices.detach(),
                        faces=faces,
                        colors=colors,
                        config_dict=config_double)

        print(
            f'loss: {loss.item():.6f}; vertices: ({vertices.min():.6f},{vertices.max():.6f})'
        )
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
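For reference, the same gradcheck pattern on a self-contained module, a minimal sketch independent of the mesh utilities above:

import torch
from torch.autograd import gradcheck

lin = torch.nn.Linear(4, 3).double()                # gradcheck needs double precision
inp = torch.randn(2, 4, dtype=torch.double, requires_grad=True)
print(gradcheck(lin, (inp,), eps=1e-6, atol=1e-4))  # True if analytic ≈ numeric gradients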