Example #1
0
def main():
    """Stream KITTI batches to TensorBoard: a target-image grid, a random
    red point cloud sized to the image, and a synthetic decaying loss."""
    dataset = SequenceDataset("/home/ai/Data/kitti_formatted")
    loader, _, _ = utils.data_loaders(dataset, 1.0, 0.0, 0.0, 4, 1)

    writer = SummaryWriter()
    # Perspective-camera settings for TensorBoard's 3D mesh plugin.
    config_3d = {
        "camera": {
            "cls": "PerspectiveCamera",
            "fov": 75,
            "near": 0.1,
            "far": 5000.0,
        }
    }

    for step, batch in enumerate(loader):
        tgt, refs, K, Kinv = batch

        # One random point per target pixel, all colored red.
        num_pts = tgt.shape[-1] * tgt.shape[-2]
        verts = np.random.random((1, num_pts, 3))
        colors = np.repeat(np.array([[[255, 0, 0]]]), num_pts, 1)

        image_grid = torchvision.utils.make_grid(tgt, nrow=2)

        # Monotonically decaying dummy loss so the scalar chart moves.
        loss = 1 / (0.01 * step + 1)

        writer.add_image("tgt", img_tensor=image_grid, global_step=step)
        writer.add_mesh("cloud", vertices=verts, colors=colors,
                        config_dict=config_3d, global_step=step)
        writer.add_scalar("loss", scalar_value=loss, global_step=step)

        # Pace the loop so the dashboard can be watched live.
        time.sleep(3.0)

    writer.close()
Example #2
0
def main(args):
    """Evaluate the trained point-cloud generator on the test set and log the
    fine-stage predicted clouds to TensorBoard.

    Parameters:
        args -- namespace providing test_json, test_batch_size, device and
                noise_pts.
    """
    writer = SummaryWriter()
    loader_kwargs = {'num_workers': 4, 'pin_memory': True}

    test_loader = torch.utils.data.DataLoader(
        PointCloudDataset(args.test_json),
        batch_size=args.test_batch_size,
        shuffle=True,
        **loader_kwargs)

    netG = GeneratorVanilla(encoder_dim=(3, 3),
                            grid_dims=(32, 32, 1),
                            Generate1_dims=(1219, 128, 128, 3),
                            Generate2_dims=(1219, 256, 256, 3))
    netG.load_state_dict(torch.load('snapshots/model_train_best.pth'))
    netG.eval()

    # NOTE(review): these criteria are built but never evaluated below —
    # confirm whether L1 / Chamfer losses were meant to be logged here.
    criterion_I = torch.nn.L1Loss().to(args.device)
    criterion_G = ChamfersDistance().to(args.device)

    for step, batch in enumerate(test_loader):
        image, ptcloud = batch['image'], batch['ptcloud']
        proMatrix = batch['proMatrix']

        # torch.autograd.Variable is a deprecated no-op wrapper; plain
        # tensors behave identically here.
        image = image.to(args.device)
        ptcloud = ptcloud.to(args.device)
        proMatrix = proMatrix.to(args.device)

        B = image.shape[0]

        noise = cube_generator(B, args.noise_pts, 3)
        noisev = noise.to(args.device)

        with torch.no_grad():
            h, w, img_recons, ptcloud_pred_primitive, ptcloud_pred_fine = netG(
                image, noisev, proMatrix)

        # Black per-point colors; must match the (B, N, 3) vertex layout.
        colors_tensor = torch.zeros(B, args.noise_pts, 3)

        # Fix: tag each batch with a distinct global_step so meshes don't all
        # overwrite step 0.
        writer.add_mesh('pt_fine',
                        vertices=ptcloud_pred_fine,
                        colors=colors_tensor,
                        global_step=step)

    # Fix: close once after the loop — the original closed the writer inside
    # the loop, after the first batch.
    writer.close()
Example #3
0
	def visualize_data(
			self, model: Model, writer: SummaryWriter,
			dataset: Dataset, indices: List, tag, step
	):
		"""Log selected dataset samples as colored point-cloud meshes.

		For each requested index, a (2, N, 3) vertex tensor is written with the
		same coordinates twice — one row colored by the predicted class, one by
		the ground-truth class — using ``self.cmap`` as the class→color table.

		NOTE(review): assumes ``model(x)`` returns an object supporting both
		``y['pred']`` (per-voxel class scores) and ``y.C`` (voxel coordinates
		with the batch index in column 3) — confirm against the model code.
		"""
		# visualize one data
		batch = [dataset[i] for i in indices]
		coords, feats, label, _ = list(zip(*batch))
		coords, feats, = sparse_collate(coords, feats)
		x = SparseTensor(feats, coords)

		x = x.to(model.device)
		with torch.no_grad():
			y = model(x)
		pred = y['pred']
		# argmax over class dimension -> per-voxel predicted class ids
		pred_choices = pred.max(dim=1).indices

		for i in range(len(indices)):
			# get indices with specific indices
			# rows of the collated batch belonging to sample i (batch id in col 3)
			data_indices = (y.C[:, 3] == i).nonzero().squeeze(1)
			coord = coords[data_indices, :3].type(torch.FloatTensor)
			# voxel-grid units -> metric coordinates
			coord = coord * self.config['voxel_size']
			coord = torch.stack([coord, coord])  # Tensor of 2 x N x 3
			pred_choice = pred_choices[data_indices]

			# add color for prediction
			pred_color = torch.stack(
				[self.cmap[point] for point in pred_choice],
				dim=0
			)  # Tensor of N x 3 (1 for batch)
			gt_color = torch.stack(
				[self.cmap[point] for point in label[i]],
				dim=0
			)  # Tensor of N x 3 (1 for batch)
			color = torch.stack([pred_color, gt_color], dim=0)  # Tensor of 2 x N x 3
			# add_mesh expects integer 0-255 colors
			color = (color * 255).type(torch.IntTensor)

			# subsample large clouds; fixed seed keeps views reproducible
			max_sample = self.config['max_vis_sample']
			if coord.shape[1] > max_sample:
				perm = np.random.RandomState(0).permutation(coord.shape[1])
				coord = coord[:, perm[:max_sample], :]
				color = color[:, perm[:max_sample], :]

			writer.add_mesh(
				tag=tag + '/vis_%d' % i, vertices=coord,
				colors=color, global_step=step
			)
Example #4
0
def tensorboard_warps(writer: SummaryWriter,
                      number_validation_images,
                      samples,
                      warps,
                      step,
                      tensorboard_tag='warp',
                      point_size=0.01):
    """Log sample points as a TensorBoard mesh, colored by warp magnitude.

    Parameters:
        writer -- TensorBoard summary writer.
        number_validation_images -- max number of leading samples to log.
        samples -- point coordinates; first axis indexes images.
        warps -- warp vectors aligned with `samples` (last axis is the vector).
        step -- global step for the event.
        tensorboard_tag -- mesh tag in TensorBoard.
        point_size -- rendered point size for the mesh plugin.
    """
    # Keep only the first N images when more were provided than requested.
    if number_validation_images <= len(samples):
        samples = samples[:number_validation_images]
        warps = warps[:number_validation_images]
    # Color each point by its warp magnitude through viridis (RGB in 0-255).
    # NOTE(review): cmap() maps 0-1 input; magnitudes > 1 saturate — confirm
    # whether normalization is intended upstream.
    magnitude = np.linalg.norm(warps, axis=-1)
    cmap = plt.cm.get_cmap('viridis')
    rgb = cmap(magnitude)[:, :, :3] * 255
    # Fix: forward the point size to the mesh plugin — previously this config
    # was commented out, leaving the `point_size` parameter unused.
    point_size_config = {
        'material': {
            'size': point_size
        }
    }
    writer.add_mesh(
        tensorboard_tag,
        vertices=samples,
        colors=rgb,
        config_dict=point_size_config,
        global_step=step,
    )
Example #5
0
class Logger:
    """Console/file logger with an optional TensorBoard writer attached.

    Each instance owns a uniquely named stdlib logger (``logger_<n>``).
    When both ``log_dir`` and ``phase`` are given, a timestamped log file and
    a matching TensorBoard run directory are created; the ``add_*`` methods
    forward to the underlying ``SummaryWriter``.
    """

    # Number of Logger instances created; used to give each its own logger.
    _count = 0

    def __init__(self, scrn=True, log_dir='', phase=''):
        """Build the logger.

        Parameters:
            scrn -- also log INFO+ to the console.
            log_dir -- directory for the log file and TensorBoard run.
            phase -- run-name prefix; file logging and TensorBoard are only
                     enabled when both log_dir and phase are non-empty.
        """
        super().__init__()
        self._logger = logging.getLogger('logger_{}'.format(Logger._count))
        Logger._count += 1
        self._logger.setLevel(logging.DEBUG)

        if scrn:
            self._scrn_handler = logging.StreamHandler()
            self._scrn_handler.setLevel(logging.INFO)
            self._scrn_handler.setFormatter(
                logging.Formatter(fmt=FORMAT_SHORT))
            self._logger.addHandler(self._scrn_handler)

        if log_dir and phase:
            # Timestamped file name: <phase>-YYYY-MM-DD-HH-MM-SS.log
            self.log_path = os.path.join(
                log_dir,
                '{}-{:-4d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}.log'.format(
                    phase,
                    *localtime()[:6]))
            self.show_nl("log into {}\n\n".format(self.log_path))
            self._file_handler = logging.FileHandler(filename=self.log_path)
            self._file_handler.setLevel(logging.DEBUG)
            self._file_handler.setFormatter(logging.Formatter(fmt=FORMAT_LONG))
            self._logger.addHandler(self._file_handler)

            # TensorBoard run directory mirrors the log-file timestamp.
            self._writer = SummaryWriter(log_dir=os.path.join(
                log_dir, '{}-{:-4d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'.format(
                    phase,
                    *localtime()[:6])))

    def show(self, *args, **kwargs):
        """Log at INFO level (console + file)."""
        return self._logger.info(*args, **kwargs)

    def show_nl(self, *args, **kwargs):
        """Like show(), preceded by a blank INFO line."""
        self._logger.info("")
        return self.show(*args, **kwargs)

    def dump(self, *args, **kwargs):
        """Log at DEBUG level (file only by default)."""
        return self._logger.debug(*args, **kwargs)

    def warning(self, *args, **kwargs):
        return self._logger.warning(*args, **kwargs)

    def error(self, *args, **kwargs):
        return self._logger.error(*args, **kwargs)

    # tensorboard — thin pass-throughs to the SummaryWriter
    def add_scalar(self, *args, **kwargs):
        return self._writer.add_scalar(*args, **kwargs)

    def add_scalars(self, *args, **kwargs):
        return self._writer.add_scalars(*args, **kwargs)

    def add_histogram(self, *args, **kwargs):
        return self._writer.add_histogram(*args, **kwargs)

    def add_image(self, *args, **kwargs):
        return self._writer.add_image(*args, **kwargs)

    def add_images(self, *args, **kwargs):
        return self._writer.add_images(*args, **kwargs)

    def add_figure(self, *args, **kwargs):
        return self._writer.add_figure(*args, **kwargs)

    def add_video(self, *args, **kwargs):
        return self._writer.add_video(*args, **kwargs)

    def add_audio(self, *args, **kwargs):
        return self._writer.add_audio(*args, **kwargs)

    def add_text(self, *args, **kwargs):
        return self._writer.add_text(*args, **kwargs)

    def add_graph(self, *args, **kwargs):
        return self._writer.add_graph(*args, **kwargs)

    def add_pr_curve(self, *args, **kwargs):
        return self._writer.add_pr_curve(*args, **kwargs)

    def add_custom_scalars(self, *args, **kwargs):
        return self._writer.add_custom_scalars(*args, **kwargs)

    def add_mesh(self, *args, **kwargs):
        return self._writer.add_mesh(*args, **kwargs)

    # def add_hparams(self, *args, **kwargs):
    #     return self._writer.add_hparams(*args, **kwargs)

    def flush(self):
        return self._writer.flush()

    def close(self):
        return self._writer.close()

    def _grad_hook(self, grad, name=None, grads=None):
        # Callback bound via functools.partial in watch_grad; records the
        # most recent gradient for the named parameter.
        grads.update({name: grad})

    def watch_grad(self, model, layers):
        """
        Add hooks to the specific layers. Gradients of these layers will save to self.grads
        :param model:
        :param layers: Except a list eg. layers=[0, -1] means to watch the gradients of
                        the fist layer and the last layer of the model
        :return:
        """
        assert layers
        if not hasattr(self, 'grads'):
            self.grads = {}
            self.grad_hooks = {}
        named_parameters = list(model.named_parameters())
        for layer in layers:
            name = named_parameters[layer][0]
            handle = named_parameters[layer][1].register_hook(
                functools.partial(self._grad_hook, name=name,
                                  grads=self.grads))
            # Bug fix: was `dict(name=handle)`, which stored every handle
            # under the literal key "name" — only the last hook could ever
            # be removed by watch_grad_close().
            self.grad_hooks[name] = handle

    def watch_grad_close(self):
        """Remove every gradient hook registered by watch_grad()."""
        for _, handle in self.grad_hooks.items():
            handle.remove()  # remove the hook

    def add_grads(self, global_step=None, *args, **kwargs):
        """
        Add gradients to tensorboard. You must call the method self.watch_grad before using this method!
        """
        assert hasattr(self, 'grads'), \
            "self.grads is nonexistent! You must call self.watch_grad before!"
        assert self.grads, "self.grads is empty!"
        for (name, grad) in self.grads.items():
            self.add_histogram(tag=name,
                               values=grad,
                               global_step=global_step,
                               *args,
                               **kwargs)

    @staticmethod
    def make_desc(counter, total, *triples):
        """Build a progress-bar description like ``[3/10] loss 0.50 (0.25)``."""
        desc = "[{}/{}]".format(counter, total)
        # The three elements of each triple are
        # (name to display, AverageMeter object, formatting string)
        for name, obj, fmt in triples:
            desc += (" {} {obj.val:" + fmt + "} ({obj.avg:" + fmt +
                     "})").format(name, obj=obj)
        return desc
Example #6
0
class SummaryWriter:
    """Gated wrapper around TensorBoard's SummaryWriter.

    Adds two conveniences on top of the underlying writer:
    - a module-wide ``active`` switch: when False every ``add_*`` call is a
      silent no-op;
    - a default ``global_step`` that is used whenever a call omits one.

    On instantiation, each ``add_*`` method is also exported into this module's
    namespace without the ``add_`` prefix (and ``set_*`` methods under their
    own names), so e.g. ``scalar(...)`` can be called at module level.
    NOTE(review): this setattr registration clobbers any module-level names
    that collide (e.g. ``image``, ``text``) — confirm that is intended.
    """
    def __init__(self, logdir, flush_secs=120):

        self.writer = TensorboardSummaryWriter(
            log_dir=logdir,
            purge_step=None,
            max_queue=10,
            flush_secs=flush_secs,
            filename_suffix='')

        # Default step used when an add_* call passes global_step=None.
        self.global_step = None
        # When False, all add_* calls below are skipped.
        self.active = True

        # ------------------------------------------------------------------------
        # register add_* and set_* functions in summary module on instantiation
        # ------------------------------------------------------------------------
        this_module = sys.modules[__name__]
        list_of_names = dir(SummaryWriter)
        for name in list_of_names:

            # add functions (without the 'add' prefix)
            if name.startswith('add_'):
                setattr(this_module, name[4:], getattr(self, name))

            #  set functions
            if name.startswith('set_'):
                setattr(this_module, name, getattr(self, name))

    def set_global_step(self, value):
        """Set the default global_step for subsequent add_* calls."""
        self.global_step = value

    def set_active(self, value):
        """Enable/disable all add_* logging at once."""
        self.active = value

    def add_audio(self, tag, snd_tensor, global_step=None, sample_rate=44100, walltime=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_audio(
                tag, snd_tensor, global_step=global_step, sample_rate=sample_rate, walltime=walltime)

    def add_custom_scalars(self, layout):
        if self.active:
            self.writer.add_custom_scalars(layout)

    def add_custom_scalars_marginchart(self, tags, category='default', title='untitled'):
        if self.active:
            self.writer.add_custom_scalars_marginchart(tags, category=category, title=title)

    def add_custom_scalars_multilinechart(self, tags, category='default', title='untitled'):
        if self.active:
            self.writer.add_custom_scalars_multilinechart(tags, category=category, title=title)

    def add_embedding(self, mat, metadata=None, label_img=None, global_step=None,
                      tag='default', metadata_header=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_embedding(
                mat, metadata=metadata, label_img=label_img, global_step=global_step,
                tag=tag, metadata_header=metadata_header)

    def add_figure(self, tag, figure, global_step=None, close=True, walltime=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_figure(
                tag, figure, global_step=global_step, close=close, walltime=walltime)

    def add_graph(self, model, input_to_model=None, verbose=False):
        if self.active:
            self.writer.add_graph(model, input_to_model=input_to_model, verbose=verbose)

    def add_histogram(self, tag, values, global_step=None, bins='tensorflow', walltime=None, max_bins=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_histogram(
                tag, values, global_step=global_step, bins=bins,
                walltime=walltime, max_bins=max_bins)

    def add_histogram_raw(self, tag, min, max, num, sum, sum_squares,
                          bucket_limits, bucket_counts, global_step=None,
                          walltime=None):
        # NOTE: parameter names min/max/num/sum shadow builtins to mirror the
        # underlying TensorBoard API signature.
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_histogram_raw(
                tag, min=min, max=max, num=num, sum=sum, sum_squares=sum_squares,
                bucket_limits=bucket_limits, bucket_counts=bucket_counts,
                global_step=global_step, walltime=walltime)

    def add_image(self, tag, img_tensor, global_step=None, walltime=None, dataformats='CHW'):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_image(
                tag, img_tensor, global_step=global_step, walltime=walltime, dataformats=dataformats)

    def add_image_with_boxes(self, tag, img_tensor, box_tensor, global_step=None,
                             walltime=None, rescale=1, dataformats='CHW'):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_image_with_boxes(
                tag, img_tensor, box_tensor,
                global_step=global_step, walltime=walltime,
                rescale=rescale, dataformats=dataformats)

    def add_images(self, tag, img_tensor, global_step=None, walltime=None, dataformats='NCHW'):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_images(
                tag, img_tensor, global_step=global_step, walltime=walltime, dataformats=dataformats)

    def add_mesh(self, tag, vertices, colors=None, faces=None, config_dict=None, global_step=None, walltime=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_mesh(
                tag, vertices, colors=colors, faces=faces, config_dict=config_dict,
                global_step=global_step, walltime=walltime)

    def add_onnx_graph(self, graph):
        if self.active:
            self.writer.add_onnx_graph(graph)

    def add_pr_curve(self, tag, labels, predictions, global_step=None,
                     num_thresholds=127, weights=None, walltime=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_pr_curve(
                tag, labels, predictions, global_step=global_step,
                num_thresholds=num_thresholds, weights=weights, walltime=walltime)

    def add_pr_curve_raw(self, tag, true_positive_counts,
                         false_positive_counts,
                         true_negative_counts,
                         false_negative_counts,
                         precision,
                         recall,
                         global_step=None,
                         num_thresholds=127,
                         weights=None,
                         walltime=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_pr_curve_raw(
                tag, true_positive_counts,
                false_positive_counts,
                true_negative_counts,
                false_negative_counts,
                precision,
                recall,
                global_step=global_step,
                num_thresholds=num_thresholds,
                weights=weights,
                walltime=walltime)

    def add_scalar(self, tag, scalar_value, global_step=None, walltime=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_scalar(
                tag, scalar_value, global_step=global_step, walltime=walltime)

    def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, walltime=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_scalars(
                main_tag, tag_scalar_dict, global_step=global_step, walltime=walltime)

    def add_text(self, tag, text_string, global_step=None, walltime=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_text(
                tag, text_string, global_step=global_step, walltime=walltime)

    def add_video(self, tag, vid_tensor, global_step=None, fps=4, walltime=None):
        if self.active:
            global_step = self.global_step if global_step is None else global_step
            self.writer.add_video(
                tag, vid_tensor, global_step=global_step, fps=fps, walltime=walltime)

    def close(self):
        self.writer.close()

    # NOTE(review): __enter__ delegates to the inner writer, so
    # `with SummaryWriter(...) as w:` binds the *wrapped* writer, not this
    # instance — confirm that is the intended context-manager behavior.
    def __enter__(self):
        return self.writer.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        return self.writer.__exit__(exc_type, exc_val, exc_tb)
Example #7
0
def main():
    """Train an LGD optimizer against a frozen, pretrained SDF (Siren) model,
    saving the LGD weights every epoch."""
    parser = argparse.ArgumentParser(
        description='Test',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('data', metavar='DATA', help='path to file')

    parser.add_argument('--tb-save-path', dest='tb_save_path', metavar='PATH',
                        default='../checkpoints/',
                        help='tensorboard checkpoints path')
    parser.add_argument('--weight-save-path', dest='weight_save_path', metavar='PATH',
                        default='../weights/',
                        help='weight checkpoints path')
    parser.add_argument('--sdf-weight', dest='sdf_weight', metavar='PATH',
                        default=None,
                        help='pretrained weight for SDF model')

    parser.add_argument('--batchsize', dest='batchsize', type=int,
                        metavar='BATCHSIZE', default=1,
                        help='batch size')
    parser.add_argument('--epoch', dest='epoch', type=int, metavar='EPOCH',
                        default=500,
                        help='epochs for adam and lgd')
    parser.add_argument('--lr', dest='lr', type=float, metavar='LEARNING_RATE',
                        default=5e-3,
                        help='learning rate')
    parser.add_argument('--lgd-step', dest='lgd_step_per_epoch', type=int,
                        metavar='LGD_STEP_PER_EPOCH', default=5,
                        help='number of simulation steps of LGD per epoch')

    parser.add_argument('--width', dest='width', type=int, metavar='WIDTH',
                        default=128,
                        help='width for rendered image')
    parser.add_argument('--height', dest='height', type=int, metavar='HEIGHT',
                        default=128,
                        help='height for rendered image')

    parser.add_argument('--outfile', dest='outfile', metavar='OUTFILE',
                        help='output file')

    args = parser.parse_args()

    width = args.width
    height = args.height
    lr = args.lr
    epoch = args.epoch
    lgd_step_per_epoch = args.lgd_step_per_epoch

    writer = SummaryWriter(args.tb_save_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # create models
    model = Siren(in_features=3, out_features=1, hidden_features=256,
                  hidden_layers=5, outermost_linear=True).to(device)

    if args.sdf_weight is not None:  # fixed: was `!= None`
        try:
            model.load_state_dict(torch.load(args.sdf_weight))
        except Exception:  # fixed: bare `except:` also caught KeyboardInterrupt
            print("Couldn't load pretrained weight: " + args.sdf_weight)

    # The SDF model is frozen; only LGD's parameters are trained below.
    model.eval()
    for param in model.parameters():
        param.requires_grad = False

    # load
    mm = torch.tensor([-0.1, -0.1, 0.1], device=device, dtype=torch.float)
    mx = torch.tensor([0.1, 0.1, 0.1], device=device, dtype=torch.float)
    wh = torch.tensor([width, height, 1], device=device, dtype=torch.int)

    rot = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]], device=device, dtype=torch.float)
    trans = torch.tensor([[0, 0, -0.8]], device=device, dtype=torch.float)

    p_distribution = GridDataset(mm, mx, wh)

    # Per-ray depths optimized through LGD.
    d = torch.zeros((width * height, 1), device=device, dtype=torch.float).requires_grad_(True)

    sampler = nn.Sequential(UniformSample(width * height),
                            PointTransform(rot))
    p = sampler(p_distribution)

    ds = ObjDataset(args.data)
    objsampler = ObjUniformSample(1000)
    x_preview = (objsampler(ds)['p']).to(device)

    # Loss terms; each *_list variant unpacks the single-element list that
    # LGD passes to its loss callbacks.
    # (Removed dead code: an unused `hidden = None` and unused
    # sdf_eval/sdf_eval_list lambdas that referenced `ray_n` before it
    # existed and would have raised NameError if ever called.)
    d2_eval = lambda d: torch.pow(d, 2).mean()
    d_eval = lambda d: (torch.tanh(d) - 1.).mean() * 0.5
    d2_eval_list = lambda d: d2_eval(d[0])
    d_eval_list = lambda d: d_eval(d[0])

    writer.add_mesh("preview", torch.cat([(p + trans), x_preview]).unsqueeze(0), global_step=0)

    print("lgd")

    lgd = LGD(1, 3, k=10).to(device)
    lgd_optimizer = optim.Adam(lgd.parameters(), lr=lr)

    # train LGD
    lgd.train()
    for i in range(epoch):
        print(i)
        # Subsample a fresh batch of rays each epoch.
        samples_n = width * height // 128
        sample_inds = torch.randperm(width * height)[:samples_n]

        ray_n = torch.tensor([[0, 0, 1]], device=device, dtype=torch.float).repeat(samples_n, 1)

        # SDF residual on the sampled rays only.
        sdf_eval_batch = lambda d: torch.pow(model(d * ray_n + p[sample_inds] + trans)[0], 2).sum(dim=1).mean()
        sdf_eval_batch_list = lambda d: sdf_eval_batch(d[0])

        # update lgd parameters
        lgd_optimizer.zero_grad()
        lgd.loss_trajectory_backward(d[sample_inds],
                                     [d2_eval_list, sdf_eval_batch_list, d_eval_list],
                                     None,
                                     constraints=["None", "Zero", "Positive"],
                                     batch_size=samples_n,
                                     steps=lgd_step_per_epoch)
        lgd_optimizer.step()

        torch.save(lgd.state_dict(), args.weight_save_path + 'model_%03d.pth' % i)

    writer.close()
Example #8
0
        # Save pclouds.
        # NOTE(review): fragment of a larger training loop — `bi` (batch
        # index), `ep` (epoch), `it` (global step), `conf`, `model`,
        # `writer_tr`, `helpers`, `save_collapsed_every` and `iters_tr` are
        # defined upstream of this excerpt.
        if bi == 1 and conf['pcloud_save_period'] != 0 and \
                ep % conf['pcloud_save_period'] == 0:
            # Convert GT and predicted clouds into displayable (pc, color) pairs.
            pcs_vis, clrs_vis = helpers.pclouds2vis(
                batch['pcloud'].cpu(),
                model.pc_pred.detach().cpu(),
                conf['pcloud_samples_per_period'], conf)
            # Expected layout: (num_samples, 2, max(N, M), 3) — presumably the
            # middle axis pairs (gt, pred); confirm against pclouds2vis.
            assert pcs_vis.shape == (np.minimum(
                conf['pcloud_samples_per_period'],
                conf['batch_size']), 2, np.maximum(conf['N'], conf['M']), 3)
            assert clrs_vis.shape == pcs_vis.shape
            for idx, (pc, clr) in enumerate(zip(pcs_vis,
                                                clrs_vis)):  # (2, P,3)
                writer_tr.add_mesh('pc_{}'.format(idx),
                                   vertices=pc,
                                   colors=clr,
                                   global_step=it)

        # Save number of collapsed patches.
        if bi % save_collapsed_every == 0 and 'fff' in model.geom_props:
            # Average number of collapsed patches per cloud in the batch.
            num_collpased = np.sum(
                [inds.shape[0] for inds in model.collapsed_patches_A()]) / \
                            model.pc_pred.shape[0]
            writer_tr.add_scalar('collapsed_patches',
                                 num_collpased,
                                 global_step=it)

    # Validation.
    model.eval()
    it = ep * iters_tr
    loss_va_run = 0.
Example #9
0
def main():
    """Fit a Siren SDF to an OBJ point sample with normal supervision, logging
    ground-truth and predicted normals as TensorBoard meshes."""
    parser = argparse.ArgumentParser(
        description='Test',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('data', metavar='DATA', help='path to file')

    parser.add_argument('--tb-save-path',
                        dest='tb_save_path',
                        metavar='PATH',
                        default='../checkpoints/',
                        help='tensorboard checkpoints path')

    parser.add_argument('--weight-save-path',
                        dest='weight_save_path',
                        metavar='PATH',
                        default='../weights/',
                        help='weight checkpoints path')

    parser.add_argument('--pretrained-weight',
                        dest='weight',
                        metavar='PATH',
                        default=None,
                        help='pretrained weight')

    parser.add_argument('--activation',
                        dest='activation',
                        metavar='activation',
                        default='relu',
                        help='activation of network; \'relu\' or \'sin\'')

    parser.add_argument('--batchsize',
                        dest='batchsize',
                        type=int,
                        metavar='BATCHSIZE',
                        default=1,
                        help='batch size')
    parser.add_argument('--epoch',
                        dest='epoch',
                        type=int,
                        metavar='EPOCH',
                        default=100,
                        help='epochs')

    parser.add_argument(
        '--abs',
        dest='abs',
        # Bug fix: `type=bool` treats ANY non-empty string (including
        # "False") as True; parse common truthy spellings explicitly so
        # `--abs False` actually yields False.
        type=lambda v: str(v).strip().lower() in ('1', 'true', 'yes', 'y'),
        metavar='BOOL',
        default=False,
        help='whether we should use ABS when evaluating normal loss')

    parser.add_argument('--epsilon',
                        dest='epsilon',
                        type=float,
                        metavar='EPSILON',
                        default=0.1,
                        help='epsilon')
    parser.add_argument('--lambda',
                        dest='lamb',
                        type=float,
                        metavar='LAMBDA',
                        default=0.005,
                        help='hyperparameter for s : normal loss ratio')

    parser.add_argument('--outfile',
                        dest='outfile',
                        metavar='OUTFILE',
                        help='output file')

    args = parser.parse_args()

    writer = SummaryWriter(args.tb_save_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # create models
    model = Siren(in_features=3,
                  out_features=1,
                  hidden_features=256,
                  hidden_layers=5,
                  outermost_linear=True).to(device)

    if args.weight is not None:  # fixed: was `!= None`
        try:
            model.load_state_dict(torch.load(args.weight))
        except Exception:  # fixed: bare `except:` also caught KeyboardInterrupt
            print("Couldn't load pretrained weight: " + args.weight)

    # load
    ds = ObjDataset(args.data)
    samples_n = 20000

    # Bug fix: the last stage was written `RandomAugment(...)(ds)`, which
    # applied the module immediately and put its *output* into the
    # Sequential instead of composing the module itself.
    augments = nn.Sequential(ObjUniformSample(samples_n),
                             NormalPerturb(args.epsilon),
                             RandomAugment(samples_n, args.epsilon * 0.5))

    ds = augments(ds)

    p_aug = ds['p'].detach_().to(device)
    n_aug = ds['n'].detach_().to(device)
    s_aug = ds['s'].detach_().to(device)

    # First samples_n rows are the unperturbed originals.
    p = p_aug[:samples_n]
    n = n_aug[:samples_n]

    # Only consumed by the commented-out positional loss below; kept so that
    # experiment can be re-enabled as-is.
    p_gt = p.repeat(2, 1)

    # Ground-truth normals, mapped from [-1, 1] to 0-255 colors.
    writer.add_mesh("1. n_gt",
                    p.unsqueeze(0),
                    colors=(n.unsqueeze(0) * 128 + 128).int())

    optimizer = optim.Adam(list(model.parameters()), lr=1e-4)

    for epoch in range(args.epoch):
        optimizer.zero_grad()

        # train
        utils.model_train(model)
        loss_t, s, n = train(device,
                             model,
                             p_aug,
                             s_aug,
                             n_aug,
                             backward=True,
                             lamb=args.lamb,
                             use_abs=args.abs)

        #loss_x = 1e2 * torch.sum(torch.pow(p_aug - p_gt, 2))
        #loss_x.backward()

        #writer.add_scalars("loss", {'train': loss_t + loss_x.detach()}, epoch)

        # visualization
        with torch.no_grad():

            n_normalized = n / torch.norm(n, dim=1, keepdim=True)

            # Cosine similarity between predicted and reference normals.
            n_error = torch.sum(n_normalized * n_aug, dim=1,
                                keepdim=True) / torch.norm(
                                    n_aug, dim=1, keepdim=True)

            n_error_originals = n_error[:p.shape[0]]

            # NaNs arise from zero-norm predictions; exclude them from the mean.
            writer.add_scalars(
                "cosine similarity", {
                    'train':
                    n_error_originals[~torch.isnan(n_error_originals)].detach(
                    ).mean()
                }, epoch)

            if epoch % 10 == 0:
                print(epoch)
                writer.add_mesh(
                    "2. n",
                    p_aug[:p.shape[0]].unsqueeze(0).detach().clone(),
                    colors=(n_normalized[:p.shape[0]].unsqueeze(
                        0).detach().clone() * 128 + 128).int(),
                    global_step=epoch)

                writer.add_mesh(
                    "3. cosine similarity",
                    p_aug[:p.shape[0]].unsqueeze(0).detach().clone(),
                    colors=(F.pad(1 - n_error[:p.shape[0]],
                                  (0, 2)).unsqueeze(0).detach().clone() *
                            256).int(),
                    global_step=epoch)

        # update
        optimizer.step()

        torch.save(model.state_dict(),
                   args.weight_save_path + 'model_%03d.pth' % epoch)

    writer.close()
Example #10
0
class TensorBoardVisualizer():
    """Display/save images and meshes, and print/save logging information.

    Scalars, figures and meshes are exported to TensorBoard through a
    ``SummaryWriter``; optionally, result images are also written to an HTML
    page (built with the project's ``html.HTML`` helper) under
    ``<checkpoints_dir>/<name>/web/``.
    """
    def __init__(self, opt):
        """Initialize the Visualizer class

        Parameters:
            opt -- stores all the experiment flags; needs to be a subclass of BaseOptions

        Step 1: Cache the training/test options
        Step 2: create a TensorBoard SummaryWriter in a fresh, numbered run folder
        Step 3: optionally create an HTML directory for saving result images
        Step 4: create a logging file to store training losses
        """
        self.opt = opt  # cache the option
        self.display_id = opt.display_id
        self.use_html = opt.isTrain and not opt.no_html
        self.win_size = opt.display_winsize
        self.name = opt.name
        self.port = opt.display_port
        self.saved = False
        # Number the new run after the experiments already logged under the
        # same name so consecutive runs never overwrite each other.
        existing_folders = len(
            list(Path(opt.logs_folder).glob(f'*{self.name}*')))
        self.writer = SummaryWriter(
            f'{opt.logs_folder}/{self.name}_exp_{existing_folders}',
            flush_secs=60)
        print(
            f'Visualizer Tensorboard is exporting to folder {opt.logs_folder}/{self.name}_exp_{existing_folders} . . .'
        )
        if self.display_id > 0:  # in-browser figure display was requested
            self.ncols = opt.display_ncols

            if self.use_html:  # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
                self.web_dir = os.path.join(opt.checkpoints_dir, opt.name,
                                            'web')
                self.img_dir = os.path.join(self.web_dir, 'images')
                print('create web directory %s...' % self.web_dir)
                util.mkdirs([self.web_dir, self.img_dir])
        # create a logging file to store training losses
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name,
                                     'loss_log.txt')
        with open(self.log_name, "a") as log_file:
            now = time.strftime("%c")
            log_file.write(
                '================ Training Loss (%s) ================\n' % now)

    def reset(self):
        """Reset the self.saved status"""
        self.saved = False

    def display_estimated_mesh(self,
                               epoch,
                               flamelayer,
                               estimated_mesh,
                               estimated_texture_map,
                               tag='mesh'):
        """Log a mesh to TensorBoard with per-vertex colors sampled from a texture.

        Parameters:
            epoch                 -- global step for the TensorBoard entry
            flamelayer            -- FLAME layer providing faces and the
                                     vertex<->UV correspondence helper
            estimated_mesh        -- pytorch3d-style mesh (verts_padded/textures)
            estimated_texture_map -- texture image; assumed square with values
                                     in [0, 1] (shape[1] is used for both axes)
                                     -- TODO confirm against caller
            tag                   -- TensorBoard tag for the mesh
        """
        vertices_tensor = estimated_mesh.verts_padded()
        faces_tensor = torch.tensor(np.int32(flamelayer.faces),
                                    dtype=torch.long).cuda().unsqueeze(0)

        colors_tensor = torch.zeros(vertices_tensor.shape)

        verts_uvs = estimated_mesh.textures.verts_uvs_packed().clone()
        # Flip the V (vertical) coordinate: texture images index rows top-down.
        verts_uvs[:, 1] = 1 - verts_uvs[:, 1]

        # Convert normalized UVs to integer texel indices.
        verts_uvs_un = (verts_uvs * estimated_texture_map.shape[1] - 1).long()
        vertices_uv_correspondence = flamelayer.extract_vertices_uv_correspondence_for_tb(
            estimated_mesh, estimated_texture_map)
        # Paint every vertex with the texel its UV points at (RGB in 0..255).
        for i in range(vertices_uv_correspondence.shape[0]):
            colors_tensor[0, vertices_uv_correspondence[
                i, 0], :] = estimated_texture_map[
                    0, verts_uvs_un[vertices_uv_correspondence[i, 1], 1],
                    verts_uvs_un[vertices_uv_correspondence[i, 1],
                                 0], :].float() * 255

        self.writer.add_mesh(tag,
                             vertices=vertices_tensor,
                             colors=colors_tensor,
                             faces=faces_tensor,
                             global_step=epoch)

    def display_current_results(self,
                                visuals,
                                epoch,
                                save_result,
                                additional_visuals=None):
        """Display current results in TensorBoard; save current results to an HTML file.

        Parameters:
            visuals (OrderedDict) - - dictionary of images to display or save
            epoch (int) - - the current epoch
            save_result (bool) - - if save the current results to an HTML file
            additional_visuals (dict) - - optional mesh/texture data used to log
                the true and estimated meshes (expects keys 'flamelayer',
                'true_mesh', 'estimated_mesh', 'estimated_texture_map',
                'verbose_batch_ind')
        """
        # Mesh logging is best-effort: additional_visuals may be None or
        # incomplete, in which case it is silently skipped.
        try:
            self.display_estimated_mesh(
                epoch, additional_visuals['flamelayer'],
                additional_visuals['true_mesh'][
                    additional_visuals['verbose_batch_ind']],
                additional_visuals['true_mesh'].textures.maps_padded()
                [additional_visuals['verbose_batch_ind'], None], 'true_mesh')
            self.display_estimated_mesh(
                epoch, additional_visuals['flamelayer'],
                additional_visuals['estimated_mesh'][
                    additional_visuals['verbose_batch_ind']],
                additional_visuals['estimated_texture_map'][
                    additional_visuals['verbose_batch_ind'],
                    None], 'estimated_mesh')
        except Exception:  # narrowed from a bare `except:` so Ctrl-C still works
            pass

        if self.display_id > 0:  # show images in one TensorBoard figure
            import math
            ncols = self.ncols
            f = plt.figure(figsize=(15, 15))
            if ncols > 0:  # show all the images in one panel
                # Clamp before dividing: the original computed
                # ceil(len/ncols) unconditionally and crashed with
                # ZeroDivisionError whenever display_ncols == 0.
                ncols = min(ncols, len(visuals))
                rows = int(math.ceil(len(visuals) / ncols))
                idx = 0
                for label, image in visuals.items():
                    image_numpy = util.tensor2im(image)
                    a = f.add_subplot(rows, ncols, idx + 1)
                    a.set_title(f'{label}')
                    plt.imshow(image_numpy, cmap="jet")
                    idx += 1
                self.writer.add_figure('phase', f, epoch)

            else:  # no column count configured: one image per row
                # NOTE(review): this branch builds the figure but never logs
                # it (no add_figure call) — preserved from the original;
                # confirm whether that is intentional.
                rows = len(visuals)
                idx = 1
                for label, image in visuals.items():
                    image_numpy = util.tensor2im(image)
                    a = f.add_subplot(rows, 1, idx)
                    a.set_title(f'{label}')
                    plt.imshow(image_numpy, cmap="jet")
                    idx += 1

            plt.close('all')
        if self.use_html and (
                save_result or not self.saved
        ):  # save images to an HTML file if they haven't been saved.
            self.saved = True
            # save images to the disk
            for label, image in visuals.items():
                image_numpy = util.tensor2im(image)
                img_path = os.path.join(self.img_dir,
                                        'epoch%.3d_%s.png' % (epoch, label))
                util.save_image(image_numpy, img_path)

            # update website
            webpage = html.HTML(self.web_dir,
                                'Experiment name = %s' % self.name,
                                refresh=1)
            for n in range(epoch, 0, -1):
                webpage.add_header('epoch [%d]' % n)
                ims, txts, links = [], [], []

                # Only the labels are needed here — the page links to the
                # PNGs saved above. (The original loop shadowed image_numpy
                # and re-converted the stale `image` from the save loop.)
                for label in visuals:
                    img_path = 'epoch%.3d_%s.png' % (n, label)
                    ims.append(img_path)
                    txts.append(label)
                    links.append(img_path)
                webpage.add_images(ims, txts, links, width=self.win_size)
            webpage.save()

    def plot_current_losses(self, epoch, counter_ratio, losses):
        """Plot the current losses as a TensorBoard scalar group.

        Parameters:
            epoch (int)           -- current epoch
            counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
            losses (OrderedDict)  -- training losses stored in the format of (name, float) pairs
        """
        if not hasattr(self, 'plot_data'):
            self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
        self.plot_data['X'].append(epoch + counter_ratio)
        self.plot_data['Y'].append(
            [losses[k] for k in self.plot_data['legend']])
        # (A plain dict — the original's factory-less defaultdict() behaved
        # exactly like one.)
        d = {
            k: torch.from_numpy(np.array(losses[k]))
            for k in self.plot_data['legend']
        }
        self.writer.add_scalars('data/scalar_group', d,
                                self.plot_data['X'][-1])

    # losses: same format as |losses| of plot_current_losses
    def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
        """print current losses on console; also save the losses to the disk

        Parameters:
            epoch (int) -- current epoch
            iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
            losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
            t_comp (float) -- computational time per data point (normalized by batch_size)
            t_data (float) -- data loading time per data point (normalized by batch_size)
        """
        message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (
            epoch, iters, t_comp, t_data)
        for k, v in losses.items():
            message += '%s: %.3f ' % (k, v)

        print(message)  # print the message
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)  # save the message
Example #11
0
class MeshWriter:
    """TensorBoard writer for triangle meshes that share one template topology.

    The faces of ``template`` are reused for every logged mesh, so callers
    only supply vertex positions. Each instance logs into a fresh
    ``version_<k>`` directory under ``<out_dir>/<exp_name>`` and dumps the
    run's hyper-parameters both to TensorBoard and to ``hparam.json``.
    """
    def __init__(self, args, template: pv.PolyData):
        """Set up the experiment directory, SummaryWriter and hparams dump.

        Parameters:
            args     -- experiment options; must provide ``out_dir`` and
                        ``exp_name`` (all attributes are stored as hparams)
            template -- PyVista mesh whose connectivity is reused for every
                        mesh passed to write_meshes()
        """
        self.args = args
        self.template = template

        # PyVista stores faces flat as [n, i0, i1, i2, n, ...]; keep only the
        # vertex indices (assumes a pure-triangle mesh, i.e. n == 3).
        self.faces = torch.tensor(self.template.faces.reshape(-1, 4)[:, 1:])

        out_dir = f'{self.args.out_dir}/{self.args.exp_name}'

        if os.path.lexists(out_dir):
            # Continue the version numbering. Entries without a numeric
            # suffix (e.g. stray files) are ignored, and an existing-but-empty
            # directory starts at version_0 — the original `max(...)` raised
            # ValueError on an empty sequence and crashed on stray entries.
            version_numbers = [
                int(entry.split('_')[-1]) for entry in os.listdir(out_dir)
                if entry.split('_')[-1].isdigit()
            ]
            next_version = max(version_numbers, default=-1) + 1
            self.exp_dir = f'{out_dir}/version_{next_version}'
        else:
            # makedirs also creates missing parents (os.mkdir did not).
            os.makedirs(out_dir)
            self.exp_dir = f'{out_dir}/version_0'

        os.mkdir(self.exp_dir)

        self.writer = SummaryWriter(self.exp_dir)
        hparams = {k: str(v) for k, v in vars(self.args).items()}
        self.writer.add_hparams(
            hparam_dict=hparams,
            metric_dict={'hpmetrics': 0.1},
        )

        # Keep a plain-text copy of the hyper-parameters next to the events.
        with open(f'{self.exp_dir}/hparam.json', 'w') as f:
            json.dump(hparams, f)

    def write_scalars(self, epoch, train=True, **scalars):
        """Log every keyword argument as a '<Train|Val>/<name>' scalar at `epoch`."""
        section = 'Train' if train else 'Val'
        for k, v in scalars.items():
            print(k, v)
            self.writer.add_scalar(f'{section}/{k}', v, epoch)

    def write_meshes(self, epoch, verts, train=False):
        """
        Log a batch of meshes under the 'Train' or 'Val' tag.

        Vertices should correspond to faces provided in the constructor;
        `verts` is expected to be (batch, n_vertices, 3).
        """
        assert verts.shape[1] - 1 == int(self.faces.max())

        section = 'Train' if train else 'Val'
        camera_config = {
            'cls': 'PerspectiveCamera',
            'fov': 75,
            'aspect': 0.9,
        }
        material_config = {
            'cls': 'MeshDepthMaterial',
            'wireframe': True,
        }
        config_dict = {
            'material': material_config,
            'camera': camera_config,
        }

        # The single template face list is tiled across the whole batch.
        self.writer.add_mesh(
            section,
            vertices=verts,
            colors=None,
            faces=self.faces.unsqueeze(0).repeat((verts.shape[0], 1, 1)),
            global_step=epoch,
            config_dict=config_dict,
        )

    def save_model_checkpoint(self, model, epoch):
        """Save the epoch number and model weights to '<exp_dir>/checkpoint.pt'."""
        path = os.path.join(self.exp_dir, 'checkpoint.pt')
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict()
        }, path)
Example #12
0
class Agent:
    """REINFORCE agent with an entropy bonus and a learned critic baseline.

    Wraps a ``Policy`` network (actor) and a ``Critic`` network (state-value
    baseline), runs one episode per environment per epoch, and logs rewards,
    entropies and losses to TensorBoard.
    """
    def __init__(self,
                 state_size,
                 action_size,
                 lr=1e-2,
                 critic_lr=1e-4,
                 gamma=1.0,
                 entropy_beta=1e-3):
        """Create the actor/critic networks and their Adam optimizers.

        Parameters:
            state_size   -- dimensionality of an environment state
            action_size  -- number of discrete actions the policy outputs
            lr           -- learning rate for the policy optimizer
            critic_lr    -- learning rate for the critic optimizer
            gamma        -- reward discount factor
            entropy_beta -- weight of the entropy regularization term
        """
        # Fixed seeds so repeated runs are comparable.
        np.random.seed(123)
        torch.manual_seed(123)

        self.lr = lr
        self.critic_lr = critic_lr
        self.gamma = gamma
        self.policy = Policy(state_size=state_size,
                             action_size=action_size).to(device)
        self.critic = Critic(state_size=state_size).to(device)
        self.optimizer = optim.Adam(self.policy.parameters(), lr=self.lr)
        self.coptimizer = optim.Adam(self.critic.parameters(),
                                     lr=self.critic_lr)

        self.creation_timestamp = dt.now().strftime('%Y%m%d_%H%M%S')

        self.entropy_beta = entropy_beta
        # Created lazily by train()/test() so the log-dir name can encode
        # run-specific information (dataset, hyper-parameters, ...).
        self.writer = None

    def train(self, envs, dataset_name, n_epochs, model_path=None):
        """Train the policy with REINFORCE over all environments.

        Parameters:
            envs         -- dict mapping experiment name -> environment
            dataset_name -- name used in TensorBoard tags and the log dir
            n_epochs     -- number of passes over all environments
            model_path   -- checkpoint path; '_epoch<k>.pth' is appended
                            before the file extension
        """
        self.policy.train()
        exp_keys = np.array(list(envs.keys()))

        hostname = socket.gethostname()
        self.writer = SummaryWriter(
            log_dir='REINFORCE/logs/{}_reinforce_{}_{}_lr{}_gamma{}_eps{}/'.
            format(self.creation_timestamp, hostname, dataset_name.lower(),
                   self.lr, self.gamma, n_epochs),
            filename_suffix='_{}'.format(self.creation_timestamp))

        # Log each environment's frames/dots profile once, for reference.
        for exp_name, env in envs.items():
            fig, ax = plt.subplots()
            plot_vec = env.dots_vec
            idxs = np.arange(len(plot_vec))

            ax.plot(idxs, plot_vec)
            self.writer.add_figure(
                'Frames_Dots_{}/{}'.format(dataset_name, exp_name), fig)

        #### Running REINFORCE ####

        scores = {exp_key: [] for exp_key in envs.keys()}
        saved_log_probs = {exp_key: [] for exp_key in envs.keys()}
        saved_entropies = {exp_key: [] for exp_key in envs.keys()}
        rewards = {exp_key: [] for exp_key in envs.keys()}
        biases = {exp_key: 0. for exp_key in envs.keys()}
        avg_epoch_losses = []
        avg_critic_losses = []
        for i_epoch in tqdm(range(n_epochs), desc='Epochs'):

            epoch_losses = []
            critic_losses = []

            # Shuffle the experiments at every new epoch
            np.random.shuffle(exp_keys)

            for exp_key in tqdm(exp_keys, desc='Epoch {}'.format(
                    i_epoch)):  # Run an episode for each environment
                env = envs[exp_key]

                saved_log_probs[exp_key] = []
                saved_entropies[exp_key] = []
                rewards[exp_key] = []
                state = env.reset()

                s_t_values = []

                done = False
                while not done:
                    # Sample an action; keep its log-probability and entropy
                    # for the policy-gradient update below.
                    action, log_prob, entropy, probs = self.policy.act(state)

                    # Critic's value estimate of the current state (baseline).
                    s_t_values.append(self.critic.criticize(state))

                    saved_log_probs[exp_key].append(log_prob)
                    saved_entropies[exp_key].append(entropy)
                    state, reward, done, _ = env.step(action)

                    rewards[exp_key].append(reward)

                scores[exp_key].append(sum(rewards[exp_key]))

                # Discounted returns R_t, computed backwards over the episode.
                discounts = [
                    self.gamma**i for i in range(len(rewards[exp_key]))
                ]
                R_t = np.zeros((len(rewards[exp_key]), ), dtype=np.float32)
                R_t[-1] = discounts[-1] * rewards[exp_key][
                    -1]  # We need to set the last reward

                for idx in range(2, len(rewards[exp_key]) + 1):
                    R_t[-idx] = discounts[-idx] * rewards[exp_key][-idx] + R_t[
                        -idx + 1]

                R_n = sum(rewards[exp_key])

                H = torch.cat(saved_entropies[exp_key]).sum()  # Entropy Term

                policy_loss = []
                for idx, (log_prob,
                          r_t) in enumerate(zip(saved_log_probs[exp_key],
                                                R_t)):
                    # Policy gradient with the critic value as a baseline
                    # (Rt - v(s_t | theta_v), actor-critic style), plus an
                    # entropy bonus that encourages exploration.
                    policy_loss.append(log_prob *
                                       (r_t - s_t_values[idx].item()) -
                                       self.entropy_beta *
                                       saved_entropies[exp_key][idx])

                J = torch.cat(policy_loss).sum()

                # Actor Loss
                policy_loss = -J

                # Critic Loss
                critic_loss = F.mse_loss(
                    torch.cat(s_t_values),
                    torch.tensor(R_t, dtype=torch.float32, device=device)
                )  # Last state is an outlier, do not use it for loss minimization

                # Accumulate per-episode losses; NaN policy losses are dropped
                # so one bad episode cannot poison the epoch update.
                if not math.isnan(policy_loss.detach().cpu()):
                    epoch_losses.append(policy_loss.unsqueeze(0))
                critic_losses.append(critic_loss.unsqueeze(0))

                # BIAS Term (simple average of this episode's rewards)
                biases[exp_key] = np.mean(rewards[exp_key])

                # Logging to Tensorboard (check the file location for inspection)
                self.writer.add_scalar(
                    'Rewards_{}/avg_reward_{}'.format(dataset_name, exp_key),
                    np.mean(scores[exp_key]), i_epoch)
                self.writer.add_scalar(
                    'Rewards_{}/curr_reward_{}'.format(dataset_name, exp_key),
                    R_n, i_epoch)
                self.writer.add_scalar(
                    'Entropy_{}/curr_entropy_{}'.format(dataset_name, exp_key),
                    H, i_epoch)
                self.writer.add_scalar(
                    'Losses_{}/curr_loss_J_{}'.format(dataset_name, exp_key),
                    J.detach().cpu(), i_epoch)
                self.writer.add_scalar(
                    'Losses_{}/curr_actor_loss_{}'.format(
                        dataset_name, exp_key),
                    policy_loss.detach().cpu(), i_epoch)
                self.writer.add_scalar(
                    'Losses_{}/curr_critic_loss_{}'.format(
                        dataset_name, exp_key),
                    critic_loss.detach().cpu(), i_epoch)

                self.write_selected_frames_image_to_log(
                    env, i_epoch, 'Training_{}'.format(dataset_name), exp_key)

            epoch_losses = torch.cat(epoch_losses).mean()
            critic_losses = torch.cat(critic_losses).mean()
            avg_epoch_losses.append(epoch_losses.detach().cpu())
            avg_critic_losses.append(critic_losses.detach().cpu())

            self.writer.add_scalar(
                'Rewards_{}/_overall_avg_reward'.format(dataset_name),
                np.mean([scores[exp_key] for exp_key in exp_keys]), i_epoch)

            # One optimizer step per epoch on the episode-averaged losses.
            self.optimizer.zero_grad()
            epoch_losses.backward(
            )  # Computes the derivative of loss with respect to theta (dLoss/dTheta)
            self.optimizer.step(
            )  # Updates the theta parameters (e.g., theta = theta -lr * dLoss/dTheta in SGD)

            self.coptimizer.zero_grad()
            critic_losses.backward()
            self.coptimizer.step()

            if i_epoch % SAVE_MODEL_FREQ == SAVE_MODEL_FREQ - 1:
                # rsplit('.', 1) strips only the extension; the original
                # split('.')[0] broke on paths with dots in directory
                # components (e.g. './models/run.pth' -> '').
                self.save_model(
                    model_path.rsplit('.', 1)[0] +
                    '_epoch{}.pth'.format(i_epoch + 1))

        self.save_model(
            model_path.rsplit('.', 1)[0] + '_epoch{}.pth'.format(i_epoch + 1))

    def test(self, env, dataset_name, exp_name):
        """Run one greedy (argmax) episode and print reward statistics."""
        self.policy.eval()

        if self.writer is None:
            hostname = socket.gethostname()
            self.writer = SummaryWriter(
                log_dir='REINFORCE/logs/{}_reinforce_{}_{}_test/'.format(
                    self.creation_timestamp, hostname, dataset_name.lower()),
                filename_suffix='_{}'.format(self.creation_timestamp))

        fig, ax = plt.subplots()
        plot_vec = env.dots_vec
        idxs = np.arange(len(plot_vec))

        ax.plot(idxs, plot_vec)
        self.writer.add_figure(
            'Frames_Dots_{}/{}'.format(dataset_name, exp_name), fig)

        rewards = []
        state = env.reset()

        done = False
        idx = 0
        with torch.no_grad():
            while not done:
                # Deterministic policy: always take the most probable action.
                action, probs = self.policy.argmax_action(state)
                state, reward, done, _ = env.step(action)
                rewards.append(reward)
                idx += 1

        discounts = [self.gamma**i for i in range(len(rewards) + 1)]

        R = sum([a * b for a, b in zip(discounts, rewards)])

        print(
            'Reward: {:.3f}\nDiscounted Reward: {:.3f}\nNum selected frames: {}'
            .format(sum(rewards), R, len(env.selected_frames)))

    def write_reward_function_surface_to_log(self, env):
        """Plot the reward-function surface and log it as a figure and a mesh."""
        fig = plt.figure()
        # fig.gca(projection='3d') was removed in matplotlib >= 3.6;
        # add_subplot(projection='3d') is the supported equivalent and also
        # works on older versions.
        ax = fig.add_subplot(projection='3d')

        # Make data.
        X = np.arange(0, 30, 3)
        Y = np.arange(0, 30, 3)
        X, Y = np.meshgrid(X, Y)
        Z = env.gaussian(X, env.desired_skip, env.SIGMA,
                         env.LAMBDA_MULTIPLIER) + env.exponential_decay(Y)

        # Plot the surface.
        surf = ax.plot_surface(X,
                               Y,
                               Z,
                               cmap=cm.coolwarm,
                               linewidth=0,
                               antialiased=False)

        # Add a color bar which maps values to colors.
        fig.colorbar(surf, shrink=0.5, aspect=5)
        self.writer.add_figure('Reward Function Surface', fig)
        # Also log the surface as a blue point cloud for 3D inspection.
        mesh = np.expand_dims(np.array([X, Y, Z]).reshape(3,
                                                          -1).transpose(1, 0),
                              axis=0)
        colors = np.repeat([[[0, 0, 255]]], mesh.shape[1], axis=1)
        self.writer.add_mesh('Reward Function Surface',
                             vertices=mesh,
                             colors=colors)

    def write_selected_frames_image_to_log(self,
                                           env,
                                           i_episode,
                                           prefix,
                                           suffix=None):
        """Scatter-plot the skip size at each selected frame and log the figure."""
        skips = np.array(env.selected_frames[1:]) - np.array(
            env.selected_frames[:-1])
        fig, ax = plt.subplots()
        ax.scatter(env.selected_frames[:-1], skips)
        if suffix:
            self.writer.add_figure('{}/{}'.format(prefix, suffix), fig,
                                   i_episode)
        else:
            self.writer.add_figure('{}'.format(prefix), fig, i_episode)

    def save_model(self, model_path):
        """Save the policy weights to `model_path`."""
        print('[{}] Saving model...'.format(
            dt.now().strftime('%Y-%m-%d %H:%M:%S')))
        torch.save(self.policy.state_dict(), model_path)
        print('[{}] Done!'.format(dt.now().strftime('%Y-%m-%d %H:%M:%S')))

    def load_model(self, model_path):
        """Load policy weights previously saved with save_model()."""
        print('[{}] Loading model...'.format(
            dt.now().strftime('%Y-%m-%d %H:%M:%S')))
        self.policy.load_state_dict(torch.load(model_path))
        print('[{}] Done!'.format(dt.now().strftime('%Y-%m-%d %H:%M:%S')))
Example #13
0
# Minimal example: log a single tetrahedron to TensorBoard as a 3D mesh.
# One batch entry, four vertices, one RGB colour per vertex, four faces.
_verts = [
    [1, 1, 1],
    [-1, -1, 1],
    [1, -1, -1],
    [-1, 1, -1],
]
_vertex_colors = [
    [255, 0, 0],
    [0, 255, 0],
    [0, 0, 255],
    [255, 0, 255],
]
_triangles = [
    [0, 2, 3],
    [0, 3, 1],
    [0, 1, 2],
    [1, 3, 2],
]

# add_mesh expects batched tensors: (1, n_vertices, 3) / (1, n_faces, 3).
vertices_tensor = torch.as_tensor(_verts, dtype=torch.float).unsqueeze(0)
colors_tensor = torch.as_tensor(_vertex_colors, dtype=torch.int).unsqueeze(0)
faces_tensor = torch.as_tensor(_triangles, dtype=torch.int).unsqueeze(0)

writer = SummaryWriter()
writer.add_mesh("my_mesh",
                vertices=vertices_tensor,
                colors=colors_tensor,
                faces=faces_tensor)

writer.close()
         torch.from_numpy(flamelayer.faces.astype(np.int32)),
         verts_uvs=estimated_mesh.textures.verts_uvs_packed(),
         texture_map=estimated_texture_map,
         faces_uvs=estimated_mesh.textures.faces_uvs_packed())
# Pick the next free run index so earlier experiments are not overwritten.
existing_folders = len(list(Path('runs').glob('*test_experiment_*')))

writer = SummaryWriter(f'runs/test_experiment_{existing_folders}')

## write to tensorboard
vertices_tensor = estimated_mesh.verts_padded()
faces_tensor = torch.tensor(np.int32(flamelayer.faces),
                            dtype=torch.long).cuda().unsqueeze(0)

colors_tensor = torch.zeros(vertices_tensor.shape)
# Flip the packed UV coordinates and convert them to integer texel indices.
verts_uvs = 1 - estimated_mesh.textures.verts_uvs_packed()
verts_uvs_un = (verts_uvs * estimated_texture_map.shape[1] - 1).long()
vertices_uv_correspondence = flamelayer.extract_vertices_uv_correspondence_for_tb(
    estimated_mesh, estimated_texture_map)
# Colour each vertex with the texture sample at its UV location (0..255 RGB).
for row in range(vertices_uv_correspondence.shape[0]):
    vert_idx = vertices_uv_correspondence[row, 0]
    uv_idx = vertices_uv_correspondence[row, 1]
    texel = estimated_texture_map[0, verts_uvs_un[uv_idx, 1],
                                  verts_uvs_un[uv_idx, 0], :]
    colors_tensor[0, vert_idx, :] = texel.float() * 255

writer.add_mesh('my_mesh11',
                vertices=vertices_tensor,
                colors=colors_tensor,
                faces=faces_tensor)
writer.flush()
Example #15
0
        net, opt, scheduler, train_loader, dev)
    if (epoch + 1) % 5 == 0 or epoch == 0:
        test_miou, test_per_cat_miou = evaluate(net, test_loader, dev, True)
        if test_miou > best_test_miou:
            best_test_miou = test_miou
            best_test_per_cat_miou = test_per_cat_miou
            if args.save_model_path:
                torch.save(net.state_dict(), args.save_model_path)
        print(
            'Current test mIoU: %.5f (best: %.5f), per-Category mIoU: %.5f (best: %.5f)'
            % (test_miou, best_test_miou, test_per_cat_miou,
               best_test_per_cat_miou))
    # Tensorboard
    if args.tensorboard:
        colored = paint(preds)
        writer.add_mesh('data',
                        vertices=data,
                        colors=colored,
                        global_step=epoch)
        writer.add_scalar('training time for one epoch',
                          training_time,
                          global_step=epoch)
        writer.add_scalar('AvgLoss', AvgLoss, global_step=epoch)
        writer.add_scalar('AvgAcc', AvgAcc, global_step=epoch)
        if (epoch + 1) % 5 == 0:
            writer.add_scalar('test mIoU', test_miou, global_step=epoch)
            writer.add_scalar('best test mIoU',
                              best_test_miou,
                              global_step=epoch)
    print()
Example #16
0
class TensorboardLogger:
    """Thin wrapper around ``SummaryWriter`` that also launches a local
    TensorBoard server in the background so the written summaries can be
    inspected immediately in a browser."""

    def __init__(self, log_dir, port=3468):
        """
        :param log_dir: root directory; each run writes to a timestamped subdir
        :param port: TCP port for the background TensorBoard instance
        """
        self.summary_dir = os.path.join(log_dir, get_current_time())
        self.summary_writer = SummaryWriter(self.summary_dir, flush_secs=15)
        self._launch_tensorboard(port=port)

    def _launch_tensorboard(self, port):
        """
        Launch tensorboard in background to locally inspect the created summary under a specified port
        """
        self._tb = program.TensorBoard()
        # Point TensorBoard at the parent directory so sibling runs under the
        # same log root are visible as well.
        self._tb.configure(argv=[
            None, '--logdir',
            os.path.dirname(self.summary_dir), '--port',
            str(port)
        ])
        self.tensorboard_url = self._tb.launch()
        self.print_tensorboard_url()

    def print_tensorboard_url(self):
        """Print the URL of the launched TensorBoard instance."""
        print('\n\nTensorboard url: %s\n\n' % self.tensorboard_url)

    def log(self, log_data, stage, step):
        """
        Log a list of data to tensorboard.
        Each piece of data to be logged must be defined as a dict, with fields:
            'type': type of data ('scalar', 'image' or 'pointcloud')
            'name': the name of the log in which this new data will be included (e.g 'batch_loss', 'accuracy')
            'data': numpy array or torch tensor representing the data.
                    If image data, use NCHW format, float type and intensity range between [0, 1]
                    If pointcloud data, use Nx6xP format, where P is the number of points and the first dimension represents [x y z R G B]
                        RGB values must be in the range [0, 1]

        :param log_data: list of dicts, each one containing a piece of data to be logged. This dict must have 'type', 'name' and 'data' fields
        :param stage: string naming the training stage for the data to log. (e.g 'train', 'val', 'test')
        :param step: global step under which the data is recorded
        :raises ValueError: if a dict carries an unsupported 'type'
        """

        for data_dict in log_data:

            tag = '%s/%s' % (stage, data_dict['name'])
            kind = data_dict['type']
            if kind == 'scalar':
                self.summary_writer.add_scalar(tag=tag,
                                               scalar_value=data_dict['data'],
                                               global_step=step)
            elif kind == 'image':
                self.summary_writer.add_images(tag=tag,
                                               img_tensor=data_dict['data'],
                                               global_step=step)
            elif kind == 'pointcloud':
                # Work on a local reference: don't mutate the caller's dict.
                data = data_dict['data']
                if isinstance(data, np.ndarray):
                    data = torch.from_numpy(data)
                # Nx6xP -> NxPx3 vertices plus NxPx3 uint8 colors, as expected
                # by SummaryWriter.add_mesh.
                vertices = data[:, 0:3, :].permute(0, 2, 1)

                colors = 255 * data[:, 3:6, :].permute(0, 2, 1)
                colors = colors.type(torch.uint8)
                self.summary_writer.add_mesh(tag=tag,
                                             vertices=vertices,
                                             colors=colors,
                                             global_step=step)
            else:
                # ValueError is a subclass of Exception, so callers catching
                # the previous generic Exception still work.
                raise ValueError(
                    'Logging input data type (%s) is not implemented' %
                    data_dict['type'])
Example #17
0
class Trainer(BaseTrainer):
    # Differentiable-rendering trainer: fits a (point-based) model so its
    # rendered RGB + silhouette match ground-truth images, with optional
    # point-cloud regularizers (projection / repulsion) and TensorBoard logging.
    def __init__(self, model, optimizer, scheduler, generator, train_loader, val_loader, device='cpu',
                 cameras=None, lights=None, log_dir=None, vis_dir=None, debug_dir=None, val_dir=None,
                 n_eval_points=8000,
                 lambda_dr_rgb=1.0, lambda_dr_silhouette=1.0,
                 lambda_dr_proj=0.0, lambda_dr_repel=0.0,
                 overwrite_visualization=True,
                 n_debug_points=4000, steps_dss_backward_radii=100,
                 **kwargs):
        """Initialize the Trainer.
        Args:
            model (nn.Module): model being optimized
            optimizer: optimizer for the model parameters
            scheduler: learning-rate scheduler (stepped in update_learning_rate)
            generator: helper that renders images / point clouds / meshes
            train_loader: training data loader
            val_loader: validation data loader (also provides cameras/lights)
            device: torch device
            cameras, lights: pytorch3d camera / light objects (optional)
            log_dir, vis_dir, debug_dir, val_dir: output directories
            n_eval_points (int): number of GT points sampled for 3D evaluation
            lambda_dr_rgb, lambda_dr_silhouette, lambda_dr_proj, lambda_dr_repel:
                weights for the corresponding loss terms (0 disables a term)
            overwrite_visualization (bool): if True, debug files reuse one name
                instead of one file per iteration
            n_debug_points (int): cap on points shown in debug plots
            steps_dss_backward_radii (int): schedule length for the DSS
                backward radii scaler
        """
        self.cfg = kwargs
        self.device = device
        self.model = model
        self.cameras = cameras
        self.lights = lights

        self.val_loader = val_loader
        self.train_loader = train_loader

        # One timestamped TensorBoard run per Trainer instance.
        self.tb_logger = SummaryWriter(
            log_dir + datetime.datetime.now().strftime("-%Y%m%d-%H%M%S"))

        # implicit function model
        self.vis_dir = vis_dir
        self.val_dir = val_dir

        self.lambda_dr_rgb = lambda_dr_rgb
        self.lambda_dr_silhouette = lambda_dr_silhouette
        self.lambda_dr_proj = lambda_dr_proj
        self.lambda_dr_repel = lambda_dr_repel

        self.generator = generator
        self.n_eval_points = n_eval_points
        self.overwrite_visualization = overwrite_visualization

        # Only point-based models expose a backward radii scaler to schedule.
        init_dss_backward_radii = 0
        if isinstance(self.model, PointModel):
            init_dss_backward_radii = self.model.renderer.rasterizer.raster_settings.radii_backward_scaler

        self.training_scheduler = TrainerScheduler(init_dss_backward_radii=init_dss_backward_radii,
                                                   steps_dss_backward_radii=steps_dss_backward_radii,
                                                   limit_dss_backward_radii=1.0,
                                                   steps_proj=self.cfg.get(
                                                       'steps_proj', -1),
                                                   gamma_proj=self.cfg.get('gamma_proj', 5))

        self.debug_dir = debug_dir
        self.hooks = []
        self._mesh_cache = None

        self.n_debug_points = n_debug_points

        self.optimizer = optimizer
        self.scheduler = scheduler

        self.projection_loss = ProjectionLoss(
            reduction='mean', filter_scale=2.0, knn_k=12)
        self.repulsion_loss = RepulsionLoss(
            reduction='mean', filter_scale=2.0, knn_k=12)
        self.iou_loss = IouLoss(
            reduction='mean', channel_dim=None)
        self.l1_loss = L1Loss(reduction='mean')
        self.l2_loss = L2Loss(reduction='mean')
        self.smape_loss = SmapeLoss(reduction='mean')

    def evaluate_3d(self, val_dataloader, it, **kwargs):
        """Evaluate in 3D: chamfer distance (point + normal terms) between the
        model's point cloud and the dataset's ground-truth cloud.

        Logs scalars (and the predicted cloud as a mesh) to TensorBoard under
        the 'eval' tag and returns the eval dict.
        """
        logger_py.info("[3D Evaluation]")
        t0 = time.time()
        if not os.path.exists(self.val_dir):
            os.makedirs(self.val_dir)

        # create mesh using generator
        pointcloud = self.model.get_point_clouds(
            with_colors=False, with_normals=True,
            require_normals_grad=False)

        pointcloud_tgt = val_dataloader.dataset.get_pointclouds(
            num_points=self.n_eval_points).to(device=pointcloud.device)

        cd_p, cd_n = chamfer_distance(pointcloud_tgt, pointcloud,
                         x_lengths=pointcloud_tgt.num_points_per_cloud(), y_lengths=pointcloud.num_points_per_cloud(),
                         )
        # save to "val" dict
        t1 = time.time()
        logger_py.info('[3D Evaluation] time ellapsed {}s'.format(t1 - t0))
        eval_dict = {'chamfer_point': cd_p.item(), 'chamfer_normal': cd_n.item()}
        self.tb_logger.add_scalars(
            'eval', eval_dict, global_step=it)
        if not pointcloud.is_empty:
            # NOTE(review): reuses the 'eval' tag already used by add_scalars
            # above — confirm the tag collision is intentional.
            self.tb_logger.add_mesh('eval',
                                    np.array(pointcloud.vertices)[None, ...], global_step=it)
            # mesh.export(os.path.join(self.val_dir, "%010d.ply" % it))
        return eval_dict

    def eval_step(self, data, **kwargs):
        """
        evaluate with image mask iou or image rgb psnr
        """
        from skimage.transform import resize
        lights_model = kwargs.get(
            'lights', self.val_loader.dataset.get_lights())
        cameras_model = kwargs.get(
            'cameras', self.val_loader.dataset.get_cameras())
        img_size = self.generator.img_size
        eval_dict = {'iou': 0.0, 'psnr': 0.0}
        with autograd.no_grad():
            self.model.eval()
            data = self.process_data_dict(
                data, cameras_model, lights=lights_model)
            img_mask = data['mask_img']
            img = data['img']
            # render image
            rgbas = self.generator.raytrace_images(
                img_size, img_mask, cameras=data['camera'], lights=data['light'])
            assert(len(rgbas) == 1)
            rgba = rgbas[0]
            # HWC numpy -> NCHW float tensor on the mask's device.
            rgba = torch.tensor(
                rgba[None, ...], dtype=torch.float, device=img_mask.device).permute(0, 3, 1, 2)

            # compare iou
            mask_gt = F.interpolate(
                img_mask.float(), img_size, mode='bilinear', align_corners=False).squeeze(1)
            mask_pred = rgba[:, 3, :, :]
            eval_dict['iou'] += self.iou_loss(mask_gt.float(),
                                              mask_pred.float(), reduction='mean')

            # compare psnr
            rgb_gt = F.interpolate(
                img, img_size, mode='bilinear', align_corners=False)
            rgb_pred = rgba[:, :3, :, :]
            # NOTE(review): 'align_corners' looks copied from F.interpolate —
            # confirm L2Loss actually accepts this keyword.
            eval_dict['psnr'] += self.l2_loss(
                rgb_gt, rgb_pred, channel_dim=1, reduction='mean', align_corners=False).detach()

        return eval_dict

    def train_step(self, data, cameras, **kwargs):
        """
        Args:
            data (dict): contains img, img.mask and img.depth and camera_mat
            cameras (Cameras): Cameras object from pytorch3d
        Returns:
            loss
        """
        self.model.train()
        self.optimizer.zero_grad()
        it = kwargs.get("it", None)
        lights = kwargs.get('lights', None)
        if hasattr(self, 'training_scheduler'):
            self.training_scheduler.step(self, it)

        data = self.process_data_dict(data, cameras, lights=lights)
        # NOTE(review): model.train() already called above — the repeat is
        # harmless but presumably redundant.
        self.model.train()
        # autograd.set_detect_anomaly(True)
        loss = self.compute_loss(data['img'], data['mask_img'], data['input'],
                                 data['camera'], data['light'], it=it)
        loss.backward()
        self.optimizer.step()
        # Guard against NaN/Inf creeping into the weights after the step.
        check_weights(self.model.state_dict())

        return loss.item()

    def process_data_dict(self, data, cameras, lights=None):
        ''' Processes the data dictionary and returns respective tensors

        Args:
            data (dictionary): data dictionary
            cameras: camera object whose R/T are overwritten from 'camera_mat'
            lights: optional lights object, re-created from 'lights' params
        Returns:
            dict with keys 'img', 'mask_img', 'input', 'camera', 'light'
        '''
        device = self.device

        # Get "ordinary" data
        img = data.get('img.rgb').to(device)
        assert(img.min() >= 0 and img.max() <=
               1), "Image must be a floating number between 0 and 1."
        mask_img = data.get('img.mask').to(device)

        camera_mat = data.get('camera_mat', None)

        # inputs for SVR
        inputs = data.get('inputs', torch.empty(0, 0)).to(device)

        # set camera matrix to cameras (mutates the passed-in cameras object)
        if camera_mat is None:
            logger_py.warning(
                "Camera matrix is not provided! Using the default matrix")
        else:
            cameras.R, cameras.T = decompose_to_R_and_t(camera_mat)
            cameras._N = cameras.R.shape[0]
            cameras.to(device)

        if lights is not None:
            lights_params = data.get('lights', None)
            if lights_params is not None:
                # Rebuild a lights object of the same type with batch params.
                lights = type(lights)(**lights_params).to(device)

        return {'img': img, 'mask_img': mask_img, 'input': inputs, 'camera': cameras, 'light': lights}

    def compute_loss(self, img, mask_img, inputs, cameras, lights, n_points=None, eval_mode=False, it=None):
        ''' Compute the loss.
        Args:
            img (tensor): (N,C,H,W) ground-truth image
            mask_img (tensor): ground-truth object mask
            inputs (tensor): SVR inputs (may be empty)
            cameras, lights: scene parameters for rendering
            n_points: unused here; kept for interface compatibility
            eval_mode (bool): whether to use eval mode
            it (int): training iteration
        Returns:
            full loss dict in eval mode, otherwise the scalar total loss
        '''
        # Initialize loss dictionary and other values
        loss = {}

        # Shortcuts
        _, _, h, w = img.shape

        # Apply losses
        # Initialize loss
        loss['loss'] = 0

        model_outputs = self.model(
            mask_img, cameras=cameras, lights=lights, it=it)

        point_clouds = model_outputs.get('iso_pcl')
        mask_img_pred = model_outputs.get('mask_img_pred')
        img_pred = model_outputs.get('img_pred')

        # 4.) Calculate Loss — both helpers accumulate into the shared
        # `loss` dict in place.
        self.calc_dr_loss(img.permute(0, 2, 3, 1), img_pred, mask_img.reshape(
            -1, h, w), mask_img_pred.reshape(-1, h, w), reduction_method='mean', loss=loss)
        self.calc_pcl_reg_loss(
            point_clouds, reduction_method='mean', loss=loss, it=it)

        # Log every individual loss term to TensorBoard.
        for k, v in loss.items():
            mode = 'val' if eval_mode else 'train'
            if isinstance(v, torch.Tensor):
                self.tb_logger.add_scalar('%s/%s' % (mode, k), v.item(), it)
            else:
                self.tb_logger.add_scalar('%s/%s' % (mode, k), v, it)

        return loss if eval_mode else loss['loss']

    def calc_pcl_reg_loss(self, point_clouds, reduction_method='mean', loss={}, **kwargs):
        """
        Point-cloud regularization: accumulates weighted projection and
        repulsion terms into `loss` in place.

        NOTE(review): `loss={}` is a mutable default argument; callers always
        pass `loss` explicitly, but the shared default is a latent hazard.

        Args:
            point_clouds (PointClouds3D): point clouds in source space (object coordinate)
        """
        loss_dr_repel = 0
        loss_dr_proj = 0
        if self.lambda_dr_proj > 0:
            loss_dr_proj = self.projection_loss(
                point_clouds, rebuild_knn=True, points_filter=self.model.points_filter) * self.lambda_dr_proj
        if self.lambda_dr_repel > 0:
            loss_dr_repel = self.repulsion_loss(
                point_clouds, rebuild_knn=True, points_filter=self.model.points_filter) * self.lambda_dr_repel

        loss['loss'] = loss_dr_proj + loss_dr_repel + loss['loss']
        loss['loss_dr_proj'] = loss_dr_proj
        loss['loss_dr_repel'] = loss_dr_repel

    def calc_dr_loss(self, img, img_pred, mask_img, mask_img_pred,
                     reduction_method='mean', loss={}, **kwargs):
        """
        Calculates image loss (RGB inside the joint mask + silhouette terms)
        and accumulates it into `loss` in place.

        NOTE(review): `loss={}` is a mutable default argument; callers always
        pass `loss` explicitly, but the shared default is a latent hazard.

        Args:
            img (tensor): (N,H,W,C) range [0, 1]
            img_pred (tensor): (N,H,W,C) range [0, 1]
            mask_img (tensor): (N,H,W) range [0, 1]
            mask_img_pred (tensor): (N,H,W) range [0, 1]
        """
        lambda_dr_silhouette = self.lambda_dr_silhouette
        lambda_dr_rgb = self.lambda_dr_rgb

        loss_dr_silhouette = 0.
        loss_dr_rgb = 0.
        loss_image_grad = 0.

        assert(img.shape == img_pred.shape), \
            "Ground truth mage shape and predicted image shape is unequal"
        if lambda_dr_rgb > 0:
            # Only compare RGB where GT and prediction masks both cover.
            mask_pred = mask_img.bool() & mask_img_pred.bool()
            if mask_pred.sum() > 0:
                loss_dr_rgb = self.l1_loss(
                    img, img_pred, mask=mask_pred, reduction=reduction_method) * lambda_dr_rgb
                # loss_dr_rgb = self.smape_loss(
                #     img, img_pred, mask=mask_pred) * lambda_dr_rgb

        else:
            loss_dr_rgb = 0.0

        if lambda_dr_silhouette > 0:
            # gt_edge = self.image_gradient_loss.edge_extractor.to(
            #     device=mask_img.device)(mask_img.float().unsqueeze(1)).squeeze(1)
            loss_mask = (mask_img.float() - mask_img_pred).abs()
            loss_mask = loss_mask.mean()

            loss_iou = self.iou_loss(mask_img.float(), mask_img_pred)
            loss_dr_silhouette = (
                0.01 * loss_iou + loss_mask) * lambda_dr_silhouette

        loss['loss'] = loss_dr_rgb + loss_dr_silhouette + \
            loss_image_grad + loss['loss']
        loss['loss_dr_silhouette'] = loss_dr_silhouette
        loss['loss_dr_rgb'] = loss_dr_rgb
        loss['loss_image_gradients'] = loss_image_grad

    def visualize(self, data, cameras, lights=None, it=0, vis_type='mesh', **kwargs):
        ''' Visualized the data.

        Args:
            data (dict): data dictionary
            it (int): training iteration
            vis_type (string): visualization type ('image'|'pointcloud'|'mesh')
        '''
        if not os.path.exists(self.vis_dir):
            os.makedirs(self.vis_dir)

        # use only one instance in the mini-batch
        data = slice_dict(data, [0, ])

        with torch.autograd.no_grad():
            data = self.process_data_dict(data, cameras, lights)
            cameras = data['camera']
            lights = data['light']
            # visualize the rendered image and pointcloud
            try:
                if vis_type == 'image':
                    img_list = self.generator.generate_images(
                        data, cameras=cameras, lights=lights, **kwargs)
                    for i, img in enumerate(img_list):
                        self.tb_logger.add_image(
                            'train/vis/render%02d' % i, img[..., :3], global_step=it, dataformats='HWC')

                    # visualize ground truth image and mask
                    img_gt = data.get('img')
                    self.tb_logger.add_image(
                        'train/vis/gt', img_gt[0, :3], global_step=it, dataformats='CHW')

                elif vis_type == 'pointcloud':
                    pcl_list = self.generator.generate_pointclouds(
                        data, cameras=cameras, lights=lights, **kwargs)
                    camera_threejs = {}
                    if isinstance(cameras, FoVPerspectiveCameras):
                        camera_threejs = {'cls': 'PerspectiveCamera', 'fov': cameras.fov.item(),
                                          'far': cameras.zfar.item(), 'near': cameras.znear.item(),
                                          'aspect': cameras.aspect_ratio.item()}
                    for i, pcl in enumerate(pcl_list):
                        if isinstance(pcl, trimesh.Trimesh):
                            self.tb_logger.add_mesh('train/vis/points', np.array(pcl.vertices)[None, ...],
                                                    config_dict=camera_threejs,
                                                    global_step=it)

                elif vis_type == 'mesh':
                    mesh = self.generator.generate_mesh(
                        data, with_colors=False, with_normals=False)
                    camera_threejs = {}
                    if isinstance(cameras, FoVPerspectiveCameras):
                        # NOTE(review): the 'pointcloud' branch reads
                        # cameras.zfar/znear; this branch reads far/near —
                        # confirm both attribute pairs exist, otherwise this
                        # raises and is swallowed by the except below.
                        camera_threejs = {'cls': 'PerspectiveCamera', 'fov': cameras.fov.item(),
                                          'far': cameras.far.item(), 'near': cameras.near.item(),
                                          'aspect': cameras.aspect_ratio.item()}
                    if isinstance(mesh, trimesh.Trimesh):
                        self.tb_logger.add_mesh('train/vis/mesh', np.array(mesh.vertices)[None, ...],
                                                faces=np.array(mesh.faces)[
                            None, ...],
                            config_dict=camera_threejs, global_step=it)

            except Exception as e:
                # Visualization is best-effort: never break training over it.
                logger_py.error(
                    "Exception occurred during visualization: {} ".format(e))

    def eval(self):
        """Make models eval mode during test time"""
        # NOTE(review): self.model_names is not set in __init__ — presumably
        # provided by BaseTrainer; verify before relying on this method.
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                net.eval()

    def update_learning_rate(self, it):
        """Update learning rates for all modifiers"""
        self.scheduler.step()
        for param_group in self.optimizer.param_groups:
            v = param_group['lr']
            self.tb_logger.add_scalar('train/lr', v, it)

    def debug(self, data_dict, cameras, lights=None, it=0, mesh_gt=None, **kwargs):
        """
        output interactive plots for debugging
        # TODO(yifan): reused code from visualize
        """
        # Wait for plot threads from a previous debug call before starting.
        self._threads = getattr(self, '_threads', [])
        for t in self._threads:
            t.join()

        if not os.path.exists(self.debug_dir):
            os.makedirs(self.debug_dir)

        # use only one mini-batch
        data_dict = slice_dict(data_dict, [0, ])

        data = self.process_data_dict(data_dict, cameras, lights)

        # incoming data is channel fist
        mask_img_gt = data['mask_img'].detach().cpu().squeeze()
        H, W = mask_img_gt.shape

        # Run one forward/backward pass with debugging hooks enabled so the
        # global debug tensor collects points and their gradients.
        set_debugging_mode_(True)
        self.model.train()
        self.model.debug(True)
        self.optimizer.zero_grad()
        loss = self.compute_loss(data['img'], data['mask_img'], data['input'],
                                 data['camera'], data['light'], it=it)
        loss.backward()

        # plot
        with torch.autograd.no_grad():
            dbg_tensor = get_debugging_tensor()

            # save figure
            if self.overwrite_visualization:
                ending = ''
            else:
                ending = '%010d_' % it

            # plot ground truth mesh if provided
            if mesh_gt is not None:
                assert(len(mesh_gt) == 1), \
                    "mesh_gt and gt_mask_img must have the same or broadcastable batchsize"
                mesh_gt = mesh_gt[0]
            try:
                # prepare data to create 2D and 3D figure
                n_pts = OrderedDict((k, dbg_tensor.pts_world_grad[k][0].shape[0])
                                    for k in dbg_tensor.pts_world_grad)

                # Sanity check: points and their gradients must align.
                for i, k in enumerate(dbg_tensor.pts_world_grad):
                    if dbg_tensor.pts_world[k][0].shape[0] != n_pts[k]:
                        logger_py.error('Found unequal pts[{0}] ({2}) and pts_grad[{0}] ({1}).'.format(
                            k, n_pts[k], dbg_tensor.pts_world[k][0].shape[0]))

                pts_list = [dbg_tensor.pts_world[k][0] for k in n_pts]
                grad_list = [dbg_tensor.pts_world_grad[k][0]
                             for k in n_pts]

                pts_world = torch.cat(pts_list, dim=0)
                pts_world_grad = torch.cat(grad_list, dim=0)

                try:
                    img_mask_grad = dbg_tensor.img_mask_grad[0].clone()
                except Exception:
                    img_mask_grad = None

                # convert world to ndc — use only the first camera of a batch.
                if len(cameras) > 1:
                    _cams = cameras.clone().to(device=pts_world.device)
                    _cams.R = _cams[0:0 + 1].R
                    _cams.T = _cams[0:0 + 1].T
                    _cams._N = 1
                else:
                    _cams = cameras.clone().to(device=pts_world.device)

                pts_ndc = _cams.transform_points_screen(pts_world.view(
                    1, -1, 3), ((W, H),), eps=1e-17).view(-1, 3)[..., :2]
                pts_grad_ndc = _cams.transform_points_screen(
                    (pts_world + pts_world_grad).view(1, -1, 3), ((W, H),), eps=1e-8).view(-1, 3)[..., :2]

                # create 2D plot — split the concatenated tensors back per key.
                pts_ndc_dict = {k: t for t, k in zip(torch.split(
                    pts_ndc, list(n_pts.values())), n_pts.keys())}
                grad_ndc_dict = {k: t for t, k in zip(torch.split(
                    pts_grad_ndc, list(n_pts.values())), n_pts.keys())}

                # Plot in background threads; joined at the next debug() call.
                plotter_2d = Thread(target=plot_2D_quiver, name='%sproj.html' % ending,
                                    args=(pts_ndc_dict, grad_ndc_dict,
                                          mask_img_gt.clone()),
                                    kwargs=dict(img_mask_grad=img_mask_grad,
                                                save_html=os.path.join(
                                                    self.debug_dir, '%sproj.html' % ending)),
                                    )
                plotter_2d.start()
                self._threads.append(plotter_2d)

                # create 3D plot
                pts_world_dict = {k: t for t, k in zip(torch.split(
                    pts_world, list(n_pts.values())), n_pts.keys())}
                grad_world_dict = {k: t for t, k in zip(torch.split(
                    pts_world_grad, list(n_pts.values())), n_pts.keys())}
                plotter_3d = Thread(target=plot_3D_quiver, name='%sworld.html' % ending,
                                    args=(pts_world_dict, grad_world_dict),
                                    kwargs=dict(mesh_gt=mesh_gt, mesh=None,
                                                camera=_cams, n_debug_points=self.n_debug_points,
                                                save_html=os.path.join(self.debug_dir, '%sworld.html' % ending)),
                                    )
                plotter_3d.start()
                self._threads.append(plotter_3d)

            except Exception as e:
                logger_py.error('Could not plot gradient: {}'.format(repr(e)))

        # set debugging to false and remove hooks
        set_debugging_mode_(False)
        self.model.debug(False)
        self.iou_loss.debug(False)
        self.repulsion_loss.debug(False)
        self.projection_loss.debug(False)
        logger_py.info('Disabled debugging mode.')

        for h in self.hooks:
            h.remove()
        self.hooks.clear()

    def evaluate_2d(self, val_dataloader, reduce=True, **kwargs):
        """ evaluate the model by the rendered images """
        eval_dict = super().evaluate(val_dataloader, reduce=reduce,
                                     cameras=self.val_loader.dataset.get_cameras(), lights=self.val_loader.dataset.get_lights())

        return eval_dict
Example #18
0
def main():
    """Optimize per-ray depths against a pretrained Siren SDF with Adam.

    Parses CLI arguments, loads the SDF network, builds a grid of rays,
    then optimizes depth values `d` so that `d * ray_n + p + trans` lies on
    the SDF zero level set, logging losses and point clouds to TensorBoard.
    """
    parser = argparse.ArgumentParser(
        description='Test',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('data', metavar='DATA', help='path to file')

    parser.add_argument('--tb-save-path',
                        dest='tb_save_path',
                        metavar='PATH',
                        default='../checkpoints/',
                        help='tensorboard checkpoints path')
    parser.add_argument('--sdf-weight',
                        dest='sdf_weight',
                        metavar='PATH',
                        default=None,
                        help='pretrained weight for SDF model')

    parser.add_argument('--batchsize',
                        dest='batchsize',
                        type=int,
                        metavar='BATCHSIZE',
                        default=1,
                        help='batch size')
    parser.add_argument('--epoch',
                        dest='epoch',
                        type=int,
                        metavar='EPOCH',
                        default=200,
                        help='epochs for adam and lgd')

    parser.add_argument('--width',
                        dest='width',
                        type=int,
                        metavar='WIDTH',
                        default=128,
                        help='width for rendered image')
    parser.add_argument('--height',
                        dest='height',
                        type=int,
                        metavar='HEIGHT',
                        default=128,
                        help='height for rendered image')
    parser.add_argument('--lr',
                        dest='lr',
                        type=float,
                        metavar='LEARNING_RATE',
                        default=1e-3,
                        help='learning rate')

    parser.add_argument('--outfile',
                        dest='outfile',
                        metavar='OUTFILE',
                        help='output file')

    args = parser.parse_args()

    width = args.width
    height = args.height
    epoch = args.epoch
    lr = args.lr

    writer = SummaryWriter(args.tb_save_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # create models
    model = Siren(in_features=3,
                  out_features=1,
                  hidden_features=256,
                  hidden_layers=5,
                  outermost_linear=True).to(device)

    # Fix: `is not None` instead of `!= None`, and no bare `except:` (which
    # would also swallow KeyboardInterrupt/SystemExit). Loading stays
    # best-effort: on failure we continue with random weights, as before.
    if args.sdf_weight is not None:
        try:
            model.load_state_dict(torch.load(args.sdf_weight))
        except Exception:
            print("Couldn't load pretrained weight: " + args.sdf_weight)

    # Freeze the SDF — only the per-ray depths `d` are optimized.
    model.eval()
    for param in model.parameters():
        param.requires_grad = False

    # load: grid bounds and resolution for the ray origins
    mm = torch.tensor([-0.1, -0.1, 0.1], device=device, dtype=torch.float)
    mx = torch.tensor([0.1, 0.1, 0.1], device=device, dtype=torch.float)
    wh = torch.tensor([width, height, 1], device=device, dtype=torch.int)

    rot = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                       device=device,
                       dtype=torch.float)
    trans = torch.tensor([[0, 0, -0.8]], device=device, dtype=torch.float)

    p_distribution = GridDataset(mm, mx, wh)

    # One optimizable depth per pixel/ray.
    d = torch.zeros((width * height, 1), device=device,
                    dtype=torch.float).requires_grad_(True)

    sampler = nn.Sequential(UniformSample(width * height), PointTransform(rot))

    p = sampler(p_distribution)

    ds = ObjDataset(args.data)
    objsampler = ObjUniformSample(1000)
    x_preview = (objsampler(ds)['p']).to(device)

    # Fix: define the ray directions before the objectives that use them
    # (the original relied on late binding inside lambdas).
    ray_n = torch.tensor([[0, 0, 1]], device=device,
                         dtype=torch.float).repeat(width * height, 1)

    # Objectives as named functions instead of lambda assignments (PEP 8 E731).
    def d2_eval(d):
        """Mean squared depth regularizer."""
        return torch.pow(d, 2).mean()

    def sdf_eval(d):
        """Mean squared SDF value at the ray-marched points."""
        return torch.pow(model(d * ray_n + p + trans)[0], 2).sum(
            dim=1).mean()

    def d_eval(d):
        """Soft penalty pushing depths toward larger values."""
        return (torch.tanh(d) - 1.).mean() * 0.5

    def d2_eval_list(d):
        return d2_eval(d[0])

    def sdf_eval_list(d):
        return sdf_eval(d[0])

    def d_eval_list(d):
        return d_eval(d[0])

    # Preview: ray origins plus a sample of the reference object surface.
    writer.add_mesh("preview",
                    torch.cat([(p + trans), x_preview]).unsqueeze(0),
                    global_step=0)

    print("adam")
    optimizer = optim.Adam([d], lr=lr)

    for i in range(epoch):
        optimizer.zero_grad()

        loss = sdf_eval(d)
        loss.backward(retain_graph=True)

        optimizer.step()

        if i % 10 == 0:
            writer.add_scalars("regression_loss", {"Adam": loss},
                               global_step=i)
            writer.add_mesh("raymarch_Adam",
                            torch.cat([(d * ray_n + trans + p),
                                       x_preview]).unsqueeze(0),
                            global_step=i)

    writer.close()
Example #19
0
def main():
    """Fit a random point cloud onto the zero level set of a pretrained SDF.

    Samples ``n`` points uniformly inside the bounding box of the target
    shape, then runs Adam directly on the point coordinates to minimise the
    squared SDF value, logging the loss, the evolving cloud and the chamfer
    distance to TensorBoard every 10 steps.
    """
    parser = argparse.ArgumentParser(
        description='Test',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('data', metavar='DATA', help='path to file')

    parser.add_argument('--tb-save-path',
                        dest='tb_save_path',
                        metavar='PATH',
                        default='../checkpoints/',
                        help='tensorboard checkpoints path')

    parser.add_argument('--sdf-weight',
                        dest='sdf_weight',
                        metavar='PATH',
                        default=None,
                        help='pretrained weight for SDF model')

    parser.add_argument('--batchsize',
                        dest='batchsize',
                        type=int,
                        metavar='BATCHSIZE',
                        default=1,
                        help='batch size')
    parser.add_argument('--epoch',
                        dest='epoch',
                        type=int,
                        metavar='EPOCH',
                        default=500,
                        help='epochs for adam and lgd')
    parser.add_argument('--n',
                        dest='n',
                        type=int,
                        metavar='N',
                        default=30000,
                        help='number of points to sample')
    parser.add_argument('--lr',
                        dest='lr',
                        type=float,
                        metavar='LEARNING_RATE',
                        default=1e-3,
                        help='learning rate')

    parser.add_argument('--outfile',
                        dest='outfile',
                        metavar='OUTFILE',
                        help='output file')

    args = parser.parse_args()

    n = args.n
    epoch = args.epoch
    lr = args.lr

    writer = SummaryWriter(args.tb_save_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Pretrained SDF network; it is frozen below so that only the free point
    # positions receive gradients.
    model = Siren(in_features=3,
                  out_features=1,
                  hidden_features=256,
                  hidden_layers=5,
                  outermost_linear=True).to(device)

    if args.sdf_weight is not None:  # was `!= None`
        try:
            model.load_state_dict(torch.load(args.sdf_weight))
        # Was a bare `except:` that also swallowed KeyboardInterrupt; only
        # catch what load/load_state_dict actually raise (missing/corrupt
        # file, mismatched state dict) and keep the best-effort behavior.
        except (OSError, RuntimeError):
            print("Couldn't load pretrained weight: " + args.sdf_weight)

    model.eval()
    for param in model.parameters():
        param.requires_grad = False

    ds = ObjDataset(args.data)
    sampler = ObjUniformSample(n)

    # Reference surface samples from the target mesh.
    p = (sampler(ds)['p']).to(device)

    # Initialise the free points uniformly inside the target's bounding box.
    with torch.no_grad():
        mm = torch.min(p, dim=0)[0]
        mx = torch.max(p, dim=0)[0]

        x = torch.rand(n, 3, device=device) * (mx - mm) + mm
        x.requires_grad_(True)

    # Mean squared SDF value: zero exactly when every point lies on the
    # surface. (The unused origin/list-wrapping lambdas were removed.)
    sdf_eval = lambda x: torch.pow(model(x)[0], 2).sum(dim=1).mean()

    print("adam")
    optimizer = optim.Adam([x], lr=lr)

    for i in range(epoch):
        optimizer.zero_grad()

        loss = sdf_eval(x)
        # A fresh graph is built on every iteration and backward runs once
        # per graph, so retain_graph=True was unnecessary.
        loss.backward()

        optimizer.step()

        if i % 10 == 0:
            # .item() detaches the scalar before handing it to the writer.
            writer.add_scalars("regression_loss", {"Adam": loss.item()},
                               global_step=i)
            writer.add_mesh("point cloud regression_Adam",
                            x.unsqueeze(0),
                            global_step=i)

            writer.add_scalars("chamfer_distance",
                               {"Adam": chamfer_distance(x, p)},
                               global_step=i)

    writer.close()
Example #20
0
class logger(object):
    """Combined plain-text and TensorBoard experiment logger.

    Appends human-readable lines to ``<log_dir>/<tag>_<MMDD>_log.txt`` and
    mirrors scalars/images/meshes/histograms to a TensorBoard run under
    ``<log_dir>/board``.
    """

    def __init__(self, log_dir : str, tag : str):
        os.makedirs(log_dir, exist_ok= True)
        localtime = time.localtime(time.time()); date = "%02d%02d"%(localtime.tm_mon, localtime.tm_mday)
        logfile_path = os.path.join(log_dir, tag + "_" + date + "_" +"log.txt")
        self.txt_writer = open(logfile_path, 'a+')
        self.board_writer = SummaryWriter(log_dir=os.path.join(log_dir, "board"))

    def __del__(self):
        # Guard with getattr: __del__ can run after a partially failed
        # __init__ (e.g. open() succeeded but SummaryWriter raised), where
        # one of the writers was never created.
        txt = getattr(self, "txt_writer", None)
        if txt is not None:
            txt.close()
        board = getattr(self, "board_writer", None)
        if board is not None:
            board.close()

    def log_the_str(self, *print_paras):
        """Write each piece to stdout and the log file, then end the line."""
        for para_i in print_paras:
            print(para_i, end= "")
            print(para_i, end= "", file = self.txt_writer)
        print("")
        print("", file = self.txt_writer)
        self.txt_writer.flush()

    def log_the_table(self, title, info_table: list):
        """Render ``info_table`` (a list of rows) as an ASCII table and log it."""
        table = AsciiTable(info_table, title).table
        self.log_the_str(table)


    def log_scalars_singleline(self, info_table: list):
        """Log ``[name, value]`` pairs as one comma-separated line.

        Floats are rounded to 5 digits; ints are zero-padded to 3 digits.
        """
        line_str = ""
        for idx_i, info_pair in enumerate(info_table):
            info_val = info_pair[1]
            if isinstance(info_val, float):
                info_val = str(round(info_val, 5))
            elif isinstance(info_val, int):
                info_val = "%03d"%(info_val)
            info_str = "= ".join([str(info_pair[0]), str(info_val)])
            # Was `idx_i != len(info_table)` which is always true (enumerate
            # never reaches len), so the last pair also got a trailing ", ".
            if idx_i != len(info_table) - 1:
                info_str += ", "
            line_str += info_str
        self.log_the_str(line_str)


    def summarize_config(self, pConfig):
        """Log the salient fields of a run configuration as a table."""
        info_table = [['item', 'detail']]
        info_table.append(["config id", pConfig.saving_id])
        info_table.append(["# epochs", pConfig.total_epoch])
        info_table.append(["# checkpoint begin", pConfig.save_epoch_begin])
        info_table.append(["batch size", pConfig.ld_batchsize])
        info_table.append(["#workers ", pConfig.ld_workers])
        info_table.append(["init mode", pConfig.method_init])
        info_table.append(["device", pConfig.device_info])

        self.log_the_table("config" , info_table)

    def summarize_netarch(self, pNet):
        """Log parameter count and conv/fc layer counts of ``pNet``."""
        # input_size : (C, H, W)
        # in terminal:
        # summary(pNet, input_size)
        # in file
        info_table = [['item', 'detail']]
        para_num = 0
        for para_i in pNet.parameters():
            # para_i is a torch.nn.parameter.Parameter, grad is True by default.
            # print(para_i.shape, para_i.requires_grad)
            para_num += para_i.numel()
        info_table.append(["#parameters", para_num])

        conv_num = 0; fc_num = 0
        for m in pNet.modules():
            if isinstance(m, nn.modules.conv._ConvNd):
                # covers nn.Conv1d/2d/3d and their transposed variants
                conv_num += 1
            elif isinstance(m, nn.Linear):
                fc_num += 1
        info_table.append(["#conv_layers", conv_num])
        info_table.append(["#fc_layers", fc_num])

        self.log_the_table("Net-" + pNet._get_name() , info_table)

    def fetch_netweights(self, pNet):
        """Return all of ``pNet``'s parameters flattened into one 1-D tensor."""
        para_Lst = []
        for para_i in pNet.parameters():
            para_Lst.append(para_i.reshape(-1))
        return torch.cat(para_Lst)

    def board_net_weightdist(self, chart_tag, pNet, step_i):
        '''
        view the distribution of the net weights in tfboard.
        '''
        weights = self.fetch_netweights(pNet)
        self.board_writer.add_histogram(chart_tag, weights, step_i)

    def board_scalars_singlechart(self, chart_tag, data_dic, step_i):
        """Plot every entry of ``data_dic`` in one shared chart."""
        self.board_writer.add_scalars(chart_tag, data_dic, step_i)

    def board_scalars_multicharts(self, charts_tag, data_dic, step_i):
        """Plot each ``data_dic`` entry in its own chart named ``<tag>_<key>``."""
        # Was `data_dic.item()` (AttributeError) with the undefined name
        # `chart_tag` -- this method could never have run before.
        for key, val in data_dic.items():
            self.board_writer.add_scalar(charts_tag + "_" + key, val, step_i)

    def board_imgs_singlefig(self, fig_tag, img_Tsor, step_i):
        '''
        img_Tsor : (C, H, W)
        img_Tsor = torchvision.utils.make_grid(tensor = imgF_Tsor_bacth_i, nrow= w_layout)
        '''
        self.board_writer.add_image(fig_tag, img_Tsor, step_i)

    def board_geos_singlefig(self, fig_tag, vert_Tsor, color_Tsor, faces_Tsor, step_i):
        '''
        vert_Tsor, color_Tsor, faces_Tsor (BS, N, 3)
        '''
        self.board_writer.add_mesh(fig_tag, vert_Tsor, color_Tsor, faces_Tsor, global_step = step_i)