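The examples below are excerpts from a point-cloud reconstruction codebase (FoldingNet/DGCNN-style encoders), not complete files. A plausible shared import header for them is sketched here; dataset.Dataset is confirmed by the inline import in Example #2, but the other module paths (model, utils) and the tensorboardX dependency are assumptions:

import itertools
import os
import shutil
import sys
import time
from glob import glob

import numpy as np
import torch
import torch.optim as optim
from tensorboardX import SummaryWriter  # assumption; could also be torch.utils.tensorboard

from dataset import Dataset
from model import ReconstructionNet, ClassificationNet, SegmentationNet  # assumed module path
from utils import Logger, load_pretrain, mitsuba                         # assumed module path
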
Example #2
def visualize(args):
    # create exp directory
    file = args.model_path.split('/')
    one_model = True  # default: model_path points at a single checkpoint
    if args.exp_name is not None:
        experiment_id = args.exp_name
    elif file[-1] == '':
        experiment_id = time.strftime('%m%d%H%M%S')
    elif file[-1][-4:] == '.pkl':
        experiment_id = file[-3]
    elif file[-1] == 'models':
        experiment_id = file[-2]
        one_model = False  # a directory of checkpoints
    else:
        experiment_id = time.strftime('%m%d%H%M%S')
    save_root = os.path.join('mitsuba', experiment_id, args.dataset,
                             args.split + str(args.item))
    os.makedirs(save_root, exist_ok=True)

    # initialize dataset
    from dataset import Dataset
    dataset = Dataset(root=args.dataset_root,
                      dataset_name=args.dataset,
                      num_points=args.num_points,
                      split=args.split,
                      load_name=True)

    # load data from dataset
    pts, lb, n = dataset[args.item]
    print(
        f"Dataset: {args.dataset}, split: {args.split}, item: {args.item}, category: {n}"
    )

    # generate XML file for original point cloud
    if args.draw_original:
        save_path = os.path.join(
            save_root, args.dataset + '_' + args.split + str(args.item) + '_' +
            str(n) + '_origin.xml')
        color = [0.4, 0.4, 0.6]
        mitsuba(pts.numpy(), save_path, color)

    # generate XML file for decoder source points
    if args.draw_source_points:
        if args.shape == 'plane':
            # 45 x 45 grid on [-0.3, 0.3]^2, lifted to the z = 0 plane
            meshgrid = [[-0.3, 0.3, 45], [-0.3, 0.3, 45]]
            x = np.linspace(*meshgrid[0])
            y = np.linspace(*meshgrid[1])
            points = np.array(list(itertools.product(x, y)))
            points = np.concatenate(
                (points, np.zeros((points.shape[0], 1))), axis=1)
        elif args.shape == 'sphere':
            points = np.load("sphere.npy")
        elif args.shape == 'gaussian':
            points = np.load("gaussian.npy")
        save_path = os.path.join(
            save_root, args.dataset + '_' + args.split + str(args.item) + '_' +
            str(n) + '_epoch0.xml')
        mitsuba(points, save_path, clr=args.shape)

    # initialize model
    model = ReconstructionNet(args)

    if one_model:
        if args.model_path != '':
            model = load_pretrain(model, args.model_path)
        model.eval()
        reconstructed_pl, _ = model(pts.view(1, -1, 3))
        save_path = os.path.join(
            save_root, file[-1][:-4] + args.split + str(args.item) + '_' +
            str(n) + '.xml')
        mitsuba(reconstructed_pl[0].detach().numpy(),
                save_path,
                clr=args.shape)
    else:
        load_path = glob(os.path.join(args.model_path, '*.pkl'))
        load_path.sort()
        for path in load_path:
            model_name = path.split('/')[-1]
            model = load_pretrain(model, path)
            model.eval()
            reconstructed_pl, _ = model(pts.view(1, -1, 3))
            save_path = os.path.join(
                save_root, model_name[:-4] + '_' + args.dataset + '_' +
                args.split + str(args.item) + '_' + str(n) + '.xml')
            mitsuba(reconstructed_pl[0].detach().numpy(),
                    save_path,
                    clr=args.shape)
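
A minimal driver for visualize(), shown as a sketch: every flag mirrors an attribute the function reads, the defaults are placeholders, and ReconstructionNet(args) will read additional model-specific flags (encoder, feature dims, ...) that are not listed here:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', default=None)
parser.add_argument('--model_path', default='')  # a single .pkl file or a models/ directory
parser.add_argument('--dataset', default='shapenetcorev2')  # placeholder
parser.add_argument('--dataset_root', default='./data')     # placeholder
parser.add_argument('--split', default='test')
parser.add_argument('--item', type=int, default=0)
parser.add_argument('--num_points', type=int, default=2048)
parser.add_argument('--shape', default='plane', choices=['plane', 'sphere', 'gaussian'])
parser.add_argument('--draw_original', action='store_true')
parser.add_argument('--draw_source_points', action='store_true')
args = parser.parse_args()

visualize(args)
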
Example #3
class Reconstruction(object):
    def __init__(self, args):
        self.dataset_name = args.dataset
        if args.epochs is not None:
            self.epochs = args.epochs
        elif args.encoder == 'foldnet':
            self.epochs = 278
        elif args.encoder == 'dgcnn_cls':
            self.epochs = 250
        elif args.encoder == 'dgcnn_seg':
            self.epochs = 290
        else:
            raise ValueError(f"Unknown encoder: {args.encoder}")
        self.batch_size = args.batch_size
        self.snapshot_interval = args.snapshot_interval
        self.no_cuda = args.no_cuda
        self.model_path = args.model_path

        # create exp directory
        file = args.model_path.split('/')
        if args.exp_name is not None:
            self.experiment_id = "Reconstruct_" + args.exp_name
        elif len(file) >= 3 and file[-2] == 'models':
            self.experiment_id = file[-3]
        else:
            self.experiment_id = "Reconstruct" + time.strftime('%m%d%H%M%S')
        snapshot_root = 'snapshot/%s' % self.experiment_id
        tensorboard_root = 'tensorboard/%s' % self.experiment_id
        self.save_dir = os.path.join(snapshot_root, 'models/')
        self.tboard_dir = tensorboard_root

        # check arguments
        if self.model_path == '':
            # training from scratch: clear any stale snapshot / tensorboard dirs
            for d in (self.save_dir, self.tboard_dir):
                if os.path.exists(d):
                    shutil.rmtree(d)
                os.makedirs(d)
        sys.stdout = Logger(os.path.join(snapshot_root, 'log.txt'))
        self.writer = SummaryWriter(log_dir=self.tboard_dir)

        # print args
        print(str(args))

        # get gpu id
        gids = ''.join(args.gpu.split())
        self.gpu_ids = [int(gid) for gid in gids.split(',')]
        self.first_gpu = self.gpu_ids[0]

        # generate dataset
        self.train_dataset = Dataset(root=args.dataset_root,
                                     dataset_name=args.dataset,
                                     split='all',
                                     num_points=args.num_points,
                                     random_translate=args.use_translate,
                                     random_rotate=True,
                                     random_jitter=args.use_jitter)
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.workers)
        print("Training set size:", self.train_loader.dataset.__len__())

        # initialize model
        self.model = ReconstructionNet(args)
        if self.model_path != '':
            self._load_pretrain(args.model_path)

        # load model to gpu
        if not self.no_cuda:
            if len(self.gpu_ids) != 1:  # multiple gpus
                self.model = torch.nn.DataParallel(
                    self.model.cuda(self.first_gpu), self.gpu_ids)
            else:
                self.model = self.model.cuda(self.gpu_ids[0])

        # initialize optimizer
        self.parameter = self.model.parameters()
        self.optimizer = optim.Adam(self.parameter,
                                    # base lr 1e-4 at batch size 16, scaled
                                    # inversely with the actual batch size
                                    lr=0.0001 * 16 / args.batch_size,
                                    betas=(0.9, 0.999),
                                    weight_decay=1e-6)

    def run(self):
        self.train_hist = {'loss': [], 'per_epoch_time': [], 'total_time': []}
        best_loss = float('inf')
        print('Training start!!')
        start_time = time.time()
        self.model.train()
        if self.model_path != '':
            # resume: recover the epoch number from a checkpoint name such as
            # '<dataset>_120.pkl' (the naming scheme used by _snapshot below)
            start_epoch = self.model_path[-7:-4]
            if start_epoch[0] == '_':
                start_epoch = start_epoch[1:]
            start_epoch = int(start_epoch)
        else:
            start_epoch = 0
        for epoch in range(start_epoch, self.epochs):
            loss = self.train_epoch(epoch)

            # save snapshot
            if (epoch + 1) % self.snapshot_interval == 0:
                self._snapshot(epoch + 1)
                if loss < best_loss:
                    best_loss = loss
                    self._snapshot('best')

            # save tensorboard
            if self.writer:
                self.writer.add_scalar('Train Loss',
                                       self.train_hist['loss'][-1], epoch)
                self.writer.add_scalar('Learning Rate', self._get_lr(), epoch)

        # finish all epochs
        self._snapshot(epoch + 1)
        if loss < best_loss:
            best_loss = loss
            self._snapshot('best')
        self.train_hist['total_time'].append(time.time() - start_time)
        print("Avg one epoch time: %.2f, total %d epochs time: %.2f" %
              (np.mean(self.train_hist['per_epoch_time']), self.epochs,
               self.train_hist['total_time'][0]))
        print("Training finish!... save training results")

    def train_epoch(self, epoch):
        epoch_start_time = time.time()
        loss_buf = []
        for pts, _ in self.train_loader:
            # skip batches of size 1: BatchNorm in train mode needs > 1 sample
            if pts.size(0) == 1:
                continue

            if not self.no_cuda:
                pts = pts.cuda(self.first_gpu)

            # forward
            self.optimizer.zero_grad()
            output, _ = self.model(pts)

            # loss
            if len(self.gpu_ids) != 1:  # multiple gpus
                loss = self.model.module.get_loss(pts, output)
            else:
                loss = self.model.get_loss(pts, output)

            # backward
            loss.backward()
            self.optimizer.step()
            loss_buf.append(loss.detach().cpu().numpy())

        # finish one epoch
        epoch_time = time.time() - epoch_start_time
        self.train_hist['per_epoch_time'].append(epoch_time)
        self.train_hist['loss'].append(np.mean(loss_buf))
        print(
            f'Epoch {epoch+1}: Loss {np.mean(loss_buf)}, time {epoch_time:.4f}s'
        )
        return np.mean(loss_buf)

    def _snapshot(self, epoch):
        state_dict = self.model.state_dict()
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for key, val in state_dict.items():
            # strip the 'module.' prefix that DataParallel adds
            if key.startswith('module.'):
                name = key[7:]
            else:
                name = key
            new_state_dict[name] = val
        save_path = os.path.join(self.save_dir, self.dataset_name)
        torch.save(new_state_dict, f"{save_path}_{epoch}.pkl")
        print(f"Save model to {save_path}_{epoch}.pkl")

    def _load_pretrain(self, pretrain):
        state_dict = torch.load(pretrain, map_location='cpu')
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for key, val in state_dict.items():
            # strip the 'module.' prefix that DataParallel adds
            if key.startswith('module.'):
                name = key[7:]
            else:
                name = key
            new_state_dict[name] = val
        self.model.load_state_dict(new_state_dict)
        print(f"Load model from {pretrain}")

    def _get_lr(self, group=0):
        return self.optimizer.param_groups[group]['lr']
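
A minimal sketch of a training entry point for the class above, assuming argparse flags named after the attributes the constructor reads. The defaults are placeholders, and ReconstructionNet(args) will consume further model-specific flags not shown:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', default=None)
parser.add_argument('--model_path', default='')  # '' = train from scratch
parser.add_argument('--dataset', default='shapenetcorev2')  # placeholder
parser.add_argument('--dataset_root', default='./data')     # placeholder
parser.add_argument('--encoder', default='foldnet', choices=['foldnet', 'dgcnn_cls', 'dgcnn_seg'])
parser.add_argument('--epochs', type=int, default=None)  # None = per-encoder default
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--snapshot_interval', type=int, default=10)
parser.add_argument('--num_points', type=int, default=2048)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--gpu', default='0')  # e.g. '0' or '0,1'
parser.add_argument('--no_cuda', action='store_true')
parser.add_argument('--use_translate', action='store_true')
parser.add_argument('--use_jitter', action='store_true')
args = parser.parse_args()

Reconstruction(args).run()
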
Example #4
    def __init__(self, args):
        self.batch_size = args.batch_size
        self.no_cuda = args.no_cuda
        self.task = args.task

        # create exp directory
        if args.exp_name is not None:
            self.experiment_id = args.exp_name
        else:
            self.experiment_id = time.strftime('%m%d%H%M%S')
        cache_root = 'cache/%s' % self.experiment_id
        os.makedirs(cache_root, exist_ok=True)
        self.feature_dir = os.path.join(cache_root, 'features/')
        sys.stdout = Logger(os.path.join(cache_root, 'log.txt'))

        # check directory: clear any stale cached features
        if os.path.exists(self.feature_dir):
            shutil.rmtree(self.feature_dir)
        os.makedirs(self.feature_dir)

        # print args
        print(str(args))

        # get gpu id
        gids = ''.join(args.gpu.split())
        self.gpu_ids = [int(gid) for gid in gids.split(',')]
        self.first_gpu = self.gpu_ids[0]

        # generate dataset
        self.infer_dataset_train = Dataset(
            root=args.dataset_root,
            dataset_name=args.dataset,
            split='train',
            num_points=args.num_points,
        )
        self.infer_dataset_test = Dataset(
            root=args.dataset_root,
            dataset_name=args.dataset,
            split='test',
            num_points=args.num_points,
        )
        self.infer_loader_train = torch.utils.data.DataLoader(
            self.infer_dataset_train,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=args.workers
        )
        self.infer_loader_test = torch.utils.data.DataLoader(
            self.infer_dataset_test,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=args.workers
        )
        print("Inference set size (train):", self.infer_loader_train.dataset.__len__())
        print("Inference set size (test):", self.infer_loader_test.dataset.__len__())

        # initialize model
        if args.task == "reconstruct":
            self.model = ReconstructionNet(args)
        elif args.task == "classify":
            self.model = ClassificationNet(args)
        elif args.task == "segment":
            self.model = SegmentationNet(args)
        else:
            raise ValueError(f"Unknown task: {args.task}")
        if args.model_path != '':
            self._load_pretrain(args.model_path)

        # load model to gpu
        if not args.no_cuda:
            if len(self.gpu_ids) != 1:  # multiple gpus
                self.model = torch.nn.DataParallel(self.model.cuda(self.first_gpu), self.gpu_ids)
            else:
                self.model = self.model.cuda(self.gpu_ids[0])
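
All three constructors parse the --gpu flag the same way: strip whitespace, split on commas, convert to ints. A quick standalone check of that behavior:

gpu = '0, 1, 2'                                  # as passed on the command line
gids = ''.join(gpu.split())                      # drop all whitespace -> '0,1,2'
gpu_ids = [int(gid) for gid in gids.split(',')]
assert gpu_ids == [0, 1, 2]
first_gpu = gpu_ids[0]                           # device the model is placed on before DataParallel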