Example #1
import os
import shutil
import time

import torch
import torch.nn as nn
import torch.optim as optim

# Project-specific modules: the dataset/dataloader paths mirror the
# shutil.copy2 calls below; the KPCNN import path is an assumption.
from datasets.ShapeNet import ShapeNetDataset
from datasets.dataloader import get_dataloader
from models.KPCNN import KPCNN

class Args(object):
    """Experiment setup: snapshot dirs, data, model, optimizer, and schedule."""

    def __init__(self, config):
        is_test = False  # hard-coded flag; when True, tags the experiment id as a test run
        if is_test:
            self.experiment_id = "KPConvNet" + time.strftime('%m%d%H%M') + 'Test'
        else:
            self.experiment_id = "KPConvNet" + time.strftime('%m%d%H%M')

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.verbose = True

        # snapshot
        self.snapshot_interval = 5
        snapshot_root = f'snapshot/{config.dataset}_{self.experiment_id}'
        tensorboard_root = f'tensorboard/{config.dataset}_{self.experiment_id}'
        os.makedirs(snapshot_root, exist_ok=True)
        os.makedirs(tensorboard_root, exist_ok=True)
        shutil.copy2(os.path.join('.', 'training_ShapeNetCls.py'), os.path.join(snapshot_root, 'train.py'))
        shutil.copy2(os.path.join('datasets', 'ShapeNet.py'), os.path.join(snapshot_root, 'dataset.py'))
        shutil.copy2(os.path.join('datasets', 'dataloader.py'), os.path.join(snapshot_root, 'dataloader.py'))
        self.save_dir = os.path.join(snapshot_root, 'models/')
        self.result_dir = os.path.join(snapshot_root, 'results/')
        self.tboard_dir = tensorboard_root

        # dataset & dataloader
        self.train_set = ShapeNetDataset(root=config.data_train_dir,
                                         split='train',
                                         first_subsampling_dl=config.first_subsampling_dl,
                                         classification=True,
                                         config=config,
                                         )
        self.test_set = ShapeNetDataset(root=config.data_test_dir,
                                        split='test',
                                        first_subsampling_dl=config.first_subsampling_dl,
                                        classification=True,
                                        config=config,
                                        )
        self.train_loader = get_dataloader(dataset=self.train_set,
                                           batch_size=config.train_batch_size,
                                           shuffle=True,
                                           num_workers=4,
                                           )
        self.test_loader = get_dataloader(dataset=self.test_set,
                                          batch_size=config.test_batch_size,
                                          shuffle=False,
                                          num_workers=4,
                                          )
        print("Training set size:", self.train_loader.dataset.__len__())
        print("Test set size:", self.test_loader.dataset.__len__())

        # model
        self.model = KPCNN(config)
        self.resume = config.resume
        # training schedule & optimizer
        self.start_epoch = 0
        self.epoch = config.max_epoch  # total number of training epochs
        self.optimizer = optim.SGD(self.model.parameters(), lr=config.learning_rate, momentum=config.momentum, weight_decay=1e-6)
        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=config.exp_gamma)
        self.scheduler_interval = config.exp_interval

        # evaluate
        self.evaluate_interval = 1
        self.evaluate_metric = nn.CrossEntropyLoss(reduction='mean')

        self.check_args()

    def check_args(self):
        """Create the output directories if they do not exist yet."""
        os.makedirs(self.save_dir, exist_ok=True)
        os.makedirs(self.result_dir, exist_ok=True)
        os.makedirs(self.tboard_dir, exist_ok=True)
        return self
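
A hedged usage sketch for Example #1: the SimpleNamespace below stands in for the project's real config object, every value is a placeholder, and only the attributes that Args.__init__ itself reads are filled in (KPCNN and ShapeNetDataset likely read further fields):

from types import SimpleNamespace

# Hypothetical driver; placeholder values throughout.
config = SimpleNamespace(
    dataset='ShapeNet',
    data_train_dir='Data/ShapeNet/train',
    data_test_dir='Data/ShapeNet/test',
    first_subsampling_dl=0.02,
    train_batch_size=16,
    test_batch_size=16,
    resume=None,
    max_epoch=200,
    learning_rate=1e-2,
    momentum=0.98,
    exp_gamma=0.1 ** (1 / 80),
    exp_interval=1,
)

args = Args(config)                      # builds dirs, datasets, model, optimizer
args.model = args.model.to(args.device)  # honor the device chosen in __init__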
Example #2
import torch
from loguru import logger  # brace-style log calls below suggest loguru (an assumption)
from torch.utils.tensorboard import SummaryWriter

# Project-specific helpers; these import paths are assumptions about the
# repository layout.
from config import get_cfg_defaults
from datasets.ModelNet import ModelNet
from datasets.dataloader import MultiScaleDataLoader, shadow_neigh
from datasets.transforms import Compose, DataSubsampling, Neighbors
from models.KPCNN import KPCNN, get_list_constants
from models.losses import (compute_classification_accuracy,
                           compute_classification_loss)

def train():

    logger.info("test Dataloader")
    cfg = get_cfg_defaults()
    cfg.merge_from_file("./yaml/modelnet.yaml")

    writer = SummaryWriter()

    voxel_size = cfg.NETWORK.FIRST_SUBSAMPLING_DL  # first subsampling grid size
    mode_neigh = 0  # 0 = neighbor mode, otherwise edge mode
    (architecture, list_voxel_size, list_radius,
     list_radius_conv, list_size) = get_list_constants(
        voxel_size, cfg.NETWORK.DENSITY_PARAMETER / 2,
        cfg.NETWORK.ARCHITECTURE, cfg.NETWORK.FIRST_DIM,
        cfg.INPUT.IN_FEATURES_DIM,
        cfg.INPUT.NUM_CLASSES)

    list_transfo = [DataSubsampling(list_voxel_size)]
    neigh = Neighbors(list_radius,
                      max_num_neighbors=cfg.INPUT.MAX_NUM_NEIGHBORS,
                      is_pool=True,
                      is_upsample=False,
                      mode=mode_neigh)
    list_transfo.append(neigh)

    transfo = Compose(list_transfo)

    dataset = ModelNet("./Data/ModelNet40",
                       subsampling_parameter=voxel_size,
                       transforms=transfo)

    dataloader = MultiScaleDataLoader(dataset,
                                      batch_size=cfg.NETWORK.BATCH_NUM,
                                      shuffle=True,
                                      num_workers=cfg.SYSTEM.NUM_WORKERS,
                                      pin_memory=False)

    model = KPCNN(architecture,
                  list_radius_conv,
                  list_size,
                  cfg,
                  mode=mode_neigh)
    model.cuda()  # move parameters to the GPU before building the optimizer
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=cfg.TRAIN.LEARNING_RATE,
                                momentum=cfg.TRAIN.MOMENTUM)
    for e in range(1):  # single epoch: this is a dataloader/training smoke test
        logger.info("Epoch {:d}", e)
        model.train()
        for i, batch in enumerate(dataloader):
            if mode_neigh == 0:
                batch = shadow_neigh(batch).to('cuda')
            else:
                batch = batch.to('cuda')
            pred = model(batch)
            loss = compute_classification_loss(pred, batch.y, model,
                                               cfg.TRAIN.WEIGHTS_DECAY)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % cfg.TRAIN.LOG_INTERVAL == 0:
                accuracy = compute_classification_accuracy(pred, batch.y)
                global_step = e * len(dataloader) + i
                logger.info("Epoch: {} Step {}, loss {:3f}, accuracy: {:3f}",
                            e, i, loss.item(), accuracy.item())
                writer.add_scalar('Loss/train', loss.item(), global_step)
                writer.add_scalar('Accuracy/train', accuracy.item(),
                                  global_step)
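    # Flush buffered TensorBoard events once the smoke test finishes
    # (the original listing never closes the writer).
    writer.close()

A minimal entry-point sketch, assuming train() is meant to be run directly as a script; this guard is not part of the original listing:

if __name__ == "__main__":
    train()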