        if self.args['GENERAL']['debug']:
            model_utils.check_occupation(complet_inputs['complet_input'],
                                         batch_complet.dense())

        complet_output = self.complet_head(batch_complet)
        torch.cuda.empty_cache()

        return seg_output, complet_output


classifier = J3SC_Net()
if use_cuda:
    classifier = classifier.cuda()
classifier = classifier.eval()

scn.checkpoint_restore(classifier, model_path, use_cuda)
print('#classifier parameters %d' %
      sum([x.nelement() for x in classifier.parameters()]))

kitti_dataset = importlib.import_module('kitti_dataset')
input_data = kitti_dataset.get_dataset(config, split=args.dataset)
data_loader = torch.utils.data.DataLoader(
    input_data,
    batch_size=1,
    collate_fn=seg_head.Merge,
    num_workers=config['TRAIN']['train_workers'],
    pin_memory=True,
    shuffle=False,
    drop_last=False,
    worker_init_fn=lambda x: np.random.seed(x + int(time.time())))
num_sample = len(data_loader)
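# Hedged sketch (not in the original snippet): a minimal inference loop over
# data_loader with the restored classifier. It assumes the batches produced by
# seg_head.Merge can be fed to the network as-is and that, as in the forward()
# above, the model returns (seg_output, complet_output); the argmax call is an
# illustrative way to turn class scores into per-point labels.
with torch.no_grad():
    for i, batch in enumerate(data_loader):
        seg_output, complet_output = classifier(batch)
        seg_pred = seg_output.argmax(dim=1)  # per-point semantic predictions
        print('processed sample %d / %d' % (i + 1, num_sample))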
Example #2
class Model(nn.Module):
    def __init__(self):
        nn.Module.__init__(self)
        # NOTE: the first layers of the scn.Sequential chain are truncated in the
        # original snippet; only the tail of the .add(...) chain is visible here.
        self.sparseModel = scn.Sequential().add(
                                     scn.BatchNormReLU(m)).add(
                                         scn.OutputLayer(data.dimension))
        self.linear = nn.Linear(m, 20)

    def forward(self, x):
        x = self.sparseModel(x)
        x = self.linear(x)
        return x


unet = Model()
if use_cuda:
    unet = unet.cuda()

training_epochs = 2048
training_epoch = scn.checkpoint_restore(unet, exp_name, 'unet', use_cuda)
optimizer = optim.Adam(unet.parameters())
print('#classifier parameters', sum([x.nelement() for x in unet.parameters()]))

for epoch in range(training_epoch, training_epochs + 1):
    unet.train()
    stats = {}
    scn.forward_pass_multiplyAdd_count = 0
    scn.forward_pass_hidden_states = 0
    start = time.time()
    train_loss = 0
    for i, batch in enumerate(data.train_data_loader):
        optimizer.zero_grad()
        if use_cuda:
            batch['x'][1] = batch['x'][1].cuda()
            batch['y'] = batch['y'].cuda()
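        # Hedged sketch: the original snippet is truncated here. A typical
        # SparseConvNet training step continues with a forward pass, a
        # cross-entropy loss on the per-point class scores, and an optimizer
        # update, roughly as follows.
        predictions = unet(batch['x'])
        loss = torch.nn.functional.cross_entropy(predictions, batch['y'])
        train_loss += loss.item()
        loss.backward()
        optimizer.step()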
Example #3
                                              requires_grad=True)  # tail of a statement from the truncated __init__ of J3SC_Net

    def forward(self, x):
        seg_output, _ = self.seg_head(x)

        return seg_output


classifier = J3SC_Net(config)
print(classifier)

if use_cuda:
    classifier = classifier.cuda()
classifier = classifier.eval()

training_epoch = scn.checkpoint_restore(classifier, model_path, use_cuda)
print('#classifier parameters %d' %
      sum([x.nelement() for x in classifier.parameters()]))
'''Load Dataset'''
config_file = os.path.join('opt', 'SemanticPOSS.yaml')
with open(config_file, 'r') as config_stream:
    poss_config = yaml.safe_load(config_stream)
scan = laserscan.SemLaserScan(nclasses=11,
                              sem_color_dict=poss_config['color_map'])
sequences = poss_config['split']['valid']
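# Hedged sketch (not in the original snippet): loading one scan/label pair with the
# SemLaserScan object above. It assumes the semantic-kitti-api style laserscan API
# (open_scan / open_label / colorize) and the usual sequences/<seq>/velodyne and
# labels folder layout; dataset_root and the file name are illustrative placeholders.
dataset_root = '/path/to/SemanticPOSS/dataset'
seq_dir = os.path.join(dataset_root, 'sequences', '%02d' % sequences[0])
scan.open_scan(os.path.join(seq_dir, 'velodyne', '000000.bin'))    # fills scan.points
scan.open_label(os.path.join(seq_dir, 'labels', '000000.label'))   # fills scan.sem_label
scan.colorize()                                                    # fills scan.sem_label_color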

label_to_names = {
    0: 'people',
    1: 'rider',
    2: 'car',
    3: 'trunk',
    4: 'plants',