Example #1
    def __init__(self, nb_epochs, lr, resume, start_epoch, evaluate, train_loader, test_loader,
                 optim, crit, target_size, intervals, threshs, writer, synapse_reset, **kwargs):
        self.nb_epochs = nb_epochs
        self.lr = lr
        self.resume = resume
        self.start_epoch = start_epoch
        self.evaluate = evaluate
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.best_prec1 = 0
        self.target_size = target_size
        self.intervals = intervals
        self.kwargs = kwargs
        self.writer = writer
        self.cur_epoch = 0
        self.train_iter = 0
        self.test_iter = 1
        self.layer_names = []
        self.grad_idx = 0

        # probe one batch to infer the spike-train input shape
        data0, label = next(iter(self.train_loader))

        input_spikes = to_spike_train(data0, **self.kwargs)
        self.input_size = input_spikes.size()
        print('==> Building model and setting up loss and optimizer')
        self.model = spiking_resnet_18(self.input_size, synapse_reset=synapse_reset, threshs=threshs,
                                       nb_classes=self.target_size).cuda()
        self.criterion = getattr(nn, crit)().cuda()
        if optim == 'SGD':
            self.optimizer = torch.optim.SGD(self.model.parameters(), self.lr, momentum=0.9, nesterov=True)
        elif optim == 'ADAM':
            self.optimizer = torch.optim.Adam(self.model.parameters(), self.lr)
        else:
            raise ValueError('{} optimizer is not acceptable. Please select SGD or ADAM.'.format(optim))
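
A concrete call site is not shown in these examples. Purely as an illustrative sketch, construction might look like the following; the class name Trainer, the tensorboard SummaryWriter, the data loaders, and every argument value below are assumptions, not from the source:

import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('runs/spiking_resnet18')  # hypothetical log directory
trainer = Trainer(nb_epochs=100, lr=0.1, resume='', start_epoch=0,   # hypothetical values
                  evaluate=False, train_loader=train_loader,         # hypothetical loaders
                  test_loader=test_loader, optim='SGD', crit='MSELoss',
                  target_size=10, intervals=32, threshs=None,
                  writer=writer, synapse_reset=True)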
Example #2
    def validate_1epoch(self):
        print('==> Epoch:[{0}/{1}][validation stage]'.format(
            self.epoch, self.nb_epochs))
        batch_time = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        # switch to evaluate mode
        self.model.eval()
        self.dic_video_level_preds = {}
        end = time.time()
        progress = tqdm(self.test_loader)
        with torch.no_grad():
            for i, (data0, label) in enumerate(progress):

                input_spikes = to_spike_train(data0, **self.kwargs)

                if input_spikes.shape[0] != self.input_size[0]:
                    continue  # skip batches smaller than the configured batch size

                # compute output (one forward pass over the full window; the third
                # argument resets the neuron state before the pass)
                output, _ = self.model(input_spikes.cuda(), self.input_size[1],
                                       True)

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                # compute top-1 / top-5 accuracy for this batch
                prec1, prec5 = accuracy(output.data, label.cuda(), topk=(1, 5))
                top1.update(prec1.item(), data0.size(0))
                top5.update(prec5.item(), data0.size(0))
                self.writer.add_scalar('Eval/top1(step)', prec1.item(),
                                       self.test_iter)
                self.writer.add_scalar('Eval/top5(step)', prec5.item(),
                                       self.test_iter)
                self.test_iter += 1

        info = {
            'Epoch': [self.epoch],
            'Batch Time': [round(batch_time.avg, 3)],
            'Prec@1': [round(top1.avg, 4)],
            'Prec@5': [round(top5.avg, 4)],
        }
        record_info(info, 'record/spatial/rgb_test.csv', 'test')
        return top1.avg
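
AverageMeter and accuracy are referenced but never defined in these snippets. A minimal sketch compatible with the calls above, assuming the helpers follow the standard PyTorch ImageNet-example pattern:

import torch

class AverageMeter:
    """Tracks a running average, as used for batch_time/top1/top5 above."""
    def __init__(self):
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def accuracy(output, target, topk=(1,)):
    """Top-k precision (in percent) for a batch of class scores."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)  # indices of the maxk largest scores
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res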
Example #3
    def train_1epoch(self):
        print('==> Epoch:[{0}/{1}][training stage]'.format(
            self.epoch, self.nb_epochs))
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        # switch to train mode
        self.model.train()
        end = time.time()
        # mini-batch training
        progress = tqdm(self.train_loader)
        for i, (data0, label) in enumerate(progress):
            # self.set_grads_requirements()
            self.grad_idx += 1
            # measure data loading time
            data_time.update(time.time() - end)

            input_spikes = to_spike_train(data0, **self.kwargs)
            label_one_hot = self.to_one_hot(label, self.target_size)

            window_blocks = self.input_size[1] // self.intervals
            if input_spikes.shape[0] != self.input_size[0]:
                continue  # skip batches smaller than the configured batch size
            reset = True
            # cur_lr = lr_step_size = self.lr / window_blocks
            for wb in range(window_blocks):
                output, rs = self.model(
                    input_spikes[:, wb * self.intervals:(wb + 1) *
                                 self.intervals].cuda(), self.intervals, reset)

                for sn, r in enumerate(rs):
                    self.writer.add_scalar('Spikes/' + str(sn), r,
                                           self.train_iter)

                reset = False
                loss = self.criterion(output, label_one_hot.cuda())
                # compute gradient and do SGD step
                self.optimizer.zero_grad()
                # loss.backward(retain_graph=True)
                loss.backward()
                # torch.nn.utils.clip_grad_value_(self.model.parameters(), 0.2)
                self.record_grads()
                # optional: clip the gradient norm to mitigate the exploding-gradient problem
                # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.25)
                # self.optimizer.param_groups[0]['lr'] = cur_lr  # TODO: check with nesterov and wd
                self.optimizer.step()
                # cur_lr += lr_step_size
                self.train_iter += 1

            # --- measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, label.cuda(), topk=(1, 5))
            losses.update(loss.item(), data0.size(0))
            top1.update(prec1.item(), data0.size(0))
            top5.update(prec5.item(), data0.size(0))
            self.writer.add_scalar('Train/loss(step)', loss.item(),
                                   self.train_iter // window_blocks)
            self.writer.add_scalar('Train/top1(step)', prec1.item(),
                                   self.train_iter // window_blocks)
            self.writer.add_scalar('Train/top5(step)', prec5.item(),
                                   self.train_iter // window_blocks)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

        info = {
            'Epoch': [self.epoch],
            'Batch Time': [round(batch_time.avg, 3)],
            'Data Time': [round(data_time.avg, 3)],
            'Loss': [round(losses.avg, 5)],
            'Prec@1': [round(top1.avg, 4)],
            'Prec@5': [round(top5.avg, 4)],
            'lr': self.optimizer.param_groups[0]['lr']
        }
        record_info(info, 'record/spatial/rgb_train.csv', 'train')
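
to_one_hot and the surrounding epoch loop are also not shown. A plausible sketch of both, as methods of the same class; the run method, its checkpoint-free structure, and the use of F.one_hot are assumptions:

    def to_one_hot(self, label, num_classes):
        # dense one-hot targets for a regression-style criterion such as MSELoss
        return torch.nn.functional.one_hot(label, num_classes).float()

    def run(self):
        for self.epoch in range(self.start_epoch, self.nb_epochs):
            self.train_1epoch()
            prec1 = self.validate_1epoch()
            self.best_prec1 = max(self.best_prec1, prec1)  # track best top-1 accuracy

Note that __init__ above initializes self.cur_epoch while both methods read self.epoch, so the sketch assigns self.epoch directly.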
Example #4
    import numpy as np
    import matplotlib.pyplot as plt

    to_st_train_kwargs['out_h'] = wh  # args.Q_resolution

    train_data = get_loader(24, train=True, **get_loader_kwargs)
    gen_train = iter(train_data)

    fig, ax = plt.subplots(2, 1)
    plt.ion()
    plt.show()
    for step in range(10):
        try:
            inputs, labels = next(gen_train)
        except StopIteration:
            # restart the iterator once the loader is exhausted
            gen_train = iter(train_data)
            inputs, labels = next(gen_train)

        input_spikes = to_spike_train(inputs, **to_st_train_kwargs)

        for idx in range(24):
            if labels[idx] == modulation_idx:
                img = None
                # stack the 1024 spike frames of this sample into a 3-D volume
                im3d = np.zeros((1024, wh, wh), dtype=np.uint8)
                for i in range(1024):
                    im3d[i] = input_spikes[idx, i, 0, :, :]
                    if False:  # flip to True to watch the spikes accumulate frame by frame
                        ax[0].clear()
                        ax[1].clear()
                        if img is None:
                            img = input_spikes[idx, i, 0, :, :]
                        else:
                            img += input_spikes[idx, i, 0, :, :]
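
The snippet is cut off inside the disabled debug branch. If the accumulated im3d volume were displayed afterwards, one hypothetical continuation (not from the source) could be:

                # hypothetical: per-pixel spike counts summed over all 1024 time steps
                ax[0].imshow(im3d.sum(axis=0), cmap='gray')
                # hypothetical: total spikes emitted at each time step
                ax[1].plot(im3d.sum(axis=(1, 2)))
                plt.pause(0.5)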