Example #1
 def forward(self, x):
     Vthr = self.Vthr
     kernel_size = self.kernel_size
     stride = self.stride
     padding = self.padding
     if isinstance(x, SpikeTensor):
         if stride is None:
             stride = kernel_size
         weight = torch.ones([x.chw[0], 1,
                              *_pair(kernel_size)]).to(x.data.device)
         out = F.conv2d(x.data,
                        weight,
                        None,
                        _pair(stride),
                        _pair(padding),
                        1,
                        groups=x.chw[0])
         chw = out.size()[1:]
         out_s = out.view(x.timesteps, -1, *chw)
         self.mem_potential = torch.zeros(out_s.size(1),
                                          *chw).to(out_s.device)
         spikes = generate_spike_mem_potential(out_s, self.mem_potential,
                                               Vthr, self.reset_mode)
         out = SpikeTensor(
             torch.cat(spikes, 0), x.timesteps,
             F.avg_pool2d(x.scale_factor.unsqueeze(0), kernel_size, stride,
                          padding).squeeze(0))
         return out
     else:
         out = F.avg_pool2d(x, kernel_size, stride, padding)
         return out
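A minimal usage sketch for this spiking pooling forward (hedged: the enclosing class name, called SpikeAvgPool2d below, and its constructor are assumptions; only the SpikeTensor(data, timesteps, scale_factor) signature is visible in these examples):

import torch

timesteps, batch, c, h, w = 8, 4, 16, 32, 32
# binary spike trains stacked along dim 0 as (timesteps * batch, C, H, W)
spikes = (torch.rand(timesteps * batch, c, h, w) > 0.5).float()
x = SpikeTensor(spikes, timesteps, scale_factor=torch.ones(c, h, w))
pool = SpikeAvgPool2d(kernel_size=2)  # hypothetical constructor
out = pool(x)  # a SpikeTensor whose spikes are re-thresholded pooled inputs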
Example #2
 def forward(self, x):
     if isinstance(x, SpikeTensor):
         Vthr = self.Vthr.view(1, -1, 1, 1)
         out = F.conv_transpose2d(x.data, self.weight, self.bias,
                                  self.stride, self.padding,
                                  self.output_padding, self.groups,
                                  self.dilation)
         if self.bn is not None:
             out = self.bn(out)
         chw = out.size()[1:]
         out_s = out.view(x.timesteps, -1, *chw)
         self.mem_potential = torch.zeros(out_s.size(1),
                                          *chw).to(out_s.device)
         spikes = generate_spike_mem_potential(out_s, self.mem_potential,
                                               Vthr, self.reset_mode)
         out = SpikeTensor(torch.cat(spikes, 0), x.timesteps,
                           self.out_scales)
         return out
     else:
         out = F.conv_transpose2d(x, self.weight, self.bias,
                                  self.stride, self.padding,
                                  self.output_padding, self.groups,
                                  self.dilation)
         if self.bn is not None:
             out = self.bn(out)
         return out
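The threshold is stored per output channel and viewed as (1, C, 1, 1) so it broadcasts against (batch, C, H, W) membrane potentials. A self-contained broadcast demonstration in plain PyTorch (no project code assumed):

import torch

Vthr = torch.tensor([0.5, 1.0, 2.0]).view(1, -1, 1, 1)  # one threshold per channel
mem = torch.rand(4, 3, 8, 8)  # (batch, C, H, W) membrane potentials
spikes = (mem >= Vthr).float()  # broadcasts over batch and spatial dims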
Example #3
 def __call__(self, *xs):
     if isinstance(xs[0], SpikeTensor):
         out = torch.cat([_.data for _ in xs], dim=self.dim)
         scale_factor = torch.cat([_.scale_factor for _ in xs],
                                  dim=self.dim - 1)
         out = SpikeTensor(out, xs[0].timesteps, scale_factor)
     else:
         out = torch.cat(xs, dim=self.dim)
     return out
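The scale factors are concatenated along self.dim - 1 because x.data carries a leading (timesteps * batch) axis that scale_factor lacks, so each data dimension maps to the scale dimension one index lower. A shape sketch under those assumptions (the wrapper name Concat is hypothetical):

a = SpikeTensor(torch.rand(8, 3, 4, 4), 2, torch.ones(3, 4, 4))
b = SpikeTensor(torch.rand(8, 5, 4, 4), 2, torch.ones(5, 4, 4))
cat = Concat(dim=1)  # hypothetical module holding self.dim
out = cat(a, b)  # out.data: (8, 8, 4, 4); out.scale_factor: (8, 4, 4)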
Example #4
 def forward(self, x):
     if isinstance(x, SpikeTensor):
         Vthr = self.Vthr.view(1, -1)
         out = F.linear(x.data, self.weight, self.bias)
         chw = out.size()[1:]
         out_s = out.view(x.timesteps, -1, *chw)
         self.mem_potential = torch.zeros(out_s.size(1), *chw).to(out_s.device)
         spikes = generate_spike_mem_potential(out_s, self.mem_potential, Vthr, self.reset_mode)
         out = SpikeTensor(torch.cat(spikes, 0), x.timesteps, self.out_scales)
         return out
     else:
         out = F.linear(x, self.weight, self.bias)
         if not self.last_layer:
             out = F.relu(out)
         return out
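Examples #1, #2, and #4 all delegate spike generation to generate_spike_mem_potential(out_s, mem_potential, Vthr, reset_mode), whose body is not shown here. A hedged sketch of the integrate-and-fire loop it plausibly implements (the reset_mode value names are assumptions):

def generate_spike_mem_potential_sketch(out_s, mem_potential, Vthr, reset_mode):
    # out_s: (timesteps, batch, ...); mem_potential: (batch, ...)
    spikes = []
    for t in range(out_s.size(0)):
        mem_potential = mem_potential + out_s[t]  # integrate weighted input
        spike = (mem_potential >= Vthr).float()  # fire where threshold is crossed
        if reset_mode == 'zero':  # assumed name: hard reset to zero
            mem_potential = mem_potential * (1.0 - spike)
        else:  # assumed default: subtraction ("soft") reset
            mem_potential = mem_potential - spike * Vthr
        spikes.append(spike)
    return spikes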
Example #5
def mobile_evaluate(
        opt,
        data,
        ann,
        snn,
        timesteps=16,
        batch_size=16,
        imgsz=224,
        conf_thres=0.001,
        iou_thres=0.6,  # for nms
        save_json=False,
        single_cls=False,
        augment=False,
        dataloader=None,
        multi_label=True):
    verbose = opt.task == 'test'
    device = torch_utils.select_device(opt.device, batch_size=batch_size)
    # Configure run
    nc = 1000  # number of classes
    path = data  # path to test images

    # Dataloader
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])

    dataloader, testloader = get_loaders(opt.data, batch_size, batch_size, imgsz, nw)

    snn.eval()
    snn.to(device)
    print(imgsz)

    nb = len(dataloader)
    acc_top1 = 0
    acc_top5 = 0
    cnt = 0
    with torch.no_grad():
        for batch_i, (imgs, targets) in enumerate(tqdm(dataloader, total=nb)):
            snn.zero_grad()
            imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0

            targets = targets.to(device)

            replica_data = torch.cat([imgs for _ in range(timesteps)], 0)  # replicate the input across timesteps for the first layer
            data = SpikeTensor(replica_data, timesteps, scale_factor=1)

            # Run model
            spike_tensor.firing_ratio_record = True
            output_snn = snn(data)

            spike_tensor.firing_ratio_record = False
            output_ann_true = output_snn.to_float()
            print(output_ann_true.shape)
            print((output_ann_true != 0).sum())
            pred = F.log_softmax(output_ann_true.view(output_ann_true.size(0), -1), dim=1)
            pred_max = pred.max()

            corr = correct(pred, targets, topk=(1, 5))
            acc_top1 += float(corr[0])
            acc_top5 += float(corr[1])
            cnt += imgs.size(0)  # actual batch size (the last batch may be smaller)
            print('accuracy:', 100. * acc_top1 / cnt, 100. * acc_top5 / cnt,
                  pred_max)
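mobile_evaluate relies on a correct(pred, targets, topk=...) helper that is not part of this example. Dividing its outputs by cnt implies it returns correct-sample counts, so a plausible sketch is the standard top-k counter:

import torch

def correct_sketch(output, target, topk=(1,)):
    # number of correctly classified samples for each k in topk
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    matches = pred.eq(target.view(1, -1).expand_as(pred))
    return [matches[:k].reshape(-1).float().sum().item() for k in topk]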
Example #6
def snn_evaluate(
        opt,
        data,
        ann,
        snn,
        timesteps=16,
        batch_size=16,
        imgsz=416,
        conf_thres=0.001,
        iou_thres=0.6,  # for nms
        save_json=False,
        single_cls=False,
        augment=False,
        dataloader=None,
        multi_label=True):
    verbose = opt.task == 'test'
    device = torch_utils.select_device(opt.device, batch_size=batch_size)
    # Configure run
    data = parse_data_cfg(data)
    nc = 1 if single_cls else int(data['classes'])  # number of classes
    path = data['valid']  # path to test images
    names = load_classes(data['names'])  # class names
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    iouv = iouv[0].view(1)  # comment out this line for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if dataloader is None:
        dataset = LoadImagesAndLabels(path,
                                      imgsz,
                                      batch_size,
                                      rect=True,
                                      single_cls=opt.single_cls,
                                      pad=0.5)
        batch_size = min(batch_size, len(dataset))
        dataloader = DataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=min([
                                    os.cpu_count(),
                                    batch_size if batch_size > 1 else 0, 8
                                ]),
                                pin_memory=True,
                                collate_fn=dataset.collate_fn)

    seen = 0
    ann.eval()
    snn.eval()
    ann.to(device)
    snn.to(device)
    print(imgsz)
    _ = ann(torch.zeros((1, 3, imgsz, imgsz), device=device)) if device.type != 'cpu' else None  # run once
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@0.5', 'F1')
    p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    total_firing_ratios = []
    for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = imgs.shape  # batch size, channels, height, width

        whwh = torch.Tensor([width, height, width, height]).to(device)
        replica_data = torch.cat([imgs for _ in range(timesteps)], 0)  # replicate the input across timesteps for the first layer
        data = SpikeTensor(replica_data, timesteps, scale_factor=1)
        # Disable gradients
        with torch.no_grad():
            # Run model (two branches; the post-processing below defines the
            # `output` consumed by the per-image statistics that follow)
            t = torch_utils.time_synchronized()
            spike_tensor.firing_ratio_record = True
            output_snn1, output_snn2 = snn(data)  # two branches
            spike_tensor.firing_ratio_record = False
            output_ann1 = output_snn1.to_float()  # spike to real-value
            output_ann2 = output_snn2.to_float()
            # post-processing: conv, yolo
            output_ann1 = ann.module_list[14](output_ann1)
            output_ann2 = ann.module_list[-2](output_ann2)
            yolo_outputs, out = [], []
            yolo_outputs.append(ann.module_list[15](output_ann1, out))
            yolo_outputs.append(ann.module_list[-1](output_ann2, out))
            inf_out, _ = zip(*yolo_outputs)  # inference output, training output
            inf_out = torch.cat(inf_out, 1)  # cat yolo outputs
            t0 += torch_utils.time_synchronized() - t

            # Run NMS
            t = torch_utils.time_synchronized()
            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, multi_label=multi_label)
            t1 += torch_utils.time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                  torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to text file
            # with open('test.txt', 'a') as file:
            #    [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(Path(paths[si]).stem.split('_')[-1])
                box = pred[:, :4].clone()  # xyxy
                scale_coords(imgs[si].shape[1:], box, shapes[si][0],
                             shapes[si][1])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({
                        'image_id': image_id,
                        'category_id': coco91class[int(p[5])],
                        'bbox': [round(x, 3) for x in b],
                        'score': round(p[4], 5)
                    })

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0],
                                  niou,
                                  dtype=torch.bool,
                                  device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5]) * whwh

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero().view(-1)  # target indices
                    pi = (cls == pred[:, 5]).nonzero().view(-1)  # prediction indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1)  # best ious, indices

                        # Append detections
                        for j in (ious > iouv[0]).nonzero():
                            d = ti[i[j]]  # detected target
                            if d not in detected:
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(detected) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append(
                (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # Plot images
        if batch_i < 1:
            f = 'test_batch%g_gt.jpg' % batch_i  # filename
            plot_images(imgs, targets, paths=paths, names=names,
                        fname=f)  # ground truth
            f = 'test_batch%g_pred.jpg' % batch_i
            plot_images(imgs,
                        output_to_target(output, width, height),
                        paths=paths,
                        names=names,
                        fname=f)  # predictions

        total_firing_ratios.append(
            [_.mean().item() for _ in spike_tensor.firing_ratios])
        spike_tensor.firing_ratios = []

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats):
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        if niou > 1:
            p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean(1), ap[:, 0]  # [P, R, mAP@0.5:0.95, mAP@0.5]
        mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()
        nt = np.bincount(stats[3].astype(np.int64),
                         minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%10.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))

    # Print speeds
    if verbose or save_json:
        t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (
            imgsz, imgsz, batch_size)  # tuple
        print(
            'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g'
            % t)

    # Save JSON
    if save_json and map and len(jdict):
        print('\nCOCO mAP with pycocotools...')
        imgIds = [
            int(Path(x).stem.split('_')[-1])
            for x in dataloader.dataset.img_files
        ]
        with open('results.json', 'w') as file:
            json.dump(jdict, file)

        try:
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            cocoGt = COCO(
                glob.glob('../coco/annotations/instances_val*.json')
                [0])  # initialize COCO ground truth api
            cocoDt = cocoGt.loadRes('results.json')  # initialize COCO pred api

            cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
            cocoEval.params.imgIds = imgIds  # [:32]  # only evaluate these images
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            # mf1, map = cocoEval.stats[:2]  # update to pycocotools results (mAP@0.5:0.95, mAP@0.5)
        except Exception:
            print('WARNING: pycocotools must be installed with numpy==1.17 to run correctly. '
                  'See https://github.com/cocodataset/cocoapi/issues/356')

    # Return results
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]

    total_firing_ratios = np.mean(total_firing_ratios, 0)
    mean_firing_ratio = total_firing_ratios.mean()
    print(
        f"Mean Firing ratios {mean_firing_ratio}, Firing ratios: {total_firing_ratios}"
    )

    for layer in snn.modules():
        if hasattr(layer, 'mem_potential'):
            layer.mem_potential = None
    return (
        mp, mr, map, mf1,
        *(loss.cpu() / len(dataloader)).tolist()), maps, total_firing_ratios
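Both evaluators record layer activity through module-level state in spike_tensor: set a flag, run the network so spiking layers append their firing ratios, then read and reset the list. Isolated, the pattern used above is:

spike_tensor.firing_ratio_record = True  # spiking layers start recording
output = snn(data)  # forward pass appends one ratio tensor per layer
spike_tensor.firing_ratio_record = False
ratios = [r.mean().item() for r in spike_tensor.firing_ratios]
spike_tensor.firing_ratios = []  # reset between batches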
Example #7
def snn_evaluate(ann, snn, path, iou_thres, conf_thres, nms_thres, img_size, batch_size, timesteps, device):
    ann.eval()
    snn.eval()
    ann.to(device)
    snn.to(device)


    # Get dataloader
    dataset = ListDataset(path, img_size=img_size, augment=False, multiscale=False)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn
    )

    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    total_firing_ratios = []
    for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")):
        if targets is None:
            continue
        # Extract labels
        labels += targets[:, 1].tolist()
        # Rescale target
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size

        imgs = imgs.type(Tensor)
        # print(f"imgs.size: {imgs.size()}, timesteps: {timesteps}")
        replica_data = torch.cat([imgs for _ in range(timesteps)], 0)  # replicate the input across timesteps for the first layer
        data = SpikeTensor(replica_data, timesteps, scale_factor=1)

        with torch.no_grad():
            spike_tensor.firing_ratio_record = True
            output_snn1, output_snn2 = snn(data)  # two branches
            spike_tensor.firing_ratio_record = False
            output_ann1 = output_snn1.to_float()  # spike to real-value
            output_ann2 = output_snn2.to_float()
            # post-processing: conv, yolo, nms
            output_ann1 = ann.module_list[14](output_ann1)
            output_ann2 = ann.module_list[-2](output_ann2)
            yolo_outputs = []
            x1, _ = ann.module_list[15][0](output_ann1, img_dim=img_size)
            yolo_outputs.append(x1)
            x2, _ = ann.module_list[-1][0](output_ann2, img_dim=img_size)
            yolo_outputs.append(x2)
            yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1))
            outputs = non_max_suppression(yolo_outputs, conf_thres=conf_thres, nms_thres=nms_thres)

        sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=iou_thres)

        total_firing_ratios.append([_.mean().item() for _ in spike_tensor.firing_ratios])
        spike_tensor.firing_ratios = []

    total_firing_ratios = np.mean(total_firing_ratios, 0)
    mean_firing_ratio = total_firing_ratios.mean()
    print(f"Mean Firing ratios {mean_firing_ratio}, Firing ratios: {total_firing_ratios}")

    for layer in snn.modules():
        if hasattr(layer, 'mem_potential'):
            layer.mem_potential = None

    # Concatenate sample statistics
    true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)

    return precision, recall, AP, f1, ap_class, total_firing_ratios
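A hypothetical invocation of this evaluator; the dataset path and thresholds below are placeholders, not values from the source:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
precision, recall, AP, f1, ap_class, firing_ratios = snn_evaluate(
    ann, snn, 'data/valid.txt',
    iou_thres=0.5, conf_thres=0.001, nms_thres=0.5,
    img_size=416, batch_size=8, timesteps=16, device=device)
print(f"mAP: {AP.mean():.4f}")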