Code Example #1
def make_dataloader(FG):
    x, y, train_idx, test_idx, ratio = fold_split(
        k=FG.fold, running_k=FG.running_fold, labels=FG.labels,
        subject_ids_path=os.path.join(FG.data_root, 'subject_ids.pkl'),
        diagnosis_path=os.path.join(FG.data_root, 'diagnosis.pkl'))
    # x = image, y=target
    trainset = ADNIDataset(FG.labels, FG.data_root, x[train_idx], y[train_idx],
                           transform_preset('random_crop'))
    validset = ADNIDataset(FG.labels, FG.data_root, x[test_idx], y[test_idx])

    trainloader = DataLoader(trainset, batch_size=FG.batch_size,
                             shuffle=True, pin_memory=True,
                             num_workers=4)
    validloader = DataLoader(validset, batch_size=len(FG.devices),
                             shuffle=False, pin_memory=True,
                             num_workers=4)
    return trainloader, validloader
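
A minimal usage sketch for the loader above, assuming FG is a config namespace carrying the fields referenced by make_dataloader (fold, running_fold, labels, data_root, batch_size, devices); the values below are placeholders, not the project's actual settings:

from types import SimpleNamespace

# Hypothetical configuration; field names mirror those used by make_dataloader.
FG = SimpleNamespace(fold=5, running_fold=0, labels=['AD', 'NL'],
                     data_root='/path/to/adni', batch_size=8, devices=[0])

for k in range(FG.fold):
    FG.running_fold = k                               # select the k-th cross-validation fold
    trainloader, validloader = make_dataloader(FG)
    for image, target in trainloader:                 # ADNIDataset yields (image, target) pairs
        image = image.cuda(FG.devices[0], non_blocking=True)
        target = target.cuda(FG.devices[0], non_blocking=True)
        # ... forward / backward pass goes here ...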
Code Example #2
File: test.py Project: christy4526/classifier_ad
def original_load(validblock, target_dict, transformer, device):
    originalset = ADNIDataset(FLG.labels,
                              pjoin(FLG.data_root, 'spm_normalized'),
                              validblock,
                              target_dict,
                              transform=transformer)
    originloader = DataLoader(originalset, pin_memory=True)
    for image, target in originloader:
        if len(image.shape) == 6:
            _, npatches, c, x, y, z = image.shape
            image = image.view(-1, c, x, y, z)
            target = torch.stack([target for _ in range(npatches)]).squeeze()
        image = image.cuda(device, non_blocking=True)
        target = target.cuda(device, non_blocking=True)
        break
    return image, target
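
The reshape in the loop above folds a stack of cropped patches into the batch dimension so each patch is scored as an independent sample, while the single label is repeated once per patch. A standalone illustration of the same tensor manipulation (shapes are arbitrary):

import torch

# Stand-in for one loader item of shape (batch=1, npatches, c, x, y, z).
image = torch.randn(1, 9, 1, 32, 32, 32)
target = torch.tensor([1])

_, npatches, c, x, y, z = image.shape
image = image.view(-1, c, x, y, z)          # (9, 1, 32, 32, 32): patches become the batch
target = torch.stack([target for _ in range(npatches)]).squeeze()  # (9,): one label per patch
print(image.shape, target.shape)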
Code Example #3
                                               ytinkmax=10)),
                    acc=Scalar(vis,
                               'Accuracy',
                               opts=dict(showlegend=True,
                                         title='Accuracy',
                                         ytickmin=0,
                                         ytinkmax=2.0)),
                    inputs=Image3D(vis, 'inputs'),
                    outputs=Image3D(vis, 'outputs'))

    # create train/test split: x = image, y = target
    x, y, train_idx, test_idx, ratio = fold_split(FG)
    #transform=Compose([ToWoldCoordinateSystem(), Normalize((0.5, 0.9)), ToTensor()])
    transform = Compose([ToWoldCoordinateSystem(), ToTensor()])

    trainset = ADNIDataset(FG, x[train_idx], y[train_idx], transform=transform)
    testset = ADNIDataset(FG, x[test_idx], y[test_idx], transform=transform)

    trainloader = DataLoader(trainset,
                             batch_size=FG.batch_size,
                             shuffle=True,
                             pin_memory=True,
                             num_workers=4)
    testloader = DataLoader(testset,
                            batch_size=FG.batch_size,
                            shuffle=True,
                            num_workers=4,
                            pin_memory=True)

    D = Discriminator(FG).to('cuda:{}'.format(
        FG.devices[0]))  # discriminator D(x, z)
Code Example #4
 def _dataset(block, transform):
     return ADNIDataset(FLG.labels,
                        pjoin(FLG.data_root, FLG.modal),
                        block,
                        target_dict,
                        transform=transform)
Code Example #5
        D_x = Scalar(vis, 'D_x', opts=dict(
            showlegend=True, title='D_x', ytickmin=0, ytinkmax=2.0)),
        inputs0 = Image3D(vis, 'inputs0'),
        inputs1 = Image3D(vis, 'inputs1'),
        fake0 = Image3D(vis, 'fake0'),
        fake1 = Image3D(vis, 'fake1'),
        outputs0 = Image3D(vis, 'outputs0'),
        outputs1 = Image3D(vis, 'outputs1'))

    # dataset setting
    x, y = Trainset(FG)
    # x, y, train_idx, test_idx, ratio = fold_split(FG)
    # transform = Compose([ToFloatTensor(), Normalize(0.5,0.5)])
    # trainset = ADNIDataset2D(FG, x, y, transform=transform)
    transform = Compose([ToWoldCoordinateSystem(), ToTensor(),
                         Pad(1, 0, 1, 0, 1, 0), Normalize(0.5, 0.5)])
    trainset = ADNIDataset(FG, x, y, transform=transform)
    trainloader = DataLoader(trainset, batch_size=FG.batch_size,
                             shuffle=True, pin_memory=True)
    # trainset = ADNIDataset2D(FG, x[train_idx], y[train_idx], transform=transform)
    # testset = ADNIDataset2D(FG, x[test_idx], y[test_idx], transform=transform)
    # trainloader = DataLoader(trainset, batch_size=FG.batch_size, shuffle=True,
    #                          pin_memory=True, num_workers=4)
    # testloader = DataLoader(testset, batch_size=FG.batch_size, shuffle=True,
    #                         num_workers=4, pin_memory=True)

    # models
    D = infoDiscriminator3D(FG.c_code).to('cuda:{}'.format(FG.devices[0]))
    G = infoGenerator3D(FG.z_dim, FG.c_code).to('cuda:{}'.format(FG.devices[0]))

    if len(FG.devices) != 1:
        D = torch.nn.DataParallel(D, FG.devices)
Code Example #6
    def __init__(self, FG, SUPERVISED=True):
        # parameters
        self.num_epoch = FG.num_epoch
        self.batch_size = FG.batch_size
        self.save_dir = FG.save_dir
        self.result_dir = FG.result_dir
        self.dataset = 'MRI'
        self.log_dir = FG.log_dir
        self.model_name = 'infoGAN'
        self.input_size = FG.input_size
        self.z_dim = FG.z
        self.SUPERVISED = SUPERVISED        # if True, label info is used directly as the discrete code
        self.len_discrete_code = 10         # categorical distribution (i.e. label)
        self.len_continuous_code = 2        # gaussian distribution (e.g. rotation, thickness)
        self.sample_num = self.len_discrete_code ** 2
        
        # torch setting
        self.device = torch.device('cuda:{}'.format(FG.devices[0]))
        torch.cuda.set_device(FG.devices[0])
        timer = SimpleTimer()

        # load dataset
        x, y = Trainset(FG)      # x = image, y=target
        trainset = ADNIDataset(FG, x, y, cropping=NineCrop((40,40,40),(32,32,32)),
                               transform=Compose([Lambda(lambda patches: torch.stack([ToTensor()(patch) for patch in patches]))]))     
        self.trainloader = DataLoader(trainset, batch_size=self.batch_size,
                                 shuffle=True, pin_memory=True,
                                 num_workers=4)
        #self.data_loader = dataloader(self.dataset, self.input_size, self.batch_size)
        #data = self.trainloader
        for _, data in enumerate(self.trainloader):
            data = data['image']
            break

        # networks init
        self.G = generator(input_dim=self.z_dim, output_dim=data.shape[1],
                           input_size=self.input_size, len_discrete_code=self.len_discrete_code,
                           len_continuous_code=self.len_continuous_code).to('cuda:{}'.format(FG.devices[0]))
        self.D = discriminator(input_dim=data.shape[1], output_dim=1, input_size=self.input_size,
                               len_discrete_code=self.len_discrete_code, len_continuous_code=self.len_continuous_code).to('cuda:{}'.format(FG.devices[0]))
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=FG.lrG, betas=(FG.beta1, FG.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=FG.lrD, betas=(FG.beta1, FG.beta2))
        self.info_optimizer = optim.Adam(itertools.chain(self.G.parameters(), self.D.parameters()), lr=FG.lrD, betas=(FG.beta1, FG.beta2))

        if len(FG.devices) != 1:
            self.G = torch.nn.DataParallel(self.G, FG.devices)
            self.D = torch.nn.DataParallel(self.D, FG.devices)
        self.BCE_loss = nn.BCELoss().to('cuda:{}'.format(FG.devices[0]))
        self.CE_loss = nn.CrossEntropyLoss().to('cuda:{}'.format(FG.devices[0]))
        self.MSE_loss = nn.MSELoss().to('cuda:{}'.format(FG.devices[0]))

        print('---------- Networks architecture -------------')
        ori_utils.print_network(self.G)
        ori_utils.print_network(self.D)
        print('-----------------------------------------------')

        # fixed noise & condition
        self.sample_z = torch.zeros((self.sample_num, self.z_dim))
        for i in range(self.len_discrete_code):
            self.sample_z[i * self.len_discrete_code] = torch.rand(1, self.z_dim)
            for j in range(1, self.len_discrete_code):
                self.sample_z[i * self.len_discrete_code + j] = self.sample_z[i * self.len_discrete_code]

        temp = torch.zeros((self.len_discrete_code, 1))
        for i in range(self.len_discrete_code):
            temp[i, 0] = i

        temp_y = torch.zeros((self.sample_num, 1))
        for i in range(self.len_discrete_code):
            temp_y[i * self.len_discrete_code: (i + 1) * self.len_discrete_code] = temp

        self.sample_y = torch.zeros((self.sample_num, self.len_discrete_code)).scatter_(1, temp_y.type(torch.LongTensor), 1)
        self.sample_c = torch.zeros((self.sample_num, self.len_continuous_code))

        # manipulating two continuous code
        #self.sample_z2 = torch.rand((1, self.z_dim)).expand(self.sample_num, self.z_dim)
        self.sample_z2 = torch.zeros((self.sample_num, self.z_dim))
        z2 = torch.rand(1, self.z_dim)
        for i in range(self.sample_num):
            self.sample_z2[i] = z2
        
        self.sample_y2 = torch.zeros(self.sample_num, self.len_discrete_code)
        self.sample_y2[:, 0] = 1

        temp_c = torch.linspace(-1, 1, 10)
        self.sample_c2 = torch.zeros((self.sample_num, 2))
        for i in range(self.len_discrete_code):
            for j in range(self.len_discrete_code):
                self.sample_c2[i*self.len_discrete_code+j, 0] = temp_c[i]
                self.sample_c2[i*self.len_discrete_code+j, 1] = temp_c[j]

        self.sample_z = self.sample_z.cuda(self.device, non_blocking=True)
        self.sample_y = self.sample_y.cuda(self.device, non_blocking=True) 
        self.sample_c = self.sample_c.cuda(self.device, non_blocking=True)
        self.sample_z2 = self.sample_z2.cuda(self.device, non_blocking=True)
        self.sample_y2 = self.sample_y2.cuda(self.device, non_blocking=True)
        self.sample_c2 = self.sample_c2.cuda(self.device, non_blocking=True)


        vis = Visdom(port=10002, env=str(FG.vis_env))

        self.printers = dict(
            D_loss = Scalar(vis, 'D_loss', opts=dict(
                showlegend=True, title='D loss', ytickmin=0, ytinkmax=2.0)),
            G_loss = Scalar(vis, 'G_loss', opts=dict(
                showlegend=True, title='G loss', ytickmin=0, ytinkmax=10)),
            info_loss = Scalar(vis, 'info_loss', opts=dict(
                showlegend=True, title='info loss', ytickmin=0, ytinkmax=10)),
            input = Image3D(vis, 'input'),
            input_fi = Image3D(vis, 'input_fi'),
            output = Image3D(vis, 'output'),
            output2 = Image3D(vis, 'output2'))

        self.timer = SimpleTimer()
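
The scatter_ call that builds self.sample_y above is the standard one-hot construction over the discrete code. A standalone example of the same pattern with a 10-way code:

import torch

len_discrete_code = 10
sample_num = len_discrete_code ** 2

# Column of class indices laid out like temp_y above:
# each block of ten samples cycles through codes 0..9.
idx = torch.arange(len_discrete_code).repeat(len_discrete_code).view(-1, 1)

# scatter_(1, index, 1) writes a 1 at the indexed column of every row -> one-hot rows.
sample_y = torch.zeros(sample_num, len_discrete_code).scatter_(1, idx, 1)
print(sample_y[:3])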
Code Example #7
def test(FLG):
    device = torch.device('cuda:{}'.format(FLG.devices[0]))
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(FLG.devices[0])
    report = [ScoreReport() for _ in range(FLG.fold)]
    overall_report = ScoreReport()
    target_dict = np.load(pjoin(FLG.data_root, 'target_dict.pkl'))

    if 'plane' in FLG.model:
        model = Plane(len(FLG.labels), name=FLG.model)
    elif 'resnet11' in FLG.model:
        model = resnet11(len(FLG.labels), FLG.model)
    elif 'resnet19' in FLG.model:
        model = resnet19(len(FLG.labels), FLG.model)
    elif 'resnet35' in FLG.model:
        model = resnet35(len(FLG.labels), FLG.model)
    elif 'resnet51' in FLG.model:
        model = resnet51(len(FLG.labels), FLG.model)
    else:
        raise NotImplementedError(FLG.model)
    model.to(device)

    for running_fold in range(FLG.fold):
        _, validblock, _ = fold_split(
            FLG.fold, running_fold, FLG.labels,
            np.load(pjoin(FLG.data_root, 'subject_indices.npy')), target_dict)
        validset = ADNIDataset(FLG.labels,
                               pjoin(FLG.data_root, FLG.modal),
                               validblock,
                               target_dict,
                               transform=transform_presets(FLG.augmentation))
        validloader = DataLoader(validset, pin_memory=True)

        epoch, _ = load_checkpoint(model, FLG.checkpoint_root, running_fold,
                                   FLG.model, None, True)
        model.eval()
        for image, target in validloader:
            true = target
            npatches = 1
            if len(image.shape) == 6:
                _, npatches, c, x, y, z = image.shape
                image = image.view(-1, c, x, y, z)
                target = torch.stack([target
                                      for _ in range(npatches)]).squeeze()
            image = image.cuda(device, non_blocking=True)
            target = target.cuda(device, non_blocking=True)

            output = model(image)

            if npatches == 1:
                score = F.softmax(output, dim=1)
            else:
                score = torch.mean(F.softmax(output, dim=1),
                                   dim=0,
                                   keepdim=True)

            report[running_fold].update_true(true)
            report[running_fold].update_score(score)

            overall_report.update_true(true)
            overall_report.update_score(score)

        print('At {}'.format(epoch))
        print(
            metrics.classification_report(report[running_fold].y_true,
                                          report[running_fold].y_pred,
                                          target_names=FLG.labels,
                                          digits=4))
        print('accuracy {}'.format(report[running_fold].accuracy))

    print('over all')
    print(
        metrics.classification_report(overall_report.y_true,
                                      overall_report.y_pred,
                                      target_names=FLG.labels,
                                      digits=4))
    print('accuracy {}'.format(overall_report.accuracy))

    with open(FLG.model + '_stat.pkl', 'wb') as f:
        pickle.dump(report, f, pickle.HIGHEST_PROTOCOL)
Code Example #8
File: test.py Project: christy4526/classifier_ad
def test(FLG):
    device = torch.device('cuda:{}'.format(FLG.devices[0]))
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(FLG.devices[0])
    report = [ScoreReport() for _ in range(FLG.fold)]
    overall_report = ScoreReport()
    target_dict = np.load(pjoin(FLG.data_root, 'target_dict.pkl'))

    with open(FLG.model + '_stat.pkl', 'rb') as f:
        stat = pickle.load(f)
    summary = Summary(port=10001, env=str(FLG.model) + 'CAM')

    class Feature(object):
        def __init__(self):
            self.blob = None

        def capture(self, blob):
            self.blob = blob

    if 'plane' in FLG.model:
        model = Plane(len(FLG.labels), name=FLG.model)
    elif 'resnet11' in FLG.model:
        model = resnet11(len(FLG.labels), FLG.model)
    elif 'resnet19' in FLG.model:
        model = resnet19(len(FLG.labels), FLG.model)
    elif 'resnet35' in FLG.model:
        model = resnet35(len(FLG.labels), FLG.model)
    elif 'resnet51' in FLG.model:
        model = resnet51(len(FLG.labels), FLG.model)
    else:
        raise NotImplementedError(FLG.model)
    model.to(device)

    ad_h = []
    nl_h = []
    adcams = np.zeros((4, 3, 112, 144, 112), dtype="f8")
    nlcams = np.zeros((4, 3, 112, 144, 112), dtype="f8")
    sb = [9.996e-01, 6.3e-01, 1.001e-01]
    for running_fold in range(FLG.fold):
        _, validblock, _ = fold_split(
            FLG.fold, running_fold, FLG.labels,
            np.load(pjoin(FLG.data_root, 'subject_indices.npy')), target_dict)
        validset = ADNIDataset(FLG.labels,
                               pjoin(FLG.data_root, FLG.modal),
                               validblock,
                               target_dict,
                               transform=transform_presets(FLG.augmentation))
        validloader = DataLoader(validset, pin_memory=True)

        epoch, _ = load_checkpoint(model, FLG.checkpoint_root, running_fold,
                                   FLG.model, None, True)
        model.eval()
        feature = Feature()

        def hook(mod, inp, oup):
            return feature.capture(oup.data.cpu().numpy())

        _ = model.layer4.register_forward_hook(hook)
        fc_weights = model.fc.weight.data.cpu().numpy()

        transformer = Compose([CenterCrop((112, 144, 112)), ToFloatTensor()])
        im, _ = original_load(validblock, target_dict, transformer, device)

        for image, target in validloader:
            true = target
            npatches = 1
            if len(image.shape) == 6:
                _, npatches, c, x, y, z = image.shape
                image = image.view(-1, c, x, y, z)
                target = torch.stack([target
                                      for _ in range(npatches)]).squeeze()
            image = image.cuda(device, non_blocking=True)
            target = target.cuda(device, non_blocking=True)

            output = model(image)

            if npatches == 1:
                score = F.softmax(output, dim=1)
            else:
                score = torch.mean(F.softmax(output, dim=1),
                                   dim=0,
                                   keepdim=True)

            report[running_fold].update_true(true)
            report[running_fold].update_score(score)

            overall_report.update_true(true)
            overall_report.update_score(score)

            print(target)
            if FLG.cam:
                s = 0
                cams = []
                if target[0] == 0:
                    s = score[0][0]
                    #s = s.cpu().numpy()[()]
                    cams = adcams
                else:
                    s = sn = score[0][1]
                    #s = s.cpu().numpy()[()]
                    cams = nlcams
                if s > sb[0]:
                    cams[0] = summary.cam3d(FLG.labels[target],
                                            im,
                                            feature.blob,
                                            fc_weights,
                                            target,
                                            cams[0],
                                            s,
                                            num_images=5)
                elif s > sb[1]:
                    cams[1] = summary.cam3d(FLG.labels[target],
                                            im,
                                            feature.blob,
                                            fc_weights,
                                            target,
                                            cams[1],
                                            s,
                                            num_images=5)
                elif s > sb[2]:
                    cams[2] = summary.cam3d(FLG.labels[target],
                                            im,
                                            feature.blob,
                                            fc_weights,
                                            target,
                                            cams[2],
                                            s,
                                            num_images=5)
                else:
                    cams[3] = summary.cam3d(FLG.labels[target],
                                            im,
                                            feature.blob,
                                            fc_weights,
                                            target,
                                            cams[3],
                                            s,
                                            num_images=5)
                #ad_h += [s]
                #nl_h += [sn]

        print('At {}'.format(epoch))
        print(
            metrics.classification_report(report[running_fold].y_true,
                                          report[running_fold].y_pred,
                                          target_names=FLG.labels,
                                          digits=4))
        print('accuracy {}'.format(report[running_fold].accuracy))

    #print(np.histogram(ad_h))
    #print(np.histogram(nl_h))

    print('over all')
    print(
        metrics.classification_report(overall_report.y_true,
                                      overall_report.y_pred,
                                      target_names=FLG.labels,
                                      digits=4))
    print('accuracy {}'.format(overall_report.accuracy))

    with open(FLG.model + '_stat.pkl', 'wb') as f:
        pickle.dump(report, f, pickle.HIGHEST_PROTOCOL)
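
summary.cam3d above is project code; for reference, a class activation map over 3D features is conventionally the fc-weighted sum of the last convolutional feature maps captured by the forward hook. A minimal sketch of that computation under the usual CAM formulation (illustrative only, not the project's implementation):

import numpy as np

def cam3d_sketch(feature_blob, fc_weights, class_idx):
    # feature_blob: (1, C, D, H, W) array captured by the layer4 forward hook
    # fc_weights:   (num_classes, C) array taken from model.fc.weight
    fmap = feature_blob[0]                        # (C, D, H, W)
    w = fc_weights[class_idx]                     # (C,)
    cam = np.tensordot(w, fmap, axes=([0], [0]))  # weighted channel sum -> (D, H, W)
    cam -= cam.min()
    if cam.max() > 0:
        cam /= cam.max()                          # normalize to [0, 1] for display
    return cam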
Code Example #9
File: test.py Project: christy4526/classifier_ad
def test_cam():
    with open(FLG.model + '_stat.pkl', 'rb') as f:
        stat = pickle.load(f)
    summary = Summary(port=10001, env=str(FLG.model) + 'CAM')

    class Feature(object):
        def __init__(self):
            self.blob = None

        def capture(self, blob):
            self.blob = blob

    # TODO: create model
    device = torch.device('cuda:{}'.format(FLG.devices[0]))
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(FLG.devices[0])
    report = [ScoreReport() for _ in range(FLG.fold)]
    target_dict = np.load(pjoin(FLG.data_root, 'target_dict.pkl'))

    model = Plane(len(FLG.labels), name=FLG.model)
    model.to(device)

    transformer = Compose([CenterCrop((112, 144, 112)), ToFloatTensor()])

    def original_load(validblock):
        originalset = ADNIDataset(FLG.labels,
                                  pjoin(FLG.data_root, 'spm_normalized'),
                                  validblock,
                                  target_dict,
                                  transform=transformer)
        originloader = DataLoader(originalset, pin_memory=True)
        for image, target in originloader:
            if len(image.shape) == 6:
                _, npatches, c, x, y, z = image.shape
                image = image.view(-1, c, x, y, z)
                target = torch.stack([target
                                      for _ in range(npatches)]).squeeze()
            image = image.cuda(device, non_blocking=True)
            target = target.cuda(device, non_blocking=True)
            break
        return image, target

    hadcams = np.zeros((3, 112, 144, 112), dtype="f8")
    madcams = np.zeros((3, 112, 144, 112), dtype="f8")
    sadcams = np.zeros((3, 112, 144, 112), dtype="f8")
    zadcams = np.zeros((3, 112, 144, 112), dtype="f8")

    nlcams = np.zeros((4, 3, 112, 144, 112), dtype="f8")
    sb = [4.34444371e-16, 1.67179015e-18, 4.08813312e-23]
    #im, _ = original_load(validblock)
    for running_fold in range(FLG.fold):
        # validset
        _, validblock, _ = fold_split(
            FLG.fold, running_fold, FLG.labels,
            np.load(pjoin(FLG.data_root, 'subject_indices.npy')), target_dict)
        validset = ADNIDataset(FLG.labels,
                               pjoin(FLG.data_root, FLG.modal),
                               validblock,
                               target_dict,
                               transform=transformer)
        validloader = DataLoader(validset, pin_memory=True)

        load_checkpoint(model,
                        FLG.checkpoint_root,
                        running_fold,
                        FLG.model,
                        epoch=None,
                        is_best=True)
        model.eval()
        feature = Feature()

        def hook(mod, inp, oup):
            return feature.capture(oup.data.cpu().numpy())

        _ = model.layer4.register_forward_hook(hook)
        fc_weights = model.fc.weight.data.cpu().numpy()

        im, _ = original_load(validblock)
        ad_s = []
        for image, target in validloader:
            true = target
            npatches = 1
            if len(image.shape) == 6:
                _, npatches, c, x, y, z = image.shape
                image = image.view(-1, c, x, y, z)
                target = torch.stack([target
                                      for _ in range(npatches)]).squeeze()
            image = image.cuda(device, non_blocking=True)
            target = target.cuda(device, non_blocking=True)

            #_ = model(image.view(*image.shape))
            output = model(image)

            if npatches == 1:
                score = F.softmax(output, dim=1)
            else:
                score = torch.mean(F.softmax(output, dim=1),
                                   dim=0,
                                   keepdim=True)

            sa = score[0][1]
            #name = 'k'+str(running_fold)
            sa = sa.cpu().numpy()[()]
            print(score, score.shape)
            if true == torch.tensor([0]):
                if sa > sb[0]:
                    hadcams = summary.cam3d(FLG.labels[target],
                                            im,
                                            feature.blob,
                                            fc_weights,
                                            target,
                                            hadcams,
                                            sa,
                                            num_images=5)
                elif sa > sb[1]:
                    madcams = summary.cam3d(FLG.labels[target],
                                            im,
                                            feature.blob,
                                            fc_weights,
                                            target,
                                            madcams,
                                            sa,
                                            num_images=5)
                elif sa > sb[2]:
                    sadcams = summary.cam3d(FLG.labels[target],
                                            im,
                                            feature.blob,
                                            fc_weights,
                                            target,
                                            sadcams,
                                            sa,
                                            num_images=5)
                else:
                    zadcams = summary.cam3d(FLG.labels[target],
                                            im,
                                            feature.blob,
                                            fc_weights,
                                            target,
                                            zadcams,
                                            sa,
                                            num_images=5)
            else:
                if sa > sb[0]:
                    nlcams[0] = summary.cam3d(FLG.labels[target],
                                              im,
                                              feature.blob,
                                              fc_weights,
                                              target,
                                              nlcams[0],
                                              sa,
                                              num_images=5)
                elif sa > sb[1]:
                    nlcams[1] = summary.cam3d(FLG.labels[target],
                                              im,
                                              feature.blob,
                                              fc_weights,
                                              target,
                                              nlcams[1],
                                              sa,
                                              num_images=5)
                elif sa > sb[2]:
                    nlcams[2] = summary.cam3d(FLG.labels[target],
                                              im,
                                              feature.blob,
                                              fc_weights,
                                              target,
                                              nlcams[2],
                                              sa,
                                              num_images=5)
                else:
                    nlcams[3] = summary.cam3d(FLG.labels[target],
                                              im,
                                              feature.blob,
                                              fc_weights,
                                              target,
                                              nlcams[3],
                                              sa,
                                              num_images=5)
            ad_s += [sa]
        print('histogram', np.histogram(ad_s))