Example #1
def quick_anal_rconv(net, save_dir, res_data, iter_data, full_anal=False):
    ensure_dir(save_dir)
    #corr_method=["pearson", "kendall", "spearman"]
    corr_method = ["pearson"]

    for key in res_data.keys():
        data = res_data[key]  # (sample_num, N_num, feature_map_width, feature_map_height)
        #print(data.size())
        data = data.permute(0, 2, 3, 1)  # (sample_num, feature_map_width, feature_map_height, N_num)
        #print(data.size())
        res_data[key] = data.contiguous().view(
            data.size(0) * data.size(1) * data.size(2), data.size(3))

    res_corr = cal_res_corr_pearson(net,
                                    res_data,
                                    separate_ei=net.dict["separate_ei"],
                                    net_dict=net.dict)

    plot_res_weight_corr(net=net,
                         save_dir=save_dir + "response-weight correlation/",
                         res_corr=res_corr)

    plot_weight_rcnn(net=net, logger=logger, save_dir=save_dir + "weight/")
    visualize_weight(net=net, name="r", save_dir=save_dir + "weight/plot/")

    if full_anal:
        ensure_dir(save_dir + "response_analysis/")
        for key in res_data.keys():
            #print(key)
            plot_res(res_data[key],
                     name=key,
                     save_dir=save_dir + "response_analysis/" + key + "/",
                     is_act=(".u" in key))

    # Randomly select one spatial position (width, height) to analyze.
    for key in iter_data.keys():
        data = iter_data[key]  # (sample_num, iter_time, N_num, feature_map_width, feature_map_height)
        #print(key, end=' ')
        #print(data.size())
        width = random.sample(range(data.size(3)), 1)[0]
        height = random.sample(range(data.size(4)), 1)[0]
        iter_data[key] = data[:, :, :, width, height]

    if full_anal:
        plot_iter(net, iter_data, logger=logger, save_dir=save_dir + "iter/")

    anal_stability(net=net,
                   iter_data=iter_data,
                   save_dir=save_dir + "stability/")

    for key in res_corr.keys():
        plot_dist(data=res_corr[key],
                  logger=logger,
                  name=key,
                  bins=50,
                  save_dir=save_dir + "responses/" + key + "/")
Example #2
    def __getitem__(self, index):  # index: e.g. 11452; presumably generated at random over the number of roidb entries, i.e. the index of the roidb entry to load
        # get the anchor index for current sample index
        item = self._roidb[index]
        ######## The key call is this function ########
        blobs = get_minibatch([item], self.phase)   # the returned blob has shape [1, 3, 768, 112, 112]
        data = torch.from_numpy(blobs['data'])
        length, height, width = data.shape[-3:]
        data = data.contiguous().view(3, length, height, width)

        gt_windows = torch.from_numpy(blobs['gt_windows'])
        # The maximum number of positive samples is defined as 20, so gt_windows
        # must be padded up to 20 entries.
        # Start by preallocating an all-zero [20, 3] matrix.
        gt_windows_padding = gt_windows.new(self.max_num_box, gt_windows.size(1)).zero_()  # gt_windows.size(1) is the number of columns of gt_windows (its second dimension)
        num_gt = min(gt_windows.size(0), self.max_num_box)  # the actual number of gt windows (capped at max_num_box)
        # Copy the real ground-truth values into the leading rows.
        gt_windows_padding[:num_gt, :] = gt_windows[:num_gt]
        
        if self.phase == 'test':
            video_info = ''
            for key, value in item.items():
                video_info = video_info + " {}: {}\n".format(key, value)
            # drop the last "\n"
            video_info = video_info[:-1]
            return data, gt_windows_padding, num_gt, video_info
        else:
            # data holds the video frame data of one clip.
            # gt_windows_padding is the label information after gt_windows has
            # been padded to 20 entries.
            # num_gt is the actual number of gt_windows.
            return data, gt_windows_padding, num_gt
Example #3
    def __getitem__(self, index):  # index:(0,13711)
        # get the anchor index for current sample index
        # item looks like: {'frames': array([[  0,   0, 768,   1]]), 'fg_name': '/home/tx/Dataset/tx/THUMOS14/val/video_validation_0000934', 'flipped': False, 'durations': array([30.]), 'bg_name': '/home/tx/Dataset/tx/THUMOS14/val/video_validation_0000934', 'max_classes': array([18.]), 'gt_classes': array([18.]), 'wins': array([[235., 265.]]), 'max_overlaps': array([1.])}
        item = self._roidb[index]
        blobs = get_minibatch([item], self.phase)

        data = torch.from_numpy(blobs['data'])  # blobs['data'] has shape [batch_size, 3, 512, 112, 112]
        length, height, width = data.shape[-3:]
        data = data.contiguous().view(3, length, height, width)

        # blobs['gt_windows'] has shape (1, 3): the first two entries are
        # (roughly) the start and end frame times of the temporal segment
        # within the video, and the third is the segment's class.
        gt_windows = torch.from_numpy(blobs['gt_windows'])
        # gt_windows_padding is a (20, 3) tensor of zeros.
        gt_windows_padding = gt_windows.new(self.max_num_box,
                                            gt_windows.size(1)).zero_()
        ####################################################################################################
        # num_gt is a value between 1 and max_num_box (the original comment
        # "num_gt = 1" is not always true).
        num_gt = min(gt_windows.size(0), self.max_num_box)
        # Copy the segment's start/end times and class into the first num_gt
        # rows of gt_windows_padding (think of it as a (20, 3) array).
        gt_windows_padding[:num_gt, :] = gt_windows[:num_gt]

        if self.phase == 'test':
            video_info = ''
            for key, value in item.items():
                video_info = video_info + " {}: {}\n".format(key, value)
            # drop the last "\n"
            video_info = video_info[:-1]
            return data, gt_windows_padding, num_gt, video_info
        else:
            return data, gt_windows_padding, num_gt  # roughly: data (3, 768, 112, 112), gt_windows_padding (20, 3), num_gt an integer in [1, 20]
Example #4
import functools


def get_next_tensor_part(src, dims, prev_pos=0):
    # Slice the next n elements (n = product of dims) out of the last
    # dimension of src, starting at prev_pos, and reshape them to dims.
    if not isinstance(dims, list):
        dims = [dims]
    n = functools.reduce(lambda x, y: x * y, dims)
    data = src.narrow(-1, prev_pos, n)
    if len(dims) > 1:
        data = data.contiguous().view(list(data.size())[:-1] + dims)
    return data, prev_pos + n
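A hypothetical usage sketch of get_next_tensor_part; the tensor and part sizes below are assumptions, not from the source. The helper consumes the last dimension piece by piece and returns the running offset along with each part:

import torch

src = torch.randn(10, 3 * 4 + 5)              # batch of flat vectors, last dim = 17
mat, pos = get_next_tensor_part(src, [3, 4])  # mat: (10, 3, 4), pos = 12
vec, pos = get_next_tensor_part(src, 5, pos)  # vec: (10, 5),    pos = 17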
Example #5
    def __getitem__(self, index):
        # get the anchor index for current sample index
        item = self._roidb[index]
        blobs = get_minibatch([item], self.phase)
        data = torch.from_numpy(blobs['data'])
        length, height, width = data.shape[-3:]
        data = data.contiguous().view(3, length, height, width)

        gt_windows = torch.from_numpy(blobs['gt_windows'])
        gt_windows_padding = gt_windows.new(self.max_num_box,
                                            gt_windows.size(1)).zero_()
        num_gt = min(gt_windows.size(0), self.max_num_box)
        gt_windows_padding[:num_gt, :] = gt_windows[:num_gt]

        if self.phase == 'test':
            video_info = ''
            for key, value in item.items():
                # if not key == 'fewshot_label':
                video_info = video_info + " {}: {}\n".format(key, value)
            # drop the last "\n"
            video_info = video_info[:-1]
            return data, gt_windows_padding, num_gt, video_info, item[
                'fewshot_label']
        else:
            return data, gt_windows_padding, num_gt
Example #6
def make_db(path):
    with torch.no_grad():
        rows = []
        files = glob.glob(path)
        for f in files:
            img = cv2.imread(f)
            img = img.transpose((2, 0, 1)) / 255.
            data = torch.from_numpy(img.astype(np.float32)).clone().to(device)

            data = data.unsqueeze(0)

            # mu, logvar = model.encode(data.contiguous().view(-1, 784 * 3))
            mu, logvar = model.encode(data.contiguous().view(
                -1, image_size * image_size * chn_num))
            z = model.reparameterize(mu, logvar).cpu().detach().numpy().copy()
            z = z.tolist()
            z[0].append(f)
            rows.append(np.array(z[0]))
    df = pd.DataFrame(rows,
                      columns=[
                          'z1', 'z2', 'z3', 'z4', 'z5', 'z6', 'z7', 'z8', 'z9',
                          'z10', 'path'
                      ])

    return df
Example #7
    def __getitem__(self, index):
        # get the anchor index for current sample index
        # here we set the anchor index to the last one
        # sample in this group
        minibatch_db = [self._roidb[index]]
        blobs = get_minibatch(minibatch_db, self._num_classes)
        data = torch.from_numpy(blobs['data'])
        length, height, width = data.size(-3), data.size(-2), data.size(-1)
        data = data.contiguous().view(3, length, height, width)
        if cfg.TRAIN.HAS_RPN or cfg.TEST.HAS_RPN:
            gt_windows = torch.from_numpy(blobs['gt_windows'])
            #num_twin = gt_windows.size()
            #gt_windows.view(3)
            #print("data {}".format(data.shape))
            #print("gt_windows {}".format(gt_windows.shape))
            return data, gt_windows
        else:  # not using RPN
            raise NotImplementedError
Example #8
def decode_batch_list(mod, zmat, batch_size=256):
    data = torch.utils.data.TensorDataset(
        torch.FloatTensor(zmat), torch.FloatTensor(zmat))
    loader = torch.utils.data.DataLoader(data,
        batch_size=batch_size, shuffle=False, pin_memory=True)
    batch_res = []
    do_cuda = False  # ensure the flag exists even when CUDA is unavailable
    if torch.cuda.is_available():
        mod.cuda()
        do_cuda = True
    for batch_idx, (data, target) in enumerate(pyprind.prog_bar(loader)):
        data, target = Variable(data), Variable(target)
        if do_cuda:
            data, target = data.cuda(), target.cuda()
            data, target = data.contiguous(), target.contiguous()

        res = mod.decode(data)
        batch_res.append(res.data.cpu())

    return torch.cat(batch_res, dim=0)
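A hypothetical call to decode_batch_list; vae stands for any model that exposes a decode() method (e.g. a VAE), and the latent size of 20 is an assumption, not from the source:

import numpy as np

z_matrix = np.random.randn(1000, 20).astype(np.float32)     # 1000 latent codes
decoded = decode_batch_list(vae, z_matrix, batch_size=256)  # decoded batches concatenated on dim 0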
Example #9
def test(args, model, device, test_loader, flatten=False):
    model.eval()  # evaluation mode
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            # Flatten each sample to a vector when requested; use the actual
            # batch size so the last (possibly smaller) batch still works.
            output = model(
                data.contiguous().view(data.size(0), -1) if flatten else data)
            test_loss += F.nll_loss(
                output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(
                1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
Example #10
def run(modelcheckpoint, normalizeData, simfile):
    """
    Load a trained wresnet34x2 checkpoint, preprocess one SETI simulation
    file, and return the model's softmax class probabilities.
    """

    model = wresnet34x2().cpu()

    if os.path.isfile(modelcheckpoint):
        print("=> Loading checkpoint '{}'".format(modelcheckpoint))
        checkpoint = torch.load(modelcheckpoint,
                                map_location=lambda storage, loc: storage)
        best_acc = checkpoint['best_acc']
        print("This model had an accuracy of %.2f on the validation set." %
              (best_acc, ))
        keys = checkpoint['state_dict'].keys()
        for old_key in keys:
            new_key = old_key.replace('module.', '')
            checkpoint['state_dict'][new_key] = checkpoint['state_dict'].pop(
                old_key)
        model.load_state_dict(checkpoint['state_dict'])
        print("=> Loaded checkpoint '{}' (epoch {})".format(
            modelcheckpoint, checkpoint['epoch']))
    else:
        print("=> No model checkpoint found. Exiting")
        return None

    cudnn.benchmark = False

    # Load the Normalizer function
    h = h5py.File(normalizeData, 'r')
    mean = torch.FloatTensor(h['mean'][:])
    mean = mean.permute(2, 0, 1)
    std_dev = torch.FloatTensor(h['std_dev'][:])
    std_dev = std_dev.permute(2, 0, 1)
    h.close()
    normalize = transforms.Normalize(mean=mean, std=std_dev)

    # Load simulation data
    time_freq_resolution = (384, 512)
    aca = ibmseti.compamp.SimCompamp(open(simfile, 'rb').read())
    complex_data = aca.complex_data()
    complex_data = complex_data.reshape(time_freq_resolution[0],
                                        time_freq_resolution[1])
    complex_data = complex_data * np.hanning(complex_data.shape[1])
    cpfft = np.fft.fftshift(np.fft.fft(complex_data), 1)
    spectrogram = np.abs(cpfft)
    features = np.stack(
        (np.log(spectrogram**2), np.arctan(cpfft.imag / cpfft.real)), -1)

    # create FloatTensor, permute to proper dimensional order, and normalize
    data = torch.FloatTensor(features)
    data = data.permute(2, 0, 1)
    data = normalize(data)

    # The model expects a 4D tensor
    s = data.size()
    data = data.contiguous().view(1, s[0], s[1], s[2])

    input_var = torch.autograd.Variable(data, volatile=True)

    model.eval()

    softmax = torch.nn.Softmax()
    softmax.zero_grad()
    output = model(input_var)
    probs = softmax(output).data.view(7).tolist()

    return probs
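A side note on the 4D reshape near the end: since the view call only prepends a batch dimension, an equivalent spelling (just a note, not a change to the example) is:

data = data.contiguous().unsqueeze(0)  # (1, channels, height, width)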