import numpy as np
import torch
import numcompress as nc


def return_compress(grad_data, layer_count):

    # metrics_dict / compression_dict are module-level dict-of-list accumulators
    global metrics_dict
    global compression_dict
    compression_precision = 7
    # flatten the gradient and convert it to a plain Python list for numcompress
    grad_data_raster = grad_data.view(-1)
    grad_data_numpy = grad_data_raster.numpy()
    grad_data_list = grad_data_numpy.tolist()
    # byte length of the raw float representation, used as the baseline size
    len_of_str_rep_array = float(len(grad_data_numpy.tobytes()))
    lossy_compress = nc.compress(grad_data_list,
                                 precision=compression_precision)
    len_compress_str = len(lossy_compress)
    compression_ratio = len_of_str_rep_array / len_compress_str
    metrics_dict['compression_ratio'].append(compression_ratio)
    compression_dict[layer_count].append(compression_ratio)

    decompress_list = nc.decompress(lossy_compress)
    decompress_numpy = np.array(decompress_list)
    decompress_tensor = torch.from_numpy(decompress_numpy)
    # reshape the rasterized tensor back to original shape
    decompress_tensor = decompress_tensor.reshape(grad_data.shape)
    decompress_tensor = decompress_tensor.float()
    return decompress_tensor
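The function above relies on two module-level accumulators and the numcompress API. A minimal driver sketch, assuming a CPU model and defaultdict(list) accumulators; the name compress_all_gradients and this setup are illustrative, not part of the original:

from collections import defaultdict

# assumed setup for the globals referenced by return_compress
metrics_dict = defaultdict(list)
compression_dict = defaultdict(list)


def compress_all_gradients(model):
    # hypothetical driver: replace each parameter gradient with its
    # compressed-then-decompressed copy and record per-layer ratios
    for layer_count, param in enumerate(model.parameters()):
        if param.grad is not None:
            param.grad.data = return_compress(param.grad.data.cpu(), layer_count)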
Example #2
    def forward(self, x):
        index = 0
        # walk the VGG feature extractor layer by layer
        for layer, (name,
                    module) in enumerate(self.vgg.features._modules.items()):
            x = module(x)
            # only ReLU / pooling layer outputs are candidate partition points
            if isinstance(module, torch.nn.modules.ReLU) or isinstance(
                    module, torch.nn.modules.MaxPool2d) or isinstance(
                        module, torch.nn.modules.AvgPool2d):
                if index == self.partition_index:
                    features = x
                    # QF < 100: quantize to 8-bit and JPEG-encode the feature map at quality QF
                    if self.QF != 100:
                        features = features.view(
                            features.size(0) * features.size(2), -1)
                        features = features.data.numpy()
                        features = np.round(features * 255)
                        im = Image.fromarray(features)
                        if im.mode != 'L':
                            im = im.convert('L')
                        im.save("temp.jpeg", quality=self.QF)
                        im_decode = Image.open("temp.jpeg")
                        encoded_data_size = os.path.getsize("temp.jpeg")
                        # uncompressed size of the activation tensor: float32 elements * 4 bytes
                        raw_data_size = x.size(0) * x.size(1) * x.size(
                            2) * x.size(3) * 4
                        decode_array = np.array(im_decode)
                        decode_array = decode_array / 255
                        decode_va = Variable(torch.from_numpy(decode_array))
                        decode_va = decode_va.view(x.size()).float()

                    else:
                        # QF == 100: skip JPEG and compress the raw floats with numcompress instead
                        features = features.view(
                            features.size(0) * features.size(1) *
                            features.size(2) * features.size(3), -1)
                        features = torch.squeeze(features)
                        features = features.data.numpy()
                        features = features.tolist()
                        raw_data_size = sys.getsizeof(features)
                        compressed = compress(features, precision=4)
                        encoded_data_size = sys.getsizeof(compressed)
                        decode_va = np.array(decompress(compressed))
                        decode_va = Variable(torch.from_numpy(decode_va))
                        decode_va = decode_va.view(x.size()).float()
                    # overwrite the activations with their decoded counterpart before continuing
                    x.data = decode_va.data
                index += 1

        out = x.view(x.size(0), -1)
        out = self.vgg.classifier(out)
        return out, raw_data_size, encoded_data_size
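The lossy branch of forward can be exercised on its own. A minimal sketch of the same JPEG round-trip on a single activation map; the 56x56 shape and quality factor of 75 are assumptions chosen for illustration:

import os

import numpy as np
from PIL import Image

# stand-in for one channel-flattened activation map scaled to [0, 1]
activations = np.random.rand(56, 56).astype(np.float32)

im = Image.fromarray(np.round(activations * 255))
if im.mode != 'L':
    im = im.convert('L')
im.save("temp.jpeg", quality=75)  # 75 plays the role of self.QF

decoded = np.asarray(Image.open("temp.jpeg"), dtype=np.float32) / 255
raw_bytes = activations.size * 4  # float32 elements, 4 bytes each
jpeg_bytes = os.path.getsize("temp.jpeg")
print("compression ratio:", raw_bytes / jpeg_bytes)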
Example #3
    def test_compression_ratio_for_series_of_floats(self):
        seed = 1247.53
        series = [seed]
        previous = seed

        for _ in range(10000):
            current = previous + random.randrange(1000, 100000) * (10**-2)
            series.append(round(current, 3))
            previous = current

        original_size = sum(sys.getsizeof(i) for i in series)
        text = compress(series)
        compressed_size = sys.getsizeof(text)
        reduction = ((original_size - compressed_size) * 100.0) / original_size

        self.assertTrue(compressed_size < original_size)
        print('10k floats compressed by ', round(reduction, 2), '%')

        self.assertEqual(decompress(text), series)
Example #4
    def test_compression_ratio_for_series_of_epoch_timestamps(self):
        seed = 946684800  # start of year 2000
        series = [seed]
        previous = seed

        for _ in range(10000):
            current = previous + random.randrange(0, 600)
            series.append(current)
            previous = current

        original_size = sum(sys.getsizeof(i) for i in series)
        text = compress(series, 0)
        compressed_size = sys.getsizeof(text)
        reduction = ((original_size - compressed_size) * 100.0) / original_size

        self.assertTrue(compressed_size < original_size)
        print('10k timestamps compressed by ', round(reduction, 2), '%')

        self.assertEqual(decompress(text), series)
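As the test above checks, integer series such as epoch timestamps round-trip exactly at precision 0. A short usage sketch; the sample timestamps are illustrative:

from numcompress import compress, decompress

timestamps = [946684800, 946684923, 946685107]  # illustrative epoch seconds
text = compress(timestamps, 0)
assert decompress(text) == timestamps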
Example #5
    def test_compress_higher_than_limit_precision_raises_exception(self):
        with pytest.raises(ValueError):
            compress(23, precision=17)
Example #6
    def test_compress_negative_precision_raises_exception(self):
        with pytest.raises(ValueError):
            compress([123, 125], precision=-2)
Example #7
    def test_compress_non_integer_precision_raises_exception(self):
        with pytest.raises(ValueError):
            compress([123, 125], precision='someValue')
Example #8
    def test_compress_non_numerical_list_value_raises_exception(self):
        with pytest.raises(ValueError):
            compress([123, 'someText', 456])
Example #9
    def test_compress_decompress_works_with_empty_list(self):
        self.assertEqual(compress([]), '')
        self.assertEqual(decompress(''), [])
Example #10
    def test_compress_non_list_value_raises_exception(self):
        with pytest.raises(ValueError):
            compress(23)
Example #11
    def test_compress_decompress_works_in_lossless_fashion_for_longer_floats_when_appropriate_precision_is_specified(self):
        original_series = [12365.54524354, 14789.54673, 11367.67845987]
        text = 'Io{pcifu|_Folwenp}ak@f~itlgxf}@'
        self.assertEqual(compress(original_series, 10), text)
        self.assertEqual(decompress(text), original_series)
Example #12
    def test_compress_decompress_works_in_lossy_fashion_for_longer_floats_when_enough_precision_not_specified(self):
        original_series = [12365.54524354, 14789.54699, 11367.67845123]
        lossy_series = [12365.545, 14789.547, 11367.678]
        text = 'BqmvqVck}rCxizoE'
        self.assertEqual(compress(original_series), text)
        self.assertEqual(decompress(text), lossy_series)
Example #13
    def test_compress_decompress_works_for_single_int(self):
        series = [12345]
        text = '?qbW'
        self.assertEqual(compress(series, 0), text)
        self.assertEqual(decompress(text), series)
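Examples #11 and #12 above pin down the lossless/lossy boundary of the precision argument; the same behaviour in a short round-trip sketch using the values from those tests:

from numcompress import compress, decompress

values = [12365.54524354, 14789.54673, 11367.67845987]
# precision 10 preserves every digit of these values
assert decompress(compress(values, 10)) == values
# the default precision of 3 rounds them to three decimal places
assert decompress(compress(values)) == [12365.545, 14789.547, 11367.678]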