Example #1
def compressFunc(self):
    # Compress the file whose path was entered in the GUI field;
    # the coder and output path are kept in globals for later decompression
    path = self.name.get()
    global h
    h = HuffmanCoding(path)
    global output_path
    output_path = h.compress()
    print("Compressed file path: " + output_path)
Example #2
def main():
    """Read the folder of CSV files (each file stores the pixel matrix of an
    image) and select one file. The matrix is first compressed with the FFT,
    which produces a CSV file; that CSV is compressed again with Huffman
    coding, yielding a binary file. The binary file is then decompressed with
    Huffman, and the resulting CSV is decompressed with the FFT. Finally, PIL
    is used to display the original image and the reconstructed image.

    :raises: FileNotFoundError if the input folder or file is missing
    :rtype: None (the original and reconstructed images are displayed)
    """
    directory_sick_cow = "/Users/isabella/Documents/Segundo Semestre/Estructura de Datos /Proyecto/ganado_enfermo_csv"
    directory_healthy_cow = "/Users/isabella/Documents/Segundo Semestre/Estructura de Datos /Proyecto/Entrega 3/Codigo/huffman-coding-master/Vacas_Enferma"
     
    directory = directory_healthy_cow
    cont = os.listdir(directory)

    matriz_csv_var = load_img(directory+'/'+cont[0])

    fft_compression(matriz_csv_var, 0.05)
    h = HuffmanCoding("compressFFT.csv")
    output_path = h.compress()
    print("Compressed file path: " + output_path)


    decom_path = h.decompress(output_path)
    print("Decompressed file path: " + decom_path)
    img_fft_descompress = fft_descompression(decom_path)


    show_img(matriz_csv_var)
    show_img(img_fft_descompress)


    savetxt('dataff.csv', matriz_csv_var, delimiter=',')
Example #3
def main():
    from huffman import HuffmanCoding
    import sys

    inputFilePath = "sample.txt"
    handle = HuffmanCoding(inputFilePath)
    output_path = handle.compress()
    print("Compressed file path: " + output_path)
    decom_path = handle.decompress(output_path)
    print("Decompressed file path: " + decom_path)
Example #4
def algorithm(path):
    h = HuffmanCoding(path)
    first1 = time.time()
    output_path = h.compress()
    second1 = time.time()
    delta_time1 = second1 - first1
    first2 = time.time()
    decom_path = h.decompress(output_path)
    second2 = time.time()
    delta_time2 = second2 - first2
    return output_path, delta_time1, delta_time2, decom_path
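A small usage sketch for this helper; the input file name is a placeholder, and it assumes `time` and the same `HuffmanCoding` class used in the other examples are already imported:

out_path, t_comp, t_decomp, decom_path = algorithm("sample.txt")  # placeholder input file
print(f"compressed to {out_path} in {t_comp:.3f} s")
print(f"decompressed to {decom_path} in {t_decomp:.3f} s")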
Example #5
def main(argv):
    filepath = argv[1]
    read_bit_size = 8

    if len(argv) > 2:
        read_bit_size = int(argv[2])
        print(read_bit_size)

    h = HuffmanCoding(filepath, read_bit_size)

    output_path = h.compress()
    print("Compressed file path: " + output_path)

    decom_path = h.decompress(output_path)
    print("Decompressed file path: " + decom_path)
Example #6
def mask_compression(mask):
    # Run-length encode the binary mask, then return an estimate (in bits)
    # of the Huffman-compressed size of the run lengths.
    prev = 1
    rl = 0
    result = []
    for e in mask:
        if e == prev:
            rl += 1
        else:
            result += [rl]
            rl = 0
        prev = e
    if rl > 0:
        result += [rl]
    huffman = HuffmanCoding()
    size = len(huffman.compress(result)) * 4
    return size
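A small usage sketch, assuming the mask is a flat array of 0/1 values as in Example #22 (the values and the numpy import are only for illustration):

import numpy as np

mask = np.array([1, 1, 1, 0, 0, 0, 0, 1, 1])  # hypothetical binary mask
print("estimated mask size:", mask_compression(mask), "bits")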
Example #7
def use_huffman(filename, wordlength=14):
    h = HuffmanCoding(filename, wordlength)
    output_path = h.compress()
    #h.decompress(output_path)
    return output_path
Example #8
# # cv2.imshow("deltaback", img)

# print "Redecoded entropy: "
# print shannon_entropy(img)

filenames = glob.glob("images/*.png")

images = [cv2.imread(img) for img in filenames]

sum_ratio = 0

for img in images:
    img = img[:, :, 0]
    img = delta_encode(img)
    h = HuffmanCoding(img, os.getcwd() + "/test")
    h.compress()
    img = h.decompress(os.getcwd() + "/test.bin")
    img = delta_decode(img)

    rawsize = os.stat('raw.bin')
    testsize = os.stat('test.bin')

    ratio = float(float(testsize.st_size) / float(rawsize.st_size))
    sum_ratio += ratio

    print "Redecoded entropy: "
    print shannon_entropy(img)

    print "Compression ratio: "
    print ratio
Example #9
from huffman import HuffmanCoding
import sys
from pathlib import Path
import time

import cv2

path = "tiger.bmp"

image = cv2.imread(path, 0)
cv2.imwrite("tiger_gray.bmp", image)
h = HuffmanCoding(path)

output_path, image_shape = h.compress()
print("Compressed file path: " + output_path)
a = Path("tiger_gray.bmp").stat().st_size
b = Path(output_path).stat().st_size

print("Calculating size")
for i in range(10):
    print(".", end='')
    time.sleep(1)
decom_path = h.decompress(output_path, image_shape)

print("compression percent", 100 * (a - b) / a)
print("Decompressed file path: " + decom_path)
Example #10
def encode_block(self, block):
    # Serialize the block in zig-zag (diagonal) order and Huffman-encode it
    symbols = diagonalOrder(block)
    h = HuffmanCoding(symbols)
    encoded = h.compress()
    return (encoded, h.reverse_mapping)
Example #11
clientSocket = socket.socket()
host = socket.gethostname()
port = 9001

clientSocket.connect((host, port))

filePath = input("Enter the path of the file: ")
fileName = os.path.basename(filePath)

start = datetime.now()

clientSocket.send(bytes(fileName, "utf-8"))

huffman = HuffmanCoding(filePath)
compressedFilePath = huffman.compress()
sleep(1)

clientSocket.send(pickle.dumps(huffman))

with open(compressedFilePath, "rb") as fp:
    data = fp.read(1024)
    while data:
        clientSocket.send(data)
        data = fp.read(1024)

end = datetime.now()

duration = end - start
compressionRatio = os.path.getsize(filePath) / os.path.getsize(
    compressedFilePath)
Example #12
def compress(fromdir):
    h = HuffmanCoding(fromdir)
    output_path = h.compress()
    return output_path
Example #13
total_cmp = 0
total_len = 0
h = HuffmanCoding(2)

for filename in os.listdir("contracts"):
    with open("contracts/" + filename, 'r') as file:
        text = file.read().rstrip()
        total_len += len(text)
        h.make_frequency_dict(text)

# compressing all contracts
for filename in os.listdir("contracts"):
    with open("contracts/" + filename,
              'r') as input, open("compressed_contracts/" + filename,
                                  'w') as output:
        compressed = h.compress(input.read())
        total_cmp += len(compressed)
        output.write(compressed)

saved = total_len - total_cmp

print("Total chars saved: " + str(saved))
print("Compression rate: " + str(saved * 100 / total_len) + "%")

# decompressing all contracts
for filename in os.listdir("compressed_contracts"):
    with open("compressed_contracts/" + filename,
              "r") as input, open("decompressed_contracts/" + filename,
                                  'w') as output:
        text = input.read()
        decompressed = h.decompress(text)
        output.write(decompressed)
Example #14
    # Step 6: Huffman and RLE
    # blocs_stringY = ''.join([str(elem) for elem in bloc[0]])
    # blocs_stringCb = ''.join([str(elem) for elem in bloc[1]])
    # blocs_stringCr = ''.join([str(elem) for elem in bloc[2]])
    # str_bloc = np.asarray(bloc).flatten().astype('str')
    # str_bloc = ''.join(str_bloc)
    # #print(blocs_stringCr)
    # rleBlock = RLE.compress(str_bloc)
    # rleBlock = str(int(rleBlock, 2))
    # huffmanBlock = HuffmanCoding.compress(rleBlock)
    # jpegImage.append(huffmanBlock)

rle_compress = RLE.compress(zigzag_string)
rle_ascii = ''
for i in range(len(rle_compress)):
    rle_ascii += chr(int(rle_compress[i], 2))
huffman_compress, dictionnaire = HuffmanCoding.compress(rle_ascii)
print(len(huffman_compress))

# Decompress

#cv2.imshow("Normal", image)
#cv2.imshow("YCC", YCCimage)
cv2.waitKey()


Example #15
def huffman_encoding(filename):
    print("Huffman Encoding...")
    h = HuffmanCoding(filename)
    output_path = h.compress()
    #print(output_path)
    h.decompress(output_path)
Example #16
from huffman import HuffmanCoding

# Input file path on your PC where the files are stored
path = "C:/Users/Panchal/Desktop/huffman/sample.txt"

h = HuffmanCoding(path)

h.compress()  # calling the compress function

h.decompress("C:/Users/Panchal/Desktop/huffman/sample.bin")  # calling the decompress function
Example #17
def main():
    h = HuffmanCoding(sample_data)
    output_path = h.compress()
    h.decompress(output_path)
Example #18
        print("Invalid Switch/Usage " + sys.argv[1])
    print("Usage :\n")
    print("To compress : \npython " + sys.argv[0] +
          " -c filename.txt [dictfile.dict]\n")
    print("To decompress : \npython " + sys.argv[0] +
          " -x filename.bin [dictfile.dict]")
    print(
        "filename.dict is optional, to be used if the dictionary was saved under a different name."
    )
    exit(0)

if sys.argv[1] == '-c':
    print()
    pathf = sys.argv[2]
    dictf = ''
    if len(sys.argv) > 3:
        dictf = sys.argv[3]
    h = HuffmanCoding(pathf)
    out = h.compress()
    h.save_codes(dictf)
    h.get_code()
    h.get_freq()
elif sys.argv[1] == '-x':
    print()
    pathf = sys.argv[2]
    dictf = ''
    if len(sys.argv) > 3:
        dictf = sys.argv[3]
    h = HuffmanCoding(pathf, dictf)
    h.decompress()
Example #19
def testing(text, test_number, path, test_name):

    ratio = []
    timing = []

    print(f"test number: {test_number}")
    output = open(path + f"/test_{test_number}.txt", 'w')
    output.write(text)
    output.close()  # flush to disk before measuring the size
    original_size = os.path.getsize(path + f"/test_{test_number}.txt")

    # Huffman
    print("Compressing with Huffman...")
    h = HuffmanCoding(output.name)

    start = time.time()
    compressed = h.compress()
    timing.append((time.time() - start) * 1000)

    h.decompress(compressed)
    ratio.append(os.path.getsize(compressed) / original_size * 100)
    print("Compressing with Huffman finished")

    # RLE
    print("Compressing with RLE...")
    rle = RLE()
    output = open(path + f"/test_{test_number}_rle.rle", 'w')

    start = time.time()
    output.write(rle.encode(text))
    timing.append((time.time() - start) * 1000)
    output.close()  # flush before measuring the file size

    ratio.append(
        os.path.getsize(path + f"/test_{test_number}_rle.rle") /
        original_size * 100)
    print("Compressing with RLE finished")

    # LZW
    print("Compressing with LZW...")

    start = time.time()
    lzw3Compressor.LZWCompressor().compress(
        path + f"/test_{test_number}.txt",
        path + f"/test_{test_number}_lzw.lzw")
    timing.append((time.time() - start) * 1000)

    # lzw3Decompressor.LZWDecompressor().decompress(path + f"/test_{test_number}_lzw.lzw", path + f"/test_{test_number}_lzw_decompressed.txt")
    ratio.append(
        os.path.getsize(path + f"/test_{test_number}_lzw.lzw") /
        original_size * 100)
    print("Compressing with LZW finished")

    # LZ78
    print("Compressing with LZ78...")
    output = open(path + f"/test_{test_number}_lz78.lz78", 'w')

    start = time.time()
    output.write(lz78_compress(text))
    timing.append((time.time() - start) * 1000)
    output.close()  # flush before measuring the file size

    ratio.append(
        os.path.getsize(path + f"/test_{test_number}_lz78.lz78") /
        original_size * 100)
    print("Compressing with LZ78 finished")

    # PPM
    print("compression with PPM...")

    start = time.time()
    ppm_compression(path + f"/test_{test_number}.txt",
                    path + f"/test_{test_number}_ppm.ppm")
    timing.append((time.time() - start) * 1000)

    # ppm_decompression(path + f"/test_{test_number}_ppm.ppm", path + f"/test_{test_number}_ppm_decompresed.txt")
    ratio.append(
        os.path.getsize(path + f"/test_{test_number}_ppm.ppm") /
        original_size * 100)
    print("compressing with PPM finished")

    save_bar_graph(
        ratio, timing,
        f"{test_name} N°{test_number}\nOriginal Size: {original_size} bytes",
        f"graphs/{test_name} {test_number}.svg")

    tick_label = ['Huffman', 'RLE', 'LZW', 'LZ78', 'PPM']

    with open(os.getcwd() + f"/data.txt", 'a') as records:
        records.write(f"\nOriginal Size: {original_size} bytes\n")
        records.write(f"\t\t\tSize\t\tCompression Ratio\t\t\tTime\n")
        for i in range(5):
            spacing = [
                "\t" if i != 0 else "", "\t" if int(ratio[i]) < 100 else "",
                "\t" if int(ratio[i] / 100 * original_size) < 100000 else ""
            ]
            records.write(
                f"{tick_label[i]}:\t{spacing[0]}{int(ratio[i]/100*original_size)} bytes{spacing[2]}\t{ratio[i]}%\t\t{timing[i]} ms\n"
            )

    return ratio, timing
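A hedged sketch of how this benchmark might be called; it assumes `os` is imported, that the RLE, lzw3Compressor, ppm_compression and save_bar_graph helpers are importable, and that the output and graphs/ directories exist (the sample text below is made up):

sample = "abracadabra " * 500  # hypothetical repetitive input
ratio, timing = testing(sample, 1, os.getcwd(), "Repetitive text")
print(ratio)   # compression ratios (% of original size) per algorithm
print(timing)  # compression times in ms per algorithm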
Example #20
def upload_file():
    f = request.files['file']
    tag = request.form['tag']
    data = bytes(f.read())

    input_file_size = len(data)
    filename, file_extension = os.path.splitext(f.filename)

    if (tag == "huffman"):
        h = HuffmanCoding(data)
        huffman_file_size = h.compress(f.filename)
        return jsonify({
            'success': True,
            'fileSize': input_file_size,
            'HuffmanEncoding': {
                'compressionRatio': huffman_file_size / input_file_size,
                'compressionFactor': input_file_size / huffman_file_size,
                'savingPercentage':
                (input_file_size - huffman_file_size) / input_file_size,
                'fileSize': huffman_file_size
            },
        })
    if (tag == "shannon"):
        ShannonCompress(data, f.filename)
        shf_file_size = os.path.getsize(filename + ".shf")
        os.remove(filename + ".shf")
        return jsonify({
            'success': True,
            'fileSize': input_file_size,
            'ShannonFano': {
                'compressionRatio': shf_file_size / input_file_size,
                'compressionFactor': input_file_size / shf_file_size,
                'savingPercentage':
                (input_file_size - shf_file_size) / input_file_size,
                'fileSize': shf_file_size
            },
        })
    if (tag == "lempel"):
        LempelZivWelch(data, f.filename, 8)
        lzw_file_size = os.path.getsize(filename + ".lzw")
        os.remove(filename + ".lzw")
        return jsonify({
            'success': True,
            'fileSize': input_file_size,
            'LempelZivWelch': {
                'compressionRatio': lzw_file_size / input_file_size,
                'compressionFactor': input_file_size / lzw_file_size,
                'savingPercentage':
                (input_file_size - lzw_file_size) / input_file_size,
                'fileSize': lzw_file_size
            }
        })
    if (tag == "rle"):
        RunLengthEncoding(data, f.filename)
        rle_file_size = os.path.getsize(filename + ".rle")
        os.remove(filename + ".rle")
        return jsonify({
            'success': True,
            'fileSize': input_file_size,
            'RunLengthEncoding': {
                'compressionRatio': rle_file_size / input_file_size,
                'compressionFactor': input_file_size / rle_file_size,
                'savingPercentage':
                (input_file_size - rle_file_size) / input_file_size,
                'fileSize': rle_file_size
            },
        })

    return jsonify({'success': False, "message": "Please pass a valid tag"})
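A minimal client-side sketch for exercising this endpoint with the `requests` library; the host, route, and file name below are assumptions, since the snippet does not show how the view is registered:

import requests

with open("sample.txt", "rb") as fp:          # placeholder input file
    resp = requests.post(
        "http://localhost:5000/upload",       # assumed host/route for upload_file()
        files={"file": fp},                   # read via request.files['file']
        data={"tag": "huffman"},              # read via request.form['tag']
    )
print(resp.json())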
Example #21
from huffman import HuffmanCoding
import sys

path = "textfile.txt"

h = HuffmanCoding(path)

output_path = h.compress()
print("Compressed file path: " + output_path)

decom_path = h.decompress(output_path)
print("Decompressed file path: " + decom_path)
Example #22
    def forward(self, x, ss_map=None):
        # sample from input
        if self.use_subsampling:
            x, thresh = x
            self.sizes[0] += x.view(-1).size(0) * 8
            # feature
            feat_1 = self.ctx(x)
            feat_1_ = self.unpool(feat_1)
        else:
            self.sizes[0] += x.view(-1).size(0) * 8
        x = self.sample(x)
        # after CNN
        self.sizes[1] += x.view(-1).size(0) * 8

        if ss_map is not None:
            ss_map = self.unpool(ss_map) > 0.5
            unpooled = self.unpool(self.pool(x))
            x = torch.where(ss_map, unpooled, x)

        # subsampling
        # data to be sent: mask + actual data
        B, C, H, W = x.size()
        if self.use_subsampling:
            th_1 = thresh
            # sub-sample
            ss_1 = self.unpool(self.pool1(x))
            # conditions
            cond_1 = feat_1_ < th_1
            mask_1 = feat_1 < th_1
            # subsampled data in different areas
            data_1 = self.pool1(x)[mask_1]
            cond_0 = torch.logical_not(cond_1)
            data_0 = x[cond_0]
            comp_data = torch.cat((data_0, data_1), 0)
            # after RAF
            self.sizes[2] += comp_data.size(0) * 8
            # affected data in the original shape
            if not self.training:
                x = torch.where(cond_1, ss_1, x)
            else:
                x = torch.mul(x, feat_1_) + torch.mul(ss_1, 1 - feat_1_)

        # quantization
        xsize = list(x.size())
        x = x.view(*(xsize + [1]))
        quant_dist = torch.pow(x - self.centers, 2)
        softout = torch.sum(self.centers *
                            nn.functional.softmax(-quant_dist, dim=-1),
                            dim=-1)
        minval, index = torch.min(quant_dist, dim=-1, keepdim=True)
        hardout = torch.sum(self.centers * (minval == quant_dist), dim=-1)
        x = softout
        # x = softout + (hardout - softout).detach()
        if self.use_subsampling:
            comp_data = comp_data.view(*(list(comp_data.size()) + [1]))
            quant_dist = torch.pow(comp_data - self.centers, 2)
            index2 = torch.min(quant_dist, dim=-1, keepdim=True)[1]
            # after Q
            self.sizes[3] += index2.view(-1).size(0) * 3
            # run-length coding on the bitmap
            huffman = HuffmanCoding()
            real_size = len(huffman.compress(
                index2.view(-1).cpu().numpy())) * 4  # bit
            rle_len1 = mask_compression(mask_1.view(-1).cpu().numpy())
            real_size += rle_len1
            # after lossless
            self.sizes[4] += real_size
            filter_loss = torch.mean(feat_1)
            real_cr = 1 / 16. * real_size / (H * W * C * B * 8)
            softmax_dist = nn.functional.softmax(-quant_dist, dim=-1)
            soft_prob = torch.mean(softmax_dist, dim=0)
            entropy = -torch.sum(torch.mul(soft_prob, torch.log(soft_prob)))
            return x, (filter_loss, real_cr, entropy)
        else:
            self.sizes[2] += index.view(-1).size(0) * 3
            huffman = HuffmanCoding()
            real_size = len(huffman.compress(index.view(-1).cpu().numpy())) * 4
            self.sizes[3] += real_size
            real_cr = 1 / 16. * real_size / (H * W * C * B * 8)
            return x, real_cr