def decode(self):
    """Reconstruct the full image from the per-channel Huffman-encoded DCT blocks.

    Iterates over every encoded block of every channel, Huffman-decompresses
    the zig-zag coefficient stream, rebuilds the coefficient matrix, applies
    the inverse DCT, and writes the (possibly cropped) block into the output.

    :returns: numpy array with the same shape as ``self.img``.
    """
    output = np.zeros(self.img.shape)
    # Padded column count; assumes self.cpad pads the width up to a
    # multiple of block_size — TODO confirm against the encoder.
    cnum = self.img.shape[1] + self.cpad
    horizontal_block_count = cnum // self.block_size
    # Throw-away codec instance; only its reverse_mapping is swapped in
    # per block so decompress() uses each block's own code table.
    h_tmp = HuffmanCoding([])
    for channel in range(self.img.shape[2]):
        for index, (encoded, rev_map) in enumerate(self.encoded_blocks[channel]):
            # Recover the 2-D grid position from the flat block index.
            i = index // horizontal_block_count
            j = index % horizontal_block_count
            # Clamp block bounds at the image edges (last row/column of
            # blocks may be partial).
            r_min = i * self.block_size
            r_max = min((i + 1) * self.block_size, self.img.shape[0])
            row_diff = r_max - r_min
            c_min = j * self.block_size
            c_max = min((j + 1) * self.block_size, self.img.shape[1])
            col_diff = c_max - c_min
            h_tmp.reverse_mapping = rev_map
            zigzag = h_tmp.decompress(encoded)
            # De-zigzag back into a block_size x block_size coefficient matrix.
            coeffs = fillDiagonal(zigzag, self.block_size)
            # Channel 0 is treated as luma by the inverse DCT — presumably
            # a different quantization table; verify against inv_dct.
            is_Y = (channel == 0)
            block = inv_dct(coeffs, mult=self.mult, is_Y=is_Y)
            # Crop away padding for edge blocks before writing back.
            output[r_min:r_max, c_min:c_max, channel] = block[:row_diff, :col_diff]
    return output
def main():
    """Round-trip one cow image through an FFT + Huffman compression pipeline.

    Reads the folder of CSV files (each holding an image's pixel matrix) and
    selects the first file.  The matrix is first compressed with FFT (which
    writes a CSV file), then that CSV is compressed again with Huffman coding
    (producing a binary file).  The binary file is then Huffman-decompressed
    back to a CSV, which is FFT-decompressed.  Finally, PIL is used to show
    the original image and the decompressed image side by side.

    :raises: file not found
    :rtype: Image in PNG format
    """
    # NOTE(review): hard-coded absolute user paths — only works on the
    # original author's machine.
    directory_sick_cow = "/Users/isabella/Documents/Segundo Semestre/Estructura de Datos /Proyecto/ganado_enfermo_csv"
    directory_healthy_cow = "/Users/isabella/Documents/Segundo Semestre/Estructura de Datos /Proyecto/Entrega 3/Codigo/huffman-coding-master/Vacas_Enferma"
    directory = directory_healthy_cow
    cont = os.listdir(directory)
    # Load only the first CSV in the directory.
    matriz_csv_var = load_img(directory+'/'+cont[0])
    # FFT compression keeping 5% of coefficients; writes compressFFT.csv.
    fft_compression(matriz_csv_var, 0.05)
    h = HuffmanCoding("compressFFT.csv")
    output_path = h.compress()
    print("Compressed file path: " + output_path)
    decom_path = h.decompress(output_path)
    print("Decompressed file path: " + decom_path)
    img_fft_descompress = fft_descompression(decom_path)
    # Show original and reconstructed images for visual comparison.
    show_img(matriz_csv_var)
    show_img(img_fft_descompress)
    savetxt('dataff.csv', matriz_csv_var, delimiter=',')
def algorithm(path):
    """Compress and decompress *path* with Huffman coding, timing each phase.

    :param path: path of the file to round-trip.
    :returns: tuple ``(output_path, delta_time1, delta_time2, decom_path)`` —
        compressed file path, compression time (s), decompression time (s),
        and decompressed file path.
    """
    h = HuffmanCoding(path)
    # FIX: time.time() is a low-resolution wall clock that can jump (NTP,
    # DST); perf_counter() is the monotonic clock intended for intervals.
    start = time.perf_counter()
    output_path = h.compress()
    delta_time1 = time.perf_counter() - start
    start = time.perf_counter()
    decom_path = h.decompress(output_path)
    delta_time2 = time.perf_counter() - start
    return output_path, delta_time1, delta_time2, decom_path
def main(argv):
    """CLI entry point: Huffman-compress then decompress ``argv[1]``.

    :param argv: argument vector; ``argv[1]`` is the input file path and
        ``argv[2]`` (optional) the read bit size, defaulting to 8.
    """
    filepath = argv[1]
    read_bit_size = 8
    if len(argv) > 2:
        # BUG FIX: argv entries are strings — the original passed e.g. "16"
        # where HuffmanCoding expects an integer bit size.
        read_bit_size = int(argv[2])
    print(read_bit_size)
    h = HuffmanCoding(filepath, read_bit_size)
    output_path = h.compress()
    print("Compressed file path: " + output_path)
    decom_path = h.decompress(output_path)
    print("Decompressed file path: " + decom_path)
from huffman import HuffmanCoding
import sys

# Round-trip "textfile.txt" through Huffman compression and report the
# paths of the compressed and decompressed files.
path = "textfile.txt"
coder = HuffmanCoding(path)

compressed_path = coder.compress()
print("Compressed file path: " + compressed_path)

restored_path = coder.decompress(compressed_path)
print("Decompressed file path: " + restored_path)
from huffman import HuffmanCoding

# Round-trip sample.txt through Huffman compression.
path = "/home/ubuntu/Desktop/github/datastructures-algorithms/sample.txt"

h = HuffmanCoding(path)
output_path = h.compress()
# BUG FIX: the message announced the compressed file path but never printed
# it; include output_path like the sibling driver scripts do.
print("Compressed file path: " + output_path)
h.decompress(output_path)
def testing(text, test_number, path, test_name):
    """Benchmark Huffman, RLE, LZW, LZ78 and PPM compression on *text*.

    Writes the sample to ``path/test_<n>.txt``, compresses it with each
    algorithm, records compression ratio (percent of original size) and
    timing (ms), saves a bar graph, and appends a table row to ``data.txt``.

    :param text: the sample text to compress.
    :param test_number: index used to name the per-test files.
    :param path: directory where test artifacts are written.
    :param test_name: label used in the graph title/filename.
    :returns: ``(ratio, timing)`` lists in the order
        Huffman, RLE, LZW, LZ78, PPM.
    """
    ratio = []
    timing = []
    print(f"test number: {test_number}")
    sample_path = path + f"/test_{test_number}.txt"
    # BUG FIX: the original never closed the files it wrote, so
    # os.path.getsize() (and compressors re-reading the file) could observe
    # partially-flushed, buffered data and report wrong sizes/ratios.
    with open(sample_path, 'w') as output:
        output.write(text)
    original_size = os.path.getsize(sample_path)

    # Huffman
    print("Compressing with Huffman...")
    h = HuffmanCoding(sample_path)
    start = time.time()
    compressed = h.compress()
    timing.append((time.time() - start) * 1000)
    h.decompress(compressed)
    ratio.append(os.path.getsize(compressed) / original_size * 100)
    print("Compressing with Huffman finished")

    # RLE
    print("Compressing with RLE...")
    rle = RLE()
    rle_path = path + f"/test_{test_number}_rle.rle"
    output = open(rle_path, 'w')
    start = time.time()
    output.write(rle.encode(text))
    timing.append((time.time() - start) * 1000)
    output.close()  # flush before measuring size
    ratio.append(os.path.getsize(rle_path) / original_size * 100)
    print("Compressing with RLE finished")

    # LZW
    print("Compressing with LZW...")
    start = time.time()
    lzw3Compressor.LZWCompressor().compress(
        sample_path, path + f"/test_{test_number}_lzw.lzw")
    timing.append((time.time() - start) * 1000)
    # lzw3Decompressor.LZWDecompressor().decompress(path + f"/test_{test_number}_lzw.lzw", path + f"/test_{test_number}_lzw_decompressed.txt")
    ratio.append(
        os.path.getsize(path + f"/test_{test_number}_lzw.lzw") /
        original_size * 100)
    print("Compressing with LZW finished")

    # LZ78
    print("Compressing with LZ78...")
    lz78_path = path + f"/test_{test_number}_lz78.lz78"
    output = open(lz78_path, 'w')
    start = time.time()
    output.write(lz78_compress(text))
    timing.append((time.time() - start) * 1000)
    output.close()  # flush before measuring size
    ratio.append(os.path.getsize(lz78_path) / original_size * 100)
    print("Compressing with LZ78 finished")

    # PPM
    print("compression with PPM...")
    start = time.time()
    ppm_compression(sample_path, path + f"/test_{test_number}_ppm.ppm")
    timing.append((time.time() - start) * 1000)
    # ppm_decompression(path + f"/test_{test_number}_ppm.ppm", path + f"/test_{test_number}_ppm_decompresed.txt")
    ratio.append(
        os.path.getsize(path + f"/test_{test_number}_ppm.ppm") /
        original_size * 100)
    print("compressing with PPM finished")

    save_bar_graph(
        ratio, timing,
        f"{test_name} N°{test_number}\nOriginal Size: {original_size} bytes",
        f"graphs/{test_name} {test_number}.svg")

    tick_label = ['Huffman', 'RLE', 'LZW', 'LZ78', 'PPM']
    with open(os.getcwd() + f"/data.txt", 'a') as records:
        records.write(f"\nOriginal Size: {original_size} bytes\n")
        records.write(f"\t\t\tSize\t\tCompression Ratio\t\t\tTime\n")
        for i in range(5):
            # Tab padding to keep the hand-rolled table columns aligned.
            spacing = [
                "\t" if i != 0 else "",
                "\t" if int(ratio[i]) < 100 else "",
                "\t" if int(ratio[i] / 100 * original_size) < 100000 else ""
            ]
            records.write(
                f"{tick_label[i]}:\t{spacing[0]}{int(ratio[i]/100*original_size)} bytes{spacing[2]}\t{ratio[i]}%\t\t{timing[i]} ms\n"
            )
    return ratio, timing
def main():
    """Round-trip the module-level sample_data through Huffman coding."""
    coder = HuffmanCoding(sample_data)
    compressed = coder.compress()
    coder.decompress(compressed)
from huffman import HuffmanCoding #input file path of your pc where the files are stored path = "C:/Users/Panchal/Desktop/huffman/sample.txt" h = HuffmanCoding(path) h.compress() # calling compress function h.decompress("C:/Users/Panchal/Desktop/huffman/sample.bin" ) # calling decompresse fuction
# print "Redecoded entropy: " # print shannon_entropy(img) filenames = glob.glob("images/*.png") images = [cv2.imread(img) for img in filenames] sum_ratio = 0 for img in images: img = img[:, :, 0] img = delta_encode(img) h = HuffmanCoding(img, os.getcwd() + "/test") h.compress() img = h.decompress(os.getcwd() + "/test.bin") img = delta_decode(img) rawsize = os.stat('raw.bin') testsize = os.stat('test.bin') ratio = float(float(testsize.st_size) / float(rawsize.st_size)) sum_ratio += ratio print "Redecoded entropy: " print shannon_entropy(img) print "Compression ratio: " print ratio avg_ratio = sum_ratio / len(images)
from huffman import HuffmanCoding
import sys
from pathlib import Path
import time
import cv2

# Round-trip tiger.bmp through Huffman compression and report the
# compression percentage against the grayscale copy.
path = "tiger.bmp"
image = cv2.imread(path, 0)  # 0 = read as grayscale
cv2.imwrite("tiger_gray.bmp", image)

h = HuffmanCoding(path)
output_path, image_shape = h.compress()
print("Compressed file path: " + output_path)

a = Path("tiger_gray.bmp").stat().st_size  # grayscale original size
b = Path(output_path).stat().st_size       # compressed size

print("Calculating size")
# Cosmetic ten-second "progress" animation — no real work happens here.
for i in range(10):
    print(".", end='')
    time.sleep(1)

decom_path = h.decompress(output_path, image_shape)
print("compression percent", 100 * (a - b) / a)
print("Decompressed file path: " + decom_path)
# NOTE(review): this chunk looks like the tail of a CLI dispatcher.  The
# usage/error block and exit(0) below presumably sit under a preceding
# argument-validation branch that is outside this view — if they were truly
# top-level, the if/elif dispatch after them would be unreachable.  Confirm
# indentation against the full file.
print("Invalid Switch/Usage " + sys.argv[1])
print("Usage :\n")
print("To compress : \npython " + sys.argv[0] + " -c filename.txt [dictfile.dict]\n")
print("To decompress : \npython " + sys.argv[0] + " -x filename.bin [dictfile.dict]")
print(
    "filename.dict is optional, to be used if the dictionary was saved under a different name."
)
exit(0)

# -c: compress sys.argv[2], optionally saving the code dictionary to sys.argv[3].
if [sys.argv[1]] == ['-c']:
    print()
    pathf = sys.argv[2]
    dictf = ''
    if len(sys.argv) > 3:
        dictf = sys.argv[3]
    h = HuffmanCoding(pathf)
    out = h.compress()
    h.save_codes(dictf)
    h.get_code()
    h.get_freq()
# -x: decompress sys.argv[2], optionally loading the dictionary from sys.argv[3].
elif [sys.argv[1]] == ['-x']:
    print()
    pathf = sys.argv[2]
    dictf = ''
    if len(sys.argv) > 3:
        dictf = sys.argv[3]
    h = HuffmanCoding(pathf, dictf)
    h.decompress()
path = input("Give file...\n")
# BUG FIX: the original rebound the builtin `input` to the user's answer
# (`input = input(...)`), shadowing it for the rest of the script.
answer = input("Run on Debug ? \n")
debug = True if answer == '1' else False

# Compression: create the .bin file.
h = HuffmanCoding(path, debug)
output_path = h.compress()

# Take all the bits from the compressed file.
compressed_bytes = make_string_with_bits("test1.bin")
compressed_bytes = compressed_bytes[0:4]  # keep only the first 4 bits

# Build the bit array for the 4-bit message.
lista = []
for i in compressed_bytes:
    lista.append(int(i))

# Encode the 4-bit message with Hamming.
x = Hamiltation(np.array(lista))  # init the message to the class
encoded = x.encode(debug)

# Decode the 8-bit codeword.
# BUG FIX: the original line was `decoded = x.decode(debug))` — an
# unbalanced parenthesis, i.e. a syntax error.
x = Hamiltation(np.array(encoded))
decoded = x.decode(debug)

h.decompress(output_path)  # decompress; after the decode you have to give the file
from huffman import HuffmanCoding
import sys

if __name__ == "__main__":
    # argv[1] is the file to round-trip through Huffman coding.
    path = sys.argv[1]
    coder = HuffmanCoding(path)  # builds path, codes, heap and reverse mapping
    print("Compressing...")
    compressed = coder.compress()
    print(f"Compressed file: {compressed}\n")
    print("Decompressing...")
    restored = coder.decompress(compressed)  # takes the compressed file path
    print(f"Decompressed file: {restored}")
def decompress(fromdir):
    """Huffman-decompress the file at *fromdir* and return the output path."""
    return HuffmanCoding(fromdir).decompress(fromdir)
output_path = h.compress() #encode('1compressed.bin') #hammtest #encode_hamming('1compressed.bin') #correction hamming_encode('1compressed.bin') #addnoise = Noisy('2encrypted.bin') #addnoise.volume_it_up('2encrypted.bin') #correct('2encrypted.bin') #detect_error('2encrypted.bin') hamming_correct('2encrypted.bin') #detect_error('3noise.bin') decompressed = h.decompress('4decrypted.bin') print("*** file info ***") print("BEFORE COMPRESSION : File length = " + str(Path(path).stat().st_size)) print('BEFORE COMPRESSION : Entropy =' + str(calculateEntropy(path))) print("AFTER COMPRESSION : File length : " + str(Path('1compressed.bin').stat().st_size)) print('AFTER COMPRESSION : Entropy :' + str(calculateEntropy('1compressed.bin'))) print("AFTER ENCRYPTION : File length : " + str(Path('2encrypted.bin').stat().st_size)) print('AFTER ENCRYPTION : Entropy :' + str(calculateEntropy('2encrypted.bin'))) print("AFTER DECRYPTION : File length : " + str(Path('4decrypted.bin').stat().st_size)) print('AFTER DECRYPTION : Entropy :' + str(calculateEntropy('4decrypted.bin')))
# NOTE(review): this chunk starts mid-scope — the first three lines read from
# `file` and presumably sit inside a `with open(...) as file:` loop that is
# outside this view; confirm indentation against the full file.
text = file.read().rstrip()
total_len += len(text)
h.make_frequency_dict(text)

# compressing all contracts
for filename in os.listdir("contracts"):
    # NOTE: `input` and `output` shadow the builtins here.
    with open("contracts/" + filename, 'r') as input, open("compressed_contracts/" + filename, 'w') as output:
        compressed = h.compress(input.read())
        total_cmp += len(compressed)
        output.write(compressed)

saved = total_len - total_cmp
print("Total chars saved: " + str(saved))
print("Compression rate: " + str(saved * 100 / total_len) + "%")

# decompressing all contracts
for filename in os.listdir("compressed_contracts"):
    with open("compressed_contracts/" + filename, "r") as input, open("decompressed_contracts/" + filename, 'w') as output:
        text = input.read()
        decompressed = h.decompress(text)
        # Empty decompression result: log the compressed length for debugging.
        if len(decompressed) == 0:
            print(len(text))
        output.write(decompressed)

# verify()
def huffman_encoding(filename):
    """Compress *filename* with Huffman coding, then decompress the result."""
    print("Huffman Encoding...")
    coder = HuffmanCoding(filename)
    compressed = coder.compress()
    # print(compressed)
    coder.decompress(compressed)