def main():
    """CLI entry point: dispatch sys.argv[1] to compress / plot_epsilon / plot_L.

    Prints an error message plus usage help when the arguments are missing
    or malformed.
    """
    option = sys.argv[1]
    try:
        if option == 'compress':
            # compress <file_name> <L> <epsilon>
            if len(sys.argv) != 5:
                raise Error()
            file_name = str(sys.argv[2])
            L = int(sys.argv[3])
            epsilon = float(sys.argv[4])
            compress.compress(file_name, L, epsilon)
        elif option == 'plot_epsilon':
            # plot_epsilon <L> <start> <end> <step> <n>
            if len(sys.argv) != 7:
                raise Error()
            L = float(sys.argv[2])
            start = float(sys.argv[3])
            end = float(sys.argv[4])
            step = float(sys.argv[5])
            n = int(sys.argv[6])
            compress.plot_epsilon(L, start, end, step, n)
        elif option == 'plot_L':
            # plot_L <epsilon> <n>
            if len(sys.argv) != 4:
                raise Error()
            epsilon = float(sys.argv[2])
            n = int(sys.argv[3])
            compress.plot_L(epsilon, n)
        else:
            raise Error()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; bad int()/float() conversions land here too.
        print('Error processing parameters.')
        print_help()
def test_bmp_file_2(self):
    """A BMP image must survive a compress/decompress round trip losslessly."""
    source = "./testdata/additional/bmp/poirot_new.bmp"
    compress(source, FILE_COMPRESSED_NAME, 1024 * 1024, False)
    decompress(FILE_COMPRESSED_NAME, FILE_DECOMPRESSED_NAME)
    self.assertTrue(compare_files(source, FILE_DECOMPRESSED_NAME))
    # Clean up the artifacts so later tests start from a fresh directory.
    for produced in (FILE_COMPRESSED_NAME, FILE_DECOMPRESSED_NAME):
        os.remove(produced)
def Start(): """right click -- entry the main program""" # First step-- <Saviour -- system stray> import Saviour Saviour.Saviour() # Second step--<search -- system stray> if Saviour.sign_1 == True: import search search.search() # Third step--<compress -- system stray> if search.sign_2 == True: import compress compress.compress() # Fourth step--<launch -- system stray> if compress.sign_3 == True: import launch_check if launch_check.sign_4 == True: import launch_final # Fifth step-- <boom -- system stray> if launch_check.sign_4 == True: import boom win32api.PostQuitMessage(0) sys.exit()
def test_122kb_file_16mb_dict(self):
    """A 122 KB file with window 3500 and flag True must round-trip losslessly."""
    original = "./testdata/basic/KB0122"
    compress(original, FILE_COMPRESSED_NAME, 3500, True)
    decompress(FILE_COMPRESSED_NAME, FILE_DECOMPRESSED_NAME)
    self.assertTrue(compare_files(original, FILE_DECOMPRESSED_NAME))
    # Remove artifacts so the working directory stays clean between tests.
    for produced in (FILE_COMPRESSED_NAME, FILE_DECOMPRESSED_NAME):
        os.remove(produced)
def test_4kb_file(self):
    """A small 4 KB file must round-trip through compress/decompress losslessly."""
    original = "./testdata/basic/KB0004"
    compress(original, FILE_COMPRESSED_NAME, 1000, False)
    decompress(FILE_COMPRESSED_NAME, FILE_DECOMPRESSED_NAME)
    self.assertTrue(compare_files(original, FILE_DECOMPRESSED_NAME))
    # Tidy up the generated files.
    for produced in (FILE_COMPRESSED_NAME, FILE_DECOMPRESSED_NAME):
        os.remove(produced)
def test_pdf_file(self):
    """A PDF must survive a compress/decompress round trip byte-for-byte."""
    original = "./testdata/additional/pdf/Izb_04_08.pdf"
    compress(original, FILE_COMPRESSED_NAME, 1024 * 1024, False)
    decompress(FILE_COMPRESSED_NAME, FILE_DECOMPRESSED_NAME)
    self.assertTrue(compare_files(original, FILE_DECOMPRESSED_NAME))
    # Tidy up the generated files.
    for produced in (FILE_COMPRESSED_NAME, FILE_DECOMPRESSED_NAME):
        os.remove(produced)
def compressJS(debug=False):
    """compress javascript files

    Runs the project's JS compressor over the HSLayers config, reading
    sources from SOURCEDIR and writing the result under BUILDDIR.
    """
    import compress
    # Fix: removed the `global SOURCEDIR` / `global BUILDDIR` statements --
    # the names are only read here, and reading a module-level name never
    # needs a global declaration.
    compress.compress("config/HSLayers.cfg", SOURCEDIR, BUILDDIR, debug)
def testfile(filename, cond_huffman):
    """Compress then decompress `filename`, timing both, and return a report dict.

    `cond_huffman` is the *string* 'True'/'False' selecting conditional Huffman.
    Also diff-checks intermediate out1/out2/out3 files against their decompressed
    counterparts, and the original file against filename + '.new'.
    """
    print '\n' + '%' * 90
    report = {'name' : filename}
    start_time = time()
    report['before_size'] = str(os.path.getsize(filename)) + " bytes"
    # Compress
    compress(filename, [cond_huffman == 'True'])
    report['comp_time'] = '%.2f' % (time() - start_time) + 's'
    report['after_size'] = str(os.path.getsize(filename + '.z')) + " bytes"
    # Integer percentage of the original size (Python 2 integer division).
    report['ratio'] = Fore.YELLOW + str(os.path.getsize(filename + '.z') * 100 / os.path.getsize(filename)) + '%' + Fore.RESET
    start_time = time()
    print '-' * 60
    # Decompress
    decompress(filename + '.z', filename + '.new')
    report['decomp_time'] = '%.2f' % (time() - start_time) + 's'
    success = True
    path = os.path.dirname(filename) + '/'
    # NOTE(review): out1/out2/out3 and their *1-doubled twins are presumably
    # produced by compress/decompress as intermediate streams -- confirm there.
    if filecmp.cmp(path + 'out2', path + 'out22'):
        print Fore.GREEN + 'Sequences files are identical' + Fore.RESET
    else:
        print Fore.RED + 'Sequences files are different!' + Fore.RESET
        success = False
    if filecmp.cmp(path + 'out3', path + 'out33'):
        print Fore.GREEN + 'Quality files are identical' + Fore.RESET
    else:
        print Fore.RED + 'Quality files are different!' + Fore.RESET
        success = False
    if filecmp.cmp(path + 'out1', path + 'out11'):
        print Fore.GREEN + 'Info files are identical' + Fore.RESET
    else:
        print Fore.RED + 'Info files are different!' + Fore.RESET
        success = False
    if filecmp.cmp(filename, filename + '.new'):
        print Fore.GREEN + 'Files are identical' + Fore.RESET
    else:
        print Fore.RED + 'Files are different!' + Fore.RESET
        success = False
    report['success'] = success
    return report
def test_compress(self):
    """The compressed stream must start with a dnazip header naming the project."""
    fasta = StringIO()
    fasta.write('>Test \nAACCTGACT\n>Test2 \nAACCTGAA')  # Sample multifasta
    packed = BytesIO()  # Init a byte buffer to write to
    compress.compress(fasta, packed)
    packed.seek(0)  # Reset the pointer
    header_fields = packed.readline().decode().split(';')
    self.assertEqual(header_fields[0], 'dnazip file', 'Should be dnazip file')
    self.assertEqual(header_fields[2].strip(), 'https://github.com/Bartvelp/dnazip', 'Should contain project link')
def submit():
    """Flask endpoint: download a meme image from a URL, compress and store it.

    Expects form fields "pass" (shared secret) and "url"; returns a status
    string either way.
    """
    if request.form["pass"] == "me2x":
        memeURL = request.form["url"]
        tempname = config.meme_directory + "temp.jpeg"
        randname = config.meme_directory + randomword(8)
        # Python 2 API: fetch the remote image into a temp file first.
        urllib.urlretrieve(memeURL, tempname)
        compress.compress(tempname, randname)
        remove(tempname)
        # NOTE(review): randname[8:] only strips the directory prefix correctly
        # if config.meme_directory is exactly 8 characters long -- confirm.
        return "Meme submitted as: " + randname[8:]
    else:
        return "Setting Invalid, did you mean 'meme'?"
def store_data(access_point):
    """Compress a fixed test file into the storage folder named by access_point.

    Returns the path of the folder the archive was written to.
    Side effect: changes the process working directory via os.chdir.
    """
    base_path = 'C:/Users/Harsha/Desktop/tinyfile_system/models/storage/data/'
    filepathToCompress = 'C:/Users/Harsha/Desktop/tinyfile_system/tests/'
    objectCompress = 'test4'
    extension = '.jpg'
    folder_name = access_point
    newPath = base_path + folder_name
    os.chdir(newPath)
    # Archive is written relative to the cwd changed above.
    c.compress(objectCompress + '_' + 'compressed.tar.gz',
               [filepathToCompress + objectCompress + extension])
    # Fix: removed the unreachable `pass` that followed this return.
    return newPath
def test_compressAndDecompress(self):
    """compress followed by decompress must reproduce the multifasta input."""
    source = '>Test \nAACCTGACT\n>Test2 \nAACCTGAA'
    plain_in = StringIO()
    plain_in.write(source)  # Sample multifasta
    plain_in.seek(0)
    packed = BytesIO()  # Init a byte buffer to write to
    compress.compress(plain_in, packed)  # Compress to bytebuffer
    packed.seek(0)  # Reset the pointer
    plain_out = StringIO()
    decompress.decompress(packed, plain_out)  # Decompress from bytebuffer
    plain_out.seek(0)
    # Newlines can be added during decompression, hence the strip().
    restored = plain_out.read()
    self.assertEqual(restored.strip(), source.strip(), 'Full cycle should be lossless')
def compress_file(inname, outname, compression, verbose):
    """Compress `inname` into `outname`.

    When `outname` is None it defaults to `inname` + '.cmp'. `compression`
    and `verbose` are forwarded to compress.compress unchanged.
    """
    out = BytesIO()
    print('Compressing...')
    # `with` guarantees the input handle is closed even if compression fails.
    with open(inname, 'rb') as infile:
        compress.compress(infile, out, compression, verbose)
    if outname is None:
        # BUG FIX: was `filename + '.cmp'` -- `filename` is not defined in this
        # function and raised NameError whenever no output name was supplied.
        outname = inname + '.cmp'
    out.seek(0)
    with open(outname, 'wb') as outfile:
        outfile.write(out.read())
    out.close()
    print('Compressed')
def main(argv):
    """Dispatch to compress (-c) or decompress (-x) based on CLI arguments.

    When no explicit output file is given the name is derived by swapping
    the .txt / .z78 extension.
    """
    operation, input_file, output_file = get_startup_arguments(argv)
    if operation == '-c':
        target = output_file if output_file else input_file.replace('.txt', '.z78')
        compress(input_file, target)
    elif operation == '-x':
        target = output_file if output_file else input_file.replace('.z78', '.txt')
        decompress(input_file, target)
def main():
    """Gzip every word2vec model in the CWD and move model + arrays to backup.

    A "model" is a file f accompanied by the gzipped numpy arrays
    f<ext>.gz for every extension listed below.
    """
    extensions = ['.table.npy', '.syn1neg.npy', '.syn1.npy', '.syn0.npy']
    for f in os.listdir('.'):
        # BUG FIX: this was a map() object; under Python 3 the all() below
        # exhausted it, so the final `for ff in gzipped_files` loop never ran
        # and the array files were never moved. A list is safe to reuse.
        gzipped_files = ['{0}{1}.gz'.format(f, x) for x in extensions]
        is_gzipped_model = all(map(os.path.isfile, gzipped_files))
        if not is_gzipped_model:
            continue
        compress.compress(f)
        os.remove(f)
        move_to_bak('{0}.gz'.format(f))
        for ff in gzipped_files:
            move_to_bak(ff)
def test_with_list_with_one_true_element(self): iterable_one = ["Ivo", "Rado", "Panda"] iterable_two = [False, False, True] result = list(compress(iterable_one, iterable_two)) self.assertEqual(result, ["Panda"])
def test_when_mask_contains_only_False_then_return_empty_iterable(self): iterable = list(range(0, 5)) mask = [False for i in range(0, 5)] expected_result = [] self.assertEqual(list(compress(iterable, mask)), expected_result)
def test_when_iterable_is_tuple(self): iterable = (5, 4, 5, 6, 3, 1) mask = (True, True, False, False, True, True) expected_result = (5, 4, 3, 1) self.assertEqual(tuple(compress(iterable, mask)), expected_result)
def test_when_mask_contains_both_True_and_False_then_return_only_True_from_iterable(self): iterable = list(range(0, 5)) mask = [True, False, True, False, True] expected_result = [0, 2, 4] self.assertEqual(list(compress(iterable, mask)), expected_result)
def test_with_range_with_no_true_elements(self): iterable_one = range(2) iterable_two = [False, False, ] result = list(compress(iterable_one, iterable_two)) self.assertEqual(result, [])
def create(book_name):
    """Search for a book, build its epub, compress it and return the file name.

    Prompts interactively for a title when `book_name` is the empty string.
    """
    if book_name == '':
        book_name = input("Please enter a book's name: ")
    link = search_ask(book_name)
    return compress(build_epub(link))
def test_compress_generator(self): iterable = ['Ivo', 'Rado', 'Panda', 'Dodo'] mask = [False, False, True, True] result = list(compress(iterable, mask)) self.assertEqual(result, ['Panda', 'Dodo'])
def test_with_tuple_with_all_true_elements(self): iterable_one = ("Ivo", "Rado", "Panda") iterable_two = [True, True, True] result = list(compress(iterable_one, iterable_two)) self.assertEqual(result, ["Ivo", "Rado", "Panda"])
def test_when_mask_contains_only_True_then_return_whole_iterable(self): iterable = list(range(0, 5)) mask = [True for i in range(0, 5)] expected_result = list(range(0, 5)) self.assertEqual(list(compress(iterable, mask)), expected_result)
def actuallyFind(srcFile, maxSize, method, minQ, maxQ):
    """Binary-search the codec quality parameter that keeps srcFile under maxSize.

    Recurses, halving the [minQ, maxQ] interval, re-encoding into a temp file
    and measuring the output size each step. Returns the chosen quality.
    Note: jpg narrows toward larger sizes when under budget, jxr/jp2 the other
    way round -- the parameter direction differs per codec.
    """
    if method == 'jpg':
        tmpFile = "tmp.jpg"
        qprobe = int(math.ceil((minQ + maxQ) / 2.0))
        # Interval collapsed: probe equals a bound, so we are done.
        if qprobe == minQ or qprobe == maxQ:
            return qprobe  # optimal parameter
        # print qprobe
        compress.saveAsJPG(srcFile, tmpFile, qprobe)
        curSize = os.path.getsize(tmpFile)
        if curSize < maxSize:
            return actuallyFind(srcFile,maxSize,method,qprobe,maxQ)
        else:
            return actuallyFind(srcFile,maxSize,method,minQ,qprobe)
    if method == 'jxr' or method == "jp2":
        tmpFile = "tmp." + method
        qprobe = int(math.ceil((minQ + maxQ) / 2.0))
        if (maxQ - minQ) <= 1:  # termination condition
            # NOTE(review): reads tmpFile from the previous recursion level
            # before re-encoding -- assumes the file already exists; confirm.
            curSize = os.path.getsize(tmpFile)
            # Linear fine-tune upward until the size budget is met.
            while curSize > maxSize:
                qprobe += 1
                compress.compress(srcFile, tmpFile, method, qprobe)
                curSize = os.path.getsize(tmpFile)
                #print str(qprobe)
            # print "Detailed " + str(curSize) + " probe " + str(qprobe)
            return qprobe  # optimal parameter
        compress.compress(srcFile, tmpFile, method, qprobe)
        curSize = os.path.getsize(tmpFile)
        if curSize > maxSize:
            return actuallyFind(srcFile,maxSize,method,qprobe,maxQ)
        else:
            return actuallyFind(srcFile,maxSize,method,minQ,qprobe)
    else:
        print "ERROR: Invalid method " + method
        exit(-1)
def __init__(self):
    """Create a TCP client from the [client] section of the configuration."""
    self.ip = getConfig.getConfig("client", "ip")
    self.port = int(getConfig.getConfig("client", "port"))
    con = conn.newCon()
    self.client = con.build_new_tcp(self.ip, self.port)
    self.compress = compress.compress(5)  # initialise the compression helper (level 5)
    self.buffer = 1024  # receive buffer size in bytes
    self.obey = obey_command.boeyCommand()  # command handler
def nid(filename1, filename2, compressor, level, decompress):
    """
    DEFINITION: Use the compressor to calculate respectively c(f1.f2),c(f1) and c(f2)
    and calculate the distance acording to the definition of normalized
    information distance: d(f1,f2) = (c(f1.f2)-min{c(f1),c(f2)})/max{c(f1),c(f2)},
    where c is the chosen compressor,and an application of c to a file is the size
    of that file compressed (This formula is based on Kolmogorov complexity
    concepts).
    ARGUMENTS: String file name 1, String file name 2, String compressor, int level
    bool decompress.
    RETURN: A float that represents the distance between the two files.
    """
    file_total_data = []
    # Concatenate both files into one temp file for the joint term c(f1.f2).
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    with open(filename1, "r") as file1:
        file_total_data += file1.readlines()
    with open(filename2, "r") as file2:
        file_total_data += file2.readlines()
    # NOTE(review): the temp file is opened in binary mode (tempfile default)
    # while text lines are written -- this only works on Python 2; confirm.
    for line in file_total_data:
        temp_file.write(line)
    temp_file.close()
    file1_cdata = compress.compress(filename1, compressor, level, decompress)[filename1]
    file2_cdata = compress.compress(filename2, compressor, level, decompress)[filename2]
    temp_file_cdata = compress.compress(temp_file.name, compressor, level, decompress)[temp_file.name]
    if decompress:
        # Distance measured over decompression *times* rather than sizes.
        dist = (temp_file_cdata.time - min(file1_cdata.time, file2_cdata.time)) / float(
            max(file1_cdata.time, file2_cdata.time))
    else:
        dist = (temp_file_cdata.compressed - min(file1_cdata.compressed, file2_cdata.compressed)) / float(
            max(file1_cdata.compressed, file2_cdata.compressed))
    os.unlink(temp_file.name)
    return dist
def put_message(text, image_name):
    """Hide `text` (compressed, plus an encoding header) in the low bits of the
    RGB channels of the image at `image_name`, saving the image in place."""
    import compress
    # NOTE(review): `input_text` is not defined in this function -- presumably
    # this should be `text` (or a module global); confirm before relying on it.
    compress.create_encoding(compress.create_encoding_source(input_text))
    binary_string = compress.compress(text + end_of_message)
    # header will be binary encoding for each character
    # ie
    # spot 1 will be the first in compress.POSSIBLE_CHARACTERS
    # and binary based on encoding
    # NOTE: all characters will take up 8 bits, so it is absolute spacing -> needed to parse
    header = ''
    for ch in compress.POSSIBLE_CHARACTERS:
        header += compress.encoding[ch].rjust(8, '0')
    binary_string = header + binary_string
    if stats:
        print 'Placing encoding in image.'
    len_full = len(text + end_of_message) * 8
    len_bin = len(binary_string) - len(header)
    if stats:
        print 'Overhead of {0} bits required to send the encoding'.format(len(string.printable) * 8)
        print 'Compressed {0} bits to {1}'.format(len_full, len_bin)
        print 'Compression ratio is {0}%'.format(round(100 * compression_ratio(len_bin, len_full), 5))
        print 'Need {0} pixels to send {1} bits'.format(int(ceil(len(binary_string)/3)), len(binary_string))
    #place binary in last position for each RGB value for each pixel
    #placement is linear
    i = 0 #index in binary of message
    im = Image.open(image_name)
    l,w = im.size
    if stats:
        print l * w, 'pixels are available for use'
    #check to make sure image can fit message
    if l * w < int(ceil(len(binary_string)/3)):
        print 'Cannot send message in this image. Need a larger image'
        im.close()
        exit()
    # Walk every pixel; determine_bit presumably leaves channels unchanged
    # once i runs past the end of binary_string -- confirm in determine_bit.
    for x in range(l):
        for y in range(w):
            r,g,b = im.getpixel((x,y))
            r = determine_bit(r,i,binary_string)
            i += 1
            g = determine_bit(g,i,binary_string)
            i += 1
            b = determine_bit(b,i,binary_string)
            i += 1
            im.putpixel((x,y), (r,g,b))
    im.save(open(image_name, 'wb'))
    im.close()
def d1(filename1, filename2, compressor, level, decompress):
    """
    Use the compressor to calculate respectively c(f1.f2),c(f1) and c(f2) and
    calculate the distance acording to the definition presented by Mirko Degli
    Esposti, Chiara Farinelli, Marco Manca, Andrea Tolomelli in their article --
    A similarity measure for biological signals: new applications to HRV
    analysis:
    d(f1,f2) = max(c(f1.f2) − c(f1), c(f2.f1) − c(f2))/ max(c(f1), c(f2)),
    where c is the chosen compressor, and an application of c to a file is the
    size of that file compressed.

    Arguments: filename for both files, compressor, level of compression.
    Return: A float that represents the distance between the two files.
    Algorithm: Both files are opened and their content concatenated in a
    temporary file. Compression is then calculated for each file including the
    concatenation file, and the formula is applied.
    """
    file1_file2, file2_file1 = create_concatenated_files(filename1, filename2)
    file1_cdata = compress.compress(filename1, compressor, level, decompress)[filename1]
    file2_cdata = compress.compress(filename2, compressor, level, decompress)[filename2]
    file1_file2_cdata = compress.compress(file1_file2.name, compressor, level, decompress)[file1_file2.name]
    file2_file1_cdata = compress.compress(file2_file1.name, compressor, level, decompress)[file2_file1.name]
    if decompress:
        # this float conversion will become unecessary if the code is ever migrated to python3
        dist = max(file1_file2_cdata.time - file1_cdata.time,
                   file2_file1_cdata.time - file2_cdata.time) / float(
            max(file1_cdata.time, file2_cdata.time))
    else:
        dist = max(file1_file2_cdata.compressed - file1_cdata.compressed,
                   file2_file1_cdata.compressed - file2_cdata.compressed) / float(
            max(file1_cdata.compressed, file2_cdata.compressed))
    # NOTE(review): the unlink calls below are commented out, so the two
    # concatenated temp files are never deleted -- a disk-space leak if this
    # is called repeatedly. Confirm whether the files are needed afterwards.
    # os.unlink(file1_file2.name)
    # os.unlink(file2_file1.name)
    return dist
def commit(self):
    """Flush the uncommitted write-set to LevelDB in a single atomic batch."""
    log.debug('commit', db=self)
    with self.lock:
        batch = leveldb.WriteBatch()
        # A value of None marks a pending delete; everything else is stored
        # compressed. (iteritems: this module targets Python 2.)
        for k, v in self.uncommitted.iteritems():
            if v is None:
                batch.Delete(k)
            else:
                batch.Put(k, compress.compress(v))
        # sync=False: let the OS flush lazily -- faster, at a small durability cost.
        self.db.Write(batch, sync=False)
        self.uncommitted.clear()
def plot_times(compress_types, file_to_compress):
    """Benchmark `file_to_compress` with each algorithm and plot the timings.

    Returns the matplotlib figure, or None when compression reported failure.
    """
    elapsed_times = compress.compress(file_to_compress, compress_types)
    # Idiom fix: compare against None with `is not`, never `!=`.
    if elapsed_times is not None:
        plt.bar(compress_types, elapsed_times)
        plt.ylabel('time in seconds')
        plt.title('Compression times with different algorithms')
        return plt.gcf()
    return None
def __init__(self):
    """Load backup settings from config.yaml and build the helper objects."""
    # Fixes: `with` closes the config handle (it was never closed), and
    # safe_load avoids the arbitrary-object construction that plain
    # yaml.load permits on untrusted input.
    with open("config.yaml") as config_handler:
        config_info = yaml.safe_load(config_handler)
    self.comp = compress()
    self.codes = config_info['dir_backup']          # directories to back up
    self.databases = config_info['mysql_backup']    # MySQL databases to dump
    self.mail_recive = config_info['mail_recive']   # notification recipients
    self.compress = config_info['compress']
    self.size_bag = self.compress['size_bag']       # split-archive chunk size
    self.gmail = config_info['gmail_send']
    self.split = split(self.size_bag)
def decorate_file(filename_list):
    """Compress the given files into temp/; tar them first when more than one.

    Returns (compressed_path, compressed_size, kind) where kind is
    'tarAndCompressed' or 'compressed'. Exits the process if any path is
    missing or is a directory. (Python 2 module.)
    """
    file_list = []
    for filename in filename_list:
        if not os.path.isfile(filename):
            print "Error:file %s not found or is a directory!" % filename
            sys.exit()
        else:
            file_list.append( filename )
    print_output('compressing file......', 'success')
    if len(file_list) >1:
        # Bundle everything into one tar, compress it, then drop the tar.
        tar_file = compress.tar(file_list,'temp/%s.tar' % file_list[0] )
        compressed_file = '%s.z' % tar_file
        compress.compress(tar_file, compressed_file)
        os.remove(tar_file)
        return compressed_file,os.path.getsize(compressed_file),'tarAndCompressed'
    else:
        # Single file: compress directly under temp/ using its basename.
        source_file = file_list[0].split('/')[-1]
        compressed_file = 'temp/%s.z' % source_file
        compress.compress(file_list[0], compressed_file)
        return compressed_file,os.path.getsize(compressed_file),'compressed'
def test_roundtrip():
    """Both codecs must shrink this module's own source and round-trip it."""
    with open("lzss3.py", "rb") as f:
        indata = f.read()
    # Same checks for the default codec and the LZ11 variant.
    for compressor in (compress, compress_nlz11):
        out = BytesIO()
        compressor(indata, out)
        compressed_data = out.getvalue()
        assert len(compressed_data) < len(indata)
        decompressed_data = decompress(out.getvalue())
        assert indata == decompressed_data
def __init__(self):
    """Read server settings from config.ini and initialise the helpers."""
    cf = configparser.ConfigParser()
    configDir = os.path.join(os.path.dirname(__file__),"./config.ini")
    cf.read(configDir)
    self.con = conn.newCon()
    self.press = compress.compress(5)  # compression helper (level 5)
    self.port = int(cf.get("server","port"))
    self.get_ipv4()  # presumably sets self.ip and self.netStatus -- defined on this class
    self.deal = deal_command.deal()  # command dispatcher
    if self.netStatus:
        print("running on " + self.ip + ":" + str(self.port) )
    else:
        print("network not be found")
def decorate_file(filename_list):
    """Compress the given files into temp/; tar them first when more than one.

    Returns (compressed_path, compressed_size, kind) where kind is
    'tarAndCompressed' or 'compressed'. Exits the process if any path is
    missing or is a directory. (Python 2 module.)
    """
    file_list = []
    for filename in filename_list:
        if not os.path.isfile(filename):
            print "Error:file %s not found or is a directory!" % filename
            sys.exit()
        else:
            file_list.append(filename)
    print_output('compressing file......', 'success')
    if len(file_list) > 1:
        # Bundle everything into one tar, compress it, then drop the tar.
        tar_file = compress.tar(file_list, 'temp/%s.tar' % file_list[0])
        compressed_file = '%s.z' % tar_file
        compress.compress(tar_file, compressed_file)
        os.remove(tar_file)
        return compressed_file, os.path.getsize(
            compressed_file), 'tarAndCompressed'
    else:
        # Single file: compress directly under temp/ using its basename.
        source_file = file_list[0].split('/')[-1]
        compressed_file = 'temp/%s.z' % source_file
        compress.compress(file_list[0], compressed_file)
        return compressed_file, os.path.getsize(
            compressed_file), 'compressed'
def main():
    """Apply the image operation selected by each command-line flag.

    With no arguments (or -h) the usage text is printed instead.
    """
    usage = "-h \t help;\n-c \t compress the image;\n-t \t transform image from png to jpg;\n-s \t scale image;"
    actions = {
        "-c": compress,
        "-tjpg": tojpg,
        "-tpng": topng,
        "-heic": heictojpg,
        "-s": scale,
    }
    if len(sys.argv) > 1:
        for arg in sys.argv[1:]:
            if arg in actions:
                actions[arg](path)
            elif arg == "-h":
                print(usage)
    else:
        print(usage)
def compressLinks(self, eps=1e-4):
    '''
    This method attempts to compress all top-level Links in the Network.

    This method takes one keyword argument:
        eps	-	The accuracy of the compression to perform.
    '''
    compressed = set()
    # Keep going until every current top-level Link has been processed.
    # compress() may alter the link set, so the remaining work is recomputed
    # from scratch on each pass rather than iterated once.
    while len(compressed) < len(self.topLevelLinks()):
        todo = self.topLevelLinks().difference(compressed)
        todo = list(todo)
        # Compress one link at a time; the returned link is the one to mark done.
        link, _, _ = compress(todo[0], eps=eps)
        compressed.add(link)
def encrypt(message, key):
    """Compress `message`, then XOR each byte with a key that drifts upward.

    Returns (encrypted, letters): the cipher text and the compression
    alphabet needed by the matching decrypt routine.
    Raises ZeroDivisionError for an empty compressed message (unchanged
    behaviour from the original).
    """
    letters, compressed = compress.compress(message)
    codes = [ord(ch) for ch in compressed]
    # The per-byte key increment is constant, so hoist it out of the loop.
    step = (256 - key) // len(codes)
    rolling_key = key
    for i in range(len(codes)):
        codes[i] ^= rolling_key
        rolling_key += step
    # Fix: removed the dead `message_int` accumulator -- it was built bit by
    # bit but never read or returned.
    encrypted = ''.join(chr(c) for c in codes)
    return encrypted, letters
def OnPageChanged(self, event):
    """Notebook page-change handler: refresh the diff/compare result shown on
    the result page and update window title, file name and status bar."""
    # Snapshot the text of the first three pages.
    for i in range(3):
        self.text[i] = self.GetPage(i).text.GetValue()
    page = self.GetCurrentPage()
    # NOTE(review): title takes page.file and file takes page.title -- this
    # looks swapped; confirm against the page widget's attributes.
    self.title = page.file
    self.file = page.title + '.txt'
    # Only recompute when landing on the page matching the chosen mode
    # (mode 3 shares the third page with mode 2).
    if self.select == self.GetSelection() or (self.select == 3 and self.GetSelection() == 2):
        if self.select == 0:
            res = compare.removeDiff(self.text[1], self.text[2], reverse=True)
            self.title = '原文 %s 移除 %s 的差异' % (self.GetPage(1).title, self.GetPage(2).title)
        elif self.select == 1:
            # Try the compressed-diff decoder first; fall back to a plain diff
            # removal when the input is not in compressed form.
            try:
                res = compress.decompress(self.text[0], self.text[2])
            except:
                res = compare.removeDiff(self.text[0], self.text[2])
            self.title = '原文 %s 增加 %s 的差异' % (self.GetPage(0).title, self.GetPage(2).title)
        elif self.select == 2:
            res = compare.makeDiff(self.text[0], self.text[1])
            self.title = '比较 %s 和 %s 的结果' % (self.GetPage(0).title, self.GetPage(1).title)
        elif self.select == 3:
            res = compress.compress(self.text[0], self.text[1])
            self.title = '比较 %s 和 %s 的结果(压缩)' % (self.GetPage(0).title, self.GetPage(1).title)
        self.GetCurrentPage().text.SetValue(res)
        self.file = self.title + '.txt'
    # Status bar: show character count, or a contact notice when empty.
    length = len(self.GetCurrentPage().text.GetValue())
    if length:
        self.parent.info.SetLabel(' 字数: %s' % length)
    else:
        self.parent.info.SetLabel(' 联系作者: QQ11313213')
    frm.SetTitle('%s - 文本比对 %s' % (self.title, __ver__))
def main():
    """Entry point: either evaluate compressed checkpoints (--compress) or train
    a model (AlexNet / ResNet-20) with the configured TV/L1/L2 regularisation."""
    args = get_args()
    # up to 1 type of regularization allowed
    # assert (args.l1 and not args.l2 and not args.tv) or \
    #        (args.l2 and not args.l1 and not args.tv) or \
    #        (args.tv and not args.l1 and not args.l2) or \
    #        (not args.tv and not args.l1 and not args.l2)
    # One lambda per masked layer when lambda_mask is given.
    assert args.lambda_mask is None or len(args.lambda_mask) == len(args.mask)
    # At most one TV variant (2D/3D/4D) may be selected unless TV is disabled.
    assert (not args.no_tv and (args.tv and not args.tv3d and not args.tv4d) or \
        (args.tv3d and not args.tv and not args.tv4d) or \
        (args.tv4d and not args.tv and not args.tv3d) ) or \
        (not args.tv and not args.tv3d and not args.tv4d)
    # schedule must match the lr update of 0.1, 0.01 and 0.001
    assert args.tv_lambda_schedule is None or len(args.tv_lambda_schedule) <= 3
    # Map the (tv, tv3d, tv4d) flag triple onto the matching TV loss class;
    # (0,0,0) defaults to the 2D variant.
    tv_state = (int(args.tv), int(args.tv3d), int(args.tv4d))
    tv_dict = {
        (0, 0, 0): TVMat,
        (1, 0, 0): TVMat,
        (0, 1, 0): TVMat3D,
        (0, 0, 1): TVMat4D
    }
    tv_loss = tv_dict[tv_state]
    # set seeds
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    trainloader, testloader = setup_data(args)
    # Build the model and its TV-regulariser closure; default layer masks
    # differ per architecture.
    if args.model == 'alexnet':
        model = AlexNet(10).to(device)
        if args.mask == None:
            args.mask = [0, 1, 2, 3, 4]
        tv_fn = lambda model: TVLossMat(model, args.mask)
    elif args.model == 'resnet20':
        if args.mask == None:
            args.mask = [0, 1, 2, 3]
        model = resnet20().to(device)
        tv_fn = lambda model: TVLossResNet(model, args.mask, tv_loss)
    else:
        raise Exception('Given model is invalid.')
    # transfer init weights to reduce compounding factors of stochasticity
    if args.load_model_init is not None and (not args.no_tv or args.l1 or args.l2):
        model.load_state_dict(torch.load(args.load_model_init))
    else:
        torch.save(model.state_dict(), args.save_model_init)
    if args.compress is not None:
        # Evaluation mode: compress every checkpoint in ./compression and
        # report deflation, weight drift and resulting test accuracy.
        print('Goal Deflation', args.compress * 100, '%')
        for model_state_dict in os.listdir('./compression'):
            print(model_state_dict)
            model.load_state_dict(
                torch.load('./compression/' + model_state_dict))
            l2_diff, compression_ratio = compress(model, args.compress, args.mask, device)
            print('Compressed:', compression_ratio * 100, '%')
            print('Weight L2 Norm Change:', l2_diff)
            acc, loss = eval_model(model, testloader, nn.CrossEntropyLoss(), device)
            print('Accuracy:', acc, 'Loss:', loss)
            print('=' * 80)
    else:
        # train now
        train_model(model, trainloader, testloader, args, tv_fn, device)
def test_2(self):
    """'{[]}' is properly nested, so compress must report True."""
    brackets = '{[]}'
    # Fix: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(compress(brackets), True)
def test_zero_none_compress(self):
    """A doubled char gets a 0 run marker; the lone trailing char is untouched."""
    assert_equal(compress('AAB'), 'AA0B')

def test_zero_compress_twice(self):
    """Each doubled character receives its own zero run-length marker."""
    assert_equal(compress('AABB'), 'AA0BB0')
num_sentences = 10 # fileName refers to cache/asasCache/D1001A ... for fileName in os.listdir(cachePath)[:1]: # grab the topic dictionary with docModels inside of it pickleFilePath = os.path.join(cachePath, fileName) # open if os.path.exists(pickleFilePath): pickleFile = open(pickleFilePath, 'rb') topicDictionary = pickle.load(pickleFile) topicTitleDict = {} # all the cached sentences from the topic allSentences = extractionclustering.sentence.factory(topicDictionary, topicTitleDict, goldSentences[fileName]) if num_sentences: for sentenceId in allSentences: if num_sentences: #this is where I am testing the compressor s = allSentences[sentenceId] print s.simple c = compress(s) print(c.simple) print("") num_sentences -= 1 docIndex += 1
def test_single_chars(self):
    """A string with no repeats passes through the RLE compressor unchanged."""
    assert_equal(compress('ABAC'), 'ABAC')

def test_zero_compress(self):
    """Exactly two identical chars compress to the pair plus a 0 run marker."""
    assert_equal(compress('AA'), 'AA0')
def main():
    """Self-test: exercise compress/decompress on hand-checked RLE examples.

    Encoding scheme (inferred from the cases below): a run of n >= 2 identical
    characters becomes the char doubled plus the digit n-2, with runs longer
    than 11 split so the count stays a single digit. (Python 2 script.)
    """
    #Test compression
    assert compress("A") == "A"
    assert compress("ABAC") == "ABAC"
    assert compress("AA") == "AA0"
    assert compress("AAB") == "AA0B"
    assert compress("AABB") == "AA0BB0"
    assert compress("AAACBBC") == "AA1CBB0C"

    #Test strings with long runs of the same character
    assert compress("AAA") == "AA1"
    assert compress("AAAB") == "AA1B"
    assert compress("AAABB") == "AA1BB0"
    assert compress("AAABBB") == "AA1BB1"
    assert compress("AAABBBB") == "AA1BB2"
    assert compress("AAABBBBB") == "AA1BB3"
    assert compress("AAABBBBBB") == "AA1BB4"
    assert compress("AAABBBBBBB") == "AA1BB5"
    assert compress("AAABBBBBBBB") == "AA1BB6"
    assert compress("AAABBBBBBBBB") == "AA1BB7"
    assert compress("AAABBBBBBBBBB") == "AA1BB8"
    assert compress("AAABBBBBBBBBBB") == "AA1BB9"
    assert compress("AAABBBBBBBBBBBB") == "AA1BB9B"
    assert compress("AAABBBBBBBBBBBBB") == "AA1BB9BB0"
    assert compress("AAABBBBBBBBBBBBBB") == "AA1BB9BB1"
    assert compress("AAABBBBBBBBBBBBBBB") == "AA1BB9BB2"
    assert compress("AAABBBBBBBBBBBBBBBB") == "AA1BB9BB3"

    #Test decompression
    assert decompress("A") == "A"
    assert decompress("ABAC") == "ABAC"
    assert decompress("AA0") == "AA"
    assert decompress("AA0B") == "AAB"
    assert decompress("AA0BB0") == "AABB"
    assert decompress("AA1CBB0C") == "AAACBBC"

    #Test that decompress acts as an inverse of compress.
    assert decompress(compress("A")) == "A"
    assert decompress(compress("ABAC")) == "ABAC"
    assert decompress(compress("AA")) == "AA"
    assert decompress(compress("AAB")) == "AAB"
    assert decompress(compress("AABB")) == "AABB"
    assert decompress(compress("AAACBBC")) == "AAACBBC"
    print "Compression/decompression tests successful."
def test_complex_compress(self): assert_equal(compress('AAACBBC'), 'AA1CBB0C') # Example long runs def test_ten_repeat(self): assert_equal(compress('AAAAAAAAAA'), 'AA8')
def test_eleven_repeat(self):
    """Eleven repeats reach the maximum single-digit count of 9."""
    assert_equal(compress('AAAAAAAAAAA'), 'AA9')

def test_twelve_repeat(self):
    """Twelve repeats overflow the digit: one literal char is appended."""
    assert_equal(compress('AAAAAAAAAAAA'), 'AA9A')
def test_zero_compress(self):
    """Exactly two identical chars compress to the pair plus a 0 run marker."""
    assert_equal(compress('AA'), 'AA0')

def test_zero_none_compress(self):
    """A doubled char gets a 0 run marker; the lone trailing char is untouched."""
    assert_equal(compress('AAB'), 'AA0B')
def test_1(self):
    """'[]' is a balanced pair, so compress must report True."""
    brackets = '[]'
    # Fix: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(compress(brackets), True)
def test_thirteen_repeat(self):
    """Thirteen repeats split into a max run (9) plus a fresh two-char run."""
    assert_equal(compress('AAAAAAAAAAAAA'), 'AA9AA0')

def test_fourteen_repeat(self):
    """Fourteen repeats split into a max run (9) plus a run of three."""
    assert_equal(compress('AAAAAAAAAAAAAA'), 'AA9AA1')
def test_raises_exception_if_invalid_input(self):
    # NOTE(review): this never asserts that an exception is raised -- if
    # compress accepts the input silently the test still passes. Wrap in
    # assert_raises with the expected exception type once it is known.
    compress('abc1 D')

def test_single_char(self):
    """A single character has no run to encode and is returned unchanged."""
    assert_equal(compress('A'), 'A')
def test_zero_compress_twice(self):
    """Each doubled character receives its own zero run-length marker."""
    assert_equal(compress('AABB'), 'AA0BB0')

def test_complex_compress(self):
    """Mixed runs and literals each get the correct run marker."""
    assert_equal(compress('AAACBBC'), 'AA1CBB0C')
# Copyright (c) 2010 Carlos Duarte do Nascimento (Chester) # # Permission is hereby granted, free of charge, to any person obtaining a copy of this # software and associated documentation files (the "Software"), to deal in the Software # without restriction, including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons # to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE # FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # """Monta o "all-scripts.js" com as versões JsMin dos scripts diversos Deve ser chamado sempre que o marcadores.js (ou algum dos outros) for alterado""" import compress compress.compress( # Scripts a comprimir ("static/jquery-1.4.2.min.js", "static/marcadores.js", "static/fancybox/jquery.fancybox-1.3.1.pack.js"), # Script final "static/all-scripts.js")
def test_4(self):
    """'{[}]' interleaves bracket types, so compress must report False."""
    brackets = '{[}]'
    # Fix: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(compress(brackets), False)
import sys, time from pathlib import Path from compress import compress from decompress import decompress f = open("data/document.txt", "r") # open the original document content = ''.join(list(f)) # add every line to the "content" variable f.close() #close the file after finishing reading it. start_time = time.time() # capture time before compressing _compressed = compress(content) # calling the compression algorithm length_in_time = time.time( ) - start_time # capture time after compressing and meassure the differenec _decompressed = decompress(_compressed) # calling the decompression algorithm # print(_compressed) # save the compressed version of the document f = open("data/compressed.txt", "w+") f.write(compress(content)) f.close() # make sure source matches the decompressed (to avoid data loss) if (_decompressed == content): print("✅No data lost.") else: print(f"❌Some data was lost", _decompressed, content) initial = Path('data/document.txt').stat().st_size last = Path('data/compressed.txt').stat().st_size percentage = 100 - round(last / initial * 100) print(
def test_3(self):
    """'{[1]}' contains a non-bracket character, so compress must report False."""
    brackets = '{[1]}'
    # Fix: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(compress(brackets), False)