def main():
    """Parse CLI options, then compile/compress the project's JS sources.

    Reads a scripts list file (one js path per line, '#' comments allowed)
    and hands everything to compress().
    """
    out_uncompressed = '../build/webtundra.js'
    out = '../build/webtundra.min.js'
    externs = 'externs.js'
    scripts = 'scripts.txt'

    # Parsing the arguments
    parser = argparse.ArgumentParser(description='Compile and compress the project.')
    parser.add_argument("-a", "--advanced", help="Use advanced compress methods. May break the code!", action="store_true")
    parser.add_argument("-v", "--verbose", help="Increase compiler verbosity.", action="store_true")
    parser.add_argument("-e", "--externs", help="Externs js-file path.")
    parser.add_argument("-s", "--scripts", help="A file listing the js-files to-be-compiled.")
    args = parser.parse_args()

    # Single-argument print(...) is valid in both Python 2 and Python 3;
    # the original bare `print` statements were Python-2-only.
    if args.advanced:
        print('## Compiling with Advanced Compilation mode.')
    if args.verbose:
        print('## Verbose mode on.')
    if args.externs:
        externs = args.externs
    if args.scripts:
        scripts = args.scripts

    # Parsing the scripts file: skip '#' comment lines and blank lines.
    # `with` ensures the list file is closed (the original leaked the handle).
    with open(scripts) as fh:
        scriptsList = [line.strip() for line in fh
                       if not line.startswith('#') and line.strip()]

    print('## Compressing JavaScript files into %s' % out)
    compress(scriptsList, out, externs, out_uncompressed, args.advanced, args.verbose)
def export(self, path):
    """Export everything (outline, pads, components) to *path*, then compress it.

    :param str path: the target path
    :return:
    """
    # Run each exporter in the required order before compressing the result.
    for exporter in (self.export_outline, self.export_pads, self.export_components):
        exporter(path)
    compress(path)
def testCompress(self):
    '''compress must return the expected output for known strings'''
    # Map each known input to its run-length-encoded form.
    cases = {
        'aaaabbbccd': 'a4b3c2d1',
        'aaabbbbccd': 'a3b4c2d1',
        'abccccdddeeeee': 'a1b1c4d3e5',
    }
    for source, expected in cases.items():
        self.assertEqual(expected, compress(source))
def testCompress(self):
    '''compress must return the expected output for known strings'''
    fixtures = [
        ('aaaabbbccd', 'a4b3c2d1'),
        ('aaabbbbccd', 'a3b4c2d1'),
        ('abccccdddeeeee', 'a1b1c4d3e5'),
    ]
    # Each input string must compress to its paired expectation.
    for original, want in fixtures:
        got = compress(original)
        self.assertEqual(want, got)
def testNoCompress(self):
    '''compress must not compress strings that turn out to have length >= than the original'''
    # These inputs would grow (or stay equal) if encoded, so they must
    # come back unchanged.
    for word in ('aabc', 'abbbc', 'abccdee'):
        self.assertEqual(word, compress(word))
def upload_file():
    """Flask view: accept an uploaded document, preprocess it and render the result.

    On POST: validates the upload, saves it as '0.<ext>' in UPLOAD_FOLDER,
    compresses/thresholds the image, runs the UNet mask and document
    detection, then renders home.html pointing at the processed file.
    Redirects back to the form on any validation failure.
    """
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No document uploaded!!')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('Invalid Document!!')
            return redirect(request.url)
        # Validation
        if file and allowed_file_types(file.filename):
            filename = secure_filename(file.filename)
            # rsplit keeps the TRUE extension for names containing dots
            # (e.g. 'scan.final.jpg' -> 'jpg'); split('.')[1] picked the
            # first segment instead, which broke multi-dot filenames.
            filetype = filename.rsplit('.', 1)[1]
            filename = '0.' + filetype
            # Upload file into /uploads
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            print('Uploaded successfully', filename)
            # Check for condition where document is a device screenshot
            print("[INFO] Compressing image to (256,256)... ")
            # Small files (< ~500 kB) get a threshold pass first.
            if os.stat('uploads/' + filename).st_size < 500000:
                thresh_filename = harsh_thresh('uploads/' + filename)
                compress(thresh_filename)
            else:
                compress(filename)
            # Masking
            unetProcessing()
            # Document extraction
            docdetect(filename)
            # Path to saved processed files
            full_filename = os.path.join('static', 'FinalTransformedDoc', '0.jpg')
            print(full_filename)
            return render_template("home.html", document=full_filename)
# Reads the image img = cv2.imread(args.input, cv2.IMREAD_GRAYSCALE) logging.info('Mode(s): {}'.format(str([m for m in mode]))) logging.info('Loading file: {}'.format(args.input)) if display: cv2.imshow('image orig', img.astype(np.uint8)) cv2.waitKey(0) orig_img = None if 'compress' in mode: orig_img = img.copy() res_img = compress(img, x_01, x_01, mu, lamb, m_ratio, display, method, output) img = res_img if 'decompress' in mode: res_img = decompress(img, x_01, x_01, mu, lamb, m_ratio, display, method, output) if orig_img is not None: psnr_val = psnr(orig_img, res_img) print(psnr_val) p_name = os.path.splitext(os.path.basename(output))[0] p_file = output.replace(p_name, p_name + 'psnr') p_file = output.replace( os.path.splitext(os.path.basename(output))[1], '.txt')
def test(n_bytes, resolution):
    """Round-trip a random byte string through compress/decompress and
    assert the result is byte-identical to the input."""
    original = random_r_bytes(n_bytes, resolution)
    restored = decompress(compress(original))
    assert_same(original, restored)
def assert_same(s1, s2):
    """Assert the two byte strings match; the failure message lists the
    positions where they differ (as reported by differs())."""
    mismatches = differs(s1, s2)
    assert len(mismatches) == 0, str(mismatches)


def test(n_bytes, resolution):
    """Compress then decompress a random byte string and verify the
    round trip is lossless."""
    src = random_r_bytes(n_bytes, resolution)
    assert_same(src, decompress(compress(src)))


# Round-trip through the filesystem as well: write the raw bytes, write the
# compressed form, read the compressed form back, decompress, and compare.
src = random_r_bytes(256, 8)
bytes_to_file("testing/raw.b", src)
bytes_to_file("testing/press.b", compress(src))
c = bytes_from_file("testing/press.b")
dc = decompress(c)
assert_same(src, dc)
def testSingle(self):
    '''compress must return the original string for single character strings'''
    # A one-character string cannot shrink, so it must pass through unchanged.
    result = compress('a')
    self.assertEqual('a', result)
def testEmpty(self):
    '''compress must return empty for empty strings'''
    # The degenerate empty input maps to the empty output.
    result = compress('')
    self.assertEqual('', result)
def testNoCompress(self):
    '''compress must not compress strings that turn out to have length >= than the original'''
    cases = {
        'aabc': 'aabc',
        'abbbc': 'abbbc',
        'abccdee': 'abccdee',
    }
    # Encoding these would not save space, so compress must return them as-is.
    for source, expected in cases.items():
        self.assertEqual(expected, compress(source))
def main(total_cores):
    """Drive the full pipeline: compress a random buffer on the cluster,
    run the shared proof-of-work stage, then decompress and report sizes.

    :param total_cores: total cores available across all executor processes.
    """
    # Round the core count down to a whole multiple of CORE_EXECUTOR_MAX so
    # work divides evenly across executors; below the threshold use them all.
    if total_cores > CORE_EXECUTOR_MAX:
        usable_cores = (total_cores // CORE_EXECUTOR_MAX) * CORE_EXECUTOR_MAX
    else:
        usable_cores = total_cores

    buf = rand_buf()

    # Start compression routine.
    # Node list is the de-referenced index offsets into the bloom cand lists.
    # Form: [a, b, c, ...] lining up with list of cands [[cands...], [cands..]]
    gcs_table, accurate_chksum, fuzzy_chksum, node_list, candidate_lists, candidate_list_no = compress(
        sc, buf, usable_cores)
    print("in bytes = ", len(buf) / 8)

    # Proof-of-work stage ------------------------------------------------
    # Could be done by both compress and decompress but for prototype we
    # only do it once since it is slow.
    nonce_list, cipher_bit_str = shared_pow_manager(node_list, usable_cores)
    print(
        "out bytes = ",
        len(gcs_table) + (len(fuzzy_chksum) / 8) + (len(accurate_chksum) / 8)
        + (len(cipher_bit_str) / 8))

    # Decompression routine ----------------------------------------------
    data = decompress(accurate_chksum, candidate_lists, candidate_list_no, nonce_list)
from params import *
from utils import *
from shared_pow import *
from magic_filter import *
from compress import *

# Sanity checks: the random test bit string must divide evenly into chunks,
# both as a raw string and once wrapped in a Bits vector.
assert (len(RAND_BIT_STR) % CHUNK_SIZE_BITS == 0)
test_rand_bv = Bits(bitstring=RAND_BIT_STR)
assert (len(test_rand_bv) % CHUNK_SIZE_BITS == 0)

# Connect to spark.
sc = get_sc()

# Run the compression brute-force over the test vector and print its result.
print("Starting brute.")
print(compress(sc, test_rand_bv))
# Add N third param to specify how many words to brute up to WORD_NO.
def gui(self):
    """Compress the selected file, or pop an error dialog when no
    destination directory has been chosen yet."""
    # Guard clause: bail out with a message box if nothing is selected.
    if self.file == "":
        QtGui.QMessageBox.about(self, "Error", "Please Select Destination Directory")
        return
    compress(self.file)
# Read file into buffer -- binary. padded_bv = None with open(in_file_path, mode='rb') as fp: buf = fp.read(1024) assert (len(buf) == 1024) # Convert to bit vector and pad to multiple of chunk size. bv = Bits(rawbytes=buf) bit_str = str(bv) r = CHUNK_SIZE_BITS - (len(bit_str) % CHUNK_SIZE_BITS) padding = "0" * r padded_bv = Bits(bitstring=(bit_str + padding)) # Compression code rapper. assert (len(padded_bv) % CHUNK_SIZE_BITS == 0) gcs_table, node_list, candidate_no_list, out_buf = compress( sc, padded_bv, usable_cores=int(cluster_cores)) # Write buffer in binary format to disk. with open(out_file_path, mode='wb') as fp: fp.truncate() fp.write(out_buf) # Show compression ratio. print("New file size = %d" % len(out_buf)) print("File size reduced by = %d%%" % ((1 - (len(out_buf) / 1024)) * 100)) else: # Connect to spark. sc = get_sc(spark_url, cluster_cores) # Read file into buffer in binary format. buf = None
from custom_nodes import node
from compress import *
from decompress import *

if __name__ == "__main__":
    # Interactive CLI: ask for an action and a filename, then dispatch.
    print("\n")
    print("Would you like to compress or decompress a file?")
    choice = input().lower()
    print("Enter file to " + choice)
    filename = input()
    # elif chain replaces the original nested `else: if ...` ladder —
    # identical behavior, flatter structure.
    if choice == "compress":
        compress(filename)
        print("File compressed!")
    elif choice == "decompress":
        decompress(filename)
        print("File decompressed!")
    else:
        print("Invalid choice")
def main():
    """Compress the configured JavaScript sources into the minified bundle.

    NOTE(review): `out`, `scripts` and `out_uncompressed` are module-level
    globals defined outside this span — confirm they exist before calling.
    """
    # Single-argument print() works under Python 2 and 3; the original
    # bare `print` statement was Python-2-only.
    print('Compressing JavaScript files into %s' % out)
    compress(scripts, out, False, out_uncompressed)
# Per-tolerance benchmark results: wall-clock time, size ratio, and error.
compression_time = dict()
compression_ratio = dict()
rmse = dict()
# set file paths
filepath1 = './files/compressed_tolerance_'
filepath2 = './files/decompressed_tolerance_'
# decompressed lists
decom_lists = []
# compress for all tolerance
for tolerance in tolerances:
    # compress and write the result list to file, timing the whole step
    start = time.time()
    compressed_list = compress(original_series, tolerance)
    write_to_file(compressed_list, filepath1 + str(tolerance) + '.txt')
    end = time.time()
    compression_time[tolerance] = end - start
    # calculate compression ratio (on-disk compressed size / original size;
    # assumes Python 3 true division — TODO confirm)
    compressed_size = os.path.getsize(filepath1 + str(tolerance) + '.txt')
    compression_ratio[tolerance] = compressed_size / original_size
    # decompress from the file just written and keep the result for later
    decompressed_list = decompress(filepath1 + str(tolerance) + '.txt')
    decom_lists.append(decompressed_list)
    # save the decompressed list in file
    # NOTE(review): f is never closed within this span — presumably closed
    # (or leaked) past the visible chunk; verify and prefer `with`.
    f = open(filepath2 + str(tolerance) + '.txt', 'w')
    for item in decompressed_list:
        f.write(str(item))