def get_payload(self, i=None, decode=False):
    if i is None:
        payload = self._payload
    elif not isinstance(self._payload, list):
        raise TypeError('Expected list, got %s' % type(self._payload))
    else:
        payload = self._payload[i]
    if decode:
        if self.is_multipart():
            return
        cte = self.get('content-transfer-encoding', '').lower()
        if cte == 'quoted-printable':
            return utils._qdecode(payload)
        if cte == 'base64':
            try:
                return utils._bdecode(payload)
            except binascii.Error:
                return payload
        elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
            sfp = StringIO()
            try:
                uu.decode(StringIO(payload + '\n'), sfp, quiet=True)
                payload = sfp.getvalue()
            except uu.Error:
                return payload
    return payload
def decode_payload(self, encoding, payload):
    """
    Decode attachment payload data.

    :param encoding: The current encoding of the payload data.
    :param payload: the payload data
    """
    cte = encoding.lower()
    if cte == 'quoted-printable':
        return utils._qdecode(payload)
    elif cte == 'base64':
        try:
            return utils._bdecode(payload)
        except binascii.Error:
            # Incorrect padding
            return payload
    elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
        sfp = StringIO()
        try:
            uu.decode(StringIO(payload + '\n'), sfp, quiet=True)
            payload = sfp.getvalue()
        except uu.Error:
            # Some decoding problem
            return payload
    # Unknown encodings (and successfully uu-decoded data) fall through here.
    return payload
def invoke(self, sessionID, service, method, *args):
    # Raise exception if this is not a valid session
    if not self.manager().validate(sessionID, touch=1):
        raise EInvalidSession('Invalid session')
    s = self._as_node(service)
    a = getattr(s, method)
    ## Check to see if any of the args have been UUEncoded
    ## If so, decode them here..
    cnt = -1
    args_new = []
    args_new.extend(args)
    for ar in args_new:
        cnt = cnt + 1
        if type(ar) == types.StringType:
            if ar.startswith('begin '):
                # is uuencoded
                infile = StringIO.StringIO(ar)
                outfile = StringIO.StringIO()
                uu.decode(infile, outfile)
                args_new[cnt] = outfile.getvalue()
    result = apply(a, args_new)
    if isinstance(result, BinaryString):
        # This is the legacy ConfigTool specific base64 encoding.
        return ''.join(("<encode type='base64'>",
                        base64.standard_b64encode(result),
                        "</encode>"))
    return result
def test_garbage_padding(self):
    # Issue #22406
    encodedtext1 = (
        b"begin 644 file\n"
        # length 1; bits 001100 111111 111111 111111
        b"\x21\x2C\x5F\x5F\x5F\n"
        b"\x20\n"
        b"end\n"
    )
    encodedtext2 = (
        b"begin 644 file\n"
        # length 1; bits 001100 111111 111111 111111
        b"\x21\x2C\x5F\x5F\x5F\n"
        b"\x60\n"
        b"end\n"
    )
    plaintext = b"\x33"  # 00110011

    for encodedtext in encodedtext1, encodedtext2:
        with self.subTest("uu.decode()"):
            inp = io.BytesIO(encodedtext)
            out = io.BytesIO()
            uu.decode(inp, out, quiet=True)
            self.assertEqual(out.getvalue(), plaintext)

        with self.subTest("uu_codec"):
            import codecs
            decoded = codecs.decode(encodedtext, "uu_codec")
            self.assertEqual(decoded, plaintext)
def block_dev_get_crypto_footer(block_dev):
    """
    Looks for a crypto footer at the end of a block device and returns the
    footer object if there is one. If there is no footer, False is returned.
    If there were any errors, None is returned.
    """
    shortname = os.path.basename(block_dev)
    print_progress('Checking if {} has a crypto footer... '.format(shortname))
    size = block_dev_get_size_in_512_bytes(block_dev)
    if not size:
        return
    if size * 512 < 16 * 1024:
        print_error('Size of {} is just {} bytes.'.format(shortname, size * 512))
        return
    # FIXME busybox seems to be compiled without large file support and fails
    # to supply sane data at the end of partitions larger than 2 GiB.
    skip = size - 16 * 1024 // 512
    footer_text = adb_shell(('dd if={} bs=512 count=32 skip={} 2>/dev/null'
                             '| uuencode -')
                            .format(block_dev, skip))
    footer_bytes = BytesIO()
    uu.decode(BytesIO(footer_text), footer_bytes)
    footer_bytes.seek(0)
    try:
        return cryptfooter.CryptFooter(footer_bytes)
    except cryptfooter.ValidationException as e:
        return False
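# The snippet above ships raw partition bytes across adb's text-only channel by
# piping `dd` into `uuencode -` on the device and undoing it locally with
# uu.decode(). A minimal sketch of that round trip using only the standard
# library; the helper name and the sample bytes are illustrative, not part of
# the original code.
import io
import uu

def uu_roundtrip(data: bytes) -> bytes:
    # uuencode the bytes into ASCII-safe text, then decode them back.
    encoded = io.BytesIO()
    uu.encode(io.BytesIO(data), encoded)
    decoded = io.BytesIO()
    uu.decode(io.BytesIO(encoded.getvalue()), decoded)
    return decoded.getvalue()

assert uu_roundtrip(b"\x00\xffcrypto-footer-bytes") == b"\x00\xffcrypto-footer-bytes"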
def test_missingbegin(self):
    inp = io.BytesIO(b"")
    out = io.BytesIO()
    try:
        uu.decode(inp, out)
        self.fail("No exception raised")
    except uu.Error as e:
        self.assertEqual(str(e), "No valid begin line found in input file")
def test_truncatedinput(self):
    inp = cStringIO.StringIO("begin 644 t1\n" + encodedtext)
    out = cStringIO.StringIO()
    try:
        uu.decode(inp, out)
        self.fail("No exception thrown")
    except uu.Error, e:
        self.assertEqual(str(e), "Truncated input file")
def test_missingbegin(self):
    inp = cStringIO.StringIO("")
    out = cStringIO.StringIO()
    try:
        uu.decode(inp, out)
        self.fail("No exception thrown")
    except uu.Error, e:
        self.assertEqual(str(e), "No valid begin line found in input file")
def test_truncatedinput(self):
    inp = io.BytesIO(b"begin 644 t1\n" + encodedtext)
    out = io.BytesIO()
    try:
        uu.decode(inp, out)
        self.fail("No exception raised")
    except uu.Error as e:
        self.assertEqual(str(e), "Truncated input file")
def test_decode(self):
    sys.stdin = FakeIO(encodedtextwrapped(0o666, "t1").decode("ascii"))
    sys.stdout = FakeIO()
    uu.decode("-", "-")
    stdout = sys.stdout
    sys.stdout = self.stdout
    sys.stdin = self.stdin
    self.assertEqual(stdout.getvalue(), plaintext.decode("ascii"))
def get_payload(self, i=None, decode=False):
    """Return a reference to the payload.

    The payload will either be a list object or a string.  If you mutate
    the list object, you modify the message's payload in place.  Optional
    i returns that index into the payload.

    Optional decode is a flag indicating whether the payload should be
    decoded or not, according to the Content-Transfer-Encoding header
    (default is False).

    When True and the message is not a multipart, the payload will be
    decoded if this header's value is `quoted-printable' or `base64'.  If
    some other encoding is used, or the header is missing, or if the
    payload has bogus data (i.e. bogus base64 or uuencoded data), the
    payload is returned as-is.

    If the message is a multipart and the decode flag is True, then None
    is returned.
    """
    if i is None:
        payload = self._payload
    elif not isinstance(self._payload, list):
        raise TypeError('Expected list, got %s' % type(self._payload))
    else:
        payload = self._payload[i]
    if not decode:
        return payload
    # Decoded payloads always return bytes.  XXX split this part out into
    # a new method called .get_decoded_payload().
    if self.is_multipart():
        return None
    cte = self.get('content-transfer-encoding', '').lower()
    if cte == 'quoted-printable':
        return utils._qdecode(payload)
    elif cte == 'base64':
        try:
            if isinstance(payload, str):
                payload = payload.encode('raw-unicode-escape')
            return base64.b64decode(payload)
            #return utils._bdecode(payload)
        except binascii.Error:
            # Incorrect padding
            pass
    elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
        in_file = BytesIO(payload.encode('raw-unicode-escape'))
        out_file = BytesIO()
        try:
            uu.decode(in_file, out_file, quiet=True)
            return out_file.getvalue()
        except uu.Error:
            # Some decoding problem
            pass
    # Is there a better way to do this?  We can't use the bytes
    # constructor.
    if isinstance(payload, str):
        return payload.encode('raw-unicode-escape')
    return payload
def test_decodetwice(self):
    # Verify that decode() will refuse to overwrite an existing file
    with open(self.tmpin, 'wb') as f:
        f.write(encodedtextwrapped(0o644, self.tmpout))
    with open(self.tmpin, 'rb') as f:
        uu.decode(f)
    with open(self.tmpin, 'rb') as f:
        self.assertRaises(uu.Error, uu.decode, f)
def decodetxt(text, encoding, charset):
    # necessary due to a bug in python 3 email module
    if not charset:
        charset = "UTF-8"
    if not encoding:
        encoding = "8bit"
    if charset != None:
        try:
            "test".encode(charset)
        except:
            charset = "UTF-8"
    bytetext = text.encode(charset, unicodeerror)
    result = bytetext
    cte = encoding.upper()
    if cte == "BASE64":
        pad_err = len(bytetext) % 4
        if pad_err:
            padded_encoded = bytetext + b'==='[:4 - pad_err]
        else:
            padded_encoded = bytetext
        try:
            result = base64.b64decode(padded_encoded, validate=True)
        except binascii.Error:
            for i in 0, 1, 2, 3:
                try:
                    result = base64.b64decode(bytetext + b'=' * i, validate=False)
                    break
                except binascii.Error:
                    pass
            else:
                raise AssertionError("unexpected binascii.Error")
    elif cte == "QUOTED-PRINTABLE":
        result = quopri.decodestring(bytetext)
    elif cte in ('X-UUENCODE', 'UUENCODE', 'UUE', 'X-UUE'):
        in_file = BytesIO(bytetext)
        out_file = BytesIO()
        try:
            uu.decode(in_file, out_file, quiet=True)
            result = out_file.getvalue()
        except uu.Error:
            pass
    return result.decode(charset, unicodeerror)
def test_decode_filename(self):
    with open(self.tmpin, 'wb') as f:
        f.write(encodedtextwrapped(0o644, self.tmpout))
    uu.decode(self.tmpin)
    with open(self.tmpout, 'rb') as f:
        s = f.read()
    self.assertEqual(s, plaintext)
def __call__(self, path, target):
    """Decode C{path} into C{target} using the C{uu} module.

    @todo: Confirm that this will always extract within C{target}"""
    import uu
    cwd = os.getcwd()
    try:
        os.chdir(target)
        uu.decode(file(path, 'rb'))
    finally:
        os.chdir(cwd)
def check_uu(msg, data):
    assert msg._payload.encode() != data, "Payload has not been transformed"

    outfile = BytesIO()
    try:
        uu.decode(BytesIO(msg._payload.encode()), outfile)
        payload = outfile.getvalue()
    except uu.Error:
        assert False, "Payload could not be decoded"

    assert payload == data, "Decoded payload does not match input data"
    assert INBOXEN_ENCODING_ERROR_HEADER_NAME not in msg.keys(), "Unexpected error header"
def seperateAndArchive(filename, delivery, archive):
    # Filenames
    scpdel = delivery
    scparc = archive
    #scpdel = cfgGet("dir.scp.delivery")
    #scparc = cfgGet("dir.scp.archive")
    tmpfn = scparc + '/rich_tmp'
    xmlfn = scpdel + "/" + filename
    pdffn = 'pdfs/' + filename[:-4] + ".pdf"

    # Read xml
    try:
        dom = parse(open(xmlfn))
    except:
        print timeLog() + "ERROR: Invalid file. Error parsing." \
            + " Archiving and removing."
        shutil.copy(xmlfn, scparc + "/" + filename)
        os.remove(xmlfn)
        return None

    # Grab data
    ctype = dom.getElementsByTagName('ContentType')[0].childNodes[0] \
        .nodeValue
    content = dom.getElementsByTagName('Content')[0].childNodes[0] \
        .nodeValue

    if ctype == "UUSTORY":
        if content.rstrip()[:5] == "begin" and \
                content.rstrip()[-3:] == "end":
            # First time seeing, archive and rip out pdf
            archFile = scparc + "/" + filename
            shutil.copy(xmlfn, archFile)
            print timeLog() + "ARCHIVED: " + archFile
            tmpfile = open(tmpfn, 'w')
            tmpfile.write(content)
            tmpfile.close()
            uu.decode(tmpfn, pdffn, None, True)
            os.remove(tmpfn)
            dom.getElementsByTagName('Content')[0].childNodes[0] \
                .nodeValue = filename[:-4] + ".pdf"
            f = open(xmlfn, 'w')
            f.write(dom.toxml())
            f.close()
        elif content.rstrip()[-3:] != "pdf":
            # Don't know what you're doing here, but get out
            print timeLog() + "ERROR: Invalid content. No UU " \
                + "Data or pdf name found with UUSTORY. " \
                + "Archiving and removing."
            os.remove(xmlfn)
            return None

    return mnimsgxml(dom)
def test_decode(self):
    inp = cStringIO.StringIO(encodedtextwrapped % (0666, "t1"))
    out = cStringIO.StringIO()
    uu.decode(inp, out)
    self.assertEqual(out.getvalue(), plaintext)
    inp = cStringIO.StringIO(
        "UUencoded files may contain many lines,\n" +
        "even some that have 'begin' in them.\n" +
        encodedtextwrapped % (0666, "t1")
    )
    out = cStringIO.StringIO()
    uu.decode(inp, out)
    self.assertEqual(out.getvalue(), plaintext)
def test_main():
    uu.decode(findfile("testrgb.uue"), "test.rgb")
    uu.decode(findfile("greyrgb.uue"), "greytest.rgb")

    # Test a 3 byte color image
    testimage("test.rgb")

    # Test a 1 byte greyscale image
    testimage("greytest.rgb")

    unlink("test.rgb")
    unlink("greytest.rgb")
def test_decode(self):
    inp = io.BytesIO(encodedtextwrapped(0o666, "t1"))
    out = io.BytesIO()
    uu.decode(inp, out)
    self.assertEqual(out.getvalue(), plaintext)
    inp = io.BytesIO(
        b"UUencoded files may contain many lines,\n" +
        b"even some that have 'begin' in them.\n" +
        encodedtextwrapped(0o666, "t1")
    )
    out = io.BytesIO()
    uu.decode(inp, out)
    self.assertEqual(out.getvalue(), plaintext)
def main():
    uu.decode(findfile('testrgb.uue'), 'test.rgb')
    uu.decode(findfile('greyrgb.uue'), 'greytest.rgb')

    # Test a 3 byte color image
    testimage('test.rgb')

    # Test a 1 byte greyscale image
    testimage('greytest.rgb')

    unlink('test.rgb')
    unlink('greytest.rgb')
def test_decodetwice(self):
    # Verify that decode() will refuse to overwrite an existing file
    try:
        f = cStringIO.StringIO(encodedtextwrapped % (0644, self.tmpout))

        f = open(self.tmpin, "rb")
        uu.decode(f)
        f.close()

        f = open(self.tmpin, "rb")
        self.assertRaises(uu.Error, uu.decode, f)
        f.close()
    finally:
        self._kill(f)
def get_payload(self, i=None, decode=False):
    if self.is_multipart():
        if decode:
            return
        if i is None:
            return self._payload
        return self._payload[i]
    if i is not None and not isinstance(self._payload, list):
        raise TypeError('Expected list, got %s' % type(self._payload))
    payload = self._payload
    cte = str(self.get('content-transfer-encoding', '')).lower()
    if isinstance(payload, str):
        if utils._has_surrogates(payload):
            bpayload = payload.encode('ascii', 'surrogateescape')
            if not decode:
                try:
                    payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')
                except LookupError:
                    payload = bpayload.decode('ascii', 'replace')
            if decode:
                try:
                    bpayload = payload.encode('ascii')
                except UnicodeError:
                    bpayload = payload.encode('raw-unicode-escape')
        elif decode:
            try:
                bpayload = payload.encode('ascii')
            except UnicodeError:
                bpayload = payload.encode('raw-unicode-escape')
    if not decode:
        return payload
    if cte == 'quoted-printable':
        return utils._qdecode(bpayload)
    if cte == 'base64':
        (value, defects) = decode_b(b''.join(bpayload.splitlines()))
        for defect in defects:
            self.policy.handle_defect(self, defect)
        return value
    if cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
        in_file = BytesIO(bpayload)
        out_file = BytesIO()
        try:
            uu.decode(in_file, out_file, quiet=True)
            return out_file.getvalue()
        except uu.Error:
            return bpayload
    if isinstance(payload, str):
        return bpayload
    return payload
def get_payload(self, i=None, decode=False):
    """Return a reference to the payload.

    The payload will either be a list object or a string.  If you mutate
    the list object, you modify the message's payload in place.  Optional
    i returns that index into the payload.

    Optional decode is a flag indicating whether the payload should be
    decoded or not, according to the Content-Transfer-Encoding header
    (default is False).

    When True and the message is not a multipart, the payload will be
    decoded if this header's value is `quoted-printable' or `base64'.  If
    some other encoding is used, or the header is missing, or if the
    payload has bogus data (i.e. bogus base64 or uuencoded data), the
    payload is returned as-is.

    If the message is a multipart and the decode flag is True, then None
    is returned.
    """
    if i is None:
        payload = self._payload
    elif not isinstance(self._payload, ListType):
        raise TypeError, i
    else:
        payload = self._payload[i]
    if decode:
        if self.is_multipart():
            return None
        cte = self.get('content-transfer-encoding', '').lower()
        if cte == 'quoted-printable':
            return Utils._qdecode(payload)
        elif cte == 'base64':
            try:
                return Utils._bdecode(payload)
            except binascii.Error:
                # Incorrect padding
                return payload
        elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
            sfp = StringIO()
            try:
                uu.decode(StringIO(payload+'\n'), sfp)
                payload = sfp.getvalue()
            except uu.Error:
                # Some decoding problem
                return payload
    # Everything else, including encodings with 8bit or 7bit are returned
    # unchanged.
    return payload
def test_decodetwice(self):
    # Verify that decode() will refuse to overwrite an existing file
    f = None
    try:
        f = io.BytesIO(encodedtextwrapped(0o644, self.tmpout))

        f = open(self.tmpin, 'rb')
        uu.decode(f)
        f.close()

        f = open(self.tmpin, 'rb')
        self.assertRaises(uu.Error, uu.decode, f)
        f.close()
    finally:
        self._kill(f)
def test_garbage_padding(self):
    # Issue #22406
    encodedtext = (
        "begin 644 file\n"
        # length 1; bits 001100 111111 111111 111111
        "\x21\x2C\x5F\x5F\x5F\n"
        "\x20\n"
        "end\n"
    )
    plaintext = "\x33"  # 00110011

    inp = cStringIO.StringIO(encodedtext)
    out = cStringIO.StringIO()
    uu.decode(inp, out, quiet=True)
    self.assertEqual(out.getvalue(), plaintext)
def test_decode_filename(self):
    f = None
    try:
        support.unlink(self.tmpin)
        f = open(self.tmpin, 'wb')
        f.write(encodedtextwrapped(0o644, self.tmpout))
        f.close()

        uu.decode(self.tmpin)

        f = open(self.tmpout, 'rb')
        s = f.read()
        f.close()
        self.assertEqual(s, plaintext)
    finally:
        self._kill(f)
def test_decode_mode(self):
    # Verify that decode() will set the given mode for the out_file
    expected_mode = 0o444
    with open(self.tmpin, 'wb') as f:
        f.write(encodedtextwrapped(expected_mode, self.tmpout))

    # make file writable again, so it can be removed (Windows only)
    self.addCleanup(os.chmod, self.tmpout, expected_mode | stat.S_IWRITE)

    with open(self.tmpin, 'rb') as f:
        uu.decode(f)

    self.assertEqual(
        stat.S_IMODE(os.stat(self.tmpout).st_mode),
        expected_mode
    )
def test_decode(self):
    try:
        f = open(self.tmpin, "wb")
        f.write(encodedtextwrapped % (0644, self.tmpout))
        f.close()

        f = open(self.tmpin, "rb")
        uu.decode(f)
        f.close()

        f = open(self.tmpout, "r")
        s = f.read()
        f.close()
        self.assertEqual(s, plaintext)
        # XXX is there an xp way to verify the mode?
    finally:
        self._kill(f)
def test_decode(self):
    f = None
    try:
        support.unlink(self.tmpin)
        f = open(self.tmpin, 'wb')
        f.write(encodedtextwrapped(0o644, self.tmpout))
        f.close()

        f = open(self.tmpin, 'rb')
        uu.decode(f)
        f.close()

        f = open(self.tmpout, 'rb')
        s = f.read()
        f.close()
        self.assertEqual(s, plaintext)
        # XXX is there an xp way to verify the mode?
    finally:
        self._kill(f)
def splitfiles(self, text_only=True, **kwargs):
    """Extract the original documents from the temporary file from EDGAR.

    The archived filings on EDGAR contain multiple files bundled in a
    .txt-file. This method splits the files into their original components.

    Args:
        text_only (bool): If True, only html and txt files are saved. All
            other (media) files are discarded. Defaults to True.
        **kwargs: Arbitrary keyword arguments.

    Returns:
        None

    Raises:
        None
    """
    result = pd.DataFrame(columns=["url",
                                   "seq",
                                   "server_fname",
                                   "type",
                                   "desc",
                                   "local_fname",
                                   ])

    # Iterate over all documents to split files
    for index, row in self.temp_files.iterrows():
        # Get get filename and make sure path exists
        temp_fname = self.dir_work + self.sub_filings + row["temp_fname"]
        fname_part = create_filename(row, **kwargs)
        fname_full = self.dir_work + self.sub_filings + str(fname_part)
        fname_full = t.finduniquefname(fname_full, exact=False, **kwargs)
        path = os.path.dirname(fname_full)
        if os.path.isdir(path) is False:
            os.makedirs(path)

        # Open the temporary file and read the content
        with open(temp_fname) as f:
            txt = f.read()

        # Extract the filing metadata, write to file & append to DataFrame
        m_sec_header = re.search("<SEC-HEADER>.*</SEC-HEADER>", txt, re.DOTALL)
        b_sec_header = m_sec_header.group(0)
        local_fname = fname_full + "header.txt"
        with open(local_fname, "w") as out:
            out.write(b_sec_header)
        result.loc[result.shape[0]] = ([row["url"], 0, "HEADER",
                                        "SEC Header", "Header file",
                                        local_fname])

        # Extract all other document parts
        splits = txt.split("<DOCUMENT>")[1:]

        # For each documents path: Extract information, save to file and
        # append to dataframe; depending on text_only only save text or all
        # information
        for i in splits:
            text_startpos = i.find("<TEXT>\n") + 7
            text_endpos = i.find("</TEXT") - 1
            text = i[text_startpos:text_endpos]
            search = ("<TYPE>(?P<TYPE>.+?)[\\n]+?"
                      "<SEQUENCE>(?P<SEQ>.+?)[\\n]+"
                      "<FILENAME>(?P<FNAME>.+?)[\\n]"
                      "(<DESCRIPTION>(?P<DESC>.+?)[\\n])?")
            try:
                info = re.search(search, i, re.MULTILINE).groupdict()
            except:
                print(i)
                break
            if info["TYPE"] != "GRAPHIC":
                local_fname = fname_full + str(info["SEQ"]) + ".html"
                with open(local_fname, "w") as out:
                    out.write(text)
            else:
                if text_only is True:
                    break
                local_fname = fname_full + str(info["SEQ"]) + ".jpg"
                f = io.BytesIO(bytes(text, encoding="ascii"))
                uu.decode(f, local_fname, quiet=True)
                f.close()
            result.loc[result.shape[0]] = ([row["url"], info["SEQ"],
                                            info["FNAME"], info["TYPE"],
                                            info["DESC"], local_fname])

    self.docs = result
def test_decode(self):
    sys.stdin = cStringIO.StringIO(encodedtextwrapped % (0666, "t1"))
    sys.stdout = cStringIO.StringIO()
    uu.decode("-", "-")
    self.assertEqual(sys.stdout.getvalue(), plaintext)
if verbose:
    print '1. encode file->file'
inp = StringIO(teststr)
out = StringIO()
uu.encode(inp, out, "t1")
verify(out.getvalue() == encoded1)
inp = StringIO(teststr)
out = StringIO()
uu.encode(inp, out, "t1", 0644)
verify(out.getvalue() == "begin 644 t1\n" + expected + "\n \nend\n")

if verbose:
    print '2. decode file->file'
inp = StringIO(encoded1)
out = StringIO()
uu.decode(inp, out)
verify(out.getvalue() == teststr)
inp = StringIO("""UUencoded files may contain many lines,
even some that have 'begin' in them.\n""" + encoded1)
out = StringIO()
uu.decode(inp, out)
verify(out.getvalue() == teststr)

stdinsave = sys.stdin
stdoutsave = sys.stdout
try:
    if verbose:
        print '3. encode stdin->stdout'
    sys.stdin = StringIO(teststr)
    sys.stdout = StringIO()
    uu.encode("-", "-", "t1", 0666)
for line in lines[:-1]:
    if not line:                            # empty line
        pass
    elif line[:5] == "begin":               # beginning of segment
        in_segment = 1
        segment = [line]
        fname = string.strip(line[9:])
    elif string.strip(line) == "end":       # end of segment
        in_segment = 0
        segment.append(line)
        chunk = string.join(segment, "\n")
        infile = cStringIO.StringIO(chunk+"\n")
        if os.path.isfile(fname):           # avoid decoding existing file
            print "File already exists:", fname
        elif string.find(fname, ".r") > -1:
            print "Skipping presumed RAR archive"
        else:
            try:                            # might not have nice segment
                uu.decode(infile)
                print "Extracted file:", fname+"!"
            except:
                print "Problem extracting segment..."
    else:
        if in_segment:                      # segment content
            if len(segment) > 20000:
                segment = [segment[0]]
                print "Segment truncated to avoid memory overrun!"
            segment.append(line)
def uudecode(self, string):
    # uu-decode a string
    stringio = StringIO()
    uu.decode(StringIO(string), stringio)
    return stringio.getvalue()
def secCorrespondenceLoader(modelXbrl, mappedUri, filepath, *args, **kwargs):
    if (mappedUri.startswith("http://www.sec.gov/Archives/edgar/Feed/") and
            mappedUri.endswith(".nc.tar.gz")):

        # daily feed loader (the rss object)
        rssObject = ModelRssObject(modelXbrl, uri=mappedUri, filepath=filepath)

        # location for expanded feed files
        tempdir = os.path.join(modelXbrl.modelManager.cntlr.userAppDir, "tmp", "edgarFeed")
        # remove prior files
        if os.path.exists(tempdir):
            os.system("rm -fr {}".format(tempdir))  # rmtree does not work with this many files!
        os.makedirs(tempdir, exist_ok=True)
        # untar to /temp/arelle/edgarFeed for faster operation
        startedAt = time.time()
        modelXbrl.fileSource.open()
        modelXbrl.fileSource.fs.extractall(tempdir)
        modelXbrl.info("info", "untar edgarFeed temp files in %.2f sec" % (time.time() - startedAt),
                       modelObject=modelXbrl)

        # find <table> with <a>Download in it
        for instanceFile in sorted(os.listdir(tempdir)):  # modelXbrl.fileSource.dir:
            if instanceFile != ".":
                rssObject.rssItems.append(
                    SECCorrespondenceItem(modelXbrl, instanceFile, mappedUri + '/' + instanceFile))
        return rssObject
    elif "rssItem" in kwargs and ".nc.tar.gz/" in mappedUri:
        rssItem = kwargs["rssItem"]
        text = None  # no instance information
        # parse document
        try:
            startedAt = time.time()
            file, encoding = modelXbrl.fileSource.file(
                os.path.join(modelXbrl.modelManager.cntlr.userAppDir, "tmp", "edgarFeed",
                             os.path.basename(rssItem.url)))
            s = file.read()
            file.close()
            for match in re.finditer(r"[<]([^>]+)[>]([^<\n\r]*)", s, re.MULTILINE):
                tag = match.group(1).lower()
                v = match.group(2)
                if tag == "accession-number":
                    rssItem.accessionNumber = v
                elif tag == "form-type":
                    rssItem.formType = v
                    if v != "UPLOAD":
                        rssItem.doNotProcessRSSitem = True  # skip this RSS item in validate loop, don't load DB
                elif tag == "filing-date":
                    try:
                        rssItem.filingDate = datetime.date(int(v[0:4]), int(v[4:6]), int(v[6:8]))
                    except (ValueError, IndexError):
                        pass
                elif tag == "conformed-name":
                    rssItem.companyName = v
                elif tag == "cik":
                    rssItem.cikNumber = v
                elif tag == "assigned-sic":
                    rssItem.assignedSic = v
                elif tag == "fiscal-year-end":
                    try:
                        rssItem.fiscalYearEnd = v[0:2] + '-' + v[2:4]
                    except (IndexError, TypeError):
                        pass
            match = re.search("<PDF>(.*)</PDF>", s, re.DOTALL)
            if match:
                import uu, io
                pageText = []
                uuIn = io.BytesIO(match.group(1).encode(encoding))
                uuOut = io.BytesIO()
                uu.decode(uuIn, uuOut)
                from pyPdf import PdfFileReader
                uuOut.seek(0, 0)
                try:
                    pdfIn = PdfFileReader(uuOut)
                    for pageNum in range(pdfIn.getNumPages()):
                        pageText.append(pdfIn.getPage(pageNum).extractText())
                except:
                    # do we want a warning here that the PDF can't be read with this library?
                    pass
                uuIn.close()
                uuOut.close()
                text = ''.join(pageText)
            else:
                match = re.search("<TEXT>(.*)</TEXT>", s, re.DOTALL)
                if match:
                    text = match.group(1)
        except (IOError, EnvironmentError):
            pass  # give up, no instance
        # daily rss item loader, provide unpopulated instance document to be filled in by RssItem.Xbrl.Loaded
        if not text:
            rssItem.doNotProcessRSSitem = True  # skip this RSS item in validate loop, don't load DB
            instDoc = ModelDocument.create(modelXbrl,
                                           ModelDocument.Type.UnknownXML,
                                           rssItem.url,
                                           isEntry=True,
                                           base='',  # block pathname from becoming absolute
                                           initialXml='<DummyXml/>')
        else:
            instDoc = ModelDocument.create(modelXbrl,
                                           ModelDocument.Type.INSTANCE,
                                           rssItem.url,
                                           isEntry=True,
                                           base='',  # block pathname from becoming absolute
                                           initialXml='''
<xbrli:xbrl xmlns:doc="http://arelle.org/doc/2014-01-31"
            xmlns:link="http://www.xbrl.org/2003/linkbase"
            xmlns:xlink="http://www.w3.org/1999/xlink"
            xmlns:xbrli="http://www.xbrl.org/2003/instance">
    <link:schemaRef xlink:type="simple" xlink:href="http://arelle.org/2014/doc-2014-01-31.xsd"/>
    <xbrli:context id="pubDate">
        <xbrli:entity>
            <xbrli:identifier scheme="http://www.sec.gov/CIK">{cik}</xbrli:identifier>
        </xbrli:entity>
        <xbrli:period>
            <xbrli:instant>{pubDate}</xbrli:instant>
        </xbrli:period>
    </xbrli:context>
    <doc:Correspondence contextRef="pubDate">{text}</doc:Correspondence>
</xbrli:xbrl>
'''.format(cik=rssItem.cikNumber,
           pubDate=rssItem.pubDate.date(),
           text=text.strip().replace("&", "&amp;").replace("<", "&lt;")))
        #modelXbrl.info("info", "loaded in %.2f sec" % (time.time() - startedAt),
        #               modelDocument=instDoc)
        return instDoc
    return None
cursor = conn.execute("SELECT * from SAFE WHERE FILE_NAME=" + '"' + FILE_ + '"')

# Loops through the cursor object defined above
# Appends the string found in the 4th column (file information in bytes) into the file_string container
file_string = ""
for row in cursor:
    file_string += row[4]

# Performs check to decide whether or not to decode a bytes text file into a video or audio file.
if file_type == "mp4" or file_type == "mp3":
    with open(file_name + ".txt", 'wb') as f_output:
        f_output.write(base64.b64decode(file_string))
    # Decodes the requested audio or video file as the text file in which the byte information
    # was initially stored using the 'uu' Python library.
    uu.decode(file_name + ".txt", FILE_)
    os.remove(file_name + ".txt")
else:
    # Creates a file in the current working directory
    # and writes the decoded string information from the file_string container defined above.
    with open(FILE_, 'wb') as f_output:
        f_output.write(base64.b64decode(file_string))

if input_.lower() == "rd":
    # Recovers a directory by checking the database for the input placed by the user.
    directory = input("Please enter directory path containing previously stored files.\n")
    cursor = conn.execute("SELECT * from SAFE WHERE DIRECTORY=" + '"' + directory + '"')
    recovered_files = []
    # Appends files found in the database with the directory path that was initially stored.
M,EQX9C=<>&,P4UM<>&$T7'AE-UQX9C!<;BI<>#$T+W!<;EQX9F%<>&9F7'AF M94MQ7'AD95QX.&%<>&-D>UQX9C-<>#%B7'AD-%A&7'AC.5QX961<>&0R7&Y< G;EQX,#!<># P7'@P,%QX,#!)14Y$7'AA94)@7'@X,B<*<#$P"F$N end""" def as_bytes(bytes_or_text): if isinstance(bytes_or_text, bytes): return bytes_or_text else: return bytes_or_text.encode("latin-1") # retrieve image of numbers from uuencoded pickled objects c_r = io.BytesIO() uu.decode(io.BytesIO(numbers_coded), c_r) c_r.seek(0) if sys.version_info < (3, ): objs = pickle.load(c_r) else: objs = pickle.load(c_r, encoding="latin-1") im_numbers = [Image.open(io.BytesIO(as_bytes(obj))) for obj in objs] def usage(argv): print("%s filename" % argv[0]) print(" Helps stamp PNG files with numbered labels, ") print(" outputs a new PNG ready for documentation.")
""" Tests for uu module. Nick Mathewson """ from test_support import verify, TestFailed, verbose, TESTFN import sys, os import uu from StringIO import StringIO teststr = "The smooth-scaled python crept over the sleeping dog\n" expected = """\ M5&AE('-M;V]T:\"US8V%L960@<'ET:&]N(&-R97!T(&]V97(@=&AE('-L965P (:6YG(&1O9PH """ encoded1 = "begin 666 t1\n" + expected + "\n \nend\n" if verbose: print '1. encode file->file' inp = StringIO(teststr) out = StringIO() uu.encode(inp, out, "t1") verify(out.getvalue() == encoded1) inp = StringIO(teststr) out = StringIO() uu.encode(inp, out, "t1", 0644) verify(out.getvalue() == "begin 644 t1\n" + expected + "\n \nend\n") if verbose: print '2. decode file->file' inp = StringIO(encoded1) out = StringIO() uu.decode(inp, out) verify(out.getvalue() == teststr) inp = StringIO("""UUencoded files may contain many lines,
>>> def prep(): ... uu.decode('file2-basetests.uu', 'file2-basetests') >>> def g1(f): ... prep() ... d = f(['file1-basetests', 'BIN:file2-basetests']) ... return [d[chr(n)] for n in range(256)] >>> def protect(f, *args): ... try: ... return f(*args) ... except: ... return 'exception: ' + sys.exc_info()[0].__name__ >>> def assert_true(cond): ... if cond: ... print 'Ok' ... else: ... print 'Assert failed!' >>> True,False = 1==1,1==0 >>> PY21 = sys.hexversion < 0x02020000 >>> if sys.hexversion >= 0x02040000: ... def clamp(x): ... return x ... else: ... def clamp(x): ... if x & (sys.maxint+1L): ... return int(x & sys.maxint) - sys.maxint - 1 ... else: ... return int(x & sys.maxint) ################### #### TEST1 #### ################### >>> print test1.f1(217) 1115467 >>> print test1.f2(0) 0 >>> print test1.f2(-192570368) -385140736 >>> test1.f3([3,4,0,'testing',12.75,(5,3,1),0]) 6 8 testingtesting 25.5 (5, 3, 1, 5, 3, 1) >>> print g1(test1.f5) [20, 14, 19, 9, 12, 25, 13, 9, 17, 23, 289, 26, 15, 13, 17, 19, 23, 18, 15, 14, 18, 22, 14, 18, 18, 17, 21, 27, 21, 20, 15, 12, 1745, 18, 17, 136, 25, 14, 27, 18, 92, 87, 386, 12, 52, 29, 63, 102, 49, 39, 26, 26, 18, 14, 16, 13, 22, 16, 29, 49, 24, 34, 30, 20, 16, 100, 98, 61, 20, 85, 61, 75, 54, 49, 19, 47, 32, 18, 67, 47, 86, 66, 184, 61, 42, 42, 74, 14, 26, 55, 74, 16, 37, 15, 19, 211, 17, 348, 369, 130, 12, 248, 264, 328, 162, 68, 69, 41, 128, 29, 240, 95, 244, 288, 673, 295, 108, 134, 380, 40, 42, 224, 137, 30, 15, 24, 11, 15, 6, 15, 19, 26, 19, 17, 15, 18, 16, 17, 11, 14, 11, 15, 11, 11, 19, 13, 18, 25, 14, 20, 12, 14, 8, 13, 17, 15, 16, 17, 15, 13, 12, 12, 10, 19, 15, 15, 16, 12, 24, 21, 14, 10, 19, 17, 14, 13, 20, 18, 11, 17, 21, 13, 21, 19, 17, 15, 19, 10, 17, 12, 16, 13, 16, 15, 13, 16, 16, 16, 15, 14, 11, 14, 18, 15, 25, 9, 19, 12, 13, 12, 18, 12, 13, 16, 13, 17, 18, 19, 16, 11, 18, 18, 27, 11, 22, 17, 13, 22, 20, 16, 9, 17, 14, 12, 20, 17, 15, 18, 16, 15, 15, 16, 16, 18, 18, 17, 21, 17, 12, 12, 17, 10, 20, 19, 18, 25] >>> assert_true(PY21 or g1(test1.f4) == g1(test1.f5)) Ok >>> print test1.f6(n=100, p=10001) 803 >>> test1.f7(-2-1j, 1+1j, 0.04+0.08j) !!!!!!!""""""####################$$$$$$$$%%%&'*.)+ %$$$$$######"""""""""""" !!!!!!"""""####################$$$$$$$$%%%%&'(+2-)'&%%$$$$$######"""""""""" !!!!!""""###################$$$$$$$$$%%%&&'6E0~ 9=6(&%%%%$$$$######"""""""" !!!!"""###################$$$$$$$$%%&&&&''(+B @('&&%%%%%$$#######"""""" !!!"""##################$$$$$$$%%&(,32)),5+,/M E-,*+)''''-&$$#######""""" !!!"#################$$$$$%%%%%&&&(,b~~/: 0,,:/;/&%$########""" !!"###############$$$%%%%%%%%&&&&()+/? ='&%$$########"" !!"###########$$$%'&&&&%%%&&&&'')U ~ G,('%%$$#######"" !"######$$$$$$%%&&*+)(((2*(''(()2p :@:'%$$########" !###$$$$$$$$%%%%&'(*.IB24 0J,**+~ -(&%$$$######## !#$$$$$$$$%%%%%&'',+2~ // ?*&%$$$######## !$$$$$$$%&&&&'(I+,-j 9 ~*&%%$$$######## !%%&&')''((()-+/S ('&%%$$$$####### !%%&&')''((()-+/S ('&%%$$$$####### !$$$$$$$%&&&&'(I+,-j 9 ~*&%%$$$######## !#$$$$$$$$%%%%%&'',+2~ // ?*&%$$$######## !###$$$$$$$$%%%%&'(*.IB24 0J,**+~ -(&%$$$######## !"######$$$$$$%%&&*+)(((2*(''(()2p :@:'%$$########" !!"###########$$$%'&&&&%%%&&&&'')U ~ G,('%%$$#######"" !!"###############$$$%%%%%%%%&&&&()+/? 
='&%$$########"" !!!"#################$$$$$%%%%%&&&(,b~~/: 0,,:/;/&%$########""" !!!"""##################$$$$$$$%%&(,32)),5+,/M E-,*+)''''-&$$#######""""" !!!!"""###################$$$$$$$$%%&&&&''(+B @('&&%%%%%$$#######"""""" !!!!!""""###################$$$$$$$$$%%%&&'6E0~ 9=6(&%%%%$$$$######"""""""" !!!!!!"""""####################$$$$$$$$%%%%&'(+2-)'&%%$$$$$######"""""""""" !!!!!!!""""""####################$$$$$$$$%%%&'*.)+J%$$$$$######"""""""""""" >>> test1.f7bis((-2.0, -1.0), (1.0, 1.0), (0.04, 0.08)) !!!!!!!""""""####################$$$$$$$$%%%&'*.)+ %$$$$$######"""""""""""" !!!!!!"""""####################$$$$$$$$%%%%&'(+2-)'&%%$$$$$######"""""""""" !!!!!""""###################$$$$$$$$$%%%&&'6E0~ 9=6(&%%%%$$$$######"""""""" !!!!"""###################$$$$$$$$%%&&&&''(+B @('&&%%%%%$$#######"""""" !!!"""##################$$$$$$$%%&(,32)),5+,/M E-,*+)''''-&$$#######""""" !!!"#################$$$$$%%%%%&&&(,b~~/: 0,,:/;/&%$########""" !!"###############$$$%%%%%%%%&&&&()+/? ='&%$$########"" !!"###########$$$%'&&&&%%%&&&&'')U ~ G,('%%$$#######"" !"######$$$$$$%%&&*+)(((2*(''(()2p :@:'%$$########" !###$$$$$$$$%%%%&'(*.IB24 0J,**+~ -(&%$$$######## !#$$$$$$$$%%%%%&'',+2~ // ?*&%$$$######## !$$$$$$$%&&&&'(I+,-j 9 ~*&%%$$$######## !%%&&')''((()-+/S ('&%%$$$$####### !%%&&')''((()-+/S ('&%%$$$$####### !$$$$$$$%&&&&'(I+,-j 9 ~*&%%$$$######## !#$$$$$$$$%%%%%&'',+2~ // ?*&%$$$######## !###$$$$$$$$%%%%&'(*.IB24 0J,**+~ -(&%$$$######## !"######$$$$$$%%&&*+)(((2*(''(()2p :@:'%$$########" !!"###########$$$%'&&&&%%%&&&&'')U ~ G,('%%$$#######"" !!"###############$$$%%%%%%%%&&&&()+/? ='&%$$########"" !!!"#################$$$$$%%%%%&&&(,b~~/: 0,,:/;/&%$########""" !!!"""##################$$$$$$$%%&(,32)),5+,/M E-,*+)''''-&$$#######""""" !!!!"""###################$$$$$$$$%%&&&&''(+B @('&&%%%%%$$#######"""""" !!!!!""""###################$$$$$$$$$%%%&&'6E0~ 9=6(&%%%%$$$$######"""""""" !!!!!!"""""####################$$$$$$$$%%%%&'(+2-)'&%%$$$$$######"""""""""" !!!!!!!""""""####################$$$$$$$$%%%&'*.)+J%$$$$$######"""""""""""" >>> print protect(test1.f8) in finally clause exception: ZeroDivisionError >>> test1.f9(50) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 >>> test1.f10() 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 >>> print test1.f11(50) 950 >>> print test1.f11(-1.0) 1001.0 ################### #### TEST2 #### ################### >>> def f1(): ... return -abs(-10) >>> print f1() -10 >>> def f11(): ... 
return -abs(-10.125)+5.0+6.0 >>> print f11() 0.875 ################### #### TEST3 #### ################### >>> test3.f11([5,6,7,5,3,5,6,2,5,5,6,7,5]) [0, 0, 0, 1, 0, 1, 2, 0, 1, 1, 2, 3, 4] >>> print test3.f13((None, 'hello')) None hello None hello ('hello', None) >>> print test3.f13([12, 34]) 12 34 12 34 (34, 12) >>> test3.f14(5) ${True} >>> test3.f14(-2) ${False} >>> print test3.f16(123) -124 >>> print test3.f17('abc') ('abc', 0, ()) >>> print test3.f17('abc', 'def', 'ghi', 'jkl') ('abc', 'def', ('ghi', 'jkl')) >>> print test3.f19([1,2,3,4]) hello ${([1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4], True, 0)} >>> print test3.f20('l', 12) [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110] >>> print test3.f20('L', 12) [0L, 10L, 20L, 30L, 40L, 50L, 60L, 70L, 80L, 90L, 100L, 110L] >>> print test3.f20('i', 12) [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110] >>> print test3.f20('I', 12) [0L, 10L, 20L, 30L, 40L, 50L, 60L, 70L, 80L, 90L, 100L, 110L] >>> print test3.f20('c', 12, 'x') ['\x00', '\n', '\x14', '\x1e', '(', '2', '<', 'F', 'P', 'Z', 'd', 'n'] >>> print test3.f20('b', 12) [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110] >>> print test3.f20('B', 12) [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110] >>> print test3.f20('h', 12) [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110] >>> print test3.f20('H', 12) [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110] >>> print test3.f20('B', 17) [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160] >>> print test3.f20('h', 28) [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270] >>> print test3.f21([2,6,'never',9,3,0.123]) 3 >>> test3.f22(0) Catch! >>> print test3.f22(-2.0) None >>> test3.f23(0.0) Catch! Catch! NameError >>> test3.f23(23) NameError >>> print [x.__name__ for x in test3.f26()] ['float', 'list', 'urllib', 'psyco.classes', 'full'] >>> test3.N=5; print test3.f28(), test3.f28_1(), test3.f28_b(), test3.f28_b1() test_getframe(): test_getframe f28 ? 5 test_getframe(): test_getframe test_getframe1 f28_1 ? 5 test_getframe_b(): test_getframe_b f28_b ? 5 test_getframe_b(): test_getframe_b test_getframe_b1 f28_b1 ? 5 >>> print test3.f27(), test3.f27_1(), test3.f27_b(), test3.f27_b1() test_getframe(): test_getframe f28 f27 ? test_getframe(): test_getframe f28 f27 ? test_getframe(): test_getframe f28 f27 ? (5, 6, 7) test_getframe(): test_getframe test_getframe1 f28_1 f27_1 ? test_getframe(): test_getframe test_getframe1 f28_1 f27_1 ? test_getframe(): test_getframe test_getframe1 f28_1 f27_1 ? (51, 61, 71) test_getframe_b(): test_getframe_b f28_b f27_b ? test_getframe_b(): test_getframe_b f28_b f27_b ? test_getframe_b(): test_getframe_b f28_b f27_b ? (95, 96, 97) test_getframe_b(): test_getframe_b test_getframe_b1 f28_b1 f27_b1 ? test_getframe_b(): test_getframe_b test_getframe_b1 f28_b1 f27_b1 ? test_getframe_b(): test_getframe_b test_getframe_b1 f28_b1 f27_b1 ? 
(951, 961, 971) >>> print test3.f29(range(10,0,-1)) [10, 9, 7, 6, 5, 4, 3, 2, 1] >>> print test3.f32('c:/temp') ('c:', '/temp') >>> print test3.f32('*') ('', '*') >>> print test3.f32('/dev/null') ('', '/dev/null') >>> test3.f33(31) 31 >>> print test3.f33(33) None >>> print test3.f33(32) None >>> test3.f34(70000) 0 1 3 7 15 31 63 127 255 511 1023 2047 4095 8191 16383 32767 65535 >>> test3.f35() [[5 5 5] [5 5 5] [5 5 5]] >>> test3.f37(None) ${True} 1 >>> print test3.f38(12) [234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234] ################### #### TEST5 #### ################### >>> print hash(test5.f('file1-basetests')) 33961417 >>> prep(); print hash(test5.f('file2-basetests', 'rb')) 2034921519 >>> print test5.f2(4,5,6) (6, 5, 4) >>> print test5.f2(4,5,(6,)) (6, 5, 4) >>> assert_true(test5.f3(15, 32) == (clamp(64424509440L), 30, ... clamp(32212254720L), 0, 7, 0)) Ok >>> assert_true(test5.f3(15, 31) == (clamp(32212254720L), 30, ... clamp(32212254720L), 0, 7, 0)) Ok >>> assert_true(test5.f3(15, 33) == (clamp(128849018880L), 30, ... clamp(32212254720L), 0, 7, 0)) Ok >>> assert_true(test5.f3(15, 63) == (clamp(138350580552821637120L), 30, ... clamp(32212254720L), 0, 7, 0)) Ok >>> assert_true(test5.f3(-15, 63) == (clamp(-138350580552821637120L), -30, ... clamp(-32212254720L), -1, -8, -1)) Ok >>> assert_true(test5.f3(-15, 32) == (clamp(-64424509440L), -30, ... clamp(-32212254720L), -1, -8, -1)) Ok >>> assert_true(test5.f3(-15, 31) == (clamp(-32212254720L), -30, ... clamp(-32212254720L), -1, -8, -1)) Ok >>> assert_true(test5.f3(-15, 33) == (clamp(-128849018880L), -30, ... clamp(-32212254720L), -1, -8, -1)) Ok >>> assert_true(test5.f3(-15, 2) == (-60, -30, ... clamp(-32212254720L), -4, -8, -1)) Ok >>> assert_true(test5.f3(-15, 1) == (-30, -30, ... clamp(-32212254720L), -8, -8, -1)) Ok >>> assert_true(test5.f3(-15, 0) == (-15, -30, ... clamp(-32212254720L), -15, -8, -1)) Ok >>> assert_true(test5.f3(15, 0) == (15, 30, ... clamp(32212254720L), 15, 7, 0)) Ok >>> assert_true(test5.f3(15, 1) == (30, 30, ... clamp(32212254720L), 7, 7, 0)) Ok >>> assert_true(test5.f3(15, 2) == (60, 30, ... 
clamp(32212254720L), 3, 7, 0)) Ok >>> assert_true(test5.f3(-1, 0) == (-1, -2, -2147483648L, -1, -1, -1)) Ok >>> print test5.f4("some-string") abcsome-string >>> print test5.overflowtest() -3851407362L >>> test5.booltest() ${False} ${True} ${False} ${True} ${False} ${True} ${[a & b for a in (False,True) for b in (False,True)]} ${[a | b for a in (False,True) for b in (False,True)]} ${[a ^ b for a in (False,True) for b in (False,True)]} ${True} >>> test5.exc_test() IndexError list index out of range 2 >>> test5.seqrepeat() 'abcabcabcabcabc' 'abcabcabcabcabc' [3, 'z', 3, 'z', 3, 'z', 3, 'z', 3, 'z'] [6, 3, 6, 3, 6, 3, 6, 3, 6, 3] 'yyyyyx' 'abcabcabcabcabcabc' 'abcabcabcabcabcabc' [3, 'z', 3, 'z', 3, 'z', 3, 'z', 3, 'z', 3, 'z'] [6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3] 'yyyyyyx' '' '' [] [] 'x' >>> test5.f5(99) 100 100 4 100 >>> test5.f5(-3.0) -2.0 -2.0 4 -2.0 >>> test5.f6(3) None None None None 5 >>> test5.f6(-2) -1 -1 None -1 48 >>> test5.f6(99) 100 100 None 100 IndexError >>> test5.f6("error") TypeError >>> test5.f7(8) [8, 75, 121] [8, 15, 11] >>> test5.f7(8.5) [8.5, 75, 121] [8.5, 15, 11] >>> test5.f8(8) [0, 30, 44] [0.0, 0.0, 0.0] >>> test5.f8(8.5) [0.0, 30, 44] [0.0, 0.0, 0.0] >>> print test5.f9(10) (4, 11) >>> assert_true(PY21 or test5.f9(sys.maxint) == (4, 2147483648L)) Ok >>> test5.teststrings() 'userhruuiadsfz1if623opadoa8ua09q34rx093q\x00qw09exdqw0e9dqw9e8d8qw9r8qw\x1d\xd7\xae\xa2\x06\x10\x1a\x00a\xff\xf6\xee\x15\xa2\xea\x89akjsdfhqweirewru 3498cr 3849rx398du389du389dur398d31623' 'someanother' 'userhru.' '.userhru' 'userhruuiadsfz' 'akjsdfhqweirewru 3498cr 3849rx398du389du389dur398d31623\x1d\xd7\xae\xa2\x06\x10\x1a\x00a\xff\xf6\xee\x15\xa2\xea\x89qw09exdqw0e9dqw9e8d8qw9r8qw\x0009q34rx093qoa8uaopad623if1uiadsfzuserhru' 1 'a' 'sdfhqweirewru 3498cr 3849rx398du389du389dur398d3' 1 1 0 1 >>> test5.testslices('hello') '' 'hello' 'hello' '' 'hello' 'hello' '' 'ello' 'ello' '' '' '' >>> test5.testovf(1987654321, 2012345789) 4000000110 4012345789 3987654321 -24691468 -12345789 -12345679 3999847802852004269 4024691578000000000 3975308642000000000 1987654321 -1987654321 1987654321 >>> test5.testovf(-2147483647-1, 2012345789) -135137859 4012345789 -147483648 -4159829437 -12345789 -4147483648 -4321479675999158272 4024691578000000000 -4294967296000000000 -2147483648 2147483648 2147483648 >>> test5.testovf(-2147483647-1, -2147483647-1) -4294967296 -147483648 -147483648 0 4147483648 -4147483648 4611686018427387904 -4294967296000000000 -4294967296000000000 -2147483648 2147483648 2147483648 >>> test5.rangetypetest(12) list list list xrange xrange xrange >>> test5.rangetest(15) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 10 11 12 13 14 15 14 13 12 11 >>> test5.xrangetest(15) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 10 11 12 13 14 15 14 13 12 11 >>> test5.longrangetest() [1234567890123456789L, 1234567890123456790L] >>> print list(xrange(10)) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> x = xrange(10); print sys.getrefcount(x); print list(x) 2 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> print list(xrange(-2, 5)) [-2, -1, 0, 1, 2, 3, 4] >>> print list(xrange(-2, 5, 2)) [-2, 0, 2, 4] >>> print list(xrange(-2, 5, -2)) [] >>> print list(xrange(5, -2, -2)) [5, 3, 1, -1] >>> test5.arraytest() S >>> test5.proxy_defargs() 12 >>> test5.setfilter() ('f1', 1) ('f2', 0) >>> test5.makeSelection() do stuff here >>> test5.class_creation_1(1111) ok >>> test5.class_creation_2(1111) ok >>> test5.class_creation_3() ok >>> test5.class_creation_4(1111) ok >>> test5.power_int(1) 332833500 >>> test5.power_int(10) 332833500 >>> test5.power_int_long(1) long 332833500 
>>> test5.power_int_long(10) long 332833500 >>> test5.power_float(1) float 7038164 >>> test5.power_float(10) float 7038164 >>> test5.conditional_doubletest_fold() ok(1) ok(2) >>> test5.importname(['ab', 'cd', 'ef']) Ok >>> test5.sharedlists(3); test5.sharedlists(12) 4 8 >>> test5.variousslices() slice(4, None, None) slice(None, 7, None) slice(9, 'hello', None) slice('world', None, None) slice(1, 10, 'hello') 4 2147483647 0 7 slice(9, 'hello', None) slice('world', None, None) slice(1, 10, 'hello') >>> test5.listgetitem() foobar Ok >>> test5.negintpow(8) 0.015625 ########################### #### COMPACTOBJECT #### ########################### >>> for i in range(5): print compactobject.do_test(i) or i 0 1 2 3 4 >>> for i in range(5, 10): ... compactobject.do_test(i, ... do_test_1=psyco.proxy(compactobject.do_test_1)) ... print i 5 6 7 8 9 >>> compactobject.pcompact_test() 13 hello None >>> compactobject.pcompact_creat('hel' + 'lo') (0, 0, 0) 3 None hello (0, 1, 2) 4 None hello (0, 2, 4) 5 None hello (0, 3, 6) 6 None hello (0, 4, 8) 7 None hello (0, 5, 10) 8 None hello (0, 6, 12) 9 None hello (0, 7, 14) 10 None hello (0, 8, 16) 11 None hello (0, 9, 18) 12 None hello (0, 10, 20) 13 None hello 11 0 >>> compactobject.pcompact_modif('hel' + 'lo') hello 1 0 1 hello 1 0 2 hello 1 0 3 hello 1 0 4 hello 1 0 5 hello 1 0 6 hello 1 0 7 hello 1 0 8 hello 1 0 9 hello 1 0 10 hello 1 0 11 hello 1 0 12 hello 1 0 13 hello 1 0 14 hello 1 0 15 hello 1 0 16 hello 1 0 17 hello 1 0 18 hello 1 0 19 hello 1 0 20 hello 1 0 21 0
#! /usr/bin/env python
# coding: utf-8

import uu

# Decode uuenc.txt and write the result to uudec.txt
uu.decode('uuenc.txt', 'uudec.txt')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 28 19:16:33 2018

Decode "UU" CTF
Link : https://www.root-me.org/fr/Challenges/Cryptanalyse/Encodage-UU
Resources : http://repository.root-me.org/Cryptographie/EN%20-%20Encodings%20format.pdf
module : https://docs.python.org/3/library/uu.html

@author: adminSeb
"""
import uu

uu.decode("UUchalenge.txt", "UUfinal.txt")
def get_payload(self, i=None, decode=False):
    """Return a reference to the payload.

    The payload will either be a list object or a string.  If you mutate
    the list object, you modify the message's payload in place.  Optional
    i returns that index into the payload.

    Optional decode is a flag indicating whether the payload should be
    decoded or not, according to the Content-Transfer-Encoding header
    (default is False).

    When True and the message is not a multipart, the payload will be
    decoded if this header's value is `quoted-printable' or `base64'.  If
    some other encoding is used, or the header is missing, or if the
    payload has bogus data (i.e. bogus base64 or uuencoded data), the
    payload is returned as-is.

    If the message is a multipart and the decode flag is True, then None
    is returned.
    """
    if self.is_multipart():
        if decode:
            return None
        if i is None:
            return self._payload
        else:
            return self._payload[i]
    if i is not None and not isinstance(self._payload, list):
        raise TypeError('Expected list, got %s' % type(self._payload))
    payload = self._payload
    cte = str(self.get('content-transfer-encoding', '')).lower()
    if isinstance(payload, str):
        if utils._has_surrogates(payload):
            bpayload = payload.encode('ascii', 'surrogateescape')
            if not decode:
                try:
                    payload = bpayload.decode(
                        self.get_param('charset', 'ascii'), 'replace')
                except LookupError:
                    payload = bpayload.decode('ascii', 'replace')
        elif decode:
            try:
                bpayload = payload.encode('ascii')
            except UnicodeError:
                bpayload = payload.encode('raw-unicode-escape')
    if not decode:
        return payload
    if cte == 'quoted-printable':
        return quopri.decodestring(bpayload)
    elif cte == 'base64':
        value, defects = decode_b(b''.join(bpayload.splitlines()))
        for defect in defects:
            self.policy.handle_defect(self, defect)
        return value
    elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
        in_file = BytesIO(bpayload)
        out_file = BytesIO()
        try:
            uu.decode(in_file, out_file, quiet=True)
            return out_file.getvalue()
        except uu.Error:
            return bpayload
    if isinstance(payload, str):
        return bpayload
    return payload
#! /usr/bin/env python
def dump_file(self):
    in_file = StringIO.StringIO(self.x_file_data)
    out_file = StringIO.StringIO()
    uu.decode(in_file, out_file)
    return out_file.getvalue()
def coder_body(in_p, out_p):
    if flag == 'e':
        uu.encode(in_p, out_p)
    if flag == 'd':
        uu.decode(in_p, out_p)
filesize = int(filesize)

with open(filename, "wb") as f:
    print("receiving")
    while True:
        bytes_read = client_socket.recv(BUFFER_SIZE)
        if not bytes_read:
            break
        f.write(bytes_read)
        print(bytes_read)

print("received")
client_socket.close()
s.close()

uu.decode(filename, "Video.mp4")

video = cv2.VideoCapture("Video.mp4")
a = 0
while True:
    a = a + 1
    check, frame = video.read()
    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("Capturing", frame)
    key = cv2.waitKey(1)
    if (key == ord('q')):
        break
import uu

if __name__ == '__main__':
    with open('h', 'rb') as i:
        with open('h.uud', 'wb') as o:
            uu.encode(i, o)

    with open('h.uud', 'rb') as i:
        with open('h.txt', 'wb') as o:
            uu.decode(i, o)
Mmh5Vm1wS1MyTnJOVmhQVmxwcApZbXRLTmdwV01WcGhXVmRTUms1V1dsVmlSMmhYUTJ4a1JsTnRP VmRXTTJoeVZsUkdUMUl5U2tkaFJUVlhWMFpLVmxadE1UQlpWMVpYCldraEtXR0pVYUV4WFZsWlda VVpaZVZScmJHbFNiVkp3Q2xWdGRIZFVWbHB6V1ROb1YwMXJNVFJWTWpWWFZsZEtXR1JGZUZkV2Vr RjQKVlZSS1NtVkdWbk5oUjNkTFZXeG9VMVl4V25SbFNHUnNWbXh3TUZSV1ZtdGhSa2w0VW1wYVZs WXphSG9LVm0weFIyTnNaSFJoUmxwTwpVbTVDYUZkc1dsWmxSbHBYVW01T1YySlhlRmhXTUZaTFUx WmFkR05GWkZaa00wSlRWRlphYzA1V1ZuUk9WWFJvVmxSQ05WWlhlRzlYClozQlhUVEZLYndwV2JY QkhaREZaZUZwSVNsQldNMEp3Vm14YWQxTldXbkZUV0docVRWWldOVlV5TlV0V1IwcElZVVZXV21F eGNETlUKVlZweVpERmFWVlpzWkdGTk1FcFFWbGQwVjFOck1VZGFSbFpTQ21KVlduQlVWM1IzVTBa VmVVNVdUbGRpVlhCSlEyMVdSMXBHY0ZkTgpNVXB2VVRJeFIxSXhXbGxhUm1ocFYwWktlRmRYZEd0 Vk1sWnpXa2hLWVZKNmJGaFVWM1JYVG14V1dFMVZaRmNLVFZad01GWkhjRk5XCmJVWnlWMjFHWVZa c2NFeFdNV1JMVWpGa2MyRkdUazVXV0VKSVZtcEdZV0l5VVhoWFdHZExWa2QwYTFZeFpFaGxTRXBX WVdzMVZGbHEKUm5OamJGcDFXa1pTVXdwaVdHZzFWbTB4ZDFVeFdYZE5WbHBxVTBjNVRGVlVTalJo TWsxNFZtNUtWbUpYZUZoV2ExWldaREZhYzFWcgpkRTVTTUZZMVZXMDFUMVpIUlhsVmJrWldZa1pL ZGxaRldtRmpkM0JoQ2xKRlNtOVVWVkpYVTBaVmVXVkhkRnBXYXpWSVZqSTFRMVpXCldrWmpSbEpY Vm14d2FGbDZSbUZXVmtwMFpFWmthVkp1UWtwV2JYaGhZakpGZUZkcmFGWlhSM2hSVld0a05GSlda SFVLWWpOa1VGVlkKUWtWWGJtOTNUMVZPYmxCVU1Fc0sK """.replace("\n", "") for i in range(16): str = base64.b64decode(str) # print(str) f = open('in.txt', 'w') f.write(str.decode()) f.close() uu.decode('in.txt', 'out.txt') f = open('out.txt', 'r') print(f.read()) f.close
def get_payload(self, i=None, decode=False):
    """Return a reference to the payload.

    The payload will either be a list object or a string.  If you mutate
    the list object, you modify the message's payload in place.  Optional
    i returns that index into the payload.

    Optional decode is a flag indicating whether the payload should be
    decoded or not, according to the Content-Transfer-Encoding header
    (default is False).

    When True and the message is not a multipart, the payload will be
    decoded if this header's value is `quoted-printable' or `base64'.  If
    some other encoding is used, or the header is missing, or if the
    payload has bogus data (i.e. bogus base64 or uuencoded data), the
    payload is returned as-is.

    If the message is a multipart and the decode flag is True, then None
    is returned.
    """
    # Here is the logic table for this code, based on the email5.0.0 code:
    #   i     decode  is_multipart  result
    # ------  ------  ------------  ------------------------------
    #  None   True    True          None
    #   i     True    True          None
    #  None   False   True          _payload (a list)
    #   i     False   True          _payload element i (a Message)
    #   i     False   False         error (not a list)
    #   i     True    False         error (not a list)
    #  None   False   False         _payload
    #  None   True    False         _payload decoded (bytes)
    # Note that Barry planned to factor out the 'decode' case, but that
    # isn't so easy now that we handle the 8 bit data, which needs to be
    # converted in both the decode and non-decode path.
    if self.is_multipart():
        if decode:
            return None
        if i is None:
            return self._payload
        else:
            return self._payload[i]
    # For backward compatibility, Use isinstance and this error message
    # instead of the more logical is_multipart test.
    if i is not None and not isinstance(self._payload, list):
        raise TypeError('Expected list, got %s' % type(self._payload))
    payload = self._payload
    # cte might be a Header, so for now stringify it.
    cte = str(self.get('content-transfer-encoding', '')).lower()
    # payload may be bytes here.
    if isinstance(payload, str):
        if utils._has_surrogates(payload):
            bpayload = payload.encode('ascii', 'surrogateescape')
            if not decode:
                try:
                    payload = bpayload.decode(
                        self.get_param('charset', 'ascii'), 'replace')
                except LookupError:
                    payload = bpayload.decode('ascii', 'replace')
        elif decode:
            try:
                bpayload = payload.encode('ascii')
            except UnicodeError:
                # This won't happen for RFC compliant messages (messages
                # containing only ASCII code points in the unicode input).
                # If it does happen, turn the string into bytes in a way
                # guaranteed not to fail.
                bpayload = payload.encode('raw-unicode-escape')
    if not decode:
        return payload
    if cte == 'quoted-printable':
        return quopri.decodestring(bpayload)
    elif cte == 'base64':
        # XXX: this is a bit of a hack; decode_b should probably be factored
        # out somewhere, but I haven't figured out where yet.
        value, defects = decode_b(b''.join(bpayload.splitlines()))
        for defect in defects:
            self.policy.handle_defect(self, defect)
        return value
    elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
        in_file = BytesIO(bpayload)
        out_file = BytesIO()
        try:
            uu.decode(in_file, out_file, quiet=True)
            return out_file.getvalue()
        except uu.Error:
            # Some decoding problem
            return bpayload
    if isinstance(payload, str):
        return bpayload
    return payload
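# A small, self-contained illustration of the x-uuencode branch that the
# get_payload(decode=True) logic above exercises, assuming a Python release
# that still ships the (since-deprecated) uu module. The sample payload below
# is simply b"hello\n" uuencoded; it is not taken from the original code.
from email.message import Message

msg = Message()
msg['Content-Transfer-Encoding'] = 'x-uuencode'
msg.set_payload("begin 644 -\n&:&5L;&\\*\n \nend\n")
assert msg.get_payload(decode=True) == b"hello\n"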
def update_event(self, inp=-1):
    self.set_output_val(0, uu.decode(self.input(0), self.input(1), self.input(2), self.input(3)))
def uudecode(infile, outfile):
    uu_deb = open(infile, 'r')
    bin_deb = open(outfile, 'w')
    uu.decode(uu_deb, bin_deb)
    uu_deb.close()
    bin_deb.close()
TkZsV1RYbFVXR3hWWVRKb2MxVnRlSGRYVmxaelZtNWtWMkpHYkRSWFZFNXZWR3hKZUZKcVZsZFNN Mmh5Vm1wS1MyTnJOVmhQVmxwcApZbXRLTmdwV01WcGhXVmRTUms1V1dsVmlSMmhYUTJ4a1JsTnRP VmRXTTJoeVZsUkdUMUl5U2tkaFJUVlhWMFpLVmxadE1UQlpWMVpYCldraEtXR0pVYUV4WFZsWlda VVpaZVZScmJHbFNiVkp3Q2xWdGRIZFVWbHB6V1ROb1YwMXJNVFJWTWpWWFZsZEtXR1JGZUZkV2Vr RjQKVlZSS1NtVkdWbk5oUjNkTFZXeG9VMVl4V25SbFNHUnNWbXh3TUZSV1ZtdGhSa2w0VW1wYVZs WXphSG9LVm0weFIyTnNaSFJoUmxwTwpVbTVDYUZkc1dsWmxSbHBYVW01T1YySlhlRmhXTUZaTFUx WmFkR05GWkZaa00wSlRWRlphYzA1V1ZuUk9WWFJvVmxSQ05WWlhlRzlYClozQlhUVEZLYndwV2JY QkhaREZaZUZwSVNsQldNMEp3Vm14YWQxTldXbkZUV0docVRWWldOVlV5TlV0V1IwcElZVVZXV21F eGNETlUKVlZweVpERmFWVlpzWkdGTk1FcFFWbGQwVjFOck1VZGFSbFpTQ21KVlduQlVWM1IzVTBa VmVVNVdUbGRpVlhCSlEyMVdSMXBHY0ZkTgpNVXB2VVRJeFIxSXhXbGxhUm1ocFYwWktlRmRYZEd0 Vk1sWnpXa2hLWVZKNmJGaFVWM1JYVG14V1dFMVZaRmNLVFZad01GWkhjRk5XCmJVWnlWMjFHWVZa c2NFeFdNV1JMVWpGa2MyRkdUazVXV0VKSVZtcEdZV0l5VVhoWFdHZExWa2QwYTFZeFpFaGxTRXBX WVdzMVZGbHEKUm5OamJGcDFXa1pTVXdwaVdHZzFWbTB4ZDFVeFdYZE5WbHBxVTBjNVRGVlVTalJo TWsxNFZtNUtWbUpYZUZoV2ExWldaREZhYzFWcgpkRTVTTUZZMVZXMDFUMVpIUlhsVmJrWldZa1pL ZGxaRldtRmpkM0JoQ2xKRlNtOVVWVkpYVTBaVmVXVkhkRnBXYXpWSVZqSTFRMVpXCldrWmpSbEpY Vm14d2FGbDZSbUZXVmtwMFpFWmthVkp1UWtwV2JYaGhZakpGZUZkcmFGWlhSM2hSVld0a05GSlda SFVLWWpOa1VGVlkKUWtWWGJtOTNUMVZPYmxCVU1Fc0sK""" for i in range(16): cipher = base64.b64decode(cipher) f = open("tmp.txt", "w") f.write(cipher) f.close() uu.decode("tmp.txt", "tmp2.txt") f = open("tmp2.txt", "r") raw = f.read() print(raw)
def process(self, infile, out_dir=None, create_subdir=True, rm_infile=False):
    """Process a text file and save processed files.

    Args:
        infile (str): Full path to a text file.
        out_dir (str): Directory to store output files. Defaults to the
            parent directory of infile.
        create_subdir (bool): If a subdirectory with the name of the infile
            should be created. If this is not true, files will be prefixed
            with the infile filename.
        rm_infile (bool): If the infile should be removed after processing.
            Defaults to False.

    Returns:
        None
    """
    if not infile.endswith('.txt'):
        raise ValueError(
            '{file} Does not appear to be a .txt file.'.format(file=infile))

    with open(infile, encoding="utf8") as f:
        intxt = f.read()

    if out_dir is None:
        out_dir = os.path.dirname(infile)
    infile_base = os.path.basename(infile).split('.txt')[0]
    metadata_file_format = "{base}_{num}.metadata.json"
    document_file_format = '{base}_{sec_doc_num}.{file}'

    if create_subdir:
        out_dir = os.path.join(out_dir, infile_base)
        make_path(out_dir)
        metadata_file_format = "{num}.metadata.json"
        document_file_format = '{sec_doc_num}.{file}'

    sec_doc_cursor = 0
    sec_doc_count = intxt.count("<SEC-DOCUMENT>")
    for sec_doc_num in range(sec_doc_count):
        sec_doc_match = self.re_sec_doc.search(intxt, pos=sec_doc_cursor)
        if not sec_doc_match:
            break

        sec_doc_cursor = sec_doc_match.span()[1]
        sec_doc = sec_doc_match.group(1)

        # metadata
        metadata_match = self.re_sec_header.search(sec_doc)
        metadata_txt = metadata_match.group(1)
        metadata_cursor = metadata_match.span()[1]
        metadata_filename = metadata_file_format.format(base=infile_base,
                                                        num=sec_doc_num)
        metadata_file = os.path.join(out_dir, metadata_filename)
        metadata_dict = self.process_metadata(metadata_txt)
        # logging.info("Metadata written into {}".format(metadata_file))

        # Loop through every document
        metadata_dict["documents"] = []
        documents = sec_doc[metadata_cursor:].strip()
        doc_count = documents.count("<DOCUMENT>")
        doc_cursor = 0
        for doc_num in range(doc_count):
            doc_match = self.re_doc.search(documents, pos=doc_cursor)
            if not sec_doc_match:
                break

            doc = doc_match.group(1)
            doc_cursor = doc_match.span()[1]
            doc_metadata = self.process_document_metadata(doc)
            metadata_dict["documents"].append(doc_metadata)

            # Get file data and file name
            doc_filename = doc_metadata["filename"]
            doc_txt = self.re_text.search(doc).group(1).strip()
            target_doc_filename = document_file_format.format(
                base=infile_base,
                sec_doc_num=sec_doc_num,
                file=doc_filename)
            doc_outfile = os.path.join(out_dir, target_doc_filename)

            is_uuencoded = doc_txt.find("begin 644 ") != -1

            if is_uuencoded:
                logging.info(
                    "{} contains an uu-encoded file".format(infile))
                encfn = doc_outfile + ".uu"
                with open(encfn, "w", encoding="utf8") as encfh:
                    encfh.write(doc_txt)
                uu.decode(encfn, doc_outfile)
                os.remove(encfn)
            else:
                logging.info(
                    "{} contains an non uu-encoded file".format(infile))
                with open(doc_outfile, "w", encoding="utf8") as outfh:
                    outfh.write(doc_txt)

        # Save SEC-DOCUMENT metadata to file
        with open(metadata_file, "w", encoding="utf8") as fileh:
            formatted_metadata = json.dumps(metadata_dict, indent=2,
                                            sort_keys=True,
                                            ensure_ascii=False)
            fileh.write(formatted_metadata)

    if rm_infile:
        os.remove(infile)
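# The uuencoded branch above round-trips through a temporary "<outfile>.uu"
# file on disk. Where that extra I/O is unwanted, the same decode can be driven
# from memory; a sketch under the assumption that doc_txt holds the complete
# "begin 644 ..." / "end" block (the helper name is made up, not part of the
# class above).
import io
import uu

def decode_uu_document(doc_txt: str, doc_outfile: str) -> None:
    # Feed the uuencoded text to uu.decode() through an in-memory buffer
    # instead of writing a temporary .uu file first.
    with open(doc_outfile, "wb") as dst:
        uu.decode(io.BytesIO(doc_txt.encode("ascii", "replace")), dst, quiet=True)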
def test_main():

    run_unittest(InputValidationTests)

    try:
        import imgfile
    except ImportError:
        return

    # Create binary test files
    uu.decode(get_qualified_path('testrgb'+os.extsep+'uue'), 'test'+os.extsep+'rgb')

    image, width, height = getimage('test'+os.extsep+'rgb')

    # Return the selected part of image, which should by width by height
    # in size and consist of pixels of psize bytes.
    if verbose:
        print 'crop'
    newimage = imageop.crop (image, 4, width, height, 0, 0, 1, 1)

    # Return image scaled to size newwidth by newheight. No interpolation
    # is done, scaling is done by simple-minded pixel duplication or removal.
    # Therefore, computer-generated images or dithered images will
    # not look nice after scaling.
    if verbose:
        print 'scale'
    scaleimage = imageop.scale(image, 4, width, height, 1, 1)

    # Run a vertical low-pass filter over an image. It does so by computing
    # each destination pixel as the average of two vertically-aligned source
    # pixels. The main use of this routine is to forestall excessive flicker
    # if the image is displayed on a video device that uses interlacing,
    # hence the name.
    if verbose:
        print 'tovideo'
    videoimage = imageop.tovideo (image, 4, width, height)

    # Convert an rgb image to an 8 bit rgb
    if verbose:
        print 'rgb2rgb8'
    greyimage = imageop.rgb2rgb8(image, width, height)

    # Convert an 8 bit rgb image to a 24 bit rgb image
    if verbose:
        print 'rgb82rgb'
    image = imageop.rgb82rgb(greyimage, width, height)

    # Convert an rgb image to an 8 bit greyscale image
    if verbose:
        print 'rgb2grey'
    greyimage = imageop.rgb2grey(image, width, height)

    # Convert an 8 bit greyscale image to a 24 bit rgb image
    if verbose:
        print 'grey2rgb'
    image = imageop.grey2rgb(greyimage, width, height)

    # Convert an 8-bit deep greyscale image to a 1-bit deep image by
    # thresholding all the pixels. The resulting image is tightly packed
    # and is probably only useful as an argument to mono2grey.
    if verbose:
        print 'grey2mono'
    monoimage = imageop.grey2mono (greyimage, width, height, 0)

    # monoimage, width, height = getimage('monotest.rgb')
    # Convert a 1-bit monochrome image to an 8 bit greyscale or color image.
    # All pixels that are zero-valued on input get value p0 on output and
    # all one-value input pixels get value p1 on output. To convert a
    # monochrome black-and-white image to greyscale pass the values 0 and
    # 255 respectively.
    if verbose:
        print 'mono2grey'
    greyimage = imageop.mono2grey (monoimage, width, height, 0, 255)

    # Convert an 8-bit greyscale image to a 1-bit monochrome image using a
    # (simple-minded) dithering algorithm.
    if verbose:
        print 'dither2mono'
    monoimage = imageop.dither2mono (greyimage, width, height)

    # Convert an 8-bit greyscale image to a 4-bit greyscale image without
    # dithering.
    if verbose:
        print 'grey2grey4'
    grey4image = imageop.grey2grey4 (greyimage, width, height)

    # Convert an 8-bit greyscale image to a 2-bit greyscale image without
    # dithering.
    if verbose:
        print 'grey2grey2'
    grey2image = imageop.grey2grey2 (greyimage, width, height)

    # Convert an 8-bit greyscale image to a 2-bit greyscale image with
    # dithering. As for dither2mono, the dithering algorithm is currently
    # very simple.
    if verbose:
        print 'dither2grey2'
    grey2image = imageop.dither2grey2 (greyimage, width, height)

    # Convert a 4-bit greyscale image to an 8-bit greyscale image.
    if verbose:
        print 'grey42grey'
    greyimage = imageop.grey42grey (grey4image, width, height)

    # Convert a 2-bit greyscale image to an 8-bit greyscale image.
    if verbose:
        print 'grey22grey'
    image = imageop.grey22grey (grey2image, width, height)

    # Cleanup
    unlink('test'+os.extsep+'rgb')
'''
# Encoding a binary file with the uu module
# The encode(infile, outfile, filename) function encodes the data read from the
# input file and writes it to the output file.
# infile and outfile may be file names or file objects.
# The filename argument is written as the file name in the "begin" header line.
'''
import uu
import os, sys

infile = 'samples/sample.jpg'

print(type(sys.stdout), sys.stdout)
print(type(os.path.basename(infile)), os.path.basename(infile))

uu.encode(infile, sys.stdout, os.path.basename(infile))

'''
# Decoding a uu-format file with the uu module
import uu
import io

infile = 'samples/sample.uue'
outfile = 'samples/sample.jpg'

fi = open(infile, 'rb')
fo = io.BytesIO()  # uu.decode writes bytes, so use BytesIO rather than StringIO

uu.decode(fi, fo)

data = open(outfile, 'rb').read()
if fo.getvalue() == data:
    print(len(data), 'bytes ok')
"""Various tools used by MIME-reading or MIME-writing programs."""
def __init__(self, video_string):
    with open('../images/video2.txt', 'w') as wfile:
        wfile.write(video_string)
    uu.decode('../images/video2.txt', '../images/video.mp4')
    self.vidcap = cv2.VideoCapture('../images/video.mp4')
rgbimg.longstoimage(rgb, width, height, depth, '@.rgb')
os.unlink('@.rgb')

table = [
    ('testrgb' + os.extsep + 'uue', 'test' + os.extsep + 'rgb'),
    ('testimg' + os.extsep + 'uue', 'test' + os.extsep + 'rawimg'),
    ('testimgr' + os.extsep + 'uue', 'test' + os.extsep + 'rawimg' + os.extsep + 'rev'),
]
for source, target in table:
    source = findfile(source)
    target = findfile(target)
    if verbose:
        print "uudecoding", source, "->", target, "..."
    uu.decode(source, target)

if verbose:
    print "testing..."

ttob = rgbimg.ttob(0)
if ttob != 0:
    raise error, 'ttob should start out as zero'

testimg('test' + os.extsep + 'rgb', 'test' + os.extsep + 'rawimg')

ttob = rgbimg.ttob(1)
if ttob != 0:
    raise error, 'ttob should be zero'

testimg('test' + os.extsep + 'rgb',
import base64
import uu

enc_file = open("encrypto.txt", mode="r")
enc = enc_file.read()
enc_file.close()

while True:
    try:
        enc = base64.urlsafe_b64decode(enc)
    except Exception as e:
        break

with open("result.txt", mode="w") as f:
    f.write(enc.decode("utf-8"))

result = uu.decode("result.txt", "output.txt")
# uu.decode() returns None; the decoded bytes end up in output.txt.
print(result)
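# For reference, the same peeling can be done without the result.txt/output.txt
# temp files by undoing the final uuencode layer in memory with the stdlib
# "uu_codec" (available on Python versions that still bundle the uu codec).
# The function name below is illustrative, not part of the original script.
import base64
import binascii
import codecs

def peel_layers(blob: bytes) -> bytes:
    # Strip base64 layers until decoding stops working, then decode the
    # remaining uuencoded text directly to bytes.
    while True:
        try:
            blob = base64.urlsafe_b64decode(blob)
        except binascii.Error:
            break
    return codecs.decode(blob, "uu_codec")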