Example No. 1
 def csv_reader(self, url, delimiter=',', header=False, encoding=None, skip_rows=0, data=None, **kwargs):
     if not data:
         result = urlparse(url)
         if result.scheme == 'ftp':
             data = StringIO()
             ftp = FTP(result.hostname)
             ftp.login(result.username, result.password)
             ftp.retrbinary('RETR {}'.format(result.path), lambda block: data.write(block.decode('utf-8')))
             ftp.quit()
             data.seek(0)
         else:
             response = self.get(url, **kwargs)
             if encoding:
                 response.encoding = encoding
             text = response.text.strip()
             if text.startswith('\ufeff'):  # BOM
                 text = text[1:]
             data = StringIO(text)
     if skip_rows:
         for _ in range(skip_rows):
             data.readline()
     if header:
         return csv.DictReader(data, delimiter=delimiter)
     else:
         return csv.reader(data, delimiter=delimiter)
Example No. 2
    def __init__(self, server, server_id):
        self.server_id = server_id
        self.host, self.port = server.split(":")
        try:
            stat = send_cmd(self.host, self.port, "stat\n")
            sio = StringIO(stat)
            line = sio.readline()
            m = re.search(".*: (\d+\.\d+\.\d+)-.*", line)
            self.version = m.group(1)
            sio.readline()
            self.sessions = []
            for line in sio:
                if not line.strip():
                    break
                self.sessions.append(Session(line.strip(), server_id))
            for line in sio:
                attr, value = line.split(":")
                attr = attr.strip().replace(" ", "_").replace("/", "_").lower()
                setattr(self, attr, value.strip())

            self.min_latency, self.avg_latency, self.max_latency = self.latency_min_avg_max.split("/")

            self.unavailable = False
        except Exception:
            self.unavailable = True
            self.mode = "Unavailable"
            self.sessions = []
            self.version = "Unknown"
            return
Example No. 3
 def test_read_3 (self) :
     r    = StringIO("1 10\n100 200\n201 210\n900 1000\n")
     r.readline()
     r.readline()
     r.readline()
     r.readline()
     
     self.assertEqual(len(collatz_read(r)), 0)
Example No. 4
 def test_read_2 (self) : 
 
     r    = StringIO("1 10\n100 200\n201 210\n900 1000\n")
     r.readline()
     r.readline()
     i, j = collatz_read(r)
     self.assertEqual(i, 201)
     self.assertEqual(j, 210)
Example No. 5
def get_login_passport():
    # Get something unique (not important as far as I can tell)
    cmd_result = run(['wmic', 'csproduct', 'get', 'uuid'], stdout=PIPE)
    cmd_result_str = StringIO(cmd_result.stdout.decode('utf-8'))

    # skip the first line
    cmd_result_str.readline()

    # Grab UUID
    uuid = cmd_result_str.readline().strip()

    # Ask for username/password.
    username = input("Username: "******"Password: ")

    # Immediately convert it.
    password = hexlify(sha512(bytes(password, 'utf-8')).digest()).decode('utf-8')

    # First request.
    headers = {
        'User-Agent': 'NexonLauncher.nxl-17.04.01-290-621f8e0',
        'Content-Type': 'application/json'
    }
    body = {
        'id': username,
        'password': password,
        'auto_login': False,
        'client_id': '7853644408',
        'scope': 'us.launcher.all',
        'device_id': uuid
    }
    body_str = dumps(body)
    connection = client.HTTPSConnection('accounts.nexon.net', 443)

    connection.request('POST', '/account/login/launcher', body=body_str,
                       headers=headers)

    response = loads(connection.getresponse().read().decode('utf-8'))
    b64_token = b64encode(bytes(response['access_token'],
                                'utf-8')).decode('utf-8')

    # Second request.
    headers = {
        'User-Agent': 'NexonLauncher.nxl-17.04.01-290-621f8e0',
        'Cookie': 'nxtk=' + response['access_token'] +
                  ';domain=.nexon.net;path=/;',
        'Authorization': 'bearer ' + b64_token
    }
    connection = client.HTTPSConnection('api.nexon.io', 443)

    connection.request('GET', '/users/me/passport', headers=headers)
    response = loads(connection.getresponse().read().decode('utf-8'))

    # Return the passport.
    return response['passport']
Example No. 6
    def _parse_stat(self, data):
        """ Parse the output from the 'stat' 4letter word command """
        h = StringIO(data.decode())

        result = {}

        version = h.readline()
        if version:
            result['zk_version'] = version[version.index(':')+1:].strip()

        # skip all lines until we find the empty one
        while h.readline().strip():
            pass

        for line in h.readlines():
            m = re.match('Latency min/avg/max: (\d+)/(\d+)/(\d+)', line)
            if m:
                result['zk_min_latency'] = int(m.group(1))
                result['zk_avg_latency'] = int(m.group(2))
                result['zk_max_latency'] = int(m.group(3))
                continue

            m = re.match('Received: (\d+)', line)
            if m:
                result['zk_packets_received'] = int(m.group(1))
                continue

            m = re.match('Sent: (\d+)', line)
            if m:
                result['zk_packets_sent'] = int(m.group(1))
                continue

            m = re.match('Outstanding: (\d+)', line)
            if m:
                result['zk_outstanding_requests'] = int(m.group(1))
                continue

            m = re.match('Mode: (.*)', line)
            if m is not None:
                result['zk_server_state'] = m.group(1)
                continue

            m = re.match('Node count: (\d+)', line)
            if m:
                result['zk_znode_count'] = int(m.group(1))
                continue

        return result
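    # Sketch of the 'stat' output this parser expects (hypothetical sample, not
    # captured from a real server), passed in as bytes:
    #
    #   b"Zookeeper version: 3.4.6-1569965, built on 02/20/2014 09:09 GMT\n"
    #   b"Clients:\n"
    #   b"\n"
    #   b"Latency min/avg/max: 0/1/10\n"
    #   b"Received: 100\n"
    #   b"Sent: 99\n"
    #   b"Outstanding: 0\n"
    #   b"Mode: standalone\n"
    #   b"Node count: 4\n"
    #
    # For that input, _parse_stat would return zk_min_latency=0, zk_avg_latency=1,
    # zk_max_latency=10, zk_packets_received=100, zk_packets_sent=99,
    # zk_outstanding_requests=0, zk_server_state='standalone', zk_znode_count=4,
    # plus zk_version taken from the first line.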
Example No. 7
    def find_testcases(cls, test_program):
        # Collect all test cases
        args = [ test_program, '--gtest_list_tests' ]
        proc = LocalSubprocess(args)
        proc.start()
        returncode = proc.wait()
        if returncode != 0:
            raise JubaSkipTest('%s cannot list testcases' % test_program)

        # read input
        stri = StringIO(proc.stdout.decode())
        testcases = []
        current_test = None
        re_test = re.compile(r'^([a-zA-Z0-9_]+\.)')
        re_testcase = re.compile(r'^  ([a-zA-Z0-9_]+)')
        while True:
            line = stri.readline()
            if line == '': break
            if line.find('Running main') != -1: continue
            match = re_test.match(line)
            if match:
                current_test = match.group(1)

            match = re_testcase.match(line)
            if match and current_test:
                testcases.append('%s%s' % (current_test, match.group(1)))
        return testcases
Example No. 8
def trystringio(n):
    if n == 1:
        f = StringIO()
        f.write('Hello world!')
        #s = f.readline()
        s = f.getvalue()
        print(s)

    if n == 2:
        f = StringIO('Hello!\nHi!\nGoodbye!')
        s = f.readline()
        print(s)

    if n == 3:
        f = StringIO('Hello!\nHi!\nGoodbye!')
        for l in f:
            print(l, end='')

    if n == 4:
        f = StringIO()
        f.write('Hello world!\nHi\nGoodbye!')
        print('Pos: ', f.tell())
        f.seek(0)
        for l in f:
            print(l, end='')

    return
Example No. 9
def svg_plot(fig):
    f = StringIO()
    plt.savefig(f, format="svg")
    f.seek(0)
    header = ""
    for i in range(4):
        header += f.readline()
    return (header, f.read())
Example No. 10
def extract_categories(data):
    if data:
        stringio = StringIO(data.getvalue().decode('utf-8'))
        categories = set()

        # First line is header
        stringio.readline()
        line = stringio.readline()
        while line:
            categories.add(line.split(',')[0])
            line = stringio.readline()
        for category in sorted(categories):
            logging.info(category)

        return categories
    else:
        print('extract_categories - data empty')
Example No. 11
class HTTPRequest(BaseHTTPRequestHandler):
    def __init__(self, request_text):
        if VER >= 3:
            self.rfile = BytesIO(request_text)
        else:
            self.rfile = StringIO(request_text)
        self.raw_requestline = self.rfile.readline()
        self.error_code = self.error_message = None
        self.parse_request()
Example No. 12
    def version(self, page, version):
        try:
            out = str(git.show("-s", "--pretty=format:%aN%n%aD%n%B%n%h%e", version))

            buf = StringIO()
            buf.write(out)
            buf.seek(0)
            author = str(buf.readline())
            date = str(buf.readline())
            message = str(buf.readline())
            buf.close()

            raw = str(git.show("%s:%s" % (version, page.path)))

            data = {"author": author, "date": date, "message": message, "raw": raw}

            return data
        except Exception:
            return ""
Example No. 13
 def test_parse_item(self):
     f = StringIO(self.ex_item)
     line = f.readline()
     result = p.get_item_block(f, line)
     expected = [('name', 'blink'),
                 ('ID', '1'),
                 ('AbilityName', 'item_blink'),
                 ('var_type', 'FIELD_INTEGER'),
                 ('blink_range', '1200')]
     self.assertEqual(result, expected)
     f.close()
Example No. 14
    def test_namedtuple_data_is_written_to_csv_file_from_generator(self):
        TestCls = namedtuple('TestCls',
                             ('field_1', 'field_2', 'field_3'))

        test_sequence = (v for v in [TestCls("value_1", "value_2", "value_3"),
                                     TestCls("value_4", "value_5", "value_6")])
        output = StringIO()

        CSV_Exporter._export_namedtuple_values_fd(output, test_sequence)
        output.seek(0)
        header_line = output.readline().strip()
        self.assertEqual(header_line.split(","), ["field_1", "field_2", "field_3"])
        first_line = output.readline().strip()
        second_line = output.readline().strip()
        print("First: ", first_line)
        print("Second: ", second_line)
        self.assertEqual(first_line.split(","),
                         ["value_1", "value_2", "value_3"])
        self.assertEqual(second_line.split(","),
                         ["value_4", "value_5", "value_6"])
Example No. 15
    def test_parse_ability(self):
        f = StringIO(self.ex_ability)
        line = f.readline()
        result = p.get_ability_block(f, line)
        expected = [('name', 'antimage_blink'),
                    ('ID', '5004'),
                    ('AbilityName', 'antimage_blink'),
                    ('var_type', 'FIELD_INTEGER'),
                    ('blink_range', '1000 1075 1150 1150')]

        self.assertEqual(result, expected)
        f.close()
Example No. 16
class HTTPRequest(BaseHTTPRequestHandler):
    def __init__(self, request_text):
        # request_text = str(request_text).encode('utf-')
        request_text = request_text.decode('utf-8')
        self.rfile = StringIO(request_text)
        self.raw_requestline = self.rfile.readline()
        self.error_code = self.error_message = None
        self.parse_request()

    def send_error(self, code, message):
        self.error_code = code
        self.error_message = message
Example No. 17
def handle_request(client_connection):
    try:
        request = client_connection.recv(1024)
        request_text = request.decode()
        rfile = StringIO(request_text)
        raw_requestline = rfile.readline()
        shiet = raw_requestline.split()
        met = shiet[0]
        url = shiet[1][1:]
        destination = os.path.join(config_dict['server_location'], url)

        if url == 'redirect':
            print('Redirect')
            http_response = b"""HTTP/1.1 301 Moved Permanently
Location: """ + bytes(os.path.join(config_dict['server_host'], 'test.txt'), 'utf-8') + b"""\n\n"""
            client_connection.sendall(http_response)
            return

        if url == '' and config_dict['default_home']:
            print('Home')
            with open(config_dict['default_file'], 'rb') as htmlDisplay:
                textDisplay = htmlDisplay.read()
                content_type, encoding = guess_type(config_dict['default_file'], True)
                http_response = b"""HTTP/1.1 200 OK\nContent-Type: """ + bytes(content_type, 'utf-8') + b"""\n\n""" + textDisplay
                client_connection.sendall(http_response)
                return
        elif url == '' and not config_dict['default_home']:
            print('Home')
            http_response = b"""HTTP/1.1 301 Moved Permanently
Location: """ + bytes(os.path.join(config_dict['server_host'], config_dict['default_file']), 'utf-8') + b"""\n\n"""
            client_connection.sendall(http_response)
            return

        try:
            with open(destination, 'rb') as fileDisplay:
                textDisplay = fileDisplay.read()
                content_type, encoding = guess_type(destination, True)
                http_response = b"""HTTP/1.1 200 OK \nContent-Type: """ + bytes(content_type, 'utf-8') + b""" \n\n""" + textDisplay

        except FileNotFoundError:
            http_response = b"""HTTP/1.1 404 Not Found \n
Error 404: Page not found
                """
        except IsADirectoryError:
            http_response = b"""HTTP/1.1 200 OK \n
""" + bytes('\n'.join(os.listdir(destination)).encode('utf-8'))

    except Exception as e:
        http_response = b"""HTTP/1.1 500 Internal Server Error \n
""" + bytes(traceback.format_exc())

    client_connection.sendall(http_response)
Example No. 18
def arguments_from_docstring(doc):
    """Parse first line of docstring for argument name

    Docstring should be of the form ``min(iterable[, key=func])``.

    It can also parse cython docstring of the form
    ``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
    """
    if doc is None:
        raise RuntimeError('__doc__ is None')
    sio = StringIO(doc.lstrip())
    # only the first line matters
    # (the full docstring can be long)
    line = sio.readline()
    if line.startswith("('...',)"):
        line = sio.readline()  # stupid cython
    p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
    #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
    sig = p.search(line)
    if sig is None:
        return []
    # iterable[, key=func]' -> ['iterable[' ,' key=func]']
    sig = sig.groups()[0].split(',')
    ret = []
    for s in sig:
        #print s
        #get the last one after all space after =
        #ex: int x= True
        tmp = s.split('=')[0].split()[-1]
        #clean up non _+alphanum character
        ret.append(''.join(filter(lambda x: str.isalnum(x) or x == '_', tmp)))
        #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
        #ret += self.docstring_kwd_re.findall(s)
    ret = list(filter(lambda x: x != '', ret))

    if len(ret) == 0:
        raise RuntimeError('Your doc is unparsable\n'+doc)

    return ret
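# Usage sketch (hypothetical call; assumes the function above and its module's
# imports of re and io.StringIO):
print(arguments_from_docstring('min(iterable[, key=func])'))  # -> ['iterable', 'key']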
Example No. 19
def test_sniff_reset():
    files = StringIO()
    files.write("We shouldn't see this\n")
    start = files.tell()
    gold = "#This is the gold!"
    files.write(gold + "\n")
    for _ in range(random.randint(0, 3)):
        files.write('#\n')
    files.write("a a\n")
    files.seek(start)
    _ = _sniff_conll_file(files)
    res = files.readline().rstrip()
    assert res == gold
Example No. 20
def read_pcd(data):
    if type(data) == str:
        file_name = data
        assert file_name[-4:] == '.pcd', '%s is not a pcd file?' % file_name
        pcd = open(file_name, 'r')

    elif type(data) == bytes:
        pcd = StringIO()
        pcd.write(data.decode())
        pcd.seek(0)

    pc = []
    for _ in range(11):
        pcd.readline()  # read the header

    for line in pcd:
        point = [float(j) for j in line.rstrip().split()]
        rgb = int(point.pop())
        b, g, r = rgb & 0x0000ff, (rgb >> 8) & 0x0000ff, (rgb >> 16) & 0x0000ff
        point += [r, g, b]
        pc.append(point)
    return pc
Example No. 21
def stringio_rw():
    f = StringIO()
    f.write('hello')
    f.write(' ')
    f.write('中文!')
    print(f.getvalue())

    f = StringIO('Hello!\nHi!\n中文!')
    while True:
        s = f.readline()
        if not s:
            break
        print(s.strip())
Example No. 22
    def get_list(self, api_key, list_id):
        url = 'https://us1.api.mailchimp.com/export/1.0/list/?apikey={0}&id={1}'.format(api_key,list_id)
        response = self.get_http(url)
        buf = StringIO(unicode(response))
        header = json.loads(buf.readline())

        content = buf.readlines()
        members = []
        for line in content:
            member_line = json.loads(line)
            member = self.make_member(header,member_line)
            members.append(member)

        return members
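    # The export endpoint streams one JSON array per line, with the first line as
    # the header row. A hypothetical response body (illustration only):
    #   ["Email Address", "First Name", "Last Name"]
    #   ["alice@example.com", "Alice", "A."]
    #   ["bob@example.com", "Bob", "B."]
    # make_member() presumably pairs each member line with that header row.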
Example No. 23
def testStringIO():
    # write to StringIO:
    f = StringIO()
    f.write('hello')
    f.write(' ')
    f.write('world!')
    print(f.getvalue())

    # read from StringIO:
    f = StringIO('AAA\nBBB\nCCC')
    while True:
        s = f.readline()
        if s == '':
            break
        print(s.strip())
Example No. 24
def code_line_generator(code):
    ''' A generator for lines from a file/string, keeping the \n at end '''
    if isinstance(code, unicode):
        stream = StringIO(code)
    elif isinstance(code, str):
        stream = BytesIO(code)
    else:
        stream = code # Should be a file input stream, already

    while True:
        line = stream.readline()
        if line:
            yield line
        else: # Line is empty (without \n) at EOF
            break
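# Usage sketch (this helper targets Python 2, where `unicode` is a builtin and
# str is a byte string):
#   list(code_line_generator(u'first\nsecond\n'))  ->  [u'first\n', u'second\n']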
Example No. 25
    def list_keys(self, secret=False):
        """ list the keys currently in the keyring

        >>> import shutil
        >>> shutil.rmtree("/tmp/pygpgtest")
        >>> gpg = GPG(gnupghome="/tmp/pygpgtest")
        >>> input = gpg.gen_key_input()
        >>> result = gpg.gen_key(input)
        >>> print1 = result.fingerprint
        >>> result = gpg.gen_key(input)
        >>> print2 = result.fingerprint
        >>> pubkeys = gpg.list_keys()
        >>> assert print1 in pubkeys.fingerprints
        >>> assert print2 in pubkeys.fingerprints

        """

        which='keys'
        if secret:
            which='secret-keys'
        args = "--list-%s --fixed-list-mode --fingerprint --with-colons" % (which)
        args = [args]
        p = self._open_subprocess(args)

        # there might be some status thingumy here I should handle... (amk)
        # ...nope, unless you care about expired sigs or keys (stevegt)

        # Get the response information
        result = ListKeys()
        self._collect_output(p, result)
        stdout = StringIO(result.data)
        valid_keywords = 'pub uid sec fpr'.split()
        while True:
            line = stdout.readline()
            if self.verbose:
                print(line)
            logger.debug("%s", line.rstrip())
            if not line:
                break
            L = line.strip().split(':')
            if not L:
                continue
            keyword = L[0]
            if keyword in valid_keywords:
                getattr(result, keyword)(L)
        return result
Example No. 26
def test_coverage_to_csv():
    from io import StringIO
    from collections import Counter

    example_counter = Counter({'ukjer 2015-10-01': 2,
                               'bejab 2016-11-06': 166,
                               'bezav 2015-10-11': 8})
    results = ["countryradar,date,vp_files\r\n",
               "bejab,2016-11-06,166\r\n",
               "bezav,2015-10-11,8\r\n",
               "ukjer,2015-10-01,2\r\n"
               ]
    outfile = StringIO()
    coverage_to_csv(outfile, example_counter)
    outfile.seek(0)
    for line in results:
        assert line == outfile.readline()
Example No. 27
 def test_parse_hero(self):
     f = StringIO(self.ex_hero)
     line = f.readline()
     result = p.get_ability_block(f, line)
     expected = [('name', 'npc_dota_hero_antimage'),
                 ('HeroID', '1'),
                 ('Ability4', 'antimage_mana_void'),
                 ('SlotIndex', '0'),
                 ('SlotName', 'weapon'),
                 ('SlotText', '#LoadoutSlot_Weapon'),
                 ('TextureWidth', '128'),
                 ('TextureHeight', '256'),
                 ('MaxPolygonsLOD0', '400'),
                 ('MaxPolygonsLOD1', '350'),
                 ('HeroType', 'DOTA_BOT_HARD_CARRY'),
                 ('SoloDesire', '1')]
     self.assertEqual(result, expected)
     f.close()
Example No. 28
 def from_string(string):
     s = StringIO(string)
     
     blocks = Blocks()
     
     while True:
         line = s.readline()
     
         line = line.strip()
         
         if len(line) == 0:
             break
         
         b = Block.from_string(line)
         
         blocks.add_block(b)
     
     return blocks
Example No. 29
def server(listen_socket):
	while True:
		buf = StringIO(txt)
		sock, addr = listen_socket.accept()
		print('Somebody at %s wants poetry!' %(addr,))
		while True:
			try:
				line = buf.readline().strip()
				if not line:
					sock.close()
					break
				sock.sendall(line.encode('utf8')) # this is a blocking call
				print('send bytes to client: %s' %line)
			except socket.error:
				sock.close()
				break
			time.sleep(0.5) # the server waits a bit after sending each word
		sock.close()
		print('\n')
Example No. 30
class stupid_gzip_wrapper(closeable_response):
    def __init__(self, response):
        self._response = response

        c = stupid_gzip_consumer()
        gzc = GzipConsumer(c)
        gzc.feed(response.read())
        self.__data = StringIO("".join(c.data))

    def read(self, size=-1):
        return self.__data.read(size)
    def readline(self, size=-1):
        return self.__data.readline(size)
    def readlines(self, sizehint=-1):
        return self.__data.readlines(sizehint)

    def __getattr__(self, name):
        # delegate unknown methods/attributes
        return getattr(self._response, name)
Example No. 31
#     print(line.strip())

# from io import BytesIO
# f=BytesIO()   # first create a BytesIO(), then write to it
# f.write('你好'.encode('utf-8'))
# print(f.getvalue())    # getvalue() returns the buffer contents

# if __name__ == '__main__':
#      from io import BytesIO
#      f=BytesIO(b'\xe7\x92\x87\xe7\x92\x87')  # a BytesIO can also be initialized from bytes and then read like a file
#      print(f.read())
from io import StringIO

file = StringIO("璇璇\n是一个\n好孩子!\n")
for line in file.readlines():
    print(line.strip())

from io import StringIO

file = StringIO('璇璇\n是一个\n好孩子!')
print(file.read())

from io import StringIO

file = StringIO("璇璇\n是一个\n好孩子!")
while True:
    content = file.readline()
    if content == '':
        break
    print(content.strip())
Example No. 32
def run_blast(refpath, perc_identity, min_length):

    #set up output files and counters
    regionspath = refpath + '.rpt.regions'
    statspath = refpath + '.rpt.stats'

    total_bp = 0
    repetitive_bp = 0
    num_regions = 0

    #run blast
    blastn_cline = NcbiblastnCommandline(
        query=refpath,
        db=refpath,
        dust='no',
        word_size=17,
        gapopen=5,
        gapextend=2,
        evalue=0.0001,
        perc_identity=perc_identity,
        outfmt='"6 qseqid sseqid pident length qstart qend sstart send"')
    try:
        blast_out, blast_err = blastn_cline()
        assert not blast_err
    except (Bio.Application.ApplicationError, AssertionError) as err:
        raise Exception(
            'Error: Blast failed during construction of repeat mask: %s' % err)

    repregions_fp = open(regionspath, 'w')
    repregions_fp.write(
        '# Annotation file for storing repetitive regions with columns CHROM, START, END, RPT\n# number from 1, coordinates inclusive of start and stop\n'
    )

    # each blast_rec is result from one query sequence (contig)
    blast_stream = StringIO(blast_out)
    for contig_count, contig in enumerate(SeqIO.parse(refpath, 'fasta'), 1):

        total_bp += len(contig)
        repmask = np.zeros(len(contig), dtype=bool)

        try:
            fields = blast_stream.readline().split()
        except StopIteration:
            fields = None

        while fields and fields[0] == contig.name:
            contig_name, match_name = fields[:2]
            hit_perc_ident = float(fields[2])
            hit_length, q_start, q_end, s_start, s_end = (int(x)
                                                          for x in fields[3:])
            (x1, y1), (x2, y2) = sorted(
                ((q_start, q_end), tuple(sorted((s_start, s_end)))))
            if hit_length >= min_length and (
                    contig_name != match_name
                    or not (x2 <= x1 <= y2 and x2 <= y1 <= y2)):
                repmask[q_start - 1:q_end] = True
            try:
                fields = blast_stream.readline().split()
            except StopIteration:  # end of blast hits
                fields = None

        # identify positions of repetitive regions (runs of 1s in the
        # repmask array)
        # 0-based numbering
        region_starts = list(np.where(repmask[1:] > repmask[:-1])[0] + 1)
        region_ends = list(np.where(repmask[1:] < repmask[:-1])[0] + 1)

        # special case: full blast hit for this contig against another
        # contig
        if repmask.all():
            region_starts = [0]
            region_ends = [len(repmask)]

        # fix ends, in case regions start from the first position in the
        # sequence or end at the last
        if region_starts and ((not region_ends) or
                              (region_starts[-1] > region_ends[-1])):
            region_ends.append(len(repmask))

        if region_ends and ((not region_starts) or
                            (region_starts[0] > region_ends[0])):
            region_starts = [0] + region_starts

        repregions_fp.writelines(
            '{0}\t{1}\t{2}\t{3}\n'.format(contig.name, rs, re, 1)
            for rs, re in zip(region_starts, region_ends))

        repetitive_bp += repmask.sum()
        num_regions += len(region_starts)

    repregions_fp.close()
    pct_repetitive = '{0:.2f}'.format((float(repetitive_bp) / total_bp) * 100)

    sys.stdout.write('Repetitive regions %s%% \n' % pct_repetitive)

    # save result summary
    statsvalues = '\t'.join(
        (refpath, str(contig_count), str(total_bp), str(repetitive_bp),
         str(num_regions), pct_repetitive))
    o = open(statspath, 'w')
    o.write(
        'refpath\tcontigs\tnumbp\trepetitivebp\trepregions\trepetitivepct\n%s\n'
        % statsvalues)
    o.close()
Example No. 33
# coding=utf-8
from io import StringIO

# 1.try using stringio
f = StringIO()
f.write("hello")
f.write(" ")
f.write("world")
print(f.getvalue())
print()

# 2.try read StringIO
f_read = StringIO("Hi\nthis is Mick\ngoodbye")
while True:
    s = f_read.readline()
    if s == '':
        break
    print(s.strip())
Example No. 34
    def from_endf(cls, ev, mt):
        """Generate an angular distribution from an ENDF evaluation

        Parameters
        ----------
        ev : endf.Evaluation
            ENDF evaluation
        mt : int
            The MT value of the reaction to get angular distributions for

        Returns
        -------
        AngleDistribution
            Angular distribution

        """
        file_obj = StringIO(ev.section[4, mt])

        # Read HEAD record
        items = get_head_record(file_obj)
        lvt = items[2]
        ltt = items[3]

        # Read CONT record
        items = get_cont_record(file_obj)
        li = items[2]
        nk = items[4]
        center_of_mass = (items[3] == 2)

        # Check for obsolete energy transformation matrix. If present, just skip
        # it and keep reading
        if lvt > 0:
            warn('Obsolete energy transformation matrix in MF=4 angular '
                 'distribution.')
            for _ in range((nk + 5)//6):
                file_obj.readline()

        if ltt == 0 and li == 1:
            # Purely isotropic
            energy = np.array([0., ev.info['energy_max']])
            mu = [Uniform(-1., 1.), Uniform(-1., 1.)]

        elif ltt == 1 and li == 0:
            # Legendre polynomial coefficients
            params, tab2 = get_tab2_record(file_obj)
            n_energy = params[5]

            energy = np.zeros(n_energy)
            mu = []
            for i in range(n_energy):
                items, al = get_list_record(file_obj)
                temperature = items[0]
                energy[i] = items[1]
                coefficients = np.asarray([1.0] + al)
                mu.append(Legendre(coefficients))

        elif ltt == 2 and li == 0:
            # Tabulated probability distribution
            params, tab2 = get_tab2_record(file_obj)
            n_energy = params[5]

            energy = np.zeros(n_energy)
            mu = []
            for i in range(n_energy):
                params, f = get_tab1_record(file_obj)
                temperature = params[0]
                energy[i] = params[1]
                if f.n_regions > 1:
                    raise NotImplementedError('Angular distribution with multiple '
                                              'interpolation regions not supported.')
                mu.append(Tabular(f.x, f.y, INTERPOLATION_SCHEME[f.interpolation[0]]))

        elif ltt == 3 and li == 0:
            # Legendre for low energies / tabulated for high energies
            params, tab2 = get_tab2_record(file_obj)
            n_energy_legendre = params[5]

            energy_legendre = np.zeros(n_energy_legendre)
            mu = []
            for i in range(n_energy_legendre):
                items, al = get_list_record(file_obj)
                temperature = items[0]
                energy_legendre[i] = items[1]
                coefficients = np.asarray([1.0] + al)
                mu.append(Legendre(coefficients))

            params, tab2 = get_tab2_record(file_obj)
            n_energy_tabulated = params[5]

            energy_tabulated = np.zeros(n_energy_tabulated)
            for i in range(n_energy_tabulated):
                params, f = get_tab1_record(file_obj)
                temperature = params[0]
                energy_tabulated[i] = params[1]
                if f.n_regions > 1:
                    raise NotImplementedError('Angular distribution with multiple '
                                              'interpolation regions not supported.')
                mu.append(Tabular(f.x, f.y, INTERPOLATION_SCHEME[f.interpolation[0]]))

            energy = np.concatenate((energy_legendre, energy_tabulated))

        return AngleDistribution(energy, mu)
Example No. 35
from io import StringIO
from io import BytesIO

f = StringIO()
print(f.write('hello'))
print(f.write(',world'))
print(f.getvalue())

print('----read stringio---')
f = StringIO('hello\nbob\nsss')
print(f.read())
print(f.read())
while True:
    s = f.readline()
    if s == '':
        break
    print(s.strip())

print('--byteio---------')
f = BytesIO()
a = f.write('中文'.encode('utf-8'))
print(a)
print(type(a))
print(f.getvalue())

# read bytes
f = BytesIO(b'\xe4\xb8\xad\xe6\x96\x87')
print(f.read())

f = StringIO('Hello World')
# f.write('Hello World');
Example No. 36
def Analyze(data_in, line_num=None, line_analyze=None):
    '''
    Analyze kakaoTalk text. input parameter is file io or string.
    It returns Chatroom instance.
    '''

    # Variables, queue is for multiline message.
    loop = 0
    date = None
    chatname = None
    line = True
    queue = []

    if type(data_in) == str:
        from io import StringIO
        data_in = StringIO(data_in)

    # Find Chatroom Name
    line = data_in.readline()
    chatname = search('(.+?) 님과 카카오톡 대화|(.+?) \d+ 카카오톡 대화', line)

    # Android or PC
    if chatname:
        chatname = chatname.group(1, 2)
        # Android
        if chatname[0] == None:
            chatname = chatname[1]
            datetime_exp = compile(
                '(?P<year>\d{4})년 (?P<month>\d{1,2})월 (?P<day>\d{1,2})일 .. \d{1,2}:\d{2}\r?\n?$'
            )
            message_exp = compile(
                '\d{4}년 \d{1,2}월 \d{1,2}일 (?P<afm>..) (?P<hour>\d{1,2}):(?P<min>\d{2}), (?P<name>.+?) : (?P<con>.+)'
            )
            etc_exp = compile(
                '\d{4}년 \d{1,2}월 \d{1,2}일 .. \d{1,2}:\d{1,2}, .+')
        # PC
        else:
            chatname = chatname[0]
            datetime_exp = compile(
                '-+ (?P<year>\d{4})년 (?P<month>\d{1,2})월 (?P<day>\d{1,2})일 .요일 -+\r?\n?'
            )
            message_exp = compile(
                '\[(?P<name>.+?)\] \[(?P<afm>..) (?P<hour>\d{1,2}):(?P<min>\d{2})\] (?P<con>.+)'
            )
            etc_exp = compile(
                '.+님이 나갔습니다.\r?\n?|.+님이 .+님을 초대하였습니다.\r?\n?|.+님이 들어왔습니다.\r?\n?'
            )
    # ipad
    else:
        chatname = line
        datetime_exp = compile(
            '(?P<year>\d{4})년 (?P<month>\d{1,2})월 (?P<day>\d{1,2})일 .요일\r?\n?')
        message_exp = compile(
            '(?P<year>\d{4}). (?P<month>\d{1,2}). (?P<day>\d{1,2}). (?P<afm>..) (?P<hour>\d{1,2}):(?P<min>\d{1,2}), (?P<name>.+?) : (?P<con>.+?)\r?\n?$'
        )
        etc_exp = compile('\d{4}. \d{1,2}. \d{1,2}. .. \d{1,2}:\d{1,2}: .+')
    chatroom = Chatroom(chatname, line_analyze)

    # Check Text lines
    while line:
        line = data_in.readline()

        # Check line with regular expression
        m_date = datetime_exp.match(line)
        m_message = message_exp.match(line)

        # The case this line is new date.
        if m_date:
            # Execute
            if len(queue):
                chatroom.append(*queue[0])
                del queue[0]
            # Update date
            date = datetime(int(m_date.group('year')),
                            int(m_date.group('month')),
                            int(m_date.group('day')))

        # The case this line is new message.
        elif m_message:
            # Execute
            if len(queue):
                chatroom.append(*queue[0])
                del queue[0]

            name = m_message.group('name')
            afm = m_message.group('afm')
            hour = int(m_message.group('hour'))
            minute = int(m_message.group('min'))
            content = m_message.group('con')

            if afm == '오후' and hour != 12:
                hour += 12
            date = date.replace(hour=hour, minute=minute)

            # Enqueue
            queue.append([date, name, content])

        # The case this line is addition string of last message.
        elif len(queue) and not etc_exp.match(line):
            queue[-1][2] += '\n' + line

        if line_num:
            loop += 1
            print(loop, '/', line_num)

    # Last Dequeuing
    if len(queue):
        chatroom.append(*queue[0])

    data_in.close()
    return chatroom
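# Sketch of the PC-export format the regexes above expect (hypothetical lines,
# shown only to illustrate the parsing; Analyze() returns a Chatroom instance):
#
#   친구 님과 카카오톡 대화
#   --------------- 2020년 1월 1일 수요일 ---------------
#   [홍길동] [오후 1:05] 안녕하세요
#
# The first line names the chat room, each dashed date line updates `date`, and
# each message line is parsed for speaker, AM/PM flag, time, and content.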
Example No. 37
"""
    StringIO: 在内存中读写str
"""
from io import StringIO
f = StringIO()
print(f.write('hello'))
print(f.write(' '))
print(f.write('world!'))
print(f.getvalue())

# Initialize StringIO
f2 = StringIO('hello!\nHi!\nGoodbye!')
while True:
    s = f2.readline()
    if s == '':
        break
    print(s.strip())


"""
   BytesIO:在内存中读写二进制数据
"""

from io import BytesIO

b = BytesIO()
b.write('中文'.encode('utf-8'))
print(b.getvalue())

b2 = BytesIO(b'\xe4\xb8\xad\xe6\x96\x87')
print(b2.read())
Example No. 38
from io import StringIO
from io import BytesIO

f = StringIO()
f.write('hello')
f.write(' ')
f.write('python!')

print(f.getvalue())

f1 = StringIO('Hello!\nHi!\nGoodbye!')
while True:
    s = f1.readline()
    if s == '':
        break
    print(s.strip())

fb = BytesIO()
fb.write('中文'.encode('utf-8'))
print(fb.getvalue())

fb1 = BytesIO(b'\xe4\xb8\xad\xe6\x96\x87')
print(fb1.read())
Example No. 39
def load_data(
    scale: Scale,
    time_scale: TimeScale,
    year: int,
    month: Optional[int] = None,
    region: Optional[Region] = None,
    data_path: Optional[str] = None,
) -> pd.DataFrame:
    """
    :param region: Only required if scale is 'place'
    :param month: Only required if time_scale is 'monthly_current' or 'monthly_year_to_date'
    """
    _validate_load_data_inputs(scale, time_scale, year, month, region)

    if month is not None:
        year_last_digits = year % 100
        time_scale_letter = {
            "monthly_current": "c",
            "monthly_year_to_date": "y",
            "annual": "a",
        }[time_scale]

        filename_part_2 = f"{year_last_digits:02d}{month:02d}{time_scale_letter}"
    else:
        filename_part_2 = f"{year:04d}a"

    if scale == "place":
        region_mapping = {
            "south": "so",
            "northeast": "ne",
            "west": "we",
            "midwest": "mw",
        }  # type: ignore
        filename_part_1 = region_mapping[region]  # type: ignore
        extra_path: Optional[str] = region.capitalize() + " Region"  # type: ignore
    elif scale == "county":
        if region is not None:
            raise ValueError("region must be None in since scale = 'county'")
        filename_part_1 = "co"
        extra_path = None
    elif scale == "metro":
        if region is not None:
            raise ValueError("region must be None in since scale = 'metro'")
        # "ma" stands for "metro area"
        filename_part_1 = "ma"
        extra_path = None
    elif scale == "state":
        if region is not None:
            raise ValueError("region must be None in since scale = 'state'")
        filename_part_1 = "st"
        extra_path = None

    scale_path = scale.capitalize()

    if extra_path is not None:
        path = f"{scale_path}/{extra_path}/{filename_part_1}{filename_part_2}.txt"
    else:
        path = f"{scale_path}/{filename_part_1}{filename_part_2}.txt"

    if data_path is None:
        path = quote(path)
    text = get_url_text((CENSUS_DATA_PATH, path), data_path)

    result = (
        text
        # OMG so dumb that they didn't wrap with quotations
        .replace("Bristol, VA", '"Bristol, VA"').replace("Bristol, TN", '"Bristol, TN"')
    )
    if ERROR_STRING in result:
        raise ValueError(f"Path {path} is not valid")

    csv_handle = StringIO(result)

    header_row_1 = csv_handle.readline().rstrip().split(",")
    header_row_2 = csv_handle.readline().rstrip().split(",")

    # Skip blank line after header
    line = csv_handle.readline()
    assert line.strip() == ""

    df = pd.read_csv(csv_handle, header=None, index_col=False)
    fix_row_lengths = not (year == 1984 and region == "west")

    if scale == "county" and year >= 1990 and year <= 1998:
        df.columns = _fix_column_names_old_county_level(header_row_1, header_row_2)
    else:
        df.columns = _fix_column_names(
            header_row_1, header_row_2, fix_row_lengths=fix_row_lengths
        )

    if scale == "state":
        state_cleanup(df)

    if scale == "place":
        df = place_cleanup(df, year)

    if scale == "county":
        df = county_cleanup(df)

    return df
Example No. 40
        read_result = fss.read(10)

# write-mode w,wb,a
with open('log.log', 'a') as f:
    f.write('\n' + time.strftime('%Y-%m-%d %H:%M:%S %p'))

from io import StringIO

str_f = StringIO()
str_f.write('hello ')
str_f.write('world')
print(str_f.getvalue())

w_str_f = StringIO('hi\nfine\nThank you!')
while True:
    s = w_str_f.readline()
    if s == '':
        break
    print(s.strip())

from io import BytesIO

byte_f = BytesIO()
byte_f.write('你好'.encode('utf-8'))
print(byte_f.getvalue())

w_byte_f = BytesIO(b'\xe4\xbd\xa0\xe5\xa5\xbd')
print(w_byte_f.read())

print(os.name)
# os.uname() is not supported on Windows.
Example No. 41
# Often, reads and writes don't have to target a file; they can happen in memory.
# StringIO, as the name suggests, reads and writes str in memory.
# To write str into a StringIO, first create one, then write to it like a file:
from io import StringIO

f = StringIO()
f.write('hello')
f.write(' ')
f.write('world!')
print(f.getvalue())

s = StringIO('Hello!\nHi!\nGoodbye!')
while True:
    w = s.readline()
    if w == '':
        break
    print(w.strip())
Example No. 42
parser.add_argument('file', help='The file to be analyzed.')
args = parser.parse_args()

# nm -U <file> | c++filt
# How to run command with pipe: https://stackoverflow.com/a/13332300/4968633
ps = subprocess.Popen(('nm', '-U', args.file), stdout=subprocess.PIPE)
result = subprocess.check_output(('c++filt'), stdin=ps.stdout)
ps.wait()
symbols_string = result.decode('utf-8')

ocpp_symbols = set()
oc_category_name_and_method_symbols = set()
oc_category_getter_and_setter_symbols = []
f = StringIO(symbols_string)
while True:
    line = f.readline()
    if line == '':
        break

    # OC/C/C++. For class name, protocol name, global variable, c(++) function name
    # e.g. 0000000000000300 S _OBJC_CLASS_$_SDWebImageCacheKeyFilter
    match = re.fullmatch('[0-9a-f]{16} [STD] (_OBJC_CLASS_\\$|__OBJC_LABEL_PROTOCOL_\\$)?_([_A-Za-z][^_]\\w+)\n', line)
    if match and util.is_apple_symbol(match.group(2)) is False:
        ocpp_symbols.add(match.group(2))

    # OC category. For category name and method name
    # e.g. 0000000000000000 t +[UIImage(GIF) sd_imageWithGIFData:]
    match = re.fullmatch('[0-9a-f]{16} unsigned short [+-]\\[\\w+\\((\\w+)\\) ([\\w:]+)\\]\n', line)
    if match:
        oc_category_name_and_method_symbols.add(match.group(1))  # category name
        # If the method name looks like "- a:b:c", we just need to rewrite "a" part
Example No. 43
from io import StringIO
buff = StringIO() # save written text to a string
print(buff.write('spam\n'))
# 5
# ######################################################################################################################

print(buff.write('eggs\n'))
# 5
# ######################################################################################################################

print(buff.getvalue())
# 'spam\neggs\n'
# ######################################################################################################################

buff = StringIO('ham\nspam\n') # provide input from a string
print(buff.readline())
# 'ham\n'
# ######################################################################################################################

print(buff.readline())
# 'spam\n'
# ######################################################################################################################

print(buff.readline())
# ''
# ######################################################################################################################

print('#' * 52)
from io import StringIO
import sys
buff = StringIO()
Example No. 44
def convert_precalc_to_biom(precalc_in,
                            ids_to_load=None,
                            transpose=True,
                            md_prefix='metadata_'):
    """Loads PICRUSTs tab-delimited version of the precalc file and outputs a BIOM object"""

    #if given a string convert to a filehandle
    if type(precalc_in) == str:
        fh = StringIO(precalc_in)
    else:
        fh = precalc_in

    #first line has to be header
    header_ids = fh.readline().strip().split('\t')

    col_meta_locs = {}
    for idx, col_id in enumerate(header_ids):
        if col_id.startswith(md_prefix):
            col_meta_locs[col_id[len(md_prefix):]] = idx

    end_of_data = len(header_ids) - len(col_meta_locs)
    trait_ids = header_ids[1:end_of_data]

    col_meta = []
    row_meta = [{} for i in trait_ids]

    if ids_to_load is not None and len(ids_to_load) > 0:
        ids_to_load = set(ids_to_load)
        load_all_ids = False
    else:
        load_all_ids = True

    matching = []
    otu_ids = []
    for line in fh:
        fields = line.strip().split('\t')
        row_id = fields[0]
        if (row_id.startswith(md_prefix)):
            #handle metadata

            #determine type of metadata (this may not be perfect)
            metadata_type = determine_metadata_type(line)
            for idx, trait_name in enumerate(trait_ids):
                row_meta[idx][row_id[len(md_prefix):]] = parse_metadata_field(
                    fields[idx + 1], metadata_type)

        elif load_all_ids or (row_id in ids_to_load):
            otu_ids.append(row_id)
            matching.append(list(map(float, fields[1:end_of_data])))

            #add metadata
            col_meta_dict = {}
            for meta_name in col_meta_locs:
                col_meta_dict[meta_name] = fields[col_meta_locs[meta_name]]
            col_meta.append(col_meta_dict)

            if not load_all_ids:
                ids_to_load.remove(row_id)

    if not otu_ids:
        raise ValueError(
            "No OTUs match identifiers in precalculated file. PICRUSt requires an OTU table reference/closed picked against GreenGenes.\nExample of the first 5 OTU ids from your table: {0}"
            .format(', '.join(list(ids_to_load)[:5])))

    if ids_to_load:
        raise ValueError(
            "One or more OTU ids were not found in the precalculated file!\nAre you using the correct --gg_version?\nExample of (the {0}) unknown OTU ids: {1}"
            .format(len(ids_to_load), ', '.join(list(ids_to_load)[:5])))

    #note that we transpose the data before making biom obj
    matching = np.asarray(matching)
    if transpose:
        return Table(matching.T,
                     trait_ids,
                     otu_ids,
                     row_meta,
                     col_meta,
                     type='Gene table')
    else:
        return Table(matching,
                     otu_ids,
                     trait_ids,
                     col_meta,
                     row_meta,
                     type='Gene table')
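# A hypothetical precalc string in the layout the parser above expects: a
# tab-separated header (metadata columns prefixed with 'metadata_'), then one
# row per OTU id. This is a sketch, not real PICRUSt data:
#
#   example_precalc = ("OTU_IDs\tK00001\tK00002\tmetadata_NSTI\n"
#                      "otu_1\t1.0\t0.0\t0.03\n"
#                      "otu_2\t2.0\t1.0\t0.11\n")
#
# convert_precalc_to_biom(example_precalc) would return a biom Table whose
# observations are the trait ids (K00001, K00002) and whose samples are the
# OTU ids, since transpose=True by default.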
Example No. 45
# -*- coding: utf-8 -*-

# StringIO: read and write str in memory
from io import StringIO

# Write str into a StringIO
f = StringIO()  # create a StringIO object
f.write('hello world!')
print(f.getvalue())

# Read from the StringIO
fr = StringIO('Hello!\nPython\n321')
while 1:
    s = fr.readline()
    if s == '':
        break
    print(s.strip())
Example No. 46
# Use the io module's StringIO class to treat an existing string as a file-like object
from io import StringIO

flo = StringIO('Though leaves are many,' + '\nthe root is one;' +
               '\nThrough all the lying days of my youth!')

print('讀取17個字元:', flo.read(17))
print('第一行未讀取:', flo.read())

# Continue reading from "the root ..."
while True:
    msg = flo.readline()  # read a whole line
    if msg == '':
        break
    print(msg.strip())
Example No. 47
70 10'''


def cruise(i, d, h, oh=sys.stdout):
    h = h[h[:, 0] < d]
    answer = d / ((d - h[:, 0]) / h[:, 1])
    oh.write('Case #{0}:  {1}\n'.format(i + 1, answer.min()))


if __name__ == '__main__':
    import sys
    if len(sys.argv) == 1:
        from io import StringIO
        fh = StringIO(DATA)
        oh = sys.stdout
    else:
        fh = open(sys.argv[1], 'r')
        oh = open('output.txt', 'w')
    with fh:
        with oh:
            N = int(fh.readline())
            for i in range(N):
                line = fh.readline()
                d, n = [int(_) for _ in line.strip().split()]
                h = []
                for j in range(n):
                    line = fh.readline()
                    h.append([int(_) for _ in line.strip().split()])
                h = np.array(h).astype('float')
                cruise(i, d, h, oh)
Example No. 48
def strToItt(foo):
	stri = StringIO(foo)
	while True:
		nl = stri.readline()
		if nl == '': break
		yield nl.strip('\n')
Example No. 49
from io import StringIO, BytesIO

f = StringIO()
f.write('hello')
f.write(' ')
f.write('world')
print(f.getvalue())
ff = StringIO('hello \ngmg\nyr')
while True:
    s = ff.readline()
    if s == '':
        break
    print(s.strip())
Example No. 50
def pretty_print(tree):
    output = StringIO()
    pretty_output = StringIO()

    current_level = Queue()
    next_level = Queue()
    current_level.enqueue(tree)
    depth = 0

    # get the depth of current tree
    # get the tree node data and store in list
    if tree:
        while not current_level.isEmpty():
            current_node = current_level.dequeue()
            output.write('%s ' % current_node.data if current_node else 'N ')
            next_level.enqueue(
                current_node.left if current_node else current_node)
            next_level.enqueue(
                current_node.right if current_node else current_node)

            if current_level.isEmpty():
                if sum([i is not None
                        for i in next_level.queue]):  # if next level has node
                    current_level, next_level = next_level, current_level
                    depth = depth + 1
                output.write('\n')
    # print('the tree print level by level is :')
    # print(output.getvalue())
    # print("current tree's depth is %i" % (depth+1))

    # add space to each node
    output.seek(0)
    pad_length = 3
    keys = []
    spaces = int(math.pow(2, depth))

    while spaces > 0:
        skip_start = spaces * pad_length
        skip_mid = (2 * spaces - 1) * pad_length

        key_start_spacing = ' ' * skip_start
        key_mid_spacing = ' ' * skip_mid

        keys = output.readline().split(' ')  # read one level to parse
        padded_keys = (add_padding(key, pad_length) for key in keys)
        padded_str = key_mid_spacing.join(padded_keys)
        complete_str = ''.join([key_start_spacing, padded_str])

        pretty_output.write(complete_str)

        # add space and slashes to middle layer
        slashes_depth = spaces
        # print('current slashes depth im_resize:')
        # print(spaces)
        # print("current levle's list is:")
        # print(keys)
        spaces = spaces // 2
        if spaces > 0:
            pretty_output.write('\n')  # print '\n' each level

            cnt = 0
            while cnt < slashes_depth:
                inter_symbol_spacing = ' ' * (pad_length + 2 * cnt)
                symbol = ''.join(['/', inter_symbol_spacing, '\\'])
                symbol_start_spacing = ' ' * (skip_start - cnt - 1)
                symbol_mid_spacing = ' ' * (skip_mid - 2 * (cnt + 1))
                pretty_output.write(''.join([symbol_start_spacing, symbol]))
                for i in keys[1:-1]:
                    pretty_output.write(''.join([symbol_mid_spacing, symbol]))
                pretty_output.write('\n')
                cnt = cnt + 1

    print(pretty_output.getvalue())
Example No. 51
from io import BytesIO, StringIO

bio = BytesIO()
print(bio.readable(), bio.writable(), bio.seekable())
bio.write(b'magede\nPython')
bio.seek(0)
print(bio.readline())
print(bio.getvalue())
bio.close()

sio = StringIO()
print(sio.readable(), sio.writable(), sio.seekable())
sio.write('magedu\nPython')
sio.seek(0)
print(sio.readline())
print(sio.getvalue())
sio.close()

# Both classes come from the io module: they open an in-memory buffer, text or binary,
# that can be used just like a file object, and the buffer is released when close() is called.
# getvalue() returns the entire contents regardless of the current file position.
# Why use StringIO? Disk operations are generally much slower than memory operations,
# so when memory allows, a common optimization is to avoid touching disk and reduce
# disk I/O, which can make a program noticeably faster.

# File-like object: an object that can be operated on like a file object
from sys import stdout

f = stdout
print(type(f))
f.write('magedu.com')  # writes to the console (stdout)
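# Quick demonstration of the two points noted in the comments above (a small
# sketch): getvalue() ignores the current position, and the buffer is released
# once close() is called.
demo = StringIO('abc')
demo.read()             # move the position to the end of the buffer
print(demo.getvalue())  # still prints 'abc'
demo.close()
# demo.getvalue() would now raise ValueError: I/O operation on closed file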
Example No. 52
def application(environ, start_response):

    response_body = ""
    cn = None
    emailaddress = None
    password = None

    # schema currently may be  'autoconfig', 'autodiscover', 'mobileconfig'
    schema = None

    # subschema currently is either 'mobile' or 'outlook'
    subschema = None

    process = True
    data = None
    status = STAT_OK

    try:
        data = Config(environ)
    except Exception as e:
        process = False
        status = STAT_ERR
        print(e, file=environ["wsgi.errors"])

    if process:
        try:
            logging.basicConfig(
                filename=data.logfile,
                format='%(asctime)s %(levelname)s: %(message)s',
                level=logging.DEBUG)
        except IOError as e:
            print(e, file=environ["wsgi.errors"])

        request_method = environ['REQUEST_METHOD']
        request_method = escape(request_method)

        # Adding some more useful debugging information
        if data.debug:
            logging.debug("-" * 15 + " BEGIN environ " + "-" * 15)
            for k, v in environ.items():
                logging.debug("%s: %s" % (k, v))
            logging.debug("-" * 15 + " END environ " + "-" * 15)

        if request_method == "POST":
            try:
                request_body_size = int(environ.get('CONTENT_LENGTH', 0))
            except ValueError:
                request_body_size = 0

            # When the method is POST the query string will be sent
            # in the HTTP request body which is passed by the WSGI server
            # in the file like wsgi.input environment variable.
            request_body = environ['wsgi.input'].read(request_body_size)

            if data.debug:
                logging.debug("Request POST (raw)\n" +
                              request_body.decode('utf-8'))

            fd = StringIO(request_body.decode("utf-8"))
            fd.readline()  # Skip XML declaration
            try:
                tree = etree.parse(fd)
            except XMLSyntaxError:
                # We did not receive XML, so it might be a mobileconfig request
                # TODO: We also might check the User-Agent here
                d = parse_qs(request_body.decode('utf-8'))

                if d is not None:
                    if data.debug:
                        logging.debug(str(d))
                    if "_mobileconfig" in d:
                        mobileconfig = d["_mobileconfig"][0]
                        if mobileconfig == "true":
                            if data.debug:
                                logging.debug("Requesting mobileconfig "
                                              "configuration")
                            if "cn" in d:
                                cn = d["cn"][0]
                                cn = cn.strip()
                            if "password" in d:
                                password = d["password"][0]
                                password = password.strip()
                            if "emailaddress" in d:
                                emailaddress = d["emailaddress"][0]
                                emailaddress = emailaddress.strip()
                                status = STAT_OK
                                schema = "mobileconfig"
                            else:
                                logging.warning("Error in mobileconfig "
                                                "request!")
                                process = False
                                status = STAT_ERR
                        else:
                            process = False
                            status = STAT_ERR
                    else:
                        process = False
                        status = STAT_ERR
                else:
                    process = False
                    status = STAT_ERR
            else:
                root = tree.getroot()

                # We need to strip the namespace for XPath
                expr = "//*[local-name() = $name]"

                response_schema = root.xpath(expr,
                                             name="AcceptableResponseSchema")
                if len(response_schema) == 0:
                    logging.warning("Error in XML request")
                    process = False
                    status = STAT_ERR
                    data.memcache.set_client()
                else:
                    # element.text is a http-URI that has a location part
                    # which we need to scan.
                    if "/mobilesync/" in response_schema[0].text:
                        subschema = "mobile"
                    elif "/outlook/" in response_schema[0].text:
                        subschema = "outlook"
                    else:
                        process = False

                    emailaddresses = root.xpath(expr, name="EMailAddress")
                    if len(emailaddresses) == 0:
                        logging.warning("Error in autodiscover request!")
                        process = False
                        status = STAT_ERR
                        data.memcache.set_client()
                    else:
                        emailaddress = emailaddresses[0].text
                        schema = "autodiscover"
                        status = STAT_OK

        elif request_method == "GET":
            # FIXME: maybe we need to catch AutoDiscover GET-REDIRECT requests
            if any("autodiscover" in s
                   for s in (environ["HTTP_HOST"],
                             environ["REQUEST_URI"].lower())):
                process = False
                status = STAT_ERR

            # autoconfig
            else:
                qs = environ['QUERY_STRING']
                d = parse_qs(qs)

                if data.debug:
                    logging.debug("Request GET: QUERY_STRING: %s" % qs)

                if d is not None:
                    if "emailaddress" in d:
                        emailaddress = d["emailaddress"][0]
                        emailaddress = emailaddress.strip()
                        if '@' not in emailaddress:
                            emailaddress = unquote(emailaddress)
                        status = STAT_OK
                        schema = "autoconfig"
                    else:
                        logging.warning("Error in autoconfig request!")
                        process = False
                        status = STAT_ERR
                else:
                    logging.error("Request GET: QUERY_STRING failed!")
                    process = False
                    status = STAT_ERR

    if process:
        if data.debug:
            logging.debug("Entering data.configure()")
        try:
            if data.memcache.allow_client():
                data.configure(emailaddress, cn, password)
            else:
                process = False
                status = STAT_ERR
                logging.warning(
                    "Request %d [%s] blocked!" %
                    (data.memcache.counter(), environ["REMOTE_ADDR"]))
        except DataNotFoundException:
            process = False
            status = STAT_ERR
            data.memcache.set_client()
            logging.warning("Request %d [%s]" %
                            (data.memcache.counter(), environ["REMOTE_ADDR"]))
        except Exception as e:
            if data.debug:
                tb = traceback.format_exc()
                logging.error(tb)
            else:
                logging.error("data.configure(): %s" % e)
            process = False
            status = STAT_ERR

    if process:
        if data.debug:
            logging.debug("Entering view()")
        try:
            view = View(data, schema, subschema)
            response_body = view.render()
            if len(response_body) == 0:
                status = STAT_ERR
        except Exception as e:
            if data.debug:
                tb = traceback.format_exc()
                logging.error(tb)
            else:
                logging.error("view.render(): %s" % e)
            status = STAT_ERR

    if process:
        if data.debug:
            if (schema == "mobileconfig" and "sign_mobileconfig" in data.domain
                    and data.domain["sign_mobileconfig"] is True):
                logging.debug("No debugging output for signed mobileconfig!")
            else:
                if sys.version_info < (3, ):
                    logging.debug("Response:\n" +
                                  response_body.decode('utf-8'))
                else:
                    logging.debug(str("Response:\n%s" % response_body))

    body_len = str(len(response_body))

    def aenc(key, value):
        """Auto-enocde to ascii; Make headers compatible for Py2/Py3

        :param key: header key
        :param value: header value
        :return: auto encoded tuple
        """
        if sys.version_info < (3, ):
            return key.encode("ascii"), value.encode("ascii")
        else:
            return key, value

    if schema in ("autoconfig", "autodiscover"):
        response_headers = [
            aenc('Content-Type', 'text/xml'),
            aenc('Content-Length', body_len)
        ]
    elif schema == "mobileconfig":
        response_headers = [
            aenc('Content-Type', 'application/x-apple-aspen-config'
                 '; charset=utf-8'),
            aenc('Content-Disposition', 'attachment; '
                 'filename="company.mobileconfig'),
            aenc('Content-Length', body_len)
        ]
    else:
        # Failure?
        response_headers = [
            aenc('Content-Type', 'text/html'),
            aenc('Content-Length', body_len)
        ]

    if sys.version_info < (3, ):
        status = status.encode("ascii")

    start_response(status, response_headers)

    return [response_body]
Ejemplo n.º 53
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------- Learning how to work with str and bytes in memory in Python -----------
# Working with str in memory
from io import StringIO
f = StringIO()
f.write('hello')        # write str into memory
f.write(' ')
f.write('world!')
print(f.getvalue())   # get the str written so far

# Reading str from memory
f2 = StringIO('Hello!\nHi\nGoodbye')
while True:
    s = f2.readline()    # reading str from memory works just like reading a file
    if s == '':
        break
    print(s.strip())

# Use BytesIO to work with binary data in memory
from io import BytesIO
f = BytesIO()
# Write a byte stream
f.write('中文'.encode('utf-8'))    # encode() converts str to bytes

print(f.getvalue().decode('utf-8'))   # decode() converts the byte stream back to str

# Read a byte stream
f2 = BytesIO(b'\xe4\xb8\xad\xe6\x96\x87')
print(f2.read().decode('utf-8'))
Ejemplo n.º 54
0
# in order for your user to be able to learn from the examples. 

# Python provides a class called StringIO in module io that 
# creates a *mock* open file that you can work with 
# as if it were a real file. 
# These StringIO objects can be used anywhere that TextIO
# objects are expected. 

# In the next example, we create a StringIO object
# containing the same information as the file number_pairs.txt above.
# Then we read the first line as if it were read from the file.

from io import StringIO
input_string = '1.3 3.4\n2 4.2\n-1 1\n'
infile = StringIO(input_string)
infile.readline()
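# At this point the first line, '1.3 3.4\n', has been consumed; a short
# follow-up sketch showing that the next readline() continues from there:
print(infile.readline().strip())   # '2 4.2'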


# We can also write to StringIO objects as if they were files.
# Then we can read their contents as strings using the method getvalue.


from io import StringIO
outfile = StringIO()
outfile.write('1.3 3.4 4.7\n')
outfile.write('2 4.2 6.2\n')
outfile.write('-1 1 0.0\n')
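
# A minimal sketch of the read-back step described above: getvalue() returns
# everything written to the StringIO so far as one str.
print(outfile.getvalue())   # '1.3 3.4 4.7\n2 4.2 6.2\n-1 1 0.0\n'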
Ejemplo n.º 55
0
# with open('./mydict2.py', 'a') as f2:
#     f2.write('Hello, world!')

# Reading and writing data is not limited to files; it can also be done in memory.
# StringIO, as the name suggests, reads and writes str in memory.

from io import StringIO
f = StringIO()
f.write('hello')
print(f.getvalue())

# To read from a StringIO, initialize it with a str and then read it like a file:

f = StringIO('Hello!\nHi!\nGoodbye!')
while True:
    s = f.readline()
    if s == '':
        break
    print(s.strip())
# StringIO can only handle str; to work with binary data, use BytesIO instead.

from io import BytesIO
f = BytesIO()
f.write('中文'.encode('utf-8'))
print(f.getvalue())

import os
# If os.name is 'posix', the system is Linux, Unix or Mac OS X; if it is 'nt', it is Windows.
print(os.name)
# Note that uname() is not available on Windows, i.e. some functions in the os
# module are operating-system specific.
print(os.uname())
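
# A guarded variant of the call above (a minimal sketch): check that uname()
# exists before calling it, since it is missing on Windows.
if hasattr(os, "uname"):
    print(os.uname())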
Ejemplo n.º 56
0
# -*- coding: utf-8 -*-

from io import StringIO

f = StringIO()
f.write('Hello')
f.write(', ')
f.write('world.')
print(f.getvalue())

fs = StringIO('Hello!\nHave a nice day!\nByeBye!')
while True:
    s = fs.readline()
    if s == '':
        break
    print(s.strip())
Ejemplo n.º 57
0
from http import client
from hashlib import sha512
from binascii import hexlify
from base64 import b64encode
from json import loads, dumps
from subprocess import run, Popen, PIPE
from io import StringIO
from pathlib import Path
from getpass import getpass

# Get the UUID
cmd_result = run(['wmic', 'csproduct', 'get', 'uuid'], stdout=PIPE)
cmd_result_str = StringIO(cmd_result.stdout.decode('utf-8'))

# skip the first line
cmd_result_str.readline()

# Grab UUID
uuid = cmd_result_str.readline().strip()

print("Using uuid: '{}'".format(uuid))

username = input("Username: "******"Password: ")

# immediately convert it.
password = hexlify(sha512(bytes(password, 'utf-8')).digest()).decode('utf-8')

print(password)

headers = {
Ejemplo n.º 58
0
file = open("path")
while True:
    txt = file.read(1)  # iterate byte by byte; use file.readline() to iterate line by line
    if not txt:
        break
    print(txt)
file.close()
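
# A minimal sketch of the line-by-line variant mentioned in the comment above
# (same placeholder "path" as the open() call):
with open("path") as fh:
    while True:
        txt = fh.readline()
        if not txt:
            break
        print(txt, end="")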
# Lazy iteration with a for loop ----
import fileinput
for line in fileinput.input("path"):
    print(line)
# The StringIO() class ----
from io import StringIO
io_str = StringIO("Hello\nWorld\nWelcome")
while True:
    line = io_str.readline()
    if line == "":
        break
    print(line.strip())

StringIO().write("")
StringIO().readline()
StringIO().getvalue()

# =================== Serialization and deserialization ===================

# ------ Python-specific ---------------
import pickle
# memory -> file: pickle.dump() / pickle.dumps(); the latter serializes to a
# bytes object instead of writing to a file.
dic = dict(name="萌萌", num=6017)
pickle.dumps(dic)  # converts the data into a byte sequence that only Python understands
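# A minimal round-trip sketch of the dumps()/loads() pair described above
# (data_bytes and restored are illustrative names):
data_bytes = pickle.dumps(dic)
restored = pickle.loads(data_bytes)
print(restored)   # {'name': '萌萌', 'num': 6017}
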
# StringIO
# Very often, reading and writing data is not limited to files; it can also be
# done in memory. StringIO, as the name suggests, reads and writes str in memory.
# To write str into a StringIO, first create one and then write to it like a file:
from io import StringIO

fStringIO = StringIO()
fStringIO.write('Hello')
fStringIO.write(' ')
fStringIO.write('World!')
# getvalue() returns the str that has been written so far.
print(fStringIO.getvalue())
# To read from a StringIO, initialize it with a str and then read it like a file:
fStringIO1 = StringIO('Hello\nHi\nBybey')
while True:
    s = fStringIO1.readline()
    if s == '':
        break
    print(s.strip())

# BytesIO
print('BytesIO========================================================')
# StringIO can only handle str; to work with binary data, use BytesIO instead.
# BytesIO reads and writes bytes in memory; create a BytesIO and write some bytes to it:
from io import BytesIO

fByteIO = BytesIO()
# Note that what is written is not a str but UTF-8 encoded bytes.
fByteIO.write('哈喽'.encode('utf-8'))
print(fByteIO.getvalue())
# Similar to StringIO, you can initialize a BytesIO with bytes and then read it like a file:
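# A minimal sketch of that read step (fByteIO1 is an illustrative name):
fByteIO1 = BytesIO('哈喽'.encode('utf-8'))
print(fByteIO1.read().decode('utf-8'))   # 哈喽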
Ejemplo n.º 60
0
from io import StringIO


class OnlyReadline:
    def __init__(self, s):
        self.sio = StringIO(s)

    def readline(self):
        return self.sio.readline()
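

# A minimal usage sketch (ol is an illustrative name): the wrapper exposes only
# readline(), hiding the rest of the StringIO interface.
ol = OnlyReadline("first line\nsecond line\n")
print(ol.readline())   # -> 'first line\n'
print(ol.readline())   # -> 'second line\n'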