Example #1
    def display(self, text):
        self.input.write(text)  # store history data

        if self.actionHex.isChecked():
            convertedtext = ''
            buf = StringIO(text)
            line = buf.readline()
            while line:
                if len(line) <= 16:
                    hexpart = ' '.join('{:02X}'.format(ord(c))
                                       for c in line).ljust(52)
                    strpart = line.translate(TRANS_TABLE)

                    convertedtext += hexpart + strpart + '\n'
                    line = buf.readline()
                else:
                    hexpart = ' '.join('{:02X}'.format(ord(c))
                                       for c in line[:16]).ljust(52)
                    strpart = line[:16].translate(TRANS_TABLE)

                    convertedtext += hexpart + strpart + '\n'
                    line = line[16:]

            self.recvTextEdit.moveCursor(QTextCursor.End)
            self.recvTextEdit.insertPlainText(convertedtext)
            self.recvTextEdit.moveCursor(QTextCursor.End)
        else:
            # self.recvTextEdit.append(text)
            self.recvTextEdit.moveCursor(QTextCursor.End)
            self.recvTextEdit.insertPlainText(text)
            self.recvTextEdit.moveCursor(QTextCursor.End)
Example #2
def list_destfiles(conn, destdir, issudo=False):
    # Build a dict of the files in the destination directory, mapping each
    # filename to (d/-, size, mtime). If the directory does not exist it is
    # created; issudo controls whether the commands run as root.
    # Note on `find` output: if destdir has no trailing slash, the listed
    # names start with a slash; if it ends with a slash, they do not.
    cmdprefix = 'sudo ' if issudo else ''
    filedict = {}
    if conn.run('{}test -e {}'.format(cmdprefix, destdir), warn=True).failed:
        conn.run('{}mkdir -p {}'.format(cmdprefix, destdir))
    else:
        resp = conn.run(
            '{}find {} | xargs ls -ld --time-style="+%Y-%m-%d %H:%M:%S"'.
            format(cmdprefix, destdir),
            hide=True)
        from cStringIO import StringIO
        fakefile = StringIO()
        fakefile.write(resp.stdout.strip())
        filenames = []
        fakefile.seek(0)
        row = fakefile.readline()
        while row:
            row = row.split(' ')
            filedict[row[-1].strip().replace(destdir, '',
                                             1)] = (row[0][0], row[-4],
                                                    row[-3] + ' ' + row[-2])
            row = fakefile.readline()
        fakefile.close()
    return filedict
    def _compare_csv_files_time_insensitive(self, expected, output):

        skip_first_row = expected[0:2] == '\r\n'
        expected = StringIO(expected)
        output = StringIO(output)
        line_number = 1

        if skip_first_row:
            self.assertEqual(expected.readline(), output.readline())
            line_number += 1

        expected = csv.DictReader(expected)
        output = csv.DictReader(output)

        for expected_row in expected:
            output_row = output.next()

            try:
                timestamp = float(output_row['_time'])
                datetime.fromtimestamp(timestamp)
            except BaseException as error:
                self.fail(error)
            else:
                output_row['_time'] = expected_row['_time']

            self.assertDictEqual(
                expected_row, output_row,
                'Error on line {0}: expected {1}, not {2}'.format(
                    line_number, expected_row, output_row))

            line_number += 1

        self.assertRaises(StopIteration, output.next)
        return
Example #4
    def display(self, text):
        self.input.write(text)      # store history data

        if self.actionHex.isChecked():
            convertedtext = ''
            buf = StringIO(text)
            line = buf.readline()
            while line:
                if len(line) <= 16:
                    hexpart = ' '.join('{:02X}'.format(ord(c)) for c in line).ljust(52)
                    strpart = line.translate(TRANS_TABLE)

                    convertedtext += hexpart + strpart + '\n'
                    line = buf.readline()
                else:
                    hexpart = ' '.join('{:02X}'.format(ord(c)) for c in line[:16]).ljust(52)
                    strpart = line[:16].translate(TRANS_TABLE)

                    convertedtext += hexpart + strpart + '\n'
                    line = line[16:]
            
            self.recvTextEdit.moveCursor(QTextCursor.End)
            self.recvTextEdit.insertPlainText(convertedtext)
        else: 
            # self.recvTextEdit.append(text)
            self.recvTextEdit.moveCursor(QTextCursor.End)
            self.recvTextEdit.insertPlainText(text)
            self.recvTextEdit.moveCursor(QTextCursor.End)
    def _compare_csv_files_time_sensitive(self, expected, output):

        self.assertEqual(len(expected), len(output))

        skip_first_row = expected[0:2] == '\r\n'
        expected = StringIO(expected)
        output = StringIO(output)
        line_number = 1

        if skip_first_row:
            self.assertEqual(expected.readline(), output.readline())
            line_number += 1

        expected = csv.DictReader(expected)
        output = csv.DictReader(output)

        for expected_row in expected:
            output_row = output.next()
            self.assertDictEqual(
                expected_row, output_row, 'Error on line {0}: expected {1}, not {2}'.format(
                    line_number, expected_row, output_row))
            line_number += 1

        self.assertRaises(StopIteration, output.next)
        return
Example #6
def upload(request, data=None, phome=None, con_len=None):
    try:
        err = {}
        tmp_fname = get_random_name()

        if not os.path.exists(config.tmp_dir):
            os.makedirs(config.tmp_dir)

        tmp_fpath = os.path.join(config.tmp_dir, tmp_fname)
        tmp_dirpath = tmp_fpath + '_dir'

        package_data = StringIO(data) if data \
                       else StringIO(request.raw_post_data)
        username = request.user.username

        package_home = phome or package_data.readline().strip()
        package_home = '.'.join([username, package_home]).strip('.')

        content_length = con_len or int(package_data.readline().strip())
        store_file(tmp_fpath, package_data, content_length, config.chunk_size)

        callback_fn = make_parse_fn(request, err)
        num_package_dirs = add_packages_from_zip(tmp_fpath, tmp_dirpath,
                              package_home, config.repo_dir, callback_fn)
                              
    except RepositoryException, e:
        return apiresponse(e.message, 'err')
    def _compare_csv_files_time_sensitive(self, expected, output):

        self.assertEqual(len(expected), len(output))

        skip_first_row = expected[0:2] == '\r\n'
        expected = StringIO(expected)
        output = StringIO(output)
        line_number = 1

        if skip_first_row:
            self.assertEqual(expected.readline(), output.readline())
            line_number += 1

        expected = csv.DictReader(expected)
        output = csv.DictReader(output)

        for expected_row in expected:
            output_row = output.next()
            self.assertDictEqual(
                expected_row, output_row,
                'Error on line {0}: expected {1}, not {2}'.format(
                    line_number, expected_row, output_row))
            line_number += 1

        self.assertRaises(StopIteration, output.next)
        return
    def _compare_csv_files_time_insensitive(self, expected, output):

        skip_first_row = expected[0:2] == '\r\n'
        expected = StringIO(expected)
        output = StringIO(output)
        line_number = 1

        if skip_first_row:
            self.assertEqual(expected.readline(), output.readline())
            line_number += 1

        expected = csv.DictReader(expected)
        output = csv.DictReader(output)

        for expected_row in expected:
            output_row = output.next()

            try:
                timestamp = float(output_row['_time'])
                datetime.fromtimestamp(timestamp)
            except BaseException as error:
                self.fail(error)
            else:
                output_row['_time'] = expected_row['_time']

            self.assertDictEqual(
                expected_row, output_row, 'Error on line {0}: expected {1}, not {2}'.format(
                    line_number, expected_row, output_row))

            line_number += 1

        self.assertRaises(StopIteration, output.next)
        return
Example #9
def parse_rows(data):
    body = StringIO(data)

    if data.startswith('#'):
        body.readline()

    reader = csv.reader(body, quotechar='"')
    return [DataRow(r) for r in reader]
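
A hedged usage sketch for parse_rows. DataRow and the csv/StringIO imports belong to the snippet's own module; a trivial stand-in for DataRow is declared here only so the lines run:

DataRow = list  # hypothetical stand-in for the snippet's real DataRow class
sample = '# exported header comment\n"a",1\n"b",2\n'
print(parse_rows(sample))  # -> [['a', '1'], ['b', '2']]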
Example #10
 def raw_to_matrix(self, raw):
     raw_io = StringIO("".join(raw))
     index = int(raw_io.readline())
     matrix = {}
     for idx in range(4):
         data = map(lambda x: int(x), raw_io.readline().strip().split())
         matrix[idx + 1] = set(data)
     return (index, matrix)
Example #11
class Tokenizer:
    '''
    TODO:
        self.line and DataSetIdentifierException are not tested here but in other places.
        It would still be nice to test them here.
    '''
    def __init__(self, buffer):
        if hasattr(buffer, 'readline'):
            self.stream = buffer
        else:
            self.stream = StringIO(buffer)
        self.line = ''
        
    def read(self, length):
        if self.line == '':
            self.line = self.stream.readline(-1)
            if self.line == dataset_marker or self.line[:-1] == dataset_marker:
                raise DataSetIdentifierException
        
        buffer = self.line[:length]
        self.line = self.line[length:]
        
        if buffer == '':
            raise StopIteration('Empty string is returned from stream')
            
        if buffer == '\n':
            return self.read(length)
        if buffer.endswith('\n'):
            buffer = buffer[:-1]
        return buffer
    
    def read_line(self):
        self.line = ''
        buffer = self.stream.readline(-1)
        if buffer.endswith('\n'):
            buffer = buffer[:-1]
        return buffer
        
    def read_all(self):
        self.line = ''
        return self.stream.read()
    
    def tell(self):
        return self.stream.tell() - len(self.line)
        
    def seek(self, pos):
        return self.stream.seek(pos)
        
    def close(self):
        self.stream.close()
        
    #__enter__ and __exit__ are required to be used with "with" statements
    def __enter__(self):
        return self
        
    def __exit__(self, type, value, traceback):
        self.close()
Example #12
class Tokenizer:
    '''
    TODO:
        self.line and DataSetIdentifierException are not tested here but in other places.
        It would still be nice to test them here.
    '''
    def __init__(self, buffer):
        if hasattr(buffer, 'readline'):
            self.stream = buffer
        else:
            self.stream = StringIO(buffer)
        self.line = ''

    def read(self, length):
        if self.line == '':
            self.line = self.stream.readline(-1)
            if self.line == dataset_marker or self.line[:-1] == dataset_marker:
                raise DataSetIdentifierException

        buffer = self.line[:length]
        self.line = self.line[length:]

        if buffer == '':
            raise StopIteration('Empty string is returned from stream')

        if buffer == '\n':
            return self.read(length)
        if buffer.endswith('\n'):
            buffer = buffer[:-1]
        return buffer

    def read_line(self):
        self.line = ''
        buffer = self.stream.readline(-1)
        if buffer.endswith('\n'):
            buffer = buffer[:-1]
        return buffer

    def read_all(self):
        self.line = ''
        return self.stream.read()

    def tell(self):
        return self.stream.tell() - len(self.line)

    def seek(self, pos):
        return self.stream.seek(pos)

    def close(self):
        self.stream.close()

    #__enter__ and __exit__ are required to be used with "with" statements
    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
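
A hedged usage sketch for the Tokenizer above. dataset_marker, DataSetIdentifierException and the StringIO import come from the snippet's own module; stand-ins are declared here only so the lines run:

dataset_marker = '#DATASET'                    # hypothetical stand-in
class DataSetIdentifierException(Exception):   # hypothetical stand-in
    pass

t = Tokenizer('abcdef\nghi\n')
print(t.read(3))       # 'abc'
print(t.read(10))      # 'def' (stops at the newline, which is stripped)
print(t.read_line())   # 'ghi'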
Example #13
    def headers(self):
        from httplib import HTTPMessage

        header = StringIO(''.join(self.header))
        try:
            header.readline() # eat the first line 'HTTP/1.1 200 OK'
            return HTTPMessage(header)
        finally:
            header.close()
Example #14
def splitLines(buf):
  lines=[]
  f=StringIO(buf)
  l=f.readline()
  while len(l):
    while l and l[-1] in ['\r', '\n']:
      l=l[:-1]
    lines.append(l)
    l=f.readline()
  return lines
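
A hedged usage sketch; splitLines strips both CR and LF terminators:

# sketch only -- assumes splitLines and its StringIO import are in scope
print(splitLines('alpha\r\nbeta\ngamma\r\n'))   # -> ['alpha', 'beta', 'gamma']
print(splitLines('no trailing newline'))        # -> ['no trailing newline']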
    def test_recorder(self):

        if (python_version[0] == 2 and python_version[1] < 7):
            print("Skipping test since we're on {0}".format(
                ".".join(map(str, python_version))))
            pass

        # Grab an input/output recording, the results of a prior countmatches run

        recording = os.path.join(self._package_path, 'recordings', 'scpv2',
                                 'Splunk-6.3', 'countmatches.')

        with gzip.open(recording + 'input.gz', 'rb') as file_1:
            with io.open(recording + 'output', 'rb') as file_2:
                ifile = StringIO(file_1.read())
                result = StringIO(file_2.read())

        # Set up the input/output recorders that are under test

        ifile = Recorder(mktemp(), ifile)

        try:
            ofile = Recorder(mktemp(), StringIO())

            try:
                # Read and then write a line
                ifile.readline()
                ofile.write(result.readline())

                # Read and then write a block
                ifile.read()
                ofile.write(result.read())

                # Verify that what we wrote is equivalent to the original recording, the result from a prior
                # countmatches run
                self.assertEqual(ofile.getvalue(), result.getvalue())

                # Verify that we faithfully recorded the input and output files
                ifile._recording.close()
                ofile._recording.close()

                with gzip.open(ifile._recording.name, 'rb') as file_1:
                    with gzip.open(ofile._recording.name, 'rb') as file_2:
                        self.assertEqual(file_1.read(), ifile._file.getvalue())
                        self.assertEqual(file_2.read(), ofile._file.getvalue())

            finally:
                ofile._recording.close()
                os.remove(ofile._recording.name)

        finally:
            ifile._recording.close()
            os.remove(ifile._recording.name)

        return
Example #16
    def headers(self):
        from httplib import HTTPMessage

        if not self.cached_headers:
            header = StringIO(self.raw_headers)
            try:
                header.readline() # eat the first line 'HTTP/1.1 200 OK'
                self.cached_headers = HTTPMessage(header)
            finally:
                header.close()

        return self.cached_headers
    def test_recorder(self):

        if python_version[0] == 2 and python_version[1] < 7:
            print("Skipping test since we're on {0}".format(".".join(map(str, python_version))))
            pass

        # Grab an input/output recording, the results of a prior countmatches run

        recording = os.path.join(self._package_path, "recordings", "scpv2", "Splunk-6.3", "countmatches.")

        with gzip.open(recording + "input.gz", "rb") as file_1:
            with io.open(recording + "output", "rb") as file_2:
                ifile = StringIO(file_1.read())
                result = StringIO(file_2.read())

        # Set up the input/output recorders that are under test

        ifile = Recorder(mktemp(), ifile)

        try:
            ofile = Recorder(mktemp(), StringIO())

            try:
                # Read and then write a line
                ifile.readline()
                ofile.write(result.readline())

                # Read and then write a block
                ifile.read()
                ofile.write(result.read())

                # Verify that what we wrote is equivalent to the original recording, the result from a prior
                # countmatches run
                self.assertEqual(ofile.getvalue(), result.getvalue())

                # Verify that we faithfully recorded the input and output files
                ifile._recording.close()
                ofile._recording.close()

                with gzip.open(ifile._recording.name, "rb") as file_1:
                    with gzip.open(ofile._recording.name, "rb") as file_2:
                        self.assertEqual(file_1.read(), ifile._file.getvalue())
                        self.assertEqual(file_2.read(), ofile._file.getvalue())

            finally:
                ofile._recording.close()
                os.remove(ofile._recording.name)

        finally:
            ifile._recording.close()
            os.remove(ifile._recording.name)

        return
Example #18
def git_add_commit_push(obj, gitpath, _test_=None):
    # Full add/commit/push cycle for a local git working copy
    # gitpath = '/Users/vs/Projects/vansky'
    if _test_ == None: _test_ = GV_TEST
    command = 'git -C {} status -s'.format(gitpath)
    six.print_(command)
    if _test_: return
    ret = obj['_conn_'].run(command, pty=True, hide=True)
    from cStringIO import StringIO
    fakefile = StringIO()
    fakefile.write(ret.stdout)
    filenames = []
    fakefile.seek(0)
    row = fakefile.readline()
    while row:
        filenames.append(row.split(' ')[-1])
        six.print_('[{}] {}'.format(len(filenames), row.strip()))
        row = fakefile.readline()
    fakefile.close()
    if not filenames:
        six.print_('Nothing to commit, working directory clean')
        return
    commits = []
    nos = input("Enter the numbers you want to submit(A for All): ").strip()
    if not nos:
        six.print_("You didn't select any files.")
        return
    if nos in ('A'):
        commits = filenames
    else:
        for no in nos.split(' '):
            if no.isdigit() and int(no) > 0 and int(no) <= len(filenames):
                commits.append(filenames[int(no) - 1])
    if commits:
        command = 'git -C {} add {}'.format(
            gitpath,
            ' '.join([os.path.join(gitpath, x.strip()) for x in commits]))
        six.print_(command)
        obj['_conn_'].run(command, pty=True)
        summary = input("Enter summit message: ").strip()
        if summary:
            command = 'git -C {} commit -m "{}"'.format(gitpath, summary)
            six.print_(command)
            obj['_conn_'].run(command, pty=True)
            command = 'git -C {} push'.format(gitpath)
            six.print_(command)
            obj['_conn_'].run(command, pty=True)
        else:
            six.print_("You didn't give commit message.")
    else:
        six.print_("You didn't select any files.")
    def __init__(self, boundary, content):
        self.field = {}
        fields = content.split(boundary)

        for field in fields:  
            if len(field) < 5:
                continue

            data = StringIO(field)

            data.readline()
            field_head = data.readline()
            name = field_head[field_head.find("name=") + 5:-2]
            if name.find(";") != -1:
                name = name[:name.find(";")]
                data.readline()
                data.readline()

                value = data.read(len(field))
                print "v:" + value
                value = value[:-2]
                self.field[name] = value 
            else:
                data.readline()
                value = data.readline().rstrip("\r\n")
                self.field[name] = value 
def extract_data(state, district, type):
    url = "%sdata/csv/%s/%s/%s/%s/%s" % (base, state, district, type, MIN_YEAR, MAX_YEAR)
    data = scraperwiki.scrape(url)
    f = StringIO(data)
    f.readline()  # blank first line
    f.readline()  # header row
    reader = csv.reader(f, dialect=csv.excel_tab)
    tempdata = {}
    for row in reader:
        year = row[0]
        for month in range(1, 13):
            value = row[month]
            key = "%s%02d" % (year, month)
            tempdata[key] = value
    return tempdata
Example #21
 def save(self):
   buf = StringIO()
   self._parser.write(buf)
   buf.seek(0)
   assert buf.readline() == '[__global__]\n'
   with open(self.filename, 'w') as dst:
     dst.write(buf.read())
Example #22
class HTTPRequestParser(BaseHTTPRequestHandler):
    """Parse the request message into meaningful components.

    The results are as follows:

        error
        method
        url
        version
        headers
        body

    See http://stackoverflow.com/questions/2115410/does-python-have-a-module-for-parsing-http-requests-and-responses
    """
    def __init__(self, request):
        metadata, data = request.split('\r\n\r\n', 1)
        self.rfile = StringIO(metadata)
        self.raw_requestline = self.rfile.readline()
        self.error = ()
        self.parse_request()

        # Note: values on the right side are generated
        #       in BaseHTTPRequestHandler::parse_request()
        if not self.error:
            self.method = self.command
            self.url = self.path
            self.version = self.request_version
            self.headers = {name: self.headers[name] for name in self.headers}
            self.body = data

    def send_error(self, code, message):
        self.error = (code, message)
 def _getCmdPairs(self, cmd, sep='=', vector=False):
     result = {}
     try:
         p = subprocess.Popen(cmd,
                              shell=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
         out, err = p.communicate()
         buf = StringIO(str(out))
         while True:
             s = buf.readline()
             if None == s or len(s) <= 0:
                 break
             s = s.strip()
             i = s.find(sep)
             if i <= 0:
                 continue
             k = s[:i].strip()
             v = s[i + 1:].strip()
             if vector:
                 if not result.has_key(k):
                     result[k] = [v]
                 else:
                     result[k].append(v)
             else:
                 result[k] = v
     except:
         print("Failed to execute command: " + cmd)
     return result
Example #24
class ReadableDocument(BaseDocument):

    implements(IReadableDocument)

    def __init__(self, data, mime_type, encoding=None):
        BaseDocument.__init__(self, mime_type, encoding)
        self._data = StringIO(data)

    def read(self, size=-1, decode=True):
        data = self._data.read(size)
        return self._decode(data) if decode else data

    def readline(self, size=-1, decode=True):
        data = self._data.readline(size)
        return self._decode(data) if decode else data

    def readlines(self, sizehint=-1, decode=True):
        lines = self._data.readlines(sizehint)
        return [self._decode(l) for l in lines] if decode else lines

    def __iter__(self):
        return self

    def next(self):
        return self._decode(self._data.next())
def max_input_num(txt):
    
    result = -1;
    input_line_count = 0;
    io = StringIO(txt)
    reach = 0
    des_line_count = 0;
    while(1):
        string = io.readline()
        if str(string) == "Output\n" or str(string) == "Output \n":
            break
        if str(string) == "":
            break
        if reach == 0:
            if str(string) == "Input\n" or str(string) == "Input \n":
                reach = 1
            des_line_count += 1
            continue
        else:
            input_line_count+=1;
            tmp = re.findall(r'\d+', string)
            for element in tmp:
                if int(element) > result:
                    result = int(element)
    return result,input_line_count, des_line_count
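
A hedged sketch of how max_input_num might be called (with the integer cast above); the text follows the Input/Output layout the function scans for:

# sketch only -- assumes the snippet's re and StringIO imports are in scope
statement = "Some description\nInput\n3 10\n7\nOutput\n10\n"
print(max_input_num(statement))  # -> (10, 2, 2): largest input number, input lines, description lines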
Example #26
class Producer(object):
    implements(ipg.IProducer)

    def __init__(self, fail=False):
        self.fail = fail

        self.fp = StringIO(copyData)

    def description(self, ntuples, binaryTuples):
        self.ntuples = ntuples
        self.binaryTuples = binaryTuples

    def read(self):
        if self.fail:
            raise Exception("copy failed")

        return self.fp.readline()

    def close(self):
        result = protocol.Result()

        # XXX
        result.ntuples = self.ntuples
        result.binaryTuples = self.binaryTuples

        result.status = protocol.PGRES_COPY_IN

        return result
Example #27
    def go_iter(self, f):
        """ iterator that parses a file object, and yields the
            resulting key-value pairs """
        if isinstance(f, str):
            f = StringIO(f)
        self._f = f  # this is just used for the __finpt_obj

        if self._debug:
            print 'STARTED PARSING'

        try:
            while True:
                l = f.readline()
                if l == '':
                    break
                for pattern, commands in self._rules:
                    match = pattern.match(l)
                    if match is not None:
                        rets = self._execute_commands(commands, match)
                        for ret in rets:
                            yield ret
        except StopParsing:
            pass

        if self._eof_flush:
            yield dict(self._current_data)
        if self._debug:
            print 'ENDED PARSING'
Example #28
 def test_read_section(self):
     c = StringIO('http http_value { # http section\n\tsome write_sectionvalue;\n another value;\n}')
     self.provider._fp = c
     self.assertTrue(self.provider.read_section(c.readline(), self.root))
     self.assertEqual(self.provider._cursect.find('http').text.strip(), 'http_value')
     self.assertEqual(self.provider._cursect.find('http').getiterator()[1].text.strip(), 'http section')
     del(c)
Example #29
 def _parse_files(self, files_str):
     files = []
     files_str_io = StringIO(files_str)
     nl = files_str_io.readline()
     
     while nl.strip()!="":
         file_str = nl.split()
         path = file_str[0].split("#")[0]
         file_revision = file_str[0].split("#")[1]
         action = file_str[2]
         changelist_no = file_str[4]
         files.append(p4_file(path, file_revision, action, changelist_no))
         nl = files_str_io.readline()          
     
     return files
     
Example #30
 def _parse_changelists(self, changelists_str):
     changelists = []
     changelists_str_io = StringIO(changelists_str)
     nl = changelists_str_io.readline()
     
     while nl.strip()!="":
         change = nl.split()
         change_no = change[1]
         change_date = datetime.strptime(change[3]+" "+change[4], "%Y/%m/%d %H:%M:%S")
         user = change[6].split('@')[0]
         workspace = change[6].split('@')[1]
         desc = nl.rsplit('\'')[1]
         changelists.append(p4_changelist(change_no, change_date, user, workspace, desc))
         nl = changelists_str_io.readline()
         
     return changelists
Example #31
def load_arends(g):
    """ Loads a graph stored in the format used by Felix Arends. """
    f = StringIO(g)
    ret = {}
    n = int(f.readline()[:-1])
    for i in xrange(n):
        ret[i] = set()
    for v in xrange(n):
        bits = map(int, f.readline()[:-1].split(' '))
        assert bits[0] == len(bits) - 1
        for w in bits[1:]:
            ret[v].add(w)
    for v in ret:
        for w in ret[v]:
            assert v in ret[w]
    return ret
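
A hedged sketch (Python 2, since load_arends uses xrange): a triangle graph in this format, where the first line is the vertex count and each following line starts with a vertex's degree:

# sketch only -- assumes the snippet's StringIO import is in scope
triangle = "3\n2 1 2\n2 0 2\n2 0 1\n"
print(load_arends(triangle))  # -> {0: set([1, 2]), 1: set([0, 2]), 2: set([0, 1])}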
Example #32
	def go_iter(self, f):
		""" iterator that parses a file object, and yields the
		    resulting key-value pairs """
		if isinstance(f, str):
			f = StringIO(f)
		self._f = f # this is just used for the __finpt_obj

		if self._debug:
			print 'STARTED PARSING'

		try:
			while True:
				l = f.readline()
				if l == '':
					break
				for pattern, commands in self._rules:
					match = pattern.match(l)
					if match is not None:
						rets = self._execute_commands(commands, match)
						for ret in rets:
							yield ret
		except StopParsing:
			pass

		if self._eof_flush:
			yield dict(self._current_data)
		if self._debug:
			print 'ENDED PARSING'
Example #33
 def test_concept2_readonly(self):
     """Test that the .concept2 property is immutable."""
     relObj = random.sample(self.relObjects, 1)
     drugsFile = StringIO(drugsData)
     drugObj = rxnorm.Drug(drugsFile.readline())
     self.assertRaises(AttributeError, relObj.__setattr__, 'concept2',
                       drugObj)
Example #34
class HTTP(httplib.HTTP):
    # A revised version of the HTTP class that can do basic
    # HTTP 1.1 connections, and also compensates for a bug
    # that occurs on some platforms in 1.5 and 1.5.1 with
    # socket.makefile().read()

    read_bug = sys.version[:5] in ('1.5 (', '1.5.1')

    def putrequest(self, request, selector, ver='1.1'):
        selector = selector or '/'
        str = '%s %s HTTP/%s\r\n' % (request, selector, ver)
        self.send(str)

    def getreply(self):
        file = self.sock.makefile('rb')
        data = string.join(file.readlines(), '')
        file.close()
        self.file = StringIO(data)
        line = self.file.readline()
        try:
            [ver, code, msg] = string.split(line, None, 2)
        except ValueError:
            try:
                [ver, code] = string.split(line, None, 1)
                msg = ""
            except ValueError:
                return -1, line, None
        if ver[:5] != 'HTTP/':
            return -1, line, None
        code = string.atoi(code)
        msg = string.strip(msg)
        headers = mimetools.Message(self.file, 0)
        return ver, code, msg, headers
Example #35
 def _parse_description_prop(self, client_str, p4conf):
     arg_name = "_description"
     ind = client_str.find(p4conf.ArgToClient(arg_name))
     
     if ind == -1:
         return
     
     client_str_io = StringIO(client_str[ind:])
     nl = client_str_io.readline() # "Description:" string
     nl = client_str_io.readline()
     description = ""
     while nl.strip()!="":
         description += nl.strip('\t')
         nl = client_str_io.readline()
         
     setattr(p4conf, arg_name, description)
Example #36
def iter_lines(foo):
    stri = StringIO(foo)
    while True:
        nl = stri.readline()
        if nl == '':
            break
        yield nl.strip('\n')
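
A hedged usage sketch:

# sketch only -- assumes the snippet's StringIO import is in scope
for line in iter_lines('one\ntwo\nthree\n'):
    print(line)  # prints 'one', 'two', 'three' on separate lines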
Example #37
def replace_text(org_data, license_data):
    """Replace the license token in the string"""
    shandle = StringIO(org_data)
    out = ''
    test = False

    while True:
        line = shandle.readline()

        if line == '':
            break

        if SECTION_MARKER.search(line) and test == False:
            test = True
        elif SECTION_MARKER.search(line) and test == True:
            test = False
            if license_data:
                out += license_data
                out += '\n'
        elif test == True:
            continue

        out += line
    shandle.close()
    return out
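
A hedged sketch of the marker-delimited replacement. SECTION_MARKER is defined elsewhere in the snippet's module; a hypothetical marker regex is declared here only so the lines run:

import re
SECTION_MARKER = re.compile(r'^# LICENSE MARKER')   # hypothetical stand-in
original = '# LICENSE MARKER\nold license text\n# LICENSE MARKER\nreal code\n'
print(replace_text(original, 'new license text'))
# -> '# LICENSE MARKER\nnew license text\n# LICENSE MARKER\nreal code\n'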
Example #38
def replace_text(org_data, license_data):
    """Replace the license token in the string"""
    shandle = StringIO(org_data)
    out = ''
    test = False

    while True:
        line = shandle.readline()

        if line == '':
            break

        if SECTION_MARKER.search(line) and test == False:
            test = True
        elif SECTION_MARKER.search(line) and test == True:
            test = False
            if license_data:
                out += license_data
                out += '\n'
        elif test == True:
            continue

        out += line
    shandle.close()
    return out
Example #39
def process(msg):
    if msg.get_content_type() <> 'multipart/mixed':
        return None
    # Find the first subpart, which has no MIME type
    try:
        subpart = msg.get_payload(0)
    except IndexError:
        # The message *looked* like a multipart but wasn't
        return None
    data = subpart.get_payload()
    if isinstance(data, ListType):
        # The message is a multi-multipart, so not a matching bounce
        return None
    body = StringIO(data)
    state = 0
    addrs = []
    while 1:
        line = body.readline()
        if not line:
            break
        if state == 0:
            if scre.search(line):
                state = 1
        if state == 1:
            if '@' in line:
                addrs.append(line)
    return addrs
Example #40
 def test_read_section(self):
     c = StringIO('http http_value { # http section\n\tsome write_sectionvalue;\n another value;\n}')
     self.provider._fp = c
     self.assertTrue(self.provider.read_section(c.readline(), self.root))
     self.assertEqual(self.provider._cursect.find('http').text.strip(), 'http_value')
     self.assertEqual(self.provider._cursect.find('http').getiterator()[1].text.strip(), 'http section')
     del(c)
Example #41
  def _parse(self, grid):
    header = {}
    buf = StringIO(grid)

    while True:
      line = buf.readline().split()
      if not line:
        break
      word = line[0].lower()
      if word in ['ncols', 'nrows']:
        header[word] = int(line[1])
      elif word in ['xllcorner', 'yllcorner', 'cellsize', 'nodata_value']:
        header[word] = float(line[1])
      else:
        if len(line) != header['ncols']:
          raise ArcGridParseError('Expected %d columns, found %d' % (len(line), header['ncols']))
        self.data.extend(map(float, line))

    self.width = int(header['ncols'])
    self.height = int(header['nrows'])
    self.llx = header['xllcorner']
    self.lly = header['yllcorner']
    self.cellsize = header['cellsize']
    if header.has_key('nodata_value'):
      self.nodata = float(header.get('nodata_value'))
    else:
      self.nodata = None
Example #42
def arrayfile(data_file, shape, descr, fortran=False):
    '''
    Returns an array that is memory-mapped to an NPY (v1.0) file

    Arguments
    ---------
    data_file :
        a file-like object opened in a write mode compatible with NumPy's
        memory-mapped array types (see `numpy.memmap`). It is the responsibility
        of the caller to close the file.

    shape : tuple
        shape of the ndarray.

    descr : str
        a typecode str (see `array` of `numpy.dtype`). Will be converted to a
        NumPy dtype.

    fortran : bool
        optional; if True, the array uses Fortran data order. Default: use C
        order.
    '''
    from numpy.lib import format
    header = {'descr': descr, 'fortran_order': fortran, 'shape': shape}
    cio = StringIO()
    format.write_array_header_1_0(cio, header)  # write header here first
    format.write_array_header_1_0(data_file, header)  # write header
    cio.seek(0)
    offset = len(cio.readline())  # get offset
    return np.memmap(data_file,
                     dtype=np.dtype(descr),
                     mode=data_file.mode,
                     shape=shape,
                     offset=offset)
Example #43
def process(msg):
    if msg.get_content_type() <> 'multipart/mixed' or not msg['x-mailer']:
        return None
    if msg['x-mailer'][:3].lower() not in ('nov', 'ntm', 'int'):
        return None
    addrs = {}
    # find the first text/plain part in the message
    textplain = find_textplain(msg)
    if not textplain:
        return None
    body = StringIO(textplain.get_payload())
    while 1:
        line = body.readline()
        if not line:
            break
        mo = acre.search(line)
        if mo:
            addrs[mo.group('addr')] = 1
        elif '@' in line:
            i = line.find(' ')
            if i == 0:
                continue
            if i < 0:
                addrs[line] = 1
            else:
                addrs[line[:i]] = 1
    return addrs.keys()
 def _getCmdTable(self, cmd, sep=' '):
     result = {}
     try:
         p = subprocess.Popen(cmd,
                              shell=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
         out, err = p.communicate()
         buf = StringIO(str(out))
         head = []
         while True:
             s = buf.readline()
             if None == s or len(s) <= 0:
                 break
             s = self._strReplaceAll(s.strip(), sep + sep, sep)
             ss = s.split(sep)
             if None == ss or len(ss) <= 0:
                 continue
             if len(result.keys()) <= 0:
                 # head/title
                 head = ss
                 for s in ss:
                     result[s.strip()] = []
             else:
                 # data
                 for i in range(0, len(head)):
                     if i >= len(ss):
                         result[head[i]].append('')
                     else:
                         result[head[i]].append(ss[i])
     except Exception as e:
         print("Failed to get result table from cmd: " + cmd)
         print(e)
     return result
Example #45
class FastI(IOBase):
    def __init__(self, file):
        self._fd = file.fileno()
        self._buffer = StringIO()
        self.newlines = 0

    def read(self):
        while True:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            if not b:
                break
            ptr = self._buffer.tell()
            self._buffer.seek(0, 2), self._buffer.write(b), self._buffer.seek(ptr)
        self.newlines = 0
        return self._buffer.read()

    def readline(self):
        while self.newlines == 0:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            self.newlines = b.count("\n") + (not b)
            ptr = self._buffer.tell()
            self._buffer.seek(0,
                              2), self._buffer.write(b), self._buffer.seek(ptr)
        self.newlines -= 1
        return self._buffer.readline()
Example #46
class Producer(object):
    implements(ipg.IProducer)

    def __init__(self, fail=False):
        self.fail = fail
        
        self.fp = StringIO(copyData)
        
    def description(self, ntuples, binaryTuples):
        self.ntuples = ntuples
        self.binaryTuples = binaryTuples
        
    def read(self):
        if self.fail:
            raise Exception("copy failed")
        
        return self.fp.readline()

    def close(self):
        result = protocol.Result()
        
        # XXX
        result.ntuples = self.ntuples
        result.binaryTuples = self.binaryTuples

        result.status = protocol.PGRES_COPY_IN
        
        return result
Example #47
class ReadVFile(object):
    """Adapt a readv result iterator to a file like protocol."""
    def __init__(self, readv_result):
        self.readv_result = readv_result
        # the most recent readv result block
        self._string = None

    def _next(self):
        if (self._string is None
                or self._string.tell() == self._string_length):
            length, data = self.readv_result.next()
            self._string_length = len(data)
            self._string = StringIO(data)

    def read(self, length):
        self._next()
        result = self._string.read(length)
        if len(result) < length:
            raise errors.BzrError(
                'request for too much data from a readv hunk.')
        return result

    def readline(self):
        """Note that readline will not cross readv segments."""
        self._next()
        result = self._string.readline()
        if self._string.tell() == self._string_length and result[-1] != '\n':
            raise errors.BzrError('short readline in the readvfile hunk.')
        return result
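
A hedged usage sketch (Python 2, to match the .next() call): ReadVFile wraps an iterator of (length, data) pairs, the shape a readv() result takes:

# sketch only -- the (length, data) pairs below are illustrative
rv = ReadVFile(iter([(6, 'hello\n'), (6, 'world\n')]))
print(rv.readline())   # 'hello\n'
print(rv.read(6))      # 'world\n'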
Example #48
 def get_head(self):
     for y in self.read_all(seps=("\n\r\n", "\n\n")):
         yield y
     if self.request_data == '' and isinstance(self.sep_hit, EOR):
         raise SocketClosed()
     shs = StringIO(self.request_data)
     self.http_dict = {'header_string': self.request_data}
     # status line
     self.http_dict['status_line'] = stl = shs.readline()
     hh = stl.split(None, 2)
     if len(hh) != 3:
         raise Exception("Miss status line %s" % repr(stl))
     if hh[0].startswith("HTTP"):    # server response, used on the client side
         self.http_dict['version'] = hh[0]
         self.http_dict['status'] = hh[1]
         self.http_dict['reason'] = hh[2].strip()
         #self.http_dict.update(zip(('version', 'status', 'reason'), hh))
     elif hh[2].startswith("HTTP"):  # client request, used on the server side
         self.http_dict['action'] = hh[0]
         self.http_dict['path'] = hh[1]
         self.http_dict['version'] = hh[2].strip()
         #self.http_dict.update(action=hh[0], path=hh[1], version=hh[2])
     else:
         raise Exception("Bad status line: %s" % repr(stl))
     # one line has already been read from shs; what remains is the HTTP headers
     self.http_dict['headers'] = HTTPMessage(shs, 0)
Example #49
class FastIO(IOBase):
    newlines = 0

    def __init__(self, file):
        self._buffer = BytesIO()
        self._fd = file.fileno()
        self._writable = 'x' in file.mode or 'r' not in file.mode
        self.write = self._buffer.write if self._writable else None

    def read(self):
        return self._buffer.read() if self._buffer.tell() else os.read(
            self._fd,
            os.fstat(self._fd).st_size)

    def readline(self):
        while self.newlines == 0:
            b, ptr = os.read(self._fd,
                             max(os.fstat(self._fd).st_size,
                                 BUFSIZE)), self._buffer.tell()
            self._buffer.seek(0,
                              2), self._buffer.write(b), self._buffer.seek(ptr)
            self.newlines += b.count('\n') + (not b)
        self.newlines -= 1
        return self._buffer.readline()

    def flush(self):
        if self._writable:
            os.write(self._fd, self._buffer.getvalue())
            self._buffer.truncate(0), self._buffer.seek(0)
Example #50
class HTTP(httplib.HTTP):
    # A revised version of the HTTP class that can do basic
    # HTTP 1.1 connections, and also compensates for a bug
    # that occurs on some platforms in 1.5 and 1.5.1 with
    # socket.makefile().read()

    read_bug = sys.version[:5] in ('1.5 (', '1.5.1')

    def putrequest(self, request, selector, ver='1.1'):
        selector = selector or '/'
        str = '%s %s HTTP/%s\r\n' % (request, selector, ver)
        self.send(str)

    def getreply(self):
        file = self.sock.makefile('rb')
        data = string.join(file.readlines(), '')
        file.close()
        self.file = StringIO(data)
        line = self.file.readline()
        try:
            [ver, code, msg] = string.split(line, None, 2)
        except ValueError:
            try:
                [ver, code] = string.split(line, None, 1)
                msg = ""
            except ValueError:
                return -1, line, None
        if ver[:5] != 'HTTP/':
            return -1, line, None
        code = string.atoi(code)
        msg = string.strip(msg)
        headers = mimetools.Message(self.file, 0)
        return ver, code, msg, headers
Example #51
    def find_testcases(cls, test_program):
        # Collect all test cases
        args = [ test_program, '--gtest_list_tests' ]
        proc = LocalSubprocess(args)
        proc.start()
        returncode = proc.wait()
        if returncode != 0:
            raise JubaSkipTest('%s cannot list testcases' % test_program)

        # read input
        stri = StringIO(proc.stdout)
        testcases = []
        current_test = None
        re_test = re.compile('^([a-zA-Z0-9_]+\.)')
        re_testcase = re.compile('^  ([a-zA-Z0-9_]+)')
        while True:
            line = stri.readline()
            if line == '': break
            if line.find('Running main') != -1: continue
            match = re_test.match(line)
            if match:
                current_test = match.group(1)

            match = re_testcase.match(line)
            if match and current_test:
                testcases.append('%s%s' % (current_test, match.group(1)))
        return testcases
Example #52
def test():
    import sys
    base = ''
    if sys.argv[1:]:
        fn = sys.argv[1]
        if fn == '-':
            fp = sys.stdin
        else:
            fp = open(fn)
    else:
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        fp = StringIO(test_input)
    while 1:
        line = fp.readline()
        if not line: break
        words = line.split()
        if not words:
            continue
        url = words[0]
        parts = urlparse(url)
        print '%-10s : %s' % (url, parts)
        abs = urljoin(base, url)
        if not base:
            base = abs
        wrapped = '<URL:%s>' % abs
        print '%-10s = %s' % (url, wrapped)
        if len(words) == 3 and words[1] == '=':
            if wrapped != words[2]:
                print 'EXPECTED', words[2], '!!!!!!!!!!'
Example #53
class TokenScanner(object):
    """
    The scanner reads a gherkin doc (typically read from a `.feature` file) and creates a token for
    each line.

    The tokens are passed to the parser, which outputs an AST (Abstract Syntax Tree).

    If the scanner sees a `#` language header, it will reconfigure itself dynamically to look for
    Gherkin keywords for the associated language. The keywords are defined in
    :file:`gherkin-languages.json`.
    """

    def __init__(self, path_or_str):
        if isinstance(path_or_str, str):
            if os.path.exists(path_or_str):
                self.io = io.open(path_or_str, 'rU', encoding='utf8')
            else:
                self.io = StringIO(path_or_str)
        self.line_number = 0

    def read(self):
        self.line_number += 1
        location = {'line': self.line_number}
        line = self.io.readline()
        return Token((GherkinLine(line, self.line_number) if line else line), location)

    def __del__(self):
        # close file descriptor if it's still open
        try:
            self.io.close()
        except AttributeError:
            pass
Example #54
def extract_data(state, district, type):
    url = "%sdata/csv/%s/%s/%s/%s/%s" % (base, state, district, type, MIN_YEAR,
                                         MAX_YEAR)
    data = scraperwiki.scrape(url)
    f = StringIO(data)
    f.readline()  # blank first line
    f.readline()  # header row
    reader = csv.reader(f, dialect=csv.excel_tab)
    tempdata = {}
    for row in reader:
        year = row[0]
        for month in range(1, 13):
            value = row[month]
            key = '%s%02d' % (year, month)
            tempdata[key] = value
    return tempdata
Example #55
class SubclassableCStringIO(object):
    """A wrapper around cStringIO to allow for subclassing"""
    __csio = None
    def __init__(self, *a, **kw):
        from cStringIO import StringIO
        self.__csio = StringIO(*a, **kw)
    def __iter__(self):
        return self.__csio.__iter__()
    def next(self):
        return self.__csio.next()
    def close(self):
        return self.__csio.close()
    def isatty(self):
        return self.__csio.isatty()
    def seek(self, pos, mode=0):
        return self.__csio.seek(pos, mode)
    def tell(self):
        return self.__csio.tell()
    def read(self, n=-1):
        return self.__csio.read(n)
    def readline(self, length=None):
        return self.__csio.readline(length)
    def readlines(self, sizehint=0):
        return self.__csio.readlines(sizehint)
    def truncate(self, size=None):
        return self.__csio.truncate(size)
    def write(self, s):
        return self.__csio.write(s)
    def writelines(self, list):
        return self.__csio.writelines(list)
    def flush(self):
        return self.__csio.flush()
    def getvalue(self):
        return self.__csio.getvalue()
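
A hedged usage sketch (Python 2 only, since the wrapper relies on cStringIO):

# sketch only
s = SubclassableCStringIO('abc\ndef\n')
print(s.read(4))       # 'abc\n'
print(s.readlines())   # ['def\n']
print(s.getvalue())    # 'abc\ndef\n'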
Example #56
def test():
    import sys
    base = ''
    if sys.argv[1:]:
        fn = sys.argv[1]
        if fn == '-':
            fp = sys.stdin
        else:
            fp = open(fn)
    else:
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        fp = StringIO(test_input)
    while 1:
        line = fp.readline()
        if not line: break
        words = line.split()
        if not words:
            continue
        url = words[0]
        parts = urlparse(url)
        print '%-10s : %s' % (url, parts)
        abs = urljoin(base, url)
        if not base:
            base = abs
        wrapped = '<URL:%s>' % abs
        print '%-10s = %s' % (url, wrapped)
        if len(words) == 3 and words[1] == '=':
            if wrapped != words[2]:
                print 'EXPECTED', words[2], '!!!!!!!!!!'
def process(msg):
    if msg.get_type() <> 'multipart/mixed':
        return None
    # Find the first subpart, which has no MIME type
    try:
        subpart = msg.get_payload(0)
    except IndexError:
        # The message *looked* like a multipart but wasn't
        return None
    data = subpart.get_payload()
    if isinstance(data, ListType):
        # The message is a multi-multipart, so not a matching bounce
        return None
    body = StringIO(data)
    state = 0
    addrs = []
    while 1:
        line = body.readline()
        if not line:
            break
        if state == 0:
            if scre.search(line):
                state = 1
        if state == 1:
            if '@' in line:
                addrs.append(line)
    return addrs