Example #1
0
class UserEnvFile(object):
    """
    Behaves like a file object, but instead of being directly mapped to a file
    it writes to that file from inside of a UserEnv.

    Data is buffered in an in-memory StringIO; on close() the accumulated
    contents are flushed to ``filename`` inside the wrapped UserEnv.
    """

    def __init__(self, userenv, filename):
        # In-memory buffer standing in for the real file until close().
        self.stringio = StringIO()
        self.userenv = userenv
        self.filename = filename

    def read(self, *args, **kwargs):
        # BUG FIX: the delegating wrappers previously dropped the return
        # value, so read()/readlines()/write()/... always returned None.
        return self.stringio.read(*args, **kwargs)

    def readlines(self, *args, **kwargs):
        return self.stringio.readlines(*args, **kwargs)

    def write(self, *args, **kwargs):
        return self.stringio.write(*args, **kwargs)

    def writelines(self, *args, **kwargs):
        return self.stringio.writelines(*args, **kwargs)

    def seek(self, *args, **kwargs):
        return self.stringio.seek(*args, **kwargs)

    def close(self):
        """Flush the buffered data into the UserEnv-backed file and release
        the buffer.  The object must not be used after close()."""
        self.userenv.write_string_to_file(self.stringio.getvalue(), self.filename)
        self.userenv = None
        self.stringio.close()
Example #2
0
    def test_tree_status_specific_files(self):
        """Tests branch status with given specific files"""
        wt = self.make_branch_and_tree('.')
        b = wt.branch

        # Only 'directory' and 'test.c' get versioned; bye.c and dir2/ stay
        # unknown, as does directory/hello.c (adding a dir is not recursive).
        self.build_tree(
            ['directory/', 'directory/hello.c', 'bye.c', 'test.c', 'dir2/'])
        wt.add('directory')
        wt.add('test.c')
        wt.commit('testing')

        # Unrestricted status lists every unknown path (long and short form).
        self.assertStatus(
            ['unknown:\n', '  bye.c\n', '  dir2/\n', '  directory/hello.c\n'],
            wt)

        self.assertStatus(
            ['?   bye.c\n', '?   dir2/\n', '?   directory/hello.c\n'],
            wt,
            short=True)

        # Requesting a path that exists nowhere must raise PathsDoNotExist.
        tof = StringIO()
        self.assertRaises(errors.PathsDoNotExist,
                          show_tree_status,
                          wt,
                          specific_files=['bye.c', 'test.c', 'absent.c'],
                          to_file=tof)

        # Status restricted to 'directory' only reports paths under it.
        # (assertEquals is a deprecated alias; use assertEqual.)
        tof = StringIO()
        show_tree_status(wt, specific_files=['directory'], to_file=tof)
        tof.seek(0)
        self.assertEqual(tof.readlines(),
                         ['unknown:\n', '  directory/hello.c\n'])
        tof = StringIO()
        show_tree_status(wt,
                         specific_files=['directory'],
                         to_file=tof,
                         short=True)
        tof.seek(0)
        self.assertEqual(tof.readlines(), ['?   directory/hello.c\n'])

        # An unversioned directory given explicitly shows up itself.
        tof = StringIO()
        show_tree_status(wt, specific_files=['dir2'], to_file=tof)
        tof.seek(0)
        self.assertEqual(tof.readlines(), ['unknown:\n', '  dir2/\n'])
        tof = StringIO()
        show_tree_status(wt, specific_files=['dir2'], to_file=tof, short=True)
        tof.seek(0)
        self.assertEqual(tof.readlines(), ['?   dir2/\n'])

        # Across revisions 0..1, test.c appears as newly added ('+N').
        tof = StringIO()
        revs = [RevisionSpec.from_string('0'), RevisionSpec.from_string('1')]
        show_tree_status(wt,
                         specific_files=['test.c'],
                         to_file=tof,
                         short=True,
                         revision=revs)
        tof.seek(0)
        self.assertEqual(tof.readlines(), ['+N  test.c\n'])
Example #3
0
def header_line_numbers(idl, filename):
    """Map each production to its line number in the header.

    See xpidl.header.print_header for more information. This method is essentially a copy,
    with fd.write calls replaced by line increments.
    """
    def number_lines(text):
        return len(text.splitlines())

    production_map = {}
    line = number_lines(header % {
        'filename': filename,
        'basename': idl_basename(filename)
    }) + 1

    foundinc = False
    for inc in idl.includes():
        if not foundinc:
            foundinc = True
            line += 1
        line += number_lines(include %
                             {'basename': idl_basename(inc.filename)})

    if idl.needsJSTypes():
        line += number_lines(jsvalue_include)

    # Include some extra files if any attributes are infallible.
    for iface in [p for p in idl.productions if p.kind == 'interface']:
        for attr in [m for m in iface.members if isinstance(m, Attribute)]:
            if attr.infallible:
                line += number_lines(infallible_includes)
                break

    line += number_lines(header_end) + 1

    for p in idl.productions:
        production_map[p] = line
        if p.kind == 'cdata':
            line += number_lines(p.data)
        elif p.kind == 'forward':
            line += number_lines(forward_decl % {'name': p.name})
        elif p.kind == 'interface':
            # write_interface inserts a blank line at the start.
            production_map[p] += 1
            # Eh....
            fd = StringIO()
            write_interface(p, fd)
            # BUG FIX: after writing, fd's position is at EOF, so
            # fd.readlines() always returned [] and every interface counted
            # as zero lines.  Count the buffer's full contents instead.
            line += number_lines(fd.getvalue())
        elif p.kind == 'typedef':
            fd = StringIO()
            printComments(fd, p.doccomments, '')
            # Same EOF-position bug as above: count the whole buffer.
            line += number_lines(fd.getvalue())
            line += number_lines("typedef %s %s;\n\n" %
                                 (p.realtype.nativeType('in'), p.name))

    return production_map
Example #4
0
def header_line_numbers(idl, filename):
    """Map each production to its line number in the header.

    See xpidl.header.print_header for more information. This method is essentially a copy,
    with fd.write calls replaced by line increments.
    """

    def number_lines(text):
        return len(text.splitlines())

    production_map = {}
    line = number_lines(header % {'filename': filename,
                                  'basename': idl_basename(filename)}) + 1

    foundinc = False
    for inc in idl.includes():
        if not foundinc:
            foundinc = True
            line += 1
        line += number_lines(include % {'basename': idl_basename(inc.filename)})

    if idl.needsJSTypes():
        line += number_lines(jsvalue_include)

    # Include some extra files if any attributes are infallible.
    for iface in [p for p in idl.productions if p.kind == 'interface']:
        for attr in [m for m in iface.members if isinstance(m, Attribute)]:
            if attr.infallible:
                line += number_lines(infallible_includes)
                break

    line += number_lines(header_end) + 1

    for p in idl.productions:
        production_map[p] = line
        if p.kind == 'cdata':
            line += number_lines(p.data)
        elif p.kind == 'forward':
            line += number_lines(forward_decl % {'name': p.name})
        elif p.kind == 'interface':
            # write_interface inserts a blank line at the start.
            production_map[p] += 1
            # Eh....
            fd = StringIO()
            write_interface(p, fd)
            # BUG FIX: after writing, fd's position is at EOF, so
            # fd.readlines() always returned [] and every interface counted
            # as zero lines.  Count the buffer's full contents instead.
            line += number_lines(fd.getvalue())
        elif p.kind == 'typedef':
            fd = StringIO()
            printComments(fd, p.doccomments, '')
            # Same EOF-position bug as above: count the whole buffer.
            line += number_lines(fd.getvalue())
            line += number_lines("typedef %s %s;\n\n" % (p.realtype.nativeType('in'),
                                                         p.name))

    return production_map
Example #5
0
def main():
    global WIDTH
    global HEIGHT
    global PAGESIZE
    parser = OptionParser()
    parser.add_option("-o",
                      "--output",
                      dest="output",
                      default="allocations-map.png")
    parser.add_option("--debug", dest="debug", action="store_true")
    parser.add_option("--entry", dest="entry")
    opts, args = parser.parse_args()

    if len(args) != 1:
        die(USAGE)

    # Create an image, parse the output using a regexp.
    # Accumulate per node information to decide which are the nodes
    # to be highlighted and which not.
    # Split large allocations in pages of PAGESIZE bytes and
    # add contributions to each page accordingly.
    # The allocations list contains elements of the following form:
    #
    # node, symbol, pos, size
    #
    data = numpy.zeros(WIDTH * HEIGHT, numpy.uint32)
    print "Reading file %s" % args[0]
    f = file(args[0], "r")
    allocations = []
    datafile = f.read()
    try:
        atIndex = datafile.index("@")
        allocationsFile = StringIO(datafile[:atIndex])
        stackTraceFile = StringIO(datafile[atIndex:])
    except:
        allocationsFile = StringIO(datafile)
        stackTraceFile = None

    print "Done reading"
    allocations = [allocationInfo(l) for l in allocationsFile.readlines()]
    if stackTraceFile:
        [parseStacktrace(l) for l in stackTraceFile.readlines()]
    # Map which keeps track of the total amount of memory allocated per node.
    nodeAllocations = dict((info[0], []) for info in allocations)

    print "Painting"
    paintData(data, allocations, nodeAllocations)
    accumulateData(allocations, nodeAllocations)

    print "Saving image"
    saveImage(data, nodeAllocations, opts)
Example #6
0
    def test_printers(self):
        """Verify print_msg/error routing, %-formatting and thread cleanup."""
        out_stream = StringIO()
        err_stream = StringIO()

        with mt.MultiThreadingManager(
                print_stream=out_stream,
                error_stream=err_stream) as thread_manager:

            # Sanity-checking these gives power to the previous test which
            # looked at the default values of thread_manager.print/error_stream
            self.assertEqual(out_stream, thread_manager.print_stream)
            self.assertEqual(err_stream, thread_manager.error_stream)

            # The manager adds exactly two threads (asserted here; presumably
            # one printer per stream -- confirm against MultiThreadingManager).
            self.assertEqual(self.starting_thread_count + 2,
                             threading.active_count())

            # Exercise plain messages, %-interpolation, unicode arguments and
            # multi-line messages on both the print and the error path.
            thread_manager.print_msg('one-argument')
            thread_manager.print_msg('one %s, %d fish', 'fish', 88)
            thread_manager.error('I have %d problems, but a %s is not one', 99,
                                 u'\u062A\u062A')
            thread_manager.print_msg('some\n%s\nover the %r', 'where',
                                     u'\u062A\u062A')
            thread_manager.error('one-error-argument')
            thread_manager.error('Sometimes\n%.1f%% just\ndoes not\nwork!',
                                 3.14159)

        # After leaving the context the extra threads must be gone.
        self.assertEqual(self.starting_thread_count, threading.active_count())

        # Messages arrive on the print stream in submission order; %r of a
        # unicode arg renders the u'...' escape form (Python 2).
        out_stream.seek(0)
        self.assertEqual([
            'one-argument\n',
            'one fish, 88 fish\n',
            'some\n',
            'where\n',
            "over the u'\\u062a\\u062a'\n",
        ], list(out_stream.readlines()))

        # Error output: the unicode message is utf-8 encoded; multi-line
        # messages are split into one stream line each.
        err_stream.seek(0)
        self.assertEqual([
            u'I have 99 problems, but a \u062A\u062A is not one\n'.encode(
                'utf8'),
            'one-error-argument\n',
            'Sometimes\n',
            '3.1% just\n',
            'does not\n',
            'work!\n',
        ], list(err_stream.readlines()))

        # Three error() calls were made above.
        self.assertEqual(3, thread_manager.error_count)
Example #7
0
def main():
  global WIDTH
  global HEIGHT
  global PAGESIZE
  parser = OptionParser()
  parser.add_option("-o", "--output", dest="output", 
                    default="allocations-map.png")
  parser.add_option("--debug", dest="debug", action="store_true")
  parser.add_option("--entry", dest="entry")
  opts, args = parser.parse_args()

  if len(args) != 1:
    die(USAGE)

  # Create an image, parse the output using a regexp.
  # Accumulate per node information to decide which are the nodes
  # to be highlighted and which not.
  # Split large allocations in pages of PAGESIZE bytes and
  # add contributions to each page accordingly.
  # The allocations list contains elements of the following form:
  #
  # node, symbol, pos, size
  #
  data = numpy.zeros(WIDTH*HEIGHT, numpy.uint32)
  print "Reading file %s" % args[0]
  f = file(args[0], "r")
  allocations = []
  datafile = f.read()
  try:
    atIndex = datafile.index("@")
    allocationsFile = StringIO(datafile[:atIndex])
    stackTraceFile = StringIO(datafile[atIndex:])
  except:
    allocationsFile = StringIO(datafile)
    stackTraceFile = None

  print "Done reading"
  allocations = [allocationInfo(l) for l in allocationsFile.readlines()]
  if stackTraceFile:
    [parseStacktrace(l) for l in stackTraceFile.readlines()]
  # Map which keeps track of the total amount of memory allocated per node.
  nodeAllocations = dict((info[0], []) for info in allocations)

  print "Painting"
  paintData(data, allocations, nodeAllocations)
  accumulateData(allocations, nodeAllocations)
  
  print "Saving image"
  saveImage(data, nodeAllocations, opts)
Example #8
0
def targetswitcher(refinement_machine,targetdir,targets,substitutions):

    if refinement_machine:
        name = "TargetSwitcher-rm"
        print "Generating TargetSwitcher-rm: %s" % (name)
        type_param = "rm"
    else:
        name="TargetSwitcher"
        print "Generating TargetSwitcher: %s" % (name)
        type_param = "am"

    handle = StringIO()
    generatetaskswitcher.main(targetdir,type_param,None,targets,handle)
    handle.flush()
    handle.seek(0)
    taskswitcher = handle.readlines()
    handle.close()

    outputfile=os.path.join(targetdir,"%s.lsts" % name)
    with open(outputfile,'w') as handle:
        for line in taskswitcher:
            for subs in substitutions:
                line = subs[0].sub(subs[1],line)
            handle.write(line)

    return name
Example #9
0
class SubclassableCStringIO(object):
    """A wrapper around cStringIO to allow for subclassing.

    cStringIO.StringIO objects are C types that cannot be subclassed, so an
    instance is held privately and the whole file protocol is forwarded to
    it, giving Python subclasses a place to hook in.
    """

    __backing = None

    def __init__(self, *a, **kw):
        # Imported lazily so merely importing this module does not require
        # the C extension.
        from cStringIO import StringIO
        self.__backing = StringIO(*a, **kw)

    def __iter__(self):
        return self.__backing.__iter__()

    def next(self):
        return self.__backing.next()

    def close(self):
        return self.__backing.close()

    def isatty(self):
        return self.__backing.isatty()

    def seek(self, pos, mode=0):
        return self.__backing.seek(pos, mode)

    def tell(self):
        return self.__backing.tell()

    def read(self, n=-1):
        return self.__backing.read(n)

    def readline(self, length=None):
        return self.__backing.readline(length)

    def readlines(self, sizehint=0):
        return self.__backing.readlines(sizehint)

    def truncate(self, size=None):
        return self.__backing.truncate(size)

    def write(self, s):
        return self.__backing.write(s)

    def writelines(self, list):
        return self.__backing.writelines(list)

    def flush(self):
        return self.__backing.flush()

    def getvalue(self):
        return self.__backing.getvalue()
Example #10
0
def ReadTorrentIntoBuffer():
  """Decode the torrent file backing the current vim buffer and replace the
  buffer contents with an editable INI view of its tracker list."""
  global bencode
  buffer = vim.current.buffer

  torrent = open(buffer.name,'r')
  bencode = bdec(torrent.read())
  torrent.close()

  config_file=StringIO()
  config = ConfigParser.RawConfigParser()
  config.add_section('torrent')
  # dict.has_key() is deprecated (and removed in Python 3); use `in`.
  if 'announce-list' in bencode:
    # announce-list is a list of tiers; separate tiers with blank lines and
    # drop the trailing blank.
    tr_list=[]
    for tier in bencode['announce-list']:
      for tracker in tier:
        tr_list.append(tracker)
      tr_list.append('')
    del tr_list[-1]
    config.set('torrent', 'trackers', '\n'+'\n'.join(tr_list))
  else:
    config.set('torrent', 'trackers', '\n'+bencode['announce'])
  config.write(config_file)
  config_file.seek(0)

  buffer.append(config_file.readlines())
  config_file.close()
  vim.command(':retab')
  # Drop the leading blank line if the rendered config left one.
  if not buffer[0]:
    del buffer[0]
Example #11
0
def get_playlist_with_signed_url(playlist_path):
    """Fetch an HLS playlist from S3 and rewrite its segment entries as
    CloudFront signed URLs.

    playlist_path -- key of the playlist object in the playlist bucket.
    Returns the rewritten playlist as one newline-joined string.
    Raises Exception when the key does not exist in the bucket.
    """
    # Get playlist file from S3
    s3_conn = connect_s3(settings.ACCESS_KEY_ID, settings.SECRET_ACCESS_KEY)
    bucket = s3_conn.get_bucket(settings.PLAYLIST_BUCKET_NAME)
    key = bucket.get_key(playlist_path)

    if key is None:
        # BUG FIX: the message used to interpolate `key`, which is always
        # None on this branch; report the path that was looked up instead.
        raise Exception("No such key was found. key={}".format(playlist_path))

    fp = StringIO()
    key.get_contents_to_file(fp)
    fp.seek(0)

    # Convert with signed url
    cf_conn = CloudFrontConnection(settings.ACCESS_KEY_ID, settings.SECRET_ACCESS_KEY)
    dist = Distribution(cf_conn)
    expire_time = int(time.time() + 60 * 60)  # 60 mins

    outlines = []
    for line in fp.readlines():
        line = line.rstrip()
        matchObj = re.search(TS_PATTERN, line)
        if matchObj is not None:
            # Segment line: build the absolute CloudFront URL and sign it.
            file_name = matchObj.group()
            url = os.path.join(os.path.dirname(os.path.join(settings.CLOUDFRONT_URL_PREFIX, playlist_path)), file_name)
            signed_url = dist.create_signed_url(url, settings.CLOUDFRONT_KEYPAIR_ID, expire_time, private_key_file=settings.CLOUDFRONT_PRIVATE_KEY_FILE_LOCATION)
            outlines.append(signed_url)
        else:
            # Non-segment lines (headers, tags) pass through unchanged.
            outlines.append(line)
    fp.close()
    return '\n'.join(outlines)
Example #12
0
def _stream_helper(s):
    """Ensure external callers interact with the config object in a
    consistent way.  (Docstring translated from Chinese.)

    Yields a single ConfigX built from the stripped, non-blank lines of
    ``s``; the backing stream is closed once the generator is resumed
    after the yield.
    """
    # Renamed from `io`, which shadowed the stdlib io module.
    stream = StringIO(s)
    yield ConfigX((x.strip() for x in stream.readlines() if x.strip()))
    stream.close()
Example #13
0
 def testXReadLines(self):
     # "Test lzma.LZMAFile.xreadlines()"
     self.createTempFile()
     # Expected lines come from splitting the known plaintext.
     expected = StringIO(self.TEXT).readlines()
     lzmaf = lzma.LZMAFile(self.filename)
     self.assertEqual(list(lzmaf.xreadlines()), expected)
     lzmaf.close()
Example #14
0
 def testXReadLines(self):
     # "Test BZ2File.xreadlines()"
     self.createTempFile()
     # Expected lines come from splitting the known plaintext.
     expected = StringIO(self.TEXT).readlines()
     bz2f = BZ2File(self.filename)
     self.assertEqual(list(bz2f.xreadlines()), expected)
     bz2f.close()
Example #15
0
def fix_file(filename, opts, output=sys.stdout):
    """Fix PEP8 violations in `filename`, iterating up to opts.pep8_passes
    times, then emit a diff, rewrite in place, or print the fixed source.
    """
    tmp_source = read_from_filename(filename)
    fix = FixPEP8(filename, opts, contents=tmp_source)
    fixed_source = fix.fix()
    original_source = copy.copy(fix.original_source)
    tmp_filename = filename
    for _ in range(opts.pep8_passes):
        if fixed_source == tmp_source:
            break  # fixed point reached; further passes would be no-ops
        tmp_source = copy.copy(fixed_source)
        if not pep8:
            # BUG FIX: mkstemp() returns (fd, path); the fd was dropped
            # before, leaking one OS file descriptor per pass.
            fd, tmp_filename = tempfile.mkstemp()
            os.close(fd)
            fp = open(tmp_filename, 'w')
            fp.write(fixed_source)
            fp.close()
        fix = FixPEP8(tmp_filename, opts, contents=tmp_source)
        fixed_source = fix.fix()
        if not pep8:
            os.remove(tmp_filename)
    del tmp_filename
    del tmp_source

    if opts.diff:
        new = StringIO(''.join(fix.source))
        new = new.readlines()
        output.write(_get_difftext(original_source, new, filename))
    elif opts.in_place:
        fp = open_with_encoding(filename,
                                encoding=detect_encoding(filename),
                                mode='w')
        fp.write(fixed_source)
        fp.close()
    else:
        output.write(fixed_source)
Example #16
0
 def __init__(self, filename, options, contents=None):
     """Load the source to be fixed.

     filename -- path of the file being fixed (also used for reporting).
     options  -- parsed command-line options object.
     contents -- optional source text; when given, the file is not read.
     """
     self.filename = filename
     if contents is None:
         self.source = read_from_filename(filename, readlines=True)
     else:
         # Split the provided text into newline-terminated lines.
         sio = StringIO(contents)
         self.source = sio.readlines()
     # Keep a pristine copy so a diff can be produced later.
     self.original_source = copy.copy(self.source)
     self.newline = _find_newline(self.source)
     self.options = options
     self.indent_word = _get_indentword("".join(self.source))
     # method definition
     # Several pep8 codes share one fixer; alias them to the shared method.
     self.fix_e111 = self.fix_e101
     self.fix_e202 = self.fix_e201
     self.fix_e203 = self.fix_e201
     self.fix_e211 = self.fix_e201
     self.fix_e221 = self.fix_e271
     self.fix_e222 = self.fix_e271
     self.fix_e223 = self.fix_e271
     self.fix_e241 = self.fix_e271
     self.fix_e242 = self.fix_e224
     self.fix_e261 = self.fix_e262
     self.fix_e272 = self.fix_e271
     self.fix_e273 = self.fix_e271
     self.fix_e274 = self.fix_e271
     self.fix_w191 = self.fix_e101
Example #17
0
def opal_formater(input_file):
    try:
        from cStringIO import StringIO
    except:
        from StringIO import StringIO
    import re
    # Calls the pdf_to_csv function and receives the opal report data. Separator is ',', threshold is the size of each column
    csv_raw_data = pdf_to_csv(input_file, separator, threshold)
    # StringIO reads the long string from the pdf_to_csv and allow read lines from it as if it was a file
    csv_data = StringIO(csv_raw_data)
    for csv_raw_line in csv_data.readlines():
        # readlines is stupid and adds unnecessary new-line escapes (\n), so we'll rstrip it to avoid blank lines
        csv_line = csv_raw_line.rstrip()
        if len(csv_line) > 3 and csv_line[0].isdigit() and csv_line[1].isdigit():
            # print csv_line #Worked!
            ###regex the lines that starts with any date format, I guess(??)
            date_regex = '^(3[01]|[12][0-9]|0?[1-9])/(1[0-2]|0?[1-9])/(?:[0-9]{2})?[0-9]{2}'
            if re.match (date_regex, csv_line):
                # if matches, we'll split the lines and add two "," in order do shift the line two rows to de right on excel
                separated_csv_line = csv_line.split(',')
                complete_csv_line = ''
                for separated_item in separated_csv_line:
                    complete_csv_line = complete_csv_line+","+separated_item+","
                csv_line = complete_csv_line
            # Print the lines that doesn't starts with a date
            print csv_line
Example #18
0
    def readData(self):
        """Read all unit data from file, return number loaded"""

        # UNIT_DATA is the embedded unit-definition text; treat it as a file.
        f = StringIO(UNIT_DATA)
        lines = f.readlines()
        for i in range(len(lines)):     # join continuation lines
            delta = 1
            # A trailing backslash continues an entry on the next line; fold
            # each continuation into lines[i] and blank the consumed line.
            # NOTE(review): assumes a continuation never occurs on the last
            # line, otherwise lines[i+delta] raises IndexError -- confirm.
            while lines[i].rstrip().endswith('\\'):
                lines[i] = u''.join([lines[i].rstrip()[:-1], lines[i+delta]])
                lines[i+delta] = u''
                delta += 1

        units = [UnitAtom(line) for line in lines if line.split('#', 1)[0].strip()]   # remove comment lines

        # Bracketed entries like "[heading]" mark type headings, not units;
        # tag every following unit with the current heading.
        typeText = ''
        for unit in units:               # find & set headings
            if unit.name.startswith('['):
                typeText = unit.name[1:-1].strip()
                self.typeList.append(typeText)

            unit.typeName = typeText

        units = [unit for unit in units if unit.equiv]  # keep valid units
        for unit in units:
            # Index units by lowercased, space-free name for lookups.
            self[unit.name.lower().replace(' ', '')] = unit

        # Python 2: dict.keys() returns a list, sortable in place.
        self.sortedKeys = self.keys()
        self.sortedKeys.sort()
        # Fewer keys than units means two units collapsed onto one key.
        if len(self.sortedKeys) < len(units):
            raise UnitDataError('Duplicate unit names found %s != %s' % (len(self.sortedKeys), len(units)))

        return len(units)
def make_data_uri(img):
	"Convert an Image to a base64 URI"
	buf = StringIO()
	img.save(buf, 'png')
	# getvalue() returns the whole buffer regardless of position, and the
	# 'base64' codec's newlines are stripped before embedding.
	encoded = buf.getvalue().encode('base64').translate(None, '\n')
	return "data:image/png;base64,%s" % encoded
Example #20
0
 def testIterator(self):
     # "Test iter(BZ2File)"
     self.createTempFile()
     # Expected lines come from splitting the known plaintext.
     expected = StringIO(self.TEXT).readlines()
     bz2f = BZ2File(self.filename)
     self.assertEqual(list(iter(bz2f)), expected)
     bz2f.close()
Example #21
0
 def readlines(self):
     """Return an iterator that produces newline-terminated lines,
     excluding header chunks."""
     assert not self._isNewStyle, "not available in new-style steps"
     # Concatenate all stdout text, then split it back into lines via a
     # file-like wrapper so newline handling matches file semantics.
     text = "".join(self.getChunks([STDOUT], onlyText=True))
     return StringIO(text).readlines()
Example #22
0
def GetUnloadSymbolCommand(EnclaveFile, Base):
    """Build the debugger command that unloads an enclave's symbol file.

    EnclaveFile -- path to the enclave ELF image.
    Base        -- enclave load address as a decimal string.
    Returns the 'remove-symbol-file' command string, -1 on failure, or
    None when no .text section is found (preserving the old contract).
    """
    text = readelf.ReadElf(EnclaveFile)
    if text is None:  # `== None` replaced by the identity check
        return -1
    SegsFile = StringIO(text)

    try:
        FileList = SegsFile.readlines()
        # Parse the readelf output file to extract the section names and
        # their offsets and add the Proj base address.
        for line in FileList:
            fields = line.split()  # renamed from `list` (shadowed builtin)
            if (len(fields) > 0):
                SegOffset = -1
                # The readelf will put a space after the open bracket for single
                # digit section numbers.  This causes the line.split to create
                # an extra element in the array for these lines.
                # Raw strings avoid invalid-escape warnings; the len guard
                # fixes an IndexError on one-token lines that the old bare
                # except silently converted into a full parse failure.
                if (re.match(r'\[\s*[0-9]+\]', fields[0])):
                    SegOffset = 0
                elif (len(fields) > 1 and re.match(r'\s*[0-9]+\]', fields[1])):
                    SegOffset = 1

                if (SegOffset != -1):
                    if (fields[SegOffset + 1][0] == '.'):
                        # If it is the .text section, get the .text start address and plus enclave start address
                        if (fields[SegOffset + 1].find(".text") != -1):
                            return "remove-symbol-file -a " + str(
                                int(fields[SegOffset + 3], 16) + int(Base, 10))

    except Exception:  # narrowed from a bare except
        print("Error parsing enclave file.  Check format of file.")
        return -1
Example #23
0
def GetUnloadSymbolCommand(EnclaveFile, Base):
    """Build the debugger command that unloads an enclave's symbol file.

    EnclaveFile -- path to the enclave ELF image.
    Base        -- enclave load address as a decimal string.
    Returns the 'remove-symbol-file' command string, -1 on failure, or
    None when no .text section is found (preserving the old contract).
    """
    text = readelf.ReadElf(EnclaveFile)
    if text is None:  # `== None` replaced by the identity check
        return -1
    SegsFile = StringIO(text)

    try:
        FileList = SegsFile.readlines()
        # Parse the readelf output file to extract the section names and
        # their offsets and add the Proj base address.
        for line in FileList:
            fields = line.split()  # renamed from `list` (shadowed builtin)
            if (len(fields) > 0):
                SegOffset = -1
                # The readelf will put a space after the open bracket for single
                # digit section numbers.  This causes the line.split to create
                # an extra element in the array for these lines.
                # Raw strings avoid invalid-escape warnings; the len guard
                # fixes an IndexError on one-token lines that the old bare
                # except silently converted into a full parse failure.
                # (Stray semicolons removed throughout.)
                if (re.match(r'\[\s*[0-9]+\]', fields[0])):
                    SegOffset = 0
                elif (len(fields) > 1 and re.match(r'\s*[0-9]+\]', fields[1])):
                    SegOffset = 1

                if (SegOffset != -1):
                    if (fields[SegOffset + 1][0] == '.'):
                        # If it is the .text section, get the .text start address and plus enclave start address
                        if (fields[SegOffset + 1].find(".text") != -1):
                            return "remove-symbol-file -a " + str(
                                int(fields[SegOffset + 3], 16) + int(Base, 10))

    except Exception:  # narrowed from a bare except
        print ("Error parsing enclave file.  Check format of file.")
        return -1
Example #24
0
def ReadTorrentIntoBuffer():
    """Decode the torrent file backing the current vim buffer and replace
    the buffer contents with an editable INI view of its tracker list."""
    global bencode
    buffer = vim.current.buffer

    torrent = open(buffer.name, 'r')
    bencode = bdec(torrent.read())
    torrent.close()

    config_file = StringIO()
    config = ConfigParser.RawConfigParser()
    config.add_section('torrent')
    # dict.has_key() is deprecated (and removed in Python 3); use `in`.
    if 'announce-list' in bencode:
        # announce-list is a list of tiers; separate tiers with blank lines
        # and drop the trailing blank.
        tr_list = []
        for tier in bencode['announce-list']:
            for tracker in tier:
                tr_list.append(tracker)
            tr_list.append('')
        del tr_list[-1]
        config.set('torrent', 'trackers', '\n' + '\n'.join(tr_list))
    else:
        config.set('torrent', 'trackers', '\n' + bencode['announce'])
    config.write(config_file)
    config_file.seek(0)

    buffer.append(config_file.readlines())
    config_file.close()
    vim.command(':retab')
    # Drop the leading blank line if the rendered config left one.
    if not buffer[0]:
        del buffer[0]
Example #25
0
 def testIterator(self):
     # "Test iter(lzma.LZMAFile)"
     self.createTempFile()
     # Expected lines come from splitting the known plaintext.
     expected = StringIO(self.TEXT).readlines()
     lzmaf = lzma.LZMAFile(self.filename)
     self.assertEqual(list(iter(lzmaf)), expected)
     lzmaf.close()
def main():
    """tool main"""
    parser = OptionParser(version="autopep8: %s" % __version__, description=__doc__)
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="print to verbose result.")
    parser.add_option("-d", "--diff", action="store_true", dest="diff", help="diff print of fixed source.")
    parser.add_option("-p", "--pep8-passes", default=5, type="int", help="maximum number of additional pep8 passes")
    opts, args = parser.parse_args()
    if not len(args):
        print parser.format_help()
        return 1
    filename = args[0]
    original_filename = filename
    tmp_source = open(filename).read()
    fix = FixPEP8(filename, opts)
    fixed_source = fix.fix()
    original_source = copy.copy(fix.original_source)
    for cnt in range(opts.pep8_passes):
        if fixed_source == tmp_source:
            break
        tmp_source = copy.copy(fixed_source)
        filename = tempfile.mkstemp()[1]
        fp = open(filename, "w")
        fp.write(fixed_source)
        fp.close()
        fix = FixPEP8(filename, opts)
        fixed_source = fix.fix()
        os.remove(filename)
    if opts.diff:
        new = StringIO("".join(fix.source))
        new = new.readlines()
        print _get_difftext(original_source, new, original_filename),
    else:
        print fixed_source,
Example #27
0
	def __init__(self,comment,name):
		"""Parse a docstring-style comment block plus a method signature.

		comment -- raw comment text, possibly containing ":param" and
		           ":rtype:" tags, one per line.
		name    -- the method's declaration line (e.g. "def foo(x):").
		Populates self.data with the body text, parameter tuples, return
		types and the extracted method name.
		"""
		comment = StringIO(comment)
		self.data = {
			"body": "",
			"returns": [],
			"params": [],
			"name": ""
		}
		for line in comment.readlines():
			if(line.lstrip() and line.lstrip()[0] == ":"): # This is a tag
				tag = line.lstrip().split(" ")[0][1:]
				if tag == "rtype:":
					# ":rtype: <type>" -- keep everything after the tag,
					# minus the trailing newline.
					self.data["returns"].append(" ".join(line.lstrip().split(" ")[1:])[:-1])
				elif tag == "param":
					# ":param <type> <name> <description...>"
					splitted = line.lstrip().split(" ")
					pName = splitted[2]
					pType = splitted[1]
					description = " ".join(splitted[3:])[:-1]
					self.data["params"].append((pName,pType,description))
			else: # It's part of the body then.
				self.data["body"] += line
		comment.close()

		# Chop up the name
		# NOTE(review): the "__" branch implies dunder declarations carry
		# two leading tokens before the name -- confirm against callers.
		methodName = name.lstrip().split(" ")
		try:
			if methodName[0][:2] == "__":
				methodName = methodName[2].split("(")[0]
			else:
				methodName = methodName[1].split("(")[0]
		except Exception:
			print methodName
		self.data["name"] = methodName
Example #28
0
    def get_commits(self, repository, start, end=None, full=None):
        """Return formatted log lines for a revision range of a branch.

        repository -- branch nickname (matched case-insensitively); falsy
                      selects a default branch (see below).
        start      -- first revision; defaults to the branch tip.
        end        -- last revision; defaults to `start` (single revision).
        full       -- passed through to LogFormatter.
        Returns the list of log lines reversed from the formatter's output
        order, or None when the named repository is unknown.
        """
        branch = None
        if repository:
            repository = repository.lower()
            if repository not in self.branches:
                return None
            branch = self.branches[repository]

        if not branch:
            if len(self.branches) == 1:
                # Only one branch registered: use it unconditionally.
                (repository, branch) = self.branches.items()[0]
            else:
                # Pick the branch whose last revision has the most recent
                # commit timestamp.  (Python 2 only: tuple-unpacking lambda.)
                (repository,
                 branch) = sorted(self.branches.iteritems(),
                                  reverse=True,
                                  key=lambda (k, v): v.repository.get_revision(
                                      v.last_revision_info()[1]).timestamp)[0]

        if not start:
            start = branch.revision_id_to_revno(branch.last_revision())

        # Render the log into an in-memory file, then split into lines.
        f = StringIO()
        log.show_log(branch,
                     LogFormatter(f, repository, branch, full),
                     start_revision=start,
                     end_revision=end or start)
        f.seek(0)
        commits = f.readlines()
        commits.reverse()
        return commits
Example #29
0
 def readlines(self):
     """Return an iterator that produces newline-terminated lines,
     excluding header chunks."""
     assert not self._isNewStyle, "not available in new-style steps"
     # Concatenate all stdout text, then split it back into lines via a
     # file-like wrapper so newline handling matches file semantics.
     text = "".join(self.getChunks([STDOUT], onlyText=True))
     return StringIO(text).readlines()
Example #30
0
 def testReadLines(self):
     # "Test BZ2File.readlines()"
     self.createTempFile()
     with BZ2File(self.filename) as bz2f:
         # A non-int size hint must be rejected.
         self.assertRaises(TypeError, bz2f.readlines, None)
         expected = StringIO(self.TEXT).readlines()
         self.assertEqual(bz2f.readlines(), expected)
Example #31
0
 def testIterator(self):
     # "Test iter(BZ2File)"
     self.createTempFile()
     bz2f = BZ2File(self.filename)
     # Iterating the compressed file must yield the plaintext's lines.
     reference = StringIO(self.TEXT)
     self.assertEqual(list(iter(bz2f)), reference.readlines())
     bz2f.close()
Example #32
0
 def __init__(self, filename, options, contents=None):
     """Load the source to be fixed.

     filename -- path of the file being fixed (also used for reporting).
     options  -- parsed command-line options object.
     contents -- optional source text; when given, the file is not read.
     """
     self.filename = filename
     if contents is None:
         self.source = read_from_filename(filename, readlines=True)
     else:
         # Split the provided text into newline-terminated lines.
         sio = StringIO(contents)
         self.source = sio.readlines()
     # Keep a pristine copy so a diff can be produced later.
     self.original_source = copy.copy(self.source)
     self.newline = _find_newline(self.source)
     self.options = options
     self.indent_word = _get_indentword("".join(self.source))
     # method definition
     # Several pep8 codes share one fixer; alias them to the shared method.
     self.fix_e111 = self.fix_e101
     self.fix_e202 = self.fix_e201
     self.fix_e203 = self.fix_e201
     self.fix_e211 = self.fix_e201
     self.fix_e221 = self.fix_e271
     self.fix_e222 = self.fix_e271
     self.fix_e223 = self.fix_e271
     self.fix_e241 = self.fix_e271
     self.fix_e242 = self.fix_e224
     self.fix_e261 = self.fix_e262
     self.fix_e272 = self.fix_e271
     self.fix_e273 = self.fix_e271
     self.fix_e274 = self.fix_e271
     self.fix_w191 = self.fix_e101
Example #33
0
    def execute(self):
        """
        Read a bootstrap's fixture and create the corresponding model
        instances based on it.

        Raises Exception with loaddata's error output when loading fails.
        """
        BootstrapModel.check_for_data()
        handle, base_path = tempfile.mkstemp()
        # Just need the filepath, close the file description
        os.close(handle)

        filepath = os.path.extsep.join([base_path, self.get_extension()])

        try:
            with open(filepath, 'w') as file_handle:
                file_handle.write(self.cleaned_fixture)

            content = StringIO()
            management.call_command(COMMAND_LOADDATA,
                                    filepath,
                                    verbosity=0,
                                    stderr=content)
            # loaddata reports problems on stderr; any output means failure.
            content.seek(0, os.SEEK_END)
            if content.tell():
                content.seek(0)
                # NOTE(review): [-2] presumably picks the last meaningful
                # error line -- confirm against loaddata's stderr format.
                raise Exception(content.readlines()[-2])
        finally:
            # BUG FIX: previously the suffixed file leaked whenever loaddata
            # failed, and the bare mkstemp file (base_path) always leaked.
            for path in (filepath, base_path):
                if os.path.exists(path):
                    os.unlink(path)
Example #34
0
    def _write_pdb_to_stringio(self,
                               cys_cys_atomidx_set=None,
                               disulfide_conect=True,
                               noter=False,
                               **kwargs):
        """Write ``self.parm`` as PDB text and post-process the records.

        Parameters
        ----------
        cys_cys_atomidx_set : iterable of (int, int), optional
            0-based atom index pairs of disulfide bridges; required when
            ``disulfide_conect`` is True.
        disulfide_conect : bool
            Insert CONECT records for the disulfide pairs before END.
        noter : bool
            Drop all TER records from the output.
        **kwargs
            Forwarded to ``self.parm.write_pdb``.

        Returns
        -------
        StringIO
            Rewound buffer holding the processed PDB text.
        """
        pdb_buffer = StringIO()
        self.parm.write_pdb(pdb_buffer, **kwargs)
        pdb_buffer.seek(0)
        lines = pdb_buffer.readlines()

        # One-pass filter: the original removed items from `lines` while
        # iterating it, which skips consecutive TER records, and it ran
        # the same (broken) filter twice.
        if noter:
            lines = [line for line in lines if not line.startswith("TER")]

        # TODO: update ParmEd?
        if disulfide_conect:
            conect_record = [
                'CONECT%5d%5d\n' % (idx0 + 1, idx1 + 1)
                for (idx0, idx1) in cys_cys_atomidx_set
            ]
            # Assumes the final line is the END record: replace it with
            # the CONECT block followed by a fresh END.
            lines[-1] = ''.join(conect_record) + 'END\n'

        out_buffer = StringIO()
        out_buffer.writelines(lines)
        out_buffer.seek(0)
        return out_buffer
Example #35
0
 def testIterator(self):
     """Iterating an LZMAFile yields the same lines as the source text."""
     self.createTempFile()
     lzmaf = lzma.LZMAFile(self.filename)
     expected = StringIO(self.TEXT).readlines()
     self.assertEqual(list(iter(lzmaf)), expected)
     lzmaf.close()
Example #36
0
 def testXReadLines(self):
     """lzma.LZMAFile.xreadlines() produces every line of the text."""
     self.createTempFile()
     lzmaf = lzma.LZMAFile(self.filename)
     expected = StringIO(self.TEXT).readlines()
     self.assertEqual(list(lzmaf.xreadlines()), expected)
     lzmaf.close()
Example #37
0
	def toRST(self):
		"""Render this method's metadata as a reStructuredText snippet."""
		parts = ["**" + self.data["name"] + "**\n\n"]

		# Prefix every body line with "| " (RST line-block syntax).
		buf = StringIO(self.data["body"])
		body = "".join("| " + line for line in buf.readlines())
		buf.close()

		# Trim the final two characters of the joined body (matching the
		# original formatting) and close the paragraph.
		parts.append(body[:-2] + "\n\n")

		if self.data["params"]:
			parts.append("   :Parameters:\n")
			for param in self.data["params"]:
				parts.append("      " + param[0][:-1] + " (" + param[1] + "): " + param[2] + "\n")

		if self.data["returns"]:
			parts.append("   :returns:\n")
			for rtype in self.data["returns"]:
				parts.append("      " + rtype)

		return "".join(parts)
Example #38
0
 def test_simple(self):
     """write_commit_patch produces a git-am style patch for a commit."""
     # Build a minimal commit with fixed author/committer and timestamps
     # so the patch output is deterministic.
     f = StringIO()
     c = Commit()
     c.committer = c.author = "Jelmer <*****@*****.**>"
     c.commit_time = c.author_time = 1271350201
     c.commit_timezone = c.author_timezone = 0
     c.message = "This is the first line\nAnd this is the second line.\n"
     c.tree = Tree().id
     write_commit_patch(f, c, "CONTENTS", (1, 1), version="custom")
     f.seek(0)
     lines = f.readlines()
     # Header: "From <sha>", the author line, then a Date: stamp.
     self.assertTrue(lines[0].startswith("From 0b0d34d1b5b596c928adc9a727a4b9e03d025298"))
     self.assertEquals(lines[1], "From: Jelmer <*****@*****.**>\n")
     self.assertTrue(lines[2].startswith("Date: "))
     # Subject carries the first message line; the rest of the message
     # follows before the "---" separator.
     self.assertEquals([
         "Subject: [PATCH 1/1] This is the first line\n",
         "And this is the second line.\n",
         "\n",
         "\n",
         "---\n"], lines[3:8])
     # Trailer: the supplied contents plus the custom version string.
     self.assertEquals([
         "CONTENTS-- \n",
         "custom\n"], lines[-2:])
     if len(lines) >= 12:
         # diffstat may not be present
         self.assertEquals(lines[8], " 0 files changed\n")
Example #39
0
def targetswitcher(refinement_machine, targetdir, targets, substitutions):
    """Generate a TargetSwitcher LSTS file into *targetdir*.

    Parameters:
        refinement_machine: truthy selects the "-rm" (refinement machine)
            variant, otherwise the action-machine variant.
        targetdir: directory that receives "<name>.lsts".
        targets: forwarded to generatetaskswitcher.main.
        substitutions: iterable of (compiled_regex, replacement) pairs
            applied line by line to the generated output.

    Returns:
        The generated switcher's base name.
    """
    if refinement_machine:
        name = "TargetSwitcher-rm"
        type_param = "rm"
    else:
        name = "TargetSwitcher"
        type_param = "am"
    # Single-argument print() form: identical output in Python 2 and 3
    # (the original used Python-2-only print statements).
    print("Generating %s: %s" % (name, name))

    handle = StringIO()
    generatetaskswitcher.main(targetdir, type_param, None, targets, handle)
    handle.flush()
    handle.seek(0)
    taskswitcher = handle.readlines()
    handle.close()

    outputfile = os.path.join(targetdir, "%s.lsts" % name)
    with open(outputfile, 'w') as handle:
        for line in taskswitcher:
            # Apply every regex substitution to each generated line.
            for subs in substitutions:
                line = subs[0].sub(subs[1], line)
            handle.write(line)

    return name
Example #40
0
def fix_file(filename, opts, output=sys.stdout):
    """Run FixPEP8 over *filename* repeatedly until its output stabilizes.

    Depending on opts.diff / opts.in_place, the result is written as a
    unified diff, back into the file, or to *output*.
    """
    tmp_source = read_from_filename(filename)
    fix = FixPEP8(filename, opts, contents=tmp_source)
    fixed_source = fix.fix()
    original_source = copy.copy(fix.original_source)
    tmp_filename = filename
    for _ in range(opts.pep8_passes):
        # Stop as soon as a pass makes no further changes.
        if fixed_source == tmp_source:
            break
        tmp_source = copy.copy(fixed_source)
        if not pep8:
            # Without the pep8 module, checks must run against a real file.
            tmp_filename = tempfile.mkstemp()[1]
            with open(tmp_filename, 'w') as fp:
                fp.write(fixed_source)
        fix = FixPEP8(tmp_filename, opts, contents=tmp_source)
        fixed_source = fix.fix()
        if not pep8:
            os.remove(tmp_filename)
    del tmp_filename
    del tmp_source

    if opts.diff:
        new_lines = StringIO(''.join(fix.source)).readlines()
        output.write(_get_difftext(original_source, new_lines, filename))
    elif opts.in_place:
        fp = open_with_encoding(filename, encoding=detect_encoding(filename),
                                mode='w')
        fp.write(fixed_source)
        fp.close()
    else:
        output.write(fixed_source)
Example #41
0
class ReadableDocument(BaseDocument):
    """Read-only document backed by an in-memory buffer.

    Every read primitive optionally decodes the raw data through the
    base class's _decode() before returning it.
    """

    implements(IReadableDocument)

    def __init__(self, data, mime_type, encoding=None):
        BaseDocument.__init__(self, mime_type, encoding)
        self._data = StringIO(data)

    def read(self, size=-1, decode=True):
        raw = self._data.read(size)
        if decode:
            return self._decode(raw)
        return raw

    def readline(self, size=-1, decode=True):
        raw = self._data.readline(size)
        if decode:
            return self._decode(raw)
        return raw

    def readlines(self, sizehint=-1, decode=True):
        raw_lines = self._data.readlines(sizehint)
        if not decode:
            return raw_lines
        return [self._decode(line) for line in raw_lines]

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; iterated lines are always decoded.
        return self._decode(self._data.next())
Example #42
0
	def run(self):
		"""Walk self.top_dir and collect per-module size/complexity metrics.

		For each permitted source file this records function parameter
		counts ('function_info'), fan-in/fan-out, physical/source/blank/
		comment line counts and a cyclomatic estimate, while accumulating
		the corresponding self.*_total counters.
		"""
		for root, dirs, files in os.walk(self.top_dir):
			if not self.path_permitted(root):
				continue
			for name in files:
				if self.include_filename(name):
					path = join(root, name)
					# Fresh metrics record for this module.
					module_info = {
							'function_info': {},
							'fanin': 0,
							'fanout': 0,
							'ploc': 0,
							'sloc': 0,
							'bloc': 0,
							'cloc': 0,
							'cyclomatic': 0,
							}
					self.module_list[strip_prefix(self.top_dir, path)] = \
							module_info
					self.nom += 1

					f = open(path)
					data = f.read()
					f.close()

					# find functions and count their parameters
					for match in re.findall(self.function_rx, data):
						func_name = match[0]
						# NOTE(review): counts comma-separated params, so a
						# zero-argument function still yields 1 — confirm
						# whether that is intended.
						func_params = len(match[1].split(','))
						module_info['function_info'][func_name] = func_params
						module_info['fanin'] += func_params
						self.fanin_total += func_params

					# find and count return statements
					module_info['fanout'] = \
							len(re.findall(self.return_rx, data))
					self.fanout_total += module_info['fanout']

					# count different kinds of loc and cyclomatic complexity
					# (comment_rx first normalizes comments so that a
					# comment-only line compares equal to self.comment)
					loc_data = self.comment_rx.sub(self._comment_sub, data)
					sio = StringIO(loc_data)
					for line in sio.readlines():
						module_info['ploc'] += 1
						if len(line) == 0 or line.isspace():
							module_info['bloc'] += 1
						elif line == self.comment:
							module_info['cloc'] += 1
						else:
							module_info['sloc'] += 1

						# Each function definition or decision point adds
						# one to the cyclomatic estimate.
						if self.function_rx.search(line) or \
								self.decision_rx.search(line):
									module_info['cyclomatic'] += 1

					self.ploc_total += module_info['ploc']
					self.bloc_total += module_info['bloc']
					self.cloc_total += module_info['cloc']
					self.sloc_total += module_info['sloc']
					self.cyclomatic_total += module_info['cyclomatic']
Example #43
0
def GetLoadSymbolCommand(EnclaveFile, Base):
    """Build the LLDB commands that load an enclave's sections.

    Parses `readelf` output for *EnclaveFile*, offsets each relevant
    section address by the decimal base address *Base* and returns a
    "target modules add"/"target modules load" command string, or -1
    when the file cannot be read, has no .text section, or fails to
    parse.
    """
    text = readelf.ReadElf(EnclaveFile)
    if text is None:
        return -1

    try:
        # (section name, absolute address as decimal string); a growing
        # list replaces the original fixed 100-slot array, which could
        # overflow on large enclaves.
        sections = []
        text_section_found = False
        for line in StringIO(text).readlines():
            fields = line.split()
            if not fields:
                continue
            # readelf puts a space after '[' for single-digit section
            # numbers, shifting the split fields by one.  Guard the
            # fields[1] access (the original could raise IndexError and
            # bail out via its bare except).
            if re.match(r'\[\s*[0-9]+\]', fields[0]):
                seg_offset = 0
            elif len(fields) > 1 and re.match(r'\s*[0-9]+\]', fields[1]):
                seg_offset = 1
            else:
                continue
            name = fields[seg_offset + 1]
            if name[0] != '.':
                continue
            address = int(fields[seg_offset + 3], 16)
            if name.find(".text") != -1:
                text_section_found = True
                sections.append((name, str(address + int(Base, 10))))
            elif name == ".tdata":
                continue
            elif address != 0:
                sections.append((name, str(address + int(Base, 10))))
        if text_section_found:
            # Register the file, then place each section at its address.
            lldbcmd = "target modules add " + EnclaveFile
            lldbcmd += "\n"
            lldbcmd += "target modules load --file " + EnclaveFile
            for name, address in sections:
                lldbcmd += " " + name + " " + '%(Location)#08x' % {
                    'Location': int(address)
                }
            return lldbcmd
        else:
            return -1
    except Exception:
        # Parsing is best-effort; malformed readelf output yields -1.
        print("Error parsing enclave file.  Check format of file.")
        return -1
Example #44
0
 def testReadLines(self):
     """lzma.LZMAFile.readlines() matches the reference StringIO output."""
     self.createTempFile()
     lzmaf = lzma.LZMAFile(self.filename)
     # A None size-hint must raise TypeError, as with built-in files.
     self.assertRaises(TypeError, lzmaf.readlines, None)
     self.assertEqual(lzmaf.readlines(), StringIO(self.TEXT).readlines())
     lzmaf.close()
Example #45
0
 def testReadLines(self):
     """readlines() on an LZMAFile equals readlines() on the raw text."""
     self.createTempFile()
     reference = StringIO(self.TEXT).readlines()
     lzmaf = lzma.LZMAFile(self.filename)
     # None is not an acceptable size-hint.
     self.assertRaises(TypeError, lzmaf.readlines, None)
     self.assertEqual(lzmaf.readlines(), reference)
     lzmaf.close()
Example #46
0
def build_stopwords(language, encoding="utf8"):
    """Parse language.stopwords into a list of unicode stop words.

    Each line may carry a trailing "|"-delimited comment; only the text
    before the first "|" is kept, stripped and decoded. Lines that end
    up empty are dropped.
    """
    # Renamed buffer: the original local was `file`, shadowing the
    # Python 2 builtin of the same name.
    buf = StringIO(language.stopwords)
    stopwords = []
    for line in buf.readlines():
        word = unicode(line.strip().split("|")[0].strip(), encoding)
        if word:
            stopwords.append(word)
    return stopwords
Example #47
0
def test_write_words_underscore():
    """Spaces in option names are written back as underscores."""
    cfg = conf.CephConf()
    cfg.add_section("foo")
    cfg.set("foo", "bar thud quux", "baz")
    f = StringIO()
    cfg.write(f)
    # seek(0) instead of reset(): reset() exists only on the old
    # cStringIO objects, while seek(0) also works on io.StringIO.
    f.seek(0)
    assert f.readlines() == ["[foo]\n", "bar_thud_quux = baz\n", "\n"]
Example #48
0
 def testReadLine(self):
     """Each readline() call returns the next line of the original text."""
     self.createTempFile()
     bz2f = BZ2File(self.filename)
     for expected in StringIO(self.TEXT).readlines():
         self.assertEqual(bz2f.readline(), expected)
     bz2f.close()
Example #49
0
def execute(in_file,
            xscale=config.xscale,
            yscale=config.yscale):
    """
    Execute pstoedit returning the generated HPGL to the calling function.
    """
    # Arguments to pass to pstoedit
    args = [config.pstoedit,
            "-f",
            config.pstoedit_format,
            "-dt", # Draw the text rather than assume cutter can handle text
            "-pta", # Precision text spacing (spaces handled gracefully)
            "-xscale",
            "%s" % xscale,
            "-yscale",
            "%s" % yscale,
            "-",
            ]

    # Run pstoedit, feeding it the input document on stdin.
    process = subprocess.Popen(args, cwd=config.tmp_dir,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               stdin=subprocess.PIPE)
    stdoutdata, stderrdata = process.communicate(in_file.read())
    process.wait()

    if process.returncode == 0:
        # Success: the generated HPGL text is on stdout.
        return stdoutdata

    # Failure: log both output streams line by line, then abort.
    log.crit("pstoedit failed during execution with returncode = %s."
             % process.returncode)
    log.debug("Standard output for pstoedit was:")
    for line in StringIO(stdoutdata).readlines():
        log.debug(line)
    log.debug("Standard error for pstoedit was:")
    for line in StringIO(stderrdata).readlines():
        log.debug(line)
    sys.exit(1)
Example #50
0
 def create_graph_from_string(self, locked_string):
     """Parse a lock-file string and return (graph, top_level)."""
     line_iter = iter(StringIO(locked_string).readlines())
     try:
         self.parse(line_iter)
     except StopIteration:
         # The parser signals end-of-input by exhausting the iterator.
         pass
     return self.graph, self.top_level
Example #51
0
def get_cmp_asm_lines(asm):
    """Normalize each line of *asm* through get_cmp_asm and rejoin them.

    The original pre-bound the global with ``get_cmp_asm = get_cmp_asm``,
    which makes the name local for the whole function and raises
    UnboundLocalError on that very line; the self-assignment is replaced
    by a differently-named local alias.
    """
    normalize = get_cmp_asm  # hoist the global lookup out of the loop
    lines = []
    for line in StringIO(asm).readlines():
        lines.append(normalize(line.strip("\n")))
    return "\n".join(lines)
Example #52
0
def test_write_words_underscore():
    """Spaces in option names become underscores in the written config."""
    cfg = conf.ceph.CephConf()
    cfg.add_section('foo')
    cfg.set('foo', 'bar thud quux', 'baz')
    buf = StringIO()
    cfg.write(buf)
    buf.seek(0)
    expected = ['[foo]\n', 'bar_thud_quux = baz\n', '\n']
    assert buf.readlines() == expected
def test_write_words_underscore():
    """Spaces in option names are written out as underscores."""
    cfg = conf.ceph.CephConf()
    cfg.add_section('foo')
    cfg.set('foo', 'bar thud quux', 'baz')
    f = StringIO()
    cfg.write(f)
    # Rewind with seek(0): reset() is specific to Python 2's cStringIO
    # and does not exist on io.StringIO.
    f.seek(0)
    assert f.readlines() == ['[foo]\n', 'bar_thud_quux = baz\n', '\n']
Example #54
0
def build_stopwords(language, encoding="utf8"):
    """Return the unicode stop words parsed from language.stopwords.

    Lines may carry a trailing "|"-delimited comment; only the text
    before the first "|" is kept, stripped and decoded, and empty
    results are skipped.
    """
    # `buf` instead of `file`: the original shadowed the Python 2
    # builtin `file`.
    buf = StringIO(language.stopwords)
    stopwords = []
    for line in buf.readlines():
        word = unicode(line.strip().split("|")[0].strip(), encoding)
        if word:
            stopwords.append(word)
    return stopwords
Example #55
0
def has_socket_wrapper(bindir):
    """Check if Samba has been built with socket wrapper support.

    Runs "smbd -b" and scans its build-options output for the
    SOCKET_WRAPPER flag.
    """
    # subprocess needs a real file descriptor for stdout; the original
    # passed a StringIO (no fileno(), so check_call raises) and then
    # read it back without rewinding, which would yield nothing anyway.
    output = subprocess.check_output(
        [os.path.join(bindir, "smbd"), "-b"],
        universal_newlines=True)
    for line in output.splitlines():
        if "SOCKET_WRAPPER" in line:
            return True
    return False