Example #1
    def test_do_mission_correctly_with_old_filenames(self):
        orig_response = self.client.get(
            reverse(views.diffrecursive_get_original_tarball))
        tfile = tarfile.open(
            fileobj=StringIO(orig_response.content), mode='r:gz')
        diff = StringIO()
        for fileinfo in tfile:
            if not fileinfo.isfile():
                continue

            # calculate the old name
            transformed_name = view_helpers.DiffRecursiveMission.name_new2old(
                fileinfo.name)

            oldlines = tfile.extractfile(fileinfo).readlines()
            newlines = []
            for line in oldlines:
                for old, new in view_helpers.DiffRecursiveMission.SUBSTITUTIONS:
                    line = line.replace(old, new)
                newlines.append(line)

            diff.writelines(difflib.unified_diff(oldlines, newlines,
                                                 'orig-' + transformed_name,
                                                 transformed_name))

        diff.seek(0)
        diff.name = 'foo.patch'
        submit_response = self.client.post(
            reverse(views.diffrecursive_submit), {'diff': diff})
        self.assert_(submit_response.context['diffrecursive_success'])

        paulproteus = Person.objects.get(user__username='******')
        self.assertEqual(len(StepCompletion.objects.filter(
            step__name='diffpatch_diffrecursive', person=paulproteus)), 1)
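The core pattern in Examples #1 and #3 can be exercised on its own: apply (old, new) substitutions line by line, then hand both line lists to difflib.unified_diff. A minimal sketch with made-up sample data (the mission's real SUBSTITUTIONS live in view_helpers):

import difflib

oldlines = ['Hello aubergine\n']
newlines = [line.replace('aubergine', 'eggplant') for line in oldlines]
print ''.join(difflib.unified_diff(oldlines, newlines, 'orig-a.txt', 'a.txt'))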
Example #2
def write_htaccess(lines):
    if not env.path:
        env.path = ''
    htaccess = StringIO()
    htaccess.writelines(lines)
    htaccess.seek(0)
    put(remote_path='%s/.htaccess' % env.path, local_path=htaccess, use_sudo=use_sudo())
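A possible call site for the helper above, assuming Fabric's env and put as imported by the original module, plus its module-level use_sudo() helper; the htaccess lines are illustrative:

from fabric.api import env

env.path = '/var/www/example'
write_htaccess(['Options -Indexes\n',
                'Require all granted\n'])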
Example #3
    def test_do_mission_incorrectly_revdiff(self):
        orig_response = self.client.get(
            reverse(views.diffrecursive_get_original_tarball))
        tfile = tarfile.open(
            fileobj=StringIO(orig_response.content), mode='r:gz')
        diff = StringIO()
        for fileinfo in tfile:
            if not fileinfo.isfile():
                continue
            oldlines = tfile.extractfile(fileinfo).readlines()
            newlines = []
            for line in oldlines:
                for old, new in view_helpers.DiffRecursiveMission.SUBSTITUTIONS:
                    line = line.replace(old, new)
                newlines.append(line)

            # This is very similar to test_do_mission_correctly, but here we
            # swap newlines and oldlines to create a reverse patch.
            diff.writelines(
                difflib.unified_diff(newlines, oldlines, 'orig-' + fileinfo.name, fileinfo.name))
        diff.seek(0)
        diff.name = 'foo.patch'

        # Submit, and see if we get the same error message we expect.
        error = self.client.post(
            reverse(views.diffrecursive_submit), {'diff': diff})
        self.assert_(
            'You submitted a patch that would revert the correct changes back to the originals.  You may have mixed the parameters for diff, or performed a reverse patch.' in utf8(error))
        paulproteus = Person.objects.get(user__username='******')
        self.assertEqual(len(StepCompletion.objects.filter(
            step__name='diffpatch_diffrecursive', person=paulproteus)), 0)
Example #4
def readFileContent(path):
    buf = StringIO()
    f = open(path, 'r')
    lines = f.readlines()
    buf.writelines(lines)
    f.close()
    return buf.getvalue().strip()
Example #5
def arches_version():
    import os
    import subprocess
    from StringIO import StringIO
    import archesproject.settings as settings

    sb = StringIO()
    ver = ''
    try:
        hg_archival = open(os.path.join(settings.ROOT_DIR.replace('archesproject', ''), '.hg_archival.txt'), 'r')
        the_file = hg_archival.readlines()
        hg_archival.close()
        node = ''
        latesttag = ''
        for line in the_file:
            if line.startswith('node:'):
                node = line.split(':')[1].strip()[:12]
            if line.startswith('latesttag:'):
                latesttag = line.split(':')[1].strip()

        ver = '%s.%s' % (latesttag, node)
        settings.ARCHES_VERSION = ver
        sb.writelines(['__VERSION__="%s"' % ver])
        WriteToFile(os.path.join(settings.ROOT_DIR, 'version.py'), sb.getvalue(), 'w')
    except Exception:
        try:
            ver = subprocess.check_output(['hg', 'log', '-r', '.', '--template', '{latesttag}.{node|short}'])
            settings.ARCHES_VERSION = ver
            sb.writelines(['__VERSION__="%s"' % ver])
            WriteToFile(os.path.join(settings.ROOT_DIR, 'version.py'), sb.getvalue(), 'w')
        except Exception:
            ver = settings.ARCHES_VERSION
    return ver
Example #6
def correct_INCA_format(fp):
    fp_list = list()
    fp.seek(0)
    if '(' in fp.readline():
        for line in fp:
            line = line.replace(
                "(MLX::",
                "").replace(
                " : ",
                "\t").replace(
                " :",
                "\t").replace(
                " ",
                "\t").lower().strip().replace(
                ")",
                "\n")
            if "record-by" in line:
                if "image" in line:
                    line = "record-by\timage"
                if "vector" in line:
                    line = "record-by\tvector"
                if "dont-care" in line:
                    line = "record-by\tdont-care"
            fp_list.append(line)
        fp = StringIO()
        fp.writelines(fp_list)
    fp.seek(0)
    return fp
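A quick way to exercise correct_INCA_format, with a made-up two-line INCA header (StringIO here is the Python 2 class used throughout these examples):

from StringIO import StringIO

raw = StringIO("(MLX::Header)\n(MLX::Record-by : Image)\n")
fixed = correct_INCA_format(raw)
print fixed.read()   # record-by<TAB>image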
Example #7
    def test_invalid_row_is_skipped(self):
        f = StringIO()
        f.writelines(["a,b"])
        f.seek(0)

        logging.basicConfig(level=40)  # Errors up to 30 (WARNING) are expected

        assert [] == list(CsvReader.read_csv(f))
Example #8
def source_def(def_content, piped=False, verbose=False):
    content = StringIO()
    content.writelines([
        'import pysource\n',
        '@pysource.function(piped={})\n'.format(piped),
        'def {}'.format(def_content)])
    return source_content(content.getvalue(),
                          verbose=verbose)
Example #9
    def setUp(self):
        TestCase.setUp(self)
        out = StringIO()
        out.writelines(["Q1,373,string,Universe\n",
                        "Q1,31,wikibase-entityid,Q223557\n",
                        "Q1,31,wikibase-entityid,Q1088088\n"])
        out.seek(0)
        self.result = list(CsvReader.read_csv(out))
Example #10
    def test_unknown_type(self):
        out = StringIO()
        out.writelines(["Q1,unknown,373,string,Universe\n"])
        out.seek(0)

        logging.basicConfig(level=40)  # Errors up to 30 (WARNING) are expected

        result = list(CsvReader.read_csv(out))
        assert 'Q1' == result[0].title
Example #11
def parser_view(response):
    resp = StringIO()
    fp = open(joinpath(dirname(__file__), 'error_log-20110904'), 'r')
    for line in fp.readlines():
        resp.writelines(['|'.join(parsear(line)) + '\n'])
    fp.close()

    hresp = HttpResponse(resp.getvalue())
    hresp['Content-Type'] = 'text/plain'
    return hresp
Example #12
    def test_multiple_entities(self):
        out = StringIO()
        out.writelines(["Q1,claim,373,string,Universe\n",
                        "Q2,claim,143,wikibase-item,Q328\n"])
        out.seek(0)
        result = list(CsvReader.read_csv(out))

        self.assertThat(result, HasLength(2))
        self.assertThat(result[0].title, Equals("Q1"))
        self.assertThat(result[1].title, Equals("Q2"))
Example #13
    def test_multiple_entities(self):
        out = StringIO()
        out.writelines(["Q1,claim,373,string,Universe\n",
                        "Q2,claim,143,wikibase-item,Q328\n"])
        out.seek(0)
        result = list(CsvReader.read_csv(out))

        assert 2 == len(result)
        assert 'Q1' == result[0].title
        assert 'Q2' == result[1].title
Example #14
    def test_unused_converter(self):
        c = StringIO()
        c.writelines([asbytes("1 21\n"), asbytes("3 42\n")])
        c.seek(0)
        data = np.loadtxt(c, usecols=(1,), converters={0: lambda s: int(s, 16)})
        assert_array_equal(data, [21, 42])

        c.seek(0)
        data = np.loadtxt(c, usecols=(1,), converters={1: lambda s: int(s, 16)})
        assert_array_equal(data, [33, 66])
Example #15
def format_merge_log(merge_log):
    output = StringIO()
    output.write("== Records merged: ==\n")
    for src_rows, merged_row in merge_log.items():
        output.write("Merged:\n")
        output.writelines(map(format_row, src_rows))
        output.write("Into:\n")
        output.write(format_row(merged_row))

    return output.getvalue()
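A usage sketch for format_merge_log; the format_row below is a stand-in for the module's real helper, and the merge_log contents are made up (dict keys must be hashable, hence the tuple of source rows):

def format_row(row):
    return '  %s\n' % (row,)

merge_log = {(('a', 1), ('a', 2)): ('a', 3)}
print format_merge_log(merge_log)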
Example #16
    def test_universe(self):
        out = StringIO()
        out.writelines(["Q1,claim,373,string,Universe\n",
                        "Q1,reference,143,wikibase-item,Q328\n",
                        "Q1,claim,31,wikibase-item,Q223557\n",
                        "Q1,claim,31,wikibase-item,Q1088088\n",
                        "Q1,claim,361,wikibase-item,Q3327819\n",
                        "Q1,qualifier,31,wikibase-item,Q41719\n"])
        out.seek(0)
        result = list(CsvReader.read_csv(out))
        self.assert_universe(result)
Example #17
    def run(self):
        (br, path) = branch.Branch.open_containing(".")
        Gtk = open_display()
        from bzrlib.plugins.gtk.mergedirective import SendMergeDirectiveDialog
        from StringIO import StringIO
        dialog = SendMergeDirectiveDialog(br)
        if dialog.run() == Gtk.ResponseType.OK:
            outf = StringIO()
            outf.writelines(dialog.get_merge_directive().to_lines())
            mail_client = br.get_config().get_mail_client()
            mail_client.compose_merge_request(dialog.get_mail_to(), "[MERGE]",
                outf.getvalue())
Example #18
    def writelines(self, sequence):
        """
        A call to writelines puts the contents of the input sequence into the
        underlying StringIO object and then determines whether the curses
        screen is active. If it isn't, the output is sent to the buffer this
        object originally overwrote; otherwise each line is queued for the
        curses UI.
        """
        StringIO.writelines(self, sequence)
        if not self.running.value and self.replaces:
            self.replaces.writelines(sequence)
            self.replaces.flush()
        else:
            for line in sequence:
                self.locq.put(('debug', line))
Example #19
    def assertOutput(self, _, outp, msg=None):
        actual = sys.stdout.getvalue()

        if actual != (outp + "\n"):
            if msg is None:
                out = StringIO()
                out.write("\n")
                out.write("Expected:\n")
                out.writelines('\t%s\n'%s for s in outp.splitlines())
                out.write("Actual:\n")
                out.writelines('\t%s\n'%s for s in actual.splitlines())
                msg = out.getvalue()

            self.fail(msg)
        sys.stdout.truncate(0)
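assertOutput only works when sys.stdout has already been swapped for a StringIO, since it calls getvalue() and truncate(0) on it. A sketch of that setup (the swap is not part of the original class):

import sys
from StringIO import StringIO

sys.stdout = StringIO()
print 'hello'
# self.assertOutput(None, 'hello') would now pass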
Example #20
    def test_oops_content(self):
        self.assertEqual(0, len(self.oopses))
        self.trigger_oops()
        self.attachOopses()
        content = StringIO()
        content.writelines(self.getDetails()['oops-0'].iter_bytes())
        content.seek(0)
        # Safety net: ensure that no autocasts have occurred even on Python 2.6
        # which is slightly better.
        self.assertIsInstance(content.getvalue(), str)
        # In tests it should be rfc822 for easy reading.
        from_details = oops_datedir_repo.serializer_rfc822.read(content)
        # Compare with the in-memory model (but only a select key, because the
        # rfc822 serializer is lossy).
        oops_report = self.oopses[0]
        self.assertEqual(from_details['id'], oops_report['id'])
Example #21
    def testPatchNoLineEndInVersion1(self):
        version1 = StringIO()
        version1.writelines(["test 1\n", "test 2"])

        udiff = StringIO()
        udiff.writelines([
            "--- 1234 Sat Jan 26 23:30:50 1991\n",
            "+++ 5678 Fri Jun 06 10:20:52 2003\n",
            "@@ -1,2 +1,2 @@\n",
            " test 1\n",
            "-test 2+test 3\n",
        ])

        p = patch.fromstring(udiff.getvalue())

        self.assertEqual(0, len(p.hunks[0]))
Example #22
def errexit(errcodeobj, showtrace=True, *args):
    errcode = errcodeobj[0]
    errmsg = errcodeobj[1]
    if args:
        errmsg = errmsg % args
    if showtrace:
        errio = StringIO()
        errio.write(errmsg + "\n")
        traceio = StringIO()
        traceback.print_exc(file=traceio)
        traceio.seek(0)
        errio.writelines([INDENT + line for line in traceio.readlines()])
        errio.seek(0)
        errmsg = errio.read()
    error(errmsg)
    exit(errcode)
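errexit expects an (exit_code, message) pair; a hypothetical call, with showtrace=False so no traceback is captured:

errexit((2, "cannot parse %s"), False, "settings.ini")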
Example #23
def lambda_handler(event, context):
    s3 = boto3.resource('s3')
    for record in event['Records']:
        logger.info(record)
        if 's3' not in record:
            continue
        bucket_name = record['s3']['bucket']['name']
        name = record['s3']['object']['key']
        tmp_file = os.path.join('/tmp', os.path.basename(name))
        s3.meta.client.download_file(
            bucket_name, name, tmp_file)
        json_data = StringIO()
        with gzip.open(tmp_file, 'rb') as fin:
            json_data.writelines(fin)
        json_data.seek(0)
        jobj = json.load(json_data)
        process_event(jobj)
Example #24
  def writehunks(self, orig, hunks):
    src = StringIO(orig.data)
    tgt = StringIO()

    tgt.writelines(self.patchstream(src, hunks))

    orig.new_version(tgt.getvalue())
    if orig.path.endswith('.py'):
        mod_name = 'nomic.' + orig.path.replace('/', '.')[:-3]
        mod = sys.modules.get(mod_name)
        if mod:
            try:
                reload(mod)
            except Exception:
                pass

    return True
Example #25
class FlickrFile(object):

    """A file-like object representing a file on flickr. Caches with a StringIO object"""

    def __init__(self, imageid, name, data):
        self.imageid = imageid
        self.name = name
        self.stringio = StringIO(data)
        self.closed = False
        self.newlines = ('\r', '\n', '\r\n')
        self.flush()

    def close(self):
        self.flush()
        self.closed = True

    def _stringio_get_data(self):
        old_seek = self.stringio.tell()
        self.stringio.seek(0)
        data = self.stringio.read()
        self.stringio.seek(old_seek)
        return data

    def flush(self):
        data = self._stringio_get_data()
        with tempfile.NamedTemporaryFile() as tf:
            data_to_png(data).save(tf, 'png')
            if self.imageid:
                flickr.replace(filename=tf.name, photo_id=self.imageid, title=self.name, description=str(len(data)), format='bs4')
            else:
                self.imageid = flickr.upload(filename=tf.name, title=self.name, description=str(len(data)), format='bs4').photoid.text

    def __iter__(self):
        return self

    def next(self):
        return self.stringio.next()

    def read(self, size=-1):
        return self.stringio.read(size)

    def readline(self, size=-1):
        return self.stringio.readline(size)

    def seek(self, offset, whence=0):
        return self.stringio.seek(offset, whence)

    def tell(self):
        return self.stringio.tell()

    def truncate(self, size=0):
        return self.stringio.truncate(size)

    def write(self, data):
        return self.stringio.write(data)

    def writelines(self, seq):
        return self.stringio.writelines(seq)
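A usage sketch, assuming a configured flickr client and the data_to_png helper the class relies on: lines accumulate in the cached StringIO, and flush() re-uploads the backing image.

f = FlickrFile(None, 'notes.txt', '')
f.writelines(['line 1\n', 'line 2\n'])
f.flush()
f.close()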
Example #26
    def _calculate_correct_recursive_diff(self):
        orig_response = self.client.get(reverse(views.diffrecursive_get_original_tarball))
        tfile = tarfile.open(fileobj=StringIO(orig_response.content), mode='r:gz')
        diff = StringIO()
        for fileinfo in tfile:
            if not fileinfo.isfile():
                continue
            oldlines = tfile.extractfile(fileinfo).readlines()
            newlines = []
            for line in oldlines:
                for old, new in controllers.DiffRecursiveMission.SUBSTITUTIONS:
                    line = line.replace(old, new)
                newlines.append(line)
            diff.writelines(difflib.unified_diff(oldlines, newlines, 'orig-'+fileinfo.name, fileinfo.name))

        diff.seek(0)
        diff.name = 'foo.patch'
        return diff
Example #27
class Stream:
    def __init__(self):
        self.buffer = StringIO()

    def write(self, text):
        self.buffer.write(text)

    def writelines(self, lines):
        self.buffer.writelines(lines)

    def flush(self):
        return

    def getvalue(self):
        return self.buffer.getvalue()

    def clear(self):
        self.buffer.seek(0)
        self.buffer.truncate()
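Stream is a drop-in sink for code that expects a writable file object; a short usage example:

stream = Stream()
stream.writelines(['one\n', 'two\n'])
print stream.getvalue()   # one\ntwo\n
stream.clear()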
Example #28
class MultiPartRequestBuilder(object):
    def __init__(self):
        self.body_buffer = StringIO()
        self.boundary = "---------abcdefghijklmnop$"
        self.file_content = "default content"
        self.file_name = "default.txt"
        self.form_fields = {}

    def file(self, file_name, file_content):
        self.file_name = file_name
        self.file_content = file_content
        return self

    def form_field(self, key, value):
        self.form_fields[key] = value
        return self

    def build(self):
        self._write_form_fields()
        self._write_file()
        self._write_line("--{0}--".format(self.boundary))
        self._write_line()

        body = self.body_buffer.getvalue()
        headers = {"Content-Type": "multipart/form-data; boundary={0}".format(self.boundary)}
        return MultiPartRequest(body, headers)

    def _write_file(self):
        self._write_line("--" + self.boundary)
        self._write_line("Content-Disposition: form-data; name=\"content\"; filename=\"{0}\"".format(self.file_name))
        self._write_line("Content-Type: application/x-tar")
        self._write_line()
        self._write_line(self.file_content)

    def _write_form_fields(self):
        for key, value in self.form_fields.iteritems():
            self._write_line("--{0}".format(self.boundary))
            self._write_line("Content-Disposition: form-data; name=\"{0}\"".format(key))
            self._write_line()
            self._write_line(value)

    def _write_line(self, text=""):
        self.body_buffer.writelines("{0}\n".format(text))
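The builder is meant to be chained; a sketch of assembling a request (the field and file contents here are arbitrary, and MultiPartRequest is assumed to simply hold the body and headers it is given):

request = (MultiPartRequestBuilder()
           .form_field('author', 'me')
           .file('report.txt', 'hello world')
           .build())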
Example #29
    def testPatch(self):
        version1 = StringIO()
        version1.writelines(["test 1\n", "test 2\n"])

        udiff = StringIO()
        udiff.writelines([
            "--- 1234 Sat Jan 26 23:30:50 1991\n",
            "+++ 5678 Fri Jun 06 10:20:52 2003\n",
            "@@ -1,2 +1,2 @@\n",
            " test 1\n",
            "-test 2\n",
            "+test 3"
        ])

        p = patch.fromstring(udiff.getvalue())
        patched = p.patch_stream(version1, p.hunks[0])

        expected = "test 1\ntest 3"

        self.assertEqual(expected, "".join(list(patched)))
Example #30
    def test_universe(self):
        out = StringIO()
        out.writelines(["Q1,claim,373,string,Universe\n",
                        "Q1,reference,143,wikibase-item,Q328\n"
                        "Q1,claim,31,wikibase-item,Q223557\n",
                        "Q1,claim,31,wikibase-item,Q1088088\n",
                        "Q1,claim,361,wikibase-item,Q3327819\n",
                        "Q1,qualifier,31,wikibase-item,Q41719\n"])
        out.seek(0)
        result = list(CsvReader.read_csv(out))
        assert 1 == len(result)
        q1 = result[0]

        assert "Q1" == q1.title
        assert (Claim(Snak(373, "string", "Universe"), [],
                      [Snak(143, "wikibase-item", "Q328")]) in
                q1.claims)
        assert Claim(Snak(31, "wikibase-item", "Q223557")) in q1.claims
        assert Claim(Snak(31, "wikibase-item", "Q1088088")) in q1.claims
        assert (Claim(Snak(361, "wikibase-item", "Q3327819"),
                      [Snak(31, "wikibase-item", "Q41719")], []) in
                q1.claims)
Example #31
    def poll(self, cr, uid=1):
        """
        Poll the ADS FTP server, download a file list and iterate over the
        files, oldest first, then by _file_process_order.

        For each file, look for a child class of ads_data whose file_name_prefix field includes
        the part before the first '-' of the file name. Download the file contents and use it
        to instantiate the found class, then call process_all on it.

        Any errors caught in the process are added to the errors returned by the process_all
        function, and then written to the /errors/ directory as a .txt file, along with any
        data nodes left in self.data after processing.
        """

        _logger.info(_("Polling ADS Server..."))
        files_processed = 0

        # get connection FTP server
        with self.connection(cr) as conn:

            conn.cd(conn._vers_client)

            # get list of files and directories and remove any files that cannot be processed
            files_and_directories = conn.ls()
            files_to_process = map(lambda f: ads_file(f),
                                   files_and_directories)
            files_to_process = filter(lambda f: f.valid, files_to_process)
            files_to_process = filter(lambda f: f.to_process(),
                                      files_to_process)

            # then sort by date and add to dictionary where the key is the date so we can process
            # chronologically and with file prefix order
            files_to_process.sort(key=lambda f: f.date)
            files_by_date = AutoVivification()
            for f in files_to_process:
                if not isinstance(files_by_date[f.date], list):
                    files_by_date[f.date] = []
                files_by_date[f.date].append(f)

            try:
                # create archive and errors directory if doesn't already exist
                if 'archives' not in files_and_directories:
                    conn.mkd('archives')

                if 'errors' not in files_and_directories:
                    conn.mkd('errors')

                # process by earliest date first
                for date in sorted(files_by_date.keys()):
                    # then according to file_process_order
                    for prefix in self._file_process_order:
                        for file_to_process in [
                                f for f in files_by_date[date]
                                if f.prefix == prefix
                        ]:

                            files_processed += 1
                            file_prefix = file_to_process.prefix
                            file_name = file_to_process.file_name

                            # find ads_data subclass with matching 'type' property
                            class_for_data_type = [
                                cls for cls in ads_data.__subclasses__()
                                if file_prefix in cls.file_name_prefix
                            ]

                            if class_for_data_type:

                                # log warning if found more than one matching class
                                if len(class_for_data_type) != 1:
                                    _logger.warn(
                                        _('The following subclasses of ads_data share the file_name_prefix: %s'
                                          % class_for_data_type))

                                errors = StringIO()

                                # catch any errors not caught by data.process etc
                                try:
                                    # Download and decode the file contents
                                    file_contents = conn.download_data(
                                        file_name).decode("utf-8-sig").encode(
                                            "utf-8")

                                    # instantiate found subclass with correctly encoded file_data
                                    data = class_for_data_type[0](
                                        file_contents)

                                    # trigger process to import into OpenERP
                                    process_errors = data.process_all(
                                        self.pool, cr, conn)

                                    if process_errors:
                                        errors.writelines([
                                            line + '\n'
                                            for line in process_errors
                                        ])

                                except Exception as e:
                                    errors.writelines(['%s: %s' %
                                                       (type(e), unicode(e))])

                                finally:
                                    # archive the file we processed
                                    conn.move_to_archives(file_name)

                                    # if we have errors, create txt file containing description in /errors/*.txt
                                    if errors.getvalue():
                                        errors.seek(0)
                                        conn.mkf(file_name[0:-4] + '.txt',
                                                 errors, 'errors')

                                        # and upload remaining data (or unparsable file contents) to /errors/*.xml
                                        try:
                                            if data.data:
                                                conn.mkf(
                                                    file_name,
                                                    data.generate_xml(),
                                                    'errors')
                                        except NameError:
                                            contents = StringIO(file_contents)
                                            conn.mkf(file_name, contents,
                                                     'errors')
                                            contents.close()

                                # commit the OpenERP cursor in between files
                                errors.close()
                                cr and cr.commit()
                            else:
                                _logger.info(
                                    _("Could not find subclass of ads_data with file_name_prefix %s"
                                      % file_prefix))
                                conn.move_to_archives(file_name)
            finally:
                # check we are still connected, then navigate back a directory for any further operations
                if conn._connected:
                    conn.cd('..')
                else:
                    conn._connect()

        _logger.info(_("Processed %d files" % files_processed))
        return True
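The dispatch step described in the docstring, in isolation: the prefix before the first '-' of the file name selects the matching ads_data subclass (the file name below is made up):

prefix = 'CDOR-20130101.xml'.split('-')[0]
matching = [cls for cls in ads_data.__subclasses__()
            if prefix in cls.file_name_prefix]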
Example #32
def parseRoutine(inFile):
    """Parses a routine"""
    logger = logging.getLogger('prettify-logger')

    FCT_RE = re.compile(
        r"^([^\"'!]* )?FUNCTION\s+\w+\s*(\(.*\))?(\s*RESULT\s*\(\w+\))?\s*;?\s*$",
        re.IGNORECASE)

    SUBR_RE = re.compile(r"^([^\"'!]* )?SUBROUTINE\s+\w+\s*(\(.*\))?\s*;?\s*$",
                         re.IGNORECASE)

    endRe = re.compile(r" *end\s*(?:subroutine|function)", re.IGNORECASE)
    startRoutineRe = re.compile(
        r"^([^\"'!]* )?(?P<kind>subroutine|function) +(?P<name>[a-zA-Z_][a-zA-Z_0-9]*) *(?:\((?P<arguments>[^()]*)\))? *(?:result *\( *(?P<result>[a-zA-Z_][a-zA-Z_0-9]*) *\))? *(?:bind *\([^()]+\))? *\n?",
        re.IGNORECASE)  # $
    typeBeginRe = re.compile(
        r" *(?P<type>integer(?: *\* *[0-9]+)?|logical|character(?: *\* *[0-9]+)?|real(?: *\* *[0-9]+)?|complex(?: *\* *[0-9]+)?|type)[,( ]",
        re.IGNORECASE)
    attributeRe = re.compile(
        r" *, *(?P<attribute>[a-zA-Z_0-9]+) *(?:\( *(?P<param>(?:[^()]+|\((?:[^()]+|\([^()]*\))*\))*)\))? *",
        re.IGNORECASE)
    ignoreRe = re.compile(r" *(?:|implicit +none *)$", re.IGNORECASE)
    interfaceStartRe = re.compile(r" *interface *$", re.IGNORECASE)
    interfaceEndRe = re.compile(r" *end +interface *$", re.IGNORECASE)
    routine = {
        'preRoutine': [],
        'core': [],
        'strippedCore': [],
        'begin': [],
        'end': [],
        'preDeclComments': [],
        'declarations': [],
        'declComments': [],
        'postDeclComments': [],
        'parsedDeclarations': [],
        'postRoutine': [],
        'kind': None,
        'name': None,
        'arguments': None,
        'result': None,
        'interfaceCount': 0,
        'use': []
    }
    includeRe = re.compile(r"#? *include +[\"'](?P<file>.+)[\"'] *$",
                           re.IGNORECASE)
    stream = InputStream(inFile)
    while 1:
        (jline, _, lines) = stream.nextFortranLine()
        if len(lines) == 0:
            break
        if FCT_RE.match(jline) or SUBR_RE.match(jline):
            break
        routine['preRoutine'].extend(lines)
        m = includeRe.match(lines[0])
        if m:
            try:
                subF = open(m.group('file'), 'r')
                subStream = InputStream(subF)
                while 1:
                    (subjline, _, sublines) = subStream.nextFortranLine()
                    if not sublines:
                        break
                    routine['strippedCore'].append(subjline)
                subF.close()
            except:
                import traceback
                logger.debug("error trying to follow include " +
                             m.group('file') + '\n')
                logger.debug(
                    "warning this might lead to the removal of used variables\n"
                )
                if logger.isEnabledFor(logging.DEBUG):
                    traceback.print_exc()
    if jline:
        routine['begin'] = lines
        m = startRoutineRe.match(jline)
        if not m or m.span()[1] != len(jline):
            raise SyntaxError("unexpected subroutine start format:" +
                              repr(lines))
        routine['name'] = m.group('name')
        routine['kind'] = m.group('kind')
        if (m.group('arguments') and m.group('arguments').strip()):
            routine['arguments'] = list(
                map(lambda x: x.strip(),
                    m.group('arguments').split(",")))
        if (m.group('result')):
            routine['result'] = m.group('result')
        if (not routine['result']) and (routine['kind'].lower() == "function"):
            routine['result'] = routine['name']
    while 1:
        (jline, comment_list, lines) = stream.nextFortranLine()
        comments = '\n'.join(_ for _ in comment_list)
        if len(lines) == 0:
            break
        if lines[0].lower().startswith("#include"):
            break
        if not ignoreRe.match(jline):
            if typeBeginRe.match(jline):
                if routine['postDeclComments']:
                    routine['declComments'].extend(routine['postDeclComments'])
                routine['postDeclComments'] = []

                m = VAR_DECL_RE.match(jline)
                if not m:
                    raise SyntaxError("unexpected type format:" + repr(jline))
                if (m.group('type').lower() == 'type'
                        and not m.group('parameters')):
                    break
                if m.span()[1] != len(jline):
                    raise SyntaxError("unexpected type format:" + repr(jline))
                decl = {
                    'type': m.group("type"),
                    'parameters': None,
                    'attributes': [],
                    'vars': []
                }
                if m.group('parameters'):
                    decl['parameters'] = (m.group("parameters").replace(
                        " ", "").replace(",", ", "))
                str = m.group("attributes")
                while (str):
                    m2 = attributeRe.match(str)
                    if not m2:
                        raise SyntaxError("unexpected attribute format " +
                                          repr(str) + " in " + repr(lines))
                    decl['attributes'].append(m2.group().replace(
                        " ", "").replace(",", ", ")[2:])
                    str = str[m2.span()[1]:]
                str = m.group("vars")
                while 1:
                    m2 = VAR_RE.match(str)
                    if not m2:
                        raise SyntaxError("unexpected var format " +
                                          repr(str) + " in " + repr(lines))
                    var = m2.group("var")
                    if m2.group("param"):
                        var += "(" + m2.group("param") + ")"
                    if m2.group("value"):
                        var += " = "
                        var += m2.group("value")
                    decl['vars'].append(var)
                    str = str[m2.span()[1]:]
                    if not m2.group("continue"):
                        if str:
                            raise SyntaxError("error parsing vars (leftover=" +
                                              repr(str) + ") in " +
                                              repr(lines))
                        break
                routine['parsedDeclarations'].append(decl)
            elif interfaceStartRe.match(jline):
                istart = lines
                interfaceDeclFile = StringIO()
                while 1:
                    (jline, _, lines) = stream.nextFortranLine()
                    if interfaceEndRe.match(jline):
                        iend = lines
                        break
                    interfaceDeclFile.writelines(lines)
                interfaceDeclFile = StringIO(interfaceDeclFile.getvalue())
                iroutines = []
                while 1:
                    iroutine = parseRoutine(interfaceDeclFile)
                    if not iroutine['kind']:
                        if len(iroutines) == 0:
                            interfaceDeclFile.seek(0)
                            raise SyntaxError("error parsing interface:" +
                                              repr(interfaceDeclFile.read()))
                        iroutines[-1]['postRoutine'].extend(
                            iroutine['preRoutine'])
                        break
                    iroutines.append(iroutine)
                for iroutine in iroutines:
                    routine['interfaceCount'] += 1
                    decl = {
                        'type':
                        'z_interface%02d' % (routine['interfaceCount']),
                        'parameters': None,
                        'attributes': [],
                        'vars': [iroutine['name']],
                        'iroutine': iroutine,
                        'istart': istart,
                        'iend': iend
                    }
                    routine['parsedDeclarations'].append(decl)
            elif USE_PARSE_RE.match(jline):
                routine['use'].append("".join(lines))
            else:
                break
        routine['declarations'].append("".join(lines))
        if (len(routine['parsedDeclarations']) == 0
                and len(routine['use']) == 0
                and not re.match(" *implicit +none *$", jline, re.IGNORECASE)):
            routine['preDeclComments'].append("".join(lines))
        else:
            routine['postDeclComments'].append(comments)
    containsRe = re.compile(r" *contains *$", re.IGNORECASE)

    while len(lines) > 0:
        if endRe.match(jline):
            routine['end'] = lines
            break
        routine['strippedCore'].append(jline)
        routine['core'].append("".join(lines))
        if containsRe.match(lines[0]):
            break
        m = includeRe.match(lines[0])
        if m:
            try:
                subF = open(m.group('file'), 'r')
                subStream = InputStream(subF)
                while 1:
                    (subjline, _, sublines) = subStream.nextFortranLine()
                    if not sublines:
                        break
                    routine['strippedCore'].append(subjline)
                subF.close()
            except:
                import traceback
                logger.debug("error trying to follow include " +
                             m.group('file') + '\n')
                logger.debug(
                    "warning this might lead to the removal of used variables\n"
                )
                if logger.isEnabledFor(logging.DEBUG):
                    traceback.print_exc()
        (jline, _, lines) = stream.nextFortranLine()
    return routine
Example #33
def parseRoutine(inFile):
    """Parses a routine"""
    logger = logging.getLogger("prettify-logger")

    routine = {
        "preRoutine": [],
        "core": [],
        "strippedCore": [],
        "begin": [],
        "end": [],
        "preDeclComments": [],
        "declarations": [],
        "declComments": [],
        "postDeclComments": [],
        "parsedDeclarations": [],
        "postRoutine": [],
        "kind": None,
        "name": None,
        "arguments": None,
        "result": None,
        "interfaceCount": 0,
        "use": [],
    }
    stream = InputStream(inFile)
    while 1:
        (jline, _, lines) = stream.nextFortranLine()
        if len(lines) == 0:
            break
        if FCT_RE.match(jline) or SUBR_RE.match(jline):
            break
        routine["preRoutine"].extend(lines)
        m = INCLUDE_RE.match(lines[0])
        if m:
            try:
                subF = open(m.group("file"), "r")
                subStream = InputStream(subF)
                while 1:
                    (subjline, _, sublines) = subStream.nextFortranLine()
                    if not sublines:
                        break
                    routine["strippedCore"].append(subjline)
                subF.close()
            except:
                import traceback

                logger.debug("error trying to follow include " +
                             m.group("file") + "\n")
                logger.debug(
                    "warning this might lead to the removal of used variables\n"
                )
                if logger.isEnabledFor(logging.DEBUG):
                    traceback.print_exc()
    if jline:
        routine["begin"] = lines
        m = START_ROUTINE_RE.match(jline)
        if not m or m.span()[1] != len(jline):
            raise SyntaxError("unexpected subroutine start format:" +
                              repr(lines))
        routine["name"] = m.group("name")
        routine["kind"] = m.group("kind")
        if m.group("arguments") and m.group("arguments").strip():
            routine["arguments"] = list(
                x.strip() for x in m.group("arguments").split(","))
        if m.group("result"):
            routine["result"] = m.group("result")
        if (not routine["result"]) and (routine["kind"].lower() == "function"):
            routine["result"] = routine["name"]
    while 1:
        (jline, comment_list, lines) = stream.nextFortranLine()
        comments = "\n".join(_ for _ in comment_list)
        if len(lines) == 0:
            break
        if lines[0].lower().startswith("#include"):
            break
        if not IGNORE_RE.match(jline):
            if typeBeginRe.match(jline):
                if routine["postDeclComments"]:
                    routine["declComments"].extend(routine["postDeclComments"])
                routine["postDeclComments"] = []

                m = VAR_DECL_RE.match(jline)
                if not m:
                    raise SyntaxError("unexpected type format:" + repr(jline))
                if m.group("type").lower() == "type" and not m.group("parameters"):
                    break
                if m.span()[1] != len(jline):
                    raise SyntaxError("unexpected type format:" + repr(jline))
                decl = {
                    "type": m.group("type"),
                    "parameters": None,
                    "attributes": [],
                    "vars": [],
                }
                if m.group("parameters"):
                    decl["parameters"] = (m.group("parameters").replace(
                        " ", "").replace(",", ", "))
                str = m.group("attributes")
                while str:
                    m2 = attributeRe.match(str)
                    if not m2:
                        raise SyntaxError("unexpected attribute format " +
                                          repr(str) + " in " + repr(lines))
                    decl["attributes"].append(m2.group().replace(
                        " ", "").replace(",", ", ")[2:])
                    str = str[m2.span()[1]:]
                str = m.group("vars")
                while 1:
                    m2 = VAR_RE.match(str)
                    if not m2:
                        raise SyntaxError("unexpected var format " +
                                          repr(str) + " in " + repr(lines))
                    var = m2.group("var")
                    if m2.group("param"):
                        var += "(" + m2.group("param") + ")"
                    if m2.group("value"):
                        var += " = "
                        var += m2.group("value")
                    decl["vars"].append(var)
                    str = str[m2.span()[1]:]
                    if not m2.group("continue"):
                        if str:
                            raise SyntaxError("error parsing vars (leftover=" +
                                              repr(str) + ") in " +
                                              repr(lines))
                        break
                routine["parsedDeclarations"].append(decl)
            elif INTERFACE_START_RE.match(jline):
                istart = lines
                interfaceDeclFile = StringIO()
                while 1:
                    (jline, _, lines) = stream.nextFortranLine()
                    if INTERFACE_END_RE.match(jline):
                        iend = lines
                        break
                    interfaceDeclFile.writelines(lines)
                interfaceDeclFile = StringIO(interfaceDeclFile.getvalue())
                iroutines = []
                while 1:
                    iroutine = parseRoutine(interfaceDeclFile)
                    if not iroutine["kind"]:
                        if len(iroutines) == 0:
                            interfaceDeclFile.seek(0)
                            raise SyntaxError("error parsing interface:" +
                                              repr(interfaceDeclFile.read()))
                        iroutines[-1]["postRoutine"].extend(
                            iroutine["preRoutine"])
                        break
                    iroutines.append(iroutine)
                for iroutine in iroutines:
                    routine["interfaceCount"] += 1
                    decl = {
                        "type":
                        "z_interface%02d" % (routine["interfaceCount"]),
                        "parameters": None,
                        "attributes": [],
                        "vars": [iroutine["name"]],
                        "iroutine": iroutine,
                        "istart": istart,
                        "iend": iend,
                    }
                    routine["parsedDeclarations"].append(decl)
            elif USE_PARSE_RE.match(jline):
                routine["use"].append("".join(lines))
            else:
                break
        routine["declarations"].append("".join(lines))
        if (len(routine["parsedDeclarations"]) == 0
                and len(routine["use"]) == 0
                and not re.match(" *implicit +none *$", jline, re.IGNORECASE)):
            routine["preDeclComments"].append("".join(lines))
        else:
            routine["postDeclComments"].append(comments)

    while len(lines) > 0:
        if END_RE.match(jline):
            routine["end"] = lines
            break
        routine["strippedCore"].append(jline)
        routine["core"].append("".join(lines))
        if CONTAINS_RE.match(lines[0]):
            break
        m = INCLUDE_RE.match(lines[0])
        if m:
            try:
                subF = open(m.group("file"), "r")
                subStream = InputStream(subF)
                while 1:
                    (subjline, _, sublines) = subStream.nextFortranLine()
                    if not sublines:
                        break
                    routine["strippedCore"].append(subjline)
                subF.close()
            except:
                import traceback

                logger.debug("error trying to follow include " +
                             m.group("file") + "\n")
                logger.debug(
                    "warning this might lead to the removal of used variables\n"
                )
                if logger.isEnabledFor(logging.DEBUG):
                    traceback.print_exc()
        (jline, _, lines) = stream.nextFortranLine()
    return routine
Example #34
def cleanDeclarations(routine):
    """cleans up the declaration part of the given parsed routine
    removes unused variables"""
    logger = logging.getLogger("prettify-logger")

    global R_VAR
    if routine["core"]:
        if CONTAINS_RE.match(routine["core"][-1]):
            logger.debug(
                "routine %s contains other routines\ndeclarations not cleaned\n"
                % (routine["name"]))
            return
    nullifyRe = re.compile(r" *nullify *\(([^()]+)\) *\n?",
                           re.IGNORECASE | re.MULTILINE)

    if not routine["kind"]:
        return
    if routine["core"]:
        if re.match(" *type *[a-zA-Z_]+ *$", routine["core"][0],
                    re.IGNORECASE):
            logger.debug(
                "routine %s contains local types, not fully cleaned\n" %
                (routine["name"]))
        if re.match(" *import+ *$", routine["core"][0], re.IGNORECASE):
            logger.debug("routine %s contains import, not fully cleaned\n" %
                         (routine["name"]))
    if re.search("^#", "".join(routine["declarations"]), re.MULTILINE):
        logger.debug(
            "routine %s declarations contain preprocessor directives\ndeclarations not cleaned\n"
            % (routine["name"]))
        return
    try:
        rest = "".join(routine["strippedCore"]).lower()
        nullifys = ",".join(nullifyRe.findall(rest))
        rest = nullifyRe.sub("", rest)
        paramDecl = []
        decls = []
        for d in routine["parsedDeclarations"]:
            d["normalizedType"] = d["type"]
            if d["parameters"]:
                d["normalizedType"] += d["parameters"]
            if d["attributes"]:
                d["attributes"].sort(key=lambda x: x.lower())
                d["normalizedType"] += ", "
                d["normalizedType"] += ", ".join(d["attributes"])
            if any(a.lower() == "parameter" for a in d["attributes"]):
                paramDecl.append(d)
            else:
                decls.append(d)

        sortDeclarations(paramDecl)
        sortDeclarations(decls)
        has_routinen = 0
        pos_routinep = -1
        for d in paramDecl:
            for i in range(len(d["vars"])):
                v = d["vars"][i]
                m = VAR_RE.match(v)
                lowerV = m.group("var").lower()
                if lowerV == "routinen":
                    has_routinen = 1
                    d["vars"][i] = "routineN = '" + routine["name"] + "'"
                elif lowerV == "routinep":
                    pos_routinep = i
                    d["vars"][i] = "routineP = moduleN//':'//routineN"
            if not has_routinen and pos_routinep >= 0:
                d["vars"].insert(pos_routinep,
                                 "routineN = '" + routine["name"] + "'")

        if routine["arguments"]:
            routine["lowercaseArguments"] = list(x.lower()
                                                 for x in routine["arguments"])
        else:
            routine["lowercaseArguments"] = []
        if routine["result"]:
            routine["lowercaseArguments"].append(routine["result"].lower())
        argDeclDict = {}
        localDecl = []
        for d in decls:
            localD = {}
            localD.update(d)
            localD["vars"] = []
            argD = None
            for v in d["vars"]:
                m = VAR_RE.match(v)
                lowerV = m.group("var").lower()
                if lowerV in routine["lowercaseArguments"]:
                    argD = {}
                    argD.update(d)
                    argD["vars"] = [v]
                    if lowerV in argDeclDict.keys():
                        raise SyntaxError(
                            "multiple declarations not supported. var=" + v +
                            " declaration=" + str(d) + "routine=" +
                            routine["name"])
                    argDeclDict[lowerV] = argD
                else:
                    pos = findWord(lowerV, rest)
                    if pos != -1:
                        localD["vars"].append(v)
                    else:
                        if findWord(lowerV, nullifys) != -1:
                            if not rmNullify(lowerV, routine["core"]):
                                raise SyntaxError(
                                    "could not remove nullify of " + lowerV +
                                    " as expected, routine=" + routine["name"])
                        logger.info("removed var %s in routine %s\n" %
                                    (lowerV, routine["name"]))
                        R_VAR += 1
            if len(localD["vars"]):
                localDecl.append(localD)
        argDecl = []
        for arg in routine["lowercaseArguments"]:
            if arg in argDeclDict.keys():
                argDecl.append(argDeclDict[arg])
            else:
                logger.debug("warning, implicitly typed argument '" + arg +
                             "' in routine " + routine["name"] + "\n")
        if routine["kind"].lower() == "function":
            aDecl = argDecl[:-1]
        else:
            aDecl = argDecl

        # try to have arg/param/local, but checks for dependencies arg/param
        # and param/local
        argDecl.extend(paramDecl)
        enforceDeclDependecies(argDecl)
        splitPos = 0
        for i in range(len(argDecl) - 1, -1, -1):
            if not any(a.lower() == "parameter"
                       for a in argDecl[i]["attributes"]):
                splitPos = i + 1
                break
        paramDecl = argDecl[splitPos:]
        argDecl = argDecl[:splitPos]
        paramDecl.extend(localDecl)
        enforceDeclDependecies(paramDecl)
        splitPos = 0
        for i in range(len(paramDecl) - 1, -1, -1):
            if any(a.lower() == "parameter"
                   for a in paramDecl[i]["attributes"]):
                splitPos = i + 1
                break
        localDecl = paramDecl[splitPos:]
        paramDecl = paramDecl[:splitPos]

        newDecl = StringIO()
        for comment in routine["preDeclComments"]:
            if not COMMENT_TO_REMOVE_RE.match(comment):
                newDecl.write(comment)
        newDecl.writelines(routine["use"])
        writeDeclarations(argDecl, newDecl)
        if argDecl and paramDecl:
            newDecl.write("\n")
        writeDeclarations(paramDecl, newDecl)
        if (argDecl or paramDecl) and localDecl:
            newDecl.write("\n")
        writeDeclarations(localDecl, newDecl)
        if argDecl or paramDecl or localDecl:
            newDecl.write("\n")
        wrote = 0
        for comment in routine["declComments"]:
            if comment.strip() and not COMMENT_TO_REMOVE_RE.match(comment):
                newDecl.write(comment.strip())
                newDecl.write("\n")
                wrote = 1
        if wrote:
            newDecl.write("\n")
        routine["declarations"] = [newDecl.getvalue()]
    except:
        if "name" in routine.keys():
            logger.critical("exception cleaning routine " + routine["name"])
        logger.critical("parsedDeclartions=" +
                        str(routine["parsedDeclarations"]))
        raise

    newDecl = StringIO()
    if routine["postDeclComments"]:
        comment_start = 0
        for comment in routine["postDeclComments"]:
            if comment.strip():
                break
            else:
                comment_start += 1

        for comment in routine["postDeclComments"][comment_start:]:
            if not COMMENT_TO_REMOVE_RE.match(comment):
                newDecl.write(comment)
                newDecl.write("\n")
        routine["declarations"][0] += newDecl.getvalue()