Example 1
  def write_with_template(self, fname, tname, data):
    fout = StringIO()

    template = ezt.Template(compress_whitespace = 0)
    template.parse_file(os.path.join('build', 'generator', tname))
    template.generate(fout, data)
    self.write_file_if_changed(fname, fout.getvalue())
Example 2
 def test_no_inherit_future(self):
     # This file has from __future__ import print_function...
     f = StringIO()
     print('hello', file=f)
     # ...but the template doesn't
     exec_in('print >> f, "world"', dict(f=f))
     self.assertEqual(f.getvalue(), 'hello\nworld\n')
Example 3
    def get_tokens(self, text, unfiltered=False):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`. If `unfiltered` is set to `True`, the filtering mechanism
        is bypassed even if filters are defined.

        Also preprocesses the text, i.e. expands tabs and strips it if
        desired, and applies registered filters.
        """
        if isinstance(text, str):
            if self.stripall:
                text = text.strip()
            elif self.stripnl:
                text = text.strip('\n')

            if sys.version_info[0] < 3 and isinstance(text, str):
                text = StringIO(text.encode('utf-8'))
                self.encoding = 'utf-8'
            else:
                text = StringIO(text)

        def streamer():
            for i, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream
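For context, a minimal driver for the method above, assuming the standard Pygments lexer API (PythonLexer is just an illustrative choice):

from pygments.lexers import PythonLexer

# iterate the (tokentype, value) pairs described in the docstring
for tokentype, value in PythonLexer().get_tokens("x = 1\n"):
    print(tokentype, repr(value))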
Example 4
def _run_complexity_analysis(on_ci):
    """Generates cyclomatic complexity reports for the package

    :param bool on_ci: Indicates whether an automated tool is running this
        operation. Output will be customized for machine readability.
    """
    modlog.debug("Running complexity analysis")

    # generate cyclomatic complexities for source files in XML format for integration with external tools
    pyjen_path = os.path.join(os.getcwd(), "pyjen")
    from radon.cli import cc

    # TODO: output in XML format when running on CI
    standard_output = StringIO()
    with redirect_stdout(standard_output):
        modlog.debug("Calling radon.cc")
        cc(paths=[pyjen_path], show_complexity=True, show_closures=True, total_average=True, xml=on_ci)

    modlog.debug("Writing report to disk")
    cc_report = os.path.join(log_folder, "radon_complexity.xml")
    with open(cc_report, "w") as fh:
        fh.write(standard_output.getvalue())
    standard_output.close()

    modlog.info("Cyclomatic complexity analysis complete. See " + os.path.relpath(cc_report))
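The capture idiom above, reduced to just the standard-library pieces (a minimal sketch):

from contextlib import redirect_stdout
from io import StringIO

buffer = StringIO()
with redirect_stdout(buffer):
    print("report body")        # anything written to stdout lands in the buffer
assert buffer.getvalue() == "report body\n"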
Example 5
def test_lazy_load_index():
    f = StringIO()
    dump({'wakka': 42}, f)
    f.seek(0)
    lj = LazyJSON(f)
    assert_equal({'wakka': 10, '__total__': 0}, lj.offsets)
    assert_equal({'wakka': 2, '__total__': 14}, lj.sizes)
Example 6
    def testYAMLConfigFileParser_All(self):
        try:
            import yaml
        except ImportError:
            logging.warning("WARNING: PyYAML not installed. "
                            "Couldn't test YAMLConfigFileParser")
            return

        p = configargparse.YAMLConfigFileParser()

        # test the all syntax case
        config_lines = [
            "a: '3'",
            "list_arg:",
            "- 1",
            "- 2",
            "- 3",
        ]

        # test parse
        input_config_str = StringIO("\n".join(config_lines)+"\n")
        parsed_obj = p.parse(input_config_str)

        # test serialize
        output_config_str = p.serialize(parsed_obj)
        self.assertEqual(input_config_str.getvalue(), output_config_str)

        self.assertDictEqual(parsed_obj, dict([
            ('a', '3'),
            ('list_arg', [1,2,3]),
        ]))
Example 7
def test_simple_json_output():
    output = StringIO()

    reporter = JSONReporter()
    linter = PyLinter(reporter=reporter)
    checkers.initialize(linter)

    linter.config.persistent = 0
    linter.reporter.set_output(output)
    linter.open()
    linter.set_current_module("0123")
    linter.add_message("line-too-long", line=1, args=(1, 2))

    # we call this method because we didn't actually run the checkers
    reporter.display_messages(None)

    expected_result = [
        [
            ("column", 0),
            ("line", 1),
            ("message", "Line too long (1/2)"),
            ("message-id", "C0301"),
            ("module", "0123"),
            ("obj", ""),
            ("path", "0123"),
            ("symbol", "line-too-long"),
            ("type", "convention"),
        ]
    ]
    report_result = json.loads(output.getvalue())
    report_result = [sorted(report_result[0].items(), key=lambda item: item[0])]
    assert report_result == expected_result
Example 8
 def contentxml(self):
     """
     Generates the content.xml file
     @return a bytestream in UTF-8 encoding
     """
     xml = StringIO()
     xml.write(_XMLPROLOGUE)
     x = DocumentContent()
     x.write_open_tag(0, xml)
     if self.scripts.hasChildNodes():
         self.scripts.toXml(1, xml)
     if self.fontfacedecls.hasChildNodes():
         self.fontfacedecls.toXml(1, xml)
     a = AutomaticStyles()
     stylelist = self._used_auto_styles([self.styles, self.automaticstyles, self.body])
     if len(stylelist) > 0:
         a.write_open_tag(1, xml)
         for s in stylelist:
             s.toXml(2, xml)
         a.write_close_tag(1, xml)
     else:
         a.toXml(1, xml)
     self.body.toXml(1, xml)
     x.write_close_tag(0, xml)
     return xml.getvalue().encode("utf-8")
Example 9
    def stylesxml(self):
        """
        Generates the styles.xml file
        @return valid XML code as a unicode string
        """
        xml = StringIO()
        xml.write(_XMLPROLOGUE)
        x = DocumentStyles()
        x.write_open_tag(0, xml)
        if self.fontfacedecls.hasChildNodes():
            self.fontfacedecls.toXml(1, xml)
        self.styles.toXml(1, xml)
        a = AutomaticStyles()
        a.write_open_tag(1, xml)
        for s in self._used_auto_styles([self.masterstyles]):
            s.toXml(2, xml)
        a.write_close_tag(1, xml)
        if self.masterstyles.hasChildNodes():
            self.masterstyles.toXml(1, xml)
        x.write_close_tag(0, xml)
        result = xml.getvalue()

        assert type(result) == type(u"")

        return result
Example 10
    def test_low_read_quality(self):
        remap_file = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
Example_read_1,99,V3LOOP,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,000000000000000000AAAAAAAAAAAAAA
Example_read_1,147,V3LOOP,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,000000000000000000AAAAAAAAAAAAAA
Example_read_2,99,INT,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Example_read_2,147,INT,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
""")
        expected_aligned_csv = """\
refname,qcut,rank,count,offset,seq
INT,15,0,1,0,TGTACAAGACCCAACAACAATACAAGAAAAAG
"""
        expected_failed_csv = """\
qname,cause
Example_read_1,manyNs
"""
        actual_aligned_csv = StringIO()
        actual_failed_csv = StringIO()
        sam2aln(remap_file,
                actual_aligned_csv,
                failed_csv=actual_failed_csv)

        self.assertMultiLineEqual(expected_aligned_csv,
                                  actual_aligned_csv.getvalue())
        self.assertMultiLineEqual(expected_failed_csv,
                                  actual_failed_csv.getvalue())
Example 11
def excerpt(mass, note_indices):
    from patternfinder.geometric_helsinki.geometric_notes import NotePointSet
    score = music21.converter.parse(PALESTRINA_PATH + mass + '.mid.xml')
    pointset = list(NotePointSet(score).flat.notes)

    pointset_indices = [int(i) for i in note_indices.split(',')]
    score_note_ids = [pointset[i].original_note_id for i in pointset_indices]

    # Get stream excerpt
    _, start_measure = score.beatAndMeasureFromOffset(pointset[pointset_indices[0]].offset)
    _, end_measure = score.beatAndMeasureFromOffset(pointset[pointset_indices[-1]].offset + pointset[-1].duration.quarterLength - 1)
    excerpt = score.measures(numberStart=start_measure.number, numberEnd=end_measure.number)

    # Colour notes
    for note in excerpt.flat.notes:
        if note.id in score_note_ids:
            note.style.color = 'red'

    # Delete part names (midi files have bad data)
    for part in excerpt:
        part.partName = ''

    sx = music21.musicxml.m21ToXml.ScoreExporter(excerpt)
    musicxml = sx.parse()

    from io import StringIO
    import sys
    bfr = StringIO()
    sys.stdout = bfr
    try:
        sx.dump(musicxml)
        output = bfr.getvalue()
    finally:
        # always restore stdout, even if dump() raises
        sys.stdout = sys.__stdout__
    return Response(output, mimetype='application/xml')
Example 12
    def test_insertion(self):
        remap_file = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
Example_read_1,99,V3LOOP,1,44,12M6I14M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Example_read_1,147,V3LOOP,1,44,12M6I14M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
""")
        expected_aligned_csv = """\
refname,qcut,rank,count,offset,seq
V3LOOP,15,0,1,0,TGTACAAGACCCAATACAAGAAAAAG
"""
        expected_insert_csv = """\
qname,fwd_rev,refname,pos,insert,qual
Example_read_1,F,V3LOOP,12,AACAAC,AAAAAA
Example_read_1,R,V3LOOP,12,AACAAC,AAAAAA
"""
        actual_aligned_csv = StringIO()
        actual_insert_csv = StringIO()
        sam2aln(remap_file,
                actual_aligned_csv,
                actual_insert_csv)

        self.assertMultiLineEqual(expected_aligned_csv,
                                  actual_aligned_csv.getvalue())
        self.assertMultiLineEqual(expected_insert_csv,
                                  actual_insert_csv.getvalue())
Example 13
    def test_low_mapq(self):
        """ We no longer fail reads because of low mapq.

        When we use more than one reference, reads can receive low mapq if they
        are in a conserved region that matches more than one reference.
        """
        remap_file = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
Example_read_1,99,V3LOOP,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Example_read_1,147,V3LOOP,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Example_read_2,99,INT,1,8,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Example_read_2,147,INT,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
""")
        expected_aligned_csv = """\
refname,qcut,rank,count,offset,seq
INT,15,0,1,0,TGTACAAGACCCAACAACAATACAAGAAAAAG
V3LOOP,15,0,1,0,TGTACAAGACCCAACAACAATACAAGAAAAAG
"""
        expected_failed_csv = """\
qname,cause
"""
        actual_aligned_csv = StringIO()
        actual_failed_csv = StringIO()
        sam2aln(remap_file,
                actual_aligned_csv,
                failed_csv=actual_failed_csv)

        self.assertMultiLineEqual(expected_aligned_csv,
                                  actual_aligned_csv.getvalue())
        self.assertMultiLineEqual(expected_failed_csv,
                                  actual_failed_csv.getvalue())
Example 14
    def assertBlock(self, python, java):
        self.maxDiff = None
        dump = False

        py_block = PyBlock(parent=PyModule('test', 'test.py'))
        if python:
            python = adjust(python)
            code = compile(python, '<test>', 'exec')
            py_block.extract(code, debug=dump)

        java_code = py_block.transpile()

        out = BytesIO()
        constant_pool = ConstantPool()
        java_code.resolve(constant_pool)

        constant_pool.add(Utf8('test'))
        constant_pool.add(Utf8('Code'))
        constant_pool.add(Utf8('LineNumberTable'))

        writer = ClassFileWriter(out, constant_pool)
        java_code.write(writer)

        debug = StringIO()
        reader = ClassFileReader(BytesIO(out.getbuffer()), constant_pool, debug=debug)
        JavaCode.read(reader, dump=0)

        if dump:
            print(debug.getvalue())

        java = adjust(java)
        self.assertEqual(debug.getvalue(), java[1:])
Example 15
	def run(self):
		try:
			# Try parsing as XML
			root = etree.parse(self.fn)
			ns = "{%s}" % schemas['corpus']
			out = StringIO()
			for i in root.getiterator(ns + "entry"):
				out.write(i.text + "\n")
			self.corpus = out.getvalue()
			del out
		except Exception:
			# Turns out it's not XML
			self.corpus = open(self.fn, 'r')
		
		try:
			open(self.dct).close() # test existence
		except IOError:
			raise # TODO: wrap error for output
		
		if not self.result:
			delim = re.compile(r"\$[^^]*\^")
			f = open(self.fn, 'r')			
			data = f.read()
			f.close()

			output = destxt(data).encode('utf-8')
			timing_begin = time.time()
			proc = Popen([self.app] + self.app_args + [self.dct], stdin=PIPE, stdout=PIPE, close_fds=True)
			output = str(proc.communicate(output)[0].decode('utf-8'))
			self.timer = time.time() - timing_begin
			output = retxt(output) 
			
			output = delim.sub("$\n^", output)
			self.result = output.split('\n')
		return 0
Example 16
def correct_INCA_format(fp):
    fp_list = list()
    fp.seek(0)
    if '(' in fp.readline():
        for line in fp:
            line = (line.replace("(MLX::", "")
                        .replace(" : ", "\t")
                        .replace(" :", "\t")
                        .replace(" ", "\t")
                        .lower().strip()
                        .replace(")", "\n"))
            if "record-by" in line:
                if "image" in line:
                    line = "record-by\timage"
                if "vector" in line:
                    line = "record-by\tvector"
                if "dont-care" in line:
                    line = "record-by\tdont-care"
            fp_list.append(line)
        fp = StringIO()
        fp.writelines(fp_list)
    fp.seek(0)
    return fp
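A quick round trip through correct_INCA_format; the input lines are made up here to imitate the INCA-style format the function expects:

from io import StringIO

raw = StringIO("(MLX::calibration)\n(MLX::record-by : image)\n")
fixed = correct_INCA_format(raw)
print(fixed.read())   # record-by<TAB>image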
Example 17
def export_set(dataset):
	"""HTML representation of a Dataset."""

	stream = StringIO()

	page = markup.page()
	page.table.open()

	if dataset.headers is not None:
		new_header = [item if item is not None else '' for item in dataset.headers] 

		page.thead.open()
		headers = markup.oneliner.th(new_header)
		page.tr(headers)
		page.thead.close()

	for row in dataset:
		new_row = [item if item is not None else '' for item in row] 

		html_row = markup.oneliner.td(new_row)
		page.tr(html_row)

	page.table.close()

	stream.writelines(str(page))

	return stream.getvalue()
Example 18
    def test_with_thumbor_disabled(self) -> None:
        self.login(self.example_email("hamlet"))
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        quoted_uri = urllib.parse.quote(uri[1:], safe='')

        with self.settings(THUMBOR_URL=''):
            result = self.client_get("/thumbnail?url=%s&size=original" % (quoted_uri))
        self.assertEqual(result.status_code, 302, result)
        self.assertEqual(uri, result.url)

        uri = 'https://www.google.com/images/srpr/logo4w.png'
        quoted_uri = urllib.parse.quote(uri, safe='')
        with self.settings(THUMBOR_URL=''):
            result = self.client_get("/thumbnail?url=%s&size=original" % (quoted_uri))
        self.assertEqual(result.status_code, 302, result)
        self.assertEqual(uri, result.url)

        uri = 'http://www.google.com/images/srpr/logo4w.png'
        quoted_uri = urllib.parse.quote(uri, safe='')
        with self.settings(THUMBOR_URL=''):
            result = self.client_get("/thumbnail?url=%s&size=original" % (quoted_uri))
        self.assertEqual(result.status_code, 302, result)
        base = 'https://external-content.zulipcdn.net/7b6552b60c635e41e8f6daeb36d88afc4eabde79/687474703a2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67'
        self.assertEqual(base, result.url)
Example 19
def test_run_api_500_error():
    os.environ["CODECLIMATE_API_HOST"] = "http://example.com"

    api_mock = ApiMock()
    api_mock.setup(500)

    err = StringIO()
    runner = Runner(["--file", "./coverage.txt", "--token", "token"], err=err)

    orig_dir = os.getcwd()
    os.chdir("./tests/fixtures")

    subprocess.call(["git", "init"])
    subprocess.call(["git", "config", "user.name", "Test User"])
    subprocess.call(["git", "config", "user.email", "*****@*****.**"])
    subprocess.call(["git", "commit", "--allow-empty", "--message", "init"])

    try:
        return_code = runner.run()

        assert(return_code == 1)
        assert("500 Server Error" in err.getvalue().strip())
    finally:
        del os.environ["CODECLIMATE_API_HOST"]
        os.chdir(orig_dir)
        shutil.rmtree("./tests/fixtures/.git")
        api_mock.cleanup()
Example 20
class ContentState( State ):
    """ This state records every line it meets as content until it reaches a line notifying a deposit reference or a publication reference.
        Can return ``self`` or :class:`.PublicationState` or :class:`.DepositState`.
    """
    def __init__( self, store ):
        State.__init__( self, store )
        self.textIO = StringIO()

    def process_line( self, line ):
        if publication_line_re.match( line ):
            self.store_content()
            return PublicationState( self.store ).process_line( line )
        elif deposit_line_re.match( line ):
            self.store_content()
            return DepositState( self.store ).process_line( line )
        else:
            self.add_content_line( line )
            globalOutputter.writeTagLine( 'TXT', line )
            return self
    
    def add_content_line( self, line ):
        self.textIO.write( line )
        self.textIO.write( '\n' )
    
    def store_content( self ):
        self.store['current_article']['content'] = self.textIO.getvalue()
Example 21
 def test_opened_file(self):
     sio = StringIO()
     sio.write('test_data')
     sio.seek(0)
     file, close = open_if_filename(sio)
     assert not close
     eq_('test_data', file.read())
Example 22
def results(id):
    select_stmt = "SELECT q.name, d.name as department, q.description, q.query " \
                  "FROM query q JOIN departments d ON q.department_id = d.id " \
                  "WHERE q.id=%s;"
    with RealDictConnection(dsn=local) as conn:
        with conn.cursor() as cursor:
            cursor.execute(select_stmt, (str(id), ))
            res = cursor.fetchone()
    if res:
        with RealDictConnection(dsn=local) as conn:
            with conn.cursor() as cursor:
                cursor.execute(res['query'])
                result = cursor.fetchall()
                header = result[0].keys()
        if request.args.get('download', '').strip():
            si = StringIO()
            f = csv.writer(si)
            f.writerow(header)
            f.writerows([row.values() for row in result])
            output = make_response(si.getvalue())
            output.headers["Content-Disposition"] = "attachment; filename=%s.csv" \
                                                    % str(res['name'])
            output.headers["Content-type"] = "text/csv"
            return output
        else:
            return render_template('results.html',
                                   details=res, rows=result[0:5], id=id,
                                   header=header)
    else:
        return 'Query with id %s does not exist!' % str(id)
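The CSV-download branch in isolation: csv.writer accepts any text file object, so writing into a StringIO and shipping getvalue() as the response body is all there is to it (a sketch with made-up rows):

import csv
from io import StringIO

si = StringIO()
writer = csv.writer(si)
writer.writerow(["name", "department"])
writer.writerows([["q1", "sales"], ["q2", "ops"]])
csv_text = si.getvalue()   # ready to use as a text/csv response body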
Example 23
    def _open(self):
        header = StringIO(self.fp.read(24))
        magic = header.read(4)
        if magic != "FTEX":
            raise ValueError("not a FTEX file")

        version = unpack("i", header.read(4))
        self.size = unpack("ii", header.read(8))
        linesize = (self.size[0] + 3) // 4 * 8
        mipmap_count, format_count = unpack("ii", header.read(8))
        self.mode = "RGB"

        self.tile = []
        for i in range(format_count):
            format, where = unpack("ii", self.fp.read(8))

            if format == 0:
                data = []
                self.fp.seek(where)
                size, = unpack("i", self.fp.read(4))
                for yb in range((self.size[1] + 3) // 4):
                    decoded = dxtc.decodeDXT1(self.fp.read(linesize))
                    for d in decoded:
                        data.append(d)

                data = "".join(data[:self.size[1]])
                self.im = Image.core.new(self.mode, self.size)
                return self.fromstring(data)

            elif format == 1: # Uncompressed RGB
                self.tile.append(("raw", (0, 0) + self.size, where+4, (self.mode, 0, 1)))

            else:
                raise ValueError("Invalid texture format (expected 0 or 1, got %i)" % (format))
Example 24
 def flush(self):
     self.file.flush()
     #super(TeeFile, self).flush()
     StringIO.flush(self)
     if self.queue is not None:
         self.queue.put((current_process().pid, ''.join(self.queue_buffer)))
         self.queue_buffer = []
Example 25
 def test_import_loop(self):
     finders.get_finder.cache_clear()
     err = StringIO()
     with self.assertRaisesMessage(RuntimeError, 'Max post-process passes exceeded'):
         call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
     self.assertEqual("Post-processing 'All' failed!\n\n", err.getvalue())
     self.assertPostCondition()
Example 26
def call_command_returns(*args, **kwargs):
    "call command but wich returns the output."
    from django.core.management import call_command
    stdout = StringIO()
    kwargs['stdout'] = stdout
    call_command(*args, **kwargs)
    return stdout.getvalue().strip()
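Usage is then one line; 'check' is a built-in Django command, though any configured command works (assumes Django settings are loaded):

output = call_command_returns('check')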
Example 27
    def __repr__(self):
        if self._parent is None:
            return super().__repr__()

        out = StringIO()
        self.__call__(out=out)
        return out.getvalue()
Example 28
 def cmd(self, *args, **kw):
     exit_code = kw.get('exit_code', 0)
     fork = kw.get('fork', False)
     if fork:
         try:
             output = subprocess.check_output((sys.executable, '-m', 'borg.archiver') + args)
             ret = 0
         except subprocess.CalledProcessError as e:
             output = e.output
             ret = e.returncode
         output = os.fsdecode(output)
         if ret != exit_code:
             print(output)
         self.assert_equal(exit_code, ret)
         return output
     args = list(args)
     stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
     try:
         sys.stdin = StringIO()
         output = StringIO()
         sys.stdout = sys.stderr = output
         ret = self.archiver.run(args)
         sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
         if ret != exit_code:
             print(output.getvalue())
         self.assert_equal(exit_code, ret)
         return output.getvalue()
     finally:
         sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
Example 29
 def scenario_parse_args_exits(self, argv):
     with self.assertRaises(SystemExit) as cm:
         stdout = StringIO()
         stderr = StringIO()
         with Redirect(stdout=stdout, stderr=stderr):
             parsed_args = args.parse(argv)
     return (stdout.getvalue(), stderr.getvalue(), cm.exception.code)
Example 30
class DistantInteractiveConsole(InteractiveConsole):
    def __init__(self, ipc):
        InteractiveConsole.__init__(self, globals())

        self.ipc = ipc
        self.set_buffer()

    def set_buffer(self):
        self.out_buffer = StringIO()
        sys.stdout = sys.stderr = self.out_buffer

    def unset_buffer(self):
        sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
        value = self.out_buffer.getvalue()
        self.out_buffer.close()

        return value

    def raw_input(self, prompt=""):
        output = self.unset_buffer()
        # payload format: prompt + '\n' + output
        self.ipc.send('\n'.join((prompt, output)))

        cmd = self.ipc.recv()

        self.set_buffer()

        return cmd
Example 31
 def _make_reader(**kwds):
     return TextReader(StringIO(data), delimiter=",", **kwds)
Example 32
def test_wait_import_task_with_failure_no_error(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
    api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)

    if token_ins:
        mock_token_get = MagicMock()
        mock_token_get.return_value = 'my token'
        monkeypatch.setattr(token_ins, 'get', mock_token_get)

    mock_open = MagicMock()
    mock_open.side_effect = [
        StringIO(to_text(json.dumps({
            'finished_at': 'some_time',
            'state': 'failed',
            'error': {},
            'messages': [
                {
                    'level': 'error',
                    'message': u'Somé error',
                },
                {
                    'level': 'warning',
                    'message': u'Some wärning',
                },
                {
                    'level': 'info',
                    'message': u'Somé info',
                },
            ],
        }))),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)

    mock_vvv = MagicMock()
    monkeypatch.setattr(Display, 'vvv', mock_vvv)

    mock_warn = MagicMock()
    monkeypatch.setattr(Display, 'warning', mock_warn)

    mock_err = MagicMock()
    monkeypatch.setattr(Display, 'error', mock_err)

    expected = 'Galaxy import process failed: Unknown error, see %s for more details \\(Code: UNKNOWN\\)' % full_import_uri
    with pytest.raises(AnsibleError, match=expected):
        api.wait_import_task(import_uri)

    assert mock_open.call_count == 1
    assert mock_open.mock_calls[0][1][0] == full_import_uri
    assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type

    assert mock_display.call_count == 1
    assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri

    assert mock_vvv.call_count == 1
    assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: info - Somé info'

    assert mock_warn.call_count == 1
    assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning'

    assert mock_err.call_count == 1
    assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error'
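The mocking trick above in miniature: MagicMock consumes one element of side_effect per call, so each queued StringIO plays the role of one HTTP response body (a sketch, independent of the Galaxy API):

from io import StringIO
from unittest.mock import MagicMock

mock_open = MagicMock()
mock_open.side_effect = [StringIO('{"state": "failed"}')]
response = mock_open("https://example.invalid")   # first call returns the first payload
assert response.read() == '{"state": "failed"}'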
Example 33
 def test_read_new_configuration_space_easy(self):
     expected = StringIO()
     expected.write('# This is a \n')
     expected.write('   # This is a comment with a leading whitespace ### ffds \n')
     expected.write('\n')
     expected.write('float_a real [-1.23, 6.45] [2.61] # bla\n')
     expected.write('e_float_a real [.5E-2, 4.5e+06] [2250000.0025]\n')
     expected.write('int_a integer [-1, 6] [2]\n')
     expected.write('log_a real [4e-1, 6.45] [1.6062378404]log\n')
     expected.write('int_log_a integer [1, 6] [2]log\n')
     expected.write('cat_a categorical {a,"b",c,d} [a]\n')
     expected.write('@.:;/\\?!$%&_-<>*+1234567890 categorical {"const"} ["const"]\n')
     expected.seek(0)
     cs = pcs_new.read(expected)
     self.assertEqual(cs, easy_space)
Example 34
class _TestResult(TestResult):
    # note: _TestResult is a pure representation of results.
    # It lacks the output and reporting ability compared to unittest._TextTestResult.

    def __init__(self, verbosity=1):
        TestResult.__init__(self)
        self.stdout0 = None
        self.stderr0 = None
        self.success_count = 0
        self.failure_count = 0
        self.error_count = 0
        self.verbosity = verbosity

        # result is a list of results, each a 4-tuple
        # (
        #   result code (0: success; 1: fail; 2: error),
        #   TestCase object,
        #   Test output (byte string),
        #   stack trace,
        # )
        self.result = []
        # add a test pass rate --Findyou
        self.passrate = float(0)

    def startTest(self, test):
        TestResult.startTest(self, test)
        # just one buffer for both stdout and stderr
        self.outputBuffer = StringIO()
        stdout_redirector.fp = self.outputBuffer
        stderr_redirector.fp = self.outputBuffer
        self.stdout0 = sys.stdout
        self.stderr0 = sys.stderr
        sys.stdout = stdout_redirector
        sys.stderr = stderr_redirector

    def complete_output(self):
        """
        Disconnect output redirection and return buffer.
        Safe to call multiple times.
        """
        if self.stdout0:
            sys.stdout = self.stdout0
            sys.stderr = self.stderr0
            self.stdout0 = None
            self.stderr0 = None
        return self.outputBuffer.getvalue()

    def stopTest(self, test):
        # Usually one of addSuccess, addError or addFailure would have been called.
        # But there are some paths in unittest that would bypass this.
        # We must disconnect stdout in stopTest(), which is guaranteed to be called.
        self.complete_output()

    def addSuccess(self, test):
        self.success_count += 1
        TestResult.addSuccess(self, test)
        output = self.complete_output()
        self.result.append((0, test, output, ''))
        if self.verbosity > 1:
            sys.stderr.write('ok ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('.')

    def addError(self, test, err):
        self.error_count += 1
        TestResult.addError(self, test, err)
        _, _exc_str = self.errors[-1]
        output = self.complete_output()
        self.result.append((2, test, output, _exc_str))
        if self.verbosity > 1:
            sys.stderr.write('E  ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('E')

    def addFailure(self, test, err):
        self.failure_count += 1
        TestResult.addFailure(self, test, err)
        _, _exc_str = self.failures[-1]
        output = self.complete_output()
        self.result.append((1, test, output, _exc_str))
        if self.verbosity > 1:
            sys.stderr.write('F  ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('F')
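stdout_redirector and stderr_redirector are not defined in this snippet; presumably they are small forwarders whose fp attribute can be re-pointed at the buffer, along the lines of this sketch (modeled on HTMLTestRunner's OutputRedirector, an assumption here):

import sys

class OutputRedirector(object):
    """Forward writes to whatever file-like object `fp` currently points at."""
    def __init__(self, fp):
        self.fp = fp

    def write(self, s):
        self.fp.write(s)

    def writelines(self, lines):
        self.fp.writelines(lines)

    def flush(self):
        self.fp.flush()

stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)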
Example 35
 def test_string_factorize(self):
     # should this be optional?
     data = "a\nb\na\nb\na"
     reader = TextReader(StringIO(data), header=None)
     result = reader.read()
     assert len(set(map(id, result[0]))) == 2
Example 36
 def test_empty_csv_input(self):
     # GH14867
     with read_csv(
         StringIO(), chunksize=20, header=None, names=["a", "b", "c"]
     ) as df:
         assert isinstance(df, TextFileReader)
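For contrast, the same context-manager form with non-empty input; read_csv returns a TextFileReader whenever chunksize is given, and the with-statement support assumes pandas >= 1.2:

import pandas as pd
from io import StringIO

with pd.read_csv(StringIO("a,b\n1,2\n3,4\n"), chunksize=1) as reader:
    for chunk in reader:
        print(chunk)   # one single-row DataFrame per iteration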
Example 37
 def _test(text, **kwargs):
     nice_text = text.replace("\r", "\r\n")
     result = TextReader(StringIO(text), **kwargs).read()
     expected = TextReader(StringIO(nice_text), **kwargs).read()
     assert_array_dicts_equal(result, expected)
Example 38
 def save_df_to_csv(self, df: pd.DataFrame, key: str):
     csv_buffer = StringIO()
     df.to_csv(csv_buffer, index=False)
     self.save_object(body=csv_buffer.getvalue().encode('euc-kr'), key=key)
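The same buffer-then-encode pattern stands alone as follows; the sample frame and encoding are illustrative:

import pandas as pd
from io import StringIO

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
buffer = StringIO()
df.to_csv(buffer, index=False)
payload = buffer.getvalue().encode("euc-kr")   # bytes ready for upload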
Example 39
    def bulk_create(self, workspace_name):
        """
        ---
        post:
          tags: ["Bulk", "Host"]
          description: Creates hosts in bulk
          responses:
            201:
              description: Created
              content:
                application/json:
                  schema: HostSchema
            400:
              description: Bad request
            403:
              description: Forbidden
        tags: ["Bulk", "Host"]
        responses:
          200:
            description: Ok
        """
        try:
            validate_csrf(flask.request.form.get('csrf_token'))
        except wtforms.ValidationError:
            flask.abort(403)

        def parse_hosts(list_string):
            items = re.findall(r"([.a-zA-Z0-9_-]+)", list_string)
            return items

        workspace = self._get_workspace(workspace_name)

        logger.info("Create hosts from CSV")
        if 'file' not in flask.request.files:
            abort(400, "Missing File in request")
        hosts_file = flask.request.files['file']
        stream = StringIO(hosts_file.stream.read().decode("utf-8"),
                          newline=None)
        FILE_HEADERS = {'description', 'hostnames', 'ip', 'os'}
        try:
            hosts_reader = csv.DictReader(stream)
            if set(hosts_reader.fieldnames) != FILE_HEADERS:
                logger.error("Missing Required headers in CSV (%s)",
                             FILE_HEADERS)
                abort(400, f"Missing Required headers in CSV ({FILE_HEADERS})")
            hosts_created_count = 0
            hosts_with_errors_count = 0
            for host_dict in hosts_reader:
                try:
                    hostnames = parse_hosts(host_dict.pop('hostnames'))
                    other_fields = {
                        'owned': False,
                        'mac': u'00:00:00:00:00:00',
                        'default_gateway_ip': u'None'
                    }
                    host_dict.update(other_fields)
                    host = super()._perform_create(host_dict, workspace_name)
                    host.workspace = workspace
                    for name in hostnames:
                        get_or_create(db.session,
                                      Hostname,
                                      name=name,
                                      host=host,
                                      workspace=host.workspace)
                    db.session.commit()
                except Exception as e:
                    logger.error("Error creating host (%s)", e)
                    hosts_with_errors_count += 1
                else:
                    logger.debug("Host Created (%s)", host_dict)
                    hosts_created_count += 1
            return make_response(
                jsonify(hosts_created=hosts_created_count,
                        hosts_with_errors=hosts_with_errors_count), 200)
        except Exception as e:
            logger.error("Error parsing hosts CSV (%s)", e)
            abort(400, f"Error parsing hosts CSV ({e})")
Example 40
 def truncate(self, size=None):
     StringIO.truncate(self, size)
     if hasattr(self, "softspace"):
         del self.softspace
Example 41
    def reset(self):
        """Reset internal buffer.

        This method is useful only when autoreset=False.
        """
        self._buffer = StringIO()
Example 42
    def __init__(self, ev_or_filename):
        # Get evaluation if str is passed
        if isinstance(ev_or_filename, Evaluation):
            ev = ev_or_filename
        else:
            ev = Evaluation(ev_or_filename)

        file_obj = StringIO(ev.section[8, 457])

        self.nuclide = {}
        self.modes = []
        self.spectra = {}
        self.average_energies = {}

        # Get head record
        items = get_head_record(file_obj)
        Z, A = divmod(items[0], 1000)
        metastable = items[3]
        self.nuclide['atomic_number'] = Z
        self.nuclide['mass_number'] = A
        self.nuclide['isomeric_state'] = metastable
        if metastable > 0:
            self.nuclide['name'] = '{}{}_m{}'.format(ATOMIC_SYMBOL[Z], A,
                                                     metastable)
        else:
            self.nuclide['name'] = '{}{}'.format(ATOMIC_SYMBOL[Z], A)
        self.nuclide['mass'] = items[1]  # AWR
        self.nuclide['excited_state'] = items[2]  # State of the original nuclide
        self.nuclide['stable'] = (items[4] == 1)  # Nucleus stability flag

        # Determine if radioactive/stable
        if not self.nuclide['stable']:
            NSP = items[5]  # Number of radiation types

            # Half-life and decay energies
            items, values = get_list_record(file_obj)
            self.half_life = ufloat(items[0], items[1])
            NC = items[4] // 2
            pairs = list(zip(values[::2], values[1::2]))
            ex = self.average_energies
            ex['light'] = ufloat(*pairs[0])
            ex['electromagnetic'] = ufloat(*pairs[1])
            ex['heavy'] = ufloat(*pairs[2])
            if NC == 17:
                ex['beta-'] = ufloat(*pairs[3])
                ex['beta+'] = ufloat(*pairs[4])
                ex['auger'] = ufloat(*pairs[5])
                ex['conversion'] = ufloat(*pairs[6])
                ex['gamma'] = ufloat(*pairs[7])
                ex['xray'] = ufloat(*pairs[8])
                ex['bremsstrahlung'] = ufloat(*pairs[9])
                ex['annihilation'] = ufloat(*pairs[10])
                ex['alpha'] = ufloat(*pairs[11])
                ex['recoil'] = ufloat(*pairs[12])
                ex['SF'] = ufloat(*pairs[13])
                ex['neutron'] = ufloat(*pairs[14])
                ex['proton'] = ufloat(*pairs[15])
                ex['neutrino'] = ufloat(*pairs[16])

            items, values = get_list_record(file_obj)
            spin = items[0]
            # ENDF-102 specifies that unknown spin should be reported as -77.777
            if spin == -77.777:
                self.nuclide['spin'] = None
            else:
                self.nuclide['spin'] = spin
            self.nuclide['parity'] = items[1]  # Parity of the nuclide

            # Decay mode information
            n_modes = items[5]  # Number of decay modes
            for i in range(n_modes):
                decay_type = get_decay_modes(values[6 * i])
                isomeric_state = int(values[6 * i + 1])
                energy = ufloat(*values[6 * i + 2:6 * i + 4])
                branching_ratio = ufloat(*values[6 * i + 4:6 * (i + 1)])

                mode = DecayMode(self.nuclide['name'], decay_type,
                                 isomeric_state, energy, branching_ratio)
                self.modes.append(mode)

            discrete_type = {
                0.0: None,
                1.0: 'allowed',
                2.0: 'first-forbidden',
                3.0: 'second-forbidden',
                4.0: 'third-forbidden',
                5.0: 'fourth-forbidden',
                6.0: 'fifth-forbidden'
            }

            # Read spectra
            for i in range(NSP):
                spectrum = {}

                items, values = get_list_record(file_obj)
                # Decay radiation type
                spectrum['type'] = _RADIATION_TYPES[items[1]]
                # Continuous spectrum flag
                spectrum['continuous_flag'] = {
                    0: 'discrete',
                    1: 'continuous',
                    2: 'both'
                }[items[2]]
                spectrum['discrete_normalization'] = ufloat(*values[0:2])
                spectrum['energy_average'] = ufloat(*values[2:4])
                spectrum['continuous_normalization'] = ufloat(*values[4:6])

                NER = items[5]  # Number of tabulated discrete energies

                if not spectrum['continuous_flag'] == 'continuous':
                    # Information about discrete spectrum
                    spectrum['discrete'] = []
                    for j in range(NER):
                        items, values = get_list_record(file_obj)
                        di = {}
                        di['energy'] = ufloat(*items[0:2])
                        di['from_mode'] = get_decay_modes(values[0])
                        di['type'] = discrete_type[values[1]]
                        di['intensity'] = ufloat(*values[2:4])
                        if spectrum['type'] == 'ec/beta+':
                            di['positron_intensity'] = ufloat(*values[4:6])
                        elif spectrum['type'] == 'gamma':
                            if len(values) >= 6:
                                di['internal_pair'] = ufloat(*values[4:6])
                            if len(values) >= 8:
                                di['total_internal_conversion'] = ufloat(
                                    *values[6:8])
                            if len(values) == 12:
                                di['k_shell_conversion'] = ufloat(
                                    *values[8:10])
                                di['l_shell_conversion'] = ufloat(
                                    *values[10:12])
                        spectrum['discrete'].append(di)

                if not spectrum['continuous_flag'] == 'discrete':
                    # Read continuous spectrum
                    ci = {}
                    params, ci['probability'] = get_tab1_record(file_obj)
                    ci['type'] = get_decay_modes(params[0])

                    # Read covariance (Ek, Fk) table
                    LCOV = params[3]
                    if LCOV != 0:
                        items, values = get_list_record(file_obj)
                        ci['covariance_lb'] = items[3]
                        ci['covariance'] = zip(values[0::2], values[1::2])

                    spectrum['continuous'] = ci

                # Add spectrum to dictionary
                self.spectra[spectrum['type']] = spectrum

        else:
            items, values = get_list_record(file_obj)
            items, values = get_list_record(file_obj)
            self.nuclide['spin'] = items[0]
            self.nuclide['parity'] = items[1]
            self.half_life = ufloat(float('inf'), float('inf'))
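The ufloat calls above come from the uncertainties package: each carries a nominal value plus a standard deviation and propagates the uncertainty through arithmetic. A minimal sketch, assuming that dependency:

from uncertainties import ufloat

half_life = ufloat(3600.0, 12.0)   # value +/- one standard deviation
print(2 * half_life)               # 7200.0+/-24.0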
Example 43
def createStringIO():
    if PY3K:
        from io import StringIO
    else:
        from StringIO import StringIO
    return StringIO()
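The same compatibility shim is more commonly written as a module-level try/except, equivalent to createStringIO above:

try:
    from io import StringIO          # Python 3
except ImportError:
    from StringIO import StringIO    # Python 2 fallback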
Example 44
import numpy as np
import pandas as pd  # missing from the original snippet, but used below
import json
from pymongo import MongoClient
import os
from io import StringIO

fileName = "intl_pricePercentile.csv"
array_id = pd.read_csv("/data/search/predict-2019/fline_id.csv",
                       header=None).values.reshape(-1, )
clientMongo = MongoClient("10.0.1.212:27017",
                          authSource="flight",
                          username='******',
                          password='******',
                          authMechanism='MONGODB-CR')
cursor = clientMongo.flight.priceRange_statistic_intl
valueIO = StringIO()
for id in array_id:
    priceRange = cursor.find({'_id': id}, {
        '_id': 1,
        'priceRange_basePercentile': 1,
        'priceRange_baseAvg': 1
    })
    L = list(priceRange)
    df_percentile = pd.DataFrame(L[0]['priceRange_basePercentile'],
                                 columns=['percentile', L[0]['_id']])
    df_percentile[L[0]['_id']] = pd.to_numeric(df_percentile[L[0]['_id']],
                                               downcast='integer')
    df_avg = pd.DataFrame(pd.to_numeric(L[0]['priceRange_baseAvg']).reshape(
        1, 2),
                          index=[L[0]['_id']])
    df = pd.concat([df_avg, df_percentile.T], axis=1, join='inner')
Example 45
 def parse_document(self, document):
     return csv.DictReader(StringIO(document), delimiter="\t")
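A self-contained run of the same parse (sample document made up):

import csv
from io import StringIO

document = "id\tname\n1\talice\n2\tbob\n"
rows = list(csv.DictReader(StringIO(document), delimiter="\t"))
# rows == [{'id': '1', 'name': 'alice'}, {'id': '2', 'name': 'bob'}]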
Example 46
 def pack_map_pairs(self, pairs):
     self._pack_map_pairs(len(pairs), pairs)
     if self._autoreset:
         ret = self._buffer.getvalue()
         self._buffer = StringIO()
         return ret
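The drain-and-reset idiom the autoreset branch relies on, in isolation: read the accumulated value out, then replace the buffer rather than trying to rewind it:

from io import StringIO

buffer = StringIO()
buffer.write("first payload")
ret = buffer.getvalue()   # drain the accumulated output
buffer = StringIO()       # reset by starting a fresh buffer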
Example 47
    def run(self, args):
        """ Install / upgrade a Splunk app from an archive file """
        # Handle ignored files by preserving them as much as possible.
        # Add --dry-run mode?  j/k - that's what git is for!

        DEBUG = KSCONF_DEBUG in os.environ

        if not os.path.isfile(args.tarball):
            self.stderr.write("No such file or directory {}\n".format(
                args.tarball))
            return EXIT_CODE_FAILED_SAFETY_CHECK

        if not os.path.isdir(args.dest):
            self.stderr.write(
                "Destination directory does not exist: {}\n".format(args.dest))
            return EXIT_CODE_FAILED_SAFETY_CHECK

        f_hash = file_hash(args.tarball)
        self.stdout.write("Inspecting archive:               {}\n".format(
            args.tarball))

        new_app_name = args.app_name
        # ARCHIVE PRE-CHECKS:  Archive must contain only one app, no weird paths, ...
        app_name = set()
        app_conf = {}
        files = 0
        local_files = set()
        a = extract_archive(args.tarball,
                            extract_filter=gaf_filter_name_like("app.conf"))
        for gaf in sanity_checker(a):
            gaf_app, gaf_relpath = gaf.path.split("/", 1)
            files += 1
            if gaf.path.endswith("app.conf") and gaf.payload:
                conffile = StringIO(gaf.payload.decode(default_encoding))
                conffile.name = os.path.join(args.tarball, gaf.path)
                app_conf = parse_conf(conffile, profile=PARSECONF_LOOSE)
                del conffile
            elif gaf_relpath.startswith("local" + os.path.sep) or \
                 gaf_relpath.endswith("local.meta"):
                local_files.add(gaf_relpath)
            app_name.add(gaf.path.split("/", 1)[0])
            del gaf_app, gaf_relpath
        if len(app_name) > 1:
            self.stderr.write(
                "The 'unarchive' command only supports extracting a single splunk"
                " app at a time.\nHowever the archive {} contains {} apps:  {}\n"
                "".format(args.tarball, len(app_name), ", ".join(app_name)))
            return EXIT_CODE_FAILED_SAFETY_CHECK
        else:
            app_name = app_name.pop()
        del a
        if local_files:
            self.stderr.write("Local {} files found in the archive.  ".format(
                len(local_files)))
            if args.allow_local:
                self.stderr.write(
                    "Keeping these due to the '--allow-local' flag\n")
            else:
                self.stderr.write("Excluding local files by default.  "
                                  "Use '--allow-local' to override.")

        if not new_app_name and True:  # if not --no-app-name-fixes
            if app_name.endswith("-master"):
                self.stdout.write(
                    "Automatically dropping '-master' from the app name.  "
                    "This is often the result of a github export.\n")
                # Trick, but it works...
                new_app_name = app_name[:-7]
            mo = re.search(r"(.*)-\d+\.[\d.-]+$", app_name)
            if mo:
                self.stdout.write(
                    "Automatically removing the version suffix from the app name.  "
                    "'{}' will be extracted as '{}'\n".format(
                        app_name, mo.group(1)))
                new_app_name = mo.group(1)

        app_basename = new_app_name or app_name
        dest_app = os.path.join(args.dest, app_basename)
        self.stdout.write("Inspecting destination folder:    {}\n".format(
            os.path.abspath(dest_app)))

        # FEEDBACK TO THE USER:   UPGRADE VS INSTALL, GIT?, APP RENAME, ...
        app_name_msg = app_name

        git_ver = git_version()
        if git_ver is None:
            vc_msg = "without version control support (git not present)"
            is_git = False
        else:
            vc_msg = "without version control support"

        old_app_conf = {}

        if os.path.isdir(dest_app):
            mode = "upgrade"
            if git_ver:
                is_git = git_is_working_tree(dest_app)
            try:
                # Ignoring the 'local' entries since distributed apps shouldn't contain local
                old_app_conf_file = os.path.join(dest_app, args.default_dir
                                                 or "default", "app.conf")
                old_app_conf = parse_conf(old_app_conf_file,
                                          profile=PARSECONF_LOOSE)
            except ConfParserException:
                self.stderr.write(
                    "Unable to read app.conf from existing install.\n")
                # Assume upgrade from unknown version
        else:
            mode = "install"
            if git_ver:
                is_git = git_is_working_tree(args.dest)
        if is_git:
            vc_msg = "with git support"
        if new_app_name and new_app_name != app_name:
            app_name_msg = "{} (renamed from {})".format(
                new_app_name, app_name)

        def show_pkg_info(conf, label):
            self.stdout.write(
                "{} packaging info:    '{}' by {} (version {})\n".format(
                    label,
                    conf.get("ui", {}).get("label", "Unknown"),
                    conf.get("launcher", {}).get("author", "Unknown"),
                    conf.get("launcher", {}).get("version", "Unknown")))

        if old_app_conf:
            show_pkg_info(old_app_conf, " Installed app")
        if app_conf:
            show_pkg_info(app_conf, "   Tarball app")

        self.stdout.write("About to {} the {} app {}.\n".format(
            mode, app_name_msg, vc_msg))

        existing_files = set()
        if mode == "upgrade":
            if is_git:
                existing_files.update(git_ls_files(dest_app))
                if not existing_files:
                    self.stderr.write(
                        "App is in a git repository but no files have been staged "
                        "or committed.  Either commit or remove '{}' and try again."
                        "\n".format(dest_app))
                    return EXIT_CODE_FAILED_SAFETY_CHECK
                if args.git_sanity_check == "off":
                    self.stdout.write(
                        "The 'git status' safety checks have been disabled via CLI"
                        "argument.  Skipping.\n")
                else:
                    d = {
                        #        untracked, ignored
                        "changed": (False, False),
                        "untracked": (True, False),
                        "ignored": (True, True)
                    }
                    is_clean = git_is_clean(dest_app,
                                            *d[args.git_sanity_check])
                    del d
                    if is_clean:
                        self.stdout.write(
                            "Git folder is clean.  "
                            "Okay to proceed with the upgrade.\n")
                    else:
                        self.stderr.write(
                            "Unable to move forward without a clean working tree.\n"
                            "Clean up and try again.  "
                            "Modifications are listed below.\n\n")
                        self.stderr.flush()
                        if args.git_sanity_check == "changed":
                            git_status_ui(dest_app, "--untracked-files=no")
                        elif args.git_sanity_check == "ignored":
                            git_status_ui(dest_app, "--ignored")
                        else:
                            git_status_ui(dest_app)
                        return EXIT_CODE_FAILED_SAFETY_CHECK
            else:
                for (root, dirs, filenames) in relwalk(dest_app):
                    for fn in filenames:
                        existing_files.add(os.path.join(root, fn))
            self.stdout.write("Before upgrade.  App has {} files\n".format(
                len(existing_files)))
        elif is_git:
            self.stdout.write(
                "Git clean check skipped.  Not needed for a fresh app install.\n"
            )

        def fixup_pattern_bw(patterns, prefix=None):
            modified = []
            for pattern in patterns:
                if pattern.startswith("./"):
                    if prefix:
                        pattern = "{0}/{1}".format(prefix, pattern[2:])
                    else:
                        pattern = pattern[2:]
                    modified.append(pattern)
                # If a pattern like 'tags.conf' or '*.bak' is provided, use basename match (any dir)
                elif "/" not in pattern:
                    modified.append("(^|.../)" + pattern)
                else:
                    modified.append(pattern)
            return modified

        # PREP ARCHIVE EXTRACTION
        installed_files = set()
        excludes = list(args.exclude)
        '''
        for pattern in args.exclude:
            # If a pattern like 'default.meta' or '*.bak' is provided, assume it's a basename match.
            if "/" not in pattern:
                excludes.append(".../" + pattern)
            else:
                excludes.append(pattern)
        '''
        if not args.allow_local:
            for pattern in local_files:
                excludes.append("./" + pattern)
        excludes = fixup_pattern_bw(excludes, app_basename)
        self.stderr.write(
            "Extraction exclude patterns:  {!r}\n".format(excludes))
        path_rewrites = []
        files_iter = extract_archive(args.tarball)
        if True:
            files_iter = sanity_checker(files_iter)
        if args.default_dir:
            rep = "/{}/".format(args.default_dir.strip("/"))
            path_rewrites.append(("/default/", rep))
            del rep
        if new_app_name:
            # We do have the "app_name" extracted from our first pass above, but
            regex = re.compile(r'^([^/]+)(?=/)')
            path_rewrites.append((regex, new_app_name))
        if path_rewrites:
            files_iter = gen_arch_file_remapper(files_iter, path_rewrites)

        self.stdout.write("Extracting app now...\n")
        for gaf in files_iter:
            if match_bwlist(gaf.path, excludes, escape=False):
                self.stdout.write("Skipping [blacklist] {}\n".format(gaf.path))
                continue
            if not is_git or args.git_mode in ("nochange", "stage"):
                self.stdout.write("{0:60s} {2:o} {1:-6d}\n".format(
                    gaf.path, gaf.size, gaf.mode))
            installed_files.add(gaf.path.split("/", 1)[1])
            full_path = os.path.join(args.dest, gaf.path)
            dir_exists(os.path.dirname(full_path))
            with open(full_path, "wb") as fp:
                fp.write(gaf.payload)
            os.chmod(full_path, gaf.mode)
            del fp, full_path

        files_new, files_upd, files_del = _cmp_sets(installed_files,
                                                    existing_files)

        if DEBUG:
            print("New: \n\t{}".format("\n\t".join(sorted(files_new))))
            print("Existing: \n\t{}".format("\n\t".join(sorted(files_upd))))
            print("Removed:  \n\t{}".format("\n\t".join(sorted(files_del))))

        self.stdout.write(
            "Extracted {} files:  {} new, {} existing, and {} removed\n".
            format(len(installed_files), len(files_new), len(files_upd),
                   len(files_del)))

        # Filter out "removed" files and keep some based on a keep-whitelist:  This should
        # include things like local, ".gitignore", ".gitattributes" and so on

        keep_list = [".git*"]
        keep_list.extend(args.keep)
        if not args.allow_local:
            keep_list += ["local/...", "local.meta"]
        keep_list = fixup_pattern_bw(keep_list)
        self.stderr.write("Keep file patterns:  {!r}\n".format(keep_list))

        files_to_delete = []
        files_to_keep = []
        for fn in files_del:
            if match_bwlist(fn, keep_list, escape=False):
                # How to handle a keep of "default.d/..." when we DO want to cleanup the default
                # redirect folder of "default.d/10-upstream"?
                # This may be an academic question since most apps will continue to send
                # an ever increasing list of default files (to mask out old/unused ones)
                self.stdout.write("Keeping {}\n".format(fn))
                files_to_keep.append(fn)
            else:
                files_to_delete.append(fn)
        if files_to_keep:
            self.stdout.write(
                "Keeping {} of {} files marked for deletion due to whitelist.\n"
                .format(len(files_to_keep), len(files_del)))
        git_rm_queue = []

        if files_to_delete:
            self.stdout.write(
                "Removing files not present in the upgraded version of the app.\n"
            )
        for fn in files_to_delete:
            path = os.path.join(dest_app, fn)
            if is_git and args.git_mode in ("stage", "commit"):
                self.stdout.write("git rm -f {}\n".format(path))
                git_rm_queue.append(fn)
            else:
                self.stdout.write("rm -f {}\n".format(path))
                os.unlink(path)

        if git_rm_queue:
            # Run "git rm file1 file2 file3 ..." (using an xargs-like mechanism)
            git_cmd_iterable(["rm"], git_rm_queue, cwd=dest_app)
        del git_rm_queue

        if is_git:
            if args.git_mode in ("stage", "commit"):
                git_cmd(["add", os.path.basename(dest_app)],
                        cwd=os.path.dirname(dest_app))
                # self.stdout.write("git add {}\n".format(os.path.basename(dest_app)))
            # else:
            #     self.stdout.write("git add {}\n".format(dest_app))

            # Is there anything to stage/commit?
            if git_is_clean(os.path.dirname(dest_app), check_untracked=False):
                self.stderr.write(
                    "No changes detected.  Nothing to {}\n".format(
                        args.git_mode))
                return

            git_commit_app_name = app_conf.get("ui", {}).get(
                "label", os.path.basename(dest_app))
            git_commit_new_version = app_conf.get("launcher",
                                                  {}).get("version", None)
            if mode == "install":
                git_commit_message = "Install {}".format(git_commit_app_name)

                if git_commit_new_version:
                    git_commit_message += " version {}".format(
                        git_commit_new_version)
            else:
                # Todo:  Specify Upgrade/Downgrade/Refresh
                git_commit_message = "Upgrade {}".format(git_commit_app_name)
                git_commit_old_version = old_app_conf.get("launcher", {}).get(
                    "version", None)
                if git_commit_old_version and git_commit_new_version:
                    git_commit_message += " version {} (was {})".format(
                        git_commit_new_version, git_commit_old_version)
                elif git_commit_new_version:
                    git_commit_message += " to version {}".format(
                        git_commit_new_version)
            # Could possibly include some CLI arg details, like what file patterns were excluded
            git_commit_message += "\n\nSHA256 {} {}\n\nSplunk-App-managed-by: ksconf" \
                .format(f_hash, os.path.basename(args.tarball))
            git_commit_cmd = [
                "commit",
                os.path.basename(dest_app), "-m", git_commit_message
            ]

            if not args.no_edit:
                git_commit_cmd.append("--edit")

            git_commit_cmd.extend(args.git_commit_args)

            if args.git_mode == "commit":
                capture_std = bool(args.no_edit)
                proc = git_cmd(git_commit_cmd,
                               cwd=os.path.dirname(dest_app),
                               capture_std=capture_std)
                if proc.returncode == 0:
                    self.stderr.write(
                        dedent("""\
                    Your changes have been committed.  Please review before pushing.  If you
                    find any issues, here are some possible solutions:


                    To fix issues in the last commit, edit and add the files to be fixed, then run:

                        git commit --amend

                    To roll back the last commit but KEEP the app upgrade, run:

                        git reset --soft HEAD^1

                    To roll back the last commit and REVERT the app upgrade, run:

                        git reset --hard HEAD^1

                    NOTE:  Make sure you have *no* other uncommitted changes before running 'reset'.
                    """))
                else:
                    self.stderr.write(
                        "Git commit failed.  Return code {}.  Git args:  git {}\n"
                        .format(proc.returncode, list2cmdline(git_commit_cmd)))
                    return EXIT_CODE_GIT_FAILURE
            elif args.git_mode == "stage":
                self.stdout.write("To commit later, use the following\n")
                self.stdout.write("\tgit {}\n".format(
                    list2cmdline(git_commit_cmd).replace("\n", "\\n")))
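Note on the helpers above: git_cmd, git_cmd_iterable and git_is_clean are not shown in this excerpt. A minimal sketch of what such a git_cmd wrapper could look like, assuming it simply shells out via subprocess (the real ksconf implementation may differ):

import subprocess

def git_cmd(args, cwd=None, capture_std=False):
    # Hypothetical stand-in for the helper used above: run "git <args>" in cwd
    # and return the CompletedProcess so callers can check .returncode.
    cmd = ["git"] + list(args)
    if capture_std:
        # capture output so the caller can inspect or suppress it
        return subprocess.run(cmd, cwd=cwd, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, universal_newlines=True)
    return subprocess.run(cmd, cwd=cwd)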
Esempio n. 48
0
 def _upload_wake_word(self, audio, metadata):
     requests.post(self.upload_url,
                   files={
                       'audio': BytesIO(audio.get_wav_data()),
                       'metadata': StringIO(json.dumps(metadata))
                   })
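The snippet above streams both parts of the upload from memory: BytesIO wraps the binary WAV payload and StringIO wraps the JSON text, so requests can treat both as file-like objects without writing temporary files. A self-contained sketch of the same pattern (URL and metadata fields are made up for illustration):

import json
from io import BytesIO, StringIO

import requests

audio_bytes = b"RIFF....WAVEfmt "        # placeholder for real WAV data
metadata = {"model": "example-model"}    # hypothetical metadata fields

requests.post("https://example.com/upload",   # hypothetical endpoint
              files={"audio": BytesIO(audio_bytes),
                     "metadata": StringIO(json.dumps(metadata))})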
Esempio n. 50
0
 def test_should_return_none_for_stream_handler_not_stdout_or_stderr(self):
     handler = logging.StreamHandler(StringIO())
     assert _get_first_found_console_logging_handler([handler]) is None
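The test above implies a helper that scans logging handlers for one attached to the real console streams. A plausible implementation, inferred only from the test (the actual helper may differ):

import logging
import sys

def _get_first_found_console_logging_handler(handlers):
    """Return the first StreamHandler bound to stdout/stderr, else None."""
    for handler in handlers:
        if isinstance(handler, logging.StreamHandler):
            if handler.stream in (sys.stdout, sys.stderr):
                return handler
    return None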
Esempio n. 52
0
 def __init__(self, charlen=100, depth=4, children=5):
     LargeFileLike.__init__(self, charlen, depth, children)
     self.data = StringIO()
     self.chars = _str('a') * charlen
     self.more = self.iterelements(depth)
Esempio n. 53
0
class HrdfReader:
	"""
	Reads the HRDF files and writes them to the database

	HrdfReader(hrdfzipfile, db, hrdffiles)

	"""
	def __init__(self, hrdfzipfile, db, hrdffiles, charset='utf-8'):
		"""
		hrdfzipfile	- the HRDF zip file
		db - the HRDF database
		hrdffiles - list of the HRDF files to read
		charset - charset of the zipped files
		"""
		self.__hrdfzip = hrdfzipfile
		self.__hrdfdb = db
		self.__hrdffiles = hrdffiles
		self.__charset = charset
		self.__fkdict = dict(fk_eckdatenid="-1", fk_fplanfahrtid="-1")
		self.__eckdaten_validFrom = date.today()
		self.__eckdaten_validTo = date.today()

		# private class variables, since the data of a trip is saved via a separate method
		self.__fplanFahrtG_strIO = StringIO()
		self.__fplanFahrtAVE_strIO = StringIO()
		self.__fplanFahrtLauf_strIO = StringIO()
		self.__fplanFahrtA_strIO = StringIO()
		self.__fplanFahrtR_strIO = StringIO()
		self.__fplanFahrtI_strIO = StringIO()
		self.__fplanFahrtL_strIO = StringIO()
		self.__fplanFahrtSH_strIO = StringIO()
		self.__fplanFahrtC_strIO = StringIO()
		self.__fplanFahrtGR_strIO = StringIO()
		
		# Workaround to check which line type an AVE line belongs to (AVE lines also occur for through coaches / Kurswagen)
		self.__AVE_type = "None"


	def readfiles(self):
		"""Liest die gewünschten HRDF-Dateien und schreibt sie in die Datenbank"""

		for filename in self.__hrdffiles:
			if filename == "ECKDATEN":
				self.read_eckdaten(filename)
			elif filename == "BITFELD":
				self.read_bitfeld(filename)
			elif filename == "RICHTUNG":
				self.read_richtung(filename)
			elif filename == "ZUGART":
				self.read_zugart(filename)
			elif filename == "ATTRIBUT":
				self.read_attribut(filename, "DE")
				self.read_attribut(filename, "EN")
				self.read_attribut(filename, "FR")
				self.read_attribut(filename, "IT")
			elif filename == "INFOTEXT":
				self.read_infotext(filename, "DE")
				self.read_infotext(filename, "EN")
				self.read_infotext(filename, "FR")
				self.read_infotext(filename, "IT")
			elif filename == "FPLAN":
				self.read_fplan(filename)
			elif filename == "BAHNHOF":
				self.read_bahnhof(filename)
			elif filename == "GLEIS":
				self.read_gleis(filename)
			elif filename == "DURCHBI":
				self.read_durchbi(filename)
			elif filename == "BFKOORD_GEO":
				self.read_bfkoordgeo(filename)
			elif filename == "UMSTEIGB":
				self.read_umsteigb(filename)
			elif filename == "BFPRIOS":
				self.read_bfprios(filename)
			elif filename == "METABHF":
				self.read_metabhf(filename)
			else:
				logger.error("Das Lesen der Datei ["+filename+"] wird nicht unterstützt")

		# post-processing and consolidation of the imported data
		self.determine_linesperstop()
		self.determine_tripcount()				
				
		logger.info("Der HRDF-Import <{}> wurde eingearbeitet".format(self.__hrdfzip.filename))


	def read_eckdaten(self, filename):
		"""Lesen der Datei ECKDATEN"""
		logger.info('lesen und verarbeiten der Datei ECKDATEN')
		lines = self.__hrdfzip.read(filename).decode(self.__charset).split('\r\n')[:-1]
		# the separators in the description are specific to the SBB version; the parts are written into separate fields here
		bezeichnung,exportdatum,hrdfversion,lieferant = lines[2].split('$')
		cur = self.__hrdfdb.connection.cursor()
		sql_string = "INSERT INTO HRDF_ECKDATEN_TAB (importFileName, importDateTime, validFrom, validTo, descriptionhrdf, description, creationdatetime, hrdfversion, exportsystem) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s) RETURNING id;" 
		importFileName = os.path.basename(self.__hrdfzip.filename)
		importDateTime = str(datetime.now())
		validFrom = str(datetime.strptime(lines[0], '%d.%m.%Y').date())
		validTo = str(datetime.strptime(lines[1], '%d.%m.%Y').date())
		exportdatum = str(datetime.strptime(exportdatum, '%d.%m.%Y %H:%M:%S'))
		cur.execute(sql_string, (importFileName, importDateTime, validFrom, validTo, lines[2], bezeichnung, exportdatum, hrdfversion, lieferant))
		self.__fkdict["fk_eckdatenid"] = str(cur.fetchone()[0])
		self.__hrdfdb.connection.commit()
		self.__eckdaten_validFrom = datetime.strptime(lines[0], '%d.%m.%Y').date()
		self.__eckdaten_validTo = datetime.strptime(lines[1], '%d.%m.%Y').date()
		cur.close()

	def determine_linesperstop(self):
		"""Ermitteln und Schreiben der Linien, die in der aktuellen Fahrplanperiode an einem Halt vorkommen"""
		logger.info('ermitteln der Linien pro Halt')
		sql_stopsLookup = "INSERT INTO HRDF.HRDF_LINESPERSTOP_TAB (fk_eckdatenid, stopno, operationalno, lineno, categorycode) "\
					"(SELECT DISTINCT fahrt.fk_eckdatenid, flw.stopno, fahrt.operationalno, line.lineno, cat.categorycode "\
					"FROM hrdf.hrdf_fplanfahrtlaufweg_tab flw "\
					"LEFT OUTER JOIN hrdf.hrdf_fplanfahrt_tab fahrt on flw.fk_fplanfahrtid = fahrt.id and flw.fk_eckdatenid = fahrt.fk_eckdatenid "\
					"LEFT OUTER JOIN hrdf.hrdf_fplanfahrtl_tab line on line.fk_fplanfahrtid = fahrt.id and line.fk_eckdatenid = fahrt.fk_eckdatenid "\
					"LEFT OUTER JOIN hrdf.hrdf_fplanfahrtg_tab cat on cat.fk_fplanfahrtid = fahrt.id and cat.fk_eckdatenid = fahrt.fk_eckdatenid "\
					"WHERE fahrt.fk_eckdatenid = %s)"

		curLookup = self.__hrdfdb.connection.cursor()
		curLookup.execute(sql_stopsLookup, (self.__fkdict['fk_eckdatenid'],))
		self.__hrdfdb.connection.commit()
		curLookup.close()

	def determine_tripcount(self):
		"""Ermitteln und Schreiben der Anzahl Fahrten (Linien/Kategorie) pro Verwaltungsnummer - Taktdefinitionen mit eingeschlossen"""
		logger.info('ermitteln der Anzahl Fahrten (Linien/Kategorie) pro Verwaltung')

		sql_tripsLookup = "INSERT INTO HRDF.HRDF_TripCount_Operator_TAB (fk_eckdatenid, operationalno, lineno, categorycode, tripcount) "\
					"(SELECT fahrt.fk_eckdatenid, fahrt.operationalno, line.lineno, cat.categorycode, sum(coalesce(array_length(bit.bitfieldarray, 1), eckdaten.maxdays)*coalesce(cyclecount+1,1)) "\
					"   FROM hrdf.hrdf_fplanfahrt_tab fahrt "\
					"        inner join (SELECT id, validto + 1 - validfrom as maxdays FROM hrdf.hrdf_eckdaten_tab) eckdaten on fahrt.fk_eckdatenid = eckdaten.id "\
					"        LEFT OUTER JOIN hrdf.hrdf_fplanfahrtve_tab ve on fahrt.fk_eckdatenid = ve.fk_eckdatenid and fahrt.id = ve.fk_fplanfahrtid "\
					"        LEFT OUTER JOIN hrdf.hrdf_bitfeld_tab bit on ve.bitfieldno = bit.bitfieldno and ve.fk_eckdatenid = bit.fk_eckdatenid "\
					"        LEFT OUTER JOIN hrdf.hrdf_fplanfahrtl_tab line on line.fk_fplanfahrtid = fahrt.id and line.fk_eckdatenid = fahrt.fk_eckdatenid "\
					"        LEFT OUTER JOIN hrdf.hrdf_fplanfahrtg_tab cat on cat.fk_fplanfahrtid = fahrt.id and cat.fk_eckdatenid = fahrt.fk_eckdatenid "\
					"  WHERE fahrt.fk_eckdatenid = %s "\
					"  GROUP BY fahrt.fk_eckdatenid, fahrt.operationalno, line.lineno, cat.categorycode)"

		curLookup = self.__hrdfdb.connection.cursor()
		curLookup.execute(sql_tripsLookup, (self.__fkdict['fk_eckdatenid'],))
		self.__hrdfdb.connection.commit()
		curLookup.close()

	def read_bitfeld(self, filename):
		"""Lesen der Datei BITFELD"""
		logger.info('lesen und verarbeiten der Datei BITFELD')
		bitfeld_strIO = StringIO()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n','')
			bitfield = str(Bits(hex=line[7:]).bin)[2:-2]
			daycnt = (self.__eckdaten_validTo - self.__eckdaten_validFrom).days
			# build the array of dates from the bits that are set
			validDays = []
			i = 0
			while i <= daycnt:
				if bitfield[i] == "1":
					validDays.append(str(self.__eckdaten_validFrom + timedelta(days=i)))
				i += 1

			if len(validDays) == 0:
				validDaysString = "{}"
			else:
				validDaysString = "{'" + "','".join(map(str,validDays)) + "'}"

			bitfeld_strIO.write(self.__fkdict['fk_eckdatenid']+';'
										+line[:6]+';'
										+line[7:]+';'
										+bitfield+';'
										+validDaysString
										+'\n')
		bitfeld_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_BITFELD_TAB (fk_eckdatenid,bitfieldno,bitfield,bitfieldextend,bitfieldarray) FROM STDIN USING DELIMITERS ';' NULL AS ''",bitfeld_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		bitfeld_strIO.close()
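	# Note on the bitfield decoding above (annotation added here, not in the
	# original source): HRDF bitfields are hex-encoded and, per the format
	# convention, framed with two extra bits at the start and end, which is
	# presumably why the code strips them with [2:-2].  A made-up illustration:
	#   Bits(hex='CAB').bin   ->  '110010101011'
	#   '110010101011'[2:-2]  ->  '00101010'   (the actual day bits)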

	def read_bahnhof(self, filename):
		"""Lesen der Datei BAHNHOF"""
		logger.info('lesen und verarbeiten der Datei BAHNHOF')
		bahnhof_strIO = StringIO()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			stopname = ''
			stopnamelong = ''
			stopnameshort = ''
			stopnamealias = ''
			# the analysis does not yet take language information into account
			for tmpName in re.split(">", line[12:62].strip()):
				pos = tmpName.find("<")
				typeinfo = tmpName[pos:]
				name = tmpName[:pos].replace("$", "")
				for c in typeinfo[1:]:
					if c == "1": stopname = name[:30]
					if c == "2": stopnamelong = name[:50]
					if c == "3": stopnameshort = name
					if c == "4": stopnamealias = name

			bahnhof_strIO.write(self.__fkdict['fk_eckdatenid']+';'
										 +line[:7].strip()+';'
										 +line[8:11].strip()+';'
										 +stopname+';'
										 +stopnamelong+';'
										 +stopnameshort+';'
										 +stopnamealias
										+'\n')
		bahnhof_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_BAHNHOF_TAB (fk_eckdatenid,stopno,transportUnion,stopname,stopnamelong,stopnameshort,stopnamealias) FROM STDIN USING DELIMITERS ';' NULL AS ''", bahnhof_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		bahnhof_strIO.close()

	def read_gleis(self, filename):
		"""Lesen der Datei GLEIS"""
		logger.info('lesen und verarbeiten der Datei GLEIS')
		gleis_strIO = StringIO()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			gleis_strIO.write(self.__fkdict['fk_eckdatenid']+';'
										 +line[:7].strip()+';'
										 +line[8:13].strip()+';'
										 +line[14:20].strip()+';'
										 +line[21:29].strip()+';'
										 +line[30:34].strip()+';'
										 +line[35:41].strip()
										+'\n')
		gleis_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_GLEIS_TAB (fk_eckdatenid,stopno,tripno,operationalno,stoppointtext,stoppointtime,bitfieldno) FROM STDIN USING DELIMITERS ';' NULL AS ''", gleis_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		gleis_strIO.close()

	def read_richtung(self, filename):
		"""Lesen der Datei RICHTUNG"""
		logger.info('lesen und verarbeiten der Datei RICHTUNG')
		richtung_strIO = StringIO()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			richtung_strIO.write(self.__fkdict['fk_eckdatenid']+';'
										 +line[:7].strip()+';'
										 +line[8:59].strip()
										+'\n')
		richtung_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_RICHTUNG_TAB (fk_eckdatenid,directioncode, directiontext) FROM STDIN USING DELIMITERS ';' NULL AS ''", richtung_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		richtung_strIO.close()


	def read_zugart(self, filename):
		"""Lesen der Datei ZUGART"""
		logger.info('lesen und verarbeiten der Datei ZUGART')
		zugart_strIO = StringIO()
		zugartcategory_strIO = StringIO()
		zugartclass_strIO = StringIO()
		zugartoption_strIO = StringIO()

		languagecode = "--"
		bTextblock = False
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			# a line with the content "<text>" indicates that only the text entries in the various languages follow from here on
			if not bTextblock:
				# as long as that is not the case, the data keeps being processed as train types
				if line != '<text>':
					# the string consists of the following elements: code, product class, tariff group, output control, category description, surcharge, flag, category image name, category number
					zugart_strIO.write(self.__fkdict['fk_eckdatenid']+';'
											+line[:3].strip()+';'
											+line[4:6].strip()+';'
											+line[7:8]+';'
											+line[9:10]+';'
											+line[11:19].strip()+';'
											+line[20:21].strip()+';'
											+line[22:23]+';'
											+line[24:28].strip()+';'
											+line[30:33]+
											'\n')
				# as soon as the text entries begin, the data is written language-specifically into the respective buffer
				else:
					bTextblock = True
			elif line[0] == '<':
				languagecode = line[1:3].lower()
			elif line[:8] == 'category':
				zugartcategory_strIO.write(self.__fkdict['fk_eckdatenid']+';'+line[8:11]+';'+languagecode+';'+line[12:]+'\n')
			elif line[:6] == 'option':
				zugartoption_strIO.write(self.__fkdict['fk_eckdatenid']+';'+line[6:8]+';'+languagecode+';'+line[9:]+'\n')
			elif line[:5] == 'class':
				zugartclass_strIO.write(self.__fkdict['fk_eckdatenid']+';'+line[5:7]+';'+languagecode+';'+line[8:]+'\n')

		zugart_strIO.seek(0)
		zugartcategory_strIO.seek(0)
		zugartclass_strIO.seek(0)
		zugartoption_strIO.seek(0)

		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_ZUGART_TAB (fk_eckdatenid,categorycode,classno,tariffgroup,outputcontrol,categorydesc,extracharge,flags,categoryimage,categoryno) FROM STDIN USING DELIMITERS ';' NULL AS ''", zugart_strIO)
		cur.copy_expert("COPY HRDF_ZUGARTKategorie_TAB (fk_eckdatenid,categoryno,languagecode,categorytext) FROM STDIN USING DELIMITERS ';' NULL AS ''",zugartcategory_strIO)
		cur.copy_expert("COPY HRDF_ZUGARTKlasse_TAB (fk_eckdatenid,classno,languagecode,classtext) FROM STDIN USING DELIMITERS ';' NULL AS ''",zugartclass_strIO)
		cur.copy_expert("COPY HRDF_ZUGARTOption_TAB (fk_eckdatenid,optionno,languagecode,optiontext) FROM STDIN USING DELIMITERS ';' NULL AS ''",zugartoption_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		zugart_strIO.close()
		zugartcategory_strIO.close()
		zugartclass_strIO.close()
		zugartoption_strIO.close()
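	# For reference (annotation derived from the COPY column list above), the
	# fixed-column slices in read_zugart map to:
	#   line[:3]    categorycode    line[4:6]   classno
	#   line[7:8]   tariffgroup     line[9:10]  outputcontrol
	#   line[11:19] categorydesc    line[20:21] extracharge
	#   line[22:23] flags           line[24:28] categoryimage
	#   line[30:33] categoryno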


	def read_attribut(self, filename, sprache):
		"""Lesen der Datei ATTRIBUT
			ATTRIBUT aus INFO+ ist sprachabhängig in dem Format ATTRIBUT_XX
		"""
		if sprache.strip():	# wird keine Sprache übergeben, dann bleibt der Dateiname unverändert
			filename = filename + '_' + sprache
		else:
			sprache = '--'
		logger.info('lesen und verarbeiten der Datei '+filename)

		# first pass to determine the output attribute codes for partial and complete routes
		targetcodes = {}
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			if line[:1] == '#':
				targetcodes[line[2:4].strip()] = [line[5:7].strip(), line[8:10].strip()]

		attribute_strIO = StringIO()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			if line[:1] != '#':
				attrcode = line[:2].strip()
				if attrcode in targetcodes:
					attrcode_section = targetcodes[attrcode][0]
					attrcode_complete = targetcodes[attrcode][1]
				else:
					attrcode_section = ""
					attrcode_complete = ""

				attribute_strIO.write(self.__fkdict['fk_eckdatenid']+';'
											+attrcode+';'
											+sprache.lower()+';'
											+line[3:4]+';'
											+line[5:8]+';'
											+line[9:11]+';'
											+line[12:-1].replace(';','\\;')+';'
											+attrcode_section+';'
											+attrcode_complete
											+'\n')
		
		attribute_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_ATTRIBUT_TAB (fk_eckdatenid,attributecode,languagecode,stopcontext,outputprio,outputpriosort,attributetext,outputforsection,outputforcomplete) FROM STDIN USING DELIMITERS ';' NULL AS ''", attribute_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		attribute_strIO.close()


	def read_infotext(self, filename, sprache):
		"""Lesen der Datei INFOTEXT
			INFOTEXT aus INFO+ ist sprachabhängig in dem Format INFOTEXT_XX
		"""
		if sprache.strip():	# wird keine Sprache übergeben, dann bleibt der Dateiname unverändert
			filename = filename + '_' + sprache
		else:
			sprache = '--'
		logger.info('lesen und verarbeiten der Datei '+filename)

		infotext_strIO = StringIO()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			infotext_strIO.write(self.__fkdict['fk_eckdatenid']+';'
										+line[:7]+';'
										+sprache.lower()+';'
										+line[8:].replace(';','\\;')
										+'\n')
		
		infotext_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_INFOTEXT_TAB (fk_eckdatenid,infotextno,languagecode,infotext) FROM STDIN USING DELIMITERS ';' NULL AS ''", infotext_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		infotext_strIO.close()

	def read_durchbi(self, filename):
		"""Lesen der Datei DURCHBI"""
		logger.info('lesen und verarbeiten der Datei DURCHBI')
		durchbi_strIO = StringIO()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			durchbi_strIO.write(self.__fkdict['fk_eckdatenid']+';'
										 +line[:5]+';'
										 +line[6:12].strip()+';'
										 +line[13:20]+';'
										 +line[21:26]+';'
										 +line[27:33].strip()+';'
										 +line[34:40]+';'
										 +line[41:48]+';'
										 +line[49:51].strip()+';'
										 +line[53:].strip()
										+'\n')
		durchbi_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_DURCHBI_TAB (fk_eckdatenid,tripno1,operationalno1,laststopno1,tripno2,operationalno2,bitfieldno,firststopno2,attribute,comment) FROM STDIN USING DELIMITERS ';' NULL AS ''", durchbi_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		durchbi_strIO.close()

	def read_bfkoordgeo(self, filename):
		"""Lesen der Datei BFKOORD_GEO"""
		logger.info('lesen und verarbeiten der Datei BFKOORD_GEO')
		bfkoordgeo_strIO = StringIO()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			bfkoordgeo_strIO.write(self.__fkdict['fk_eckdatenid']+';'
										 +line[:7]+';'
										 +line[8:18]+';'
										 +line[19:29]+';'
										 +line[30:36]
										+'\n')
		bfkoordgeo_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_BFKOORD_TAB (fk_eckdatenid,stopno,longitude_geo,latitude_geo,altitude_geo) FROM STDIN USING DELIMITERS ';' NULL AS ''", bfkoordgeo_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		bfkoordgeo_strIO.close()

	def read_umsteigb(self, filename):
		"""Lesen der Datei UMSTEIGB"""
		logger.info('lesen und verarbeiten der Datei UMSTEIGB')
		umsteigb_strIO = StringIO()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			umsteigb_strIO.write(self.__fkdict['fk_eckdatenid']+';'
										 +line[:7]+';'
										 +line[8:10]+';'
										 +line[11:13]
										+'\n')
		umsteigb_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_UMSTEIGB_TAB (fk_eckdatenid,stopno,transfertime1,transfertime2) FROM STDIN USING DELIMITERS ';' NULL AS ''", umsteigb_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		umsteigb_strIO.close()

	def read_bfprios(self, filename):
		"""Lesen der Datei BFPRIOS"""
		logger.info('lesen und verarbeiten der Datei BFPRIOS')
		bfprios_strIO = StringIO()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			bfprios_strIO.write(self.__fkdict['fk_eckdatenid']+';'
										 +line[:7]+';'
										 +line[8:10]
										+'\n')
		bfprios_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_BFPRIOS_TAB (fk_eckdatenid,stopno,transferprio) FROM STDIN USING DELIMITERS ';' NULL AS ''", bfprios_strIO)
		self.__hrdfdb.connection.commit()
		cur.close()
		bfprios_strIO.close()

	def read_metabhf(self, filename):
		"""Lesen der Datei METABHF"""
		logger.info('lesen und verarbeiten der Datei METABHF')
		metabhfUB_strIO = StringIO()
		metabhfHG_strIO = StringIO()

		previousUB = False
		strStopNoFrom = None
		strStopNoTo = None
		strTransferTimeMin = None
		strTransferTimeSec = None
		strAttributeCodes = ""
		attributeCodeList = list()
		stopMemberList = list()
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n', '')
			
			if line[:1] == '*':
				# attribute line of a transfer relation
				if line[1:2] == 'A':
					# at the moment we are only interested in the A lines (attribute code)
					attributeCodeList.append(line[3:5].strip())

			elif line[7:8] == ':':
				# stop group line
				# Is there still an open transfer relation? It has to be saved first
				if (previousUB):
					if (len(attributeCodeList) > 0): strAttributeCodes = "{'" + "','".join(map(str,attributeCodeList)) + "'}"
					metabhfUB_strIO.write(self.__fkdict['fk_eckdatenid']+';'
												 +strStopNoFrom+';'
												 +strStopNoTo+';'
												 +strTransferTimeMin+';'
												 +strTransferTimeSec+';'
												 +strAttributeCodes
												+'\n')
					# reset the attribute code list
					attributeCodeList.clear()
					strAttributeCodes = ""
					previousUB = False

				# handle the stop group line
				# the first stop starts at character 10, followed by any number of stops, each 7 characters long
				stopMemberList.clear()
				strStopMember = ""
				nextMemberStart = 10
				while (nextMemberStart < len(line)):
					stopMemberList.append(line[nextMemberStart:nextMemberStart+7])
					nextMemberStart = nextMemberStart+9
				if (len(stopMemberList) > 0): strStopMember = "{" + ",".join(map(str,stopMemberList)) + "}"
				metabhfHG_strIO.write(self.__fkdict['fk_eckdatenid']+';'
								+line[10:17]+';'
								+strStopMember
							+'\n')

			else:
				# first line of a transfer relation
				if (previousUB):
					# save the transfer relation
					if (len(attributeCodeList) > 0): strAttributeCodes = "{'" + "','".join(map(str,attributeCodeList)) + "'}"
					metabhfUB_strIO.write(self.__fkdict['fk_eckdatenid']+';'
												 +strStopNoFrom+';'
												 +strStopNoTo+';'
												 +strTransferTimeMin+';'
												 +strTransferTimeSec+';'
												 +strAttributeCodes
												+'\n')

				# reset the attribute code list
				attributeCodeList.clear()
				strAttributeCodes = ""
				strStopNoFrom = line[:7]
				strStopNoTo = line[8:15]
				strTransferTimeMin = line[16:19]
				strTransferTimeSec = line[20:22]
				previousUB = True

		metabhfUB_strIO.seek(0)
		curUB = self.__hrdfdb.connection.cursor()
		curUB.copy_expert("COPY HRDF_METABHF_TAB (fk_eckdatenid,stopnofrom,stopnoto,transfertimemin,transfertimesec,attributecode) FROM STDIN USING DELIMITERS ';' NULL AS ''", metabhfUB_strIO)
		self.__hrdfdb.connection.commit()
		curUB.close()
		metabhfUB_strIO.close()

		metabhfHG_strIO.seek(0)
		curHG = self.__hrdfdb.connection.cursor()
		curHG.copy_expert("COPY HRDF_METABHFGRUPPE_TAB (fk_eckdatenid,stopgroupno,stopmember) FROM STDIN USING DELIMITERS ';' NULL AS ''", metabhfHG_strIO)
		self.__hrdfdb.connection.commit()
		curHG.close()
		metabhfHG_strIO.close()


	def save_currentFplanFahrt(self):
		"""Funkion speichert die aktuellen Werte zu einer FPLAN-Fahrt"""
		self.__fplanFahrtG_strIO.seek(0)
		self.__fplanFahrtAVE_strIO.seek(0)
		self.__fplanFahrtLauf_strIO.seek(0)
		cur = self.__hrdfdb.connection.cursor()
		cur.copy_expert("COPY HRDF_FPLANFahrtG_TAB (fk_eckdatenid,fk_fplanfahrtid,categorycode,fromStop,toStop,deptimeFrom,arrtimeTo) FROM STDIN USING DELIMITERS ';' NULL AS ''", self.__fplanFahrtG_strIO)
		cur.copy_expert("COPY HRDF_FPLANFahrtVE_TAB (fk_eckdatenid,fk_fplanfahrtid,fromStop,toStop,bitfieldno,deptimeFrom,arrtimeTo) FROM STDIN USING DELIMITERS ';' NULL AS ''", self.__fplanFahrtAVE_strIO)
		cur.copy_expert("COPY HRDF_FPLANFahrtLaufweg_TAB (fk_eckdatenid,fk_fplanfahrtid,stopno,stopname,sequenceno,arrtime,deptime,tripno,operationalno,ontripsign) FROM STDIN USING DELIMITERS ';' NULL AS ''", self.__fplanFahrtLauf_strIO)
		if self.__fplanFahrtA_strIO.tell() > 0:
			self.__fplanFahrtA_strIO.seek(0)
			cur.copy_expert("COPY HRDF_FPLANFahrtA_TAB (fk_eckdatenid,fk_fplanfahrtid,attributecode,fromStop,toStop,bitfieldno,deptimeFrom,arrtimeTo) FROM STDIN USING DELIMITERS ';' NULL AS ''", self.__fplanFahrtA_strIO)
		if self.__fplanFahrtR_strIO.tell() > 0:
			self.__fplanFahrtR_strIO.seek(0)
			cur.copy_expert("COPY HRDF_FPLANFahrtR_TAB (fk_eckdatenid,fk_fplanfahrtid,directionshort,directioncode,fromStop,toStop,deptimeFrom,arrtimeTo) FROM STDIN USING DELIMITERS ';' NULL AS ''", self.__fplanFahrtR_strIO)
		if self.__fplanFahrtI_strIO.tell() > 0:
			self.__fplanFahrtI_strIO.seek(0)
			cur.copy_expert("COPY HRDF_FPLANFahrtI_TAB (fk_eckdatenid,fk_fplanfahrtid,infotextcode,infotextno,fromStop,toStop,bitfieldno,deptimeFrom,arrtimeTo) FROM STDIN USING DELIMITERS ';' NULL AS ''", self.__fplanFahrtI_strIO)
		if self.__fplanFahrtL_strIO.tell() > 0:
			self.__fplanFahrtL_strIO.seek(0)
			cur.copy_expert("COPY HRDF_FPLANFahrtL_TAB (fk_eckdatenid,fk_fplanfahrtid,lineno,fromStop,toStop,deptimeFrom,arrtimeTo) FROM STDIN USING DELIMITERS ';' NULL AS ''", self.__fplanFahrtL_strIO)
		if self.__fplanFahrtSH_strIO.tell() > 0:
			self.__fplanFahrtSH_strIO.seek(0)
			cur.copy_expert("COPY HRDF_FPLANFahrtSH_TAB (fk_eckdatenid,fk_fplanfahrtid,stop,bitfieldno,deptimeFrom) FROM STDIN USING DELIMITERS ';' NULL AS ''", self.__fplanFahrtSH_strIO)
		if self.__fplanFahrtC_strIO.tell() > 0:
			self.__fplanFahrtC_strIO.seek(0)
			cur.copy_expert("COPY HRDF_FPLANFahrtC_TAB (fk_eckdatenid,fk_fplanfahrtid,checkintime,checkouttime,fromStop,toStop,deptimeFrom,arrtimeTo) FROM STDIN USING DELIMITERS ';' NULL AS ''", self.__fplanFahrtC_strIO)
		if self.__fplanFahrtGR_strIO.tell() > 0:
			self.__fplanFahrtGR_strIO.seek(0)
			cur.copy_expert("COPY HRDF_FPLANFahrtGR_TAB (fk_eckdatenid,fk_fplanfahrtid,borderStop,prevStop,nextStop,deptimePrev,arrtimeNext) FROM STDIN USING DELIMITERS ';' NULL AS ''", self.__fplanFahrtGR_strIO)
		
		self.__hrdfdb.connection.commit()
		# closing the StringIO objects and creating new ones is supposed to perform better than truncate(0)
		self.__fplanFahrtG_strIO.close()
		self.__fplanFahrtAVE_strIO.close()
		self.__fplanFahrtLauf_strIO.close()
		self.__fplanFahrtA_strIO.close()
		self.__fplanFahrtR_strIO.close()
		self.__fplanFahrtI_strIO.close()
		self.__fplanFahrtL_strIO.close()
		self.__fplanFahrtSH_strIO.close()
		self.__fplanFahrtC_strIO.close()
		self.__fplanFahrtGR_strIO.close()

		self.__fplanFahrtG_strIO = StringIO()
		self.__fplanFahrtAVE_strIO = StringIO()
		self.__fplanFahrtLauf_strIO = StringIO()
		self.__fplanFahrtA_strIO = StringIO()
		self.__fplanFahrtR_strIO = StringIO()
		self.__fplanFahrtI_strIO = StringIO()
		self.__fplanFahrtL_strIO = StringIO()
		self.__fplanFahrtSH_strIO = StringIO()
		self.__fplanFahrtC_strIO = StringIO()
		self.__fplanFahrtGR_strIO = StringIO()

		self.__fkdict["fk_fplanfahrtid"] = -1

	def read_fplan(self, filename):
		"""Lesen der Datei FPLAN"""
		logger.info('lesen und verarbeiten der Datei FPLAN')
		curIns = self.__hrdfdb.connection.cursor()

		bDataLinesRead = False
		iSequenceCnt = 0
		for line in fileinput.input(filename, openhook=self.__hrdfzip.open):
			line = line.decode(self.__charset).replace('\r\n','')

			if line[:1] == '*':
				if bDataLinesRead:
					# data lines have been read; we are now at the next train, so first write the previous one to the DB
					self.save_currentFplanFahrt()
					bDataLinesRead = False
					iSequenceCnt = 0

				# attribute lines (!! check the longest attribute identifier first, then continue in descending length !!)
				if line[:5] == "*A VE":
					if self.__AVE_type == "*Z" or self.__AVE_type == "*T":
						self.__fplanFahrtAVE_strIO.write(self.__fkdict["fk_eckdatenid"]+';'
													  +self.__fkdict["fk_fplanfahrtid"]+';'
													  +line[6:13].strip()+';'
													  +line[14:21].strip()+';'
													  +line[22:28].strip()+';'
													  +line[29:35].strip()+';'
													  +line[36:42].strip()+
													  '\n')
					else:
						logger.warning("*A VE-Zeile gehört zu nicht unterstützter "+self.__AVE_type+"-Zeile und wird nicht verarbeitet")
						
				elif line[:4] == "*KWZ":
					self.__AVE_type = line[:4]
					logger.warning("Zeile "+line[:4]+" wird derzeit nicht unterstützt")

				elif line[:3] == "*KW" or line[:3] == "*TT":
					self.__AVE_type = line[:3]
					logger.warning("Zeile "+line[:3]+" wird derzeit nicht unterstützt")

				elif line[:3] == "*SH":
					self.__fplanFahrtSH_strIO.write(self.__fkdict["fk_eckdatenid"]+';'
													+self.__fkdict["fk_fplanfahrtid"]+';'
													+line[4:11].strip()+';'
													+line[12:18].strip()+';'
													+line[19:25].strip()+
													'\n')

				elif line[:3] == "*GR":
					self.__fplanFahrtGR_strIO.write(self.__fkdict["fk_eckdatenid"]+';'
													+self.__fkdict["fk_fplanfahrtid"]+';'
													+line[4:11].strip()+';'
													+line[12:19].strip()+';'
													+line[20:27].strip()+';'
													+line[28:34].strip()+';'
													+line[35:41].strip()+
													'\n')


				elif line[:2] == "*B" or line[:2] == "*E":
					logger.warning("Zeile "+line[:2]+" wird derzeit nicht unterstützt")

				elif line[:2] == "*Z":
					self.__AVE_type = line[:2]
					sql_string = "INSERT INTO HRDF_FPLANFahrt_TAB (fk_eckdatenid,triptype,tripno,operationalno,tripversion,cyclecount,cycletimemin) VALUES (%s,%s,%s,%s,%s,%s,%s) RETURNING id;"
					cyclecount = line[22:25].strip()
					cycletimemin = line[26:29].strip()
					if not cyclecount:
						cyclecount = None
					if not cycletimemin:
						cycletimemin = None
					curIns.execute(sql_string, (self.__fkdict['fk_eckdatenid'], line[1:2], line[3:8], line[9:15], line[18:21], cyclecount, cycletimemin))
					self.__fkdict["fk_fplanfahrtid"] = str(curIns.fetchone()[0])

				elif line[:2] == "*T":
					self.__AVE_type = line[:2]
					sql_string = "INSERT INTO HRDF_FPLANFahrt_TAB (fk_eckdatenid,triptype,tripno,operationalno,triptimemin,cycletimesec) VALUES (%s,%s,%s,%s,%s,%s,%s) RETURNING id;"
					triptimemin = line[16:20].strip()
					cycletimesec = line[21:25].strip()
					if not triptimemin:
						triptimemin = None
					if not cycletimesec:
						cycletimesec = None
					curIns.execute(sql_string, (self.__fkdict['fk_eckdatenid'], line[1:2], line[3:8], line[9:15], triptimemin, cycletimesec))
					self.__fkdict["fk_fplanfahrtid"] = str(curIns.fetchone()[0])

				elif line[:2] == "*C":
					checkinTime = ''
					checkoutTime = ''
					if line[:3] == "*CI":
						checkinTime = line[4:8].strip()
					if line[:3] == "*CO":
						checkoutTime = line[4:8].strip()
					self.__fplanFahrtC_strIO.write(self.__fkdict["fk_eckdatenid"]+';'
													+self.__fkdict["fk_fplanfahrtid"]+';'
													+checkinTime+';'
													+checkoutTime+';'
													+line[9:16].strip()+';'
													+line[17:24].strip()+';'
													+line[25:31].strip()+';'
													+line[32:38].strip()+
													'\n')

				elif line[:2] == "*G":
					self.__fplanFahrtG_strIO.write(self.__fkdict["fk_eckdatenid"]+';'
													+self.__fkdict["fk_fplanfahrtid"]+';'
													+line[3:6].strip()+';'
													+line[7:14].strip()+';'
													+line[15:22].strip()+';'
													+line[23:29].strip()+';'
													+line[30:36].strip()+
													'\n')
				elif line[:2] == "*A":
					self.__fplanFahrtA_strIO.write(self.__fkdict["fk_eckdatenid"]+';'
													+self.__fkdict["fk_fplanfahrtid"]+';'
													+line[3:5].strip()+';'
													+line[6:13].strip()+';'
													+line[14:21].strip()+';'
													+line[22:28].strip()+';'
													+line[29:35].strip()+';'
													+line[36:42].strip()+
													'\n')

				elif line[:2] == "*R":
					self.__fplanFahrtR_strIO.write(self.__fkdict["fk_eckdatenid"]+';'
													+self.__fkdict["fk_fplanfahrtid"]+';'
													+line[3:4].strip()+';'
													+line[5:12].strip()+';'
													+line[13:20].strip()+';'
													+line[21:28].strip()+';'
													+line[29:35].strip()+';'
													+line[36:42].strip()+
													'\n')
				elif line[:2] == "*I":
					self.__fplanFahrtI_strIO.write(self.__fkdict["fk_eckdatenid"]+';'
													+self.__fkdict["fk_fplanfahrtid"]+';'
													+line[3:5].strip()+';'
													+line[29:36].strip()+';'
													+line[6:13].strip()+';'
													+line[14:21].strip()+';'
													+line[22:28].strip()+';'
													+line[37:43].strip()+';'
													+line[44:50].strip()+
													'\n')

				elif line[:2] == "*L":
					self.__fplanFahrtL_strIO.write(self.__fkdict["fk_eckdatenid"]+';'
													+self.__fkdict["fk_fplanfahrtid"]+';'
													+line[3:11].strip()+';'
													+line[12:19].strip()+';'
													+line[20:27].strip()+';'
													+line[28:34].strip()+';'
													+line[35:41].strip()+
													'\n')

			else:
				# route (Laufweg) lines
				bDataLinesRead = True
				if (line[:1] == "+"):
					logger.warning("Laufwegsdaten mit Regionen werden nicht unterstützt")
				else:
					self.__fplanFahrtLauf_strIO.write(self.__fkdict["fk_eckdatenid"]+';'
														+self.__fkdict["fk_fplanfahrtid"]+';'
														+line[:7].strip()+';'
														+line[8:29].strip()+';'
														+str(iSequenceCnt)+';'
														+line[29:35].strip()+';'
														+line[36:42].strip()+';'
														+line[43:48].strip()+';'
														+line[49:55].strip()+';'
														+line[56:57].strip()+
														'\n')
					iSequenceCnt += 1


		# after the loop, the last train still has to be saved
		if bDataLinesRead:
			self.save_currentFplanFahrt()
			bDataLinesRead = False
			iSequenceCnt = 0

		curIns.close()
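The whole class leans on one pattern: accumulate semicolon-delimited rows in a StringIO, rewind it, and bulk-load it with psycopg2's copy_expert. A stripped-down sketch of that pattern (connection string, table, and rows are made up for illustration):

from io import StringIO

import psycopg2  # assumed driver; copy_expert is its COPY API

conn = psycopg2.connect("dbname=hrdf")  # hypothetical DSN
buf = StringIO()
buf.write("8503000;Zuerich HB\n")       # made-up rows
buf.write("8507000;Bern\n")
buf.seek(0)                             # rewind so COPY reads from the start

cur = conn.cursor()
cur.copy_expert(
    "COPY stops (stopno, stopname) FROM STDIN WITH (FORMAT text, DELIMITER ';', NULL '')",
    buf)
conn.commit()
cur.close()
buf.close()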
Esempio n. 55
0
 def __enter__(self):
     self._stdout = sys.stdout
     sys.stdout = self._stringio = StringIO()
     return self
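The matching __exit__ is not part of this excerpt; a common completion of this stdout-capture pattern looks like the sketch below (an assumption, not necessarily the original class):

import sys
from io import StringIO

class Capturing:
    """Context manager that captures everything written to sys.stdout."""

    def __enter__(self):
        self._stdout = sys.stdout
        sys.stdout = self._stringio = StringIO()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout = self._stdout  # always restore the real stdout
        self.captured = self._stringio.getvalue()
        return False               # do not suppress exceptions

# usage:
# with Capturing() as cap:
#     print("hello")
# assert cap.captured == "hello\n"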
Esempio n. 57
0
 def __init__(self):
     self.__wbuf = StringIO()
Esempio n. 58
0
 def setUp(self):
     self.stdout = sys.stdout
     sys.stdout = StringIO()
     self.dir = tempfile.mkdtemp()
     self.old_migrations_path = migrator.MIGRATIONS_PATH
     migrator.MIGRATIONS_PATH = Path(self.dir)
Esempio n. 59
0
def test_expire_zero_assignments():
    out = StringIO()
    expire_assignments(stdout=out)
    assert "Expired 0 assignments." in out.getvalue()
Esempio n. 60
0
 def flush(self):
     msg = self.__wbuf.getvalue()
     self.__wbuf = StringIO()
     self.sendMessage(msg)
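Together with the __init__ from example 57, flush() implies a simple write-buffered transport: writes accumulate in the StringIO and flush() ships them as a single message. A hedged sketch of the full pattern (sendMessage is assumed to be injected; the original class may differ):

from io import StringIO

class BufferedMessageWriter:
    """Buffers writes in memory and sends them as one message on flush()."""

    def __init__(self, send_message):
        self.__wbuf = StringIO()
        self.sendMessage = send_message

    def write(self, data):
        self.__wbuf.write(data)

    def flush(self):
        msg = self.__wbuf.getvalue()
        self.__wbuf = StringIO()  # start a fresh buffer before sending
        self.sendMessage(msg)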