Example No. 1
def get_migration_status(**options):
    # type: (**Any) -> str
    verbosity = options.get('verbosity', 1)

    for app_config in apps.get_app_configs():
        if module_has_submodule(app_config.module, "management"):
            import_module('.management', app_config.name)

    app_labels = [options['app_label']] if options.get('app_label') else None
    db = options.get('database', DEFAULT_DB_ALIAS)
    out = StringIO()
    call_command(
        'showmigrations',
        '--list',
        app_labels=app_labels,
        database=db,
        no_color=options.get('no_color', False),
        settings=options.get('settings', os.environ['DJANGO_SETTINGS_MODULE']),
        stdout=out,
        traceback=options.get('traceback', True),
        verbosity=verbosity,
    )
    connections.close_all()
    out.seek(0)
    output = out.read()
    return re.sub(r'\x1b\[(1|0)m', '', output)
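The closing re.sub strips ANSI bold/reset escape sequences from the captured output. A small, self-contained demonstration of the same pattern (the sample data is hypothetical):

import re

raw = '\x1b[1mmyapp\x1b[0m\n \x1b[1m[X] 0001_initial\x1b[0m'
clean = re.sub(r'\x1b\[(1|0)m', '', raw)  # drop bold (1) and reset (0) codes
assert clean == 'myapp\n [X] 0001_initial'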
Example No. 2
    def export_info(self, context, info):
        data = StringIO()
        out = zipfile.ZipFile(data, 'w')

        for d in info:
            path = d.get('path', d.get('id'))
            filename = os.path.basename(path)
            dir_path = os.path.dirname(path)

            # Write data
            _d = d.get('data', '')
            fpath = os.path.join(dir_path, filename)
            out.writestr(fpath, _d)

            metadata = d.copy()
            for name in ('data', 'path'):
                if name in metadata:
                    del metadata[name]
            # Write metadata
            metadata_path = os.path.join(dir_path, '.metadata')
            fpath = os.path.join(metadata_path, filename)
            _d = str(self.atxml_template(info=metadata))
            out.writestr(fpath, _d)

        out.close()
        data.seek(0)
        return data
Example No. 3
def test_num_failures_by_type(capfd):
    # Test that the number of failures by status type is correctly calculated.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run some tests with different statuses: 3 passes, 1 timeout
    logger.suite_start(["t1", "t2", "t3", "t4"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.test_start("t2")
    logger.test_end("t2", status="PASS", expected="PASS")
    logger.test_start("t3")
    logger.test_end("t3", status="PASS", expected="FAIL")
    logger.test_start("t4")
    logger.test_end("t4", status="TIMEOUT", expected="CRASH")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    num_failures_by_type = json.load(output)["num_failures_by_type"]

    # We expect 3 passes and 1 timeout, nothing else.
    assert sorted(num_failures_by_type.keys()) == ["PASS", "TIMEOUT"]
    assert num_failures_by_type["PASS"] == 3
    assert num_failures_by_type["TIMEOUT"] == 1
Example No. 4
def test_chromium_required_fields(capfd):
    # Test that the test results contain a handful of required fields.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # output a bunch of stuff
    logger.suite_start(["test-id-1"], run_info={}, time=123)
    logger.test_start("test-id-1")
    logger.test_end("test-id-1", status="PASS", expected="PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)

    # Check for existence of required fields
    assert "interrupted" in output_obj
    assert "path_delimiter" in output_obj
    assert "version" in output_obj
    assert "num_failures_by_type" in output_obj
    assert "tests" in output_obj

    test_obj = output_obj["tests"]["test-id-1"]
    assert "actual" in test_obj
    assert "expected" in test_obj
Example No. 5
            def _decompress_xz(filename):
                """Eumlates an option function in read mode for xz.

                See the comment in _compress_xz for more information.

                This function tries to emulate the lzma module as much as
                possible.

                """
                if not filename.endswith('.xz'):
                    filename = '{}.xz'.format(filename)

                try:
                    with open(os.devnull, 'w') as null:
                        string = subprocess.check_output(
                            ['xz', '--decompress', '--stdout', filename],
                            stderr=null)
                except OSError as e:
                    if e.errno == errno.ENOENT:
                        raise exceptions.PiglitFatalError(
                            'No xz binary available')
                    raise

                # We need a file-like object, so the contents must be placed in
                # a StringIO object.
                io = StringIO()
                io.write(string)
                io.seek(0)

                yield io

                io.close()
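This generator targets Python 2, where subprocess.check_output returns a str that fits in a StringIO. On Python 3 check_output returns bytes, so io.BytesIO is the natural buffer; a minimal sketch under that assumption (the helper name is illustrative):

import io
import subprocess

def _decompress_xz_py3(filename):
    if not filename.endswith('.xz'):
        filename = '{}.xz'.format(filename)
    # check_output returns bytes on Python 3, so wrap them in a BytesIO
    data = subprocess.check_output(
        ['xz', '--decompress', '--stdout', filename],
        stderr=subprocess.DEVNULL)
    return io.BytesIO(data)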
Example No. 6
    def test_error(self):
        sio = StringIO()
        sio.write("bogus")
        sio.seek(0)
        r = flow.FlowReader(sio)
        tutils.raises(flow.FlowReadError, list, r.stream())

        f = flow.FlowReadError("foo")
        assert f.strerror == "foo"
Example No. 7
def colorize(source):
    """
    write colorized version to "[filename].py.html"
    """
    html = StringIO()
    Parser(source, html).format(None, None)
    html.flush()
    html.seek(0)
    return html.read()
Example No. 8
    def test_error(self):
        sio = StringIO()
        sio.write("bogus")
        sio.seek(0)
        r = flow.FlowReader(sio)
        tutils.raises(FlowReadException, list, r.stream())

        f = FlowReadException("foo")
        assert str(f) == "foo"
Example No. 9
    def test_versioncheck(self):
        f = tutils.tflow()
        d = f.get_state()
        d["version"] = (0, 0)
        sio = StringIO()
        tnetstring.dump(d, sio)
        sio.seek(0)

        r = flow.FlowReader(sio)
        tutils.raises("version", list, r.stream())
Example No. 10
    def test_copy_from_cols(self):
        curs = self.conn.cursor()
        f = StringIO()
        for i in xrange(10):
            f.write("%s\n" % (i,))

        f.seek(0)
        curs.copy_from(MinimalRead(f), "tcopy", columns=['id'])

        curs.execute("select * from tcopy order by id")
        self.assertEqual([(i, None) for i in range(10)], curs.fetchall())
Example No. 11
    def _treader(self):
        sio = StringIO()
        w = flow.FlowWriter(sio)
        for i in range(3):
            f = tutils.tflow(resp=True)
            w.add(f)
        for i in range(3):
            f = tutils.tflow(err=True)
            w.add(f)

        sio.seek(0)
        return flow.FlowReader(sio)
Example No. 12
     def read(self, html=None, code='@'):
         '''Get the content of the clipboard.
         
 html: BOOL. Whether to get the raw HTML code of the formatted text on clipboard.
 code: encoding of the text on clipboard.'''
         if (not html) and (not code):
             return super().read()
         else:
             stream = StringIO()
             clipb.clipboard_to_stream(stream, mode=None, code=code, null=None, html=html)
             stream.seek(0)
             return stream.read()
Example No. 13
    def _copy_to(self, curs, srec):
        f = StringIO()
        curs.copy_to(MinimalWrite(f), "tcopy")

        f.seek(0)
        ntests = 0
        for line in f:
            n, s = line.split()
            if int(n) < len(string.ascii_letters):
                self.assertEqual(s, string.ascii_letters[int(n)] * srec)
                ntests += 1

        self.assertEqual(ntests, len(string.ascii_letters))
Example No. 14
    def test_copy_from_cols_err(self):
        curs = self.conn.cursor()
        f = StringIO()
        for i in xrange(10):
            f.write("%s\n" % (i,))

        f.seek(0)
        def cols():
            raise ZeroDivisionError()
            yield 'id'

        self.assertRaises(ZeroDivisionError,
            curs.copy_from, MinimalRead(f), "tcopy", columns=cols())
Example No. 15
    def test_roundtrip(self):
        sio = StringIO()
        f = tutils.tflow()
        f.request.content = "".join(chr(i) for i in range(255))
        w = flow.FlowWriter(sio)
        w.add(f)

        sio.seek(0)
        r = flow.FlowReader(sio)
        l = list(r.stream())
        assert len(l) == 1

        f2 = l[0]
        assert f2.get_state() == f.get_state()
        assert f2.request == f.request
Example No. 16
    def test_filter(self):
        sio = StringIO()
        fl = filt.parse("~c 200")
        w = flow.FilteredFlowWriter(sio, fl)

        f = tutils.tflow(resp=True)
        f.response.status_code = 200
        w.add(f)

        f = tutils.tflow(resp=True)
        f.response.status_code = 201
        w.add(f)

        sio.seek(0)
        r = flow.FlowReader(sio)
        assert len(list(r.stream()))
Example No. 17
    def _copy_from(self, curs, nrecs, srec, copykw):
        f = StringIO()
        for i, c in izip(xrange(nrecs), cycle(string.ascii_letters)):
            l = c * srec
            f.write("%s\t%s\n" % (i,l))

        f.seek(0)
        curs.copy_from(MinimalRead(f), "tcopy", **copykw)

        curs.execute("select count(*) from tcopy")
        self.assertEqual(nrecs, curs.fetchone()[0])

        curs.execute("select data from tcopy where id < %s order by id",
                (len(string.ascii_letters),))
        for i, (l,) in enumerate(curs):
            self.assertEqual(l, string.ascii_letters[i] * srec)
Example No. 18
    def test_copy_no_column_limit(self):
        cols = [ "c%050d" % i for i in range(200) ]

        curs = self.conn.cursor()
        curs.execute('CREATE TEMPORARY TABLE manycols (%s)' % ',\n'.join(
            [ "%s int" % c for c in cols]))
        curs.execute("INSERT INTO manycols DEFAULT VALUES")

        f = StringIO()
        curs.copy_to(f, "manycols", columns = cols)
        f.seek(0)
        self.assertEqual(f.read().split(), ['\\N'] * len(cols))

        f.seek(0)
        curs.copy_from(f, "manycols", columns = cols)
        curs.execute("select count(*) from manycols;")
        self.assertEqual(curs.fetchone()[0], 2)
Example No. 19
def test_reftest_screenshots(capfd):
    # reftest_screenshots, if present, should be plumbed into artifacts.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a reftest with reftest_screenshots.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1",
                    status="FAIL",
                    expected="PASS",
                    extra={
                        "reftest_screenshots": [
                            {
                                "url": "foo.html",
                                "hash": "HASH1",
                                "screenshot": "DATA1"
                            },
                            "!=",
                            {
                                "url": "foo-ref.html",
                                "hash": "HASH2",
                                "screenshot": "DATA2"
                            },
                        ]
                    })
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    assert test_obj["artifacts"]["screenshots"] == [
        "foo.html: DATA1",
        "foo-ref.html: DATA2",
    ]
Example No. 20
File: excel.py Project: mitcho/clld
    def render(self, ctx, req):
        if not xlwt:
            return ''  # pragma: no cover
        wb = xlwt.Workbook()
        ws = wb.add_sheet(ctx.__unicode__())

        for i, col in enumerate(self.header(ctx, req)):
            ws.write(0, i, col)

        for j, item in enumerate(ctx.get_query(limit=1000)):
            for i, col in enumerate(self.row(ctx, req, item)):
                ws.write(j + 1, i, col)

        out = StringIO()
        wb.save(out)
        out.seek(0)
        return out.read()
Example No. 21
def test_unexpected_subtest_pass(capfd):
    # A subtest that unexpectedly passes is considered a failure condition.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with a subtest that is expected to fail but passes.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="PASS", expected="FAIL", subtest="t1_a",
                       message="t1_a_message")

    # Since the subtest behaviour is unexpected, it's considered a failure, so
    # the test should be added to the set of tests with subtest failures.
    assert "t1" in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    t1_artifacts = test_obj["artifacts"]
    assert t1_artifacts["log"] == "[PASS expected FAIL] t1_a: t1_a_message\n"
    assert t1_artifacts["wpt_subtest_failure"] == "true"
    # Since the subtest status is unexpected, we fail the test. But we report
    # wpt_actual_status as an artifact
    assert t1_artifacts["wpt_actual_status"] == "PASS"
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "PASS"
    # Also ensure that the formatter cleaned up its internal state
    assert "t1" not in formatter.tests_with_subtest_fails
Example No. 22
 def table_io(self,fmt,obj=False,
     missing=False,ignore=True,required=False,extra=False):
     from ekklesia.data import objects_equal
     columns = ('a','b','c')
     coltypes = {'a':int,'b':int,'c':(int,)}
     t = DataTable(columns,coltypes=coltypes,fileformat=fmt,ignore=ignore,required=required)
     if fmt in ('json','jsondict'): f = {}
     else: f = StringIO()
     t.open(f,'w')
     if obj:
         t.write(Obj(a=0))
         t.write(Obj(a=1))
     elif missing:
         try:
             t.write({'a':0,'b':2})
             assert ignore
         except:
             assert not ignore
             return
     elif extra:
         try:
             t.write({'a':0,'b':2,'c':[3,4],'d':4})
             assert ignore
         except:
             assert not ignore
             return
     else:
         for i in range(3): t.write({'a':i,'b':2,'c':[3,4]})
     if fmt in ('json','jsondict'):
         f2 = t.close()
         assert f is f2
     else:
         t.close()
         f.seek(0)
     t = DataTable(columns,coltypes=coltypes,fileformat=fmt)
     t.open(f,'r')
     i = 0
     for row in t:
         if obj:
             assert objects_equal(Obj(**row),Obj(a=i))
         else:
             if missing: assert row == {'a':0,'b':2,'c':[]}
             else: assert row == {'a':i,'b':2,'c':[3,4]}
         i+=1
     t.close()
Example No. 23
 def old(self, no_reduce_db):
     try:
         if no_reduce_db:
             touch('./dummydb')
         fileobj = StringIO()
         self.write_pdb(fileobj)
         fileobj.seek(0)
         reduce = os.path.join(os.getenv('LIBTBX_BUILD'), 'reduce', 'exe',
                               'reduce')
         if not os.path.exists(reduce):
             reduce = 'phenix.reduce'
         cmd = [reduce, '-BUILD', '-NUC', '-NOFLIP', '-DB ./dummydb', '-']
         if no_reduce_db:
             process = subprocess.Popen([
                 reduce, '-BUILD', '-NUC', '-NOFLIP', '-DB ./dummydb', '-'
             ],
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
         else:
             process = subprocess.Popen(
                 [reduce, '-BUILD', '-NUC', '-NOFLIP', '-'],
                 stdin=subprocess.PIPE,
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE)
         out, err = process.communicate(str.encode(fileobj.read()))
         out = out.decode()
         err = err.decode()
         if process.wait():
             logger.error("REDUCE returned non-zero exit status: "
                          "See reduce_info.log for more details")
         # print out the reduce log even if it worked
         with open('reduce_info.log', 'w') as fh:
             fh.write(err)
         pdbh = StringIO(out)
         # not using load_file since it does not read StringIO
         print('-' * 80)
         print(pdbh)
         print('-' * 80)
         self.parm = parmed.read_PDB(pdbh)
     finally:
         fileobj.close()
         if no_reduce_db:
             os.unlink('./dummydb')
     return self
Example No. 24
     def write(self, content, html=None, table=None, code=None):
         '''Set clipboard content.
         
 content: the object which will be put onto the clipboard.
 html: BOOL. Whether the content is rich text coded in HTML. Default: False
 table: BOOL. Whether the content is a table. Default: False
 code: string. The encoding of the content text.'''
         if table:
             from wavesynlib.languagecenter.html.utils import iterable_to_table
             html = True
             content = iterable_to_table(content)          
         if (not html) and (not code) :
             super().write(content)
         else:
             stream = StringIO()
             stream.write(content)
             stream.seek(0)                    
             clipb.stream_to_clipboard(stream, mode=None, code=code, tee=None, null=None, html=html)
Example No. 25
     def write(self, content, html=None, table=None, code=None):
         '''Set clipboard content.
         
 content: the object which will be put onto the clipboard.
 html: BOOL. Whether the content is rich text coded in HTML. Default: False
 table: BOOL. Whether the content is a table. Default: False
 code: string. The encoding of the content text.'''
         if table:
             from wavesynlib.languagecenter.html.utils import iterable_to_table
             html = True
             content = iterable_to_table(content)          
         if (not html) and (not code) :
             super().write(content)
         else:
             stream = StringIO()
             stream.write(content)
             stream.seek(0)                    
             clipb.stream_to_clipboard(stream, mode=None, code=code, tee=None, null=None, html=html)
Example No. 26
def parse(text, encoding='utf8'):
    """Parse the querystring into a normalized form."""
    # Initialize the query object.
    query = Query()

    # Decode the text if we got bytes.
    if isinstance(text, six.binary_type):
        text = text.decode(encoding)

    # Iterate through the characters in the query string; one-by-one
    # in order to perform one-pass parsing.
    stream = StringIO()

    for character in text:

        # We want to stop reading the query and pass it off to someone
        # when we reach a logical or grouping operator.
        if character in (constants.LOGICAL_AND, constants.LOGICAL_OR):

            if not stream.tell():
                # There is no content in the stream; a logical operator
                # was found out of place.
                raise ValueError('Found `{}` out of place'.format(
                    character))

            # Parse the segment up till the combinator
            segment = parse_segment(stream.getvalue(), character)
            query.segments.append(segment)
            stream.truncate(0)
            stream.seek(0)

        else:
            # This isn't a special character, just roll with it.
            stream.write(character)

    # TODO: Throw some nonsense here if the query string ended with a
    # & or ;, because that makes no sense.

    if stream.tell():
        # Append the remainder of the query string.
        query.segments.append(parse_segment(stream.getvalue()))

    # Return the constructed query object.
    return query
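The truncate(0)/seek(0) pair in the loop is deliberate: on Python 3's io.StringIO, truncate only shrinks the buffer and leaves the position untouched, so a subsequent write would pad the gap with NUL characters. A short demonstration of why both calls are needed:

from io import StringIO

buf = StringIO()
buf.write('abc')
buf.truncate(0)   # size is now 0, but the position is still 3
buf.write('x')
assert buf.getvalue() == '\x00\x00\x00x'  # the gap was NUL-padded

buf.truncate(0)
buf.seek(0)       # reset the position as well
buf.write('x')
assert buf.getvalue() == 'x'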
Example No. 27
    def add_hydrogen(self, no_reduce_db=False):
        ''' Use reduce program to add hydrogen

        Parameters
        ----------
        obj: file object or parmed.Structure or its derived class

        Returns
        -------
        parm : parmed.Structure

        Requires
        --------
        reduce
        '''
        def touch(fname, times=None):
            with open(fname, 'a'):
                os.utime(fname, times)

        from mmtbx.utils import run_reduce_with_timeout

        parameters = '-BUILD -NUC -NOFLIP'
        if no_reduce_db:
            touch('./dummydb')
            parameters += ' -DB ./dummydb'
        parameters += ' -'

        fileobj = StringIO()
        self.write_pdb(fileobj)
        fileobj.seek(0)

        reduce_out = run_reduce_with_timeout(
            parameters=parameters,
            stdin_lines=fileobj.read(),
            stdout_splitlines=False,
        )
        assert reduce_out.return_code == 0

        pdbh = StringIO()
        pdbh.write(reduce_out.stdout_buffer)
        pdbh.seek(0)
        self.parm = parmed.read_PDB(pdbh)
        return self
Example No. 28
def do_http(method, url, body=""):
    if isinstance(body, str):
        body = StringIO(body)
    elif isinstance(body, unicode):
        raise TypeError("do_http body must be a bytestring, not unicode")
    else:
        # We must give a Content-Length header to twisted.web, otherwise it
        # seems to get a zero-length file. I suspect that "chunked-encoding"
        # may fix this.
        assert body.tell
        assert body.seek
        assert body.read
    scheme, host, port, path = parse_url(url)
    if scheme == "http":
        c = httplib.HTTPConnection(host, port)
    elif scheme == "https":
        c = httplib.HTTPSConnection(host, port)
    else:
        raise ValueError("unknown scheme '%s', need http or https" % scheme)
    c.putrequest(method, path)
    c.putheader("Hostname", host)
    c.putheader("User-Agent", allmydata.__full_version__ + " (tahoe-client)")
    c.putheader("Accept", "text/plain, application/octet-stream")
    c.putheader("Connection", "close")

    old = body.tell()
    body.seek(0, os.SEEK_END)
    length = body.tell()
    body.seek(old)
    c.putheader("Content-Length", str(length))

    try:
        c.endheaders()
    except socket_error as err:
        return BadResponse(url, err)

    while True:
        data = body.read(8192)
        if not data:
            break
        c.send(data)

    return c.getresponse()
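The tell/seek(0, os.SEEK_END)/seek(old) sequence above sizes the body without consuming it. Factored out as a standalone helper (hypothetical, not part of the original module), it works for any seekable stream:

import os

def stream_length(body):
    # remember the position, measure by seeking to the end, then restore
    old = body.tell()
    body.seek(0, os.SEEK_END)
    length = body.tell()
    body.seek(old)
    return length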
Example No. 29
def do_http(method, url, body=""):
    if isinstance(body, str):
        body = StringIO(body)
    elif isinstance(body, unicode):
        raise TypeError("do_http body must be a bytestring, not unicode")
    else:
        # We must give a Content-Length header to twisted.web, otherwise it
        # seems to get a zero-length file. I suspect that "chunked-encoding"
        # may fix this.
        assert body.tell
        assert body.seek
        assert body.read
    scheme, host, port, path = parse_url(url)
    if scheme == "http":
        c = httplib.HTTPConnection(host, port)
    elif scheme == "https":
        c = httplib.HTTPSConnection(host, port)
    else:
        raise ValueError("unknown scheme '%s', need http or https" % scheme)
    c.putrequest(method, path)
    c.putheader("Hostname", host)
    c.putheader("User-Agent", allmydata.__full_version__ + " (tahoe-client)")
    c.putheader("Accept", "text/plain, application/octet-stream")
    c.putheader("Connection", "close")

    old = body.tell()
    body.seek(0, os.SEEK_END)
    length = body.tell()
    body.seek(old)
    c.putheader("Content-Length", str(length))

    try:
        c.endheaders()
    except socket_error as err:
        return BadResponse(url, err)

    while True:
        data = body.read(8192)
        if not data:
            break
        c.send(data)

    return c.getresponse()
Example No. 30
    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error).  In either case, the headers are sent, making the
        interface the same as for send_head().

        """
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        list.sort(key=lambda a: a.lower())
        f = StringIO()
        displaypath = cgi.escape(urllib.unquote(self.path))
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write("<html>\n<title>Directory listing for %s</title>\n" %
                displaypath)
        f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
        f.write("<hr>\n<ul>\n")
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
                # Note: a link to a directory displays with @ and links with /
            f.write('<li><a href="%s">%s</a>\n' %
                    (urllib.quote(linkname), cgi.escape(displayname)))
        f.write("</ul>\n<hr>\n</body>\n</html>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        encoding = sys.getfilesystemencoding()
        self.send_header("Content-type", "text/html; charset=%s" % encoding)
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f
Example No. 31
def is_balanced_statement(lines):
    """
    Checks if the lines have balanced parens, brackets, curlies and strings

    Args:
        lines (list): list of strings

    Returns:
        bool: False if the statement is not balanced

    Doctest:
        >>> assert is_balanced_statement(['print(foobar)'])
        >>> assert is_balanced_statement(['foo = bar']) is True
        >>> assert is_balanced_statement(['foo = (']) is False
        >>> assert is_balanced_statement(['foo = (', "')(')"]) is True
        >>> assert is_balanced_statement(
        ...     ['foo = (', "'''", ")]'''", ')']) is True
        >>> #assert is_balanced_statement(['foo = ']) is False
        >>> #assert is_balanced_statement(['== ']) is False

    """
    from six.moves import cStringIO as StringIO
    import tokenize

    block = "\n".join(lines)
    if six.PY2:
        block = block.encode("utf8")
    stream = StringIO()
    stream.write(block)
    stream.seek(0)
    try:
        for t in tokenize.generate_tokens(stream.readline):
            pass
    except tokenize.TokenError as ex:
        message = ex.args[0]
        if message.startswith("EOF in multi-line"):
            return False
        raise
    else:
        # Note: trying to use ast.parse(block) will not work
        # here because it breaks in try, except, else
        return True
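The final comment deserves an illustration: ast.parse rejects a fragment such as a try/except clause with a missing body even though every delimiter is balanced, which is exactly the situation tokenize tolerates. A tiny demonstration:

import ast

try:
    ast.parse('try:\n    pass\nexcept Exception:')
except SyntaxError:
    pass  # delimiters balanced, but not a complete statement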
Example No. 32
def test_subtest_messages(capfd):
    # Tests accumulation of test output

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run two tests with subtest messages. The subtest name should be included
    # in the output. We should also tolerate missing messages.
    logger.suite_start(["t1", "t2"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="FAIL", subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1", status="PASS", subtest="t1_b",
                       message="t1_b_message")
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.test_start("t2")
    # Currently, subtests with empty messages will be ignored
    logger.test_status("t2", status="PASS", subtest="t2_a")
    # A test-level message will also be appended
    logger.test_end("t2", status="TIMEOUT", expected="PASS",
                    message="t2_message")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    t1_log = output_json["tests"]["t1"]["artifacts"]["log"]
    assert t1_log == "subtest_failure\n" \
                     "[FAIL expected PASS] t1_a: t1_a_message\n" \
                     "[PASS] t1_b: t1_b_message\n"

    t2_log = output_json["tests"]["t2"]["artifacts"]["log"]
    assert t2_log == "[TIMEOUT expected PASS] t2_message\n"
Example No. 33
    def dumps(self, indent=4):
        """
        Writes the dataset out to the json format

        Example:
            >>> from coco_wrangler.coco_api import *
            >>> dataset = demo_coco_data()
            >>> self = CocoDataset(dataset, tag='demo')
            >>> text = self.dumps()
            >>> print(text)
            >>> self2 = CocoDataset(json.loads(text), tag='demo2')
            >>> assert self2.dataset == self.dataset
            >>> assert self2.dataset is not self.dataset
        """
        from six.moves import cStringIO as StringIO
        fp = StringIO()
        self.dump(fp, indent=indent)
        fp.seek(0)
        text = fp.read()
        return text
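Since a StringIO keeps its contents in memory, the seek(0)/read() pair can be collapsed into a single getvalue() call, which returns the whole buffer regardless of the current position. A sketch of an equivalent standalone helper (json.dump stands in for self.dump):

from six.moves import cStringIO as StringIO
import json

def dumps_sketch(obj, indent=4):
    fp = StringIO()
    json.dump(obj, fp, indent=indent)
    return fp.getvalue()  # no seek(0)/read() round trip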
Example No. 34
    def marshall(self, obj, marshaller):
        REQUEST = obj.REQUEST
        RESPONSE = REQUEST.RESPONSE
        ddata = marshaller.marshall(obj, REQUEST=REQUEST,
                                    RESPONSE=RESPONSE)
        if hasattr(aq_base(obj), 'marshall_hook') \
           and obj.marshall_hook:
            ddata = obj.marshall_hook(ddata)

        content_type, length, data = ddata

        if isinstance(data, six.string_types):
            return StringIO(data)

        s = StringIO()
        while data is not None:
            s.write(data.data)
            data = data.next
        s.seek(0)
        return s
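The while loop walks a linked list of chunks, in the style of Zope's Pdata objects, each exposing .data and a .next pointer. A self-contained sketch of the same traversal (Chunk is a hypothetical stand-in):

from io import StringIO

class Chunk(object):
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

chain = Chunk('hello ', Chunk('world'))
s = StringIO()
node = chain
while node is not None:
    s.write(node.data)
    node = node.next
assert s.getvalue() == 'hello world'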
Example No. 35
def test_subtest_messages(capfd):
    # Tests accumulation of test output

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run two tests with subtest messages. The subtest name should be included
    # in the output. We should also tolerate missing messages.
    logger.suite_start(["t1", "t2"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="FAIL", subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1", status="PASS", subtest="t1_b",
                       message="t1_b_message")
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.test_start("t2")
    # Currently, subtests with empty messages will be ignored
    logger.test_status("t2", status="PASS", subtest="t2_a")
    # A test-level message will also be appended
    logger.test_end("t2", status="TIMEOUT", expected="PASS",
                    message="t2_message")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    t1_log = output_json["tests"]["t1"]["artifacts"]["log"]
    assert t1_log == "[FAIL] t1_a: t1_a_message\n" \
                     "[PASS] t1_b: t1_b_message\n"

    t2_log = output_json["tests"]["t2"]["artifacts"]["log"]
    assert t2_log == "[TIMEOUT] t2_message\n"
Example No. 36
    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error).  In either case, the headers are sent, making the
        interface the same as for send_head().

        """
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        list.sort(key=lambda a: a.lower())
        f = StringIO()
        displaypath = cgi.escape(urllib.unquote(self.path))
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
        f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
        f.write("<hr>\n<ul>\n")
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
                # Note: a link to a directory displays with @ and links with /
            f.write('<li><a href="%s">%s</a>\n'
                    % (urllib.quote(linkname), cgi.escape(displayname)))
        f.write("</ul>\n<hr>\n</body>\n</html>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        encoding = sys.getfilesystemencoding()
        self.send_header("Content-type", "text/html; charset=%s" % encoding)
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f
Example No. 37
def make_string_buffer(string):
    """Returns a readable/writeable file-like object, containing string.

    >>> f = make_string_buffer(u'text')
    >>> print(f.read())
    text

    If the string is a bytestring, then the returned object will
    operate in binary mode.

    >>> f = make_string_buffer(b'bytes')
    >>> f.read() == b'bytes'
    True
    """
    if isinstance(string, six.text_type):
        buf = StringIO()
    else:
        buf = io.BytesIO()
    buf.write(string)
    buf.seek(0)
    return buf
Example No. 38
def make_string_buffer(string):
    """Returns a readable/writeable file-like object, containing string.

    >>> f = make_string_buffer(u'text')
    >>> print(f.read())
    text

    If the string is a bytestring, then the returned object will
    operate in binary mode.

    >>> f = make_string_buffer(b'bytes')
    >>> f.read() == b'bytes'
    True
    """
    if isinstance(string, six.text_type):
        buf = StringIO()
    else:
        buf = io.BytesIO()
    buf.write(string)
    buf.seek(0)
    return buf
Example No. 39
    def test_copy_from_with_fks(self):
        curs = self.conn.cursor()
        curs.execute('''
        CREATE TEMPORARY TABLE tcopy_ref (
            id serial,
            FOREIGN KEY(id) REFERENCES tcopy
        )
        ''')

        f = StringIO()
        f.write("%s\t%s\n" % (1, 'b'))
        f.seek(0)

        curs.copy_from(MinimalRead(f), "tcopy")

        g = StringIO()
        g.write("%s\n" % (2))
        g.seek(0)

        self.assertRaises(exceptions.OperationalError, curs.copy_from,
                          MinimalRead(g), "tcopy_ref")
Example No. 40
    def test_copy_from_with_fks(self):
        curs = self.conn.cursor()
        curs.execute('''
        CREATE TEMPORARY TABLE tcopy_ref (
            id serial,
            FOREIGN KEY(id) REFERENCES tcopy
        )
        ''')

        f = StringIO()
        f.write("%s\t%s\n" % (1, 'b'))
        f.seek(0)

        curs.copy_from(MinimalRead(f), "tcopy")

        g = StringIO()
        g.write("%s\n" % (2))
        g.seek(0)

        self.assertRaises(exceptions.OperationalError,
            curs.copy_from, MinimalRead(g), "tcopy_ref")
Example No. 41
def get_concatenated_fd(filepaths):
    """
    Generates a :class:`cStringIO` instance containing the content of
    the files at the provided ``filepaths``.

    The returned cStringIO instance is a file-like object, holding in
    memory the concatenated content of the source files, included in
    the same ordering as in the provided list.

    :param filepaths: List of filepaths
    :return: StringIO / cStringIO object with the contents of the files
    """
    fds = [open(x, 'rb') for x in filepaths]
    data = StringIO()
    while fds:
        data_read = fds[0].read(-1)
        data.write(data_read)
        fds[0].close()
        fds.pop(0)
    data.seek(0)
    return data
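As written this relies on Python 2, where bytes read from a file opened in 'rb' mode can be written to a (c)StringIO. On Python 3 the buffer must be a BytesIO; a sketch of an equivalent variant under that assumption:

import io

def get_concatenated_fd_py3(filepaths):
    data = io.BytesIO()  # binary-mode reads yield bytes
    for path in filepaths:
        with open(path, 'rb') as fd:
            data.write(fd.read())
    data.seek(0)
    return data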
Example No. 42
def test_chromium_test_name_trie(capfd):
    # Ensure test names are broken into directories and stored in a trie with
    # test results at the leaves.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(
        handlers.StreamHandler(output, chromium.ChromiumFormatter()))

    # output a bunch of stuff
    logger.suite_start(["/foo/bar/test-id-1", "/foo/test-id-2"],
                       run_info={},
                       time=123)
    logger.test_start("/foo/bar/test-id-1")
    logger.test_end("/foo/bar/test-id-1", status="TIMEOUT", expected="FAIL")
    logger.test_start("/foo/test-id-2")
    logger.test_end("/foo/test-id-2", status="ERROR", expected="TIMEOUT")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)

    # Ensure that the test names are broken up by directory name and that the
    # results are stored at the leaves.
    test_obj = output_obj["tests"]["foo"]["bar"]["test-id-1"]
    assert test_obj["actual"] == "TIMEOUT"
    assert test_obj["expected"] == "FAIL"

    test_obj = output_obj["tests"]["foo"]["test-id-2"]
    # The ERROR status is mapped to FAIL for Chromium
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "TIMEOUT"
Example No. 43
 def table_io(self,ids,fmt,encrypt=False,sign=False):
     from ekklesia.data import objects_equal
     columns = ('a','b','c')
     coltypes = {'a':int,'b':int,'c':int}
     t = DataTable(columns,coltypes=coltypes,gpg=ids['id1'],fileformat=fmt,required=False)
     if fmt in ('json','jsondict'): f = {}
     else: f = StringIO()
     t.open(f,'w',[receiver] if encrypt else False,sign)
     for i in range(3): t.write({'a':i,'b':2,'c':3})
     if fmt in ('json','jsondict'):
         f2 = t.close()
         assert f is f2
     else:
         t.close()
         f.seek(0)
     t = DataTable(columns,coltypes=coltypes,gpg=ids['id2'],fileformat=fmt)
     t.open(f,'r',encrypt,sender if sign else False)
     i = 0
     for row in t:
         assert row == {'a':i,'b':2,'c':3}
         i+=1
     t.close()
Example No. 44
def test_tar():
    lucky = Ska.ftp.FTP('lucky')
    lucky.cd('/{}'.format(USER))
    files_before = lucky.ls()

    tar_put_fh = StringIO()
    with tarfile.open(mode='w:gz', fileobj=tar_put_fh) as tar_put:
        tar_put.add('test_tar.py')
        tar_put.add('test_tar.py')

    obj = {'cmd': 'test',
           'tar': tar_put_fh.getvalue()}

    obj_fh_put = StringIO()
    pickle.dump(obj, obj_fh_put, protocol=-1)

    remotefile = 'test.pkl'
    obj_fh_put.seek(0)
    lucky.storbinary('STOR ' + remotefile, obj_fh_put)

    obj_fh_get = StringIO()
    lucky.retrbinary('RETR ' + remotefile, obj_fh_get.write)

    lucky.delete(remotefile)
    files_after = lucky.ls()

    assert files_before == files_after
    assert obj_fh_put.getvalue() == obj_fh_get.getvalue()

    lucky.close()

    obj_fh_get.seek(0)
    obj_get = pickle.load(obj_fh_get)
    tar = tarfile.open(mode='r', fileobj=StringIO(obj_get['tar']))

    assert tar.getnames() == ['test_tar.py', 'test_tar.py']

    test_basic = tar.extractfile('test_tar.py').read()
    assert test_basic == open('test_tar.py').read()
Example No. 45
def background_src_api(rowid=None, fresh=False, **kwargs):
    r"""
    Returns the image file of annot <aid>

    Example:
        >>> # xdoctest: +REQUIRES(--slow)
        >>> # xdoctest: +REQUIRES(--web-tests)
        >>> from wbia.web.app import *  # NOQA
        >>> import wbia
        >>> with wbia.opendb_with_web('testdb1') as (ibs, client):
        ...     resp = client.get('/api/background/src/1/')
        >>> print(resp.data)
        b'\xff\xd8\xff\xe0\x00\x10JFIF...

    RESTful:
        Method: GET
        URL:    /api/annot/src/<rowid>/
    """
    from PIL import Image  # NOQA

    ibs = current_app.ibs
    gpath = ibs.get_annot_probchip_fpath(rowid)

    # Load image
    assert gpath is not None, 'image path should not be None'
    image = vt.imread(gpath, orient='auto')
    image = appf.resize_via_web_parameters(image)
    image = image[:, :, ::-1]

    # Encode image
    image_pil = Image.fromarray(image)
    if six.PY2:
        img_io = StringIO()
    else:
        img_io = BytesIO()
    image_pil.save(img_io, 'JPEG', quality=100)
    img_io.seek(0)
    return send_file(img_io, mimetype='image/jpeg')
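On Python 3 the BytesIO branch is the one that runs: PIL emits encoded JPEG bytes, which a text-mode StringIO cannot hold. A minimal standalone version of the encode step (the 4x4 image is a placeholder):

import io
from PIL import Image

image_pil = Image.new('RGB', (4, 4))
img_io = io.BytesIO()  # JPEG data is bytes, so use a binary buffer
image_pil.save(img_io, 'JPEG', quality=100)
img_io.seek(0)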
Example No. 46
    def pt_upload(self, REQUEST, file=''):
        """Replace the document with the text in file."""
        if SUPPORTS_WEBDAV_LOCKS and self.wl_isLocked():
            raise ResourceLockedError("File is locked via WebDAV")

        if not isinstance(file, basestring):
            if not file: raise ValueError('File not specified')
            file = file.read()

        if file.startswith(
                "PK"):  # FIXME: this condition is probably not enough
            # this is a OOo zip file, extract the content
            builder = OOoBuilder(file)
            attached_files_list = [
                n for n in builder.getNameList()
                if n.startswith(self._OLE_directory_prefix)
                or n.startswith('Pictures') or n == 'META-INF/manifest.xml'
            ]
            # destroy a possibly pre-existing OLE document set
            if self.OLE_documents_zipstring:
                self.OLE_documents_zipstring = None
            # create a zip archive and store it
            if attached_files_list:
                memory_file = StringIO()
                try:
                    zf = ZipFile(memory_file,
                                 mode='w',
                                 compression=ZIP_DEFLATED)
                except RuntimeError:
                    zf = ZipFile(memory_file, mode='w')
                for attached_file in attached_files_list:
                    zf.writestr(attached_file, builder.extract(attached_file))
                zf.close()
                memory_file.seek(0)
                self.OLE_documents_zipstring = memory_file.read()
            self.content_type = builder.getMimeType()
            file = builder.prepareContentXml(self.ooo_xml_file_id)
        return ZopePageTemplate.pt_upload(self, REQUEST, file)
Example No. 47
def test_wptreport_run_info_optional(capfd):
    """per the mozlog docs, run_info is optional; check we work without it"""
    # setup the logger
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))

    # output a bunch of stuff
    logger.suite_start(["test-id-1"])  # no run_info arg!
    logger.test_start("test-id-1")
    logger.test_end("test-id-1", "PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)
    assert "run_info" not in output_obj or output_obj["run_info"] == {}
Example No. 48
def extract_comments(source):
    """
    Returns the text in each comment in a block of python code.
    Uses tokenize to account for quotations.

    CommandLine:
        python -m xdoctest.static_analysis extract_comments

    Example:
        >>> from xdoctest import utils
        >>> source = utils.codeblock(
        >>>    '''
               # comment 1
               a = '# not a comment'  # comment 2
               c = 3
               ''')
        >>> comments = list(extract_comments(source))
        >>> assert comments == ['# comment 1', '# comment 2']
        >>> comments = list(extract_comments(source.splitlines()))
        >>> assert comments == ['# comment 1', '# comment 2']
    """
    if not isinstance(source, six.string_types):
        source = '\n'.join(source)
    if six.PY2:
        try:
            source = source.encode('utf8')
        except Exception:
            pass
    stream = StringIO()
    stream.write(source)
    stream.seek(0)
    try:
        for t in tokenize.generate_tokens(stream.readline):
            if t[0] == tokenize.COMMENT:
                yield t[1]
    except tokenize.TokenError as ex:
        pass
Example No. 49
def test_flaky_test_unexpected(capfd):
    # Check that a flaky test with multiple possible statuses is seen as
    # unexpected if its actual status is NOT one of the possible ones.

    # set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test that is known to be flaky
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1",
                    status="ERROR",
                    expected="OK",
                    known_intermittent=["TIMEOUT"])
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The test's statuses are all mapped, changing ERROR->FAIL and OK->PASS
    assert test_obj["actual"] == "FAIL"
    # All the possible statuses are concatenated together into expected.
    assert test_obj["expected"] == "PASS TIMEOUT"
    # ...this is a regression and unexpected because the actual status is not
    # one of the expected ones
    assert test_obj["is_regression"] is True
    assert test_obj["is_unexpected"] is True
Example No. 50
def test_wptreport_lone_surrogate_ucs2(capfd):
    # Since UCS4 is a superset of UCS2 we can meaningfully test the UCS2 code on a
    # UCS4 build, but not the reverse. However UCS2 is harder to handle and UCS4 is
    # the commonest (and sensible) configuration, so that's OK.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))

    with mock.patch.object(wptreport,
                           'surrogate_replacement',
                           wptreport.SurrogateReplacementUcs2()):
        # output a bunch of stuff
        logger.suite_start(["test-id-1"])  # no run_info arg!
        logger.test_start("test-id-1")
        logger.test_status("test-id-1",
                           subtest=u"Name with surrogate\uD800",
                           status="FAIL",
                           message=u"\U0001F601 \uDE0A\uD83D \uD83D\uDE0A")
        logger.test_end("test-id-1",
                        status="PASS",
                        message=u"\uDE0A\uD83D \uD83D\uDE0A \U0001F601")
        logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)
    test = output_obj["results"][0]
    assert test["message"] == u"U+de0aU+d83d \U0001f60a \U0001F601"
    subtest = test["subtests"][0]
    assert subtest["name"] == u"Name with surrogateU+d800"
    assert subtest["message"] == u"\U0001F601 U+de0aU+d83d \U0001f60a"
Example No. 51
def finfindr_passport_src(aid=None, ibs=None, **kwargs):
    from six.moves import cStringIO as StringIO
    from io import BytesIO
    from PIL import Image  # NOQA
    from flask import current_app, send_file
    from wbia.web import appfuncs as appf
    import six

    if ibs is None:
        ibs = current_app.ibs

    aid = int(aid)
    aid_list = [aid]
    passport_paths = ibs.depc_annot.get('FinfindrPassport',
                                        aid_list,
                                        'image',
                                        read_extern=False,
                                        ensure=True)
    passport_path = passport_paths[0]

    # Load image
    assert passport_paths is not None, 'passport path should not be None'
    image = vt.imread(passport_path, orient='auto')
    image = appf.resize_via_web_parameters(image)
    image = image[:, :, ::-1]

    # Encode image
    image_pil = Image.fromarray(image)
    if six.PY2:
        img_io = StringIO()
    else:
        img_io = BytesIO()
    image_pil.save(img_io, 'JPEG', quality=100)
    img_io.seek(0)

    return send_file(img_io, mimetype='image/jpeg')
Example No. 52
def background_src_api(rowid=None, fresh=False, **kwargs):
    r"""
    Returns the image file of annot <aid>

    Example:
        >>> # WEB_DOCTEST
        >>> from wbia.web.app import *  # NOQA
        >>> import wbia
        >>> with wbia.opendb_bg_web('testdb1', start_job_queue=False, managed=True) as web_ibs:
        ...     resp = web_ibs.send_wbia_request('/api/background/src/1/', type_='get', json=False)
        >>> print(resp)

    RESTful:
        Method: GET
        URL:    /api/annot/src/<rowid>/
    """
    from PIL import Image  # NOQA

    ibs = current_app.ibs
    gpath = ibs.get_annot_probchip_fpath(rowid)

    # Load image
    assert gpath is not None, 'image path should not be None'
    image = vt.imread(gpath, orient='auto')
    image = appf.resize_via_web_parameters(image)
    image = image[:, :, ::-1]

    # Encode image
    image_pil = Image.fromarray(image)
    if six.PY2:
        img_io = StringIO()
    else:
        img_io = BytesIO()
    image_pil.save(img_io, 'JPEG', quality=100)
    img_io.seek(0)
    return send_file(img_io, mimetype='image/jpeg')
Example No. 53
class PMRequestListener(object):
    """ Daemon process that responds to requests """
    def __init__(self, config, buildroot):
        self.config = config
        self.buildroot = buildroot
        self.rundir = buildroot.make_chroot_path(RUNDIR)
        self.socket_path = os.path.join(self.rundir, SOCKET_NAME)
        self.executed_commands = []
        # util.do cannot return output when the command fails, so we need to
        # capture its logging
        self.log_buffer = StringIO()
        self.log = logging.getLogger("mockbuild.plugin.pm_request")
        self.log.level = logging.DEBUG
        self.log.addFilter(OutputFilter())
        self.log.propagate = False
        self.log.addHandler(logging.StreamHandler(self.log_buffer))

    def prepare_socket(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(self.socket_path)
        except (socket.error, OSError):
            try:
                os.unlink(self.socket_path)
            except OSError:
                pass
        else:
            # there's another process listening
            sys.exit(0)

        util.mkdirIfAbsent(self.rundir)
        # Don't allow regular users to access the socket as they may not be in
        # the mock group
        os.chown(self.rundir, self.buildroot.chrootuid,
                 self.buildroot.chrootgid)
        os.chmod(self.rundir, 0o770)
        sock.bind(self.socket_path)
        os.chown(self.socket_path, self.buildroot.chrootuid,
                 self.buildroot.chrootgid)
        return sock

    def listen(self):
        sock = self.prepare_socket()
        sock.listen(MAX_CONNECTIONS)
        while True:
            try:
                connection, _ = sock.accept()
                try:
                    line = connection.makefile().readline()
                    command = shlex.split(line)
                    # pylint:disable=E1101
                    if command == ["!LOG_EXECUTED"]:
                        connection.sendall('\n'.join(
                            self.executed_commands).encode())
                    elif command:
                        success, out = self.execute_command(command)
                        connection.sendall(b"ok\n" if success else b"nok\n")
                        connection.sendall(out.encode())
                        if success:
                            self.executed_commands.append(line.strip())
                finally:
                    connection.close()
            except socket.error:
                continue

    def execute_command(self, command):
        try:
            self.buildroot.pkg_manager.execute(*command,
                                               printOutput=False,
                                               logger=self.log,
                                               returnOutput=False,
                                               pty=False,
                                               raiseExc=True)
            success = True
        except Error:
            success = False
        out = self.log_buffer.getvalue()
        self.log_buffer.seek(0)
        self.log_buffer.truncate()
        return success, out
Example No. 54
class SeekableLzopFile(Iterator):
    """
    Filelike object supporting read-only semi-random access to lzop compressed
    files for which an offset table has been generated.
    """
    def __init__(self, filename, table_filename, block_cache_size=0, **kwargs):
        self.filename = filename
        self.table_filename = table_filename
        self.init_table()
        self.file = open(self.filename, "r")
        self.dirty = True
        self.at_eof = False
        self.file_pos = 0
        self.current_block_index = -1
        self.current_block = None
        if block_cache_size > 0:
            self.cache = lrucache.LRUCache(block_cache_size)
        else:
            self.cache = None

    def init_table(self):
        self.block_size = None
        self.block_info = []
        # Position of corresponding block in compressed file (in bytes)
        for line in open(self.table_filename):
            fields = line.split()
            if fields[0] == "s":
                self.block_size = int(fields[1])
            if fields[0] == "o":
                offset = int(fields[1])
                compressed_size = int(fields[2])
                size = int(fields[3])
                self.block_info.append((offset, compressed_size, size))
        self.nblocks = len(self.block_info)

    def close(self):
        self.file.close()

    def load_block(self, index):
        if self.cache is not None and index in self.cache:
            return self.cache[index]
        else:
            offset, csize, size = self.block_info[index]
            # Get the block of compressed data
            self.file.seek(offset)
            data = self.file.read(csize)
            # The python-lzo module expects a one-byte header plus the
            # uncompressed size prepended to the raw block
            data = b'\xf0' + struct.pack("!I", size) + data
            value = lzo.decompress(data)
            if self.cache is not None:
                self.cache[index] = value
            return value

    def fix_dirty(self):
        chunk, offset = self.get_block_and_offset(self.file_pos)
        if self.current_block_index != chunk:
            # Decoding assumes single-byte text, so byte offsets map to chars
            self.current_block = StringIO(self.load_block(chunk).decode())
            self.current_block.read(offset)
            self.current_block_index = chunk
        else:
            self.current_block.seek(offset)
        self.dirty = False

    def get_block_and_offset(self, index):
        return int(index // self.block_size), int(index % self.block_size)

    def seek(self, offset, whence=0):
        """
        Move the file pointer to a particular offset.
        """
        # Determine absolute target position
        if whence == 0:
            target_pos = offset
        elif whence == 1:
            target_pos = self.file_pos + offset
        elif whence == 2:
            raise Exception("seek from end not supported")
            ## target_pos = self.size - offset
        else:
            raise Exception("Invalid `whence` argument: %r", whence)
        # Check if this is a noop
        if target_pos == self.file_pos:
            return
        # Verify it is valid
        ## assert 0 <= target_pos < self.size, "Attempt to seek outside file"
        # Move the position
        self.file_pos = target_pos
        # Mark as dirty; the next time a read is done we need to actually
        # move the position in the compressed file
        self.dirty = True

    def tell(self):
        return self.file_pos

    def readline(self):
        if self.dirty:
            self.fix_dirty()
        if self.at_eof:
            return ""
        rval = []
        while 1:
            line = self.current_block.readline()
            self.file_pos += len(line)
            rval.append(line)
            if len(line) > 0 and line[-1] == '\n':
                break
            elif self.current_block_index == self.nblocks - 1:
                self.at_eof = True
                break
            else:
                self.current_block_index += 1
                self.current_block = StringIO(
                    self.load_block(self.current_block_index).decode())
        return "".join(rval)

    def __next__(self):
        line = self.readline()
        if line == "":
            raise StopIteration
        return line

    def __iter__(self):
        return self
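
A usage sketch for the class above, assuming the python-lzo module is installed and the file names are hypothetical; the offset table is simply the "s"/"o" line format that init_table() parses:

f = SeekableLzopFile("reads.txt.lzo", "reads.txt.lzot", block_cache_size=20)
f.seek(1000000)       # position in the uncompressed stream
f.readline()          # discard the partial line at the landing point
line = f.readline()   # first complete line after the seek target
f.close()
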
Exemplo n.º 55
0
def exercise_cif_model():
  loop = model.loop()
  loop["_loop_a"] = flex.double((1,2,3))
  loop.add_columns({'_loop_c': [4,5,6],
                    '_loop_b': ['7','8','9']})
  loop.add_row((4,7,'0'))
  try: loop["_loop_invalid"] = (7,8)
  except AssertionError: pass
  else: raise Exception_expected
  assert len(loop) == 3 # the number of columns (keys)
  assert loop.size() == 4 # the number of rows (loop iterations)
  assert list(loop.keys()) == ['_loop_a', '_loop_c', '_loop_b']
  try: loop["no_leading_underscore"] = 3
  except Sorry: pass
  else: raise Exception_expected
  loop2 = model.loop(header=("_loop2_a", "_loop2_b"), data=(1,2,3,4,5,6))
  assert list(loop2.keys()) == ["_loop2_a", "_loop2_b"]
  assert list(loop2.values()) == [flex.std_string(['1', '3', '5']),
                                  flex.std_string(['2', '4', '6'])]
  assert list(loop2.iterrows()) == [
    {'_loop2_a': '1', '_loop2_b': '2'},
    {'_loop2_a': '3', '_loop2_b': '4'},
    {'_loop2_a': '5', '_loop2_b': '6'}]
  loop3 = model.loop(
    data={
      "_loop3_a": flex.int((-1, 2, 3)),
      "_loop3_b": flex.double((1.1, 2.2, 3.3)),
      "_loop3_c": flex.size_t((1, 2, 3)),
    }
  )
  for k in "abc":
    assert isinstance(loop3["_loop3_%s" % k], flex.std_string)
  #
  block = model.block()
  block["_tag"] = 3
  block["_tag1"] = "'a string'"
  block["_another_tag"] = 3.142
  assert "_tag" in block
  assert "_tag1" in block
  assert "_another_tag" in block
  assert block["_tag"] == '3'
  assert block["_tag1"] == "'a string'"
  assert block["_another_tag"] == "3.142"
  assert list(block.keys()) == ['_tag', '_tag1', '_another_tag']
  assert list(block.values()) == ["3", "'a string'", "3.142"]
  try: block["no_leading_underscore"] = 3
  except Sorry: pass
  else: raise Exception_expected
  block.add_loop(loop)
  assert len(block) == 6
  assert list(block.items()) == [
    ('_tag', '3'), ('_tag1', "'a string'"), ('_another_tag', '3.142'),
    ('_loop_a', flex.std_string(['1', '2', '3', '4'])),
    ('_loop_c', flex.std_string(['4', '5', '6', '7'])),
    ('_loop_b', flex.std_string(['7', '8', '9', '0']))]
  block['_loop_c'] = [11, 12, 13, 14]
  assert '_loop_c' in list(block.loops['_loop'].keys())
  assert list(block['_loop_c']) == ['11', '12', '13', '14']
  #
  block1 = model.block()
  block1["_tag"] = 2
  block1["_tag2"] = 1.2
  loop3 = model.loop(header=("_loop_a", "_loop_b"), data=(6,5,4,3,2,1))
  block1.add_loop(loop2)
  block1.add_loop(loop3)
  block.update(block1)
  for key in block._items.keys():
    assert key in ['_another_tag', '_tag2', '_tag', '_tag1']
  for value in block._items.values():
    assert value in ['3.142', '1.2', '2', "'a string'"]
  assert list(block.loops.keys()) == ['_loop', '_loop2']
  assert list(block.keys()) == ['_tag', '_tag1', '_another_tag', '_loop_a',
                                '_loop_b', '_tag2', '_loop2_a', '_loop2_b']
  assert list(block['_loop_a']) == ['6', '4', '2']
  assert list(block['_loop_b']) == ['5', '3', '1']
  assert list(block['_loop2_a']) == ['1', '3', '5']
  assert list(block['_loop2_b']) == ['2', '4', '6']
  try: block['_loop_c']
  except KeyError: pass
  else: raise Exception_expected
  bad_loop = model.loop(header=("_a", "_b"), data=(1,2,3,4,5,6))
  block1.add_loop(bad_loop)
  assert "_a" in block1
  assert "_b" in block1
  assert list(block.get_looped_item("_loop_a")) == ['6', '4', '2']
  try: block.get_looped_item("_tag", value_error=ValueError)
  except ValueError: pass
  else: raise Exception_expected
  assert list(block.get_looped_item("_tag", value_error=None)) == ['2']
  try: block.get_looped_item("_none_existent")
  except KeyError: pass
  else: raise Exception_expected
  assert block.get_looped_item(
    "_none_existent", key_error=None, default="my_default") == "my_default"
  assert block.get_single_item("_tag") == "2"
  try: block.get_single_item("_loop_a")
  except ValueError: pass
  else: raise Exception_expected
  assert block.get_single_item(
    "_loop_a", value_error=None, default="default") == "default"
  try: block.get_single_item("_none_existent")
  except KeyError: pass
  else: raise Exception_expected
  assert block.get_single_item("_none_existent", key_error=None) is None
  #
  cif_model = model.cif()
  cif_model["fred"] = block
  assert "fred" in cif_model
  assert cif_model["frEd"] is block
  assert cif_model["fred"]["_Tag"] == '2'
  cif_model["fred"]["_tag"] = 4
  assert cif_model["fred"]["_tag"] == '4'
  del cif_model["fred"]["_tAg"]
  try: cif_model["fred"]["_tag"]
  except KeyError: pass
  else: raise Exception_expected
  cm = cif_model.deepcopy()
  l = cm["fred"]["_loop"]
  del cm["Fred"]["_loop_B"]
  assert "_loop_b" not in cm["fred"]
  assert "_loop_b" not in l
  assert "_loop" in cm["fred"].loops
  del cm["fred"]["_loop_a"]
  assert "_loop" not in cm["fred"].loops
  del cm["fred"]["_loop2"]
  assert "_loop2" not in cm["fred"].loops
  s = StringIO()
  print(cm, file=s)
  assert not show_diff(s.getvalue(),
"""\
data_fred
_tag1                             'a string'
_another_tag                      3.142
_tag2                             1.2

""")
  #
  cm2 = cif_model.copy()
  cm3 = cif_model.deepcopy()
  assert cm2['fred']['_loop_a'] is cif_model['fred']['_loop_a']
  assert cm3['fred']['_loop_a'] is not cif_model['fred']['_loop_a']
  b2 = copy.copy(block)
  b3 = copy.deepcopy(block)
  assert b2['_loop_b'] is block['_loop_b']
  assert b3['_loop_b'] is not block['_loop_b']
  l2 = loop.copy()
  l3 = loop.deepcopy()
  assert l2['_loop_b'] is loop['_loop_b']
  assert l3['_loop_b'] is not loop['_loop_b']
  #
  s = StringIO()
  cif_model.show(out=s)
  assert not show_diff(s.getvalue(),
"""\
data_fred
_tag1                             'a string'
_another_tag                      3.142
loop_
  _loop_a
  _loop_b
  6  5
  4  3
  2  1

_tag2                             1.2
loop_
  _loop2_a
  _loop2_b
  1  2
  3  4
  5  6

""")
  s = StringIO()
  cif_model.show(out=s, indent="    ", data_name_field_width=0)
  assert not show_diff(s.getvalue(),
"""\
data_fred
_tag1 'a string'
_another_tag 3.142
loop_
    _loop_a
    _loop_b
    6  5
    4  3
    2  1

_tag2 1.2
loop_
    _loop2_a
    _loop2_b
    1  2
    3  4
    5  6

""")
  s = StringIO()
  cif_model.show(out=s, indent="", indent_row="   ", data_name_field_width=0)
  assert not show_diff(s.getvalue(),
"""\
data_fred
_tag1 'a string'
_another_tag 3.142
loop_
_loop_a
_loop_b
   6  5
   4  3
   2  1

_tag2 1.2
loop_
_loop2_a
_loop2_b
   1  2
   3  4
   5  6

""")
  cif_model.sort(recursive=True)
  s = StringIO()
  cif_model.show(out=s)
  assert not show_diff(s.getvalue(),
"""\
data_fred
_another_tag                      3.142
_tag1                             'a string'
_tag2                             1.2
loop_
  _loop_a
  _loop_b
  6  5
  4  3
  2  1

loop_
  _loop2_a
  _loop2_b
  1  2
  3  4
  5  6

""")
  save = model.save()
  save.add_loop(l3)
  save['_tag1'] = 3
  block = model.block()
  block['bob'] = save
  cm = model.cif({'fred': block})
  s = StringIO()
  cm.show(out=s)
  assert not show_diff(s.getvalue(),
"""data_fred

save_bob
   loop_
    _loop_a
    _loop_c
    _loop_b
    1  11  7
    2  12  8
    3  13  9
    4  14  0

  _tag1                             3
  save_

""")
  b1 = model.block()
  b1['_a'] = 1
  b1['_b'] = 2
  b1['_c'] = 3
  b2 = model.block()
  b2['_a'] = 2
  b2['_c'] = 3
  b2['_d'] = 4
  b3 = b1.difference(b2)
  b4 = b2.difference(b1)
  for item in b3.items():
    assert item in [('_b', '2'), ('_a', '2')]
  for item in b4.items():
    assert item in [('_d', '4'), ('_a', '1')]
  l = model.loop(data=dict(_loop_d=(1,2),_loop_e=(3,4),_loop_f=(5,6)))
  assert l == l
  assert l == l.deepcopy()
  assert l != l2
  assert l != l3
  l2 = model.loop(data=dict(_loop_d=(1,2,3),_loop_e=(3,4,5),_loop_f=(5,6,7)))
  b1.add_loop(l)
  b2.add_loop(l2)
  b5 = b1.difference(b2)
  assert b5['_loop'] == l2
  l = model.loop(data=OrderedDict((('_loop_a',(1,21,-13)),
                                   ('_loop_b',(-221.3,3.01,4.246)),
                                   ('_loop_c',("a","b","cc")))))
  b = model.block()
  b.add_loop(l)
  cm = model.cif({'fred':b})
  s = StringIO()
  cm.show(out=s, loop_format_strings={'_loop':'% 4i% 8.2f %s'})
  assert not show_diff(s.getvalue(),"""\
data_fred
loop_
  _loop_a
  _loop_b
  _loop_c
   1 -221.30 a
  21    3.01 b
 -13    4.25 cc

""")
  s = StringIO()
  cm.show(out=s)
  assert not show_diff(s.getvalue(),"""\
data_fred
loop_
  _loop_a
  _loop_b
  _loop_c
    1  -221.3  a
   21    3.01  b
  -13   4.246  cc

""")
  l.add_row((".", "?", "."))
  s = StringIO()
  cm.show(out=s)
  assert not show_diff(s.getvalue(),"""\
data_fred
loop_
  _loop_a
  _loop_b
  _loop_c
    1  -221.3  a
   21    3.01  b
  -13   4.246  cc
    .       ?  .

""")
  l.delete_row(index=1)
  s = StringIO()
  cm.show(out=s)
  assert not show_diff(s.getvalue(),"""\
data_fred
loop_
  _loop_a
  _loop_b
  _loop_c
    1  -221.3  a
  -13   4.246  cc
    .       ?  .

""")
  l2 = l.deepcopy()
  l2.delete_row(index=0)
  l2.delete_row(index=0)
  l2.delete_row(index=0)
  try: l2.show(out=s)
  except AssertionError: pass
  else: raise Exception_expected
  l.clear()
  try: l.show(out=s)
  except AssertionError: pass
  else: raise Exception_expected
  #
  loop = model.loop(data={"_a_1": ('string with spaces','nospaces'),
                          "_a_2": ('a', 'b')})
  s = StringIO()
  loop.show(out=s, align_columns=True)
  assert not show_diff(s.getvalue(), """\
loop_
  _a_1
  _a_2
  'string with spaces'  a
  nospaces              b
""")
  #
  cb = model.block()
  cm = model.cif()
  cm["a"] = cb
  cb["_b"] = ""
  s = StringIO()
  cm.show(out=s)
  assert not show_diff(s.getvalue(), """\
data_a
_b                                ''
""")
  #
  loop = model.loop(data=OrderedDict((
    ("_entity_poly.entity_id", ('1', '2', '3')),
    ("_entity_poly.pdbx_seq_one_letter_code", (
      "TFGSGEADCGLRPLFEKKSLEDKTERELLESYIDGR",
      """\
IVEGSDAEIGMSPWQVMLFRKSPQELLCGASLISDRWVLTAAHCLLYPPWDKNFTENDLLVRIGKHSRTRYERNIEKISM
THVFRLKKWIQKVIDQFGE""",
      "NGDFEEIPEE(TYS)LQ",
    )),
    ("_entity_poly.pdbx_seq_one_letter_code_can", (
      "TFGSGEADCGLRPLFEKKSLEDKTERELLESYIDGR",
      """\
IVEGSDAEIGMSPWQVMLFRKSPQELLCGASLISDRWVLTAAHCLLYPPWDKNFTENDLLVRIGKHSRTRYERNIEKISM
THVFRLKKWIQKVIDQFGE""",
      "NGDFEEIPEEYLQ",
    )),
    ("_entity_poly.pdbx_strand_id", ('L', 'H', 'I'))
  )))
  s = StringIO()
  loop.show(out=s, align_columns=True)
  s.seek(0)
  assert not show_diff("\n".join(l.rstrip() for l in s.readlines()),"""\
loop_
  _entity_poly.entity_id
  _entity_poly.pdbx_seq_one_letter_code
  _entity_poly.pdbx_seq_one_letter_code_can
  _entity_poly.pdbx_strand_id
  1  TFGSGEADCGLRPLFEKKSLEDKTERELLESYIDGR  TFGSGEADCGLRPLFEKKSLEDKTERELLESYIDGR  L
  2
;
IVEGSDAEIGMSPWQVMLFRKSPQELLCGASLISDRWVLTAAHCLLYPPWDKNFTENDLLVRIGKHSRTRYERNIEKISM
THVFRLKKWIQKVIDQFGE
;

;
IVEGSDAEIGMSPWQVMLFRKSPQELLCGASLISDRWVLTAAHCLLYPPWDKNFTENDLLVRIGKHSRTRYERNIEKISM
THVFRLKKWIQKVIDQFGE
;
  H
  3  NGDFEEIPEE(TYS)LQ                     NGDFEEIPEEYLQ                         I\
""")
  #
  cb = model.block()
  cm = model.cif()
  cm["a"] = cb
  cb["_a"] = '1 "a" 2'
  cb["_b"] = "1 'b' 3"
  cb["_c"] = "O1'"
  cb["_d"] = 'O2"'
  cb["_e"] = """1 'a' "b" 3"""
  s = StringIO()
  print(cm, file=s)
  s.seek(0)
  assert not show_diff("\n".join(l.rstrip() for l in s.readlines()), """\
data_a
_a                                '1 "a" 2'
_b                                "1 'b' 3"
_c                                O1'
_d                                O2"
_e
;
1 'a' "b" 3
;

""")
  # verify that what we wrote out above is valid CIF and we can read it back in
  cm2 = iotbx.cif.reader(input_string=s.getvalue()).model()
  cb2 = cm2["a"]
  assert cb2["_a"] == cb["_a"]
  assert cb2["_b"] == cb["_b"]
  assert cb2["_c"] == cb["_c"]
  assert cb2["_d"] == cb["_d"]
  assert cb2["_e"].strip() == cb["_e"]
  #
  cm = iotbx.cif.reader(input_string="""\
data_a
loop_
  _pdbx_refine_tls_group.id
  _pdbx_refine_tls_group.refine_tls_id
  _pdbx_refine_tls_group.selection
  _pdbx_refine_tls_group.selection_details
  1  1  ?  "chain 'A' and (resid    2  through   15 )"
  2  2  ?  "chain 'A' and (resid   16  through   26 )"
  3  3  ?  "chain 'A' and (resid   27  through   43 )"
  4  4  ?  "chain 'B' and (resid    1  through   14 )"
  5  5  ?  "chain 'B' and (resid   15  through   20 )"
""").model()
  print(cm)
  #
  cif_block = model.block()
  loop_a = model.loop(header=("_a.1", "_a.2"), data=(1,2,3,4,5,6))
  cif_block.add_loop(loop_a)
  assert cif_block.get_loop("_a") is loop_a
  assert cif_block.get_loop_or_row("_a") is loop_a
  assert cif_block.get_loop("_b") is None
  assert cif_block.get_loop_or_row("_b") is None
  assert cif_block.get_loop("_b", default=loop_a) is loop_a
  assert cif_block.get_loop_or_row("_b", default=loop_a) is loop_a
  loop_a = cif_block.get_loop_with_defaults(
    "_a", default_dict={"_a.2":".", "_a.3":"?", "_a.4":"."})
  assert list(cif_block["_a.1"]) == ['1', '3', '5']
  assert list(cif_block["_a.2"]) == ['2', '4', '6']
  assert list(cif_block["_a.3"]) == ['?', '?', '?']
  assert list(cif_block["_a.4"]) == ['.', '.', '.']
  loop_a.add_row({"_a.3":"a", "_a.4":"b"})
  loop_a.add_row({"_a.3":"c", "_a.4":"d"}, default_value=".")
  assert list(cif_block["_a.1"]) == ['1', '3', '5', '?', '.']
  assert list(cif_block["_a.2"]) == ['2', '4', '6', '?', '.']
  assert list(cif_block["_a.3"]) == ['?', '?', '?', 'a', 'c']
  assert list(cif_block["_a.4"]) == ['.', '.', '.', 'b', 'd']
  loop_B = model.loop(header=("_B.1", "_B.2", "_B.3"), data=(1,2,3,4,5,6))
  cif_block.add_loop(loop_B)
  assert cif_block.get_loop("_B") is loop_B
  assert cif_block.get_loop_or_row("_B") is loop_B
  assert cif_block.get_loop("_b") is loop_B
  assert cif_block.get_loop_or_row("_b") is loop_B
  #
  cif_block = model.block()
  cif_block['_a'] = """\
123
456"""
  s = StringIO()
  cif_block.show(out=s)
  s.seek(0)
  assert not show_diff("\n".join([l.strip() for l in s.readlines()]), """\
_a
;
123
456
;
""")


  cm = iotbx.cif.reader(input_string="""\
data_a
  _test_row.id 1
  _test_row.data2 2
  _test_row.data3 3
  _test_row.data4 44
#
loop_
_test_row_range.sheet_id
_test_row_range.id
_test_row_range.beg_label_comp_id
_test_row_range.beg_label_asym_id
A 1 SER A
A 2 MET A
#
""").model()
  #
  cif_block = list(cm.values())[0]
  loop_or_row = cif_block.get_loop_or_row('_test_row')
  assert loop_or_row.n_rows() == 1
  assert loop_or_row.n_columns() == 4
  assert list(loop_or_row['_test_row.id']) == ['1']
  assert list(loop_or_row['_test_row.data2']) == ['2']
  assert list(loop_or_row['_test_row.data3']) == ['3']
  assert list(loop_or_row['_test_row.data4']) == ['44']
  for r in loop_or_row.iterrows():
    assert list(r['_test_row.id']) == ['1']
    assert list(r['_test_row.data2']) == ['2']
    assert list(r['_test_row.data3']) == ['3']
    assert list(r['_test_row.data4']) == ['4','4']
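
The test above exercises the iotbx.cif.model API exhaustively, but the core pattern it relies on is small. A condensed sketch, assuming a cctbx installation:

from iotbx.cif import model

block = model.block()
block["_cell.length_a"] = 52.4            # scalar values are stored as strings
loop = model.loop(header=("_atom.id", "_atom.element"),
                  data=(1, "C", 2, "N"))  # flattened rows: (1, "C"), (2, "N")
block.add_loop(loop)
cif = model.cif({"example": block})
print(cif)                                # renders the model as CIF text
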
Exemplo n.º 56
0
class CLITest(TestCase):
    def setUp(self):
        self.output = StringIO()
        self.error = StringIO()
        self.input = StringIO()

    def test_cli_reading_web_form_password_with_multiple_password_attempts(self):
        password_attempts = (i for i in ("incorrect", "badger"))
        cli = self.build_cli(
            getpass=lambda prompt: next(password_attempts).encode("utf-8"),
            arguments=("--path", self.keychain_path, "onetosix",),
        )
        cli.run()

        self.assert_output("123456\n")
        self.assert_no_error_output()

    def test_cli_with_bad_item_name(self):
        cli = self.build_cli(
            getpass=lambda prompt: "badger".encode("utf-8"),
            arguments=("--path", self.keychain_path, "onetos",),
        )

        self.assert_exit_status(EX_DATAERR, cli.run)
        self.assert_no_output()
        self.assert_error_output("1pass: Could not find an item named 'onetos'\n")

    def test_cli_with_fuzzy_matching(self):
        cli = self.build_cli(
            getpass=lambda prompt: "badger".encode("utf-8"),
            arguments=("--fuzzy", "--path", self.keychain_path, "onetos",),
        )
        cli.run()

        self.assert_output("123456\n")
        self.assert_no_error_output()

    def test_cli_cancelled_password_prompt(self):
        def keyboard_interrupt(prompt):
            raise KeyboardInterrupt()
        cli = self.build_cli(
            getpass=keyboard_interrupt,
            arguments=("--path", self.keychain_path, "onetosix",),
        )

        self.assert_exit_status(0, cli.run)
        self.assert_output("\n")
        self.assert_no_error_output()

    def test_correct_password_from_stdin(self):
        def flunker(prompt):
            self.fail("Password prompt was invoked")
        self.input.write("badger\n")
        self.input.seek(0)
        cli = self.build_cli(
            getpass=flunker,
            arguments=("--no-prompt", "--path", self.keychain_path, "onetosix",),
        )
        cli.run()

        self.assert_output("123456\n")
        self.assert_no_error_output()

    def test_incorrect_password_from_stdin(self):
        def flunker(prompt):
            self.fail("Password prompt was invoked")
        self.input.write("wrong-password\n")
        self.input.seek(0)
        cli = self.build_cli(
            getpass=flunker,
            arguments=("--no-prompt", "--path", self.keychain_path, "onetosix",),
        )

        self.assert_exit_status(EX_DATAERR, cli.run)
        self.assert_no_output()
        self.assert_error_output("1pass: Incorrect master password\n")

    def build_cli(self, **kwargs):
        cli_kwargs = {
            "stdin": self.input,
            "stdout": self.output,
            "stderr": self.error,
        }
        cli_kwargs.update(kwargs)
        return CLI(**cli_kwargs)

    def assert_exit_status(self, expected_status, func):
        try:
            func()
        except SystemExit as exc:
            self.assertEqual(expected_status, exc.code)
        else:
            self.fail("Expected a SystemExit to be raised")

    def assert_output(self, expected_output):
        self.assertEqual(expected_output, self.output.getvalue())

    def assert_no_output(self):
        self.assert_output("")

    def assert_error_output(self, expected_output):
        self.assertEqual(expected_output, self.error.getvalue())

    def assert_no_error_output(self):
        self.assert_error_output("")

    @property
    def keychain_path(self):
        return os.path.join(os.path.dirname(__file__), "data", "1Password.agilekeychain")
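
The fixture above captures a generally useful pattern: the CLI writes only to the file-like objects it is handed, so tests can substitute StringIO buffers and assert on their contents afterwards. The same wiring outside of unittest, with a hypothetical keychain path:

from io import StringIO

out, err = StringIO(), StringIO()
cli = CLI(
    stdin=StringIO("badger\n"),        # master password piped in
    stdout=out,
    stderr=err,
    getpass=lambda prompt: b"badger",  # not called when --no-prompt is used
    arguments=("--no-prompt", "--path", "1Password.agilekeychain", "onetosix"),
)
cli.run()
assert out.getvalue() == "123456\n"
assert err.getvalue() == ""
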
Exemplo n.º 57
0
def exercise_mmcif_tls():
    pdb_file = libtbx.env.find_in_repositories(
        relative_path="phenix_regression/pdb/3orl.pdb", test=os.path.isfile)
    mmcif_file = libtbx.env.find_in_repositories(
        relative_path="phenix_regression/pdb/3orl.cif", test=os.path.isfile)

    if pdb_file is None or mmcif_file is None:
        print(
            "Skipping exercise_mmcif_tls(): missing phenix_regression directory."
        )
        return

    pdb_input = iotbx.pdb.input(file_name=pdb_file)
    pdb_hierarchy = pdb_input.construct_hierarchy()
    cif_input = iotbx.pdb.input(file_name=mmcif_file)
    cif_hierarchy = cif_input.construct_hierarchy()

    pdb_tls_params = pdb_input.extract_tls_params(pdb_hierarchy).tls_params

    cif_block = cif_input.cif_block
    cif_tls_params = cif_input.extract_tls_params(cif_hierarchy).tls_params

    assert len(pdb_tls_params) == len(cif_tls_params) == 3
    check_tls_params(pdb_tls_params, cif_tls_params)

    selection_strings = [tls.selection_string for tls in cif_tls_params]
    cif_block = iotbx.pdb.mmcif.tls_as_cif_block(cif_tls_params,
                                                 selection_strings)
    cif_block.update(cif_hierarchy.as_cif_block())
    cif_model = iotbx.cif.model.cif()
    cif_model["3orl"] = cif_block
    s = StringIO()
    print(cif_model, file=s)
    s.seek(0)
    cif_hierarchy_recycled = iotbx.pdb.input(
        lines=s.readlines(), source_info=None).construct_hierarchy()
    tls_params_recycled = cif_input.extract_tls_params(
        cif_hierarchy_recycled).tls_params
    assert len(tls_params_recycled) == len(cif_tls_params) == 3
    check_tls_params(tls_params_recycled, cif_tls_params)

    # this one has phenix selection strings
    pdb_file = libtbx.env.find_in_repositories(
        relative_path="phenix_regression/pdb/4g9h.pdb", test=os.path.isfile)
    mmcif_file = libtbx.env.find_in_repositories(
        relative_path="phenix_regression/pdb/4g9h.cif", test=os.path.isfile)

    pdb_input = iotbx.pdb.input(file_name=pdb_file)
    pdb_hierarchy = pdb_input.construct_hierarchy()
    pdb_tls_params = pdb_input.extract_tls_params(pdb_hierarchy).tls_params

    cif_input = iotbx.pdb.input(file_name=mmcif_file)
    cif_hierarchy = cif_input.construct_hierarchy()
    cif_block = cif_input.cif_block
    cif_tls_params = cif_input.extract_tls_params(cif_hierarchy).tls_params

    assert len(pdb_tls_params) == len(cif_tls_params) == 18
    check_tls_params(pdb_tls_params, cif_tls_params)

    selection_strings = [tls.selection_string for tls in cif_tls_params]
    cif_block = iotbx.pdb.mmcif.tls_as_cif_block(cif_tls_params,
                                                 selection_strings)
    cif_block.update(cif_hierarchy.as_cif_block())
    cif_model = iotbx.cif.model.cif()
    cif_model["4g9h"] = cif_block
    s = StringIO()
    print(cif_model, file=s)
    s.seek(0)
    cif_hierarchy_recycled = iotbx.pdb.input(
        lines=s.readlines(), source_info=None).construct_hierarchy()
    tls_params_recycled = cif_input.extract_tls_params(
        cif_hierarchy_recycled).tls_params
    assert len(tls_params_recycled) == len(cif_tls_params) == 18
    check_tls_params(tls_params_recycled, cif_tls_params)

    # in this one the tls data items are not looped
    mmcif_file = libtbx.env.find_in_repositories(
        relative_path="phenix_regression/pdb/2xw9.cif", test=os.path.isfile)
    cif_input = iotbx.pdb.input(file_name=mmcif_file)
    cif_hierarchy = cif_input.construct_hierarchy()

    cif_block = cif_input.cif_block
    cif_tls_params = cif_input.extract_tls_params(cif_hierarchy).tls_params

    assert len(cif_tls_params) == 1
    cif_tls = cif_tls_params[0]
    assert approx_equal(cif_tls.t,
                        [0.0275, 0.0202, 0.0138, -0.0004, 0.0088, -0.0002])
    assert approx_equal(cif_tls.l,
                        [0.0554, 0.0231, 0.0573, -0.0127, 0.0112, -0.017])
    assert approx_equal(cif_tls.s, [
        -0.0001, -0.0012, -0.0037, -0.0006, 0.001, 0.0007, -0.0023, -0.0001,
        -0.0009
    ])
    assert approx_equal(cif_tls.origin, [-1.219, 1.557, 13.138])
    assert approx_equal(cif_tls.selection_string, "(chain A and resseq 1:228)")

    selection_strings = [tls.selection_string for tls in cif_tls_params]
    cif_block = iotbx.pdb.mmcif.tls_as_cif_block(cif_tls_params,
                                                 selection_strings)
    cif_block.update(cif_hierarchy.as_cif_block())
    cif_model = iotbx.cif.model.cif()
    cif_model["2xw9"] = cif_block
    s = StringIO()
    print(cif_model, file=s)
    s.seek(0)
    cif_hierarchy_recycled = iotbx.pdb.input(
        lines=s.readlines(), source_info=None).construct_hierarchy()
    tls_params_recycled = cif_input.extract_tls_params(
        cif_hierarchy_recycled).tls_params
    assert len(tls_params_recycled) == len(cif_tls_params) == 1
    check_tls_params(tls_params_recycled, cif_tls_params)
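
Each of the three cases above ends with the same round-trip check: render the assembled cif model to a StringIO, re-parse the text, and verify that the recycled hierarchy still yields equivalent TLS parameters. The pattern in isolation:

s = StringIO()
print(cif_model, file=s)   # serialize the model as mmCIF text
s.seek(0)
hierarchy = iotbx.pdb.input(
    lines=s.readlines(), source_info=None).construct_hierarchy()
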
Exemplo n.º 58
0
    def newContent(self, REQUEST=None, **kw):
        """
      The newContent method is overridden to implement smart content
      creation by detecting the portal type based on whatever information
      was provided and finding out the most appropriate module to store
      the content.

      Explicit named parameters are:
        id - id of document
        portal_type - explicit portal_type parameter, must be honoured
        url - Identifier of external resource. Content will be downloaded
              from it
        container - if specified, it is possible to define
                    where to contribute the content. Else, ContributionTool
                    tries to guess.
        container_path - if specified, defines the container path
                         and has precedence over container
        discover_metadata - Enable metadata extraction and discovery
                            (default True)
        temp_object - build tempObject or not (default False)
        user_login - is the name under which the content will be created
                     XXX - this is a security hole which needs to be fixed by
                     making sure only Manager can use this parameter
        data - Binary representation of content
        filename - explicit filename of content
    """
        # Useful for metadata discovery, keep it as it has been provided
        input_parameter_dict = kw.copy()
        # But file and data are exceptions:
        # they are potentially too big to keep in memory, so we keep only
        # one reference to those values, on the future created document
        # only.
        if 'file' in input_parameter_dict:
            del input_parameter_dict['file']
        if 'data' in input_parameter_dict:
            del input_parameter_dict['data']
        if 'container' in input_parameter_dict:
            # Container is a persistent object
            # keep only its path in container_path key
            container = input_parameter_dict.pop('container')
            input_parameter_dict['container_path'] = container.getPath()
        # pop: remove keys which are not document properties
        url = kw.pop('url', None)
        container = kw.pop('container', None)
        container_path = kw.pop('container_path', None)
        discover_metadata = kw.pop('discover_metadata', True)
        user_login = kw.pop('user_login', None)
        document_id = kw.pop('id', None)
        # check file_name argument for backward compatibility.
        if 'file_name' in kw:
            if 'filename' not in kw:
                kw['filename'] = kw['file_name']
            del kw['file_name']
        filename = kw.get('filename', None)
        temp_object = kw.get('temp_object', False)

        document = None
        portal = self.getPortalObject()
        if container is None and container_path:
            # Get persistent object from its path.
            # Container may disappear, be smoother by passing default value
            container = portal.restrictedTraverse(container_path, None)
        # Try to find the filename
        if not url:
            # check if file was provided
            file_object = kw.get('file')
            if file_object is not None:
                if not filename:
                    filename = getattr(file_object, 'filename', None)
            else:
                # some channels supply data and file-name separately
                # this is the case for example for email ingestion
                # in this case, we build a file wrapper for it
                try:
                    data = kw.pop('data')
                except KeyError:
                    raise ValueError('data must be provided')
                if data is not None:
                    file_object = StringIO()
                    file_object.write(data)
                    file_object.seek(0)
                    kw['file'] = file_object
            content_type = kw.pop('content_type', None)
        else:
            file_object, filename, content_type = self._openURL(url)
            content_type = kw.pop('content_type', None) or content_type
            kw['file'] = file_object

        if not filename and url is None:
            raise ValueError('filename must be provided')

        if not content_type:
            # fallback to a default content_type according provided
            # filename
            content_type = self.guessMimeTypeFromFilename(filename)
        if content_type:
            kw['content_type'] = content_type

        portal_type = kw.pop('portal_type', None)
        if not portal_type:
            # Guess it with help of portal_contribution_registry
            portal_type = portal.portal_contribution_registry.findPortalTypeName(
                filename=filename, content_type=content_type)
            if not (container is None or container.isModuleType()
                    or container.getTypeInfo().allowType(portal_type)):
                portal_type = 'Embedded File'

        if container is None:
            # If the portal_type was provided, we can go faster
            if portal_type:
                # We know the portal_type, let us find the default module
                # and use it as container
                try:
                    container = portal.getDefaultModule(portal_type)
                except ValueError:
                    pass

        elif not url:
            # Simplify things here and return a document immediately
            # XXX Nicolas: This will break support of WebDAV
            # if _setObject is not called
            document = container.newContent(document_id, portal_type, **kw)
            if discover_metadata:
                document.activate(after_path_and_method_id=(document.getPath(),
                    ('convertToBaseFormat', 'Document_tryToConvertToBaseFormat')))\
                      .discoverMetadata(filename=filename,
                                        user_login=user_login,
                                        input_parameter_dict=input_parameter_dict)
            if REQUEST is not None:
                response = REQUEST.RESPONSE
                response.setHeader('X-Location', document.absolute_url())
                return response.redirect(self.absolute_url())
            return document

        #
        # Check if the same file already exists; if it does, update it.
        #
        property_dict = self.getMatchedFilenamePatternDict(filename)
        reference = property_dict.get('reference', None)
        version = property_dict.get('version', None)
        language = property_dict.get('language', None)
        if portal_type and reference and version and language:
            portal_catalog = portal.portal_catalog
            document = portal_catalog.getResultValue(portal_type=portal_type,
                                                     reference=reference,
                                                     version=version,
                                                     language=language)

            if document is not None:
                # The document is already uploaded, so override its file.
                if not _checkPermission(Permissions.ModifyPortalContent,
                                        document):
                    raise Unauthorized(
                        "[DMS] You are not allowed to update the existing document which has the same coordinates (id %s)"
                        % document.getId())
                document.edit(file=kw['file'])
                return document
        # Temp objects use the standard newContent from Folder
        if temp_object:
            # For temp_object creation, use the standard method
            return BaseTool.newContent(self, portal_type=portal_type, **kw)

        # Then put the file inside ourselves for a short while
        document = self._setObject(document_id,
                                   None,
                                   portal_type=portal_type,
                                   user_login=user_login,
                                   container=container,
                                   discover_metadata=discover_metadata,
                                   filename=filename,
                                   input_parameter_dict=input_parameter_dict)
        object_id = document.getId()
        document = self[object_id]  # Call __getitem__ to purge cache

        kw['filename'] = filename  # Override filename property
        # Then edit the document contents (so that upload can happen)
        document._edit(**kw)
        if url:
            document.fromURL(url)

        # Allow reindexing, reindex it and return the document
        try:
            del document.isIndexable
        except AttributeError:
            # Document does not have such attribute
            pass
        document.reindexObject()
        if REQUEST is not None:
            return REQUEST.RESPONSE.redirect(self.absolute_url())
        return document
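
A hedged sketch of calling the tool from application code; the tool id portal_contributions and the payload variables are assumptions, while the parameter names come from the docstring above:

# pdf_bytes is a hypothetical binary payload; newContent wraps `data`
# in a StringIO file object and guesses the portal type from the filename
document = portal.portal_contributions.newContent(
    filename="report.pdf",
    data=pdf_bytes,
    discover_metadata=True,  # the default: schedule metadata discovery
)
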
Exemplo n.º 59
0
class FileCacheObject(CacheObject):
    _struct = struct.Struct('dII')  # double and two ints
    # timestamp, lifetime, position

    @classmethod
    def fromFile(cls, fd):
        dat = cls._struct.unpack(fd.read(cls._struct.size))
        obj = cls(None, None, dat[1], dat[0])
        obj.position = dat[2]
        return obj

    def __init__(self, *args, **kwargs):
        self._key = None
        self._data = None
        self._size = None
        self._buff = StringIO()
        super(FileCacheObject, self).__init__(*args, **kwargs)

    @property
    def size(self):
        if self._size is None:
            self._buff.seek(0, 2)
            size = self._buff.tell()
            if size == 0:
                # Buffer not yet serialized: dump key/data, then measure
                if (self._key is None) or (self._data is None):
                    raise RuntimeError("cache object has no key/data to serialize")
                json.dump([self.key, self.data], self._buff)
                self._size = self._buff.tell()
            else:
                self._size = size
        return self._size

    @size.setter
    def size(self, value):
        self._size = value

    @property
    def key(self):
        if self._key is None:
            try:
                self._key, self._data = json.loads(self._buff.getvalue())
            except ValueError:  # buffer empty or not valid JSON yet
                pass
        return self._key

    @key.setter
    def key(self, value):
        self._key = value

    @property
    def data(self):
        if self._data is None:
            self._key, self._data = json.loads(self._buff.getvalue())
        return self._data

    @data.setter
    def data(self, value):
        self._data = value

    def load(self, fd):
        fd.seek(self.position)
        self._buff.seek(0)
        self._buff.write(fd.read(self.size))

    def dumpslot(self, fd):
        pos = fd.tell()
        fd.write(self._struct.pack(self.creation, self.lifetime,
                                   self.position))

    def dumpdata(self, fd):
        self.size  # touch the property so the buffer gets serialized
        fd.seek(self.position)
        fd.write(self._buff.getvalue())
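
The fixed-size slot header that fromFile() and dumpslot() exchange is a plain struct record. Shown standalone with illustrative values:

import struct
import time

slot = struct.Struct('dII')  # creation timestamp (double), lifetime, position
header = slot.pack(time.time(), 3600, 128)
assert len(header) == slot.size
creation, lifetime, position = slot.unpack(header)
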
Exemplo n.º 60
0
class FileCache( Iterator ):
    """
    Wrapper for a file that caches blocks of data in memory.

    **NOTE:** this is currently an incomplete file-like object, it only
    supports seek, tell, and readline (plus iteration). Reading bytes is
    currently not implemented.
    """
    def __init__( self, file, size, cache_size=DEFAULT_CACHE_SIZE, 
                                    block_size=DEFAULT_BLOCK_SIZE ):
        """
        Create a new `FileCache` wrapping the file-like object `file` that
        has total size `size` and caching blocks of size `block_size`.
        """
        self.file = file
        self.size = size
        self.cache_size = cache_size
        self.block_size = block_size
        # Setup the cache
        self.nblocks = ( self.size // self.block_size ) + 1
        self.cache = LRUCache( self.cache_size )
        # Position in file
        self.dirty = True
        self.at_eof = False
        self.file_pos = 0
        self.current_block_index = -1
        self.current_block = None
    def fix_dirty( self ):
        chunk, offset = self.get_block_and_offset( self.file_pos )
        if self.current_block_index != chunk:
            self.current_block = StringIO( self.load_block( chunk ) )
            self.current_block.read( offset )
            self.current_block_index = chunk
        else:
            self.current_block.seek( offset )
        self.dirty = False
    def get_block_and_offset( self, index ):
        return int( index // self.block_size ), int( index % self.block_size )
    def load_block( self, index ):
        if index in self.cache:
            return self.cache[index]
        else:
            real_offset = index * self.block_size
            self.file.seek( real_offset )
            block = self.file.read( self.block_size )
            self.cache[index] = block
            return block
    def seek( self, offset, whence=0 ):
        """
        Move the file pointer to a particular offset.
        """
        # Determine absolute target position
        if whence == 0:
            target_pos = offset
        elif whence == 1:
            target_pos = self.file_pos + offset
        elif whence == 2:
            target_pos = self.size - offset
        else:
            raise Exception( "Invalid `whence` argument: %r", whence )
        # Check if this is a noop
        if target_pos == self.file_pos:
            return    
        # Verify it is valid
        assert 0 <= target_pos < self.size, "Attempt to seek outside file"
        # Move the position
        self.file_pos = target_pos
        # Mark as dirty; the next time a read is done we need to actually
        # move the position in the underlying file
        self.dirty = True
    def tell( self ):
        return self.file_pos
    def readline( self ):
        if self.dirty:
            self.fix_dirty()
        if self.at_eof:
            return ""
        rval = []
        while 1:
            line = self.current_block.readline()
            # Advance the logical position so tell()/seek() stay accurate
            self.file_pos += len( line )
            rval.append( line )
            if len( line ) > 0 and line[-1] == '\n':
                break
            elif self.current_block_index == self.nblocks - 1:
                self.at_eof = True
                break
            else:
                self.current_block_index += 1
                self.current_block = StringIO( self.load_block( self.current_block_index ) )
        return "".join( rval )
    def __next__( self ):
        line = self.readline()
        if line == "":
            raise StopIteration
        return line
    def __iter__( self ):
        return self
    def close( self ):
        self.file.close()
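
A usage sketch for FileCache, assuming the wrapped file is plain ASCII text and the file name is hypothetical; cache_size and block_size here override the DEFAULT_* module constants:

import os

path = "example.txt"  # hypothetical input file
raw = open(path)
cached = FileCache(raw, os.path.getsize(path), cache_size=10, block_size=1024)
for line in cached:   # iteration pulls lines through the block cache
    print(line, end="")
cached.close()
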