Example #1
    def test_grid_01(self):
        nid = 1
        cp = 2
        cd = 0
        ps = ''
        seid = 0
        card_count = {'GRID': 1,}

        model = BDF()
        model.allocate(card_count)
        data1 = BDFCard(['GRID', nid, cp, 0., 0., 0., cd, ps, seid])

        nodes = model.grid
        nodes.add(data1)

        #print n1
        f = StringIO()
        nodes.write_bdf(f, size=8, write_header=False)
        nodes.write_bdf(f, size=16, write_header=False)
        nodes.write_bdf(f, size=16, is_double=True, write_header=False)

        # small field
        f = StringIO()
        nodes.write_bdf(f, size=8, write_header=False)
        msg = f.getvalue()
        card = 'GRID           1       2      0.      0.      0.\n'
        self.assertCardEqual(msg, card)

        # large field
        f = StringIO()
        nodes.write_bdf(f, size=16, write_header=False)
        card = ('GRID*                  1               2              0.              0.\n'
                '*                     0.\n')
        msg = f.getvalue()
        self.assertCardEqual(msg, card)
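Note: assertCardEqual is a pyNastran test helper, not part of unittest. A minimal sketch of what such a comparison might look like (the line-by-line behavior is an assumption, shown only so the test above is self-contained):

    def assertCardEqual(self, msg, card):
        # Hypothetical helper: compare the written card text line by line
        # so a failure points at the first differing line.
        actual = msg.rstrip('\n').split('\n')
        expected = card.rstrip('\n').split('\n')
        self.assertEqual(len(actual), len(expected))
        for actual_line, expected_line in zip(actual, expected):
            self.assertEqual(actual_line, expected_line)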
Example #2
def _test_progress_bar(backend, len, increment):
    out = StringIO()
    fill_str = ('1234567890' * (len//10))[:len]
    pb = DialogUI(out).get_progressbar(
        'label', fill_str, total=10, backend=backend)
    pb.start()
    # we can't increment 11 times
    for x in range(11):
        if not (increment and x == 0):
            # do not increment on 0
            pb.update(x if not increment else 1, increment=increment)
        #out.flush()  # needed atm... no longer?
        # The progress bar has 0.1 sec between updates by default, so
        # we could either sleep:
        #import time; time.sleep(0.1)
        # or just force the refresh
        pb.refresh()
        pstr = out.getvalue()
        if backend not in ('annex-remote', 'silent'):  # no str repr
            ok_startswith(pstr.lstrip('\r'), 'label:')
            assert_re_in(r'.*\b%d%%.*' % (10*x), pstr)
        if backend == 'progressbar':
            assert_in('ETA', pstr)
    pb.finish()
    if backend not in ('annex-remote', 'silent'):
        # returns back and there is no spurious newline
        ok_endswith(out.getvalue(), '\r')
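Note: ok_startswith, ok_endswith, assert_in, and assert_re_in are thin assertion helpers from the project's test utilities. Minimal sketches consistent with how they are used above (the exact signatures are assumptions):

import re

def ok_startswith(s, prefix):
    assert s.startswith(prefix), "%r does not start with %r" % (s, prefix)

def ok_endswith(s, suffix):
    assert s.endswith(suffix), "%r does not end with %r" % (s, suffix)

def assert_in(member, container):
    assert member in container, "%r not found in %r" % (member, container)

def assert_re_in(regex, s):
    assert re.search(regex, s), "%r not matched in %r" % (regex, s)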
Example #3
    def test_deqatn_10(self):
        """
        per nast/tpl/ptdmi1.dat
        """
        model = BDF(debug=None)
        model.cards_to_read.add('DEQATN')
        model.test_deqatn = True
        card = [
            'deqatn  2       f(x,y,z)= 1.;',
            '        L=x+y',
        ]
        model.add_card(card, 'DEQATN', is_list=False)
        model.cross_reference()

        s = StringIO()
        model.write_bdf(s, close=False)
        s.getvalue()
        s.close()
        eq = model.dequations[2]
        x = zeros(10, dtype='float32')
        y = zeros(11, dtype='float32')
        z = zeros(12, dtype='float32')
        #out = eq.func(x, y, z)
        out = eq.func(1.0, 2.0)
        print(out)
Example #4
    def test_deqatn_9(self):
        """
        per nast/tpl/ptdmi1.dat
        """
        model = BDF(debug=None)
        model.cards_to_read.add('DEQATN')
        model.test_deqatn = True
        card = [
            'deqatn  2       f(x,y,z)= 1.;',
            '        L=1+2+3+',
            '        + 4/min(1,2);',
            '        b= 4.;',
            '        h= 2.;',
            '        t1= 200.;',
            '        t2= 300.;',
            '        t=t1*(L-x)/L+t2*x/L',
            '        +4'
        ]
        model.add_card(card, 'DEQATN', is_list=False)
        model.cross_reference()

        s = StringIO()
        model.write_bdf(s, close=False)
        s.getvalue()
        s.close()
Example #5
def test_fail_with_short_help():
    out = StringIO()
    with assert_raises(SystemExit) as cme:
        fail_with_short_help(exit_code=3, out=out)
    assert_equal(cme.exception.code, 3)
    assert_equal(out.getvalue(), "")

    out = StringIO()
    with assert_raises(SystemExit) as cme:
        fail_with_short_help(msg="Failed badly", out=out)
    assert_equal(cme.exception.code, 1)
    assert_equal(out.getvalue(), "error: Failed badly\n")

    # Suggestions, hint, etc
    out = StringIO()
    with assert_raises(SystemExit) as cme:
        fail_with_short_help(
            msg="Failed badly",
            known=["mother", "mutter", "father", "son"],
            provided="muther",
            hint="You can become one",
            exit_code=0,  # no one forbids
            what="parent",
            out=out)
    assert_equal(cme.exception.code, 0)
    assert_equal(out.getvalue(),
                 "error: Failed badly\n"
                 "datalad: Unknown parent 'muther'.  See 'datalad --help'.\n\n"
                 "Did you mean any of these?\n"
                 "        mutter\n"
                 "        mother\n"
                 "        father\n"
                 "Hint: You can become one\n")
Example #6
class UpdateAppsAndBackendsTest(TestCase):

    def setUp(self):
        self.output = StringIO()
        # BASE_APPS are needed for the management commands to load successfully
        self.BASE_APPS = [
            'rapidsms',
            'django.contrib.auth',
            'django.contrib.contenttypes',
        ]

    def test_no_apps_then_none_added(self):
        with self.settings(INSTALLED_APPS=self.BASE_APPS):
            call_command('update_apps', stdout=self.output)
        self.assertEqual(self.output.getvalue(), '')

    def test_adds_app(self):
        # Add an app that has a RapidSMS app
        APPS = self.BASE_APPS + ['rapidsms.contrib.handlers']
        with self.settings(INSTALLED_APPS=APPS):
            call_command('update_apps', stdout=self.output)
        self.assertEqual(self.output.getvalue(), 'Added persistent app rapidsms.contrib.handlers\n')

    def test_no_backends_then_none_added(self):
        with self.settings(INSTALLED_BACKENDS={}):
            call_command('update_backends', stdout=self.output)
        self.assertEqual(self.output.getvalue(), '')

    def test_adds_backend(self):
        INSTALLED_BACKENDS = {
            "message_tester": {"ENGINE": "rapidsms.backends.database.DatabaseBackend"},
        }
        with self.settings(INSTALLED_BACKENDS=INSTALLED_BACKENDS):
            call_command('update_backends', stdout=self.output)
        self.assertEqual(self.output.getvalue(), 'Added persistent backend message_tester\n')
Example #7
def test_question_choices():

    # TODO: come up with a reusable fixture for testing here

    choices = {
        'a': '[a], b, cc',
        'b': 'a, [b], cc',
        'cc': 'a, b, [cc]'
    }

    for default_value in ['a', 'b']:
        choices_str = choices[default_value]
        for entered_value, expected_value in [(default_value, default_value),
                                              ('', default_value),
                                              ('cc', 'cc')]:
            with patch_getpass(return_value=entered_value):
                out = StringIO()
                response = DialogUI(out=out).question("prompt", choices=sorted(choices), default=default_value)
                eq_(response, expected_value)
                # getpass doesn't use out -- goes straight to the terminal
                eq_(out.getvalue(), '')
                # TODO: may be test that the prompt was passed as a part of the getpass arg
                #eq_(out.getvalue(), 'prompt (choices: %s): ' % choices_str)

    # check some expected exceptions to be thrown
    out = StringIO()
    ui = DialogUI(out=out)
    assert_raises(ValueError, ui.question, "prompt", choices=['a'], default='b')
    eq_(out.getvalue(), '')

    with patch_getpass(return_value='incorrect'):
        assert_raises(RuntimeError, ui.question, "prompt", choices=['a', 'b'])
    assert_re_in(".*ERROR: .incorrect. is not among choices.*", out.getvalue())
Example #8
def test_text_output():
    entry = BibTeX(_sample_bibtex)
    entry2 = BibTeX(_sample_bibtex2)

    # in this case, since we're not citing any module or method, we shouldn't
    # output anything
    collector = DueCreditCollector()
    collector.cite(entry, path='package')

    strio = StringIO()
    TextOutput(strio, collector).dump(tags=['*'])
    value = strio.getvalue()
    assert "0 packages cited" in value, "value was %s" % value
    assert "0 modules cited" in value, "value was %s" % value
    assert "0 functions cited" in value, "value was %s" % value

    # but it should be cited if cite_module=True
    collector = DueCreditCollector()
    collector.cite(entry, path='package', cite_module=True)

    strio = StringIO()
    TextOutput(strio, collector).dump(tags=['*'])
    value = strio.getvalue()
    assert "1 package cited" in value, "value was %s" % value
    assert "0 modules cited" in value, "value was %s" % value
    assert "0 functions cited" in value, "value was %s" % value

    # in this case, we should be citing the package since we are also citing a
    # submodule
    collector = DueCreditCollector()
    collector.cite(entry, path='package')
    collector.cite(entry, path='package.module')

    strio = StringIO()
    TextOutput(strio, collector).dump(tags=['*'])
    value = strio.getvalue()
    assert "1 package cited" in value, "value was %s" % value
    assert "1 module cited" in value, "value was %s" % value
    assert "0 functions cited" in value, "value was %s" % value
    assert "Halchenko, Y.O." in value, "value was %s" % value
    assert value.strip().endswith("Frontiers in Neuroinformatics, 6(22).")


    # in this case, we should be citing the package since we are also citing a
    # submodule
    collector = DueCreditCollector()
    collector.cite(entry, path='package')
    collector.cite(entry2, path='package')
    collector.cite(entry, path='package.module')

    strio = StringIO()
    TextOutput(strio, collector).dump(tags=['*'])
    value = strio.getvalue()
    assert "1 package cited" in value, "value was %s" % value
    assert "1 module cited" in value, "value was %s" % value
    assert "0 functions cited" in value, "value was %s" % value
    assert "Halchenko, Y.O." in value, "value was %s" % value
    assert '[1, 2]' in value, "value was %s" % value
    assert '[3]' not in value, "value was %s" % value
Example #9
class BaseProxyTestCase(test.NoDBTestCase):

    def setUp(self):
        super(BaseProxyTestCase, self).setUp()
        self.stderr = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', self.stderr))

    @mock.patch('os.path.exists', return_value=False)
    # NOTE(mriedem): sys.exit raises TestingException so we can actually exit
    # the test normally.
    @mock.patch('sys.exit', side_effect=test.TestingException)
    def test_proxy_ssl_without_cert(self, mock_exit, mock_exists):
        self.flags(ssl_only=True)
        self.assertRaises(test.TestingException, baseproxy.proxy,
                          '0.0.0.0', '6080')
        mock_exit.assert_called_once_with(-1)
        self.assertEqual(self.stderr.getvalue(),
                         "SSL only and self.pem not found\n")

    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('sys.exit', side_effect=test.TestingException)
    def test_proxy_web_dir_does_not_exist(self, mock_exit, mock_exists):
        self.flags(web='/my/fake/webserver/')
        self.assertRaises(test.TestingException, baseproxy.proxy,
                          '0.0.0.0', '6080')
        mock_exit.assert_called_once_with(-1)

    @mock.patch('os.path.exists', return_value=True)
    @mock.patch.object(logging, 'setup')
    @mock.patch.object(gmr.TextGuruMeditation, 'setup_autorun')
    @mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.__init__',
                       return_value=None)
    @mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.start_server')
    def test_proxy(self, mock_start, mock_init, mock_gmr, mock_log,
                   mock_exists):
        baseproxy.proxy('0.0.0.0', '6080')
        mock_log.assert_called_once_with(baseproxy.CONF, 'nova')
        mock_gmr.assert_called_once_with(version)
        mock_init.assert_called_once_with(
            listen_host='0.0.0.0', listen_port='6080', source_is_ipv6=False,
            cert='self.pem', key=None, ssl_only=False,
            daemon=False, record=None, security_proxy=None, traffic=True,
            web='/usr/share/spice-html5', file_only=True,
            RequestHandlerClass=websocketproxy.NovaProxyRequestHandler)
        mock_start.assert_called_once_with()

    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('sys.exit', side_effect=test.TestingException)
    def test_proxy_exit_with_error(self, mock_exit, mock_exists):
        self.flags(ssl_only=True)
        self.assertRaises(test.TestingException, baseproxy.proxy,
                          '0.0.0.0', '6080')
        self.assertEqual(self.stderr.getvalue(),
                         "SSL only and self.pem not found\n")
        mock_exit.assert_called_once_with(-1)
Example #10
def export_import_export():
    out = StringIO()
    export_bloggers(out)
    out = out.getvalue()
    db.drop_all()
    db.create_all()
    import_bloggers(StringIO(out))
    out2 = StringIO()
    export_bloggers(out2)
    assert json.loads(out) == json.loads(out2.getvalue()), \
        "export(import(export())) != export()"
Example #11
    def _test_deqatn_2(self):
        model = BDF(debug=None)
        #model.cards_to_read.add('DEQATN')
        #model.test_deqatn = True
        card = ["DEQATN", 1000, "MAXDIFF(t1,t2)=abs(t2-t1)/t1"]
        model.add_card(card, "DEQATN", is_list=True)
        model.cross_reference()

        s = StringIO()
        with self.assertRaises(AttributeError): # TODO: fix this...
            model.write_bdf(s)
        s.getvalue()
Example #12
class TestNovaStatusMain(test.NoDBTestCase):
    """Tests for the basic nova-status command infrastructure."""

    def setUp(self):
        super(TestNovaStatusMain, self).setUp()
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))

    @mock.patch.object(status.config, 'parse_args')
    @mock.patch.object(status, 'CONF')
    def _check_main(self, mock_CONF, mock_parse_args,
                    category_name='check', expected_return_value=0):
        mock_CONF.category.name = category_name
        return_value = status.main()

        self.assertEqual(expected_return_value, return_value)
        mock_CONF.register_cli_opt.assert_called_once_with(
            status.category_opt)

    @mock.patch.object(status.version, 'version_string_with_package',
                       return_value="x.x.x")
    def test_main_version(self, mock_version_string):
        self._check_main(category_name='version')
        self.assertEqual("x.x.x\n", self.output.getvalue())

    @mock.patch.object(status.cmd_common, 'print_bash_completion')
    def test_main_bash_completion(self, mock_print_bash):
        self._check_main(category_name='bash-completion')
        mock_print_bash.assert_called_once_with(status.CATEGORIES)

    @mock.patch.object(status.cmd_common, 'get_action_fn')
    def test_main(self, mock_get_action_fn):
        mock_fn = mock.Mock()
        mock_fn_args = [mock.sentinel.arg]
        mock_fn_kwargs = {'key': mock.sentinel.value}
        mock_get_action_fn.return_value = (mock_fn, mock_fn_args,
                                           mock_fn_kwargs)

        self._check_main(expected_return_value=mock_fn.return_value)
        mock_fn.assert_called_once_with(mock.sentinel.arg,
                                        key=mock.sentinel.value)

    @mock.patch.object(status.cmd_common, 'get_action_fn')
    def test_main_error(self, mock_get_action_fn):
        mock_fn = mock.Mock(side_effect=Exception('wut'))
        mock_get_action_fn.return_value = (mock_fn, [], {})

        self._check_main(expected_return_value=255)
        output = self.output.getvalue()
        self.assertIn('Error:', output)
        # assert the traceback is in the output
        self.assertIn('wut', output)
Example #13
    def _report_testsuite(suite_name, tests, xml_document, parentElement,
                          properties):
        """
        Appends the testsuite section to the XML document.
        """
        testsuite = xml_document.createElement('testsuite')
        parentElement.appendChild(testsuite)

        testsuite.setAttribute('name', suite_name)
        testsuite.setAttribute('tests', str(len(tests)))

        testsuite.setAttribute(
            'time', '%.3f' % sum(map(lambda e: e.elapsed_time, tests))
        )
        failures = filter(lambda e: e.outcome == e.FAILURE, tests)
        testsuite.setAttribute('failures', str(len(list(failures))))

        errors = filter(lambda e: e.outcome == e.ERROR, tests)
        testsuite.setAttribute('errors', str(len(list(errors))))

        skips = filter(lambda e: e.outcome == _TestInfo.SKIP, tests)
        testsuite.setAttribute('skipped', str(len(list(skips))))
 
        _XMLTestResult._report_testsuite_properties(
            testsuite, xml_document, properties)

        for test in tests:
            _XMLTestResult._report_testcase(test, testsuite, xml_document)

        systemout = xml_document.createElement('system-out')
        testsuite.appendChild(systemout)

        stdout = StringIO()
        for test in tests:
            # Merge the stdout from the tests in a class
            if test.stdout is not None:
                stdout.write(test.stdout)
        _XMLTestResult._createCDATAsections(
            xml_document, systemout, stdout.getvalue())

        systemerr = xml_document.createElement('system-err')
        testsuite.appendChild(systemerr)

        stderr = StringIO()
        for test in tests:
            # Merge the stderr from the tests in a class
            if test.stderr is not None:
                stderr.write(test.stderr)
        _XMLTestResult._createCDATAsections(
            xml_document, systemerr, stderr.getvalue())

        return testsuite
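Note: _createCDATAsections is not shown here. A CDATA section cannot contain the ']]>' terminator, so the usual approach is to split the text on that token and emit several CDATA nodes; a minimal sketch under that assumption:

    @staticmethod
    def _createCDATAsections(xmldoc, node, text):
        # Split on ']]>' so no single CDATA section contains the terminator.
        pos = text.find(']]>')
        while pos >= 0:
            node.appendChild(xmldoc.createCDATASection(text[:pos + 2]))
            text = text[pos + 2:]
            pos = text.find(']]>')
        node.appendChild(xmldoc.createCDATASection(text))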
Example #14
    def test_simple_roundtrip(self):
        # FIXME: we compare the loaded json to avoid dealing with encoding
        # differences when comparing objects, but this is kinda stupid
        r_build_manifest = BuildManifest(self.sections, self.meta, {})
        f = StringIO()
        r_build_manifest._write(f)
        r_s = f.getvalue()

        build_manifest = BuildManifest.from_string(r_s)
        f = StringIO()
        build_manifest._write(f)
        s = f.getvalue()
        
        self.assertEqual(json.loads(r_s), json.loads(s))
Example #15
def _test_progress_bar(len):
    out = StringIO()
    fill_str = ('1234567890' * (len//10))[:len]
    pb = DialogUI(out).get_progressbar('label', fill_str, maxval=10)
    pb.start()
    for x in range(11):
        pb.update(x)
        out.flush()  # needed atm
        pstr = out.getvalue()
        ok_startswith(pstr, 'label:')
        assert_in(' %d%% ' % (10*x), pstr)
        assert_in('ETA', pstr)
    pb.finish()
    ok_endswith(out.getvalue(), '\n')
Example #16
    def test_simple_roundtrip(self):
        # FIXME: we compare the loaded json to avoid dealing with encoding
        # differences when comparing objects, but this is kinda stupid
        r_ipkg = InstalledPkgDescription(self.sections, self.meta, {})
        f = StringIO()
        r_ipkg._write(f)
        r_s = f.getvalue()

        ipkg = InstalledPkgDescription.from_string(r_s)
        f = StringIO()
        ipkg._write(f)
        s = f.getvalue()

        self.assertEqual(json.loads(r_s), json.loads(s))
Example #17
    def test_deqatn_3(self):
        model = BDF(debug=None)
        model.cards_to_read.add('DEQATN')
        model.test_deqatn = True
        card = ['DEQATN  1000',
                '        MAXDIFF(t1,t2)=abs(t2-t1)/t1']
        model.add_card(card, 'DEQATN', is_list=False)
        model.cross_reference()

        s = StringIO()
        model.write_bdf(s, close=False)
        s.getvalue()
        #print(s.getvalue())
        s.close()
Example #18
    def render_and_save_variation(self, name, content, variation):
        """
        Renders the image variations and saves them to the storage
        """
        content.seek(0)

        img = Image.open(content)

        if self.is_smaller(img, variation):
            factor = 1
            while (img.size[0] / factor > 2 * variation['width'] and
                   img.size[1] * 2 / factor > 2 * variation['height']):
                factor *= 2
            if factor > 1:
                img.thumbnail((int(img.size[0] / factor),
                               int(img.size[1] / factor)), resample=resample)

            if variation['crop']:
                img = ImageOps.fit(img, (variation['width'], variation['height']), method=resample)
            else:
                img.thumbnail((variation['width'], variation['height']), resample=resample)
        variation_name = self.get_variation_name(self.instance, self.field, variation)
        file_buffer = StringIO()
        format = self.get_file_extension(name).lower().replace('jpg', 'jpeg')
        img.save(file_buffer, format)
        self.storage.save(variation_name, ContentFile(file_buffer.getvalue()))
        file_buffer.close()
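Note: this save step is Python 2 only; on Python 3, PIL writes bytes, so io.BytesIO (not StringIO) is required for the buffer. A sketch under that assumption (save_variation is a hypothetical standalone helper, not part of the class above):

from io import BytesIO

from django.core.files.base import ContentFile

def save_variation(storage, name, img, img_format='JPEG'):
    # PIL emits binary data on Python 3, so buffer it in BytesIO.
    buf = BytesIO()
    img.save(buf, img_format)
    storage.save(name, ContentFile(buf.getvalue()))
    buf.close()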
Example #19
def test_pydotprint_cond_highlight():
    """
    This is a REALLY PARTIAL TEST.

    I did them to help debug stuff.
    """

    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    f = theano.function([x], x * 2)
    f([1, 2, 3, 4])

    s = StringIO()
    new_handler = logging.StreamHandler(s)
    new_handler.setLevel(logging.DEBUG)
    orig_handler = theano.logging_default_handler

    theano.theano_logger.removeHandler(orig_handler)
    theano.theano_logger.addHandler(new_handler)
    try:
        theano.printing.pydotprint(f, cond_highlight=True,
                                   print_output_file=False)
    finally:
        theano.theano_logger.addHandler(orig_handler)
        theano.theano_logger.removeHandler(new_handler)

    assert (s.getvalue() == 'pydotprint: cond_highlight is set but there'
            ' is no IfElse node in the graph\n')
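Note: the capture pattern above (attach a logging.StreamHandler backed by StringIO, run the code, inspect getvalue()) works for any logging logger; a minimal standalone sketch:

import logging
from io import StringIO

s = StringIO()
handler = logging.StreamHandler(s)
logger = logging.getLogger('demo')
logger.addHandler(handler)
try:
    logger.warning('something happened')
finally:
    logger.removeHandler(handler)
assert s.getvalue() == 'something happened\n'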
Example #20
def main(loops, level):
    board, solution = LEVELS[level]
    order = DESCENDING
    strategy = Done.FIRST_STRATEGY
    stream = StringIO()

    board = board.strip()
    expected = solution.rstrip()

    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        stream = StringIO()
        solve_file(board, strategy, order, stream)
        output = stream.getvalue()
        stream = None

    dt = perf.perf_counter() - t0

    output = '\n'.join(line.rstrip() for line in output.splitlines())
    if output != expected:
        raise AssertionError("got a wrong answer:\n%s\nexpected: %s"
                             % (output, expected))

    return dt
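Note: main(loops, level) returns the elapsed time for loops iterations, which is the protocol expected by perf.Runner.bench_time_func. A hedged usage sketch (the benchmark name and level value are illustrative, not taken from the source):

if __name__ == '__main__':
    import perf

    runner = perf.Runner()
    # perf calls main(loops, 2) with a calibrated loop count;
    # level 2 is illustrative and just selects one of the LEVELS boards.
    runner.bench_time_func('hexiom_solve', main, 2)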
Example #21
        def do_check_on(value, nd, var=None):
            """
            Checks `value` for NaNs / Infs. If detected, raises an exception
            and / or prints information about `nd` and `var` to
            help the user determine the cause of the invalid values.

            Parameters
            ----------
            value : numpy.ndarray
                The value to be checked.
            nd : theano.gof.Apply
                The Apply node being executed.
            var : theano.gof.Variable
                Not used if nd is there. Otherwise, used to print the stack
                trace for inputs of the graph.

            """
            error = False
            sio = StringIO()
            if nan_is_error:
                if contains_nan(value, nd, var):
                    print('NaN detected', file=sio)
                    error = True
            if inf_is_error:
                if contains_inf(value, nd, var):
                    print('Inf detected', file=sio)
                    error = True
            if big_is_error:
                err = False
                if not _is_numeric_value(value, var):
                    err = False
                elif pygpu_available and isinstance(value, GpuArray):
                    err = (f_gpua_absmax(value.reshape(value.size)) > 1e10)
                else:
                    err = (np.abs(value).max() > 1e10)
                if err:
                    print('Big value detected', file=sio)
                    error = True
            if error:
                if nd:
                    print("NanGuardMode found an error in the "
                          "output of a node in this variable:", file=sio)
                    print(theano.printing.debugprint(nd, file='str'), file=sio)
                else:
                    print("NanGuardMode found an error in an input of the "
                          "graph.", file=sio)
                # Add the stack trace
                if nd:
                    var = nd.outputs[0]
                print(theano.gof.utils.get_variable_trace_string(var),
                      file=sio)
                msg = sio.getvalue()
                if config.NanGuardMode.action == 'raise':
                    raise AssertionError(msg)
                elif config.NanGuardMode.action == 'pdb':
                    print(msg)
                    import pdb
                    pdb.set_trace()
                elif config.NanGuardMode.action == 'warn':
                    logger.error(msg)
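Note: contains_nan and contains_inf are theano helpers that also handle GPU and sparse types. For a plain ndarray they reduce to cheap numpy reductions; a minimal sketch of the core checks:

import numpy as np

def contains_nan_simple(arr):
    # min() propagates NaN, so this tests the whole array in one pass.
    return np.isnan(np.min(arr))

def contains_inf_simple(arr):
    # An infinity is present iff the NaN-ignoring max or min is infinite.
    return np.isinf(np.nanmax(arr)) or np.isinf(np.nanmin(arr))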
Example #22
def generateParserSummary(files):
    valid = files['valid']
    other = files['other']
    failed = files['failed']
    processed = files['processed']

    output = StringIO()
    output.write('Successful Files with transactions:\n')
    # Successful Files
    for n,t in valid:
        output.write('  File Name: {}\n'.format(n))
        output.write('    Transactions:\n')
        for trans in t.transactions.all():
            output.write('      CRN: {}\n'.format(trans.crn))
    # Successful Files without transactions
    output.write('\nSuccessful Files without transactions:\n')
    for n,t in other:
        output.write('  File Name: {}\n'.format(n))
    # Failed files
    output.write('\nFailed Files:\n')
    for n,r in failed:
        output.write('  File Name: {}\n'.format(n))
        output.write('    Reason: {}\n'.format(r))
    # Already processed Files
    output.write('\nFiles previously processed:\n')
    for n,t in processed:
        output.write('  File Name: {}\n'.format(n))

    contents = output.getvalue()
    output.close()
    return contents
Example #23
def init_3d():
    """Initialise 3D plots within the IPython notebook, by injecting the
    required javascript libraries.
    """

    library_javascript = StringIO()

    library_javascript.write("""
    <p>Loading javascript for 3D plot in browser</p>

    <!-- Beginning of javascript injected by multinet.js -->
    <script type="text/javascript" src="multinet/static/js/jquery-2.1.4.js"></script>
    <script type="text/javascript" src="multinet/static/js/jquery-ui-1.11.4.js"></script>

    <script type="text/javascript" src="multinet/static/js/threejs/three-r71.js"></script>
    <script type="text/javascript" src="multinet/static/js/threejs/orbitcontrols.js"></script>
    <script type="text/javascript" src="multinet/static/js/threejs/stats-r12.min.js"></script>
    <script type="text/javascript" src="multinet/static/js/threejs/detector.js"></script>

    <script type="text/javascript" src="multinet/static/js/multinet-core.js"></script>
    <script type="text/javascript">
        var multinet_javascript_injected = true;
    </script>
    """)

    library_javascript.write(
                "<!-- End of javascript injected by multinet.js -->\n")

    display(HTML(library_javascript.getvalue()))
Example #24
def test_stderr_to_StringIO():
    s = StringIO()

    with stderr_to(s):
        sys.stderr.write(u("hello"))

    assert s.getvalue() == 'hello'
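Note: stderr_to is the project's redirection helper; a minimal sketch of such a context manager, equivalent in spirit to Python 3's contextlib.redirect_stderr:

import sys
from contextlib import contextmanager

@contextmanager
def stderr_to(new_target):
    # Temporarily swap sys.stderr for the given stream.
    old_target, sys.stderr = sys.stderr, new_target
    try:
        yield new_target
    finally:
        sys.stderr = old_target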
Example #25
def test_record_good():
    """
    Tests that when we record a sequence of events, then
    repeat it exactly, the Record class:
        1) Records it correctly
        2) Does not raise any errors
    """

    # Record a sequence of events
    output = StringIO()

    recorder = Record(file_object=output, replay=False)

    num_lines = 10

    for i in xrange(num_lines):
        recorder.handle_line(str(i) + '\n')

    # Make sure they were recorded correctly
    output_value = output.getvalue()

    assert output_value == ''.join(str(i) + '\n' for i in xrange(num_lines))

    # Make sure that the playback functionality doesn't raise any errors
    # when we repeat them
    output = StringIO(output_value)

    playback_checker = Record(file_object=output, replay=True)

    for i in xrange(num_lines):
        playback_checker.handle_line(str(i) + '\n')
Example #26
def init_3d():
    """Initialise 3D plots within the IPython notebook, by injecting the
    required javascript libraries.
    """

    library_javascript = StringIO()

    library_javascript.write("""
    <script type="text/javascript">
    /* Beginning of javascript injected by OpenModes */
    var openmodes_javascript_injected = true;
    """)

    three_js_libraries = ("three.min.js", "OrbitControls.js",
                          "Lut.js", "Detector.js", "CanvasRenderer.js",
                          "Projector.js")

    # Include required parts of three.js inline
    for library in three_js_libraries:
        with open(osp.join(three_js_dir, library)) as infile:
            library_javascript.write(infile.read())

    # include my custom javascript inline
    with open(osp.join(static_dir, "three_js_plot.js")) as infile:
        library_javascript.write(infile.read())

    library_javascript.write(
                "/* End of javascript injected by OpenModes */\n</script>\n")

    display(HTML(library_javascript.getvalue()))
    logging.info("Javascript injected for 3D interactive WebGL plots")
Example #27
    def test_list(self):

        def fake_network_get_all(context):
            return [db_fakes.FakeModel(self.net)]
        self.stubs.Set(db, 'network_get_all', fake_network_get_all)
        output = StringIO()
        sys.stdout = output
        self.commands.list()
        sys.stdout = sys.__stdout__
        result = output.getvalue()
        _fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
                          "%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
                          "%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
        head = _fmt % {'id': 'id',
                       'cidr': 'IPv4',
                       'cidr_v6': 'IPv6',
                       'dhcp_start': 'start address',
                       'dns1': 'DNS1',
                       'dns2': 'DNS2',
                       'vlan': 'VlanID',
                       'project_id': 'project',
                       'uuid': "uuid"}
        body = _fmt % {'id': self.net['id'],
                       'cidr': self.net['cidr'],
                       'cidr_v6': self.net['cidr_v6'],
                       'dhcp_start': self.net['dhcp_start'],
                       'dns1': self.net['dns1'],
                       'dns2': self.net['dns2'],
                       'vlan': self.net['vlan'],
                       'project_id': self.net['project_id'],
                       'uuid': self.net['uuid']}
        answer = '%s\n%s\n' % (head, body)
        self.assertEqual(result, answer)
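Note: the manual sys.stdout swap above leaks the replacement stream if self.commands.list() raises. On Python 3, contextlib.redirect_stdout restores stdout even on error; a sketch of the equivalent capture for the same step:

        from contextlib import redirect_stdout

        output = StringIO()
        with redirect_stdout(output):
            self.commands.list()
        result = output.getvalue()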
Example #28
def test_record_bad():
    """
    Tests that when we record a sequence of events, then
    do something different on playback, the Record class catches it.
    """

    # Record a sequence of events
    output = StringIO()

    recorder = Record(file_object=output, replay=False)

    num_lines = 10

    for i in xrange(num_lines):
        recorder.handle_line(str(i) + '\n')

    # Make sure that the playback functionality doesn't raise any errors
    # when we repeat some of them
    output_value = output.getvalue()
    output = StringIO(output_value)

    playback_checker = Record(file_object=output, replay=True)

    for i in xrange(num_lines // 2):
        playback_checker.handle_line(str(i) + '\n')

    # Make sure it raises an error when we deviate from the recorded sequence
    try:
        playback_checker.handle_line('0\n')
    except MismatchError:
        return
    raise AssertionError("Failed to detect mismatch between recorded sequence "
                         " and repetition of it.")
Example #29
def create_hook_module(target):
    safe_name = SAFE_MODULE_NAME.sub("_", target, len(target))
    module_name = "bento_hook_%s" % safe_name
    main_file = os.path.abspath(target)
    module = imp.new_module(module_name)
    module.__file__ = main_file
    code = open(main_file).read()

    sys.path.insert(0, os.path.dirname(main_file))
    try:
        exec(compile(code, main_file, 'exec'), module.__dict__)
        sys.modules[module_name] = module
    except Exception:
        sys.path.pop(0)
        e = extract_exception()
        tb = sys.exc_info()[2]
        s = StringIO()
        traceback.print_tb(tb, file=s)
        msg = """\
Could not import hook file %r: caught exception %r
Original traceback (most recent call last)
%s\
""" % (main_file, e, s.getvalue())
        raise InvalidHook(msg)

    module.root_path = main_file
    return module
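Note: imp.new_module is deprecated on Python 3; a rough modern equivalent of the import step using importlib.util (a sketch, not bento's actual code):

import importlib.util
import sys

def load_hook_module(module_name, main_file):
    spec = importlib.util.spec_from_file_location(module_name, main_file)
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # exceptions propagate from the hook file
    return module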
Example #30
class JenkinsRequestTimeoutTests(testtools.TestCase):

    def setUp(self):
        super(JenkinsRequestTimeoutTests, self).setUp()
        self.server = NullServer(("127.0.0.1", 0))
        self.messages = StringIO()
        self.addOnException(self._get_messages)

    def _get_messages(self, exc_info):
        self.addDetail('timeout-tests-messages',
                       text_content(self.messages.getvalue()))

    def test_jenkins_open_timeout(self):
        j = jenkins.Jenkins("http://%s:%s" % self.server.server_address,
                            None, None, timeout=0.1)
        request = jenkins.Request('http://%s:%s/job/TestJob' %
                                  self.server.server_address)

        # assert our request times out when no response
        with testtools.ExpectedException(jenkins.TimeoutException):
            j.jenkins_open(request, add_crumb=False)

    def test_jenkins_open_no_timeout(self):
        j = jenkins.Jenkins("http://%s:%s" % self.server.server_address,
                            None, None)
        request = jenkins.Request('http://%s:%s/job/TestJob' %
                                  self.server.server_address)

        # assert we don't time out quickly, as in the previous test, when
        # no timeout is defined.
        with testtools.ExpectedException(TestsTimeoutException):
            time_limit(0.5, self.messages,
                       j.jenkins_open, request, add_crumb=False)
Example #31
def str_diagnostic(expected, value, rtol, atol):
    """Return a pretty multiline string representating the cause
    of the exception"""
    sio = StringIO()

    try:
        ssio = StringIO()
        print("           : shape, dtype, strides, min, max, n_inf, n_nan:", file=ssio)
        print("  Expected :", end=' ', file=ssio)
        print(expected.shape, end=' ', file=ssio)
        print(expected.dtype, end=' ', file=ssio)
        print(expected.strides, end=' ', file=ssio)
        print(expected.min(), end=' ', file=ssio)
        print(expected.max(), end=' ', file=ssio)
        print(numpy.isinf(expected).sum(), end=' ', file=ssio)
        print(numpy.isnan(expected).sum(), end=' ', file=ssio)
        # only if all succeeds do we add anything to sio
        print(ssio.getvalue(), file=sio)
    except Exception:
        pass
    try:
        ssio = StringIO()
        print("  Value    :", end=' ', file=ssio)
        print(value.shape, end=' ', file=ssio)
        print(value.dtype, end=' ', file=ssio)
        print(value.strides, end=' ', file=ssio)
        print(value.min(), end=' ', file=ssio)
        print(value.max(), end=' ', file=ssio)
        print(numpy.isinf(value).sum(), end=' ', file=ssio)
        print(numpy.isnan(value).sum(), end=' ', file=ssio)
        # only if all succeeds do we add anything to sio
        print(ssio.getvalue(), file=sio)
    except Exception:
        pass

    print("  expected    :", expected, file=sio)
    print("  value    :", value, file=sio)

    try:
        ov = numpy.asarray(expected)
        nv = numpy.asarray(value)
        ssio = StringIO()
        absdiff = numpy.absolute(nv - ov)
        print("  Max Abs Diff: ", numpy.max(absdiff), file=ssio)
        print("  Mean Abs Diff: ", numpy.mean(absdiff), file=ssio)
        print("  Median Abs Diff: ", numpy.median(absdiff), file=ssio)
        print("  Std Abs Diff: ", numpy.std(absdiff), file=ssio)
        reldiff = numpy.absolute(nv - ov) / (numpy.absolute(nv) +
                                             numpy.absolute(ov))
        print("  Max Rel Diff: ", numpy.max(reldiff), file=ssio)
        print("  Mean Rel Diff: ", numpy.mean(reldiff), file=ssio)
        print("  Median Rel Diff: ", numpy.median(reldiff), file=ssio)
        print("  Std Rel Diff: ", numpy.std(reldiff), file=ssio)
        # only if all succeeds do we add anything to sio
        print(ssio.getvalue(), file=sio)
    except Exception:
        pass
    atol_, rtol_ = T.basic._get_atol_rtol(expected, value)
    if rtol is not None:
        rtol_ = rtol
    if atol is not None:
        atol_ = atol
    print("  rtol, atol:", rtol_, atol_, file=sio)
    return sio.getvalue()
Example #32
class JsonPrinter(resource_printer_base.ResourcePrinter):
    """Prints resource records as a JSON list.

  [JSON](http://www.json.org), JavaScript Object Notation.

  Printer attributes:
    no-undefined: Does not display resource data items with null values.

  Attributes:
    _buffer: Buffer stream for record item indentation.
    _delimiter: Delimiter string before the next record.
    _empty: True if no records were output.
    _indent: Resource item indentation.
  """

    # json.dump() does not have a streaming mode. In order to print a resource
    # list it requires the complete list contents. To get around that limitation
    # and print each resource list item, _AddRecord() prints the initial "[", the
    # intervening ",", the final "]", captures the json.dump() output for each
    # resource list item and prints it indented by STRUCTURED_INDENTATION spaces.

    _BEGIN_DELIMITER = '[\n'

    def __init__(self, *args, **kwargs):
        super(JsonPrinter, self).__init__(*args,
                                          retain_none_values=True,
                                          **kwargs)
        self._buffer = StringIO()
        self._empty = True
        self._delimiter = self._BEGIN_DELIMITER
        self._indent = ' ' * resource_printer_base.STRUCTURED_INDENTATION

    def __Dump(self, resource, out=None):
        json.dump(resource,
                  fp=out or self._out,
                  indent=resource_printer_base.STRUCTURED_INDENTATION,
                  sort_keys=True,
                  separators=(',', ': '))

    def _AddRecord(self, record, delimit=True):
        """Prints one element of a JSON-serializable Python object resource list.

    Allows intermingled delimit=True and delimit=False.

    Args:
      record: A JSON-serializable object.
      delimit: Dump one record if False, used by PrintSingleRecord().
    """
        self._empty = False
        if delimit:
            delimiter = self._delimiter + self._indent
            self._delimiter = ',\n'
            self.__Dump(record, self._buffer)
            output = self._buffer.getvalue()
            self._buffer.truncate(0)
            for line in output.split('\n'):
                self._out.write(delimiter + line)
                delimiter = '\n' + self._indent
        else:
            if self._delimiter != self._BEGIN_DELIMITER:
                self._out.write('\n]\n')
                self._delimiter = self._BEGIN_DELIMITER
            self.__Dump(record)
            self._out.write('\n')

    def Finish(self):
        """Prints the final delimiter and preps for the next resource list."""
        if self._empty:
            self._out.write('[]\n')
        elif self._delimiter != self._BEGIN_DELIMITER:
            self._out.write('\n]\n')
            self._delimiter = self._BEGIN_DELIMITER
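Note: the delimiter trick described in the comment above (emit '[' once, ',' between records, ']' at the end, because json.dump has no streaming mode) can be shown standalone; a minimal sketch independent of the gcloud printer classes:

import json
import sys

def dump_json_list(records, out=sys.stdout):
    # Stream a JSON list one record at a time.
    delimiter = '[\n'
    empty = True
    for record in records:
        out.write(delimiter)
        out.write(json.dumps(record, indent=2))
        delimiter = ',\n'
        empty = False
    out.write('[]\n' if empty else '\n]\n')

dump_json_list([{'a': 1}, {'b': 2}])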
Example #33
    def __import__(self, apply_node, check=True, reason=None):
        # We import the nodes in topological order. We are only interested
        # in new nodes, so we use all variables we know of as if they were the input set.
        # (the functions in the graph module only use the input set to
        # know where to stop going down)
        new_nodes = graph.io_toposort(self.variables, apply_node.outputs)

        if check:
            for node in new_nodes:
                if hasattr(node, 'fgraph') and node.fgraph is not self:
                    raise Exception("%s is already owned by another fgraph" %
                                    node)
                for r in node.inputs:
                    if hasattr(r, 'fgraph') and r.fgraph is not self:
                        raise Exception(
                            "%s is already owned by another fgraph" % r)
                    if (r.owner is None and not isinstance(r, graph.Constant)
                            and r not in self.inputs):
                        # Verbose error message
                        # Show a complete chain of variables from the missing input to an output
                        if config.exception_verbosity == 'high':

                            def find_path_to(output_var, input_var):
                                """ Returns a list of each variable on a (not necessarily unique)
                                    path from input_var to output_var, where each variable in the
                                    list has the preceding variable as one of its inputs.
                                    Returns None if no path exists"""

                                # If output and input are the same we have a singleton path
                                if output_var is input_var:
                                    return [output_var]

                                # If output has no inputs then there is no path
                                owner = output_var.owner

                                if owner is None:
                                    return None

                                # If input_var is an input to the output node, there is a
                                # simple two element path
                                inputs = owner.inputs

                                if input_var in inputs:
                                    return [input_var, output_var]

                                # Otherwise we must recurse by searching for a path to one
                                # of our inputs, then appending the output to that path
                                for ipt in inputs:
                                    path = find_path_to(ipt, input_var)

                                    if path is not None:
                                        path.append(output_var)

                                        return path

                                # Since none of the above methods returned a path, there is none
                                return None

                            # Try different outputs until we find one that has a path to the missing input
                            for output in self.outputs:
                                path = find_path_to(output, r)

                                if path is not None:
                                    break

                            # if there is no path then r isn't really a graph input so we shouldn't be running error
                            # handler code in the first place
                            assert path is not None
                            tr = getattr(r.tag, 'trace', [])
                            detailed_err_msg = ""
                            if len(tr) > 0:
                                detailed_err_msg += "\nBacktrace when the variable is created:\n"

                                # Print separate message for each element in
                                # the list of backtraces
                                sio = StringIO()
                                for subtr in tr:
                                    traceback.print_list(subtr, sio)
                                    detailed_err_msg += str(sio.getvalue())
                            raise MissingInputError(
                                'A variable that is an input to the graph was '
                                'neither provided as an input to the function '
                                'nor given a value. A chain of variables '
                                'leading from this input to an output is %s. '
                                'This chain may not be unique' % str(path) +
                                detailed_err_msg)

                        # Standard error message
                        raise MissingInputError(
                            ("An input of the graph, used to compute %s, "
                             "was not provided and not given a value."
                             "Use the Theano flag exception_verbosity='high',"
                             "for more information on this error." %
                             str(node)), r)

        for node in new_nodes:
            assert node not in self.apply_nodes
            self.__setup_node__(node)
            self.apply_nodes.add(node)
            for output in node.outputs:
                self.__setup_r__(output)
                self.variables.add(output)
            for i, input in enumerate(node.inputs):
                if input not in self.variables:
                    self.__setup_r__(input)
                    self.variables.add(input)
                self.__add_clients__(input, [(node, i)])
            assert node.fgraph is self
            self.execute_callbacks('on_import', node, reason)
Example #34
def test_debugprint():
    A = tensor.matrix(name='A')
    B = tensor.matrix(name='B')
    C = A + B
    C.name = 'C'
    D = tensor.matrix(name='D')
    E = tensor.matrix(name='E')

    F = D + E
    G = C + F
    mode = theano.compile.get_default_mode().including('fusion')
    g = theano.function([A, B, D, E], G, mode=mode)

    # just test that it works
    s = StringIO()
    debugprint(G, file=s)

    # test ids=int
    s = StringIO()
    debugprint(G, file=s, ids='int')
    s = s.getvalue()
    # The additional white space is needed!
    reference = '\n'.join([
        "Elemwise{add,no_inplace} [id 0] ''   ",
        " |Elemwise{add,no_inplace} [id 1] 'C'   ",
        " | |A [id 2]",
        " | |B [id 3]",
        " |Elemwise{add,no_inplace} [id 4] ''   ",
        "   |D [id 5]",
        "   |E [id 6]",
    ]) + '\n'

    if s != reference:
        print('--' + s + '--')
        print('--' + reference + '--')

    assert s == reference

    # test ids=CHAR
    s = StringIO()
    debugprint(G, file=s, ids='CHAR')
    s = s.getvalue()
    # The additional white space is needed!
    reference = "\n".join([
        "Elemwise{add,no_inplace} [id A] ''   ",
        " |Elemwise{add,no_inplace} [id B] 'C'   ",
        " | |A [id C]",
        " | |B [id D]",
        " |Elemwise{add,no_inplace} [id E] ''   ",
        "   |D [id F]",
        "   |E [id G]",
    ]) + '\n'

    if s != reference:
        print('--' + s + '--')
        print('--' + reference + '--')

    assert s == reference

    # test ids=CHAR, stop_on_name=True
    s = StringIO()
    debugprint(G, file=s, ids='CHAR', stop_on_name=True)
    s = s.getvalue()
    # The additional white space is needed!
    reference = '\n'.join([
        "Elemwise{add,no_inplace} [id A] ''   ",
        " |Elemwise{add,no_inplace} [id B] 'C'   ",
        " |Elemwise{add,no_inplace} [id C] ''   ",
        "   |D [id D]",
        "   |E [id E]",
    ]) + '\n'

    if s != reference:
        print('--' + s + '--')
        print('--' + reference + '--')

    assert s == reference

    # test ids=
    s = StringIO()
    debugprint(G, file=s, ids='')
    s = s.getvalue()
    # The additional white space is needed!
    reference = '\n'.join([
        "Elemwise{add,no_inplace}  ''   ",
        " |Elemwise{add,no_inplace}  'C'   ",
        " | |A ",
        " | |B ",
        " |Elemwise{add,no_inplace}  ''   ",
        "   |D ",
        "   |E ",
    ]) + '\n'
    if s != reference:
        print('--' + s + '--')
        print('--' + reference + '--')

    assert s == reference

    # test print_storage=True
    s = StringIO()
    debugprint(g, file=s, ids='', print_storage=True)
    s = s.getvalue()
    # The additional white space is needed!
    reference = '\n'.join([
        "Elemwise{add,no_inplace}  ''   0 [None]",
        " |A  [None]",
        " |B  [None]",
        " |D  [None]",
        " |E  [None]",
    ]) + '\n'
    if s != reference:
        print('--' + s + '--')
        print('--' + reference + '--')

    assert s == reference

    # test clients
    s = StringIO()
    # We must force the mode as otherwise it can change the clients order
    f = theano.function([A, B, D], [A + B, A + B - D], mode='FAST_COMPILE')
    debugprint(f, file=s, print_clients=True)
    s = s.getvalue()
    # The additional white space is needed!
    reference = '\n'.join([
        "Elemwise{add,no_inplace} [id A] ''   0 clients:[('[id B]', 1), ('output', '')]",
        " |A [id D]",
        " |B [id E]",
        "Elemwise{sub,no_inplace} [id B] ''   1",
        " |Elemwise{add,no_inplace} [id A] ''   0 clients:[('[id B]', 1), ('output', '')]",
        " |D [id F]",
    ]) + '\n'
    if s != reference:
        print('--' + s + '--')
        print('--' + reference + '--')

    assert s == reference
Example #35
        def do_check_on(var, nd, f, is_input):
            """
            Checks `var` for NaNs / Infs. If detected, raises an exception
            and / or prints information about `nd`, `f`, and `is_input` to
            help the user determine the cause of the invalid values.

            Parameters
            ----------
            var : numpy.ndarray
                The value to be checked.
            nd : theano.gof.Apply
                The Apply node being executed.
            f : callable
                The thunk for the apply node.
            is_input : bool
                If True, `var` is an input to `nd`.
                If False, it is an output.

            """
            error = False
            sio = StringIO()
            if nan_is_error:
                if contains_nan(var, nd):
                    print('NaN detected', file=sio)
                    error = True
            if inf_is_error:
                if contains_inf(var, nd):
                    print('Inf detected', file=sio)
                    error = True
            if big_is_error:
                err = False
                if isinstance(var, theano.gof.type.CDataType._cdata_type):
                    err = False
                elif isinstance(var, np.random.mtrand.RandomState):
                    err = False
                elif var.size == 0:
                    err = False
                elif cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
                    err = (f_gpuabsmax(var.reshape(var.size)) > 1e10)
                else:
                    err = (np.abs(var).max() > 1e10)
                if err:
                    print('Big value detected', file=sio)
                    error = True
            if error:
                if not is_input:
                    print(
                        "NanGuardMode found an error in the"
                        " output of a node in this variable:",
                        file=sio)
                    print(theano.printing.debugprint(nd, file='str'), file=sio)
                else:
                    print(
                        "NanGuardMode found an error in an"
                        " input of this node.",
                        file=sio)
                    print('Node:', file=sio)
                    print(nd, file=sio)
                    print("The input variable that cause problem:", file=sio)
                    print(theano.printing.debugprint(nd, file='str'), file=sio)
                msg = sio.getvalue()
                if config.NanGuardMode.action == 'raise':
                    raise AssertionError(msg)
                elif config.NanGuardMode.action == 'pdb':
                    print(msg)
                    import pdb
                    pdb.set_trace()
                elif config.NanGuardMode.action == 'warn':
                    logger.error(msg)
Example #36
def debugprint(obj, depth=-1, print_type=False,
               file=None, ids='CHAR', stop_on_name=False,
               done=None, print_storage=False, print_clients=False,
               used_ids=None):
    """Print a computation graph as text to stdout or a file.

    :type obj: :class:`~theano.gof.Variable`, Apply, or Function instance
    :param obj: symbolic thing to print
    :type depth: integer
    :param depth: print graph to this depth (-1 for unlimited)
    :type print_type: boolean
    :param print_type: whether to print the type of printed objects
    :type file: None, 'str', or file-like object
    :param file: print to this file ('str' means to return a string)
    :type ids: str
    :param ids: How do we print the identifier of the variable
                id - print the python id value
                int - print a sequential integer
                CHAR - print a capital letter
                "" - don't print an identifier
    :param stop_on_name: When True, if a node in the graph has a name,
                         we don't print anything below it.
    :type done: None or dict
    :param done: A dict where we store the ids of printed node.
        Useful to have multiple call to debugprint share the same ids.
    :type print_storage: bool
    :param print_storage: If True, this will print the storage map
        for Theano functions. Combined with allow_gc=False, after the
        execution of a Theano function, we see the intermediate result.
    :type print_clients: bool
    :param print_clients: If True, this will print, for each Apply node that
         has more than one client, its clients. This helps find who uses
         an Apply node.
    :type used_ids: dict or None
    :param used_ids: the id to use for some object, even if we have only
         referred to it so far.

    :returns: string if `file` == 'str', else file arg

    Each line printed represents a Variable in the graph.
    The indentation of lines corresponds to its depth in the symbolic graph.
    The first part of the text identifies whether it is an input
    (if a name or type is printed) or the output of some Apply (in which case
    the Op is printed).
    The second part of the text is an identifier of the Variable.
    If print_type is True, we add a part containing the type of the Variable

    If a Variable is encountered multiple times in the depth-first search,
    it is only printed recursively the first time. Later, just the Variable
    identifier is printed.

    If an Apply has multiple outputs, then a '.N' suffix will be appended
    to the Apply's identifier, to indicate which output a line corresponds to.

    """
    if not isinstance(depth, integer_types):
        raise Exception("depth parameter must be an int")
    if file == 'str':
        _file = StringIO()
    elif file is None:
        _file = sys.stdout
    else:
        _file = file
    if done is None:
        done = dict()
    if used_ids is None:
        used_ids = dict()
    results_to_print = []
    profile_list = []
    order = []  # Toposort
    smap = []  # storage_map
    if isinstance(obj, (list, tuple, set)):
        lobj = obj
    else:
        lobj = [obj]
    for obj in lobj:
        if isinstance(obj, gof.Variable):
            results_to_print.append(obj)
            profile_list.append(None)
            smap.append(None)
            order.append(None)
        elif isinstance(obj, gof.Apply):
            results_to_print.extend(obj.outputs)
            profile_list.extend([None for item in obj.outputs])
            smap.extend([None for item in obj.outputs])
            order.extend([None for item in obj.outputs])
        elif isinstance(obj, Function):
            results_to_print.extend(obj.maker.fgraph.outputs)
            profile_list.extend(
                [obj.profile for item in obj.maker.fgraph.outputs])
            if print_storage:
                smap.extend(
                    [obj.fn.storage_map for item in obj.maker.fgraph.outputs])
            else:
                smap.extend(
                    [None for item in obj.maker.fgraph.outputs])
            topo = obj.maker.fgraph.toposort()
            order.extend(
                [topo for item in obj.maker.fgraph.outputs])
        elif isinstance(obj, gof.FunctionGraph):
            results_to_print.extend(obj.outputs)
            profile_list.extend([getattr(obj, 'profile', None)
                                 for item in obj.outputs])
            smap.extend([getattr(obj, 'storage_map', None)
                         for item in obj.outputs])
            topo = obj.toposort()
            order.extend([topo for item in obj.outputs])
        elif isinstance(obj, (integer_types, float, np.ndarray)):
            # Plain values go to the requested file, like everything else.
            print(obj, file=_file)
        elif isinstance(obj, (theano.In, theano.Out)):
            results_to_print.append(obj.variable)
            profile_list.append(None)
            smap.append(None)
            order.append(None)
        else:
            raise TypeError("debugprint cannot print an object of this type",
                            obj)

    scan_ops = []
    if any(p is not None and p.fct_callcount > 0 for p in profile_list):
        print("""
Timing Info
-----------
--> <time> <% time> - <total time> <% total time>

<time>         computation time for this node
<% time>       fraction of total computation time for this node
<total time>   time for this node + total times for this node's ancestors
<% total time> total time for this node over total computation time

N.B.:
* Times include the node time and the function overhead.
* <total time> and <% total time> may over-count computation times
  if inputs to a node share a common ancestor and should be viewed as a
  loose upper bound. Their intended use is to help rule out potential nodes
  to remove when optimizing a graph because their <total time> is very low.
""", file=_file)

    for r, p, s, o in zip(results_to_print, profile_list, smap, order):
        # Add the parent scan op to the list as well
        if (hasattr(r.owner, 'op') and
                isinstance(r.owner.op, theano.scan_module.scan_op.Scan)):
            scan_ops.append(r)

        debugmode.debugprint(r, depth=depth, done=done, print_type=print_type,
                             file=_file, order=o, ids=ids,
                             scan_ops=scan_ops, stop_on_name=stop_on_name,
                             profile=p, smap=s, used_ids=used_ids,
                             print_clients=print_clients)

    if len(scan_ops) > 0:
        print("", file=_file)
        new_prefix = ' >'
        new_prefix_child = ' >'
        print("Inner graphs of the scan ops:", file=_file)

        for s in scan_ops:
            # prepare a dict which maps the scan op's inner inputs
            # to its outer inputs.
            if hasattr(s.owner.op, 'fn'):
                # If the op was compiled, print the optimized version.
                inner_inputs = s.owner.op.fn.maker.fgraph.inputs
            else:
                inner_inputs = s.owner.op.inputs
            outer_inputs = s.owner.inputs
            inner_to_outer_inputs = {
                inner_inputs[i]: outer_inputs[o]
                for i, o in
                s.owner.op.var_mappings['outer_inp_from_inner_inp'].items()}

            print("", file=_file)
            debugmode.debugprint(
                s, depth=depth, done=done,
                print_type=print_type,
                file=_file, ids=ids,
                scan_ops=scan_ops,
                stop_on_name=stop_on_name,
                scan_inner_to_outer_inputs=inner_to_outer_inputs,
                print_clients=print_clients, used_ids=used_ids)
            if hasattr(s.owner.op, 'fn'):
                # If the op was compiled, print the optimized version.
                outputs = s.owner.op.fn.maker.fgraph.outputs
            else:
                outputs = s.owner.op.outputs
            for idx, i in enumerate(outputs):

                if hasattr(i, 'owner') and hasattr(i.owner, 'op'):
                    if isinstance(i.owner.op, theano.scan_module.scan_op.Scan):
                        scan_ops.append(i)

                debugmode.debugprint(
                    r=i, prefix=new_prefix,
                    depth=depth, done=done,
                    print_type=print_type, file=_file,
                    ids=ids, stop_on_name=stop_on_name,
                    prefix_child=new_prefix_child,
                    scan_ops=scan_ops,
                    scan_inner_to_outer_inputs=inner_to_outer_inputs,
                    print_clients=print_clients, used_ids=used_ids)

    if file is _file:
        return file
    elif file == 'str':
        return _file.getvalue()
    else:
        _file.flush()
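
For reference, the string-returning mode documented above can be exercised with a minimal sketch like the following (assumes a working Theano install; the variables x and y are illustrative and not part of the code above):

import theano
import theano.tensor as T

x = T.dmatrix('x')
y = (x + x) * 2
# file='str' makes debugprint return the text instead of writing to stdout;
# ids='int' labels each variable with a small unique integer.
print(theano.printing.debugprint(y, print_type=True, ids='int', file='str'))
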
Example #37
0
    def test_tload(self):
        """tests DLOAD, TLOAD1, TLOAD2, TABLED2 cards"""
        model = BDF(debug=False)
        sid = 2
        excite_id = 20
        delay = 0
        tid = 42
        tload1 = model.add_tload1(sid,
                                  excite_id,
                                  tid,
                                  delay=0,
                                  Type='LOAD',
                                  us0=0.0,
                                  vs0=0.0,
                                  comment='tload1')
        tload1 = model.add_tload1(sid,
                                  excite_id,
                                  tid,
                                  delay=1.,
                                  Type='DISP',
                                  us0=0.0,
                                  vs0=0.0,
                                  comment='')
        tload1 = model.add_tload1(sid,
                                  excite_id,
                                  tid,
                                  delay=2,
                                  Type='VELO',
                                  us0=0.0,
                                  vs0=0.0,
                                  comment='')
        tload1 = model.add_tload1(sid,
                                  excite_id,
                                  tid,
                                  delay=0,
                                  Type='ACC',
                                  us0=0.0,
                                  vs0=0.0,
                                  comment='')

        sid = 3
        excite_id = 30
        tload2 = model.add_tload2(sid,
                                  excite_id,
                                  delay=0,
                                  Type='LOAD',
                                  T1=0.,
                                  T2=None,
                                  frequency=0.,
                                  phase=0.,
                                  c=0.,
                                  b=0.,
                                  us0=0.,
                                  vs0=0.,
                                  comment='tload2')
        tload2 = model.add_tload2(sid,
                                  excite_id,
                                  delay=1.,
                                  Type='D',
                                  T1=0.,
                                  T2=None,
                                  frequency=0.,
                                  phase=0.,
                                  c=0.,
                                  b=0.,
                                  us0=0.,
                                  vs0=0.,
                                  comment='')
        tload2 = model.add_tload2(sid,
                                  excite_id,
                                  delay=2,
                                  Type='V',
                                  T1=0.,
                                  T2=None,
                                  frequency=0.,
                                  phase=0.,
                                  c=0.,
                                  b=0.,
                                  us0=0.,
                                  vs0=0.,
                                  comment='')
        tload2 = model.add_tload2(sid,
                                  excite_id,
                                  delay=0,
                                  Type='A',
                                  T1=0.,
                                  T2=1.,
                                  frequency=0.,
                                  phase=0.,
                                  c=0.,
                                  b=0.,
                                  us0=0.,
                                  vs0=0.,
                                  comment='')

        delay_id = 2
        nodes = 100
        components = 2
        delays = 1.5
        delay = model.add_delay(delay_id, nodes, components, delays)

        sid = 1
        scale = 1.0
        scale_factors = 1.
        load_ids = 2
        dload = model.add_dload(sid,
                                scale,
                                scale_factors,
                                load_ids,
                                comment='dload')

        x1 = 0.1
        x = np.linspace(0., 1.)
        y = np.sin(x)
        tabled2 = model.add_tabled2(tid, x1, x, y, comment='tabled2')

        model.pop_parse_errors()

        delay.validate()
        delay.raw_fields()
        delay.write_card()
        delay.write_card(size=16)

        tload1.validate()
        tload1.raw_fields()
        tload1.write_card()
        tload1.write_card(size=16)

        tload2.validate()
        tload2.raw_fields()
        tload2.write_card()
        tload2.write_card(size=16)

        dload.validate()
        dload.raw_fields()
        dload.write_card()
        dload.write_card(size=16)

        tabled2.validate()
        tabled2.raw_fields()
        tabled2.write_card()
        tabled2.write_card(size=16)

        model.validate()
        model.cross_reference()
        model.pop_xref_errors()

        bdf_file = StringIO()
        model.write_bdf(bdf_file, close=False)
        unused_out = bdf_file.getvalue()
        bdf_file.seek(0)
        unused_outs = model.get_bdf_stats(return_type='list')
        unused_outs = model.get_bdf_stats(return_type='string')

        time = 0.5
        out1 = tload1.get_load_at_time(time, scale=1.)
        out2 = tload2.get_load_at_time(time, scale=1.)
        #print(out1)
        assert len(out1) == 1, out1
        assert len(out2) == 1, out2
        #print(out1)
        #print(out2)

        time = [0.5, 0.9]
        out1 = tload1.get_load_at_time(time, scale=1.)
        out2 = tload2.get_load_at_time(time, scale=1.)
        assert len(out1) == 2, out1
        assert len(out2) == 2, out2
        #print(out1)
        #print(out2)

        model2 = read_bdf(bdf_file, punch=True, debug=False)
        model2.uncross_reference()
        model2.safe_cross_reference()
        model2.uncross_reference()
        #print(out)
        #print(outs)
        save_load_deck(model)
Example #38
0
class TestNovaManagePlacementHealAllocations(
        integrated_helpers.ProviderUsageBaseTestCase):
    """Functional tests for nova-manage placement heal_allocations"""

    # This is required by the parent class.
    compute_driver = 'fake.SmallFakeDriver'
    # We want to test iterating across multiple cells.
    NUMBER_OF_CELLS = 2

    def setUp(self):
        # Since the CachingScheduler does not use Placement, we want to use
        # the CachingScheduler to create instances and then we can heal their
        # allocations via the CLI.
        self.flags(driver='caching_scheduler', group='scheduler')
        super(TestNovaManagePlacementHealAllocations, self).setUp()
        self.cli = manage.PlacementCommands()
        # We need to start a compute in each non-cell0 cell.
        for cell_name, cell_mapping in self.cell_mappings.items():
            if cell_mapping.uuid == objects.CellMapping.CELL0_UUID:
                continue
            self._start_compute(cell_name, cell_name=cell_name)
        # Make sure we have two hypervisors reported in the API.
        hypervisors = self.admin_api.api_get(
            '/os-hypervisors').body['hypervisors']
        self.assertEqual(2, len(hypervisors))
        self.flavor = self.api.get_flavors()[0]
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))

    def _boot_and_assert_no_allocations(self, flavor, hostname):
        """Creates a server on the given host and asserts neither have usage

        :param flavor: the flavor used to create the server
        :param hostname: the host on which to create the server
        :returns: two-item tuple of the server and the compute node resource
                  provider uuid
        """
        server_req = self._build_minimal_create_server_request(
            self.api,
            'some-server',
            flavor_id=flavor['id'],
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            networks=[])
        server_req['availability_zone'] = 'nova:%s' % hostname
        created_server = self.api.post_server({'server': server_req})
        server = self._wait_for_state_change(self.admin_api, created_server,
                                             'ACTIVE')

        # Verify that our source host is what the server ended up on
        self.assertEqual(hostname, server['OS-EXT-SRV-ATTR:host'])

        # Check that the compute node resource provider has no allocations.
        rp_uuid = self._get_provider_uuid_by_host(hostname)
        provider_usages = self._get_provider_usages(rp_uuid)
        for resource_class, usage in provider_usages.items():
            self.assertEqual(
                0, usage,
                'Compute node resource provider %s should not have %s '
                'usage when using the CachingScheduler.' %
                (hostname, resource_class))

        # Check that the server has no allocations.
        allocations = self._get_allocations_by_server_uuid(server['id'])
        self.assertEqual({}, allocations,
                         'Server should not have allocations when using '
                         'the CachingScheduler.')
        return server, rp_uuid

    def _assert_healed(self, server, rp_uuid):
        allocations = self._get_allocations_by_server_uuid(server['id'])
        self.assertIn(
            rp_uuid, allocations,
            'Allocations not found for server %s and compute node '
            'resource provider. %s\nOutput:%s' %
            (server['id'], rp_uuid, self.output.getvalue()))
        self.assertFlavorMatchesAllocation(self.flavor,
                                           allocations[rp_uuid]['resources'])

    def test_heal_allocations_paging(self):
        """This test runs the following scenario:

        * Schedule server1 to cell1 and assert it doesn't have allocations.
        * Schedule server2 to cell2 and assert it doesn't have allocations.
        * Run "nova-manage placement heal_allocations --max-count 1" to make
          sure we stop with just one instance and the return code is 1.
        * Run "nova-manage placement heal_allocations" and assert both
          both instances now have allocations against their respective compute
          node resource providers.
        """
        server1, rp_uuid1 = self._boot_and_assert_no_allocations(
            self.flavor, 'cell1')
        server2, rp_uuid2 = self._boot_and_assert_no_allocations(
            self.flavor, 'cell2')

        # heal server1 and server2 in separate calls
        for x in range(2):
            result = self.cli.heal_allocations(max_count=1, verbose=True)
            self.assertEqual(1, result, self.output.getvalue())
            output = self.output.getvalue()
            self.assertIn('Max count reached. Processed 1 instances.', output)
            # If this is the 2nd call, we'll have skipped the first instance.
            if x == 0:
                self.assertNotIn('already has allocations', output)
            else:
                self.assertIn('already has allocations', output)

        self._assert_healed(server1, rp_uuid1)
        self._assert_healed(server2, rp_uuid2)

        # run it again to make sure nothing was processed
        result = self.cli.heal_allocations(verbose=True)
        self.assertEqual(4, result, self.output.getvalue())
        self.assertIn('already has allocations', self.output.getvalue())

    def test_heal_allocations_paging_max_count_more_than_num_instances(self):
        """Sets up 2 instances in cell1 and 1 instance in cell2. Then specify
        --max-count=10, processes 3 instances, rc is 0
        """
        servers = []  # This is really a list of 2-item tuples.
        for x in range(2):
            servers.append(
                self._boot_and_assert_no_allocations(self.flavor, 'cell1'))
        servers.append(
            self._boot_and_assert_no_allocations(self.flavor, 'cell2'))
        result = self.cli.heal_allocations(max_count=10, verbose=True)
        self.assertEqual(0, result, self.output.getvalue())
        self.assertIn('Processed 3 instances.', self.output.getvalue())
        for server, rp_uuid in servers:
            self._assert_healed(server, rp_uuid)

    def test_heal_allocations_paging_more_instances_remain(self):
        """Tests that there is one instance in cell1 and two instances in
        cell2, with a --max-count=2. This tests that we stop in cell2 once
        max_count is reached.
        """
        servers = []  # This is really a list of 2-item tuples.
        servers.append(
            self._boot_and_assert_no_allocations(self.flavor, 'cell1'))
        for x in range(2):
            servers.append(
                self._boot_and_assert_no_allocations(self.flavor, 'cell2'))
        result = self.cli.heal_allocations(max_count=2, verbose=True)
        self.assertEqual(1, result, self.output.getvalue())
        self.assertIn('Max count reached. Processed 2 instances.',
                      self.output.getvalue())
        # Assert that allocations were healed on the instances we expect. Order
        # works here because cell mappings are retrieved by id in ascending
        # order so oldest to newest, and instances are also retrieved from each
        # cell by created_at in ascending order, which matches the order we put
        # created servers in our list.
        for x in range(2):
            self._assert_healed(*servers[x])
        # And assert the remaining instance does not have allocations.
        allocations = self._get_allocations_by_server_uuid(servers[2][0]['id'])
        self.assertEqual({}, allocations)

    def test_heal_allocations_unlimited(self):
        """Sets up 2 instances in cell1 and 1 instance in cell2. Then
        don't specify --max-count, processes 3 instances, rc is 0.
        """
        servers = []  # This is really a list of 2-item tuples.
        for x in range(2):
            servers.append(
                self._boot_and_assert_no_allocations(self.flavor, 'cell1'))
        servers.append(
            self._boot_and_assert_no_allocations(self.flavor, 'cell2'))
        result = self.cli.heal_allocations(verbose=True)
        self.assertEqual(0, result, self.output.getvalue())
        self.assertIn('Processed 3 instances.', self.output.getvalue())
        for server, rp_uuid in servers:
            self._assert_healed(server, rp_uuid)

    def test_heal_allocations_shelved(self):
        """Tests the scenario that an instance with no allocations is shelved
        so heal_allocations skips it (since the instance is not on a host).
        """
        server, rp_uuid = self._boot_and_assert_no_allocations(
            self.flavor, 'cell1')
        self.api.post_server_action(server['id'], {'shelve': None})
        # The server status goes to SHELVED_OFFLOADED before the host/node
        # is nulled out in the compute service, so we also have to wait for
        # that so we don't race when we run heal_allocations.
        server = self._wait_for_server_parameter(self.admin_api, server, {
            'OS-EXT-SRV-ATTR:host': None,
            'status': 'SHELVED_OFFLOADED'
        })
        result = self.cli.heal_allocations(verbose=True)
        self.assertEqual(4, result, self.output.getvalue())
        self.assertIn('Instance %s is not on a host.' % server['id'],
                      self.output.getvalue())
        # Check that the server has no allocations.
        allocations = self._get_allocations_by_server_uuid(server['id'])
        self.assertEqual({}, allocations,
                         'Shelved-offloaded server should not have '
                         'allocations.')

    def test_heal_allocations_task_in_progress(self):
        """Tests the case that heal_allocations skips over an instance which
        is undergoing a task state transition (in this case pausing).
        """
        server, rp_uuid = self._boot_and_assert_no_allocations(
            self.flavor, 'cell1')

        def fake_pause_instance(_self, ctxt, instance, *a, **kw):
            self.assertEqual('pausing', instance.task_state)

        # We have to stub out pause_instance so that the instance is stuck with
        # task_state != None.
        self.stub_out('nova.compute.manager.ComputeManager.pause_instance',
                      fake_pause_instance)
        self.api.post_server_action(server['id'], {'pause': None})
        result = self.cli.heal_allocations(verbose=True)
        self.assertEqual(4, result, self.output.getvalue())
        # Check that the server has no allocations.
        allocations = self._get_allocations_by_server_uuid(server['id'])
        self.assertEqual({}, allocations,
                         'Server undergoing task state transition should '
                         'not have allocations.')
        # Assert something was logged for this instance when it was skipped.
        self.assertIn(
            'Instance %s is undergoing a task state transition: '
            'pausing' % server['id'], self.output.getvalue())
Example #39
0
        def do_check_on(value, nd, var=None):
            """
            Checks `value` for NaNs / Infs. If detected, raises an exception
            and / or prints information about `nd` and `var` to help the
            user determine the cause of the invalid values.

            Parameters
            ----------
            value : numpy.ndarray
                The value to be checked.
            nd : theano.gof.Apply
                The Apply node being executed.
            var : theano.gof.Variable
                Not used if `nd` is provided. Otherwise, used to print the
                stack trace for inputs of the graph.

            """
            error = False
            sio = StringIO()
            if nan_is_error:
                if contains_nan(value, nd, var):
                    print('NaN detected', file=sio)
                    error = True
            if inf_is_error:
                if contains_inf(value, nd, var):
                    print('Inf detected', file=sio)
                    error = True
            if big_is_error:
                err = False
                if not _is_numeric_value(value, var):
                    err = False
                elif pygpu_available and isinstance(value, GpuArray):
                    err = (f_gpua_absmax(value.reshape(value.size)) > 1e10)
                else:
                    err = (np.abs(value).max() > 1e10)
                if err:
                    print('Big value detected', file=sio)
                    error = True
            if error:
                if nd:
                    print(
                        "NanGuardMode found an error in the "
                        "output of a node in this variable:",
                        file=sio)
                    print(theano.printing.debugprint(nd, file='str'), file=sio)
                else:
                    print(
                        "NanGuardMode found an error in an input of the "
                        "graph.",
                        file=sio)
                # Add the stack trace
                if nd:
                    var = nd.outputs[0]
                print(theano.gof.utils.get_variable_trace_string(var),
                      file=sio)
                msg = sio.getvalue()
                if config.NanGuardMode.action == 'raise':
                    raise AssertionError(msg)
                elif config.NanGuardMode.action == 'pdb':
                    print(msg)
                    import pdb
                    pdb.set_trace()
                elif config.NanGuardMode.action == 'warn':
                    logger.error(msg)
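
A hedged sketch of how do_check_on is typically reached: compile a function under NanGuardMode and feed it a value that produces a NaN (assumes Theano; the names x and f are illustrative):

import numpy as np
import theano
import theano.tensor as T
from theano.compile.nanguardmode import NanGuardMode

x = T.vector('x')
f = theano.function(
    [x], T.log(x),
    mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))
f(np.array([1.0, 2.0], dtype=theano.config.floatX))    # passes the checks
# f(np.array([-1.0], dtype=theano.config.floatX))      # log(-1) -> NaN detected
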
Example #40
0
    def c_code(self, node, name, inputs, outputs, sub):
        inp_ndim = node.inputs[0].ndim
        inp = inputs[0]
        indices = inputs[1:]

        # pad out the index list to the same dimension as the input
        idx_list = self.idx_list + \
            ((slice(None),) * (inp_ndim - len(self.idx_list)))

        # This case fails when we use pygpu_index(), so here is some
        # special code
        if len(idx_list) == 0:
            return """
        Py_XDECREF(%(out)s);
        %(out)s = pygpu_copy(%(inp)s, GA_ANY_ORDER);
        if (!%(out)s) {
            // Exception already set
            %(fail)s
        }
""" % dict(out=outputs[0], inp=inp, fail=sub['fail'])

        sio = StringIO()
        print("""
        ssize_t starts[%(sz)s];
        ssize_t stops[%(sz)s];
        ssize_t steps[%(sz)s];
        ssize_t cur;
        int err;

        if (%(inp)s->ga.nd != %(sz)s) {
            PyErr_SetString(PyExc_IndexError, "invalid index");
            %(fail)s
        }
        """ % dict(sz=len(idx_list), inp=inp, fail=sub['fail']), file=sio)

        def fix_idx(idx):
            if idx is None:
                return "0", 1
            elif isinstance(idx, (np.integer, integer_types)):
                return str(idx), 0
            elif isinstance(idx, gof.Type):
                return indices.pop(0), 0
            else:
                assert 0, idx

        for i, idx in enumerate(idx_list):
            if isinstance(idx, slice):
                start, start_n = fix_idx(idx.start)
                stop, stop_n = fix_idx(idx.stop)
                step, step_n = fix_idx(idx.step)
                print("""
                starts[%(i)s] = %(start)s;
                stops[%(i)s] = %(stop)s;
                steps[%(i)s] = %(step)s;
                if (fix_indices(&starts[%(i)s], &stops[%(i)s], &steps[%(i)s],
                                %(start_n)s, %(stop_n)s, %(step_n)s,
                                %(inp)s->ga.dimensions[%(i)s]) == -1) {
                    %(fail)s
                }
                """ % dict(i=i, start=start, stop=stop, step=step,
                           start_n=start_n, stop_n=stop_n, step_n=step_n,
                           fail=sub['fail'], inp=inp), file=sio)
            else:
                if isinstance(idx, gof.Type):
                    start = indices.pop(0)
                elif isinstance(idx, (np.integer, integer_types)):
                    start = idx
                else:
                    assert 0, idx
                print("""
                cur = %(start)s;
                if (cur < 0)
                    cur += %(inp)s->ga.dimensions[%(i)s];
                starts[%(i)s] = cur;
                steps[%(i)s] = 0;
                """ % dict(i=i, start=start, fail=sub['fail'], inp=inp), file=sio)

        print("""
        Py_XDECREF(%(out)s);
        %(out)s = pygpu_index(%(inp)s, starts, stops, steps);
        if (!%(out)s) { %(fail)s }
""" % dict(name=name, fail=sub['fail'], inp=inp, out=outputs[0]), file=sio)

        return sio.getvalue()
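
The StringIO-as-code-buffer pattern above also works in isolation; a minimal sketch (the emitted C snippet is illustrative):

from __future__ import print_function
from six.moves import StringIO

sio = StringIO()
print("ssize_t starts[%(sz)s];" % dict(sz=3), file=sio)
print("ssize_t stops[%(sz)s];" % dict(sz=3), file=sio)
generated_code = sio.getvalue()
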
Example #41
0
    def __repr__(self):
        string_io = StringIO()
        self.write_card(string_io)
        return string_io.getvalue().rstrip()
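
This write-then-getvalue pattern reuses an existing card writer for __repr__; a self-contained sketch built around a hypothetical Card class:

from six.moves import StringIO

class Card(object):
    def write_card(self, f):
        # a hypothetical fixed-format card
        f.write('GRID           1       2      0.      0.      0.\n')

    def __repr__(self):
        string_io = StringIO()
        self.write_card(string_io)
        return string_io.getvalue().rstrip()

repr(Card())  # -> 'GRID           1       2      0.      0.      0.'
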
Example #42
0
    def test_freq(self):
        """tests FREQ, FREQ1, FREQ2, FREQ4"""
        model = BDF(debug=False)
        sid = 101
        freqs = 0.1
        freq = model.add_freq(sid, freqs, comment='freq')
        #print(freq)

        freqs = [2.0, 3.0]
        freq = model.add_freq(sid, freqs, comment='freq')
        #print(freq)

        f1 = 0.
        df = 2.0
        freq1 = model.add_freq1(sid, f1, df, ndf=5, comment='freq1')
        assert len(freq1.freqs) == 6, 'freqs=%s' % freq1.freqs
        #print(freq1)

        f1 = 1.
        f2 = 8.0
        freq2 = model.add_freq2(sid, f1, f2, nf=6, comment='freq2')
        assert len(freq2.freqs) == 7, 'freqs=%s' % freq2.freqs
        assert np.allclose(freq2.freqs.max(), f2), freq2.freqs
        #print(freq2)

        freq4 = model.add_freq4(sid,
                                f1,
                                f2,
                                fspread=0.1,
                                nfm=3,
                                comment='freq4')
        #print(model.frequencies[sid])
        #print(freq4)

        fractions = [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
        freq5 = model.add_freq5(sid,
                                fractions,
                                f1=0.,
                                f2=100.,
                                comment='freq5')

        fractions = np.linspace(0., 1.)
        unused_freq5b = model.add_freq5(sid,
                                        fractions,
                                        f1=0.,
                                        f2=100.,
                                        comment='freq5')
        model.validate()

        freq.raw_fields()
        freq.write_card()
        freq.write_card(size=16)

        freq1.raw_fields()
        freq1.write_card()
        freq1.write_card(size=16)

        freq2.raw_fields()
        freq2.write_card()
        freq2.write_card(size=16)

        freq4.raw_fields()
        freq4.write_card()
        freq4.write_card(size=16)

        freq5.raw_fields()
        freq5.write_card()
        freq5.write_card(size=16)

        bdf_file = StringIO()
        model.write_bdf(bdf_file, close=False)
        unused_out = bdf_file.getvalue()
        bdf_file.seek(0)

        model2 = read_bdf(bdf_file, punch=True, debug=False)
        model2.uncross_reference()
        model2.safe_cross_reference()
        model2.uncross_reference()
        save_load_deck(model)
Example #43
0
def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
    """
    Re-raise an exception while annotating the exception object with
    debug info.

    Parameters
    ----------
    node : Apply node
        The Apply node object that resulted in the raised exception.
    exc_info : tuple, optional
        A tuple containing the exception type, exception object and
        associated traceback, as would be returned by a call to
        `sys.exc_info()` (which is done if `None` is passed).
    storage_map: dict, optional
        storage map of the theano function that resulted in the
        raised exception.

    Notes
    -----
    This re-raises the exception described by `exc_info` (or the last
    one raised, if `exc_info` is omitted) and annotates the exception
    object with several new members which may be helpful for debugging
    Theano graphs. They are:

     * __op_instance__: The Op that is responsible for the exception
       being raised.
     * __thunk_trace__: A traceback corresponding to the code that
       actually generated the exception, if it is available.
     * __applynode_index__: The index of the Apply node corresponding
       to this op in `op.fgraph.toposort()`.

    The exception is not annotated if it is of type `KeyboardInterrupt`.

    """
    if exc_info is None:
        exc_info = sys.exc_info()
    exc_type, exc_value, exc_trace = exc_info
    if exc_type == KeyboardInterrupt:
        # print a simple traceback from KeyboardInterrupt
        reraise(exc_type, exc_value, exc_trace)
    try:
        trace = node.outputs[0].tag.trace
    except AttributeError:
        try:
            trace = node.op.tag.trace
        except AttributeError:
            trace = ()
    exc_value.__thunk_trace__ = trace
    exc_value.__op_instance__ = node
    topo = node.fgraph.toposort()
    if node in topo:
        node_index = topo.index(node)
    else:
        node_index = None
    exc_value.__applynode_index__ = node_index

    hints = []
    detailed_err_msg = "\nApply node that caused the error: " + str(node)
    if exc_value.__applynode_index__ is not None:
        detailed_err_msg += "\nToposort index: %d" % node_index

    types = [getattr(ipt, 'type', 'No type') for ipt in node.inputs]
    detailed_err_msg += "\nInputs types: %s\n" % types

    if thunk is not None:
        if hasattr(thunk, 'inputs'):
            shapes = [
                getattr(ipt[0], 'shape', 'No shapes') for ipt in thunk.inputs
            ]
            strides = [
                getattr(ipt[0], 'strides', 'No strides')
                for ipt in thunk.inputs
            ]
            scalar_values = []
            for ipt in thunk.inputs:
                if getattr(ipt[0], "size", -1) <= 5:
                    scalar_values.append(ipt[0])
                else:
                    scalar_values.append("not shown")
        else:
            shapes = "The thunk don't have an inputs attributes."
            strides = "So we can't access the strides of inputs values"
            scalar_values = "And can't print its inputs scalar value"
        clients = [[c[0] for c in var.clients] for var in node.outputs]
        detailed_err_msg += ("Inputs shapes: %s" % shapes +
                             "\nInputs strides: %s" % strides +
                             "\nInputs values: %s" % scalar_values)
        if hasattr(node.op, '__input_name__'):
            detailed_err_msg += "\nInputs name: %s\n" % str(
                node.op.__input_name__)

        detailed_err_msg += "\nOutputs clients: %s\n" % clients
    else:
        hints.append("HINT: Use another linker then the c linker to"
                     " have the inputs shapes and strides printed.")

    # Print node backtraces
    tr = getattr(node.outputs[0].tag, 'trace', [])
    if len(tr) > 0:
        detailed_err_msg += "\nBacktrace when the node is created:\n"

        # Print a separate message for each element in the list of backtraces
        sio = StringIO()
        for subtr in tr:
            traceback.print_list(subtr, sio)
        detailed_err_msg += str(sio.getvalue())
    else:
        hints.append(
            "HINT: Re-running with most Theano optimization disabled could"
            " give you a back-trace of when this node was created. This can"
            " be done with by setting the Theano flag"
            " 'optimizer=fast_compile'. If that does not work,"
            " Theano optimizations can be disabled with 'optimizer=None'.")

    if theano.config.exception_verbosity == 'high':

        f = StringIO()
        theano.printing.debugprint(node,
                                   file=f,
                                   stop_on_name=True,
                                   print_type=True)
        detailed_err_msg += "\nDebugprint of the apply node: \n"
        detailed_err_msg += f.getvalue()

    # Prints output_map
    if theano.config.exception_verbosity == 'high' and storage_map is not None:
        detailed_err_msg += "\nStorage map footprint:\n"
        shared_input_list = [
            item for item in node.fgraph.inputs
            if isinstance(item, theano.compile.SharedVariable)
        ]
        nonshared_input_list = [
            item for item in node.fgraph.inputs
            if not isinstance(item, theano.compile.SharedVariable)
        ]
        storage_map_list = []
        total_size = 0
        total_size_inputs = 0
        for k in storage_map:
            storage_map_item = []

            # storage_map_item[0]: the variable
            storage_map_item.append(str(k))

            # storage_map_item[1]: the shape
            shapeinfo = None
            if hasattr(storage_map[k][0], 'shape'):
                shapeinfo = storage_map[k][0].shape
                if len(shapeinfo) != 0:
                    storage_map_item.append(shapeinfo)
                else:
                    storage_map_item.append(tuple())
            else:
                storage_map_item.append(None)

            # storage_map_item[2]: itemsize
            # storage_map_item[3]: bytes
            if hasattr(storage_map[k][0], 'dtype'):
                dtype = storage_map[k][0].dtype
                storage_map_item.append(numpy.dtype(dtype).itemsize)
                if shapeinfo is None:
                    storage_map_item.append(None)
                else:
                    sz = numpy.dtype(dtype).itemsize * numpy.prod(shapeinfo)
                    storage_map_item.append(sz)
                    total_size += sz
                    if not k.owner:
                        total_size_inputs += sz
                    else:
                        # If it is a view, don't count it twice.
                        if getattr(k.owner.op, 'view_map', None):
                            vmap = k.owner.op.view_map
                            out_idx = k.owner.outputs.index(k)
                            data = storage_map[k][0]
                            if out_idx in vmap:
                                assert len(vmap[out_idx]) == 1
                                input_data = storage_map[k.owner.inputs[
                                    vmap[out_idx][0]]][0]
                                if k.type.may_share_memory(data, input_data):
                                    total_size -= sz
                        # If it is a destroyed input, the input
                        # shouldn't be in the storage_map anymore
                        # except if there is a special flag used. So
                        # we still must check it.
                        if getattr(k.owner.op, 'destroy_map', None):
                            vmap = k.owner.op.destroy_map
                            out_idx = k.owner.outputs.index(k)
                            data = storage_map[k][0]
                            if out_idx in vmap:
                                assert len(vmap[out_idx]) == 1
                                input_data = storage_map[k.owner.inputs[
                                    vmap[out_idx][0]]][0]
                                if k.type.may_share_memory(data, input_data):
                                    total_size -= sz
            else:
                bytes = getsizeof(storage_map[k][0])
                storage_map_item.append(bytes)
                storage_map_item.append(None)

            # Flag of shared val
            # storage_map_item[4]
            if k in shared_input_list:
                storage_map_item.append(True)
            elif k in nonshared_input_list:
                storage_map_item.append(False)
            else:
                storage_map_item.append(None)
            storage_map_list.append(storage_map_item)

        from operator import itemgetter
        storage_map_list.sort(key=itemgetter(3), reverse=True)
        for item in storage_map_list:
            if item[3] is None:
                continue
            detailed_err_msg += " - " + item[0] + ", "
            if item[4] is True:
                detailed_err_msg += "Shared Input, "
            elif item[4] is False:
                detailed_err_msg += "Input, "
            if item[1] is not None:
                detailed_err_msg += "Shape: %s, " % str(item[1])
            detailed_err_msg += "ElemSize: %s Byte(s)" % item[2]
            if item[3] is not None:
                detailed_err_msg += ", TotalSize: %s Byte(s)\n" % item[3]
            else:
                detailed_err_msg += "\n"
        detailed_err_msg += " TotalSize: %s Byte(s) %.3f GB\n" % (
            total_size, total_size / 1024. / 1024 / 1024)
        detailed_err_msg += " TotalSize inputs: %s Byte(s) %.3f BG\n" % (
            total_size_inputs, total_size_inputs / 1024. / 1024 / 1024)

    else:
        hints.append(
            "HINT: Use the Theano flag 'exception_verbosity=high'"
            " for a debugprint and storage map footprint of this apply node.")

    exc_value = exc_type(
        str(exc_value) + detailed_err_msg + '\n' + '\n'.join(hints))
    reraise(exc_type, exc_value, exc_trace)
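
The backtrace-formatting step above relies on traceback.print_list accepting a file-like object; a minimal standalone sketch (the trace tuple is illustrative):

import traceback
from six.moves import StringIO

sio = StringIO()
fake_trace = [('example.py', 10, '<module>', 'y = f(x)')]
traceback.print_list(fake_trace, sio)
formatted = sio.getvalue()
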
Example #44
0
    def __repr__(self):
        f = StringIO()
        f.write('<PBUSH object> n=%s\n' % self.n)
        self.write_card(f)
        return f.getvalue()
Example #45
0
    def xparm_xds(self,
                  real_space_a,
                  real_space_b,
                  real_space_c,
                  space_group,
                  out=None):
        from cctbx import uctbx

        R = self.imagecif_to_xds_transformation_matrix
        unit_cell_a_axis = R * matrix.col(real_space_a)
        unit_cell_b_axis = R * matrix.col(real_space_b)
        unit_cell_c_axis = R * matrix.col(real_space_c)
        A_inv = matrix.sqr(unit_cell_a_axis.elems + unit_cell_b_axis.elems +
                           unit_cell_c_axis.elems)
        metrical_matrix = (A_inv * A_inv.transpose()).as_sym_mat3()
        unit_cell = uctbx.unit_cell(metrical_matrix=metrical_matrix)
        from iotbx.xds import xparm

        from six.moves import StringIO

        b = StringIO()
        writer = xparm.writer(
            self.starting_frame,
            self.starting_angle,
            self.oscillation_range,
            self.rotation_axis,
            self.wavelength,
            self.beam_vector,
            space_group,
            unit_cell.parameters(),
            unit_cell_a_axis.elems,
            unit_cell_b_axis.elems,
            unit_cell_c_axis.elems,
            None,  # num_segments
            self.detector_size,
            self.pixel_size,
            self.detector_origin,
            self.detector_distance,
            self.detector_x_axis,
            self.detector_y_axis,
            self.detector_normal,
            segments=None,
            orientation=None,
        )
        writer.show(out=b)
        old = b.getvalue()

        new = xparm.write(
            self.starting_frame,
            self.starting_angle,
            self.oscillation_range,
            self.rotation_axis,
            self.wavelength,
            self.beam_vector,
            space_group,
            unit_cell.parameters(),
            unit_cell_a_axis.elems,
            unit_cell_b_axis.elems,
            unit_cell_c_axis.elems,
            None,  # num_segments
            self.detector_size,
            self.pixel_size,
            self.detector_origin,
            self.detector_distance,
            self.detector_x_axis,
            self.detector_y_axis,
            self.detector_normal,
            segments=None,
            orientation=None,
        )
        assert old == new
        if out:
            warnings.warn(
                "out= parameter is deprecated. Use return value instead",
                DeprecationWarning,
                stacklevel=2,
            )
            print(new, end="", file=out)
        return new
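
The out= handling above follows a common deprecation-shim shape; a generic sketch (build_text is a hypothetical helper, not dxtbx API):

from __future__ import print_function
import warnings

def render(out=None):
    text = build_text()  # hypothetical helper returning the new-style string
    if out is not None:
        warnings.warn(
            "out= parameter is deprecated. Use return value instead",
            DeprecationWarning, stacklevel=2)
        print(text, end="", file=out)
    return text
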
Example #46
0
    def run(self,
            cmd,
            timeout=None,
            quote=False,
            call_line_handler_func=False):
        """
        Note: This function is based on paramiko's exec_command()
        method.

        :param timeout: How long to wait (in seconds) for the command to finish (optional).
        :type timeout: ``float``

        :param call_line_handler_func: True to call handle_stdout_line_func function for each line
                                       of received stdout and handle_stderr_line_func for each
                                       line of stderr.
        :type call_line_handler_func: ``bool``
        """

        if quote:
            cmd = quote_unix(cmd)

        extra = {'_cmd': cmd}
        self.logger.info('Executing command', extra=extra)

        # Use the system default buffer size
        bufsize = -1

        transport = self.client.get_transport()
        chan = transport.open_session()

        start_time = time.time()
        if cmd.startswith('sudo'):
            # Note that fabric does this as well. If you set pty, stdout and stderr
            # streams will be combined into one.
            # NOTE: If pty is used, every new line character \n will be converted to \r\n which
            # isn't desired. Because of that we sanitize the output and replace \r\n with \n at the
            # bottom of this method
            uses_pty = True
            chan.get_pty()
        else:
            uses_pty = False
        chan.exec_command(cmd)

        stdout = StringIO()
        stderr = StringIO()

        # Create a stdin file and immediately close it to prevent any
        # interactive script from hanging the process.
        stdin = chan.makefile('wb', bufsize)
        stdin.close()

        # Receive all the output
        # Note #1: This is used instead of the chan.makefile() approach to
        # prevent buffering issues and hanging if the executed command
        # produces a lot of output.
        #
        # Note #2: If you are going to remove "ready" checks inside the loop
        # you are going to have a bad time. Trying to consume from a channel
        # which is not ready will block indefinitely.
        exit_status_ready = chan.exit_status_ready()

        if exit_status_ready:
            stdout_data = self._consume_stdout(
                chan=chan, call_line_handler_func=call_line_handler_func)
            stdout_data = stdout_data.getvalue()

            stderr_data = self._consume_stderr(
                chan=chan, call_line_handler_func=call_line_handler_func)
            stderr_data = stderr_data.getvalue()

            stdout.write(stdout_data)
            stderr.write(stderr_data)

        while not exit_status_ready:
            current_time = time.time()
            elapsed_time = (current_time - start_time)

            if timeout and (elapsed_time > timeout):
                # TODO: Is this the right way to clean up?
                chan.close()

                stdout = sanitize_output(stdout.getvalue(), uses_pty=uses_pty)
                stderr = sanitize_output(stderr.getvalue(), uses_pty=uses_pty)
                raise SSHCommandTimeoutError(
                    cmd=cmd,
                    timeout=timeout,
                    ssh_connect_timeout=self.ssh_connect_timeout,
                    stdout=stdout,
                    stderr=stderr)

            stdout_data = self._consume_stdout(
                chan=chan, call_line_handler_func=call_line_handler_func)
            stdout_data = stdout_data.getvalue()

            stderr_data = self._consume_stderr(
                chan=chan, call_line_handler_func=call_line_handler_func)
            stderr_data = stderr_data.getvalue()

            stdout.write(stdout_data)
            stderr.write(stderr_data)

            # We need to check the exit status here, because the command could
            # print some output and exit during this sleep below.
            exit_status_ready = chan.exit_status_ready()

            if exit_status_ready:
                break

            # Short sleep to prevent busy waiting
            concurrency.sleep(self.SLEEP_DELAY)
        # print('Wait over. Channel must be ready for host: %s' % self.hostname)

        # Receive the exit status code of the command we ran.
        status = chan.recv_exit_status()

        stdout = sanitize_output(stdout.getvalue(), uses_pty=uses_pty)
        stderr = sanitize_output(stderr.getvalue(), uses_pty=uses_pty)

        extra = {'_status': status, '_stdout': stdout, '_stderr': stderr}
        self.logger.debug('Command finished', extra=extra)

        return [stdout, stderr, status]
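
Hypothetical usage of the run() method above (the client instance and the command are illustrative):

stdout, stderr, status = client.run('uname -a', timeout=30)
if status != 0:
    raise RuntimeError('command failed: %s' % stderr)
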
Example #47
0
class ResultCollector(object):
    """Collecter for test results.  This handles creating
    :class:`~.TestResult` instances and handing them off the registered
    result output handlers.

    """

    # Temporary compatibility with unittest's runner
    separator2 = separator2

    def __init__(self, buffer=False, failfast=False):
        self.buffer = buffer
        self.failfast = failfast
        self._result_handlers = []
        self._sorted_handlers = None
        self.testsRun = 0
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.skipped = []
        self.failures = []
        self.errors = []
        self.shouldStop = False
        self._successful = True
        self._mirror_output = False
        self._stderr_buffer = None
        self._stdout_buffer = None
        self._original_stderr = sys.stderr
        self._original_stdout = sys.stdout
        self._test_timing = {}

    @property
    def _handlers(self):
        if self._sorted_handlers is None:
            from .plugins.result_handler import sort_result_handlers
            self._sorted_handlers = sort_result_handlers(self._result_handlers)
        return self._sorted_handlers

    @staticmethod
    def _testcase_to_key(test):
        return (type(test), test._testMethodName)

    def _setup_stdout(self):
        """Hook stdout and stderr if buffering is enabled.

        """
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
                self._stdout_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def _restore_stdout(self):
        """Unhook stdout and stderr if buffering is enabled.

        """
        if self.buffer:
            if self._mirror_output:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)

            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()

    def printErrors(self):  # pragma: no cover
        # FIXME: Remove
        pass

    def add_result_handler(self, handler):
        """Register a new result handler.

        """
        self._result_handlers.append(handler)
        # Reset sorted handlers
        if self._sorted_handlers:
            self._sorted_handlers = None

    def startTest(self, test, start_time=None):
        """Indicate that an individual test is starting.

        Parameters
        ----------
        test : unittest.TestCase
            The test that is starting.
        start_time : datetime
            An internal parameter to allow the parallel test runner to
            set the actual start time of a test run in a subprocess.

        """
        if start_time is None:
            start_time = datetime.utcnow()
        self._test_timing[self._testcase_to_key(test)] = start_time
        self._mirror_output = False
        self._setup_stdout()
        self.testsRun += 1
        for handler in self._handlers:
            handler.start_test(test)

    def stopTest(self, test):
        """Indicate that an individual test has completed.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.

        """
        for handler in self._handlers:
            handler.stop_test(test)
        self._restore_stdout()
        self._mirror_output = False

    def startTestRun(self):
        """Indicate that the test run is starting.

        """
        for handler in self._handlers:
            handler.start_test_run()

    def stopTestRun(self):
        """Indicate that the test run has completed.

        """
        for handler in self._handlers:
            handler.stop_test_run()

    def add_result(self, result):
        """Add an already-constructed :class:`~.TestResult` to this
        :class:`~.ResultCollector`.

        This may be used when collecting results created by other
        ResultCollectors (e.g. in subprocesses).

        """
        for handler in self._handlers:
            handler(result)
        if self._successful and result.status not in _successful_results:
            self._successful = False

    def _handle_result(self, test, status, exception=None, message=None):
        """Create a :class:`~.TestResult` and add it to this
        :class:`~ResultCollector`.

        Parameters
        ----------
        test : unittest.TestCase
            The test that this result will represent.
        status : haas.result.TestCompletionStatus
            The status of the test.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.
        message : str
            Optional message associated with the result (e.g. skip
            reason).

        """
        if self.buffer:
            stderr = self._stderr_buffer.getvalue()
            stdout = self._stdout_buffer.getvalue()
        else:
            stderr = stdout = None

        started_time = self._test_timing.get(self._testcase_to_key(test))
        if started_time is None and isinstance(test, ErrorHolder):
            started_time = datetime.utcnow()
        elif started_time is None:
            raise RuntimeError(
                'Missing test start! Please report this error as a bug in '
                'haas.')

        completion_time = datetime.utcnow()
        duration = TestDuration(started_time, completion_time)
        result = TestResult.from_test_case(
            test,
            status,
            duration=duration,
            exception=exception,
            message=message,
            stdout=stdout,
            stderr=stderr,
        )
        self.add_result(result)
        return result

    @failfast
    def addError(self, test, exception):
        """Register that a test ended in an error.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.

        """
        result = self._handle_result(test,
                                     TestCompletionStatus.error,
                                     exception=exception)
        self.errors.append(result)
        self._mirror_output = True

    @failfast
    def addFailure(self, test, exception):
        """Register that a test ended with a failure.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.

        """
        result = self._handle_result(test,
                                     TestCompletionStatus.failure,
                                     exception=exception)
        self.failures.append(result)
        self._mirror_output = True

    def addSuccess(self, test):
        """Register that a test ended in success.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.

        """
        self._handle_result(test, TestCompletionStatus.success)

    def addSkip(self, test, reason):
        """Register that a test that was skipped.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        reason : str
            The reason the test was skipped.

        """
        result = self._handle_result(test,
                                     TestCompletionStatus.skipped,
                                     message=reason)
        self.skipped.append(result)

    def addExpectedFailure(self, test, exception):
        """Register that a test that failed and was expected to fail.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.

        """
        result = self._handle_result(test,
                                     TestCompletionStatus.expected_failure,
                                     exception=exception)
        self.expectedFailures.append(result)

    @failfast
    def addUnexpectedSuccess(self, test):
        """Register a test that passed unexpectedly.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.

        """
        result = self._handle_result(test,
                                     TestCompletionStatus.unexpected_success)
        self.unexpectedSuccesses.append(result)

    def wasSuccessful(self):
        """Return ``True`` if the run was successful.

        """
        return self._successful

    def stop(self):
        """Set the ``shouldStop`` flag, used by the test cases to determine if
        they should terminate early.

        """
        self.shouldStop = True
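
The registration methods above follow unittest's standard result protocol: addSuccess receives the test case, while addError and addFailure also receive an exc_info tuple. A minimal, standard-library-only sketch of that protocol (the haas-specific TestResult and TestCompletionStatus types are not reproduced here):

import unittest

class RecordingResult(unittest.TestResult):
    """Collects (test, outcome) pairs much like the handlers above."""

    def __init__(self):
        super().__init__()
        self.outcomes = []

    def addSuccess(self, test):
        super().addSuccess(test)
        self.outcomes.append((test, 'success'))

    def addError(self, test, exc_info):
        super().addError(test, exc_info)
        self.outcomes.append((test, 'error'))

class Example(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)

result = RecordingResult()
unittest.defaultTestLoader.loadTestsFromTestCase(Example).run(result)
print(result.outcomes)  # [(<... test_ok>, 'success')]
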
Пример #48
0
def run(args):
    if len(args) == 0:
        s = StringIO()
        master_phil_scope.show(out=s)
        raise Usage("""\
dxtbx.export_bitmaps image_files [options]

%s
""" % s.getvalue())

    unhandled = []
    datablocks = DataBlockFactory.from_args(args,
                                            verbose=False,
                                            unhandled=unhandled)
    assert len(datablocks) > 0
    imagesets = datablocks[0].extract_imagesets()

    cmd_line = command_line.argument_interpreter(
        master_params=master_phil_scope)
    working_phil = cmd_line.process_and_fetch(args=unhandled)
    working_phil.show()
    params = working_phil.extract()

    brightness = params.brightness / 100
    vendortype = "made up"

    # check that binning is a power of 2
    binning = params.binning
    if not (binning > 0 and ((binning & (binning - 1)) == 0)):
        raise Sorry("binning must be a power of 2")

    output_dir = params.output_dir
    if output_dir is None:
        output_dir = "."
    elif not os.path.exists(output_dir):
        os.makedirs(output_dir)

    for imageset in imagesets:
        detector = imageset.get_detector()
        panel = detector[0]
        # XXX is this inclusive or exclusive?
        saturation = panel.get_trusted_range()[1]
        for i_image, image in enumerate(imageset):

            if len(detector) > 1:
                # FIXME This doesn't work properly, as flex_image.size2() is incorrect
                # also binning doesn't work
                assert binning == 1
                flex_image = _get_flex_image_multipanel(
                    brightness=brightness,
                    panels=detector,
                    raw_data=image,
                    beam=imageset.get_beam(),
                )
            else:
                flex_image = _get_flex_image(
                    brightness=brightness,
                    data=image,
                    binning=binning,
                    saturation=saturation,
                    vendortype=vendortype,
                )

            flex_image.setWindow(0, 0, 1)
            flex_image.adjust(
                color_scheme=colour_schemes.get(params.colour_scheme))

            # now export as a bitmap
            flex_image.prep_string()
            try:
                import PIL.Image as Image
            except ImportError:
                import Image
            # XXX is size//binning safe here?
            try:
                pil_img = Image.fromstring(
                    "RGB",
                    (flex_image.size2() // binning,
                     flex_image.size1() // binning),
                    flex_image.export_string,
                )
            except NotImplementedError:
                pil_img = Image.frombytes(
                    "RGB",
                    (flex_image.size2() // binning,
                     flex_image.size1() // binning),
                    flex_image.export_string,
                )

            basename = os.path.basename(
                os.path.splitext(imageset.paths()[i_image])[0])
            path = os.path.join(output_dir, basename + "." + params.format)

            print("Exporting %s" % path)
            tmp_stream = open(path, "wb")
            pil_img.save(tmp_stream, format=params.format)
            tmp_stream.close()
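
The binning validation in run() relies on a standard bit trick; a standalone sketch:

def is_power_of_two(n):
    # n & (n - 1) clears the lowest set bit, so for n > 0 the result is
    # zero exactly when a single bit is set, i.e. when n is a power of two.
    return n > 0 and (n & (n - 1)) == 0

assert is_power_of_two(1) and is_power_of_two(8)
assert not is_power_of_two(0) and not is_power_of_two(6)
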
Пример #49
0
    def do_POST(self):
        T = Timer("do_POST")
        parsed = urlparse(self.path)
        qs = parse_qs(parsed.query)

        expect = self.headers.getheaders("Expect")
        if len(expect) >= 1:
            if True in [item.find("200") >= 0 for item in expect]:
                self.send_response(
                    200)  # untested; has no apparent effect on libcurl
                return

        # Get arguments by reading body of request.
        # We read this in chunks to avoid straining
        # socket.read(); around the 10 or 15Mb mark, some platforms
        # begin to have problems (bug #792570).
        max_chunk_size = 10 * 1024 * 1024
        size_remaining = int(self.headers["content-length"])
        L = []
        while size_remaining:
            chunk_size = min(size_remaining, max_chunk_size)
            L.append(self.rfile.read(chunk_size))
            size_remaining -= len(L[-1])
        data = ''.join(L)
        post_data = StringIO(data)

        # Parse the multipart/form-data
        contentTypeHeader = self.headers.getheaders('content-type').pop()

        # Extract the boundary parameter in the content-type header
        headerParameters = contentTypeHeader.split(";")
        boundary = headerParameters[1].split("=")
        boundary = boundary[1].strip()

        parts = cgi.parse_multipart(
            post_data, {
                "boundary":
                boundary,
                "content-disposition":
                self.headers.getheaders('content-disposition')
            })
        print("*****************************")
        for item in parts.keys():
            if len(parts[item][0]) < 1000:
                print(item, parts[item])
        print("*****************************")

        if parts["filename"][0].find("EXIT") >= 0:
            self.shutdown()
            return

        from spotfinder.diffraction.imagefiles import spotfinder_image_files as ImageFiles
        from spotfinder.diffraction.imagefiles import Spotspickle_argument_module
        response_params = copy.deepcopy(common_parameters_singleton).extract()

        Files = ImageFiles(Spotspickle_argument_module(parts["filename"][0]),
                           response_params)

        print("Final image object:")
        Files.images[0].show_header()
        print("beam_center_convention", Files.images[0].beam_center_convention)
        print("beam_center_reference_frame",
              Files.images[0].beam_center_reference_frame)

        logfile = StringIO()
        if response_params.distl.bins.verbose: sys.stdout = logfile

        from spotfinder.applications.wrappers import spotfinder_factory
        S = spotfinder_factory(None, Files, response_params)
        print()
        sys.stdout = sys.__stdout__

        frames = Files.frames()

        sys.stdout = logfile

        print("Image: %s" % parts["filename"][0])
        from spotfinder.applications.stats_distl import pretty_image_stats, notes
        for frame in frames:
            #pretty_image_stats(S,frame)
            #notes(S,frames[0])
            module_image_stats(S, frame)

        sys.stdout = sys.__stdout__
        log = logfile.getvalue()
        print(log)

        ctype = 'text/plain'
        self.send_response(200)
        self.send_header("Content-type", ctype)
        self.send_header("Content-length", len(log))
        self.end_headers()
        self.wfile.write(log)
        self.opt_logging()
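
Reading the request body in bounded chunks, as done above, is a general technique for large uploads; a Python 3, bytes-based sketch (the empty-read guard is an addition, not present in the handler):

from io import BytesIO

def read_body(rfile, content_length, max_chunk_size=10 * 1024 * 1024):
    """Read exactly content_length bytes from rfile in bounded chunks."""
    chunks = []
    remaining = content_length
    while remaining:
        chunk = rfile.read(min(remaining, max_chunk_size))
        if not chunk:  # peer closed the connection early; avoid looping forever
            break
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)

# usage, with an in-memory stream standing in for self.rfile:
assert len(read_body(BytesIO(b'x' * 100), content_length=100)) == 100
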
Пример #50
0
class TestManage(TestManageBase):

    def setUp(self):
        super(TestManage, self).setUp()
        self.db = manage.DbCommands()
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))

    def test_db_complex_password(self):
        engine = mock.Mock()
        # See comments in get_alembic_config; make an engine url with
        # password characters that will be escaped, to ensure the
        # resulting value makes it into alembic unaltered.
        engine.url = sqlalchemy_make_url(
            'mysql+pymysql://username:pw@%/!#$()@host:1234/dbname')
        alembic_config = alembic_migrations.get_alembic_config(engine)
        self.assertEqual(str(engine.url),
                         alembic_config.get_main_option('sqlalchemy.url'))

    @mock.patch('glance.db.sqlalchemy.api.get_engine')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.data_migrations.'
        'has_pending_migrations')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    def test_db_check_result(self, mock_get_alembic_branch_head,
                             mock_get_current_alembic_heads,
                             mock_has_pending_migrations,
                             get_mock_engine):

        get_mock_engine.return_value = mock.Mock()
        engine = get_mock_engine.return_value
        engine.engine.name = 'postgresql'
        exit = self.assertRaises(SystemExit, self.db.check)
        self.assertIn('Rolling upgrades are currently supported only for '
                      'MySQL and Sqlite', exit.code)

        engine = get_mock_engine.return_value
        engine.engine.name = 'mysql'

        mock_get_current_alembic_heads.return_value = ['ocata_contract01']
        mock_get_alembic_branch_head.return_value = 'pike_expand01'
        exit = self.assertRaises(SystemExit, self.db.check)
        self.assertEqual(3, exit.code)
        self.assertIn('Your database is not up to date. '
                      'Your first step is to run `glance-manage db expand`.',
                      self.output.getvalue())

        mock_get_current_alembic_heads.return_value = ['pike_expand01']
        mock_get_alembic_branch_head.side_effect = ['pike_expand01', None]
        mock_has_pending_migrations.return_value = [mock.Mock()]
        exit = self.assertRaises(SystemExit, self.db.check)
        self.assertEqual(4, exit.code)
        self.assertIn('Your database is not up to date. '
                      'Your next step is to run `glance-manage db migrate`.',
                      self.output.getvalue())

        mock_get_current_alembic_heads.return_value = ['pike_expand01']
        mock_get_alembic_branch_head.side_effect = ['pike_expand01',
                                                    'pike_contract01']
        mock_has_pending_migrations.return_value = None
        exit = self.assertRaises(SystemExit, self.db.check)
        self.assertEqual(5, exit.code)
        self.assertIn('Your database is not up to date. '
                      'Your next step is to run `glance-manage db contract`.',
                      self.output.getvalue())

        mock_get_current_alembic_heads.return_value = ['pike_contract01']
        mock_get_alembic_branch_head.side_effect = ['pike_expand01',
                                                    'pike_contract01']
        mock_has_pending_migrations.return_value = None
        self.assertRaises(SystemExit, self.db.check)
        self.assertIn('Database is up to date. No upgrades needed.',
                      self.output.getvalue())

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, 'expand')
    @mock.patch.object(manage.DbCommands, 'migrate')
    @mock.patch.object(manage.DbCommands, 'contract')
    def test_sync(self, mock_contract, mock_migrate, mock_expand,
                  mock_get_alembic_branch_head,
                  mock_get_current_alembic_heads):
        mock_get_current_alembic_heads.return_value = ['ocata_contract01']
        mock_get_alembic_branch_head.return_value = ['pike_contract01']
        self.db.sync()
        mock_expand.assert_called_once_with(online_migration=False)
        mock_migrate.assert_called_once_with(online_migration=False)
        mock_contract.assert_called_once_with(online_migration=False)
        self.assertIn('Database is synced successfully.',
                      self.output.getvalue())

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch('glance.db.sqlalchemy.alembic_migrations.'
                'place_database_under_alembic_control')
    @mock.patch('alembic.command.upgrade')
    def test_sync_db_is_already_sync(self, mock_upgrade,
                                     mock_db_under_alembic_control,
                                     mock_get_alembic_branch_head,
                                     mock_get_current_alembic_heads):
        mock_get_current_alembic_heads.return_value = ['pike_contract01']
        mock_get_alembic_branch_head.return_value = ['pike_contract01']
        self.assertRaises(SystemExit, self.db.sync)

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    @mock.patch.object(manage.DbCommands, 'expand')
    def test_sync_failed_to_sync(self, mock_expand, mock_validate_engine,
                                 mock_get_alembic_branch_head,
                                 mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.return_value = ['ocata_contract01']
        mock_get_alembic_branch_head.side_effect = ['pike_contract01', '']
        mock_expand.side_effect = exception.GlanceException
        exit = self.assertRaises(SystemExit, self.db.sync)
        self.assertIn('Failed to sync database: ERROR:', exit.code)

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    @mock.patch.object(manage.DbCommands, '_sync')
    def test_expand(self, mock_sync, mock_validate_engine,
                    mock_get_alembic_branch_head,
                    mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.side_effect = ['ocata_contract01',
                                                      'pike_expand01']
        mock_get_alembic_branch_head.side_effect = ['pike_expand01',
                                                    'pike_contract01']
        self.db.expand()
        mock_sync.assert_called_once_with(version='pike_expand01')

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_expand_if_not_expand_head(self, mock_validate_engine,
                                       mock_get_alembic_branch_head,
                                       mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.return_value = ['ocata_contract01']
        mock_get_alembic_branch_head.return_value = []
        exit = self.assertRaises(SystemExit, self.db.expand)
        self.assertIn('Database expansion failed. Couldn\'t find head '
                      'revision of expand branch.', exit.code)

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_expand_db_is_already_sync(self, mock_validate_engine,
                                       mock_get_alembic_branch_head,
                                       mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.return_value = ['pike_contract01']
        mock_get_alembic_branch_head.side_effect = ['pike_expand01',
                                                    'pike_contract01']
        self.assertRaises(SystemExit, self.db.expand)
        self.assertIn('Database is up to date. No migrations needed.',
                      self.output.getvalue())

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_expand_already_sync(self, mock_validate_engine,
                                 mock_get_alembic_branch_head,
                                 mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.return_value = ['pike_expand01']
        mock_get_alembic_branch_head.side_effect = ['pike_expand01',
                                                    'pike_contract01']
        self.db.expand()
        self.assertIn('Database expansion is up to date. '
                      'No expansion needed.', self.output.getvalue())

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    @mock.patch.object(manage.DbCommands, '_sync')
    def test_expand_failed(self, mock_sync, mock_validate_engine,
                           mock_get_alembic_branch_head,
                           mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.side_effect = ['ocata_contract01',
                                                      'test']
        mock_get_alembic_branch_head.side_effect = ['pike_expand01',
                                                    'pike_contract01']
        exit = self.assertRaises(SystemExit, self.db.expand)
        mock_sync.assert_called_once_with(version='pike_expand01')
        self.assertIn('Database expansion failed. Database expansion should '
                      'have brought the database version up to "pike_expand01"'
                      ' revision. But, current revisions are: test ',
                      exit.code)

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.data_migrations.'
        'has_pending_migrations')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    @mock.patch.object(manage.DbCommands, '_sync')
    def test_contract(self, mock_sync, mock_validate_engine,
                      mock_get_alembic_branch_head,
                      mock_get_current_alembic_heads,
                      mock_has_pending_migrations):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.side_effect = ['pike_expand01',
                                                      'pike_contract01']
        mock_get_alembic_branch_head.side_effect = ['pike_contract01',
                                                    'pike_expand01']
        mock_has_pending_migrations.return_value = False
        self.db.contract()
        mock_sync.assert_called_once_with(version='pike_contract01')

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_contract_if_not_contract_head(self, mock_validate_engine,
                                           mock_get_alembic_branch_head,
                                           mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.return_value = ['ocata_contract01']
        mock_get_alembic_branch_head.return_value = []
        exit = self.assertRaises(SystemExit, self.db.contract)
        self.assertIn('Database contraction failed. Couldn\'t find head '
                      'revision of contract branch.', exit.code)

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_contract_db_is_already_sync(self, mock_validate_engine,
                                         mock_get_alembic_branch_head,
                                         mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.return_value = ['pike_contract01']
        mock_get_alembic_branch_head.side_effect = ['pike_contract01',
                                                    'pike_expand01']
        self.assertRaises(SystemExit, self.db.contract)
        self.assertIn('Database is up to date. No migrations needed.',
                      self.output.getvalue())

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_contract_before_expand(self, mock_validate_engine,
                                    mock_get_alembic_branch_head,
                                    mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.return_value = ['ocata_contract01']
        mock_get_alembic_branch_head.side_effect = ['pike_expand01',
                                                    'pike_contract01']
        exit = self.assertRaises(SystemExit, self.db.contract)
        self.assertIn('Database contraction did not run. Database '
                      'contraction cannot be run before database expansion. '
                      'Run database expansion first using "glance-manage db '
                      'expand"', exit.code)

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.data_migrations.'
        'has_pending_migrations')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_contract_before_migrate(self, mock_validate_engine,
                                     mock_get_alembic_branch_head,
                                     mock_get_curr_alembic_heads,
                                     mock_has_pending_migrations):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_curr_alembic_heads.side_effect = ['pike_expand01']
        mock_get_alembic_branch_head.side_effect = ['pike_contract01',
                                                    'pike_expand01']
        mock_has_pending_migrations.return_value = [mock.Mock()]
        exit = self.assertRaises(SystemExit, self.db.contract)
        self.assertIn('Database contraction did not run. Database '
                      'contraction cannot be run before data migration is '
                      'complete. Run data migration using "glance-manage db '
                      'migrate".', exit.code)

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.data_migrations.'
        'has_pending_migrations')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_migrate(self, mock_validate_engine, mock_get_alembic_branch_head,
                     mock_get_current_alembic_heads,
                     mock_has_pending_migrations):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.side_effect = ['pike_expand01',
                                                      'pike_contract01']
        mock_get_alembic_branch_head.side_effect = ['pike_contract01',
                                                    'pike_expand01']
        mock_has_pending_migrations.return_value = None
        self.db.migrate()
        self.assertIn('Database migration is up to date. '
                      'No migration needed.', self.output.getvalue())

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_migrate_db_is_already_sync(self, mock_validate_engine,
                                        mock_get_alembic_branch_head,
                                        mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.return_value = ['pike_contract01']
        mock_get_alembic_branch_head.side_effect = ['pike_contract01',
                                                    'pike_expand01']
        self.assertRaises(SystemExit, self.db.migrate)
        self.assertIn('Database is up to date. No migrations needed.',
                      self.output.getvalue())

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_migrate_already_sync(self, mock_validate_engine,
                                  mock_get_alembic_branch_head,
                                  mock_get_current_alembic_heads):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.return_value = ['ocata_contract01']
        mock_get_alembic_branch_head.side_effect = ['pike_contract01',
                                                    'pike_expand01']
        exit = self.assertRaises(SystemExit, self.db.migrate)
        self.assertIn('Data migration did not run. Data migration cannot be '
                      'run before database expansion. Run database expansion '
                      'first using "glance-manage db expand"', exit.code)

    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.data_migrations.'
        'has_pending_migrations')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads')
    @mock.patch(
        'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head')
    @mock.patch.object(manage.DbCommands, '_validate_engine')
    def test_migrate_before_expand(self, mock_validate_engine,
                                   mock_get_alembic_branch_head,
                                   mock_get_current_alembic_heads,
                                   mock_has_pending_migrations):
        engine = mock_validate_engine.return_value
        engine.engine.name = 'mysql'
        mock_get_current_alembic_heads.return_value = ['pike_expand01']
        mock_get_alembic_branch_head.side_effect = ['pike_contract01',
                                                    'pike_expand01']
        mock_has_pending_migrations.return_value = None
        self.db.migrate()
        self.assertIn('Database migration is up to date. '
                      'No migration needed.', self.output.getvalue())

    @mock.patch.object(manage.DbCommands, 'version')
    def test_db_version(self, version):
        self._main_test_helper(['glance.cmd.manage', 'db', 'version'],
                               manage.DbCommands.version)

    @mock.patch.object(manage.DbCommands, 'check')
    def test_db_check(self, check):
        self._main_test_helper(['glance.cmd.manage', 'db', 'check'],
                               manage.DbCommands.check)

    @mock.patch.object(manage.DbCommands, 'sync')
    def test_db_sync(self, sync):
        self._main_test_helper(['glance.cmd.manage', 'db', 'sync'],
                               manage.DbCommands.sync)

    @mock.patch.object(manage.DbCommands, 'upgrade')
    def test_db_upgrade(self, upgrade):
        self._main_test_helper(['glance.cmd.manage', 'db', 'upgrade'],
                               manage.DbCommands.upgrade)

    @mock.patch.object(manage.DbCommands, 'version_control')
    def test_db_version_control(self, version_control):
        self._main_test_helper(['glance.cmd.manage', 'db', 'version_control'],
                               manage.DbCommands.version_control)

    @mock.patch.object(manage.DbCommands, 'sync')
    def test_db_sync_version(self, sync):
        self._main_test_helper(['glance.cmd.manage', 'db', 'sync', 'liberty'],
                               manage.DbCommands.sync, 'liberty')

    @mock.patch.object(manage.DbCommands, 'upgrade')
    def test_db_upgrade_version(self, upgrade):
        self._main_test_helper(['glance.cmd.manage', 'db',
                                'upgrade', 'liberty'],
                               manage.DbCommands.upgrade, 'liberty')

    @mock.patch.object(manage.DbCommands, 'expand')
    def test_db_expand(self, expand):
        self._main_test_helper(['glance.cmd.manage', 'db', 'expand'],
                               manage.DbCommands.expand)

    @mock.patch.object(manage.DbCommands, 'migrate')
    def test_db_migrate(self, migrate):
        self._main_test_helper(['glance.cmd.manage', 'db', 'migrate'],
                               manage.DbCommands.migrate)

    @mock.patch.object(manage.DbCommands, 'contract')
    def test_db_contract(self, contract):
        self._main_test_helper(['glance.cmd.manage', 'db', 'contract'],
                               manage.DbCommands.contract)

    def test_db_metadefs_unload(self):
        db_metadata.db_unload_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'unload_metadefs'],
                               db_metadata.db_unload_metadefs,
                               db_api.get_engine())

    def test_db_metadefs_load(self):
        db_metadata.db_load_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs'],
                               db_metadata.db_load_metadefs,
                               db_api.get_engine(),
                               None, False, False, False)

    def test_db_metadefs_load_with_specified_path(self):
        db_metadata.db_load_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--path', '/mock/'],
                               db_metadata.db_load_metadefs,
                               db_api.get_engine(),
                               '/mock/', False, False, False)

    def test_db_metadefs_load_prefer_new_with_path(self):
        db_metadata.db_load_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--path', '/mock/', '--merge', '--prefer_new'],
                               db_metadata.db_load_metadefs,
                               db_api.get_engine(),
                               '/mock/', True, True, False)

    def test_db_metadefs_load_prefer_new(self):
        db_metadata.db_load_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--merge', '--prefer_new'],
                               db_metadata.db_load_metadefs,
                               db_api.get_engine(),
                               None, True, True, False)

    def test_db_metadefs_load_overwrite_existing(self):
        db_metadata.db_load_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--merge', '--overwrite'],
                               db_metadata.db_load_metadefs,
                               db_api.get_engine(),
                               None, True, False, True)

    def test_db_metadefs_load_prefer_new_and_overwrite_existing(self):
        db_metadata.db_load_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--merge', '--prefer_new', '--overwrite'],
                               db_metadata.db_load_metadefs,
                               db_api.get_engine(),
                               None, True, True, True)

    def test_db_metadefs_load_from_path_overwrite_existing(self):
        db_metadata.db_load_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--path', '/mock/', '--merge', '--overwrite'],
                               db_metadata.db_load_metadefs,
                               db_api.get_engine(),
                               '/mock/', True, False, True)

    def test_db_metadefs_export(self):
        db_metadata.db_export_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'export_metadefs'],
                               db_metadata.db_export_metadefs,
                               db_api.get_engine(),
                               None)

    def test_db_metadefs_export_with_specified_path(self):
        db_metadata.db_export_metadefs = mock.Mock()
        self._main_test_helper(['glance.cmd.manage', 'db', 'export_metadefs',
                                '--path', '/mock/'],
                               db_metadata.db_export_metadefs,
                               db_api.get_engine(),
                               '/mock/')
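
The setUp pattern used by TestManage (and by the nova test classes below) redirects sys.stdout into a StringIO so assertions can inspect everything the command printed. A reduced sketch, assuming the testtools/fixtures APIs these suites build on:

from io import StringIO
import fixtures
import testtools

class CapturedStdoutTestCase(testtools.TestCase):
    def setUp(self):
        super().setUp()
        self.output = StringIO()
        # Swap sys.stdout for the buffer; the fixture restores the
        # original stream automatically during test cleanup.
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))

    def test_prints_are_captured(self):
        print('hello')
        self.assertIn('hello', self.output.getvalue())
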
Пример #51
0
class TestUpgradeCheckBasic(test.NoDBTestCase):
    """Tests for the nova-status upgrade check command.

    The tests in this class should just test basic logic and use mock. Real
    checks which require more elaborate fixtures or the database should be done
    in separate test classes as they are more or less specific to a particular
    release and may be removed in a later release after they are no longer
    needed.
    """
    def setUp(self):
        super(TestUpgradeCheckBasic, self).setUp()
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        self.cmd = status.UpgradeCommands()

    def test_check_success(self):
        fake_checks = (('good',
                        mock.Mock(return_value=status.UpgradeCheckResult(
                            status.UpgradeCheckCode.SUCCESS))), )
        with mock.patch.object(self.cmd, '_upgrade_checks', fake_checks):
            self.assertEqual(status.UpgradeCheckCode.SUCCESS, self.cmd.check())
        expected = """\
+-----------------------+
| Upgrade Check Results |
+-----------------------+
| Check: good           |
| Result: Success       |
| Details: None         |
+-----------------------+
"""
        self.assertEqual(expected, self.output.getvalue())

    def test_check_warning(self):
        fake_checks = (
            ('good',
             mock.Mock(return_value=status.UpgradeCheckResult(
                 status.UpgradeCheckCode.SUCCESS))),
            ('warn',
             mock.Mock(return_value=status.UpgradeCheckResult(
                 status.UpgradeCheckCode.WARNING, 'there might be a problem'))
             ),
        )
        with mock.patch.object(self.cmd, '_upgrade_checks', fake_checks):
            self.assertEqual(status.UpgradeCheckCode.WARNING, self.cmd.check())
        expected = """\
+-----------------------------------+
| Upgrade Check Results             |
+-----------------------------------+
| Check: good                       |
| Result: Success                   |
| Details: None                     |
+-----------------------------------+
| Check: warn                       |
| Result: Warning                   |
| Details: there might be a problem |
+-----------------------------------+
"""
        self.assertEqual(expected, self.output.getvalue())

    def test_check_failure(self):
        # make the error details over 60 characters so we test the wrapping
        error_details = 'go back to bed' + '!' * 60
        fake_checks = (
            ('good',
             mock.Mock(return_value=status.UpgradeCheckResult(
                 status.UpgradeCheckCode.SUCCESS))),
            ('warn',
             mock.Mock(return_value=status.UpgradeCheckResult(
                 status.UpgradeCheckCode.WARNING, 'there might be a problem'))
             ),
            ('fail',
             mock.Mock(return_value=status.UpgradeCheckResult(
                 status.UpgradeCheckCode.FAILURE, error_details))),
        )
        with mock.patch.object(self.cmd, '_upgrade_checks', fake_checks):
            self.assertEqual(status.UpgradeCheckCode.FAILURE, self.cmd.check())
        expected = """\
+-----------------------------------------------------------------------+
| Upgrade Check Results                                                 |
+-----------------------------------------------------------------------+
| Check: good                                                           |
| Result: Success                                                       |
| Details: None                                                         |
+-----------------------------------------------------------------------+
| Check: warn                                                           |
| Result: Warning                                                       |
| Details: there might be a problem                                     |
+-----------------------------------------------------------------------+
| Check: fail                                                           |
| Result: Failure                                                       |
| Details: go back to bed!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! |
|   !!!!!!!!!!!!!!                                                      |
+-----------------------------------------------------------------------+
"""
        self.assertEqual(expected, self.output.getvalue())
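
The over-60-character failure details in test_check_failure exercise the table's column wrapping; the effect can be approximated with the standard textwrap module (width=60 echoes the test's own comment, but the exact wrap width used by nova's table formatting is an assumption here):

import textwrap

details = 'go back to bed' + '!' * 60
for line in textwrap.wrap(details, width=60):
    print(line)
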
Пример #52
0
    def do_POST(self):
        T = Timer("do_POST")
        parsed = urlparse(self.path)
        qs = parse_qs(parsed.query)

        expect = self.headers.getheaders("Expect")
        if len(expect) >= 1:
            if True in [item.find("100") >= 0 for item in expect]:
                self.send_response(
                    100)  # untested; has no apparent effect on libcurl

        # Get arguments by reading body of request.
        # We read this in chunks to avoid straining
        # socket.read(); around the 10 or 15Mb mark, some platforms
        # begin to have problems (bug #792570).
        max_chunk_size = 10 * 1024 * 1024
        size_remaining = int(self.headers["content-length"])
        L = []
        while size_remaining:
            chunk_size = min(size_remaining, max_chunk_size)
            L.append(self.rfile.read(chunk_size))
            size_remaining -= len(L[-1])
        data = ''.join(L)
        post_data = StringIO(data)

        # Parse the multipart/form-data
        contentTypeHeader = self.headers.getheaders('content-type').pop()

        # Extract the boundary parameter in the content-type header
        headerParameters = contentTypeHeader.split(";")
        boundary = headerParameters[1].split("=")
        boundary = boundary[1].strip()

        parts = cgi.parse_multipart(
            post_data, {
                "boundary":
                boundary,
                "content-disposition":
                self.headers.getheaders('content-disposition')
            })
        print "*****************************"
        for item in parts.keys():
            if len(parts[item][0]) < 1000:
                print item, parts[item]
        print "*****************************"

        from iotbx.detectors.image_from_http_request import module_or_slice_from_http_request
        imgobj = module_or_slice_from_http_request(parts)
        imgobj.read()
        print "Final image object:"
        imgobj.show_header()

        from spotfinder.diffraction.imagefiles import image_files, file_names
        from spotfinder.diffraction.imagefiles import Spotspickle_argument_module

        from spotfinder.applications.overall_procedure import spotfinder_no_pickle

        class server_imagefiles(image_files):
            def __init__(self):
                pass

        Files = server_imagefiles()
        Files.filenames = file_names(
            Spotspickle_argument_module(imgobj.filename))
        Files.images = [imgobj]

        S = spotfinder_no_pickle(Files, s3_passthru="-s3 4", spot_convention=0)

        frames = Files.frames()

        logfile = StringIO()
        sys.stdout = logfile

        from spotfinder.applications.stats_distl import pretty_image_stats, notes
        for frame in frames:
            #pretty_image_stats(S,frame)
            #notes(S,frames[0])
            module_image_stats(S, frame)

        sys.stdout = sys.__stdout__
        log = logfile.getvalue()
        print(log)

        ctype = 'text/plain'
        self.send_response(200)
        self.send_header("Content-type", ctype)
        self.send_header("Content-length", len(log))
        self.end_headers()
        self.wfile.write(log)
        self.opt_logging()
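
Both handlers capture their report by rebinding sys.stdout to a StringIO and restoring sys.__stdout__ afterwards; if an exception fires in between, stdout stays hijacked. On Python 3 the same capture can be written exception-safely with contextlib.redirect_stdout; a sketch:

from contextlib import redirect_stdout
from io import StringIO

logfile = StringIO()
with redirect_stdout(logfile):
    print('captured line')  # written to logfile, not the terminal
log = logfile.getvalue()
print(log, end='')  # now actually printed
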
Пример #53
0
 def __repr__(self):
     f = StringIO()
     self.write_card(f)
     return f.getvalue().rstrip()
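
A self-contained version of this __repr__-via-StringIO pattern, with a hypothetical Card class standing in for the real card writer:

from io import StringIO

class Card:  # hypothetical stand-in for any class with a write_card(f) method
    def write_card(self, f):
        f.write('GRID           1       2\n')

    def __repr__(self):
        f = StringIO()
        self.write_card(f)
        return f.getvalue().rstrip()

assert repr(Card()) == 'GRID           1       2'
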
Пример #54
0
    def get_mass(self, nid1, nid2, xyz1, xyz2, eid, pid, mid, A, J, c, nsm, E,
                 G, nu, rho, L):
        """tests a CROD and a CONROD"""
        card_count = {
            'CONROD': 1,
            'CTUBE': 1,
            'PTUBE': 1,
            'CROD': 1,
            'PROD': 1,
            'GRID': 2,
            'MAT1': 1,
        }
        model = BDF(debug=debug)
        model.allocate(card_count)
        lines = [
            'conrod,%i, %i, %i, %i, %f, %f, %f, %f' %
            (eid, nid1, nid2, mid, A, J, c, nsm)
        ]
        model.add_card(lines, 'conrod', is_list=False)

        lines = ['crod,%i, %i, %i, %i' % (eid + 1, pid, nid1, nid2)]
        model.add_card(lines, 'crod', is_list=False)

        lines = ['ctube,%i, %i, %i, %i' % (eid + 2, pid + 1, nid1, nid2)]
        model.add_card(lines, 'ctube', is_list=False)

        lines = ['prod,%i, %i, %f, %f, %f, %f' % (pid, mid, A, J, c, nsm)]
        model.add_card(lines, 'prod', is_list=False)

        OD1 = sqrt(4 * A / pi)
        t = 0.
        OD2 = OD1
        lines = [
            'ptube,%i, %i, %f, %f, %f, %f' % (pid + 1, mid, OD1, t, nsm, OD2)
        ]
        model.add_card(lines, 'ptube', is_list=False)

        lines = ['mat1,%i, %.2e, %.2e, %f, %f' % (mid, E, G, nu, rho)]
        model.add_card(lines, 'mat1', is_list=False)

        lines = [
            'grid,%i, %i, %f, %f, %f' % (nid1, 0, xyz1[0], xyz1[1], xyz1[2])
        ]
        model.add_card(lines, 'grid', is_list=False)

        lines = [
            'grid,%i, %i, %f, %f, %f' % (nid2, 0, xyz2[0], xyz2[1], xyz2[2])
        ]
        model.add_card(lines, 'grid', is_list=False)

        model.build()
        mass = L * (rho * A + nsm)

        f = StringIO()
        model.write_bdf(out_filename=f,
                        interspersed=True,
                        size=8,
                        precision='single',
                        enddata=None)
        print(f.getvalue())
        #positions = model.get_positions()
        grid_cid0 = None

        # conrod
        conrod = model.conrod.slice_by_element_id(eid)
        self.assertEquals(conrod.get_element_id_by_element_index(), eid)
        #self.assertEquals(conrod.get_property_id_by_element_id(), None)
        self.assertEquals(conrod.get_material_id_by_element_id(eid), mid)
        self.assertEquals(
            conrod.get_length_by_element_index(i=None, grid_cid0=grid_cid0), L)
        #self.assertEquals(conrod.Nsm(), nsm)

        rhoi = conrod.get_density_by_element_id(eid)
        Ai = conrod.get_area_by_element_id(eid)
        Li = conrod.get_length_by_element_id(eid, grid_cid0=grid_cid0)
        nsmi = conrod.get_non_structural_mass_by_element_id(eid)
        massa = conrod.get_mass_by_element_index()
        mass_msg_conrod = 'mass = L * (rho * A + nsm)\n'
        mass_msg_conrod += 'L=%s expected=%s\n' % (Li, L)
        mass_msg_conrod += 'rho=%s expected=%s\n' % (rhoi, rho)
        mass_msg_conrod += 'A=%s expected=%s\n' % (Ai, A)
        mass_msg_conrod += 'nsm=%s expected=%s\n' % (nsmi, nsm)
        mass_msg_conrod += 'mass=%s actual=%s expected=%s\n' % (
            Li * (rhoi * Ai + nsmi), massa, mass)
        #mass_msg_conrod += 'mass=%s expected=%s\n' % (Li * (rhoi*Ai + nsmi), mass)

        self.assertEquals(massa, mass, mass_msg_conrod)
        #self.assertEquals(conrod.E(), E)
        #self.assertEquals(conrod.G(), G)
        #self.assertEquals(conrod.area(), A)
        #self.assertEquals(conrod.J(), J)
        #self.assertEquals(conrod.C(), c)
        #self.assertEquals(conrod.Rho(), rho)

        # crod
        crod_eid = eid + 1
        crod = model.crod.slice_by_element_id([crod_eid])
        self.assertEquals(crod.get_element_id_by_element_index(), crod_eid)
        self.assertEquals(crod.get_property_id_by_element_id(crod_eid), pid)
        self.assertEquals(crod.get_material_id_by_element_id(crod_eid), mid)
        rhoi = crod.get_density_by_element_id(crod_eid)
        Ai = crod.get_area_by_element_id(crod_eid)
        Li = crod.get_length_by_element_id(crod_eid, grid_cid0=grid_cid0)
        nsmi = crod.get_non_structural_mass_by_element_id(crod_eid)
        self.assertEquals(Li, L)
        #self.assertEquals(crod.Nsm(), nsm)

        massa = crod.get_mass_by_element_id(crod_eid)
        mass_msg_crod = 'mass = L * (rho * A + nsm)\n'
        mass_msg_crod += 'L=%s expected=%s\n' % (Li, L)
        mass_msg_crod += 'rho=%s expected=%s\n' % (rhoi, rho)
        mass_msg_crod += 'A=%s expected=%s\n' % (Ai, A)
        mass_msg_crod += 'nsm=%s expected=%s\n' % (nsmi, nsm)
        mass_msg_crod += 'mass=%s actual=%s expected=%s\n' % (
            Li * (rhoi * Ai + nsmi), massa, mass)
        self.assertEquals(massa, mass, mass_msg_crod)
        #self.assertEquals(crod.E(), E)
        #self.assertEquals(crod.G(), G)
        #self.assertEquals(crod.area(), A)
        #self.assertEquals(crod.J(), J)
        #self.assertEquals(crod.C(), c)
        #self.assertEquals(crod.Rho(), rho)
        #self.assertEquals(crod.Nu(), nu)

        # prod
        prod = model.prod.slice_by_property_id([pid])
        self.assertEquals(prod.property_id[0], pid)
        self.assertEquals(prod.get_material_id_by_property_id(pid), mid)
        self.assertEquals(prod.get_non_structural_mass_by_property_id(pid),
                          nsm)
        self.assertEquals(prod.get_E_by_property_id(pid), E)
        self.assertEquals(prod.get_G_by_property_id(pid), G)
        self.assertEquals(prod.get_area_by_property_id(pid), A)
        self.assertEquals(prod.get_J_by_property_id(pid), J)
        self.assertEquals(prod.get_c_by_property_id(pid), c)
        self.assertEquals(prod.get_density_by_property_id(pid), rho)

        # ctube
        if 1:
            ctube_eid = eid + 2
            ptube_pid = pid + 1
            assert ctube_eid == 12, ctube_eid
            assert ptube_pid == 68, ptube_pid
            ctube = model.ctube.slice_by_element_id(ctube_eid)
            self.assertEquals(ctube.get_element_id_by_element_index(),
                              ctube_eid)
            self.assertEquals(ctube.get_property_id_by_element_id(ctube_eid),
                              ptube_pid)
            self.assertEquals(ctube.get_material_id_by_element_id(ctube_eid),
                              mid)
            self.assertEquals(
                ctube.get_length_by_element_id(ctube_eid, grid_cid0), L)
            self.assertEquals(
                ctube.get_non_structural_mass_by_element_id(ctube_eid), nsm)
            self.assertAlmostEquals(ctube.get_mass_by_element_id(ctube_eid),
                                    mass, 5)
            self.assertEquals(ctube.get_E_by_element_id(ctube_eid), E)
            self.assertEquals(ctube.get_G_by_element_id(ctube_eid), G)
            self.assertAlmostEquals(ctube.get_area_by_element_id(ctube_eid), A,
                                    5)
            ctube.get_J_by_element_id(ctube_eid)
            self.assertEquals(ctube.get_density_by_element_id(), rho)

            # ptube
            ptube = model.ptube.slice_by_property_id(pid + 1)
            self.assertEquals(ptube.get_property_id_by_property_index(),
                              pid + 1)
            self.assertEquals(ptube.get_material_id_by_property_id(), mid)
            self.assertEquals(ptube.get_non_structural_mass_by_property_id(),
                              nsm)
            self.assertEquals(ptube.get_E_by_property_id(), E)
            self.assertEquals(ptube.get_G_by_property_id(), G)
            self.assertAlmostEquals(ptube.get_area_by_property_id(), A, 5)
            ptube.get_J_by_property_id()
            self.assertEquals(ptube.get_density_by_property_id(), rho)
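
Every mass assertion above compares against the same expected value computed near the top of the test, mass = L * (rho * A + nsm); as a tiny helper:

def rod_mass(length, rho, area, nsm):
    """Rod mass: length * (mass density * area + non-structural mass per unit length)."""
    return length * (rho * area + nsm)

assert rod_mass(2.0, 0.1, 3.0, 0.5) == 2.0 * (0.1 * 3.0 + 0.5)
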
Пример #55
0
class TestPolicyCheck(test.NoDBTestCase):

    def setUp(self):
        super(TestPolicyCheck, self).setUp()
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
        self.cmd = policy.PolicyCommands()

    @mock.patch.object(policy.PolicyCommands, '_filter_rules')
    @mock.patch.object(policy.PolicyCommands, '_get_target')
    @mock.patch.object(policy.PolicyCommands, '_get_context')
    def test_check(self, mock_get_context, mock_get_target,
                   mock_filter_rules):
        fake_rules = ['fake:rule', 'faux:roule']
        mock_filter_rules.return_value = fake_rules

        self.cmd.check(target=mock.sentinel.target)

        mock_get_context.assert_called_once_with()
        mock_get_target.assert_called_once_with(mock_get_context.return_value,
                                                mock.sentinel.target)
        mock_filter_rules.assert_called_once_with(
            mock_get_context.return_value, '', mock_get_target.return_value)
        self.assertEqual('\n'.join(fake_rules) + '\n', self.output.getvalue())

    @mock.patch.object(nova_context, 'RequestContext')
    @mock.patch.object(policy, 'CONF')
    def test_get_context(self, mock_CONF, mock_RequestContext):
        context = self.cmd._get_context()

        self.assertEqual(mock_RequestContext.return_value, context)
        mock_RequestContext.assert_called_once_with(
            roles=mock_CONF.os_roles,
            user_id=mock_CONF.os_user_id,
            project_id=mock_CONF.os_tenant_id)

    def test_get_target_none(self):
        target = self.cmd._get_target(mock.sentinel.context, None)
        self.assertIsNone(target)

    def test_get_target_invalid_attribute(self):
        self.assertRaises(exception.InvalidAttribute, self.cmd._get_target,
                          mock.sentinel.context, ['nope=nada'])

    def test_get_target(self):
        expected_target = {
            'project_id': 'fake-proj',
            'user_id': 'fake-user',
            'quota_class': 'fake-quota-class',
            'availability_zone': 'fake-az',
        }
        given_target = ['='.join([key, val])
                        for key, val in expected_target.items()]

        actual_target = self.cmd._get_target(mock.sentinel.context,
                                             given_target)
        self.assertDictEqual(expected_target, actual_target)

    @mock.patch.object(nova_context, 'get_admin_context')
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_get_target_instance(self, mock_instance_get,
                                 mock_get_admin_context):
        admin_context = nova_context.RequestContext(is_admin=True)
        mock_get_admin_context.return_value = admin_context
        given_target = ['instance_id=fake_id']
        mock_instance_get.return_value = fake_instance.fake_db_instance()
        target = self.cmd._get_target(mock.sentinel.context,
                                      given_target)
        self.assertEqual(target,
            {'user_id': 'fake-user', 'project_id': 'fake-project'})
        mock_instance_get.assert_called_once_with(admin_context,
                                                  'fake_id')

    def _check_filter_rules(self, context=None, target=None,
                            expected_rules=None):
        context = context or nova_context.get_admin_context()
        if expected_rules is None:
            expected_rules = [
                r.name for r in ia_policies.list_rules()]

        passing_rules = self.cmd._filter_rules(
                context, 'os-instance-actions:list', target)
        passing_rules += self.cmd._filter_rules(
                context, 'os-instance-actions:show', target)
        passing_rules += self.cmd._filter_rules(
                context, 'os-instance-actions:events', target)
        passing_rules += self.cmd._filter_rules(
                context, 'os-instance-actions:events:details', target)
        self.assertEqual(set(expected_rules), set(passing_rules))

    def test_filter_rules_non_admin(self):
        context = nova_context.RequestContext()
        rule_conditions = [base_policies.PROJECT_READER_OR_SYSTEM_READER]
        expected_rules = [r.name for r in ia_policies.list_rules() if
                          r.check_str in rule_conditions]
        self._check_filter_rules(context, expected_rules=expected_rules)

    def test_filter_rules_admin(self):
        self._check_filter_rules()

    def test_filter_rules_instance_non_admin(self):
        db_context = nova_context.RequestContext(user_id='fake-user',
                                                 project_id='fake-project')
        instance = fake_instance.fake_instance_obj(db_context)
        context = nova_context.RequestContext()
        expected_rules = [r.name for r in ia_policies.list_rules() if
                          r.check_str == base_policies.RULE_ANY]
        self._check_filter_rules(context, instance, expected_rules)

    def test_filter_rules_instance_admin(self):
        db_context = nova_context.RequestContext(user_id='fake-user',
                                                 project_id='fake-project')
        instance = fake_instance.fake_instance_obj(db_context)
        self._check_filter_rules(target=instance)

    def test_filter_rules_instance_owner(self):
        db_context = nova_context.RequestContext(user_id='fake-user',
                                                 project_id='fake-project')
        instance = fake_instance.fake_instance_obj(db_context)
        rule_conditions = [base_policies.PROJECT_READER_OR_SYSTEM_READER]
        expected_rules = [r.name for r in ia_policies.list_rules() if
                          r.check_str in rule_conditions]
        self._check_filter_rules(db_context, instance, expected_rules)

    @mock.patch.object(policy.config, 'parse_args')
    @mock.patch.object(policy, 'CONF')
    def _check_main(self, mock_CONF, mock_parse_args,
                    category_name='check', expected_return_value=0):
        mock_CONF.category.name = category_name
        return_value = policy.main()

        self.assertEqual(expected_return_value, return_value)
        mock_CONF.register_cli_opts.assert_called_once_with(
            policy.cli_opts)
        mock_CONF.register_cli_opt.assert_called_once_with(
            policy.category_opt)

    @mock.patch.object(policy.version, 'version_string_with_package',
                       return_value="x.x.x")
    def test_main_version(self, mock_version_string):
        self._check_main(category_name='version')
        self.assertEqual("x.x.x\n", self.output.getvalue())

    @mock.patch.object(policy.cmd_common, 'print_bash_completion')
    def test_main_bash_completion(self, mock_print_bash):
        self._check_main(category_name='bash-completion')
        mock_print_bash.assert_called_once_with(policy.CATEGORIES)

    @mock.patch.object(policy.cmd_common, 'get_action_fn')
    def test_main(self, mock_get_action_fn):
        mock_fn = mock.Mock()
        mock_fn_args = [mock.sentinel.arg]
        mock_fn_kwargs = {'key': mock.sentinel.value}
        mock_get_action_fn.return_value = (mock_fn, mock_fn_args,
                                           mock_fn_kwargs)

        self._check_main(expected_return_value=mock_fn.return_value)
        mock_fn.assert_called_once_with(mock.sentinel.arg,
                                        key=mock.sentinel.value)

    @mock.patch.object(policy.cmd_common, 'get_action_fn')
    def test_main_error(self, mock_get_action_fn):
        mock_fn = mock.Mock(side_effect=Exception)
        mock_get_action_fn.return_value = (mock_fn, [], {})

        self._check_main(expected_return_value=1)
        self.assertIn("error: ", self.output.getvalue())
Example #56
0
def __repr__(self):
    f = StringIO()
    self.write_bdf(f)
    return f.getvalue()
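
# A self-contained sketch of the pattern above (the Card class here is
# hypothetical): any object with a write-to-stream serializer gets a faithful
# __repr__ by rendering into an in-memory StringIO.
from io import StringIO


class Card:
    def __init__(self, fields):
        self.fields = fields

    def write_bdf(self, f):
        f.write(','.join(str(v) for v in self.fields) + '\n')

    def __repr__(self):
        f = StringIO()
        self.write_bdf(f)
        return f.getvalue()


print(repr(Card(['GRID', 1, 2])))  # -> 'GRID,1,2\n'
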
Example #57
0
class ManagerTest(parameterized.TestCase, absltest.TestCase):
    def setUp(self):
        super(ManagerTest, self).setUp()
        # Save the real modules for clean up.
        self.real_open = builtins.open
        # Create a fake file system and stub out builtin modules.
        self.fs = fake_filesystem.FakeFilesystem()
        self.os = fake_filesystem.FakeOsModule(self.fs)
        self.open = fake_filesystem.FakeFileOpen(self.fs)
        self.stdout = StringIO()
        self.stubs = mox3_stubout.StubOutForTesting()
        self.stubs.SmartSet(builtins, 'open', self.open)
        self.stubs.SmartSet(common, 'os', self.os)
        self.stubs.SmartSet(sys, 'stdout', self.stdout)

        # Setup Testdata.
        self._testdata_path = '/testdata'
        self._valid_config_path = self._testdata_path + '/valid_config.yaml'
        self._blank_config_path = self._testdata_path + '/blank_config.yaml'

        self.fs.CreateFile(self._valid_config_path, contents=_VALID_CONFIG)
        self.fs.CreateFile(self._blank_config_path, contents=_BLANK_CONFIG)

        # Load the default config.
        self._valid_default_config = common.ProjectConfig.from_yaml(
            common.DEFAULT, self._valid_config_path)

        # Create test constants.
        self._constants = {
            'test': app_constants.Constant(
                'test',
                'message',
                '',
                parser=utils.StringParser(allow_empty_string=False),
            ),
            'other': app_constants.Constant('other', 'other message', 'value'),
        }

        # Mock out the authentication credentials.
        self.auth_patcher = mock.patch.object(auth, 'CloudCredentials')
        self.mock_creds = self.auth_patcher.start()
        self.mock_creds.return_value.get_credentials.return_value = (
            credentials.AnonymousCredentials())

    def tearDown(self):
        super(ManagerTest, self).tearDown()
        self.stubs.UnsetAll()
        builtins.open = self.real_open

        self.auth_patcher.stop()

    def test_get_oauth_scopes(self):
        self.assertEqual(gng_impl._get_oauth_scopes([sys.modules[__name__]]),
                         ['one', 'two'])

    def test_manager_init(self):
        test_config = common.ProjectConfig('KEY', 'PROJECT', 'ID', 'SECRET',
                                           'BUCKET', self._blank_config_path)
        test_constants = app_constants.get_default_constants()
        test_manager = gng_impl._Manager(test_config, test_constants, None)
        self.assertEqual(str(test_manager), "_Manager for project 'PROJECT'")
        self.assertEqual(repr(test_manager),
                         '<_Manager.new(/testdata/blank_config.yaml, KEY)>')

    @parameterized.named_parameters(
        (
            'With Project Key From Prompts',
            'dev',
            '/blank_config.yaml',
            "_Manager for project 'dev-project'",
            ('dev-project', 'dev.apps.googleusercontent.com', 'dev-secret'),
        ),
        (
            'Without Project Key From YAML',
            None,
            '/valid_config.yaml',
            "_Manager for project 'default-project'",
            (common.DEFAULT, ),
        ),
    )
    def test_new(self, test_key, config_filename, expected_str, test_input):
        with mock.patch.object(utils, 'prompt_string', side_effect=test_input):
            test_manager = gng_impl._Manager.new(
                self._testdata_path + config_filename, test_key)
        self.assertEqual(str(test_manager), expected_str)

    def test_new__auth_error(self):
        side_effect = [
            'default-project',
            'default.apps.googleusercontent.com',
            'default-secret',
        ]
        mock_creds = mock.Mock()
        mock_creds.get_credentials.return_value = (
            credentials.AnonymousCredentials())
        with mock.patch.object(utils, 'prompt_string',
                               side_effect=side_effect) as mock_prompt_string:
            with mock.patch.object(
                    auth,
                    'CloudCredentials',
                    side_effect=[auth.InvalidCredentials, mock_creds]):
                gng_impl._Manager.new(self._valid_config_path, common.DEFAULT)
            self.assertEqual(3, mock_prompt_string.call_count)

    @mock.patch.object(utils, 'prompt_string', return_value='dev')
    def test_run(self, mock_prompt_string):
        test_manager = gng_impl._Manager.new(self._valid_config_path,
                                             common.DEFAULT)
        side_effect = [
            gng_impl._CHANGE_PROJECT,
            gng_impl._CONFIGURE,
            gng_impl._QUIT,
            gng_impl._QUIT,
        ]
        with mock.patch.object(utils, 'prompt_enum', side_effect=side_effect):
            self.assertEqual(test_manager.run(), 0)
        self.assertEqual(mock_prompt_string.call_count, 1)

    @mock.patch.object(utils, 'clear_screen')
    def test_main_menu_output(self, mock_clear):
        test_manager = gng_impl._Manager.new(self._valid_config_path,
                                             common.DEFAULT)
        with mock.patch.object(utils, 'input', return_value=gng_impl._QUIT):
            test_manager.run()
        self.assertEqual(self.stdout.getvalue(), _MAIN_MENU_GOLDEN)
        self.assertEqual(mock_clear.call_count, 1)

    def test_change_project(self):
        with mock.patch.object(utils, 'prompt_string',
                               return_value='dev') as mock_prompt_string:
            test_manager = gng_impl._Manager.new(self._valid_config_path,
                                                 common.DEFAULT)
            other_manager = test_manager._change_project()
        self.assertEqual(other_manager._config.project, 'dev-project')
        self.assertEqual(other_manager._config.client_secret, 'dev-secret')
        mock_prompt_string.assert_called_once_with(_CHANGE_PROJECT_GOLDEN)

    def test_configure(self):
        test_client = _TestClientAPI()
        test_manager = gng_impl._Manager(
            self._valid_default_config,
            self._constants,
            None,
            storage_api=test_client,
        )
        with mock.patch.object(utils,
                               'input',
                               side_effect=[
                                   'test', 'new_value', gng_impl._QUIT
                               ]):
            test_manager._configure()
        self.assertEqual(
            test_client.get_blob(test_manager._config.constants_storage_path,
                                 test_manager._config.bucket), {
                                     'test': 'new_value',
                                     'other': 'value'
                                 })

    def test_save_constants(self):
        test_client = _TestClientAPI()
        test_manager = gng_impl._Manager(
            self._valid_default_config,
            self._constants,
            None,
            storage_api=test_client,
        )
        test_manager._save_constants()
        self.assertEqual(
            test_client.get_blob(test_manager._config.constants_storage_path,
                                 test_manager._config.bucket), {
                                     'test': '',
                                     'other': 'value'
                                 })

    @mock.patch.object(utils, 'prompt_enum', return_value=gng_impl._QUIT)
    def test_main(self, mock_prompt_enum):
        with flagsaver.flagsaver(project=common.DEFAULT,
                                 config_file_path=self._valid_config_path):
            with self.assertRaises(SystemExit) as exit_err:
                gng_impl.main('unused')
            self.assertEqual(exit_err.exception.code, 0)
        self.assertEqual(mock_prompt_enum.call_count, 1)

    @parameterized.parameters('new', 'run')
    def test_main__errors(self, method):
        with mock.patch.object(gng_impl._Manager,
                               method,
                               side_effect=KeyboardInterrupt()):
            with flagsaver.flagsaver(project=common.DEFAULT,
                                     config_file_path=self._valid_config_path):
                with self.assertRaises(SystemExit) as exit_err:
                    gng_impl.main('unused')
                self.assertEqual(exit_err.exception.code, 1)
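
# The setUp above reroutes sys.stdout into a StringIO so that menu output can
# be compared against a golden string. A library-free sketch of the same
# capture idea using contextlib.redirect_stdout (show_menu is hypothetical):
import contextlib
from io import StringIO


def show_menu():
    print('1) Change project')
    print('q) Quit')


buf = StringIO()
with contextlib.redirect_stdout(buf):
    show_menu()

assert buf.getvalue() == '1) Change project\nq) Quit\n'
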
Example #58
0
    def test_rload(self):
        """tests DLOAD, RLOAD1, RLOAD2, TABLED2 cards"""
        model = BDF(debug=False)
        #model.case_control_deck = CaseControlDeck(['DLOAD=2', 'BEGIN BULK'])
        sid = 2
        excite_id = 20
        delay = 0
        tid = 42
        rload1 = model.add_rload1(sid,
                                  excite_id,
                                  delay=0,
                                  dphase=0,
                                  tc=0,
                                  td=0,
                                  Type='LOAD',
                                  comment='rload1')
        rload1 = model.add_rload1(sid,
                                  excite_id,
                                  delay=1.,
                                  dphase=0,
                                  tc=0,
                                  td=0,
                                  Type='DISP',
                                  comment='rload1')
        rload1 = model.add_rload1(sid,
                                  excite_id,
                                  delay=2,
                                  dphase=0,
                                  tc=0,
                                  td=0,
                                  Type='VELO',
                                  comment='rload1')
        rload1 = model.add_rload1(sid,
                                  excite_id,
                                  delay=0,
                                  dphase=0,
                                  tc=0,
                                  td=0,
                                  Type='ACC',
                                  comment='rload1')

        sid = 3
        excite_id = 30
        rload2 = model.add_rload2(sid,
                                  excite_id,
                                  delay=0,
                                  dphase=0,
                                  tb=0,
                                  tp=0,
                                  Type='LOAD',
                                  comment='rload2')
        rload2 = model.add_rload2(sid,
                                  excite_id,
                                  delay=1.,
                                  dphase=0,
                                  tb=0,
                                  tp=0,
                                  Type='D',
                                  comment='rload2')
        rload2 = model.add_rload2(sid,
                                  excite_id,
                                  delay=2,
                                  dphase=0,
                                  tb=0,
                                  tp=0,
                                  Type='V',
                                  comment='rload2')
        rload2 = model.add_rload2(sid,
                                  excite_id,
                                  delay=0,
                                  dphase=0,
                                  tb=0,
                                  tp=0,
                                  Type='A',
                                  comment='rload2')

        excite_id = 20
        nid = 21
        c = 1
        scale = 1.0
        model.add_darea(excite_id, nid, c, scale, comment='darea')
        model.add_grid(nid, [0., 0., 0.])

        excite_id = 30
        model.add_darea(excite_id, nid, c, scale, comment='darea')

        delay_id = 2
        nodes = 100
        components = 2
        delays = 1.5
        delay = model.add_delay(delay_id, nodes, components, delays)

        sid = 1
        scale = 1.0
        scale_factors = 1.
        load_ids = 2
        dload = model.add_dload(sid,
                                scale,
                                scale_factors,
                                load_ids,
                                comment='dload')

        x1 = 0.1
        x = np.linspace(0., 1.)
        y = np.sin(x)
        tabled2 = model.add_tabled2(tid, x1, x, y, comment='tabled2')

        model.pop_parse_errors()

        delay.validate()
        delay.raw_fields()
        delay.write_card()
        delay.write_card(size=16)

        rload1.validate()
        rload1.raw_fields()
        rload1.write_card()
        rload1.write_card(size=16)

        rload2.validate()
        rload2.raw_fields()
        rload2.write_card()
        rload2.write_card(size=16)

        dload.validate()
        dload.raw_fields()
        dload.write_card()
        dload.write_card(size=16)

        tabled2.validate()
        tabled2.raw_fields()
        tabled2.write_card()
        tabled2.write_card(size=16)

        model.validate()
        model.cross_reference()
        model.pop_xref_errors()
        #print(model.dareas)

        bdf_file = StringIO()
        model.write_bdf(bdf_file, close=False)
        unused_out = bdf_file.getvalue()
        bdf_file.seek(0)
        unused_outs = model.get_bdf_stats(return_type='list')
        unused_outs = model.get_bdf_stats(return_type='string')

        freq = 0.5
        out1 = rload1.get_load_at_freq(freq, scale=1.)
        #out2 = rload2.get_load_at_time(freq, scale=1.)
        #print(out1)
        #print(out2)
        assert len(out1) == 1, out1
        #assert len(out2) == 1, out2

        freq = [0.5, 0.9]
        out1 = rload1.get_load_at_freq(freq, scale=1.)
        #out2 = rload2.get_load_at_freq(freq, scale=1.)
        #print(out1)
        #print(out2)
        assert len(out1) == 2, out1
        #assert len(out2) == 2, out2

        model2 = read_bdf(bdf_file, punch=True, debug=False)
        model2.uncross_reference()
        model2.safe_cross_reference()
        model2.uncross_reference()
        #print(out)
        #print(outs)
        save_load_deck(model)
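
# The closing lines above rely on a useful trick: round-trip the model through
# an in-memory StringIO instead of a temporary file. A distilled sketch,
# assuming pyNastran's BDF/read_bdf exactly as used in the test:
from io import StringIO
from pyNastran.bdf.bdf import BDF, read_bdf

model = BDF(debug=False)
model.add_grid(1, [0., 0., 0.])

bdf_file = StringIO()
model.write_bdf(bdf_file, close=False)  # close=False keeps the stream usable
bdf_file.seek(0)                        # rewind before re-reading
model2 = read_bdf(bdf_file, punch=True, debug=False)
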
Example #59
0
def run_sim2smv(ROI,prefix,crystal,spectra,rotation,rank,quick=False):
  smv_fileout = prefix + ".img"

  direct_algo_res_limit = 1.7

  wavlen, flux, wavelength_A = next(spectra) # list of lambdas, list of fluxes, average wavelength

  tophat_spectrum = True
  if tophat_spectrum:
    sum_flux = flex.sum(flux)
    #from IPython import embed; embed()
    ave_flux = sum_flux/60. # 60 energy channels
    for ix in range(len(wavlen)):
      energy = 12398.425 / wavlen[ix]
      if energy>=7090 and energy <=7150:
        flux[ix]=ave_flux
      else:
        flux[ix]=0.
  if quick:
    wavlen = flex.double([wavelength_A])
    flux = flex.double([flex.sum(flux)])
    print("Quick sim, lambda=%f, flux=%f"%(wavelength_A,flux[0]))

  GF = gen_fmodel(resolution=direct_algo_res_limit,pdb_text=pdb_lines,algorithm="fft",wavelength=wavelength_A)
  GF.set_k_sol(0.435)
  GF.make_P1_primitive()
  sfall_main = GF.get_amplitudes()

  # use crystal structure to initialize Fhkl array
  sfall_main.show_summary(prefix = "Amplitudes used ")
  N = crystal.number_of_cells(sfall_main.unit_cell())

  #SIM = nanoBragg(detpixels_slowfast=(2000,2000),pixel_size_mm=0.11,Ncells_abc=(5,5,5),verbose=0)
  SIM = nanoBragg(detpixels_slowfast=(3000,3000),pixel_size_mm=0.11,Ncells_abc=(N,N,N),
    # workaround for problem with wavelength array, specify it separately in constructor.
    wavelength_A=wavelength_A,verbose=0)
  SIM.adc_offset_adu = 10 # use a 10 ADU offset rather than the default 40
  import sys
  if len(sys.argv)>2:
    SIM.seed = -int(sys.argv[2])
    print("GOTHERE seed=",SIM.seed)
  if len(sys.argv)>1:
    if sys.argv[1]=="random" : SIM.randomize_orientation()
  SIM.mosaic_spread_deg = 0.05 # interpreted by UMAT_nm as a half-width stddev
  SIM.mosaic_domains = 50  # 77 seconds.  With 100 energy points, 7700 seconds (2 hours) per image
                           # 3000000 images would be 100000 hours on a 60-core machine (dials), or 11.4 years
                           # using 2 nodes, 5.7 years.  Do this at SLAC? NERSC? combination of all?
                           # SLAC downtimes: Tues Dec 5 (24 hrs), Mon Dec 11 (72 hrs), Mon Dec 18 light use, 24 days
                           # mosaic_domains setter must come after mosaic_spread_deg setter
  SIM.distance_mm=141.7

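  # Build one random rotation per mosaic domain: draw a tilt angle from a
  # normal distribution whose sigma is the mosaic spread (in radians) and a
  # random axis on the unit sphere, then register the resulting UMATs below.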
  UMAT_nm = flex.mat3_double()
  mersenne_twister = flex.mersenne_twister(seed=0)
  scitbx.random.set_random_seed(1234)
  rand_norm = scitbx.random.normal_distribution(mean=0, sigma=SIM.mosaic_spread_deg * math.pi/180.)
  g = scitbx.random.variate(rand_norm)
  mosaic_rotation = g(SIM.mosaic_domains)
  for m in mosaic_rotation:
    site = col(mersenne_twister.random_double_point_on_sphere())
    UMAT_nm.append( site.axis_and_angle_as_r3_rotation_matrix(m,deg=False) )
  SIM.set_mosaic_blocks(UMAT_nm)

  #SIM.detector_thick_mm = 0.5 # = 0 for Rayonix
  #SIM.detector_thicksteps = 1 # should default to 1 for Rayonix, but set to 5 for CSPAD
  #SIM.detector_attenuation_length_mm = default is silicon

  # get same noise each time this test is run
  SIM.seed = 1
  SIM.oversample=1
  SIM.wavelength_A = wavelength_A
  SIM.polarization=1
  # this will become F000, marking the beam center
  SIM.default_F=0
  #SIM.missets_deg= (10,20,30)
  print("mosaic_seed=",SIM.mosaic_seed)
  print("seed=",SIM.seed)
  print("calib_seed=",SIM.calib_seed)
  print("missets_deg =", SIM.missets_deg)
  SIM.Fhkl=sfall_main
  print("Determinant",rotation.determinant())
  Amatrix_rot = (rotation * sqr(sfall_main.unit_cell().orthogonalization_matrix())).transpose()
  print("RAND_ORI", prefix, end=' ')
  for i in Amatrix_rot: print(i, end=' ')
  print()

  SIM.Amatrix_RUB = Amatrix_rot
  #workaround for failing init_cell, use custom written Amatrix setter
  print("unit_cell_Adeg=",SIM.unit_cell_Adeg)
  print("unit_cell_tuple=",SIM.unit_cell_tuple)
  Amat = sqr(SIM.Amatrix).transpose() # recovered Amatrix from SIM
  from cctbx import crystal_orientation
  Ori = crystal_orientation.crystal_orientation(Amat, crystal_orientation.basis_type.reciprocal)
  print("Python unit cell from SIM state",Ori.unit_cell())

  # fastest option, least realistic
  #SIM.xtal_shape=shapetype.Tophat # RLP = hard sphere
  #SIM.xtal_shape=shapetype.Square # gives fringes
  SIM.xtal_shape=shapetype.Gauss # both crystal & RLP are Gaussian
  #SIM.xtal_shape=shapetype.Round # Crystal is a hard sphere
  # only really useful for long runs
  SIM.progress_meter=False
  # prints out value of one pixel only.  will not render full image!
  #SIM.printout_pixel_fastslow=(500,500)
  #SIM.printout=True
  SIM.show_params()
  # flux is always in photons/s
  SIM.flux=1e12
  SIM.exposure_s=1.0 # so total fluence is e12
  # assumes round beam
  SIM.beamsize_mm=0.003 #cannot make this 3 microns; spots are too intense
  temp=SIM.Ncells_abc
  print("Ncells_abc=",SIM.Ncells_abc)
  SIM.Ncells_abc=temp
  print("Ncells_abc=",SIM.Ncells_abc)
  print("xtal_size_mm=",SIM.xtal_size_mm)
  print("unit_cell_Adeg=",SIM.unit_cell_Adeg)
  print("unit_cell_tuple=",SIM.unit_cell_tuple)
  print("missets_deg=",SIM.missets_deg)
  print("Amatrix=",SIM.Amatrix)
  print("beam_center_mm=",SIM.beam_center_mm)
  print("XDS_ORGXY=",SIM.XDS_ORGXY)
  print("detector_pivot=",SIM.detector_pivot)
  print("xtal_shape=",SIM.xtal_shape)
  print("beamcenter_convention=",SIM.beamcenter_convention)
  print("fdet_vector=",SIM.fdet_vector)
  print("sdet_vector=",SIM.sdet_vector)
  print("odet_vector=",SIM.odet_vector)
  print("beam_vector=",SIM.beam_vector)
  print("polar_vector=",SIM.polar_vector)
  print("spindle_axis=",SIM.spindle_axis)
  print("twotheta_axis=",SIM.twotheta_axis)
  print("distance_meters=",SIM.distance_meters)
  print("distance_mm=",SIM.distance_mm)
  print("close_distance_mm=",SIM.close_distance_mm)
  print("detector_twotheta_deg=",SIM.detector_twotheta_deg)
  print("detsize_fastslow_mm=",SIM.detsize_fastslow_mm)
  print("detpixels_fastslow=",SIM.detpixels_fastslow)
  print("detector_rot_deg=",SIM.detector_rot_deg)
  print("curved_detector=",SIM.curved_detector)
  print("pixel_size_mm=",SIM.pixel_size_mm)
  print("point_pixel=",SIM.point_pixel)
  print("polarization=",SIM.polarization)
  print("nopolar=",SIM.nopolar)
  print("oversample=",SIM.oversample)
  print("region_of_interest=",SIM.region_of_interest)
  print("wavelength_A=",SIM.wavelength_A)
  print("energy_eV=",SIM.energy_eV)
  print("fluence=",SIM.fluence)
  print("flux=",SIM.flux)
  print("exposure_s=",SIM.exposure_s)
  print("beamsize_mm=",SIM.beamsize_mm)
  print("dispersion_pct=",SIM.dispersion_pct)
  print("dispsteps=",SIM.dispsteps)
  print("divergence_hv_mrad=",SIM.divergence_hv_mrad)
  print("divsteps_hv=",SIM.divsteps_hv)
  print("divstep_hv_mrad=",SIM.divstep_hv_mrad)
  print("round_div=",SIM.round_div)
  print("phi_deg=",SIM.phi_deg)
  print("osc_deg=",SIM.osc_deg)
  print("phisteps=",SIM.phisteps)
  print("phistep_deg=",SIM.phistep_deg)
  print("detector_thick_mm=",SIM.detector_thick_mm)
  print("detector_thicksteps=",SIM.detector_thicksteps)
  print("detector_thickstep_mm=",SIM.detector_thickstep_mm)
  print("***mosaic_spread_deg=",SIM.mosaic_spread_deg)
  print("***mosaic_domains=",SIM.mosaic_domains)
  print("indices=",SIM.indices)
  print("amplitudes=",SIM.amplitudes)
  print("Fhkl_tuple=",SIM.Fhkl_tuple)
  print("default_F=",SIM.default_F)
  print("interpolate=",SIM.interpolate)
  print("integral_form=",SIM.integral_form)

  from libtbx.development.timers import Profiler
  P = Profiler("nanoBragg")
  # the monolithic add_nanoBragg_spots() call is skipped here; the
  # per-wavelength channel loop below does the actual CPU work instead
  #SIM.add_nanoBragg_spots()
  del P

  # simulated crystal is only 125 unit cells (25 nm wide)
  # amplify spot signal to simulate physical crystal of 4000x larger: 100 um (64e9 x the volume)
  print(crystal.domains_per_crystal)
  SIM.raw_pixels *= crystal.domains_per_crystal  # must calculate the correct scale!
  output = StringIO()  # could also be open("myfile", "w") to keep the log on disk
  for x in range(0, 100, 2):  # every other channel of the first 100; the full loop would be range(len(flux))
    if flux[x] == 0.0:
      continue
    print("+++++++++++++++++++++++++++++++++++++++ Wavelength",x)
    CH = channel_pixels(ROI,wavlen[x],flux[x],N,UMAT_nm,Amatrix_rot,GF,output)
    SIM.raw_pixels += CH.raw_pixels * crystal.domains_per_crystal
    print(SIM.raw_pixels)

    CH.free_all()

  message = output.getvalue().split()
  miller = (int(message[4]), int(message[5]), int(message[6]))
  intensity = float(message[9])

  #SIM.to_smv_format(fileout=prefix + "_intimage_001.img")
  pixels = SIM.raw_pixels
  roi_pixels = pixels[ROI[1][0]:ROI[1][1], ROI[0][0]:ROI[0][1]]
  print("Reducing full shape of",pixels.focus(),"to ROI of",roi_pixels.focus())
  SIM.free_all()
  return dict(roi_pixels=roi_pixels,miller=miller,intensity=intensity)
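
# The capture-and-parse idiom above (hand a StringIO to the worker, then
# tokenize getvalue()) in isolation; the report() helper is hypothetical and
# stands in for channel_pixels writing its diagnostics:
from io import StringIO


def report(stream):
    print('strongest pixel at hkl 1 2 3 with intensity 42.5', file=stream)


output = StringIO()
report(output)

message = output.getvalue().split()
miller = (int(message[4]), int(message[5]), int(message[6]))
intensity = float(message[9])
assert miller == (1, 2, 3) and intensity == 42.5
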
Example #60
0
def test_debugprint():
    A = tensor.matrix(name='A')
    B = tensor.matrix(name='B')
    C = A + B
    C.name = 'C'
    D = tensor.matrix(name='D')
    E = tensor.matrix(name='E')

    F = D + E
    G = C + F

    # just test that it works
    debugprint(G)

    # test ids=int
    s = StringIO()
    debugprint(G, file=s, ids='int')
    s = s.getvalue()
    # The additional white space is needed!
    reference = '\n'.join([
        "Elemwise{add,no_inplace} [@0] ''   ",
        " |Elemwise{add,no_inplace} [@1] 'C'   ",
        " | |A [@2]",
        " | |B [@3]",
        " |Elemwise{add,no_inplace} [@4] ''   ",
        "   |D [@5]",
        "   |E [@6]",
    ]) + '\n'

    if s != reference:
        print('--' + s + '--')
        print('--' + reference + '--')

    assert s == reference

    # test ids=CHAR
    s = StringIO()
    debugprint(G, file=s, ids='CHAR')
    s = s.getvalue()
    # The additional white space is needed!
    reference = "\n".join([
        "Elemwise{add,no_inplace} [@A] ''   ",
        " |Elemwise{add,no_inplace} [@B] 'C'   ",
        " | |A [@C]",
        " | |B [@D]",
        " |Elemwise{add,no_inplace} [@E] ''   ",
        "   |D [@F]",
        "   |E [@G]",
    ]) + '\n'

    if s != reference:
        print('--' + s + '--')
        print('--' + reference + '--')

    assert s == reference

    # test ids=CHAR, stop_on_name=True
    s = StringIO()
    debugprint(G, file=s, ids='CHAR', stop_on_name=True)
    s = s.getvalue()
    # The additional white space is needed!
    reference = '\n'.join([
        "Elemwise{add,no_inplace} [@A] ''   ",
        " |Elemwise{add,no_inplace} [@B] 'C'   ",
        " |Elemwise{add,no_inplace} [@C] ''   ",
        "   |D [@D]",
        "   |E [@E]",
    ]) + '\n'

    if s != reference:
        print('--' + s + '--')
        print('--' + reference + '--')

    assert s == reference

    # test ids='' (no identifiers printed)
    s = StringIO()
    debugprint(G, file=s, ids='')
    s = s.getvalue()
    # The additional white space is needed!
    reference = '\n'.join([
        "Elemwise{add,no_inplace}  ''   ",
        " |Elemwise{add,no_inplace}  'C'   ",
        " | |A ",
        " | |B ",
        " |Elemwise{add,no_inplace}  ''   ",
        "   |D ",
        "   |E ",
    ]) + '\n'
    if s != reference:
        print('--' + s + '--')
        print('--' + reference + '--')

    assert s == reference
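
# The golden-string technique above generalizes to any pretty-printer: render
# into a StringIO, print both strings with visible delimiters on mismatch,
# then assert equality. A library-free sketch with a toy render():
from io import StringIO


def render(tree, out, depth=0):
    out.write(' ' * depth + tree[0] + '\n')
    for child in tree[1:]:
        render(child, out, depth + 1)


s = StringIO()
render(('add', ('A',), ('B',)), s)
s = s.getvalue()

reference = 'add\n A\n B\n'
if s != reference:
    print('--' + s + '--')
    print('--' + reference + '--')
assert s == reference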