Example #1
    def test_start_logger(self):
        sio = StringIO()
        logger = logging.getLogger('CONT #abcdef: test')
        logger.addHandler(logging.StreamHandler(sio))

        # set log level as INFO
        logger = start_logger('test', 'INFO', 'abcdef')
        self.assertEqual(logging.INFO, logger.level)
        # INFO message is recorded with INFO log level
        logger.info('test1')
        self.assertEqual(sio.getvalue(), 'test1\n')
        # DEBUG message is not recorded with INFO log level
        logger.debug('test2')
        self.assertEqual(sio.getvalue(), 'test1\n')

        # set log level as DEBUG
        logger = start_logger('test', 'DEBUG', 'abcdef')
        self.assertEqual(logging.DEBUG, logger.level)
        # DEBUG message is recorded with DEBUG log level
        logger.debug('test3')
        self.assertEqual(sio.getvalue(), 'test1\ntest3\n')

        # If the level parameter is unknown, use ERROR as log level
        logger = start_logger('test', 'foo', 'abcdef')
        self.assertEqual(logging.ERROR, logger.level)
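The test above expects start_logger to name the logger after the container id, map the level string onto a logging level, and fall back to ERROR for unknown level names. A minimal hypothetical sketch consistent with that behaviour (not the project's actual implementation, which also configures handlers):

import logging


def start_logger(name, level, container_id):
    # Hypothetical sketch: resolve the level name, defaulting to ERROR.
    logger = logging.getLogger('CONT #%s: %s' % (container_id, name))
    logger.setLevel(getattr(logging, level.upper(), logging.ERROR))
    return logger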
Example #2
def test_json_format_one():
    sf = json_format.JSONFormatter()
    c = ('a', 'b', 'c', 'd')
    d = ('A', 'B', 'C', '"escape me"')
    expected = {
        'a': 'A',
        'b': 'B',
        'c': 'C',
        'd': '"escape me"'
    }
    args = mock.Mock()
    sf.add_argument_group(args)

    args.noindent = True
    output = StringIO()
    sf.emit_one(c, d, output, args)
    value = output.getvalue()
    print(len(value.splitlines()))
    assert 1 == len(value.splitlines())
    actual = json.loads(value)
    assert expected == actual

    args.noindent = False
    output = StringIO()
    sf.emit_one(c, d, output, args)
    value = output.getvalue()
    assert 6 == len(value.splitlines())
    actual = json.loads(value)
    assert expected == actual
Example #3
    def test_any_sequence_to_fasta(self):
        # store writer function, sequence object to write, expected
        # fasta filepath for default parameters, expected fasta filepath for
        # non-defaults, and expected qual filepath for non-defaults
        id_ = 'f o o'
        desc = 'b\na\nr'
        test_data = (
            (_biological_sequence_to_fasta,
             Sequence('ACGT', id=id_, description=desc,
                      quality=range(1, 5)),
             ('fasta_single_bio_seq_defaults',
              'fasta_single_bio_seq_non_defaults',
              'qual_single_bio_seq_non_defaults')),
            (_dna_sequence_to_fasta,
             DNA('TACG', id=id_, description=desc, quality=range(4)),
             ('fasta_single_dna_seq_defaults',
              'fasta_single_dna_seq_non_defaults',
              'qual_single_dna_seq_non_defaults')),
            (_rna_sequence_to_fasta,
             RNA('UACG', id=id_, description=desc, quality=range(2, 6)),
             ('fasta_single_rna_seq_defaults',
              'fasta_single_rna_seq_non_defaults',
              'qual_single_rna_seq_non_defaults')),
            (_protein_sequence_to_fasta,
             Protein('PQQ', id=id_, description=desc, quality=[42, 41, 40]),
             ('fasta_single_prot_seq_defaults',
              'fasta_single_prot_seq_non_defaults',
              'qual_single_prot_seq_non_defaults')))

        for fn, obj, fps in test_data:
            defaults_fp, non_defaults_fasta_fp, non_defaults_qual_fp = fps

            # test writing with default parameters
            fh = StringIO()
            fn(obj, fh)
            obs = fh.getvalue()
            fh.close()

            with open(get_data_path(defaults_fp), 'U') as fh:
                exp = fh.read()

            self.assertEqual(obs, exp)

            # test writing with non-defaults
            fasta_fh = StringIO()
            qual_fh = StringIO()
            fn(obj, fasta_fh, id_whitespace_replacement='-',
               description_newline_replacement='_', max_width=1, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()

            with open(get_data_path(non_defaults_fasta_fp), 'U') as fh:
                exp_fasta = fh.read()
            with open(get_data_path(non_defaults_qual_fp), 'U') as fh:
                exp_qual = fh.read()

            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)
Example #4
    def test_prints_signature(self):
        the_time = 1406143563.020043
        key = 'secret squirrel'
        expires = 3600
        path = '/v1/a/c/o'
        redirect = 'https://example.com/done.html'
        max_file_size = str(int(1024 * 1024 * 1024 * 3.14159))  # π GiB
        max_file_count = '3'

        expected_signature = hmac.new(
            key,
            "\n".join((
                path, redirect, max_file_size, max_file_count,
                str(int(the_time + expires)))),
            hashlib.sha1).hexdigest()

        out = StringIO()
        with mock.patch('swift.cli.form_signature.time', lambda: the_time):
            with mock.patch('sys.stdout', out):
                exitcode = form_signature.main([
                    '/path/to/swift-form-signature',
                    path, redirect, max_file_size,
                    max_file_count, str(expires), key])

        self.assertEqual(exitcode, 0)
        self.assertTrue("Signature: %s" % expected_signature
                        in out.getvalue())
        self.assertTrue("Expires: %d" % (the_time + expires,)
                        in out.getvalue())

        sig_input = ('<input type="hidden" name="signature" value="%s" />'
                     % expected_signature)
        self.assertTrue(sig_input in out.getvalue())
Example #5
def test_packed_workflow_execution(wf_path, job_path, namespaced, tmpdir):
    load_tool.loaders = {}

    document_loader, workflowobj, uri = fetch_document(
        get_data(wf_path), resolver=tool_resolver)

    document_loader, _, processobj, metadata, uri = validate_document(
        document_loader, workflowobj, uri, [], {})

    packed = json.loads(print_pack(document_loader, processobj, uri, metadata))

    assert not namespaced or "$namespaces" in packed

    wf_packed_handle, wf_packed_path = tempfile.mkstemp()
    with open(wf_packed_path, 'w') as temp_file:
        json.dump(packed, temp_file)

    normal_output = StringIO()
    packed_output = StringIO()

    normal_params = ['--outdir', str(tmpdir), get_data(wf_path), get_data(job_path)]
    packed_params = ['--outdir', str(tmpdir), '--debug', get_data(wf_packed_path), get_data(job_path)]

    assert main(normal_params, stdout=normal_output) == 0
    assert main(packed_params, stdout=packed_output) == 0

    assert json.loads(packed_output.getvalue()) == json.loads(normal_output.getvalue())

    os.close(wf_packed_handle)
    os.remove(wf_packed_path)
Example #6
class TestUI(object):
    def setup(self):
        self.out = StringIO()
        self.err = StringIO()
        self.ui = utils.UI(self.out, self.err)

    def test_write_goes_to_out_obj(self):
        self.ui.write("Foo")
        assert self.out.getvalue() == 'Foo'
        assert self.err.getvalue() == ''

    def test_error_goes_to_err_obj(self):
        self.ui.error("Foo")
        assert self.err.getvalue() == 'Foo'
        assert self.out.getvalue() == ''

    def test_confirm_raises_own_exception(self):
        confirm = mock.Mock(spec=click.confirm)
        confirm.side_effect = click.Abort()
        ui = utils.UI(self.out, self.err, confirm)
        with pytest.raises(utils.AbortedError):
            ui.confirm("Confirm?")

    def test_confirm_returns_value(self):
        confirm = mock.Mock(spec=click.confirm)
        confirm.return_value = 'foo'
        ui = utils.UI(self.out, self.err, confirm)
        return_value = ui.confirm("Confirm?")
        assert return_value == 'foo'
Example #7
class _Buffer(object):

    def __init__(self, stream):
        self._stream = stream
        self._buffer = StringIO()

    def fileno(self):
        return self._stream.fileno()

    def __getattr__(self, attr):
        # this happens on unpickling
        if attr == '_buffer':
            raise AttributeError("No _buffer yet")
        return getattr(self._buffer, attr)

    def __le__(self, obj):
        return self._buffer.getvalue() == obj

    def __eq__(self, obj):
        return self._buffer.getvalue() == obj

    def __str__(self):
        return self._buffer.getvalue()

    def __repr__(self):
        return repr(self._buffer.getvalue())
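A short usage sketch (assuming the _Buffer class above is in scope): write() is delegated to the internal StringIO through __getattr__, the comparison operators match against the captured text, and fileno() still reports the wrapped stream's descriptor.

import sys

buf = _Buffer(sys.stdout)
buf.write("captured")                        # delegated to the StringIO via __getattr__
assert buf == "captured"                     # __eq__ compares the buffered text
assert buf.fileno() == sys.stdout.fileno()   # fileno() comes from the real stream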
Example #8
def _build_tag(tag, hide_attributes):
    tag_el = _find_tag_el(tag)
    attributes = _find_attributes(tag)
    tag_help = StringIO()
    annotation_el = tag_el.find("{http://www.w3.org/2001/XMLSchema}annotation")
    text = annotation_el.find("{http://www.w3.org/2001/XMLSchema}documentation").text
    for line in text.splitlines():
        if line.startswith("$attribute_list:"):
            attributes_str, header_level = line.split(":")[1:3]
            attribute_names = attributes_str.split(",")
            header_level = int(header_level)
            text = text.replace(line, _build_attributes_table(tag, attributes, attribute_names=attribute_names, header_level=header_level))
        if line.startswith("$assertions"):
            assertions_tag = xmlschema_doc.find("//{http://www.w3.org/2001/XMLSchema}complexType[@name='TestAssertions']")
            assertion_tag = xmlschema_doc.find("//{http://www.w3.org/2001/XMLSchema}group[@name='TestAssertion']")
            assertions_buffer = StringIO()
            assertions_buffer.write(_doc_or_none(assertions_tag))
            assertions_buffer.write("\n\n")
            assertions_buffer.write("Child Element/Assertion | Details \n")
            assertions_buffer.write("--- | ---\n")
            elements = assertion_tag.findall("{http://www.w3.org/2001/XMLSchema}choice/{http://www.w3.org/2001/XMLSchema}element")
            for element in elements:
                doc = _doc_or_none(element).strip()
                assertions_buffer.write("``%s`` | %s\n" % (element.attrib["name"], doc))
            text = text.replace(line, assertions_buffer.getvalue())
    tag_help.write(text)
    best_practices = _get_bp_link(annotation_el)
    if best_practices:
        tag_help.write("\n\n### Best Practices\n")
        tag_help.write("""
Find the Intergalactic Utilities Commission suggested best practices for this
element [here](%s).""" % best_practices)
    tag_help.write(_build_attributes_table(tag, attributes, hide_attributes))

    return tag_help.getvalue()
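The assertions branch above accumulates a Markdown table in a StringIO before splicing it into the help text. A minimal standalone sketch of that pattern (the element names and docs here are hypothetical, not read from the Galaxy XSD):

from io import StringIO

rows = [("has_text", "Asserts the output contains the given text."),
        ("has_line", "Asserts the output contains the given line.")]

buf = StringIO()
buf.write("Child Element/Assertion | Details \n")
buf.write("--- | ---\n")
for name, doc in rows:
    buf.write("``%s`` | %s\n" % (name, doc))

print(buf.getvalue())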
Example #9
    def test_invalid_project_name(self):
        #with patch('sys.stdout', self.stdout):
            stderr_tmp = StringIO()
            with patch('sys.stderr', stderr_tmp):
                with self.assertRaises(SystemExit) as error:
                    conf_data = config.parse([
                        '-q',
                        '--db=postgres://user:pwd@host/dbname',
                        '-p'+self.project_dir,
                        'test'])
            self.assertTrue(stderr_tmp.getvalue().find("Project name 'test' is not valid") > -1)

            stderr_tmp = StringIO()
            with patch('sys.stderr', stderr_tmp):
                with self.assertRaises(SystemExit) as error:
                    conf_data = config.parse([
                        '-q',
                        '--db=postgres://user:pwd@host/dbname',
                        '-p'+self.project_dir,
                        'assert'])
            self.assertTrue(stderr_tmp.getvalue().find("Project name 'assert' is not valid") > -1)

            stderr_tmp = StringIO()
            with patch('sys.stderr', stderr_tmp):
                with self.assertRaises(SystemExit) as error:
                    conf_data = config.parse([
                        '-q',
                        '--db=postgres://user:pwd@host/dbname',
                        '-p'+self.project_dir,
                        'values'])
            self.assertTrue(stderr_tmp.getvalue().find("Project name 'values' is not valid") > -1)
Example #10
 def test_info_unicode(self, depfile):
     output = StringIO()
     task = Task("t1", [], file_dep=[six.u('tests/data/dependency1')])
     cmd = Info(outstream=output, dep_file=depfile.name, task_list=[task])
     cmd._execute(['t1'])
     assert """name:'t1'""" in output.getvalue()
     assert """'tests/data/dependency1'""" in output.getvalue()
Example #11
    def _in_process_execute(self, cli_ctx, command, expect_failure=False):
        from six import StringIO
        from vcr.errors import CannotOverwriteExistingCassetteException

        if command.startswith('az '):
            command = command[3:]

        stdout_buf = StringIO()
        logging_buf = StringIO()
        try:
            # issue: stderr cannot be redirected in this form; as a result, some
            # failure information is lost when the command fails.
            self.exit_code = cli_ctx.invoke(shlex.split(command), out_file=stdout_buf) or 0
            self.output = stdout_buf.getvalue()
            self.applog = logging_buf.getvalue()

        except CannotOverwriteExistingCassetteException as ex:
            raise AssertionError(ex)
        except CliExecutionError as ex:
            if expect_failure:
                self.exit_code = 1
                self.output = stdout_buf.getvalue()
                self.applog = logging_buf.getvalue()
            elif ex.exception:
                raise ex.exception
            else:
                raise ex
        except Exception as ex:  # pylint: disable=broad-except
            self.exit_code = 1
            self.output = stdout_buf.getvalue()
            self.process_error = ex
        finally:
            stdout_buf.close()
            logging_buf.close()
Example #12
 def test_var(self):
     m = ConcreteModel()
     m.x = Var(bounds=(-1, 1), initialize=3)
     mc_var = mc(m.x)
     self.assertEqual(mc_var.lower(), -1)
     self.assertEqual(mc_var.upper(), 1)
     m.no_ub = Var(bounds=(0, None), initialize=3)
     output = StringIO()
     with LoggingIntercept(output, 'pyomo.contrib.mcpp', logging.WARNING):
         mc_var = mc(m.no_ub)
         self.assertIn("Var no_ub missing upper bound.",
                       output.getvalue().strip())
         self.assertEqual(mc_var.lower(), 0)
         self.assertEqual(mc_var.upper(), 500000)
     m.no_lb = Var(bounds=(None, -3), initialize=-1)
     output = StringIO()
     with LoggingIntercept(output, 'pyomo.contrib.mcpp', logging.WARNING):
         mc_var = mc(m.no_lb)
         self.assertIn("Var no_lb missing lower bound.",
                       output.getvalue().strip())
         self.assertEqual(mc_var.lower(), -500000)
         self.assertEqual(mc_var.upper(), -3)
     m.no_val = Var(bounds=(0, 1))
     output = StringIO()
     with LoggingIntercept(output, 'pyomo.contrib.mcpp', logging.WARNING):
         mc_var = mc(m.no_val)
         mc_var.subcv()
         self.assertIn("Var no_val missing value.",
                       output.getvalue().strip())
         self.assertEqual(mc_var.lower(), 0)
         self.assertEqual(mc_var.upper(), 1)
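LoggingIntercept is a Pyomo helper; the same capture can be done with the standard library alone by pointing a StreamHandler at a StringIO. A minimal sketch of that equivalent pattern:

import logging
from io import StringIO

output = StringIO()
logger = logging.getLogger("demo.mcpp")
logger.setLevel(logging.WARNING)
logger.propagate = False                     # keep the message out of the root handlers
handler = logging.StreamHandler(output)
logger.addHandler(handler)
try:
    logger.warning("Var no_ub missing upper bound.")
finally:
    logger.removeHandler(handler)

assert "missing upper bound" in output.getvalue()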
Example #13
    def test_roundtrip_generators(self):
        # test that fasta and qual files can be streamed into memory and back
        # out to disk using generator reader and writer
        fps = list(map(lambda e: list(map(get_data_path, e)),
                       [('empty', 'empty'),
                        ('fasta_multi_seq_roundtrip',
                         'qual_multi_seq_roundtrip')]))

        for fasta_fp, qual_fp in fps:
            with open(fasta_fp, 'U') as fh:
                exp_fasta = fh.read()
            with open(qual_fp, 'U') as fh:
                exp_qual = fh.read()

            fasta_fh = StringIO()
            qual_fh = StringIO()
            _generator_to_fasta(_fasta_to_generator(fasta_fp, qual=qual_fp),
                                fasta_fh, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()

            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)
Example #14
class TestRexlexTraceState(unittest.TestCase):

    expected = 'rexlex: test'

    def setUp(self):
        self.stderr = StringIO()
        self.real_stderr = sys.stderr
        logger.handlers[0].stream = self.stderr
        logger.setLevel(rexlex.log_config.REXLEX_TRACE_STATE)

    def tearDown(self):
        logger.handlers[0].stream = self.real_stderr

    def test_trace_result(self):
        logger.rexlex_trace_result("test")
        self.assertIn(self.expected, self.stderr.getvalue())

    def test_trace_meta(self):
        logger.rexlex_trace_meta("test")
        self.assertIn(self.expected, self.stderr.getvalue())

    def test_trace_state(self):
        logger.rexlex_trace_state("test")
        self.assertIn(self.expected, self.stderr.getvalue())

    def test_trace_rule(self):
        logger.rexlex_trace_rule("test")
        self.assertNotIn(self.expected, self.stderr.getvalue())

    def test_trace(self):
        logger.rexlex_trace("test")
        self.assertNotIn(self.expected, self.stderr.getvalue())
Example #15
 def write_to_db(self, db, transaction=None, commit=True):
     if transaction is None:
         transaction = db
     fp = StringIO()
     if len(self) < Timeseries.MAX_ALL_BOTTOM:
         top = ''
         middle = None
         self.write(fp)
         bottom = fp.getvalue()
     else:
         dates = sorted(self.keys())
         self.write(fp, end=dates[Timeseries.ROWS_IN_TOP_BOTTOM - 1])
         top = fp.getvalue()
         fp.truncate(0)
         fp.seek(0)
         self.write(fp, start=dates[Timeseries.ROWS_IN_TOP_BOTTOM],
                    end=dates[-(Timeseries.ROWS_IN_TOP_BOTTOM + 1)])
         middle = self.blob_create(
             zlib.compress(fp.getvalue().encode('ascii')))
         fp.truncate(0)
         fp.seek(0)
         self.write(fp, start=dates[-Timeseries.ROWS_IN_TOP_BOTTOM])
         bottom = fp.getvalue()
     fp.close()
     c = db.cursor()
     c.execute("DELETE FROM ts_records WHERE id=%d" % (self.id))
     c.execute("""INSERT INTO ts_records (id, top, middle, bottom)
                  VALUES (%s, %s, %s, %s)""", (self.id, top, middle,
               bottom))
     c.close()
     if commit:
         transaction.commit()
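The write_to_db method above reuses one StringIO for the top, middle and bottom sections, resetting it with truncate(0) plus seek(0), and zlib-compresses only the middle chunk. A minimal standalone sketch of that reuse pattern (the CSV rows are made up):

import zlib
from io import StringIO

fp = StringIO()
fp.write("2024-01-01,1.0\n2024-01-02,2.0\n")
top = fp.getvalue()

fp.truncate(0)
fp.seek(0)                                   # rewind so the next write starts at offset 0
fp.write("2024-01-03,3.0\n" * 1000)
middle = zlib.compress(fp.getvalue().encode('ascii'))

print(len(top), "chars in top,", len(middle), "bytes in the compressed middle")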
Example #16
 def toXml(self, filename='', compress=False):
     xml = StringIO()
     xml.write("<?xml version='1.0' encoding='UTF-8'?>\n")
     xml.write(
         "<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.0//EN\" \"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd \">\n")
     self.svg.toXml(0, xml)
     if not filename:
         if compress:
             import gzip
             f = StringIO()
             zf = gzip.GzipFile(fileobj=f, mode='wb')
             zf.write(xml.getvalue())
             zf.close()
             f.seek(0)
             return f.read()
         else:
             return xml.getvalue()
     else:
         if filename[-4:] == 'svgz':
             import gzip
             f = gzip.GzipFile(
                 filename=filename, mode="wb", compresslevel=9)
             f.write(xml.getvalue())
             f.close()
         else:
             f = open(filename, 'w')
             f.write(xml.getvalue())
             f.close()
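The compress branch above is Python 2 style: gzip.GzipFile is handed a StringIO and fed text. On Python 3 gzip works on bytes, so an equivalent sketch uses a BytesIO and encodes the generated XML (a stub SVG document stands in for self.svg here):

import gzip
from io import BytesIO, StringIO

xml = StringIO()
xml.write("<?xml version='1.0' encoding='UTF-8'?>\n")
xml.write("<svg xmlns='http://www.w3.org/2000/svg'/>\n")

raw = BytesIO()
with gzip.GzipFile(fileobj=raw, mode='wb') as zf:
    zf.write(xml.getvalue().encode('utf-8'))

compressed = raw.getvalue()
print(len(compressed), "bytes of gzipped SVG")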
Example #17
 def test_commands(self):
     out = StringIO()
     call_command('billing_check', stdout=out)
     self.assertEqual(out.getvalue(), '')
     self.add_project()
     self.add_project()
     out = StringIO()
     call_command('billing_check', stdout=out)
     self.assertEqual(
         out.getvalue(),
         'Following billings are over limit:\n'
         ' * test0, test1 (test)\n'
     )
     out = StringIO()
     call_command('billing_check', '--valid', stdout=out)
     self.assertEqual(out.getvalue(), '')
     self.invoice.delete()
     out = StringIO()
     call_command('billing_check', stdout=out)
     self.assertEqual(
         out.getvalue(),
         'Following billings are over limit:\n'
         ' * test0, test1 (test)\n'
         'Following billings are past due date:\n'
         ' * test0, test1 (test)\n'
     )
     call_command('billing_check', '--notify', stdout=out)
     self.assertEqual(len(mail.outbox), 1)
Example #18
def decide_notebook_merge(base, local, remote, args=None):
    # Build merge strategies for each document path from arguments
    strategies = notebook_merge_strategies(args)

    # Compute notebook specific diffs
    local_diffs = diff_notebooks(base, local)
    remote_diffs = diff_notebooks(base, remote)

    # Debug outputs
    if args and args.log_level == "DEBUG":
        nbdime.log.debug("In merge, base-local diff:")
        buf = StringIO()
        pretty_print_notebook_diff("<base>", "<local>", base, local_diffs, buf)
        nbdime.log.debug(buf.getvalue())

        nbdime.log.debug("In merge, base-remote diff:")
        buf = StringIO()
        pretty_print_notebook_diff("<base>", "<remote>", base, remote_diffs, buf)
        nbdime.log.debug(buf.getvalue())

    # Execute a generic merge operation
    decisions = decide_merge_with_diff(
        base, local, remote,
        local_diffs, remote_diffs,
        strategies)

    # Debug outputs
    if args and args.log_level == "DEBUG":
        nbdime.log.debug("In merge, decisions:")
        buf = StringIO()
        pretty_print_merge_decisions(base, decisions, buf)
        nbdime.log.debug(buf.getvalue())

    return decisions
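The debug blocks above funnel a stream-only pretty-printer into a StringIO so the whole multi-line dump becomes a single log record. A minimal sketch of that pattern using only the standard library (pprint and logging stand in for nbdime's pretty printers and nbdime.log):

import logging
from io import StringIO
from pprint import pprint

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("merge-demo")

diff = [{"op": "replace", "key": "cells/0", "value": "print('hi')"}]

buf = StringIO()
pprint(diff, stream=buf)                     # the printer only knows how to write to a stream
log.debug("computed diff:\n%s", buf.getvalue())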
Example #19
    def test_any_sequences_to_fasta(self):
        for fn, obj in ((_sequence_collection_to_fasta, self.seq_coll),
                        (_alignment_to_fasta, self.align)):
            # test writing with default parameters
            fh = StringIO()
            fn(obj, fh)
            obs = fh.getvalue()
            fh.close()

            with open(get_data_path('fasta_3_seqs_defaults'), 'U') as fh:
                exp = fh.read()

            self.assertEqual(obs, exp)

            # test writing with non-defaults
            fasta_fh = StringIO()
            qual_fh = StringIO()
            fn(obj, fasta_fh, id_whitespace_replacement='*',
               description_newline_replacement='+', max_width=3, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()

            with open(get_data_path('fasta_3_seqs_non_defaults'), 'U') as fh:
                exp_fasta = fh.read()
            with open(get_data_path('qual_3_seqs_non_defaults'), 'U') as fh:
                exp_qual = fh.read()

            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)
Example #20
    def _in_process_execute(self, command):
        # from azure.cli import  as cli_main
        from azure.cli.main import main as cli_main
        from six import StringIO
        from vcr.errors import CannotOverwriteExistingCassetteException

        if command.startswith('az '):
            command = command[3:]

        output_buffer = StringIO()
        try:
            # issue: stderr cannot be redirected in this form; as a result, some
            # failure information is lost when the command fails.
            self.exit_code = cli_main(shlex.split(command), file=output_buffer) or 0
            self.output = output_buffer.getvalue()
        except CannotOverwriteExistingCassetteException as ex:
            raise AssertionError(ex)
        except CliExecutionError as ex:
            if ex.exception:
                raise ex.exception
            else:
                raise ex
        except Exception as ex:  # pylint: disable=broad-except
            self.exit_code = 1
            self.output = output_buffer.getvalue()
            self.process_error = ex
        finally:
            output_buffer.close()
Example #21
    def help_cmd_test(self):
        client = TestClient()
        try:
            old_stdout = sys.stdout
            result = StringIO()
            sys.stdout = result
            client.run("help new")
        finally:
            sys.stdout = old_stdout
        self.assertIn("Creates a new package recipe template with a 'conanfile.py'",
                      result.getvalue())

        try:
            old_stdout = sys.stdout
            result = StringIO()
            sys.stdout = result
            client.run("help build")
        finally:
            sys.stdout = old_stdout
        self.assertIn("Calls your local conanfile.py 'build()' method",
                      result.getvalue())

        client.run("help")
        self.assertIn("Creator commands",
                      client.out)
Example #22
    def no_configuration_test(self):
        dummy = """GlobalSection
            EndGlobalSection
     GlobalSection(SolutionConfigurationPlatforms) = preSolution
        Debug|Win32 = Debug|Win32
        Debug|x64 = Debug|x64
        Release|Win32 = Release|Win32
        Release|x64 = Release|x64
    EndGlobalSection
"""
        folder = temp_folder()
        path = os.path.join(folder, "dummy.sln")
        save(path, dummy)
        new_out = StringIO()
        tools.set_global_instances(ConanOutput(new_out), None)
        command = build_sln_command(Settings({}), sln_path=path, targets=None, upgrade_project=False,
                                    build_type='Debug', arch="x86", parallel=False)
        self.assertIn('/p:Configuration=Debug /p:Platform="x86"', command)
        self.assertIn("WARN: ***** The configuration Debug|x86 does not exist in this solution *****",
                      new_out.getvalue())
        # use platforms
        new_out = StringIO()
        tools.set_global_instances(ConanOutput(new_out), None)
        command = build_sln_command(Settings({}), sln_path=path, targets=None, upgrade_project=False,
                                    build_type='Debug', arch="x86", parallel=False, platforms={"x86": "Win32"})
        self.assertIn('/p:Configuration=Debug /p:Platform="Win32"', command)
        self.assertNotIn("WARN", new_out.getvalue())
        self.assertNotIn("ERROR", new_out.getvalue())
Example #23
    def test_trap_output(self):
        class BorkedOutput(outputs.ListOutput):

            def _write(self, x):
                raise RuntimeError("BORK")

        out = BorkedOutput(close_atexit=False)

        sio = StringIO()

        def cleanup(stderr, output):
            sys.stderr = stderr
            sio.close()
            self.log.output = output
            out.close()

        self.addCleanup(cleanup, sys.stderr, self.log.output)
        sys.stderr = sio
        self.log.output = out

        self.log.fields().info('hi')

        assert "BORK" in sio.getvalue()
        assert "Offending message: <twiggy.message.Message object" in sio.getvalue()
        assert "Error in twiggy internal log! Something is seriously broken." in sio.getvalue()
        assert "Traceback" in sio.getvalue()
Example #24
class TestBuild(unittest.TestCase):

    def setUp(self):
        self.out = StringIO()
        os.mkdir('/tmp/resu')  # Append uuid
        os.chdir('/tmp/resu')

    def test_basic(self):
        resu.cli.run(args=['-g'], out=self.out)
        resu.cli.run(args=[], out=self.out)
        assert os.path.isfile('resu.pdf')
        self.assertEqual(self.out.getvalue(), '')

    def test_specify_output_file(self):
        resu.cli.run(args=['-g'], out=self.out)
        resu.cli.run(args=['-o', 'other.html'], out=self.out)
        assert os.path.isfile('other.html')
        self.assertEqual(self.out.getvalue(), '')

    def test_non_existant_parser(self):
        resu.cli.run(args=['-g'], out=self.out)
        with self.assertRaises(FeatureNotFound):
            resu.cli.run(args=['-p', 'gibberish'], out=self.out)
        assert not os.path.isfile('resu.pdf')

    def tearDown(self):
        shutil.rmtree('/tmp/resu')
Example #25
def understand_buffer_usage_and_performance():
    multiplier = 1000 * 100
    big_data = data * multiplier

    # no buffer
    st = time.time()
    big_data_json = json.dumps(big_data)
    elapse = time.time() - st
    print("%.6f" % elapse)

    # write to text, then create buffer
    st = time.time()
    buffer = StringIO()
    big_data_json = json.dumps(big_data)
    buffer = StringIO(big_data_json)
    value = buffer.getvalue()
    elapse = time.time() - st
    print("%.6f" % elapse)

    # write to buffer
    st = time.time()
    buffer = StringIO()
    json.dump(big_data, buffer)
    value = buffer.getvalue()
    elapse = time.time() - st
    print("%.6f" % elapse)

    print("%s KB" % sys.getsizeof(big_data_json))
Example #26
    def test_display(self):
        model = ConcreteModel()
        model.e = Expression()
        with capture_output() as out:
            model.e.display()
        self.assertEqual(out.getvalue().strip(), """
e : Size=1
    Key  : Value
    None : Undefined
        """.strip())

        model.e.set_value(1.0)
        with capture_output() as out:
            model.e.display()
        self.assertEqual(out.getvalue().strip(), """
e : Size=1
    Key  : Value
    None :   1.0
        """.strip())

        out = StringIO()
        with capture_output() as no_out:
            model.e.display(ostream=out)
        self.assertEqual(no_out.getvalue(), "")
        self.assertEqual(out.getvalue().strip(), """
e : Size=1
    Key  : Value
    None :   1.0
        """.strip())

        model.E = Expression([1,2])
        with capture_output() as out:
            model.E.display()
        self.assertEqual(out.getvalue().strip(), """
E : Size=2
    Key : Value
      1 : Undefined
      2 : Undefined
        """.strip())

        model.E[1].set_value(1.0)
        with capture_output() as out:
            model.E.display()
        self.assertEqual(out.getvalue().strip(), """
E : Size=2
    Key : Value
      1 :       1.0
      2 : Undefined
        """.strip())

        out = StringIO()
        with capture_output() as no_out:
            model.E.display(ostream=out)
        self.assertEqual(no_out.getvalue(), "")
        self.assertEqual(out.getvalue().strip(), """
E : Size=2
    Key : Value
      1 :       1.0
      2 : Undefined
        """.strip())
Example #27
    def test_invalid_project_name(self):
        with patch("sys.stdout", self.stdout):
            stderr_tmp = StringIO()
            with patch("sys.stderr", stderr_tmp):
                with self.assertRaises(SystemExit) as error:
                    conf_data = config.parse(
                        ["-q", "--db=postgres://user:pwd@host/dbname", "-p" + self.project_dir, "test"]
                    )
            self.assertTrue(stderr_tmp.getvalue().find('Project name "test" is not a valid app name') > -1)

            stderr_tmp = StringIO()
            with patch("sys.stderr", stderr_tmp):
                with self.assertRaises(SystemExit) as error:
                    conf_data = config.parse(
                        ["-q", "--db=postgres://user:pwd@host/dbname", "-p" + self.project_dir, "assert"]
                    )
            self.assertTrue(stderr_tmp.getvalue().find('Project name "assert" is not a valid app name') > -1)

            stderr_tmp = StringIO()
            with patch("sys.stderr", stderr_tmp):
                with self.assertRaises(SystemExit) as error:
                    conf_data = config.parse(
                        ["-q", "--db=postgres://user:pwd@host/dbname", "-p" + self.project_dir, "values"]
                    )
            self.assertTrue(stderr_tmp.getvalue().find('Project name "values" is not a valid app name') > -1)

            stderr_tmp = StringIO()
            with patch("sys.stderr", stderr_tmp):
                with self.assertRaises(SystemExit) as error:
                    conf_data = config.parse(
                        ["-q", "--db=postgres://user:pwd@host/dbname", "-p" + self.project_dir, "project-name"]
                    )
            self.assertTrue(stderr_tmp.getvalue().find('Project name "project-name" is not a valid app name') > -1)
Example #28
    def test_declared(self):
        b = PySPConfigBlock()
        safe_register_common_option(b, "verbose")
        b.display()
        b.display()
        out = StringIO()
        b.display(ostream=out)
        self.assertEqual(out.getvalue(),
                         "verbose: false\n")
        self.assertEqual(b.check_usage(), True)
        self.assertEqual(b.check_usage(error=False), True)
        b.verbose = True
        out = StringIO()
        b.display(ostream=out)
        self.assertEqual(out.getvalue(),
                         "verbose: true\n")
        with self.assertRaises(ValueError):
            b.check_usage()
        with self.assertRaises(ValueError):
            b.check_usage()
        self.assertEqual(b.check_usage(error=False), False)
        b.verbose
        self.assertEqual(b.check_usage(), True)
        self.assertEqual(b.check_usage(error=False), True)
        verbose_about = \
"""PySPConfigValue: verbose
  -    type: <%s 'bool'>
  - default: False
  -    doc: Generate verbose output for both initialization and
            execution.""" % ('class' if six.PY3 else 'type')
        self.assertEqual(b.about("verbose"),
                         verbose_about)
Example #29
 def test_info_unicode(self, depfile):
     output = StringIO()
     task = Task("t1", [], file_dep=[six.u("tests/data/dependency1")])
     cmd = CmdFactory(Info, outstream=output, dep_file=depfile.name, task_list=[task])
     cmd._execute(["t1"])
     assert """t1""" in output.getvalue()
     assert """tests/data/dependency1""" in output.getvalue()
Example #30
 def test_import_ignore(self):
     output = StringIO()
     call_command(
         'import_json',
         '--main-component', 'test',
         '--project', 'test',
         TEST_COMPONENTS,
         stdout=output
     )
     self.assertIn(
         'Imported Test/Gettext PO with 3 translations',
         output.getvalue()
     )
     output.truncate()
     call_command(
         'import_json',
         '--main-component', 'test',
         '--project', 'test',
         '--ignore',
         TEST_COMPONENTS,
         stderr=output
     )
     self.assertIn(
         'Component Test/Gettext PO already exists',
         output.getvalue()
     )
            rownames=["Actual Result"],
            colnames=["Predicted Result"])
from six import StringIO
from IPython.display import Image
import graphviz
from sklearn.tree import export_graphviz
import pydotplus
dot_data = StringIO()
export_graphviz(Dmodel,
                out_file=dot_data,
                filled=True,
                rounded=True,
                special_characters=True,
                feature_names=feature_cols,
                class_names=['Good', 'Bad'])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('company_gini.png')
Image(graph.create_png())

Dclassifier = DecisionTreeClassifier(criterion="entropy")
Dmodel = Dclassifier.fit(X_train, np.ravel(Y_train))
Dpreds = Dmodel.predict(X_test)
Dresult = confusion_matrix(Y_test, Dpreds)
print("Confusion Matrix:")
print(Dresult)
Dresult1 = classification_report(Y_test, Dpreds)
print("Classification Report:", )
print(Dresult1)
Dresult2 = accuracy_score(Y_test, Dpreds)
print("Accuracy:", Dresult2)
Example #32
def changelog(since, to, write, force):
    """
    Generates a markdown file containing the list of checks that changed for a
    given Agent release. Agent version numbers are derived by inspecting tags on
    `integrations-core`, so running this tool might produce unexpected results
    if the repo is not up to date with the Agent release process.

    If neither `--since` nor `--to` is passed (the most common use case), the
    tool will generate the whole changelog since Agent version 6.3.0
    (before that point we don't have enough information to build the log).
    """
    agent_tags = get_agent_tags(since, to)

    # store the changes in a mapping {agent_version --> {check_name --> current_version}}
    changes_per_agent = OrderedDict()

    # to keep indexing easy, we run the loop off-by-one
    for i in range(1, len(agent_tags)):
        req_file_name = os.path.basename(get_agent_release_requirements())
        current_tag = agent_tags[i - 1]
        # Requirements for current tag
        file_contents = git_show_file(req_file_name, current_tag)
        catalog_now = parse_agent_req_file(file_contents)
        # Requirements for previous tag
        file_contents = git_show_file(req_file_name, agent_tags[i])
        catalog_prev = parse_agent_req_file(file_contents)

        changes_per_agent[current_tag] = OrderedDict()

        for name, ver in iteritems(catalog_now):
            # at some point in the git history, the requirements file erroneously
            # contained the folder name instead of the package name for each check,
            # let's be resilient
            old_ver = (
                catalog_prev.get(name)
                or catalog_prev.get(get_folder_name(name))
                or catalog_prev.get(get_package_name(name))
            )

            # normalize the package name to the check_name
            if name.startswith(DATADOG_PACKAGE_PREFIX):
                name = get_folder_name(name)

            if old_ver and old_ver != ver:
                # determine whether major version changed
                breaking = int(old_ver.split('.')[0]) < int(ver.split('.')[0])
                changes_per_agent[current_tag][name] = (ver, breaking)
            elif not old_ver:
                # New integration
                changes_per_agent[current_tag][name] = (ver, False)

    # store the changelog in memory
    changelog_contents = StringIO()

    # prepare the links
    agent_changelog_url = 'https://github.com/DataDog/datadog-agent/blob/master/CHANGELOG.rst#{}'
    check_changelog_url = 'https://github.com/DataDog/integrations-core/blob/master/{}/CHANGELOG.md'

    # go through all the agent releases
    for agent, version_changes in iteritems(changes_per_agent):
        url = agent_changelog_url.format(agent.replace('.', ''))  # Github removes dots from the anchor
        changelog_contents.write('## Datadog Agent version [{}]({})\n\n'.format(agent, url))

        if not version_changes:
            changelog_contents.write('* There were no integration updates for this version of the Agent.\n\n')
        else:
            for name, ver in iteritems(version_changes):
                # get the "display name" for the check
                manifest_file = os.path.join(get_root(), name, 'manifest.json')
                if os.path.exists(manifest_file):
                    decoded = json.loads(read_file(manifest_file).strip(), object_pairs_hook=OrderedDict)
                    display_name = decoded.get('display_name')
                else:
                    display_name = name

                breaking_notice = " **BREAKING CHANGE**" if ver[1] else ""
                changelog_url = check_changelog_url.format(name)
                changelog_contents.write(
                    '* {} [{}]({}){}\n'.format(display_name, ver[0], changelog_url, breaking_notice)
                )
            # add an extra line to separate the release block
            changelog_contents.write('\n')

    # save the changelog on disk if --write was passed
    if write:
        dest = get_agent_changelog()
        # don't overwrite an existing file
        if os.path.exists(dest) and not force:
            msg = "Output file {} already exists, run the command again with --force to overwrite"
            abort(msg.format(dest))

        write_file(dest, changelog_contents.getvalue())
    else:
        echo_info(changelog_contents.getvalue())
Example #33
class TestOutput(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        self.io = StringIO()

    def tearDown(self):
        self.io.close()

    def test_out_json_valid(self):
        """
        The JSON output when the input is a dict should be the dict serialized to JSON
        """
        output_producer = OutputProducer(formatter=format_json, file=self.io)
        output_producer.out(
            CommandResultItem({
                'active': True,
                'id': '0b1f6472'
            }))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""{
  "active": true,
  "id": "0b1f6472"
}
"""))

    def test_out_json_byte(self):
        output_producer = OutputProducer(formatter=format_json, file=self.io)
        output_producer.out(
            CommandResultItem({
                'active': True,
                'contents': b'0b1f6472'
            }))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""{
  "active": true,
  "contents": "0b1f6472"
}
"""))

    def test_out_json_byte_empty(self):
        output_producer = OutputProducer(formatter=format_json, file=self.io)
        output_producer.out(
            CommandResultItem({
                'active': True,
                'contents': b''
            }))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""{
  "active": true,
  "contents": ""
}
"""))

    def test_out_boolean_valid(self):
        output_producer = OutputProducer(formatter=format_list, file=self.io)
        output_producer.out(CommandResultItem(True))
        self.assertEqual(util.normalize_newlines(self.io.getvalue()),
                         util.normalize_newlines("""True\n\n\n"""))

    # TABLE output tests

    def test_out_table_valid_query1(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        result_item = CommandResultItem(
            [{
                'name': 'qwerty',
                'id': '0b1f6472qwerty'
            }, {
                'name': 'asdf',
                'id': '0b1f6472asdf'
            }],
            simple_output_query='[*].{Name:name, Id:id}')
        output_producer.out(result_item)
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines(""" Name  |       Id      
-------|---------------
qwerty | 0b1f6472qwerty
asdf   | 0b1f6472asdf  
"""))

    def test_out_table_no_query(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        with self.assertRaises(util.CLIError):
            output_producer.out(
                CommandResultItem({
                    'active': True,
                    'id': '0b1f6472'
                }))

    def test_out_table_valid_query2(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        result_item = CommandResultItem([{
            'name': 'qwerty',
            'id': '0b1f6472qwerty'
        }, {
            'name': 'asdf',
            'id': '0b1f6472asdf'
        }],
                                        simple_output_query='[*].{Name:name}')
        output_producer.out(result_item)
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines(""" Name 
------
qwerty
asdf  
"""))

    def test_out_table_bad_query(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        result_item = CommandResultItem([{
            'name': 'qwerty',
            'id': '0b1f6472qwerty'
        }, {
            'name': 'asdf',
            'id': '0b1f6472asdf'
        }],
                                        simple_output_query='[*].{Name:name')
        with self.assertRaises(util.CLIError):
            output_producer.out(result_item)

    def test_out_table_complex_obj(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        result_item = CommandResultItem([{
            'name': 'qwerty',
            'id': '0b1f6472qwerty',
            'sub': {'1'}
        }])
        with self.assertRaises(util.CLIError):
            output_producer.out(result_item)

    def test_out_table_complex_obj_with_query_ok(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        result_item = CommandResultItem([{
            'name': 'qwerty',
            'id': '0b1f6472qwerty',
            'sub': {'1'}
        }],
                                        simple_output_query='[*].{Name:name}')
        output_producer.out(result_item)
        self.assertEqual(util.normalize_newlines(self.io.getvalue()),
                         util.normalize_newlines(""" Name 
------
qwerty
"""))

    def test_out_table_complex_obj_with_query_still_complex(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        result_item = CommandResultItem(
            [{
                'name': 'qwerty',
                'id': '0b1f6472qwerty',
                'sub': {'1'}
            }],
            simple_output_query='[*].{Name:name, Sub:sub}')
        with self.assertRaises(util.CLIError):
            output_producer.out(result_item)

    # LIST output tests

    def test_out_list_valid(self):
        output_producer = OutputProducer(formatter=format_list, file=self.io)
        output_producer.out(
            CommandResultItem({
                'active': True,
                'id': '0b1f6472'
            }))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""Active : True
Id     : 0b1f6472


"""))

    def test_out_list_valid_none_val(self):
        output_producer = OutputProducer(formatter=format_list, file=self.io)
        output_producer.out(
            CommandResultItem({
                'active': None,
                'id': '0b1f6472'
            }))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""Active : None
Id     : 0b1f6472


"""))

    def test_out_list_valid_empty_array(self):
        output_producer = OutputProducer(formatter=format_list, file=self.io)
        output_producer.out(
            CommandResultItem({
                'active': None,
                'id': '0b1f6472',
                'hosts': []
            }))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""Active : None
Id     : 0b1f6472
Hosts  :
   None


"""))

    def test_out_list_valid_array_complex(self):
        output_producer = OutputProducer(formatter=format_list, file=self.io)
        output_producer.out(
            CommandResultItem([{
                'active': True,
                'id': '783yesdf'
            }, {
                'active': False,
                'id': '3hjnme32'
            }, {
                'active': False,
                'id': '23hiujbs'
            }]))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""Active : True
Id     : 783yesdf

Active : False
Id     : 3hjnme32

Active : False
Id     : 23hiujbs


"""))

    def test_out_list_valid_str_array(self):
        output_producer = OutputProducer(formatter=format_list, file=self.io)
        output_producer.out(
            CommandResultItem(['location', 'id', 'host', 'server']))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""location

id

host

server


"""))

    def test_out_list_valid_complex_array(self):
        output_producer = OutputProducer(formatter=format_list, file=self.io)
        output_producer.out(
            CommandResultItem({
                'active': True,
                'id': '0b1f6472',
                'myarray': ['1', '2', '3', '4']
            }))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""Active  : True
Id      : 0b1f6472
Myarray :
   1
   2
   3
   4


"""))

    def test_out_list_format_key_simple(self):
        lo = ListOutput()
        self.assertEqual(lo._formatted_keys_cache, {})
        lo._get_formatted_key('locationId')
        self.assertEqual(lo._formatted_keys_cache,
                         {'locationId': 'Location Id'})

    def test_out_list_format_key_single(self):
        lo = ListOutput()
        self.assertEqual(lo._formatted_keys_cache, {})
        lo._get_formatted_key('location')
        self.assertEqual(lo._formatted_keys_cache, {'location': 'Location'})

    def test_out_list_format_key_multiple_caps(self):
        lo = ListOutput()
        self.assertEqual(lo._formatted_keys_cache, {})
        lo._get_formatted_key('fooIDS')
        self.assertEqual(lo._formatted_keys_cache, {'fooIDS': 'Foo I D S'})

    def test_out_list_format_key_multiple_words(self):
        lo = ListOutput()
        self.assertEqual(lo._formatted_keys_cache, {})
        lo._get_formatted_key('locationIdState')
        self.assertEqual(lo._formatted_keys_cache,
                         {'locationIdState': 'Location Id State'})

    # TSV output tests
    def test_output_format_dict(self):
        obj = {}
        obj['A'] = 1
        obj['B'] = 2
        result = format_tsv(CommandResultItem(obj))
        self.assertEqual(result, '1\t2\n')

    def test_output_format_dict_sort(self):
        obj = {}
        obj['B'] = 1
        obj['A'] = 2
        result = format_tsv(CommandResultItem(obj))
        self.assertEqual(result, '2\t1\n')

    def test_output_format_ordereddict_not_sorted(self):
        from collections import OrderedDict
        obj = OrderedDict()
        obj['B'] = 1
        obj['A'] = 2
        result = format_tsv(CommandResultItem(obj))
        self.assertEqual(result, '1\t2\n')

    def test_output_format_ordereddict_list_not_sorted(self):
        from collections import OrderedDict
        obj1 = OrderedDict()
        obj1['B'] = 1
        obj1['A'] = 2

        obj2 = OrderedDict()
        obj2['A'] = 3
        obj2['B'] = 4
        result = format_tsv(CommandResultItem([obj1, obj2]))
        self.assertEqual(result, '1\t2\n3\t4\n')
Example #34
def _mol_writer(data,
                fmt='sdf',
                filepath_or_buffer=None,
                update_properties=True,
                molecule_column=None,
                columns=None):
    """Universal writing function for private use.

    .. versionadded:: 0.3

    Parameters
    ----------
        fmt : string
            The format of molecular file

        filepath_or_buffer : string or None
            File path

        update_properties : bool, optional (default=True)
            Switch to update properties from the DataFrames to the molecules
            while writing.

        molecule_column : string or None, optional (default='mol')
            Name of molecule column. If None the molecules will be skipped.

        columns : list or None, optional (default=None)
            A list of columns to write to file. If None then all available
            fields are written.

    """
    if filepath_or_buffer is None:
        out = StringIO()
    elif hasattr(filepath_or_buffer, 'write'):
        out = filepath_or_buffer
    else:
        out = oddt.toolkit.Outputfile(fmt, filepath_or_buffer, overwrite=True)
    if isinstance(data, pd.DataFrame):
        molecule_column = molecule_column or data._molecule_column
        for ix, row in data.iterrows():
            mol = row[molecule_column].clone
            if update_properties:
                new_data = row.to_dict()
                del new_data[molecule_column]
                mol.data.update(new_data)
            if columns:
                for k in mol.data.keys():
                    if k not in columns:
                        del mol.data[k]
            if filepath_or_buffer is None or hasattr(filepath_or_buffer,
                                                     'write'):
                out.write(mol.write(fmt))
            else:
                out.write(mol)
    elif isinstance(data, pd.Series):
        for mol in data:
            if filepath_or_buffer is None or hasattr(filepath_or_buffer,
                                                     'write'):
                out.write(mol.write(fmt))
            else:
                out.write(mol)
    if filepath_or_buffer is None:
        return out.getvalue()
    elif not hasattr(filepath_or_buffer, 'write'):  # don't close a foreign buffer
        out.close()
Example #35
    def test_get_arg_vals_for_call_stack(self):
        """Exercise SBFrame.GetVariables() API to get argument vals."""
        self.build()
        exe = os.path.join(os.getcwd(), "a.out")

        # Create a target by the debugger.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)

        # Now create a breakpoint on main.c by name 'c'.
        breakpoint = target.BreakpointCreateByName('c', 'a.out')
        #print("breakpoint:", breakpoint)
        self.assertTrue(breakpoint and breakpoint.GetNumLocations() == 1,
                        VALID_BREAKPOINT)

        # Now launch the process, and do not stop at the entry point.
        process = target.LaunchSimple(None, None,
                                      self.get_process_working_directory())

        process = target.GetProcess()
        self.assertTrue(process.GetState() == lldb.eStateStopped,
                        PROCESS_STOPPED)

        # Keeps track of the number of times 'a' is called where it is within a
        # depth of 3 of the 'c' leaf function.
        callsOfA = 0

        from six import StringIO as SixStringIO
        session = SixStringIO()
        while process.GetState() == lldb.eStateStopped:
            thread = lldbutil.get_stopped_thread(process,
                                                 lldb.eStopReasonBreakpoint)
            self.assertIsNotNone(thread)
            # Inspect at most 3 frames.
            numFrames = min(3, thread.GetNumFrames())
            for i in range(numFrames):
                frame = thread.GetFrameAtIndex(i)
                if self.TraceOn():
                    print("frame:", frame)

                name = frame.GetFunction().GetName()
                if name == 'a':
                    callsOfA = callsOfA + 1

                # We'll inspect only the arguments for the current frame:
                #
                # arguments     => True
                # locals        => False
                # statics       => False
                # in_scope_only => True
                valList = frame.GetVariables(True, False, False, True)
                argList = []
                for val in valList:
                    argList.append(
                        "(%s)%s=%s" %
                        (val.GetTypeName(), val.GetName(), val.GetValue()))
                print("%s(%s)" % (name, ", ".join(argList)), file=session)

                # Also check the generic pc & stack pointer.  We can't test their absolute values,
                # but they should be valid.  Uses get_GPRs() from the lldbutil
                # module.
                gpr_reg_set = lldbutil.get_GPRs(frame)
                pc_value = gpr_reg_set.GetChildMemberWithName("pc")
                self.assertTrue(pc_value, "We should have a valid PC.")
                pc_value_int = int(pc_value.GetValue(), 0)
                # Make sure that on ARM targets we don't mismatch the PC value
                # because of the Thumb bit: the frame PC will not have the Thumb
                # bit set when the PC points at a Thumb instruction.
                if self.getArchitecture() in ['arm']:
                    pc_value_int &= ~1
                self.assertTrue(
                    pc_value_int == frame.GetPC(),
                    "PC gotten as a value should equal frame's GetPC")
                sp_value = gpr_reg_set.GetChildMemberWithName("sp")
                self.assertTrue(sp_value,
                                "We should have a valid Stack Pointer.")
                self.assertTrue(
                    int(sp_value.GetValue(), 0) == frame.GetSP(),
                    "SP gotten as a value should equal frame's GetSP")

            print("---", file=session)
            process.Continue()

        # At this point, the inferior process should have exited.
        self.assertTrue(process.GetState() == lldb.eStateExited,
                        PROCESS_EXITED)

        # Expect to find 'a' on the call stacks two times.
        self.assertTrue(callsOfA == 2,
                        "Expect to find 'a' on the call stacks two times")
        # By design, the 'a' call frame has the following arg vals:
        #     o a((int)val=1, (char)ch='A')
        #     o a((int)val=3, (char)ch='A')
        if self.TraceOn():
            print("Full stack traces when stopped on the breakpoint 'c':")
            print(session.getvalue())
        self.expect(session.getvalue(),
                    "Argument values displayed correctly",
                    exe=False,
                    substrs=[
                        "a((int)val=1, (char)ch='A')",
                        "a((int)val=3, (char)ch='A')"
                    ])
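
The test above builds its expectation by printing one line per frame into a StringIO "session" buffer and then asserting on substrings of the captured text. Below is a minimal, debugger-free sketch of that capture-and-check pattern; plain tuples stand in for lldb's SBFrame/SBValue objects, and render_frames is an invented helper, not part of the lldb API.

from io import StringIO

def render_frames(frames, session):
    # frames: iterable of (function_name, [(type, name, value), ...]) tuples
    for name, args in frames:
        arg_list = ["(%s)%s=%s" % (t, n, v) for t, n, v in args]
        print("%s(%s)" % (name, ", ".join(arg_list)), file=session)
    print("---", file=session)

session = StringIO()
render_frames([("a", [("int", "val", 1), ("char", "ch", "'A'")]),
               ("b", [("int", "val", 2)])], session)
assert "a((int)val=1, (char)ch='A')" in session.getvalue()
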
Example #36
0
            def build_example(label, param_dict_real):
                """Build the model with parameter values set in param_dict_real.

        Args:
          label: Label of the model (i.e. the filename in the zip).
          param_dict_real: Parameter dictionary (arguments to the factories
            make_graph and make_test_inputs)

        Returns:
          (tflite_model_binary, report) where tflite_model_binary is the
          serialized flatbuffer as a string and report is a dictionary with
          keys `toco_log` (log of toco conversion), `tf_log` (log of tf
          conversion), `toco` (a string of success status of the conversion),
          `tf` (a string success status of the conversion).
        """

                np.random.seed(RANDOM_SEED)
                report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}

                # Build graph
                report["tf_log"] = ""
                report["toco_log"] = ""
                tf.compat.v1.reset_default_graph()

                with tf.device("/cpu:0"):
                    try:
                        inputs, outputs = make_graph(param_dict_real)
                    except (tf.errors.UnimplementedError,
                            tf.errors.InvalidArgumentError, ValueError):
                        report["tf_log"] += traceback.format_exc()
                        return None, report

                sess = tf.compat.v1.Session()
                try:
                    baseline_inputs, baseline_outputs = (make_test_inputs(
                        param_dict_real, sess, inputs, outputs))
                except (tf.errors.UnimplementedError,
                        tf.errors.InvalidArgumentError, ValueError):
                    report["tf_log"] += traceback.format_exc()
                    return None, report
                report["toco"] = report_lib.FAILED
                report["tf"] = report_lib.SUCCESS
                # Convert graph to toco
                input_tensors = [(input_tensor.name.split(":")[0],
                                  input_tensor.shape, input_tensor.dtype)
                                 for input_tensor in inputs]
                output_tensors = [
                    _normalize_output_name(out.name) for out in outputs
                ]
                # pylint: disable=g-long-ternary
                graph_def = freeze_graph(
                    sess,
                    tf.global_variables() + inputs +
                    outputs) if use_frozen_graph else sess.graph_def

                if "split_tflite_lstm_inputs" in param_dict_real:
                    extra_toco_options.split_tflite_lstm_inputs = param_dict_real[
                        "split_tflite_lstm_inputs"]
                tflite_model_binary, toco_log = options.tflite_convert_function(
                    options,
                    graph_def,
                    input_tensors,
                    output_tensors,
                    extra_toco_options=extra_toco_options,
                    test_params=param_dict_real)
                report["toco"] = (report_lib.SUCCESS if tflite_model_binary
                                  is not None else report_lib.FAILED)
                report["toco_log"] = toco_log

                if options.save_graphdefs:
                    archive.writestr(label + ".pbtxt",
                                     text_format.MessageToString(graph_def),
                                     zipfile.ZIP_DEFLATED)

                if tflite_model_binary:
                    if options.make_edgetpu_tests:
                        # Set proper min max values according to input dtype.
                        baseline_inputs, baseline_outputs = generate_inputs_outputs(
                            tflite_model_binary, min_value=0, max_value=255)
                    archive.writestr(label + ".bin", tflite_model_binary,
                                     zipfile.ZIP_DEFLATED)
                    example = {
                        "inputs": baseline_inputs,
                        "outputs": baseline_outputs
                    }

                    example_fp = StringIO()
                    write_examples(example_fp, [example])
                    archive.writestr(label + ".inputs", example_fp.getvalue(),
                                     zipfile.ZIP_DEFLATED)

                    example_fp2 = StringIO()
                    write_test_cases(example_fp2, label + ".bin", [example])
                    archive.writestr(label + "_tests.txt",
                                     example_fp2.getvalue(),
                                     zipfile.ZIP_DEFLATED)

                    zip_manifest.append(label + "\n")

                return tflite_model_binary, report
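
build_example above serializes each example into StringIO buffers (via write_examples / write_test_cases) and then stores the buffered text in the zip archive with writestr. A tiny self-contained sketch of that buffer-then-writestr pattern; the entry name and payload are made up for illustration.

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as archive:
    example_fp = io.StringIO()
    # Stand-in for write_examples(example_fp, [example])
    example_fp.write("inputs: [1, 2, 3]\noutputs: [6]\n")
    archive.writestr("add_example.inputs", example_fp.getvalue(),
                     zipfile.ZIP_DEFLATED)
print(zipfile.ZipFile(buf).namelist())  # ['add_example.inputs']
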
Example #37
0
def make_zip_of_tests(options,
                      test_parameters,
                      make_graph,
                      make_test_inputs,
                      extra_toco_options=ExtraTocoOptions(),
                      use_frozen_graph=False,
                      expected_tf_failures=0):
    """Helper to make a zip file of a bunch of TensorFlow models.

  This does a cartesian product of the dictionary of test_parameters and
  calls make_graph() for each item in the cartesian product set.
  If the graph is built successfully, then make_test_inputs() is called to
  build expected input/output value pairs. The model is then converted to tflite
  with toco, and the examples are serialized with the tflite model into a zip
  file (2 files per item in the cartesian product set).

  Args:
    options: An Options instance.
    test_parameters: Dictionary mapping to lists for each parameter.
      e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
    make_graph: function that takes current parameters and returns tuple
      `[input1, input2, ...], [output1, output2, ...]`
    make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
      `output_tensors` and returns tuple `(input_values, output_values)`.
    extra_toco_options: Additional toco options.
    use_frozen_graph: Whether or not freeze graph before toco converter.
    expected_tf_failures: Number of times tensorflow is expected to fail in
      executing the input graphs. In some cases it is OK for TensorFlow to fail
      because one or more combinations of parameters are invalid.

  Raises:
    RuntimeError: if there are converter errors that can't be ignored.
  """
    zip_path = os.path.join(options.output_path, options.zip_to_output)
    parameter_count = 0
    for parameters in test_parameters:
        parameter_count += functools.reduce(
            operator.mul, [len(values) for values in parameters.values()])

    all_parameter_count = parameter_count
    if options.multi_gen_state:
        all_parameter_count += options.multi_gen_state.parameter_count
    if not options.no_tests_limit and all_parameter_count > _MAX_TESTS_PER_ZIP:
        raise RuntimeError(
            "Too many parameter combinations for generating '%s'.\n"
            "There are at least %d combinations while the upper limit is %d.\n"
            "Having too many combinations will slow down the tests.\n"
            "Please consider splitting the test into multiple functions.\n" %
            (zip_path, all_parameter_count, _MAX_TESTS_PER_ZIP))
    if options.multi_gen_state:
        options.multi_gen_state.parameter_count = all_parameter_count

    # TODO(aselle): Make this allow multiple inputs outputs.
    if options.multi_gen_state:
        archive = options.multi_gen_state.archive
    else:
        archive = zipfile.PyZipFile(zip_path, "w")
    zip_manifest = []
    convert_report = []
    toco_errors = 0

    processed_labels = set()

    if options.make_edgetpu_tests:
        extra_toco_options.inference_input_type = tf.uint8
        extra_toco_options.inference_output_type = tf.uint8
        # Only count parameters when fully_quantize is True.
        parameter_count = 0
        for parameters in test_parameters:
            if True in parameters.get("fully_quantize", []):
                parameter_count += functools.reduce(operator.mul, [
                    len(values) for key, values in parameters.items()
                    if key != "fully_quantize"
                ])

    label_base_path = zip_path
    if options.multi_gen_state:
        label_base_path = options.multi_gen_state.label_base_path

    for parameters in test_parameters:
        keys = parameters.keys()
        for curr in itertools.product(*parameters.values()):
            label = label_base_path.replace(".zip", "_") + (",".join(
                "%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
            if label[0] == "/":
                label = label[1:]
            if label in processed_labels:
                # Do not populate data for the same label more than once. It will cause
                # errors when unzipping.
                continue
            processed_labels.add(label)

            param_dict = dict(zip(keys, curr))

            if options.make_edgetpu_tests and not param_dict.get(
                    "fully_quantize", False):
                continue

            def generate_inputs_outputs(tflite_model_binary,
                                        min_value=0,
                                        max_value=255):
                """Generate input values and output values of the given tflite model.

        Args:
          tflite_model_binary: A serialized flatbuffer as a string.
          min_value: min value for the input tensor.
          max_value: max value for the input tensor.

        Returns:
          (input_values, output_values): input values and output values built.
        """
                interpreter = tf.lite.Interpreter(
                    model_content=tflite_model_binary)
                interpreter.allocate_tensors()

                input_details = interpreter.get_input_details()
                input_values = []
                for input_detail in input_details:
                    input_value = create_tensor_data(input_detail["dtype"],
                                                     input_detail["shape"],
                                                     min_value=min_value,
                                                     max_value=max_value)
                    interpreter.set_tensor(input_detail["index"], input_value)
                    input_values.append(input_value)

                interpreter.invoke()

                output_details = interpreter.get_output_details()
                output_values = []
                for output_detail in output_details:
                    output_values.append(
                        interpreter.get_tensor(output_detail["index"]))

                return input_values, output_values

            def build_example(label, param_dict_real):
                """Build the model with parameter values set in param_dict_real.

        Args:
          label: Label of the model (i.e. the filename in the zip).
          param_dict_real: Parameter dictionary (arguments to the factories
            make_graph and make_test_inputs)

        Returns:
          (tflite_model_binary, report) where tflite_model_binary is the
          serialized flatbuffer as a string and report is a dictionary with
          keys `toco_log` (log of toco conversion), `tf_log` (log of tf
          conversion), `toco` (a string of success status of the conversion),
          `tf` (a string success status of the conversion).
        """

                np.random.seed(RANDOM_SEED)
                report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}

                # Build graph
                report["tf_log"] = ""
                report["toco_log"] = ""
                tf.compat.v1.reset_default_graph()

                with tf.device("/cpu:0"):
                    try:
                        inputs, outputs = make_graph(param_dict_real)
                    except (tf.errors.UnimplementedError,
                            tf.errors.InvalidArgumentError, ValueError):
                        report["tf_log"] += traceback.format_exc()
                        return None, report

                sess = tf.compat.v1.Session()
                try:
                    baseline_inputs, baseline_outputs = (make_test_inputs(
                        param_dict_real, sess, inputs, outputs))
                except (tf.errors.UnimplementedError,
                        tf.errors.InvalidArgumentError, ValueError):
                    report["tf_log"] += traceback.format_exc()
                    return None, report
                report["toco"] = report_lib.FAILED
                report["tf"] = report_lib.SUCCESS
                # Convert graph to toco
                input_tensors = [(input_tensor.name.split(":")[0],
                                  input_tensor.shape, input_tensor.dtype)
                                 for input_tensor in inputs]
                output_tensors = [
                    _normalize_output_name(out.name) for out in outputs
                ]
                # pylint: disable=g-long-ternary
                graph_def = freeze_graph(
                    sess,
                    tf.global_variables() + inputs +
                    outputs) if use_frozen_graph else sess.graph_def

                if "split_tflite_lstm_inputs" in param_dict_real:
                    extra_toco_options.split_tflite_lstm_inputs = param_dict_real[
                        "split_tflite_lstm_inputs"]
                tflite_model_binary, toco_log = options.tflite_convert_function(
                    options,
                    graph_def,
                    input_tensors,
                    output_tensors,
                    extra_toco_options=extra_toco_options,
                    test_params=param_dict_real)
                report["toco"] = (report_lib.SUCCESS if tflite_model_binary
                                  is not None else report_lib.FAILED)
                report["toco_log"] = toco_log

                if options.save_graphdefs:
                    archive.writestr(label + ".pbtxt",
                                     text_format.MessageToString(graph_def),
                                     zipfile.ZIP_DEFLATED)

                if tflite_model_binary:
                    if options.make_edgetpu_tests:
                        # Set proper min max values according to input dtype.
                        baseline_inputs, baseline_outputs = generate_inputs_outputs(
                            tflite_model_binary, min_value=0, max_value=255)
                    archive.writestr(label + ".bin", tflite_model_binary,
                                     zipfile.ZIP_DEFLATED)
                    example = {
                        "inputs": baseline_inputs,
                        "outputs": baseline_outputs
                    }

                    example_fp = StringIO()
                    write_examples(example_fp, [example])
                    archive.writestr(label + ".inputs", example_fp.getvalue(),
                                     zipfile.ZIP_DEFLATED)

                    example_fp2 = StringIO()
                    write_test_cases(example_fp2, label + ".bin", [example])
                    archive.writestr(label + "_tests.txt",
                                     example_fp2.getvalue(),
                                     zipfile.ZIP_DEFLATED)

                    zip_manifest.append(label + "\n")

                return tflite_model_binary, report

            _, report = build_example(label, param_dict)

            if report["toco"] == report_lib.FAILED:
                ignore_error = False
                if not options.known_bugs_are_errors:
                    for pattern, bug_number in options.known_bugs.items():
                        if re.search(pattern, label):
                            print("Ignored converter error due to bug %s" %
                                  bug_number)
                            ignore_error = True
                if not ignore_error:
                    toco_errors += 1
                    print(
                        "-----------------\nconverter error!\n%s\n-----------------\n"
                        % report["toco_log"])

            convert_report.append((param_dict, report))

    if not options.no_conversion_report:
        report_io = StringIO()
        report_lib.make_report_table(report_io, zip_path, convert_report)
        if options.multi_gen_state:
            archive.writestr(
                "report_" + options.multi_gen_state.test_name + ".html",
                report_io.getvalue())
        else:
            archive.writestr("report.html", report_io.getvalue())

    if options.multi_gen_state:
        options.multi_gen_state.zip_manifest.extend(zip_manifest)
    else:
        archive.writestr("manifest.txt", "".join(zip_manifest),
                         zipfile.ZIP_DEFLATED)

    # Log statistics of what succeeded
    total_conversions = len(convert_report)
    tf_success = sum(1 for x in convert_report
                     if x[1]["tf"] == report_lib.SUCCESS)
    toco_success = sum(1 for x in convert_report
                       if x[1]["toco"] == report_lib.SUCCESS)
    percent = 0
    if tf_success > 0:
        percent = float(toco_success) / float(tf_success) * 100.
    tf.compat.v1.logging.info(
        ("Archive %s Considered %d graphs, %d TF evaluated graphs "
         "and %d TOCO converted graphs (%.1f%%)"), zip_path, total_conversions,
        tf_success, toco_success, percent)

    tf_failures = parameter_count - tf_success

    if tf_failures / parameter_count > 0.8:
        raise RuntimeError(
            ("Test for '%s' is not very useful. "
             "TensorFlow fails in %d percent of the cases.") %
            (zip_path, int(100 * tf_failures / parameter_count)))

    if not options.make_edgetpu_tests and tf_failures != expected_tf_failures:
        raise RuntimeError(
            ("Expected TF to fail %d times while generating '%s', "
             "but that happened %d times") %
            (expected_tf_failures, zip_path, tf_failures))

    if not options.ignore_converter_errors and toco_errors > 0:
        raise RuntimeError("Found %d errors while generating toco models" %
                           toco_errors)
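
make_zip_of_tests expands each dictionary in test_parameters via a cartesian product of its value lists, and every combination becomes one label/param_dict pair (the label doubling as the file name inside the zip). A small self-contained sketch of that expansion; the parameter names and values are invented.

import itertools

test_parameters = [{"strides": [[1, 3, 3, 1], [1, 2, 2, 1]], "foo": [1.2, 1.3]}]
for parameters in test_parameters:
    keys = parameters.keys()
    for curr in itertools.product(*parameters.values()):
        param_dict = dict(zip(keys, curr))
        label = ",".join("%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", "")
        print(label, param_dict)
        # e.g. foo=1.2,strides=[1,3,3,1] {'strides': [1, 3, 3, 1], 'foo': 1.2}
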
Example #38
0
    def _write_equations_section(self, model, output_file, all_blocks_list,
                                 active_components_data_var, symbol_map,
                                 c_labeler, output_fixed_variable_bounds,
                                 skip_trivial_constraints, sorter):

        referenced_variable_ids = set()

        def _skip_trivial(constraint_data):
            if skip_trivial_constraints:
                if constraint_data._linear_canonical_form:
                    repn = constraint_data.canonical_form()
                    if (repn.variables is None) or \
                       (len(repn.variables) == 0):
                        return True
                elif constraint_data.body.polynomial_degree() == 0:
                    return True
            return False

        #
        # Check for active suffixes to export
        #
        if isinstance(model, IBlock):
            suffix_gen = lambda b: ((suf.storage_key, suf) \
                                    for suf in pyomo.core.kernel.suffix.\
                                    export_suffix_generator(b,
                                                            active=True,
                                                            descend_into=False))
        else:
            suffix_gen = lambda b: pyomo.core.base.suffix.\
                         active_export_suffix_generator(b)
        r_o_eqns = []
        c_eqns = []
        l_eqns = []
        branching_priorities_suffixes = []
        for block in all_blocks_list:
            for name, suffix in suffix_gen(block):
                if name == 'branching_priorities':
                    branching_priorities_suffixes.append(suffix)
                elif name == 'constraint_types':
                    for constraint_data, constraint_type in iteritems(suffix):
                        if not _skip_trivial(constraint_data):
                            if constraint_type.lower() == 'relaxationonly':
                                r_o_eqns.append(constraint_data)
                            elif constraint_type.lower() == 'convex':
                                c_eqns.append(constraint_data)
                            elif constraint_type.lower() == 'local':
                                l_eqns.append(constraint_data)
                            else:
                                raise ValueError(
                                    "A suffix '%s' contained an invalid value: %s\n"
                                    "Choices are: [relaxationonly, convex, local]"
                                    % (suffix.name, constraint_type))
                else:
                    raise ValueError(
                        "The BARON writer cannot export suffix with name '%s'. "
                        "Either remove it from block '%s' or deactivate it." %
                        (name, block.name))

        non_standard_eqns = r_o_eqns + c_eqns + l_eqns

        #
        # EQUATIONS
        #

        #Equation Declaration
        n_roeqns = len(r_o_eqns)
        n_ceqns = len(c_eqns)
        n_leqns = len(l_eqns)
        eqns = []

        # Alias the constraints by declaration order since Baron does not
        # include the constraint names in the solution file. It is important
        # that this alias not clash with any real constraint labels, hence
        # the use of the ".c<integer>" template. It is not possible to declare
        # a component having this type of name when using standard syntax.
        # There are ways to do it, but it is unlikely someone will.
        order_counter = 0
        alias_template = ".c%d"
        output_file.write('EQUATIONS ')
        output_file.write("c_e_FIX_ONE_VAR_CONST__")
        order_counter += 1
        for block in all_blocks_list:

            for constraint_data in block.component_data_objects(
                    Constraint, active=True, sort=sorter, descend_into=False):

                if (not constraint_data.has_lb()) and \
                   (not constraint_data.has_ub()):
                    assert not constraint_data.equality
                    continue  # non-binding, so skip

                if (not _skip_trivial(constraint_data)) and \
                   (constraint_data not in non_standard_eqns):

                    eqns.append(constraint_data)

                    con_symbol = symbol_map.createSymbol(
                        constraint_data, c_labeler)
                    assert not con_symbol.startswith('.')
                    assert con_symbol != "c_e_FIX_ONE_VAR_CONST__"

                    symbol_map.alias(constraint_data,
                                     alias_template % order_counter)
                    output_file.write(", " + str(con_symbol))
                    order_counter += 1

        output_file.write(";\n\n")

        if n_roeqns > 0:
            output_file.write('RELAXATION_ONLY_EQUATIONS ')
            for i, constraint_data in enumerate(r_o_eqns):
                con_symbol = symbol_map.createSymbol(constraint_data,
                                                     c_labeler)
                assert not con_symbol.startswith('.')
                assert con_symbol != "c_e_FIX_ONE_VAR_CONST__"
                symbol_map.alias(constraint_data,
                                 alias_template % order_counter)
                if i == n_roeqns - 1:
                    output_file.write(str(con_symbol) + ';\n\n')
                else:
                    output_file.write(str(con_symbol) + ', ')
                order_counter += 1

        if n_ceqns > 0:
            output_file.write('CONVEX_EQUATIONS ')
            for i, constraint_data in enumerate(c_eqns):
                con_symbol = symbol_map.createSymbol(constraint_data,
                                                     c_labeler)
                assert not con_symbol.startswith('.')
                assert con_symbol != "c_e_FIX_ONE_VAR_CONST__"
                symbol_map.alias(constraint_data,
                                 alias_template % order_counter)
                if i == n_ceqns - 1:
                    output_file.write(str(con_symbol) + ';\n\n')
                else:
                    output_file.write(str(con_symbol) + ', ')
                order_counter += 1

        if n_leqns > 0:
            output_file.write('LOCAL_EQUATIONS ')
            for i, constraint_data in enumerate(l_eqns):
                con_symbol = symbol_map.createSymbol(constraint_data,
                                                     c_labeler)
                assert not con_symbol.startswith('.')
                assert con_symbol != "c_e_FIX_ONE_VAR_CONST__"
                symbol_map.alias(constraint_data,
                                 alias_template % order_counter)
                if i == n_leqns - 1:
                    output_file.write(str(con_symbol) + ';\n\n')
                else:
                    output_file.write(str(con_symbol) + ', ')
                order_counter += 1

        # Create a dictionary of baron variable names to match to the
        # strings that constraint.to_string() prints. An important
        # note is that the variable strings are padded by spaces so
        # that whole variable names are recognized, and simple
        # variable names are not identified inside longer names.
        # Example: ' x[1] ' -> ' x3 '
        #FIXME: 7/18/14 CLH: This may cause mistakes if spaces in
        #                    variable names are allowed
        if isinstance(model, IBlock):
            mutable_param_gen = lambda b: \
                                b.components(ctype=Param,
                                             descend_into=False)
        else:

            def mutable_param_gen(b):
                for param in b.component_objects(Param):
                    if param._mutable and param.is_indexed():
                        param_data_iter = \
                            (param_data for index, param_data
                             in iteritems(param))
                    elif not param.is_indexed():
                        param_data_iter = iter([param])
                    else:
                        param_data_iter = iter([])

                    for param_data in param_data_iter:
                        yield param_data

        if False:
            #
            # This was part of a merge from master that caused
            # test failures.  But commenting this out didn't cause additional failures!?!
            #
            vstring_to_var_dict = {}
            vstring_to_bar_dict = {}
            pstring_to_bar_dict = {}
            _val_template = ' %' + self._precision_string + ' '
            for block in all_blocks_list:
                for var_data in active_components_data_var[id(block)]:
                    variable_stream = StringIO()
                    var_data.to_string(ostream=variable_stream, verbose=False)
                    variable_string = variable_stream.getvalue()
                    variable_string = ' ' + variable_string + ' '
                    vstring_to_var_dict[variable_string] = var_data
                    if output_fixed_variable_bounds or (not var_data.fixed):
                        vstring_to_bar_dict[variable_string] = \
                            ' '+object_symbol_dictionary[id(var_data)]+' '
                    else:
                        assert var_data.value is not None
                        vstring_to_bar_dict[variable_string] = \
                            (_val_template % (var_data.value,))

                for param_data in mutable_param_gen(block):
                    param_stream = StringIO()
                    param_data.to_string(ostream=param_stream, verbose=False)
                    param_string = param_stream.getvalue()

                    param_string = ' ' + param_string + ' '
                    pstring_to_bar_dict[param_string] = \
                        (_val_template % (param_data(),))

        # Equation Definition
        string_template = '%' + self._precision_string
        output_file.write('c_e_FIX_ONE_VAR_CONST__:  ONE_VAR_CONST__  == 1;\n')
        for constraint_data in itertools.chain(eqns, r_o_eqns, c_eqns, l_eqns):

            variables = set()
            #print(symbol_map.byObject.keys())
            eqn_body = expression_to_string(constraint_data.body,
                                            variables,
                                            smap=symbol_map)
            #print(symbol_map.byObject.keys())
            referenced_variable_ids.update(variables)

            if len(variables) == 0:
                assert not skip_trivial_constraints
                eqn_body += " + 0 * ONE_VAR_CONST__ "

            # 7/29/14 CLH:
            #FIXME: Baron doesn't handle many of the
            #       intrinsic_functions available in pyomo. The
            #       error message given by baron is also very
            #       weak.  Either a function here to re-write
            #       unallowed expressions or a way to track solver
            #       capability by intrinsic_expression would be
            #       useful.
            ##########################

            con_symbol = symbol_map.byObject[id(constraint_data)]
            output_file.write(str(con_symbol) + ': ')

            # Fill in the left and right hand side (constants) of
            #  the equations

            # Equality constraint
            if constraint_data.equality:
                eqn_lhs = ''
                eqn_rhs = ' == ' + \
                          str(string_template
                              % self._get_bound(constraint_data.upper))

            # Greater than constraint
            elif not constraint_data.has_ub():
                eqn_rhs = ' >= ' + \
                          str(string_template
                              % self._get_bound(constraint_data.lower))
                eqn_lhs = ''

            # Less than constraint
            elif not constraint_data.has_lb():
                eqn_rhs = ' <= ' + \
                          str(string_template
                              % self._get_bound(constraint_data.upper))
                eqn_lhs = ''

            # Double-sided constraint
            elif constraint_data.has_lb() and \
                 constraint_data.has_ub():
                eqn_lhs = str(string_template
                              % self._get_bound(constraint_data.lower)) + \
                          ' <= '
                eqn_rhs = ' <= ' + \
                          str(string_template
                              % self._get_bound(constraint_data.upper))

            eqn_string = eqn_lhs + eqn_body + eqn_rhs + ';\n'
            output_file.write(eqn_string)

        #
        # OBJECTIVE
        #

        output_file.write("\nOBJ: ")

        n_objs = 0
        for block in all_blocks_list:

            for objective_data in block.component_data_objects(
                    Objective, active=True, sort=sorter, descend_into=False):

                n_objs += 1
                if n_objs > 1:
                    raise ValueError(
                        "The BARON writer has detected multiple active "
                        "objective functions on model %s, but "
                        "currently only handles a single objective." %
                        (model.name))

                # create symbol
                symbol_map.createSymbol(objective_data, c_labeler)
                symbol_map.alias(objective_data, "__default_objective__")

                if objective_data.is_minimizing():
                    output_file.write("minimize ")
                else:
                    output_file.write("maximize ")

                variables = set()
                #print(symbol_map.byObject.keys())
                obj_string = expression_to_string(objective_data.expr,
                                                  variables,
                                                  smap=symbol_map)
                #print(symbol_map.byObject.keys())
                referenced_variable_ids.update(variables)

        output_file.write(obj_string + ";\n\n")
        #referenced_variable_ids.update(symbol_map.byObject.keys())

        return referenced_variable_ids, branching_priorities_suffixes
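
The comment in the EQUATIONS block above explains why every constraint gets both a real symbol and a ".c<integer>" alias: BARON reports constraints by declaration order, and a leading dot cannot clash with any component name declared through standard syntax. A minimal sketch of that bookkeeping, with a plain dict standing in for the SymbolMap and invented constraint names.

from io import StringIO

def write_equations_header(constraint_names, output_file):
    # Map each constraint to its positional alias ".c<k>" while writing
    # the EQUATIONS declaration line.
    aliases = {}
    order_counter = 0
    output_file.write('EQUATIONS ')
    output_file.write("c_e_FIX_ONE_VAR_CONST__")
    order_counter += 1
    for name in constraint_names:
        aliases[name] = ".c%d" % order_counter
        output_file.write(", " + name)
        order_counter += 1
    output_file.write(";\n\n")
    return aliases

out = StringIO()
print(write_equations_header(["c1", "c2"], out))  # {'c1': '.c1', 'c2': '.c2'}
print(out.getvalue())                             # EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2;
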
Example #39
0
    def test_merge_model_instances(self, keep_first_or_last_instance, get_field_names, get_model_to_deduplicate):
        get_model_to_deduplicate.return_value = Person
        get_field_names.return_value = ["name"]
        keep_first_or_last_instance.return_value = "first"

        name = Name.objects.create(name="Name")
        note = Note.objects.create(note="This is a note.")
        personality_1 = Personality.objects.create(description="Child 1's personality.")
        personality_2 = Personality.objects.create(description="Child 2's personality.")
        child_1 = Person.objects.create(
            name=Name.objects.create(name="Child1"),
            age=10,
            personality=personality_1,
        )
        child_1.notes.add(note)
        child_2 = Person.objects.create(
            name=Name.objects.create(name="Child2"),
            age=10,
            personality=personality_2,
        )
        child_2.notes.add(note)

        club1 = Club.objects.create(name="Club one")
        club2 = Club.objects.create(name="Club two")
        person_1 = Person.objects.create(
            name=name,
            age=50,
            personality=Personality.objects.create(description="First personality"),
        )
        person_1.children.add(child_1)
        person_1.notes.add(note)
        Permission.objects.create(text="Permission", person=person_1)

        person_2 = Person.objects.create(
            name=name,
            age=50,
            personality=Personality.objects.create(description="Second personality"),
        )
        person_2.children.add(child_2)
        new_note = Note.objects.create(note="This is a new note")
        person_2.notes.add(new_note)
        Membership.objects.create(club=club1, person=person_2)
        Membership.objects.create(club=club1, person=person_2)
        Permission.objects.create(text="Permission", person=person_2)

        person_3 = Person.objects.create(
            name=name,
            age=50,
            personality=Personality.objects.create(description="Third personality"),
        )
        person_3.children.add(child_2)
        person_3.notes.add(new_note)
        Membership.objects.create(club=club2, person=person_3)
        Membership.objects.create(club=club2, person=person_3)
        Permission.objects.create(text="Permission", person=person_3)

        self.assertEqual(Person.objects.count(), 5)
        self.assertEqual(Membership.objects.count(), 4)
        out = StringIO()
        call_command('merge_model_instances', stdout=out)
        self.output = out.getvalue()
        self.assertEqual(Person.objects.count(), 3)
        person = Person.objects.get(name__name="Name")
        self.assertRaises(
            Person.DoesNotExist,
            lambda: Person.objects.get(personality__description="Second personality"),
        )
        self.assertEqual(person.notes.count(), 2)
        self.assertEqual(person.clubs.distinct().count(), 2)
        self.assertEqual(person.permission_set.count(), 3)
        self.assertRaises(
            Personality.DoesNotExist,
            lambda: Personality.objects.get(description="Second personality"),
        )
Example #40
0
    def __call__(self, model, output_filename, solver_capability, io_options):

        # Make sure not to modify the user's dictionary, they may be
        # reusing it outside of this call
        io_options = dict(io_options)

        # NOTE: io_options is a simple dictionary of keyword-value
        #       pairs specific to this writer.
        symbolic_solver_labels = \
            io_options.pop("symbolic_solver_labels", False)
        labeler = io_options.pop("labeler", None)

        # How much effort do we want to put into ensuring the
        # LP file is written deterministically for a Pyomo model:
        #    0 : None
        #    1 : sort keys of indexed components (default)
        #    2 : sort keys AND sort names (over declaration order)
        file_determinism = io_options.pop("file_determinism", 1)

        sorter = SortComponents.unsorted
        if file_determinism >= 1:
            sorter = sorter | SortComponents.indices
            if file_determinism >= 2:
                sorter = sorter | SortComponents.alphabetical

        output_fixed_variable_bounds = \
            io_options.pop("output_fixed_variable_bounds", False)

        # Skip writing constraints whose body section is fixed (i.e.,
        # no variables)
        skip_trivial_constraints = \
            io_options.pop("skip_trivial_constraints", False)

        # Note: Baron does not allow specification of runtime
        #       option outside of this file, so we add support
        #       for them here
        solver_options = io_options.pop("solver_options", {})

        if len(io_options):
            raise ValueError(
                "ProblemWriter_baron_writer passed unrecognized io_options:\n\t"
                + "\n\t".join("%s = %s" % (k, v)
                              for k, v in iteritems(io_options)))

        if symbolic_solver_labels and (labeler is not None):
            raise ValueError("Baron problem writer: Using both the "
                             "'symbolic_solver_labels' and 'labeler' "
                             "I/O options is forbidden")

        # Make sure there are no strange ActiveComponents. The expression
        # walker will handle strange things in constraints later.
        model_ctypes = model.collect_ctypes(active=True)
        invalids = set()
        for t in (model_ctypes - valid_active_ctypes_minlp):
            if issubclass(t, ActiveComponent):
                invalids.add(t)
        if len(invalids):
            invalids = [t.__name__ for t in invalids]
            raise RuntimeError(
                "Unallowable active component(s) %s.\nThe BARON writer cannot "
                "export models with this component type." %
                ", ".join(invalids))

        if output_filename is None:
            output_filename = model.name + ".bar"

        output_file = open(output_filename, "w")

        # Process the options. Rely on baron to catch
        # and reset bad option values
        output_file.write("OPTIONS {\n")
        summary_found = False
        if len(solver_options):
            for key, val in iteritems(solver_options):
                if (key.lower() == 'summary'):
                    summary_found = True
                if key.endswith("Name"):
                    output_file.write(key + ": \"" + str(val) + "\";\n")
                else:
                    output_file.write(key + ": " + str(val) + ";\n")
        if not summary_found:
            # Default the 'summary' option to 0 so that no summary file
            # is generated in the directory where the user calls baron,
            # unless the user explicitly asked for one.
            output_file.write("Summary: 0;\n")
        output_file.write("}\n\n")

        if symbolic_solver_labels:
            v_labeler = AlphaNumericTextLabeler()
            c_labeler = AlphaNumericTextLabeler()
        elif labeler is None:
            v_labeler = NumericLabeler('x')
            c_labeler = NumericLabeler('c')
        else:
            v_labeler = c_labeler = labeler

        symbol_map = SymbolMap()
        symbol_map.default_labeler = v_labeler
        #sm_bySymbol = symbol_map.bySymbol

        # Cache the list of model blocks so we don't have to call
        # model.block_data_objects() many many times, which is slow
        # for indexed blocks
        all_blocks_list = list(
            model.block_data_objects(active=True,
                                     sort=sorter,
                                     descend_into=True))
        active_components_data_var = {}
        #for block in all_blocks_list:
        #    tmp = active_components_data_var[id(block)] = \
        #          list(obj for obj in block.component_data_objects(Var,
        #                                                           sort=sorter,
        #                                                           descend_into=False))
        #    create_symbols_func(symbol_map, tmp, labeler)

        # GAH: Not sure this is necessary, and also it would break for
        #      non-mutable indexed params so I am commenting out for now.
        #for param_data in active_components_data(block, Param, sort=sorter):
        #instead of checking if param_data._mutable:
        #if not param_data.is_constant():
        #    create_symbol_func(symbol_map, param_data, labeler)

        #symbol_map_variable_ids = set(symbol_map.byObject.keys())
        #object_symbol_dictionary = symbol_map.byObject

        #
        # Go through the objectives and constraints and generate
        # the output so that we can obtain the set of referenced
        # variables.
        #
        equation_section_stream = StringIO()
        referenced_variable_ids, branching_priorities_suffixes = \
            self._write_equations_section(
                model,
                equation_section_stream,
                all_blocks_list,
                active_components_data_var,
                symbol_map,
                c_labeler,
                output_fixed_variable_bounds,
                skip_trivial_constraints,
                sorter)

        #
        # BINARY_VARIABLES, INTEGER_VARIABLES, POSITIVE_VARIABLES, VARIABLES
        #

        BinVars = []
        IntVars = []
        PosVars = []
        Vars = []
        for vid in referenced_variable_ids:
            name = symbol_map.byObject[vid]
            var_data = symbol_map.bySymbol[name]()

            if var_data.is_continuous():
                if var_data.has_lb() and \
                   (self._get_bound(var_data.lb) >= 0):
                    TypeList = PosVars
                else:
                    TypeList = Vars
            elif var_data.is_binary():
                TypeList = BinVars
            elif var_data.is_integer():
                TypeList = IntVars
            else:
                assert False
            TypeList.append(name)

        if len(BinVars) > 0:
            BinVars.sort()
            output_file.write('BINARY_VARIABLES ')
            output_file.write(", ".join(BinVars))
            output_file.write(';\n\n')

        if len(IntVars) > 0:
            IntVars.sort()
            output_file.write('INTEGER_VARIABLES ')
            output_file.write(", ".join(IntVars))
            output_file.write(';\n\n')

        PosVars.append('ONE_VAR_CONST__')
        PosVars.sort()
        output_file.write('POSITIVE_VARIABLES ')
        output_file.write(", ".join(PosVars))
        output_file.write(';\n\n')

        if len(Vars) > 0:
            Vars.sort()
            output_file.write('VARIABLES ')
            output_file.write(", ".join(Vars))
            output_file.write(';\n\n')

        #
        # LOWER_BOUNDS
        #

        lbounds = {}
        for vid in referenced_variable_ids:
            name = symbol_map.byObject[vid]
            var_data = symbol_map.bySymbol[name]()

            if var_data.fixed:
                if output_fixed_variable_bounds:
                    var_data_lb = var_data.value
                else:
                    var_data_lb = None
            else:
                var_data_lb = None
                if var_data.has_lb():
                    var_data_lb = self._get_bound(var_data.lb)

            if var_data_lb is not None:
                name_to_output = symbol_map.getSymbol(var_data)
                lb_string_template = '%s: %' + self._precision_string + ';\n'
                lbounds[name_to_output] = lb_string_template % (name_to_output,
                                                                var_data_lb)

        if len(lbounds) > 0:
            output_file.write("LOWER_BOUNDS{\n")
            output_file.write("".join(lbounds[key]
                                      for key in sorted(lbounds.keys())))
            output_file.write("}\n\n")
        lbounds = None

        #
        # UPPER_BOUNDS
        #

        ubounds = {}
        for vid in referenced_variable_ids:
            name = symbol_map.byObject[vid]
            var_data = symbol_map.bySymbol[name]()

            if var_data.fixed:
                if output_fixed_variable_bounds:
                    var_data_ub = var_data.value
                else:
                    var_data_ub = None
            else:
                var_data_ub = None
                if var_data.has_ub():
                    var_data_ub = self._get_bound(var_data.ub)

            if var_data_ub is not None:
                name_to_output = symbol_map.getSymbol(var_data)
                ub_string_template = '%s: %' + self._precision_string + ';\n'
                ubounds[name_to_output] = ub_string_template % (name_to_output,
                                                                var_data_ub)

        if len(ubounds) > 0:
            output_file.write("UPPER_BOUNDS{\n")
            output_file.write("".join(ubounds[key]
                                      for key in sorted(ubounds.keys())))
            output_file.write("}\n\n")
        ubounds = None

        #
        # BRANCHING_PRIORITIES
        #

        # Specifying priorities requires that the pyomo model has established an
        # EXTERNAL, float suffix called 'branching_priorities' on the model
        # object, indexed by the relevant variable
        BranchingPriorityHeader = False
        for suffix in branching_priorities_suffixes:
            for var_data, priority in iteritems(suffix):
                if id(var_data) not in referenced_variable_ids:
                    continue
                if priority is not None:
                    if not BranchingPriorityHeader:
                        output_file.write('BRANCHING_PRIORITIES{\n')
                        BranchingPriorityHeader = True
                    name_to_output = symbol_map.getSymbol(var_data)
                    output_file.write(name_to_output + ': ' + str(priority) +
                                      ';\n')

        if BranchingPriorityHeader:
            output_file.write("}\n\n")

        #
        # Now write the objective and equations section
        #
        output_file.write(equation_section_stream.getvalue())

        #
        # STARTING_POINT
        #
        output_file.write('STARTING_POINT{\nONE_VAR_CONST__: 1;\n')
        tmp = {}
        string_template = '%s: %' + self._precision_string + ';\n'
        for vid in referenced_variable_ids:
            name = symbol_map.byObject[vid]
            var_data = symbol_map.bySymbol[name]()

            starting_point = var_data.value
            if starting_point is not None:
                var_name = symbol_map.getSymbol(var_data)
                tmp[var_name] = string_template % (var_name, starting_point)

        output_file.write("".join(tmp[key] for key in sorted(tmp.keys())))
        output_file.write('}\n\n')

        output_file.close()

        return output_filename, symbol_map
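
The writer above renders the whole objective-and-equations section into a StringIO first, so that it can learn which variables are actually referenced and declare only those; the buffered section is then flushed into the output file after the declarations. A toy illustration of that two-pass pattern; write_model, its equation format, and the variable names are invented.

from io import StringIO

def write_model(equations, output_file):
    # Pass 1: render equations into a buffer, collecting referenced variables.
    referenced = set()
    eq_stream = StringIO()
    for label, variables in equations:
        referenced.update(variables)
        eq_stream.write("%s: %s == 0;\n" % (label, " + ".join(variables)))
    # Pass 2: declarations first, then the buffered equations section.
    output_file.write("VARIABLES %s;\n\n" % ", ".join(sorted(referenced)))
    output_file.write(eq_stream.getvalue())

out = StringIO()
write_model([("e1", ["x1", "x2"]), ("e2", ["x2", "x3"])], out)
print(out.getvalue())
# VARIABLES x1, x2, x3;
#
# e1: x1 + x2 == 0;
# e2: x2 + x3 == 0;
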
Example #41
0
class SSHSession(Session):

    "Implements a :rfc:`4742` NETCONF session over SSH."

    def __init__(self, device_handler):
        capabilities = Capabilities(device_handler.get_capabilities())
        Session.__init__(self, capabilities)
        self._host_keys = paramiko.HostKeys()
        self._transport = None
        self._connected = False
        self._channel = None
        self._channel_id = None
        self._channel_name = None
        self._buffer = StringIO()
        # parsing-related, see _parse()
        self._device_handler = device_handler
        self._parsing_state10 = 0
        self._parsing_pos10 = 0
        self._parsing_pos11 = 0
        self._parsing_state11 = 0
        self._expchunksize = 0
        self._curchunksize = 0
        self._inendpos = 0
        self._size_num_list = []
        self._message_list = []

    def _parse(self):
        """Messages are delimited by MSG_DELIM. The buffer could have grown by
        a maximum of BUF_SIZE bytes every time this method is called. Retains
        state across method calls and if a byte has been read it will not be
        considered again."""
        return self._parse10()

    def _parse10(self):
        """Messages are delimited by MSG_DELIM. The buffer could have grown by
        a maximum of BUF_SIZE bytes every time this method is called. Retains
        state across method calls and if a chunk has been read it will not be
        considered again."""

        logger.debug("parsing netconf v1.0")
        buf = self._buffer
        buf.seek(self._parsing_pos10)
        if MSG_DELIM in buf.read().decode('UTF-8'):
            buf.seek(0)
            msg, _, remaining = buf.read().decode('UTF-8').partition(MSG_DELIM)
            msg = msg.strip()
            if sys.version < '3':
                self._dispatch_message(msg.encode())
            else:
                self._dispatch_message(msg)
            # create new buffer which contains remaining of old buffer
            self._buffer = StringIO()
            self._buffer.write(remaining.encode())
            self._parsing_pos10 = 0
            if len(remaining) > 0:
                # There could be another entire message in the
                # buffer, so we should try to parse again.
                logger.debug(
                    'Trying another round of parsing since there is still data'
                )
                self._parse10()
        else:
            # handle case that MSG_DELIM is split over two chunks
            self._parsing_pos10 = buf.tell() - MSG_DELIM_LEN
            if self._parsing_pos10 < 0:
                self._parsing_pos10 = 0

    def _parse11(self):
        logger.debug("parsing netconf v1.1")
        expchunksize = self._expchunksize
        curchunksize = self._curchunksize
        idle, instart, inmsg, inbetween, inend = range(5)
        state = self._parsing_state11
        inendpos = self._inendpos
        num_list = self._size_num_list
        MAX_STARTCHUNK_SIZE = 12  # \#+4294967295+\n
        pre = 'invalid base:1:1 frame'
        buf = self._buffer
        buf.seek(self._parsing_pos11)
        message_list = self._message_list  # a message is a list of chunks
        chunk_list = []  # a chunk is a list of characters

        should_recurse = False

        while True:
            x = buf.read(1)
            if not x:
                logger.debug('No more data to read')
                # Store the current chunk to the message list
                chunk = b''.join(chunk_list)
                message_list.append(textify(chunk))
                break  # done reading
            logger.debug('x: %s', x)
            if state == idle:
                if x == b'\n':
                    state = instart
                    inendpos = 1
                else:
                    logger.debug('%s (%s: expect newline)' % (pre, state))
                    raise Exception
            elif state == instart:
                if inendpos == 1:
                    if x == b'#':
                        inendpos += 1
                    else:
                        logger.debug('%s (%s: expect "#")' % (pre, state))
                        raise Exception
                elif inendpos == 2:
                    if x.isdigit():
                        inendpos += 1  # == 3 now #
                        num_list.append(x)
                    else:
                        logger.debug('%s (%s: expect digit)' % (pre, state))
                        raise Exception
                else:
                    if inendpos == MAX_STARTCHUNK_SIZE:
                        logger.debug('%s (%s: no. too long)' % (pre, state))
                        raise Exception
                    elif x == b'\n':
                        num = b''.join(num_list)
                        num_list = []  # Reset num_list
                        try:
                            num = int(num)
                        except:
                            logger.debug('%s (%s: invalid no.)' % (pre, state))
                            raise Exception
                        else:
                            state = inmsg
                            expchunksize = num
                            logger.debug('response length: %d' % expchunksize)
                            curchunksize = 0
                            inendpos += 1
                    elif x.isdigit():
                        inendpos += 1  # > 3 now #
                        num_list.append(x)
                    else:
                        logger.debug('%s (%s: expect digit)' % (pre, state))
                        raise Exception
            elif state == inmsg:
                chunk_list.append(x)
                curchunksize += 1
                chunkleft = expchunksize - curchunksize
                if chunkleft == 0:
                    inendpos = 0
                    state = inbetween
                    chunk = b''.join(chunk_list)
                    message_list.append(textify(chunk))
                    chunk_list = []  # Reset chunk_list
                    logger.debug('parsed new chunk: %s' % (chunk))
            elif state == inbetween:
                if inendpos == 0:
                    if x == b'\n': inendpos += 1
                    else:
                        logger.debug('%s (%s: expect newline)' % (pre, state))
                        raise Exception
                elif inendpos == 1:
                    if x == b'#': inendpos += 1
                    else:
                        logger.debug('%s (%s: expect "#")' % (pre, state))
                        raise Exception
                else:
                    inendpos += 1  # == 3 now #
                    if x == b'#':
                        state = inend
                    elif x.isdigit():
                        # More chunks
                        state = instart
                        num_list = []
                        num_list.append(x)
                    else:
                        logger.debug('%s (%s: expect "#")' % (pre, state))
                        raise Exception
            elif state == inend:
                if inendpos == 3:
                    if x == b'\n':
                        inendpos = 0
                        state = idle
                        logger.debug('dispatching message')
                        self._dispatch_message(''.join(message_list))
                        # reset
                        rest = buf.read()
                        buf = BytesIO()
                        buf.write(rest)
                        buf.seek(0)
                        message_list = []
                        self._message_list = message_list
                        chunk_list = []
                        expchunksize = chunksize = 0
                        parsing_state11 = idle
                        inendpos = parsing_pos11 = 0
                        # There could be another entire message in the
                        # buffer, so we should try to parse again.
                        should_recurse = True
                        break
                    else:
                        logger.debug('%s (%s: expect newline)' % (pre, state))
                        raise Exception
            else:
                logger.debug('%s (%s invalid state)' % (pre, state))
                raise Exception

        self._expchunksize = expchunksize
        self._curchunksize = curchunksize
        self._parsing_state11 = state
        self._inendpos = inendpos
        self._size_num_list = num_list
        self._buffer = buf
        self._parsing_pos11 = self._buffer.tell()
        logger.debug('parse11 ending ...')

        if should_recurse:
            logger.debug(
                'Trying another round of parsing since there is still data')
            self._parse11()
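
    # A minimal sketch of the RFC 6242 chunked framing that _parse11() decodes
    # (illustrative wire format, an assumption rather than data from this file):
    #
    #   \n#4\n                 chunk header: "\n#" + decimal size + "\n"
    #   <rpc                   exactly 4 bytes of chunk data
    #   \n#17\n                a further chunk may follow immediately
    #    message-id="1"/>
    #   \n##\n                 end-of-message marker
    #
    # The parser walks the buffer byte by byte: instart reads the size digits,
    # inmsg collects chunk data, inbetween decides whether another chunk or the
    # "##" terminator follows, and inend consumes the final newline before the
    # reassembled message is dispatched and the state returns to idle.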

    def load_known_hosts(self, filename=None):
        """Load host keys from an openssh :file:`known_hosts`-style file. Can
        be called multiple times.

        If *filename* is not specified, looks in the default locations, i.e. :file:`~/.ssh/known_hosts` and, on Windows, :file:`~/ssh/known_hosts`.
        """

        if filename is None:
            filename = os.path.expanduser('~/.ssh/known_hosts')
            try:
                self._host_keys.load(filename)
            except IOError:
                # for windows
                filename = os.path.expanduser('~/ssh/known_hosts')
                try:
                    self._host_keys.load(filename)
                except IOError:
                    pass
        else:
            self._host_keys.load(filename)

    def close(self):
        if self._transport.is_active():
            self._transport.close()

        # Wait for the transport thread to close.
        while self.is_alive() and (self is not threading.current_thread()):
            self.join(10)

        self._channel = None
        self._connected = False

    # REMEMBER to update transport.rst if sig. changes, since it is hardcoded there
    def connect(self,
                host,
                port=830,
                timeout=None,
                unknown_host_cb=default_unknown_host_cb,
                username=None,
                password=None,
                key_filename=None,
                allow_agent=True,
                hostkey_verify=True,
                look_for_keys=True,
                ssh_config=None,
                sock_fd=None):
        """Connect via SSH and initialize the NETCONF session. First attempts the publickey authentication method and then password authentication.

        To disable attempting publickey authentication altogether, call with *allow_agent* and *look_for_keys* as `False`.

        *host* is the hostname or IP address to connect to

        *port* is by default 830, but some devices use the default SSH port of 22 so this may need to be specified

        *timeout* is an optional timeout for socket connect

        *unknown_host_cb* is called when the server host key is not recognized. It takes two arguments, the hostname and the fingerprint (see the signature of :func:`default_unknown_host_cb`)

        *username* is the username to use for SSH authentication

        *password* is the password used if using password authentication, or the passphrase to use for unlocking keys that require it

        *key_filename* is a filename from which the private key to be used can be loaded

        *allow_agent* enables querying SSH agent (if found) for keys

        *hostkey_verify* enables hostkey verification from ~/.ssh/known_hosts

        *look_for_keys* enables looking in the usual locations for ssh keys (e.g. :file:`~/.ssh/id_*`)

        *ssh_config* enables parsing of an OpenSSH configuration file when set to its path, e.g. :file:`~/.ssh/config`, or to True (in which case :file:`~/.ssh/config` is used).

        *sock_fd* is an already open socket file descriptor to be used for this connection. Useful for NETCONF outbound SSH. Use host=None together with a valid sock_fd number.
        """
        if not (host or sock_fd):
            raise SSHError("Missing host or socket fd")

        # Optionally, parse .ssh/config
        config = {}
        if ssh_config is True:
            ssh_config = "~/.ssh/config" if sys.platform != "win32" else "~/ssh/config"
        if ssh_config is not None:
            config = paramiko.SSHConfig()
            with open(os.path.expanduser(ssh_config)) as config_file:
                config.parse(config_file)
            config = config.lookup(host)
            host = config.get("hostname", host)
            if username is None:
                username = config.get("user")
            if key_filename is None:
                key_filename = config.get("identityfile")
            if hostkey_verify:
                userknownhostsfile = config.get("userknownhostsfile")
                if userknownhostsfile:
                    self.load_known_hosts(
                        os.path.expanduser(userknownhostsfile))

        if username is None:
            username = getpass.getuser()

        if sock_fd is None:
            if config.get("proxycommand"):
                sock = paramiko.proxy.ProxyCommand(config.get("proxycommand"))
            else:
                for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                              socket.SOCK_STREAM):
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                        sock.settimeout(timeout)
                    except socket.error:
                        continue
                    try:
                        sock.connect(sa)
                    except socket.error:
                        sock.close()
                        continue
                    break
                else:
                    raise SSHError("Could not open socket to %s:%s" %
                                   (host, port))
        else:
            if sys.version_info[0] < 3:
                s = socket.fromfd(int(sock_fd), socket.AF_INET,
                                  socket.SOCK_STREAM)
                sock = socket.socket(socket.AF_INET,
                                     socket.SOCK_STREAM,
                                     _sock=s)
            else:
                sock = socket.fromfd(int(sock_fd), socket.AF_INET,
                                     socket.SOCK_STREAM)
            sock.settimeout(timeout)

        t = self._transport = paramiko.Transport(sock)
        t.set_log_channel(logger.name)
        if config.get("compression") == 'yes':
            t.use_compression()

        try:
            t.start_client()
        except paramiko.SSHException:
            raise SSHError('Negotiation failed')

        # host key verification
        server_key = t.get_remote_server_key()

        fingerprint = _colonify(hexlify(server_key.get_fingerprint()))

        if hostkey_verify:
            known_host = self._host_keys.check(host, server_key)
            if not known_host and not unknown_host_cb(host, fingerprint):
                raise SSHUnknownHostError(host, fingerprint)

        if key_filename is None:
            key_filenames = []
        elif isinstance(key_filename, (str, bytes)):
            key_filenames = [key_filename]
        else:
            key_filenames = key_filename

        self._auth(username, password, key_filenames, allow_agent,
                   look_for_keys)

        self._connected = True  # there was no error authenticating
        # TODO: leopoul: Review, test, and if needed rewrite this part
        subsystem_names = self._device_handler.get_ssh_subsystem_names()
        for subname in subsystem_names:
            c = self._channel = self._transport.open_session()
            self._channel_id = c.get_id()
            channel_name = "%s-subsystem-%s" % (subname, str(self._channel_id))
            c.set_name(channel_name)
            try:
                c.invoke_subsystem(subname)
            except paramiko.SSHException as e:
                logger.info("%s (subsystem request rejected)", e)
                handle_exception = self._device_handler.handle_connection_exceptions(
                    self)
                # Ignore the exception, since we continue to try the different
                # subsystem names until we find one that can connect.
                # Each vendor's device handler gets a chance to handle the exception here.
                if not handle_exception:
                    continue
            self._channel_name = c.get_name()
            self._post_connect()
            return
        raise SSHError(
            "Could not open connection, possibly due to unacceptable"
            " SSH subsystem name.")

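    # A hedged usage sketch for connect(); the session and device-handler names
    # below are illustrative assumptions, not taken from this file:
    #
    #   session = SSHSession(device_handler)
    #   session.connect(host="192.0.2.1", port=830,
    #                   username="admin", password="secret",
    #                   hostkey_verify=False)   # skip known_hosts verification
    #
    # For an already-connected socket (e.g. NETCONF call-home), pass host=None
    # together with a valid descriptor in sock_fd, as the docstring notes.
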
    def _auth(self, username, password, key_filenames, allow_agent,
              look_for_keys):
        saved_exception = None

        for key_filename in key_filenames:
            for cls in (paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey):
                try:
                    key = cls.from_private_key_file(key_filename, password)
                    logger.debug(
                        "Trying key %s from %s" %
                        (hexlify(key.get_fingerprint()), key_filename))
                    self._transport.auth_publickey(username, key)
                    return
                except Exception as e:
                    saved_exception = e
                    logger.debug(e)

        if allow_agent:
            for key in paramiko.Agent().get_keys():
                try:
                    logger.debug("Trying SSH agent key %s" %
                                 hexlify(key.get_fingerprint()))
                    self._transport.auth_publickey(username, key)
                    return
                except Exception as e:
                    saved_exception = e
                    logger.debug(e)

        keyfiles = []
        if look_for_keys:
            rsa_key = os.path.expanduser("~/.ssh/id_rsa")
            dsa_key = os.path.expanduser("~/.ssh/id_dsa")
            ecdsa_key = os.path.expanduser("~/.ssh/id_ecdsa")
            if os.path.isfile(rsa_key):
                keyfiles.append((paramiko.RSAKey, rsa_key))
            if os.path.isfile(dsa_key):
                keyfiles.append((paramiko.DSSKey, dsa_key))
            if os.path.isfile(ecdsa_key):
                keyfiles.append((paramiko.ECDSAKey, ecdsa_key))
            # look in ~/ssh/ for windows users:
            rsa_key = os.path.expanduser("~/ssh/id_rsa")
            dsa_key = os.path.expanduser("~/ssh/id_dsa")
            ecdsa_key = os.path.expanduser("~/ssh/id_ecdsa")
            if os.path.isfile(rsa_key):
                keyfiles.append((paramiko.RSAKey, rsa_key))
            if os.path.isfile(dsa_key):
                keyfiles.append((paramiko.DSSKey, dsa_key))
            if os.path.isfile(ecdsa_key):
                keyfiles.append((paramiko.ECDSAKey, ecdsa_key))

        for cls, filename in keyfiles:
            try:
                key = cls.from_private_key_file(filename, password)
                logger.debug("Trying discovered key %s in %s" %
                             (hexlify(key.get_fingerprint()), filename))
                self._transport.auth_publickey(username, key)
                return
            except Exception as e:
                saved_exception = e
                logger.debug(e)

        if password is not None:
            try:
                self._transport.auth_password(username, password)
                return
            except Exception as e:
                saved_exception = e
                logger.debug(e)

        if saved_exception is not None:
            # need pep-3134 to do this right
            raise AuthenticationError(repr(saved_exception))

        raise AuthenticationError("No authentication methods available")

    def run(self):
        chan = self._channel
        q = self._q

        def start_delim(data_len):
            return '\n#%s\n' % (data_len)

        try:
            while True:
                # select on a paramiko ssh channel object does not ever return it in the writable list, so channels don't exactly emulate the socket api
                r, w, e = select([chan], [], [], TICK)
                # will wake up every TICK seconds to check if there is something to send, sooner if there is something to read (since select returns chan in the readable list)
                if r:
                    data = chan.recv(BUF_SIZE)
                    if data:
                        self._buffer.write(data)
                        if self._server_capabilities:
                            if 'urn:ietf:params:netconf:base:1.1' in self._server_capabilities and 'urn:ietf:params:netconf:base:1.1' in self._client_capabilities:
                                logger.debug(
                                    "Selecting netconf:base:1.1 for encoding")
                                self._parse11()
                            elif 'urn:ietf:params:netconf:base:1.0' in self._server_capabilities or 'urn:ietf:params:xml:ns:netconf:base:1.0' in self._server_capabilities or 'urn:ietf:params:netconf:base:1.0' in self._client_capabilities:
                                logger.debug(
                                    "Selecting netconf:base:1.0 for encoding")
                                self._parse10()
                            else:
                                raise Exception
                        else:
                            self._parse10()  # HELLO msg uses EOM markers.
                    else:
                        raise SessionCloseError(self._buffer.getvalue())
                if not q.empty() and chan.send_ready():
                    logger.debug("Sending message")
                    data = q.get()
                    try:
                        # send a HELLO msg using v1.0 EOM markers.
                        validated_element(
                            data,
                            tags=
                            '{urn:ietf:params:xml:ns:netconf:base:1.0}hello')
                        data = "%s%s" % (data, MSG_DELIM)
                    except XMLError:
                        # this is not a HELLO msg
                        # we publish v1.1 support
                        if 'urn:ietf:params:netconf:base:1.1' in self._client_capabilities:
                            if self._server_capabilities:
                                if 'urn:ietf:params:netconf:base:1.1' in self._server_capabilities:
                                    # send using v1.1 chunked framing
                                    data = "%s%s%s" % (start_delim(
                                        len(data)), data, END_DELIM)
                                elif 'urn:ietf:params:netconf:base:1.0' in self._server_capabilities or 'urn:ietf:params:xml:ns:netconf:base:1.0' in self._server_capabilities:
                                    # send using v1.0 EOM markers
                                    data = "%s%s" % (data, MSG_DELIM)
                                else:
                                    raise Exception
                            else:
                                logger.debug(
                                    'HELLO msg was sent, but server capabilities are still not known'
                                )
                                raise Exception
                        # we publish only v1.0 support
                        else:
                            # send using v1.0 EOM markers
                            data = "%s%s" % (data, MSG_DELIM)
                    finally:
                        logger.debug("Sending: %s", data)
                        while data:
                            n = chan.send(data)
                            if n <= 0:
                                raise SessionCloseError(
                                    self._buffer.getvalue(), data)
                            data = data[n:]
        except Exception as e:
            logger.debug("Broke out of main loop, error=%r", e)
            self._dispatch_error(e)
            self.close()
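
    # A sketch of the two framings selected above; MSG_DELIM and END_DELIM are
    # module-level constants, and the literal values shown here are the standard
    # NETCONF delimiters (an assumption, since their definitions are not shown):
    #
    #   base:1.0 end-of-message framing:  data + "]]>]]>"
    #   base:1.1 chunked framing:         "\n#<len>\n" + data + "\n##\n"
    #
    # i.e. start_delim(len(data)) + data + END_DELIM for base:1.1 peers, and
    # data + MSG_DELIM for base:1.0 peers (and always for the initial <hello>).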

    @property
    def transport(self):
        "Underlying `paramiko.Transport <http://www.lag.net/paramiko/docs/paramiko.Transport-class.html>`_ object. This makes it possible to call methods like :meth:`~paramiko.Transport.set_keepalive` on it."
        return self._transport
Example #42
0
import pyutilib.misc
from six import StringIO

output = StringIO()
pyutilib.misc.setup_redirect(output)

try:
    # Run the runner
    from run_path_constraint import results, m as model
finally:
    pyutilib.misc.reset_redirect()

# Report the result
for line in output.getvalue().splitlines():
    if line.startswith('EXIT'):
        print(line)

model.obj.display()
model.u.display()
model.x1.display()
model.x2.display()
model.x3.display()
Example #43
0
    def c_extract(self, name, sub, check_input=True, check_broadcast=True):
        sio = StringIO()
        fail = sub['fail']
        nd = self.ndim
        print("""
        assert(py_%(name)s->ob_refcnt >= 2); // There should be at least one ref from the container object,
        // and one ref from the local scope.

        if (CudaNdarray_Check(py_%(name)s))
        {
            //fprintf(stderr, "c_extract CNDA object w refcnt %%p %%i\\n", py_%(name)s, (py_%(name)s->ob_refcnt));
            %(name)s = (CudaNdarray*)py_%(name)s;
            //std::cerr << "c_extract " << %(name)s << '\\n';
        """ % locals(),
              file=sio)
        if (check_input):
            print("""
                if (%(name)s->nd != %(nd)s)
                {
                    PyErr_Format(PyExc_RuntimeError,
                                 "c_extract: Some CudaNdarray has rank %%i, it was supposed to have rank %(nd)s",
                                 %(name)s->nd);
                    %(name)s = NULL;
                    %(fail)s;
                }
                //std::cerr << "c_extract " << %(name)s << " nd check passed\\n";
            """ % locals(),
                  file=sio)
            for i, b in enumerate(self.broadcastable):
                if b and check_broadcast:
                    print("""
                if (CudaNdarray_HOST_DIMS(%(name)s)[%(i)s] != 1)
                {
                    PyErr_Format(PyExc_RuntimeError,
                                 "c_extract: Some CudaNdarray has dim %%i on broadcastable dimension %%i",
                                 CudaNdarray_HOST_DIMS(%(name)s)[%(i)s], %(i)s);
                    %(name)s = NULL;
                    %(fail)s;
                }
                //std::cerr << "c_extract " << %(name)s << "dim check %(i)s passed\\n";
                //std::cerr << "c_extract " << %(name)s << "checking bcast %(i)s <" << %(name)s->str<< ">\\n";
                //std::cerr << "c_extract " << %(name)s->str[%(i)s] << "\\n";
                if (CudaNdarray_HOST_STRIDES(%(name)s)[%(i)s])
                {
                    //std::cerr << "c_extract bad stride detected...\\n";
                    PyErr_Format(PyExc_RuntimeError,
                                 "c_extract: Some CudaNdarray has a nonzero stride %%i on a broadcastable dimension %%i",
                                 CudaNdarray_HOST_STRIDES(%(name)s)[%(i)s], %(i)s);
                    %(name)s = NULL;
                    %(fail)s;
                }
                //std::cerr << "c_extract " << %(name)s << "bcast check %(i)s passed\\n";
                    """ % locals(),
                          file=sio)
            print("""
                assert(%(name)s);
                Py_INCREF(py_%(name)s);
            }
            else if (py_%(name)s == Py_None)
            {
                PyErr_SetString(PyExc_TypeError,
                                "expected a CudaNdarray, not None");
                %(name)s = NULL;
                %(fail)s;
            }
            else
            {
                //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %%p %%i\\n", py_%(name)s, (py_%(name)s->ob_refcnt));
                PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
                %(name)s = NULL;
                %(fail)s;
            }
            //std::cerr << "c_extract done " << %(name)s << '\\n';
            """ % locals(),
                  file=sio)
        else:
            print("""
                assert(%(name)s);
                Py_INCREF(py_%(name)s);
            }
            """ % locals(),
                  file=sio)
        # print sio.getvalue()
        return sio.getvalue()
Example #44
0
    def test_no_color(self):
        with force_color_support:
            out = StringIO()
            call_command('show_urls', '--no-color', stdout=out)
            self.output = out.getvalue()
            self.assertNotIn('\x1b', self.output)
Example #45
0
class SympyGenerator(with_metaclass(MetaClass, VariableAnalyzer)):
    def __init__(self, model, **kwargs):

        self.has_random = False
        VariableAnalyzer.__init__(self, model, **kwargs)
        # for attr in ['state', 'parameter', 'input']:
        #     lst = [k for k, v in self.variables.items() if v.type == attr]
        #     lst.sort()
        #     setattr(self, attr+'s', lst)
        self.ode_src = StringIO()
        self.symbol_src = StringIO()
        self.ostream = self.ode_src
        self.equations = []

        for key in dir(self):
            if key[:8] == "_handle_":
                setattr(self, key[1:], getattr(self, key))

        self.generate()
        self.get_symbols()
        self.sympy_dct = {}
        self.latex_src = None
        self.compile_sympy()

        self.to_latex()

    @property
    def sympy_src(self):
        return self.symbol_src.getvalue() + self.ode_src.getvalue()

    def get_symbols(self):
        if self.has_random:
            self.symbol_src.write("N = Function('N')%c" % self.newline)
        self.symbol_src.write("t = Symbol('t')%c" % self.newline)
        for key, val in self.variables.items():
            src = "{0} = Symbol('{0}'){1}".format(key, self.newline)
            self.symbol_src.write(src)
        for key in self.signature:
            src = "{0} = Symbol('{0}'){1}".format(key, self.newline)
            self.symbol_src.write(src)

    def compile_sympy(self):
        try:
            exec(self.sympy_src, globals(), self.sympy_dct)
        except:
            for n, line in enumerate(self.sympy_src.split("\n")):
                try:
                    exec(line, globals(), self.sympy_dct)
                except IndentationError as e:
                    raise err.NeuralSymPyCodeGenError(
                        "SymPy Compilation Failed for model"
                        f" '{self.model.__class__.__name__}' on:"
                        f" Line {n}: \n{line}"
                        "\n This is likely an issue with using 'elif' statement"
                        " , avoid 'elif' and prefer binary masking operators"
                        " like '(x>0)*x' in general.") from e
                except Exception as e:
                    raise err.NeuralSymPyCodeGenError(
                        "SymPy Compilation Failed for model"
                        f" '{self.model.__class__.__name__}' on:"
                        f" Line {n}: \n{line}") from e

    def to_latex(self):
        cond = lambda v: v.type == "state" and v.integral is None
        states = [k for k, v in self.variables.items() if cond(v)]
        states.sort()
        states_src = ",~".join([latex(self.sympy_dct[x]) for x in states])
        params = [
            k for k, v in self.variables.items() if v.type == "parameter"
        ]
        params.sort()
        params_src = ",~".join([latex(self.sympy_dct[x]) for x in params])
        template_src = r"\mbox{State Variables: }%s\\\mbox{Parameters: }%s\\"
        self.latex_src = template_src % (states_src, params_src)

        self.latex_src = ""
        self.latex_src += r"\begin{eqnarray}"
        for eq in self.equations:
            try:
                tmp = latex(self.sympy_dct[eq], mul_symbol="dot")
            except Exception as e:
                raise err.NeuralSymPyCodeGenError(
                    "Failed to Generate Sympy Code for model"
                    f" {self.model.__class__.__name__}") from e
            self.latex_src += tmp.replace("=", " &=& ") + r"\\"
        self.latex_src += r"\end{eqnarray}"

    def _handle_call_function(self, ins):
        narg = int(ins.arg)

        # hacky way to handle keyword arguments
        if self.kwargs and self.var[-(narg + 1)] == (self.kwargs + ".pop"):
            arg = self.var[-narg][1:-1]
            self.var[-(narg + 1)] = arg
            new_arg = "%s" % arg
            self.signature.append(new_arg)

        else:
            args = [] if narg == 0 else [str(x) for x in self.var[-narg:]]
            func_name = self.var[-(narg + 1)]
            func_globals = get_function_globals(self.model.ode)
            pyfunc = eval(func_name, func_globals)
            sympyfunc = self.pyfunc_to_sympyfunc.get(pyfunc)
            if sympyfunc is not None:
                self.var[-(narg + 1)] = sympyfunc(self, args)
            else:
                self.var[-(narg + 1)] = "{}({})".format(
                    func_name, ",".join(args))

        if narg:
            del self.var[-narg:]

    def _handle_load_attr(self, ins):
        key = ins.argval
        if self.var[-1] == "self":
            if key[:2] == "d_":
                key = key.split("d_")[-1]
                depth = 1
            else:
                depth = 0
            if self.variables[key].integral:
                while self.variables[key].integral is not None:
                    depth += 1
                    key = self.variables[key].integral
            if depth > 0:
                key = "Derivative(%s%s)" % (key, ", t" * depth)
            self.var[-1] = key
        else:
            self.var[-1] += "." + key

    def _handle_store_attr(self, ins):
        self.handle_load_attr(ins)
        rval, lval = self.var[-2:]
        del self.var[-1]
        if rval != lval:
            eqn = "Eq(%s, %s)" % (lval, rval)
            self.equations.append("eqn_%d" % len(self.equations))
            self.var[-1] = "%s = %s" % (self.equations[-1], eqn)
        else:
            del self.var[-1]

    def _handle_store_fast(self, ins):
        key = ins.argval

        prefix, indent = "", ""

        if ins.argval == self.var[-1]:
            del self.var[-1]
            return
        elif self.variables[key].type == "local":

            eqn = "Eq(%s, %s)" % (key, self.var[-1])
            self.equations.append("eqn_%d" % len(self.equations))
            indent = " " * (self.space + self.indent)
            prefix = "with evaluate(False):" + self.newline
            self.var[-1] = "{}{}{} = {}".format(prefix, indent,
                                                self.equations[-1], eqn)
        else:
            self.var[-1] = "{} = {}".format(key, self.var[-1])

    def handle_pop_jump_if_true(self, ins):
        self.jump_targets.append(ins.arg)
        self.enter_indent = True
        self.var[-1] = "if not UnevaluatedExpr({0}):".format(self.var[-1])

    def handle_pop_jump_if_false(self, ins):
        self.jump_targets.append(ins.arg)
        self.enter_indent = True
        self.var[-1] = "if UnevaluatedExpr({0}):".format(self.var[-1])

    def handle_jump_forward(self, ins):
        self.leave_indent = True
        self.output_statement()

        target, old_target = ins.argval, self.jump_targets.pop()

        if target != old_target:
            self.var.append("")
            self.enter_indent = True
            self.jump_targets.append(target)
        else:
            self.var.append("")
            self.output_statement()

    def handle_compare_op(self, ins):
        """Convert Comparison Operation to Heaviside Expressions"""
        op = ins.argval
        if op in [">", ">="]:
            diff = f"{self.var[-2]} - {self.var[-1]}"
        elif op in ["<", "<="]:
            diff = f"{self.var[-1]} - {self.var[-2]}"
        else:
            raise ValueError(
                f"Comparison with Operator '{op}' not understood.")

        thres = 1 if "=" in op else 0

        self.var[-2] = f"Heaviside({diff}, {thres})"
        del self.var[-1]
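
    # Worked example of the rewrite above (a sketch, not taken from this file):
    # for "v > v_t" the stack holds [v, v_t], so diff is "v - v_t" and, because
    # the operator contains no "=", thres is 0, giving "Heaviside(v - v_t, 0)";
    # "v >= v_t" gives "Heaviside(v - v_t, 1)" instead.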

    def _handle_return_value(self, ins):
        self.var[-1] = ""

    def _py2sympy(*source_funcs):
        def wrapper(func):
            func._source_funcs = source_funcs
            return func

        return wrapper

    def _random_func(func):
        """
        A decorator for registering random functions
        """
        @wraps(func)
        def wrap(self, args):
            self.has_random = True
            return func(self, args)

        return wrap

    def _generate_sympy_func(self, func, args):
        return "%s(%s)" % (func, ", ".join(args))

    @_py2sympy(np.exp)
    def _np_exp(self, args):
        return self._generate_sympy_func("exp", args)

    @_py2sympy(np.log)
    def _np_log(self, args):
        return self._generate_sympy_func("log", args)

    @_py2sympy(np.power)
    def _np_power(self, args):
        return self._generate_sympy_func("pow", args)

    @_py2sympy(np.cbrt)
    def _np_cbrt(self, args):
        return self._generate_sympy_func("cbrt", args)

    @_py2sympy(np.sqrt)
    def _np_sqrt(self, args):
        return self._generate_sympy_func("sqrt", args)

    @_py2sympy(random.gauss, np.random.normal)
    @_random_func
    def _random_gauss(self, args):

        return "N({0}, {1})".format(args[0], args[1])
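
# A hedged usage sketch (assuming `model` is an object that VariableAnalyzer
# accepts; the attributes read below are defined by the class above):
#
#   gen = SympyGenerator(model)
#   print(gen.sympy_src)   # symbol definitions followed by the ODE source
#   print(gen.latex_src)   # eqnarray environment assembled by to_latex()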
Example #46
0
def rst_table(elts):
    """Print out a RST-style table."""
    cols = StringIO()
    ncol, widths = colify(elts, output=cols, tty=True)
    header = ' '.join('=' * (w - 1) for w in widths)
    return '%s\n%s%s' % (header, cols.getvalue(), header)
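
# A hypothetical usage sketch (colify() comes from the surrounding project, and
# the column layout depends on the terminal width, so the exact output varies):
#
#   print(rst_table(['alpha', 'beta', 'gamma']))
#
# which prints the elements between two rows of "=" characters, the RST table
# rule built from the column widths that colify() reports.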
Example #47
0
    def test_list_languages(self):
        output = StringIO()
        call_command('list_addons', stdout=output)
        self.assertIn('msgmerge', output.getvalue())
Example #48
0
def do_update_changelog(ctx, target, cur_version, new_version, dry_run=False):
    """
    Actually perform the operations needed to update the changelog; this
    method is meant to be used by other tasks rather than called directly.
    """
    # get the name of the current release tag
    target_tag = get_release_tag_string(target, cur_version)

    # get the diff from HEAD
    target_path = os.path.join(ROOT, target)
    cmd = 'git log --pretty=%s {}... {}'.format(target_tag, target_path)
    diff_lines = ctx.run(cmd, hide='out').stdout.split('\n')

    # for each PR get the title, we'll use it to populate the changelog
    endpoint = GITHUB_API_URL + '/repos/DataDog/integrations-core/pulls/{}'
    pr_numbers = parse_pr_numbers(diff_lines)
    print("Found {} PRs merged since tag: {}".format(len(pr_numbers),
                                                     target_tag))

    entries = []
    for pr_num in pr_numbers:
        try:
            response = urlopen(endpoint.format(pr_num))
        except Exception as e:
            sys.stderr.write("Unable to fetch info for PR #{}: {}\n".format(
                pr_num, e))
            continue

        payload = json.loads(response.read())
        if NO_CHANGELOG_LABEL in (l.get('name')
                                  for l in payload.get('labels', [])):
            # No changelog entry for this PR
            print("Skipping PR #{} from changelog".format(pr_num))
            continue

        author = payload.get('user', {}).get('login')
        author_url = payload.get('user', {}).get('html_url')

        entry = ChangelogEntry(pr_num, payload.get('title'),
                               payload.get('html_url'), author, author_url,
                               is_contributor(payload))

        entries.append(entry)

    # store the new changelog in memory
    new_entry = StringIO()

    # the header contains version and date
    header = "### {} / {}\n".format(new_version,
                                    datetime.now().strftime("%Y-%m-%d"))
    new_entry.write(header)

    # one bullet point for each PR
    new_entry.write("\n")
    for entry in entries:
        thanknote = ""
        if entry.is_contributor:
            thanknote = " Thanks [{}]({}).".format(entry.author,
                                                   entry.author_url)
        new_entry.write("* {}. See [#{}]({}).{}\n".format(
            entry.title, entry.number, entry.url, thanknote))
    new_entry.write("\n")

    # read the old contents
    changelog_path = os.path.join(ROOT, target, "CHANGELOG.md")
    with open(changelog_path, 'r') as f:
        old = f.readlines()

    # write the new changelog in memory
    changelog = StringIO()

    # preserve the title
    changelog.write("".join(old[:2]))

    # prepend the new changelog to the old contents
    # make the command idempotent
    if header not in old:
        changelog.write(new_entry.getvalue())

    # append the rest of the old changelog
    changelog.write("".join(old[2:]))

    # print on the standard out in case of a dry run
    if dry_run:
        print(changelog.getvalue())
        sys.exit(0)

    # overwrite the old changelog
    with open(changelog_path, 'w') as f:
        f.write(changelog.getvalue())
Example #49
0
    def test_list_command(self):
        add_document()
        output = StringIO()
        call_command('list_memory', stdout=output)
        self.assertIn('test', output.getvalue())
Example #50
0
    def solve(self, *args, **kwds):
        """
        Solve a model via the GAMS Python API.

        Keyword Arguments
        -----------------
        tee=False: bool
            Output GAMS log to stdout.
        logfile=None: str
            Filename to output GAMS log to a file.
        load_solutions=True: bool
            Load solution into model. If False, the results
            object will contain the solution data.
        keepfiles=False: bool
            Keep temporary files. Equivalent of DebugLevel.KeepFiles.
            Summary of temp files can be found in _gams_py_gjo0.pf
        tmpdir=None: str
            Specify directory path for storing temporary files.
            A directory of this name will be created if it doesn't exist.
            By default uses the system default temporary path.
        report_timing=False: bool
            Print timing reports for presolve, solver, postsolve, etc.
        io_options: dict
            Options that get passed to the writer.
            See writer in pyomo.repn.plugins.gams_writer for details.
            Updated with any other keywords passed to solve method.
        """

        # Make sure available() doesn't crash
        self.available()

        from gams import GamsWorkspace, DebugLevel
        from gams.workspace import GamsExceptionExecution

        if len(args) != 1:
            raise ValueError('Exactly one model must be passed '
                             'to solve method of GAMSSolver.')
        model = args[0]

        # self.options are default for each run, overwritten by kwds
        options = dict()
        options.update(self.options)
        options.update(kwds)

        load_solutions = options.pop("load_solutions", True)
        tee            = options.pop("tee", False)
        logfile        = options.pop("logfile", None)
        keepfiles      = options.pop("keepfiles", False)
        tmpdir         = options.pop("tmpdir", None)
        report_timing  = options.pop("report_timing", False)
        io_options     = options.pop("io_options", {})

        # Pass remaining keywords to writer, which will handle
        # any unrecognized arguments
        io_options.update(options)

        initial_time = time.time()

        ####################################################################
        # Presolve
        ####################################################################

        # Create StringIO stream to pass to gams_writer, on which the
        # model file will be written. The writer also passes this StringIO
        # back, but output_file is defined in advance for clarity.
        output_file = StringIO()
        if isinstance(model, IBlock):
            # Kernel blocks have slightly different write method
            smap_id = model.write(filename=output_file,
                                  format=ProblemFormat.gams,
                                  _called_by_solver=True,
                                  **io_options)
            symbolMap = getattr(model, "._symbol_maps")[smap_id]
        else:
            (_, smap_id) = model.write(filename=output_file,
                                       format=ProblemFormat.gams,
                                       io_options=io_options)
            symbolMap = model.solutions.symbol_map[smap_id]

        presolve_completion_time = time.time()
        if report_timing:
            print("      %6.2f seconds required for presolve" %
                  (presolve_completion_time - initial_time))

        ####################################################################
        # Apply solver
        ####################################################################

        # IMPORTANT - only delete the whole tmpdir if the solver was the one
        # that made the directory. Otherwise, just delete the files the solver
        # made, if not keepfiles. That way the user can select a directory
        # they already have, like the current directory, without having to
        # worry about the rest of the contents of that directory being deleted.
        newdir = True
        if tmpdir is not None and os.path.exists(tmpdir):
            newdir = False

        ws = GamsWorkspace(debug=DebugLevel.KeepFiles if keepfiles
                           else DebugLevel.Off,
                           working_directory=tmpdir)

        t1 = ws.add_job_from_string(output_file.getvalue())

        try:
            with OutputStream(tee=tee, logfile=logfile) as output_stream:
                t1.run(output=output_stream)
        except GamsExceptionExecution as e:
            try:
                if e.rc == 3:
                    # Execution Error
                    check_expr_evaluation(model, symbolMap, 'direct')
            finally:
                # Always name working directory or delete files,
                # regardless of any errors.
                if keepfiles:
                    print("\nGAMS WORKING DIRECTORY: %s\n" %
                          ws.working_directory)
                elif tmpdir is not None:
                    # Garbage collect all references to t1.out_db
                    # So that .gdx file can be deleted
                    t1 = rec = rec_lo = rec_hi = None
                    file_removal_gams_direct(tmpdir, newdir)
                raise
        except:
            # Catch other errors and remove files first
            if keepfiles:
                print("\nGAMS WORKING DIRECTORY: %s\n" % ws.working_directory)
            elif tmpdir is not None:
                # Garbage collect all references to t1.out_db
                # So that .gdx file can be deleted
                t1 = rec = rec_lo = rec_hi = None
                file_removal_gams_direct(tmpdir, newdir)
            raise

        solve_completion_time = time.time()
        if report_timing:
            print("      %6.2f seconds required for solver" %
                  (solve_completion_time - presolve_completion_time))

        ####################################################################
        # Postsolve
        ####################################################################

        # import suffixes must be on the top-level model
        if isinstance(model, IBlock):
            model_suffixes = list(comp.storage_key for comp \
                                  in pyomo.core.kernel.suffix.\
                                  import_suffix_generator(model,
                                                          active=True,
                                                          descend_into=False))
        else:
            model_suffixes = list(name for (name,comp) \
                                  in pyomo.core.base.suffix.\
                                  active_import_suffix_generator(model))
        extract_dual = ('dual' in model_suffixes)
        extract_rc = ('rc' in model_suffixes)

        results = SolverResults()
        results.problem.name = t1.name
        results.problem.lower_bound = t1.out_db["OBJEST"].find_record().value
        results.problem.upper_bound = t1.out_db["OBJEST"].find_record().value
        results.problem.number_of_variables = \
            t1.out_db["NUMVAR"].find_record().value
        results.problem.number_of_constraints = \
            t1.out_db["NUMEQU"].find_record().value
        results.problem.number_of_nonzeros = \
            t1.out_db["NUMNZ"].find_record().value
        results.problem.number_of_binary_variables = None
        # Includes binary vars:
        results.problem.number_of_integer_variables = \
            t1.out_db["NUMDVAR"].find_record().value
        results.problem.number_of_continuous_variables = \
            t1.out_db["NUMVAR"].find_record().value \
            - t1.out_db["NUMDVAR"].find_record().value
        results.problem.number_of_objectives = 1 # required by GAMS writer
        obj = list(model.component_data_objects(Objective, active=True))
        assert len(obj) == 1, 'Only one objective is allowed.'
        obj = obj[0]
        objctvval = t1.out_db["OBJVAL"].find_record().value
        if obj.is_minimizing():
            results.problem.sense = ProblemSense.minimize
            results.problem.upper_bound = objctvval
        else:
            results.problem.sense = ProblemSense.maximize
            results.problem.lower_bound = objctvval

        results.solver.name = "GAMS " + str(self.version())

        # Init termination condition to None to give preference to this first
        # block of code, only set certain TC's below if it's still None
        results.solver.termination_condition = None
        results.solver.message = None

        solvestat = t1.out_db["SOLVESTAT"].find_record().value
        if solvestat == 1:
            results.solver.status = SolverStatus.ok
        elif solvestat == 2:
            results.solver.status = SolverStatus.ok
            results.solver.termination_condition = TerminationCondition.maxIterations
        elif solvestat == 3:
            results.solver.status = SolverStatus.ok
            results.solver.termination_condition = TerminationCondition.maxTimeLimit
        elif solvestat == 5:
            results.solver.status = SolverStatus.ok
            results.solver.termination_condition = TerminationCondition.maxEvaluations
        elif solvestat == 7:
            results.solver.status = SolverStatus.aborted
            results.solver.termination_condition = TerminationCondition.licensingProblems
        elif solvestat == 8:
            results.solver.status = SolverStatus.aborted
            results.solver.termination_condition = TerminationCondition.userInterrupt
        elif solvestat == 10:
            results.solver.status = SolverStatus.error
            results.solver.termination_condition = TerminationCondition.solverFailure
        elif solvestat == 11:
            results.solver.status = SolverStatus.error
            results.solver.termination_condition = TerminationCondition.internalSolverError
        elif solvestat == 4:
            results.solver.status = SolverStatus.warning
            results.solver.message = "Solver quit with a problem (see LST file)"
        elif solvestat in (9, 12, 13):
            results.solver.status = SolverStatus.error
        elif solvestat == 6:
            results.solver.status = SolverStatus.unknown

        results.solver.return_code = 0
        # Not sure if this value is actually user time
        # "the elapsed time it took to execute a solve statement in total"
        results.solver.user_time = t1.out_db["ETSOLVE"].find_record().value
        results.solver.system_time = None
        results.solver.wallclock_time = None
        results.solver.termination_message = None

        soln = Solution()

        modelstat = t1.out_db["MODELSTAT"].find_record().value
        if modelstat == 1:
            results.solver.termination_condition = TerminationCondition.optimal
            soln.status = SolutionStatus.optimal
        elif modelstat == 2:
            results.solver.termination_condition = TerminationCondition.locallyOptimal
            soln.status = SolutionStatus.locallyOptimal
        elif modelstat in [3, 18]:
            results.solver.termination_condition = TerminationCondition.unbounded
            soln.status = SolutionStatus.unbounded
        elif modelstat in [4, 5, 6, 10, 19]:
            results.solver.termination_condition = TerminationCondition.infeasible
            soln.status = SolutionStatus.infeasible
        elif modelstat == 7:
            results.solver.termination_condition = TerminationCondition.feasible
            soln.status = SolutionStatus.feasible
        elif modelstat == 8:
            # 'Integer solution model found'
            results.solver.termination_condition = TerminationCondition.optimal
            soln.status = SolutionStatus.optimal
        elif modelstat == 9:
            results.solver.termination_condition = TerminationCondition.intermediateNonInteger
            soln.status = SolutionStatus.other
        elif modelstat == 11:
            # Should be handled above, if modelstat and solvestat both
            # indicate a licensing problem
            if results.solver.termination_condition is None:
                results.solver.termination_condition = TerminationCondition.licensingProblems
            soln.status = SolutionStatus.error
        elif modelstat in [12, 13]:
            if results.solver.termination_condition is None:
                results.solver.termination_condition = TerminationCondition.error
            soln.status = SolutionStatus.error
        elif modelstat == 14:
            if results.solver.termination_condition is None:
                results.solver.termination_condition = TerminationCondition.noSolution
            soln.status = SolutionStatus.unknown
        elif modelstat in [15, 16, 17]:
            # Having to do with CNS models,
            # not sure what to make of status descriptions
            results.solver.termination_condition = TerminationCondition.optimal
            soln.status = SolutionStatus.unsure
        else:
            # This is just a backup catch, all cases are handled above
            soln.status = SolutionStatus.error

        soln.gap = abs(results.problem.upper_bound \
                       - results.problem.lower_bound)

        for sym, ref in iteritems(symbolMap.bySymbol):
            obj = ref()
            if isinstance(model, IBlock):
                # Kernel variables have no 'parent_component'
                if obj.ctype is IObjective:
                    soln.objective[sym] = {'Value': objctvval}
                if obj.ctype is not IVariable:
                    continue
            else:
                if obj.parent_component().type() is Objective:
                    soln.objective[sym] = {'Value': objctvval}
                if obj.parent_component().type() is not Var:
                    continue
            rec = t1.out_db[sym].find_record()
            # obj.value = rec.level
            soln.variable[sym] = {"Value": rec.level}
            if extract_rc and not math.isnan(rec.marginal):
                # Do not set marginals to nan
                # model.rc[obj] = rec.marginal
                soln.variable[sym]['rc'] = rec.marginal

        if extract_dual:
            for c in model.component_data_objects(Constraint, active=True):
                if c.body.is_fixed() or \
                   (not (c.has_lb() or c.has_ub())):
                    # the constraint was not sent to GAMS
                    continue
                sym = symbolMap.getSymbol(c)
                if c.equality:
                    rec = t1.out_db[sym].find_record()
                    if not math.isnan(rec.marginal):
                        # model.dual[c] = rec.marginal
                        soln.constraint[sym] = {'dual': rec.marginal}
                    else:
                        # Solver didn't provide marginals,
                        # nothing else to do here
                        break
                else:
                    # Inequality, assume if 2-sided that only
                    # one side's marginal is nonzero
                    # Negate marginal for _lo equations
                    marg = 0
                    if c.lower is not None:
                        rec_lo = t1.out_db[sym + '_lo'].find_record()
                        marg -= rec_lo.marginal
                    if c.upper is not None:
                        rec_hi = t1.out_db[sym + '_hi'].find_record()
                        marg += rec_hi.marginal
                    if not math.isnan(marg):
                        # model.dual[c] = marg
                        soln.constraint[sym] = {'dual': marg}
                    else:
                        # Solver didn't provide marginals,
                        # nothing else to do here
                        break

        results.solution.insert(soln)

        if keepfiles:
            print("\nGAMS WORKING DIRECTORY: %s\n" % ws.working_directory)
        elif tmpdir is not None:
            # Garbage collect all references to t1.out_db
            # So that .gdx file can be deleted
            t1 = rec = rec_lo = rec_hi = None
            file_removal_gams_direct(tmpdir, newdir)

        ####################################################################
        # Finish with results
        ####################################################################

        results._smap_id = smap_id
        results._smap = None
        if isinstance(model, IBlock):
            if len(results.solution) == 1:
                results.solution(0).symbol_map = \
                    getattr(model, "._symbol_maps")[results._smap_id]
                results.solution(0).default_variable_value = \
                    self._default_variable_value
                if load_solutions:
                    model.load_solution(results.solution(0))
            else:
                assert len(results.solution) == 0
            # see the hack in the write method
            # we don't want this to stick around on the model
            # after the solve
            assert len(getattr(model, "._symbol_maps")) == 1
            delattr(model, "._symbol_maps")
            del results._smap_id
            if load_solutions and \
               (len(results.solution) == 0):
                logger.error("No solution is available")
        else:
            if load_solutions:
                model.solutions.load_from(results)
                results._smap_id = None
                results.solution.clear()
            else:
                results._smap = model.solutions.symbol_map[smap_id]
                model.solutions.delete_symbol_map(smap_id)

        postsolve_completion_time = time.time()
        if report_timing:
            print("      %6.2f seconds required for postsolve" %
                  (postsolve_completion_time - solve_completion_time))
            print("      %6.2f seconds required total" %
                  (postsolve_completion_time - initial_time))

        return results
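
    # A hedged usage sketch for this direct GAMS interface; the factory call and
    # the io_options key follow Pyomo's usual conventions and are assumptions,
    # not taken from this file:
    #
    #   from pyomo.environ import SolverFactory
    #   opt = SolverFactory('gams', solver_io='python')
    #   results = opt.solve(model, tee=True, report_timing=True,
    #                       io_options={'solver': 'conopt'})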
Example #51
0
    def __call__(self, _, __, event_dict):
        # Initialize lazily to prevent import side-effects.
        if self._init_colorama:
            if self._force_colors:
                colorama.deinit()
                colorama.init(strip=False)
            else:
                colorama.init()

            self._init_colorama = False
        sio = StringIO()

        ts = event_dict.pop("timestamp", None)
        if ts is not None:
            sio.write(
                # can be a number if timestamp is UNIXy
                self._styles.timestamp
                + str(ts)
                + self._styles.reset
                + " "
            )
        level = event_dict.pop("level", None)
        if level is not None:
            sio.write(
                "["
                + self._level_to_color[level]
                + _pad(level, self._longest_level)
                + self._styles.reset
                + "] "
            )

        # force event to str for compatibility with standard library
        event = event_dict.pop("event")
        if not PY2 or not isinstance(event, string_types):
            event = str(event)

        if event_dict:
            event = _pad(event, self._pad_event) + self._styles.reset + " "
        else:
            event += self._styles.reset
        sio.write(self._styles.bright + event)

        logger_name = event_dict.pop("logger", None)
        if logger_name is not None:
            sio.write(
                "["
                + self._styles.logger_name
                + self._styles.bright
                + logger_name
                + self._styles.reset
                + "] "
            )

        stack = event_dict.pop("stack", None)
        exc = event_dict.pop("exception", None)
        sio.write(
            " ".join(
                self._styles.kv_key
                + key
                + self._styles.reset
                + "="
                + self._styles.kv_value
                + self._repr(event_dict[key])
                + self._styles.reset
                for key in sorted(event_dict.keys())
            )
        )

        if stack is not None:
            sio.write("\n" + stack)
            if exc is not None:
                sio.write("\n\n" + "=" * 79 + "\n")
        if exc is not None:
            sio.write("\n" + exc)

        return sio.getvalue()
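
    # The rendered line therefore looks roughly like this (ANSI colors omitted;
    # the values are illustrative):
    #
    #   2024-01-01T00:00:00 [info     ] payment accepted        [billing] amount=42
    #
    # i.e. timestamp, padded level, padded event, optional logger name, then the
    # remaining event_dict keys as sorted key=value pairs, followed by any stack
    # trace or exception text on subsequent lines.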
Example #52
0
def pformat_msg(obj):
    fp = StringIO()
    pprint.pprint(obj, fp)
    return fp.getvalue()
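
# Example usage (a sketch; pprint sorts dict keys by default, and the trailing
# newline comes from pprint writing a complete line into the StringIO buffer):
#
#   >>> pformat_msg({'b': 2, 'a': 1})
#   "{'a': 1, 'b': 2}\n"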
Example #53
0
    def _rest_request(self, path, method='GET', args=None, body=None, \
                                                                headers=None):
        """Rest request main function
        :param path: path within tree
        :type path: str
        :param method: method to be implemented
        :type method: str
        :param args: the arguments for method
        :type args: dict
        :param body: body payload for the rest call
        :type body: dict
        :param headers: provide additional headers
        :type headers: dict
        :returns: returns a RestResponse object
        """
        headers = self._get_req_headers(headers)
        reqpath = path.replace('//', '/')

        if body is not None:
            if isinstance(body, dict) or isinstance(body, list):
                headers['Content-Type'] = 'application/json'
                body = json.dumps(body)
            elif isinstance(body, bytes):
                headers['Content-Type'] = 'application/octet-stream'
                body = body
            else:
                headers['Content-Type'] = 'application/x-www-form-urlencoded'
                body = urlencode(body)

            if method == 'PUT':
                resp = self._rest_request(path=path)

                try:
                    if resp.getheader('content-encoding') == 'gzip':
                        buf = StringIO()
                        gfile = gzip.GzipFile(mode='wb', fileobj=buf)

                        try:
                            gfile.write(str(body))
                        finally:
                            gfile.close()

                        compresseddata = buf.getvalue()
                        if compresseddata:
                            data = bytearray()
                            data.extend(buffer(compresseddata))
                            body = data
                except BaseException as excp:
                    LOGGER.error('Error occurred while compressing body: %s', excp)
                    raise

            headers['Content-Length'] = len(body)

        if args:
            if method == 'GET':
                reqpath += '?' + urlencode(args)
            elif method == 'PUT' or method == 'POST' or method == 'PATCH':
                LOGGER.warning('For POST, PUT and PATCH methods, the provided "args" parameter "{}" is ignored.'
                               .format(args))
                if not body:
                    LOGGER.warning('Use the "body" parameter to supply the request payload.')

        restreq = RestRequest(reqpath, method=method, body=body)

        attempts = 0
        restresp = None
        cause_exception = None
        while attempts <= self._max_retry:
            if LOGGER.isEnabledFor(logging.DEBUG):
                try:
                    logbody = None
                    if restreq.body:
                        if restreq.body[0] == '{':
                            logbody = restreq.body
                        else:
                            raise ValueError('Body of message is binary')
                    LOGGER.debug('HTTP REQUEST: %s\n\tPATH: %s\n\tBODY: %s'% \
                                    (restreq.method, restreq.path, logbody))
                except:
                    LOGGER.debug('HTTP REQUEST: %s\n\tPATH: %s\n\tBODY: %s'% \
                                (restreq.method, restreq.path, 'binary body'))
            attempts = attempts + 1
            LOGGER.info('Attempt %s of %s', attempts, path)

            try:
                while True:
                    if self._conn is None:
                        self.__init_connection()

                    self._conn.request(method.upper(), reqpath, body=body, \
                                                                headers=headers)
                    self._conn_count += 1

                    if sys.version_info < (3, 3):
                        inittime = time.clock()
                    else:
                        inittime = time.perf_counter()
                    resp = self._conn.getresponse()
                    if sys.version_info < (3, 3):
                        endtime = time.clock()
                    else:
                        endtime = time.perf_counter()
                    LOGGER.info('Response Time for %s to %s: %s seconds.' %
                                (method, reqpath, str(endtime-inittime)))

                    if resp.getheader('Connection') == 'close':
                        self.__destroy_connection()

                    # redirect handling
                    if resp.status not in list(range(300, 399)) or \
                                                            resp.status == 304:
                        break
                    newloc = resp.getheader('location')
                    newurl = urlparse(newloc)
                    if resp.status == 303:
                        method = 'GET'
                        body = None
                        for h in ['Content-Type', 'Content-Length']:
                            if h in headers:
                                del headers[h]

                    reqpath = newurl.path
                    self.__init_connection(newurl)

                restresp = RestResponse(restreq, resp)

                try:
                    if restresp.getheader('content-encoding') == "gzip":
                        compressedfile = BytesIO(restresp.read)
                        decompressedfile = gzip.GzipFile(fileobj=compressedfile)
                        restresp.text = decompressedfile.read().decode("utf-8")
                except Exception as excp:
                    LOGGER.error('Error occurred while decompressing body: %s',
                                 excp)
                    raise DecompressResponseError()
            except Exception as excp:
                if isinstance(excp, DecompressResponseError):
                    raise

                if not cause_exception:
                    cause_exception = excp
                LOGGER.info('Retrying %s [%s]'% (path, excp))
                time.sleep(1)

                self.__init_connection()
                continue
            else:
                break

        if attempts <= self._max_retry:
            if LOGGER.isEnabledFor(logging.DEBUG):
                headerstr = ''

                if restresp is not None:
                    for header in restresp.getheaders():
                        headerstr += '\t' + header[0] + ': ' + header[1] + '\n'

                    try:
                        LOGGER.debug('HTTP RESPONSE for %s:\nCode: %s\nHeaders:\n' \
                                 '%s\nBody Response of %s: %s'%\
                                 (restresp.request.path,
                                str(restresp._http_response.status)+ ' ' + \
                                restresp._http_response.reason,
                                headerstr, restresp.request.path, restresp.read))
                    except:
                        LOGGER.debug('HTTP RESPONSE:\nCode:%s', (restresp))
                else:
                    LOGGER.debug('HTTP RESPONSE: <No HTTP Response obtained>')

            return restresp
        else:
            raise_from(RetriesExhaustedError(), cause_exception)
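For reference, here is a minimal standalone sketch of the body-encoding rules the method above applies before sending a request (dict/list to JSON, bytes to octet-stream, anything else form-encoded); the helper name and sample payload are illustrative only and not part of the library.

import json
from urllib.parse import urlencode

def encode_request_body(body, headers):
    """Return the wire-ready body, updating headers in place. Illustrative helper."""
    if isinstance(body, (dict, list)):
        headers['Content-Type'] = 'application/json'
        body = json.dumps(body)
    elif isinstance(body, bytes):
        headers['Content-Type'] = 'application/octet-stream'
    else:
        headers['Content-Type'] = 'application/x-www-form-urlencoded'
        body = urlencode(body)
    # Content-Length should be the byte length of the payload actually sent.
    raw = body if isinstance(body, bytes) else body.encode('utf-8')
    headers['Content-Length'] = str(len(raw))
    return body

headers = {}
encode_request_body({'AssetTag': 'web-server-01'}, headers)
# headers now holds Content-Type: application/json plus the matching Content-Length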
Example #54
0
    def test_dump_command(self):
        add_document()
        output = StringIO()
        call_command('dump_memory', stdout=output)
        data = json.loads(output.getvalue())
        self.assertEqual(data, [TEST_DOCUMENT])
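The snippet only shows the test side; a plausible shape for the `dump_memory` management command it drives is sketched below, assuming a placeholder `Document` model with a `payload` field (neither is shown in the original).

import json
from django.core.management.base import BaseCommand
from myapp.models import Document   # placeholder model; the real one is not shown


class Command(BaseCommand):
    help = "Dump the stored documents as JSON to stdout."

    def handle(self, *args, **options):
        # call_command('dump_memory', stdout=...) captures whatever is written
        # to self.stdout, which is what the test above relies on.
        docs = [doc.payload for doc in Document.objects.all()]   # payload field assumed
        self.stdout.write(json.dumps(docs))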
Example #55
0
class TestOutput(unittest.TestCase):
    def setUp(self):
        self.mock_ctx = MockContext()
        self.io = StringIO()

    def tearDown(self):
        self.io.close()

    def test_cli_ctx_type_error(self):
        with self.assertRaises(TypeError):
            OutputProducer(cli_ctx=object())

    def test_out_json_valid(self):
        """
        The JSON output when the input is a dict should be the dict serialized to JSON
        """
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        output_producer.out(CommandResultItem({
            'active': True,
            'id': '0b1f6472'
        }),
                            formatter=format_json,
                            out_file=self.io)
        self.assertEqual(
            normalize_newlines(self.io.getvalue()),
            normalize_newlines("""{
  "active": true,
  "id": "0b1f6472"
}
"""))

    def test_out_json_from_ordered_dict(self):
        """
        The JSON output when the input is OrderedDict should be serialized to JSON
        """
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        output_producer.out(CommandResultItem(
            OrderedDict({
                'active': True,
                'id': '0b1f6472'
            })),
                            formatter=format_json,
                            out_file=self.io)
        self.assertEqual(
            normalize_newlines(self.io.getvalue()),
            normalize_newlines("""{
  "active": true,
  "id": "0b1f6472"
}
"""))

    def test_out_json_byte(self):
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        output_producer.out(CommandResultItem({
            'active': True,
            'contents': b'0b1f6472'
        }),
                            formatter=format_json,
                            out_file=self.io)
        self.assertEqual(
            normalize_newlines(self.io.getvalue()),
            normalize_newlines("""{
  "active": true,
  "contents": "0b1f6472"
}
"""))

    def test_out_json_byte_empty(self):
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        output_producer.out(CommandResultItem({
            'active': True,
            'contents': b''
        }),
                            formatter=format_json,
                            out_file=self.io)
        self.assertEqual(
            normalize_newlines(self.io.getvalue()),
            normalize_newlines("""{
  "active": true,
  "contents": ""
}
"""))

    # TABLE output tests

    def test_out_table(self):
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        obj = OrderedDict()
        obj['active'] = True
        obj['val'] = '0b1f6472'
        obj['lun'] = 0
        output_producer.out(CommandResultItem(obj),
                            formatter=format_table,
                            out_file=self.io)
        self.assertEqual(
            normalize_newlines(self.io.getvalue()),
            normalize_newlines("""Active    Lun    Val
--------  -----  --------
True      0      0b1f6472
"""))

    def test_out_table_list_of_lists(self):
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        obj = [['a', 'b'], ['c', 'd']]
        output_producer.out(CommandResultItem(obj),
                            formatter=format_table,
                            out_file=self.io)
        self.assertEqual(
            normalize_newlines(self.io.getvalue()),
            normalize_newlines("""Column1    Column2
---------  ---------
a          b
c          d
"""))

    def test_out_table_complex_obj(self):
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        obj = OrderedDict()
        obj['name'] = 'qwerty'
        obj['val'] = '0b1f6472qwerty'
        obj['sub'] = {'1'}
        result_item = CommandResultItem(obj)
        output_producer.out(result_item,
                            formatter=format_table,
                            out_file=self.io)
        self.assertEqual(
            normalize_newlines(self.io.getvalue()),
            normalize_newlines("""Name    Val
------  --------------
qwerty  0b1f6472qwerty
"""))

    def test_out_table_no_query_no_transformer_order(self):
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        obj = {
            'name': 'qwerty',
            'val': '0b1f6472qwerty',
            'active': True,
            'sub': '0b1f6472'
        }
        result_item = CommandResultItem(obj,
                                        table_transformer=None,
                                        is_query_active=False)
        output_producer.out(result_item,
                            formatter=format_table,
                            out_file=self.io)
        # Should be alphabetical order as no table transformer and query is not active.
        self.assertEqual(
            normalize_newlines(self.io.getvalue()),
            normalize_newlines("""Active    Name    Sub       Val
--------  ------  --------  --------------
True      qwerty  0b1f6472  0b1f6472qwerty
"""))

    def test_out_table_no_query_yes_transformer_order(self):
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        obj = {
            'name': 'qwerty',
            'val': '0b1f6472qwerty',
            'active': True,
            'sub': '0b1f6472'
        }

        def transformer(r):
            return OrderedDict([('Name', r['name']), ('Val', r['val']),
                                ('Active', r['active']), ('Sub', r['sub'])])

        result_item = CommandResultItem(obj,
                                        table_transformer=transformer,
                                        is_query_active=False)
        output_producer.out(result_item,
                            formatter=format_table,
                            out_file=self.io)
        # Should be table transformer order
        self.assertEqual(
            normalize_newlines(self.io.getvalue()),
            normalize_newlines("""Name    Val             Active    Sub
------  --------------  --------  --------
qwerty  0b1f6472qwerty  True      0b1f6472
"""))

    def test_out_table_no_query_yes_jmespath_table_transformer(self):
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        obj = {
            'name': 'qwerty',
            'val': '0b1f6472qwerty',
            'active': True,
            'sub': '0b1f6472'
        }

        result_item = CommandResultItem(
            obj,
            table_transformer='{Name:name, Val:val, Active:active}',
            is_query_active=False)
        output_producer.out(result_item,
                            formatter=format_table,
                            out_file=self.io)
        # Should be table transformer order
        self.assertEqual(
            normalize_newlines(self.io.getvalue()),
            normalize_newlines("""Name    Val             Active
------  --------------  --------
qwerty  0b1f6472qwerty  True
"""))

    def test_out_table_with_number(self):
        output_producer = OutputProducer(cli_ctx=self.mock_ctx)
        obj = OrderedDict()
        obj['Sku'] = '6.10'
        output_producer.out(CommandResultItem(obj),
                            formatter=format_table,
                            out_file=self.io)
        self.assertEqual(normalize_newlines(self.io.getvalue()),
                         normalize_newlines("""Sku
-----
6.10
"""))

    # TSV output tests
    def test_output_format_dict(self):
        obj = {}
        obj['A'] = 1
        obj['B'] = 2
        result = format_tsv(CommandResultItem(obj))
        self.assertEqual(result, '1\t2\n')

    def test_output_format_dict_sort(self):
        obj = {}
        obj['B'] = 1
        obj['A'] = 2
        result = format_tsv(CommandResultItem(obj))
        self.assertEqual(result, '2\t1\n')

    def test_output_format_ordereddict_not_sorted(self):
        obj = OrderedDict()
        obj['B'] = 1
        obj['A'] = 2
        result = format_tsv(CommandResultItem(obj))
        self.assertEqual(result, '1\t2\n')

    def test_output_format_ordereddict_list_not_sorted(self):
        obj1 = OrderedDict()
        obj1['B'] = 1
        obj1['A'] = 2

        obj2 = OrderedDict()
        obj2['A'] = 3
        obj2['B'] = 4
        result = format_tsv(CommandResultItem([obj1, obj2]))
        self.assertEqual(result, '1\t2\n3\t4\n')
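The TSV tests assert that plain dicts are emitted in sorted key order while OrderedDicts keep insertion order; the sketch below is a simplified stand-in with that behaviour (an illustration only, not knack's actual format_tsv).

from collections import OrderedDict

def format_tsv_sketch(result):
    """Emit tab-separated values: sorted keys for dicts, insertion order for OrderedDicts."""
    rows = result if isinstance(result, list) else [result]
    lines = []
    for row in rows:
        keys = list(row) if isinstance(row, OrderedDict) else sorted(row)
        lines.append('\t'.join(str(row[k]) for k in keys))
    return '\n'.join(lines) + '\n'

assert format_tsv_sketch({'B': 1, 'A': 2}) == '2\t1\n'                    # sorted keys
assert format_tsv_sketch(OrderedDict([('B', 1), ('A', 2)])) == '1\t2\n'   # insertion order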
Example #56
0
    def do_request(self, req, status=None, expect_errors=None):
        """
        Executes the given webob Request (``req``), with the expected
        ``status``.  Generally :meth:`~webtest.TestApp.get` and
        :meth:`~webtest.TestApp.post` are used instead.

        To use this::

            req = webtest.TestRequest.blank('url', ...args...)
            resp = app.do_request(req)

        .. note::

            You can pass any keyword arguments to
            ``TestRequest.blank()``, which will be set on the request.
            These can be arguments like ``content_type``, ``accept``, etc.

        """

        errors = StringIO()
        req.environ['wsgi.errors'] = errors
        script_name = req.environ.get('SCRIPT_NAME', '')
        if script_name and req.path_info.startswith(script_name):
            req.path_info = req.path_info[len(script_name):]

        # set framework hooks
        req.environ['paste.testing'] = True
        req.environ['paste.testing_variables'] = {}

        # set request cookies
        self.cookiejar.add_cookie_header(utils._RequestCookieAdapter(req))

        # verify wsgi compatibility
        app = lint.middleware(self.app) if self.lint else self.app

        # FIXME: should it be an option to not catch exc_info?
        res = req.get_response(app, catch_exc_info=True)

        # be sure to decode the content
        res.decode_content()

        # set a few handy attributes
        res._use_unicode = self.use_unicode
        res.request = req
        res.app = app
        res.test_app = self

        # We do this to make sure the app_iter is exhausted:
        try:
            res.body
        except TypeError:  # pragma: no cover
            pass
        res.errors = errors.getvalue()

        for name, value in req.environ['paste.testing_variables'].items():
            if hasattr(res, name):
                raise ValueError(
                    "paste.testing_variables contains the variable %r, but "
                    "the response object already has an attribute by that "
                    "name" % name)
            setattr(res, name, value)
        if not expect_errors:
            self._check_status(status, res)
            self._check_errors(res)

        # merge cookies back in
        self.cookiejar.extract_cookies(utils._ResponseCookieAdapter(res),
                                       utils._RequestCookieAdapter(req))

        return res
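The `paste.testing_variables` hook seen above lets the application under test expose internals to the test client; a small self-contained sketch of how values stored there surface as attributes on the webtest response follows (the tiny WSGI app and the `template_name` key are illustrative).

import webtest

def wsgi_app(environ, start_response):
    # Only present when the app is driven by webtest's do_request().
    testing_vars = environ.get('paste.testing_variables')
    if testing_vars is not None:
        testing_vars['template_name'] = 'index.html'
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

app = webtest.TestApp(wsgi_app)
resp = app.get('/')
assert resp.body == b'hello'
assert resp.template_name == 'index.html'   # injected via paste.testing_variables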
Example #57
0
 def c_code(self, node, nodename, inp, out, sub):
     itemsize_x = np.dtype(node.inputs[0].dtype).itemsize
     worksize_x = np.dtype(work_dtype(node.inputs[0].dtype)).itemsize
     itemsize_b = np.dtype(node.inputs[1].dtype).itemsize
     itemsize_y_idx = np.dtype(node.inputs[2].dtype).itemsize
     itemsize_nll = np.dtype(node.outputs[0].dtype).itemsize
     itemsize_sm = np.dtype(node.outputs[1].dtype).itemsize
     itemsize_am = np.dtype(node.outputs[2].dtype).itemsize
     x, b, y_idx = inp
     nll, sm, am = out
     fail = sub['fail']
     ctx = sub['params']
     k_var = "k_xent_sm_1hot_bias_%(nodename)s" % locals()
     err_check = """
         if (err != GA_NO_ERROR) {
             PyErr_Format(PyExc_RuntimeError,
                          "gpuarray error: %(k_var)s: %%s.",
                          GpuKernel_error(&%(k_var)s, err));
             %(fail)s;
         }
     """ % locals()
     sync = ""
     if config.gpuarray.sync:
         sync = """
         err = GpuArray_sync(&%(z)s->ga);
         %(err_check)s
         """ % locals()
     sio = StringIO()
     print("""
     if (PyGpuArray_DIMS(%(x)s)[0] !=
         PyGpuArray_DIMS(%(y_idx)s)[0])
     {
         PyErr_SetString(PyExc_ValueError,
                         "dimension mismatch in x,y_idx arguments");
         %(fail)s;
     }
     if (PyGpuArray_DIMS(%(x)s)[1] != PyGpuArray_DIMS(%(b)s)[0])
     {
         PyErr_SetString(PyExc_ValueError,
                         "dimension mismatch in x,b arguments");
         %(fail)s;
     }
     if (theano_prep_output(&%(nll)s, 1, PyGpuArray_DIMS(%(y_idx)s), %(x)s->ga.typecode, GA_C_ORDER, %(ctx)s)) %(fail)s
     if (theano_prep_output(&%(sm)s, 2, PyGpuArray_DIMS(%(x)s), %(x)s->ga.typecode, GA_C_ORDER, %(ctx)s)) %(fail)s
     if (theano_prep_output(&%(am)s, 1, PyGpuArray_DIMS(%(y_idx)s), %(y_idx)s->ga.typecode, GA_C_ORDER, %(ctx)s)) %(fail)s
     {
         size_t n_blocks = std::min(PyGpuArray_DIM(%(x)s, 0), (size_t)4096);
         size_t n_threads = std::min(PyGpuArray_DIM(%(x)s, 1), (size_t)256);
         size_t n_shared = n_threads * %(worksize_x)s;
  //TODO: launch more threads per row and do parallel sum and max reductions
         int err = k_xent_sm_1hot_bias_call(
             1, &n_blocks, &n_threads, n_shared,
             PyGpuArray_DIMS(%(x)s)[0],
             PyGpuArray_DIMS(%(x)s)[1],
             %(x)s->ga.data, %(x)s->ga.offset,
             PyGpuArray_STRIDE(%(x)s, 0) / %(itemsize_x)s,
             PyGpuArray_STRIDE(%(x)s, 1) / %(itemsize_x)s,
             %(b)s->ga.data, %(b)s->ga.offset,
             PyGpuArray_STRIDE(%(b)s, 0) / %(itemsize_b)s,
             %(y_idx)s->ga.data, %(y_idx)s->ga.offset,
             PyGpuArray_STRIDE(%(y_idx)s, 0) / %(itemsize_y_idx)s,
             %(nll)s->ga.data, %(nll)s->ga.offset,
             PyGpuArray_STRIDE(%(nll)s, 0) / %(itemsize_nll)s,
             %(sm)s->ga.data, %(sm)s->ga.offset,
             PyGpuArray_STRIDE(%(sm)s, 0) / %(itemsize_sm)s,
             PyGpuArray_STRIDE(%(sm)s, 1) / %(itemsize_sm)s,
             %(am)s->ga.data, %(am)s->ga.offset,
             PyGpuArray_STRIDE(%(am)s, 0) / %(itemsize_am)s);
         %(err_check)s
         %(sync)s
     }
     """ % locals(), file=sio)
     return sio.getvalue()
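The pattern used above, stripped of the GPU details: generated source is built by printing a template into a StringIO and filling the placeholders with `% locals()`. The names in this sketch are illustrative.

from io import StringIO

def render_kernel_stub(kname, n_threads):
    """Render a trivial C stub using the StringIO + % locals() templating pattern."""
    sio = StringIO()
    print("""
    // generated stub for %(kname)s
    #define N_THREADS %(n_threads)s
    void %(kname)s(void) { /* ... */ }
    """ % locals(), file=sio)
    return sio.getvalue()

print(render_kernel_stub('k_xent_sm_1hot_bias_demo', 256))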
Example #58
0
def cgitb_error_log(monitor):
    string_buf = StringIO()
    cgitb_hook(file=string_buf, format="text")
    monitor.logger.log(string_buf.getvalue(), level=SandeshLevel.SYS_ERR)
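A self-contained sketch of the capture step above: cgitb.Hook with a StringIO target formats the current traceback as text instead of writing HTML to stderr. The `monitor.logger` sink used above belongs to the surrounding project and is not reproduced here.

import cgitb
from io import StringIO

def capture_traceback_text():
    """Return the cgitb text report for the exception currently being handled."""
    buf = StringIO()
    hook = cgitb.Hook(file=buf, format="text")
    try:
        1 / 0
    except ZeroDivisionError:
        hook.handle()            # formats sys.exc_info() into buf
    return buf.getvalue()

report = capture_traceback_text()
assert 'ZeroDivisionError' in report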
Example #59
0
    def test_called_command_with_no_jobs_wrong_date(self):
        out = StringIO()
        sys.stdout = out
        try:
            call_command("send_weekly_mailing", stdout=out)
            self.assertEqual("False\n", out.getvalue())
        finally:
            # restore the real stdout so later tests are not affected
            sys.stdout = sys.__stdout__
Example #60
0
    def gpu_kernels(self, node, nodename):
        dtype_x = node.inputs[0].dtype
        dtype_b = node.inputs[1].dtype
        dtype_y_idx = node.inputs[2].dtype
        work_x = work_dtype(dtype_x)
        work_b = work_dtype(dtype_b)
        load_x = load_w(dtype_x)
        load_b = load_w(dtype_b)
        write_x = write_w(dtype_x)
        write_b = write_w(dtype_b)
        flags = Kernel.get_flags(dtype_x, dtype_b, dtype_y_idx)
        type_x = gpuarray.dtype_to_ctype(dtype_x)
        type_b = gpuarray.dtype_to_ctype(dtype_b)
        work_x = gpuarray.dtype_to_ctype(work_x)
        type_y_idx = gpuarray.dtype_to_ctype(dtype_y_idx)
        kname = "k_xent_sm_1hot_bias"
        k_var = "k_xent_sm_1hot_bias_" + nodename
        if node.inputs[0].type.context.kind != b'cuda':
            f = ''
        else:
            f = '' if dtype_x == 'float64' else 'f'
        params = [
            gpuarray.SIZE, gpuarray.SIZE, gpuarray.GpuArray, gpuarray.SIZE,
            gpuarray.SSIZE, gpuarray.SSIZE, gpuarray.GpuArray, gpuarray.SIZE,
            gpuarray.SSIZE, gpuarray.GpuArray, gpuarray.SIZE, gpuarray.SSIZE,
            gpuarray.GpuArray, gpuarray.SIZE, gpuarray.SSIZE,
            gpuarray.GpuArray, gpuarray.SIZE, gpuarray.SSIZE, gpuarray.SSIZE,
            gpuarray.GpuArray, gpuarray.SIZE, gpuarray.SSIZE
        ]
        sio = StringIO()
        print("""#include "cluda.h"

        KERNEL void %(kname)s(const ga_size M, const ga_size N,
            GLOBAL_MEM const %(type_x)s* x_data, const ga_size offset_x, const ga_ssize xs0, const ga_ssize xs1,
            GLOBAL_MEM const %(type_b)s* b, const ga_size offset_b, const ga_ssize bs0,
            GLOBAL_MEM const %(type_y_idx)s* y_idx_data, const ga_size offset_y_idx, const ga_ssize y_idxs0,
            GLOBAL_MEM %(type_x)s* nll_data, const ga_size offset_nll, const ga_ssize nlls0,
            GLOBAL_MEM %(type_x)s* sm_data, const ga_size offset_sm, const ga_ssize sms0, const ga_ssize sms1,
            GLOBAL_MEM %(type_y_idx)s* am_data, const ga_size offset_am, const ga_ssize ams0 GA_DECL_SHARED_PARAM(%(work_x)s, per_thread_values))
        {
          x_data = (GLOBAL_MEM const %(type_x)s *)(((GLOBAL_MEM char *)x_data)+offset_x);
          b = (GLOBAL_MEM const %(type_b)s *)(((GLOBAL_MEM char *)b)+offset_b);
          y_idx_data = (GLOBAL_MEM const %(type_y_idx)s *)(((GLOBAL_MEM char *)y_idx_data)+offset_y_idx);
          nll_data = (GLOBAL_MEM %(type_x)s *)(((GLOBAL_MEM char *)nll_data)+offset_nll);
          sm_data = (GLOBAL_MEM %(type_x)s *)(((GLOBAL_MEM char *)sm_data)+offset_sm);
          am_data = (GLOBAL_MEM %(type_y_idx)s *)(((GLOBAL_MEM char *)am_data)+offset_am);
          for (ga_int row = GID_0; row < M; row += GDIM_0){
            GLOBAL_MEM const %(type_x)s* x = x_data + xs0 * row;
            GLOBAL_MEM %(type_x)s* sm = sm_data + sms0 * row;
            GA_DECL_SHARED_BODY(%(work_x)s, per_thread_values);
            LOCAL_MEM %(work_x)s row_max, sum, sum_inv;
            LOCAL_MEM ga_int row_max_threadIdx;
            %(work_x)s per_thread_row_max, per_thread_sum;
            ga_int per_thread_row_max_j;
            // COMPUTE ROW MAX AND ARGMAX
            // compute separate per-thread maximums and argmaxes
            per_thread_row_max = NAN;
            per_thread_row_max_j = 0;
            for (ga_int j = LID_0; j < N; j += LDIM_0)
            {
              %(work_x)s row_ij = %(load_x)s(x[j * xs1]) + %(load_b)s(b[j * bs0]);
              per_thread_row_max_j = (row_ij > per_thread_row_max) ? j : per_thread_row_max_j;
              per_thread_row_max = fmax%(f)s(row_ij, per_thread_row_max);
            }
            per_thread_values[LID_0] = per_thread_row_max;
            local_barrier();
            if (LID_0 == 0) {
              row_max = NAN;
              row_max_threadIdx = 0;
              for (ga_int j = 0; j < LDIM_0; j++)
              {
                %(work_x)s per_thread_max = per_thread_values[j];
                row_max_threadIdx = (per_thread_max > row_max) ? j : row_max_threadIdx;
                row_max = fmax%(f)s(per_thread_max, row_max);
              }
            }
            local_barrier();
            // The thread with the highest max writes out which of its
            // values was the winner.
            if (LID_0 == row_max_threadIdx) am_data[row * ams0] = per_thread_row_max_j;
            // COMPUTE SOFTMAX
            per_thread_sum = 0.0;
            for (ga_int j = LID_0; j < N; j += LDIM_0)
            {
              %(work_x)s row_ij = %(load_x)s(x[j * xs1]) + %(load_b)s(b[j * bs0]);
              %(work_x)s sm_ij = exp%(f)s(row_ij - row_max);
              per_thread_sum += sm_ij;
              sm[j * sms1] = %(write_x)s(sm_ij);
            }
            per_thread_values[LID_0] = per_thread_sum;
            local_barrier();
            if (LID_0 == 0) {
              sum = 0.0;
              for (ga_int j = 0; j < LDIM_0; j++) {
                sum += per_thread_values[j];
              }
              sum_inv = 1.0 / sum;
            }
            local_barrier();
            for (ga_int j = LID_0; j < N; j += LDIM_0) {
              sm[j * sms1] = %(write_x)s(%(load_x)s(sm[j * sms1]) * sum_inv);
            }
            if (LID_0 == 0) {
              const %(type_y_idx)s y_idx = (ga_int)y_idx_data[row * y_idxs0];
              if ((y_idx >= N || y_idx < 0)) {
                // raise some suspicion.
                nll_data[row * nlls0] = %(write_x)s(0.0);
              } else {
                nll_data[row * nlls0] = %(write_x)s(
                   - %(load_x)s(x[y_idx * xs1])
                   - %(load_b)s(b[y_idx * bs0])
                   + row_max + log%(f)s(sum));
              }
            }
          }
        }
        """ % locals(),
              file=sio)

        return [
            Kernel(code=sio.getvalue(),
                   name=kname,
                   params=params,
                   flags=flags,
                   objvar=k_var)
        ]
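The kernel above computes, per row, a numerically stable softmax over x + b, the argmax, and the negative log-likelihood of the target index. The NumPy sketch below mirrors that math and is purely illustrative; the function name and test values are not from the library.

import numpy as np

def xent_sm_1hot_bias_reference(x, b, y_idx):
    """Reference computation: stable softmax of x + b, argmax, and NLL of y_idx."""
    row = x + b                               # (M, N) + (N,)
    row_max = row.max(axis=1, keepdims=True)  # subtract the row max for stability
    sm = np.exp(row - row_max)
    sm_sum = sm.sum(axis=1, keepdims=True)
    sm /= sm_sum
    am = row.argmax(axis=1)
    rows = np.arange(x.shape[0])
    # nll = -(x[y] + b[y]) + row_max + log(sum), as in the kernel
    nll = -row[rows, y_idx] + row_max[:, 0] + np.log(sm_sum[:, 0])
    return nll, sm, am

x = np.random.randn(4, 5).astype('float32')
b = np.random.randn(5).astype('float32')
y = np.array([0, 3, 1, 4])
nll, sm, am = xent_sm_1hot_bias_reference(x, b, y)
assert np.allclose(nll, -np.log(sm[np.arange(4), y]), atol=1e-4)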