Example #1
    def test_dump_and_load(self):
        in_resource = Book(
            title="Consider Phlebas",
            isbn="0-333-45430-8",
            num_pages=471,
            rrp=19.50,
            fiction=True,
            genre="sci-fi",
            authors=[Author(name="Iain M. Banks")],
            publisher=Publisher(name="Macmillan"),
            published=[datetime.datetime(1987, 1, 1)],
        )

        fp = StringIO()
        yaml_codec.dump(in_resource, fp)

        fp.seek(0)
        out_resource = yaml_codec.load(fp)

        assert out_resource.title == in_resource.title
        assert out_resource.isbn == in_resource.isbn
        assert out_resource.num_pages == in_resource.num_pages
        assert out_resource.rrp == in_resource.rrp
        assert out_resource.fiction == in_resource.fiction
        assert out_resource.genre == in_resource.genre
        assert out_resource.authors[0].name == in_resource.authors[0].name
        assert out_resource.publisher.name == in_resource.publisher.name
        assert out_resource.published[0] == in_resource.published[0]
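
The pattern above (dump into an in-memory buffer, rewind with seek(0), then load back) works with any codec that reads and writes file objects. A minimal sketch of the same round trip, using the stdlib json module as a stand-in for the project-specific yaml_codec:

import json
from io import StringIO

fp = StringIO()
json.dump({"title": "Consider Phlebas"}, fp)  # serialize into the buffer
fp.seek(0)                                    # rewind before reading back
assert json.load(fp)["title"] == "Consider Phlebas"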
Example #2
File: tests.py Project: faulkner/sentry
    def test_content_encoding_gzip(self):
        kwargs = {'message': 'hello'}

        message = json.dumps(kwargs)

        fp = StringIO()

        f = GzipFile(fileobj=fp, mode='w')
        try:
            f.write(message)
        finally:
            f.close()

        key = self.projectkey.public_key
        secret = self.projectkey.secret_key

        with self.tasks():
            resp = self.client.post(
                self.path, fp.getvalue(),
                content_type='application/octet-stream',
                HTTP_CONTENT_ENCODING='gzip',
                HTTP_X_SENTRY_AUTH=get_auth_header('_postWithHeader', key, secret),
            )

        assert resp.status_code == 200, resp.content

        event_id = json.loads(resp.content)['id']
        instance = Event.objects.get(event_id=event_id)

        assert instance.message == 'hello'
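
Note that gzip output is bytes: this test targets Python 2, where StringIO holds byte strings. On Python 3 the buffer must be an io.BytesIO instead. A minimal sketch of the same compress-into-memory step for Python 3:

import gzip
from io import BytesIO

buf = BytesIO()                     # gzip produces bytes, so BytesIO, not StringIO
with gzip.GzipFile(fileobj=buf, mode='wb') as f:
    f.write(b'{"message": "hello"}')
assert gzip.decompress(buf.getvalue()) == b'{"message": "hello"}'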
Example #3
    def test_start_logger(self):
        sio = StringIO()
        logger = logging.getLogger('CONT #abcdef: test')
        logger.addHandler(logging.StreamHandler(sio))

        # set log level as INFO
        logger = start_logger('test', 'INFO', 'abcdef')
        self.assertEqual(logging.INFO, logger.level)
        # INFO message is recorded with INFO log level
        logger.info('test1')
        self.assertEqual(sio.getvalue(), 'test1\n')
        # DEBUG message is not recorded with INFO log level
        logger.debug('test2')
        self.assertEqual(sio.getvalue(), 'test1\n')

        # set log level as DEBUG
        logger = start_logger('test', 'DEBUG', 'abcdef')
        self.assertEqual(logging.DEBUG, logger.level)
        # DEBUG message is recorded with DEBUG log level
        logger.debug('test3')
        self.assertEqual(sio.getvalue(), 'test1\ntest3\n')

        # If the level parameter is unknown, use ERROR as log level
        logger = start_logger('test', 'foo', 'abcdef')
        self.assertEqual(logging.ERROR, logger.level)
Example #4
    def test_check_totals_calls_run_once(self):
        prob = Problem()
        root = prob.root = Group()
        root.add('p1', IndepVarComp('x', 1.0), promotes=['*'])
        root.add('p2', IndepVarComp('y', 1.0), promotes=['*'])
        root.add('comp', Paraboloid(), promotes=['*'])

        prob.driver.add_desvar('x')
        prob.driver.add_desvar('y')
        prob.driver.add_objective('f_xy')

        prob.setup(check=False)

        prob['x'] = 5.0
        prob['y'] = 2.0

        iostream = StringIO()

        data = prob.check_total_derivatives(out_stream=iostream)

        self.assertAlmostEqual(first=prob["f_xy"],
                               second=(prob['x'] - 3.0)**2
                                      + prob['x'] * prob['y']
                                      + (prob['y'] + 4.0)**2 - 3.0,
                               places=5,
                               msg="check partial derivatives did not call "
                                   "run_once on the driver as expected.")

        self.assertEqual(first=iostream.getvalue()[:39],
                         second="Executing model to populate unknowns...",
                         msg="check partial derivatives failed to run driver once")
Example #5
def _entries_to_ldif(entries):
    """Format LDAP entries as LDIF"""
    io = StringIO()
    writer = LDIFWriter(io)
    for entry in entries:
        writer.unparse(str(entry.dn), dict(entry.raw))
    return io.getvalue()
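
A usage sketch, assuming LDIFWriter here is python-ldap's ldif.LDIFWriter (where attribute values are lists of bytes); the entry is made up for illustration:

from io import StringIO
from ldif import LDIFWriter  # python-ldap; assumed to match the writer above

io = StringIO()
writer = LDIFWriter(io)
writer.unparse("uid=alice,dc=example,dc=com", {"uid": [b"alice"]})
print(io.getvalue())  # "dn: uid=alice,..." followed by "uid: alice"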
Example #6
def chainedInequalityErrorMessage(gre, msg=None):
    if msg is None:
        msg = "Relational expression used in an unexpected Boolean context."
    buf = StringIO()
    gre.chainedInequality.to_string(buf)
    # We are about to raise an exception, so it's OK to reset chainedInequality
    info = gre.call_info
    gre.chainedInequality = None
    gre.call_info = None

    args = ( str(msg).strip(), buf.getvalue().strip(), info[0], info[1],
             ':\n    %s' % info[3] if info[3] is not None else '.' )
    return """%s

The inequality expression:
    %s
contains non-constant terms (variables) that were evaluated in an
unexpected Boolean context at
  File '%s', line %s%s

Evaluating Pyomo variables in a Boolean context, e.g.
    if expression <= 5:
is generally invalid.  If you want to obtain the Boolean value of the
expression based on the current variable values, explicitly evaluate the
expression using the value() function:
    if value(expression) <= 5:
or
    if value(expression <= 5):
""" % args
Example #7
 def test_no_children(self):
     my_task = Task("t2", [""], file_dep=['d2.txt'])
     output = StringIO()
     cmd = CmdFactory(Graphx, outstream=output, task_list=[my_task])
     cmd._execute(graph_type='json', no_children=True)
     got = output.getvalue()
     self.assertNotIn("d2.txt", got)
Example #8
    def test__read_write_credentials_file(self):
        credentials = _create_test_credentials()
        contents = StringIO()

        multiprocess_file_storage._write_credentials_file(
            contents, {'key': credentials})

        contents.seek(0)
        data = json.load(contents)
        self.assertEqual(data['file_version'], 2)
        self.assertTrue(data['credentials']['key'])

        # Read it back.
        contents.seek(0)
        results = multiprocess_file_storage._load_credentials_file(contents)
        self.assertEqual(
            results['key'].access_token, credentials.access_token)

        # Add an invalid credential and try reading it back. It should ignore
        # the invalid one but still load the valid one.
        data['credentials']['invalid'] = '123'
        results = multiprocess_file_storage._load_credentials_file(
            StringIO(json.dumps(data)))
        self.assertNotIn('invalid', results)
        self.assertEqual(
            results['key'].access_token, credentials.access_token)
Example #9
    def test_adds_supergroup(self):
        out = StringIO()
        fxa_id = uuid.uuid4().hex
        call_command(
            'createsuperuser',
            interactive=False,
            username='******',
            email='*****@*****.**',
            add_to_supercreate_group=True,
            fxa_id=fxa_id,
            stdout=out)

        user = UserProfile.objects.get(username='******')
        assert user.email == '*****@*****.**'
        assert user.read_dev_agreement
        assert user.groups.filter(rules='Accounts:SuperCreate').exists()

        response = json.loads(out.getvalue())

        assert response == {
            'username': '******',
            'email': '*****@*****.**',
            'api-key': ANY,
            'api-secret': ANY,
            'fxa-id': fxa_id,
        }
Example #10
    def load_dataset(infile, selection, verbose=1, **kwargs):
        """
        Loads selected distribution from selected infile.

        Arguments:
          infile (str): Path to text input file
          selection (str): Start of lines containing desired dataset
          verbose (int): Level of verbose output

        Returns:
          dataset (DataFrame): Selected dataset
        """
        from six import StringIO
        import pandas

        if verbose >= 1:
            print("loading '{0}' from '{1}'".format(selection, infile))

        s = StringIO()
        with open(infile) as open_infile:
            for line in open_infile:
                if line.startswith(selection):
                    s.write(line)
        s.seek(0)

        dataset = pandas.read_csv(s, delim_whitespace=True, header=None,
          usecols=[3,4,5,6], names=["phi", "psi", "probability",
          "free energy"])

        return dataset
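
The same filter-lines-into-a-buffer idea in a self-contained form; note that the delim_whitespace flag used above is deprecated in recent pandas in favour of sep=r"\s+" (the sample data and column names here are illustrative):

from io import StringIO
import pandas

raw = "A 1 2\nB 3 4\nA 5 6\n"
s = StringIO()
for line in raw.splitlines(True):   # True keeps the trailing newlines
    if line.startswith("A"):        # keep only the selected lines
        s.write(line)
s.seek(0)                           # rewind so read_csv starts at the top
df = pandas.read_csv(s, sep=r"\s+", header=None, names=["tag", "x", "y"])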
Example #11
    def test_simple(self):
        # This test just checks the output of the program against
        # some expected output
        from six import StringIO

        out = StringIO()
        zdd.do_zdd(Arguments(), out)
        output = json.loads(out.getvalue())
        output['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = ""

        expected = json.loads('''{
  "acceptedResourceRoles": [
    "*",
    "slave_public"
  ],
  "container": {
    "docker": {
      "forcePullImage": true,
      "image": "brndnmtthws/nginx-echo-sleep",
      "network": "BRIDGE",
      "portMappings": [
        {
          "containerPort": 8080,
          "hostPort": 0,
          "servicePort": 10001
        }
      ]
    },
    "type": "DOCKER"
  },
  "cpus": 0.1,
  "healthChecks": [
    {
      "gracePeriodSeconds": 15,
      "intervalSeconds": 3,
      "maxConsecutiveFailures": 10,
      "path": "/",
      "portIndex": 0,
      "protocol": "HTTP",
      "timeoutSeconds": 15
    }
  ],
  "id": "/nginx-blue",
  "instances": 1,
  "labels": {
    "HAPROXY_0_PORT": "10000",
    "HAPROXY_APP_ID": "nginx",
    "HAPROXY_DEPLOYMENT_ALT_PORT": "10001",
    "HAPROXY_DEPLOYMENT_COLOUR": "blue",
    "HAPROXY_DEPLOYMENT_GROUP": "nginx",
    "HAPROXY_DEPLOYMENT_NEW_INSTANCES": "0",
    "HAPROXY_DEPLOYMENT_STARTED_AT": "2016-02-01T15:51:38.184623",
    "HAPROXY_DEPLOYMENT_TARGET_INSTANCES": "3",
    "HAPROXY_GROUP": "external"
  },
  "mem": 65
}
''')
        expected['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = ""
        self.assertEqual(output, expected)
Example #12
    def __call__(self, environ, start_response):
        script_name = environ.get('SCRIPT_NAME', '')
        path_info = environ.get('PATH_INFO', '')
        sent = []
        written_response = StringIO()

        def replacement_start_response(status, headers, exc_info=None):
            if not self.should_filter(status, headers):
                return start_response(status, headers, exc_info)
            else:
                sent[:] = [status, headers, exc_info]
                return written_response.write

        app_iter = self.app(environ, replacement_start_response)
        if not sent:
            return app_iter
        status, headers, exc_info = sent
        try:
            for chunk in app_iter:
                written_response.write(chunk)
        finally:
            if hasattr(app_iter, 'close'):
                app_iter.close()
        body = written_response.getvalue()
        status, headers, body = self.filter(
            script_name, path_info, environ, status, headers, body)
        start_response(status, headers, exc_info)
        return [body]
Example #13
    def render(self, data, media_type=None, renderer_context=None, writer_opts=None):
        """
        Renders serialized *data* into CSV. For a dictionary:
        """
        if data is None:
            return ''

        if not isinstance(data, list):
            data = [data]

        if writer_opts is not None:
            log.warning('The writer_opts argument is deprecated. Pass the '
                        'writer_opts attribute into renderer_context instead.')

        renderer_context = renderer_context or {}
        writer_opts = renderer_context.get('writer_opts', writer_opts or self.writer_opts or {})
        header = renderer_context.get('header', self.header)
        labels = renderer_context.get('labels', self.labels)

        table = self.tablize(data, header=header, labels=labels)
        csv_buffer = StringIO()
        csv_writer = csv.writer(csv_buffer, **writer_opts)
        for row in table:
            # Assume that strings should be encoded as UTF-8
            csv_writer.writerow([
                elem.encode('utf-8') if isinstance(elem, text_type) and PY2 else elem
                for elem in row
            ])

        return csv_buffer.getvalue()
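
The elem.encode('utf-8') branch only fires under Python 2 (the PY2 flag); on Python 3 the csv module consumes text directly, so the loop reduces to plain writerow calls. A minimal Python 3 sketch of the same buffer-backed rendering:

import csv
from io import StringIO

rows = [["naïve", 1], ["plain", 2]]
buf = StringIO()
writer = csv.writer(buf)
for row in rows:
    writer.writerow(row)   # no per-element encoding needed on Python 3
data = buf.getvalue()      # 'naïve,1\r\nplain,2\r\n' (csv defaults to \r\n)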
Example #14
def test_packed_workflow_execution(wf_path, job_path, namespaced, tmpdir):
    load_tool.loaders = {}

    document_loader, workflowobj, uri = fetch_document(
        get_data(wf_path), resolver=tool_resolver)

    document_loader, _, processobj, metadata, uri = validate_document(
        document_loader, workflowobj, uri, [], {})

    packed = json.loads(print_pack(document_loader, processobj, uri, metadata))

    assert not namespaced or "$namespaces" in packed

    wf_packed_handle, wf_packed_path = tempfile.mkstemp()
    with open(wf_packed_path, 'w') as temp_file:
        json.dump(packed, temp_file)

    normal_output = StringIO()
    packed_output = StringIO()

    normal_params = ['--outdir', str(tmpdir), get_data(wf_path), get_data(job_path)]
    packed_params = ['--outdir', str(tmpdir), '--debug', get_data(wf_packed_path), get_data(job_path)]

    assert main(normal_params, stdout=normal_output) == 0
    assert main(packed_params, stdout=packed_output) == 0

    assert json.loads(packed_output.getvalue()) == json.loads(normal_output.getvalue())

    os.close(wf_packed_handle)
    os.remove(wf_packed_path)
Example #15
 def check_roundtrip(self, code1, filename="internal"):
     ast1 = compile(code1, filename, "exec", ast.PyCF_ONLY_AST)
     unparse_buffer = StringIO()
     unparse.Unparser(ast1, unparse_buffer)
     code2 = unparse_buffer.getvalue()
     ast2 = compile(code2, filename, "exec", ast.PyCF_ONLY_AST)
     self.assertASTEqual(ast1, ast2)
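
Since Python 3.9 the same round trip can be done with the stdlib's ast.unparse, without an external Unparser helper; a sketch:

import ast

code1 = "x = [i ** 2 for i in range(10)]\n"
ast1 = ast.parse(code1)
code2 = ast.unparse(ast1)              # stdlib since Python 3.9
assert ast.dump(ast.parse(code2)) == ast.dump(ast1)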
Example #16
    def test_prints_signature(self):
        the_time = 1406143563.020043
        key = 'secret squirrel'
        expires = 3600
        path = '/v1/a/c/o'
        redirect = 'https://example.com/done.html'
        max_file_size = str(int(1024 * 1024 * 1024 * 3.14159))  # π GiB
        max_file_count = '3'

        expected_signature = hmac.new(
            key,
            "\n".join((
                path, redirect, max_file_size, max_file_count,
                str(int(the_time + expires)))),
            hashlib.sha1).hexdigest()

        out = StringIO()
        with mock.patch('swift.cli.form_signature.time', lambda: the_time):
            with mock.patch('sys.stdout', out):
                exitcode = form_signature.main([
                    '/path/to/swift-form-signature',
                    path, redirect, max_file_size,
                    max_file_count, str(expires), key])

        self.assertEqual(exitcode, 0)
        self.assertTrue("Signature: %s" % expected_signature
                        in out.getvalue())
        self.assertTrue("Expires: %d" % (the_time + expires,)
                        in out.getvalue())

        sig_input = ('<input type="hidden" name="signature" value="%s" />'
                     % expected_signature)
        self.assertTrue(sig_input in out.getvalue())
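
A portability note: this is Python 2 code, and on Python 3 hmac.new requires bytes for both the key and the message. A sketch of the same signature computation under Python 3, with the values from the test:

import hashlib
import hmac

key = b'secret squirrel'
max_file_size = str(int(1024 * 1024 * 1024 * 3.14159))
expires_at = str(int(1406143563.020043 + 3600))
msg = "\n".join(('/v1/a/c/o', 'https://example.com/done.html',
                 max_file_size, '3', expires_at))
signature = hmac.new(key, msg.encode('utf-8'), hashlib.sha1).hexdigest()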
Example #17
def get_correct_indentation_diff(code, filename):
    """
    Generate a diff to make code correctly indented.

    :param code: a string containing a file's worth of Python code
    :param filename: the filename being considered (used in diff generation only)
    :returns: a unified diff to make code correctly indented, or
              None if code is already correctedly indented
    """
    code_buffer = StringIO(code)
    output_buffer = StringIO()
    reindenter = reindent.Reindenter(code_buffer)
    reindenter.run()
    reindenter.write(output_buffer)
    reindent_output = output_buffer.getvalue()
    output_buffer.close()
    if code != reindent_output:
        diff_generator = difflib.unified_diff(code.splitlines(True), reindent_output.splitlines(True),
                                              fromfile=filename, tofile=filename + " (reindented)")
        # work around http://bugs.python.org/issue2142
        diff_tuple = map(clean_diff_line_for_python_bug_2142, diff_generator)
        diff = "".join(diff_tuple)
        return diff
    else:
        return None
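
A self-contained sketch of the diff-generation step (the reindent module above is project-specific, so the corrected text here is supplied by hand):

import difflib

old = "def f():\n  return 1\n"
new = "def f():\n    return 1\n"
diff = "".join(difflib.unified_diff(
    old.splitlines(True), new.splitlines(True),
    fromfile="example.py", tofile="example.py (reindented)"))
print(diff)  # a unified diff, or an empty string when the inputs match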
Example #18
                        def error(err):
                            errmsg = 'Failure while invoking procedure {0} registered under "{1}".'.format(endpoint.fn, registration.procedure)
                            try:
                                self.onUserError(err, errmsg)
                            except:
                                pass
                            formatted_tb = None
                            if self.traceback_app:
                                # if asked to marshal the traceback within the WAMP error message, extract it
                                # noinspection PyCallingNonCallable
                                tb = StringIO()
                                err.printTraceback(file=tb)
                                formatted_tb = tb.getvalue().splitlines()

                            del self._invocations[msg.request]

                            if hasattr(err, 'value'):
                                exc = err.value
                            else:
                                exc = err

                            reply = self._message_from_exception(message.Invocation.MESSAGE_TYPE, msg.request, exc, formatted_tb)

                            try:
                                self._transport.send(reply)
                            except SerializationError as e:
                                # the application-level payload returned from the invoked procedure can't be serialized
                                reply = message.Error(message.Invocation.MESSAGE_TYPE, msg.request, ApplicationError.INVALID_PAYLOAD,
                                                      args=[u'error return value from invoked procedure "{0}" could not be serialized: {1}'.format(registration.procedure, e)])
                                self._transport.send(reply)
                            # we have handled the error, so we eat it
                            return None
Example #19
  def _serve_compressed_histograms(self, query_params):
    """Given a tag and single run, return an array of compressed histograms."""
    tag = query_params.get('tag')
    run = query_params.get('run')
    compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)
    if query_params.get('format') == _OutputFormat.CSV:
      string_io = StringIO()
      writer = csv.writer(string_io)

      # Build the headers; we have two columns for timing and two columns for
      # each compressed histogram bucket.
      headers = ['Wall time', 'Step']
      if compressed_histograms:
        bucket_count = len(compressed_histograms[0].compressed_histogram_values)
        for i in xrange(bucket_count):
          headers += ['Edge %d basis points' % i, 'Edge %d value' % i]
      writer.writerow(headers)

      for compressed_histogram in compressed_histograms:
        row = [compressed_histogram.wall_time, compressed_histogram.step]
        for value in compressed_histogram.compressed_histogram_values:
          row += [value.rank_in_bps, value.value]
        writer.writerow(row)
      self._send_csv_response(string_io.getvalue())
    else:
      self._send_json_response(compressed_histograms)
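
The CSV branch is the usual csv.writer-over-StringIO idiom: accumulate rows in the buffer, then hand getvalue() to the response. A minimal sketch with the same header layout (the values are made up):

import csv
from io import StringIO

string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Edge 0 basis points', 'Edge 0 value'])
writer.writerow([1467923400.5, 7, 0, 0.25])
csv_body = string_io.getvalue()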
Example #20
 def test_isatty_false(self):
     w1 = StringIO()
     w1.isatty = lambda: True
     w2 = StringIO()
     w2.isatty = lambda: True
     writer = action.Writer(w1, w2)
     assert writer.isatty()
Example #21
 def test_isatty_overwrite_no(self):
     w1 = StringIO()
     w1.isatty = lambda: True
     w2 = StringIO()
     w2.isatty = lambda: True
     writer = action.Writer(w1)
     writer.add_writer(w2, False)
Example #22
 def test_cmds_with_params(self, commands):
     output = StringIO()
     cmd = CmdFactory(TabCompletion, task_loader=DodoTaskLoader(),
                      outstream=output, cmds=commands)
     cmd.execute({'shell':'zsh', 'hardcode_tasks': False}, [])
     got = output.getvalue()
     assert "tabcompletion: generate script" in got
Example #23
    def get(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        fh = StringIO()
        w_csv = csv.writer(fh, lineterminator="\n")

        # header
        w_csv.writerow(
            [
                "username",
                "created_at",
                "type",
                "size",
                "fingerprint",
                "fingerprint_sha256",
                "comment",
            ]
        )

        with closing(Session()) as session:
            user_key_list = session.query(PublicKey, User).filter(User.id == PublicKey.user_id)
            for key, user in user_key_list:
                w_csv.writerow(
                    [
                        user.name,
                        key.created_on.isoformat(),
                        key.key_type,
                        key.key_size,
                        key.fingerprint,
                        key.fingerprint_sha256,
                        key.comment,
                    ]
                )

        self.set_header("Content-Type", "text/csv")
        self.write(fh.getvalue())
Example #24
def bottlestats(server_name, path=''):
    """Renders a GET request, by showing this nodes stats and children."""
    path = path.lstrip('/')
    parts = path.split('/')
    if not parts[0]:
        parts = parts[1:]
    stat_dict = util.lookup(scales.getStats(), parts)

    if stat_dict is None:
        abort(404, "Not Found")
        return

    output = StringIO()
    output_format = request.query.get('format', 'html')
    query = request.query.get('query', None)
    if output_format == 'json':
        response.content_type = "application/json"
        formats.jsonFormat(output, stat_dict, query)
    elif output_format == 'prettyjson':
        formats.jsonFormat(output, stat_dict, query, pretty=True)
        response.content_type = "application/json"
    else:
        formats.htmlHeader(output, '/' + path, server_name, query)
        formats.htmlFormat(output, tuple(parts), stat_dict, query)
        response.content_type = "text/html"

    return output.getvalue()
Example #25
    def cmd(self, command, checks=None, allowed_exceptions=None, debug=False): #pylint: disable=no-self-use
        allowed_exceptions = allowed_exceptions or []
        if not isinstance(allowed_exceptions, list):
            allowed_exceptions = [allowed_exceptions]

        if self._debug or debug:
            print('\n\tRUNNING: {}'.format(command))
        command_list = shlex.split(command)
        output = StringIO()
        try:
            cli_main(command_list, file=output)
        except Exception as ex: # pylint: disable=broad-except
            ex_msg = str(ex)
            if not next((x for x in allowed_exceptions if x in ex_msg), None):
                raise ex
        self._track_executed_commands(command_list)
        result = output.getvalue().strip()
        output.close()

        if self._debug or debug:
            print('\tRESULT: {}\n'.format(result))

        if checks:
            checks = [checks] if not isinstance(checks, list) else checks
            for check in checks:
                check.compare(result)

        result = result or '{}'
        try:
            return json.loads(result)
        except Exception: # pylint: disable=broad-except
            return result
Example #26
File: runner.py Project: aregee/Mailman
def run_layer(options, layer_name, layer, tests, setup_layers,
              failures, errors):

    output = options.output
    gathered = []
    gather_layers(layer, gathered)
    needed = dict([(l, 1) for l in gathered])
    if options.resume_number != 0:
        output.info("Running %s tests:" % layer_name)
    tear_down_unneeded(options, needed, setup_layers)

    if options.resume_layer is not None:
        output.info_suboptimal("  Running in a subprocess.")

    try:
        setup_layer(options, layer, setup_layers)
    except zope.testrunner.interfaces.EndRun:
        raise
    except Exception:
        f = StringIO()
        traceback.print_exc(file=f)
        output.error(f.getvalue())
        errors.append((SetUpLayerFailure(layer), sys.exc_info()))
        return 0
    else:
        return run_tests(options, tests, layer_name, failures, errors)
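
Capturing a traceback into a StringIO, as done here and in example #18, is plain stdlib; a minimal sketch:

import traceback
from io import StringIO

try:
    1 / 0
except ZeroDivisionError:
    f = StringIO()
    traceback.print_exc(file=f)   # format the active exception into the buffer
    formatted = f.getvalue()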
Example #27
class _Buffer(object):

    def __init__(self, stream):
        self._stream = stream
        self._buffer = StringIO()

    def fileno(self):
        return self._stream.fileno()

    def __getattr__(self, attr):
        # this happens on unpickling
        if attr == '_buffer':
            raise AttributeError("No _buffer yet")
        return getattr(self._buffer, attr)

    def __le__(self, obj):
        return self._buffer.getvalue() == obj

    def __eq__(self, obj):
        return self._buffer.getvalue() == obj

    def __str__(self):
        return self._buffer.getvalue()

    def __repr__(self):
        return repr(self._buffer.getvalue())
Example #28
    def render(self, input_data, media_type=None, renderer_context=None):
        """
        Renders serialized *data* into CSV. For a dictionary:
        """
        if input_data is None:
            return ''

        data = input_data
        if not isinstance(data, list):
            data = input_data.get('results', [input_data])

        table = self.tablize(data)

        csv_buffer = StringIO()
        csv_writer = csv.writer(csv_buffer, dialect='excel-tab')
        for row in table:
            # Assume that strings should be encoded as UTF-8
            csv_writer.writerow([
                elem.encode('utf-8') if isinstance(elem, text_type) and PY2 else elem
                for elem in row
            ])

        return csv_buffer.getvalue()
Example #29
def runquery_csv():
	global out

	q = frappe.form_dict.get('query')

	rep_name = frappe.form_dict.get('report_name')
	if not frappe.form_dict.get('simple_query'):

		# Report Name
		if not rep_name:
			rep_name = get_sql_tables(q)[0]

	if not rep_name: rep_name = 'DataExport'

	rows = [[rep_name], out['colnames']] + out['values']

	from six import StringIO
	import csv

	f = StringIO()
	writer = csv.writer(f)
	for r in rows:
		# encode only unicode type strings and not int, floats etc.
		writer.writerow(map(lambda v: isinstance(v, text_type) and v.encode('utf-8') or v, r))

	f.seek(0)
	out['result'] = text_type(f.read(), 'utf-8')
	out['type'] = 'csv'
	out['doctype'] = rep_name
Example #30
 def test_hardcoded_tasks(self, commands):
     output = StringIO()
     cmd = CmdFactory(TabCompletion, task_loader=FakeLoader(),
                      outstream=output, cmds=commands)
     cmd.execute({'shell':'zsh', 'hardcode_tasks': True}, [])
     got = output.getvalue()
     assert 't1' in got
Example #31
    def render_board(state, mode="human", close=False):
        """
        Render the playing board
        """
        board = state["board"]
        outfile = StringIO() if mode == "ansi" else sys.stdout

        outfile.write("    ")
        outfile.write("-" * 25)
        outfile.write("\n")

        for i in range(7, -1, -1):
            outfile.write(" {} | ".format(i + 1))
            for j in range(7, -1, -1):
                piece = ChessEnvV0.ids_to_pieces[board[i, j]]
                figure = uniDict[piece[0]]
                outfile.write(" {} ".format(figure))
            outfile.write("|\n")
        outfile.write("    ")
        outfile.write("-" * 25)
        outfile.write("\n      a  b  c  d  e  f  g  h ")
        outfile.write("\n")
        outfile.write("\n")

        if mode != "human":
            return outfile
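
The StringIO-or-sys.stdout switch lets one renderer either print directly or hand its output back to the caller. A stripped-down sketch of the pattern:

import sys
from io import StringIO

def render(mode="human"):
    outfile = StringIO() if mode == "ansi" else sys.stdout
    outfile.write("    " + "-" * 25 + "\n")
    if mode != "human":
        return outfile            # caller reads it back with .getvalue()

board_text = render(mode="ansi").getvalue()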
Example #32
 def test_list_same_checks(self):
     output = StringIO()
     call_command('list_same_checks', stdout=output)
     self.assertEqual(1, len(output.getvalue().splitlines()))
Example #33
 def test_list_count_checks(self):
     output = StringIO()
     call_command('list_ignored_checks', count=10, stdout=output)
     self.assertEqual('', output.getvalue())
Example #34
 def test_list_all_checks(self):
     output = StringIO()
     call_command('list_ignored_checks', list_all=True, stdout=output)
     self.assertEqual(2, len(output.getvalue().splitlines()))
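
Examples #32 through #34 all rely on the same Django idiom: pass a StringIO as call_command's stdout so the command's output can be asserted on. A generic sketch using a built-in command:

from io import StringIO
from django.core.management import call_command

out = StringIO()
call_command('check', stdout=out)   # any management command accepts stdout=
print(out.getvalue())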
Example #35
    test_autosummary
    ~~~~~~~~~~~~~~~~

    Test the autosummary extension.

    :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from six import iteritems, StringIO

from sphinx.ext.autosummary import mangle_signature

import pytest

html_warnfile = StringIO()

default_kw = {
    'testroot': 'autosummary',
    'confoverrides': {
        'extensions': ['sphinx.ext.autosummary'],
        'autosummary_generate': True,
        'source_suffix': '.rst'
    }
}


def test_mangle_signature():
    TEST = """
    () :: ()
    (a, b, c, d, e) :: (a, b, c, d, e)
Example #36
 def capture(self, command, *args, **kwargs):
     out, sys.stdout = sys.stdout, StringIO()
     try:
         command(*args, **kwargs)
         sys.stdout.seek(0)
         yield sys.stdout.read()
     finally:
         sys.stdout = out
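
The stdlib also offers contextlib.redirect_stdout for the same swap-and-restore job; a sketch:

from contextlib import redirect_stdout
from io import StringIO

buf = StringIO()
with redirect_stdout(buf):   # sys.stdout is restored on exit
    print("hello")
assert buf.getvalue() == "hello\n"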
Example #37
    def render_moves(state, piece_id, moves, mode="human"):
        """
        Render the possible moves that a piece can take
        """
        board = state["board"]
        moves_pos = [m["new_pos"] for m in moves if m["piece_id"] == piece_id]

        outfile = StringIO() if mode == "ansi" else sys.stdout
        outfile.write("    ")
        outfile.write("-" * 25)
        outfile.write("\n")

        for i in range(7, -1, -1):
            outfile.write(" {} | ".format(i + 1))
            for j in range(7, -1, -1):
                piece = ChessEnvV0.ids_to_pieces[board[i, j]]
                figure = uniDict[piece[0]]

                # check moves + piece
                if board[i, j] == piece_id:
                    outfile.write("<{}>".format(figure))
                elif moves_pos and any(np.equal(moves_pos, [i, j]).all(1)):
                    if piece == ".":
                        if piece_id == ChessEnvV0.CASTLE_MOVE_ID:
                            outfile.write("0-0")
                        else:
                            outfile.write(" X ")
                    else:
                        outfile.write("+{}+".format(figure))
                else:
                    outfile.write(" {} ".format(figure))
            outfile.write("|\n")
        outfile.write("    ")
        outfile.write("-" * 25)
        outfile.write("\n      a  b  c  d  e  f  g  h ")
        outfile.write("\n")
        outfile.write("\n")

        if mode != "human":
            return outfile
Example #38
 def set(self, cluster, data, args=None, **kwargs):
     """ set the object by json """
     kwargs['stdin'] = StringIO(json.dumps(data))
     return self.json_command(cluster, 'set', args, **kwargs)
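
Wrapping json.dumps output in a StringIO turns a dict into a ready-made stdin for any consumer that expects a file object; a self-contained sketch (consume stands in for the command reading stdin):

import json
from io import StringIO

def consume(stdin):
    return json.load(stdin)

payload = {"pg_num": 128}
assert consume(StringIO(json.dumps(payload))) == payload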
Example #39
class CommandArchitectureTest(S3HandlerBaseTest):
    def setUp(self):
        super(CommandArchitectureTest, self).setUp()
        self.session = FakeSession()
        self.bucket = make_s3_files(self.session)
        self.loc_files = make_loc_files()
        self.output = StringIO()
        self.saved_stdout = sys.stdout
        sys.stdout = self.output

    def tearDown(self):
        self.output.close()
        sys.stdout = self.saved_stdout

        super(CommandArchitectureTest, self).tearDown()
        clean_loc_files(self.loc_files)
        s3_cleanup(self.bucket, self.session)

    def test_create_instructions(self):
        """
        This tests to make sure the instructions for any command are generated
        properly.
        """
        cmds = ['cp', 'mv', 'rm', 'sync', 'ls', 'mb', 'rb']

        instructions = {
            'cp': ['file_generator', 's3_handler'],
            'mv': ['file_generator', 's3_handler'],
            'rm': ['file_generator', 's3_handler'],
            'sync': ['file_generator', 'comparator', 's3_handler'],
            'ls': ['s3_handler'],
            'mb': ['s3_handler'],
            'rb': ['s3_handler']
        }

        params = {'filters': True}
        for cmd in cmds:
            cmd_arc = CommandArchitecture(self.session, cmd, {})
            cmd_arc.create_instructions()
            self.assertEqual(cmd_arc.instructions, instructions[cmd])

        # Test if there is a filter.
        cmd_arc = CommandArchitecture(self.session, 'cp', params)
        cmd_arc.create_instructions()
        self.assertEqual(cmd_arc.instructions,
                         ['file_generator', 'filters', 's3_handler'])

    def test_run_cp_put(self):
        """
        This ensures that the architecture sets up correctly for a ``cp`` put
        command.  It is just a dry run, but all of the components need
        to be wired correctly for it to work.
        """
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        local_file = self.loc_files[0]
        rel_local_file = os.path.relpath(local_file)
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': True,
            'quiet': False,
            'src': local_file,
            'dest': s3_file,
            'filters': filters,
            'paths_type': 'locals3',
            'region': 'us-east-1'
        }
        cmd_arc = CommandArchitecture(self.session, 'cp', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) upload: %s to %s" % (rel_local_file, s3_file)
        self.assertIn(output_str, self.output.getvalue())

    def test_run_cp_get(self):
        """
        This ensures that the architecture sets up correctly for a ``cp`` get
        command.  It is just a dry run, but all of the components need
        to be wired correctly for it to work.
        """
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        local_file = self.loc_files[0]
        rel_local_file = os.path.relpath(local_file)
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': True,
            'quiet': False,
            'src': s3_file,
            'dest': local_file,
            'filters': filters,
            'paths_type': 's3local',
            'region': 'us-east-1'
        }
        cmd_arc = CommandArchitecture(self.session, 'cp', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) download: %s to %s" % (s3_file, rel_local_file)
        self.assertIn(output_str, self.output.getvalue())

    def test_run_cp_copy(self):
        """
        This ensures that the architecture sets up correctly for a ``cp`` copy
        command.  It is just a dry run, but all of the components need
        to be wired correctly for it to work.
        """
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': True,
            'quiet': False,
            'src': s3_file,
            'dest': s3_file,
            'filters': filters,
            'paths_type': 's3s3',
            'region': 'us-east-1'
        }
        cmd_arc = CommandArchitecture(self.session, 'cp', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) copy: %s to %s" % (s3_file, s3_file)
        self.assertIn(output_str, self.output.getvalue())

    def test_run_mv(self):
        """
        This ensures that the architecture sets up correctly for a ``mv``
        command.  It is just a dry run, but all of the components need
        to be wired correctly for it to work.
        """
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': True,
            'quiet': False,
            'src': s3_file,
            'dest': s3_file,
            'filters': filters,
            'paths_type': 's3s3',
            'region': 'us-east-1'
        }
        cmd_arc = CommandArchitecture(self.session, 'mv', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) move: %s to %s" % (s3_file, s3_file)
        self.assertIn(output_str, self.output.getvalue())

    def test_run_remove(self):
        """
        This ensures that the architecture sets up correctly for a ``rm``
        command.  It is just a dry run, but all of the components need
        to be wired correctly for it to work.
        """
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': True,
            'quiet': False,
            'src': s3_file,
            'dest': s3_file,
            'filters': filters,
            'paths_type': 's3',
            'region': 'us-east-1'
        }
        cmd_arc = CommandArchitecture(self.session, 'rm', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) delete: %s" % s3_file
        self.assertIn(output_str, self.output.getvalue())

    def test_run_sync(self):
        """
        This ensures that the architecture sets up correctly for a ``sync``
        command.  It is just a dry run, but all of the components need
        to be wired correctly for it to work.
        """
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        local_file = self.loc_files[0]
        s3_prefix = 's3://' + self.bucket + '/'
        local_dir = self.loc_files[3]
        rel_local_file = os.path.relpath(local_file)
        filters = [['--include', '*']]
        params = {
            'dir_op': True,
            'dryrun': True,
            'quiet': False,
            'src': local_dir,
            'dest': s3_prefix,
            'filters': filters,
            'paths_type': 'locals3',
            'region': 'us-east-1'
        }
        cmd_arc = CommandArchitecture(self.session, 'sync', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) upload: %s to %s" % (rel_local_file, s3_file)
        self.assertIn(output_str, self.output.getvalue())

    def test_run_ls(self):
        """
        This ensures that the architecture sets up correctly for a ``ls``
        command.  It is just a dry run, but all of the components need
        to be wired correctly for it to work.
        """
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        s3_prefix = 's3://' + self.bucket + '/'
        params = {
            'dir_op': True,
            'dryrun': True,
            'quiet': False,
            'src': s3_prefix,
            'dest': s3_prefix,
            'paths_type': 's3',
            'region': 'us-east-1'
        }
        cmd_arc = CommandArchitecture(self.session, 'ls', params)
        cmd_arc.create_instructions()
        cmd_arc.run()

    def test_run_mb(self):
        """
        This ensures that the architecture sets up correctly for a ``mb``
        command.  It is just a dry run, but all of the components need
        to be wired correctly for it to work.
        """
        s3_prefix = 's3://' + self.bucket + '/'
        params = {
            'dir_op': True,
            'dryrun': True,
            'quiet': False,
            'src': s3_prefix,
            'dest': s3_prefix,
            'paths_type': 's3',
            'region': 'us-east-1'
        }
        cmd_arc = CommandArchitecture(self.session, 'mb', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) make_bucket: %s" % s3_prefix
        self.assertIn(output_str, self.output.getvalue())

    def test_run_rb(self):
        """
        This ensures that the architecture sets up correctly for a ``rb``
        command.  It is just a dry run, but all of the components need
        to be wired correctly for it to work.
        """
        s3_prefix = 's3://' + self.bucket + '/'
        params = {
            'dir_op': True,
            'dryrun': True,
            'quiet': False,
            'src': s3_prefix,
            'dest': s3_prefix,
            'paths_type': 's3',
            'region': 'us-east-1'
        }
        cmd_arc = CommandArchitecture(self.session, 'rb', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) remove_bucket: %s" % s3_prefix
        self.assertIn(output_str, self.output.getvalue())
Example #40
def test_core_library(monkeypatch, caplog, kwargs, fmt):
    scenario = kwargs.get("scenario")
    devices_cnt = kwargs.get("devices_cnt")
    filters = kwargs.get("filters", ([], [], []))
    exp_res_tag = kwargs.get("exp_res_tag", None)
    log = kwargs.get("log", None)
    use_peeringdb = kwargs.get("use_peeringdb", None)

    def fake_get_network_driver(*args, **kwargs):
        return napalm_base.MockDriver

    monkeypatch.setattr(napalm_base, "get_network_driver",
                        fake_get_network_driver)

    if not log:
        caplog.set_level(logging.ERROR)
    else:
        caplog.set_level(logging.INFO)

    dir_path = os.path.join(os.path.dirname(__file__), "mocked_data", scenario)

    devices = []
    device_idx = 0
    for i in range(devices_cnt):
        device_idx += 1
        device = {"hostname": "router{}".format(device_idx),
                  "vendor": "ios",
                  "optional_args": {
                      "path": os.path.join(dir_path, "router{}".format(device_idx))
                  }}
        if use_peeringdb is not None:
            device["use_peeringdb"] = use_peeringdb
        devices.append(device)

    lib_class = MACToPeer_JSON if fmt == "json" else MACToPeer_pmacct

    out_file = StringIO()
    lib = lib_class(devices, filters, out_file, threads=2)
    lib.write_output()
    out_file.seek(0)
    res = out_file.getvalue()

    expected_results_file = "expected_results"
    if exp_res_tag:
        expected_results_file += "." + exp_res_tag
    expected_results_file += "." + fmt

    exp_res = ""
    try:
        with open(os.path.join(dir_path, expected_results_file), "r") as f:
            exp_res = f.read()
    except (IOError, OSError):
        with open(os.path.join(dir_path, expected_results_file + ".from_test"), "w") as f:
            f.write(res)

    if not log:
        assert not caplog.records
    else:
        for log_msg in log:
            assert log_msg in caplog.text

    if fmt == "json":
        res = json.loads(res)
        exp_res = json.loads(exp_res)

    assert res == exp_res
Example #41
    def testInvalidRegexp(self):

        tplt = 'Value boo (.$*)\n\nStart\n  ^$boo -> Next\n'
        self.assertRaises(textfsm.TextFSMTemplateError, textfsm.TextFSM,
                          StringIO(tplt))
Example #42
File: tests.py Project: ztomaz/weblate
 def test_commands(self):
     out = StringIO()
     call_command('billing_check', stdout=out)
     self.assertEqual(out.getvalue(), '')
     self.add_project()
     self.add_project()
     out = StringIO()
     call_command('billing_check', stdout=out)
     self.assertEqual(
         out.getvalue(), 'Following billings are over limit:\n'
         ' * test0, test1 (test)\n')
     out = StringIO()
     call_command('billing_check', '--valid', stdout=out)
     self.assertEqual(out.getvalue(), '')
     self.invoice.delete()
     out = StringIO()
     call_command('billing_check', stdout=out)
     self.assertEqual(
         out.getvalue(), 'Following billings are over limit:\n'
         ' * test0, test1 (test)\n'
         'Following billings are past due date:\n'
         ' * test0, test1 (test)\n')
     call_command('billing_check', '--notify', stdout=out)
     self.assertEqual(len(mail.outbox), 1)
Example #43
    def testRuleStartsWithCarrot(self):

        f = StringIO(
            'Value Beer (.)\nValue Wine (\\w)\n\nStart\n  A Simple line')
        self.assertRaises(textfsm.TextFSMTemplateError, textfsm.TextFSM, f)
Example #44
class CommandArchitectureTest(S3HandlerBaseTest):
    def setUp(self):
        super(CommandArchitectureTest, self).setUp()
        self.session = FakeSession()
        self.bucket = make_s3_files(self.session)
        self.loc_files = make_loc_files()
        self.output = StringIO()
        self.saved_stdout = sys.stdout
        sys.stdout = self.output

    def tearDown(self):
        self.output.close()
        sys.stdout = self.saved_stdout

        super(CommandArchitectureTest, self).tearDown()
        clean_loc_files(self.loc_files)
        s3_cleanup(self.bucket, self.session)

    def test_create_instructions(self):
        """
        This tests to make sure the instructions for any command are generated
        properly.
        """
        cmds = ['cp', 'mv', 'rm', 'sync', 'mb', 'rb']

        instructions = {
            'cp': ['file_generator', 's3_handler'],
            'mv': ['file_generator', 's3_handler'],
            'rm': ['file_generator', 's3_handler'],
            'sync': ['file_generator', 'comparator', 's3_handler'],
            'mb': ['s3_handler'],
            'rb': ['s3_handler']
        }

        params = {
            'filters': True,
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        for cmd in cmds:
            cmd_arc = CommandArchitecture(self.session, cmd, {
                'region': 'us-east-1',
                'endpoint_url': None,
                'verify_ssl': None
            })
            cmd_arc.create_instructions()
            self.assertEqual(cmd_arc.instructions, instructions[cmd])

        # Test if there is a filter.
        cmd_arc = CommandArchitecture(self.session, 'cp', params)
        cmd_arc.create_instructions()
        self.assertEqual(cmd_arc.instructions,
                         ['file_generator', 'filters', 's3_handler'])

    def test_run_cp_put(self):
        # This ensures that the architecture sets up correctly for a ``cp`` put
        # command.  It is just a dry run, but all of the components need
        # to be wired correctly for it to work.
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        local_file = self.loc_files[0]
        rel_local_file = os.path.relpath(local_file)
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': True,
            'quiet': False,
            'src': local_file,
            'dest': s3_file,
            'filters': filters,
            'paths_type': 'locals3',
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        cmd_arc = CommandArchitecture(self.session, 'cp', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) upload: %s to %s" % (rel_local_file, s3_file)
        self.assertIn(output_str, self.output.getvalue())

    def test_error_on_same_line_as_status(self):
        s3_file = 's3://' + 'bucket-does-not-exist' + '/' + 'text1.txt'
        local_file = self.loc_files[0]
        rel_local_file = os.path.relpath(local_file)
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': False,
            'quiet': False,
            'src': local_file,
            'dest': s3_file,
            'filters': filters,
            'paths_type': 'locals3',
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        cmd_arc = CommandArchitecture(self.session, 'cp', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        # Also, we need to verify that the error message is on the *same* line
        # as the upload failed line, to make it easier to track.
        output_str = (
            "upload failed: %s to %s Error: Bucket does not exist\n" %
            (rel_local_file, s3_file))
        self.assertIn(output_str, self.output.getvalue())

    def test_run_cp_get(self):
        # This ensures that the architecture sets up correctly for a ``cp`` get
        # command.  It is just a dry run, but all of the components need
        # to be wired correctly for it to work.
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        local_file = self.loc_files[0]
        rel_local_file = os.path.relpath(local_file)
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': True,
            'quiet': False,
            'src': s3_file,
            'dest': local_file,
            'filters': filters,
            'paths_type': 's3local',
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        cmd_arc = CommandArchitecture(self.session, 'cp', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) download: %s to %s" % (s3_file, rel_local_file)
        self.assertIn(output_str, self.output.getvalue())

    def test_run_cp_copy(self):
        # This ensures that the architecture sets up correctly for a ``cp`` copy
        # command.  It is just a dry run, but all of the components need
        # to be wired correctly for it to work.
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': True,
            'quiet': False,
            'src': s3_file,
            'dest': s3_file,
            'filters': filters,
            'paths_type': 's3s3',
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        cmd_arc = CommandArchitecture(self.session, 'cp', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) copy: %s to %s" % (s3_file, s3_file)
        self.assertIn(output_str, self.output.getvalue())

    def test_run_mv(self):
        # This ensures that the architecture sets up correctly for a ``mv``
        # command.  It is just a dry run, but all of the components need
        # to be wired correctly for it to work.
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': True,
            'quiet': False,
            'src': s3_file,
            'dest': s3_file,
            'filters': filters,
            'paths_type': 's3s3',
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        cmd_arc = CommandArchitecture(self.session, 'mv', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) move: %s to %s" % (s3_file, s3_file)
        self.assertIn(output_str, self.output.getvalue())

    def test_run_remove(self):
        # This ensures that the architecture sets up correctly for a ``rm``
        # command.  It is just a dry run, but all of the components need
        # to be wired correctly for it to work.
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        filters = [['--include', '*']]
        params = {
            'dir_op': False,
            'dryrun': True,
            'quiet': False,
            'src': s3_file,
            'dest': s3_file,
            'filters': filters,
            'paths_type': 's3',
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        cmd_arc = CommandArchitecture(self.session, 'rm', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) delete: %s" % s3_file
        self.assertIn(output_str, self.output.getvalue())

    def test_run_sync(self):
        # This ensures that the architecture sets up correctly for a ``sync``
        # command.  It is just a dry run, but all of the components need
        # to be wired correctly for it to work.
        s3_file = 's3://' + self.bucket + '/' + 'text1.txt'
        local_file = self.loc_files[0]
        s3_prefix = 's3://' + self.bucket + '/'
        local_dir = self.loc_files[3]
        rel_local_file = os.path.relpath(local_file)
        filters = [['--include', '*']]
        params = {
            'dir_op': True,
            'dryrun': True,
            'quiet': False,
            'src': local_dir,
            'dest': s3_prefix,
            'filters': filters,
            'paths_type': 'locals3',
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        cmd_arc = CommandArchitecture(self.session, 'sync', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) upload: %s to %s" % (rel_local_file, s3_file)
        self.assertIn(output_str, self.output.getvalue())

    def test_run_mb(self):
        # This ensures that the architecture sets up correctly for a ``mb``
        # command.  It is just a dry run, but all of the components need
        # to be wired correctly for it to work.
        s3_prefix = 's3://' + self.bucket + '/'
        params = {
            'dir_op': True,
            'dryrun': True,
            'quiet': False,
            'src': s3_prefix,
            'dest': s3_prefix,
            'paths_type': 's3',
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        cmd_arc = CommandArchitecture(self.session, 'mb', params)
        cmd_arc.create_instructions()
        cmd_arc.run()
        output_str = "(dryrun) make_bucket: %s" % s3_prefix
        self.assertIn(output_str, self.output.getvalue())

    def test_run_rb(self):
        # This ensures that the architecture sets up correctly for a ``rb``
        # command.  It is just a dry run, but all of the components need
        # to be wired correctly for it to work.
        s3_prefix = 's3://' + self.bucket + '/'
        params = {
            'dir_op': True,
            'dryrun': True,
            'quiet': False,
            'src': s3_prefix,
            'dest': s3_prefix,
            'paths_type': 's3',
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        cmd_arc = CommandArchitecture(self.session, 'rb', params)
        cmd_arc.create_instructions()
        rc = cmd_arc.run()
        output_str = "(dryrun) remove_bucket: %s" % s3_prefix
        self.assertIn(output_str, self.output.getvalue())
        self.assertEqual(rc, 0)

    def test_run_rb_nonzero_rc(self):
        # This ensures that the architecture returns a nonzero return code
        # when a ``rb`` command fails.  All of the components need to be
        # wired correctly for it to work.
        s3_prefix = 's3://' + self.bucket + '/'
        params = {
            'dir_op': True,
            'dryrun': False,
            'quiet': False,
            'src': s3_prefix,
            'dest': s3_prefix,
            'paths_type': 's3',
            'region': 'us-east-1',
            'endpoint_url': None,
            'verify_ssl': None
        }
        cmd_arc = CommandArchitecture(self.session, 'rb', params)
        cmd_arc.create_instructions()
        rc = cmd_arc.run()
        output_str = "remove_bucket failed: %s" % s3_prefix
        self.assertIn(output_str, self.output.getvalue())
        self.assertEqual(rc, 1)
Example #45
    def testParseFSMVariables(self):
        # Trivial template to initiate object.
        f = StringIO('Value unused (.)\n\nStart\n')
        t = textfsm.TextFSM(f)

        # Trivial entry
        buf = 'Value Filldown Beer (beer)\n\n'
        f = StringIO(buf)
        t._ParseFSMVariables(f)

        # Single variable with commented header.
        buf = '# Headline\nValue Filldown Beer (beer)\n\n'
        f = StringIO(buf)
        t._ParseFSMVariables(f)
        self.assertEqual(str(t._GetValue('Beer')),
                         'Value Filldown Beer (beer)')

        # Multiple variables.
        buf = ('# Headline\n'
               'Value Filldown Beer (beer)\n'
               'Value Required Spirits (whiskey)\n'
               'Value Filldown Wine (claret)\n'
               '\n')
        t._line_num = 0
        f = StringIO(buf)
        t._ParseFSMVariables(f)
        self.assertEqual(str(t._GetValue('Beer')),
                         'Value Filldown Beer (beer)')
        self.assertEqual(str(t._GetValue('Spirits')),
                         'Value Required Spirits (whiskey)')
        self.assertEqual(str(t._GetValue('Wine')),
                         'Value Filldown Wine (claret)')

        # Multiple variables.
        buf = ('# Headline\n'
               'Value Filldown Beer (beer)\n'
               ' # A comment\n'
               'Value Spirits ()\n'
               'Value Filldown,Required Wine ((c|C)laret)\n'
               '\n')

        f = StringIO(buf)
        t._ParseFSMVariables(f)
        self.assertEqual(str(t._GetValue('Beer')),
                         'Value Filldown Beer (beer)')
        self.assertEqual(str(t._GetValue('Spirits')), 'Value Spirits ()')
        self.assertEqual(str(t._GetValue('Wine')),
                         'Value Filldown,Required Wine ((c|C)laret)')

        # Malformed variables.
        buf = 'Value Beer (beer) beer'
        f = StringIO(buf)
        self.assertRaises(textfsm.TextFSMTemplateError, t._ParseFSMVariables,
                          f)

        buf = 'Value Filldown, Required Spirits ()'
        f = StringIO(buf)
        self.assertRaises(textfsm.TextFSMTemplateError, t._ParseFSMVariables,
                          f)
        buf = 'Value filldown,Required Wine ((c|C)laret)'
        f = StringIO(buf)
        self.assertRaises(textfsm.TextFSMTemplateError, t._ParseFSMVariables,
                          f)

        # Values that look bad but are OK.
        buf = ('# Headline\n'
               'Value Filldown Beer (bee(r), (and) (M)ead$)\n'
               '# A comment\n'
               'Value Spirits,and,some ()\n'
               'Value Filldown,Required Wine ((c|C)laret)\n'
               '\n')
        f = StringIO(buf)
        t._ParseFSMVariables(f)
        self.assertEqual(str(t._GetValue('Beer')),
                         'Value Filldown Beer (bee(r), (and) (M)ead$)')
        self.assertEqual(str(t._GetValue('Spirits,and,some')),
                         'Value Spirits,and,some ()')
        self.assertEqual(str(t._GetValue('Wine')),
                         'Value Filldown,Required Wine ((c|C)laret)')

        # Variable name too long.
        buf = ('Value Filldown '
               'nametoolong_nametoolong_nametoolo_nametoolong_nametoolong '
               '(beer)\n\n')
        f = StringIO(buf)
        self.assertRaises(textfsm.TextFSMTemplateError, t._ParseFSMVariables,
                          f)
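The private parser above is exercised piecemeal; a complete template goes through the public constructor the same way, via any file-like object. A minimal sketch, assuming the textfsm package:

import textfsm
from io import StringIO

# one value, one state; each matching line emits a record
tplt = ('Value Beer (\\w+)\n'
        '\n'
        'Start\n'
        '  ^${Beer} -> Record\n')
t = textfsm.TextFSM(StringIO(tplt))
print(t.header)                          # ['Beer']
print(t.ParseText('stout\nporter\n'))    # [['stout'], ['porter']]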
Example #46
0
    def testParseTextToDicts(self):

        # Trivial FSM, no records produced.
        tplt = 'Value unused (.)\n\nStart\n  ^Trivial SFM\n'
        t = textfsm.TextFSM(StringIO(tplt))

        data = 'Non-matching text\nline1\nline 2\n'
        self.assertFalse(t.ParseText(data))
        # Matching.
        data = 'Matching text\nTrivial SFM\nline 2\n'
        self.assertFalse(t.ParseText(data))

        # Simple FSM, One Variable no options.
        tplt = 'Value boo (.*)\n\nStart\n  ^$boo -> Next.Record\n\nEOF\n'
        t = textfsm.TextFSM(StringIO(tplt))

        # Matching one line.
        # Tests 'Next' & 'Record' actions.
        data = 'Matching text'
        result = t.ParseTextToDicts(data)
        self.assertEqual(str(result), "[{'boo': 'Matching text'}]")

        # Matching two lines. Resetting the FSM before parsing.
        t.Reset()
        data = 'Matching text\nAnd again'
        result = t.ParseTextToDicts(data)
        self.assertEqual(str(result),
                         "[{'boo': 'Matching text'}, {'boo': 'And again'}]")

        # Two Variables and singular options.
        tplt = ('Value Required boo (one)\nValue Filldown hoo (two)\n\n'
                'Start\n  ^$boo -> Next.Record\n  ^$hoo -> Next.Record\n\n'
                'EOF\n')
        t = textfsm.TextFSM(StringIO(tplt))

        # Matching two lines. Only one record is returned due to the 'Required' flag.
        # Tests 'Filldown' and 'Required' options.
        data = 'two\none'
        result = t.ParseTextToDicts(data)
        self.assertEqual(str(result), "[{'hoo': 'two', 'boo': 'one'}]")

        t = textfsm.TextFSM(StringIO(tplt))
        # Matching three lines. Two records are returned due to the 'Filldown' flag.
        data = 'two\none\none'
        t.Reset()
        result = t.ParseTextToDicts(data)
        self.assertEqual(
            str(result),
            "[{'hoo': 'two', 'boo': 'one'}, {'hoo': 'two', 'boo': 'one'}]")

        # Multiple Variables and options.
        tplt = ('Value Required,Filldown boo (one)\n'
                'Value Filldown,Required hoo (two)\n\n'
                'Start\n  ^$boo -> Next.Record\n  ^$hoo -> Next.Record\n\n'
                'EOF\n')
        t = textfsm.TextFSM(StringIO(tplt))
        data = 'two\none\none'
        result = t.ParseTextToDicts(data)
        self.assertEqual(
            str(result),
            "[{'hoo': 'two', 'boo': 'one'}, {'hoo': 'two', 'boo': 'one'}]")
Example #47
0
    def _rest_request(self,
                      path='',
                      method="GET",
                      args=None,
                      body=None,
                      headers=None,
                      optionalpassword=None,
                      providerheader=None):
        """Rest request for blob store client

        :param path: path within tree
        :type path: str
        :param method: method to be implemented
        :type method: str
        :param args: the arguments for method
        :type args: dict
        :param body: body payload for the rest call
        :type body: dict
        :param headers: provide additional headers
        :type headers: dict
        :param optionalpassword: provide password for authentication
        :type optionalpassword: str
        :param providerheader: provider id for the header
        :type providerheader: str
        :return: a RestResponse object

        """
        self.updatecredentials()
        headers = self._get_req_headers(headers, providerheader,
                                        optionalpassword)

        reqpath = path.replace('//', '/')

        oribody = body
        if body is not None:
            if isinstance(body, (dict, list)):
                headers['Content-Type'] = 'application/json'
                body = json.dumps(body)
            else:
                headers['Content-Type'] = 'application/x-www-form-urlencoded'
                body = urlencode(body)

            if method == 'PUT':
                resp = self._rest_request(path=path)

                try:
                    if resp.getheader('content-encoding') == 'gzip':
                        buf = StringIO()
                        gfile = gzip.GzipFile(mode='wb', fileobj=buf)

                        try:
                            gfile.write(str(body))
                        finally:
                            gfile.close()

                        compresseddata = buf.getvalue()
                        if compresseddata:
                            data = bytearray()
                            data.extend(memoryview(compresseddata))
                            body = data
                except BaseException as excp:
                    LOGGER.error('Error occurred while compressing body: %s',
                                 excp)
                    raise

            headers['Content-Length'] = len(body)

        if args:
            if method == 'GET':
                reqpath += '?' + urlencode(args)
            elif method == 'PUT' or method == 'POST' or method == 'PATCH':
                headers['Content-Type'] = 'application/x-www-form-urlencoded'
                body = urlencode(args)

        str1 = '{} {} {}\r\n'.format(method, reqpath,
                                     Blobstore2RestClient._http_vsn_str)
        str1 += 'Host: \r\n'
        str1 += 'Accept-Encoding: identity\r\n'
        for header, value in headers.items():
            str1 += '{}: {}\r\n'.format(header, value)

        str1 += '\r\n'

        if body and len(body) > 0:
            if isinstance(body, bytearray):
                str1 = str1.encode("ASCII") + body
            else:
                str1 += body

        bs2 = BlobStore2()

        if not isinstance(str1, bytearray):
            str1 = str1.encode("ASCII")
        if LOGGER.isEnabledFor(logging.DEBUG):
            try:
                logbody = None
                if body:
                    if body[0] == '{':
                        logbody = body
                    else:
                        raise KeyError()
                if method in ['POST', 'PATCH']:
                    debugjson = json.loads(body)
                    if 'Password' in debugjson.keys():
                        debugjson['Password'] = '******'
                    if 'OldPassword' in debugjson.keys():
                        debugjson['OldPassword'] = '******'
                    if 'NewPassword' in debugjson.keys():
                        debugjson['NewPassword'] = '******'
                    logbody = json.dumps(debugjson)

                LOGGER.debug('Blobstore REQUEST: %s\n\tPATH: %s\n\tHEADERS: '
                             '%s\n\tBODY: %s', method, path, str(headers), logbody)
            except:
                LOGGER.debug('Blobstore REQUEST: %s\n\tPATH: %s\n\tHEADERS: '
                             '%s\n\tBODY: %s', method, path, str(headers),
                             'binary body')

        inittime = time.time()

        for idx in range(5):
            try:
                resp_txt = bs2.rest_immediate(str1)
                break
            except Blob2OverrideError as excp:
                if idx == 4:
                    raise Blob2OverrideError(2)
                else:
                    continue

        endtime = time.time()

        bs2.channel.close()

        LOGGER.info("iLO Response Time to %s: %s secs.", path,
                    str(endtime - inittime))
        # Dummy response to support a bad host response
        if len(resp_txt) == 0:
            resp_txt = "HTTP/1.1 500 Not Found\r\nAllow: " \
            "GET\r\nCache-Control: no-cache\r\nContent-length: " \
            "0\r\nContent-type: text/html\r\nDate: Tues, 1 Apr 2025 " \
            "00:00:01 GMT\r\nServer: " \
            "HP-iLO-Server/1.30\r\nX_HP-CHRP-Service-Version: 1.0.3\r\n\r\n\r\n"

        restreq = RestRequest(path, method, data=body, url=self.base_url)
        rest_response = RisRestResponse(restreq, resp_txt)

        if rest_response.status in range(300, 399) and \
                                                    rest_response.status != 304:
            newloc = rest_response.getheader("location")
            newurl = urlparse(newloc)

            rest_response = self._rest_request(newurl.path, method, args, \
                               oribody, headers, optionalpassword, \
                               providerheader)

        try:
            if rest_response.getheader('content-encoding') == 'gzip':
                if hasattr(gzip, "decompress"):
                    rest_response.read = gzip.decompress(rest_response.ori)
                else:
                    compressedfile = StringIO(rest_response.ori)
                    decompressedfile = gzip.GzipFile(fileobj=compressedfile)
                    rest_response.read = decompressedfile.read()
        except Exception:
            pass
        if LOGGER.isEnabledFor(logging.DEBUG):
            headerstr = ''
            headerget = rest_response.getheaders()
            for header in headerget:
                headerstr += '\t' + header + ': ' + headerget[header] + '\n'
            try:
                LOGGER.debug('Blobstore RESPONSE for %s:\nCode: %s\nHeaders:'\
                            '\n%s\nBody of %s: %s', rest_response.request.path,\
                            str(rest_response._http_response.status)+ ' ' + \
                            rest_response._http_response.reason, \
                            headerstr, rest_response.request.path, \
                            rest_response.read)
            except:
                LOGGER.debug('Blobstore RESPONSE for %s:\nCode:%s', \
                             rest_response.request.path, rest_response)
        return rest_response
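The compression branch above writes gzip data into an in-memory buffer before shipping it; note that under Python 3 the buffer must be a BytesIO, since gzip produces bytes that a text-mode StringIO will not accept. The round trip in isolation:

import gzip
from io import BytesIO

buf = BytesIO()                      # BytesIO, not StringIO, on Python 3
with gzip.GzipFile(mode='wb', fileobj=buf) as gfile:
    gfile.write(b'{"Password": "secret"}')

compressed = buf.getvalue()
assert gzip.decompress(compressed) == b'{"Password": "secret"}'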
Example #48
0
    def testParseFSMState(self):

        f = StringIO('Value Beer (.)\nValue Wine (\\w)\n\nStart\n')
        t = textfsm.TextFSM(f)

        # Fails as we already have a 'Start' state.
        buf = 'Start\n  ^.\n'
        f = StringIO(buf)
        self.assertRaises(textfsm.TextFSMTemplateError, t._ParseFSMState, f)

        # Remove start so we can test new Start state.
        t.states = {}

        # Single state.
        buf = '# Headline\nStart\n  ^.\n\n'
        f = StringIO(buf)
        t._ParseFSMState(f)
        self.assertEqual(str(t.states['Start'][0]), '  ^.')
        self.assertRaises(IndexError, lambda: t.states['Start'][1])

        # Multiple states.
        buf = '# Headline\nStart\n  ^.\n  ^Hello World\n  ^Last-[Cc]ha$$nge\n'
        f = StringIO(buf)
        t._line_num = 0
        t.states = {}
        t._ParseFSMState(f)
        self.assertEqual(str(t.states['Start'][0]), '  ^.')
        self.assertEqual(str(t.states['Start'][1]), '  ^Hello World')
        self.assertEqual(t.states['Start'][1].line_num, 4)
        self.assertEqual(str(t.states['Start'][2]), '  ^Last-[Cc]ha$$nge')
        self.assertRaises(IndexError, lambda: t.states['Start'][3])

        t.states = {}
        # Malformed states.
        buf = 'St%art\n  ^.\n  ^Hello World\n'
        f = StringIO(buf)
        self.assertRaises(textfsm.TextFSMTemplateError, t._ParseFSMState, f)

        buf = 'Start\n^.\n  ^Hello World\n'
        f = StringIO(buf)
        self.assertRaises(textfsm.TextFSMTemplateError, t._ParseFSMState, f)

        buf = '  Start\n  ^.\n  ^Hello World\n'
        f = StringIO(buf)
        self.assertRaises(textfsm.TextFSMTemplateError, t._ParseFSMState, f)

        # Multiple variables and substitution (depends on _ParseFSMVariables).
        buf = ('# Headline\nStart\n  ^.${Beer}${Wine}.\n'
               '  ^Hello $Beer\n  ^Last-[Cc]ha$$nge\n')
        f = StringIO(buf)
        t.states = {}
        t._ParseFSMState(f)
        self.assertEqual(str(t.states['Start'][0]), '  ^.${Beer}${Wine}.')
        self.assertEqual(str(t.states['Start'][1]), '  ^Hello $Beer')
        self.assertEqual(str(t.states['Start'][2]), '  ^Last-[Cc]ha$$nge')
        self.assertRaises(IndexError, lambda: t.states['Start'][3])

        t.states['bogus'] = []

        # State name too long (>32 char).
        buf = 'rnametoolong_nametoolong_nametoolong_nametoolong_nametoolo\n  ^.\n\n'
        f = StringIO(buf)
        self.assertRaises(textfsm.TextFSMTemplateError, t._ParseFSMState, f)
Example #49
0
 def setUp(self):
     self.io = StringIO()
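A fresh buffer per test is the usual arrangement; pairing setUp with addCleanup closes the buffer without needing a separate tearDown. A minimal sketch:

import unittest
from io import StringIO

class BufferedTest(unittest.TestCase):
    def setUp(self):
        self.io = StringIO()
        self.addCleanup(self.io.close)   # runs after each test

    def test_write(self):
        self.io.write('ok')
        self.assertEqual(self.io.getvalue(), 'ok')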
Example #50
0
    def _rest_request(self, path, method='GET', args=None, body=None,
                      headers=None, optionalpassword=None, providerheader=None):
        """Rest request main function

        :param path: path within tree
        :type path: str
        :param method: method to be implemented
        :type method: str
        :param args: the arguments for method
        :type args: dict
        :param body: body payload for the rest call
        :type body: dict
        :param headers: provide additional headers
        :type headers: dict
        :param optionalpassword: provide password for authentication
        :type optionalpassword: str
        :param providerheader: provider id for the header
        :type providerheader: str
        :returns: a RestResponse object

        """
        files = None
        request_args = {}
        proxy = 'redfish.dmtf.org' in path
        headers = {} if proxy else self._get_req_headers(
            headers, providerheader, optionalpassword)
        reqpath = path.replace('//', '/') if not proxy else path

        if body is not None:
            if body and isinstance(body, list) and isinstance(body[0], tuple):
                files = body
                body = None
            elif isinstance(body, (dict, list)):
                headers['Content-Type'] = 'application/json'
                body = json.dumps(body)
            elif not files:
                headers['Content-Type'] = 'application/x-www-form-urlencoded'
                body = urlencode(body)

            if method == 'PUT':
                resp = self._rest_request(method='HEAD', path=path)

                try:
                    if resp.getheader('content-encoding') == 'gzip':
                        buf = StringIO()
                        gfile = gzip.GzipFile(mode='wb', fileobj=buf)

                        try:
                            gfile.write(str(body))
                        finally:
                            gfile.close()

                        compresseddata = buf.getvalue()
                        if compresseddata:
                            data = bytearray()
                            data.extend(memoryview(compresseddata))
                            body = data
                except BaseException as excp:
                    LOGGER.error('Error occurred while compressing body: %s',
                                 excp)
                    raise

        if args:
            if method == 'GET':
                reqpath += '?' + urlencode(args)
            elif method == 'PUT' or method == 'POST' or method == 'PATCH':
                headers['Content-Type'] = 'application/x-www-form-urlencoded'
                body = urlencode(args)

        restreq = RestRequest(path, method, data=files if files else body, \
                              url=self.base_url)

        attempts = 1
        restresp = None
        while attempts <= self.MAX_RETRY:
            if LOGGER.isEnabledFor(logging.DEBUG):
                try:
                    logbody = None
                    if restreq.body:
                        if restreq.body[0] == '{':
                            logbody = restreq.body
                        else:
                            raise KeyError()
                    if restreq.method in ['POST', 'PATCH']:
                        debugjson = json.loads(restreq.body)
                        if 'Password' in debugjson.keys():
                            debugjson['Password'] = '******'
                        if 'OldPassword' in debugjson.keys():
                            debugjson['OldPassword'] = '******'
                        if 'NewPassword' in debugjson.keys():
                            debugjson['NewPassword'] = '******'
                        logbody = json.dumps(debugjson)
                    LOGGER.debug('HTTP REQUEST: %s\n\tPATH: %s\n\t'\
                                'HEADERS: %s\n\tBODY: %s', restreq.method, restreq.path, headers, \
                                 logbody)
                except:
                    LOGGER.debug('HTTP REQUEST: %s\n\tPATH: %s\n\tBODY: %s', restreq.method, \
                                                                restreq.path, 'binary body')
            LOGGER.info('Attempt %s of %s', attempts, path)

            try:
                while True:
                    if self._conn is None or proxy:
                        self.__init_connection(proxy=proxy)

                    inittime = time.time()
                    reqfullpath = self.base_url + reqpath if not proxy else reqpath
                    urllib3.disable_warnings()
                    request_args['headers'] = headers
                    if files:
                        request_args['fields'] = files
                    else:
                        request_args['body'] = body
                    resp = self._conn(method, reqfullpath, **request_args)

                    self._conn_count += 1
                    endtime = time.time()
                    LOGGER.info('Response Time to %s: %s seconds.', restreq.path, \
                                                                            str(endtime-inittime))

                    if resp.status not in list(range(
                            300, 399)) or resp.status == 304:
                        break

                    newloc = resp.headers.get('location')
                    newurl = urlparse(newloc)

                    reqpath = newurl.path
                    self.__init_connection(newurl, proxy=proxy)

                restresp = RestResponse(restreq, resp)

            except Exception as excp:
                attempts = attempts + 1
                LOGGER.info('Retrying %s [%s]', path, excp)
                time.sleep(1)

                self.__init_connection(proxy=proxy)
                continue
            else:
                break

        if attempts <= self.MAX_RETRY:
            if LOGGER.isEnabledFor(logging.DEBUG):
                headerstr = ''
                if restresp is not None:
                    respheader = restresp.getheaders()
                    for kiy, headerval in respheader.items():
                        headerstr += '\t' + kiy + ': ' + headerval + '\n'
                    try:
                        LOGGER.debug('HTTP RESPONSE for %s:\nCode:%s\nHeaders:'\
                                '\n%s\nBody Response of %s: %s', restresp.request.path,\
                                str(restresp._http_response.status)+ ' ' + \
                                restresp._http_response.reason, \
                                headerstr, restresp.request.path, restresp.read\
                                .encode('ascii', 'ignore'))
                    except:
                        LOGGER.debug('HTTP RESPONSE:\nCode:%s', restresp)
                else:
                    LOGGER.debug('HTTP RESPONSE: No HTTP Response obtained')

            return restresp
        else:
            raise RetriesExhaustedError()
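Stripped of the iLO specifics, the retry scaffolding above is a bounded loop that sleeps, re-initializes the connection, and re-enters; a generic sketch of the same control flow (not the library's API):

import time

def request_with_retry(send, max_retry=5, delay=1):
    # send() is any zero-argument callable that performs one attempt
    attempts = 1
    while attempts <= max_retry:
        try:
            return send()
        except Exception:
            attempts += 1
            time.sleep(delay)   # back off before the next attempt
    raise RuntimeError('retries exhausted')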
Example #51
0
 def _FormattedValue(self, t):
     # FormattedValue(expr value, int? conversion, expr? format_spec)
     self.write("f")
     string = StringIO()
     self._fstring_JoinedStr(t, string.write)
     self.write(repr(string.getvalue()))
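Here the StringIO is a write sink: the visitor emits string fragments through the bound string.write method and the caller reads the accumulated result back. The same callback pattern in isolation:

from io import StringIO

def emit_fragments(write):
    # any callable accepting a str works as the sink
    write('hello, ')
    write('world')

buf = StringIO()
emit_fragments(buf.write)
print(repr(buf.getvalue()))   # 'hello, world'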
Example #52
0
def formatargspec(function,
                  args,
                  varargs=None,
                  varkw=None,
                  defaults=None,
                  kwonlyargs=(),
                  kwonlydefaults={},
                  annotations={}):
    # type: (Callable, Tuple[str, ...], str, str, Any, Tuple, Dict, Dict[str, Any]) -> str
    """Return a string representation of an ``inspect.FullArgSpec`` tuple.

    An enhanced version of ``inspect.formatargspec()`` that handles typing
    annotations better.
    """
    warnings.warn(
        'formatargspec() is now deprecated.  '
        'Please use sphinx.util.inspect.Signature instead.',
        RemovedInSphinx20Warning,
        stacklevel=2)

    def format_arg_with_annotation(name):
        # type: (str) -> str
        if name in annotations:
            return '%s: %s' % (name, format_annotation(get_annotation(name)))
        return name

    def get_annotation(name):
        # type: (str) -> str
        value = annotations[name]
        if isinstance(value, string_types):
            return introspected_hints.get(name, value)
        else:
            return value

    try:
        introspected_hints = (
            typing.get_type_hints(function)  # type: ignore
            if typing and hasattr(function, '__code__') else {})
    except Exception:
        introspected_hints = {}

    fd = StringIO()
    fd.write('(')

    formatted = []
    defaults_start = len(args) - len(defaults) if defaults else len(args)

    for i, arg in enumerate(args):
        arg_fd = StringIO()
        if isinstance(arg, list):
            # support tupled arguments list (only for py2): def foo((x, y))
            arg_fd.write('(')
            arg_fd.write(format_arg_with_annotation(arg[0]))
            for param in arg[1:]:
                arg_fd.write(', ')
                arg_fd.write(format_arg_with_annotation(param))
            arg_fd.write(')')
        else:
            arg_fd.write(format_arg_with_annotation(arg))
            if defaults and i >= defaults_start:
                arg_fd.write(' = ' if arg in annotations else '=')
                arg_fd.write(object_description(
                    defaults[i - defaults_start]))  # type: ignore
        formatted.append(arg_fd.getvalue())

    if varargs:
        formatted.append('*' + format_arg_with_annotation(varargs))

    if kwonlyargs:
        if not varargs:
            formatted.append('*')

        for kwarg in kwonlyargs:
            arg_fd = StringIO()
            arg_fd.write(format_arg_with_annotation(kwarg))
            if kwonlydefaults and kwarg in kwonlydefaults:
                arg_fd.write(' = ' if kwarg in annotations else '=')
                arg_fd.write(object_description(
                    kwonlydefaults[kwarg]))  # type: ignore
            formatted.append(arg_fd.getvalue())

    if varkw:
        formatted.append('**' + format_arg_with_annotation(varkw))

    fd.write(', '.join(formatted))
    fd.write(')')

    if 'return' in annotations:
        fd.write(' -> ')
        fd.write(format_annotation(get_annotation('return')))

    return fd.getvalue()
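As the deprecation warning notes, the Signature machinery supersedes this helper; for plain functions the standard library alone produces an equivalent string. A sketch (not Sphinx's implementation):

import inspect

def greet(name: str, punct: str = '!') -> str:
    return name + punct

print(str(inspect.signature(greet)))
# (name: str, punct: str = '!') -> str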
Example #53
0
    def __call__(self, model, output_filename, solver_capability, io_options):

        # Make sure not to modify the user's dictionary; they may be
        # reusing it outside of this call
        io_options = dict(io_options)

        # NOTE: io_options is a simple dictionary of keyword-value
        #       pairs specific to this writer.
        symbolic_solver_labels = \
            io_options.pop("symbolic_solver_labels", False)
        labeler = io_options.pop("labeler", None)

        # How much effort do we want to put into ensuring the
        # LP file is written deterministically for a Pyomo model:
        #    0 : None
        #    1 : sort keys of indexed components (default)
        #    2 : sort keys AND sort names (over declaration order)
        file_determinism = io_options.pop("file_determinism", 1)

        sorter = SortComponents.unsorted
        if file_determinism >= 1:
            sorter = sorter | SortComponents.indices
            if file_determinism >= 2:
                sorter = sorter | SortComponents.alphabetical

        output_fixed_variable_bounds = \
            io_options.pop("output_fixed_variable_bounds", False)

        # Skip writing constraints whose body section is fixed (i.e.,
        # no variables)
        skip_trivial_constraints = \
            io_options.pop("skip_trivial_constraints", False)

        # Note: Baron does not allow specification of runtime
        #       options outside of this file, so we add support
        #       for them here
        solver_options = io_options.pop("solver_options", {})

        if len(io_options):
            raise ValueError(
                "ProblemWriter_baron_writer passed unrecognized io_options:\n\t"
                + "\n\t".join("%s = %s" % (k, v)
                              for k, v in iteritems(io_options)))

        if symbolic_solver_labels and (labeler is not None):
            raise ValueError("Baron problem writer: Using both the "
                             "'symbolic_solver_labels' and 'labeler' "
                             "I/O options is forbidden")

        if output_filename is None:
            output_filename = model.name + ".bar"

        output_file = open(output_filename, "w")

        # Process the options. Rely on baron to catch
        # and reset bad option values
        output_file.write("OPTIONS {\n")
        summary_found = False
        if len(solver_options):
            for key, val in iteritems(solver_options):
                if (key.lower() == 'summary'):
                    summary_found = True
                if key.endswith("Name"):
                    output_file.write(key + ": \"" + str(val) + "\";\n")
                else:
                    output_file.write(key + ": " + str(val) + ";\n")
        if not summary_found:
            # The 'summary' option defaults to 0 so that no summary file
            # is generated in the directory where the user calls baron,
            # unless the user explicitly asked for one above.
            output_file.write("Summary: 0;\n")
        output_file.write("}\n\n")

        if symbolic_solver_labels:
            v_labeler = AlphaNumericTextLabeler()
            c_labeler = AlphaNumericTextLabeler()
        elif labeler is None:
            v_labeler = NumericLabeler('x')
            c_labeler = NumericLabeler('c')
        else:
            # a user-supplied labeler serves both variables and constraints
            v_labeler = c_labeler = labeler

        symbol_map = SymbolMap()
        symbol_map.default_labeler = v_labeler
        #sm_bySymbol = symbol_map.bySymbol

        # Cache the list of model blocks so we don't have to call
        # model.block_data_objects() many many times, which is slow
        # for indexed blocks
        all_blocks_list = list(
            model.block_data_objects(active=True,
                                     sort=sorter,
                                     descend_into=True))
        active_components_data_var = {}
        #for block in all_blocks_list:
        #    tmp = active_components_data_var[id(block)] = \
        #          list(obj for obj in block.component_data_objects(Var,
        #                                                           sort=sorter,
        #                                                           descend_into=False))
        #    create_symbols_func(symbol_map, tmp, labeler)

        # GAH: Not sure this is necessary, and also it would break for
        #      non-mutable indexed params so I am commenting out for now.
        #for param_data in active_components_data(block, Param, sort=sorter):
        #instead of checking if param_data._mutable:
        #if not param_data.is_constant():
        #    create_symbol_func(symbol_map, param_data, labeler)

        #symbol_map_variable_ids = set(symbol_map.byObject.keys())
        #object_symbol_dictionary = symbol_map.byObject

        #
        # Go through the objectives and constraints and generate
        # the output so that we can obtain the set of referenced
        # variables.
        #
        equation_section_stream = StringIO()
        referenced_variable_ids, branching_priorities_suffixes = \
            self._write_equations_section(
                model,
                equation_section_stream,
                all_blocks_list,
                active_components_data_var,
                symbol_map,
                c_labeler,
                output_fixed_variable_bounds,
                skip_trivial_constraints,
                sorter)

        #
        # BINARY_VARIABLES, INTEGER_VARIABLES, POSITIVE_VARIABLES, VARIABLES
        #

        BinVars = []
        IntVars = []
        PosVars = []
        Vars = []
        for vid in referenced_variable_ids:
            name = symbol_map.byObject[vid]
            var_data = symbol_map.bySymbol[name]()

            if var_data.is_continuous():
                if var_data.has_lb() and \
                   (self._get_bound(var_data.lb) >= 0):
                    TypeList = PosVars
                else:
                    TypeList = Vars
            elif var_data.is_binary():
                TypeList = BinVars
            elif var_data.is_integer():
                TypeList = IntVars
            else:
                assert False
            TypeList.append(name)

        if len(BinVars) > 0:
            BinVars.sort()
            output_file.write('BINARY_VARIABLES ')
            output_file.write(", ".join(BinVars))
            output_file.write(';\n\n')

        if len(IntVars) > 0:
            IntVars.sort()
            output_file.write('INTEGER_VARIABLES ')
            output_file.write(", ".join(IntVars))
            output_file.write(';\n\n')

        PosVars.append('ONE_VAR_CONST__')
        PosVars.sort()
        output_file.write('POSITIVE_VARIABLES ')
        output_file.write(", ".join(PosVars))
        output_file.write(';\n\n')

        if len(Vars) > 0:
            Vars.sort()
            output_file.write('VARIABLES ')
            output_file.write(", ".join(Vars))
            output_file.write(';\n\n')

        #
        # LOWER_BOUNDS
        #

        lbounds = {}
        for vid in referenced_variable_ids:
            name = symbol_map.byObject[vid]
            var_data = symbol_map.bySymbol[name]()

            if var_data.fixed:
                if output_fixed_variable_bounds:
                    var_data_lb = var_data.value
                else:
                    var_data_lb = None
            else:
                var_data_lb = None
                if var_data.has_lb():
                    var_data_lb = self._get_bound(var_data.lb)

            if var_data_lb is not None:
                name_to_output = symbol_map.getSymbol(var_data)
                lb_string_template = '%s: %' + self._precision_string + ';\n'
                lbounds[name_to_output] = lb_string_template % (name_to_output,
                                                                var_data_lb)

        if len(lbounds) > 0:
            output_file.write("LOWER_BOUNDS{\n")
            output_file.write("".join(lbounds[key]
                                      for key in sorted(lbounds.keys())))
            output_file.write("}\n\n")
        lbounds = None

        #
        # UPPER_BOUNDS
        #

        ubounds = {}
        for vid in referenced_variable_ids:
            name = symbol_map.byObject[vid]
            var_data = symbol_map.bySymbol[name]()

            if var_data.fixed:
                if output_fixed_variable_bounds:
                    var_data_ub = var_data.value
                else:
                    var_data_ub = None
            else:
                var_data_ub = None
                if var_data.has_ub():
                    var_data_ub = self._get_bound(var_data.ub)

            if var_data_ub is not None:
                name_to_output = symbol_map.getSymbol(var_data)
                ub_string_template = '%s: %' + self._precision_string + ';\n'
                ubounds[name_to_output] = ub_string_template % (name_to_output,
                                                                var_data_ub)

        if len(ubounds) > 0:
            output_file.write("UPPER_BOUNDS{\n")
            output_file.write("".join(ubounds[key]
                                      for key in sorted(ubounds.keys())))
            output_file.write("}\n\n")
        ubounds = None

        #
        # BRANCHING_PRIORITIES
        #

        # Specifying priorities requires that the pyomo model has established an
        # EXTERNAL, float suffix called 'branching_priorities' on the model
        # object, indexed by the relevant variable
        BranchingPriorityHeader = False
        for suffix in branching_priorities_suffixes:
            for var_data, priority in iteritems(suffix):
                if id(var_data) not in referenced_variable_ids:
                    continue
                if priority is not None:
                    if not BranchingPriorityHeader:
                        output_file.write('BRANCHING_PRIORITIES{\n')
                        BranchingPriorityHeader = True
                    name_to_output = symbol_map.getSymbol(var_data)
                    output_file.write(name_to_output + ': ' + str(priority) +
                                      ';\n')

        if BranchingPriorityHeader:
            output_file.write("}\n\n")

        #
        # Now write the objective and equations section
        #
        output_file.write(equation_section_stream.getvalue())

        #
        # STARTING_POINT
        #
        output_file.write('STARTING_POINT{\nONE_VAR_CONST__: 1;\n')
        tmp = {}
        string_template = '%s: %' + self._precision_string + ';\n'
        for vid in referenced_variable_ids:
            name = symbol_map.byObject[vid]
            var_data = symbol_map.bySymbol[name]()

            starting_point = var_data.value
            if starting_point is not None:
                var_name = symbol_map.getSymbol(var_data)
                tmp[var_name] = string_template % (var_name, starting_point)

        output_file.write("".join(tmp[key] for key in sorted(tmp.keys())))
        output_file.write('}\n\n')

        output_file.close()

        return output_filename, symbol_map
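The equations section is rendered into a StringIO first so that the set of referenced variables is known before the variable declarations are emitted, and only then is the buffered text appended to the output. The ordering trick in miniature (the toy "equations" here are illustrative only):

from io import StringIO

body = StringIO()
used = set()
for name, expr in [('c1', 'x + y'), ('c2', 'x')]:
    used.update(v for v in expr.split() if v.isalpha())
    body.write('%s: %s;\n' % (name, expr))

out = StringIO()   # stands in for the real output file
out.write('VARIABLES %s;\n\n' % ', '.join(sorted(used)))
out.write(body.getvalue())   # declarations first, equations second
print(out.getvalue())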
Example #54
0
class TestCoreCLIOutput(unittest.TestCase):
    def setUp(self):
        self.io = StringIO()

    def tearDown(self):
        self.io.close()

    def test_out_json_valid(self):
        """
        The JSON output when the input is a dict should be the dict serialized to JSON
        """
        output_producer = OutputProducer(formatter=format_json, file=self.io)
        output_producer.out(
            CommandResultItem({
                'active': True,
                'id': '0b1f6472'
            }))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""{
  "active": true,
  "id": "0b1f6472"
}
"""))

    def test_out_json_from_ordered_dict(self):
        """
        The JSON output when the input is OrderedDict should be serialized to JSON
        """
        output_producer = OutputProducer(formatter=format_json, file=self.io)
        output_producer.out(
            CommandResultItem(OrderedDict({
                'active': True,
                'id': '0b1f6472'
            })))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""{
  "active": true,
  "id": "0b1f6472"
}
"""))

    def test_out_json_byte(self):
        output_producer = OutputProducer(formatter=format_json, file=self.io)
        output_producer.out(
            CommandResultItem({
                'active': True,
                'contents': b'0b1f6472'
            }))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""{
  "active": true,
  "contents": "0b1f6472"
}
"""))

    def test_out_json_byte_empty(self):
        output_producer = OutputProducer(formatter=format_json, file=self.io)
        output_producer.out(
            CommandResultItem({
                'active': True,
                'contents': b''
            }))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""{
  "active": true,
  "contents": ""
}
"""))

    # TABLE output tests

    def test_out_table(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        obj = OrderedDict()
        obj['active'] = True
        obj['val'] = '0b1f6472'
        output_producer.out(CommandResultItem(obj))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""Active    Val
--------  --------
True      0b1f6472
"""))

    def test_out_table_list_of_lists(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        obj = [['a', 'b'], ['c', 'd']]
        output_producer.out(CommandResultItem(obj))
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""Column1    Column2
---------  ---------
a          b
c          d
"""))

    def test_out_table_complex_obj(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        obj = OrderedDict()
        obj['name'] = 'qwerty'
        obj['val'] = '0b1f6472qwerty'
        obj['sub'] = {'1'}
        result_item = CommandResultItem(obj)
        output_producer.out(result_item)
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""Name    Val
------  --------------
qwerty  0b1f6472qwerty
"""))

    def test_out_table_no_query_no_transformer_order(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        obj = {
            'name': 'qwerty',
            'val': '0b1f6472qwerty',
            'active': True,
            'sub': '0b1f6472'
        }
        result_item = CommandResultItem(obj,
                                        table_transformer=None,
                                        is_query_active=False)
        output_producer.out(result_item)
        # Should be in alphabetical order, as there is no table transformer and the query is not active.
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""Active    Name    Sub       Val
--------  ------  --------  --------------
True      qwerty  0b1f6472  0b1f6472qwerty
"""))

    def test_out_table_no_query_yes_transformer_order(self):
        output_producer = OutputProducer(formatter=format_table, file=self.io)
        obj = {
            'name': 'qwerty',
            'val': '0b1f6472qwerty',
            'active': True,
            'sub': '0b1f6472'
        }

        def transformer(r):
            return OrderedDict([('Name', r['name']), ('Val', r['val']),
                                ('Active', r['active']), ('Sub', r['sub'])])

        result_item = CommandResultItem(obj,
                                        table_transformer=transformer,
                                        is_query_active=False)
        output_producer.out(result_item)
        # Should be table transformer order
        self.assertEqual(
            util.normalize_newlines(self.io.getvalue()),
            util.normalize_newlines("""Name    Val             Active    Sub
------  --------------  --------  --------
qwerty  0b1f6472qwerty  True      0b1f6472
"""))

    # TSV output tests
    def test_output_format_dict(self):
        obj = {}
        obj['A'] = 1
        obj['B'] = 2
        result = format_tsv(CommandResultItem(obj))
        self.assertEqual(result, '1\t2\n')

    def test_output_format_dict_sort(self):
        obj = {}
        obj['B'] = 1
        obj['A'] = 2
        result = format_tsv(CommandResultItem(obj))
        self.assertEqual(result, '2\t1\n')

    def test_output_format_ordereddict_not_sorted(self):
        obj = OrderedDict()
        obj['B'] = 1
        obj['A'] = 2
        result = format_tsv(CommandResultItem(obj))
        self.assertEqual(result, '1\t2\n')

    def test_output_format_ordereddict_list_not_sorted(self):
        obj1 = OrderedDict()
        obj1['B'] = 1
        obj1['A'] = 2

        obj2 = OrderedDict()
        obj2['A'] = 3
        obj2['B'] = 4
        result = format_tsv(CommandResultItem([obj1, obj2]))
        self.assertEqual(result, '1\t2\n3\t4\n')
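Everything in this suite leans on StringIO being file-like; any API that accepts a file argument can be captured the same way, print included. A minimal sketch independent of the CLI framework:

from io import StringIO

out = StringIO()
print('Active    Val', file=out)
print('--------  --------', file=out)
print('True      0b1f6472', file=out)
assert '0b1f6472' in out.getvalue()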
Example #55
0
def ceph_mons(ctx, config):
    """
    Deploy any additional mons
    """
    cluster_name = config['cluster']
    fsid = ctx.ceph[cluster_name].fsid
    num_mons = 1

    try:
        for remote, roles in ctx.cluster.remotes.items():
            for mon in [
                    r for r in roles
                    if teuthology.is_type('mon', cluster_name)(r)
            ]:
                c_, _, id_ = teuthology.split_role(mon)
                if c_ == cluster_name and id_ == ctx.ceph[
                        cluster_name].first_mon:
                    continue
                log.info('Adding %s on %s' % (mon, remote.shortname))
                num_mons += 1
                _shell(ctx, cluster_name, remote, [
                    'ceph',
                    'orch',
                    'daemon',
                    'add',
                    'mon',
                    remote.shortname + ':' + ctx.ceph[cluster_name].mons[mon] +
                    '=' + id_,
                ])
                ctx.daemons.register_daemon(
                    remote,
                    'mon',
                    id_,
                    cluster=cluster_name,
                    fsid=fsid,
                    logger=log.getChild(mon),
                    wait=False,
                    started=True,
                )

                with contextutil.safe_while(sleep=1, tries=180) as proceed:
                    while proceed():
                        log.info('Waiting for %d mons in monmap...' %
                                 (num_mons))
                        r = _shell(
                            ctx=ctx,
                            cluster_name=cluster_name,
                            remote=remote,
                            args=[
                                'ceph',
                                'mon',
                                'dump',
                                '-f',
                                'json',
                            ],
                            stdout=StringIO(),
                        )
                        j = json.loads(r.stdout.getvalue())
                        if len(j['mons']) == num_mons:
                            break

        # refresh our (final) ceph.conf file
        log.info('Generating final ceph.conf file...')
        r = _shell(
            ctx=ctx,
            cluster_name=cluster_name,
            remote=remote,
            args=[
                'ceph',
                'config',
                'generate-minimal-conf',
            ],
            stdout=StringIO(),
        )
        ctx.ceph[cluster_name].config_file = r.stdout.getvalue()

        yield

    finally:
        pass
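Within teuthology, _shell accepts a StringIO for stdout and the caller reads it back with getvalue(); outside that framework the equivalent capture uses the standard library. A sketch, assuming a reachable cluster with a local ceph binary:

import json
import subprocess

# hypothetical direct invocation of the same command
proc = subprocess.run(['ceph', 'mon', 'dump', '-f', 'json'],
                      capture_output=True, text=True, check=True)
monmap = json.loads(proc.stdout)
print(len(monmap['mons']), 'mons in monmap')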
Example #56
0
File: mps.py Project: CanLi1/pyomo-1
    def _print_model_MPS(self,
                         model,
                         output_file,
                         solver_capability,
                         labeler,
                         output_fixed_variable_bounds=False,
                         file_determinism=1,
                         row_order=None,
                         column_order=None,
                         skip_trivial_constraints=False,
                         force_objective_constant=False,
                         include_all_variable_bounds=False,
                         skip_objective_sense=False):

        symbol_map = SymbolMap()
        variable_symbol_map = SymbolMap()
        # NOTE: we use createSymbol instead of getSymbol because we
        #       know whether or not the symbol exists, and don't want
        #       the overhead of error/duplicate checking.
        # cache frequently called functions
        extract_variable_coefficients = self._extract_variable_coefficients
        create_symbol_func = SymbolMap.createSymbol
        create_symbols_func = SymbolMap.createSymbols
        alias_symbol_func = SymbolMap.alias
        variable_label_pairs = []

        sortOrder = SortComponents.unsorted
        if file_determinism >= 1:
            sortOrder = sortOrder | SortComponents.indices
            if file_determinism >= 2:
                sortOrder = sortOrder | SortComponents.alphabetical

        #
        # Create variable symbols (and cache the block list)
        #
        all_blocks = []
        variable_list = []
        for block in model.block_data_objects(active=True,
                                              sort=sortOrder):

            all_blocks.append(block)

            for vardata in block.component_data_objects(
                    Var,
                    active=True,
                    sort=sortOrder,
                    descend_into=False):

                variable_list.append(vardata)
                variable_label_pairs.append(
                    (vardata,create_symbol_func(symbol_map,
                                                vardata,
                                                labeler)))

        variable_symbol_map.addSymbols(variable_label_pairs)

        # and extract the information we'll need for rapid labeling.
        object_symbol_dictionary = symbol_map.byObject
        variable_symbol_dictionary = variable_symbol_map.byObject

        # sort the variable ordering using the user-supplied
        # column_order ComponentMap
        if column_order is not None:
            variable_list.sort(key=lambda _x: column_order[_x])

        # prepare to hold the sparse columns
        variable_to_column = ComponentMap(
            (vardata, i) for i, vardata in enumerate(variable_list))
        # add one position for ONE_VAR_CONSTANT
        column_data = [[] for i in xrange(len(variable_list)+1)]
        quadobj_data = []
        quadmatrix_data = []
        # constraint rhs
        rhs_data = []

        # print the model name and the source, so we know
        # roughly where the file came from
        output_file.write("* Source:     Pyomo MPS Writer\n")
        output_file.write("* Format:     Free MPS\n")
        output_file.write("*\n")
        output_file.write("NAME %s\n" % (model.name,))

        #
        # ROWS section
        #

        objective_label = None
        numObj = 0
        onames = []
        for block in all_blocks:

            gen_obj_repn = \
                getattr(block, "_gen_obj_repn", True)

            # Get/Create the ComponentMap for the repn
            if not hasattr(block,'_repn'):
                block._repn = ComponentMap()
            block_repn = block._repn
            for objective_data in block.component_data_objects(
                    Objective,
                    active=True,
                    sort=sortOrder,
                    descend_into=False):

                numObj += 1
                onames.append(objective_data.name)
                if numObj > 1:
                    raise ValueError(
                        "More than one active objective defined for input "
                        "model '%s'; Cannot write legal MPS file\n"
                        "Objectives: %s" % (model.name, ' '.join(onames)))

                objective_label = create_symbol_func(symbol_map,
                                                     objective_data,
                                                     labeler)

                symbol_map.alias(objective_data, '__default_objective__')
                if not skip_objective_sense:
                    output_file.write("OBJSENSE\n")
                    if objective_data.is_minimizing():
                        output_file.write(" MIN\n")
                    else:
                        output_file.write(" MAX\n")
                # This section is not recognized by the COIN-OR
                # MPS reader
                #output_file.write("OBJNAME\n")
                #output_file.write(" %s\n" % (objective_label))
                output_file.write("ROWS\n")
                output_file.write(" N  %s\n" % (objective_label))

                if gen_obj_repn:
                    repn = \
                        generate_standard_repn(objective_data.expr)
                    block_repn[objective_data] = repn
                else:
                    repn = block_repn[objective_data]

                degree = repn.polynomial_degree()
                if degree == 0:
                    logger.warning("Constant objective detected, replacing "
                          "with a placeholder to prevent solver failure.")
                    force_objective_constant = True
                elif degree is None:
                    raise RuntimeError(
                        "Cannot write legal MPS file. Objective '%s' "
                        "has nonlinear terms that are not quadratic."
                        % objective_data.name)

                constant = extract_variable_coefficients(
                    objective_label,
                    repn,
                    column_data,
                    quadobj_data,
                    variable_to_column)
                if force_objective_constant or (constant != 0.0):
                    # ONE_VAR_CONSTANT
                    column_data[-1].append((objective_label, constant))

        if numObj == 0:
            raise ValueError(
                "Cannot write legal MPS file: No objective defined "
                "for input model '%s'." % str(model))
        assert objective_label is not None

        # Constraints
        def constraint_generator():
            for block in all_blocks:

                gen_con_repn = \
                    getattr(block, "_gen_con_repn", True)

                # Get/Create the ComponentMap for the repn
                if not hasattr(block,'_repn'):
                    block._repn = ComponentMap()
                block_repn = block._repn

                for constraint_data in block.component_data_objects(
                        Constraint,
                        active=True,
                        sort=sortOrder,
                        descend_into=False):

                    if (not constraint_data.has_lb()) and \
                       (not constraint_data.has_ub()):
                        assert not constraint_data.equality
                        continue # non-binding, so skip

                    if constraint_data._linear_canonical_form:
                        repn = constraint_data.canonical_form()
                    elif gen_con_repn:
                        repn = generate_standard_repn(constraint_data.body)
                        block_repn[constraint_data] = repn
                    else:
                        repn = block_repn[constraint_data]

                    yield constraint_data, repn

        if row_order is not None:
            sorted_constraint_list = list(constraint_generator())
            sorted_constraint_list.sort(key=lambda x: row_order[x[0]])
            def yield_all_constraints():
                for constraint_data, repn in sorted_constraint_list:
                    yield constraint_data, repn
        else:
            yield_all_constraints = constraint_generator

        for constraint_data, repn in yield_all_constraints():

            degree = repn.polynomial_degree()

            # Write constraint
            if degree == 0:
                if skip_trivial_constraints:
                    continue
            elif degree is None:
                raise RuntimeError(
                    "Cannot write legal MPS file. Constraint '%s' "
                    "has nonlinear terms that are not quadratic."
                    % constraint_data.name)

            # Create symbol
            con_symbol = create_symbol_func(symbol_map,
                                            constraint_data,
                                            labeler)

            if constraint_data.equality:
                assert value(constraint_data.lower) == \
                    value(constraint_data.upper)
                label = 'c_e_' + con_symbol + '_'
                alias_symbol_func(symbol_map, constraint_data, label)
                output_file.write(" E  %s\n" % (label))
                offset = extract_variable_coefficients(
                    label,
                    repn,
                    column_data,
                    quadmatrix_data,
                    variable_to_column)
                bound = constraint_data.lower
                bound = _get_bound(bound) - offset
                rhs_data.append((label, _no_negative_zero(bound)))
            else:
                if constraint_data.has_lb():
                    if constraint_data.has_ub():
                        label = 'r_l_' + con_symbol + '_'
                    else:
                        label = 'c_l_' + con_symbol + '_'
                    alias_symbol_func(symbol_map, constraint_data, label)
                    output_file.write(" G  %s\n" % (label))
                    offset = extract_variable_coefficients(
                        label,
                        repn,
                        column_data,
                        quadmatrix_data,
                        variable_to_column)
                    bound = constraint_data.lower
                    bound = _get_bound(bound) - offset
                    rhs_data.append((label, _no_negative_zero(bound)))
                else:
                    assert constraint_data.has_ub()

                if constraint_data.has_ub():
                    if constraint_data.has_lb():
                        label = 'r_u_' + con_symbol + '_'
                    else:
                        label = 'c_u_' + con_symbol + '_'
                    alias_symbol_func(symbol_map, constraint_data, label)
                    output_file.write(" L  %s\n" % (label))
                    offset = extract_variable_coefficients(
                        label,
                        repn,
                        column_data,
                        quadmatrix_data,
                        variable_to_column)
                    bound = constraint_data.upper
                    bound = _get_bound(bound) - offset
                    rhs_data.append((label, _no_negative_zero(bound)))
                else:
                    assert constraint_data.has_lb()

        if len(column_data[-1]) > 0:
            # ONE_VAR_CONSTANT = 1
            output_file.write(" E  c_e_ONE_VAR_CONSTANT\n")
            column_data[-1].append(("c_e_ONE_VAR_CONSTANT",1))
            rhs_data.append(("c_e_ONE_VAR_CONSTANT",1))

        #
        # COLUMNS section
        #
        column_template = "     %s %s %"+self._precision_string+"\n"
        output_file.write("COLUMNS\n")
        cnt = 0
        for vardata in variable_list:
            col_entries = column_data[variable_to_column[vardata]]
            cnt += 1
            if len(col_entries) > 0:
                var_label = variable_symbol_dictionary[id(vardata)]
                for i, (row_label, coef) in enumerate(col_entries):
                    output_file.write(column_template
                                      % (var_label,
                                         row_label,
                                         _no_negative_zero(coef)))
            elif include_all_variable_bounds:
                # the column is empty, so add a (0 * var)
                # term to the objective
                # * Note that some solvers (e.g., Gurobi)
                #   will accept an empty column as a line
                #   with just the column name. This doesn't
                #   seem to work for CPLEX 12.6, so I am
                #   doing it this way so that it will work for both
                var_label = variable_symbol_dictionary[id(vardata)]
                output_file.write(column_template
                                  % (var_label,
                                     objective_label,
                                     0))

        assert cnt == len(column_data)-1
        if len(column_data[-1]) > 0:
            col_entries = column_data[-1]
            var_label = "ONE_VAR_CONSTANT"
            for i, (row_label, coef) in enumerate(col_entries):
                output_file.write(column_template
                                  % (var_label,
                                     row_label,
                                     _no_negative_zero(coef)))

        #
        # RHS section
        #
        rhs_template = "     RHS %s %"+self._precision_string+"\n"
        output_file.write("RHS\n")
        for i, (row_label, rhs) in enumerate(rhs_data):
            # note: we have already converted any -0 to 0 by this point
            output_file.write(rhs_template % (row_label, rhs))

        # SOS constraints
        SOSlines = StringIO()
        sos1 = solver_capability("sos1")
        sos2 = solver_capability("sos2")
        for block in all_blocks:

            for soscondata in block.component_data_objects(
                    SOSConstraint,
                    active=True,
                    sort=sortOrder,
                    descend_into=False):

                create_symbol_func(symbol_map, soscondata, labeler)

                level = soscondata.level
                if (level == 1 and not sos1) or \
                   (level == 2 and not sos2) or \
                   (level > 2):
                    raise ValueError(
                        "Solver does not support SOS level %s constraints" % (level))
                # This updates the referenced_variable_ids, just in case
                # there is a variable that only appears in an
                # SOSConstraint, in which case this needs to be known
                # before we write the "bounds" section (Cplex does not
                # handle this correctly, Gurobi does)
                self._printSOS(symbol_map,
                               labeler,
                               variable_symbol_map,
                               soscondata,
                               SOSlines)

        #
        # BOUNDS section
        #
        entry_template = "%s %"+self._precision_string+"\n"
        output_file.write("BOUNDS\n")
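        # MPS bound codes written below: FX fixed, BV binary, LI/UI
        # integer lower/upper, FR free, MI minus-infinity lower,
        # LO/UP continuous lower/upper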
        for vardata in variable_list:
            if include_all_variable_bounds or \
               (id(vardata) in self._referenced_variable_ids):
                var_label = variable_symbol_dictionary[id(vardata)]
                if vardata.fixed:
                    if not output_fixed_variable_bounds:
                        raise ValueError(
                            "Encountered a fixed variable (%s) inside an active "
                            "objective or constraint expression on model %s, which is "
                            "usually indicative of a preprocessing error. Use the "
                            "IO-option 'output_fixed_variable_bounds=True' to suppress "
                            "this error and fix the variable by overwriting its bounds "
                            "in the MPS file." % (vardata.name, model.name))
                    if vardata.value is None:
                        raise ValueError("Variable cannot be fixed to a value of None.")
                    output_file.write((" FX BOUND "+entry_template)
                                      % (var_label,
                                         _no_negative_zero(value(vardata.value))))
                    continue

                # convert any -0 to 0 to make baseline diffing easier
                vardata_lb = _no_negative_zero(_get_bound(vardata.lb))
                vardata_ub = _no_negative_zero(_get_bound(vardata.ub))
                unbounded_lb = not vardata.has_lb()
                unbounded_ub = not vardata.has_ub()
                treat_as_integer = False
                if vardata.is_binary():
                    if (vardata_lb == 0) and (vardata_ub == 1):
                        output_file.write(" BV BOUND %s\n" % (var_label))
                        continue
                    else:
                        # so we can add bounds
                        treat_as_integer = True
                if treat_as_integer or vardata.is_integer():
                    # Indicating unbounded integers is tricky because
                    # the only way to indicate a variable is integer
                    # is using the bounds section. Thus, we signify
                    # infinity with a large number (10E20)
                    # * Note: Gurobi allows values like inf and -inf
                    #         but CPLEX 12.6 does not, so I am just
                    #         using a large value
                    if not unbounded_lb:
                        output_file.write((" LI BOUND "+entry_template)
                                          % (var_label, vardata_lb))
                    else:
                        output_file.write(" LI BOUND %s -10E20\n" % (var_label))
                    if not unbounded_ub:
                        output_file.write((" UI BOUND "+entry_template)
                                          % (var_label, vardata_ub))
                    else:
                        output_file.write(" UI BOUND %s 10E20\n" % (var_label))
                else:
                    assert vardata.is_continuous()
                    if unbounded_lb and unbounded_ub:
                        output_file.write(" FR BOUND %s\n" % (var_label))
                    else:
                        if not unbounded_lb:
                            output_file.write((" LO BOUND "+entry_template)
                                              % (var_label, vardata_lb))
                        else:
                            output_file.write(" MI BOUND %s\n" % (var_label))

                        if not unbounded_ub:
                            output_file.write((" UP BOUND "+entry_template)
                                              % (var_label, vardata_ub))

        #
        # SOS section
        #
        output_file.write(SOSlines.getvalue())

        # Formatting of the next two sections comes from looking
        # at Gurobi and Cplex output

        #
        # QUADOBJ section
        #
        if len(quadobj_data) > 0:
            assert len(quadobj_data) == 1
            # it looks like the COIN-OR MPS Reader only
            # recognizes QUADOBJ (Gurobi and Cplex seem to
            # be okay with this)
            output_file.write("QUADOBJ\n")
            #output_file.write("QMATRIX\n")
            label, quad_terms = quadobj_data[0]
            assert label == objective_label
            # sort by the sorted tuple of symbols (or column assignments)
            # for the variables appearing in the term
            quad_terms = sorted(quad_terms,
                                key=lambda _x: \
                                  sorted((variable_to_column[_x[0][0]],
                                          variable_to_column[_x[0][1]])))
            for term, coef in quad_terms:
                # sort the term for consistent output
                var1, var2 = sorted(term,
                                    key=lambda _x: variable_to_column[_x])
                var1_label = variable_symbol_dictionary[id(var1)]
                var2_label = variable_symbol_dictionary[id(var2)]
                # Don't forget that a quadratic objective is always
                # assumed to be divided by 2
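                # e.g. (illustrative): a term 3*x**2 is written as a
                # diagonal entry of 6, and 4*x*y as the pair (x,y,4)
                # and (y,x,4), since the reader evaluates (1/2) x'Qx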
                if var1_label == var2_label:
                    output_file.write(column_template
                                      % (var1_label,
                                         var2_label,
                                         _no_negative_zero(coef * 2)))
                else:
                    # the matrix needs to be symmetric so split
                    # the coefficient (but remember it is divided by 2)
                    output_file.write(column_template
                                      % (var1_label,
                                         var2_label,
                                         _no_negative_zero(coef)))
                    output_file.write(column_template
                                      % (var2_label,
                                         var1_label,
                                         _no_negative_zero(coef)))

        #
        # QCMATRIX section
        #
        if len(quadmatrix_data) > 0:
            for row_label, quad_terms in quadmatrix_data:
                output_file.write("QCMATRIX    %s\n" % (row_label))
                # sort by the sorted tuple of symbols (or
                # column assignments) for the variables
                # appearing in the term
                quad_terms = sorted(quad_terms,
                                    key=lambda _x: \
                                      sorted((variable_to_column[_x[0][0]],
                                              variable_to_column[_x[0][1]])))
                for term, coef in quad_terms:
                    # sort the term for consistent output
                    var1, var2 = sorted(term,
                                        key=lambda _x: variable_to_column[_x])
                    var1_label = variable_symbol_dictionary[id(var1)]
                    var2_label = variable_symbol_dictionary[id(var2)]
                    if var1_label == var2_label:
                        output_file.write(column_template
                                          % (var1_label,
                                             var2_label,
                                             _no_negative_zero(coef)))
                    else:
                        # the matrix needs to be symmetric so split
                        # the coefficient
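                        # e.g. (illustrative): 4*x*y is written as the
                        # pair (x,y,2) and (y,x,2); QCMATRIX terms are
                        # not halved by the reader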
                        output_file.write(column_template
                                          % (var1_label,
                                             var2_label,
                                             _no_negative_zero(coef * 0.5)))
                        output_file.write(column_template
                                          % (var2_label,
                                             var1_label,
                                             _no_negative_zero(coef * 0.5)))

        output_file.write("ENDATA\n")

        # Clean up the symbol map to only contain variables referenced
        # in the active constraints **Note**: warm start method may
        # rely on this for choosing the set of potential warm start
        # variables
        vars_to_delete = set(variable_symbol_map.byObject.keys()) - \
                         set(self._referenced_variable_ids.keys())
        sm_byObject = symbol_map.byObject
        sm_bySymbol = symbol_map.bySymbol
        var_sm_byObject = variable_symbol_map.byObject
        for varid in vars_to_delete:
            symbol = var_sm_byObject[varid]
            del sm_byObject[varid]
            del sm_bySymbol[symbol]
        del variable_symbol_map

        return symbol_map
Example #57
0
class CaseFormatter(object):
    def __init__(self, translator, base, cases, results, default):
        self.translator = translator
        self.base = base
        self.cases = cases
        self.results = results
        self.default = default

        # HACK
        self.indent = 2
        self.multiline = len(cases) > 1
        self.buf = StringIO()

    def _trans(self, expr):
        return self.translator.translate(expr)

    def get_result(self):
        self.buf.seek(0)

        self.buf.write('CASE')
        if self.base is not None:
            base_str = self._trans(self.base)
            self.buf.write(' {0}'.format(base_str))

        for case, result in zip(self.cases, self.results):
            self._next_case()
            case_str = self._trans(case)
            result_str = self._trans(result)
            self.buf.write('WHEN {0} THEN {1}'.format(case_str, result_str))

        if self.default is not None:
            self._next_case()
            default_str = self._trans(self.default)
            self.buf.write('ELSE {0}'.format(default_str))

        if self.multiline:
            self.buf.write('\nEND')
        else:
            self.buf.write(' END')

        return self.buf.getvalue()

    def _next_case(self):
        if self.multiline:
            self.buf.write('\n{0}'.format(' ' * self.indent))
        else:
            self.buf.write(' ')
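
# Illustrative output of get_result() in multiline mode (len(cases) > 1):
#
#   CASE
#     WHEN <case_1> THEN <result_1>
#     WHEN <case_2> THEN <result_2>
#     ELSE <default>
#   END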
Example #58
0
    def _write_equations_section(self, model, output_file, all_blocks_list,
                                 active_components_data_var, symbol_map,
                                 c_labeler, output_fixed_variable_bounds,
                                 skip_trivial_constraints, sorter):

        referenced_variable_ids = set()

        def _skip_trivial(constraint_data):
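            # e.g. a constraint whose body reduces to a constant (no
            # free variables) is trivial and can be dropped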
            if skip_trivial_constraints:
                if constraint_data._linear_canonical_form:
                    repn = constraint_data.canonical_form()
                    if (repn.variables is None) or \
                       (len(repn.variables) == 0):
                        return True
                elif constraint_data.body.polynomial_degree() == 0:
                    return True
            return False

        #
        # Check for active suffixes to export
        #
        if isinstance(model, IBlock):
            suffix_gen = lambda b: ((suf.storage_key, suf) \
                                    for suf in pyomo.core.kernel.suffix.\
                                    export_suffix_generator(b,
                                                            active=True,
                                                            descend_into=False))
        else:
            suffix_gen = lambda b: pyomo.core.base.suffix.\
                         active_export_suffix_generator(b)
        r_o_eqns = []
        c_eqns = []
        l_eqns = []
        branching_priorities_suffixes = []
        for block in all_blocks_list:
            for name, suffix in suffix_gen(block):
                if name == 'branching_priorities':
                    branching_priorities_suffixes.append(suffix)
                elif name == 'constraint_types':
                    for constraint_data, constraint_type in iteritems(suffix):
                        if not _skip_trivial(constraint_data):
                            if constraint_type.lower() == 'relaxationonly':
                                r_o_eqns.append(constraint_data)
                            elif constraint_type.lower() == 'convex':
                                c_eqns.append(constraint_data)
                            elif constraint_type.lower() == 'local':
                                l_eqns.append(constraint_data)
                            else:
                                raise ValueError(
                                    "A suffix '%s' contained an invalid value: %s\n"
                                    "Choices are: [relaxationonly, convex, local]"
                                    % (suffix.name, constraint_type))
                else:
                    raise ValueError(
                        "The BARON writer can not export suffix with name '%s'. "
                        "Either remove it from block '%s' or deactivate it." %
                        (name, block.name))

        non_standard_eqns = r_o_eqns + c_eqns + l_eqns

        #
        # EQUATIONS
        #

        # Equation Declaration
        n_roeqns = len(r_o_eqns)
        n_ceqns = len(c_eqns)
        n_leqns = len(l_eqns)
        eqns = []

        # Alias the constraints by declaration order since Baron does not
        # include the constraint names in the solution file. It is important
        # that these aliases not clash with any real constraint labels, hence
        # the ".c<integer>" template: a component cannot be given a name of
        # this form through standard syntax, so a clash is very unlikely.
        order_counter = 0
        alias_template = ".c%d"
        output_file.write('EQUATIONS ')
        output_file.write("c_e_FIX_ONE_VAR_CONST__")
        order_counter += 1
        for block in all_blocks_list:

            for constraint_data in block.component_data_objects(
                    Constraint, active=True, sort=sorter, descend_into=False):

                if (not constraint_data.has_lb()) and \
                   (not constraint_data.has_ub()):
                    assert not constraint_data.equality
                    continue  # non-binding, so skip

                if (not _skip_trivial(constraint_data)) and \
                   (constraint_data not in non_standard_eqns):

                    eqns.append(constraint_data)

                    con_symbol = symbol_map.createSymbol(
                        constraint_data, c_labeler)
                    assert not con_symbol.startswith('.')
                    assert con_symbol != "c_e_FIX_ONE_VAR_CONST__"

                    symbol_map.alias(constraint_data,
                                     alias_template % order_counter)
                    output_file.write(", " + str(con_symbol))
                    order_counter += 1

        output_file.write(";\n\n")
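        # illustrative result (actual symbols come from c_labeler):
        #   EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2;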

        if n_roeqns > 0:
            output_file.write('RELAXATION_ONLY_EQUATIONS ')
            for i, constraint_data in enumerate(r_o_eqns):
                con_symbol = symbol_map.createSymbol(constraint_data,
                                                     c_labeler)
                assert not con_symbol.startswith('.')
                assert con_symbol != "c_e_FIX_ONE_VAR_CONST__"
                symbol_map.alias(constraint_data,
                                 alias_template % order_counter)
                if i == n_roeqns - 1:
                    output_file.write(str(con_symbol) + ';\n\n')
                else:
                    output_file.write(str(con_symbol) + ', ')
                order_counter += 1

        if n_ceqns > 0:
            output_file.write('CONVEX_EQUATIONS ')
            for i, constraint_data in enumerate(c_eqns):
                con_symbol = symbol_map.createSymbol(constraint_data,
                                                     c_labeler)
                assert not con_symbol.startswith('.')
                assert con_symbol != "c_e_FIX_ONE_VAR_CONST__"
                symbol_map.alias(constraint_data,
                                 alias_template % order_counter)
                if i == n_ceqns - 1:
                    output_file.write(str(con_symbol) + ';\n\n')
                else:
                    output_file.write(str(con_symbol) + ', ')
                order_counter += 1

        if n_leqns > 0:
            output_file.write('LOCAL_EQUATIONS ')
            for i, constraint_data in enumerate(l_eqns):
                con_symbol = symbol_map.createSymbol(constraint_data,
                                                     c_labeler)
                assert not con_symbol.startswith('.')
                assert con_symbol != "c_e_FIX_ONE_VAR_CONST__"
                symbol_map.alias(constraint_data,
                                 alias_template % order_counter)
                if i == n_leqns - 1:
                    output_file.write(str(con_symbol) + ';\n\n')
                else:
                    output_file.write(str(con_symbol) + ', ')
                order_counter += 1

        # Create a dictionary of baron variable names to match to the
        # strings that constraint.to_string() prints. An important
        # note is that the variable strings are padded by spaces so
        # that whole variable names are recognized, and simple
        # variable names are not identified inside longer names.
        # Example: ' x[1] ' -> ' x3 '
        #FIXME: 7/18/14 CLH: This may cause mistakes if spaces in
        #                    variable names are allowed
        if isinstance(model, IBlock):
            mutable_param_gen = lambda b: \
                                b.components(ctype=Param,
                                             descend_into=False)
        else:

            def mutable_param_gen(b):
                for param in b.component_objects(Param):
                    if param._mutable and param.is_indexed():
                        param_data_iter = \
                            (param_data for index, param_data
                             in iteritems(param))
                    elif not param.is_indexed():
                        param_data_iter = iter([param])
                    else:
                        param_data_iter = iter([])

                    for param_data in param_data_iter:
                        yield param_data

        if False:
            #
            # This was part of a merge from master that caused
            # test failures.  But commenting this out didn't cause additional failures!?!
            #
            vstring_to_var_dict = {}
            vstring_to_bar_dict = {}
            pstring_to_bar_dict = {}
            _val_template = ' %' + self._precision_string + ' '
            for block in all_blocks_list:
                for var_data in active_components_data_var[id(block)]:
                    variable_stream = StringIO()
                    var_data.to_string(ostream=variable_stream, verbose=False)
                    variable_string = variable_stream.getvalue()
                    variable_string = ' ' + variable_string + ' '
                    vstring_to_var_dict[variable_string] = var_data
                    if output_fixed_variable_bounds or (not var_data.fixed):
                        vstring_to_bar_dict[variable_string] = \
                            ' '+object_symbol_dictionary[id(var_data)]+' '
                    else:
                        assert var_data.value is not None
                        vstring_to_bar_dict[variable_string] = \
                            (_val_template % (var_data.value,))

                for param_data in mutable_param_gen(block):
                    param_stream = StringIO()
                    param_data.to_string(ostream=param_stream, verbose=False)
                    param_string = param_stream.getvalue()

                    param_string = ' ' + param_string + ' '
                    pstring_to_bar_dict[param_string] = \
                        (_val_template % (param_data(),))

        # Equation Definition
        string_template = '%' + self._precision_string
        output_file.write('c_e_FIX_ONE_VAR_CONST__:  ONE_VAR_CONST__  == 1;\n')
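        # each constraint is then emitted as
        #   "<symbol>: [<lb> <=] <body> (==|>=|<=) <bound>;"
        # with bounds on both sides only for range constraints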
        for constraint_data in itertools.chain(eqns, r_o_eqns, c_eqns, l_eqns):

            variables = set()
            #print(symbol_map.byObject.keys())
            eqn_body = expression_to_string(constraint_data.body,
                                            variables,
                                            smap=symbol_map)
            #print(symbol_map.byObject.keys())
            referenced_variable_ids.update(variables)

            if len(variables) == 0:
                assert not skip_trivial_constraints
                eqn_body += " + 0 * ONE_VAR_CONST__ "

            # 7/29/14 CLH:
            #FIXME: Baron doesn't handle many of the
            #       intrinsic_functions available in pyomo. The
            #       error message given by baron is also very
            #       weak.  Either a function here to re-write
            #       unallowed expressions or a way to track solver
            #       capability by intrinsic_expression would be
            #       useful.
            ##########################

            con_symbol = symbol_map.byObject[id(constraint_data)]
            output_file.write(str(con_symbol) + ': ')

            # Fill in the left and right hand side (constants) of
            #  the equations

            # Equality constraint
            if constraint_data.equality:
                eqn_lhs = ''
                eqn_rhs = ' == ' + \
                          str(string_template
                              % self._get_bound(constraint_data.upper))

            # Greater than constraint
            elif not constraint_data.has_ub():
                eqn_rhs = ' >= ' + \
                          str(string_template
                              % self._get_bound(constraint_data.lower))
                eqn_lhs = ''

            # Less than constraint
            elif not constraint_data.has_lb():
                eqn_rhs = ' <= ' + \
                          str(string_template
                              % self._get_bound(constraint_data.upper))
                eqn_lhs = ''

            # Double-sided constraint
            elif constraint_data.has_lb() and \
                 constraint_data.has_ub():
                eqn_lhs = str(string_template
                              % self._get_bound(constraint_data.lower)) + \
                          ' <= '
                eqn_rhs = ' <= ' + \
                          str(string_template
                              % self._get_bound(constraint_data.upper))

            eqn_string = eqn_lhs + eqn_body + eqn_rhs + ';\n'
            output_file.write(eqn_string)

        #
        # OBJECTIVE
        #

        output_file.write("\nOBJ: ")

        n_objs = 0
        for block in all_blocks_list:

            for objective_data in block.component_data_objects(
                    Objective, active=True, sort=sorter, descend_into=False):

                n_objs += 1
                if n_objs > 1:
                    raise ValueError(
                        "The BARON writer has detected multiple active "
                        "objective functions on model %s, but "
                        "currently only handles a single objective." %
                        (model.name))

                # create symbol
                symbol_map.createSymbol(objective_data, c_labeler)
                symbol_map.alias(objective_data, "__default_objective__")

                if objective_data.is_minimizing():
                    output_file.write("minimize ")
                else:
                    output_file.write("maximize ")

                variables = set()
                #print(symbol_map.byObject.keys())
                obj_string = expression_to_string(objective_data.expr,
                                                  variables,
                                                  smap=symbol_map)
                #print(symbol_map.byObject.keys())
                referenced_variable_ids.update(variables)

        output_file.write(obj_string + ";\n\n")
        #referenced_variable_ids.update(symbol_map.byObject.keys())

        return referenced_variable_ids, branching_priorities_suffixes
Example #59
0
    def get_result(self):
        # Got to unravel the join stack; the nesting order could be
        # arbitrary, so we do a depth first search and push the join tokens
        # and predicates onto a flat list, then format them
        op = self.expr.op()

        if isinstance(op, ops.Join):
            self._walk_join_tree(op)
        else:
            self.join_tables.append(self._format_table(self.expr))

        # TODO: Now actually format the things
        buf = StringIO()
        buf.write(self.join_tables[0])
        for jtype, table, preds in zip(self.join_types, self.join_tables[1:],
                                       self.join_predicates):
            buf.write('\n')
            buf.write(util.indent('{0} {1}'.format(jtype, table), self.indent))

            if len(preds):
                buf.write('\n')
                fmt_preds = [self._translate(pred) for pred in preds]
                conj = ' AND\n{0}'.format(' ' * 3)
                fmt_preds = util.indent('ON ' + conj.join(fmt_preds),
                                        self.indent * 2)
                buf.write(fmt_preds)

        return buf.getvalue()
Example #60
0
def ceph_bootstrap(ctx, config):
    cluster_name = config['cluster']
    testdir = teuthology.get_testdir(ctx)
    fsid = ctx.ceph[cluster_name].fsid

    bootstrap_remote = ctx.ceph[cluster_name].bootstrap_remote
    first_mon = ctx.ceph[cluster_name].first_mon
    first_mon_role = ctx.ceph[cluster_name].first_mon_role
    mons = ctx.ceph[cluster_name].mons
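
    # overall flow: open up /etc/ceph, write a seed config, register the
    # initial mon/mgr daemons, run 'cephadm bootstrap' on the bootstrap
    # remote, distribute confs/keyrings and the ssh key to the other
    # nodes; teardown happens in the finally block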

    ctx.cluster.run(args=[
        'sudo',
        'mkdir',
        '-p',
        '/etc/ceph',
    ])
    ctx.cluster.run(args=[
        'sudo',
        'chmod',
        '777',
        '/etc/ceph',
    ])
    try:
        # write seed config
        log.info('Writing seed config...')
        conf_fp = BytesIO()
        seed_config = build_initial_config(ctx, config)
        seed_config.write(conf_fp)
        teuthology.write_file(remote=bootstrap_remote,
                              path='{}/seed.{}.conf'.format(
                                  testdir, cluster_name),
                              data=conf_fp.getvalue())
        log.debug('Final config:\n' + conf_fp.getvalue().decode())
        ctx.ceph[cluster_name].conf = seed_config

        # register initial daemons
        ctx.daemons.register_daemon(
            bootstrap_remote,
            'mon',
            first_mon,
            cluster=cluster_name,
            fsid=fsid,
            logger=log.getChild('mon.' + first_mon),
            wait=False,
            started=True,
        )
        if not ctx.ceph[cluster_name].roleless:
            first_mgr = ctx.ceph[cluster_name].first_mgr
            ctx.daemons.register_daemon(
                bootstrap_remote,
                'mgr',
                first_mgr,
                cluster=cluster_name,
                fsid=fsid,
                logger=log.getChild('mgr.' + first_mgr),
                wait=False,
                started=True,
            )

        # bootstrap
        log.info('Bootstrapping...')
        cmd = [
            'sudo',
            ctx.cephadm,
            '--image',
            ctx.ceph[cluster_name].image,
            '-v',
            'bootstrap',
            '--fsid',
            fsid,
            '--config',
            '{}/seed.{}.conf'.format(testdir, cluster_name),
            '--output-config',
            '/etc/ceph/{}.conf'.format(cluster_name),
            '--output-keyring',
            '/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
            '--output-pub-ssh-key',
            '{}/{}.pub'.format(testdir, cluster_name),
        ]
        if not ctx.ceph[cluster_name].roleless:
            cmd += [
                '--mon-id',
                first_mon,
                '--mgr-id',
                first_mgr,
                '--orphan-initial-daemons',  # we will do it explicitly!
                '--skip-monitoring-stack',  # we'll provision these explicitly
            ]
        if mons[first_mon_role].startswith('['):
            cmd += ['--mon-addrv', mons[first_mon_role]]
        else:
            cmd += ['--mon-ip', mons[first_mon_role]]
        if config.get('skip_dashboard'):
            cmd += ['--skip-dashboard']
        # bootstrap makes the keyring root 0600, so +r it for our purposes
        cmd += [
            run.Raw('&&'),
            'sudo',
            'chmod',
            '+r',
            '/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
        ]
        bootstrap_remote.run(args=cmd)

        # fetch keys and configs
        log.info('Fetching config...')
        ctx.ceph[cluster_name].config_file = teuthology.get_file(
            remote=bootstrap_remote,
            path='/etc/ceph/{}.conf'.format(cluster_name))
        log.info('Fetching client.admin keyring...')
        ctx.ceph[cluster_name].admin_keyring = teuthology.get_file(
            remote=bootstrap_remote,
            path='/etc/ceph/{}.client.admin.keyring'.format(cluster_name))
        log.info('Fetching mon keyring...')
        ctx.ceph[cluster_name].mon_keyring = teuthology.get_file(
            remote=bootstrap_remote,
            path='/var/lib/ceph/%s/mon.%s/keyring' % (fsid, first_mon),
            sudo=True)

        # fetch ssh key, distribute to additional nodes
        log.info('Fetching pub ssh key...')
        ssh_pub_key = teuthology.get_file(
            remote=bootstrap_remote,
            path='{}/{}.pub'.format(testdir,
                                    cluster_name)).decode('ascii').strip()

        log.info('Installing pub ssh key for root users...')
        ctx.cluster.run(args=[
            'sudo',
            'install',
            '-d',
            '-m',
            '0700',
            '/root/.ssh',
            run.Raw('&&'),
            'echo',
            ssh_pub_key,
            run.Raw('|'),
            'sudo',
            'tee',
            '-a',
            '/root/.ssh/authorized_keys',
            run.Raw('&&'),
            'sudo',
            'chmod',
            '0600',
            '/root/.ssh/authorized_keys',
        ])

        # set options
        _shell(ctx, cluster_name, bootstrap_remote, [
            'ceph', 'config', 'set', 'mgr', 'mgr/cephadm/allow_ptrace', 'true'
        ])

        # add other hosts
        for remote in ctx.cluster.remotes.keys():
            if remote == bootstrap_remote:
                continue
            log.info('Writing (initial) conf and keyring to %s' %
                     remote.shortname)
            teuthology.write_file(
                remote=remote,
                path='/etc/ceph/{}.conf'.format(cluster_name),
                data=ctx.ceph[cluster_name].config_file)
            teuthology.write_file(
                remote=remote,
                path='/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
                data=ctx.ceph[cluster_name].admin_keyring)

            log.info('Adding host %s to orchestrator...' % remote.shortname)
            _shell(ctx, cluster_name, remote,
                   ['ceph', 'orch', 'host', 'add', remote.shortname])
            r = _shell(ctx,
                       cluster_name,
                       remote, ['ceph', 'orch', 'host', 'ls', '--format=json'],
                       stdout=StringIO())
            hosts = [
                node['hostname'] for node in json.loads(r.stdout.getvalue())
            ]
            assert remote.shortname in hosts
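
        # hand control back to teuthology while the cluster is up;
        # teardown runs in the finally block below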

        yield

    finally:
        log.info('Cleaning up testdir ceph.* files...')
        ctx.cluster.run(args=[
            'rm',
            '-f',
            '{}/seed.{}.conf'.format(testdir, cluster_name),
            '{}/{}.pub'.format(testdir, cluster_name),
        ])

        log.info('Stopping all daemons...')

        # this doesn't block until they are all stopped...
        #ctx.cluster.run(args=['sudo', 'systemctl', 'stop', 'ceph.target'])

        # so, stop them individually
        for role in ctx.daemons.resolve_role_list(None, CEPH_ROLE_TYPES):
            cluster, type_, id_ = teuthology.split_role(role)
            ctx.daemons.get_daemon(type_, id_, cluster).stop()

        # clean up /etc/ceph
        ctx.cluster.run(args=[
            'sudo',
            'rm',
            '-f',
            '/etc/ceph/{}.conf'.format(cluster_name),
            '/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
        ])