Example #1
    def test_file_download_authorization_public(self):
        # type: () -> None
        subscribed_users = [self.example_email("hamlet"), self.example_email("iago")]
        unsubscribed_users = [self.example_email("othello"), self.example_email("prospero")]
        realm = get_realm("zulip")
        for email in subscribed_users:
            self.subscribe(get_user(email, realm), "test-subscribe")

        self.login(self.example_email("hamlet"))
        fp = StringIO("zulip!")
        fp.name = "zulip.txt"
        result = self.client_post("/json/user_uploads", {'file': fp})
        uri = result.json()['uri']
        fp_path_id = re.sub('/user_uploads/', '', uri)
        body = "First message ...[zulip.txt](http://localhost:9991/user_uploads/" + fp_path_id + ")"
        self.send_stream_message(self.example_email("hamlet"), "test-subscribe", body, "test")
        self.logout()

        # Now all users should be able to access the files
        for user in subscribed_users + unsubscribed_users:
            self.login(user)
            response = self.client_get(uri)
            data = b"".join(response.streaming_content)
            self.assertEqual(b"zulip!", data)
            self.logout()
Example #2
def test_question_choices():

    # TODO: come up with a reusable fixture for testing here

    choices = {
        'a': '[a], b, cc',
        'b': 'a, [b], cc',
        'cc': 'a, b, [cc]'
    }

    for default_value in ['a', 'b']:
        choices_str = choices[default_value]
        for entered_value, expected_value in [(default_value, default_value),
                                              ('', default_value),
                                              ('cc', 'cc')]:
            with patch_getpass(return_value=entered_value):
                out = StringIO()
                response = DialogUI(out=out).question("prompt", choices=sorted(choices), default=default_value)
                eq_(response, expected_value)
                # getpass doesn't use out -- goes straight to the terminal
                eq_(out.getvalue(), '')
                # TODO: may be test that the prompt was passed as a part of the getpass arg
                #eq_(out.getvalue(), 'prompt (choices: %s): ' % choices_str)

    # check that the expected exceptions are raised
    out = StringIO()
    ui = DialogUI(out=out)
    assert_raises(ValueError, ui.question, "prompt", choices=['a'], default='b')
    eq_(out.getvalue(), '')

    with patch_getpass(return_value='incorrect'):
        assert_raises(RuntimeError, ui.question, "prompt", choices=['a', 'b'])
    assert_re_in(".*ERROR: .incorrect. is not among choices.*", out.getvalue())
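The patch_getpass helper used above is not shown in this snippet; a minimal sketch, assuming it simply stubs getpass.getpass to return a canned value:

import getpass
from unittest import mock

def patch_getpass(return_value):
    # Assumption: patch_getpass just patches getpass.getpass so that every
    # prompt returns the canned value; usable as a context manager.
    return mock.patch.object(getpass, 'getpass', return_value=return_value)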
Example #3
    def test_list(self):

        def fake_network_get_all(context):
            return [db_fakes.FakeModel(self.net)]
        self.stubs.Set(db, 'network_get_all', fake_network_get_all)
        output = StringIO()
        sys.stdout = output
        self.commands.list()
        sys.stdout = sys.__stdout__
        result = output.getvalue()
        _fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
                          "%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
                          "%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
        head = _fmt % {'id': 'id',
                       'cidr': 'IPv4',
                       'cidr_v6': 'IPv6',
                       'dhcp_start': 'start address',
                       'dns1': 'DNS1',
                       'dns2': 'DNS2',
                       'vlan': 'VlanID',
                       'project_id': 'project',
                       'uuid': "uuid"}
        body = _fmt % {'id': self.net['id'],
                       'cidr': self.net['cidr'],
                       'cidr_v6': self.net['cidr_v6'],
                       'dhcp_start': self.net['dhcp_start'],
                       'dns1': self.net['dns1'],
                       'dns2': self.net['dns2'],
                       'vlan': self.net['vlan'],
                       'project_id': self.net['project_id'],
                       'uuid': self.net['uuid']}
        answer = '%s\n%s\n' % (head, body)
        self.assertEqual(result, answer)
Example #4
    def test_rest_endpoint(self):
        # type: () -> None
        """
        Tests the /api/v1/user_uploads api endpoint. Here a single file is uploaded
        and downloaded using a username and api_key
        """
        fp = StringIO("zulip!")
        fp.name = "zulip.txt"

        # Upload file via API
        auth_headers = self.api_auth(self.example_email("hamlet"))
        result = self.client_post('/api/v1/user_uploads', {'file': fp}, **auth_headers)
        self.assertIn("uri", result.json())
        uri = result.json()['uri']
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        # Download file via API
        self.logout()
        response = self.client_get(uri, **auth_headers)
        data = b"".join(response.streaming_content)
        self.assertEqual(b"zulip!", data)

        # Files uploaded through the API should be accessible via the web client
        self.login(self.example_email("hamlet"))
        self.assert_url_serves_contents_of_file(uri, b"zulip!")
Example #5
def test_pydotprint_cond_highlight():
    """
    This is a REALLY PARTIAL TEST.

    I wrote it to help debug stuff.
    """

    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    f = theano.function([x], x * 2)
    f([1, 2, 3, 4])

    s = StringIO()
    new_handler = logging.StreamHandler(s)
    new_handler.setLevel(logging.DEBUG)
    orig_handler = theano.logging_default_handler

    theano.theano_logger.removeHandler(orig_handler)
    theano.theano_logger.addHandler(new_handler)
    try:
        theano.printing.pydotprint(f, cond_highlight=True,
                                   print_output_file=False)
    finally:
        theano.theano_logger.addHandler(orig_handler)
        theano.theano_logger.removeHandler(new_handler)

    assert (s.getvalue() == 'pydotprint: cond_highlight is set but there'
            ' is no IfElse node in the graph\n')
Example #6
def user_report():
    strIO = None
    # Create or update view
    cursor = connection.cursor()
    sql = 'CREATE OR REPLACE VIEW accounts_emailuser_report_v AS \
            select md5(CAST((first_name,last_name,dob)AS text)) as hash,count(*) as occurence, first_name,last_name,\
            dob from accounts_emailuser group by first_name,last_name,dob;'
    cursor.execute(sql)

    users = EmailUserReport.objects.filter(occurence__gt=1)
    if users:
        strIO = StringIO()
        fieldnames = ['Occurence', 'First Name','Last Name','DOB']
        writer = csv.DictWriter(strIO, fieldnames=fieldnames)
        writer.writeheader()

        for u in users:
            info = {
                'Occurence': u.occurence,
                'First Name': u.first_name,
                'Last Name': u.last_name,
                'DOB': u.dob
            }
            writer.writerow(info)
        strIO.flush()
        strIO.seek(0)
    return strIO
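The StringIO returned by user_report() is presumably attached to an HTTP response by some caller; a hypothetical Django-style usage sketch (the view name and download filename are invented for illustration):

from django.http import HttpResponse

def user_report_view(request):
    # Hypothetical caller: stream the generated CSV back as an attachment.
    csv_buffer = user_report()
    response = HttpResponse(csv_buffer.read() if csv_buffer else '',
                            content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="user_report.csv"'
    return response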
Example #7
        def do_check_on(value, nd, var=None):
            """
            Checks `value` for NaNs / Infs. If detected, raises an exception
            and / or prints information about `nd`, `f`, and `is_input` to
            help the user determine the cause of the invalid values.

            Parameters
            ----------
            value : numpy.ndarray
                The value to be checked.
            nd : theano.gof.Apply
                The Apply node being executed.
            var : theano.gof.Variable
                Not used if nd is there. Otherwise, used to print the stack
                trace for inputs of the graph.

            """
            error = False
            sio = StringIO()
            if nan_is_error:
                if contains_nan(value, nd, var):
                    print('NaN detected', file=sio)
                    error = True
            if inf_is_error:
                if contains_inf(value, nd, var):
                    print('Inf detected', file=sio)
                    error = True
            if big_is_error:
                err = False
                if not _is_numeric_value(value, var):
                    err = False
                elif pygpu_available and isinstance(value, GpuArray):
                    err = (f_gpua_absmax(value.reshape(value.size)) > 1e10)
                else:
                    err = (np.abs(value).max() > 1e10)
                if err:
                    print('Big value detected', file=sio)
                    error = True
            if error:
                if nd:
                    print("NanGuardMode found an error in the "
                          "output of a node in this variable:", file=sio)
                    print(theano.printing.debugprint(nd, file='str'), file=sio)
                else:
                    print("NanGuardMode found an error in an input of the "
                          "graph.", file=sio)
                # Add the stack trace
                if nd:
                    var = nd.outputs[0]
                print(theano.gof.utils.get_variable_trace_string(var),
                      file=sio)
                msg = sio.getvalue()
                if config.NanGuardMode.action == 'raise':
                    raise AssertionError(msg)
                elif config.NanGuardMode.action == 'pdb':
                    print(msg)
                    import pdb
                    pdb.set_trace()
                elif config.NanGuardMode.action == 'warn':
                    logger.error(msg)
Example #8
    def test_file_upload_authed(self):
        # type: () -> None
        """
        A call to /json/upload_file should return a uri and actually create an object.
        """
        conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
        conn.create_bucket(settings.S3_AUTH_UPLOADS_BUCKET)

        self.login("*****@*****.**")
        fp = StringIO("zulip!")
        fp.name = "zulip.txt"

        result = self.client_post("/json/upload_file", {'file': fp})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEquals(base, uri[:len(base)])

        response = self.client_get(uri)
        redirect_url = response['Location']

        self.assertEquals(b"zulip!", urllib.request.urlopen(redirect_url).read().strip())

        self.subscribe_to_stream("*****@*****.**", "Denmark")
        body = "First message ...[zulip.txt](http://localhost:9991" + uri + ")"
        self.send_message("*****@*****.**", "Denmark", Recipient.STREAM, body, "test")
        self.assertIn('title="zulip.txt"', self.get_last_message().rendered_content)
Example #9
def main(loops, level):
    board, solution = LEVELS[level]
    order = DESCENDING
    strategy = Done.FIRST_STRATEGY
    stream = StringIO()

    board = board.strip()
    expected = solution.rstrip()

    range_it = xrange(loops)
    t0 = perf.perf_counter()

    for _ in range_it:
        stream = StringIO()
        solve_file(board, strategy, order, stream)
        output = stream.getvalue()
        stream = None

    dt = perf.perf_counter() - t0

    output = '\n'.join(line.rstrip() for line in output.splitlines())
    if output != expected:
        raise AssertionError("got a wrong answer:\n%s\nexpected: %s"
                             % (output, expected))

    return dt
Example #10
    def test_file_upload_authed(self):
        # type: () -> None
        """
        A call to /json/upload_file should return a uri and actually create an
        entry in the database. This entry will be marked unclaimed till a message
        refers to it.
        """
        self.login("*****@*****.**")
        fp = StringIO("zulip!")
        fp.name = "zulip.txt"

        result = self.client_post("/json/upload_file", {'file': fp})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEquals(base, uri[:len(base)])

        # In the future, local file requests will follow the same style as S3
        # requests; they will first be authenticated and redirected
        response = self.client_get(uri)
        data = b"".join(response.streaming_content)
        self.assertEquals(b"zulip!", data)

        # check if DB has attachment marked as unclaimed
        entry = Attachment.objects.get(file_name='zulip.txt')
        self.assertEquals(entry.is_claimed(), False)

        self.subscribe_to_stream("*****@*****.**", "Denmark")
        body = "First message ...[zulip.txt](http://localhost:9991" + uri + ")"
        self.send_message("*****@*****.**", "Denmark", Recipient.STREAM, body, "test")
        self.assertIn('title="zulip.txt"', self.get_last_message().rendered_content)
Example #11
    def test_rest_endpoint(self):
        # type: () -> None
        """
        Tests the /api/v1/user_uploads api endpoint. Here a single file is uploaded
        and downloaded using a username and api_key
        """
        fp = StringIO("zulip!")
        fp.name = "zulip.txt"

        # Upload file via API
        auth_headers = self.api_auth('*****@*****.**')
        result = self.client_post('/api/v1/user_uploads', {'file': fp}, **auth_headers)
        json = ujson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEquals(base, uri[:len(base)])

        # Download file via API
        self.client_post('/accounts/logout/')
        response = self.client_get(uri, **auth_headers)
        data = b"".join(response.streaming_content)
        self.assertEquals(b"zulip!", data)

        # Files uploaded through the API should be accessible via the web client
        self.login("*****@*****.**")
        response = self.client_get(uri)
        data = b"".join(response.streaming_content)
        self.assertEquals(b"zulip!", data)
Example #12
def test_record_good():
    """
    Tests that when we record a sequence of events, then
    repeat it exactly, the Record class:
        1) Records it correctly
        2) Does not raise any errors
    """

    # Record a sequence of events
    output = StringIO()

    recorder = Record(file_object=output, replay=False)

    num_lines = 10

    for i in xrange(num_lines):
        recorder.handle_line(str(i) + '\n')

    # Make sure they were recorded correctly
    output_value = output.getvalue()

    assert output_value == ''.join(str(i) + '\n' for i in xrange(num_lines))

    # Make sure that the playback functionality doesn't raise any errors
    # when we repeat them
    output = StringIO(output_value)

    playback_checker = Record(file_object=output, replay=True)

    for i in xrange(num_lines):
        playback_checker.handle_line(str(i) + '\n')
Example #13
def test_stderr_to_StringIO():
    s = StringIO()

    with stderr_to(s):
        sys.stderr.write(u("hello"))

    assert s.getvalue() == 'hello'
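stderr_to is not defined in this snippet; a minimal sketch, assuming it is a plain stream-redirection context manager:

import sys
from contextlib import contextmanager

@contextmanager
def stderr_to(stream):
    # Assumption: temporarily swap sys.stderr for the given stream,
    # restoring the original even if the body raises.
    saved = sys.stderr
    sys.stderr = stream
    try:
        yield stream
    finally:
        sys.stderr = saved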
Example #14
    def test_grid_01(self):
        nid = 1
        cp = 2
        cd = 0
        ps = ''
        seid = 0
        card_count = {'GRID': 1,}

        model = BDF()
        model.allocate(card_count)
        data1 = BDFCard(['GRID', nid, cp, 0., 0., 0., cd, ps, seid])

        nodes = model.grid
        nodes.add(data1)

        #print n1
        f = StringIO()
        nodes.write_bdf(f, size=8, write_header=False)
        nodes.write_bdf(f, size=16, write_header=False)
        nodes.write_bdf(f, size=16, is_double=True, write_header=False)

        # small field
        f = StringIO()
        nodes.write_bdf(f, size=8, write_header=False)
        msg = f.getvalue()
        card = 'GRID           1       2      0.      0.      0.\n'
        self.assertCardEqual(msg, card)

        # large field
        f = StringIO()
        nodes.write_bdf(f, size=16, write_header=False)
        card = ('GRID*                  1               2              0.              0.\n'
                '*                     0.\n')
        msg = f.getvalue()
        self.assertCardEqual(msg, card)
Example #15
def test_record_bad():
    """
    Tests that when we record a sequence of events, then
    do something different on playback, the Record class catches it.
    """

    # Record a sequence of events
    output = StringIO()

    recorder = Record(file_object=output, replay=False)

    num_lines = 10

    for i in xrange(num_lines):
        recorder.handle_line(str(i) + '\n')

    # Make sure that the playback functionality doesn't raise any errors
    # when we repeat some of them
    output_value = output.getvalue()
    output = StringIO(output_value)

    playback_checker = Record(file_object=output, replay=True)

    for i in xrange(num_lines // 2):
        playback_checker.handle_line(str(i) + '\n')

    # Make sure it raises an error when we deviate from the recorded sequence
    try:
        playback_checker.handle_line('0\n')
    except MismatchError:
        return
    raise AssertionError("Failed to detect mismatch between recorded sequence "
                         "and repetition of it.")
Example #16
def init_3d():
    """Initialise 3D plots within the IPython notebook, by injecting the
    required javascript libraries.
    """

    library_javascript = StringIO()

    library_javascript.write("""
    <script type="text/javascript">
    /* Beginning of javascript injected by OpenModes */
    var openmodes_javascript_injected = true;
    """)

    three_js_libraries = ("three.min.js", "OrbitControls.js",
                          "Lut.js", "Detector.js", "CanvasRenderer.js",
                          "Projector.js")

    # Include required parts of three.js inline
    for library in three_js_libraries:
        with open(osp.join(three_js_dir, library)) as infile:
            library_javascript.write(infile.read())

    # include my custom javascript inline
    with open(osp.join(static_dir, "three_js_plot.js")) as infile:
        library_javascript.write(infile.read())

    library_javascript.write(
                "/* End of javascript injected by OpenModes */\n</script>\n")

    display(HTML(library_javascript.getvalue()))
    logging.info("Javascript injected for 3D interactive WebGL plots")
Example #17
File: hooks.py Project: Web5design/Bento
def create_hook_module(target):
    safe_name = SAFE_MODULE_NAME.sub("_", target, len(target))
    module_name = "bento_hook_%s" % safe_name
    main_file = os.path.abspath(target)
    module = imp.new_module(module_name)
    module.__file__ = main_file
    code = open(main_file).read()

    sys.path.insert(0, os.path.dirname(main_file))
    try:
        exec(compile(code, main_file, 'exec'), module.__dict__)
        sys.modules[module_name] = module
    except Exception:
        sys.path.pop(0)
        e = extract_exception()
        tb = sys.exc_info()[2]
        s = StringIO()
        traceback.print_tb(tb, file=s)
        msg = """\
Could not import hook file %r: caught exception %r
Original traceback (most recent call last)
%s\
""" % (main_file, e, s.getvalue())
        raise InvalidHook(msg)

    module.root_path = main_file
    return module
Example #18
    def render_and_save_variation(self, name, content, variation):
        """
        Renders the image variations and saves them to the storage
        """
        content.seek(0)

        img = Image.open(content)

        if self.is_smaller(img, variation):
            factor = 1
            while (img.size[0] / factor > 2 * variation['width'] and
                   img.size[1] * 2 / factor > 2 * variation['height']):
                factor *= 2
            if factor > 1:
                img.thumbnail((int(img.size[0] / factor),
                               int(img.size[1] / factor)), resample=resample)

            if variation['crop']:
                img = ImageOps.fit(img, (variation['width'], variation['height']), method=resample)
            else:
                img.thumbnail((variation['width'], variation['height']), resample=resample)
        variation_name = self.get_variation_name(self.instance, self.field, variation)
        file_buffer = StringIO()
        format = self.get_file_extension(name).lower().replace('jpg', 'jpeg')
        img.save(file_buffer, format)
        self.storage.save(variation_name, ContentFile(file_buffer.getvalue()))
        file_buffer.close()
Example #19
    def setUp(self):
        stream = StringIO("""
                - hosts: localhost
                  vars:
                    number: 1
                    string: Ansible
                    utf8_string: Cafè Eñyei
                    dictionary:
                      webster: daniel
                      oed: oxford
                    list:
                      - a
                      - b
                      - 1
                      - 2
                  tasks:
                    - name: Test case
                      ping:
                        data: "{{ utf8_string }}"

                    - name: Test 2
                      ping:
                        data: "Cafè Eñyei"

                    - name: Test 3
                      command: "printf 'Cafè Eñyei\\n'"
                """)
        self.play_filename = '/path/to/myplay.yml'
        stream.name = self.play_filename
        self.loader = AnsibleLoader(stream)
        self.data = self.loader.get_single_data()
Example #20
File: reports.py Project: wilsonc86/ledger
def outstanding_bookings():
    try:
        outstanding = []
        today = datetime.date.today()
        for b in Booking.objects.filter(is_canceled=False,departure__gte=today).exclude(booking_type__in=['1','3']):
            if not b.paid:
                outstanding.append(b)


        strIO = StringIO()
        fieldnames = ['Confirmation Number','Customer','Campground','Arrival','Departure','Outstanding Amount']
        writer = csv.writer(strIO)
        writer.writerow(fieldnames)
        for o in outstanding:
            fullname = '{} {}'.format(o.details.get('first_name'),o.details.get('last_name'))
            writer.writerow([o.confirmation_number,fullname,o.campground.name,o.arrival.strftime('%d/%m/%Y'),o.departure.strftime('%d/%m/%Y'),o.outstanding])
        strIO.flush()
        strIO.seek(0)
        _file = strIO

        dt = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
        recipients = []
        recipients = OutstandingBookingRecipient.objects.all()
        email = EmailMessage(
            'Unpaid Bookings Summary as at {}'.format(dt),
            'Unpaid Bookings as at {}'.format(dt),
            settings.DEFAULT_FROM_EMAIL,
            to=[r.email for r in recipients] if recipients else [settings.NOTIFICATION_EMAIL]
        )
        email.attach('OutstandingBookings_{}.csv'.format(dt), _file.getvalue(), 'text/csv')
        email.send()
    except:
        raise
Example #21
def init_3d():
    """Initialise 3D plots within the IPython notebook, by injecting the
    required javascript libraries.
    """

    library_javascript = StringIO()

    library_javascript.write("""

    <!-- Beginning of javascript injected by multinet.js -->
    <script type="text/javascript" src="multinet/static/js/jquery-2.1.4.js"></script>
    <script type="text/javascript" src="multinet/static/js/jquery-ui-1.11.4.js"></script>

    <script type="text/javascript" src="multinet/static/js/threejs/three-r71.js"></script>
    <script type="text/javascript" src="multinet/static/js/threejs/orbitcontrols.js"></script>
    <script type="text/javascript" src="multinet/static/js/threejs/stats-r12.min.js"></script>
    <script type="text/javascript" src="multinet/static/js/threejs/detector.js"></script>

    <script type="text/javascript" src="multinet/static/js/multinet-core.js"></script>
    <script type="text/javascript" src="multinet/static/js/multinet-ipython.js"></script>
    <script type="text/javascript">
        var multinet_javascript_injected = true;
    </script>
    <!-- End of javascript injected by multinet.js -->
    """)

    display(HTML(library_javascript.getvalue()))
Example #22
    def test_contradictory_date_entries_warn(self):
        """4.8.5.3 Emit warning on contradictory date entries."""
        stream = StringIO(
            wrap_document_text(construct_document_from(**{
                "Author": {
                    "ForeName": "John",
                    "LastName": "Smith"
                },
                "DateCompleted": {
                    "Year": "2011",
                    "Month": "01",
                    "Day": "01"
                },
                "DateRevised": {
                    "Year": "2010",
                    "Month": "01",
                    "Day": "01"
                },
            }))
        )
        stderr = StringIO()
        self.patch(sys, "stderr", stderr)
        result = parsexml.parse_element_tree(
            parsexml.file_to_element_tree(stream)
        )
        stderr.seek(0)
        stderr_out = stderr.read()
        self.assertThat(result["pubDate"], Is(None))
        self.assertThat(result["reviseDate"], Is(None))
        self.assertThat(stderr_out,
                        Contains("is greater than"))
Example #23
class UpdateAppsAndBackendsTest(TestCase):

    def setUp(self):
        self.output = StringIO()
        # BASE_APPS are needed for the management commands to load successfully
        self.BASE_APPS = [
            'rapidsms',
            'django.contrib.auth',
            'django.contrib.contenttypes',
        ]

    def test_no_apps_then_none_added(self):
        with self.settings(INSTALLED_APPS=self.BASE_APPS):
            call_command('update_apps', stdout=self.output)
        self.assertEqual(self.output.getvalue(), '')

    def test_adds_app(self):
        # Add an app that has a RapidSMS app
        APPS = self.BASE_APPS + ['rapidsms.contrib.handlers']
        with self.settings(INSTALLED_APPS=APPS):
            call_command('update_apps', stdout=self.output)
        self.assertEqual(self.output.getvalue(), 'Added persistent app rapidsms.contrib.handlers\n')

    def test_no_backends_then_none_added(self):
        with self.settings(INSTALLED_BACKENDS={}):
            call_command('update_backends', stdout=self.output)
        self.assertEqual(self.output.getvalue(), '')

    def test_adds_backend(self):
        INSTALLED_BACKENDS = {
            "message_tester": {"ENGINE": "rapidsms.backends.database.DatabaseBackend"},
        }
        with self.settings(INSTALLED_BACKENDS=INSTALLED_BACKENDS):
            call_command('update_backends', stdout=self.output)
        self.assertEqual(self.output.getvalue(), 'Added persistent backend message_tester\n')
Example #24
def _find_snippet_imports(module_data, module_path, strip_comments):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.
    """

    module_style = "old"
    if REPLACER in module_data:
        module_style = "new"
    elif REPLACER_WINDOWS in module_data:
        module_style = "new"
    elif "from ansible.module_utils." in module_data:
        module_style = "new"
    elif "WANT_JSON" in module_data:
        module_style = "non_native_want_json"

    output = StringIO()
    lines = module_data.split("\n")
    snippet_names = []

    for line in lines:

        if REPLACER in line:
            output.write(_slurp(os.path.join(_SNIPPET_PATH, "basic.py")))
            snippet_names.append("basic")
        if REPLACER_WINDOWS in line:
            ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
            output.write(ps_data)
            snippet_names.append("powershell")
        elif line.startswith("from ansible.module_utils."):
            tokens = line.split(".")
            import_error = False
            if len(tokens) != 3:
                import_error = True
            if " import *" not in line:
                import_error = True
            if import_error:
                raise AnsibleError(
                    "error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'"
                    % module_path
                )
            snippet_name = tokens[2].split()[0]
            snippet_names.append(snippet_name)
            output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py")))
        else:
            if strip_comments and line.startswith("#") or line == "":
                pass
            output.write(line)
            output.write("\n")

    if not module_path.endswith(".ps1"):
        # Unixy modules
        if len(snippet_names) > 0 and not "basic" in snippet_names:
            raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
    else:
        # Windows modules
        if len(snippet_names) > 0 and not "powershell" in snippet_names:
            raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)

    return (output.getvalue(), module_style)
Example #25
File: sdist.py Project: B-Rich/Bento
    def run(self, ctx):
        argv = ctx.command_argv
        p = ctx.options_context.parser
        o, a =  p.parse_args(argv)
        if o.help:
            p.print_help()
            return

        pkg = ctx.pkg
        format = o.format

        archive_root = "%s-%s" % (pkg.name, pkg.version)
        if not o.output_file:
            archive_name = archive_basename(pkg) + _FORMATS[format]["ext"]
        else:
            output = op.basename(o.output_file)
            if output != o.output_file:
                raise bento.errors.BentoError("Invalid output file: should not contain any directory")
            archive_name = output

        s = StringIO()
        write_pkg_info(ctx.pkg, s)
        n = ctx.build_node.make_node("PKG_INFO")
        n.parent.mkdir()
        n.write(s.getvalue())
        ctx.register_source_node(n, "PKG_INFO")

        # XXX: find a better way to pass archive name from other commands (used
        # by distcheck ATM)
        self.archive_root, self.archive_node = create_archive(archive_name, archive_root, ctx._node_pkg,
                ctx.top_node, ctx.run_node, o.format, o.output_dir)
Example #26
    def render(self):
        """This is the tricky part: with the rendered_content, create a CSV."""


        if not self._is_rendered:

            # File pointer needed to create the CSV in memory
            buffer = StringIO()
            writer = UnicodeWriter(buffer)

            for row in self.rows:
                writer.writerow([six.text_type(value) for value
                                 in row])

            # Get the value of the StringIO buffer and write it to the response.
            csv = buffer.getvalue()
            buffer.close()
            self.write(csv)

            # Sets the appropriate CSV headers.
            self['Content-Disposition'] = 'attachment; filename=%s' % (
                self.filename, )

            # The CSV has been generated
            self._is_rendered = True

            for post_callback in self._post_render_callbacks:
                post_callback(self)
        return self
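UnicodeWriter is not shown here; given the Python 2-era StringIO usage, it is plausibly the classic csv-documentation recipe that encodes each cell to UTF-8 before writing. A sketch under that assumption:

import csv

class UnicodeWriter(object):
    # Sketch (assumption): wrap csv.writer and UTF-8-encode each unicode
    # cell, per the classic Python 2 csv documentation recipe.
    def __init__(self, f, dialect=csv.excel, **kwds):
        self.writer = csv.writer(f, dialect=dialect, **kwds)

    def writerow(self, row):
        self.writer.writerow([s.encode("utf-8") for s in row])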
Example #27
def log_mem_usage(signum, frame, fname=None):
    global _count
    _count += 1
    gc.collect()
    if not fname:
        fname = filename + '_memory_%02d.log' % _count
    with open(fname, 'wb') as f:
        f.write('gc.garbage: %d\n\n' % len(gc.garbage))
        objgraph.show_most_common_types(limit=50, file=f)
        f.write('\n\n')
        buf = StringIO()
        objgraph.show_growth(limit=50, file=buf)
        buf = buf.getvalue()
        f.write(buf)
    if _count < 2:
        return
    for tn, l in enumerate(buf.splitlines()[:10]):
        l = l.strip()
        if not l:
            continue
        type_ = l.split()[0]
        objects = objgraph.by_type(type_)
        objects = random.sample(objects, min(50, len(objects)))
        objgraph.show_chain(
            objgraph.find_backref_chain(
                objects[0],
                objgraph.is_proper_module),
            filename=fname[:-4] + '_type_%02d_backref.png' % tn
        )
        objgraph.show_backrefs(
            objects,
            max_depth=5,
            extra_info=lambda x: hex(id(x)),
            filename=fname[:-4] + '_type_%02d_backrefs.png' % tn,
        )
Example #28
def test_main_help(monkeypatch):
    # Patch stdout
    fakestdout = StringIO()
    monkeypatch.setattr(sys, "stdout", fakestdout)

    pytest.raises(SystemExit, main.main, ['--help'])
    assert fakestdout.getvalue().lstrip().startswith("Usage: ")
Example #29
    def render(self):
        """This is the tricky part: with the rendered_content, create a PDF."""

        # The following is required for PDF generation

        import xhtml2pdf.pisa as pisa # The import is changed to xhtml2pdf

        if not self._is_rendered:

            # File pointer needed to create the PDF in memory
            buffer = StringIO()

            # Create the PDF object, using the StringIO object as its "file."
            pisa.CreatePDF(self.rendered_content, buffer,
                           link_callback=fetch_resources)

            # Get the value of the StringIO buffer and write it to the response.
            pdf = buffer.getvalue()
            buffer.close()
            self.write(pdf)

            # Sets the appropriate PDF headers.
            self['Content-Disposition'] = 'attachment; filename=%s' % (
                self.filename, )

            # The PDF has been rendered
            self._is_rendered = True

            for post_callback in self._post_render_callbacks:
                post_callback(self)
        return self
Example #30
        def show(self, lswitch=None):
            params = [lswitch] if lswitch else []
            stdout = StringIO()
            self.run("show", args=params, stdout=stdout)
            output = stdout.getvalue()

            return get_lswitch_info(output)
Example #31
    def __repr__(self):
        return self.print_to(StringIO()).getvalue()
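The one-liner above presumes a print_to(stream) method that writes a textual dump and returns the stream; a hypothetical illustration of the pattern (the Node class and its field are invented for the sketch):

from io import StringIO

class Node(object):
    def __init__(self, label):
        self.label = label

    def print_to(self, stream):
        # Write a textual dump and return the stream so calls can chain.
        stream.write("Node(%s)" % self.label)
        return stream

    def __repr__(self):
        return self.print_to(StringIO()).getvalue()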
Example #32
def exercise_non_crystallographic_conserving_bonds_and_angles():
    sites_cart, geo = geometry_restraints.manager \
      .construct_non_crystallographic_conserving_bonds_and_angles(
        sites_cart=flex.vec3_double([
          (10.949, 12.815, 15.189),
          (10.405, 13.954, 15.917),
          (10.779, 15.262, 15.227),
          ( 9.916, 16.090, 14.936)]),
        edge_list_bonds=[(0, 1), (1, 2), (2, 3)],
        edge_list_angles=[(0, 2), (1, 3)])
    assert approx_equal(sites_cart, [(6.033, 5.000, 5.253),
                                     (5.489, 6.139, 5.981),
                                     (5.863, 7.447, 5.291),
                                     (5.000, 8.275, 5.000)])
    assert approx_equal(geo.energies_sites(sites_cart=sites_cart).target, 0)
    sites_cart_noise = flex.vec3_double([  # Just to make all residuals unique,
        (6.043, 5.030, 5.233),  # so that the sorted bond list below
        (5.469, 6.119, 5.941),  # has the same order on all platforms.
        (5.893, 7.487, 5.281),
        (5.040, 8.225, 5.020)
    ])
    sio = StringIO()
    geo.show_sorted(sites_cart=sites_cart_noise, f=sio)
    expected_first_part = """\
Bond restraints: 5
Sorted by residual:
bond 2
     3
  ideal  model  delta    sigma   weight residual
  1.231  1.158  0.073 1.00e-01 1.00e+02 5.35e-01
bond 1
     2
  ideal  model  delta    sigma   weight residual
  1.525  1.577 -0.052 1.00e-01 1.00e+02 2.66e-01
bond 1
     3
  ideal  model  delta    sigma   weight residual
  2.401  2.338  0.063 1.41e-01 5.00e+01 1.96e-01
bond 0
     1
  ideal  model  delta    sigma   weight residual
  1.457  1.420  0.037 1.00e-01 1.00e+02 1.37e-01
bond 0
     2
  ideal  model  delta    sigma   weight residual
  2.453  2.462 -0.009 1.41e-01 5.00e+01 3.92e-03

"""
    assert not show_diff(
        sio.getvalue(), expected_first_part + """\
Nonbonded interactions: 0

""")
    #
    sites_cart, geo = geometry_restraints.manager \
      .construct_non_crystallographic_conserving_bonds_and_angles(
        sites_cart=flex.vec3_double([
          (10.949, 12.815, 15.189),
          (10.405, 13.954, 15.917),
          (10.779, 15.262, 15.227),
          ( 9.916, 16.090, 14.936),
          (10.749, 12.615, 15.389)]),
        edge_list_bonds=[(0, 1), (1, 2), (2, 3)],
        edge_list_angles=[(0, 2), (1, 3)])
    sites_cart_noise.append(sites_cart[-1])
    sio = StringIO()
    geo.show_sorted(sites_cart=sites_cart_noise, f=sio)
    assert not show_diff(
        sio.getvalue(), expected_first_part + """\
Nonbonded interactions: 2
Sorted by model distance:
nonbonded 0
          4
   model   vdw
   0.306 1.200
nonbonded 1
          4
   model   vdw
   1.274 1.200

""")
Example #33
def exercise_with_zeolite(verbose):
    if (not libtbx.env.has_module("iotbx")):
        print("Skipping exercise_with_zeolite(): iotbx not available")
        return
    from iotbx.kriber import strudat
    atlas_file = libtbx.env.find_in_repositories(
        relative_path="phenix_regression/misc/strudat_zeolite_atlas",
        test=os.path.isfile)
    if (atlas_file is None):
        print("Skipping exercise_with_zeolite(): input file not available")
        return
    strudat_contents = strudat.read_all_entries(open(atlas_file))
    strudat_entry = strudat_contents.get("YUG")
    si_structure = strudat_entry.as_xray_structure()
    if (verbose):
        out = sys.stdout
    else:
        out = StringIO()
    drls = distance_and_repulsion_least_squares(
        si_structure=si_structure,
        distance_cutoff=3.5,
        nonbonded_repulsion_function_type="prolsq",
        n_macro_cycles=2,
        out=out)
    #
    nbp = drls.geometry_restraints_manager.pair_proxies().nonbonded_proxies
    assert nbp.n_total() > 50
    # expected is 60, but the exact number depends on the minimizer
    #
    site_labels = drls.minimized_structure.scatterers().extract_labels()
    sites_cart = drls.start_structure.sites_cart()
    pair_proxies = drls.geometry_restraints_manager.pair_proxies()
    out = StringIO()
    pair_proxies.bond_proxies.show_sorted(by_value="residual",
                                          sites_cart=sites_cart,
                                          site_labels=site_labels,
                                          f=out)
    if (verbose):
        sys.stdout.write(out.getvalue())
    assert len(out.getvalue().splitlines()) == 48 * 4 + 2
    assert out.getvalue().splitlines()[-1].find("remaining") < 0
    out = StringIO()
    pair_proxies.bond_proxies.show_sorted(by_value="residual",
                                          sites_cart=sites_cart,
                                          site_labels=site_labels,
                                          f=out,
                                          prefix="0^",
                                          max_items=28)
    if (verbose):
        sys.stdout.write(out.getvalue())
    assert not show_diff(out.getvalue().replace("e-00", "e-0"),
                         """\
0^Bond restraints: 48
0^Sorted by residual:
0^bond O3
0^     O4
0^  ideal  model  delta    sigma   weight residual
0^  2.629  2.120  0.509 1.56e+00 4.10e-01 1.06e-01
...
0^bond SI1
0^     SI1
0^  ideal  model  delta    sigma   weight residual sym.op.
0^  3.071  3.216 -0.145 2.08e+00 2.31e-01 4.83e-03 -x+1/2,-y+1/2,-z+1
0^... (remaining 20 not shown)
""",
                         selections=[range(6), range(-5, 0)])
    out = StringIO()
    pair_proxies.bond_proxies.show_sorted(by_value="delta",
                                          sites_cart=sites_cart,
                                          site_labels=site_labels,
                                          f=out,
                                          prefix="0^",
                                          max_items=28)
    if (verbose):
        sys.stdout.write(out.getvalue())
    assert not show_diff(out.getvalue().replace("e-00", "e-0"),
                         """\
0^Bond restraints: 48
0^Sorted by delta:
0^bond O3
0^     O4
0^  ideal  model  delta    sigma   weight residual
0^  2.629  2.120  0.509 1.56e+00 4.10e-01 1.06e-01
...
0^... (remaining 20 not shown)
""",
                         selections=[range(6), [-1]])
    site_labels_long = ["abc" + label + "def" for label in site_labels]
    out = StringIO()
    pair_proxies.bond_proxies.show_sorted(by_value="residual",
                                          sites_cart=sites_cart,
                                          site_labels=site_labels_long,
                                          f=out,
                                          prefix="^0",
                                          max_items=28)
    if (verbose):
        sys.stdout.write(out.getvalue())
    assert not show_diff(out.getvalue().replace("e-00", "e-0"),
                         """\
^0Bond restraints: 48
^0Sorted by residual:
^0bond abcO3def
^0     abcO4def
^0  ideal  model  delta    sigma   weight residual
^0  2.629  2.120  0.509 1.56e+00 4.10e-01 1.06e-01
...
^0bond abcSI1def
^0     abcSI1def
^0  ideal  model  delta    sigma   weight residual sym.op.
^0  3.071  3.216 -0.145 2.08e+00 2.31e-01 4.83e-03 -x+1/2,-y+1/2,-z+1
^0... (remaining 20 not shown)
""",
                         selections=[range(6), range(-5, 0)])
    out = StringIO()
    pair_proxies.bond_proxies.show_sorted(by_value="residual",
                                          sites_cart=sites_cart,
                                          f=out,
                                          prefix=".=",
                                          max_items=28)
    if (verbose):
        sys.stdout.write(out.getvalue())
    assert not show_diff(out.getvalue().replace("e-00", "e-0"),
                         """\
.=Bond restraints: 48
.=Sorted by residual:
.=bond 4
.=     5
.=  ideal  model  delta    sigma   weight residual
.=  2.629  2.120  0.509 1.56e+00 4.10e-01 1.06e-01
...
.=bond 0
.=     0
.=  ideal  model  delta    sigma   weight residual sym.op.
.=  3.071  3.216 -0.145 2.08e+00 2.31e-01 4.83e-03 -x+1/2,-y+1/2,-z+1
.=... (remaining 20 not shown)
""",
                         selections=[range(6), range(-5, 0)])
    out = StringIO()
    pair_proxies.bond_proxies.show_sorted(by_value="residual",
                                          sites_cart=sites_cart,
                                          f=out,
                                          prefix="-+",
                                          max_items=1)
    if (verbose):
        sys.stdout.write(out.getvalue())
    assert not show_diff(
        out.getvalue().replace("e-00", "e-0"), """\
-+Bond restraints: 48
-+Sorted by residual:
-+bond 4
-+     5
-+  ideal  model  delta    sigma   weight residual
-+  2.629  2.120  0.509 1.56e+00 4.10e-01 1.06e-01
-+... (remaining 47 not shown)
""")
    out = StringIO()
    pair_proxies.bond_proxies.show_sorted(by_value="residual",
                                          sites_cart=sites_cart,
                                          f=out,
                                          prefix="=+",
                                          max_items=0)
    if (verbose):
        sys.stdout.write(out.getvalue())
    assert not show_diff(
        out.getvalue(), """\
=+Bond restraints: 48
=+Sorted by residual:
=+... (remaining 48 not shown)
""")
    #
    sites_cart = si_structure.sites_cart()
    site_labels = [sc.label for sc in si_structure.scatterers()]
    asu_mappings = si_structure.asu_mappings(buffer_thickness=3.5)
    for min_cubicle_edge in [0, 5]:
        pair_generator = crystal.neighbors_fast_pair_generator(
            asu_mappings=asu_mappings,
            distance_cutoff=asu_mappings.buffer_thickness(),
            minimal=False,
            min_cubicle_edge=min_cubicle_edge)
        sorted_asu_proxies = geometry_restraints.nonbonded_sorted_asu_proxies(
            asu_mappings=asu_mappings)
        while (not pair_generator.at_end()):
            p = geometry_restraints.nonbonded_asu_proxy(
                pair=next(pair_generator), vdw_distance=3)
            sorted_asu_proxies.process(p)
        out = StringIO()
        sorted_asu_proxies.show_sorted(by_value="delta",
                                       sites_cart=sites_cart,
                                       site_labels=site_labels,
                                       f=out,
                                       prefix="d%")
        if (verbose):
            sys.stdout.write(out.getvalue())
        assert not show_diff(
            out.getvalue(),
            """\
d%Nonbonded interactions: 7
d%Sorted by model distance:
...
d%nonbonded SI2
d%          SI2
d%   model   vdw sym.op.
d%   3.092 3.000 -x+1,y,-z
...
d%nonbonded SI1
d%          SI1
d%   model   vdw sym.op.
d%   3.216 3.000 -x+1/2,-y+1/2,-z+1
""",
            selections=[range(2), range(10, 14),
                        range(26, 30)])
        out = StringIO()
        sorted_asu_proxies.show_sorted(by_value="delta",
                                       sites_cart=sites_cart,
                                       f=out,
                                       prefix="*j",
                                       max_items=5)
        if (verbose):
            sys.stdout.write(out.getvalue())
        assert not show_diff(out.getvalue(),
                             """\
*jNonbonded interactions: 7
*jSorted by model distance:
...
*jnonbonded 0
*j          1
*j   model   vdw
*j   3.107 3.000
*jnonbonded 0
*j          0
*j   model   vdw sym.op.
*j   3.130 3.000 -x+1,y,-z+1
*j... (remaining 2 not shown)
""",
                             selections=[range(2), range(-9, 0)])
        out = StringIO()
        sorted_asu_proxies.show_sorted(by_value="delta",
                                       sites_cart=sites_cart,
                                       f=out,
                                       prefix="@r",
                                       max_items=0)
        if (verbose):
            sys.stdout.write(out.getvalue())
        assert not show_diff(out.getvalue(), """\
@rNonbonded interactions: 7
""")
Example #34
File: JFS.py Project: ariselseng/jottalib
    def write(self, data):
        'Put, possibly replace, file contents with (new) data'
        if not hasattr(data, 'read'):
            data = StringIO(data)
        self.jfs.up(self.path, data)
Example #35
    def test_cord2_rcs_03(self):
        """
        all points are located at <30,40,50>
        """
        model = BDF(debug=False)
        cards = [
            [
                'CORD2S*                2               0              0.              0.',
                '*                     0.              0.              0.              1.*       ',
                '*                     1.              0.              1.',
            ],
            [
                #'$ Coordinate System 30 : rectangular in spherical',
                'CORD2R*               30               2             14.             30.',
                '*                    70.    13.431863852   32.1458443949   75.2107442927*       ',
                '*          14.4583462334   33.4569982885   68.2297989286',
            ],
            [
                #'$ Coordinate System 31 : cylindrical in spherical',
                'CORD2C*               31               2              3.             42.',
                '*                  -173.   2.86526881213   45.5425615252   159.180363517*       ',
                '*          3.65222385965   29.2536614627  -178.631312271',
            ],
            [
                #'$ Coordinate System 32 : spherical in spherical',
                'CORD2S*               32               2             22.             14.',
                '*                    85.   22.1243073983   11.9537753718   77.9978191005*       ',
                '*          21.0997242967   13.1806120497   88.4824763008',
            ],
            [
                'GRID*                 30              30   40.7437952957  -23.6254877994',
                '*           -33.09784854               0',
            ],
            [
                'GRID*                 31              31   62.9378078196   15.9774797923',
                '*          31.0484428362               0',
            ],
            [
                'GRID*                 32              32   53.8270847449   95.8215692632',
                '*          159.097767463               0',
            ],
        ]
        for lines in cards:
            card = model._process_card(lines)
            model.add_card(card, card[0])
        unused_xyz_cid0b = model.get_xyz_in_coord_no_xref(cid=0,
                                                          fdtype='float64')
        unused_xyz_cid0c = model.get_xyz_in_coord_no_xref(cid=32,
                                                          fdtype='float64')
        bdf_file = StringIO()
        model.write_bdf(bdf_file, close=False)
        bdf_file.seek(0)

        #-------------------------------------------------
        model.cross_reference()

        model2 = BDFv(debug=False)
        model2.read_bdf(bdf_file,
                        punch=True,
                        xref=False,
                        save_file_structure=False)
        bdf_file.seek(0)
        #-------------------------------------------------

        xyz_cid0_actual = array([
            [30., 40., 50.],
            [30., 40., 50.],
            [30., 40., 50.],
        ],
                                dtype='float64')
        for nid in model.nodes:
            node = model.Node(nid)
            a = array([30., 40., 50.])
            b = node.get_position()
            self.assertTrue(
                allclose(array([30., 40., 50.]), node.get_position()),
                str(a - b))
        xyz_cid0 = model.get_xyz_in_coord(cid=0, fdtype='float64')
        assert np.allclose(xyz_cid0_actual,
                           xyz_cid0), '%s' % (xyz_cid0_actual - xyz_cid0)

        out = model.get_displacement_index_xyz_cp_cd()
        unused_icd_transform, icp_transform, xyz_cp, nid_cp_cd = out
        nids = nid_cp_cd[:, 0]
        xyz_cid0_xform = model.transform_xyzcp_to_xyz_cid(xyz_cp,
                                                          nids,
                                                          icp_transform,
                                                          cid=0)
        array_equal(xyz_cid0_actual, xyz_cid0_xform)
        assert array_equal(nids, array([30, 31, 32]))
        model2.nodes.nids = nid_cp_cd[:, 0]

        for cid in [30, 31, 32]:
            unused_xyz_cid_a = model.transform_xyzcp_to_xyz_cid(xyz_cp,
                                                                nids,
                                                                icp_transform,
                                                                cid=cid)
            unused_xyz_cid_b = model2.transform_xyzcp_to_xyz_cid(xyz_cp,
                                                                 nids,
                                                                 icp_transform,
                                                                 cid=cid,
                                                                 atol=None)
            #assert np.allclose(xyz_cid_a, xyz_cid_b), '%s' % np.isclose(xyz_cid_a, xyz_cid_b)

            #print(xyz_cid_a)
            #print(xyz_cid_b)
            #print(xyz_cid_a - xyz_cid_b)
            #print('-------------')
            #assert array_equal(xyz_cid_a, xyz_cid_b), 'error=%s'  % (
            #xyz_cid_a - xyz_cid_b)

        #---------------------------------------------
        xyz_cid0 = model.transform_xyzcp_to_xyz_cid(xyz_cp,
                                                    nids,
                                                    icp_transform,
                                                    cid=0,
                                                    atol=None)
        array_equal(xyz_cid0_actual, xyz_cid0)

        model.write_bdf(bdf_file, close=False)

        model3 = BDF(debug=False)
        origin = [14., 30., 70.]
        zaxis = [13.431863852, 32.1458443949, 75.2107442927]
        xzplane = [14.4583462334, 33.4569982885, 68.2297989286]
        cord2r = model3.add_cord2r(30,
                                   origin,
                                   zaxis,
                                   xzplane,
                                   rid=2,
                                   comment='')

        origin = [3., 42., -173.]
        zaxis = [2.86526881213, 45.5425615252, 159.180363517]
        xzplane = [3.65222385965, 29.2536614627, -178.631312271]
        cord2c = model3.add_cord2c(31,
                                   origin,
                                   zaxis,
                                   xzplane,
                                   rid=2,
                                   comment='')

        origin = [22., 14., 85.]
        zaxis = [22.1243073983, 11.9537753718, 77.9978191005]
        xzplane = [21.0997242967, 13.1806120497, 88.4824763008]
        cord2s = model3.add_cord2s(32,
                                   origin,
                                   zaxis,
                                   xzplane,
                                   rid=2,
                                   comment='')

        assert cord2r == model.coords[
            cord2r.cid], 'cord2r:\n%r\ncord2r[cid]:\n%r' % (
                str(cord2r), str(model.coords[cord2r.cid]))
        assert cord2c == model.coords[
            cord2c.cid], 'cord2c:\n%r\ncord2c[cid]:\n%r' % (
                str(cord2c), str(model.coords[cord2c.cid]))
        assert cord2s == model.coords[
            cord2s.cid], 'cord2s:\n%r\ncord2s[cid]:\n%r' % (
                str(cord2s), str(model.coords[cord2s.cid]))
Example #36
    def __enter__(self):
        self._stdout = sys.stdout
        self._stderr = sys.stderr
        sys.stdout = self._io_stdout = StringIO()
        sys.stderr = self._io_stderr = StringIO()
        return self
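The matching __exit__ is not included in this snippet; presumably it restores the saved streams, along these lines (a sketch, not the original source):

    def __exit__(self, exc_type, exc_value, traceback):
        # Assumption: undo the redirection performed in __enter__.
        sys.stdout = self._stdout
        sys.stderr = self._stderr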
Example #37
def run(args, params=None, out=sys.stdout, log=sys.stderr):
  # params keyword is for running program from GUI dialog
  if ( ((len(args) == 0) and (params is None)) or
       ((len(args) > 0) and ((args[0] == "-h") or (args[0] == "--help"))) ):
    show_usage()
    return

  # parse command-line arguments
  if (params is None):
    pcl = iotbx.phil.process_command_line_with_files(
      args=args,
      master_phil_string=master_phil_str,
      pdb_file_def="file_name")
    work_params = pcl.work.extract()
  # or use parameters defined by GUI
  else:
    work_params = params
  pdb_files = work_params.file_name

  work_params.secondary_structure.enabled=True
  assert work_params.format in ["phenix", "phenix_refine", "phenix_bonds",
      "pymol", "refmac", "kinemage", "pdb", 'csv']
  if work_params.quiet :
    out = StringIO()

  pdb_combined = iotbx.pdb.combine_unique_pdb_files(file_names=pdb_files)
  pdb_structure = iotbx.pdb.input(source_info=None,
    lines=flex.std_string(pdb_combined.raw_records))
  cs = pdb_structure.crystal_symmetry()

  corrupted_cs = False
  if cs is not None:
    if [cs.unit_cell(), cs.space_group()].count(None) > 0:
      corrupted_cs = True
      cs = None
    elif cs.unit_cell().volume() < 10:
      corrupted_cs = True
      cs = None

  if cs is None:
    if corrupted_cs:
      print("Symmetry information is corrupted, ", file=out)
    else:
      print("Symmetry information was not found, ", file=out)
    print("putting molecule in P1 box.", file=out)
    from cctbx import uctbx
    atoms = pdb_structure.atoms()
    box = uctbx.non_crystallographic_unit_cell_with_the_sites_in_its_center(
      sites_cart=atoms.extract_xyz(),
      buffer_layer=3)
    atoms.set_xyz(new_xyz=box.sites_cart)
    cs = box.crystal_symmetry()

  defpars = mmtbx.model.manager.get_default_pdb_interpretation_params()
  defpars.pdb_interpretation.automatic_linking.link_carbohydrates=False
  defpars.pdb_interpretation.c_beta_restraints=False
  defpars.pdb_interpretation.clash_guard.nonbonded_distance_threshold=None
  model = mmtbx.model.manager(
      model_input=pdb_structure,
      crystal_symmetry=cs,
      pdb_interpretation_params=defpars,
      stop_for_unknowns=False)
  pdb_hierarchy = model.get_hierarchy()
  geometry = None
  if pdb_hierarchy.contains_nucleic_acid():
    geometry = model.get_restraints_manager().geometry
  if len(pdb_hierarchy.models()) != 1 :
    raise Sorry("Multiple models not supported.")
  ss_from_file = None
  if (hasattr(pdb_structure, "extract_secondary_structure") and
      not work_params.ignore_annotation_in_file):
    ss_from_file = pdb_structure.extract_secondary_structure()
  m = manager(pdb_hierarchy=pdb_hierarchy,
    geometry_restraints_manager=geometry,
    sec_str_from_pdb_file=ss_from_file,
    params=work_params.secondary_structure,
    verbose=work_params.verbose)

  # bp_p = nucleic_acids.get_basepair_plane_proxies(
  #     pdb_hierarchy,
  #     m.params.secondary_structure.nucleic_acid.base_pair,
  #     geometry)
  # st_p = nucleic_acids.get_stacking_proxies(
  #     pdb_hierarchy,
  #     m.params.secondary_structure.nucleic_acid.stacking_pair,
  #     geometry)
  # hb_b, hb_a = nucleic_acids.get_basepair_hbond_proxies(pdb_hierarchy,
  #     m.params.secondary_structure.nucleic_acid.base_pair)
  result_out = StringIO()
  # prefix_scope="refinement.pdb_interpretation"
  # prefix_scope=""
  prefix_scope=""
  if work_params.format == "phenix_refine":
    prefix_scope = "refinement.pdb_interpretation"
  elif work_params.format == "phenix":
    prefix_scope = "pdb_interpretation"
  ss_phil = None
  working_phil = m.as_phil_str(master_phil=sec_str_master_phil)
  phil_diff = sec_str_master_phil.fetch_diff(source=working_phil)

  if work_params.format in ["phenix", "phenix_refine"]:
    comment = "\n".join([
      "# These parameters are suitable for use in e.g. phenix.real_space_refine",
      "# or geometry_minimization. To use them in phenix.refine add ",
      "# 'refinement.' if front of pdb_interpretation."])
    if work_params.format == "phenix_refine":
      comment = "\n".join([
      "# These parameters are suitable for use in phenix.refine only.",
      "# To use them in other Phenix tools remove ",
      "# 'refinement.' if front of pdb_interpretation."])
    print(comment, file=result_out)
    if (prefix_scope != ""):
      print("%s {" % prefix_scope, file=result_out)
    if work_params.show_all_params :
      working_phil.show(prefix="  ", out=result_out)
    else :
      phil_diff.show(prefix="  ", out=result_out)
    if (prefix_scope != ""):
      print("}", file=result_out)
  elif work_params.format == "pdb":
    print(m.actual_sec_str.as_pdb_str(), file=result_out)
  elif work_params.format == "phenix_bonds" :
    raise Sorry("Not yet implemented.")
  elif work_params.format in ["pymol", "refmac", "kinemage", 'csv'] :
    m.show_summary(log=out)
    model.process_input_model(make_restraints=True)
    (hb_proxies, hb_angle_proxies, planarity_proxies,
        parallelity_proxies) = m.create_all_new_restraints(
        pdb_hierarchy=pdb_hierarchy,
        grm=model.get_restraints_manager().geometry,
        log=out)
    if hb_proxies.size() > 0:
      if work_params.format == "pymol" :
        file_load_add = "load %s" % work_params.file_name[0]
        # surprisingly, pymol handles filenames with whitespaces without quotes...
        print(file_load_add, file=result_out)
        bonds_in_format = hb_proxies.as_pymol_dashes(
            pdb_hierarchy=pdb_hierarchy)
      elif work_params.format == "kinemage" :
        bonds_in_format = hb_proxies.as_kinemage(
            pdb_hierarchy=pdb_hierarchy)
      elif work_params.format == "csv" :
        bonds_in_format = hb_proxies.as_csv(
            pdb_hierarchy=pdb_hierarchy)
      else :
        bonds_in_format = hb_proxies.as_refmac_restraints(
            pdb_hierarchy=pdb_hierarchy)
      print(bonds_in_format, file=result_out)
    if hb_angle_proxies.size() > 0:
      if work_params.format == "pymol":
        angles_in_format = hb_angle_proxies.as_pymol_dashes(
            pdb_hierarchy=pdb_hierarchy)
        print(angles_in_format, file=result_out)
  result = result_out.getvalue()
  out_prefix = os.path.basename(work_params.file_name[0])
  if work_params.output_prefix is not None:
    out_prefix = work_params.output_prefix
  filename = "%s_ss.eff" % out_prefix
  if work_params.format == "pymol":
    filename = "%s_ss.pml" % out_prefix
  outf = open(filename, "w")
  outf.write(result)
  outf.close()
  print(result, file=out)

  return os.path.abspath(filename)
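A hedged usage sketch (assuming a working cctbx/Phenix environment, an
importable copy of this module, and a local model.pdb; "format=pymol" is one
of the values asserted above and is passed as an ordinary phil assignment):

  eff_path = run(["model.pdb", "format=pymol"])
  print("restraints file written to", eff_path)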
Example #38
 def test_no_historical(self):
     out = StringIO()
     with replace_registry({"test_place": Place}):
         management.call_command(self.command_name, auto=True, stdout=out)
     self.assertIn(clean_old_history.Command.NO_REGISTERED_MODELS,
                   out.getvalue())
def four_lines_dataframe():
  text = StringIO(FOUR_LINES)
  return pd.read_csv(text, names=automobile_data.COLUMN_TYPES.keys(),
                     dtype=automobile_data.COLUMN_TYPES, na_values="?")
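FOUR_LINES and automobile_data come from elsewhere in that test module; a
self-contained sketch of the same StringIO-to-read_csv pattern (the inline
data and column types below are made up for illustration):

import pandas as pd
from io import StringIO

# hypothetical stand-ins for FOUR_LINES / automobile_data.COLUMN_TYPES
CSV_LINES = "27,ford,2.0\n?,bmw,3.0\n"
COLUMN_TYPES = {"mpg": float, "make": str, "displacement": float}

frame = pd.read_csv(StringIO(CSV_LINES),
                    names=list(COLUMN_TYPES.keys()),
                    dtype=COLUMN_TYPES,
                    na_values="?")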
Example #40
File: merge.py Project: JBlaschke/dials
def merge_and_truncate(params, experiments, reflections):
    """Filter data, assess space group, run french wilson and Wilson stats."""

    logger.info("\nMerging scaled reflection data\n")
    # first filter bad reflections using dials.util.filter methods
    reflections = filter_reflection_table(
        reflections[0],
        intensity_choice=["scale"],
        d_min=params.d_min,
        d_max=params.d_max,
        combine_partials=params.combine_partials,
        partiality_threshold=params.partiality_threshold,
    )
    # ^ scale factor has been applied, so now set to 1.0 - okay as not
    # going to output scale factor in merged mtz.
    reflections["inverse_scale_factor"] = flex.double(reflections.size(), 1.0)

    scaled_array = scaled_data_as_miller_array([reflections], experiments)
    # Note, merge_equivalents does not raise an error if data is unique.
    if params.anomalous:
        anomalous_scaled = scaled_array.as_anomalous_array()

    merged = scaled_array.merge_equivalents(
        use_internal_variance=params.merging.use_internal_variance).array()
    merged_anom = None
    if params.anomalous:
        merged_anom = anomalous_scaled.merge_equivalents(
            use_internal_variance=params.merging.use_internal_variance).array(
            )

    # Before merge, do some assessment of the space_group
    if params.assess_space_group:
        merged_reflections = flex.reflection_table()
        merged_reflections["intensity"] = merged.data()
        merged_reflections["variance"] = flex.pow2(merged.sigmas())
        merged_reflections["miller_index"] = merged.indices()
        logger.info("Running systematic absences check")
        run_systematic_absences_checks(experiments, merged_reflections)

    # Run the stats and truncation on the anomalous or the non-anomalous data?
    if params.anomalous:
        intensities = merged_anom
    else:
        intensities = merged

    assert intensities.is_xray_intensity_array()
    amplitudes = None
    anom_amplitudes = None
    if params.truncate:
        logger.info("\nScaling input intensities via French-Wilson Method")
        out = StringIO()
        if params.anomalous:
            anom_amplitudes = intensities.french_wilson(params=params, log=out)
            n_removed = intensities.size() - anom_amplitudes.size()
            assert anom_amplitudes.is_xray_amplitude_array()
            amplitudes = anom_amplitudes.as_non_anomalous_array()
            amplitudes = amplitudes.merge_equivalents().array()
        else:
            amplitudes = intensities.french_wilson(params=params, log=out)
            n_removed = intensities.size() - amplitudes.size()
        logger.info("Total number of rejected intensities %s", n_removed)
        logger.debug(out.getvalue())

    if params.reporting.wilson_stats:
        if not intensities.space_group().is_centric():
            try:
                wilson_scaling = data_statistics.wilson_scaling(
                    miller_array=intensities,
                    n_residues=params.n_residues)  # XXX default n_residues?
            except (IndexError, RuntimeError) as e:
                logger.error(
                    "\n"
                    "Error encountered during Wilson statistics calculation:\n"
                    "Perhaps there are too few unique reflections.\n"
                    "%s",
                    e,
                    exc_info=True,
                )
            else:
                # Divert output through logger - do with StringIO rather than
                # info_handle else get way too much whitespace in output.
                out = StringIO()
                wilson_scaling.show(out=out)
                logger.info(out.getvalue())

    # Apply wilson B to give absolute scale?

    # Show merging stats again.
    if params.reporting.merging_stats:
        try:
            stats, anom_stats = merging_stats_from_scaled_array(
                scaled_array,
                params.merging.n_bins,
                params.merging.use_internal_variance,
            )
        except DialsMergingStatisticsError as e:
            logger.error(e, exc_info=True)
        else:
            if params.merging.anomalous and anom_stats:
                logger.info(make_merging_statistics_summary(anom_stats))
            else:
                logger.info(make_merging_statistics_summary(stats))
            logger.info(table_1_summary(stats, anom_stats))

    return merged, merged_anom, amplitudes, anom_amplitudes
Example #41
def _find_snippet_imports(module_data, module_path, strip_comments):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.
    """

    module_style = 'old'
    if REPLACER in module_data:
        module_style = 'new'
    elif REPLACER_WINDOWS in module_data:
        module_style = 'new'
    elif 'from ansible.module_utils.' in module_data:
        module_style = 'new'
    elif 'WANT_JSON' in module_data:
        module_style = 'non_native_want_json'

    output = StringIO()
    lines = module_data.split('\n')
    snippet_names = []

    for line in lines:

        if REPLACER in line:
            output.write(_slurp(os.path.join(_SNIPPET_PATH, "basic.py")))
            snippet_names.append('basic')
        if REPLACER_WINDOWS in line:
            ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
            output.write(ps_data)
            snippet_names.append('powershell')
        elif line.startswith('from ansible.module_utils.'):
            tokens = line.split(".")
            import_error = False
            if len(tokens) != 3:
                import_error = True
            if " import *" not in line:
                import_error = True
            if import_error:
                raise AnsibleError(
                    "error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'"
                    % module_path)
            snippet_name = tokens[2].split()[0]
            snippet_names.append(snippet_name)
            output.write(
                _slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py")))
        else:
            if strip_comments and (line.startswith("#") or line == ''):
                # skip comment and blank lines when stripping is requested
                continue
            output.write(line)
            output.write("\n")

    if not module_path.endswith(".ps1"):
        # Unixy modules
        if len(snippet_names) > 0 and 'basic' not in snippet_names:
            raise AnsibleError(
                "missing required import in %s: from ansible.module_utils.basic import *"
                % module_path)
    else:
        # Windows modules
        if len(snippet_names) > 0 and 'powershell' not in snippet_names:
            raise AnsibleError(
                "missing required import in %s: # POWERSHELL_COMMON" %
                module_path)

    return (output.getvalue(), module_style)
Example #42
 def test_no_args(self):
     out = StringIO()
     management.call_command(self.command_name,
                             stdout=out,
                             stderr=StringIO())
     self.assertIn(clean_old_history.Command.COMMAND_HINT, out.getvalue())
Example #43
def log():
    return StringIO("""
    Info: Benchmark time: 1 CPUs 1.13195 s/step 13.1013 days/ns 2727.91 MB memory
    """)
Example #44
    def install(self):
        options = self.options

        if not os.path.exists(options['runzeo']):
            logger.warn(no_runzeo % options['runzeo'])

        run_directory = options['run-directory']
        deployment = self.deployment
        if deployment:
            zeo_conf_path = os.path.join(options['etc-directory'],
                                         self.name + '-zeo.conf')
            zdaemon_conf_path = os.path.join(options['etc-directory'],
                                             self.name + '-zdaemon.conf')
            event_log_path = os.path.join(options['log-directory'],
                                          self.name + '-zeo.log')
            socket_path = os.path.join(run_directory,
                                       self.name + '-zdaemon.sock')
            rc = options['deployment-name'] + '-' + self.name

            options.created(
                zeo_conf_path,
                zdaemon_conf_path,
                os.path.join(options['rc-directory'], rc),
            )

            logrotate = options['logrotate']
            if logrotate:
                open(logrotate, 'w').write(logrotate_template % dict(
                    logfile=event_log_path,
                    rc=os.path.join(options['rc-directory'], rc),
                    conf=zdaemon_conf_path,
                ))
                options.created(logrotate)

            pack = options.get('pack')
            if pack:
                pack = pack.split()
                if len(pack) < 5:
                    raise zc.buildout.UserError(
                        'Too few crontab fields in pack specification')
                if len(pack) > 7:
                    raise zc.buildout.UserError(
                        'Too many values in pack option')
                pack_path = os.path.join(
                    options['crontab-directory'],
                    "pack-%s-%s" % (options['deployment-name'], self.name),
                )
                if not os.path.exists(options['zeopack']):
                    logger.warn("Couln'e find zeopack script, %r",
                                options['zeopack'])
        else:
            zeo_conf_path = os.path.join(run_directory, 'zeo.conf')
            zdaemon_conf_path = os.path.join(run_directory, 'zdaemon.conf')
            event_log_path = os.path.join(run_directory, 'zeo.log')
            socket_path = os.path.join(run_directory, 'zdaemon.sock')
            rc = self.name
            options.created(
                run_directory,
                os.path.join(options['rc-directory'], rc),
            )
            if not os.path.exists(run_directory):
                os.mkdir(run_directory)
            pack = pack_path = None

        zeo_conf = options.get('zeo.conf', '') + '\n'
        try:
            zeo_conf = ZConfig.schemaless.loadConfigFile(StringIO(zeo_conf))
        except ConfigurationSyntaxError as e:
            raise zc.buildout.UserError('%s in:\n%s' % (e, zeo_conf))

        zeo_section = [s for s in zeo_conf.sections if s.type == 'zeo']
        if not zeo_section:
            raise zc.buildout.UserError('No zeo section was defined.')
        if len(zeo_section) > 1:
            raise zc.buildout.UserError('Too many zeo sections.')
        zeo_section = zeo_section[0]
        if not 'address' in zeo_section:
            raise zc.buildout.UserError('No ZEO address was specified.')

        storages = [
            s.name or '1' for s in zeo_conf.sections
            if s.type not in ('zeo', 'eventlog', 'runner')
        ]

        if not storages:
            raise zc.buildout.UserError('No storages were defined.')

        if not [s for s in zeo_conf.sections if s.type == 'eventlog']:
            zeo_conf.sections.append(event_log('STDOUT'))

        zdaemon_conf = options.get('zdaemon.conf', '') + '\n'
        zdaemon_conf = ZConfig.schemaless.loadConfigFile(
            StringIO(zdaemon_conf))

        defaults = {
            'program': "%s -C %s" % (options['runzeo'], zeo_conf_path),
            'daemon': 'on',
            'transcript': event_log_path,
            'socket-name': socket_path,
            'directory': run_directory,
        }
        if deployment:
            defaults['user'] = options['user']
        runner = [s for s in zdaemon_conf.sections if s.type == 'runner']
        if runner:
            runner = runner[0]
        else:
            runner = ZConfig.schemaless.Section('runner')
            zdaemon_conf.sections.insert(0, runner)
        for name, value in defaults.items():
            if name not in runner:
                runner[name] = [value]

        if not [s for s in zdaemon_conf.sections if s.type == 'eventlog']:
            zdaemon_conf.sections.append(event_log(event_log_path))

        zdaemon_conf = str(zdaemon_conf)

        self.egg.install()
        requirements, ws = self.egg.working_set()

        open(zeo_conf_path, 'w').write(str(zeo_conf))
        open(zdaemon_conf_path, 'w').write(str(zdaemon_conf))

        if options.get('shell-script') == 'true':
            if not os.path.exists(options['zdaemon']):
                logger.warn(no_zdaemon % options['zdaemon'])

            contents = "%(zdaemon)s -C '%(conf)s' $*" % dict(
                zdaemon=options['zdaemon'],
                conf=zdaemon_conf_path,
            )
            if options.get('user'):
                contents = 'su %(user)s -c \\\n  "%(contents)s"' % dict(
                    user=options['user'],
                    contents=contents,
                )
            contents = "#!/bin/sh\n%s\n" % contents

            dest = os.path.join(options['rc-directory'], rc)
            if not (os.path.exists(dest) and open(dest).read() == contents):
                open(dest, 'w').write(contents)
                os.chmod(dest, 0o755)
                logger.info("Generated shell script %r.", dest)

        else:
            self.egg.install()
            requirements, ws = self.egg.working_set()
            zc.buildout.easy_install.scripts(
                [(rc, 'zdaemon.zdctl', 'main')],
                ws,
                options['executable'],
                options['rc-directory'],
                arguments=('['
                           '\n        %r, %r,'
                           '\n        ]+sys.argv[1:]'
                           '\n        ' % (
                               '-C',
                               zdaemon_conf_path,
                           )),
            )

        if pack:
            address, = zeo_section['address']
            if ':' in address:
                host, port = address.split(':')
                address = '-h %s -p %s' % (host, port)
            else:
                try:
                    port = int(address)
                except ValueError:
                    # not a port number; treat address as a unix socket path
                    address = '-U ' + address
                else:
                    address = '-p ' + address
            f = open(pack_path, 'w')
            if len(pack) == 7:
                assert '@' in pack[6]
                f.write("MAILTO=%s\n" % pack.pop())

            if len(pack) == 6:
                days = pack.pop()
            else:
                days = 1

            for storage in storages:
                f.write("%s %s %s %s -S %s -d %s\n" % (
                    ' '.join(pack),
                    options['user'],
                    options['zeopack'],
                    address,
                    storage,
                    days,
                ))
            f.close()
            options.created(pack_path)

        return options.created()
Example #45
def exercise_hbond_as_cif_loop():
    xs = sucrose()
    for sc in xs.scatterers():
        sc.flags.set_grad_site(True)
    radii = [
        covalent_radii.table(elt).radius()
        for elt in xs.scattering_type_registry().type_index_pairs_as_dict()
    ]
    asu_mappings = xs.asu_mappings(buffer_thickness=2 * max(radii) + 0.5)
    pair_asu_table = crystal.pair_asu_table(asu_mappings)
    pair_asu_table.add_covalent_pairs(xs.scattering_types(), tolerance=0.5)
    hbonds = [
        geometry.hbond(1, 5, sgtbx.rt_mx('-X,0.5+Y,2-Z')),
        geometry.hbond(5, 14, sgtbx.rt_mx('-X,-0.5+Y,1-Z')),
        geometry.hbond(7, 10, sgtbx.rt_mx('1+X,+Y,+Z')),
        geometry.hbond(10, 0),
        geometry.hbond(12, 14, sgtbx.rt_mx('-1-X,0.5+Y,1-Z')),
        geometry.hbond(14, 12, sgtbx.rt_mx('-1-X,-0.5+Y,1-Z')),
        geometry.hbond(16, 7)
    ]
    loop = geometry.hbonds_as_cif_loop(hbonds,
                                       pair_asu_table,
                                       xs.scatterers().extract_labels(),
                                       sites_frac=xs.sites_frac()).loop
    s = StringIO()
    print(loop, file=s)
    assert not show_diff(
        s.getvalue(), """\
loop_
  _geom_hbond_atom_site_label_D
  _geom_hbond_atom_site_label_H
  _geom_hbond_atom_site_label_A
  _geom_hbond_distance_DH
  _geom_hbond_distance_HA
  _geom_hbond_distance_DA
  _geom_hbond_angle_DHA
  _geom_hbond_site_symmetry_A
  O2   H2   O4  0.8200  2.0636  2.8635  165.0  2_557
  O4   H4   O9  0.8200  2.0559  2.8736  174.9  2_546
  O5   H5   O7  0.8200  2.0496  2.8589  169.0  1_655
  O7   H7   O1  0.8200  2.0573  2.8617  166.8  .
  O8   H8   O9  0.8200  2.1407  2.8943  152.8  2_456
  O9   H9   O8  0.8200  2.1031  2.8943  162.1  2_446
  O10  H10  O5  0.8200  2.0167  2.7979  159.1  .

""")
    # with a covariance matrix
    flex.set_random_seed(1)
    vcv_matrix = matrix.diag(
      flex.random_double(size=xs.n_parameters(), factor=1e-5))\
               .as_flex_double_matrix().matrix_symmetric_as_packed_u()
    loop = geometry.hbonds_as_cif_loop(hbonds,
                                       pair_asu_table,
                                       xs.scatterers().extract_labels(),
                                       sites_frac=xs.sites_frac(),
                                       covariance_matrix=vcv_matrix,
                                       parameter_map=xs.parameter_map()).loop
    s = StringIO()
    print(loop, file=s)
    assert not show_diff(
        s.getvalue(), """\
loop_
  _geom_hbond_atom_site_label_D
  _geom_hbond_atom_site_label_H
  _geom_hbond_atom_site_label_A
  _geom_hbond_distance_DH
  _geom_hbond_distance_HA
  _geom_hbond_distance_DA
  _geom_hbond_angle_DHA
  _geom_hbond_site_symmetry_A
  O2   H2   O4  0.82(3)  2.06(3)    2.86(3)  165.0(18)  2_557
  O4   H4   O9  0.82(4)  2.06(4)    2.87(4)     175(2)  2_546
  O5   H5   O7  0.82(2)  2.05(2)  2.859(19)  169.0(18)  1_655
  O7   H7   O1  0.82(2)  2.06(2)    2.86(2)     167(2)  .
  O8   H8   O9  0.82(3)  2.14(3)    2.89(3)     153(3)  2_456
  O9   H9   O8  0.82(3)  2.10(3)    2.89(3)     162(2)  2_446
  O10  H10  O5  0.82(3)  2.02(3)    2.80(3)     159(3)  .

""")
    cell_vcv = flex.pow2(matrix.diag(flex.random_double(size=6,factor=1e-1))\
                         .as_flex_double_matrix().matrix_symmetric_as_packed_u())
    loop = geometry.hbonds_as_cif_loop(hbonds,
                                       pair_asu_table,
                                       xs.scatterers().extract_labels(),
                                       sites_frac=xs.sites_frac(),
                                       covariance_matrix=vcv_matrix,
                                       cell_covariance_matrix=cell_vcv,
                                       parameter_map=xs.parameter_map()).loop
    s = StringIO()
    print(loop, file=s)
    assert not show_diff(
        s.getvalue(), """\
loop_
  _geom_hbond_atom_site_label_D
  _geom_hbond_atom_site_label_H
  _geom_hbond_atom_site_label_A
  _geom_hbond_distance_DH
  _geom_hbond_distance_HA
  _geom_hbond_distance_DA
  _geom_hbond_angle_DHA
  _geom_hbond_site_symmetry_A
  O2   H2   O4  0.82(3)  2.06(4)  2.86(4)  165.0(18)  2_557
  O4   H4   O9  0.82(4)  2.06(4)  2.87(4)     175(2)  2_546
  O5   H5   O7  0.82(2)  2.05(2)  2.86(2)  169.0(18)  1_655
  O7   H7   O1  0.82(2)  2.06(3)  2.86(3)     167(2)  .
  O8   H8   O9  0.82(3)  2.14(4)  2.89(4)     153(3)  2_456
  O9   H9   O8  0.82(3)  2.10(3)  2.89(4)     162(2)  2_446
  O10  H10  O5  0.82(3)  2.02(3)  2.80(3)     159(3)  .

""")
Example #46
def empty_log():
    return StringIO("""
    not the log you are looking for
    """)
Example #47
    def test_rload(self):
        """tests DLOAD, RLOAD1, RLOAD2, TABLED2 cards"""
        model = BDF(debug=False)
        #model.case_control_deck = CaseControlDeck(['DLOAD=2', 'BEGIN BULK'])
        sid = 2
        excite_id = 20
        delay = 0
        tid = 42
        rload1 = model.add_rload1(sid,
                                  excite_id,
                                  delay=0,
                                  dphase=0,
                                  tc=0,
                                  td=0,
                                  Type='LOAD',
                                  comment='rload1')
        rload1 = model.add_rload1(sid,
                                  excite_id,
                                  delay=1.,
                                  dphase=0,
                                  tc=0,
                                  td=0,
                                  Type='DISP',
                                  comment='rload1')
        rload1 = model.add_rload1(sid,
                                  excite_id,
                                  delay=2,
                                  dphase=0,
                                  tc=0,
                                  td=0,
                                  Type='VELO',
                                  comment='rload1')
        rload1 = model.add_rload1(sid,
                                  excite_id,
                                  delay=0,
                                  dphase=0,
                                  tc=0,
                                  td=0,
                                  Type='ACC',
                                  comment='rload1')

        sid = 3
        excite_id = 30
        rload2 = model.add_rload2(sid,
                                  excite_id,
                                  delay=0,
                                  dphase=0,
                                  tb=0,
                                  tp=0,
                                  Type='LOAD',
                                  comment='rload2')
        rload2 = model.add_rload2(sid,
                                  excite_id,
                                  delay=1.,
                                  dphase=0,
                                  tb=0,
                                  tp=0,
                                  Type='D',
                                  comment='rload2')
        rload2 = model.add_rload2(sid,
                                  excite_id,
                                  delay=2,
                                  dphase=0,
                                  tb=0,
                                  tp=0,
                                  Type='V',
                                  comment='rload2')
        rload2 = model.add_rload2(sid,
                                  excite_id,
                                  delay=0,
                                  dphase=0,
                                  tb=0,
                                  tp=0,
                                  Type='A',
                                  comment='rload2')

        excite_id = 20
        nid = 21
        c = 1
        scale = 1.0
        model.add_darea(excite_id, nid, c, scale, comment='darea')
        model.add_grid(nid)

        excite_id = 30
        model.add_darea(excite_id, nid, c, scale, comment='darea')

        delay_id = 2
        nodes = 100
        components = 2
        delays = 1.5
        delay = model.add_delay(delay_id, nodes, components, delays)

        sid = 1
        scale = 1.0
        scale_factors = 1.
        load_ids = 2
        dload = model.add_dload(sid,
                                scale,
                                scale_factors,
                                load_ids,
                                comment='dload')

        x1 = 0.1
        x = np.linspace(0., 1.)
        y = np.sin(x)
        tabled2 = model.add_tabled2(tid, x1, x, y, comment='tabled2')

        model.pop_parse_errors()

        delay.validate()
        delay.raw_fields()
        delay.write_card()
        delay.write_card(size=16)

        rload1.validate()
        rload1.raw_fields()
        rload1.write_card()
        rload1.write_card(size=16)

        rload2.validate()
        rload2.raw_fields()
        rload2.write_card()
        rload2.write_card(size=16)

        dload.validate()
        dload.raw_fields()
        dload.write_card()
        dload.write_card(size=16)

        tabled2.validate()
        tabled2.raw_fields()
        tabled2.write_card()
        tabled2.write_card(size=16)

        model.validate()
        model.cross_reference()
        model.pop_xref_errors()
        #print(model.dareas)

        bdf_file = StringIO()
        model.write_bdf(bdf_file, close=False)
        out = bdf_file.getvalue()
        bdf_file.seek(0)
        outs = model.get_bdf_stats(return_type='list')
        outs = model.get_bdf_stats(return_type='string')

        freq = 0.5
        out1 = rload1.get_load_at_freq(freq, scale=1.)
        #out2 = rload2.get_load_at_time(freq, scale=1.)
        #print(out1)
        #print(out2)
        assert len(out1) == 1, out1
        #assert len(out2) == 1, out2

        freq = [0.5, 0.9]
        out1 = rload1.get_load_at_freq(freq, scale=1.)
        #out2 = rload2.get_load_at_freq(freq, scale=1.)
        #print(out1)
        #print(out2)
        assert len(out1) == 2, out1
        #assert len(out2) == 2, out2

        model2 = read_bdf(bdf_file, punch=True, debug=False)
        model2.uncross_reference()
        model2.safe_cross_reference()
        model2.uncross_reference()
Example #48
def _getInputFromUser(param):
    """
    this private func reads the data from the user
    for the given param
    """
    loop = True
    userInput = None

    try:
        if param.USE_DEFAULT:
            logging.debug("setting default value (%s) for key (%s)" %
                          (mask(param.DEFAULT_VALUE), param.CONF_NAME))
            controller.CONF[param.CONF_NAME] = param.DEFAULT_VALUE
        else:
            while loop:
                # If the value was not supplied by the command line flags
                if param.CONF_NAME not in commandLineValues:
                    message = StringIO()
                    message.write(param.PROMPT)

                    val_list = param.VALIDATORS or []
                    if (validators.validate_regexp not in val_list
                            and param.OPTION_LIST):
                        message.write(" [%s]" % "|".join(param.OPTION_LIST))

                    if param.DEFAULT_VALUE:
                        message.write("  [%s] " % (str(param.DEFAULT_VALUE)))

                    message.write(": ")
                    message.seek(0)
                    # mask password or hidden fields

                    if (param.MASK_INPUT):
                        userInput = getpass.getpass("%s :" % (param.PROMPT))
                    else:
                        userInput = raw_input(message.read())
                else:
                    userInput = commandLineValues[param.CONF_NAME]
                # If DEFAULT_VALUE is set and user did not input anything
                if userInput == "" and len(str(param.DEFAULT_VALUE)) > 0:
                    userInput = param.DEFAULT_VALUE

                # Param processing
                userInput = process_param_value(param, userInput)

                # If param requires validation
                try:
                    validate_param_value(param, userInput)
                    controller.CONF[param.CONF_NAME] = userInput
                    loop = False
                except ParamValidationError:
                    if param.LOOSE_VALIDATION:
                        # If validation failed but LOOSE_VALIDATION is true, ask user
                        answer = _askYesNo("User input failed validation, "
                                           "do you still wish to use it")
                        loop = not answer
                        if answer:
                            controller.CONF[param.CONF_NAME] = userInput
                            continue
                        else:
                            if param.CONF_NAME in commandLineValues:
                                del commandLineValues[param.CONF_NAME]
                    else:
                        # Delete value from commandLineValues so that we will prompt the user for input
                        if param.CONF_NAME in commandLineValues:
                            del commandLineValues[param.CONF_NAME]
                        loop = True
    except KeyboardInterrupt:
        # add the new line so messages won't be displayed on the same line as the question
        print("")
        raise
    except Exception:
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_READ_INPUT_PARAM %
                        (param.CONF_NAME))
Example #49
def setup(process_name, user_log_dict):
    """Set up the weewx logging facility"""

    # Create a ConfigObj from the default string. No interpolation (it interferes with the
    # interpolation directives embedded in the string).
    log_config = configobj.ConfigObj(StringIO(LOGGING_STR), interpolation=False, encoding='utf-8')

    # Turn off interpolation in the incoming dictionary. First save the old
    # value, then restore later. However, the incoming dictionary may be a simple
    # Python dictionary and not have interpolation. Hence the try block.
    try:
        old_interpolation = user_log_dict.interpolation
        user_log_dict.interpolation = False
    except AttributeError:
        old_interpolation = None

    # Merge in the user additions / changes:
    log_config.merge(user_log_dict)

    # Restore the old interpolation value
    if old_interpolation is not None:
        user_log_dict.interpolation = old_interpolation

    # Adjust the logging level in accordance with whether or not the 'debug' flag is on
    log_level = 'DEBUG' if weewx.debug else 'INFO'

    # Now we need to walk the structure, plugging in the values we know.
    # First, we need a function to do this:
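    # ('address' and 'facility' below are presumably module-level settings of
    # the weewx logging module; they are not defined in this snippet.)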
    def _fix(section, key):
        if isinstance(section[key], (list, tuple)):
            # The value is a list or tuple
            section[key] = [item.format(log_level=log_level,
                                        address=address,
                                        facility=facility,
                                        process_name=process_name) for item in section[key]]
        else:
            # The value is a string
            section[key] = section[key].format(log_level=log_level,
                                               address=address,
                                               facility=facility,
                                               process_name=process_name)

    # Using the function, walk the 'Logging' part of the structure
    log_config['Logging'].walk(_fix)

    # Extract just the part used by Python's logging facility
    log_dict = log_config.dict().get('Logging', {})

    # The root logger is denoted by an empty string by the logging facility. Unfortunately,
    # ConfigObj does not accept an empty string as a key. So, instead, we use this hack:
    try:
        log_dict['loggers'][''] = log_dict['loggers']['root']
        del log_dict['loggers']['root']
    except KeyError:
        pass

    # Make sure values are of the right type
    if 'version' in log_dict:
        log_dict['version'] = to_int(log_dict['version'])
    if 'disable_existing_loggers' in log_dict:
        log_dict['disable_existing_loggers'] = to_bool(log_dict['disable_existing_loggers'])
    if 'loggers' in log_dict:
        for logger in log_dict['loggers']:
            if 'propagate' in log_dict['loggers'][logger]:
                log_dict['loggers'][logger]['propagate'] = to_bool(log_dict['loggers'][logger]['propagate'])

    # Finally! The dictionary is ready. Set the defaults.
    logging.config.dictConfig(log_dict)
Example #50
def exercise_cif_from_cctbx():
    quartz = xray.structure(crystal_symmetry=crystal.symmetry(
        (5.01, 5.01, 5.47, 90, 90, 120), "P6222"),
                            scatterers=flex.xray_scatterer([
                                xray.scatterer("Si", (1 / 2., 1 / 2., 1 / 3.)),
                                xray.scatterer("O", (0.197, -0.197, 0.83333))
                            ]))
    for sc in quartz.scatterers():
        sc.flags.set_grad_site(True)
    s = StringIO()
    loop = geometry.distances_as_cif_loop(
        quartz.pair_asu_table(distance_cutoff=2),
        site_labels=quartz.scatterers().extract_labels(),
        sites_frac=quartz.sites_frac()).loop
    print(loop, file=s)
    assert not show_diff(
        s.getvalue(), """\
loop_
  _geom_bond_atom_site_label_1
  _geom_bond_atom_site_label_2
  _geom_bond_distance
  _geom_bond_site_symmetry_2
  Si  O  1.6160  4_554
  Si  O  1.6160  2_554
  Si  O  1.6160  3_664
  Si  O  1.6160  5_664

""")
    s = StringIO()
    loop = geometry.angles_as_cif_loop(
        quartz.pair_asu_table(distance_cutoff=2),
        site_labels=quartz.scatterers().extract_labels(),
        sites_frac=quartz.sites_frac()).loop
    print(loop, file=s)
    assert not show_diff(
        s.getvalue(), """\
loop_
  _geom_angle_atom_site_label_1
  _geom_angle_atom_site_label_2
  _geom_angle_atom_site_label_3
  _geom_angle
  _geom_angle_site_symmetry_1
  _geom_angle_site_symmetry_3
  O   Si  O   101.3  2_554  4_554
  O   Si  O   111.3  3_664  4_554
  O   Si  O   116.1  3_664  2_554
  O   Si  O   116.1  5_664  4_554
  O   Si  O   111.3  5_664  2_554
  O   Si  O   101.3  5_664  3_664
  Si  O   Si  146.9  3      5

""")
    # with a covariance matrix
    flex.set_random_seed(1)
    vcv_matrix = matrix.diag(
      flex.random_double(size=quartz.n_parameters(), factor=1e-5))\
               .as_flex_double_matrix().matrix_symmetric_as_packed_u()
    s = StringIO()
    loop = geometry.distances_as_cif_loop(
        quartz.pair_asu_table(distance_cutoff=2),
        site_labels=quartz.scatterers().extract_labels(),
        sites_frac=quartz.sites_frac(),
        covariance_matrix=vcv_matrix,
        parameter_map=quartz.parameter_map()).loop
    print(loop, file=s)
    assert not show_diff(
        s.getvalue(), """\
loop_
  _geom_bond_atom_site_label_1
  _geom_bond_atom_site_label_2
  _geom_bond_distance
  _geom_bond_site_symmetry_2
  Si  O  1.616(14)  4_554
  Si  O  1.616(12)  2_554
  Si  O  1.616(14)  3_664
  Si  O  1.616(12)  5_664

""")
    s = StringIO()
    loop = geometry.angles_as_cif_loop(
        quartz.pair_asu_table(distance_cutoff=2),
        site_labels=quartz.scatterers().extract_labels(),
        sites_frac=quartz.sites_frac(),
        covariance_matrix=vcv_matrix,
        parameter_map=quartz.parameter_map()).loop
    print(loop, file=s)
    assert not show_diff(
        s.getvalue(), """\
loop_
  _geom_angle_atom_site_label_1
  _geom_angle_atom_site_label_2
  _geom_angle_atom_site_label_3
  _geom_angle
  _geom_angle_site_symmetry_1
  _geom_angle_site_symmetry_3
  O   Si  O    101.3(8)  2_554  4_554
  O   Si  O   111.3(10)  3_664  4_554
  O   Si  O    116.1(9)  3_664  2_554
  O   Si  O    116.1(9)  5_664  4_554
  O   Si  O   111.3(10)  5_664  2_554
  O   Si  O    101.3(8)  5_664  3_664
  Si  O   Si   146.9(9)  3      5

""")
    cell_vcv = flex.pow2(matrix.diag(flex.random_double(size=6,factor=1e-1))\
                         .as_flex_double_matrix().matrix_symmetric_as_packed_u())
    s = StringIO()
    loop = geometry.distances_as_cif_loop(
        quartz.pair_asu_table(distance_cutoff=2),
        site_labels=quartz.scatterers().extract_labels(),
        sites_frac=quartz.sites_frac(),
        covariance_matrix=vcv_matrix,
        cell_covariance_matrix=cell_vcv,
        parameter_map=quartz.parameter_map()).loop
    print(loop, file=s)
    assert not show_diff(
        s.getvalue(), """\
loop_
  _geom_bond_atom_site_label_1
  _geom_bond_atom_site_label_2
  _geom_bond_distance
  _geom_bond_site_symmetry_2
  Si  O  1.616(15)  4_554
  Si  O  1.616(19)  2_554
  Si  O  1.616(15)  3_664
  Si  O  1.616(19)  5_664

""")
Example #51
    def test_section_typo(self):
        text = '\n'.join(['[clue]', 'rows=1', 'columns=1'])
        stream = StringIO(text)

        with pytest.raises(NoSectionError, match="No section: u?'clues'"):
            read_ini(stream)
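read_ini and NoSectionError are project-specific; the same StringIO-as-file
pattern with the standard library's configparser would look like this (a
sketch, not from the source):

import configparser
from io import StringIO

parser = configparser.ConfigParser()
parser.read_file(StringIO('\n'.join(['[clue]', 'rows=1', 'columns=1'])))
assert parser.get('clue', 'rows') == '1'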
Example #52
    def test_tload(self):
        """tests DLOAD, TLOAD1, TLOAD2, TABLED2 cards"""
        model = BDF(debug=False)
        sid = 2
        excite_id = 20
        delay = 0
        tid = 42
        tload1 = model.add_tload1(sid,
                                  excite_id,
                                  tid,
                                  delay=0,
                                  Type='LOAD',
                                  us0=0.0,
                                  vs0=0.0,
                                  comment='tload1')
        tload1 = model.add_tload1(sid,
                                  excite_id,
                                  tid,
                                  delay=1.,
                                  Type='DISP',
                                  us0=0.0,
                                  vs0=0.0,
                                  comment='')
        tload1 = model.add_tload1(sid,
                                  excite_id,
                                  tid,
                                  delay=2,
                                  Type='VELO',
                                  us0=0.0,
                                  vs0=0.0,
                                  comment='')
        tload1 = model.add_tload1(sid,
                                  excite_id,
                                  tid,
                                  delay=0,
                                  Type='ACC',
                                  us0=0.0,
                                  vs0=0.0,
                                  comment='')

        sid = 3
        excite_id = 30
        tload2 = model.add_tload2(sid,
                                  excite_id,
                                  delay=0,
                                  Type='LOAD',
                                  T1=0.,
                                  T2=None,
                                  frequency=0.,
                                  phase=0.,
                                  c=0.,
                                  b=0.,
                                  us0=0.,
                                  vs0=0.,
                                  comment='tload2')
        tload2 = model.add_tload2(sid,
                                  excite_id,
                                  delay=1.,
                                  Type='D',
                                  T1=0.,
                                  T2=None,
                                  frequency=0.,
                                  phase=0.,
                                  c=0.,
                                  b=0.,
                                  us0=0.,
                                  vs0=0.,
                                  comment='')
        tload2 = model.add_tload2(sid,
                                  excite_id,
                                  delay=2,
                                  Type='V',
                                  T1=0.,
                                  T2=None,
                                  frequency=0.,
                                  phase=0.,
                                  c=0.,
                                  b=0.,
                                  us0=0.,
                                  vs0=0.,
                                  comment='')
        tload2 = model.add_tload2(sid,
                                  excite_id,
                                  delay=0,
                                  Type='A',
                                  T1=0.,
                                  T2=1.,
                                  frequency=0.,
                                  phase=0.,
                                  c=0.,
                                  b=0.,
                                  us0=0.,
                                  vs0=0.,
                                  comment='')

        delay_id = 2
        nodes = 100
        components = 2
        delays = 1.5
        delay = model.add_delay(delay_id, nodes, components, delays)

        sid = 1
        scale = 1.0
        scale_factors = 1.
        load_ids = 2
        dload = model.add_dload(sid,
                                scale,
                                scale_factors,
                                load_ids,
                                comment='dload')

        x1 = 0.1
        x = np.linspace(0., 1.)
        y = np.sin(x)
        tabled2 = model.add_tabled2(tid, x1, x, y, comment='tabled2')

        model.pop_parse_errors()

        delay.validate()
        delay.raw_fields()
        delay.write_card()
        delay.write_card(size=16)

        tload1.validate()
        tload1.raw_fields()
        tload1.write_card()
        tload1.write_card(size=16)

        tload2.validate()
        tload2.raw_fields()
        tload2.write_card()
        tload2.write_card(size=16)

        dload.validate()
        dload.raw_fields()
        dload.write_card()
        dload.write_card(size=16)

        tabled2.validate()
        tabled2.raw_fields()
        tabled2.write_card()
        tabled2.write_card(size=16)

        model.validate()
        model.cross_reference()
        model.pop_xref_errors()

        bdf_file = StringIO()
        model.write_bdf(bdf_file, close=False)
        out = bdf_file.getvalue()
        bdf_file.seek(0)
        outs = model.get_bdf_stats(return_type='list')
        outs = model.get_bdf_stats(return_type='string')

        time = 0.5
        out1 = tload1.get_load_at_time(time, scale=1.)
        out2 = tload2.get_load_at_time(time, scale=1.)
        #print(out1)
        assert len(out1) == 1, out1
        assert len(out2) == 1, out2
        #print(out1)
        #print(out2)

        time = [0.5, 0.9]
        out1 = tload1.get_load_at_time(time, scale=1.)
        out2 = tload2.get_load_at_time(time, scale=1.)
        assert len(out1) == 2, out1
        assert len(out2) == 2, out2
        #print(out1)
        #print(out2)

        model2 = read_bdf(bdf_file, punch=True, debug=False)
        model2.uncross_reference()
        model2.safe_cross_reference()
        model2.uncross_reference()
Example #53
 def setUp(self):
     super(TestUpgradeCheckCellsV2, self).setUp()
     self.output = StringIO()
     self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
     self.useFixture(nova_fixtures.Database(database='api'))
     self.cmd = status.UpgradeCommands()
Example #54
File: fg.py Project: xzm2004260/Theano
    def __import__(self, apply_node, check=True, reason=None):
        """
        Given an apply_node, recursively search from this node to the known graph,
        and then add all unknown variables and apply_nodes to this graph.
        """
        node = apply_node

        # We import the nodes in topological order. We are only interested
        # in new nodes, so we use all variables we know of as if they were the input set.
        # (the functions in the graph module only use the input set to
        # know where to stop going down)
        new_nodes = graph.io_toposort(self.variables, apply_node.outputs)

        if check:
            for node in new_nodes:
                if hasattr(node, 'fgraph') and node.fgraph is not self:
                    raise Exception("%s is already owned by another fgraph" %
                                    node)
                for r in node.inputs:
                    if hasattr(r, 'fgraph') and r.fgraph is not self:
                        raise Exception(
                            "%s is already owned by another fgraph" % r)
                    if (r.owner is None and not isinstance(r, graph.Constant)
                            and r not in self.inputs):
                        # Verbose error message
                        # Show a complete chain of variables from the missing input to an output
                        if config.exception_verbosity == 'high':

                            def find_path_to(output_var, input_var):
                                """
                                Returns a list of each variable on a (not
                                necessarily unique) path from input_var to
                                output_var, where each variable in the list has
                                the preceding variable as one of its inputs.
                                Returns None if no path exists.

                                """
                                # If output and input are the same we have a singleton path
                                if output_var is input_var:
                                    return [output_var]

                                # If output has no inputs then there is no path
                                owner = output_var.owner

                                if owner is None:
                                    return None

                                # If input_var is an input to the output node, there is a
                                # simple two element path
                                inputs = owner.inputs

                                if input_var in inputs:
                                    return [input_var, output_var]

                                # Otherwise we must recurse by searching for a path to one
                                # of our inputs, then appending the output to that path
                                for ipt in inputs:
                                    path = find_path_to(ipt, input_var)

                                    if path is not None:
                                        path.append(output_var)

                                        return path

                                # Since none of the above methods returned a path, there is none
                                return None

                            # Try different outputs until we find one that has a path to the missing input
                            for output in self.outputs:
                                path = find_path_to(output, r)

                                if path is not None:
                                    break

                            # if there is no path then r isn't really a graph input so we shouldn't be running error
                            # handler code in the first place
                            assert path is not None
                            tr = getattr(r.tag, 'trace', [])
                            detailed_err_msg = ""
                            if len(tr) > 0:
                                detailed_err_msg += "\nBacktrace when the variable is created:\n"

                                # Print separate message for each element in
                                # the list of backtraces
                                sio = StringIO()
                                for subtr in tr:
                                    traceback.print_list(subtr, sio)
                                detailed_err_msg += str(sio.getvalue())
                            raise MissingInputError(
                                'A variable that is an input to the graph was '
                                'neither provided as an input to the function '
                                'nor given a value. A chain of variables '
                                'leading from this input to an output is %s. '
                                'This chain may not be unique' % str(path) +
                                detailed_err_msg)

                        # Standard error message
                        raise MissingInputError(
                            ("An input of the graph, used to compute %s, "
                             "was not provided and not given a value."
                             "Use the Theano flag exception_verbosity='high',"
                             "for more information on this error." %
                             str(node)), r)

        for node in new_nodes:
            assert node not in self.apply_nodes
            self.__setup_node__(node)
            self.apply_nodes.add(node)
            for output in node.outputs:
                self.__setup_r__(output)
                self.variables.add(output)
            for i, input in enumerate(node.inputs):
                if input not in self.variables:
                    self.__setup_r__(input)
                    self.variables.add(input)
                self.__add_clients__(input, [(node, i)])
            assert node.fgraph is self
            self.execute_callbacks('on_import', node, reason)
Example #55
class TestUpgradeCheckBasic(test.NoDBTestCase):
    """Tests for the nova-status upgrade check command.

    The tests in this class should just test basic logic and use mock. Real
    checks which require more elaborate fixtures or the database should be done
    in separate test classes as they are more or less specific to a particular
    release and may be removed in a later release after they are no longer
    needed.
    """
    def setUp(self):
        super(TestUpgradeCheckBasic, self).setUp()
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        self.cmd = status.UpgradeCommands()

    def test_check_success(self):
        fake_checks = (('good',
                        mock.Mock(return_value=status.UpgradeCheckResult(
                            status.UpgradeCheckCode.SUCCESS))), )
        with mock.patch.object(self.cmd, '_upgrade_checks', fake_checks):
            self.assertEqual(status.UpgradeCheckCode.SUCCESS, self.cmd.check())
        expected = """\
+-----------------------+
| Upgrade Check Results |
+-----------------------+
| Check: good           |
| Result: Success       |
| Details: None         |
+-----------------------+
"""
        self.assertEqual(expected, self.output.getvalue())

    def test_check_warning(self):
        fake_checks = (
            ('good',
             mock.Mock(return_value=status.UpgradeCheckResult(
                 status.UpgradeCheckCode.SUCCESS))),
            ('warn',
             mock.Mock(return_value=status.UpgradeCheckResult(
                 status.UpgradeCheckCode.WARNING, 'there might be a problem'))
             ),
        )
        with mock.patch.object(self.cmd, '_upgrade_checks', fake_checks):
            self.assertEqual(status.UpgradeCheckCode.WARNING, self.cmd.check())
        expected = """\
+-----------------------------------+
| Upgrade Check Results             |
+-----------------------------------+
| Check: good                       |
| Result: Success                   |
| Details: None                     |
+-----------------------------------+
| Check: warn                       |
| Result: Warning                   |
| Details: there might be a problem |
+-----------------------------------+
"""
        self.assertEqual(expected, self.output.getvalue())

    def test_check_failure(self):
        # make the error details over 60 characters so we test the wrapping
        error_details = 'go back to bed' + '!' * 60
        fake_checks = (
            ('good',
             mock.Mock(return_value=status.UpgradeCheckResult(
                 status.UpgradeCheckCode.SUCCESS))),
            ('warn',
             mock.Mock(return_value=status.UpgradeCheckResult(
                 status.UpgradeCheckCode.WARNING, 'there might be a problem'))
             ),
            ('fail',
             mock.Mock(return_value=status.UpgradeCheckResult(
                 status.UpgradeCheckCode.FAILURE, error_details))),
        )
        with mock.patch.object(self.cmd, '_upgrade_checks', fake_checks):
            self.assertEqual(status.UpgradeCheckCode.FAILURE, self.cmd.check())
        expected = """\
+-----------------------------------------------------------------------+
| Upgrade Check Results                                                 |
+-----------------------------------------------------------------------+
| Check: good                                                           |
| Result: Success                                                       |
| Details: None                                                         |
+-----------------------------------------------------------------------+
| Check: warn                                                           |
| Result: Warning                                                       |
| Details: there might be a problem                                     |
+-----------------------------------------------------------------------+
| Check: fail                                                           |
| Result: Failure                                                       |
| Details: go back to bed!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! |
|   !!!!!!!!!!!!!!                                                      |
+-----------------------------------------------------------------------+
"""
        self.assertEqual(expected, self.output.getvalue())
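The wrapping asserted in the failure table above (a long Details value broken at the cell width and continued with a two-space indent) can be reproduced with textwrap; this is a sketch of the formatting behaviour, not Nova's actual implementation, and the width of 60 is an assumption taken from the test comment.

import textwrap

# Sketch only: reproduce the two-space hanging indent seen in the
# expected table output above.
detail = 'go back to bed' + '!' * 60
for line in textwrap.wrap('Details: ' + detail, width=60,
                          subsequent_indent='  '):
    print(line)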
Example #56
    def setUp(self):
        super(TestNovaStatusMain, self).setUp()
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
Example #57
from io import StringIO
import traceback


def generateTransactionsSummary(files, unmatched_only=False):
    try:
        # Group transactions by biller code. 'files' holds the file records
        # directly when unmatched_only is set, otherwise (name, file) pairs.
        biller_codes = {}
        biller_code_emails = {}
        records = files if unmatched_only else (f for n, f in files)
        for f in records:
            for t in f.transactions.all():
                biller_codes.setdefault(t.biller_code, []).append(t)
        # Generate a summary per biller code
        for k, v in biller_codes.items():
            matched = [t for t in v if t.matched]
            unmatched = [t for t in v if not t.matched]
            output = StringIO()
            if not unmatched_only:
                # Matched txns
                output.write('Matched transactions:\n')
                for m in matched:
                    output.write('  CRN: {} Amount: ${}\n'.format(m.crn, m.amount))
            # Unmatched txns
            output.write('\nUnmatched transactions:\n')
            for u in unmatched:
                output.write('  CRN: {} Amount: ${}\n'.format(u.crn, u.amount))

            contents = output.getvalue()
            output.close()
            # Store the email body for this biller code
            biller_code_emails[k] = contents
        return biller_code_emails
    except Exception:
        traceback.print_exc()
        raise
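A hypothetical call, assuming Django-style file records whose 'transactions' manager yields objects with biller_code, crn, amount and matched attributes; 'bpay_files' is an assumption for illustration, not part of the snippet above.

summaries = generateTransactionsSummary(bpay_files, unmatched_only=True)
for biller_code, body in summaries.items():
    # Each value is a ready-to-send email body for that biller code
    print('Summary for biller code {}:'.format(biller_code))
    print(body)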
Example #58
    def setUp(self):
        super(TestUpgradeCheckBasic, self).setUp()
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        self.cmd = status.UpgradeCommands()
Example #59
from io import StringIO


def generateParserSummary(files):
    valid = files['valid']
    other = files['other']
    failed = files['failed']
    processed = files['processed']

    output = StringIO()
    # Successful files with transactions
    output.write('Successful Files with transactions:\n')
    for n, t in valid:
        output.write('  File Name: {}\n'.format(n))
        output.write('    Transactions:\n')
        for trans in t.transactions.all():
            output.write('      CRN: {}\n'.format(trans.crn))
    # Successful files without transactions
    output.write('\nSuccessful Files without transactions:\n')
    for n, t in other:
        output.write('  File Name: {}\n'.format(n))
    # Failed files
    output.write('\nFailed Files:\n')
    for n, r in failed:
        output.write('  File Name: {}\n'.format(n))
        output.write('    Reason: {}\n'.format(r))
    # Already processed files
    output.write('\nFiles previously processed:\n')
    for n, t in processed:
        output.write('  File Name: {}\n'.format(n))

    contents = output.getvalue()
    output.close()
    return contents
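A hypothetical call showing the expected shape of 'files': each bucket holds (name, record) pairs, except 'failed', which holds (name, reason) pairs; the sample entries below are made up for illustration.

report = generateParserSummary({
    'valid': [],       # (file_name, record) pairs with record.transactions
    'other': [],       # (file_name, record) pairs without transactions
    'failed': [('bpay_20180101.txt', 'invalid header')],
    'processed': [],   # (file_name, record) pairs already seen
})
print(report)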
Example #60
import json
from collections import OrderedDict
from io import StringIO
from optparse import OptionParser

import pyang
import pyang.plugin
from pyang.statements import Statement

# parse_dict, DummyRepository and the globals below are assumed to be
# defined elsewhere in this module.


def generate_yang(test, uuid_set):
    global count, level_memory
    global mqtt_commands
    global my_set
    global device_category

    my_set = uuid_set
    # Generate a YANG-in-XML tree:
    # - print the YANG tree as a string via StringIO
    # - build a new root element, called <data>, with an 'xmlns' attribute
    # - attach the stringified CDATA to the new element
    # - print the XML

    #python-modeled.netconf/modeled/netconf/yang/__init__.py
    module1 = Statement(None, None, None, 'module', 'mqtt-led')

    my_namespace = "http://ipv6lab.beuth-hochschule.de/led"
    my_prefix = "led"

    namespace = Statement(None, module1, None, 'namespace', my_namespace)
    module1.substmts.append(namespace)

    prefix = Statement(None, module1, None, 'prefix', my_prefix)
    module1.substmts.append(prefix)

    #http://stackoverflow.com/questions/10844064/items-in-json-object-are-out-of-order-using-json-dumps
    data = json.loads(test, object_pairs_hook=OrderedDict)
    count = 0
    level_memory = {}
    #print_dict(data, module1, count)
    parse_dict(data, module1)

    #revision = str(datetime.now())
    #revision = Statement(None, module, None, 'revision', revision)
    #module.substmts.append(revision)

    #https://github.com/mbj4668/pyang/blob/master/pyang/plugin.py
    #https://github.com/modeled/modeled.netconf/blob/master/modeled/netconf/yang/container.py
    """Serialize YANG container to the given output `format`.
			"""
    # output stream for pyang output plugin
    stream = StringIO()

    # gets filled with all available pyang output format plugins
    PYANG_PLUGINS = {}

    # register and initialise pyang plugin
    pyang.plugin.init([])
    for plugin in pyang.plugin.plugins:
        plugin.add_output_format(PYANG_PLUGINS)
    del plugin

    # PYANG_PLUGINS now maps output format names to plugins, e.g.:
    # dsdl, depend, name, omni, yin, tree, jstree, capability, yang,
    # uml, jtox, jsonxsl, sample-xml-skeleton
    plugin = PYANG_PLUGINS['yang']

    # register plugin options according to pyang script
    optparser = OptionParser()
    plugin.add_opts(optparser)

    # pyang plugins also need a pyang.Context
    ctx = pyang.Context(DummyRepository())

    # which offers plugin-specific options (just take defaults)
    ctx.opts = optparser.parse_args([])[0]

    # ready to serialize
    plugin.emit(ctx, [module1], stream)

    # and return the resulting data
    stream.seek(0)
    yang = stream.getvalue()

    print('\nOutput: ')
    print(yang)
    print("")

    #root = etree.Element("data", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
    #root.text = etree.CDATA(yang)
    #print  etree.tostring(root,  pretty_print=True)
    #return  etree.tostring(root,  pretty_print=True)

    # Return the YANG constructed from the JSON config, the list of MQTT
    # commands, the set of UUIDs and the device category
    return (yang, mqtt_commands, my_set, device_category)
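A hypothetical invocation, assuming the module-level helpers (parse_dict, DummyRepository) and globals are in place; the JSON config string below is made up for illustration.

config = '{"led": {"state": "off"}}'
yang, commands, uuids, category = generate_yang(config, set())
print(yang)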