Example #1
    def test_add_filter(self):
        self.assertEqual(len(self.bsq.query_filter), 0)

        self.bsq.add_filter(SQ(foo="bar"))
        self.assertEqual(len(self.bsq.query_filter), 1)

        self.bsq.add_filter(SQ(foo__lt="10"))

        self.bsq.add_filter(~SQ(claris="moof"))

        self.bsq.add_filter(SQ(claris="moof"), use_or=True)

        self.assertEqual(
            repr(self.bsq.query_filter),
            "<SQ: OR ((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof)>",
        )

        self.bsq.add_filter(SQ(claris="moof"))

        self.assertEqual(
            repr(self.bsq.query_filter),
            "<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof)>",
        )

        self.bsq.add_filter(SQ(claris="wtf mate"))

        self.assertEqual(
            repr(self.bsq.query_filter),
            "<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof AND claris__content=wtf mate)>",
        )
Example #2
    def test_syntactic(self):
        sentences = list(SyntacticExtractor(
            [{'bio': u'this is part a1, and this is part a2', 'url': 'www.example.org', 'name': 'abc def'}],
            'bio', 'sentences', 'en', {'be': ['is', 'are']}, self.match_base_form
        ).extract(1))

        self.assertEqual(len(sentences), 2)

        missing_parts = {'a1', 'a2'}
        for sentence in sentences:
            self.assertIn('url', sentence)
            self.assertIn('text', sentence)
            self.assertIn('lu', sentence)
            self.assertEqual(sentence['lu'], 'be')

            for p in missing_parts:
                if p in sentence['text']:
                    self.assertEqual(sentence['text'], 'this is part ' + p)
                    missing_parts.remove(p)
                    break
            else:
                self.fail('Extracted unexpected sentence: %s' % repr(sentence))

        if missing_parts:
            self.fail('Did not find parts: %s' % repr(missing_parts))
Example #3
 def report_details(self, doc, path, diff1, diff2):
     if isinstance(diff1, bool):
         desc1 = repr(diff1)
     else:
         desc1 = str(diff1) if diff1 else ""
     if isinstance(diff2, bool):
         desc2 = repr(diff2)
     else:
         desc2 = str(diff2) if diff2 else ""
     if path.endswith(".change"):
         diff1 = todate(diff1)
         diff2 = todate(diff2)
         desc1 = diff1
         desc2 = diff2
     if diff1 == diff2:
         return
     doc.start_row()
     doc.start_cell('DIFF-TableCell')
     doc.start_paragraph('DIFF-TableHeading')
     doc.write_text(self.format_struct_path(path))
     doc.end_paragraph()
     doc.end_cell()
     doc.start_cell('DIFF-TableCell')
     doc.start_paragraph('DIFF-Text')
     doc.write_text(desc1)
     doc.end_paragraph()
     doc.end_cell()
     doc.start_cell('DIFF-TableCell')
     doc.start_paragraph('DIFF-Text')
     doc.write_text(desc2)
     doc.end_paragraph()
     doc.end_cell()
     doc.end_row()
Example #4
File: haskell.py Project: khskrede/mehh
def main_loop(expr):
    function = None
    todo = None
    i=0
    while True:
        #print "meh"
        jitdriver.jit_merge_point(function=function, todo=todo, expr=expr)
        if isinstance(expr, Substitution):
            expr = expr.apply()
        if isinstance(expr, Value) and todo is None:
            break
        #print expr, todo
        #import pdb; pdb.set_trace()
        print repr(expr)
        expr, todo = expr.step(todo)
        i=i+1
        print i
        function = None
        if isinstance(expr, Substitution):
            recursive = expr.recursive
            #print recursive
            function = expr.rhs
            #print repr(function)
            #print function.name
            if recursive:
                #print "can enter jit", function, expr
                jitdriver.can_enter_jit(function=function, todo=todo, expr=expr)
    return expr
Example #5
File: dc.py Project: iderikon/elliptics
def iterate_node(arg):
    ctx, address, backend_id, ranges = arg
    elog = elliptics.Logger(ctx.log_file, int(ctx.log_level))
    stats = ctx.stats["iterate"][str(address)][str(backend_id)]
    stats.timer('process', 'started')
    log.info("Running iterator on node: {0}/{1}".format(address, backend_id))
    log.debug("Ranges:")
    for range in ranges:
        log.debug(repr(range))
    stats.timer('process', 'iterate')

    node_id = ctx.routes.get_address_backend_route_id(address, backend_id)

    node = elliptics_create_node(address=address,
                                 elog=elog,
                                 wait_timeout=ctx.wait_timeout,
                                 net_thread_num=4,
                                 io_thread_num=1)

    try:
        flags = elliptics.iterator_flags.key_range
        timestamp_range = ctx.timestamp.to_etime(), Time.time_max().to_etime()
        if ctx.no_meta:
            flags |= elliptics.iterator_flags.no_meta
        else:
            flags |= elliptics.iterator_flags.ts_range

        log.debug("Running iterator on node: {0}/{1}".format(address, backend_id))
        results, results_len = Iterator.iterate_with_stats(
            node=node,
            eid=node_id,
            timestamp_range=timestamp_range,
            key_ranges=ranges,
            tmp_dir=ctx.tmp_dir,
            address=address,
            backend_id=backend_id,
            group_id=node_id.group_id,
            batch_size=ctx.batch_size,
            stats=stats,
            flags=flags,
            leave_file=True,
            separately=True)
        if results is None or results_len == 0:
            return None

    except Exception as e:
        log.error("Iteration failed for node {0}/{1}: {2}, traceback: {3}"
                  .format(address, backend_id, repr(e), traceback.format_exc()))
        return None

    log.debug("Iterator for node {0}/{1} obtained: {2} record(s)"
              .format(address, backend_id, results_len))

    stats.timer('process', 'sort')
    for range_id in results:
        results[range_id].sort()

    stats.timer('process', 'finished')
    return [(range_id, container.filename, container.address, container.backend_id, container.group_id)
            for range_id, container in results.items()]
Example #6
	def _child_main_loop(self, queue):
		while True:
			url = "http://geekhost.net/OK"
			f = urllib.urlopen(url)
			data = f.read()
			#print data
			abcPattern = re.compile(r'OK')
			if abcPattern.match(data):
				queue.put('Already logged in')
			else:
				queue.put('Need login')
				LOGIN_URL = 'https://auth-wlc.ntwk.dendai.ac.jp/login.html'
				#LOGIN_URL = 'http://geekhost.net/checkparams.php'
				pd = yaml.load(open('config.yaml').read().decode('utf-8'))
				pd['buttonClicked'] = '4'
				pd['redirect_url'] = 'http://google.com/'
				pd["err_flag"] = "0" 
				pd["err_msg"] = ""
				pd["info_flag"] = "0"
				pd["info_msg"] = ""
				params = urllib.urlencode(pd)
				print repr(params)
				up = urllib.urlopen(LOGIN_URL, params)
			# after that, just sleep
			time.sleep(yaml.load(open('config.yaml').read().decode('utf-8'))['threadtime'])
Example #7
 def _writer_coro(self):
     self.logger.debug("Starting writer coro")
     while self._running:
         try:
             self._writer_ready.set()
             packet = yield from asyncio.wait_for(self.outgoing_queue.get(), 5)
             yield from packet.to_stream(self.session.writer)
             self.logger.debug(" -out-> " + repr(packet))
             yield from self.session.writer.drain()
             self.packet_sent.send(packet)
         except asyncio.TimeoutError as ce:
             self.logger.debug("Output queue get timeout")
         except Exception as e:
             self.logger.warn("Unhandled exception in writer coro: %s" % e)
             break
     self.logger.debug("Writer coro stopping")
     # Flush queue before stopping
     if not self.outgoing_queue.empty():
         while True:
             try:
                 packet = self.outgoing_queue.get_nowait()
                 yield from packet.to_stream(self.session.writer)
                 self.logger.debug(" -out-> " + repr(packet))
             except asyncio.QueueEmpty:
                 break
             except Exception as e:
                 self.logger.warn("Unhandled exception in writer coro: %s" % e)
     self.logger.debug("Writer coro stopped")
Example #8
def closeScoringFiles():
	global filePointerForRuntimeScoring, filePointerForTimeRecording, filePointerForRuntimeNegativeScoring
	dataRecording_controlSchema.closeFile(filePointerForRuntimeScoring)
	filePointerForTimeRecording.write(repr(some_global_variables.pausedTime) + "\n")
	dataRecording_controlSchema.closeFile(filePointerForTimeRecording)
	filePointerForRuntimeNegativeScoring.write(repr(negativeScoreCounter) + "\n")
	dataRecording_controlSchema.closeFile(filePointerForRuntimeNegativeScoring)
Example #9
    def _http_request(self, method, url, headers, body,
                      ignore_result_body=False):
        """Perform an HTTP request against the server.

        method: the HTTP method to use
        url: the URL to request (not including server portion)
        headers: headers for the request
        body: body to send with the request
        ignore_result_body: the body of the result will be ignored

        Returns: a http_client response object
        """
        if self.auth_token:
            headers.setdefault('x-auth-token', self.auth_token)

        LOG.debug('Request: %(method)s http://%(server)s:%(port)s'
                  '%(url)s with headers %(headers)s',
                  {'method': method,
                   'server': self.conn.host,
                   'port': self.conn.port,
                   'url': url,
                   'headers': repr(headers)})
        self.conn.request(method, url, body, headers)

        response = self.conn.getresponse()
        headers = self._header_list_to_dict(response.getheaders())
        code = response.status
        code_description = http_client.responses[code]
        LOG.debug('Response: %(code)s %(status)s %(headers)s',
                  {'code': code,
                   'status': code_description,
                   'headers': repr(headers)})

        if code == 400:
            raise exc.HTTPBadRequest(
                explanation=response.read())

        if code == 500:
            raise exc.HTTPInternalServerError(
                explanation=response.read())

        if code == 401:
            raise exc.HTTPUnauthorized(
                explanation=response.read())

        if code == 403:
            raise exc.HTTPForbidden(
                explanation=response.read())

        if code == 409:
            raise exc.HTTPConflict(
                explanation=response.read())

        if ignore_result_body:
            # NOTE: because we are pipelining requests through a single HTTP
            # connection, http_client requires that we read the response body
            # before we can make another request. If the caller knows they
            # don't care about the body, they can ask us to do that for them.
            response.read()
        return response
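
The repr(headers) calls above make header dicts unambiguous in log output. A standalone sketch of that logging pattern, using only the standard logging module and hypothetical values:

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)

# repr() quotes keys and values, so empty or whitespace-only header values stay visible
headers = {'x-auth-token': 'abc123', 'content-type': ''}
LOG.debug('Request: %(method)s %(url)s with headers %(headers)s',
          {'method': 'GET', 'url': '/v2/images', 'headers': repr(headers)})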
Example #10
File: test_ast.py Project: vslutov/llb3d
def test_program():
    """Check program block."""
    program_tuple = (ast.IntLiteral(10), ast.IntLiteral(20))
    program = ast.Program(program_tuple)
    assert program['statements'] is program_tuple
    assert str(program) == '10\n20'
    assert repr(program) == 'Program({program_tuple})'.format(program_tuple=repr(program_tuple))
Example #11
def test_repr():
    d = da.ones((4, 4), chunks=(2, 2))
    assert d.name[:5] in repr(d)
    assert str(d.shape) in repr(d)
    assert str(d._dtype) in repr(d)
    d = da.ones((4000, 4), chunks=(4, 2))
    assert len(str(d)) < 1000
Example #12
 def __repr__(self):
     if (self.value is not None):
         return "Huffman_Node(value=%s)" % (repr(self.value))
     else:
         return "Huffman_Node(bit_0=%s, bit_1=%s)" % \
             (repr(self.bit_0),
              repr(self.bit_1))
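
A self-contained sketch of how this recursive __repr__ renders a small tree; the constructor below is hypothetical, only the __repr__ mirrors the method above:

class Huffman_Node(object):
    def __init__(self, value=None, bit_0=None, bit_1=None):
        self.value = value    # leaf payload, None for internal nodes
        self.bit_0 = bit_0    # child reached on bit 0
        self.bit_1 = bit_1    # child reached on bit 1

    def __repr__(self):
        if self.value is not None:
            return "Huffman_Node(value=%s)" % (repr(self.value))
        return "Huffman_Node(bit_0=%s, bit_1=%s)" % (repr(self.bit_0), repr(self.bit_1))

tree = Huffman_Node(bit_0=Huffman_Node(value="a"),
                    bit_1=Huffman_Node(value="b"))
print(repr(tree))
# Huffman_Node(bit_0=Huffman_Node(value='a'), bit_1=Huffman_Node(value='b'))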
Example #13
File: test_ast.py Project: vslutov/llb3d
def test_body():
    """Check code blocks."""
    body_tuple = (ast.IntLiteral(10), ast.IntLiteral(20))
    body = ast.Body(body_tuple)
    assert body['statements'] is body_tuple
    assert str(body) == '  10\n  20'
    assert repr(body) == 'Body({body_tuple})'.format(body_tuple=repr(body_tuple))
Example #14
File: misc.py Project: PyUtilib/pyutilib
 def __str__(self, nesting=0, indent=''):
     attrs = []
     indentation = indent + "    " * nesting
     for k, v in self.__dict__.items():
         if not k.startswith("_"):
             text = [indentation, k, ":"]
             if isinstance(v, Container):
                 if len(v) > 0:
                     text.append('\n')
                 text.append(v.__str__(nesting + 1))
             elif isinstance(v, list):
                 if len(v) == 0:
                     text.append(' []')
                 else:
                     for v_ in v:
                         text.append('\n' + indentation + "-")
                         if isinstance(v_, Container):
                             text.append('\n' + v_.__str__(nesting + 1))
                         else:
                             text.append(" " + repr(v_))
             else:
                 text.append(' ' + repr(v))
             attrs.append("".join(text))
     attrs.sort()
     return "\n".join(attrs)
Example #15
    def gexpect(self, tag):
        log.debug('GAME_EXPECT: %s', repr(tag))
        l = self.gdqueue
        e = self.gdevent
        while True:
            for i in xrange(len(l)):
                d = l.popleft()
                if isinstance(d, EndpointDied):
                    raise d

                elif d[0] == tag:
                    log.debug('GAME_READ: %s', repr(d))
                    self.usergdhistory.append((self.player_index, d[0], d[1]))
                    return d[1]

                else:
                    d.scan_count += 1
                    if d.scan_count >= 15:
                        log.debug('Dropped gamedata: %s' % d)
                    else:
                        log.debug('GAME_DATA_MISS: %s', repr(d))
                        log.debug('EXPECTS: %s' % tag)
                        l.append(d)
            e.clear()
            e.wait()
Example #16
    def runTest(self):
        self.setup_test()
        self.log_entries = self.c.run_command_ignore_fail("cat /sys/firmware/opal/msglog |  grep 'PHB#' | grep -i  ' C:'")
        failed_eplist = []
        failed_slotlist = []
        failed_swuplist = []
        match_list = ["[EP  ]", "[LGCY]", "[PCID]", "[ETOX]" ]

        for entry in self.log_entries:
            if entry == '':
                continue

            matchObj = re.match(r"(.*) PHB#(.*) \[(.*)", entry)
            if matchObj:
                bdfn = matchObj.group(2)
            else:
                log.debug(entry)
                bdfn = entry

            ep_present = False
            # Check for a end point PCI device, it should have LOC_CODE label
            for string in match_list:
                if string in entry:
                    ep_present = True
                    if "LOC_CODE" in entry:
                        log.debug("Location code found for entry %s" % bdfn)
                    else:
                        failed_eplist.append(bdfn)
                    break
            else:
                ep_present = False

            if ep_present:
                continue

            if "[SWUP]" in entry:
                if "LOC_CODE" in entry:
                    log.debug("Entry %s has LOC_CODE" % bdfn)
                    continue
                if "SLOT" in entry:
                    log.debug("Entry %s has SLOT" % bdfn)
                    continue
                failed_swuplist.append(bdfn)

            # If it is a pcie slot check for SLOT entry
            if "SLOT" in entry:
                log.debug("Entry %s has the slot label" % bdfn)
            else:
                failed_slotlist.append(bdfn)

        log.debug(repr(failed_eplist))
        log.debug(repr(failed_slotlist))
        log.debug(repr(failed_swuplist))
        if (len(failed_slotlist) == 0) and (len(failed_eplist) == 0):
            return
        failed_eplist = '\n'.join(filter(None, failed_eplist))
        failed_slotlist = '\n'.join(filter(None, failed_slotlist))
        failed_swuplist = '\n'.join(filter(None, failed_swuplist))
        message = "SLOT Label failures: %s\n LOC_CODE failures:%s\nSWUP failures:%s\n" % (failed_slotlist, failed_eplist, failed_swuplist)
        self.assertTrue(False, message)
Example #17
def test_ViewInventory___deepcopy___01():
    
    inventory_1 = idetools.ViewInventory()
    inventory_2 = copy.deepcopy(inventory_1)

    assert inventory_1 == inventory_2
    assert repr(inventory_1) == repr(inventory_2)
Example #18
    def _filter_ids_helper(cls, df_or_series, ids, ids_to_keep):
        # `ids_to_keep` can be any iterable, so turn it into a list so that it
        # can be iterated over multiple times below (and length-checked).
        ids_to_keep = list(ids_to_keep)

        if len(ids_to_keep) == 0:
            raise ValueError("`ids_to_keep` must contain at least one ID.")

        duplicates = find_duplicates(ids_to_keep)
        if duplicates:
            raise ValueError(
                "`ids_to_keep` must contain unique IDs. The following IDs are "
                "duplicated: %s" %
                (', '.join(repr(e) for e in sorted(duplicates))))

        ids_to_keep = set(ids_to_keep)
        missing_ids = ids_to_keep - ids
        if missing_ids:
            raise ValueError(
                "The following IDs are not present in the metadata: %s"
                % (', '.join(repr(e) for e in sorted(missing_ids))))

        # While preserving order, get rid of any IDs not contained in
        # `ids_to_keep`.
        ids_to_discard = ids - ids_to_keep
        return df_or_series.drop(labels=ids_to_discard, axis='index',
                                 inplace=False, errors='raise')
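
The ', '.join(repr(e) for e in sorted(...)) idiom used in both error messages gives a deterministic, quoted ID list; repr() also exposes otherwise-invisible whitespace. Standalone, with hypothetical IDs:

duplicates = {'s2', 's1', 's2 '}
print(', '.join(repr(e) for e in sorted(duplicates)))
# 's1', 's2', 's2 '   (the trailing space in the last ID is now visible)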
Example #19
    def test_categorical_series_repr_timedelta(self):
        idx = timedelta_range('1 days', periods=5)
        s = Series(Categorical(idx))
        exp = """0   1 days
1   2 days
2   3 days
3   4 days
4   5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""

        assert repr(s) == exp

        idx = timedelta_range('1 hours', periods=10)
        s = Series(Categorical(idx))
        exp = """0   0 days 01:00:00
1   1 days 01:00:00
2   2 days 01:00:00
3   3 days 01:00:00
4   4 days 01:00:00
5   5 days 01:00:00
6   6 days 01:00:00
7   7 days 01:00:00
8   8 days 01:00:00
9   9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                   3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
                                   8 days 01:00:00, 9 days 01:00:00]"""  # noqa

        assert repr(s) == exp
Example #20
def test_volume_source_space():
    """Test setting up volume source spaces."""
    tempdir = _TempDir()
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, 'temp-src.fif')
    surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
    surf['rr'] *= 1e3  # convert to mm
    # The one in the testing dataset (uses bem as bounds)
    for bem, surf in zip((fname_bem, None), (None, surf)):
        src_new = setup_volume_source_space(
            'sample', pos=7.0, bem=bem, surface=surf, mri='T1.mgz',
            subjects_dir=subjects_dir)
        write_source_spaces(temp_name, src_new, overwrite=True)
        src[0]['subject_his_id'] = 'sample'  # XXX: to make comparison pass
        _compare_source_spaces(src, src_new, mode='approx')
        del src_new
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode='approx')
    pytest.raises(IOError, setup_volume_source_space, 'sample',
                  pos=7.0, bem=None, surface='foo',  # bad surf
                  mri=fname_mri, subjects_dir=subjects_dir)
    assert repr(src) == repr(src_new)
    assert src.kind == 'volume'
    # Spheres
    sphere = make_sphere_model(r0=(0., 0., 0.), head_radius=0.1,
                               relative_radii=(0.9, 1.0), sigmas=(0.33, 1.0))
    src = setup_volume_source_space(pos=10)
    src_new = setup_volume_source_space(pos=10, sphere=sphere)
    _compare_source_spaces(src, src_new, mode='exact')
    pytest.raises(ValueError, setup_volume_source_space, sphere='foo')
    # Need a radius
    sphere = make_sphere_model(head_radius=None)
    pytest.raises(ValueError, setup_volume_source_space, sphere=sphere)
Example #21
    def test_categorical_series_repr_timedelta_ordered(self):
        idx = timedelta_range('1 days', periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   1 days
1   2 days
2   3 days
3   4 days
4   5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""  # noqa

        assert repr(s) == exp

        idx = timedelta_range('1 hours', periods=10)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   0 days 01:00:00
1   1 days 01:00:00
2   2 days 01:00:00
3   3 days 01:00:00
4   4 days 01:00:00
5   5 days 01:00:00
6   6 days 01:00:00
7   7 days 01:00:00
8   8 days 01:00:00
9   9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                   3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
                                   8 days 01:00:00 < 9 days 01:00:00]"""  # noqa

        assert repr(s) == exp
Example #22
    def test_categorical_series_repr_datetime_ordered(self):
        idx = date_range('2011-01-01 09:00', freq='H', periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   2011-01-01 09:00:00
1   2011-01-01 10:00:00
2   2011-01-01 11:00:00
3   2011-01-01 12:00:00
4   2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""  # noqa

        assert repr(s) == exp

        idx = date_range('2011-01-01 09:00', freq='H', periods=5,
                         tz='US/Eastern')
        s = Series(Categorical(idx, ordered=True))
        exp = """0   2011-01-01 09:00:00-05:00
1   2011-01-01 10:00:00-05:00
2   2011-01-01 11:00:00-05:00
3   2011-01-01 12:00:00-05:00
4   2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                             2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
                                             2011-01-01 13:00:00-05:00]"""  # noqa

        assert repr(s) == exp
Example #23
    def test_categorical_series_repr(self):
        s = Series(Categorical([1, 2, 3]))
        exp = """0    1
1    2
2    3
dtype: category
Categories (3, int64): [1, 2, 3]"""

        assert repr(s) == exp

        s = Series(Categorical(np.arange(10)))
        exp = """0    0
1    1
2    2
3    3
4    4
5    5
6    6
7    7
8    8
9    9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""

        assert repr(s) == exp
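
The expected strings in these tests are simply the interactive repr of a categorical Series. A minimal sketch to reproduce the first one; the exact output depends on the installed pandas version:

import pandas as pd

s = pd.Series(pd.Categorical([1, 2, 3]))
print(repr(s))  # should match the first exp block above on a matching version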
Example #24
    def test_various_ops(self):
        # This takes about n/3 seconds to run (about n/3 clumps of tasks,
        # times about 1 second per clump).
        NUMTASKS = 10

        # no more than 3 of the 10 can run at once
        sema = threading.BoundedSemaphore(value=3)
        mutex = threading.RLock()
        numrunning = Counter()

        threads = []

        for i in range(NUMTASKS):
            t = TestThread("<thread %d>" % i, self, sema, mutex, numrunning)
            threads.append(t)
            self.assertEqual(t.ident, None)
            self.assertTrue(re.match(r"<TestThread\(.*, initial\)>", repr(t)))
            t.start()

        if verbose:
            print("waiting for all tasks to complete")
        for t in threads:
            t.join(NUMTASKS)
            self.assertTrue(not t.is_alive())
            self.assertNotEqual(t.ident, 0)
            self.assertFalse(t.ident is None)
            self.assertTrue(re.match(r"<TestThread\(.*, stopped -?\d+\)>", repr(t)))
        if verbose:
            print("all tasks done")
        self.assertEqual(numrunning.get(), 0)
Example #25
    def test_categorical_repr_unicode(self):
        # GH#21002 if len(index) > 60, sys.getdefaultencoding()=='ascii',
        # and we are working in PY2, then rendering a Categorical could raise
        # UnicodeDecodeError by trying to decode when it shouldn't

        class County(StringMixin):
            name = u'San Sebastián'
            state = u'PR'

            def __unicode__(self):
                return self.name + u', ' + self.state

        cat = pd.Categorical([County() for n in range(61)])
        idx = pd.Index(cat)
        ser = idx.to_series()

        if compat.PY3:
            # no reloading of sys, just check that the default (utf8) works
            # as expected
            repr(ser)
            str(ser)

        else:
            # set sys.defaultencoding to ascii, then change it back after
            # the test
            with tm.set_defaultencoding('ascii'):
                repr(ser)
                str(ser)
Example #26
 def __repr__(self):
     r = u"[%s ->\n %s\n source=%s strength=%.2f votes=%d)]" % (
         repr(self.input).decode("utf8"),
         repr(self.output).decode("utf8"),
         self.data_source.name, self.strength, self.votes
     )
     return r.encode("utf8")
Example #27
    def edit_tagged_sel(self, tag_sel_idx, *, note=None):
        tagged_sel = self.store[tag_sel_idx]
        old_note = tagged_sel.note
        tagged_sel.note = note
        print("Updated note: %s -> %s" % (repr(old_note), repr(note)))

        self.store.save()
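
repr() is doing real work in that print: it distinguishes None from the empty string, which plain %s formatting would conflate with blank output. Standalone:

old_note, note = None, ''
print("Updated note: %s -> %s" % (repr(old_note), repr(note)))
# Updated note: None -> ''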
Example #28
    def test_categorical_series_repr_ordered(self):
        s = Series(Categorical([1, 2, 3], ordered=True))
        exp = """0    1
1    2
2    3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""

        assert repr(s) == exp

        s = Series(Categorical(np.arange(10), ordered=True))
        exp = """0    0
1    1
2    2
3    3
4    4
5    5
6    6
7    7
8    8
9    9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""

        assert repr(s) == exp
Example #29
    def test_categorical_series_repr_period_ordered(self):
        idx = period_range('2011-01-01 09:00', freq='H', periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   2011-01-01 09:00
1   2011-01-01 10:00
2   2011-01-01 11:00
3   2011-01-01 12:00
4   2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
                            2011-01-01 13:00]"""  # noqa

        assert repr(s) == exp

        idx = period_range('2011-01', freq='M', periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   2011-01
1   2011-02
2   2011-03
3   2011-04
4   2011-05
dtype: category
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""

        assert repr(s) == exp
Example #30
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
    r1 = random.Random(SEED)
    if UTF8 or UTF8_MULTIBYTE:
        dsf = codecs.open(csvPathname, encoding='utf-8', mode='w+')
    elif UTF16:
        dsf = codecs.open(csvPathname, encoding='utf-16', mode='w+')
    else:
        dsf = open(csvPathname, "w+")

    for i in range(rowCount):
        if UTF16:
            u = unichr(0x2018) + unichr(6000) + unichr(0x2019)
            rowDataCsv = u
        else: # both ascii and utf-8 go here?
            rowData = []
            for j in range(colCount):
                r = generate_random_utf8_string(length=2)
                rowData.append(r)
            rowDataCsv = ",".join(rowData)
        if UTF16:
            # we're already passing it unicode. no decoding needed
            print "utf16:", repr(rowDataCsv), type(rowDataCsv)
            decoded = rowDataCsv
        else:
            print "str:", repr(rowDataCsv), type(rowDataCsv)
            decoded = rowDataCsv.decode('utf-8')
            # this has the right length..multibyte utf8 are decoded 
            print "utf8:" , repr(decoded), type(decoded)
        
        # dsf.write(rowDataCsv + "\n")
        dsf.write(decoded + "\n")
    dsf.close()
Example #31
 def __repr__(self):
     return 'MuProcedure({0}, {1})'.format(
         repr(self.formals), repr(self.body))
Example #32
parser.add_argument('--hostname', default="moranis.cs.washington.edu", help='Redis hostname')
parser.add_argument('--port', default="8001", help='Redis port')
args = parser.parse_args()
        
WORKING_DIR = client_common.getWorkingDir()

client_common.copyFromSrcHost("diamond-src/apps/baseline-benchmarks/100game/build/game-redis")
client_common.copyFromSrcHostWithName("diamond-src/apps/baseline-benchmarks/redox/build/libredox.so.0.3.0", "libredox.so.0")

sys.stderr.write("Running clients...\n")

processes = []
outputFiles = []

for i in range(0, args.numpairs):
    gameKeyPrefix = repr(random.randint(0, sys.maxint))
    outputFile1 = "game-redis-" + gameKeyPrefix + "-1"
    outputFile2 = "game-redis-" + gameKeyPrefix + "-2"
    cmd1 = WORKING_DIR + "/game-redis --host " + args.hostname + " --port " + args.port + " --name player1 --keyprefix " + gameKeyPrefix + " > " + WORKING_DIR + "/" + outputFile1
    cmd2 = WORKING_DIR + "/game-redis --host " + args.hostname + " --port " + args.port + " --name player2 --keyprefix " + gameKeyPrefix + " > " + WORKING_DIR + "/" + outputFile2
    processes.append(subprocess.Popen(cmd1, shell=True))
    processes.append(subprocess.Popen(cmd2, shell=True))
    outputFiles.append(outputFile1)
    outputFiles.append(outputFile2)

for process in processes:
    process.wait()

sys.stderr.write("Finished running clients\n")

success = True
Example #33
 def on_error(self, status_code):
     # On error - if an error occurs, display the error / status code
     print('An Error has occurred: ' + repr(status_code))
     return False
Example #34
	def constructDictionary(self, keyColumnIndexList=None, valueColumnIndexList=None, keyUniqueInInputFile=False,\
						keyDataType=None, valueDataType=None):
		"""
		2013.10.04 added argument keyDataType, valueDataType
		2013.10.02 added argument keyUniqueInInputFile
			True when each key appears once and only once in the input file. An exception is raised if this is not true.
		2013.09.30
			If length of keyColumnIndexList is one, then key is not a tuple, simply the key.
			If length of valueColumnIndexList is one, then value is not a list of tuples, simply a list of values.
		2013.05.24
			the key is a tuple
			the value is a list of lists.
				
			i.e.:
			
				alignmentCoverageFile = MatrixFile(inputFname=self.individualAlignmentCoverageFname)
				alignmentCoverageFile.constructColName2IndexFromHeader()
				alignmentReadGroup2coverageLs = alignmentCoverageFile.constructDictionary(keyColumnIndexList=[0], valueColumnIndexList=[1])
				alignmentCoverageFile.close()
				
				coverageLs = alignmentReadGroup2coverageLs.get((individualID,))
				return coverageLs[0]
		"""
		sys.stderr.write("Constructing a dictionary, keys are column %s, values are column %s. ..."%\
						(repr(keyColumnIndexList), repr(valueColumnIndexList)))
		dc = {}
		counter = 0
		for row in self:
			counter += 1
			keyList = []
			for i in keyColumnIndexList:
				keyData = row[i]
				if keyDataType is not None:
					keyData = keyDataType(keyData)
				keyList.append(keyData)
			valueList = []
			for i in valueColumnIndexList:
				valueData = row[i]
				if valueDataType is not None:
					valueData = valueDataType(valueData)
				valueList.append(valueData)
			if len(keyColumnIndexList)>1:
				key = tuple(keyList)
			else:
				key = keyList[0]
			if keyUniqueInInputFile:
				if key in dc:
					sys.stderr.write("ERROR: keyUniqueInInputFile=%s but this key (%s) from this row (%s) is already in dictionary with value=%s.\n"%
									(keyUniqueInInputFile, repr(key), repr(row), repr(dc.get(key)) ))
					raise ValueError("key %s is not unique in the input file."%(repr(key)))
			else:
				if key not in dc:
					dc[key] = []
			
			if len(valueColumnIndexList)>1:
				value = valueList
			else:
				value = valueList[0]
			if keyUniqueInInputFile:
				dc[key] = value
			else:
				dc[key].append(value)
		sys.stderr.write("%s unique pairs from %s rows.\n"%(len(dc), counter))
		return dc
Example #35
 def __repr__(self):
     return "<%s: %s>" % (type(self).__name__, repr(self.value))
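
Because it uses type(self).__name__, this one-liner stays correct under subclassing. A hypothetical minimal demonstration:

class Wrapper(object):
    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "<%s: %s>" % (type(self).__name__, repr(self.value))

class IntWrapper(Wrapper):
    pass

print(repr(Wrapper('x')))    # <Wrapper: 'x'>
print(repr(IntWrapper(42)))  # <IntWrapper: 42>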
Example #36
    def CASE8(self, main):
        """
        Compare topo
        """
        import json
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case("Compare ONOS Topology view to Mininet topology")
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        elapsed = 0
        count = 0
        main.step("Comparing ONOS topology to MN topology")
        startTime = time.time()
        ctrl = main.Cluster.active(0)
        # Give time for Gossip to work
        while topoResult == main.FALSE and (elapsed < 60 or count < 3):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            devices = []
            devices.append(ctrl.CLI.devices())
            hosts = []
            hosts.append(json.loads(ctrl.CLI.hosts()))
            ipResult = main.TRUE
            for controller in range(0, len(hosts)):
                controllerStr = str(controller + 1)
                for host in hosts[controller]:
                    if host is None or host.get('ipAddresses', []) == []:
                        main.log.error(
                            "DEBUG:Error with host ips on controller" +
                            controllerStr + ": " + str(host))
                        ipResult = main.FALSE
            ports = []
            ports.append(ctrl.CLI.ports())
            links = []
            links.append(ctrl.CLI.links())
            clusters = []
            clusters.append(ctrl.CLI.clusters())

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "CLI time: " + str(cliTime)

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in main.Cluster.getRunningPos():
                controllerStr = str(controller)
                if devices[ controller ] and ports[ controller ] and\
                        "Error" not in devices[ controller ] and\
                        "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches, json.loads(devices[controller]),
                            json.loads(ports[controller]))
                    except (TypeError, ValueError) as e:
                        main.log.exception(
                            "Object not as expected; devices={!r}\nports={!r}".
                            format(devices[controller], ports[controller]))
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals(expect=main.TRUE,
                                        actual=currentDevicesResult,
                                        onpass="ONOS" + controllerStr +
                                        " Switches view is correct",
                                        onfail="ONOS" + controllerStr +
                                        " Switches view is incorrect")

                if links[controller] and "Error" not in links[controller]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks, json.loads(links[controller]))
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals(
                    expect=main.TRUE,
                    actual=currentLinksResult,
                    onpass="ONOS" + controllerStr + " links view is correct",
                    onfail="ONOS" + controllerStr + " links view is incorrect")

                if hosts[controller] or "Error" not in hosts[controller]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts, hosts[controller])
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals(expect=main.TRUE,
                                        actual=currentHostsResult,
                                        onpass="ONOS" + controllerStr +
                                        " hosts exist in Mininet",
                                        onfail="ONOS" + controllerStr +
                                        " hosts don't match Mininet")
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range(1, 29):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex(i).split("0x")[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str(i).zfill(3)
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str(i).zfill(3)
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[macId] = deviceId
                if hosts[controller] or "Error" not in hosts[controller]:
                    if hosts[controller] == []:
                        main.log.warn("There are no hosts discovered")
                        zeroHosts = True
                    else:
                        for host in hosts[controller]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get('mac')
                                assert mac, "mac field could not be found for this host object"

                                location = host.get('locations')[0]
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str(
                                    location.get('elementId')).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get('port')
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str(port) != "1":
                                        main.log.error(
                                            "The attachment port is incorrect for "
                                            + "host " + str(mac) +
                                            ". Expected: 1 Actual: " +
                                            str(port))
                                        hostAttachment = False
                                    if device != mappings[str(mac)]:
                                        main.log.error(
                                            "The attachment device is incorrect for "
                                            + "host " + str(mac) +
                                            ". Expected: " +
                                            mappings[str(mac)] + " Actual: " +
                                            device)
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception(
                                    "Json object not as expected")
                                main.log.error(repr(host))
                                hostAttachment = False
                else:
                    main.log.error("No hosts json output or \"Error\"" +
                                   " in output. hosts = " +
                                   repr(hosts[controller]))
                if zeroHosts is False:
                    hostAttachment = True

                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment

            # "consistent" results don't make sense for single instance
            # there should always only be one cluster
            clusterResults = main.FALSE
            try:
                numClusters = len(json.loads(clusters[0]))
            except (ValueError, TypeError):
                main.log.exception("Error parsing clusters[0]: " +
                                   repr(clusters[0]))
                numClusters = "ERROR"
                clusterResults = main.FALSE
            if numClusters == 1:
                clusterResults = main.TRUE
            utilities.assert_equals(expect=1,
                                    actual=numClusters,
                                    onpass="******",
                                    onfail="ONOS shows " + str(numClusters) +
                                    " SCCs")

            topoResult = (devicesResults and linksResults and hostsResults
                          and ipResult and clusterResults
                          and hostAttachmentResults)

        topoResult = topoResult and int(count <= 2)
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str(note) + " ): " + str(elapsed) + " seconds, " + str(count) +
            " tries")
        utilities.assert_equals(expect=main.TRUE,
                                actual=topoResult,
                                onpass="Topology Check Test successful",
                                onfail="Topology Check Test NOT successful")
        main.step("Checking ONOS nodes")
        nodeResults = utilities.retry(main.Cluster.nodesCheck,
                                      False,
                                      attempts=5)

        utilities.assert_equals(expect=True,
                                actual=nodeResults,
                                onpass="Nodes check successful",
                                onfail="Nodes check NOT successful")
        if not nodeResults:
            for ctrl in main.Cluster.active():
                main.log.debug("{} components not ACTIVE: \n{}".format(
                    ctrl.name, ctrl.CLI.sendline("scr:list | grep -v ACTIVE")))

        if not topoResult:
            main.cleanAndExit()
Example #37
    def CASE7(self, main):
        """
        Check state after ONOS failure
        """
        import json
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        main.case("Running ONOS Constant State Tests")

        # Assert that each device has a master
        main.HA.checkRoleNotNull()

        main.step("Check if switch roles are consistent across all nodes")
        ONOSMastership, rolesResult, consistentMastership = main.HA.checkTheRole(
        )
        ONOSMastership = ONOSMastership[0]
        description2 = "Compare switch roles from before failure"
        main.step(description2)

        currentJson = json.loads(ONOSMastership)
        oldJson = json.loads(mastershipState)
        mastershipCheck = main.TRUE
        for i in range(1, 29):
            switchDPID = str(main.Mininet1.getSwitchDPID(switch="s" + str(i)))

            current = [
                switch['master'] for switch in currentJson
                if switchDPID in switch['id']
            ]
            old = [
                switch['master'] for switch in oldJson
                if switchDPID in switch['id']
            ]
            if current == old:
                mastershipCheck = mastershipCheck and main.TRUE
            else:
                main.log.warn(
                    "Mastership of switch %s changed; old: %s, new: %s" %
                    (switchDPID, old, current))
                mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="******",
            onfail="Mastership of some switches changed")
        mastershipCheck = mastershipCheck and consistentMastership

        main.step("Get the intents and compare across all nodes")
        ONOSIntents = main.Cluster.runningNodes[0].CLI.intents(jsonFormat=True)
        intentCheck = main.FALSE
        if "Error" in ONOSIntents or not ONOSIntents:
            main.log.error("Error in getting ONOS intents")
            main.log.warn("ONOS1 intents response: " + repr(ONOSIntents))
        else:
            intentCheck = main.TRUE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=intentCheck,
            onpass="******",
            onfail="ONOS nodes have different views of intents")
        # Print the intent states
        intents = []
        intents.append(ONOSIntents)
        intentStates = []
        for node in intents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            for intent in json.loads(node):
                nodeStates.append(intent['state'])
            intentStates.append(nodeStates)
            out = [(i, nodeStates.count(i)) for i in set(nodeStates)]
            main.log.info(dict(out))

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step("Get the OF Table entries and compare to before " +
                  "component failure")
        FlowTables = main.TRUE
        for i in range(28):
            main.log.info("Checking flow table on s" + str(i + 1))
            tmpFlows = main.Mininet1.getFlowTable("s" + str(i + 1),
                                                  version="1.3",
                                                  debug=False)
            curSwitch = main.Mininet1.flowTableComp(flows[i], tmpFlows)
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn(
                    "Differences in flow table for switch: s{}".format(i + 1))
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="******",
            onfail="Changes were found in the flow tables")

        main.step("Leadership Election is still functional")
        # Test of LeadershipElection

        leader = main.Cluster.runningNodes[0].ipAddress
        leaderResult = main.TRUE
        for ctrl in main.Cluster.active():
            # loop through ONOScli handlers
            leaderN = ctrl.CLI.electionTestLeader()
            # verify leader is ONOS1
            # NOTE even though we restarted ONOS, it is the only one so onos 1
            # must be leader
            if leaderN == leader:
                # all is well
                pass
            elif leaderN == main.FALSE:
                # error in response
                main.log.error("Something is wrong with " +
                               "electionTestLeader function, check the" +
                               " error logs")
                leaderResult = main.FALSE
            elif leader != leaderN:
                leaderResult = main.FALSE
                main.log.error(ctrl.name + " sees " + str(leaderN) +
                               " as the leader of the election app. " +
                               "Leader should be " + str(leader))
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="******",
            onfail="Something went wrong with Leadership election")
Example #38
    def CASE5(self, main):
        """
        Reading state of ONOS
        """
        import json
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case("Setting up and gathering data for current state")
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step("Check that each switch has a master")
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        main.HA.checkRoleNotNull()

        main.step("Get the Mastership of each switch")
        main.HA.checkTheRole()

        main.step("Get the intents from each controller")
        global intentState
        intentState = []
        ONOSIntents = main.Cluster.runningNodes[0].CLI.intents(jsonFormat=True)
        intentCheck = main.FALSE
        if "Error" in ONOSIntents or not ONOSIntents:
            main.log.error("Error in getting ONOS intents")
            main.log.warn("ONOS1 intents response: " + repr(ONOSIntents))
        else:
            intentCheck = main.TRUE

        main.step("Get the flows from each controller")
        global flowState
        flowState = []
        flowCheck = main.FALSE
        ONOSFlows = main.Cluster.runningNodes[0].CLI.flows(jsonFormat=True)
        if "Error" in ONOSFlows or not ONOSFlows:
            main.log.error("Error in getting ONOS flows")
            main.log.warn("ONOS1 flows response: " + ONOSFlows)
        else:
            # TODO: Do a better check, maybe compare flows on switches?
            flowState = ONOSFlows
            flowCheck = main.TRUE

        main.step("Get the OF Table entries")
        global flows
        flows = []
        for i in range(1, 29):
            flows.append(
                main.Mininet1.getFlowTable("s" + str(i),
                                           version="1.3",
                                           debug=False))
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn(table)
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step("Collecting topology information from ONOS")
        devices = []
        devices.append(main.Cluster.runningNodes[0].CLI.devices())
        hosts = []
        hosts.append(json.loads(main.Cluster.runningNodes[0].CLI.hosts()))
        ports = []
        ports.append(main.Cluster.runningNodes[0].CLI.ports())
        links = []
        links.append(main.Cluster.runningNodes[0].CLI.links())
        clusters = []
        clusters.append(main.Cluster.runningNodes[0].CLI.clusters())

        main.step("Each host has an IP address")
        ipResult = main.TRUE
        for controller in range(0, len(hosts)):
            controllerStr = str(main.Cluster.active(controller))
            if hosts[controller]:
                for host in hosts[controller]:
                    if not host.get('ipAddresses', []):
                        main.log.error("Error with host ips on controller" +
                                       controllerStr + ": " + str(host))
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="******",
            onfail="The ip of at least one host is missing")

        # there should always only be one cluster
        main.step("There is only one dataplane cluster")
        try:
            numClusters = len(json.loads(clusters[0]))
        except (ValueError, TypeError):
            main.log.exception("Error parsing clusters[0]: " +
                               repr(clusters[0]))
            numClusters = "ERROR"
        clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(expect=1,
                                actual=numClusters,
                                onpass="******",
                                onfail="ONOS shows " + str(numClusters) +
                                " SCCs")

        main.step("Comparing ONOS topology to MN")
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        for controller in main.Cluster.getRunningPos():
            controllerStr = str(main.Cluster.active(controller))
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                    mnSwitches, json.loads(devices[controller]),
                    json.loads(ports[controller]))
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals(
                expect=main.TRUE,
                actual=currentDevicesResult,
                onpass="ONOS" + controllerStr + " Switches view is correct",
                onfail="ONOS" + controllerStr + " Switches view is incorrect")
            if links[controller] and "Error" not in links[controller]:
                currentLinksResult = main.Mininet1.compareLinks(
                    mnSwitches, mnLinks, json.loads(links[controller]))
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals(
                expect=main.TRUE,
                actual=currentLinksResult,
                onpass="ONOS" + controllerStr + " links view is correct",
                onfail="ONOS" + controllerStr + " links view is incorrect")

            if hosts[controller] and "Error" not in hosts[controller]:
                currentHostsResult = main.Mininet1.compareHosts(
                    mnHosts, hosts[controller])
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals(
                expect=main.TRUE,
                actual=currentHostsResult,
                onpass="ONOS" + controllerStr + " hosts exist in Mininet",
                onfail="ONOS" + controllerStr + " hosts don't match Mininet")

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step("Device information is correct")
        utilities.assert_equals(expect=main.TRUE,
                                actual=devicesResults,
                                onpass="Device information is correct",
                                onfail="Device information is incorrect")

        main.step("Links are correct")
        utilities.assert_equals(expect=main.TRUE,
                                actual=linksResults,
                                onpass="******",
                                onfail="Links are incorrect")

        main.step("Hosts are correct")
        utilities.assert_equals(expect=main.TRUE,
                                actual=hostsResults,
                                onpass="******",
                                onfail="Hosts are incorrect")

        ONOSMastership, rolesResult, consistentMastership = main.HA.checkTheRole(
        )
        mastershipState = ONOSMastership[0]
Example #39
0
 def __str__(self):
     return repr(self)
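
A minimal sketch of the same delegation on a hypothetical class (Point is illustrative, not from the original): routing __str__ through repr(self) makes str() and print() show the debugging representation.

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __repr__(self):
        return 'Point({0}, {1})'.format(self.x, self.y)

    def __str__(self):
        return repr(self)  # str(p) and print(p) now mirror repr(p)

print(Point(1, 2))  # -> Point(1, 2)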
Example #40
0
def playlist_handler(playlist_name, playlist_description, playlist_tracks):
    # skip empty and no-name playlists
    if not playlist_name: return
    if len(playlist_tracks) == 0: return

    # setup output files
    playlist_name = playlist_name.replace('/', '')
    open_log(os.path.join(output_dir,playlist_name+u'.log'))
    outfile = codecs.open(os.path.join(output_dir,playlist_name+u'.csv'),
        encoding='utf-8',mode='w')

    # keep track of stats
    stats = create_stats()
    export_skipped = 0
    # keep track of songids incase we need to skip duplicates
    song_ids = []

    log('')
    log('============================================================')
    log(u'Exporting '+ unicode(len(playlist_tracks)) +u' tracks from '
        +playlist_name)
    log('============================================================')

    # add the playlist description as a "comment"
    if playlist_description:
        outfile.write(tsep)
        outfile.write(playlist_description)
        outfile.write(os.linesep)

    for tnum, pl_track in enumerate(playlist_tracks):
        track = pl_track.get('track')

        # Check if we need to look up these tracks in the library
        if not track:
            library_track = [
                item for item in library if item.get('id')
                in pl_track.get('trackId')]
            if len(library_track) == 0:
                log(u'!! '+str(tnum+1)+repr(pl_track))
                export_skipped += 1
                continue
            track = library_track[0]

        result_details = create_result_details(track)

        if not allow_duplicates and result_details['songid'] in song_ids:
            log('{D} '+str(tnum+1)+'. '+create_details_string(result_details,True))
            export_skipped += 1
            continue

        # update the stats
        update_stats(track,stats)

        # export the track
        song_ids.append(result_details['songid'])
        outfile.write(create_details_string(result_details))
        outfile.write(os.linesep)

    # calculate the stats
    stats_results = calculate_stats_results(stats,len(playlist_tracks))

    # output the stats to the log
    log('')
    log_stats(stats_results)
    log(u'export skipped: '+unicode(export_skipped))

    # close the files
    close_log()
    outfile.close()
Example #41
0
    srcs = args.src.split(",")
    assert len(srcs) == 2


    sli = args.sli

    # Load the two genstep arrays; exit with the configured return code on failure.
    try:
        a = A.load_("gensteps", srcs[0], args.tag, args.det)
        b = A.load_("gensteps", srcs[1], args.tag, args.det)
    except IOError as err:
        log.fatal(err)
        sys.exit(args.mrc)


    log.info("loaded a : %s %s " % (repr(a.shape),a.path))
    log.info("loaded b : %s %s " % (repr(b.shape),b.path))

    aa = a[sli]
    bb = b[sli]
    assert aa.shape[1:] == (6,4) and bb.shape[1:] == (6,4)

    log.info("sliced aa : %s " % (repr(aa.shape)))
    log.info("sliced bb : %s " % (repr(bb.shape)))


    cc = np.empty((len(aa)+len(bb),6,4), dtype=np.float32)
    cc[0:len(aa)] = aa
    cc[len(aa):len(aa)+len(bb)] = bb

    gsp = gspath_("natural", args.tag, args.det, gsbase=os.path.expanduser("~/opticksdata/gensteps"))
Example #42
0
 def __repr__(self):
     if self.parent is None:
         return '<Global Frame>'
     s = sorted(['{0}: {1}'.format(k, v) for k, v in self.bindings.items()])
     return '<{{{0}}} -> {1}>'.format(', '.join(s), repr(self.parent))
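
A runnable sketch that exercises this __repr__; the Frame class below is an assumption reconstructed from the method itself (a bindings dict plus a parent link), not the original class.

class Frame:
    def __init__(self, parent=None):
        self.parent = parent      # None marks the global frame
        self.bindings = {}        # name -> value mappings in this frame

    def __repr__(self):
        if self.parent is None:
            return '<Global Frame>'
        s = sorted(['{0}: {1}'.format(k, v) for k, v in self.bindings.items()])
        return '<{{{0}}} -> {1}>'.format(', '.join(s), repr(self.parent))

child = Frame(parent=Frame())
child.bindings['x'] = 3
print(repr(child))  # -> <{x: 3} -> <Global Frame>>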
Example #43
0
    print('\n###\n', words_df.to_csv(), '\n', sep='')

    with open('words_dfs.txt', 'a') as outf:
        outf.write('\n#{}#{}#\n'.format(recording_name, recording_id))
        outf.write(words_df.to_csv())
        outf.write('\n#\n')

    # Add results for this recording to the Pandas data frame for all words
    if all_words_df is None:
        all_words_df = words_df
    else:
        all_words_df = pd.concat([all_words_df, words_df], axis=0)
    print("END Processing recording ID: %s" % recording_id)


# Display performance measures
perf_measures = transcription_benchmark.compute_performance_measures(all_words_df, all_jobs)


# Show details of word alignments
lattice_details = transcription_benchmark.get_lattice_details(all_words_df, all_jobs)


with open('performance_measures.txt', 'w+') as outf:
    outf.write(repr(perf_measures))
    outf.write('\n')
with open('lattice_details.txt', 'w+') as outf:
    outf.write(lattice_details.to_csv())
    outf.write('\n')

Example #44
0
 def logger_debug(*args):
     # Strings pass through unchanged; everything else is rendered with repr().
     return printer(' '.join(a if isinstance(a, str) else repr(a)
                             for a in args))
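
A self-contained demo of the join behavior, with a trivial stand-in for the printer helper (names here are illustrative only): strings pass through unchanged while other objects are rendered with repr(), so quoting and escapes stay visible.

def printer(message):
    print(message)
    return message

def logger_debug(*args):
    return printer(' '.join(a if isinstance(a, str) else repr(a)
                            for a in args))

logger_debug('status', 404, {'retry': True})
# -> status 404 {'retry': True}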
Example #45
0
#!C:\Users\brgey\PycharmProjects\finalFlatIronPrepProject\venv\Scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function

import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
    try:
        i = sys.argv.index("--" + mode)
        del sys.argv[i]
        break
    except ValueError:
        pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
    sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
    sys.exit(1)
elif mode == "2e-numeric":
    from f2py2e import main
elif mode == "2e-numarray":
    sys.argv.append("-DNUMARRAY")
    from f2py2e import main
elif mode == "2e-numpy":
    from numpy.f2py import main
else:
    sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
    sys.exit(1)
main()
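
The mode-selection loop above pops the first matching "--<mode>" flag out of sys.argv, and quietly leaves mode at the last candidate when no flag is passed. A standalone sketch of the same pattern with an explicit default (flag names invented for illustration):

import sys

def pop_flag(argv, candidates, default):
    # Remove and return the first matching --flag; fall back to the default.
    for name in candidates:
        try:
            argv.remove('--' + name)
            return name
        except ValueError:
            pass
    return default

argv = ['prog.py', '--fast', 'input.txt']
print(pop_flag(argv, ['safe', 'fast'], 'safe'))  # -> fast
print(argv)                                      # -> ['prog.py', 'input.txt']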
Example #46
0
 def __repr__(self):
     return 'LambdaProcedure({0}, {1}, {2})'.format(
         repr(self.formals), repr(self.body), repr(self.env))
Example #47
0
def connect_to_dremio_flight_server_endpoint(host, port, username, password, query,
                                             tls, certs, disable_server_verification, pat_or_auth_token,
                                             engine, session_properties):
    """
    Connects to Dremio Flight server endpoint with the provided credentials.
    It also runs the query and retrieves the result set.
    """
    try:
        # Default to use an unencrypted TCP connection.
        scheme = "grpc+tcp"
        connection_args = {}

        if tls:
            # Connect to the server endpoint with an encrypted TLS connection.
            print('[INFO] Enabling TLS connection')
            scheme = "grpc+tls"
            if certs:
                print('[INFO] Trusted certificates provided')
                # TLS certificates are provided in a list of connection arguments.
                with open(certs, "rb") as root_certs:
                    connection_args["tls_root_certs"] = root_certs.read()
            elif disable_server_verification:
                # Connect to the server endpoint with server verification disabled.
                print('[INFO] Disable TLS server verification.')
                connection_args['disable_server_verification'] = disable_server_verification
            else:
                print('[ERROR] Trusted certificates must be provided to establish a TLS connection')
                sys.exit()

        headers = session_properties
        if not headers:
            headers = []

        if engine:
            headers.append((b'routing_engine', engine.encode('utf-8')))

        # Two WLM settings can be provided upon initial authentication with the Dremio Server Flight Endpoint:
        # routing_tag
        # routing_queue
        headers.append((b'routing_tag', b'test-routing-tag'))
        headers.append((b'routing_queue', b'Low Cost User Queries'))

        client_cookie_middleware = CookieMiddlewareFactory()

        if pat_or_auth_token:
            client = flight.FlightClient("{}://{}:{}".format(scheme, host, port),
                                         middleware=[client_cookie_middleware], **connection_args)

            headers.append((b'authorization', "Bearer {}".format(pat_or_auth_token).encode('utf-8')))
            print('[INFO] Authentication skipped until first request')

        elif username and password:
            client_auth_middleware = DremioClientAuthMiddlewareFactory()
            client = flight.FlightClient("{}://{}:{}".format(scheme, host, port),
                                         middleware=[client_auth_middleware, client_cookie_middleware],
                                         **connection_args)

            # Authenticate with the server endpoint.
            bearer_token = client.authenticate_basic_token(username, password,
                                                           flight.FlightCallOptions(headers=headers))
            print('[INFO] Authentication was successful')
            headers.append(bearer_token)
        else:
            print('[ERROR] Username/password or PAT/Auth token must be supplied.')
            sys.exit()

        if query:
            # Construct FlightDescriptor for the query result set.
            flight_desc = flight.FlightDescriptor.for_command(query)
            print('[INFO] Query: ', query)

            # In addition to the bearer token, a query context can also
            # be provided as an entry of FlightCallOptions.
            # options = flight.FlightCallOptions(headers=[
            #     bearer_token,
            #     (b'schema', b'test.schema')
            # ])

            # Retrieve the schema of the result set.
            options = flight.FlightCallOptions(headers=headers)
            schema = client.get_schema(flight_desc, options)
            print('[INFO] GetSchema was successful')
            print('[INFO] Schema: ', schema)

            # Get the FlightInfo message to retrieve the Ticket corresponding
            # to the query result set.
            flight_info = client.get_flight_info(flight.FlightDescriptor.for_command(query), options)
            print('[INFO] GetFlightInfo was successful')
            print('[INFO] Ticket: ', flight_info.endpoints[0].ticket)

            # Retrieve the result set as a stream of Arrow record batches.
            reader = client.do_get(flight_info.endpoints[0].ticket, options)
            print('[INFO] Reading query results from Dremio')
            print(reader.read_pandas())

    except Exception as exception:
        print("[ERROR] Exception: {}".format(repr(exception)))
        raise
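
A hedged usage sketch: every value below is a placeholder, and the flight client classes and middleware factories are assumed to be imported as in the surrounding script.

connect_to_dremio_flight_server_endpoint(
    host='localhost', port=32010,               # placeholder endpoint
    username='dremio', password='changeme',     # placeholder credentials
    query='SELECT 1',
    tls=False, certs=None, disable_server_verification=False,
    pat_or_auth_token=None, engine=None, session_properties=None)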
Example #48
0
    def process_keywords(self, provider, text):
        """ Processes the query payload from a provider's keyword definitions

        Args:
            provider (str): Provider ID
            text     (str): Keyword placeholders from definitions, ie. {title}

        Returns:
            str: Processed query keywords
        """
        keywords = self.read_keywords(text)
        replacing = get_setting("filter_quotes", bool)

        for keyword in keywords:
            keyword = keyword.lower()
            if 'title' in keyword:
                title = self.info["title"]
                language = definitions[provider]['language']
                use_language = None
                if ':' in keyword:
                    use_language = keyword.split(':')[1].lower()
                if provider not in self.language_exceptions and \
                   (use_language or self.kodi_language) and \
                   'titles' in self.info and self.info['titles']:
                    try:
                        if self.kodi_language and self.kodi_language in self.info['titles']:
                            use_language = self.kodi_language
                        if use_language not in self.info['titles']:
                            use_language = language
                            if 'original' in self.info['titles']:
                                title = self.info['titles']['original']
                        if use_language in self.info['titles'] and self.info['titles'][use_language]:
                            title = self.info['titles'][use_language]
                            title = normalize_string(title)
                            log.info("[%s] Using translated '%s' title %s" % (provider, use_language,
                                                                              repr(title)))
                            log.debug("[%s] Translated titles from da_inc: %s" % (provider, repr(self.info['titles'])))
                    except Exception as e:
                        import traceback
                        log.error("%s failed with: %s" % (provider, repr(e)))
                        map(log.debug, traceback.format_exc().split("\n"))
                text = text.replace('{%s}' % keyword, title)

            if 'year' in keyword:
                text = text.replace('{%s}' % keyword, str(self.info["year"]))

            if 'season' in keyword:
                if '+' in keyword:
                    keys = keyword.split('+')
                    season = str(self.info["season"] + get_int(keys[1]))
                elif ':' in keyword:
                    keys = keyword.split(':')
                    season = ('%%.%sd' % keys[1]) % self.info["season"]
                else:
                    season = '%s' % self.info["season"]
                text = text.replace('{%s}' % keyword, season)

            if 'episode' in keyword:
                if '+' in keyword:
                    keys = keyword.split('+')
                    episode = str(self.info["episode"] + get_int(keys[1]))
                elif ':' in keyword:
                    keys = keyword.split(':')
                    episode = ('%%.%sd' % keys[1]) % self.info["episode"]
                else:
                    episode = '%s' % self.info["episode"]
                text = text.replace('{%s}' % keyword, episode)

        if replacing:
            text = text.replace(u"'", '')

        return text
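
The season/episode branches above support '{season+1}'-style offsets and '{season:2}'-style zero-padding. A standalone sketch of just that numeric formatting (the helper name is invented):

def format_number(keyword, value):
    # '{season+1}' -> add an offset, '{season:2}' -> zero-pad to 2 digits
    if '+' in keyword:
        return str(value + int(keyword.split('+')[1]))
    if ':' in keyword:
        return ('%%.%sd' % keyword.split(':')[1]) % value
    return '%s' % value

print(format_number('season', 7))    # -> 7
print(format_number('season:2', 7))  # -> 07
print(format_number('season+1', 7))  # -> 8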
Example #49
0
	def __str__(self):
		return "VCFToken< key=" + repr(self.key) + ", params=" + repr(self.params) + ", values=" + repr(self.values) + " >"
Example #50
0
def indent(x):
	return ["\n    " + line for line in repr(x).splitlines()]
Example #51
0
 def __str__(self):
     return repr([self.v4, self.v6])
Example #52
0
 def __str__(self):
     """Represent the error."""
     return repr(self.value)
Example #53
0
 def __repr__(self) -> str:
     # NumPy starts the ndarray class name with "array", so we replace it
     # with our class name
     return f"{self.__class__.__name__}(\n      " + repr(
         self.view(np.ndarray))[6:]
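
A runnable sketch of the same trick on a hypothetical subclass (NamedArray is illustrative, not the original class). repr() of a plain ndarray starts with "array(", which is six characters, so slicing [6:] keeps everything after that prefix.

import numpy as np

class NamedArray(np.ndarray):
    def __new__(cls, data):
        return np.asarray(data).view(cls)

    def __repr__(self) -> str:
        # Swap NumPy's leading "array(" for our own class name.
        return f"{self.__class__.__name__}(\n      " + repr(
            self.view(np.ndarray))[6:]

print(repr(NamedArray([1, 2, 3])))
# -> NamedArray(
#       [1, 2, 3])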
Example #54
0
 def __repr_test_value__(self):
     return repr(numpy.array(theano.gof.op.get_test_value(self)))
Example #55
0
File: rpmyum.py  Project: makhomed/fabrix
def _parse_packages(recursion_level, allow_empty_list_of_packages, *args):
    packages = set()
    for arg in args:
        if isinstance(arg, basestring):
            for line in arg.split('\n'):
                line = line.strip()
                if line == "":
                    continue
                packages.update(line.split())
        elif isinstance(arg, collections.Iterable):
            packages.update(_parse_packages(recursion_level + 1, True, *arg))
        else:
            caller = str(inspect.stack()[recursion_level + 1][3])
            fname = str(inspect.stack()[recursion_level + 2][1])
            nline = str(inspect.stack()[recursion_level + 2][2])
            abort('%s: unexpected object \'%s\' in list of packages in file %s line %s' % (caller, repr(arg), fname, nline))
    result = sorted(list(packages))
    if not result and not allow_empty_list_of_packages:
        caller = str(inspect.stack()[recursion_level + 1][3])
        fname = str(inspect.stack()[recursion_level + 2][1])
        nline = str(inspect.stack()[recursion_level + 2][2])
        abort('%s: unexpected empty list of packages in file %s line %s' % (caller, fname, nline))
    return result
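
Given the helpers it relies on (abort, inspect), a call like the following would flatten whitespace-separated strings and nested iterables into one sorted, de-duplicated package list (this is Python 2 code, per basestring):

pkgs = _parse_packages(0, False, 'nginx php\nphp', ['git', ('vim', 'mc')])
print(pkgs)  # -> ['git', 'mc', 'nginx', 'php', 'vim']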
Example #56
0
def hive(name=None):
    import params

    if name == 'hiveserver2':
        # HDP 2.1.* or lower
        if params.hdp_stack_version_major != "" and compare_versions(
                params.hdp_stack_version_major, "2.2.0.0") < 0:
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755)

        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hcat_user,
                                mode=params.hcat_hdfs_user_mode)

        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode)

        # ****** Begin Copy Tarballs ******
        # *********************************
        # HDP 2.2 or higher, copy mapreduce.tar.gz to HDFS
        if params.hdp_stack_version_major != "" and compare_versions(
                params.hdp_stack_version_major, '2.2') >= 0:
            copy_to_hdfs("mapreduce",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)
            copy_to_hdfs("tez",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)

        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        # This can use a different source and dest location to account for both HDP 2.1 and 2.2
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file,
                     host_sys_prepped=params.host_sys_prepped)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file,
                     host_sys_prepped=params.host_sys_prepped)

        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            source_file_pattern = eval("params." + tarball_name +
                                       "_tar_source")
            dest_dir = eval("params." + tarball_name + "_tar_dest_dir")

            if source_file_pattern is None or dest_dir is None:
                continue

            source_files = glob.glob(
                source_file_pattern) if "*" in source_file_pattern else [
                    source_file_pattern
                ]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)

                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file,
                             host_sys_prepped=params.host_sys_prepped)
        # ******* End Copy Tarballs *******
        # *********************************

        # Create Hive Metastore Warehouse Dir
        params.HdfsResource(params.hive_apps_whs_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=0777)

        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode)

        if not is_empty(params.hive_exec_scratchdir) and not urlparse(
                params.hive_exec_scratchdir).path.startswith("/tmp"):
            params.HdfsResource(
                params.hive_exec_scratchdir,
                type="directory",
                action="create_on_execute",
                owner=params.hive_user,
                group=params.hdfs_user,
                mode=0777
            )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

        params.HdfsResource(None, action="execute")

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for client as well as for server.
    # The reason is that stale-configs are service-level, not component.
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)

    setup_atlas_hive()

    if params.hive_specific_configs_supported and name == 'hiveserver2':
        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']['hiveserver2-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template))

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if (name == 'metastore'
            or name == 'hiveserver2') and not os.path.exists(params.target):
        jdbc_connector()

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name == 'metastore':
        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh'))
        if params.init_metastore_schema:
            create_schema_cmd = format(
                "export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                "{hive_bin}/schematool -initSchema "
                "-dbType {hive_metastore_db_type} "
                "-userName {hive_metastore_user_name} "
                "-passWord {hive_metastore_user_passwd!p}")

            check_schema_created_cmd = as_user(
                format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                       "{hive_bin}/schematool -info "
                       "-dbType {hive_metastore_db_type} "
                       "-userName {hive_metastore_user_name} "
                       "-passWord {hive_metastore_user_passwd!p}"),
                params.hive_user)

            # HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
            # Fixing it with the hack below:
            quoted_hive_metastore_user_passwd = quote_bash_args(
                quote_bash_args(params.hive_metastore_user_passwd))
            if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
                or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
                quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[
                    1:-1]
            Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(
                check_schema_created_cmd.replace(
                    format("-passWord {quoted_hive_metastore_user_passwd}"),
                    "-passWord " + utils.PASSWORDS_HIDE_STRING))

            Execute(create_schema_cmd,
                    not_if=check_schema_created_cmd,
                    user=params.hive_user)
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}')))

    if name != "client":
        crt_directory(params.hive_pid_dir)
        crt_directory(params.hive_log_dir)
        crt_directory(params.hive_var_lib)
Example #57
0
def run():
    if not hasattr(tcp.Client, 'abortConnection'):
        print "Twisted doesn't have abortConnection! Upgrade to a newer version of Twisted to avoid memory leaks!"
        print 'Pausing for 3 seconds...'
        time.sleep(3)
    
    realnets = dict((name, net) for name, net in networks.nets.iteritems() if '_testnet' not in name)
    
    parser = fixargparse.FixedArgumentParser(description='p2pool (version %s)' % (p2pool.__version__,), fromfile_prefix_chars='@')
    parser.add_argument('--version', action='version', version=p2pool.__version__)
    parser.add_argument('--net',
        help='use specified network (default: help)',
        action='store', choices=sorted(realnets), default='help', dest='net_name')
    parser.add_argument('--testnet',
        help='''use the network's testnet''',
        action='store_const', const=True, default=False, dest='testnet')
    parser.add_argument('--debug',
        help='enable debugging mode',
        action='store_const', const=True, default=False, dest='debug')
    parser.add_argument('-a', '--address',
        help='generate payouts to this address (default: <address requested from helpd>), or (dynamic)',
        type=str, action='store', default=None, dest='address')
    parser.add_argument('-i', '--numaddresses',
        help='number of help auto-generated addresses to maintain for getwork dynamic address allocation',
        type=int, action='store', default=2, dest='numaddresses')
    parser.add_argument('-t', '--timeaddresses',
        help='seconds between acquisition of new address and removal of single old (default: 2 days or 172800s)',
        type=int, action='store', default=172800, dest='timeaddresses')
    parser.add_argument('--datadir',
        help='store data in this directory (default: <directory run_p2pool.py is in>/data)',
        type=str, action='store', default=None, dest='datadir')
    parser.add_argument('--logfile',
        help='''log to this file (default: data/<NET>/log)''',
        type=str, action='store', default=None, dest='logfile')
    parser.add_argument('--web-static',
        help='use an alternative web frontend in this directory (otherwise use the built-in frontend)',
        type=str, action='store', default=None, dest='web_static')
    parser.add_argument('--merged',
        help='call getauxblock on this url to get work for merged mining (example: http://ncuser:[email protected]:10332/)',
        type=str, action='append', default=[], dest='merged_urls')
    parser.add_argument('--give-author', metavar='DONATION_PERCENTAGE',
        help='donate this percentage of work towards the development of p2pool (default: 1.0)',
        type=float, action='store', default=1.0, dest='donation_percentage')
    parser.add_argument('--iocp',
        help='use Windows IOCP API in order to avoid errors due to large number of sockets being open',
        action='store_true', default=False, dest='iocp')
    parser.add_argument('--irc-announce',
        help='announce any blocks found on irc://irc.freenode.net/#p2pool',
        action='store_true', default=False, dest='irc_announce')
    parser.add_argument('--no-bugreport',
        help='disable submitting caught exceptions to the author',
        action='store_true', default=False, dest='no_bugreport')
    
    p2pool_group = parser.add_argument_group('p2pool interface')
    p2pool_group.add_argument('--p2pool-port', metavar='PORT',
        help='use port PORT to listen for connections (forward this port from your router!) (default: %s)' % ', '.join('%s:%i' % (name, net.P2P_PORT) for name, net in sorted(realnets.items())),
        type=int, action='store', default=None, dest='p2pool_port')
    p2pool_group.add_argument('-n', '--p2pool-node', metavar='ADDR[:PORT]',
        help='connect to existing p2pool node at ADDR listening on port PORT (defaults to default p2pool P2P port) in addition to builtin addresses',
        type=str, action='append', default=[], dest='p2pool_nodes')
    parser.add_argument('--disable-upnp',
        help='''don't attempt to use UPnP to forward p2pool's P2P port from the Internet to this computer''',
        action='store_false', default=True, dest='upnp')
    p2pool_group.add_argument('--max-conns', metavar='CONNS',
        help='maximum incoming connections (default: 40)',
        type=int, action='store', default=40, dest='p2pool_conns')
    p2pool_group.add_argument('--outgoing-conns', metavar='CONNS',
        help='outgoing connections (default: 6)',
        type=int, action='store', default=6, dest='p2pool_outgoing_conns')
    p2pool_group.add_argument('--external-ip', metavar='ADDR[:PORT]',
        help='specify your own public IP address instead of asking peers to discover it, useful for running dual WAN or asymmetric routing',
        type=str, action='store', default=None, dest='p2pool_external_ip')
    parser.add_argument('--disable-advertise',
        help='''don't advertise local IP address as being available for incoming connections. useful for running a dark node, along with multiple -n ADDR's and --outgoing-conns 0''',
        action='store_false', default=True, dest='advertise_ip')
    
    worker_group = parser.add_argument_group('worker interface')
    worker_group.add_argument('-w', '--worker-port', metavar='PORT or ADDR:PORT',
        help='listen on PORT on interface with ADDR for RPC connections from miners (default: all interfaces, %s)' % ', '.join('%s:%i' % (name, net.WORKER_PORT) for name, net in sorted(realnets.items())),
        type=str, action='store', default=None, dest='worker_endpoint')
    worker_group.add_argument('-f', '--fee', metavar='FEE_PERCENTAGE',
        help='''charge workers mining to their own help address (by setting their miner's username to a help address) this percentage fee to mine on your p2pool instance. Amount displayed at http://127.0.0.1:WORKER_PORT/fee (default: 0)''',
        type=float, action='store', default=0, dest='worker_fee')
    
    helpd_group = parser.add_argument_group('helpd interface')
    helpd_group.add_argument('--helpd-config-path', metavar='HELPD_CONFIG_PATH',
        help='custom configuration file path (when helpd -conf option used)',
        type=str, action='store', default=None, dest='helpd_config_path')
    helpd_group.add_argument('--helpd-address', metavar='HELPD_ADDRESS',
        help='connect to this address (default: 127.0.0.1)',
        type=str, action='store', default='127.0.0.1', dest='helpd_address')
    helpd_group.add_argument('--helpd-rpc-port', metavar='HELPD_RPC_PORT',
        help='''connect to JSON-RPC interface at this port (default: %s <read from help.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.RPC_PORT) for name, net in sorted(realnets.items())),
        type=int, action='store', default=None, dest='helpd_rpc_port')
    helpd_group.add_argument('--helpd-rpc-ssl',
        help='connect to JSON-RPC interface using SSL',
        action='store_true', default=False, dest='helpd_rpc_ssl')
    helpd_group.add_argument('--helpd-p2p-port', metavar='HELPD_P2P_PORT',
        help='''connect to P2P interface at this port (default: %s <read from help.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.P2P_PORT) for name, net in sorted(realnets.items())),
        type=int, action='store', default=None, dest='helpd_p2p_port')
    
    helpd_group.add_argument(metavar='HELPD_RPCUSERPASS',
        help='helpd RPC interface username, then password, space-separated (only one being provided will cause the username to default to being empty, and none will cause P2Pool to read them from help.conf)',
        type=str, action='store', default=[], nargs='*', dest='helpd_rpc_userpass')
    
    args = parser.parse_args()
    
    if args.debug:
        p2pool.DEBUG = True
        defer.setDebugging(True)
    else:
        p2pool.DEBUG = False
    
    net_name = args.net_name + ('_testnet' if args.testnet else '')
    net = networks.nets[net_name]
    
    datadir_path = os.path.join((os.path.join(os.path.dirname(sys.argv[0]), 'data') if args.datadir is None else args.datadir), net_name)
    if not os.path.exists(datadir_path):
        os.makedirs(datadir_path)
    
    if len(args.helpd_rpc_userpass) > 2:
        parser.error('a maximum of two arguments are allowed')
    args.helpd_rpc_username, args.helpd_rpc_password = ([None, None] + args.helpd_rpc_userpass)[-2:]
    
    if args.helpd_rpc_password is None:
        conf_path = args.helpd_config_path or net.PARENT.CONF_FILE_FUNC()
        if not os.path.exists(conf_path):
            parser.error('''help configuration file not found. Manually enter your RPC password.\r\n'''
                '''If you actually haven't created a configuration file, you should create one at %s with the text:\r\n'''
                '''\r\n'''
                '''server=1\r\n'''
                '''rpcpassword=%x\r\n'''
                '''\r\n'''
                '''Keep that password secret! After creating the file, restart help.''' % (conf_path, random.randrange(2**128)))
        conf = open(conf_path, 'rb').read()
        contents = {}
        for line in conf.splitlines(True):
            if '#' in line:
                line = line[:line.index('#')]
            if '=' not in line:
                continue
            k, v = line.split('=', 1)
            contents[k.strip()] = v.strip()
        for conf_name, var_name, var_type in [
            ('rpcuser', 'helpd_rpc_username', str),
            ('rpcpassword', 'helpd_rpc_password', str),
            ('rpcport', 'helpd_rpc_port', int),
            ('port', 'helpd_p2p_port', int),
        ]:
            if getattr(args, var_name) is None and conf_name in contents:
                setattr(args, var_name, var_type(contents[conf_name]))
        if 'rpcssl' in contents and contents['rpcssl'] != '0':
            args.helpd_rpc_ssl = True
        if args.helpd_rpc_password is None:
            parser.error('''help configuration file didn't contain an rpcpassword= line! Add one!''')
    
    if args.helpd_rpc_username is None:
        args.helpd_rpc_username = ''
    
    if args.helpd_rpc_port is None:
        args.helpd_rpc_port = net.PARENT.RPC_PORT
    
    if args.helpd_p2p_port is None:
        args.helpd_p2p_port = net.PARENT.P2P_PORT
    
    if args.p2pool_port is None:
        args.p2pool_port = net.P2P_PORT
    
    if args.p2pool_outgoing_conns > 10:
        parser.error('''--outgoing-conns can't be more than 10''')
    
    if args.worker_endpoint is None:
        worker_endpoint = '', net.WORKER_PORT
    elif ':' not in args.worker_endpoint:
        worker_endpoint = '', int(args.worker_endpoint)
    else:
        addr, port = args.worker_endpoint.rsplit(':', 1)
        worker_endpoint = addr, int(port)
    
    if args.address is not None and args.address != 'dynamic':
        try:
            args.pubkey_hash = help_data.address_to_pubkey_hash(args.address, net.PARENT)
        except Exception as e:
            parser.error('error parsing address: ' + repr(e))
    else:
        args.pubkey_hash = None
    
    def separate_url(url):
        s = urlparse.urlsplit(url)
        if '@' not in s.netloc:
            parser.error('merged url netloc must contain an "@"')
        userpass, new_netloc = s.netloc.rsplit('@', 1)
        return urlparse.urlunsplit(s._replace(netloc=new_netloc)), userpass
    merged_urls = map(separate_url, args.merged_urls)
    
    if args.logfile is None:
        args.logfile = os.path.join(datadir_path, 'log')
    
    logfile = logging.LogFile(args.logfile)
    pipe = logging.TimestampingPipe(logging.TeePipe([logging.EncodeReplacerPipe(sys.stderr), logfile]))
    sys.stdout = logging.AbortPipe(pipe)
    sys.stderr = log.DefaultObserver.stderr = logging.AbortPipe(logging.PrefixPipe(pipe, '> '))
    if hasattr(signal, "SIGUSR1"):
        def sigusr1(signum, frame):
            print 'Caught SIGUSR1, closing %r...' % (args.logfile,)
            logfile.reopen()
            print '...and reopened %r after catching SIGUSR1.' % (args.logfile,)
        signal.signal(signal.SIGUSR1, sigusr1)
    deferral.RobustLoopingCall(logfile.reopen).start(5)
    
    class ErrorReporter(object):
        def __init__(self):
            self.last_sent = None
        
        def emit(self, eventDict):
            if not eventDict["isError"]:
                return
            
            if self.last_sent is not None and time.time() < self.last_sent + 5:
                return
            self.last_sent = time.time()
            
            if 'failure' in eventDict:
                text = ((eventDict.get('why') or 'Unhandled Error')
                    + '\n' + eventDict['failure'].getTraceback())
            else:
                text = " ".join([str(m) for m in eventDict["message"]]) + "\n"
            
            from twisted.web import client
            client.getPage(
                url='http://u.forre.st/p2pool_error.cgi',
                method='POST',
                postdata=p2pool.__version__ + ' ' + net.NAME + '\n' + text,
                timeout=15,
            ).addBoth(lambda x: None)
    if not args.no_bugreport:
        log.addObserver(ErrorReporter().emit)
    
    reactor.callWhenRunning(main, args, net, datadir_path, merged_urls, worker_endpoint)
    reactor.run()
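
The configuration-file loop in the middle of run() (strip '#' comments, split on the first '=', keep stripped key/value pairs) is a reusable pattern; a standalone Python 3 sketch:

def parse_conf(text):
    # Parse simple key=value lines, ignoring '#' comments and blank lines.
    contents = {}
    for line in text.splitlines():
        if '#' in line:
            line = line[:line.index('#')]
        if '=' not in line:
            continue
        key, value = line.split('=', 1)
        contents[key.strip()] = value.strip()
    return contents

print(parse_conf('server=1\nrpcpassword=hunter2  # keep secret\n'))
# -> {'server': '1', 'rpcpassword': 'hunter2'}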
Example #58
0
def test(got, expected):
    if got == expected:
        prefix = ' OK '
    else:
        prefix = '  X '
    print('%s got: %s \nexpected: %s\n' % (prefix, repr(got), repr(expected)))
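
For instance (values are illustrative):

test(repr('a\nb'), "'a\\nb'")  # prints the ' OK ' branch
test(repr([1, 2]), '[1,2]')    # prints the '  X ' branch with both values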
Example #59
0
 def __repr__(self):
     if self:
         return "{}-{}".format(self.val, repr(self.next))
     # Fallback assumed: a falsy node would otherwise make __repr__ return
     # None implicitly, and repr() would raise TypeError.
     return "None"
Example #60
0
 def lineReceived(self, line):
     if p2pool.DEBUG:
         print repr(line)
     irc.IRCClient.lineReceived(self, line)