Example #1
1
    def test_syntactic(self):
        """The extractor should yield one sentence per clause of the bio."""
        extractor = SyntacticExtractor(
            [{'bio': u'this is part a1, and this is part a2', 'url': 'www.example.org', 'name': 'abc def'}],
            'bio', 'sentences', 'en', {'be': ['is', 'are']}, self.match_base_form
        )
        sentences = list(extractor.extract(1))

        self.assertEqual(len(sentences), 2)

        # Each expected part should be matched by exactly one sentence.
        remaining = {'a1', 'a2'}
        for sentence in sentences:
            for key in ('url', 'text', 'lu'):
                self.assertIn(key, sentence)
            self.assertEqual(sentence['lu'], 'be')

            matched = next((part for part in remaining if part in sentence['text']), None)
            if matched is None:
                self.fail('Extracted unexpected sentence: %s' % repr(sentence))
            self.assertEqual(sentence['text'], 'this is part ' + matched)
            remaining.discard(matched)

        if remaining:
            self.fail('Did not find parts: %s' % repr(remaining))
Example #2
1
    def test_add_filter(self):
        """Chained ``add_filter`` calls should nest SQ nodes correctly."""
        # Starts out with no filters at all.
        self.assertEqual(len(self.bsq.query_filter), 0)

        self.bsq.add_filter(SQ(foo="bar"))
        self.assertEqual(len(self.bsq.query_filter), 1)

        # Default combination is AND; ``use_or`` switches to OR.
        self.bsq.add_filter(SQ(foo__lt="10"))
        self.bsq.add_filter(~SQ(claris="moof"))
        self.bsq.add_filter(SQ(claris="moof"), use_or=True)

        expected = "<SQ: OR ((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof)>"
        self.assertEqual(repr(self.bsq.query_filter), expected)

        self.bsq.add_filter(SQ(claris="moof"))

        expected = "<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof)>"
        self.assertEqual(repr(self.bsq.query_filter), expected)

        self.bsq.add_filter(SQ(claris="wtf mate"))

        expected = "<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof AND claris__content=wtf mate)>"
        self.assertEqual(repr(self.bsq.query_filter), expected)
Example #3
0
def iterate_node(arg):
    """Run an elliptics iterator over one node/backend and sort its results.

    arg: tuple of (ctx, address, backend_id, ranges) — packed into a single
    argument, presumably because this is dispatched through a worker pool.

    Returns a list of (range_id, filename, address, backend_id, group_id)
    tuples describing the per-range result containers, or None when the
    iteration fails or yields no records.
    """
    ctx, address, backend_id, ranges = arg
    elog = elliptics.Logger(ctx.log_file, int(ctx.log_level))
    stats = ctx.stats["iterate"][str(address)][str(backend_id)]
    stats.timer('process', 'started')
    log.info("Running iterator on node: {0}/{1}".format(address, backend_id))
    log.debug("Ranges:")
    # Renamed from ``range`` — the original shadowed the builtin.
    for key_range in ranges:
        log.debug(repr(key_range))
    stats.timer('process', 'iterate')

    node_id = ctx.routes.get_address_backend_route_id(address, backend_id)

    node = elliptics_create_node(address=address,
                                 elog=elog,
                                 wait_timeout=ctx.wait_timeout,
                                 net_thread_num=4,
                                 io_thread_num=1)

    try:
        flags = elliptics.iterator_flags.key_range
        timestamp_range = ctx.timestamp.to_etime(), Time.time_max().to_etime()
        if ctx.no_meta:
            # Metadata-less iteration; timestamp filtering is skipped.
            flags |= elliptics.iterator_flags.no_meta
        else:
            flags |= elliptics.iterator_flags.ts_range

        log.debug("Running iterator on node: {0}/{1}".format(address, backend_id))
        results, results_len = Iterator.iterate_with_stats(
            node=node,
            eid=node_id,
            timestamp_range=timestamp_range,
            key_ranges=ranges,
            tmp_dir=ctx.tmp_dir,
            address=address,
            backend_id=backend_id,
            group_id=node_id.group_id,
            batch_size=ctx.batch_size,
            stats=stats,
            flags=flags,
            leave_file=True,
            separately=True)
        if results is None or results_len == 0:
            return None

    except Exception as e:
        log.error("Iteration failed for node {0}/{1}: {2}, traceback: {3}"
                  .format(address, backend_id, repr(e), traceback.format_exc()))
        return None

    log.debug("Iterator for node {0}/{1} obtained: {2} record(s)"
              .format(address, backend_id, results_len))

    stats.timer('process', 'sort')
    # Sort every per-range container in place before reporting back.
    for range_id in results:
        results[range_id].sort()

    stats.timer('process', 'finished')
    return [(range_id, container.filename, container.address, container.backend_id, container.group_id)
            for range_id, container in results.items()]
Example #4
0
 def _writer_coro(self):
     """Drain ``outgoing_queue`` and write each packet to the session stream.

     Runs while ``self._running`` is true; on shutdown, flushes whatever is
     still queued before returning.  ``_writer_ready`` is set on every loop
     turn so other coroutines can tell the writer is alive.
     """
     self.logger.debug("Starting writer coro")
     while self._running:
         try:
             self._writer_ready.set()
             # Wake up every 5s so a stop request is noticed promptly.
             packet = yield from asyncio.wait_for(self.outgoing_queue.get(), 5)
             yield from packet.to_stream(self.session.writer)
             self.logger.debug(" -out-> " + repr(packet))
             yield from self.session.writer.drain()
             self.packet_sent.send(packet)
         except asyncio.TimeoutError:
             # Queue was idle; loop again and re-check self._running.
             self.logger.debug("Output queue get timeout")
         except Exception as e:
             # logger.warn() is a deprecated alias; use warning().
             self.logger.warning("Unhandled exception in writer coro: %s" % e)
             break
     self.logger.debug("Writer coro stopping")
     # Flush queue before stopping
     if not self.outgoing_queue.empty():
         while True:
             try:
                 packet = self.outgoing_queue.get_nowait()
                 yield from packet.to_stream(self.session.writer)
                 self.logger.debug(" -out-> " + repr(packet))
             except asyncio.QueueEmpty:
                 break
             except Exception as e:
                 self.logger.warning("Unhandled exception in writer coro: %s" % e)
     self.logger.debug("Writer coro stopped")
Example #5
0
    def _http_request(self, method, url, headers, body,
                      ignore_result_body=False):
        """Perform an HTTP request against the server.

        method: the HTTP method to use
        url: the URL to request (not including server portion)
        headers: headers for the request
        body: body to send with the request
        ignore_result_body: the body of the result will be ignored

        Returns: a http_client response object

        Raises the matching ``exc`` error for 400/401/403/409/500 statuses,
        using the response body as the explanation.
        """
        if self.auth_token:
            headers.setdefault('x-auth-token', self.auth_token)

        LOG.debug('Request: %(method)s http://%(server)s:%(port)s'
                  '%(url)s with headers %(headers)s',
                  {'method': method,
                   'server': self.conn.host,
                   'port': self.conn.port,
                   'url': url,
                   'headers': repr(headers)})
        self.conn.request(method, url, body, headers)

        response = self.conn.getresponse()
        headers = self._header_list_to_dict(response.getheaders())
        code = response.status
        code_description = http_client.responses[code]
        LOG.debug('Response: %(code)s %(status)s %(headers)s',
                  {'code': code,
                   'status': code_description,
                   'headers': repr(headers)})

        # Translate known error statuses into typed exceptions.  A mapping
        # replaces the original chain of five identical ``if`` blocks; the
        # codes are mutually exclusive so the order never mattered.
        error_map = {
            400: exc.HTTPBadRequest,
            401: exc.HTTPUnauthorized,
            403: exc.HTTPForbidden,
            409: exc.HTTPConflict,
            500: exc.HTTPInternalServerError,
        }
        if code in error_map:
            raise error_map[code](explanation=response.read())

        if ignore_result_body:
            # NOTE: because we are pipelining requests through a single HTTP
            # connection, http_client requires that we read the response body
            # before we can make another request. If the caller knows they
            # don't care about the body, they can ask us to do that for them.
            response.read()
        return response
Example #6
0
def test_repr():
    """repr/str of a dask array should expose its name, shape and dtype."""
    arr = da.ones((4, 4), chunks=(2, 2))
    rendered = repr(arr)
    assert arr.name[:5] in rendered
    assert str(arr.shape) in rendered
    assert str(arr._dtype) in rendered
    # A very large array must still render as a short summary.
    tall = da.ones((4000, 4), chunks=(4, 2))
    assert len(str(tall)) < 1000
Example #7
0
def test_body():
    """Check code blocks."""
    statements = (ast.IntLiteral(10), ast.IntLiteral(20))
    block = ast.Body(statements)
    # The tuple is stored by reference, not copied.
    assert block['statements'] is statements
    assert str(block) == '  10\n  20'
    assert repr(block) == 'Body({body_tuple})'.format(body_tuple=repr(statements))
Example #8
0
    def gexpect(self, tag):
        """Block until a gamedata item tagged ``tag`` arrives; return its payload.

        Scans the queued gamedata in ``self.gdqueue``.  Non-matching items are
        re-queued and dropped after being scanned 15 times; a queued
        ``EndpointDied`` instance is re-raised.  When the queue is exhausted
        without a match, waits on ``self.gdevent`` for new data.
        """
        log.debug('GAME_EXPECT: %s', repr(tag))
        l = self.gdqueue
        e = self.gdevent
        while True:
            # Drain the queue once: popleft + conditional re-append keeps
            # unmatched items (in original order) for later expectations.
            for i in xrange(len(l)):
                d = l.popleft()
                if isinstance(d, EndpointDied):
                    raise d

                elif d[0] == tag:
                    log.debug('GAME_READ: %s', repr(d))
                    self.usergdhistory.append((self.player_index, d[0], d[1]))
                    return d[1]

                else:
                    # Items scanned too many times are dropped to keep the
                    # queue from growing without bound.
                    d.scan_count += 1
                    if d.scan_count >= 15:
                        log.debug('Dropped gamedata: %s' % d)
                    else:
                        log.debug('GAME_DATA_MISS: %s', repr(d))
                        log.debug('EXPECTS: %s' % tag)
                        l.append(d)
            e.clear()
            # Sleep until new gamedata is signalled, then rescan.
            e.wait()
def test_ViewInventory___deepcopy___01():
    """A deep copy of a fresh ViewInventory must compare and print equal."""
    original = idetools.ViewInventory()
    duplicate = copy.deepcopy(original)

    assert original == duplicate
    assert repr(original) == repr(duplicate)
Example #10
0
    def test_categorical_series_repr_ordered(self):
        """repr of an ordered categorical Series joins categories with ``<``."""
        s = Series(Categorical([1, 2, 3], ordered=True))
        exp = """0    1
1    2
2    3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""

        assert repr(s) == exp

        # Longer category listings are truncated with "..." in the middle.
        s = Series(Categorical(np.arange(10), ordered=True))
        exp = """0    0
1    1
2    2
3    3
4    4
5    5
6    6
7    7
8    8
9    9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""

        assert repr(s) == exp
Example #11
0
    def test_categorical_series_repr(self):
        """repr of an unordered categorical Series joins categories with commas."""
        s = Series(Categorical([1, 2, 3]))
        exp = """0    1
1    2
2    3
dtype: category
Categories (3, int64): [1, 2, 3]"""

        assert repr(s) == exp

        # Longer category listings are truncated with "..." in the middle.
        s = Series(Categorical(np.arange(10)))
        exp = """0    0
1    1
2    2
3    3
4    4
5    5
6    6
7    7
8    8
9    9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""

        assert repr(s) == exp
Example #12
0
    def test_categorical_repr_unicode(self):
        """Non-ASCII category values must not break repr/str rendering."""
        # GH#21002 if len(index) > 60, sys.getdefaultencoding()=='ascii',
        # and we are working in PY2, then rendering a Categorical could raise
        # UnicodeDecodeError by trying to decode when it shouldn't

        class County(StringMixin):
            # Minimal object whose text form contains a non-ASCII character.
            name = u'San Sebastián'
            state = u'PR'

            def __unicode__(self):
                return self.name + u', ' + self.state

        # 61 elements: long enough to trigger the truncated-index code path.
        cat = pd.Categorical([County() for n in range(61)])
        idx = pd.Index(cat)
        ser = idx.to_series()

        if compat.PY3:
            # no reloading of sys, just check that the default (utf8) works
            # as expected
            repr(ser)
            str(ser)

        else:
            # set sys.defaultencoding to ascii, then change it back after
            # the test
            with tm.set_defaultencoding('ascii'):
                repr(ser)
                str(ser)
Example #13
0
    def test_various_ops(self):
        """Exercise start/join/repr across a small pool of throttled threads."""
        # This takes about n/3 seconds to run (about n/3 clumps of tasks,
        # times about 1 second per clump).
        NUMTASKS = 10

        # no more than 3 of the 10 can run at once
        sema = threading.BoundedSemaphore(value=3)
        mutex = threading.RLock()
        numrunning = Counter()

        threads = []

        for i in range(NUMTASKS):
            t = TestThread("<thread %d>" % i, self, sema, mutex, numrunning)
            threads.append(t)
            self.assertEqual(t.ident, None)
            # Raw string: ``\(`` is an invalid escape in a plain literal
            # (SyntaxWarning, and an error on newer Pythons).
            self.assertTrue(re.match(r"<TestThread\(.*, initial\)>", repr(t)))
            t.start()

        if verbose:
            print("waiting for all tasks to complete")
        for t in threads:
            t.join(NUMTASKS)
            self.assertTrue(not t.is_alive())
            self.assertNotEqual(t.ident, 0)
            self.assertFalse(t.ident is None)
            self.assertTrue(re.match(r"<TestThread\(.*, stopped -?\d+\)>", repr(t)))
        if verbose:
            print("all tasks done")
        self.assertEqual(numrunning.get(), 0)
Example #14
0
    def edit_tagged_sel(self, tag_sel_idx, *, note=None):
        """Replace the note on the tagged selection at ``tag_sel_idx`` and persist."""
        entry = self.store[tag_sel_idx]
        previous = entry.note
        entry.note = note
        print("Updated note: %s -> %s" % (repr(previous), repr(note)))

        self.store.save()
Example #15
0
 def __repr__(self):
     # Python 2: repr() yields byte strings here, so each piece is decoded
     # to unicode for %-formatting and the final result re-encoded to UTF-8
     # (a str return is required from __repr__ on PY2).
     r = u"[%s ->\n %s\n source=%s strength=%.2f votes=%d)]" % (
         repr(self.input).decode("utf8"),
         repr(self.output).decode("utf8"),
         self.data_source.name, self.strength, self.votes
     )
     return r.encode("utf8")
Example #16
0
    def _filter_ids_helper(cls, df_or_series, ids, ids_to_keep):
        """Drop every row of ``df_or_series`` whose ID is not in ``ids_to_keep``.

        ``ids_to_keep`` may be any iterable; it must be non-empty, free of
        duplicates, and a subset of ``ids``; otherwise ValueError is raised.
        Order of the remaining rows is preserved.
        """
        # Materialize once so the iterable can be length-checked and
        # re-iterated below.
        keep_list = list(ids_to_keep)

        if not keep_list:
            raise ValueError("`ids_to_keep` must contain at least one ID.")

        duplicates = find_duplicates(keep_list)
        if duplicates:
            raise ValueError(
                "`ids_to_keep` must contain unique IDs. The following IDs are "
                "duplicated: %s" %
                (', '.join(repr(e) for e in sorted(duplicates))))

        keep_set = set(keep_list)
        missing_ids = keep_set - ids
        if missing_ids:
            raise ValueError(
                "The following IDs are not present in the metadata: %s"
                % (', '.join(repr(e) for e in sorted(missing_ids))))

        # Dropping the complement (rather than selecting) keeps the original
        # row order intact.
        ids_to_discard = ids - keep_set
        return df_or_series.drop(labels=ids_to_discard, axis='index',
                                 inplace=False, errors='raise')
    def runTest(self):
        """Scan the OPAL msglog for PHB entries missing LOC_CODE/SLOT labels."""
        self.setup_test()
        self.log_entries = self.c.run_command_ignore_fail("cat /sys/firmware/opal/msglog |  grep 'PHB#' | grep -i  ' C:'")
        failed_eplist = []
        failed_slotlist = []
        failed_swuplist = []
        match_list = ["[EP  ]", "[LGCY]", "[PCID]", "[ETOX]" ]

        for entry in self.log_entries:
            if entry == '':
                continue

            # Extract the PHB bus/device identifier; fall back to the raw line.
            matchObj = re.match(r"(.*) PHB#(.*) \[(.*)", entry)
            if matchObj:
                bdfn = matchObj.group(2)
            else:
                log.debug(entry)
                bdfn = entry

            ep_present = False
            # Check for a end point PCI device, it should have LOC_CODE label
            for string in match_list:
                if string in entry:
                    ep_present = True
                    if "LOC_CODE" in entry:
                        log.debug("Location code found for entry %s" % bdfn)
                    else:
                        failed_eplist.append(bdfn)
                    break
            else:
                ep_present = False

            if ep_present:
                continue

            if "[SWUP]" in entry:
                if "LOC_CODE" in entry:
                    # BUG FIX: these two messages used "%s"...format(bdfn),
                    # which never interpolated bdfn; use %-formatting like
                    # the rest of this method.
                    log.debug("Entry %s has LOC_CODE" % bdfn)
                    continue
                if "SLOT" in entry:
                    log.debug("Entry %s has SLOT" % bdfn)
                    continue
                failed_swuplist.append(bdfn)

            # If it is a pcie slot check for SLOT entry
            if "SLOT" in entry:
                log.debug("Entry %s has the slot label" % bdfn)
            else:
                failed_slotlist.append(bdfn)

        log.debug(repr(failed_eplist))
        log.debug(repr(failed_slotlist))
        log.debug(repr(failed_swuplist))
        if (len(failed_slotlist) == 0) and (len(failed_eplist) == 0):
            return
        failed_eplist = '\n'.join(filter(None, failed_eplist))
        failed_slotlist = '\n'.join(filter(None, failed_slotlist))
        failed_swuplist = '\n'.join(filter(None, failed_swuplist))
        message = "SLOT Label failures: %s\n LOC_CODE failures:%s\nSWUP failures:%s\n" % (failed_slotlist, failed_eplist, failed_swuplist)
        self.assertTrue(False, message)
Example #18
0
    def test_categorical_series_repr_datetime_ordered(self):
        """repr of ordered datetime categoricals, naive and tz-aware."""
        idx = date_range('2011-01-01 09:00', freq='H', periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   2011-01-01 09:00:00
1   2011-01-01 10:00:00
2   2011-01-01 11:00:00
3   2011-01-01 12:00:00
4   2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""  # noqa

        assert repr(s) == exp

        # Same check with a timezone-aware index: the dtype line must carry
        # the timezone and values include the UTC offset.
        idx = date_range('2011-01-01 09:00', freq='H', periods=5,
                         tz='US/Eastern')
        s = Series(Categorical(idx, ordered=True))
        exp = """0   2011-01-01 09:00:00-05:00
1   2011-01-01 10:00:00-05:00
2   2011-01-01 11:00:00-05:00
3   2011-01-01 12:00:00-05:00
4   2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                             2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
                                             2011-01-01 13:00:00-05:00]"""  # noqa

        assert repr(s) == exp
Example #19
0
 def __str__(self, nesting=0, indent=''):
     """Render public attributes as an indented, sorted, YAML-like listing.

     nesting: current recursion depth, used to compute the indentation.
     indent: extra prefix prepended before the computed indentation.
     Attributes whose names start with "_" are skipped.
     """
     attrs = []
     indentation = indent + "    " * nesting
     for k, v in self.__dict__.items():
         if not k.startswith("_"):
             text = [indentation, k, ":"]
             if isinstance(v, Container):
                 # Nested containers recurse one level deeper; empty ones
                 # stay on the same line as the key.
                 if len(v) > 0:
                     text.append('\n')
                 text.append(v.__str__(nesting + 1))
             elif isinstance(v, list):
                 if len(v) == 0:
                     text.append(' []')
                 else:
                     # Lists render one "-" item per element.
                     for v_ in v:
                         text.append('\n' + indentation + "-")
                         if isinstance(v_, Container):
                             text.append('\n' + v_.__str__(nesting + 1))
                         else:
                             text.append(" " + repr(v_))
             else:
                 text.append(' ' + repr(v))
             attrs.append("".join(text))
     # Sorted for deterministic output regardless of dict ordering.
     attrs.sort()
     return "\n".join(attrs)
Example #20
0
    def test_categorical_series_repr_period_ordered(self):
        """repr of ordered period categoricals for hourly and monthly freqs."""
        idx = period_range('2011-01-01 09:00', freq='H', periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   2011-01-01 09:00
1   2011-01-01 10:00
2   2011-01-01 11:00
3   2011-01-01 12:00
4   2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
                            2011-01-01 13:00]"""  # noqa

        assert repr(s) == exp

        # Monthly frequency renders shorter labels and fits on one line.
        idx = period_range('2011-01', freq='M', periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   2011-01
1   2011-02
2   2011-03
3   2011-04
4   2011-05
dtype: category
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""

        assert repr(s) == exp
 def __repr__(self):
     """Show either the leaf value or, for internal nodes, both children."""
     if self.value is None:
         return "Huffman_Node(bit_0=%s, bit_1=%s)" % \
             (repr(self.bit_0),
              repr(self.bit_1))
     return "Huffman_Node(value=%s)" % (repr(self.value))
Example #22
0
    def test_categorical_series_repr_timedelta(self):
        """repr of unordered timedelta categoricals, short and truncated."""
        idx = timedelta_range('1 days', periods=5)
        s = Series(Categorical(idx))
        exp = """0   1 days
1   2 days
2   3 days
3   4 days
4   5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""

        assert repr(s) == exp

        # Ten sub-day values: full HH:MM:SS form, truncated with "...".
        idx = timedelta_range('1 hours', periods=10)
        s = Series(Categorical(idx))
        exp = """0   0 days 01:00:00
1   1 days 01:00:00
2   2 days 01:00:00
3   3 days 01:00:00
4   4 days 01:00:00
5   5 days 01:00:00
6   6 days 01:00:00
7   7 days 01:00:00
8   8 days 01:00:00
9   9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                   3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
                                   8 days 01:00:00, 9 days 01:00:00]"""  # noqa

        assert repr(s) == exp
Example #23
0
def test_program():
    """Check program block."""
    statements = (ast.IntLiteral(10), ast.IntLiteral(20))
    prog = ast.Program(statements)
    # The tuple is stored by reference, not copied.
    assert prog['statements'] is statements
    assert str(prog) == '10\n20'
    assert repr(prog) == 'Program({program_tuple})'.format(program_tuple=repr(statements))
Example #24
0
    def test_categorical_series_repr_timedelta_ordered(self):
        """repr of ordered timedelta categoricals, short and truncated."""
        idx = timedelta_range('1 days', periods=5)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   1 days
1   2 days
2   3 days
3   4 days
4   5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""  # noqa

        assert repr(s) == exp

        # Ten sub-day values: full HH:MM:SS form, truncated with "...".
        idx = timedelta_range('1 hours', periods=10)
        s = Series(Categorical(idx, ordered=True))
        exp = """0   0 days 01:00:00
1   1 days 01:00:00
2   2 days 01:00:00
3   3 days 01:00:00
4   4 days 01:00:00
5   5 days 01:00:00
6   6 days 01:00:00
7   7 days 01:00:00
8   8 days 01:00:00
9   9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                   3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
                                   8 days 01:00:00 < 9 days 01:00:00]"""  # noqa

        assert repr(s) == exp
Example #25
0
def closeScoringFiles():
	"""Flush final counters and close the three module-level scoring files."""
	global filePointerForRuntimeScoring, filePointerForTimeRecording, filePointerForRuntimeNegativeScoring
	dataRecording_controlSchema.closeFile(filePointerForRuntimeScoring)
	# Record the accumulated paused time before closing the time file.
	filePointerForTimeRecording.write(repr(some_global_variables.pausedTime) + "\n")
	dataRecording_controlSchema.closeFile(filePointerForTimeRecording)
	# Record the final negative-score counter before closing its file.
	filePointerForRuntimeNegativeScoring.write(repr(negativeScoreCounter) + "\n")
	dataRecording_controlSchema.closeFile(filePointerForRuntimeNegativeScoring)
def test_volume_source_space():
    """Test setting up volume source spaces."""
    tempdir = _TempDir()
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, 'temp-src.fif')
    surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
    surf['rr'] *= 1e3  # convert to mm
    # The one in the testing dataset (uses bem as bounds)
    for bem, surf in zip((fname_bem, None), (None, surf)):
        # Each variant must survive a write/read round-trip through disk.
        src_new = setup_volume_source_space(
            'sample', pos=7.0, bem=bem, surface=surf, mri='T1.mgz',
            subjects_dir=subjects_dir)
        write_source_spaces(temp_name, src_new, overwrite=True)
        src[0]['subject_his_id'] = 'sample'  # XXX: to make comparison pass
        _compare_source_spaces(src, src_new, mode='approx')
        del src_new
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode='approx')
    pytest.raises(IOError, setup_volume_source_space, 'sample',
                  pos=7.0, bem=None, surface='foo',  # bad surf
                  mri=fname_mri, subjects_dir=subjects_dir)
    assert repr(src) == repr(src_new)
    assert src.kind == 'volume'
    # Spheres
    sphere = make_sphere_model(r0=(0., 0., 0.), head_radius=0.1,
                               relative_radii=(0.9, 1.0), sigmas=(0.33, 1.0))
    src = setup_volume_source_space(pos=10)
    src_new = setup_volume_source_space(pos=10, sphere=sphere)
    _compare_source_spaces(src, src_new, mode='exact')
    pytest.raises(ValueError, setup_volume_source_space, sphere='foo')
    # Need a radius
    sphere = make_sphere_model(head_radius=None)
    pytest.raises(ValueError, setup_volume_source_space, sphere=sphere)
Example #27
0
	def _child_main_loop(self, queue):
		"""Poll a connectivity-check URL and log in to the captive portal when needed.

		Python 2 code.  Reports 'Already logined' / 'Need login' through
		``queue`` on every cycle, then sleeps for the configured
		``threadtime`` before polling again.
		"""
		while True:
			url = "http://geekhost.net/OK"
			f = urllib.urlopen(url)
			data = f.read()
			#print data
			# The check page returns the literal text "OK" when logged in.
			abcPattern = re.compile(r'OK')
			if abcPattern.match(data):
				queue.put('Already logined')
			else:
				queue.put('Need login')
				LOGIN_URL = 'https://auth-wlc.ntwk.dendai.ac.jp/login.html'
				#LOGIN_URL = 'http://geekhost.net/checkparams.php'
				# NOTE(review): yaml.load without an explicit Loader can
				# execute arbitrary tags; fine for a trusted local
				# config.yaml, but yaml.safe_load would be safer.
				pd = yaml.load(open('config.yaml').read().decode('utf-8'))
				pd['buttonClicked'] = '4'
				pd['redirect_url'] = 'http://google.com/'
				pd["err_flag"] = "0" 
				pd["err_msg"] = ""
				pd["info_flag"] = "0"
				pd["info_msg"] = ""
				params = urllib.urlencode(pd)
				print repr(params)
				up = urllib.urlopen(LOGIN_URL, params)
			# Then sleep until the next poll (interval comes from config.yaml).
			time.sleep(yaml.load(open('config.yaml').read().decode('utf-8'))['threadtime'])
Example #28
0
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
    """Write a rowCount x colCount synthetic CSV of random UTF-8/UTF-16 rows.

    Python 2 code.  The output encoding is selected by the module-level
    UTF8/UTF8_MULTIBYTE/UTF16 flags.  NOTE(review): ``r1`` is created from
    SEED but never used below — randomness comes from
    generate_random_utf8_string instead; confirm whether that is intended.
    """
    r1 = random.Random(SEED)
    if UTF8 or UTF8_MULTIBYTE:
        dsf = codecs.open(csvPathname, encoding='utf-8', mode='w+')
    elif UTF16:
        dsf = codecs.open(csvPathname, encoding='utf-16', mode='w+')
    else:
        dsf = open(csvPathname, "w+")

    for i in range(rowCount):
        if UTF16:
            # Fixed 3-char unicode row: curly quotes around one multibyte char.
            u = unichr(0x2018) + unichr(6000) + unichr(0x2019)
            rowDataCsv = u
        else: # both ascii and utf-8 go here?
            rowData = []
            for j in range(colCount):
                r = generate_random_utf8_string(length=2)
                rowData.append(r)
            rowDataCsv = ",".join(rowData)
        if UTF16:
            # we're already passing it unicode. no decoding needed
            print "utf16:", repr(rowDataCsv), type(rowDataCsv)
            decoded = rowDataCsv
        else:
            print "str:", repr(rowDataCsv), type(rowDataCsv)
            decoded = rowDataCsv.decode('utf-8')
            # this has the right length..multibyte utf8 are decoded 
            print "utf8:" , repr(decoded), type(decoded)
        
        # dsf.write(rowDataCsv + "\n")
        dsf.write(decoded + "\n")
    dsf.close()
Example #29
0
def main_loop(expr):
    """Step-reduce ``expr`` until it is a Value with no pending work; return it.

    RPython-style interpreter loop (Python 2): jit_merge_point /
    can_enter_jit annotate the loop for the JIT driver, keyed on the
    function being applied in a recursive substitution.
    """
    function = None
    todo = None
    i=0
    while True:
        #print "meh"
        jitdriver.jit_merge_point(function=function, todo=todo, expr=expr)
        if isinstance(expr, Substitution):
            expr = expr.apply()
        # Done only when fully reduced AND the continuation is exhausted.
        if isinstance(expr, Value) and todo is None:
            break
        #print expr, todo
        #import pdb; pdb.set_trace()
        print repr(expr)
        expr, todo = expr.step(todo)
        i=i+1
        print i
        function = None
        if isinstance(expr, Substitution):
            recursive = expr.recursive
            #print recursive
            function = expr.rhs
            #print repr(function)
            #print function.name
            if recursive:
                #print "can enter jit", function, expr
                jitdriver.can_enter_jit(function=function, todo=todo, expr=expr)
    return expr
Example #30
0
 def report_details(self, doc, path, diff1, diff2):
     """Write one DIFF table row for ``path`` unless both values are equal.

     Booleans render via repr(); other falsy values render as "".  Fields
     whose path ends in ".change" are converted to dates before being
     compared and displayed.
     """
     def describe(value):
         # Booleans must show as True/False, not be swallowed by falsiness.
         if isinstance(value, bool):
             return repr(value)
         return str(value) if value else ""

     desc1 = describe(diff1)
     desc2 = describe(diff2)
     if path.endswith(".change"):
         # Timestamp fields are compared and shown as dates.
         diff1 = todate(diff1)
         diff2 = todate(diff2)
         desc1 = diff1
         desc2 = diff2
     if diff1 == diff2:
         return

     doc.start_row()
     for text, style in ((self.format_struct_path(path), 'DIFF-TableHeading'),
                         (desc1, 'DIFF-Text'),
                         (desc2, 'DIFF-Text')):
         doc.start_cell('DIFF-TableCell')
         doc.start_paragraph(style)
         doc.write_text(text)
         doc.end_paragraph()
         doc.end_cell()
     doc.end_row()