def test_fetch_with_data(self): create(self.SIMPLE_CREATE_ARGS) update_start = time_ref + 1 for ts in range(update_start, update_start + 300, 10): update('/tmp/foo', '%i:100:200' % ts) ret = fetch( '/tmp/foo', 'AVERAGE', '-s %i' % time_ref, '-e %i' % (time_ref + 400) ) ref = ( (1368278970, 1368279380, 10), (six.u('a'), six.u('b')), [ (None, None), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (100.0, 200.0), (None, None), (None, None), (None, None), (None, None), (None, None), (None, None), (None, None), (None, None), (None, None), (None, None), (None, None) ] ) self.assertEqual(ref, ret)
def test__gather_files(self, path_mock, os_mock): """ Tests the _gather_files function """ self.app.static_folder = '/home' self.app.static_url_path = '/static' bp_a = Mock(static_folder='/home/bar', static_url_path='/a/bar', url_prefix=None) bp_b = Mock(static_folder='/home/zoo', static_url_path='/b/bar', url_prefix=None) bp_c = Mock(static_folder=None) self.app.blueprints = {'a': bp_a, 'b': bp_b, 'c': bp_c} dirs = {'/home': [('/home', None, ['.a'])], '/home/bar': [('/home/bar', None, ['b'])], '/home/zoo': [('/home/zoo', None, ['c']), ('/home/zoo/foo', None, ['d', 'e'])]} os_mock.side_effect = dirs.get path_mock.return_value = True expected = {('/home/bar', six.u('/a/bar')): ['/home/bar/b'], ('/home/zoo', six.u('/b/bar')): ['/home/zoo/c', '/home/zoo/foo/d', '/home/zoo/foo/e']} actual = flask_s3._gather_files(self.app, False) self.assertEqual(expected, actual) expected[('/home', six.u('/static'))] = ['/home/.a'] actual = flask_s3._gather_files(self.app, True) self.assertEqual(expected, actual)
def test_find_archive_with_unknown_properties(self): archive_id = u('f6e7ee58-d6cf-4a59-896b-6d56b158ec71') httpretty.register_uri(httpretty.GET, u('https://api.opentok.com/v2/partner/{0}/archive/{1}').format(self.api_key, archive_id), body=textwrap.dedent(u("""\ { "createdAt" : 1395187836000, "duration" : 62, "id" : "f6e7ee58-d6cf-4a59-896b-6d56b158ec71", "name" : "", "partnerId" : 123456, "reason" : "", "sessionId" : "SESSIONID", "size" : 8347554, "status" : "expired", "url" : null, "hasAudio": true, "hasVideo": true, "notarealproperty" : "not a real value" }""")), status=200, content_type=u('application/json')) archive = self.opentok.get_archive(archive_id) expect(archive).to.be.an(Archive)
def frozenset_string(value, seen):
    if value:
        return u('frozenset({%s})') % (u(', ').join(sorted(
            show(c, seen) for c in value
        )))
    else:
        return repr(value)
def test_non_bytes(self):
    padder = padding.PKCS7(128).padder()
    with pytest.raises(TypeError):
        padder.update(six.u("abc"))
    unpadder = padding.PKCS7(128).unpadder()
    with pytest.raises(TypeError):
        unpadder.update(six.u("abc"))
def sanitize(input, cleaner=DocumentCleaner, wrap='p'):
    """Cleanup markup using a given cleanup configuration.
    Unwrapped text will be wrapped with wrap parameter.
    """
    if 'body' not in cleaner.allow_tags:
        cleaner.allow_tags.append('body')

    input = six.u("<html><body>%s</body></html>") % input
    document = html.document_fromstring(input)
    bodies = [e for e in document if html._nons(e.tag) == 'body']
    body = bodies[0]

    cleaned = cleaner.clean_html(body)
    remove_empty_tags(cleaned)
    strip_outer_breaks(cleaned)

    if wrap is not None:
        if wrap in html.defs.tags:
            wrap_text(cleaned, wrap)
        else:
            raise ValueError(
                'Invalid html tag provided for wrapping the sanitized text')

    output = six.u('').join([etree.tostring(fragment, encoding=six.text_type)
                             for fragment in cleaned.iterchildren()])
    if wrap is None and cleaned.text:
        output = cleaned.text + output

    return output
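# A minimal usage sketch for sanitize() above. It assumes lxml is installed and that
# DocumentCleaner is the lxml.html.clean.Cleaner-style configuration this module refers
# to; the exact markup returned depends entirely on that cleaner's settings.
if __name__ == '__main__':
    dirty = six.u('<script>alert(1)</script><b>bold</b> and some plain text')
    print(sanitize(dirty, wrap='p'))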
def info(self):
    if self.project is not None:
        return six.u("join project '%s'") % self.project.pid
    elif self.name:
        return six.u("create project '%s'") % self.name
    else:
        return six.u("create/join a project")
def parseInvstmtrs(cls_, invstmtrs_list): ret = [] for invstmtrs_ofx in invstmtrs_list: account = InvestmentAccount() acctid_tag = invstmtrs_ofx.find('acctid') if (hasattr(acctid_tag, 'contents')): try: account.account_id = acctid_tag.contents[0].strip() except IndexError: account.warnings.append( six.u("Empty acctid tag for %s") % invstmtrs_ofx) if cls_.fail_fast: raise brokerid_tag = invstmtrs_ofx.find('brokerid') if (hasattr(brokerid_tag, 'contents')): try: account.brokerid = brokerid_tag.contents[0].strip() except IndexError: account.warnings.append( six.u("Empty brokerid tag for %s") % invstmtrs_ofx) if cls_.fail_fast: raise account.type = AccountType.Investment if (invstmtrs_ofx): account.statement = cls_.parseInvestmentStatement( invstmtrs_ofx) ret.append(account) return ret
def parseBalance(cls_, statement, stmt_ofx, bal_tag_name, bal_attr, bal_date_attr, bal_type_string): bal_tag = stmt_ofx.find(bal_tag_name) if hasattr(bal_tag, "contents"): balamt_tag = bal_tag.find('balamt') dtasof_tag = bal_tag.find('dtasof') if hasattr(balamt_tag, "contents"): try: setattr(statement, bal_attr, decimal.Decimal( balamt_tag.contents[0].strip())) except (IndexError, decimal.InvalidOperation): ex = sys.exc_info()[1] statement.warnings.append( six.u("%s balance amount was empty for %s") % (bal_type_string, stmt_ofx)) if cls_.fail_fast: raise OfxParserException("Empty %s balance" % bal_type_string) if hasattr(dtasof_tag, "contents"): try: setattr(statement, bal_date_attr, cls_.parseOfxDateTime( dtasof_tag.contents[0].strip())) except IndexError: statement.warnings.append( six.u("%s balance date was empty for %s") % (bal_type_string, stmt_ofx)) if cls_.fail_fast: raise except ValueError: statement.warnings.append( six.u("%s balance date was not allowed for %s") % (bal_type_string, stmt_ofx)) if cls_.fail_fast: raise
def test_zero_byte_string(): # Tests hack to allow chars of non-zero length, but 0 bytes # make reader-like thing str_io = cStringIO() r = _make_readerlike(str_io, boc.native_code) c_reader = m5u.VarReader5(r) tag_dt = np.dtype([('mdtype', 'u4'), ('byte_count', 'u4')]) tag = np.zeros((1,), dtype=tag_dt) tag['mdtype'] = mio5p.miINT8 tag['byte_count'] = 1 hdr = m5u.VarHeader5() # Try when string is 1 length hdr.set_dims([1,]) _write_stream(str_io, tag.tostring() + b' ') str_io.seek(0) val = c_reader.read_char(hdr) assert_equal(val, u(' ')) # Now when string has 0 bytes 1 length tag['byte_count'] = 0 _write_stream(str_io, tag.tostring()) str_io.seek(0) val = c_reader.read_char(hdr) assert_equal(val, u(' ')) # Now when string has 0 bytes 4 length str_io.seek(0) hdr.set_dims([4,]) val = c_reader.read_char(hdr) assert_array_equal(val, [u(' ')] * 4)
def check_username_for_new_account(person, username, machine_category):
    """ Check the new username for a new account.

    If the username is in use, raises :py:exc:`UsernameTaken`.

    :param person: Owner of new account.
    :param username: Username to validate.
    :param machine_category: Machine category for new account.
    """
    query = Account.objects.filter(
        username__exact=username,
        machine_category=machine_category,
        date_deleted__isnull=True)

    if query.count() > 0:
        raise UsernameTaken(
            six.u('Username already in use on machine category %s.')
            % machine_category)

    if machine_category_account_exists(username, machine_category):
        raise UsernameTaken(
            six.u('Username is already in datastore for machine category %s.')
            % machine_category)

    return username
def coerce_output(s):
    if isinstance(s, ColoredString):
        return six.u(str(s))
    elif isinstance(s, six.binary_type):
        return six.u(s)
    else:
        return s
def _build_illegal_xml_regex():
    """Constructs a regex to match all illegal xml characters.

    Expects to be used against a unicode string."""
    # Construct the range pairs of invalid unicode characters.
    illegal_chars_u = [
        (0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F), (0x7F, 0x84),
        (0x86, 0x9F), (0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF)]

    # For wide builds, we have more.
    if sys.maxunicode >= 0x10000:
        illegal_chars_u.extend(
            [(0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF), (0x3FFFE, 0x3FFFF),
             (0x4FFFE, 0x4FFFF), (0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
             (0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF), (0x9FFFE, 0x9FFFF),
             (0xAFFFE, 0xAFFFF), (0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
             (0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF), (0xFFFFE, 0xFFFFF),
             (0x10FFFE, 0x10FFFF)])

    # Build up an array of range expressions.
    illegal_ranges = [
        "%s-%s" % (six.unichr(low), six.unichr(high))
        for (low, high) in illegal_chars_u]

    # Compile the regex
    return re.compile(six.u('[%s]') % six.u('').join(illegal_ranges))
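# Hedged usage sketch for _build_illegal_xml_regex(): the compiled pattern can be used to
# drop characters that are not allowed in XML 1.0 before serializing text. The helper
# name below is hypothetical, not part of the original module.
_ILLEGAL_XML_RE = _build_illegal_xml_regex()

def strip_illegal_xml_chars(text):
    # Remove every matched illegal character from the (unicode) input string.
    return _ILLEGAL_XML_RE.sub(six.u(''), text)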
def test_generate_role_token(self):
    session = Session(self.opentok, self.session_id,
                      media_mode=MediaModes.routed, location=None)
    token = session.generate_token(role=Roles.moderator)
    assert isinstance(token, text_type)
    assert token_decoder(token)[u('session_id')] == self.session_id
    assert token_decoder(token)[u('role')] == u('moderator')
    assert token_signature_validator(token, self.api_secret)
def __init__(self, authid = None, authrole = None, authmethod = None, authprovider = None): """ Ctor. :param authid: The authentication ID the client is assigned, e.g. `"joe"` or `"*****@*****.**"`. :type authid: str :param authrole: The authentication role the client is assigned, e.g. `"anonymous"`, `"user"` or `"com.myapp.user"`. :type authrole: str :param authmethod: The authentication method that was used to authenticate the client, e.g. `"cookie"` or `"wampcra"`. :type authmethod: str :param authprovider: The authentication provider that was used to authenticate the client, e.g. `"mozilla-persona"`. :type authprovider: str """ if six.PY2: if type(authid) == str: authid = six.u(authid) if type(authrole) == str: authrole = six.u(authrole) if type(authmethod) == str: authmethod = six.u(authmethod) if type(authprovider) == str: authprovider = six.u(authprovider) assert(authid is None or type(authid) == six.text_type) assert(authrole is None or type(authrole) == six.text_type) assert(authmethod is None or type(authmethod) == six.text_type) assert(authprovider is None or type(authprovider) == six.text_type) self.authid = authid self.authrole = authrole self.authmethod = authmethod self.authprovider = authprovider
def on_search(args): # TODO: Decode via actual tty encoding try: q = args.q[0].decode("utf-8") except AttributeError: q = args.q[0] pkg_names = set() # First, check for exact case-insensitive name matches for pkg in session.query(Package).filter(collate(Package.name,"NOCASE")==q).all(): pkg_names.add(pkg.name) # Check for substring name matches for pkg in session.query(Package).filter(Package.name.like(u('%{0}%').format(q))).all(): pkg_names.add(pkg.name) # Check for description matches for pkg in session.query(Package).filter(Package.description.like(u('%{0}%').format(q))).all(): pkg_names.add(pkg.name) if len(pkg_names) == 0: print_(u('No matching packages found.')) return # Nice column formatting max_len_name = max( len(name) for name in pkg_names ) for pkg_name in sorted(pkg_names): pkg = session.query(Package).get(pkg_name) print_(u('{name:{max_len_name}} {version:10} {desc}'.format(name=pkg.name, version=pkg.version, desc=pkg.description, max_len_name=max_len_name)))
def test_unicode_deserialize(self):
    """
    UnicodeAttribute.deserialize
    """
    attr = UnicodeAttribute()
    self.assertEqual(attr.deserialize('foo'), six.u('foo'))
    self.assertEqual(attr.deserialize(u'foo'), six.u('foo'))
def render_structure(fs_source_root, fs_target_root, variables, verbose, renderer): """Recursively copies the given filesystem path `fs_source_root_ to a target directory `fs_target_root`. Any files ending in `.bob` are rendered as templates using the given renderer using the variables dictionary, thereby losing the `.bob` suffix. strings wrapped in `+` signs in file- or directory names will be replaced with values from the variables, i.e. a file named `+name+.py.bob` given a dictionary {'name': 'bar'} would be rendered as `bar.py`. """ if not isinstance(fs_source_root, six.text_type): # pragma: no cover fs_source_root = six.u(fs_source_root) for fs_source_dir, local_directories, local_files in os.walk(fs_source_root): fs_target_dir = path.abspath(path.join(fs_target_root, path.relpath(fs_source_dir, fs_source_root))) for local_file in local_files: if local_file == '.mrbob.ini': continue render_template( path.join(fs_source_dir, local_file), render_filename(fs_target_dir, variables), variables, verbose, renderer, ) for local_directory in local_directories: abs_dir = render_filename(path.join(fs_target_dir, local_directory), variables) if not path.exists(abs_dir): if verbose: print(six.u("mkdir %s") % abs_dir) os.mkdir(abs_dir)
def test_print_(): save = sys.stdout out = sys.stdout = six.moves.StringIO() try: six.print_("Hello,", "person!") finally: sys.stdout = save assert out.getvalue() == "Hello, person!\n" out = six.StringIO() six.print_("Hello,", "person!", file=out) assert out.getvalue() == "Hello, person!\n" out = six.StringIO() six.print_("Hello,", "person!", file=out, end="") assert out.getvalue() == "Hello, person!" out = six.StringIO() six.print_("Hello,", "person!", file=out, sep="X") assert out.getvalue() == "Hello,Xperson!\n" out = six.StringIO() six.print_(six.u("Hello,"), six.u("person!"), file=out) result = out.getvalue() assert isinstance(result, six.text_type) assert result == six.u("Hello, person!\n") six.print_("Hello", file=None) # This works. out = six.StringIO() six.print_(None, file=out) assert out.getvalue() == "None\n"
def _format_final_exc_line(etype, value): valuestr = _some_str(value) if value == 'None' or value is None or not valuestr: line = u("%s\n") % etype else: line = u("%s: %s\n") % (etype, valuestr) return line
def pre_save(cls, self):
    full_name = getattr(self, "fullName", None)
    if full_name is None:
        full_name = "%s %s" % (self.givenName, self.sn)
    self.displayName = six.u('%s (%s)') % (full_name, self.o)
    self.gecos = _a(six.u('%s (%s)') % (full_name, self.o))
def set_bool(x):
    if isinstance(x, bool):
        return x
    elif isinstance(x, integer_types):
        return x != 0
    else:
        return text_type(x).strip().lower() not in {
            u('0'), b('0'), u('n'), b('n'), u('no'), b('no'),
            u('f'), b('f'), u('false'), b('false')}
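# Illustrative checks for set_bool() above; they assume the six helpers it references
# (integer_types, text_type, u, b) are already imported in this module.
assert set_bool(True) is True
assert set_bool(0) is False
assert set_bool('No') is False   # falls in the falsy string set
assert set_bool('yes') is True   # any other string counts as True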
def test_encode(self):
    # _encode must encode unicode strings
    self.assertEqual(_encode(six.u('привет')),
                     six.u('привет').encode('utf-8'))
    # _encode must return byte strings unchanged
    self.assertEqual(_encode(six.u('привет').encode('utf-8')),
                     six.u('привет').encode('utf-8'))
def test_wrong_encoding(self):
    string = String()
    unknown_char = u("\ufffd") * (2 if six.PY3 else 4)
    self.assert_equal(
        string.decode(u("ündecödäble").encode("utf-8"), "ascii"),
        u("%(u)sndec%(u)sd%(u)sble") % {"u": unknown_char}
    )
def address(self, qs, out):
    """simple single entry per line in the format of:
    "full name" <*****@*****.**>;
    """
    out.write(six.u("\n").join(
        six.u('"%s" <%s>;' % (full_name(**ent), ent['email']))
        for ent in qs).encode(self.encoding))
    out.write("\n")
def test_unicode_password(self):
    j = jenkins.Jenkins('{0}'.format(self.base_url),
                        six.u('nonascii'), six.u('\xe9\u20ac'))
    self.assertEqual(j.server, self.make_url(''))
    self.assertEqual(j.auth, b'Basic bm9uYXNjaWk6w6nigqw=')
    self.assertEqual(j.crumb, None)
def load(self, name, package=__package__):
    if name in self.plugins:
        msg = u("Not loading already loaded plugin: {0}").format(name)
        self.logger.warn(msg)
        return msg

    try:
        fqplugin = "{0}.{1}".format(package, name)
        if fqplugin in sys.modules:
            reload(sys.modules[fqplugin])

        m = safe__import__(name, globals(), locals(), package)

        p1 = lambda x: isclass(x) and issubclass(x, BasePlugin)  # noqa
        p2 = lambda x: x is not BasePlugin  # noqa
        predicate = lambda x: p1(x) and p2(x)  # noqa
        plugins = getmembers(m, predicate)

        for name, Plugin in plugins:
            instance = Plugin(*self.init_args, **self.init_kwargs)
            instance.register(self)
            self.logger.debug(u("Registered Component: {0}").format(instance))
            if name not in self.plugins:
                self.plugins[name] = set()
            self.plugins[name].add(instance)

        msg = u("Loaded plugin: {0}").format(name)
        self.logger.info(msg)
        return msg
    except Exception as e:
        msg = u("Could not load plugin: {0} Error: {1}").format(name, e)
        self.logger.error(msg)
        self.logger.error(format_exc())
        return msg
def test_unicode_names(self):
    """ Unicode field names for read and write """
    self.assertArrayEqual(self.dset[six.u('a')], self.data['a'])
    self.dset[six.u('a')] = 42
    data = self.data.copy()
    data['a'] = 42
    self.assertArrayEqual(self.dset[six.u('a')], data['a'])
def __init__(self, zone, vrf, network, template, params=None, loader=None):
    try:
        self.network = IPv4Network(u(str(network)))
        self.ipversion = 4
    except AddressValueError:
        try:
            self.network = IPv6Network(u(str(network)))
            self.ipversion = 6
        except AddressValueError:
            raise ConfigError('invalid network: {}'.format(str(network)))

    if loader is None:
        loader = FileSystemLoader('templates')
    env = Environment(loader=loader,
                      extensions=['jinja2.ext.do', 'jinja2.ext.loopcontrols'])
    add_custom_filters(env)
    add_custom_globals(env, self.ipversion)
    self.template = env.get_template('{0}.yaml'.format(template))

    self.zone = zone
    self.vrf = vrf
    self.params = params if params is not None else {}
    self._rendered = None
    self._data = None
def remove_objects_not_in(self, objects_to_keep, verbosity):
    """
    Deletes all the objects in the database that are not in objects_to_keep.
    - objects_to_keep: A map where the keys are classes, and the values are a
      set of the objects of that class we should keep.
    """
    for class_ in objects_to_keep.keys():
        current = class_.objects.all()
        current_ids = set([x.pk for x in current])
        keep_ids = set([x.pk for x in objects_to_keep[class_]])

        remove_these_ones = current_ids.difference(keep_ids)
        if remove_these_ones:
            for obj in current:
                if obj.pk in remove_these_ones:
                    obj.delete()
                    if verbosity >= 2:
                        print("Deleted object: %s" % six.u(obj))

        if verbosity > 0 and remove_these_ones:
            num_deleted = len(remove_these_ones)
            if num_deleted > 1:
                type_deleted = six.u(class_._meta.verbose_name_plural)
            else:
                type_deleted = six.u(class_._meta.verbose_name)
            print("Deleted %s %s" % (str(num_deleted), type_deleted))
def start_archive(self, session_id, has_audio=True, has_video=True, name=None, output_mode=OutputModes.composed): """ Starts archiving an OpenTok session. Clients must be actively connected to the OpenTok session for you to successfully start recording an archive. You can only record one archive at a time for a given session. You can only record archives of sessions that use the OpenTok Media Router (sessions with the media mode set to routed); you cannot archive sessions with the media mode set to relayed. For more information on archiving, see the `OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_ programming guide. :param String session_id: The session ID of the OpenTok session to archive. :param String name: This is the name of the archive. You can use this name to identify the archive. It is a property of the Archive object, and it is a property of archive-related events in the OpenTok.js library. :param Boolean has_audio: if set to True, an audio track will be inserted to the archive. has_audio is an optional parameter that is set to True by default. If you set both has_audio and has_video to False, the call to the start_archive() method results in an error. :param Boolean has_video: if set to True, a video track will be inserted to the archive. has_video is an optional parameter that is set to True by default. :param OutputModes output_mode: Whether all streams in the archive are recorded to a single file (OutputModes.composed, the default) or to individual files (OutputModes.individual). :rtype: The Archive object, which includes properties defining the archive, including the archive ID. """ if not isinstance(output_mode, OutputModes): raise OpenTokException( u('Cannot start archive, {0} is not a valid output mode'). format(output_mode)) payload = { 'name': name, 'sessionId': session_id, 'hasAudio': has_audio, 'hasVideo': has_video, 'outputMode': output_mode.value } response = requests.post(self.archive_url(), data=json.dumps(payload), headers=self.archive_headers(), proxies=self.proxies) if response.status_code < 300: return Archive(self, response.json()) elif response.status_code == 403: raise AuthError() elif response.status_code == 400: raise RequestError("Session ID is invalid") elif response.status_code == 404: raise NotFoundError("Session not found") elif response.status_code == 409: raise ArchiveError(response.json().get("message")) else: raise RequestError("An unexpected error occurred", response.status_code)
class TestRequest: @pytest.mark.parametrize("message", (None, "it's hard out here...", u("\u57CE\u697C\u4E07\u4F17\u68C0\u9605"))) def test_construct(self, message): # Always use the same URL as different ways to specify a Request's URL # are tested separately. url = "some://url" timeout = 10 request = Request(url, message, timeout) assert request.url is url assert request.message is message assert request.headers == {} assert request.timeout == timeout def test_construct_with_no_message(self): request = Request("some://url") assert request.headers == {} assert request.message is None test_non_ASCII_URLs = [ u("\u4E2D\u539F\u5343\u519B\u9010\u848B"), u("\u57CE\u697C\u4E07\u4F17\u68C0\u9605") ] + [ url_prefix + url_suffix for url_prefix in (u(""), u("Jurko")) for url_suffix in (unichr(128), unichr(200), unichr(1000)) ] @pytest.mark.parametrize( "url", test_non_ASCII_URLs + # unicode strings [x.encode("utf-8") for x in test_non_ASCII_URLs]) # byte strings def test_non_ASCII_URL(self, url): """Transport Request should reject URLs with non-ASCII characters.""" pytest.raises(UnicodeError, Request, url) @pytest.mark.parametrize( ("url", "headers", "message"), (("my URL", {}, ""), ("", { "aaa": "uf-uf" }, "for a bitch it's haaaard..."), ("http://rumple-fif/muka-laka-hiki", { "uno": "eins", "zwei": "due" }, """\ I'm here to kick ass, and chew bubble gum... and I'm all out of gum."""), ("", {}, u("\u0161u\u0107-mu\u0107 pa o\u017Ee\u017Ei.. za 100 " "\u20AC\n\nwith multiple\nlines...")), ("", {}, "\n\n\n\n\n\n"), ("", {}, u("\u4E2D\u539F\u5343\u519B\u9010\u848B")))) def test_string_representation_with_message(self, url, headers, message): for key, value in list(headers.items()): old_key = key if isinstance(key, text_type): key = key.encode("utf-8") del headers[old_key] if isinstance(value, text_type): value = value.encode("utf-8") headers[key] = value if isinstance(message, text_type): message = message.encode("utf-8") request = Request(url, message) request.headers = headers expected = u("""\ URL: %s HEADERS: %s MESSAGE: %s""") % (url, request.headers, message.decode("raw_unicode_escape")) assert text_type(request) == expected if sys.version_info < (3, ): assert str(request) == expected.encode("utf-8") def test_string_representation_with_no_message(self): url = "look at my silly little URL" headers = {suds.byte_str("yuck"): suds.byte_str("ptooiii...")} request = Request(url) request.headers = headers expected = u("""\ URL: %s HEADERS: %s""") % (url, request.headers) assert text_type(request) == expected if sys.version_info < (3, ): assert str(request) == expected.encode("utf-8") test_URLs = [ u(""), u("http://host/path/name"), u("cogito://ergo/sum"), u("haleluya"), u("look at me flyyyyyyyy"), unichr(127), u("Jurko") + unichr(127) ] @pytest.mark.parametrize("url", test_URLs + [url.encode("ascii") for url in test_URLs]) def test_URL(self, url): """ Transport Request accepts its URL as either a byte or a unicode string. Internally URL information is kept as the native Python str type. """ request = Request(url) assert isinstance(request.url, str) if url.__class__ is str: assert request.url is url elif url.__class__ is u: assert request.url == url.encode("ascii") # Python 2. else: assert request.url == url.decode("ascii") # Python 3. 
test_URLs = [unichr(0), u("Jurko") + unichr(0)] if sys.version_info <= (3, 6) else [ ] # "https://bugs.python.org/issue32745" @pytest.mark.parametrize("url", test_URLs + [url.encode("ascii") for url in test_URLs]) def test_URL_null_bytes(self, url): """ Transport Request accepts its URL as either a byte or a unicode string. Internally URL information is kept as the native Python str type. """ request = Request(url) assert isinstance(request.url, str) if url.__class__ is str: assert request.url is url elif url.__class__ is u: assert request.url == url.encode("ascii") # Python 2. else: assert request.url == url.decode("ascii") # Python 3.
def paper_to_quickstatements(paper): """Convert paper to Quickstatements. Convert a paper represented as a dict in to Magnus Manske's Quickstatement format for entry into Wikidata. Parameters ---------- paper : dict Scraped paper represented as a dict. Returns ------- qs : str Quickstatements as a string References ---------- https://quickstatements.toolforge.org Notes ----- title, authors (list), date, doi, year, language_q, volume, issue, pages, number_of_pages, url, full_text_url, published_in_q are recognized. `date` takes precedence over `year`. """ qs = u("CREATE\n") title = escape_string(paper['title']) qs += u('LAST\tLen\t"{}"\n').format(title) # Instance of scientific article qs += 'LAST\tP31\tQ13442814\n' # Title qs += u('LAST\tP1476\ten:"{}"\n').format(title) # DOI if 'doi' in paper: qs += u('LAST\tP356\t"{}"\n').format(escape_string(paper['doi'])) # Authors for n, author in enumerate(paper['authors'], start=1): qs += u('LAST\tP2093\t"{}"\tP1545\t"{}"\n').format(author, n) # Published in if 'date' in paper: # Day precision qs += 'LAST\tP577\t+{}T00:00:00Z/11\n'.format(paper['date']) elif 'year' in paper: # Year precision qs += 'LAST\tP577\t+{}-01-01T00:00:00Z/9\n'.format(paper['year']) # Volume if 'volume' in paper: qs += u('LAST\tP478\t"{}"\n').format(escape_string(paper['volume'])) # Issue if 'issue' in paper: qs += u('LAST\tP433\t"{}"\n').format(escape_string(paper['issue'])) if 'pages' in paper: qs += u('LAST\tP304\t"{}"\n').format(escape_string(paper['pages'])) if 'number_of_pages' in paper: qs += u('LAST\tP1104\t{}\n').format(paper['number_of_pages']) # Language if 'language_q' in paper: qs += 'LAST\tP407\t{}\n'.format(paper['language_q']) # Homepage if 'url' in paper: qs += 'LAST\tP856\t"{}"\n'.format(paper['url']) # Fulltext URL if 'full_text_url' in paper: qs += 'LAST\tP953\t"{}"\n'.format(paper['full_text_url']) # Published in if 'published_in_q' in paper and paper['published_in_q']: qs += 'LAST\tP1433\t{}\n'.format(paper['published_in_q']) return qs
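# A small, hypothetical input showing the dict shape paper_to_quickstatements() expects.
# All identifiers below are placeholders (except Q1860, the Wikidata item for English);
# running it also assumes escape_string() and u() from this module are in scope.
example_paper = {
    'title': 'An example title',
    'authors': ['Alice Example', 'Bob Example'],
    'date': '2020-01-15',
    'doi': '10.1234/EXAMPLE',
    'language_q': 'Q1860',
    'published_in_q': 'Q123',  # placeholder venue item
}
print(paper_to_quickstatements(example_paper))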
def __repr__(self):
    return six.u('AttrDict({contents})').format(
        contents=super(AttrDict, self).__repr__())
import requests from ..qs import paper_to_quickstatements from ..query import iso639_to_q, issn_to_qs from ..utils import escape_string USER_AGENT = 'Scholia' HEADERS = {'User-Agent': USER_AGENT} PAPER_TO_Q_QUERY = u(""" SELECT ?paper WHERE {{ OPTIONAL {{ ?label rdfs:label "{label}"@en . }} OPTIONAL {{ ?title wdt:P1476 "{title}"@en . }} OPTIONAL {{ ?url wdt:P953 <{url}> . }} BIND(COALESCE(?full_text_url, ?url, ?label, ?title) AS ?paper) }} """) # SPARQL Endpoint for Wikidata Query Service WDQS_URL = 'https://query.wikidata.org/sparql' def paper_to_q(paper): """Find Q identifier for paper. Parameters ---------- paper : dict Paper represented as dictionary.
def generate_token(self, session_id, role=Roles.publisher, expire_time=None, data=None, initial_layout_class_list=[]): """ Generates a token for a given session. :param String session_id: The session ID of the session to be accessed by the client using the token. :param String role: The role for the token. Valid values are defined in the Role class: * `Roles.subscriber` -- A subscriber can only subscribe to streams. * `Roles.publisher` -- A publisher can publish streams, subscribe to streams, and signal. (This is the default value if you do not specify a role.) * `Roles.moderator` -- In addition to the privileges granted to a publisher, in clients using the OpenTok.js 2.2 library, a moderator can call the `forceUnpublish()` and `forceDisconnect()` method of the Session object. :param int expire_time: The expiration time of the token, in seconds since the UNIX epoch. The maximum expiration time is 30 days after the creation time. The default expiration time is 24 hours after the token creation time. :param String data: A string containing connection metadata describing the end-user. For example, you can pass the user ID, name, or other data describing the end-user. The length of the string is limited to 1000 characters. This data cannot be updated once it is set. :param list initial_layout_class_list: An array of class names (strings) to be used as the initial layout classes for streams published by the client. Layout classes are used in customizing the layout of videos in `live streaming broadcasts <https://tokbox.com/developer/guides/broadcast/#live-streaming>`_ and `composed archives <https://tokbox.com/developer/guides/archiving/layout-control.html>`_ :rtype: The token string. """ # normalize # expire_time can be an integer, a datetime object, or anything else that can be coerced into an integer # after this block it will only be an integer if expire_time is not None: if isinstance(expire_time, datetime): expire_time = calendar.timegm(expire_time.utctimetuple()) else: try: expire_time = int(expire_time) except (ValueError, TypeError): raise OpenTokException( u('Cannot generate token, invalid expire time {0}'). format(expire_time)) else: expire_time = int(time.time()) + (60 * 60 * 24) # 1 day # validations if not text_type(session_id): raise OpenTokException( u('Cannot generate token, session_id was not valid {0}'). 
format(session_id)) if not isinstance(role, Roles): raise OpenTokException( u('Cannot generate token, {0} is not a valid role').format( role)) now = int(time.time()) if expire_time < now: raise OpenTokException( u('Cannot generate token, expire_time is not in the future {0}' ).format(expire_time)) if expire_time > now + (60 * 60 * 24 * 30): # 30 days raise OpenTokException( u('Cannot generate token, expire_time is not in the next 30 days {0}' ).format(expire_time)) if data and len(data) > 1000: raise OpenTokException( u('Cannot generate token, data must be less than 1000 characters' )) if initial_layout_class_list and not all( text_type(c) for c in initial_layout_class_list): raise OpenTokException( u('Cannot generate token, all items in initial_layout_class_list must be strings' )) initial_layout_class_list_serialized = u(' ').join( initial_layout_class_list) if len(initial_layout_class_list_serialized) > 1000: raise OpenTokException( u('Cannot generate token, initial_layout_class_list must be less than 1000 characters' )) # decode session id to verify api_key sub_session_id = session_id[2:] sub_session_id_bytes = sub_session_id.encode('utf-8') sub_session_id_bytes_padded = sub_session_id_bytes + ( b('=') * (-len(sub_session_id_bytes) % 4)) try: decoded_session_id = base64.b64decode(sub_session_id_bytes_padded, b('-_')) parts = decoded_session_id.decode('utf-8').split(u('~')) except Exception as e: raise OpenTokException( u('Cannot generate token, the session_id {0} was not valid'). format(session_id)) if self.api_key not in parts: raise OpenTokException( u('Cannot generate token, the session_id {0} does not belong to the api_key {1}' ).format(session_id, self.api_key)) data_params = dict( session_id=session_id, create_time=now, expire_time=expire_time, role=role.value, nonce=random.randint(0, 999999), initial_layout_class_list=initial_layout_class_list_serialized) if data: data_params['connection_data'] = data data_string = urlencode(data_params, True) sig = self._sign_string(data_string, self.api_secret) decoded_base64_bytes = u( 'partner_id={api_key}&sig={sig}:{payload}').format( api_key=self.api_key, sig=sig, payload=data_string) if PY3: decoded_base64_bytes = decoded_base64_bytes.encode('utf-8') token = u('{sentinal}{base64_data}').format( sentinal=self.TOKEN_SENTINEL, base64_data=base64.b64encode(decoded_base64_bytes).decode()) return token
class MediaModes(Enum): """List of valid settings for the mediaMode parameter of the OpenTok.create_session() method.""" routed = u('disabled') """The session will transmit streams using the OpenTok Media Server.""" relayed = u('enabled') """The session will attempt to transmit streams directly between clients. If two clients
def _parse_record(self, raw_record: dict, ipv4: Optional[str], ipv6: Optional[str]) -> List[Record]: domain_name = raw_record['domain'] if not isinstance(domain_name, six.string_types): raise TypeError("domain's name must be a string") domain_name = domain_name.strip().lower() if not domain_name: raise ValueError("empty domain name") domain = Domain() domain.name = domain_name record = Record(domain=domain) optional_record = None tmp = raw_record.get('name') if tmp is not None: record.name = str(tmp) if not record.name: raise ValueError("empty record name") type_given = False if 'type' in raw_record: record.type = raw_record['type'] type_given = True target_given = False tmp = raw_record.get('target') if tmp is not None: record.target = str(tmp).strip() target_given = True if 'ttl' in raw_record: record.ttl = raw_record['ttl'] if target_given and record.target != 'auto': try: addr = ipaddress.ip_address(six.u(record.target)) except ValueError: pass else: if isinstance(addr, ipaddress.IPv4Address): if type_given: if record.type == RecordType.AAAA: raise ValueError("cannot use ipv4 for AAAA record") else: record.type = RecordType.A elif isinstance(addr, ipaddress.IPv6Address): if type_given: if record.type == RecordType.A: raise ValueError("cannot use ipv6 for A record") else: record.type = RecordType.AAAA else: # target not given or target is 'auto' if type_given: if record.type == RecordType.AAAA: if not ipv6: raise ValueError("empty ipv6") record.target = ipv6 elif record.type == RecordType.A: if not ipv4: raise ValueError("empty ipv4") record.target = ipv4 else: # type not given if not ipv4: raise ValueError("empty ipv4") record.type = RecordType.A record.target = ipv4 if ipv6 is not None: optional_record = copy(record) optional_record.type = RecordType.AAAA optional_record.target = ipv6 return [record] if optional_record is None else [record, optional_record]
def parse(link, data_to_store):
    print(link)
    resp = requests.get(url=link, cookies={'over18': '1'}, verify=False)
    if resp.status_code != 200:
        print('invalid url:', resp.url)
        return json.dumps({"error": "invalid url"},
                          sort_keys=True, ensure_ascii=False)

    soup = BeautifulSoup(resp.text, 'html.parser')
    main_content = soup.find(id="main-content")
    metas = main_content.select('div.article-metaline')
    author, title, date = '', '', ''
    if metas:
        _author = metas[0].select('span.article-meta-value')[0]
        _title = metas[1].select('span.article-meta-value')[0]
        _date = metas[2].select('span.article-meta-value')[0]
        author = _author.string if _author else author
        title = _title.string if _title else title
        date = _date.string if _date else date

        # remove meta nodes
        for meta in metas:
            meta.extract()
        for meta in main_content.select('div.article-metaline-right'):
            meta.extract()

    # remove and keep push nodes
    pushes = main_content.find_all('div', class_='push')
    for push in pushes:
        push.extract()

    try:
        ip = main_content.find(text=re.compile(u'※ 發信站:'))
        ip = re.search('[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*', ip).group()
    except:
        ip = ""

    filtered = [
        v for v in main_content.stripped_strings
        if v[0] not in [u'※', u'◆'] and v[:2] not in [u'--']
    ]
    expr = re.compile(
        u(r'[^\u4e00-\u9fa5\u3002\uff1b\uff0c\uff1a\u201c\u201d\uff08\uff09\u3001\uff1f\u300a\u300b\s\w:/-_.?~%()]'))
    for i in range(len(filtered)):
        filtered[i] = re.sub(expr, '', filtered[i])

    filtered = [_f for _f in filtered if _f]  # remove empty strings
    content = ' '.join(filtered)
    content = re.sub(r'(\s)+', ' ', content)

    messages = []
    for push in pushes:
        if not push.find('span', 'push-tag'):
            continue
        push_userid = push.find('span', 'push-userid').string.strip(' \t\n\r')
        # if find is None: find().strings -> list -> ' '.join; else the current way
        push_content = push.find('span', 'push-content').strings
        push_content = ' '.join(push_content)[1:].strip(' \t\n\r')  # remove ':'
        push_ipdatetime = push.find('span', 'push-ipdatetime').string.strip(' \t\n\r')
        messages.append(push_userid + ":" + push_content)

    data = {
        'title': title,
        'link': link,
        'author': author,
        'date': date,
        'content': content,
        'ip': ip,
        'messages': " ".join(messages)
    }
    data_to_store.append(
        json.dumps(data, sort_keys=False, ensure_ascii=False) + ",")
def create_session(self, location=None, media_mode=MediaModes.relayed,
                   archive_mode=ArchiveModes.manual):
    """
    Creates a new OpenTok session and returns the session ID, which uniquely
    identifies the session.

    For example, when using the OpenTok JavaScript library, use the session ID
    when calling the OT.initSession() method (to initialize an OpenTok session).

    OpenTok sessions do not expire. However, authentication tokens do expire (see
    the generateToken() method). Also note that sessions cannot explicitly be destroyed.

    A session ID string can be up to 255 characters long.

    Calling this method results in an OpenTokException in the event of an error.
    Check the error message for details.

    You can also create a session using the OpenTok
    `REST API <https://tokbox.com/opentok/api/#session_id_production>`_ or
    `the OpenTok dashboard <https://dashboard.tokbox.com/projects>`_.

    :param String media_mode: Determines whether the session will transmit streams
        using the OpenTok Media Router (MediaMode.routed) or not (MediaMode.relayed).
        By default, the setting is MediaMode.relayed.

        With the media_mode property set to MediaMode.relayed, the session will attempt
        to transmit streams directly between clients. If clients cannot connect due to
        firewall restrictions, the session uses the OpenTok TURN server to relay
        audio-video streams.

        The `OpenTok Media Router <https://tokbox.com/opentok/tutorials/create-session/#media-mode>`_
        provides the following benefits:

        * The OpenTok Media Router can decrease bandwidth usage in multiparty sessions.
          (When the mediaMode property is set to MediaMode.relayed, each client must
          send a separate audio-video stream to each client subscribing to it.)
        * The OpenTok Media Router can improve the quality of the user experience
          through audio fallback and video recovery (see
          https://tokbox.com/platform/fallback). With these features, if a client's
          connectivity degrades to a degree that it does not support video for a stream
          it's subscribing to, the video is dropped on that client (without affecting
          other clients), and the client receives audio only. If the client's
          connectivity improves, the video returns.
        * The OpenTok Media Router supports the archiving feature, which lets you
          record, save, and retrieve OpenTok sessions (see
          http://tokbox.com/platform/archiving).

    :param String archive_mode: Whether the session is automatically archived
        (ArchiveModes.always) or not (ArchiveModes.manual). By default, the setting is
        ArchiveModes.manual, and you must call the start_archive() method of the
        OpenTok object to start archiving. To archive the session (either automatically
        or not), you must set the media_mode parameter to MediaModes.routed.

    :param String location: An IP address that the OpenTok servers will use to situate
        the session in its global network. If you do not set a location hint, the
        OpenTok servers will be based on the first client connecting to the session.

    :rtype: The Session object. The session_id property of the object is the session ID.
    """

    # build options
    options = {}
    if not isinstance(media_mode, MediaModes):
        raise OpenTokException(
            u('Cannot create session, {0} is not a valid media mode').format(media_mode))
    if not isinstance(archive_mode, ArchiveModes):
        raise OpenTokException(
            u('Cannot create session, {0} is not a valid archive mode').format(archive_mode))
    if archive_mode == ArchiveModes.always and media_mode != MediaModes.routed:
        raise OpenTokException(
            u('A session with always archive mode must also have the routed media mode.'))
    options[u('p2p.preference')] = media_mode.value
    options[u('archiveMode')] = archive_mode.value
    if location:
        # validate IP address
        try:
            inet_aton(location)
        except:
            raise OpenTokException(
                u('Cannot create session. Location must be either None or a valid IPv4 address {0}').format(location))
        options[u('location')] = location

    try:
        response = requests.post(self.session_url(), data=options,
                                 headers=self.headers(), proxies=self.proxies)
        response.encoding = 'utf-8'

        if response.status_code == 403:
            raise AuthError('Failed to create session, invalid credentials')
        if not response.content:
            raise RequestError()
        dom = xmldom.parseString(response.content)
    except Exception as e:
        raise RequestError('Failed to create session: %s' % str(e))

    try:
        error = dom.getElementsByTagName('error')
        if error:
            error = error[0]
            raise AuthError('Failed to create session (code=%s): %s' % (
                error.attributes['code'].value,
                error.firstChild.attributes['message'].value))

        session_id = dom.getElementsByTagName(
            'session_id')[0].childNodes[0].nodeValue
        return Session(self, session_id, location=location,
                       media_mode=media_mode, archive_mode=archive_mode)
    except Exception as e:
        raise OpenTokException('Failed to generate session: %s' % str(e))
def __str__(cls, self):
    return six.u("%s") % (self.displayName or self.cn)
def clean_pkg_version(version):
    """Uses pip to prepare a package version string, from our internal version."""
    return six.u(pep440_version(str(version).replace("==", "")))
def __init__(self, url, key=None, secret=None, timeout=5, context=None): """ Create a new Crossbar.io push client. The only mandatory argument is the Push service endpoint of the Crossbar.io instance to push to. For signed pushes, provide authentication key and secret. If those are not given, unsigned pushes are performed. :param url: URL of the HTTP bridge of Crossbar.io (e.g. http://example.com:8080/push). :type url: str :param key: Optional key to use for signing requests. :type key: str :param secret: When using signed request, the secret corresponding to key. :type secret: str :param timeout: Timeout for requests. :type timeout: int :param context: If the HTTP bridge is running on HTTPS (that is securely over TLS), then the context provides the SSL settings the client should use (e.g. the certificate chain against which to verify the server certificate). This parameter is only available on Python 2.7.9+ and Python 3 (otherwise the parameter is silently ignored!). See: https://docs.python.org/2/library/ssl.html#ssl.SSLContext :type context: obj or None """ if six.PY2: if type(url) == str: url = six.u(url) if type(key) == str: key = six.u(key) if type(secret) == str: secret = six.u(secret) assert (type(url) == six.text_type) assert ((key and secret) or (not key and not secret)) assert (key is None or type(key) == six.text_type) assert (secret is None or type(secret) == six.text_type) assert (type(timeout) == int) if _HAS_SSL and _HAS_SSL_CLIENT_CONTEXT: assert (context is None or isinstance(context, ssl.SSLContext)) self._seq = 1 self._key = key self._secret = secret self._endpoint = _parse_url(url) self._endpoint['headers'] = { "Content-type": "application/json", "User-agent": "crossbarconnect-python" } if self._endpoint['secure']: if not _HAS_SSL: raise Exception( "Bridge URL is using HTTPS, but Python SSL module is missing" ) if _HAS_SSL_CLIENT_CONTEXT: self._connection = HTTPSConnection(self._endpoint['host'], self._endpoint['port'], timeout=timeout, context=context) else: self._connection = HTTPSConnection(self._endpoint['host'], self._endpoint['port'], timeout=timeout) else: self._connection = HTTPConnection(self._endpoint['host'], self._endpoint['port'], timeout=timeout)
def _generate_desired_capabilities(self, testname): # Generate desired capabilities object using config settings. browser_type = self._config_reader.get( WebDriverFactory.BROWSER_TYPE_CONFIG) browser_constant_dict = { self.HTMLUNIT: DesiredCapabilities.HTMLUNIT, self.HTMLUNITWITHJS: DesiredCapabilities.HTMLUNITWITHJS, self.ANDROID: DesiredCapabilities.ANDROID, self.CHROME: DesiredCapabilities.CHROME, self.FIREFOX: DesiredCapabilities.FIREFOX, self.INTERNETEXPLORER: DesiredCapabilities.INTERNETEXPLORER, self.IPAD: DesiredCapabilities.IPAD, self.IPHONE: DesiredCapabilities.IPHONE, self.OPERA: DesiredCapabilities.OPERA, self.SAFARI: DesiredCapabilities.SAFARI, self.PHANTOMJS: DesiredCapabilities.PHANTOMJS, self.OTHER: { 'browserName': '' } # Blank Desired Capabilities. } try: # Get a copy of the desired capabilities object. (to avoid # overwriting the global.) desired_capabilities = browser_constant_dict[browser_type].copy() except KeyError: raise TypeError( u("Unsupported Browser Type {0}").format(browser_type)) # Get additional desired properties from config file and add them in. other_desired_capabilities = self._config_reader.get( WebDriverFactory.DESIRED_CAPABILITIES_CONFIG) for prop in other_desired_capabilities: value = other_desired_capabilities[prop] if type(other_desired_capabilities[prop]) is dict: # do some recursive call to flatten this setting. self.__flatten_capabilities(desired_capabilities, prop, other_desired_capabilities[prop]) else: # Handle has a single string value. if isinstance(value, basestring): desired_capabilities[prop] = value # Version is specified as a string, but we'll allow user to use # an int for convenience. elif prop == "version": desired_capabilities[prop] = str(value) else: desired_capabilities[prop] = value # Set the test name property if specified in the WTF_TESTNAME var. try: test_name = self._config_reader.get("TESTNAME") desired_capabilities['name'] = test_name except KeyError: pass # No test name is specified, use the default. # If there is desired capabilities properties specified in the OS ENV vars, # override the desired capabilities value with those values. for key in self._env_vars.keys(): if key.startswith(self.DESIRED_CAPABILITIES_ENV_PREFIX): dc_key = key[len(self.DESIRED_CAPABILITIES_ENV_PREFIX):] desired_capabilities[dc_key] = self._env_vars[key] # Append optional testname postfix if supplied. if testname: if desired_capabilities['name']: desired_capabilities['name'] += "-" + testname else: # handle case where name is not specified. desired_capabilities['name'] = testname return desired_capabilities
def escape(t):
    return t.replace(u'"""', six.u(r'\"\"\"'))
#Other non-characters which are not strictly forbidden but #discouraged. RESTRICTED_RANGES = [(0x7F, 0x84), (0x86, 0x9F), (0xFDD0, 0xFDDF)] #check for a wide build if sys.maxunicode > 0xFFFF: RESTRICTED_RANGES += [(0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF), (0x3FFFE, 0x3FFFF), (0x4FFFE, 0x4FFFF), (0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF), (0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF), (0x9FFFE, 0x9FFFF), (0xAFFFE, 0xAFFFF), (0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF), (0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF), (0xFFFFE, 0xFFFFF), (0x10FFFE, 0x10FFFF)] ILLEGAL_REGEX_STR = \ six.u('[') + \ six.u('').join(["%s-%s" % (_unichr(l), _unichr(h)) for (l, h) in ILLEGAL_RANGES]) + \ six.u(']') RESTRICTED_REGEX_STR = \ six.u('[') + \ six.u('').join(["%s-%s" % (_unichr(l), _unichr(h)) for (l, h) in RESTRICTED_RANGES]) + \ six.u(']') _ILLEGAL_REGEX = re.compile(ILLEGAL_REGEX_STR, re.U) _RESTRICTED_REGEX = re.compile(RESTRICTED_REGEX_STR, re.U) def string_cleanup(string, keep_restricted=False):
def rewrite_standard_width(cls, s):
    return six.u('').join([cls.full_width_digit_map.get(c, c) for c in s])
def publish(self, topic, *args, **kwargs): """ Publish an event to subscribers on specified topic via Crossbar.io HTTP bridge. The event payload (positional and keyword) can be of any type that can be serialized to JSON. If `kwargs` contains an `options` attribute, this is expected to be a dictionary with the following possible parameters: * `exclude`: A list of WAMP session IDs to exclude from receivers. * `eligible`: A list of WAMP session IDs eligible as receivers. :param topic: Topic to push to. :type topic: str :param args: Arbitrary application payload for the event (positional arguments). :type args: list :param kwargs: Arbitrary application payload for the event (keyword arguments). :type kwargs: dict :returns int -- The event publication ID assigned by the broker. """ if six.PY2 and type(topic) == str: topic = six.u(topic) assert (type(topic) == six.text_type) # this will get filled and later serialized into HTTP/POST body event = {'topic': topic} if 'options' in kwargs: event['options'] = kwargs.pop('options') assert (type(event['options']) == dict) if args: event['args'] = args if kwargs: event['kwargs'] = kwargs try: body = json.dumps(event, separators=(',', ':')) if six.PY3: body = body.encode('utf8') except Exception as e: raise Exception( "invalid event payload - not JSON serializable: {0}".format(e)) params = { 'timestamp': _utcnow(), 'seq': self._seq, } if self._key: # if the request is to be signed, create extra fields and signature params['key'] = self._key params['nonce'] = random.randint(0, 9007199254740992) # HMAC[SHA256]_{secret} (key | timestamp | seq | nonce | body) => signature hm = hmac.new(self._secret.encode('utf8'), None, hashlib.sha256) hm.update(params['key'].encode('utf8')) hm.update(params['timestamp'].encode('utf8')) hm.update(u"{0}".format(params['seq']).encode('utf8')) hm.update(u"{0}".format(params['nonce']).encode('utf8')) hm.update(body) signature = base64.urlsafe_b64encode(hm.digest()) params['signature'] = signature self._seq += 1 path = "{0}?{1}".format(parse.quote(self._endpoint['path']), parse.urlencode(params)) # now issue the HTTP/POST self._connection.request('POST', path, body, self._endpoint['headers']) response = self._connection.getresponse() response_body = response.read().decode() if response.status != 202: raise Exception( "publication request failed {0} [{1}] - {2}".format( response.status, response.reason, response_body)) try: res = json.loads(response_body) except Exception as e: raise Exception("publication request bogus result - {0}".format(e)) return res['id']
def numeric_phrase(cls, key, num, language, country=None, dictionaries=(), strict_numeric=False, is_alpha=False): has_alpha = False has_numeric = True is_integer = False is_none = False if num is not None: try: num_int = int(num) is_integer = True except ValueError: try: num_float = float(num) except ValueError: tokens = tokenize(safe_decode(num)) has_numeric = False for t, c in tokens: if c == token_types.NUMERIC: has_numeric = True if any((ch.isalpha() for ch in t)): has_alpha = True if strict_numeric and has_alpha: return safe_decode(num) else: is_none = True values, probs = None, None if is_alpha: values, probs = address_config.alternative_probabilities( '{}.alpha'.format(key), language, dictionaries=dictionaries, country=country) # Pick a phrase given the probability distribution from the config if values is None: values, probs = address_config.alternative_probabilities( key, language, dictionaries=dictionaries, country=country) if not values: return safe_decode(num) if not is_none else None phrase, phrase_props = weighted_choice(values, probs) values = [] probs = [] # Dictionaries are lowercased, so title case here if phrase_props.get('title_case', True): phrase = phrase.title() ''' There are a few ways we can express the number itself 1. Alias it as some standalone word like basement (for floor "-1") 2. Use the number itself, so "Floor 2" 3. Append/prepend an affix e.g. 2/F for second floor 4. As an ordinal expression e.g. "2nd Floor" ''' have_standalone = False have_null = False for num_type in ('standalone', 'null', 'numeric', 'numeric_affix', 'ordinal'): key = '{}_probability'.format(num_type) prob = phrase_props.get(key) if prob is not None: if num_type == 'standalone': have_standalone = True elif num_type == 'null': have_null = True values.append(num_type) probs.append(prob) elif num_type in phrase_props: values.append(num_type) probs.append(1.0) break if not probs or is_none: return phrase # If we're using something like "Floor A" or "Unit 2L", remove ordinal/affix items if has_alpha: values, probs = zip(*[(v, p) for v, p in zip(values, probs) if v in ('numeric', 'null', 'standalone')]) total = float(sum(probs)) if isclose(total, 0.0): return None probs = [p / total for p in probs] probs = cdf(probs) if len(values) < 2: if have_standalone: num_type = 'standalone' elif have_null: num_type = 'null' else: num_type = 'numeric' else: num_type = weighted_choice(values, probs) if num_type == 'standalone': return phrase elif num_type == 'null': return safe_decode(num) props = phrase_props[num_type] if is_integer: num_int = int(num) if phrase_props.get('number_abs_value', False): num_int = abs(num_int) num = num_int if 'number_min_abs_value' in phrase_props and num_int < phrase_props[ 'number_min_abs_value']: return None if 'number_max_abs_value' in phrase_props and num_int > phrase_props[ 'number_max_abs_value']: return None if phrase_props.get('number_subtract_abs_value'): num_int -= phrase_props['number_subtract_abs_value'] num = num_int num = safe_decode(num) digits_props = props.get('digits') if digits_props: # Inherit the gender and category e.g. for ordinals for k in ('gender', 'category'): if k in props: digits_props[k] = props[k] num = Digits.rewrite(num, language, digits_props, num_type=Digits.CARDINAL if num_type != 'ordinal' else Digits.ORDINAL) # Do we add the numeric phrase e.g. 
# Floor No 1
add_number_phrase = props.get('add_number_phrase', False)
if add_number_phrase and random.random() < props['add_number_phrase_probability']:
    num = Number.phrase(num, language, country=country)

whitespace_default = True

if num_type == 'numeric_affix':
    phrase = props['affix']
    if props.get('upper_case', True):
        phrase = phrase.upper()
    if 'zero_pad' in props and num.isdigit():
        num = num.rjust(props['zero_pad'], props.get('zero_char', '0'))
    whitespace_default = False
elif num_type == 'ordinal' and safe_decode(num).isdigit():
    ordinal_expression = ordinal_expressions.suffixed_number(
        num, language, gender=props.get('gender', None))

    if ordinal_expression is not None:
        num = ordinal_expression

if 'null_phrase_probability' in props and (
        num_type == 'ordinal' or
        (has_alpha and (has_numeric or 'null_phrase_alpha_only' in props))):
    if random.random() < props['null_phrase_probability']:
        return num

direction = props['direction']
whitespace = props.get('whitespace', whitespace_default)

whitespace_probability = props.get('whitespace_probability')
if whitespace_probability is not None:
    whitespace = random.random() < whitespace_probability

# Occasionally switch up if direction_probability is specified
if random.random() > props.get('direction_probability', 1.0):
    if direction == 'left':
        direction = 'right'
    elif direction == 'right':
        direction = 'left'

whitespace_phrase = six.u(' ') if whitespace else six.u('')
# Phrase goes to the left of the number
if direction == 'left':
    return six.u('{}{}{}').format(phrase, whitespace_phrase, num)
# Phrase goes to the right of the number
elif direction == 'right':
    return six.u('{}{}{}').format(num, whitespace_phrase, phrase)
# Need to specify a direction, otherwise return naked number
else:
    return safe_decode(num)
def _filter_model(pkg, args):
    to_search = [six.u(pkg["name"]), six.u(pkg["summary"])]
    return util.match_filters(args.terms, to_search)
def notify(args): notifier = Notifier(from_addr=ICINGA_EMAIL) env = (dict( TARGET_TYPE='service', NAGIOS_LONGDATETIME='2016-05-11 16:30:50 +8000', NAGIOS_NOTIFICATIONTYPE='PROBLEM', NAGIOS_HOSTALIAS='sa', NAGIOS_SERVICEDESC='fakeservice', NAGIOS_SERVICEOUTPUT="整个中文试试", NAGIOS_SERVICESTATE='CRITICAL', NOTIFICATIONAUTHORNAME='sysadmin', NOTIFICATIONCOMMENT='没病走两步~', NOTIFICATION_IS_ARCHIVE=False, NAGIOS_CONTACTNAME='shuaisa', SERVICE_DURATION_SEC='5.001102', ) if args.test else os.environ) unicode_env = {} for name, value in env.items(): if isinstance(value, six.string_types): unicode_env[six.ensure_text(name)] = six.ensure_text(value) else: unicode_env[six.ensure_text(name)] = value env = AttrDict(unicode_env, _default_value=six.u('')) short_env = dict(type=env.NAGIOS_NOTIFICATIONTYPE[:3].upper(), host=env.NAGIOS_HOSTALIAS, hoststate=env.HOSTSTATE, service=env.NAGIOS_SERVICEDESC, time=' '.join(env.NAGIOS_LONGDATETIME.split()[:2]), extra=(env.NAGIOS_HOSTOUTPUT if env.TARGET_TYPE == 'host' else env.NAGIOS_SERVICEOUTPUT), link='', wiki_base_url=ALERT_WIKI_BASE_URL.rstrip(' /')) duration = env.SERVICE_DURATION_SEC if env.TARGET_TYPE == 'service' \ else env.HOST_DURATION_SEC short_env = AttrDict(short_env, _default_value='') ack_link = icinga_cluster_config.get_ack_link(env) reboot_host_link = icinga_cluster_config.get_reboot_host_link(env) icinga_link = icinga_cluster_config.get_icinga_link(env) for type_ in NOTIFY_TYPES: values = vars(args)[type_] if values: addrs = [i for v in values for i in re.split(r'[,\s]+', v)] addrs = [a for a in addrs if a] if not addrs: logger.warning('ignore empty %s addrs' % type_) continue title, content = render_notification( env=env, short_env=short_env, notify_type=type_, ack_link=ack_link, reboot_host_link=reboot_host_link, icinga_link=icinga_link) try: ok = add_notification(env.NAGIOS_NOTIFICATIONTYPE, short_env.host, short_env.hoststate, short_env.service, content, type_, ', '.join(addrs), duration) logger.info('notification gateway permit: %s', ok) except Exception: # we catch the exception and send it to sentry, but let the program continue to run report() logger.exception('add notification to gateway failed: ') ok = True if ok: try: getattr(notifier, type_)(addrs, title=title, content=content) except Exception as e: report() logger.error('Notifier.%s(%s) failed: %s', type_, addrs, e)
def rewrite_full_width(cls, s):
    return six.u('').join(
        [cls.unicode_full_width_map.get(c, c) for c in s])
def ra(value):
    return value\
        .replace(u("ą"), "a").replace(u("Ą"), "a")\
        .replace(u("ć"), "c").replace(u("Ć"), "c")\
        .replace(u("ę"), "e").replace(u("Ę"), "e")\
        .replace(u("ł"), "l").replace(u("Ł"), "l")\
        .replace(u("ń"), "n").replace(u("Ń"), "n")\
        .replace(u("ó"), "o").replace(u("Ó"), "o")\
        .replace(u("ś"), "s").replace(u("Ś"), "s")\
        .replace(u("ź"), "z").replace(u("Ź"), "z")\
        .replace(u("ż"), "z").replace(u("Ż"), "z")\
        .lower()
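# Quick check of ra() above: Polish diacritics are folded to plain ASCII and the
# result is lower-cased (sample word chosen for illustration).
assert ra(u("Żółć")) == "zolc"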
def test_inventory_name_with_unicode(inventory, inventory_source):
    inventory.name = six.u('オオオ')
    inventory.save()
    iu = inventory_source.update()
    assert iu.name.startswith(inventory.name)
def read_datetime(self, value, out_fmt, in_fmt=None): if type(value) != datetime.datetime and in_fmt is not None: value = datetime.datetime.strptime(value, in_fmt) elif type(value) == datetime.datetime: pass else: raise TypeError('Either datetime must be supplied or both ' 'value and in_fmt') MONTHS = [ u(""), u("stycznia"), u("lutego"), u("marca"), u("kwietnia"), u("maja"), u("czerwca"), u("lipca"), u("sierpnia"), u("września"), u("października"), u("listopada"), u("grudnia"), ] DAYS_N0 = [ u(""), u(""), u("dwudziestego"), u("trzydziestego"), ] DAYS_N = [ u(""), u("pierwszego"), u("drugiego"), u("trzeciego"), u("czwartego"), u("piątego"), u("szóstego"), u("siódmego"), u("ósmego"), u("dziewiątego"), u("dziesiątego"), u("jedenastego"), u("dwunastego"), u("trzynastego"), u("czternastego"), u("piętnastego"), u("szesnastego"), u("siedemnastego"), u("osiemnastego"), u("dziewiętnastego"), ] HOURS = [ u("zero"), u("pierwsza"), u("druga"), u("trzecia"), u("czwarta"), u("piąta"), u("szósta"), u("siódma"), u("ósma"), u("dziewiąta"), u("dziesiąta"), u("jedenasta"), u("dwunasta"), u("trzynasta"), u("czternasta"), u("piętnasta"), u("szesnasta"), u("siedemnasta"), u("osiemnasta"), u("dziewiętnasta"), u("dwudziesta"), ] _, tm_mon, tm_mday, tm_hour, tm_min, _, _, _, _ = value.timetuple() retval = [] for word in out_fmt.split(" "): if word == '%d': # Day of the month if tm_mday <= 20: retval.append(DAYS_N[tm_mday]) else: retval.append(DAYS_N0[tm_mday // 10]) retval.append(DAYS_N[tm_mday % 10]) elif word == '%B': # Month as locale’s full name retval.append(MONTHS[tm_mon]) elif word == '%H': # Hour (24-hour clock) as a decimal number if tm_hour <= 20: retval.append(HOURS[tm_hour]) elif tm_hour > 20: retval.append(HOURS[20]) retval.append(HOURS[tm_hour - 20]) elif word == '%M': # Minute as a decimal number if tm_min == 0: retval.append(u('zero-zero')) else: retval.append(read_number(tm_min)) elif word.startswith('%'): raise ValueError("Token %s' is not supported!", word) else: retval.append(word) return ' '.join((w for w in retval if w != ''))
import threading import pyperf import six from six.moves import xrange import tracemalloc import gc import os import psutil import time EMPTY = ({}, 2000) SIMPLE_DATA = {'key1': 0, 'key2': True, 'key3': 'value', 'key4': 'foo', 'key5': 'string'} SIMPLE = (SIMPLE_DATA, 1000) NESTED_DATA = {'key1': 0, 'key2': SIMPLE[0], 'key3': 'value', 'key4': SIMPLE[0], 'key5': SIMPLE[0], six.u('key'): six.u('\u0105\u0107\u017c')} NESTED = (NESTED_DATA, 1000) HUGE = ([NESTED[0]] * 1000, 1) CASES = ['EMPTY', 'SIMPLE', 'NESTED', 'HUGE'] def bench_json_dumps(data): for obj, count_it in data: for _ in count_it: json.dumps(obj) def add_cmdline_args(cmd, args): if args.cases: cmd.extend(("--cases", args.cases)) def functionWorker(tid, tname, allocate_pkey):
retval.append(read_number(int(char))) except ValueError: raise ValueError("\"%s\" is not a element of callsign", char) return ' '.join(retval) # ########################################## # # module dependant words # ############################################# # World Weather Online wwo_weather_codes = { '113': _(ra(u('bezchmurnie'))), # Clear/Sunny '116': _(ra(u('częściowe zachmurzenie'))), # Partly Cloudy '119': _(ra(u('pochmurno'))), # Cloudy '122': _(ra(u('zachmurzenie całkowite'))), # Overcast '143': _(ra(u('zamglenia'))), # Mist '176': _(ra(u('lokalne przelotne opady deszczu'))), # Patchy rain nearby '179': _(ra(u('śnieg'))), # Patchy snow nearby '182': _(ra(u('śnieg z deszczem'))), # Patchy sleet nearby '185': _(ra(u('lokalna przelotna marznąca mżawka')) ), # Patchy freezing drizzle nearby '200': _(ra(u('lokalne burze'))), # Thundery outbreaks in nearby '227': _(ra(u('zamieć śnieżna'))), # Blowing snow '230': _(ra(u('zamieć śnieżna'))), # Blizzard '248': _(ra(u('mgła'))), # Fog '260': _(ra(u('marznąca mgła'))), # Freezing fog '263': _(ra(u('mżawka'))), # Patchy light drizzle
def read_callsign(self, value): # literowanie polskie wg. "Krótkofalarstwo i radiokomunikacja - poradnik", # Łukasz Komsta SQ8QED, Wydawnictwa Komunikacji i Łączności Warszawa, 2001, # str. 130 LETTERS = { 'a': u('adam'), 'b': u('barbara'), 'c': u('celina'), 'd': u('dorota'), 'e': u('edward'), 'f': u('franciszek'), 'g': u('gustaw'), 'h': u('henryk'), 'i': u('irena'), 'j': u('józef'), 'k': u('karol'), 'l': u('ludwik'), 'm': u('marek'), 'n': u('natalia'), 'o': u('olga'), 'p': u('paweł'), 'q': u('quebec'), 'r': u('roman'), 's': u('stefan'), 't': u('tadeusz'), 'u': u('urszula'), 'v': u('violetta'), 'w': u('wacław'), 'x': u('xawery'), 'y': u('ypsilon'), 'z': u('zygmunt'), '/': u('łamane'), } retval = [] for char in value.lower(): try: retval.append(LETTERS[char]) except KeyError: try: retval.append(read_number(int(char))) except ValueError: raise ValueError("\"%s\" is not a element of callsign", char) return ' '.join(retval)
def test_strings(self):
    self.assertEqual(always_iterable('foo'), ('foo',))
    self.assertEqual(always_iterable(six.b('bar')), (six.b('bar'),))
    self.assertEqual(always_iterable(six.u('baz')), (six.u('baz'),))
def read_degrees(self, value):
    deg = [u("stopień"), u("stopnie"), u("stopni")]
    return read_number(value, deg)