def _GetResponse( self, handler, request_data = None, method = 'POST' ):
  """
  Query racerd via HTTP

  racerd returns JSON with 200 OK responses. 204 No Content responses occur
  when no errors were encountered but no completions, definitions, or errors
  were found.
  """
  _logger.info( 'RustCompleter._GetResponse' )
  handler = ToBytes( handler )
  method = ToBytes( method )
  url = urllib.parse.urljoin( ToBytes( self._racerd_host ), handler )
  parameters = self._ConvertToRacerdRequest( request_data )
  body = ToBytes( json.dumps( parameters ) ) if parameters else bytes()
  extra_headers = self._ExtraHeaders( method, handler, body )

  _logger.debug( 'Making racerd request: %s %s %s %s', method, url,
                 extra_headers, body )

  # Failing to wrap the method & url bytes objects in `native()` causes HMAC
  # failures (403 Forbidden from racerd) for unknown reasons. Similar for
  # request_hmac above.
  response = requests.request( native( method ),
                               native( url ),
                               data = body,
                               headers = extra_headers )

  response.raise_for_status()

  # Compare with `==`, not `is`: identity of int constants is an
  # implementation detail of CPython's small-int cache.
  if response.status_code == http.client.NO_CONTENT:
    return None

  return response.json()
def groups_user(conn, search_base, user_filter, user_name_att, username):
    search_filter = "(&({0})({1}={2}))".format(user_filter, user_name_att, username)
    try:
        memberof_attr = configuration.conf.get("ldap", "group_member_attr")
    except AirflowConfigException:
        # Fall back to the standard LDAP attribute when not configured.
        memberof_attr = "memberOf"
    res = conn.search(native(search_base), native(search_filter),
                      attributes=[native(memberof_attr)])
    if not res:
        log.info("Cannot find user %s", username)
        raise AuthenticationError("Invalid username or password")

    if conn.response and memberof_attr not in conn.response[0]["attributes"]:
        log.warning("Missing attribute %r when looked up in the LDAP database. "
                    "The user does not seem to be a member of a group and "
                    "therefore won't see any DAG if the options "
                    "filter_by_owner=True and owner_mode=ldapgroup are set",
                    memberof_attr)
        return []

    user_groups = conn.response[0]["attributes"][memberof_attr]

    regex = re.compile("cn=([^,]*).*", re.IGNORECASE)
    groups_list = []
    try:
        groups_list = [regex.search(i).group(1) for i in user_groups]
    except (IndexError, AttributeError):
        # regex.search() returns None when a group entry has no "cn=" component.
        log.warning("Parsing error when retrieving the user's group(s). "
                    "Check that the user belongs to at least one group and "
                    "that the group names contain no special characters")

    return groups_list
def _CallExtraConfFlagsForFile( module, filename, client_data ):
  # We want to ensure we pass a native py2 `str` on py2 and a native py3 `str`
  # (unicode) object on py3. That's the API we provide.
  # In a vacuum, always passing a unicode object (`unicode` on py2 and `str` on
  # py3) would be better, but we can't do that because that would break all the
  # ycm_extra_conf files already out there that expect a py2 `str` object on
  # py2, and WE DO NOT BREAK BACKWARDS COMPATIBILITY.
  # Hindsight is 20/20.
  if PY2:
    filename = native( ToBytes( filename ) )
  else:
    filename = native( ToUnicode( filename ) )

  # For the sake of backwards compatibility, we need to first check whether the
  # FlagsForFile function in the extra conf module even allows keyword args.
  if inspect.getargspec( module.FlagsForFile ).keywords:
    results = module.FlagsForFile( filename, client_data = client_data )
  else:
    results = module.FlagsForFile( filename )

  results[ 'flags' ] = _MakeRelativePathsInFlagsAbsolute(
      results[ 'flags' ],
      results.get( 'include_paths_relative_to_dir' ) )

  return results
def test_native(self):
    a = int(10 ** 20)     # long int
    b = native(a)
    self.assertEqual(a, b)
    if PY2:
        self.assertEqual(type(b), long)
    else:
        self.assertEqual(type(b), int)

    c = bytes(b"ABC")
    d = native(c)
    self.assertEqual(c, d)
    if PY2:
        self.assertEqual(type(d), type(b"Py2 byte-string"))
    else:
        self.assertEqual(type(d), bytes)

    s = str("ABC")
    t = native(s)
    self.assertEqual(s, t)
    if PY2:
        self.assertEqual(type(t), unicode)
    else:
        self.assertEqual(type(t), str)
def test_native(self):
    a = int(10**20)     # long int
    b = native(a)
    self.assertEqual(a, b)
    if PY2:
        self.assertEqual(type(b), long)
    else:
        self.assertEqual(type(b), int)

    c = bytes(b'ABC')
    d = native(c)
    self.assertEqual(c, d)
    if PY2:
        self.assertEqual(type(d), type(b'Py2 byte-string'))
    else:
        self.assertEqual(type(d), bytes)

    s = str(u'ABC')
    t = native(s)
    self.assertEqual(s, t)
    if PY2:
        self.assertEqual(type(t), unicode)
    else:
        self.assertEqual(type(t), str)

    d = dict({1: 2, 2: 4})
    e = native(d)
    self.assertEqual(d, e)
    if PY2:
        self.assertEqual(type(e), type({}))
    else:
        # Check the converted value, not the original.
        self.assertEqual(type(e), dict)
def get_ldap_connection(dn=None, password=None):
    # Default to no custom CA bundle / schema handling when not configured,
    # so the names below are always bound even if the options are missing.
    cacert = None
    ignore_malformed_schema = None

    try:
        cacert = configuration.conf.get("ldap", "cacert")
    except AirflowConfigException:
        pass

    try:
        ignore_malformed_schema = configuration.conf.get("ldap",
                                                         "ignore_malformed_schema")
    except AirflowConfigException:
        pass

    if ignore_malformed_schema:
        set_config_parameter('IGNORE_MALFORMED_SCHEMA', ignore_malformed_schema)

    tls_configuration = Tls(validate=ssl.CERT_REQUIRED,
                            ca_certs_file=cacert)

    server = Server(configuration.conf.get("ldap", "uri"),
                    use_ssl=True,
                    tls=tls_configuration)

    conn = Connection(server, native(dn), native(password))

    if not conn.bind():
        log.error("Cannot bind to ldap server: %s ", conn.last_error)
        raise AuthenticationError("Cannot bind to ldap server")

    return conn
def test_native(self):
    a = int(10**20)     # long int
    b = native(a)
    self.assertEqual(a, b)
    if PY2:
        self.assertEqual(type(b), long)
    else:
        self.assertEqual(type(b), int)

    c = bytes(b'ABC')
    d = native(c)
    self.assertEqual(c, d)
    if PY2:
        self.assertEqual(type(d), type(b'Py2 byte-string'))
    else:
        self.assertEqual(type(d), bytes)

    s = str(u'ABC')
    t = native(s)
    self.assertEqual(s, t)
    if PY2:
        self.assertEqual(type(t), unicode)
    else:
        self.assertEqual(type(t), str)

    d1 = dict({'a': 1, 'b': 2})
    d2 = native(d1)
    self.assertEqual(d1, d2)
    self.assertEqual(type(d2), type({}))
def try_login(username, password):
    conn = get_ldap_connection(configuration.conf.get("ldap", "bind_user"),
                               configuration.conf.get("ldap", "bind_password"))

    search_filter = "(&({0})({1}={2}))".format(
        configuration.conf.get("ldap", "user_filter"),
        configuration.conf.get("ldap", "user_name_attr"),
        username
    )

    search_scopes = {
        "LEVEL": LEVEL,
        "SUBTREE": SUBTREE,
        "BASE": BASE
    }

    search_scope = LEVEL
    if configuration.conf.has_option("ldap", "search_scope"):
        if configuration.conf.get("ldap", "search_scope") == "SUBTREE":
            search_scope = SUBTREE
        else:
            search_scope = LEVEL

    # todo: BASE or ONELEVEL?

    res = conn.search(native(configuration.conf.get("ldap", "basedn")),
                      native(search_filter),
                      search_scope=native(search_scope))

    # todo: use list or result?
    if not res:
        log.info("Cannot find user %s", username)
        raise AuthenticationError("Invalid username or password")

    entry = conn.response[0]

    conn.unbind()

    if 'dn' not in entry:
        # The search filter for the user did not return any values, so an
        # invalid user was used for credentials.
        raise AuthenticationError("Invalid username or password")

    try:
        conn = get_ldap_connection(entry['dn'], password)
    except KeyError:
        log.error("""
        Unable to parse LDAP structure. If you're using Active Directory
        and not specifying an OU, you must set search_scope=SUBTREE in airflow.cfg.
        %s
        """ % traceback.format_exc())
        raise LdapException(
            "Could not parse LDAP structure. "
            "Try setting search_scope in airflow.cfg, or check logs"
        )

    if not conn:
        log.info("Password incorrect for user %s", username)
        raise AuthenticationError("Invalid username or password")
def group_contains_user(conn, search_base, group_filter, user_name_attr, username):
    search_filter = '(&({0}))'.format(group_filter)
    if not conn.search(native(search_base), native(search_filter),
                       attributes=[native(user_name_attr)]):
        log.warning("Unable to find group for %s %s", search_base, search_filter)
    else:
        for entry in conn.entries:
            if username in getattr(entry, user_name_attr).values:
                return True
    return False
def CompilationDatabase_Py3Bytes_test():
    cc_dir = native(ToBytes(PATH_TO_COMPILE_COMMANDS))
    cc_filename = native(ToBytes(os.path.join(COMPILE_COMMANDS_WORKING_DIR,
                                              "example.cc")))

    # Ctor reads ycmd/tests/testdata/[unix|windows]/compile_commands.json
    db = ycm_core.CompilationDatabase(cc_dir)
    info = db.GetCompilationInfoForFile(cc_filename)

    eq_(str(info.compiler_working_dir_), COMPILE_COMMANDS_WORKING_DIR)
    eq_(str(info.compiler_flags_[0]), "/usr/bin/clang++")
    eq_(str(info.compiler_flags_[1]), "example.cc")
def read_las(self):
    if Path(native(self.las_file)).exists():
        las = LASReader(native(str(Path(self.las_file))), null_subs=np.nan)
        # well_name = las.well.items['WELL'].data
        df = pd.DataFrame(
            las.data2d,
            columns=["{}({})".format(
                las.curves.items[name].descr.replace(' ', '_'),
                las.curves.items[name].units)
                for name in las.curves.names])
        if 'Depth(M)' in df.columns.values.tolist():
            df.rename(columns={'Depth(M)': 'Depth(m)'}, inplace=True)
        self._data_frame = df
def _GetResponse(self, handler, request_data={}): """POST JSON data to JediHTTP server and return JSON response.""" handler = ToBytes(handler) url = urllib.parse.urljoin(self._jedihttp_host, handler) parameters = self._TranslateRequestForJediHTTP(request_data) body = ToBytes(json.dumps(parameters)) if parameters else bytes() extra_headers = self._ExtraHeaders(handler, body) self._logger.debug("Making JediHTTP request: %s %s %s %s", "POST", url, extra_headers, body) response = requests.request(native(bytes(b"POST")), native(url), data=body, headers=extra_headers) response.raise_for_status() return response.json()
def group_contains_user(conn, search_base, group_filter, user_name_attr, username):
    search_filter = '(&({0}))'.format(group_filter)
    if not conn.search(native(search_base), native(search_filter),
                       attributes=[native(user_name_attr)]):
        # logger.warn() is deprecated; use warning().
        _log.warning("Unable to find group for %s %s", search_base, search_filter)
    else:
        for resp in conn.response:
            if ('attributes' in resp and
                    (resp['attributes'].get(user_name_attr)[0] == username or
                     resp['attributes'].get(user_name_attr) == username)):
                return True
    return False
def save(self, *args, **kwargs):
    super(Gallery, self).save(*args, **kwargs)
    if self.zipfile:
        from zipfile import ZipFile
        zip_file = ZipFile(self.zipfile)
        for name in zip_file.namelist():
            data = zip_file.read(name)
            try:
                from PIL import Image
                image = Image.open(io.BytesIO(data))
                image.load()
                image = Image.open(io.BytesIO(data))
                image.verify()
            except ImportError:
                pass
            except Exception:
                # Skip entries that PIL cannot read as images.
                continue
            path = os.path.join(settings.MEDIA_ROOT, UPLOADS_GALLERY_DIRECTORY,
                                native(str(name, errors="ignore")))
            img_tmp_path = default_storage.save(path, ContentFile(data))
            photo = Photo()
            # Open the temporary image in binary mode before re-saving it.
            photo.image.save(os.path.basename(img_tmp_path),
                             File(open(img_tmp_path, "rb")))
            photo.album = os.path.splitext(os.path.split(zip_file.filename)[1])[0]
            photo.save()
            os.remove(img_tmp_path)
        zip_file.close()
        self.zipfile.delete(save=True)
def asString(obj):
    """
    Ensure an object is either explicitly str or unicode and not some
    derived type that can change semantics. If the object is unicode,
    return unicode. Otherwise return the string conversion of the object.

    Args:
        obj: Object to return as str or unicode

    Returns:
        str or unicode:
    """
    typ = type(obj)
    # explicit type check as faster path
    if typ in _STR_TYPES:
        if not futils.PY2 and typ is futils.binary_type:
            obj = os.fsdecode(obj)
        return obj
    # derived type check
    elif isinstance(obj, bytes):
        if not futils.PY2:
            obj = obj.decode(FILESYSTEM_ENCODING)
    else:
        obj = futils.text_type(obj)
    return futils.native(obj)
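# A minimal, hedged sketch of the pattern asString() implements, shown with
# future.utils directly rather than this module's internals (run under
# Python 3; assumes python-future is installed):
from future.utils import native, text_type

def demo_as_string(obj):
    # Mirror of asString()'s fallback branch: coerce to text, then unwrap any
    # future newstr so callers always see the platform-native str type.
    return native(text_type(obj))

assert demo_as_string(42) == "42"
assert type(demo_as_string(u"abc")) is str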
def get_conn(self):
    """
    Returns an LDAP connection object
    """
    logging.info("Connecting to {0}".format(self.ldap_conn_id))
    conn = self.get_connection(self.ldap_conn_id)

    url = 'ldap://{0}:{1}'.format(conn.host, conn.port)
    server = Server(url, get_info=ALL)
    conn = Connection(server, native(conn.login), native(conn.password))

    if not conn.bind():
        logging.error("Cannot bind to ldap server: %s ", conn.last_error)
        raise Exception("Cannot bind to ldap server")

    return conn
def _push_sample(self, sample):
    # Calculate whether we need to draw any annotations on the output video.
    now = sample.time
    annotations = []
    with self.annotations_lock:
        # Remove expired annotations
        self.text_annotations = [x for x in self.text_annotations
                                 if now < x.end_time]
        current_texts = [x for x in self.text_annotations if x.time <= now]
        for annotation in list(self.annotations):
            if annotation.time == now:
                annotations.append(annotation)
            if now >= annotation.time:
                self.annotations.remove(annotation)

    sample = gst_sample_make_writable(sample)
    img = array_from_sample(sample, readwrite=True)

    # Text:
    _draw_text(img, datetime.datetime.now().strftime("%H:%M:%S.%f")[:-4],
               (10, 30), (255, 255, 255))
    for i, x in enumerate(reversed(current_texts)):
        origin = (10, (i + 2) * 30)
        age = float(now - x.time) / 3
        color = (native(int(255 * max([1 - age, 0.5]))).__int__(),) * 3
        _draw_text(img, x.text, origin, color)

    # Regions:
    for annotation in annotations:
        _draw_annotation(img, annotation)

    self.appsrc.props.caps = sample.get_caps()
    self.appsrc.emit("push-buffer", sample.get_buffer())
    self._sample_count += 1
def prepare_lookup_for_tvmaze(**lookup_params): """ Return a dict of params which is valid with tvmaze API lookups :param lookup_params: Search parameters :return: Dict of tvmaze recognizable key words """ prepared_params = {} title = None series_name = ( lookup_params.get('series_name') or lookup_params.get('show_name') or lookup_params.get('title') ) if series_name: title, _ = split_title_year(series_name) # Support for when title is just a number if not title: title = series_name # Ensure we send native types to tvmaze lib as it does not handle new types very well prepared_params['tvmaze_id'] = lookup_params.get('tvmaze_id') prepared_params['thetvdb_id'] = lookup_params.get('tvdb_id') or lookup_params.get( 'trakt_series_tvdb_id' ) prepared_params['tvrage_id'] = lookup_params.get('tvrage_id') or lookup_params.get( 'trakt_series_tvrage_id' ) prepared_params['imdb_id'] = lookup_params.get('imdb_id') prepared_params['show_name'] = native(title) if title else None return prepared_params
def generate_entries(self, config):
    entries = []
    filter = config.get('filter', {})
    # deluge client lib chokes on future's newlist, make sure we have a
    # native python list here
    torrents = self.get_torrents_status(
        native(list(self.settings_map.keys())) + config.get('keys', []), filter)
    for hash, torrent_dict in torrents.items():
        # Make sure it has a url so no plugins crash
        entry = Entry(deluge_id=hash, url='')
        config_path = os.path.expanduser(config.get('config_path', ''))
        if config_path:
            torrent_path = os.path.join(config_path, 'state', hash + '.torrent')
            if os.path.isfile(torrent_path):
                entry['location'] = torrent_path
                if not torrent_path.startswith('/'):
                    torrent_path = '/' + torrent_path
                entry['url'] = 'file://' + torrent_path
            else:
                log.warning('Did not find torrent file at %s', torrent_path)
        for key, value in torrent_dict.items():
            if key in self.settings_map:
                flexget_key = self.settings_map[key]
            else:
                flexget_key = self.extra_settings_map[key]
            if isinstance(flexget_key, tuple):
                flexget_key, format_func = flexget_key
                value = format_func(value)
            entry[flexget_key] = value
        entries.append(entry)
    return entries
def parse_series(self, data, **kwargs):
    log.debug('Parsing series: `%s` [options: %s]', data, kwargs)
    guessit_options = self._guessit_options(kwargs)
    if kwargs.get('name') and not guessit_options.get('strict_name'):
        expected_title = kwargs['name']
        # apostrophe support
        expected_title = expected_title.replace('\'', '(?:\'|\\\'|\\\\\'|-|)?')
        guessit_options['expected_title'] = ['re:' + expected_title]
    if kwargs.get('id_regexps'):
        guessit_options['id_regexps'] = kwargs.get('id_regexps')
    start = time.clock()
    # If no series name is provided, we don't tell guessit what kind of match
    # we are looking for. This prevents guessit from determining that too
    # general of matches are series.
    parse_type = 'episode' if kwargs.get('name') else None
    if parse_type:
        guessit_options['type'] = parse_type

    # NOTE: Guessit expects str on PY3 and unicode on PY2 hence the use of
    # future.utils.native
    try:
        guess_result = guessit_api.guessit(native(data), options=guessit_options)
    except GuessitException:
        log.warning('Parsing %s with guessit failed. Most likely a unicode error.',
                    data)
        guess_result = {}
    parsed = GuessitParsedSerie(data, kwargs.pop('name', None), guess_result, **kwargs)
    end = time.clock()
    log.debug('Parsing result: %s (in %s ms)', parsed, (end - start) * 1000)
    return parsed
def normalizeimage(image_desc, copy=False):
    """
    :param image_desc: an image description as returned from
        |docker.Client.images|_, |docker.Client.inspect_image|_, etc.

    :param copy: if :const:`True`, make a copy of :obj:`image_desc` before
        performing any normalizations

    :returns: the normalized image description (:obj:`image_desc` if
        :obj:`copy` is :const:`False`)

    This method attempts to address certain `Docker API inconsistencies
    <https://github.com/docker/docker/issues/5893#issuecomment-102398746>`__.
    The following keys are added to :obj:`image_desc`:

    * :attr:`':id'` - a normalized :attr:`'Id'`
    * :attr:`':short_id'` - the first 12 hexadecimal characters from :attr:`':id'`
    * :attr:`':parent_id'` - a normalized :attr:`'ParentId'` or :attr:`'Parent'`
    * :attr:`':created_dt'` - a timezone-aware :class:`datetime` object
      representing :attr:`'Created'`
    * :attr:`':repo_tags'` - a normalized :attr:`'RepoTags'`, including any
      short names (i.e., those implying ``:latest``)
    """
    if copy:
        image = deepcopy(image_desc)
    else:
        image = image_desc

    image_id = image.get('Id', image.get('id')).lower()
    image[':id'] = image_id
    image[':parent_id'] = image.get('ParentId',
                                    image.get('Parent',
                                              image.get('parent', ''))).lower()
    image_short_id = image_id[:12]
    image[':short_id'] = image_short_id
    image_created = image.get('Created', image.get('created'))

    if isinstance(image_created, int):
        # Work-around for
        # <https://github.com/PythonCharmers/python-future/issues/144> and
        # <https://bitbucket.org/pypy/pypy/issue/2048/datetimeutcfromtimestamp-barfs-when>
        image_created = native(image_created)
        image[':created_dt'] = datetime.utcfromtimestamp(image_created).replace(tzinfo=TZ_UTC)
    else:
        image[':created_dt'] = dateutil_parse(image_created)

    image[':repo_tags'] = []

    for repo_tag in image.get('RepoTags', ()):
        if repo_tag == '<none>:<none>':
            continue
        repo, tag = repo_tag.split(':')
        if tag == 'latest':
            image[':repo_tags'].append(repo)
        image[':repo_tags'].append(repo_tag)

    return image
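# A hedged, runnable illustration of the timestamp workaround in
# normalizeimage() above: future's newint can trip up
# datetime.utcfromtimestamp() on some platforms, so the value is unwrapped
# with native() first. The epoch value here is made up; assumes python-future
# is installed.
from datetime import datetime
from future.types import newint
from future.utils import native

created = newint(1446210000)  # what a future-ified caller might pass
dt = datetime.utcfromtimestamp(native(created))
print(dt.isoformat())  # a datetime in late October 2015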
def on_connect_success(self, result, task, config): """Creates a list of FlexGet entries from items loaded in deluge and stores them to self.entries""" from deluge.ui.client import client def on_get_torrents_status(torrents): config_path = os.path.expanduser(config.get('config_path', '')) for hash, torrent_dict in torrents.items(): # Make sure it has a url so no plugins crash entry = Entry(deluge_id=hash, url='') if config_path: torrent_path = os.path.join(config_path, 'state', hash + '.torrent') if os.path.isfile(torrent_path): entry['location'] = torrent_path if not torrent_path.startswith('/'): torrent_path = '/' + torrent_path entry['url'] = 'file://' + torrent_path else: log.warning('Did not find torrent file at %s' % torrent_path) for key, value in torrent_dict.items(): if key in self.settings_map: flexget_key = self.settings_map[key] else: flexget_key = self.extra_settings_map[key] if isinstance(flexget_key, tuple): flexget_key, format_func = flexget_key value = format_func(value) entry[flexget_key] = value self.entries.append(entry) client.disconnect() filter = config.get('filter', {}) # deluge client lib chokes on future's newlist, make sure we have a native python list here client.core.get_torrents_status(filter, native(list(self.settings_map.keys()) + config.get('keys', []))).addCallback( on_get_torrents_status)
def on_connect_success(self, result, task, config):
    """Creates a list of FlexGet entries from items loaded in deluge and
    stores them to self.entries"""
    from deluge.ui.client import client

    def on_get_torrents_status(torrents):
        config_path = os.path.expanduser(config.get('config_path', ''))
        for hash, torrent_dict in torrents.items():
            # Make sure it has a url so no plugins crash
            entry = Entry(deluge_id=hash, url='')
            if config_path:
                torrent_path = os.path.join(config_path, 'state', hash + '.torrent')
                if os.path.isfile(torrent_path):
                    entry['location'] = torrent_path
                    if not torrent_path.startswith('/'):
                        torrent_path = '/' + torrent_path
                    entry['url'] = 'file://' + torrent_path
                else:
                    log.warning('Did not find torrent file at %s' % torrent_path)
            for key, value in torrent_dict.items():
                flexget_key = self.settings_map[key]
                if isinstance(flexget_key, tuple):
                    flexget_key, format_func = flexget_key
                    value = format_func(value)
                entry[flexget_key] = value
            self.entries.append(entry)
        client.disconnect()

    filter = config.get('filter', {})
    # deluge client lib chokes on future's newlist, make sure we have a
    # native python list here
    client.core.get_torrents_status(
        filter, native(list(self.settings_map.keys()))
    ).addCallback(on_get_torrents_status)
def group_contains_user(conn, search_base, group_filter, group_member_attr, username):
    search_filter = '(&({0}))'.format(group_filter)
    if not conn.search(native(search_base), native(search_filter),
                       attributes=[native(group_member_attr)]):
        LOG.warning("Unable to find group for %s %s", search_base, search_filter)
    else:
        for resp in conn.response:
            if ('attributes' in resp and
                    (resp['attributes'].get(group_member_attr)[0] == username or
                     resp['attributes'].get(group_member_attr) == username or
                     username in resp['attributes'].get(group_member_attr))):
                return True
    return False
def _encrypt(self, data, iv):
    encrypter = pyaes.Encrypter(
        pyaes.AESModeOfOperationCBC(self._hash.hash, iv=native(iv)))
    enc_data = encrypter.feed(self.MAGIC_DETECT_ENC + data)
    enc_data += encrypter.feed()
    return enc_data
def ensure_text(value, encoding=sys.getdefaultencoding(), errors='strict',
                coerce=False):
    """Return the text representation of the given string.

    :param value: string value (bytes/str/unicode)
    :param encoding: name of encoding used if `value` is not text
    :param errors: decode option used if `value` is not text
    :param bool coerce: whether to attempt to coerce `value` to text
    :returns: text representation of `value`
    :rtype: `unicode` if Python 2; otherwise, `str`
    :raises TypeError: if `value` is not a str, unicode, nor bytes
    :raises UnicodeDecodeError: if `value` cannot be decoded

    The primary use case for this function is as a shortcut for a
    library providing support for Python 2 and 3 to ensure that a
    provided string value can be interpreted as text.
    """
    if isinstance(value, future.native_bytes):
        value = value.decode(encoding, errors)
    elif not isinstance(value, future.text_type):
        if not coerce:
            raise TypeError("{!r} is not a string type.".format(type(value)))
        value = future.text_type(value)
    return future.native(value)
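# Hedged usage sketch for ensure_text() above; relies only on the function as
# defined here (and its `future` aliases being importable):
assert ensure_text(b"caf\xc3\xa9", encoding="utf-8") == u"caf\xe9"
assert ensure_text(u"plain") == u"plain"
assert ensure_text(42, coerce=True) == u"42"
try:
    ensure_text(42)  # coerce defaults to False
except TypeError:
    pass  # non-string input is rejected by default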
def group_contains_user(conn, search_base, group_filter, user_name_attr, username): search_filter = '(&({0}))'.format(group_filter) if not conn.search(native(search_base), native(search_filter), attributes=[native(user_name_attr)]): log.warning("Unable to find group for %s %s", search_base, search_filter) else: for entry in conn.entries: if username.lower() in map(lambda attr: attr.lower(), getattr(entry, user_name_attr).values): return True return False
def parse_series(self, data, **kwargs):
    log.debug('Parsing series: `%s` [options: %s]', data, kwargs)
    guessit_options = self._guessit_options(kwargs)
    if kwargs.get('name') and not guessit_options.get('strict_name'):
        expected_title = kwargs['name']
        # apostrophe support
        expected_title = expected_title.replace('\'', '(?:\'|\\\'|\\\\\'|-|)?')
        guessit_options['expected_title'] = ['re:' + expected_title]
    if kwargs.get('id_regexps'):
        guessit_options['id_regexps'] = kwargs.get('id_regexps')
    start = time.clock()
    # If no series name is provided, we don't tell guessit what kind of match
    # we are looking for. This prevents guessit from determining that too
    # general of matches are series.
    parse_type = 'episode' if kwargs.get('name') else None
    if parse_type:
        guessit_options['type'] = parse_type
    # NOTE: Guessit expects str on PY3 and unicode on PY2 hence the use of
    # future.utils.native
    guess_result = guessit_api.guessit(native(data), options=guessit_options)
    parsed = GuessitParsedSerie(data, kwargs.pop('name', None), guess_result, **kwargs)
    end = time.clock()
    log.debug('Parsing result: %s (in %s ms)', parsed, (end - start) * 1000)
    return parsed
def append(self, item):
    if item.order is None:
        item.order = 0
    else:
        item.order = native(int(item.order))
    super(AppsConfig, self).append(item)
    self._hash[item.name] = item
def from_bytes(cls, mybytes, byteorder='big', signed=False): """ Return the integer represented by the given array of bytes. The mybytes argument must either support the buffer protocol or be an iterable object producing bytes. Bytes and bytearray are examples of built-in objects that support the buffer protocol. The byteorder argument determines the byte order used to represent the integer. If byteorder is 'big', the most significant byte is at the beginning of the byte array. If byteorder is 'little', the most significant byte is at the end of the byte array. To request the native byte order of the host system, use `sys.byteorder' as the byte order value. The signed keyword-only argument indicates whether two's complement is used to represent the integer. """ if byteorder not in ('little', 'big'): raise ValueError("byteorder must be either 'little' or 'big'") if isinstance(mybytes, unicode): raise TypeError("cannot convert unicode objects to bytes") # mybytes can also be passed as a sequence of integers on Py3. # Test for this: elif isinstance(mybytes, collections.Iterable): mybytes = newbytes(mybytes) b = mybytes if byteorder == 'big' else mybytes[::-1] if len(b) == 0: b = b'\x00' # The encode() method has been disabled by newbytes, but Py2's # str has it: num = int(native(b).encode('hex'), 16) if signed and (b[0] & 0x80): num = num - (2 ** (len(b)*8)) return cls(num)
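# Worked check of the semantics from_bytes() implements, using Python 3's
# built-in int.from_bytes as the reference (illustrative values only):
assert int.from_bytes(b"\x01\x00", "big") == 256     # 0x0100
assert int.from_bytes(b"\x01\x00", "little") == 1    # 0x0001
# signed: 0xff has the sign bit set, so 255 - 2**8 == -1 (two's complement)
assert int.from_bytes(b"\xff", "big", signed=True) == -1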
def to_segy(self, output_folder, like, shape):
    Path(native(output_folder)).mkdir(parents=True, exist_ok=True)
    for i, cn in enumerate(tqdm(self.column_names, ascii=True)):
        attr = self.dataframe[cn].values.reshape(shape, order="F")
        segy = SeiSEGY(output_folder + "/{}.sgy".format(cn), like=like)
        for inl, attr_slice in zip(list(segy.inlines()), attr):
            segy.update(InlineIndex(inl), attr_slice)
def from_bytes(cls, mybytes, byteorder='big', signed=False):
    """
    Return the integer represented by the given array of bytes.

    The mybytes argument must either support the buffer protocol or be an
    iterable object producing bytes. Bytes and bytearray are examples of
    built-in objects that support the buffer protocol.

    The byteorder argument determines the byte order used to represent the
    integer. If byteorder is 'big', the most significant byte is at the
    beginning of the byte array. If byteorder is 'little', the most
    significant byte is at the end of the byte array. To request the native
    byte order of the host system, use `sys.byteorder` as the byte order
    value.

    The signed keyword-only argument indicates whether two's complement is
    used to represent the integer.
    """
    if byteorder not in ('little', 'big'):
        raise ValueError("byteorder must be either 'little' or 'big'")
    if isinstance(mybytes, unicode):
        raise TypeError("cannot convert unicode objects to bytes")
    # mybytes can also be passed as a sequence of integers on Py3.
    # Test for this:
    elif isinstance(mybytes, Iterable):
        mybytes = newbytes(mybytes)
    b = mybytes if byteorder == 'big' else mybytes[::-1]
    if len(b) == 0:
        b = b'\x00'
    # The encode() method has been disabled by newbytes, but Py2's
    # str has it:
    num = int(native(b).encode('hex'), 16)
    if signed and (b[0] & 0x80):
        num = num - (2**(len(b) * 8))
    return cls(num)
def append(self, item): """" Adiciona um aplicativo """ if item.order is None: item.order = 0 else: item.order = native(int(item.order)) super(AppsConfig, self).append(item) self._hash[item.name] = item
def newstr_to_native_str(s):
    try:
        newstr = str(s, 'utf-8')
    except TypeError:
        # Python 3 does not support an encoding argument when given a unicode str.
        newstr = str(s)
    return future_utils.native(newstr).encode('utf-8')
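# Hedged behavior sketch for newstr_to_native_str() above (assumes the
# future_utils alias the function uses):
#
#   py2: str(s, 'utf-8') decodes a byte-string, and the result is re-encoded
#        to a native utf-8 byte-string (`str`).
#   py3: str(s, 'utf-8') raises TypeError for text input, so the fallback
#        str(s) path runs and the final result is utf-8 `bytes`.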
def get_ldap_connection(dn=None, password=None):
    tls_configuration = None
    use_ssl = False
    try:
        cacert = configuration.conf.get("ldap", "cacert")
        tls_configuration = Tls(validate=ssl.CERT_REQUIRED, ca_certs_file=cacert)
        use_ssl = True
    except AirflowConfigException:
        # No cacert configured; fall back to an unencrypted connection.
        pass

    server = Server(configuration.conf.get("ldap", "uri"), use_ssl, tls_configuration)
    conn = Connection(server, native(dn), native(password))

    if not conn.bind():
        log.error("Cannot bind to ldap server: %s ", conn.last_error)
        raise AuthenticationError("Cannot bind to ldap server")

    return conn
def _GetResponse(self, handler, request_data={}): """POST JSON data to JediHTTP server and return JSON response.""" handler = ToBytes(handler) url = urljoin(self._jedihttp_host, handler) parameters = self._TranslateRequestForJediHTTP(request_data) body = ToBytes(json.dumps(parameters)) if parameters else bytes() extra_headers = self._ExtraHeaders(handler, body) self._logger.debug('Making JediHTTP request: %s %s %s %s', 'POST', url, extra_headers, body) response = requests.request(native(bytes(b'POST')), native(url), data=body, headers=extra_headers) response.raise_for_status() return response.json()
def get_services(cls, service_id=None, service_name=None):
    """
    Get service from database.

    Gets all services in database or explicitly those with a given
    service_name or service_id.

    Args:
        service_id (int): Service id to extract
        service_name (string): Service name to extract

    Returns:
        list/Services: The services/service pulled from the database
    """
    if service_name is not None:
        if not isinstance(service_name, (str, native_str)):
            cls.logger.error("Service name: %r should be of type str", service_name)
            raise TypeError
    if service_id is not None:
        try:
            service_id = native(int(service_id))
        except ValueError:
            cls.logger.error(
                "Service id: %r should be of type int "
                "(or convertible to int)", service_id)
            raise

    with managed_session() as session:
        query = session.query(cls)
        query_id = []
        if service_id is not None:
            query = query.filter_by(id=service_id)
            query_id.append(str(service_id))
        if service_name is not None:
            query = query.filter_by(name=service_name)
            query_id.append(service_name)

        if service_id is None and service_name is None:
            services = query.all()
            session.expunge_all()
            return services

        try:
            service = query.one()
        except NoResultFound:
            cls.logger.warning("No result found for service: (%s)",
                               ', '.join(query_id))
            raise
        except MultipleResultsFound:
            cls.logger.error("Multiple results found for service: (%s)",
                             ', '.join(query_id))
            raise
        session.expunge(service)
        return service
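# Hedged usage sketch for get_services() above; `Services` and the service
# name are hypothetical, not part of the original module:
#
#   all_services = Services.get_services()                  # every row
#   one = Services.get_services(service_name="scheduler")   # exactly one row,
#       # or NoResultFound / MultipleResultsFound is re-raised after logging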
def _get_exons(counts):
    # type: (pd.DataFrame) -> pd.DataFrame
    """Extracts exon position information from given count frame."""
    exons = pd.DataFrame.from_records(
        native(list(counts.index.get_values())),
        columns=['chromosome', 'start', 'end', 'strand'])
    exons['strand'] = exons['strand'].map({'-': -1, '+': 1})
    return exons
def fs_to_text(s):
    """convert a native_str to text, using the file system encoding,
    preserving None"""
    # PY2COMPAT: used to handle python2 requiring native str in encoding,
    # and for type consistency of file system paths
    if s is None:
        return s
    if PY3:
        return s
    # note that there isn't a native_str_to_text: this is the equivalent
    # also handle use during startup when filesystemencoding not yet
    # initialized, within this module
    return native(s).decode(sys.getfilesystemencoding() or
                            locale.getpreferredencoding())
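# Hedged behavior sketch for fs_to_text() above; sys.getfilesystemencoding()
# is usually 'utf-8' on modern systems, but that is not guaranteed:
#
#   fs_to_text(None)  -> None  (preserved, per the docstring)
#   py3: input is already text and is returned unchanged
#   py2: native(s).decode(fs_encoding) converts the byte-string to unicode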
def _ExtraHeaders( self, method, handler, body ):
  if not body:
    body = bytes()
  hmac = hmac_utils.CreateRequestHmac( method, handler, body,
                                       self._hmac_secret )
  final_hmac_value = native( ToBytes( binascii.hexlify( hmac ) ) )

  extra_headers = { 'content-type': 'application/json' }
  extra_headers[ RACERD_HMAC_HEADER ] = final_hmac_value
  return extra_headers
def groups_user(conn, search_base, group_filter, group_member_attr, username):
    groups_list = []
    search_filter = '(&({0}))'.format(group_filter)
    LOG.debug("Search Filter %s", search_filter)
    if not conn.search(native(search_base), native(search_filter),
                       attributes=[native(group_member_attr)]):
        LOG.warning("Unable to find groups for %s %s", search_base, search_filter)
    else:
        for resp in conn.response:
            LOG.debug("Response %s", resp)
            if ('attributes' in resp and
                    (resp['attributes'].get(group_member_attr) == username or
                     username in resp['attributes'].get(group_member_attr))):
                groups_list.append(resp['dn'])
    return groups_list
def _CallExtraConfFlagsForFile(module, filename, client_data):
    # We want to ensure we pass a native py2 `str` on py2 and a native py3
    # `str` (unicode) object on py3. That's the API we provide.
    # In a vacuum, always passing a unicode object (`unicode` on py2 and `str`
    # on py3) would be better, but we can't do that because that would break
    # all the ycm_extra_conf files already out there that expect a py2 `str`
    # object on py2, and WE DO NOT BREAK BACKWARDS COMPATIBILITY.
    # Hindsight is 20/20.
    if PY2:
        filename = native(ToBytes(filename))
    else:
        filename = native(ToUnicode(filename))

    # For the sake of backwards compatibility, we need to first check whether
    # the FlagsForFile function in the extra conf module even allows keyword
    # args.
    if inspect.getargspec(module.FlagsForFile).keywords:
        return module.FlagsForFile(filename, client_data=client_data)
    else:
        return module.FlagsForFile(filename)
def __serialize_xml(node, serialize_hidden_attrs=False):
    # print "serializing: %r" % node

    # Special handling of pyparsing.ParseResults -- deserializing of
    # these won't work (easily)
    if isinstance(node, pyparsing.ParseResults):
        xml = util.parseresults_as_xml(node)
        return ET.XML(xml)

    # We use type() instead of isinstance() because we want to
    # serialize str derived types using their correct class
    # names. This is now more involved since under py2, str is now
    # really future.types.newstr.newstr.
    if type(node) == str or (hasattr(builtins, 'unicode') and  # means py2 + future
                             type(node) == builtins.unicode):
        nodename = "str"
    elif type(node) == bytes:
        nodename = "bytes"
    else:
        nodename = node.__class__.__name__
    e = ET.Element(nodename)
    if hasattr(node, '__dict__'):
        for key in [x for x in list(node.__dict__.keys())
                    if serialize_hidden_attrs or not x.startswith('_')]:
            val = node.__dict__[key]
            if val is None:
                continue
            if isinstance(val, (str, bytes)):
                e.set(key, native(val))
            elif isinstance(val, LayeredConfig):
                # FIXME: this is an ugly hack to avoid problems with
                # pdfreader.TextBox.font
                continue
            else:
                e.set(key, repr(val))

    if isinstance(node, str):
        if node:
            e.text = str(node)
    elif isinstance(node, bytes):
        if node:
            e.text = node.decode()
    elif isinstance(node, int):
        e.text = str(node)
    elif isinstance(node, list):
        for x in node:
            e.append(__serialize_xml(x))
    else:
        e.text = repr(node)
        # raise TypeError("Can't serialize %r (%r)" % (type(node), node))
    return e
def save(self, delete_zip_import=True, *args, **kwargs): """ If a zip file is uploaded, extract any images from it and add them to the gallery, before removing the zip file. """ super(Gallery, self).save(*args, **kwargs) if self.zip_import: zip_file = ZipFile(self.zip_import) for name in zip_file.namelist(): data = zip_file.read(name) try: from PIL import Image image = Image.open(BytesIO(data)) image.load() image = Image.open(BytesIO(data)) image.verify() except ImportError: pass except: continue name = os.path.split(name)[1] # This is a way of getting around the broken nature of # os.path.join on Python 2.x. See also the comment below. if isinstance(name, bytes): tempname = name.decode("utf-8") else: tempname = name # A gallery with a slug of "/" tries to extract files # to / on disk; see os.path.join docs. slug = self.slug if self.slug != "/" else "" path = os.path.join(GALLERIES_UPLOAD_DIR, slug, tempname) try: saved_path = default_storage.save(path, ContentFile(data)) except UnicodeEncodeError: from warnings import warn warn( "A file was saved that contains unicode " "characters in its path, but somehow the current " "locale does not support utf-8. You may need to set " "'LC_ALL' to a correct value, eg: 'en_US.UTF-8'." ) # The native() call is needed here around str because # os.path.join() in Python 2.x (in posixpath.py) # mixes byte-strings with unicode strings without # explicit conversion, which raises a TypeError as it # would on Python 3. path = os.path.join(GALLERIES_UPLOAD_DIR, slug, native(str(name, errors="ignore"))) saved_path = default_storage.save(path, ContentFile(data)) self.images.add(GalleryImage(file=saved_path)) if delete_zip_import: zip_file.close() self.zip_import.delete(save=True)
def do_GET(self):
    args = self.path.split("/")
    args = list(map(unquote, args))
    assert args.pop(0) == ""  # since path starts with a slash
    response = self.app.get_response(*args)
    self.send_response(response.status)
    for k, v in response.headers.items():
        self.send_header(k, str(v))
    self.end_headers()
    self.wfile.write(native(response.content))
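# A hedged sketch of how a handler like do_GET() above might be wired up with
# the standard library; `DemoHandler` and `app` are hypothetical names, not
# part of the original code:
#
#   from http.server import BaseHTTPRequestHandler, HTTPServer
#
#   class DemoHandler(BaseHTTPRequestHandler):
#       app = app            # any object exposing get_response(*args)
#       do_GET = do_GET
#
#   HTTPServer(("localhost", 8000), DemoHandler).serve_forever()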
def parse_movie(self, data, **kwargs):
    log.debug('Parsing movie: `%s` [options: %s]', data, kwargs)
    start = time.clock()
    guessit_options = self._guessit_options(kwargs)
    guessit_options['type'] = 'movie'
    # NOTE: Guessit expects str on PY3 and unicode on PY2 hence the use of
    # future.utils.native
    guess_result = guessit_api.guessit(native(data), options=guessit_options)
    parsed = GuessitParsedMovie(data, kwargs.pop('name', None), guess_result, **kwargs)
    end = time.clock()
    log.debug('Parsing result: %s (in %s ms)', parsed, (end - start) * 1000)
    return parsed
def __init__(self, segy_file, like=None):
    """
    Parameters
    ----------
    segy_file : str
        segy file path
    like : str, optional
        created segy file has the same dimensions as like.
    """
    self.segy_file = segy_file
    self.inDepth = False  # True if dataset Z is in Depth
    self.property_type = None

    if like is not None:
        if Path(native(like)).exists() and \
                not Path(native(self.segy_file)).exists():
            copyfile(src=like, dst=self.segy_file)

    if Path(native(self.segy_file)).exists():
        self._parse_segy()
    else:
        raise Exception("File does not exist!")
def next_pow_2(i):
    """
    Find the next power of two

    >>> int(next_pow_2(5))
    8
    >>> int(next_pow_2(250))
    256
    """
    # do not use NumPy here, math is much faster for single values
    buf = M.ceil(M.log(i) / M.log(2))
    return native(int(M.pow(2, buf)))
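# An equivalent integer-only formulation (an aside, not from the original
# module): for i >= 1, the next power of two is 1 << (i - 1).bit_length(),
# which sidesteps any float-rounding concerns with math.log:
assert 1 << (5 - 1).bit_length() == 8
assert 1 << (250 - 1).bit_length() == 256
assert 1 << (256 - 1).bit_length() == 256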
def save(self, delete_zip_import=True, *args, **kwargs): """ If a zip file is uploaded, extract any images from it and add them to the gallery, before removing the zip file. """ super(BaseGallery, self).save(*args, **kwargs) if self.zip_import: zip_file = ZipFile(self.zip_import) for name in zip_file.namelist(): data = zip_file.read(name) try: from PIL import Image image = Image.open(BytesIO(data)) image.load() image = Image.open(BytesIO(data)) image.verify() except ImportError: pass except: continue name = os.path.split(name)[1] # This is a way of getting around the broken nature of # os.path.join on Python 2.x. See also the comment below. if isinstance(name, bytes): encoding = charsetdetect(name)['encoding'] tempname = name.decode(encoding) else: tempname = name # A gallery with a slug of "/" tries to extract files # to / on disk; see os.path.join docs. slug = self.slug if self.slug != "/" else "" path = os.path.join(GALLERIES_UPLOAD_DIR, slug, tempname) try: saved_path = default_storage.save(path, ContentFile(data)) except UnicodeEncodeError: from warnings import warn warn("A file was saved that contains unicode " "characters in its path, but somehow the current " "locale does not support utf-8. You may need to set " "'LC_ALL' to a correct value, eg: 'en_US.UTF-8'.") # The native() call is needed here around str because # os.path.join() in Python 2.x (in posixpath.py) # mixes byte-strings with unicode strings without # explicit conversion, which raises a TypeError as it # would on Python 3. path = os.path.join(GALLERIES_UPLOAD_DIR, slug, native(str(name, errors="ignore"))) saved_path = default_storage.save(path, ContentFile(data)) self.images.add(GalleryImage(file=saved_path)) if delete_zip_import: zip_file.close() self.zip_import.delete(save=True)
def get_saml_logout_url(request):
    auth = _get_auth(request)
    # Redirect the user back to the application if the single sign-out URL
    # is None.
    logout_service_url = url_for('route_app', _external=True)
    if auth.get_slo_url() is None:
        return logout_service_url
    the_url = auth.logout(name_id=sess.get('SAML_NAME_ID'),
                          session_index=sess.get('SAML_SESSION_INDEX'),
                          return_to=logout_service_url)
    return native(the_url)
def _add_seismic(self):
    seis_dir = self.survey_dir / "Seismics"
    for seis_name in get_data_files(seis_dir):
        info_file = str(self.survey_dir.absolute() /
                        "Seismics" / "{}.seis".format(seis_name))
        data_path = None
        with open(info_file, "r") as fl:
            data_path = Path(native(json.load(fl)["path"]))
        if not data_path.is_absolute() and \
                data_path.name == str(data_path):
            data_path = self.survey_dir.absolute() / "Seismics" / data_path
        self.seismics[seis_name] = SeiSEGY.from_json(info_file, str(data_path))