import base64
import bz2
import os
import stat


def file_script(filename, tar_filename, execute):
    if not os.path.isfile(filename):
        return ""
    with open(filename, 'rb') as fd:
        compressed = base64.b64encode(bz2.compress(fd.read()))
    return ("extract('{}', {}, {})\n").format(
        tar_filename, oct(os.stat(filename)[stat.ST_MODE]), compressed)

def _put(self, source_path, remote_filename):
    # remote_filename is a byte object, not str or unicode
    remote_filename = util.fsdecode(remote_filename)
    if self.use_scp:
        f = open(source_path.name, u'rb')
        try:
            chan = self.client.get_transport().open_session()
            chan.settimeout(config.timeout)
            # scp in sink mode uses the arg as base directory
            chan.exec_command(u"scp -t '%s'" % self.remote_dir)
        except Exception as e:
            raise BackendException(u"scp execution failed: %s" % e)
        # scp protocol: one 0x0 after startup, one after the Create meta,
        # one after saving. If there's a problem: 0x1 or 0x02 and some
        # error text.
        response = chan.recv(1)
        if response != b"\0":
            raise BackendException(u"scp remote error: %s" % chan.recv(-1))
        fstat = os.stat(source_path.name)
        chan.send(u'C%s %d %s\n' % (oct(fstat.st_mode)[-4:], fstat.st_size,
                                    remote_filename))
        response = chan.recv(1)
        if response != b"\0":
            raise BackendException(u"scp remote error: %s" % chan.recv(-1))
        chan.sendall(f.read() + b'\0')
        f.close()
        response = chan.recv(1)
        if response != b"\0":
            raise BackendException(u"scp remote error: %s" % chan.recv(-1))
        chan.close()
    else:
        self.sftp.put(source_path.name, remote_filename)

import os
import stat


def check_permissions(filehandle, permission, pass_stronger=False):
    info = os.stat(filehandle)
    filepermission = oct(info[stat.ST_MODE] & 0o777)
    if pass_stronger:
        # String comparison works here because both sides are equal-length
        # oct() strings, so lexicographic order matches numeric order.
        assert filepermission <= permission, "file's permissions are too weak"
    else:
        assert filepermission == permission, \
            "file does not have the correct permissions"

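# A minimal usage sketch for check_permissions() above; the temp-file path is
# hypothetical. The permission argument must be the same oct()-string form the
# function computes internally (e.g. '0o600' on Python 3).
def _demo_check_permissions(tmp_file='/tmp/perm_demo.txt'):
    with open(tmp_file, 'w') as fd:
        fd.write('demo')
    os.chmod(tmp_file, 0o600)                # owner read/write only
    check_permissions(tmp_file, oct(0o600))  # passes: '0o600' == '0o600'
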
def mode(self):
    # type: (LocalPath) -> str
    """Octal file mode
    :param LocalPath self: this
    :rtype: str
    :return: octal file mode
    """
    return str(oct(self._stat.st_mode))

def safe_octal(octal_value):
    """safe_octal(octal_value) -> octal value in string

    This correctly handles octal values specified as a string or as a numeric.
    """
    try:
        return oct(octal_value).replace('o', '')  # fix futurized octal value with 0o prefix
    except TypeError:
        return str(octal_value).replace('o', '')

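# Quick illustration of safe_octal() above: numeric input goes through oct()
# and loses the '0o' prefix; string input takes the TypeError fallback.
assert safe_octal(0o755) == '0755'    # oct(0o755) -> '0o755' -> '0755'
assert safe_octal('0o644') == '0644'  # str path strips the 'o' as well
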
def test_workspace_has_enough_permissions(self):
    reset = REMOTE_SAMPLE_DIR.set_for_testing(
        '/tmp/oozie_test_workspace_has_enough_permissions')
    try:
        resp = self.cli.get('/desktop/debug/check_config')
        assert_false('The permissions of workspace' in resp.content, resp)

        self.cluster.fs.mkdir(REMOTE_SAMPLE_DIR.get())
        assert_equal(
            oct(0o40755),
            oct(self.cluster.fs.stats(REMOTE_SAMPLE_DIR.get())["mode"]))
        resp = self.cli.get('/desktop/debug/check_config')
        assert_true('The permissions of workspace' in resp.content, resp)

        permissions_dict = {
            'group_read': True, 'other_execute': True, 'user_write': True,
            'user_execute': True, 'sticky': False, 'user_read': True,
            'other_read': True, 'other_write': True, 'group_write': False,
            'group_execute': True
        }
        kwargs = {'path': [REMOTE_SAMPLE_DIR.get()]}
        kwargs.update(permissions_dict)

        # Add write permission to Others
        response = self.cli.post("/filebrowser/chmod", kwargs)
        assert_equal(
            oct(0o40757),
            oct(self.cluster.fs.stats(REMOTE_SAMPLE_DIR.get())["mode"]))
        resp = self.cli.get('/desktop/debug/check_config')
        assert_false('The permissions of workspace' in resp.content, resp)
    finally:
        self.cluster.fs.rmdir(REMOTE_SAMPLE_DIR.get(), skip_trash=True)
        reset()

def test_files(host, file_path):
    file_p = file_path[0]
    try:
        file_u, file_g = file_path[1].split(':')
    except IndexError:
        file_u = None
        file_g = None
    try:
        file_m = oct(int(file_path[2], 8))
    except IndexError:
        file_m = None

    with host.sudo():
        assert host.file(file_p).exists
        assert host.file(file_p).is_file
        if file_u:
            assert host.file(file_p).user == file_u
        if file_g:
            assert host.file(file_p).group == file_g
        if file_m:
            assert oct(host.file(file_p).mode) == file_m

def test_files(host, file_path):
    file_p = file_path[0]
    try:
        file_u, file_g = file_path[1].split(':')
    except IndexError:
        file_u = None
        file_g = None
    try:
        file_m = oct(int(file_path[2], 8))
    except IndexError:
        file_m = None

    with host.sudo():
        assert host.file(file_p).exists
        assert host.file(file_p).is_file
        if file_p.split('/')[-1] == 'filebeat.yml':
            assert host.file(file_p).contains('/data/suricata/eve.json')
            assert host.file(file_p).contains('/data/fsf/rockout.log')
        if file_u:
            assert host.file(file_p).user == file_u
        if file_g:
            assert host.file(file_p).group == file_g
        if file_m:
            assert oct(host.file(file_p).mode) == file_m

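# A hedged sketch of how the file_path parameter used by the two test_files
# variants above might be supplied (pytest/testinfra style); the concrete
# paths, owners, and modes are illustrative assumptions, not from the source.
import pytest

@pytest.fixture(params=[
    ('/etc/filebeat/filebeat.yml', 'root:root', '644'),
    ('/etc/hosts', 'root:root'),  # no mode -> file_path[2] raises IndexError
])
def file_path(request):
    return request.param
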
def copyfile(self, src, dst, skip_header=False):
    sb = self._stats(src)
    if sb is None:
        raise IOError(errno.ENOENT, _("Copy src '%s' does not exist") % src)
    if sb.isDir:
        raise IOError(errno.EINVAL, _("Copy src '%s' is a directory") % src)
    if self.isdir(dst):
        raise IOError(errno.EINVAL, _("Copy dst '%s' is a directory") % dst)

    offset = 0

    while True:
        data = self.read(src, offset, self.get_upload_chuck_size())
        if skip_header:
            data = '\n'.join(data.splitlines())

        cnt = len(data)

        if offset == 0:
            if skip_header:
                n = data.index('\n')
                if n > 0:
                    data = data[n + 1:]
            self.create(dst,
                        overwrite=True,
                        blocksize=sb.blockSize,
                        replication=sb.replication,
                        permission=oct(stat.S_IMODE(sb.mode)),
                        data=data)
        else:
            self.append(dst, data)

        if cnt < self.get_upload_chuck_size():
            break

        offset += cnt

def owner_readwrite(file_):
    check_permissions(file_, oct(0o600))

import os


def get_permission(path):
    return oct(os.stat(path).st_mode)[-4:]

import builtins


def oct(number):
    """oct(number) -> string

    Return the octal representation of an integer
    """
    return '0' + builtins.oct(number)[2:]

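# The shim above restores the Python 2 spelling of octal literals:
# builtins.oct(8) returns '0o10' on Python 3, so stripping '0o' and
# prepending '0' yields the legacy '010' form.
assert oct(8) == '010'
assert oct(0o755) == '0755'
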
def owner_readwriteexec(file_):
    check_permissions(file_, oct(0o700))

class SphinxProxy(object):
    """
    Acts exactly like a normal instance of an object except that it will
    handle any special sphinx attributes in a `_sphinx` class.

    If there is no `sphinx` attribute on the instance, it will also add a
    proxy wrapper to `_sphinx` under that name as well.
    """
    __slots__ = ('__dict__', '__instance__', '_sphinx', 'sphinx')

    def __init__(self, instance, attributes):
        object.__setattr__(self, '__instance__', instance)
        object.__setattr__(self, '_sphinx', attributes)

    def _get_current_object(self):
        """
        Return the current object.  This is useful if you want the real object
        behind the proxy at a time for performance reasons or because you want
        to pass the object into a different context.
        """
        return self.__instance__
    _current_object = property(_get_current_object)

    def __dict__(self):
        try:
            return self._current_object.__dict__
        except RuntimeError:
            raise AttributeError('__dict__')
    __dict__ = property(__dict__)

    def __repr__(self):
        try:
            obj = self._current_object
        except RuntimeError:
            return '<%s unbound>' % self.__class__.__name__
        return repr(obj)

    def __bool__(self):
        try:
            return bool(self._current_object)
        except RuntimeError:
            return False

    def __unicode__(self):
        try:
            return six.text_type(self._current_object)
        except RuntimeError:
            return repr(self)

    def __dir__(self):
        try:
            return dir(self._current_object)
        except RuntimeError:
            return []

    # def __getattribute__(self, name):
    #     if not hasattr(self._current_object, 'sphinx') and name == 'sphinx':
    #         name = '_sphinx'
    #     if name == '_sphinx':
    #         return object.__getattribute__(self, name)
    #     print object.__getattribute__(self, '_current_object')
    #     return getattr(object.__getattribute__(self, '_current_object'), name)

    def __getattr__(self, name, value=UNDEFINED):
        if not hasattr(self._current_object, 'sphinx') and name == 'sphinx':
            name = '_sphinx'
        if name == '_sphinx':
            return getattr(self, '_sphinx', value)
        if value == UNDEFINED:
            return getattr(self._current_object, name)
        return getattr(self._current_object, name, value)

    def __setattr__(self, name, value):
        if name == '_sphinx':
            return object.__setattr__(self, '_sphinx', value)
        elif name == 'sphinx':
            if not hasattr(self._current_object, 'sphinx'):
                return object.__setattr__(self, '_sphinx', value)
        return setattr(self._current_object, name, value)

    def __setitem__(self, key, value):
        self._current_object[key] = value

    def __delitem__(self, key):
        del self._current_object[key]

    def __setslice__(self, i, j, seq):
        self._current_object[i:j] = seq

    def __delslice__(self, i, j):
        del self._current_object[i:j]

    __delattr__ = lambda x, n: delattr(x._current_object, n)
    __str__ = lambda x: str(x._current_object)
    __unicode__ = lambda x: six.text_type(x._current_object)
    __lt__ = lambda x, o: x._current_object < o
    __le__ = lambda x, o: x._current_object <= o
    __eq__ = lambda x, o: x._current_object == o
    __ne__ = lambda x, o: x._current_object != o
    __gt__ = lambda x, o: x._current_object > o
    __ge__ = lambda x, o: x._current_object >= o
    __cmp__ = lambda x, o: cmp(x._current_object, o)
    __hash__ = lambda x: hash(x._current_object)
    # attributes are currently not callable
    # __call__ = lambda x, *a, **kw: x._current_object(*a, **kw)
    __len__ = lambda x: len(x._current_object)
    __getitem__ = lambda x, i: x._current_object[i]
    __iter__ = lambda x: iter(x._current_object)
    __contains__ = lambda x, i: i in x._current_object
    __getslice__ = lambda x, i, j: x._current_object[i:j]
    __add__ = lambda x, o: x._current_object + o
    __sub__ = lambda x, o: x._current_object - o
    __mul__ = lambda x, o: x._current_object * o
    __floordiv__ = lambda x, o: x._current_object // o
    __mod__ = lambda x, o: x._current_object % o
    __divmod__ = lambda x, o: x._current_object.__divmod__(o)
    __pow__ = lambda x, o: x._current_object ** o
    __lshift__ = lambda x, o: x._current_object << o
    __rshift__ = lambda x, o: x._current_object >> o
    __and__ = lambda x, o: x._current_object & o
    __xor__ = lambda x, o: x._current_object ^ o
    __or__ = lambda x, o: x._current_object | o
    __div__ = lambda x, o: x._current_object.__div__(o)
    __truediv__ = lambda x, o: x._current_object.__truediv__(o)
    __neg__ = lambda x: -(x._current_object)
    __pos__ = lambda x: +(x._current_object)
    __abs__ = lambda x: abs(x._current_object)
    __invert__ = lambda x: ~(x._current_object)
    __complex__ = lambda x: complex(x._current_object)
    __int__ = lambda x: int(x._current_object)
    __long__ = lambda x: int(x._current_object)
    __float__ = lambda x: float(x._current_object)
    __oct__ = lambda x: oct(x._current_object)
    __hex__ = lambda x: hex(x._current_object)
    __index__ = lambda x: x._current_object.__index__()
    # Delegate to the proxied object; calling these on the proxy itself
    # would recurse forever.
    __coerce__ = lambda x, o: x._current_object.__coerce__(o)
    __enter__ = lambda x: x._current_object.__enter__()
    __exit__ = lambda x, *a, **kw: x._current_object.__exit__(*a, **kw)

def __unicode__(self):
    return "[WebHdfsStat] %7s %8s %8s %12s %s%s" % (
        oct(self.mode), self.user, self.group, self.size, self.path,
        self.isDir and '/' or "")

def config_validator(user):
    """
    config_validator() -> [(config_variable, error_message)]

    Called by core check_config() view.
    """
    from desktop.lib.fsmanager import get_filesystem
    from hadoop.cluster import get_all_hdfs
    from hadoop.fs.hadoopfs import Hdfs
    from liboozie.oozie_api import get_oozie

    res = []

    try:
        from oozie.conf import REMOTE_SAMPLE_DIR
    except Exception as e:
        LOG.warn('Config check failed because Oozie app not installed: %s' % e)
        return res

    if OOZIE_URL.get():
        status = get_oozie_status(user)
        if 'NORMAL' not in status:
            res.append((status, _('The Oozie server is not available')))

        fs = get_filesystem()
        NICE_NAME = 'Oozie'

        if fs.do_as_superuser(fs.exists, REMOTE_SAMPLE_DIR.get()):
            stats = fs.do_as_superuser(fs.stats, REMOTE_SAMPLE_DIR.get())
            mode = oct(stats.mode)
            # Flag the workspace if neither group nor others have the write bit.
            group_has_write = int(mode[-2]) & 2
            others_has_write = int(mode[-1]) & 2
            if not group_has_write and not others_has_write:
                res.append((NICE_NAME,
                            "The permissions of workspace '%s' are too restrictive"
                            % REMOTE_SAMPLE_DIR.get()))

        api = get_oozie(user, api_version="v2")

        configuration = api.get_configuration()
        if 'org.apache.oozie.service.MetricsInstrumentationService' in [
                c.strip() for c in configuration.get('oozie.services.ext', '').split(',')]:
            metrics = api.get_metrics()
            sharelib_url = 'gauges' in metrics \
                and 'libs.sharelib.system.libpath' in metrics['gauges'] \
                and [metrics['gauges']['libs.sharelib.system.libpath']['value']] or []
        else:
            instrumentation = api.get_instrumentation()
            sharelib_url = [
                param['value']
                for group in instrumentation['variables']
                for param in group['data']
                if param['name'] == 'sharelib.system.libpath'
            ]

        if sharelib_url:
            sharelib_url = Hdfs.urlsplit(sharelib_url[0])[2]

        if not sharelib_url:
            res.append((status, _('Oozie Share Lib path is not available')))

        class ConfigMock(object):
            def __init__(self, value):
                self.value = value

            def get(self):
                return self.value

            def get_fully_qualifying_key(self):
                return self.value

        for cluster in list(get_all_hdfs().values()):
            res.extend(validate_path(
                ConfigMock(sharelib_url),
                is_dir=True,
                fs=cluster,
                message=_('Oozie Share Lib not installed in default location.')))

    return res

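# Illustration of the digit-wise write-bit test used in config_validator()
# above: in the oct() string of an HDFS mode, mode[-2] is the group digit and
# mode[-1] the others digit, and bit 2 of each digit is the write bit.
mode = oct(0o40755)            # '0o40755'
assert int(mode[-2]) & 2 == 0  # group '5' (r-x): no write bit
assert int(mode[-1]) & 2 == 0  # others '5' (r-x): no write bit -> flagged
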
memoryview = functools.update_wrapper(
    lambda *args, **kwargs: builtins.memoryview(*args, **kwargs),
    builtins.memoryview)
memoryview._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.memoryview)(*args, **kwargs),
    builtins.memoryview)
min = functools.update_wrapper(
    lambda *args, **kwargs: builtins.min(*args, **kwargs), builtins.min)
min._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.min)(*args, **kwargs), builtins.min)
next = functools.update_wrapper(
    lambda *args, **kwargs: builtins.next(*args, **kwargs), builtins.next)
next._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.next)(*args, **kwargs), builtins.next)
oct = functools.update_wrapper(
    lambda *args, **kwargs: builtins.oct(*args, **kwargs), builtins.oct)
oct._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.oct)(*args, **kwargs), builtins.oct)
open = functools.update_wrapper(
    lambda *args, **kwargs: builtins.open(*args, **kwargs), builtins.open)
open._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.open)(*args, **kwargs), builtins.open)
ord = functools.update_wrapper(
    lambda *args, **kwargs: builtins.ord(*args, **kwargs), builtins.ord)
ord._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.ord)(*args, **kwargs), builtins.ord)
pow = functools.update_wrapper(
    lambda *args, **kwargs: builtins.pow(*args, **kwargs), builtins.pow)
pow._ = functools.update_wrapper(
    lambda *args, **kwargs: wrap(builtins.pow)(*args, **kwargs), builtins.pow)

def owner_readonly(file_):
    check_permissions(file_, oct(0o400))

def __oct__(self):  # oct()
    return oct(self.value)

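# Note: __oct__ (and __hex__) are only consulted by oct()/hex() on Python 2.
# On Python 3, oct() calls __index__ instead, so a wrapper class like the one
# above also needs, e.g.:
#
#     def __index__(self):
#         return self.value
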
def importer_submit(request):
    source = json.loads(request.POST.get('source', '{}'))
    outputFormat = json.loads(request.POST.get('destination', '{}'))['outputFormat']
    destination = json.loads(request.POST.get('destination', '{}'))
    destination['ouputFormat'] = outputFormat  # Workaround a very weird bug
    start_time = json.loads(request.POST.get('start_time', '-1'))

    if source['inputFormat'] == 'file':
        if source['path']:
            path = urllib_unquote(source['path'])
            source['path'] = request.fs.netnormpath(path)
            parent_path = request.fs.parent_path(path)
            stats = request.fs.stats(parent_path)
            split = urlparse(path)
            # Only for HDFS, import data and non-external table
            if split.scheme in ('', 'hdfs') and destination['importData'] \
                    and destination['useDefaultLocation'] \
                    and oct(stats["mode"])[-1] != '7' \
                    and not request.POST.get('show_command'):
                user_scratch_dir = request.fs.get_home_dir() + '/.scratchdir'
                request.fs.do_as_user(request.user, request.fs.mkdir,
                                      user_scratch_dir, 0o0777)
                request.fs.do_as_user(request.user, request.fs.rename,
                                      source['path'], user_scratch_dir)
                source['path'] = user_scratch_dir + '/' + source['path'].split('/')[-1]

    if destination['ouputFormat'] in ('database', 'table'):
        destination['nonDefaultLocation'] = request.fs.netnormpath(
            destination['nonDefaultLocation']) \
            if destination['nonDefaultLocation'] else destination['nonDefaultLocation']

    if destination['ouputFormat'] == 'index':
        source['columns'] = destination['columns']
        index_name = destination["name"]

        if destination['indexerRunJob'] or source['inputFormat'] == 'stream':
            _convert_format(source["format"], inverse=True)
            job_handle = _large_indexing(
                request, source, index_name,
                start_time=start_time,
                lib_path=destination['indexerJobLibPath'],
                destination=destination)
        else:
            client = SolrClient(request.user)
            job_handle = _small_indexing(request.user, request.fs, client,
                                         source, destination, index_name)
    elif source['inputFormat'] in ('stream', 'connector') \
            or destination['ouputFormat'] == 'stream':
        job_handle = _envelope_job(request, source, destination,
                                   start_time=start_time,
                                   lib_path=destination['indexerJobLibPath'])
    elif source['inputFormat'] == 'altus':
        # BDR copy or DistCP + DDL + Sentry DDL copy
        pass
    elif source['inputFormat'] == 'rdbms':
        if destination['outputFormat'] in ('database', 'file', 'table', 'hbase'):
            job_handle = run_sqoop(request, source, destination, start_time)
    elif destination['ouputFormat'] == 'database':
        job_handle = _create_database(request, source, destination, start_time)
    else:
        job_handle = _create_table(request, source, destination, start_time)

    request.audit = {
        'operation': 'EXPORT',
        'operationText': 'User %(username)s exported %(inputFormat)s to %(ouputFormat)s: %(name)s' % {
            'username': request.user.username,
            'inputFormat': source['inputFormat'],
            'ouputFormat': destination['ouputFormat'],
            'name': destination['name'],
        },
        'allowed': True
    }

    return JsonResponse(job_handle)

def unpack(archive_path,
           output_directory,
           trusted=False,
           file_match_callback=None):
    """Extracts an archive into the target directory."""
    if not os.path.exists(archive_path):
        logs.log_error('Archive %s not found.' % archive_path)
        return False

    # If the output directory is a symlink, get its actual path since we will
    # be doing directory traversal checks later when unpacking the archive.
    output_directory = os.path.realpath(output_directory)

    archive_filename = os.path.basename(archive_path)
    error_occurred = False

    # Choose to unpack all files or ones matching a particular regex.
    file_list = get_file_list(archive_path,
                              file_match_callback=file_match_callback)

    archive_file_unpack_count = 0
    archive_file_total_count = len(file_list)

    # If the archive is not trusted, do file path checks to make sure this
    # archive is safe and is not attempting to do path traversals.
    if not trusted:
        for filename in file_list:
            absolute_file_path = os.path.join(output_directory,
                                              os.path.normpath(filename))
            real_file_path = os.path.realpath(absolute_file_path)

            if real_file_path == output_directory:
                # Workaround for https://bugs.python.org/issue28488.
                # Ignore directories named '.'.
                continue

            if real_file_path != absolute_file_path:
                logs.log_error(
                    'Directory traversal attempted while unpacking archive %s '
                    '(file path=%s, actual file path=%s). Aborting.' %
                    (archive_path, absolute_file_path, real_file_path))
                return False

    archive_type = get_archive_type(archive_filename)

    # Extract based on file's extension.
    if archive_type == ArchiveType.ZIP:
        zip_file_handle = open(archive_path, 'rb')
        zip_archive = zipfile.ZipFile(zip_file_handle)

        for filename in file_list:
            try:
                extracted_path = zip_archive.extract(filename, output_directory)

                # Preserve permissions for regular files. 640 is the default
                # permission for extract. If we need execute permission, we
                # need to chmod it explicitly. Also, get rid of suid bit for
                # security reasons.
                external_attr = zip_archive.getinfo(filename).external_attr >> 16
                if oct(external_attr).startswith(FILE_ATTRIBUTE):
                    old_mode = external_attr & 0o7777
                    new_mode = external_attr & 0o777
                    new_mode |= 0o440
                    # Owner execute bit (0o100, not decimal 100).
                    needs_execute_permission = external_attr & 0o100

                    if new_mode != old_mode or needs_execute_permission:
                        # Default extract condition is 640 which is safe.
                        # |new_mode| might have read+write+execute bit for
                        # others, so remove those.
                        new_mode &= 0o770

                        os.chmod(extracted_path, new_mode)

                # Create symlink if needed (only on unix platforms).
                if (trusted and hasattr(os, 'symlink') and
                        oct(external_attr).startswith(SYMLINK_ATTRIBUTE)):
                    symlink_source = zip_archive.read(filename)
                    if os.path.exists(extracted_path):
                        os.remove(extracted_path)
                    os.symlink(symlink_source, extracted_path)

                # Keep heartbeat happy by updating with our progress.
                archive_file_unpack_count += 1
                if archive_file_unpack_count % 1000 == 0:
                    logs.log('Unpacked %d/%d.' % (archive_file_unpack_count,
                                                  archive_file_total_count))
            except:
                # In case of errors, we try to extract whatever we can
                # without errors.
                error_occurred = True
                continue

        logs.log('Unpacked %d/%d.' % (archive_file_unpack_count,
                                      archive_file_total_count))
        zip_archive.close()
        zip_file_handle.close()

        if error_occurred:
            logs.log_error('Failed to extract everything from archive %s.' %
                           archive_filename)

    elif archive_type == ArchiveType.TAR or archive_type == ArchiveType.TAR_LZMA:
        if archive_type == ArchiveType.TAR_LZMA:
            # Import lzma here so that if lzma installation fails (as it may
            # on Windows), other archives can still be opened.
            # TODO(metzman): Determine if this actually fails on Windows and
            # move this to the top of the file if it doesn't.
            from backports import lzma

            lzma_file = lzma.LZMAFile(archive_path)
            tar_archive = tarfile.open(fileobj=lzma_file)
        else:
            tar_archive = tarfile.open(archive_path)

        try:
            tar_archive.extractall(path=output_directory)
        except:
            # In case of errors, we try to extract whatever we can without
            # errors.
            error_occurred = True
            logs.log_error(
                'Failed to extract everything from archive %s, trying one at '
                'a time.' % archive_filename)
            for filename in file_list:
                try:
                    tar_archive.extract(filename, output_directory)
                except:
                    continue

                # Keep heartbeat happy by updating with our progress.
                archive_file_unpack_count += 1
                if archive_file_unpack_count % 1000 == 0:
                    logs.log('Unpacked %d/%d.' % (archive_file_unpack_count,
                                                  archive_file_total_count))

            logs.log('Unpacked %d/%d.' % (archive_file_unpack_count,
                                          archive_file_total_count))

        tar_archive.close()
        if archive_type == ArchiveType.TAR_LZMA:
            lzma_file.close()
    else:
        logs.log_error('Unsupported compression type for file %s.' %
                       archive_filename)
        return False

    return not error_occurred

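# Minimal usage sketch for unpack() above; the archive and output paths are
# hypothetical.
if not unpack('/tmp/build.zip', '/tmp/build_out', trusted=False):
    logs.log_error('Extraction reported errors.')
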
def copy(self, src, dest, recursive=False, dir_mode=None, owner=None):
    """
    Copy file, or directory, in HDFS to another location in HDFS.

    ``src`` -- The directory, or file, to copy from.
    ``dest`` -- the directory, or file, to copy to.
            If 'dest' is a directory that exists, copy 'src' into dest.
            If 'dest' is a file that exists and 'src' is a file, overwrite dest.
            If 'dest' does not exist, create 'src' as 'dest'.
    ``recursive`` -- Recursively copy contents of 'src' to 'dest'.
                     This is required for directories.
    ``dir_mode`` and ``owner`` are used to define permissions on the newly
    copied files and directories.

    This method will overwrite any pre-existing files that collide with what
    is being copied. Copying a directory to a file is not allowed.
    """
    if owner is None:
        owner = self.user

    # Hue was defaulting permissions on copying files to the permissions
    # of the original file, but was not doing the same for directories;
    # changed below so that directories remain consistent.
    if dir_mode is None:
        sb = self._stats(src)
        dir_mode = oct(stat.S_IMODE(sb.mode))

    src = self.strip_normpath(src)
    dest = self.strip_normpath(dest)

    if not self.exists(src):
        raise IOError(errno.ENOENT, _("File not found: %s") % src)

    if self.isdir(src):
        # 'src' is a directory.
        # Skip if not a recursive copy.
        if not recursive:
            LOG.debug("Skipping contents of %s" % src)
            return None

        # If 'dest' is a directory, change 'dest' to include 'src' basename.
        # Create 'dest' if it doesn't already exist.
        if self.exists(dest):
            if self.isdir(dest):
                dest = self.join(dest, self.basename(src))
            else:
                raise IOError(errno.EEXIST,
                              _("Destination file %s exists and is not a directory.") % dest)

        self.do_as_user(owner, self.mkdir, dest, mode=dir_mode)

        # Copy files in 'src' directory to 'dest'.
        self.copy_remote_dir(src, dest, dir_mode, owner)
    else:
        # 'src' is a file.
        # If 'dest' is a directory, copy 'src' into that directory.
        # Otherwise, copy to 'dest'.
        if self.exists(dest) and self.isdir(dest):
            self.copyfile(src, self.join(dest, self.basename(src)))
        else:
            self.copyfile(src, dest)

class ConfigOption(object):
    __slots__ = [
        'allowed_values', 'default', 'desc', 'label', 'parser', 'type', 'value'
    ]

    DEFAULT_TYPE = InputType.Str

    _convert_map = {
        InputType.NA: lambda x: x,
        InputType.Str: lambda x: "" if x is None else str(x),
        InputType.Int: lambda x: 0 if x is None else int(x),
        InputType.File: lambda x: "" if x is None else fullpath(x),
        InputType.Folder: lambda x: "" if x is None else os.path.dirname(fullpath(x)),
        InputType.Password: lambda x: "" if x is None else str(x),
        InputType.Bool: lambda x: boolean(x) if isinstance(x, str) else bool(x),
        InputType.Float: lambda x: 0.0 if x is None else float(x),
        InputType.Tristate: lambda x: x if x is None else bool(x),
        InputType.Octal: lambda x: 0 if x is None else oct(x),
        InputType.Size: lambda x: 0 if x is None else (-1 if str(x) == '-1' else bytesize(x)),
        InputType.Address: lambda x: (None, None) if x is None else (endpoint if isendpoint(x) else socket)(x),
        InputType.Bytes: lambda x: b"" if x is None else bytes(x),
        InputType.StrList: lambda l: [str(x) for x in l] if isiterable(l) else entries(l)
    }

    def __init__(self, parser, value, label=None, desc=None,
                 allowed_values=None, input_type=None):
        self.parser = parser

        self.type = None
        self.value = None
        self.default = None
        self.label = None
        self.desc = None
        self.allowed_values = ()

        self._set_type(input_type)
        self._set_value(value)
        self._set_allowed(allowed_values)
        self._set_info(label, desc)

    def _set_info(self, label, desc):
        self.label = "" if label is None else str(label)
        self.desc = "" if desc is None else str(desc)

    def _set_type(self, input_type):
        if not input_type:
            input_type = self.DEFAULT_TYPE
        if input_type not in InputType:
            raise InvalidValueError(input_type)
        self.type = input_type

    def _set_value(self, value):
        self.value = self.default = self._normalize_value(value)

    def _set_allowed(self, allowed):
        if not allowed:
            self.allowed_values = ()
            return
        self.allowed_values = tuple(self._normalize_value(v) for v in allowed)

    def _normalize_value(self, value):
        return self._convert_map[self.type](value)

    def reset(self):
        self.value = self.default

    def get(self):
        return self.value

    def get_default(self):
        return self.default

    def set(self, value, store=True):
        norm_value = self._normalize_value(value)
        if self.allowed_values and norm_value not in self.allowed_values:
            raise InvalidValueError(value)
        if self.value == norm_value:
            return None
        self.value = norm_value
        if store:
            self.parser.store()

def oct(x=None):
    if x is None:
        return bpipe(oct)
    else:
        return builtins.oct(x)