def _make_writable(fname): """Make a file writable.""" os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128) # write
if not os.path.exists(user_dir + '/bin'):
    os.mkdir(user_dir + '/bin')

if os.path.exists(user_dir + '/bin/cadmium'):
    os.remove(user_dir + '/bin/cadmium')

if not os.path.exists(user_dir + '/bin/cadmium'):
    fd = open(user_dir + '/bin/cadmium', 'w')
    try:
        fd.write(cadmium_sh)
        fd.flush()
    finally:
        fd.close()

mode = os.stat(user_dir + '/bin/cadmium').st_mode
if stat.S_IMODE(mode) != 0755:
    os.chmod(user_dir + '/bin/cadmium', 0755)

if os.path.exists(user_dir + '/bin/cadmium-commit'):
    os.remove(user_dir + '/bin/cadmium-commit')

if not os.path.exists(user_dir + '/bin/cadmium-commit'):
    fd = open(user_dir + '/bin/cadmium-commit', 'w')
    try:
        fd.write(cadmium_commit)
        fd.flush()
    finally:
        fd.close()

mode = os.stat(user_dir + '/bin/cadmium-commit').st_mode
if stat.S_IMODE(mode) != 0755:
def build_conda_environment(conda_store, build):
    """Build a conda environment with set uid/gid/and permissions and
    symlink the build to a named environment
    """
    set_build_started(conda_store, build)

    conda_prefix = build.build_path(conda_store)
    os.makedirs(os.path.dirname(conda_prefix), exist_ok=True)

    environment_prefix = build.environment_path(conda_store)
    os.makedirs(os.path.dirname(environment_prefix), exist_ok=True)

    conda_store.log.info(f"building conda environment={conda_prefix}")

    try:
        with utils.timer(conda_store.log, f"building {conda_prefix}"):
            with tempfile.TemporaryDirectory() as tmpdir:
                tmp_environment_filename = os.path.join(tmpdir, "environment.yaml")
                with open(tmp_environment_filename, "w") as f:
                    yaml.dump(build.specification.spec, f)

                if conda_store.serialize_builds:
                    with filelock.FileLock(
                        os.path.join(tempfile.tempdir, "conda-store.lock")
                    ):
                        output = build_environment(
                            conda_store.conda_command,
                            tmp_environment_filename,
                            conda_prefix,
                        )
                else:
                    output = build_environment(
                        conda_store.conda_command,
                        tmp_environment_filename,
                        conda_prefix,
                    )

        utils.symlink(conda_prefix, environment_prefix)

        # modify permissions, uid, gid if they do not match
        stat_info = os.stat(conda_prefix)
        permissions = conda_store.default_permissions
        uid = conda_store.default_uid
        gid = conda_store.default_gid

        if permissions is not None and oct(stat.S_IMODE(stat_info.st_mode))[-3:] != str(
            permissions
        ):
            conda_store.log.info(
                f"modifying permissions of {conda_prefix} to permissions={permissions}"
            )
            with utils.timer(conda_store.log, f"chmod of {conda_prefix}"):
                utils.chmod(conda_prefix, permissions)

        if (
            uid is not None
            and gid is not None
            and (str(uid) != str(stat_info.st_uid) or str(gid) != str(stat_info.st_gid))
        ):
            conda_store.log.info(
                f"modifying permissions of {conda_prefix} to uid={uid} and gid={gid}"
            )
            with utils.timer(conda_store.log, f"chown of {conda_prefix}"):
                utils.chown(conda_prefix, uid, gid)

        packages = conda.conda_prefix_packages(conda_prefix)
        build.size = utils.disk_usage(conda_prefix)

        set_build_completed(conda_store, build, output.encode("utf-8"), packages)
    except subprocess.CalledProcessError as e:
        conda_store.log.exception(e)
        set_build_failed(conda_store, build, e.output.encode("utf-8"))
        raise e
    except Exception as e:
        conda_store.log.exception(e)
        set_build_failed(conda_store, build, traceback.format_exc().encode("utf-8"))
        raise e
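A small illustrative check of the permission comparison used above (not part of the function itself): oct(stat.S_IMODE(...)) renders as a '0o'-prefixed string, so its last three characters are the familiar three-digit permission field that gets compared against str(permissions).

import stat

# e.g. a regular file with mode 755 (S_IFREG | 0o755)
assert oct(stat.S_IMODE(0o100755)) == '0o755'
assert oct(stat.S_IMODE(0o100755))[-3:] == '755'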
# Highly unlikely
os.unlink(tempPath)
raise plugin.SplatPluginError, "Failed to open output file: %s" % e

# Dump the XML
try:
    etree.doc.write(output, XML_ENCODING)
    output.close()
except Exception, e:
    os.unlink(tempPath)
    raise plugin.SplatPluginError, "Failed to write to output file: %s" % e

# Set permissions
try:
    fstat = os.stat(filePath)
    os.chmod(tempPath, stat.S_IMODE(fstat.st_mode))
    os.chown(tempPath, fstat.st_uid, fstat.st_gid)
except Exception, e:
    os.unlink(tempPath)
    raise plugin.SplatPluginError, "Failed to set output permissions: %s" % e

# Atomically replace the old file
try:
    os.rename(tempPath, filePath)
except Exception, e:
    os.unlink(tempPath)
    raise plugin.SplatPluginError, "Failed to rename output file: %s" % e

def _finishUsers(self):
    # Open up the OpenNMS user database.
    try:
def subtest_repair_file_permissions(self):
    try:
        # for this test we need to make sure the warning is NOT suppressed,
        # but set it back to current setting at the end
        original_suppress_warning_value = os.environ.get('OCI_CLI_SUPPRESS_FILE_PERMISSIONS_WARNING')
        os.environ['OCI_CLI_SUPPRESS_FILE_PERMISSIONS_WARNING'] = 'False'

        # capture stdout / stderr so we can validate warnings
        if oci_cli.cli_util.is_windows():
            with util.capture() as out:
                # create a temporary file and set some unnecessary permissions
                tmp = tempfile.NamedTemporaryFile()
                subprocess.check_output(
                    'icacls "{path}" /grant Everyone:F'.format(path=tmp.name),
                    stderr=subprocess.STDOUT)

                # warning should be emitted because permissions are too loose
                oci_cli.cli_util.FilePermissionChecker.warn_on_invalid_file_permissions(tmp.name)
                assert 'WARNING' in out[1].getvalue()

                # reset captured stderr
                out[1] = StringIO()

                result = self.invoke(['setup', 'repair-file-permissions', '--file', tmp.name])
                assert result.exit_code == 0

                # no warning should be emitted because we repaired the permissions
                oci_cli.cli_util.FilePermissionChecker.warn_on_invalid_file_permissions(tmp.name)
                assert 'WARNING' not in out[1].getvalue()
        else:
            with util.capture() as out:
                # create a temporary file and set some unnecessary permissions
                tmp = tempfile.NamedTemporaryFile()
                os.chmod(tmp.name, 509)  # octal 775

                # warning should be emitted because permissions are too loose
                oci_cli.cli_util.FilePermissionChecker.warn_on_invalid_file_permissions(tmp.name)
                assert 'WARNING' in out[1].getvalue()

                # reset captured stderr
                out[1] = StringIO()

                result = self.invoke(['setup', 'repair-file-permissions', '--file', tmp.name])
                assert result.exit_code == 0
                assert oct(stat.S_IMODE(os.lstat(tmp.name).st_mode)) == oct(384)  # 600

                # no warning should be emitted because we repaired the permissions
                oci_cli.cli_util.FilePermissionChecker.warn_on_invalid_file_permissions(tmp.name)
                assert 'WARNING' not in out[1].getvalue()

            with util.capture() as out:
                # validate that 400 file permissions are accepted as well
                os.chmod(tmp.name, 256)  # octal 400
                oci_cli.cli_util.FilePermissionChecker.warn_on_invalid_file_permissions(tmp.name)
                assert 'WARNING' not in out[1].getvalue()
    finally:
        if original_suppress_warning_value is None:
            del os.environ['OCI_CLI_SUPPRESS_FILE_PERMISSIONS_WARNING']
        else:
            os.environ['OCI_CLI_SUPPRESS_FILE_PERMISSIONS_WARNING'] = original_suppress_warning_value
def DoUnaryOp(op_id, s):
    # type: (Id_t, str) -> bool

    # Only use lstat if we're testing for a symlink.
    if op_id in (Id.BoolUnary_h, Id.BoolUnary_L):
        try:
            mode = posix.lstat(s).st_mode
        except OSError:
            # TODO: simple_test_builtin should this as status=2.
            #e_die("lstat() error: %s", e, word=node.child)
            return False
        return stat.S_ISLNK(mode)

    try:
        st = posix.stat(s)
    except OSError as e:
        # TODO: simple_test_builtin should this as status=2.
        # Problem: we really need errno, because test -f / is bad argument,
        # while test -f /nonexistent is a good argument but failed.  Gah.
        # ENOENT vs. ENAMETOOLONG.
        #e_die("stat() error: %s", e, word=node.child)
        return False
    mode = st.st_mode

    if op_id in (Id.BoolUnary_e, Id.BoolUnary_a):  # -a is alias for -e
        return True
    if op_id == Id.BoolUnary_f:
        return stat.S_ISREG(mode)
    if op_id == Id.BoolUnary_d:
        return stat.S_ISDIR(mode)
    if op_id == Id.BoolUnary_b:
        return stat.S_ISBLK(mode)
    if op_id == Id.BoolUnary_c:
        return stat.S_ISCHR(mode)
    if op_id == Id.BoolUnary_k:
        # need 'bool' for MyPy
        return bool(stat.S_IMODE(mode) & stat.S_ISVTX)
    if op_id == Id.BoolUnary_p:
        return stat.S_ISFIFO(mode)
    if op_id == Id.BoolUnary_S:
        return stat.S_ISSOCK(mode)

    if op_id == Id.BoolUnary_x:
        return posix.access(s, posix.X_OK_)
    if op_id == Id.BoolUnary_r:
        return posix.access(s, posix.R_OK_)
    if op_id == Id.BoolUnary_w:
        return posix.access(s, posix.W_OK_)

    if op_id == Id.BoolUnary_s:
        return st.st_size != 0

    if op_id == Id.BoolUnary_u:
        return bool(stat.S_IMODE(mode) & stat.S_ISUID)
    if op_id == Id.BoolUnary_g:
        return bool(stat.S_IMODE(mode) & stat.S_ISGID)

    if op_id == Id.BoolUnary_O:
        return st.st_uid == posix.geteuid()
    if op_id == Id.BoolUnary_G:
        return st.st_gid == posix.getegid()

    e_die("%s isn't implemented", ui.PrettyId(op_id))  # implicit location
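For context (not part of the function above): stat.S_IMODE() strips the file-type bits from st_mode and keeps only the permission, setuid/setgid, and sticky bits, which is why the -k, -u, and -g tests mask with it before checking S_ISVTX, S_ISUID, and S_ISGID.

import stat

m = 0o104755                      # a regular, setuid, rwxr-xr-x file (illustrative value)
assert stat.S_ISREG(m)            # type bits say "regular file"
assert stat.S_IMODE(m) == 0o4755  # type bits removed, permission field kept
assert stat.S_IMODE(m) & stat.S_ISUID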
def entry_pack_dir(gf, bn, mo, uid, gid):
    blen = len(bn)
    return struct.pack(_fmt_mkdir(blen),
                       uid, gid, gf, mo, bn,
                       stat.S_IMODE(mo), umask())
def _popen(self, command,
           stdin=None, stdout=None, stderr=None,
           close_fds=True):
    """Execute the given command in the sandbox using subprocess.Popen,
    assigning the corresponding standard file descriptors.

    command ([string]): executable filename and arguments of the command.
    stdin (int|None): a file descriptor.
    stdout (int|None): a file descriptor.
    stderr (int|None): a file descriptor.
    close_fds (bool): close all file descriptor before executing.

    return (Popen): popen object.

    """
    self.log = None
    self.exec_num += 1

    # We run a selection of commands without isolate, as they need
    # to create new files. This is safe because these commands do
    # not depend on the user input.
    if command[0] in IsolateSandbox.SECURE_COMMANDS:
        logger.debug("Executing non-securely: %s at %s",
                     pretty_print_cmdline(command), self.path)
        try:
            prev_permissions = stat.S_IMODE(os.stat(self.path).st_mode)
            os.chmod(self.path, 0o700)
            with io.open(self.relative_path(self.cmd_file), 'at') as cmds:
                cmds.write("%s\n" % (pretty_print_cmdline(command)))
            p = subprocess.Popen(command, cwd=self.path,
                                 stdin=stdin, stdout=stdout, stderr=stderr,
                                 close_fds=close_fds)
            os.chmod(self.path, prev_permissions)
            # For secure commands, we clear the output so that it
            # is not forwarded to the contestants. Secure commands
            # are "setup" commands, which should not fail or
            # provide information for the contestants.
            open(os.path.join(self.path, self.stdout_file), "w").close()
            open(os.path.join(self.path, self.stderr_file), "w").close()
            self._write_empty_run_log(self.exec_num)
        except OSError:
            logger.critical(
                "Failed to execute program in sandbox with command: %s",
                pretty_print_cmdline(command), exc_info=True)
            raise
        return p

    args = [self.box_exec] + self.build_box_options() + ["--"] + command
    logger.debug("Executing program in sandbox with command: `%s'.",
                 pretty_print_cmdline(args))
    # Temporarily allow writing new files.
    prev_permissions = stat.S_IMODE(os.stat(self.path).st_mode)
    os.chmod(self.path, 0o770)
    with io.open(self.relative_path(self.cmd_file), 'at') as commands:
        commands.write("%s\n" % (pretty_print_cmdline(args)))
    os.chmod(self.path, prev_permissions)
    try:
        p = subprocess.Popen(args,
                             stdin=stdin, stdout=stdout, stderr=stderr,
                             close_fds=close_fds)
    except OSError:
        logger.critical("Failed to execute program in sandbox "
                        "with command: %s",
                        pretty_print_cmdline(args), exc_info=True)
        raise

    return p
    'text/x-c',
    'text/x-component',
    'text/x-cross-domain-policy',
]

if len(sys.argv) > 1:
    compressdir = sys.argv[1]
else:
    compressdir = '.'

print('Brotli compressing files in %s...' % compressdir)

matches = []
for root, dirnames, basenames in os.walk(compressdir):
    for basename in basenames:
        # skip files that have already been gzipped
        if basename.endswith('.gz'):
            continue
        full_path = os.path.join(root, basename)
        mime_type = magic.from_file(full_path, mime=True)
        stinfo = os.stat(full_path)
        if mime_type in MIME_WHITELIST or stinfo.st_size == 0:
            compress_path = "%s.br" % full_path
            print("compressing %s" % full_path)
            subprocess.call(["brotli", "--", full_path])
            # set mtime and atime to uncompressed file's values
            os.utime(compress_path, (stinfo.st_atime, stinfo.st_mtime))
            # set file permissions to uncompressed file's values
            uncompressed_perms = stat.S_IMODE(os.lstat(full_path).st_mode)
            os.chmod(compress_path, uncompressed_perms)
def chpath_inplace(filename, is_text_file, old, new):
    """
    Returns True if any modifications were made, and False otherwise.
    """

    modified = False
    orig_stat = os.lstat(filename)
    try:
        f = io.open(filename, buffering=0, mode='r+b')
    except IOError:
        try:
            orig_mode = stat.S_IMODE(os.lstat(filename).st_mode)
        except OSError as e:
            sys.stderr.write('%s: %s\n' % (e, filename))
            return
        temp_mode = 0o200 | orig_mode
        os.chmod(filename, temp_mode)
        try:
            f = io.open(filename, buffering=0, mode='r+b')
        finally:
            os.chmod(filename, orig_mode)

    len_old = len(old)
    len_new = len(new)
    matched_byte_count = 0
    while True:
        in_byte = f.read(1)

        if not in_byte:
            break

        if in_byte == old[matched_byte_count:matched_byte_count+1]:
            matched_byte_count += 1
            if matched_byte_count == len_old:
                modified = True
                matched_byte_count = 0
                end_position = f.tell()
                start_position = end_position - len_old
                if not is_text_file:
                    # search backwards for leading slashes written by
                    # a previous invocation of this tool
                    num_to_write = len_old
                    f.seek(start_position - 1)
                    while True:
                        if f.read(1) != b'/':
                            break
                        num_to_write += 1
                        f.seek(f.tell() - 2)

                    # pad with as many leading slashes as necessary
                    while num_to_write > len_new:
                        f.write(b'/')
                        num_to_write -= 1
                    f.write(new)
                else:
                    remainder = f.read()
                    f.seek(start_position)
                    f.write(new)
                    if remainder:
                        f.write(remainder)
                        f.truncate()
                        f.seek(start_position + len_new)

        elif matched_byte_count > 0:
            # back up and try to start a new match after
            # the first byte of the previous partial match
            f.seek(f.tell() - matched_byte_count)
            matched_byte_count = 0

    f.close()
    if modified:
        if sys.hexversion >= 0x3030000:
            orig_mtime = orig_stat.st_mtime_ns
            os.utime(filename, ns=(orig_mtime, orig_mtime))
        else:
            orig_mtime = orig_stat[stat.ST_MTIME]
            os.utime(filename, (orig_mtime, orig_mtime))
    return modified
logging.info("Wrote to " + path_dest) finally: new_file.close() except Exception, e: # An error occurred in writing, we should clean up # the tmp file if it exists, before re-raising try: fs.remove(path_dest) except: pass raise e # Try to match the permissions and ownership of the old file cur_stats = fs.stats(path) try: fs.chmod(path_dest, stat_module.S_IMODE(cur_stats['mode'])) except: logging.warn("Could not chmod new file %s to match old file %s" % ( path_dest, path), exc_info=True) # but not the end of the world - keep going try: fs.chown(path_dest, cur_stats['user'], cur_stats['group']) except: logging.warn("Could not chown new file %s to match old file %s" % ( path_dest, path), exc_info=True) # but not the end of the world - keep going # Now delete the old - nothing we can do here to recover fs.remove(path)
def mode(self, path):
    return stat.S_IMODE(os.stat(path).st_mode)
def _mknod(path, name, mode, major, minor):
    os.mknod(os.path.join(path, name),
             mode=(stat.S_IMODE(mode) | stat.S_IFCHR),
             device=os.makedev(major, minor))
def run(self): try: self.preload() success_paths = [] error_paths = [] operation_progress = { "total_done": False, "total": 0, "operation_done": False, "processed": 0 } source_path = self.source.get('path') target_path = self.target.get('path') if source_path is None: raise Exception("Source path empty") if target_path is None: raise Exception("Target path empty") source_path = self.get_abs_path(source_path) self.logger.info( "CopyToSftp process run source = %s , target = %s" % (source_path, target_path)) sftp = self.get_sftp_connection(self.target) t_total = threading.Thread(target=self.get_total, args=(operation_progress, self.paths)) t_total.start() t_progress = threading.Thread(target=update_progress, args=( self, operation_progress, )) t_progress.start() for path in self.paths: try: abs_path = self.get_abs_path(path) file_basename = os.path.basename(abs_path) if os.path.isdir(abs_path): destination = os.path.join(target_path, file_basename) st = os.stat(abs_path) if not sftp.exists(destination): sftp.makedirs(destination, stat.S_IMODE(st.st_mode)) elif self.overwrite and sftp.exists( destination) and not sftp.isdir(destination): sftp.remove(destination) sftp.makedirs(destination, stat.S_IMODE(st.st_mode)) elif self.overwrite and sftp.isdir(destination): sftp.sftp.chmod(destination, stat.S_IMODE(st.st_mode)) elif not self.overwrite and sftp.exists( destination) and not sftp.isdir(destination): raise Exception("destination is not a dir") else: pass operation_progress["processed"] += 1 for current, dirs, files in os.walk(abs_path): relative_root = os.path.relpath( current, source_path) for d in dirs: target_dir = os.path.join( target_path, relative_root, d) st = os.stat(os.path.join(current, d)) if not sftp.exists(target_dir): sftp.makedirs(target_dir, stat.S_IMODE(st.st_mode)) elif self.overwrite and sftp.exists( target_dir ) and not sftp.isdir(target_dir): sftp.remove(target_dir) sftp.makedirs(target_dir, stat.S_IMODE(st.st_mode)) elif self.overwrite and sftp.isdir(target_dir): sftp.sftp.chmod(destination, stat.S_IMODE(st.st_mode)) elif not self.overwrite and os.path.exists( target_dir ) and not sftp.isdir(target_dir): raise Exception("destination is not a dir") else: pass operation_progress["processed"] += 1 for f in files: source_file = os.path.join(current, f) target_file = os.path.join( target_path, relative_root, f) if not sftp.exists(target_file): sftp.sftp.put(source_file, target_file) elif self.overwrite and sftp.exists( target_file ) and not sftp.isdir(target_file): sftp.remove(target_file) sftp.sftp.put(source_file, target_file) elif self.overwrite and sftp.isdir( target_file): sftp.remove(target_file) sftp.sftp.put(source_file, target_file) else: pass operation_progress["processed"] += 1 elif os.path.isfile(abs_path): try: target_file = os.path.join(target_path, file_basename) if not sftp.exists(target_file): sftp.sftp.put(abs_path, target_file) elif self.overwrite and sftp.exists( target_file ) and not sftp.isdir(target_file): sftp.remove(target_file) sftp.sftp.put(abs_path, target_file) elif self.overwrite and sftp.isdir(target_file): sftp.rmtree(target_file) sftp.sftp.put(abs_path, target_file) else: pass operation_progress["processed"] += 1 except Exception as e: self.logger.info("Cannot copy file %s , %s" % (abs_path, str(e))) raise e finally: operation_progress["processed"] += 1 success_paths.append(path) except Exception as e: self.logger.error( "Error copy %s , error %s , %s" % (str(path), str(e), traceback.format_exc())) error_paths.append(path) 
operation_progress["operation_done"] = True result = {"success": success_paths, "errors": error_paths} # иначе пользователям кажется что скопировалось не полностью ) progress = { 'percent': round(float(len(success_paths)) / float(len(self.paths)), 2), 'text': str( int( round( float(len(success_paths)) / float(len(self.paths)), 2) * 100)) + '%' } time.sleep(REQUEST_DELAY) self.on_success(self.status_id, data=result, progress=progress, pid=self.pid, pname=self.name) except Exception as e: result = { "error": True, "message": str(e), "traceback": traceback.format_exc() } self.on_error(self.status_id, result, pid=self.pid, pname=self.name)
def addlicense(file, pre=None, post=None, start=None, end=None): if pre is None and post is None and start is None and end is None: root, ext = os.path.splitext(file) if ext == '.in': # special case: .in suffix doesn't count root, ext = os.path.splitext(root) if not suffixrules.has_key(ext): # no known suffix # see if file starts with #! (i.e. shell script) try: f = open(file) line = f.readline() f.close() except IOError: return if line[:2] == '#!': ext = '.sh' else: return pre, post, start, end = suffixrules[ext] if not pre: pre = '' if not post: post = '' if not start: start = '' if not end: end = '' try: f = open(file) except IOError: print >> sys.stderr, 'Cannot open file %s' % file return try: g = open(file + '.new', 'w') except IOError: print >> sys.stderr, 'Cannot create temp file %s.new' % file return data = f.read() res = re_copyright.search(data) if res is not None: pos = res.end(0) g.write(data[:pos]) g.write(start.rstrip() + '\n') else: f.seek(0) line = f.readline() addblank = False if line[:2] == '#!': # if file starts with #! command interpreter, keep the line there g.write(line) # add a blank line addblank = True line = f.readline() if line.find('-*-') >= 0: # if file starts with an Emacs mode specification, keep # the line there g.write(line) # add a blank line addblank = True line = f.readline() if line[:5] == '<?xml': # if line starts with an XML declaration, keep the line there g.write(line) # add a blank line addblank = True line = f.readline() if addblank: g.write('\n') if pre: g.write(pre + '\n') for l in license: if l[:1] == '\t' or (not l and (not end or end[:1] == '\t')): # if text after start begins with tab, remove spaces from start g.write(start.rstrip() + l + end + '\n') else: g.write(start + l + end + '\n') if res is not None: # copy rest of file g.write(data[pos:]) else: if post: g.write(post + '\n') # add empty line after license if line: g.write('\n') # but only one, so skip empty line from file, if any if line and line != '\n': g.write(line) # copy rest of file g.write(f.read()) f.close() g.close() try: st = os.stat(file) os.chmod(file + '.new', stat.S_IMODE(st.st_mode)) except OSError: pass try: os.rename(file, file + '~') # make backup except OSError: print >> sys.stderr, 'Cannot make backup for %s' % file return try: os.rename(file + '.new', file) except OSError: print >> sys.stderr, 'Cannot move file %s into position' % file
"(%(expected_id)d)'") % { "found_name": img.get_name_by_uid(lstat.st_uid, True), "found_id": lstat.st_uid, "expected_name": self.attrs["owner"], "expected_id": owner }) if group is not None and lstat.st_gid != group: errors.append(_("Group: '%(found_name)s " "(%(found_id)s)' should be '%(expected_name)s " "(%(expected_id)s)'") % { "found_name": img.get_name_by_gid(lstat.st_gid, True), "found_id": lstat.st_gid, "expected_name": self.attrs["group"], "expected_id": group }) if mode is not None and stat.S_IMODE(lstat.st_mode) != mode: errors.append(_("Mode: 0%(found).3o should be " "0%(expected).3o") % { "found": stat.S_IMODE(lstat.st_mode), "expected": mode }) return lstat, errors, warnings, info, abort def needsdata(self, orig, pkgplan): """Returns True if the action transition requires a datastream.""" return False def attrlist(self, name): """return list containing value of named attribute.""" value = self.attrs.get(name, []) if isinstance(value, list):
def _make_writable(fname):
    os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128)  # write
def test_init(self): for _ in xrange(NITERS): path, x_size, y_size = self.random_data() with FITSImage(path) as img: self.assertEqual(img.path, path) self.assertEqual(img.size, (x_size, y_size)) # IOError raised if we do not have permission to open the file... with self.random() as img: nonreadable_path = img.path mode = stat.S_IMODE(os.stat(nonreadable_path)[stat.ST_MODE]) mode ^= stat.S_IRUSR os.chmod(nonreadable_path, mode) with self.assertRaises(IOError): FITSImage(nonreadable_path) # ... or if it simply does not exist nonexistent_path = path self.assertFalse(os.path.exists(nonexistent_path)) with self.assertRaises(IOError): FITSImage(nonexistent_path) # NonStandardFITS raised if the FITS file does not conform to the FITS # standard. When this is the case, the 'SIMPLE' keyword is expected to # be False (as, according to the Standard, "A [logical constant] of F # signifies that the file does not conform to this standard"). nonstandard_path = self.random_data(SIMPLE=False)[0] with self.assertRaises(fitsimage.NonStandardFITS): FITSImage(nonstandard_path) os.unlink(nonstandard_path) # It may also happen that the 'SIMPLE' keyword does not exist nonstandard_path = self.random_data()[0] handler = pyfits.open(nonstandard_path, mode='update') del handler[0].header['SIMPLE'] handler.close(output_verify='ignore') # Ignore PyFITS <= 3.2 warning: "Error validating header for HDU #0 # (note: PyFITS uses zero-based indexing). Block does not begin with # SIMPLE or XTENSION. There may be extra bytes after the last HDU or # the file is corrupted". with warnings.catch_warnings(): msg = "(?s)Error validating header for .+ file is corrupted." warnings.filterwarnings('ignore', message=msg) with self.assertRaises(fitsimage.NonStandardFITS): FITSImage(nonstandard_path) os.unlink(nonstandard_path) # NonStandardFITS exception must also be raised when we try to open # anything that is not a FITS file. Among the countless kinds of file # types that could be used for this, try to open (a) an empty file... with tempfile.NamedTemporaryFile(suffix='.fits') as fd: empty_path = fd.name with self.assertRaises(fitsimage.NonStandardFITS): FITSImage(empty_path) # ... and (b) a non-empty text file. with tempfile.NamedTemporaryFile(suffix='.fits') as fd: fd.write("Lorem ipsum dolor sit amet,\n") fd.write("consectetur adipisicing elit\n") fd.flush() text_path = fd.name with self.assertRaises(fitsimage.NonStandardFITS): FITSImage(text_path)
def file_mode(name):
    """Returns mode for file name"""
    st = os.stat(name)
    return stat.S_IMODE(st.st_mode)
def putFile(self, local_name, remote_name, bufsize=1024, preserve=False): """ Transfer a file from the local side to the remote side. The 'bufsize' is the chunk size used to read/write the file and send across the connection. A LocalException will cause the connection to be closed, but other exceptions do not. If 'preserve' is True, the time stamp and permission bits are also copied. Returns the number of bulk bytes sent, which will be greater than the file size due to the data encoding used during transfer. """ try: fp = None exc = "Exception preparing for file put: " assert self.pssh != None, "connection lost or never started" local_name = os.path.expanduser( local_name ) mtime = atime = fmode = None if preserve: mtime = os.path.getmtime( local_name ) atime = os.path.getatime( local_name ) fmode = stat.S_IMODE( os.stat(local_name)[stat.ST_MODE] ) sz = os.path.getsize( local_name ) bufsize = max( 16, min( bufsize, 524288 ) ) n,r = int(sz/bufsize), sz%bufsize nreads = n if r > 0: nreads += 1 # check ability to open and read the file fp = open( local_name, 'rb' ) ; fp.close() ; fp = None os.write( self.send_pipe, _BYTES_( 'WOK:' + repr(remote_name) + '\n' ) ) ok = self._read_return() assert ok exc = "Exception during file read / pipe write: " fp = open( local_name, 'rb' ) msg = [ nreads, remote_name, mtime, atime, fmode ] os.write( self.send_pipe, _BYTES_( 'PUT:' + repr(msg) + '\n' ) ) i = 0 while i < n: #buf = binascii.b2a_hqx( binascii.rlecode_hqx( # fp.read( bufsize ) ) ) buf = binascii.b2a_hex( fp.read( bufsize ) ) os.write( self.send_pipe, _BYTES_( str(len(buf))+'\n' ) ) os.write( self.send_pipe, buf ) i += 1 if r > 0: #buf = binascii.b2a_hqx( binascii.rlecode_hqx( # fp.read( r ) ) ) buf = binascii.b2a_hex( fp.read( r ) ) os.write( self.send_pipe, _BYTES_( str(len(buf))+'\n' ) ) os.write( self.send_pipe, buf ) fp.close() ; fp = None rtn = self._read_return() except RemoteException: self._cancelTimer() if fp != None: fp.close() raise except: self._cancelTimer() sio = class_StringIO() traceback.print_exc( file=sio ) if fp != None: fp.close() self.shutdown() raise LocalException( exc + sio.getvalue() ) self._cancelTimer() return rtn
def checkPermissions(path, mode):
    assert stat.S_IMODE(os.stat(path).st_mode) == mode
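A hypothetical usage sketch for the assertion helper above (file name and mode chosen only for illustration):

import os

with open('/tmp/example.conf', 'w') as fh:
    fh.write('demo\n')
os.chmod('/tmp/example.conf', 0o644)
checkPermissions('/tmp/example.conf', 0o644)  # passes: only the permission bits are compared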
def make_nonwritable(path: Pathable) -> None:
    os.chmod(
        path,
        stat.S_IMODE(os.lstat(path).st_mode)
        & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH),
    )
def is_unarchived(self): # BSD unzip doesn't support zipinfo listings with timestamp. cmd = [self.zipinfocmd_path, '-T', '-s', self.src] if self.excludes: cmd.extend([ '-x', ] + self.excludes) rc, out, err = self.module.run_command(cmd) old_out = out diff = '' out = '' if rc == 0: unarchived = True else: unarchived = False # Get some information related to user/group ownership umask = os.umask(0) os.umask(umask) systemtype = platform.system() # Get current user and group information groups = os.getgroups() run_uid = os.getuid() run_gid = os.getgid() try: run_owner = pwd.getpwuid(run_uid).pw_name except: run_owner = run_uid try: run_group = grp.getgrgid(run_gid).gr_name except: run_group = run_gid # Get future user ownership fut_owner = fut_uid = None if self.file_args['owner']: try: tpw = pwd.getpwname(self.file_args['owner']) except: try: tpw = pwd.getpwuid(self.file_args['owner']) except: tpw = pwd.getpwuid(run_uid) fut_owner = tpw.pw_name fut_uid = tpw.pw_uid else: try: fut_owner = run_owner except: pass fut_uid = run_uid # Get future group ownership fut_group = fut_gid = None if self.file_args['group']: try: tgr = grp.getgrnam(self.file_args['group']) except: try: tgr = grp.getgrgid(self.file_args['group']) except: tgr = grp.getgrgid(run_gid) fut_group = tgr.gr_name fut_gid = tgr.gr_gid else: try: fut_group = run_group except: pass fut_gid = run_gid for line in old_out.splitlines(): change = False pcs = line.split(None, 7) if len(pcs) != 8: # Too few fields... probably a piece of the header or footer continue # Check first and seventh field in order to skip header/footer if len(pcs[0]) != 7 and len(pcs[0]) != 10: continue if len(pcs[6]) != 15: continue # Possible entries: # -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660 # -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs # -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF # --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr if pcs[0][0] not in 'dl-?' or not frozenset( pcs[0][1:]).issubset('rwxstah-'): continue ztype = pcs[0][0] permstr = pcs[0][1:] version = pcs[1] ostype = pcs[2] size = int(pcs[3]) path = to_text(pcs[7], errors='surrogate_or_strict') # Skip excluded files if path in self.excludes: out += 'Path %s is excluded on request\n' % path continue # Itemized change requires L for symlink if path[-1] == '/': if ztype != 'd': err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % ( path, ztype) ftype = 'd' elif ztype == 'l': ftype = 'L' elif ztype == '-': ftype = 'f' elif ztype == '?': ftype = 'f' # Some files may be storing FAT permissions, not Unix permissions # For FAT permissions, we will use a base permissions set of 777 if the item is a directory or has the execute bit set. Otherwise, 666. # This permission will then be modified by the system UMask. # BSD always applies the Umask, even to Unix permissions. # For Unix style permissions on Linux or Mac, we want to use them directly. # So we set the UMask for this file to zero. 
That permission set will then be unchanged when calling _permstr_to_octal if len(permstr) == 6: if path[-1] == '/': permstr = 'rwxrwxrwx' elif permstr == 'rwx---': permstr = 'rwxrwxrwx' else: permstr = 'rw-rw-rw-' file_umask = umask elif 'bsd' in systemtype.lower(): file_umask = umask else: file_umask = 0 # Test string conformity if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr): raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr) # DEBUG # err += "%s%s %10d %s\n" % (ztype, permstr, size, path) dest = os.path.join(self.dest, path) try: st = os.lstat(dest) except: change = True self.includes.append(path) err += 'Path %s is missing\n' % path diff += '>%s++++++.?? %s\n' % (ftype, path) continue # Compare file types if ftype == 'd' and not stat.S_ISDIR(st.st_mode): change = True self.includes.append(path) err += 'File %s already exists, but not as a directory\n' % path diff += 'c%s++++++.?? %s\n' % (ftype, path) continue if ftype == 'f' and not stat.S_ISREG(st.st_mode): change = True unarchived = False self.includes.append(path) err += 'Directory %s already exists, but not as a regular file\n' % path diff += 'c%s++++++.?? %s\n' % (ftype, path) continue if ftype == 'L' and not stat.S_ISLNK(st.st_mode): change = True self.includes.append(path) err += 'Directory %s already exists, but not as a symlink\n' % path diff += 'c%s++++++.?? %s\n' % (ftype, path) continue itemized = list('.%s.......??' % ftype) # Note: this timestamp calculation has a rounding error # somewhere... unzip and this timestamp can be one second off # When that happens, we report a change and re-unzip the file dt_object = datetime.datetime( *(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6])) timestamp = time.mktime(dt_object.timetuple()) # Compare file timestamps if stat.S_ISREG(st.st_mode): if self.module.params['keep_newer']: if timestamp > st.st_mtime: change = True self.includes.append(path) err += 'File %s is older, replacing file\n' % path itemized[4] = 't' elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime: # Add to excluded files, ignore other changes out += 'File %s is newer, excluding file\n' % path self.excludes.append(path) continue else: if timestamp != st.st_mtime: change = True self.includes.append(path) err += 'File %s differs in mtime (%f vs %f)\n' % ( path, timestamp, st.st_mtime) itemized[4] = 't' # Compare file sizes if stat.S_ISREG(st.st_mode) and size != st.st_size: change = True err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size) itemized[3] = 's' # Compare file checksums if stat.S_ISREG(st.st_mode): crc = crc32(dest) if crc != self._crc32(path): change = True err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % ( path, self._crc32(path), crc) itemized[2] = 'c' # Compare file permissions # Do not handle permissions of symlinks if ftype != 'L': # Use the new mode provided with the action, if there is one if self.file_args['mode']: if isinstance(self.file_args['mode'], int): mode = self.file_args['mode'] else: try: mode = int(self.file_args['mode'], 8) except Exception as e: try: mode = AnsibleModule._symbolic_mode_to_octal( st, self.file_args['mode']) except ValueError as e: self.module.fail_json( path=path, msg="%s" % to_native(e), exception=traceback.format_exc()) # Only special files require no umask-handling elif ztype == '?': mode = self._permstr_to_octal(permstr, 0) else: mode = self._permstr_to_octal(permstr, file_umask) if mode != stat.S_IMODE(st.st_mode): change = True itemized[5] = 'p' err += 'Path %s differs in permissions (%o vs 
%o)\n' % ( path, mode, stat.S_IMODE(st.st_mode)) # Compare file user ownership owner = uid = None try: owner = pwd.getpwuid(st.st_uid).pw_name except: uid = st.st_uid # If we are not root and requested owner is not our user, fail if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid): raise UnarchiveError( 'Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner)) if owner and owner != fut_owner: change = True err += 'Path %s is owned by user %s, not by user %s as expected\n' % ( path, owner, fut_owner) itemized[6] = 'o' elif uid and uid != fut_uid: change = True err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % ( path, uid, fut_uid) itemized[6] = 'o' # Compare file group ownership group = gid = None try: group = grp.getgrgid(st.st_gid).gr_name except: gid = st.st_gid if run_uid != 0 and fut_gid not in groups: raise UnarchiveError( 'Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner)) if group and group != fut_group: change = True err += 'Path %s is owned by group %s, not by group %s as expected\n' % ( path, group, fut_group) itemized[6] = 'g' elif gid and gid != fut_gid: change = True err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % ( path, gid, fut_gid) itemized[6] = 'g' # Register changed files and finalize diff output if change: if path not in self.includes: self.includes.append(path) diff += '%s %s\n' % (''.join(itemized), path) if self.includes: unarchived = False # DEBUG # out = old_out + out return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
def make_writable(path: Pathable) -> None:
    os.chmod(path, stat.S_IMODE(os.lstat(path).st_mode) | stat.S_IWUSR)
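A hypothetical context manager built on the two helpers above (make_nonwritable / make_writable), sketched here only to show a typical pairing; the name read_only is illustrative:

import contextlib

@contextlib.contextmanager
def read_only(path):
    # Temporarily drop all write bits, then restore owner write afterwards.
    make_nonwritable(path)
    try:
        yield path
    finally:
        make_writable(path)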
def check(self, test): """ """ success = 1 [testname, args] = self.rule.split("=") if testname[0] == "!": self.false = 1 testname = testname[1:] [kind, case] = testname.split("_") if "|" in args: [key, value] = args.split("|", 1) else: [key, value] = [args, None] if kind == "PACMAN": if case == "RETCODE": if test.retcode != int(key): success = 0 elif case == "OUTPUT": logfile = os.path.join(test.root, util.LOGFILE) if not os.access(logfile, os.F_OK): tap.diag( "LOGFILE not found, cannot validate 'OUTPUT' rule") success = 0 elif not util.grep(logfile, key): success = 0 else: tap.diag("PACMAN rule '%s' not found" % case) success = -1 elif kind == "PKG": localdb = test.db["local"] newpkg = localdb.db_read(key) if not newpkg: success = 0 else: if case == "EXIST": success = 1 elif case == "VERSION": if value != newpkg.version: success = 0 elif case == "DESC": if value != newpkg.desc: success = 0 elif case == "GROUPS": if not value in newpkg.groups: success = 0 elif case == "PROVIDES": if not value in newpkg.provides: success = 0 elif case == "DEPENDS": if not value in newpkg.depends: success = 0 elif case == "OPTDEPENDS": success = 0 for optdep in newpkg.optdepends: if value == optdep.split(':', 1)[0]: success = 1 break elif case == "REASON": if newpkg.reason != int(value): success = 0 elif case == "FILES": if not value in newpkg.files: success = 0 elif case == "BACKUP": success = 0 for f in newpkg.backup: if f.startswith(value + "\t"): success = 1 break else: tap.diag("PKG rule '%s' not found" % case) success = -1 elif kind == "FILE": filename = os.path.join(test.root, key) if case == "EXIST": if not os.path.isfile(filename): success = 0 elif case == "EMPTY": if not (os.path.isfile(filename) and os.path.getsize(filename) == 0): success = 0 elif case == "CONTENTS": try: with open(filename, 'r') as f: success = f.read() == value except: success = 0 elif case == "MODIFIED": for f in test.files: if f.name == key: if not f.ismodified(): success = 0 break elif case == "MODE": if not os.path.isfile(filename): success = 0 else: mode = os.lstat(filename)[stat.ST_MODE] if int(value, 8) != stat.S_IMODE(mode): success = 0 elif case == "TYPE": if value == "dir": if not os.path.isdir(filename): success = 0 elif value == "file": if not os.path.isfile(filename): success = 0 elif value == "link": if not os.path.islink(filename): success = 0 elif case == "PACNEW": if not os.path.isfile("%s.pacnew" % filename): success = 0 elif case == "PACSAVE": if not os.path.isfile("%s.pacsave" % filename): success = 0 else: tap.diag("FILE rule '%s' not found" % case) success = -1 elif kind == "DIR": filename = os.path.join(test.root, key) if case == "EXIST": if not os.path.isdir(filename): success = 0 else: tap.diag("DIR rule '%s' not found" % case) success = -1 elif kind == "LINK": filename = os.path.join(test.root, key) if case == "EXIST": if not os.path.islink(filename): success = 0 else: tap.diag("LINK rule '%s' not found" % case) success = -1 elif kind == "CACHE": cachedir = os.path.join(test.root, util.PM_CACHEDIR) if case == "EXISTS": pkg = test.findpkg(key, value, allow_local=True) if not pkg or not os.path.isfile( os.path.join(cachedir, pkg.filename())): success = 0 else: tap.diag("Rule kind '%s' not found" % kind) success = -1 if self.false and success != -1: success = not success self.result = success return success
def get_perm(path):
    return stat.S_IMODE(os.lstat(path).st_mode)
def test_keep_permissions(self):
    self.write_file('foo.py', 'def myfunc(): return 4\n')
    os.chmod(self.join('foo.py'), 0o755)
    slicker.make_fixes(['foo'], 'baz', project_root=self.tmpdir)
    self.assertEqual(0o755,
                     stat.S_IMODE(os.stat(self.join('baz.py')).st_mode))
def make_response_content(content_type, content_data): ret = [] if content_type not in anchore_engine.services.common.image_content_types: logger.warn("input content_type (" + str(content_type) + ") not supported (" + str(anchore_engine.services.common.image_content_types) + ")") return (ret) if not content_data: logger.warn( "empty content data given to format - returning empty result") return (ret) # type-specific formatting of content data if content_type == 'os': elkeys = ['license', 'origin', 'size', 'type', 'version'] for package in content_data.keys(): el = {} try: el['package'] = package for k in elkeys: if k in content_data[package]: el[k] = content_data[package][k] else: el[k] = None except: el = {} if el: ret.append(el) elif content_type == 'npm': for package in content_data.keys(): el = {} try: el['package'] = content_data[package]['name'] el['type'] = 'NPM' el['location'] = package el['version'] = content_data[package]['versions'][0] el['origin'] = ','.join( content_data[package]['origins']) or 'Unknown' el['license'] = ' '.join( content_data[package]['lics']) or 'Unknown' except: el = {} if el: ret.append(el) elif content_type == 'gem': for package in content_data.keys(): el = {} try: el['package'] = content_data[package]['name'] el['type'] = 'GEM' el['location'] = package el['version'] = content_data[package]['versions'][0] el['origin'] = ','.join( content_data[package]['origins']) or 'Unknown' el['license'] = ' '.join( content_data[package]['lics']) or 'Unknown' except: el = {} if el: ret.append(el) elif content_type == 'python': for package in content_data.keys(): el = {} try: el['package'] = content_data[package]['name'] el['type'] = 'PYTHON' el['location'] = content_data[package]['location'] el['version'] = content_data[package]['version'] el['origin'] = content_data[package]['origin'] or 'Unknown' el['license'] = content_data[package]['license'] or 'Unknown' except: el = {} if el: ret.append(el) elif content_type == 'java': for package in content_data.keys(): el = {} try: el['package'] = content_data[package]['name'] el['type'] = content_data[package]['type'].upper() el['location'] = content_data[package]['location'] el['specification-version'] = content_data[package][ 'specification-version'] el['implementation-version'] = content_data[package][ 'implementation-version'] el['origin'] = content_data[package]['origin'] or 'Unknown' #el['license'] = content_data[package]['license'] or 'Unknown' except: el = {} if el: ret.append(el) elif content_type == 'files': elmap = { 'linkdst': 'linkdest', 'size': 'size', 'mode': 'mode', 'sha256': 'sha256', 'type': 'type', 'uid': 'uid', 'gid': 'gid' } for filename in content_data.keys(): el = {} try: el['filename'] = filename for elkey in elmap.keys(): try: el[elmap[elkey]] = content_data[filename][elkey] except: #el.pop(elmap[elkey], None) el[elmap[elkey]] = None # special formatting el['mode'] = oct(stat.S_IMODE(el['mode'])) if el['sha256'] == 'DIRECTORY_OR_OTHER': el['sha256'] = None #el['mode'] = oct(stat.S_IMODE(content_data[filename]['mode'])) #el['linkdest'] = content_data[filename]['linkdst'] #el['sha256'] = content_data[filename]['sha256'] #el['size'] = content_data[filename]['size'] #el['type'] = content_data[filename]['type'] #el['uid'] = content_data[filesname]['uid'] #el['gid'] = content_data[filesname]['gid'] except Exception as err: el = {} if el: ret.append(el) else: ret = content_data return (ret)
def copymode(src, dst):
    """Copy mode bits from src to dst"""
    if hasattr(os, 'chmod'):
        st = os.stat(src)
        mode = stat.S_IMODE(st.st_mode)
        os.chmod(dst, mode)
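A minimal usage sketch for the helper above (file names are hypothetical): copy the file contents first, then mirror the permission bits onto the copy.

import shutil

shutil.copyfile('settings.cfg', 'settings.cfg.bak')  # contents only, no mode bits
copymode('settings.cfg', 'settings.cfg.bak')         # then copy the permission bits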
def is_unarchived(self): cmd = '%s -ZT -s "%s"' % (self.cmd_path, self.src) if self.excludes: cmd += ' -x "' + '" "'.join(self.excludes) + '"' rc, out, err = self.module.run_command(cmd) old_out = out diff = '' out = '' if rc == 0: unarchived = True else: unarchived = False # Get some information related to user/group ownership umask = os.umask(0) os.umask(umask) # Get current user and group information groups = os.getgroups() run_uid = os.getuid() run_gid = os.getgid() try: run_owner = pwd.getpwuid(run_uid).pw_name except: run_owner = run_uid try: run_group = grp.getgrgid(run_gid).gr_name except: run_group = run_gid # Get future user ownership fut_owner = fut_uid = None if self.file_args['owner']: try: tpw = pwd.getpwname(self.file_args['owner']) except: try: tpw = pwd.getpwuid(self.file_args['owner']) except: tpw = pwd.getpwuid(run_uid) fut_owner = tpw.pw_name fut_uid = tpw.pw_uid else: try: fut_owner = run_owner except: pass fut_uid = run_uid # Get future group ownership fut_group = fut_gid = None if self.file_args['group']: try: tgr = grp.getgrnam(self.file_args['group']) except: try: tgr = grp.getgrgid(self.file_args['group']) except: tgr = grp.getgrgid(run_gid) fut_group = tgr.gr_name fut_gid = tgr.gr_gid else: try: fut_group = run_group except: pass fut_gid = run_gid for line in old_out.splitlines(): change = False pcs = line.split(None, 7) if len(pcs) != 8: # Too few fields... probably a piece of the header or footer continue # Check first and seventh field in order to skip header/footer if len(pcs[0]) != 7 and len(pcs[0]) != 10: continue if len(pcs[6]) != 15: continue if pcs[0][0] not in 'dl-?' or not frozenset( pcs[0][1:]).issubset('rwxst-'): continue ztype = pcs[0][0] permstr = pcs[0][1:] version = pcs[1] ostype = pcs[2] size = int(pcs[3]) path = pcs[7] # Skip excluded files if path in self.excludes: out += 'Path %s is excluded on request\n' % path continue # Itemized change requires L for symlink if path[-1] == '/': if ztype != 'd': err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % ( path, ztype) ftype = 'd' elif ztype == 'l': ftype = 'L' elif ztype == '-': ftype = 'f' elif ztype == '?': ftype = 'f' # Some files may be storing FAT permissions, not Unix permissions if len(permstr) == 6: if path[-1] == '/': permstr = 'rwxrwxrwx' elif permstr == 'rwx---': permstr = 'rwxrwxrwx' else: permstr = 'rw-rw-rw-' # Test string conformity if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr): raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr) # DEBUG # err += "%s%s %10d %s\n" % (ztype, permstr, size, path) dest = os.path.join(self.dest, path) try: st = os.lstat(dest) except: change = True self.includes.append(path) err += 'Path %s is missing\n' % path diff += '>%s++++++.?? %s\n' % (ftype, path) continue # Compare file types if ftype == 'd' and not stat.S_ISDIR(st.st_mode): change = True self.includes.append(path) err += 'File %s already exists, but not as a directory\n' % path diff += 'c%s++++++.?? %s\n' % (ftype, path) continue if ftype == 'f' and not stat.S_ISREG(st.st_mode): change = True unarchived = False self.includes.append(path) err += 'Directory %s already exists, but not as a regular file\n' % path diff += 'c%s++++++.?? %s\n' % (ftype, path) continue if ftype == 'L' and not stat.S_ISLNK(st.st_mode): change = True self.includes.append(path) err += 'Directory %s already exists, but not as a symlink\n' % path diff += 'c%s++++++.?? %s\n' % (ftype, path) continue itemized = list('.%s.......??' 
% ftype) # Note: this timestamp calculation has a rounding error # somewhere... unzip and this timestamp can be one second off # When that happens, we report a change and re-unzip the file dt_object = datetime.datetime( *(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6])) timestamp = time.mktime(dt_object.timetuple()) # Compare file timestamps if stat.S_ISREG(st.st_mode): if self.module.params['keep_newer']: if timestamp > st.st_mtime: change = True self.includes.append(path) err += 'File %s is older, replacing file\n' % path itemized[4] = 't' elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime: # Add to excluded files, ignore other changes out += 'File %s is newer, excluding file\n' % path continue else: if timestamp != st.st_mtime: change = True self.includes.append(path) err += 'File %s differs in mtime (%f vs %f)\n' % ( path, timestamp, st.st_mtime) itemized[4] = 't' # Compare file sizes if stat.S_ISREG(st.st_mode) and size != st.st_size: change = True err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size) itemized[3] = 's' # Compare file checksums if stat.S_ISREG(st.st_mode): crc = crc32(dest) if crc != self._crc32(path): change = True err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % ( path, self._crc32(path), crc) itemized[2] = 'c' # Compare file permissions # Do not handle permissions of symlinks if ftype != 'L': # Only special files require no umask-handling if ztype == '?': mode = self._permstr_to_octal(permstr, 0) else: mode = self._permstr_to_octal(permstr, umask) if self.file_args['mode'] and self.file_args[ 'mode'] != stat.S_IMODE(st.st_mode): change = True err += 'Path %s differs in permissions (%o vs %o)\n' % ( path, self.file_args['mode'], stat.S_IMODE(st.st_mode)) itemized[5] = 'p' elif mode != stat.S_IMODE(st.st_mode): change = True itemized[5] = 'p' err += 'Path %s differs in permissions (%o vs %o)\n' % ( path, mode, stat.S_IMODE(st.st_mode)) # Compare file user ownership owner = uid = None try: owner = pwd.getpwuid(st.st_uid).pw_name except: uid = st.st_uid # If we are not root and requested owner is not our user, fail if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid): raise UnarchiveError( 'Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner)) if owner and owner != fut_owner: change = True err += 'Path %s is owned by user %s, not by user %s as expected\n' % ( path, owner, fut_owner) itemized[6] = 'o' elif uid and uid != fut_uid: change = True err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % ( path, uid, fut_uid) itemized[6] = 'o' # Compare file group ownership group = gid = None try: group = grp.getgrgid(st.st_gid).gr_name except: gid = st.st_gid if run_uid != 0 and fut_gid not in groups: raise UnarchiveError( 'Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner)) if group and group != fut_group: change = True err += 'Path %s is owned by group %s, not by group %s as expected\n' % ( path, group, fut_group) itemized[6] = 'g' elif gid and gid != fut_gid: change = True err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % ( path, gid, fut_gid) itemized[6] = 'g' # Register changed files and finalize diff output if change: if path not in self.includes: self.includes.append(path) diff += '%s %s\n' % (''.join(itemized), path) if self.includes: unarchived = False # DEBUG # out = old_out + out return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)