def unpack_from(self, block, offset=0):
    (self.magic, dev, self.ino, self.mode, self.uid, self.gid, self.nlink,
     rdev, mtimehigh, mtimelow, namesize, filesizehigh,
     filesizelow) = self.coder.unpack_from(block, offset)
    self.devmajor = os.major(dev)
    self.devminor = os.minor(dev)
    self.rdevmajor = os.major(rdev)
    self.rdevminor = os.minor(rdev)
    self.mtime = (mtimehigh << 16) | mtimelow
    self.filesize = (filesizehigh << 16) | filesizelow
    namestart = offset + self.coder.size
    datastart = namestart + namesize
    self.name = block[namestart:datastart - 1]  # drop the null
    if isinstance(self, CpioMemberBin) and (namesize & 1):
        datastart += 1  # skip a pad byte if necessary
    self.content = block[datastart:datastart + self.filesize]
    return self
def compare(self, src_path, dest_stat):
    '''see if devs are the same'''

    if not self.exists:
        return False

    # dest_stat is a SyncStat object and it's useless here
    # I need a real, fresh statbuf that includes st_rdev field
    try:
        dest_stat = os.lstat(self.name)
    except OSError as err:
        error('error checking %s : %s' % (self.name, err.strerror))
        return False

    src_major = os.major(self.src_stat.st_rdev)
    src_minor = os.minor(self.src_stat.st_rdev)
    dest_major = os.major(dest_stat.st_rdev)
    dest_minor = os.minor(dest_stat.st_rdev)
    if src_major != dest_major or src_minor != dest_minor:
        stdout('%s should have major,minor %d,%d but has %d,%d' %
               (self.name, src_major, src_minor, dest_major, dest_minor))
        unix_out('# updating major,minor %s' % self.name)
        terse(synctool.lib.TERSE_SYNC, self.name)
        return False

    return True
def unpack_from(self, block, offset=0):
    (self.magic, dev, ino, mode, uid, gid, nlink, rdev, mtime,
     namesize, filesize) = self.coder.unpack_from(block, offset)
    _namesize = namesize
    self.ino = int(ino, 8)
    self.mode = int(mode, 8)
    self.uid = int(uid, 8)
    self.gid = int(gid, 8)
    self.nlink = int(nlink, 8)
    dev = int(dev, 8)
    rdev = int(rdev, 8)
    self.devmajor = os.major(dev)
    self.devminor = os.minor(dev)
    self.rdevmajor = os.major(rdev)
    self.rdevminor = os.minor(rdev)
    self.mtime = int(mtime, 8)
    namesize = int(namesize, 8)
    self.filesize = int(filesize, 8)
    namestart = offset + self.coder.size
    datastart = namestart + namesize
    self.name = block[namestart:datastart - 1]  # drop the null
    print('+', _namesize, self.name)
    self.content = block[datastart:datastart + self.filesize]
    return self
def copy_device(source, target_dir, source_dir="", target=None):
    if not os.path.isdir(target_dir):
        raise IOError("'%s' is no directory." % target_dir)

    if not target:
        target = source

    s = "%s/%s" % (source_dir, source)
    s_linkto = None
    if os.path.islink(s):
        s_linkto = os.readlink(s)
        s = "%s/%s" % (source_dir, s_linkto)

    stats = os.stat(s)
    if not stats.st_rdev:
        raise IOError("'%s' is no device." % s)

    t = "%s/%s" % (target_dir, target)
    if os.path.exists(t):
        return

    try:
        if s_linkto:
            create_dir(target_dir, os.path.dirname(s_linkto))
            create_device(target_dir, s_linkto, stats.st_mode,
                          os.major(stats.st_rdev), os.minor(stats.st_rdev))
            create_dir(target_dir, os.path.dirname(target))
            os.symlink(s_linkto, t)
        else:
            create_device(target_dir, target, stats.st_mode,
                          os.major(stats.st_rdev), os.minor(stats.st_rdev))
    except Exception as msg:
        raise IOError("Unable to copy device '%s': %s" % (s, msg))
def compare(self, _src_path, dest_stat):
    # type: (str, SyncStat) -> bool
    '''see if devs are the same'''

    if not self.exists:
        return False

    # dest_stat is a SyncStat object and it's useless here
    # I need a real, fresh statbuf that includes st_rdev field
    try:
        dest_stat = os.lstat(self.name)
    except OSError as err:
        error('error checking %s : %s' % (self.name, err.strerror))
        return False

    # Note: mypy triggers false errors here
    # Also, no luck with Union[SyncStat, posix.stat_result]
    # In any case, for VNodeChrDev and VNodeBlkDev,
    # the self.src_stat is of type posix.stat_result
    src_major = os.major(self.src_stat.st_rdev)     # type: ignore
    src_minor = os.minor(self.src_stat.st_rdev)     # type: ignore
    dest_major = os.major(dest_stat.st_rdev)        # type: ignore
    dest_minor = os.minor(dest_stat.st_rdev)        # type: ignore
    if src_major != dest_major or src_minor != dest_minor:
        stdout('%s should have major,minor %d,%d but has %d,%d' %
               (self.name, src_major, src_minor, dest_major, dest_minor))
        unix_out('# updating major,minor %s' % self.name)
        terse(synctool.lib.TERSE_SYNC, self.name)
        return False

    return True
def verify(self, entry, modlist):
    """Verify device entry."""
    ondisk = self._exists(entry)
    if not ondisk:
        return False

    # attempt to verify device properties as specified in config
    rv = True
    dev_type = entry.get('dev_type')
    if dev_type in ['block', 'char']:
        major = int(entry.get('major'))
        minor = int(entry.get('minor'))
        if major != os.major(ondisk.st_rdev):
            msg = ("Major number for device %s is incorrect. "
                   "Current major is %s but should be %s" %
                   (entry.get("name"), os.major(ondisk.st_rdev), major))
            self.logger.debug('POSIX: ' + msg)
            entry.set('qtext', entry.get('qtext', '') + "\n" + msg)
            rv = False

        if minor != os.minor(ondisk.st_rdev):
            msg = ("Minor number for device %s is incorrect. "
                   "Current minor is %s but should be %s" %
                   (entry.get("name"), os.minor(ondisk.st_rdev), minor))
            self.logger.debug('POSIX: ' + msg)
            entry.set('qtext', entry.get('qtext', '') + "\n" + msg)
            rv = False

    return POSIXTool.verify(self, entry, modlist) and rv
def valid_vcsa(vcsa):
    """Check that the vcsa device and associated terminal seem sane"""
    vcsa_kernel_major = 7
    tty_kernel_major = 4
    vcsa_range = range(128, 191)
    tty_range = range(1, 63)

    tty = PaperTTY.ttydev(vcsa)
    vs = os.stat(vcsa)
    ts = os.stat(tty)

    vcsa_major, vcsa_minor = os.major(vs.st_rdev), os.minor(vs.st_rdev)
    tty_major, tty_minor = os.major(ts.st_rdev), os.minor(ts.st_rdev)
    if not (vcsa_major == vcsa_kernel_major and vcsa_minor in vcsa_range):
        print("Not a valid vcsa device node: {} ({}/{})".format(vcsa, vcsa_major, vcsa_minor))
        return False

    read_vcsa = os.access(vcsa, os.R_OK)
    write_tty = os.access(tty, os.W_OK)

    if not read_vcsa:
        print("No read access to {} - maybe run with sudo?".format(vcsa))
        return False

    if not (tty_major == tty_kernel_major and tty_minor in tty_range):
        print("Not a valid TTY device node: {}".format(vcsa))

    if not write_tty:
        print("No write access to {} so cannot set terminal size, maybe run with sudo?".format(tty))

    return True
def convert_stat(st):
    if S_ISDIR(st.st_mode):
        return {'kind': 'dir', 'uid': st.st_uid, 'gid': st.st_gid,
                'perm': S_IMODE(st.st_mode)}
    elif S_ISREG(st.st_mode):
        return {'kind': 'file', 'uid': st.st_uid, 'gid': st.st_gid,
                'mtime': st.st_mtime, 'ctime': st.st_ctime,
                'ino': st.st_ino, 'perm': S_IMODE(st.st_mode)}
    elif S_ISLNK(st.st_mode):
        return {'kind': 'lnk'}
    elif S_ISSOCK(st.st_mode):
        return {'kind': 'sock', 'uid': st.st_uid, 'gid': st.st_gid,
                'perm': S_IMODE(st.st_mode)}
    elif S_ISFIFO(st.st_mode):
        return {'kind': 'fifo', 'uid': st.st_uid, 'gid': st.st_gid,
                'perm': S_IMODE(st.st_mode)}
    elif S_ISBLK(st.st_mode):
        return {'kind': 'blk', 'uid': st.st_uid, 'gid': st.st_gid,
                'devmaj': os.major(st.st_rdev), 'devmin': os.minor(st.st_rdev),
                'perm': S_IMODE(st.st_mode)}
    elif S_ISCHR(st.st_mode):
        return {'kind': 'chr', 'uid': st.st_uid, 'gid': st.st_gid,
                'devmaj': os.major(st.st_rdev), 'devmin': os.minor(st.st_rdev),
                'perm': S_IMODE(st.st_mode)}
    else:
        raise ValueError("Unknown file kind")
def get_partition(device, number):
    disk_rdev = os.stat(device).st_rdev
    for partition in glob.glob("/sys/dev/block/%d:%d/*/partition" %
                               (os.major(disk_rdev), os.minor(disk_rdev))):
        if int(open(partition, "r").read().strip()) == number:
            rdev = open(os.path.join(os.path.dirname(partition), "dev"), "r").read().strip()
            return os.path.normpath(os.path.join("/dev/block",
                                                 os.readlink("/dev/block/%s" % rdev)))
    raise Exception("Partition (%s #%d) is not recognized by system" % (device, number))
def _initDMinfo(self):
    """Check files in /dev/mapper to initialize data structures for
    mappings between device-mapper devices, minor device numbers,
    VGs and LVs.

    """
    self._mapMinorDmName = {}
    self._mapMinorLV = {}
    self._mapLVminor = {}
    self._vgTree = {}
    if self._dmMajorNum is None:
        self._initBlockMajorMap()
    for file in os.listdir(devmapperDir):
        path = os.path.join(devmapperDir, file)
        fstat = os.stat(path)
        major = os.major(fstat.st_rdev)
        minor = os.minor(fstat.st_rdev)
        if major == self._dmMajorNum:
            self._mapMinorDmName[minor] = file
            mobj = re.match('(.*[^!])-([^!].*)$', file)
            if mobj:
                vg = mobj.group(1)
                lv = mobj.group(2)
                self._mapMinorLV[minor] = (vg, lv)
                self._mapLVminor["-".join((vg, lv))] = minor
                if not self._vgTree.has_key(vg):
                    self._vgTree[vg] = []
                self._vgTree[vg].append(lv)
def mknod(self, path, mode, device):
    log.debug('function mknod')
    full_path, minion_id = self._full_path(path)
    devtype = str(oct(mode)).replace('L', '')
    if devtype[:2] == '01':
        ntype = 'p'
    elif devtype[:2] == '02':
        ntype = 'c'
    elif devtype[:2] == '06':
        ntype = 'b'
    else:
        raise OSError
    major = os.major(device)
    minor = os.minor(device)
    res = self._salt_cmd(
        minion_id,
        'file.mknod',
        kwarg={
            'name': full_path,
            'ntype': ntype,
            'major': major,
            'minor': minor,
        },
    )
    return
def attach_volume(self, name, device, mount_device, static=True): try: s = os.stat(device) if not stat.S_ISBLK(s.st_mode): raise exception.InvalidInput(reason='"%s" is not block device'%device) maj, min = os.major(s.st_rdev), os.minor(s.st_rdev) if not static: # ignore mount_device now self._dynamic_attach_or_detach_volume(name, device, maj, min, attach=True) else: conf_path = lxc_device_conf_file(name, device) with open(conf_path, 'w') as f: for i in range(16): f.write('lxc.cgroup.devices.allow = ' 'b %(maj)s:%(min)s rwm\n'%{'maj':maj, 'min':min+i}) LOG.info(_("new config path %(path)s for %(device)s"), {'path': conf_path, 'device': device}) # autodev hook: # add the partitions of this device into the container when it starts with open(lxc_autodev_hook_script(name, device), 'w') as f, \ open('/proc/partitions', 'r') as p: for line in p: fields = line.split() if fields and fields[-1].startswith(os.path.basename(device)): f.write("mknod --mode=0660 $LXC_ROOTFS_MOUNT/dev/%(device)s " "b %(maj)s %(min)s\n" % { "device": fields[-1], "maj":fields[0], "min":fields[1]}) except Exception as ex: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to attach device %(device)s ' ' for %(name)s: %(ex)s'), {'name': name, 'ex': ex.message, 'device': device})
def _resolveLoopDevice(path):
    """
    Loop devices appear as the loop device under /proc/mount instead
    of the backing file. As the mount command does the resolution so must we.
    """
    if not path.startswith("/"):
        return path

    try:
        st = os.stat(path)
    except:
        return path

    if not stat.S_ISBLK(st.st_mode):
        return path

    minor = os.minor(st.st_rdev)
    major = os.major(st.st_rdev)
    loopdir = "/sys/dev/block/%d:%d/loop" % (major, minor)
    if os.path.exists(loopdir):
        with open(loopdir + "/backing_file", "r") as f:
            # Remove trailing newline
            return f.read()[:-1]

    # Old kernels might not have the sysfs entry, this is a bit slower and does
    # not work on hosts that do support the above method.
    for rec in _iterateMtab():
        loopOpt = "loop=%s" % path
        for opt in rec.fs_mntops:
            if opt != loopOpt:
                continue
            return rec.fs_spec

    return path
def getInfoFromDeviceNumber(self, d):
    retval = None
    d = parse_device_number(d)
    #
    device_cache = self.db("_device_num_to_list")
    if d not in device_cache and self._offline is False:
        try:
            m = []
            with open("/proc/mounts", "r") as f:
                m = f.readlines()
            for l in m:
                if re.search(" (nfs|smbfs|cifs|pnfs|afs) ", l):
                    pass
                else:
                    mount = l.split()[1]
                    devic = l.split()[0]
                    try:
                        stdev = os.stat(mount).st_dev
                        key = "{},{}".format(os.major(stdev), os.minor(stdev))
                        device_cache[key] = [devic, mount]
                    except:
                        pass
        except:
            pass
    try:
        retval = device_cache[d]
    except:
        pass
    return retval
def get_disk_info(device):
    if isinstance(device, tuple):
        major, minor = device
    else:
        disk_rdev = os.stat(device).st_rdev
        major, minor = (os.major(disk_rdev), os.minor(disk_rdev))
    device_dir = "/sys/dev/block/%d:%d" % (major, minor)
    if not os.path.isdir("%s/device" % device_dir):
        return None  # Not representing a physical device

    def read(file):
        filename = os.path.join(device_dir, file)
        if not os.path.isfile(filename):
            return None
        return open(filename).read().strip()

    rst = {
        "name": device,
        "identical_name": "/dev/block/%d:%d" % (major, minor),
        "device_dir": device_dir,
        "major": major,
        "minor": minor,
        "ro": int(read("ro")),
        "removable": int(read("removable")),
        "vendor": read("device/vendor") or read("device/oemid"),
        "model": read("device/model") or read("device/name"),
    }
    try:
        device_capacity = get_device_capacity(device)
        rst["size"] = device_capacity[0]
        rst["sector_size"] = device_capacity[1]
        rst["bios_compatible"] = (rst["sector_size"] == 512 and
                                  rst["size"] <= MAX_BIOS_FRIENDLY_DISK_SIZE)
    except OSError:
        pass  # e.g. unloaded cd-rom drive
    if "size" in rst:
        rst["size_str"] = get_capacity_string(rst["size"])
    return rst
def cache_status(self):
    if not self.is_cached():
        raise ValueError('LV is of wrong type')

    devpath = self.path
    s = os.stat(devpath)
    major, minor = os.major(s.st_rdev), os.minor(s.st_rdev)
    mapper = find_device('/dev/mapper', major, minor)
    if mapper is None:
        raise KeyError('failed to find device mapper entry for '
                       '%s/%s' % (self.vg.name, self.name))

    status = dmsetup('status', mapper)
    status = dict(zip(cache_status_fields,
                      status.strip().split()[:len(cache_status_fields)]))
    for k in status.keys():
        if status[k].isdigit():
            status[k] = int(status[k])
        elif '/' in status[k]:
            a, b = [int(x) for x in status[k].split('/')]
            status['%s_pct' % k] = (a * 1.0 / b * 1.0) * 100
    return status
def _resolveLoopDevice(path):
    """
    Loop devices appear as the loop device under /proc/mount instead
    of the backing file. As the mount command does the resolution so must we.
    """
    if not path.startswith("/"):
        return path

    try:
        st = os.stat(path)
    except:
        return path

    if not stat.S_ISBLK(st.st_mode):
        return path

    minor = os.minor(st.st_rdev)
    major = os.major(st.st_rdev)
    backing_file = os.path.join(_SYS_DEV_BLOCK_PATH,
                                '%d:%d' % (major, minor),
                                'loop', 'backing_file')
    try:
        with open(backing_file, "r") as f:
            # Remove trailing newline
            return f.read()[:-1]
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise

    return path
def __call__(self, log, *args, **kwargs):
    context = pyudev.Context()
    devices = context.list_devices(subsystem='drm')

    log.debug('DRM devices:')

    for device in devices:
        if not device.device_node:
            continue

        if 'seat' not in device.tags:
            continue

        devno = device.device_number
        log.debug(' %s (%u, %u)' % (device.device_node,
                                    os.major(devno), os.minor(devno)))

        dev = drm.open(device.device_node)
        version = dev.GetVersion()

        log.debug(' Driver:', version.name)
        log.debug(' Description:', version.description)
        log.debug(' Version: %u.%u.%u (%s)' % (version.major, version.minor,
                                               version.patch, version.date))
def __init__( self, path, blocking=True, parent=None ): path = Path( '/dev/uio', path ) self.path = path flags = O_RDWR | O_CLOEXEC if not blocking: flags |= O_NONBLOCK # for irq_recv self._fd = os.open( str(path), flags ) # check parent memory region (if any) if parent is not None: if isinstance( parent, Uio ): parent = parent.region() elif isinstance( parent, MemRegion ): raise TypeError # build path to sysfs dir for obtaining metadata dev = os.stat( self._fd ).st_rdev dev = '{0}:{1}'.format( os.major(dev), os.minor(dev) ) self.syspath = Path('/sys/dev/char', dev).resolve() # enumerate memory regions # beware that if there are none, the dir is absent rather than empty self._regions = {} rgninfo = self.syspath/'maps'; if rgninfo.is_dir(): for info in rgninfo.iterdir(): rgn = MemRegion( self, info, parent ) # allow lookup by index or (if available) by name self._regions[ rgn.index ] = rgn if rgn.name: self._regions[ rgn.name ] = rgn
def _linux_use_stat(f, log=None): devnum_cache = _devicenum_cache(log=log) dev_cache = {} returndict = {} unknown_paths = [] unknown_cache = {} for path in f: try: blk = os.stat(path)[stat.ST_DEV] dev_major = os.major(blk) dev_minor = os.minor(blk) dn = "{}:{}".format(dev_major, dev_minor) if dn in dev_cache: returndict[path] = dev_cache[dn] elif dn in devnum_cache: dev_cache[dn] = devnum_cache[dn] returndict[path] = dev_cache[dn] else: unknown_paths.append(path) except Exception as _: if sillyfacter.config.STRICT: raise if sillyfacter.config.DEBUG: dp = debugprint() dp("Error in stat of '{}' exception is '{}'". format(path, _), "_linux_use_stat") if log: log.error("Stat failed for path: {}".format(path)) returndict[path] = None if len(unknown_paths) > 0: unknown_cache = _run_df(unknown_paths, df_cmd=["/bin/df", "-P"]) returndict = dict(returndict.items() + unknown_cache.items()) return returndict
def get_file_systems():
    '''
    Creates a map of mounted filesystems on the machine.

    Returns:
      (major, minor) -> FileSystem(device, mount_point)
    '''
    result = {}
    if not os.access('/proc/mounts', os.R_OK):
        return result
    file = open('/proc/mounts')
    for line in file:
        try:
            device, mount_point, fs_type, fs_options, dummy1, dummy2 = line.split()
        except ValueError:
            continue

        if (mount_point.startswith('/dev') or mount_point.startswith('/proc')
                or mount_point.startswith('/sys')):
            continue

        if device.startswith('/') and mount_point.startswith('/'):
            stat = os.stat(mount_point)
            major = os.major(stat.st_dev)
            minor = os.minor(stat.st_dev)
            result[(major, minor)] = _FileSystem(device, mount_point)
    file.close()
    return result
def build_dmblock_major_minor_tables():
    """Returns 1) a table of filenames that are all device mapper block
    special files 2) a dict mapping each device mapper name to (major,minor)
    3) a dict mapping each (major,minor) pair to a table of device mapper
    names"""
    names = []
    name2pair = {}
    pair2name = {}

    mapper_entries = []
    mapper_entries = os.listdir(DMDIR)
    for n in mapper_entries:
        s = os.lstat(DMDIR + '/' + n)
        if stat.S_ISBLK(s[stat.ST_MODE]):
            names.append(n)
            maj = str(os.major(s.st_rdev))
            min = str(os.minor(s.st_rdev))
            name2pair[n] = (maj, min)
            pair2name[(maj, min)] = n

    logging.debug('grabbed dmsetup device info')
    logging.debug('dmsetup devices: ' + str(name2pair))
    return (names, name2pair, pair2name)
def get_blkdev_major_minor(path, lookup_for_file=True):
    """Get 'major:minor' number of block device.

    Get the device's 'major:minor' number of a block device to control
    I/O ratelimit of the specified path.
    If lookup_for_file is True and the path is a regular file, lookup a disk
    device which the file lies on and returns the result for the device.
    """
    st = os.stat(path)
    if stat.S_ISBLK(st.st_mode):
        path, st = _get_disk_of_partition(path, st)
        return "%d:%d" % (os.major(st.st_rdev), os.minor(st.st_rdev))
    elif stat.S_ISCHR(st.st_mode):
        # No I/O ratelimit control is provided for character devices
        return None
    elif lookup_for_file:
        # lookup the mounted disk which the file lies on
        out, _err = execute("df", path)
        devpath = out.split("\n")[1].split()[0]
        if devpath[0] != "/":
            # the file is on a network file system
            return None
        return get_blkdev_major_minor(devpath, False)
    else:
        msg = _("Unable to get a block device for file '%s'") % path
        raise exception.Error(msg)
def processTree(tarfd, tree):
    for child in tree.children.values():
        # Create file info
        info = tarfile.TarInfo()
        info.name = child.filePath
        info.mode = stat.S_IMODE(child.st.st_mode)
        info.type = _STAT_TO_TAR_TYPE[stat.S_IFMT(child.st.st_mode)]
        info.mtime = child.st.st_mtime
        info.uid = child.st.st_uid
        info.gid = child.st.st_gid
        info.devmajor = os.major(child.st.st_dev)
        info.devminor = os.minor(child.st.st_dev)

        # Setup content and links
        content = None
        if stat.S_IFMT(child.st.st_mode) == stat.S_IFREG:
            info.size = child.dataSize
            content = io.BytesIO(child.getData())
        elif stat.S_IFMT(child.st.st_mode) == stat.S_IFLNK:
            info.linkname = child.getData().decode("UTF-8")

        # Add file and its content
        tarfd.addfile(info, content)

        # Recursion for directories
        if stat.S_IFMT(child.st.st_mode) == stat.S_IFDIR:
            processTree(tarfd, child)
def setup_class(cls): cls.space = space cls.w_posix = space.appexec([], GET_POSIX) cls.w_path = space.wrap(str(path)) cls.w_path2 = space.wrap(str(path2)) cls.w_pdir = space.wrap(str(pdir)) try: cls.w_unicode_dir = space.wrap( str(unicode_dir).decode(sys.getfilesystemencoding())) except UnicodeDecodeError: # filesystem encoding is not good enough cls.w_unicode_dir = space.w_None if hasattr(os, 'getuid'): cls.w_getuid = space.wrap(os.getuid()) cls.w_geteuid = space.wrap(os.geteuid()) if hasattr(os, 'getgid'): cls.w_getgid = space.wrap(os.getgid()) if hasattr(os, 'getgroups'): cls.w_getgroups = space.newlist([space.wrap(e) for e in os.getgroups()]) if hasattr(os, 'getpgid'): cls.w_getpgid = space.wrap(os.getpgid(os.getpid())) if hasattr(os, 'getsid'): cls.w_getsid0 = space.wrap(os.getsid(0)) if hasattr(os, 'sysconf'): sysconf_name = os.sysconf_names.keys()[0] cls.w_sysconf_name = space.wrap(sysconf_name) cls.w_sysconf_value = space.wrap(os.sysconf_names[sysconf_name]) cls.w_sysconf_result = space.wrap(os.sysconf(sysconf_name)) cls.w_SIGABRT = space.wrap(signal.SIGABRT) cls.w_python = space.wrap(sys.executable) if hasattr(os, 'major'): cls.w_expected_major_12345 = space.wrap(os.major(12345)) cls.w_expected_minor_12345 = space.wrap(os.minor(12345)) cls.w_udir = space.wrap(str(udir))
def test_major_minor(self):
    os = self.posix
    assert os.major(12345) == self.expected_major_12345
    assert os.minor(12345) == self.expected_minor_12345
    assert os.makedev(self.expected_major_12345,
                      self.expected_minor_12345) == 12345
    raises((ValueError, OverflowError), os.major, -1)
def create_volume(self, **params):
    size = params.get('size')
    snapshot_id = params.get('snapshot')
    assert size or snapshot_id, 'Not enough params to create volume'
    if snapshot_id:
        snapshot = self.describe_snapshot(snapshot_id)
        if size:
            if int(size) < int(snapshot['size']):
                raise StorageError('Size you specified is smaller than snapshot')
    else:
        # Size in Gigabytes
        size = int(size)

    id = 'vol-%s' % str(uuid.uuid4())[:7]
    lvm2.lvcreate(vg_name, name=id, size='%sG' % size)
    lvinfo = lvm2.lvs(lvm2.lvpath(vg_name, id)).values()[0]
    device = os.path.realpath(lvinfo.lv_path)

    if snapshot_id:
        # Apply snapshot
        system('dd if=%s of=%s' % (self._get_snapshot_path(snapshot_id), device),
               shell=True)

    stat = os.stat(device)
    maj, min = (os.major(stat.st_rdev), os.minor(stat.st_rdev))

    self.volumes[id] = dict(id=id, attached_to=None, maj=maj, min=min,
                            host_path=device, size=str(size),
                            source_snapshot=snapshot_id)
    return self.volumes[id]
def from_tapdisk(cls, tapdisk, stats):
    # pick the last image. if it's a VHD, we got a parent
    # cache. the leaf case is an aio node sitting on a
    # parent-caching tapdev. always checking the complementary
    # case, so we bail on unexpected chains.

    images = stats['images']
    image = images[-1]
    path = image['name']
    _type = image['driver']['name']

    def __assert(cond):
        if not cond:
            raise cls.NotACachingTapdisk(tapdisk, stats)

    if _type == 'vhd':
        # parent
        return ParentCachingTap(tapdisk, stats)

    elif _type == 'aio':
        # leaf
        st = os.stat(path)
        __assert(S_ISBLK(st.st_mode))

        major = os.major(st.st_rdev)
        minor = os.minor(st.st_rdev)

        __assert(major == tapdisk.major())

        return LeafCachingTap(tapdisk, stats, minor)

    __assert(0)
def get_fs_info(self, path): fs_type = None uuid = None label = None devpth = None tmpd = None try: st_dev=os.stat(path).st_dev dev=os.makedev(os.major(st_dev),os.minor(st_dev)) tmpd=tempfile.mkdtemp() devpth=("%s/dev" % tmpd) os.mknod(devpth,0o400 | stat.S_IFBLK ,dev) except: raise ret = { } pairs = { 'LABEL' : 'label', 'UUID' : 'uuid' , 'FS_TYPE' : 'fs_type' } for (blkid_n, my_n) in pairs.items(): cmd = [ 'blkid', '-s%s' % blkid_n, '-ovalue', devpth ] print(cmd) try: output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0] ret[my_n]=output.rstrip() except Exception as e: os.unlink(devpth) os.rmdir(tmpd) raise UnsupportedException("Unable to determine %s for %s" % (blkid_n, path)) os.unlink(devpth) os.rmdir(tmpd) return(ret)
def detailed_str(meta, fields=None):
    # FIXME: should optional fields be omitted, or empty i.e. "rdev:
    # 0", "link-target:", etc.
    if not fields:
        fields = all_fields

    result = []
    if 'path' in fields:
        path = meta.path or ''
        result.append('path: ' + path)
    if 'mode' in fields:
        result.append('mode: %s (%s)' % (oct(meta.mode),
                                         xstat.mode_str(meta.mode)))
    if 'link-target' in fields and stat.S_ISLNK(meta.mode):
        result.append('link-target: ' + meta.symlink_target)
    if 'rdev' in fields:
        if meta.rdev:
            result.append('rdev: %d,%d' % (os.major(meta.rdev),
                                           os.minor(meta.rdev)))
        else:
            result.append('rdev: 0')
    if 'size' in fields and meta.size:
        result.append('size: ' + str(meta.size))
    if 'uid' in fields:
        result.append('uid: ' + str(meta.uid))
    if 'gid' in fields:
        result.append('gid: ' + str(meta.gid))
    if 'user' in fields:
        result.append('user: ' + meta.user)
    if 'group' in fields:
        result.append('group: ' + meta.group)
    if 'atime' in fields:
        # If we don't have xstat.lutime, that means we have to use
        # utime(), and utime() has no way to set the mtime/atime of a
        # symlink. Thus, the mtime/atime of a symlink is meaningless,
        # so let's not report it. (That way scripts comparing
        # before/after won't trigger.)
        if xstat.lutime or not stat.S_ISLNK(meta.mode):
            result.append('atime: ' + xstat.fstime_to_sec_str(meta.atime))
        else:
            result.append('atime: 0')
    if 'mtime' in fields:
        if xstat.lutime or not stat.S_ISLNK(meta.mode):
            result.append('mtime: ' + xstat.fstime_to_sec_str(meta.mtime))
        else:
            result.append('mtime: 0')
    if 'ctime' in fields:
        result.append('ctime: ' + xstat.fstime_to_sec_str(meta.ctime))
    if 'linux-attr' in fields and meta.linux_attr:
        result.append('linux-attr: ' + hex(meta.linux_attr))
    if 'linux-xattr' in fields and meta.linux_xattr:
        for name, value in meta.linux_xattr:
            result.append('linux-xattr: %s -> %s' % (name, repr(value)))
    if 'posix1e-acl' in fields and meta.posix1e_acl:
        acl = meta.posix1e_acl[0]
        result.append('posix1e-acl: ' + acl + '\n')
        if stat.S_ISDIR(meta.mode):
            def_acl = meta.posix1e_acl[2]
            result.append('posix1e-acl-default: ' + def_acl + '\n')
    return '\n'.join(result)
def get_file_systems(self): """ Creates a map of mounted filesystems on the machine. iostat(1): Each sector has size of 512 bytes. Returns: (major, minor) -> FileSystem(device, mount_point) """ result = {} if os.access('/proc/mounts', os.R_OK): file = open('/proc/mounts') for line in file: try: mount = line.split() device = mount[0] mount_point = mount[1] fs_type = mount[2] except (IndexError, ValueError): continue # Skip the filesystem if it is not in the list of valid # filesystems if fs_type not in self.filesystems: self.log.debug( "Ignoring %s since it is of type %s which " + " is not in the list of filesystems.", mount_point, fs_type) continue # Process the filters if self.exclude_reg.match(mount_point): self.log.debug( "Ignoring %s since it is in the " + "exclude_filter list.", mount_point) continue if (mount_point.startswith('/dev') or mount_point.startswith('/proc') or mount_point.startswith('/sys')): continue if '/' in device and mount_point.startswith('/'): try: stat = os.stat(mount_point) major = os.major(stat.st_dev) minor = os.minor(stat.st_dev) except OSError: self.log.debug("Path %s is not mounted - skipping.", mount_point) continue if (major, minor) in result: continue result[(major, minor)] = { 'device': device, 'mount_point': mount_point, 'fs_type': fs_type } file.close() elif psutil: partitions = psutil.disk_partitions(False) for partition in partitions: result[(0, len(result))] = { 'device': partition.device, 'mount_point': partition.mountpoint, 'fs_type': partition.fstype } pass return result
zbar.connect("notify::video-opened", video_opened) # open image file button open_button = gtk.Button(stock=gtk.STOCK_OPEN) open_button.connect("clicked", open_button_clicked) # populate video devices in combo box video_list.append_text("<none>") video_list.set_active(0) for (root, dirs, files) in os.walk("/dev"): for dev in files: path = os.path.join(root, dev) if not os.access(path, os.F_OK): continue info = os.stat(path) if stat.S_ISCHR(info.st_mode) and os.major(info.st_rdev) == 81: video_list.append_text(path) if path == video_device: video_list.set_active(len(video_list.get_model()) - 1) video_device = None if video_device is not None: video_list.append_text(video_device) video_list.set_active(len(video_list.get_model()) - 1) video_device = None # combine combo box and buttons horizontally hbox = gtk.HBox(spacing=8) hbox.pack_start(video_list) hbox.pack_start(status_button, expand=False) hbox.pack_start(open_button, expand=False)
def _copy_node(self, path, st):
    dev_maj = os.major(st.st_rdev)
    dev_min = os.minor(st.st_rdev)
    mode = st.st_mode
    self.archive.add_node(path, mode, os.makedev(dev_maj, dev_min))
def __init__(self, preferred_paths: list = None, remove_paths: list or bool = None, additional_paths: list = None, filesystem_types: list = None, fallback: str or bool = None): self.os_tempdir = tempfile.gettempdir() suitable_paths = [self.os_tempdir] + SUITABLE_PATHS if isinstance(fallback, bool): self.fallback = self.os_tempdir if fallback else None else: self.fallback = fallback self.usable_paths = OrderedDict() if platform.system() == "Linux": self.filesystem_types = list( filesystem_types ) if filesystem_types is not None else MEM_BASED_FS preferred_paths = [] if preferred_paths is None else preferred_paths if isinstance(remove_paths, bool) and remove_paths: suitable_paths = [] elif isinstance(remove_paths, list) and len(remove_paths) > 0: suitable_paths = [ i for i in suitable_paths if i not in remove_paths ] additional_paths = [] if additional_paths is None else additional_paths self.suitable_paths = preferred_paths + suitable_paths + additional_paths uid = os.geteuid() with open('/proc/self/mountinfo', 'r') as file: mnt_info = {i[2]: i for i in [line.split() for line in file]} for path in self.suitable_paths: path = path.replace('{uid}', str(uid)) # We may have repeated if self.usable_paths.get(path) is not None: continue self.usable_paths[path] = False try: dev = os.stat(path).st_dev major, minor = os.major(dev), os.minor(dev) mp = mnt_info.get("{}:{}".format(major, minor)) if mp and mp[8] in self.filesystem_types: self.usable_paths[path] = mp except FileNotFoundError: pass for key in [k for k, v in self.usable_paths.items() if not v]: del self.usable_paths[key] if len(self.usable_paths) > 0: self.tempdir = next(iter(self.usable_paths.keys())) else: if fallback: self.tempdir = self.fallback else: raise RuntimeError( 'No memory temporary dir found and fallback is disabled.')
def __init__(self, image, dest, bmap=None, image_size=None, logger=None): """ The class constructor. The parameters are: image - file-like object of the image which should be copied, should only support 'read()' and 'seek()' methods, and only seeking forward has to be supported. dest - file object of the destination file to copy the image to. bmap - file object of the bmap file to use for copying. image_size - size of the image in bytes. logger - the logger object to use for printing messages. """ self._logger = logger if self._logger is None: self._logger = logging.getLogger(__name__) self._xml = None self._dest_fsync_watermark = None self._batch_blocks = None self._batch_queue = None self._batch_bytes = 1024 * 1024 self._batch_queue_len = 2 self.bmap_version = None self.bmap_version_major = None self.bmap_version_minor = None self.block_size = None self.blocks_cnt = None self.mapped_cnt = None self.image_size = None self.image_size_human = None self.mapped_size = None self.mapped_size_human = None self.mapped_percent = None self._f_bmap = None self._f_bmap_path = None self._progress_started = None self._progress_index = None self._progress_time = None self._progress_file = None self._progress_format = None self.set_progress_indicator(None, None) self._f_image = image self._image_path = image.name self._f_dest = dest self._dest_path = dest.name st_data = os.fstat(self._f_dest.fileno()) self._dest_is_regfile = stat.S_ISREG(st_data.st_mode) # Special quirk for /dev/null which does not support fsync() if stat.S_ISCHR(st_data.st_mode) and \ os.major(st_data.st_rdev) == 1 and \ os.minor(st_data.st_rdev) == 3: self._dest_supports_fsync = False else: self._dest_supports_fsync = True if bmap: self._f_bmap = bmap self._bmap_path = bmap.name self._parse_bmap() else: # There is no bmap. Initialize user-visible attributes to something # sensible with an assumption that we just have all blocks mapped. self.bmap_version = 0 self.block_size = 4096 self.mapped_percent = 100 if image_size: self._set_image_size(image_size) self._batch_blocks = self._batch_bytes / self.block_size
def get_root_dev_major():
    buf = os.stat('/')
    devno = buf.st_dev
    return os.major(devno)
def major(space, device):
    result = os.major(intmask(device))
    return space.newint(result)
def write_cpio(inputdict, outputfile): """ Function to write a CPIO archive in the "New ASCII Format". The inputlist is a dictionary of files to put in the archive, where the dictionary key is the path to the file on the local filesystem and the dictionary value is the location that the file should have in the cpio archive. The outputfile is the location of the final cpio archive that will be written. """ if inputdict is None: raise Exception("input dictionary was None") if outputfile is None: raise Exception("output file was None") outf = open(outputfile, "w") try: for inputfile, destfile in list(inputdict.items()): inf = open(inputfile, 'r') st = os.fstat(inf.fileno()) # 070701 is the magic for new CPIO (newc in cpio parlance) outf.write("070701") # inode (really just needs to be unique) outf.write("%08x" % (st[stat.ST_INO])) # mode outf.write("%08x" % (st[stat.ST_MODE])) # uid is 0 outf.write("00000000") # gid is 0 outf.write("00000000") # nlink (always a single link for a single file) outf.write("00000001") # mtime outf.write("%08x" % (st[stat.ST_MTIME])) # filesize outf.write("%08x" % (st[stat.ST_SIZE])) # devmajor outf.write("%08x" % (os.major(st[stat.ST_DEV]))) # dev minor outf.write("%08x" % (os.minor(st[stat.ST_DEV]))) # rdevmajor (always 0) outf.write("00000000") # rdevminor (always 0) outf.write("00000000") # namesize (the length of the name plus 1 for the NUL padding) outf.write("%08x" % (len(destfile) + 1)) # check (always 0) outf.write("00000000") # write the name of the inputfile minus the leading / stripped = destfile.lstrip('/') outf.write(stripped) # we now need to write sentinel NUL byte(s). We need to make the # header (110 bytes) plus the filename, plus the sentinel a # multiple of 4 bytes. Note that we always need at *least* one NUL, # so if it is exactly a multiple of 4 we need to write 4 NULs outf.write("\x00" * (4 - ((110 + len(stripped)) % 4))) # now write the data from the input file outf.writelines(inf) inf.close() # we now need to write out NUL byte(s) to make it a multiple of 4. # note that unlike the name, we do *not* have to have any NUL bytes, # so if it is already aligned on 4 bytes do nothing remainder = st[stat.ST_SIZE] % 4 if remainder != 0: outf.write("\x00" * (4 - remainder)) # now that we have written all of the file entries, write the trailer outf.write("070701") # zero inode outf.write("00000000") # zero mode outf.write("00000000") # zero uid outf.write("00000000") # zero gid outf.write("00000000") # one nlink outf.write("00000001") # zero mtime outf.write("00000000") # zero filesize outf.write("00000000") # zero devmajor outf.write("00000000") # zero devminor outf.write("00000000") # zero rdevmajor outf.write("00000000") # zero rdevminor outf.write("00000000") # 0xB namesize outf.write("0000000B") # zero check outf.write("00000000") # trailer outf.write("TRAILER!!!") # finally, we need to pad to the closest 512 bytes outf.write("\x00" * (512 - (outf.tell() % 512))) except: os.unlink(outputfile) raise outf.close()
def diskinfo(self): """Return a dictionary contain info of all disks and partitions. """ disks = { 'count': 0, 'totalsize': 0, 'partitions': [], 'lvm': { 'partcount': 0, 'unpartition': 0, 'partitions': [] } } # read mount points mounts = Server.mounts(True) # scan for uuid and filesystem of partitions blks = Server.partinfo() # OpenVZ may not have blk info if not blks: return disks for devname, blkinfo in blks.iteritems(): dev = os.stat('/dev/%s' % devname).st_rdev major, minor = os.major(dev), os.minor(dev) blks[devname]['major'] = major blks[devname]['minor'] = minor parts = [] with open('/proc/partitions', 'r') as f: for line in f: fields = line.split() if len(fields) == 0: continue if not fields[0].isdigit(): continue major, minor, blocks, name = fields major, minor, blocks = int(major), int(minor), int(blocks) parts.append({ 'name': name, 'major': major, 'minor': minor, 'blocks': blocks, }) # check if some unmounted partition is busy has_busy_part = False for i, part in enumerate(parts): # check if it appears in blkid list if not blks.has_key(part['name']): # don't check the part with child partition if i+1<len(parts) and parts[i+1]['name'].startswith(part['name']): continue # if dev name doesn't match, check the major and minor of the dev devfound = False for devname, blkinfo in blks.iteritems(): if blkinfo['major'] == part['major'] and blkinfo['minor'] == part['minor']: devfound = True break if devfound: continue # means that it is busy has_busy_part = True break # scan for lvm logical volume lvmlvs = [] lvmlvs_vname = {} if not has_busy_part and os.path.exists('/sbin/lvm'): p = subprocess.Popen(shlex.split('/sbin/lvm lvdisplay'), stdout=subprocess.PIPE, close_fds=True) lvs = p.stdout while True: line = lvs.readline() if not line: break if 'LV Name' in line or 'LV Path' in line: devlink = line.replace('LV Name', '').replace('LV Path', '').strip() if not os.path.exists(devlink): continue dev = os.readlink(devlink) dev = os.path.abspath(os.path.join(os.path.dirname(devlink), dev)) dev = dev.replace('/dev/', '') lvmlvs_vname[dev] = devlink.replace('/dev/', '') lvmlvs.append(dev) p.wait() # scan for the 'on' status swap partition swapptns = [] with open('/proc/swaps', 'r') as f: for line in f: if not line.startswith('/dev/'): continue fields = line.split() swapptns.append(fields[0].replace('/dev/', '')) for part in parts: name = part['name'] major = part['major'] minor = part['minor'] blocks = part['blocks'] # check if the partition is a hardware disk # we treat name with no digit as a hardware disk is_hw = True partcount = 0 unpartition = 0 if len([x for x in name if x.isdigit()]) > 0 or name in lvmlvs: is_hw = False # determine which disk this partition belong to # and calcular the unpartition disk space parent_part = disks parent_part_found = False for i, ptn in enumerate(disks['partitions']): if name.startswith(ptn['name']): parent_part_found = True parent_part = disks['partitions'][i] parent_part['partcount'] += 1 parent_part['unpartition'] -= blocks*1024 break if not is_hw and not parent_part_found: parent_part = disks['lvm'] if blks.has_key(name) and blks[name]['fstype'].startswith('LVM'): is_pv = True else: is_pv = False partition = { 'major': major, 'minor': minor, 'name': name, 'size': b2h(blocks*1024), 'is_hw': is_hw, 'is_pv': is_pv, 'partcount': partcount, } if blks.has_key(name): partition['fstype'] = blks[name]['fstype'] partition['uuid'] = blks[name]['uuid'] if name in lvmlvs: partition['is_lv'] = True partition['vname'] = lvmlvs_vname[name] else: partition['is_lv'] = 
False for mount in mounts: if mount['major'] == major and mount['minor'] == minor: partition['mount'] = mount['path'] # read filesystem type from blkid #partition['fstype'] = mount['fstype'] break if name in swapptns: partition['fstype'] = 'swap' partition['mount'] = 'swap' if is_hw: partition['partitions'] = [] partition['unpartition'] = blocks*1024 disks['count'] += 1 disks['totalsize'] += blocks*1024 parent_part['partitions'].append(partition) disks['totalsize'] = b2h(disks['totalsize']) disks['lvscount'] = len(lvmlvs) for i, part in enumerate(disks['partitions']): unpartition = part['unpartition'] if unpartition <= 10*1024**2: # ignore size < 10MB unpartition = '0' else: unpartition = b2h(unpartition) disks['partitions'][i]['unpartition'] = unpartition return disks
def test_add_node_dev_null(self):
    st = os.lstat('/dev/null')
    dev_maj = os.major(st.st_rdev)
    dev_min = os.minor(st.st_rdev)
    self.tf.add_node('/dev/null', st.st_mode, os.makedev(dev_maj, dev_min))
def does_stuff(n):
    a = os.major(n)
    b = os.minor(n)
    x = os.makedev(a, b)
    return '%d,%d,%d' % (a, b, x)
def blk_maj_min(block_dev):
    """Returns major/minor device numbers for the given block device.
    """
    dev_stat = os.stat(os.path.realpath(block_dev))
    return os.major(dev_stat.st_rdev), os.minor(dev_stat.st_rdev)
def maj_min_from_path(path):
    """Returns major/minor device numbers for the given path.
    """
    dev_stat = os.stat(os.path.realpath(path))
    return os.major(dev_stat.st_dev), os.minor(dev_stat.st_dev)
device = True elif meta['st_mode'] & 0o170000 == stat.S_IFCHR: info.type = tarfile.CHRTYPE device = True elif meta['st_mode'] & 0o170000 == stat.S_IFSOCK: info.type = tarfile.CONTTYPE else: continue info.mtime = meta['st_mtime'] info.mode = meta['st_mode'] info.uid = meta['st_uid'] info.gid = meta['st_gid'] if device and hasattr(os, "major") and hasattr(os, "minor"): info.devmajor = os.major(meta['st_rdev']) info.devminor = os.minor(meta['st_rdev']) self._archive.addfile(info) self._pending_metadata = dirview[D_FILES] def interrupt(self): if self._completed.is_set(): return try: if self._terminate: self._terminate() self._completed.wait(5)
def checkIfDevice(path, diskstats):
    st_dev = os.stat(path).st_dev
    return (os.major(st_dev), os.minor(st_dev)) in diskstats
def major(space, device):
    result = os.major(device)
    return space.wrap(result)
def render(self, to_file, logProgress=False): """ Render a CPIO archive to the given file. """ f_out = open(to_file, "wb") file_list = list(self.files) # There's a trailer on every cpio archive .. file_list.append(file_from_data("TRAILER!!!", "")) for f in file_list: # We need to know our data size, so pull in the data now. if (f.orig_file is not None): # Is this a real file at all? orig_stat = os.lstat(f.orig_file) if (stat.S_ISREG(orig_stat.st_mode)): f_in = open(f.orig_file, "rb") file_data = f_in.read() f_in.close() elif (stat.S_ISLNK(orig_stat.st_mode)): file_data = os.readlink(f.orig_file) else: # No data file_data = None else: file_data = f.data if (file_data is None): # There is actually no data. data_size = 0 else: # There is data, but it may be a zero-length string. data_size = len(file_data) # name_size includes the terminating NUL name_size = len(f.name) + 1 # This is a bit inefficient, but it has the advantage of clarity. hdr_array = [] # cpio, as is UNIX's wont, is almost entirely undocumented: # http://refspecs.freestandards.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-generic/pkgformat.html # has a spec for the SVR4 portable format (-Hnewc), which is # understood by newer Linux kernels as an initrd format. # Magic hdr_array.append("070701") hdr_array.append("%08X" % f.ino) hdr_array.append("%08X" % f.mode) hdr_array.append("%08X" % f.uid) hdr_array.append("%08X" % f.gid) hdr_array.append("%08X" % f.nlink) hdr_array.append("%08X" % f.mtime) hdr_array.append("%08X" % data_size) hdr_array.append("%08X" % os.major(f.dev)) hdr_array.append("%08X" % os.minor(f.dev)) hdr_array.append("%08X" % os.major(f.rdev)) hdr_array.append("%08X" % os.minor(f.rdev)) hdr_array.append("%08X" % name_size) # And a zero checksum (?!) hdr_array.append("%08x" % 0) if (logProgress): print "> Packing %s .. " % (f.name) f_out.write("".join(hdr_array)) f_out.write(f.name) f_out.write("\0") # Now we need to pad to a 4-byte boundary. pos = f_out.tell() if (pos % 4): for i in range(0, 4 - (pos % 4)): f_out.write("\0") if (file_data is not None): f_out.write(file_data) # .. and pad again. pos = f_out.tell() if (pos % 4): for i in range(0, 4 - (pos % 4)): f_out.write("\0") # Make very sure we throw this data away after we're done # using it - it's typically several megabytes. file_data = None f_out.close()
def checkIfDevice(path, diskStats):
    deviceID = stat(path).st_dev
    return (major(deviceID), minor(deviceID)) in diskStats
""" args = parser.parse_args() args.pcts = args.pcts.split(',') args.pcts.sort(key=lambda x: float(x)) try: major = int(args.dev.split(':')[0]) minor = int(args.dev.split(':')[1]) except Exception: if '/' in args.dev: stat = os.stat(args.dev) else: stat = os.stat('/dev/' + args.dev) major = os.major(stat.st_rdev) minor = os.minor(stat.st_rdev) if args.which == 'from-rq-alloc': start_time_field = 'alloc_time_ns' elif args.which == 'after-rq-alloc': start_time_field = 'start_time_ns' elif args.which == 'on-device': start_time_field = 'io_start_time_ns' else: print("Invalid latency measurement {}".format(args.which)) exit() bpf_source = bpf_source.replace('__START_TIME_FIELD__', start_time_field) bpf_source = bpf_source.replace('__MAJOR__', str(major)) bpf_source = bpf_source.replace('__MINOR__', str(minor))
def dev_to_nums(path):
    dev = os.lstat(path).st_rdev
    return (os.major(dev), os.minor(dev))
# @email: [email protected]
# @github: https://github.com/hooj0
# @create date: 2018-03-26 22:22:27
# @copyright by hoojo@2018
# @changelog Added python3 `os file -> major` example

import os

'''
Overview
The os.major() method extracts the device major number from a raw device
number (the st_dev or st_rdev field of a stat result).

Syntax
The major() method has the following signature:
os.major(device)

Parameters
device -- the raw device number from which to extract the major number
(the st_dev or st_rdev field of a stat result).

Return value
Returns the device major number.
'''

info = os.lstat('/tmp/foo.txt')
print('stat info: %s' % info)
print('major device number: %s' % os.major(info.st_dev))
print('minor device number: %s' % os.minor(info.st_dev))
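# A small companion sketch (an addition, not part of the original snippet):
# st_dev identifies the device that *contains* a file, while st_rdev is only
# meaningful for device nodes and identifies the device the node *represents*.
# /dev/null is used here because its major/minor (1, 3 on Linux) are well known.
import os

st = os.lstat('/dev/null')                       # a character device node
print('containing fs device  :', os.major(st.st_dev), os.minor(st.st_dev))
print('device it represents  :', os.major(st.st_rdev), os.minor(st.st_rdev))  # 1 3 on Linux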
def decode_dev(field, value):
    if _marked_as_odev(field):
        return "%d:%d" % (os.major(value), os.minor(value))
    else:
        return "%d:%d" % (value >> kern_minorbits,
                          value & ((1 << kern_minorbits) - 1))
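# Illustrative sketch only (assumes kern_minorbits is 20, the kernel's
# MINORBITS): a kernel-internal dev_t packs the major number above 20 bits of
# minor, which on Linux (glibc) is *not* the same layout as the userspace
# dev_t that os.major()/os.minor()/os.makedev() operate on. A value therefore
# has to be decoded with the scheme it was encoded with, which is what
# decode_dev() above selects per field.
import os

kern_minorbits = 20                                # assumption for the example
major_num, minor_num = 253, 7                      # e.g. a device-mapper volume
kernel_value = (major_num << kern_minorbits) | minor_num   # kernel-style encoding
user_value = os.makedev(major_num, minor_num)              # userspace encoding

assert (kernel_value >> kern_minorbits,
        kernel_value & ((1 << kern_minorbits) - 1)) == (major_num, minor_num)
assert (os.major(user_value), os.minor(user_value)) == (major_num, minor_num)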
def detailed_str(meta, fields=None):
    # FIXME: should optional fields be omitted, or empty i.e. "rdev:
    # 0", "link-target:", etc.
    if not fields:
        fields = all_fields

    result = []
    if 'path' in fields:
        path = meta.path or ''
        result.append('path: ' + path)
    if 'mode' in fields:
        result.append('mode: %s (%s)' % (oct(meta.mode),
                                         xstat.mode_str(meta.mode)))
    if 'link-target' in fields and stat.S_ISLNK(meta.mode):
        result.append('link-target: ' + meta.symlink_target)
    if 'rdev' in fields:
        if meta.rdev:
            result.append('rdev: %d,%d' % (os.major(meta.rdev),
                                           os.minor(meta.rdev)))
        else:
            result.append('rdev: 0')
    if 'size' in fields and meta.size:
        result.append('size: ' + str(meta.size))
    if 'uid' in fields:
        result.append('uid: ' + str(meta.uid))
    if 'gid' in fields:
        result.append('gid: ' + str(meta.gid))
    if 'user' in fields:
        result.append('user: ' + meta.user)
    if 'group' in fields:
        result.append('group: ' + meta.group)
    if 'atime' in fields:
        # If we don't have xstat.lutime, that means we have to use
        # utime(), and utime() has no way to set the mtime/atime of a
        # symlink. Thus, the mtime/atime of a symlink is meaningless,
        # so let's not report it. (That way scripts comparing
        # before/after won't trigger.)
        if xstat.lutime or not stat.S_ISLNK(meta.mode):
            result.append('atime: ' + xstat.fstime_to_sec_str(meta.atime))
        else:
            result.append('atime: 0')
    if 'mtime' in fields:
        if xstat.lutime or not stat.S_ISLNK(meta.mode):
            result.append('mtime: ' + xstat.fstime_to_sec_str(meta.mtime))
        else:
            result.append('mtime: 0')
    if 'ctime' in fields:
        result.append('ctime: ' + xstat.fstime_to_sec_str(meta.ctime))
    if 'linux-attr' in fields and meta.linux_attr:
        result.append('linux-attr: ' + hex(meta.linux_attr))
    if 'linux-xattr' in fields and meta.linux_xattr:
        for name, value in meta.linux_xattr:
            result.append('linux-xattr: %s -> %s' % (name, repr(value)))
    if 'posix1e-acl' in fields and meta.posix1e_acl and posix1e:
        flags = posix1e.TEXT_ABBREVIATE
        if stat.S_ISDIR(meta.mode):
            acl = meta.posix1e_acl[0]
            default_acl = meta.posix1e_acl[2]
            result.append(acl.to_any_text('posix1e-acl: ', '\n', flags))
            result.append(acl.to_any_text('posix1e-acl-default: ', '\n', flags))
        else:
            acl = meta.posix1e_acl[0]
            result.append(acl.to_any_text('posix1e-acl: ', '\n', flags))
    return '\n'.join(result)
def copy(src, dst, link=1, touch=0): """Copy a file, a directory or a link. When link is 1 (default), regular files will be hardlinked, as opposed to being copied. When touch is 1, only the file, but not the contents are copied (useful for logfiles). """ global bytes, lins, drs, syms, touchs, copys, devs if os.path.islink(src): # if it is a symlink, always copy it # (no sense in trying to hardlink a symlink) if DRYRUN: print 'ln -s %s %s' % (os.readlink(src), dst) else: os.symlink(os.readlink(src), dst) copyown(src, dst) syms += 1 elif os.path.isdir(src): # directories are also copied always if DRYRUN: s = os.stat(src) print 'mkdir %s; chmod 4%s %s' % (dst, oct(stat.S_IMODE( s.st_mode)), dst) copyown(src, dst) copytime(src, dst) else: os.mkdir(dst) copyown(src, dst) shutil.copystat(src, dst) drs += 1 elif os.path.isfile(src): # this a file, not a dir or symlink if touch: # means create a new file and copy perms if DRYRUN: print 'touch %s' % dst else: open(dst, 'w') copyown(src, dst) shutil.copystat(src, dst) touchs += 1 elif link: # means we should hardlink if DRYRUN: print 'ln %s %s' % (src, dst) else: if vsutil.is_file_immutable_unlink(src): os.link(src, dst) lins += 1 else: # since it is not iunlink, copy it anyway print 'Warning: not hardlinking %s because it is not iunlink' % src shutil.copy(src, dst) copyown(src, dst) shutil.copystat(src, dst) bytes += os.path.getsize(dst) copys += 1 else: # else copy it if DRYRUN: print 'cp -a %s %s' % (src, dst) else: shutil.copy(src, dst) copyown(src, dst) shutil.copystat(src, dst) bytes += os.path.getsize(dst) copys += 1 else: # this is a special device? s = os.stat(src) if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode) \ or stat.S_ISFIFO(s.st_mode): if DRYRUN: print "mknod %s %o %02x:%02x" % ( dst, s.st_mode, os.major(s.st_rdev), os.minor(s.st_rdev)) else: os.mknod(dst, s.st_mode, os.makedev(os.major(s.st_rdev), os.minor(s.st_rdev))) copyown(src, dst) shutil.copystat(src, dst) devs += 1
def get_mount_paths(devnode):
    '''given a device node, return a list of all active mountpoints.'''
    devno = os.stat(devnode).st_rdev
    majmin = "%d:%d" % (os.major(devno), os.minor(devno))
    mountinfo = (line.split() for line in open("/proc/self/mountinfo"))
    return [info[4] for info in mountinfo if info[2] == majmin]
def get_file_list(cwd, count_directories, unarchive_path=None): cwd = Path(cwd) if unarchive_path is None: archive_file = None temp_dir = None else: (cwd, archive_file, temp_dir) = unarchive_path(cwd) uid_cache = Cache(lambda x: pwd.getpwuid(x).pw_name) gid_cache = Cache(lambda x: grp.getgrgid(x).gr_name) files = [] for file in cwd.iterdir(): if archive_file: shown_file = Path(str(file).replace(str(temp_dir), str(archive_file), 1)) else: shown_file = file obj = { 'file': shown_file, 'key': natsort_key(file.name), 'extension': natsort_key(tar_suffix(file)), } try: lstat = file.lstat() except FileNotFoundError: continue obj['lstat'] = lstat if stat.S_ISLNK(lstat.st_mode): try: st = file.stat() if stat.S_ISDIR(st.st_mode): obj['label'] = f'~{file.name}' obj['palette'] = 'dir_symlink' else: obj['label'] = f'@{file.name}' obj['palette'] = 'symlink' except (FileNotFoundError, PermissionError): st = lstat obj['label'] = f'!{file.name}' obj['palette'] = 'stalelink' else: st = lstat if stat.S_ISDIR(st.st_mode): obj['label'] = f'/{file.name}' obj['palette'] = 'directory' elif stat.S_ISCHR(lstat.st_mode): obj['label'] = f'-{file.name}' obj['palette'] = 'device' elif stat.S_ISBLK(lstat.st_mode): obj['label'] = f'+{file.name}' obj['palette'] = 'device' elif stat.S_ISFIFO(lstat.st_mode): obj['label'] = f'|{file.name}' obj['palette'] = 'special' elif stat.S_ISSOCK(lstat.st_mode): obj['label'] = f'={file.name}' obj['palette'] = 'special' elif lstat.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH): obj['label'] = f'*{file.name}' obj['palette'] = 'executable' else: obj['label'] = f' {file.name}' if obj['extension'] in ARCHIVE_EXTENSIONS: obj['palette'] = 'archive' else: obj['palette'] = 'panel' obj['stat'] = st if stat.S_ISDIR(st.st_mode): try: if count_directories: length = len(list(file.iterdir())) obj['length'] = (length,) obj['size'] = str(length) else: obj['length'] = (0,) obj['size'] = 'DIR' except (FileNotFoundError, PermissionError): obj['length'] = (-1,) obj['size'] = '?' elif stat.S_ISCHR(lstat.st_mode) or stat.S_ISBLK(lstat.st_mode): major = os.major(lstat.st_rdev) minor = os.minor(lstat.st_rdev) obj['length'] = (major, minor) obj['size'] = f'{major},{minor}' else: length = lstat.st_size obj['length'] = (length,) obj['size'] = human_readable_size(length) try: uid = uid_cache[lstat.st_uid] except KeyError: uid = str(lstat.st_uid) try: gid = gid_cache[lstat.st_gid] except KeyError: gid = str(lstat.st_gid) obj['details'] = f'{stat.filemode(lstat.st_mode)} {lstat.st_nlink} {uid} {gid}' if stat.S_ISLNK(lstat.st_mode): try: link_target = os.readlink(file) obj['details'] = f'{obj["details"]} -> {link_target}' if Path(link_target).is_absolute(): obj['link_target'] = Path(os.path.normpath(link_target)) else: obj['link_target'] = Path(os.path.normpath(shown_file.parent / link_target)) except (FileNotFoundError, PermissionError): obj['details'] = f'{obj["details"]} -> ?' obj['link_target'] = shown_file else: obj['details'] = f'{obj["details"]} {file.name}' files.append(obj) return files
# Following is the syntax for the makedev() method:
#
#     os.makedev(major, minor)
#
# Parameters
#
#     major - This is the major device number.
#
#     minor - This is the minor device number.
#
# Return Value
#
#     This method returns the device number.

# Example
import os, sys

path = "/var/www/html/foo.txt"

# Now get the tuple
info = os.lstat(path)

# Get major and minor device number
major_dnum = os.major(info.st_dev)
minor_dnum = os.minor(info.st_dev)
print("Major Device Number :", major_dnum)
print("Minor Device Number :", minor_dnum)

# Make a device number
dev_num = os.makedev(major_dnum, minor_dnum)
print("Device Number :", dev_num)
class ExtStorageDevice(base.BlockDev): """A block device provided by an ExtStorage Provider. This class implements the External Storage Interface, which means handling of the externally provided block devices. """ def __init__(self, unique_id, children, size, params, dyn_params, *args): """Attaches to an extstorage block device. """ super(ExtStorageDevice, self).__init__(unique_id, children, size, params, dyn_params, *args) (self.name, self.uuid) = args if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2: raise ValueError("Invalid configuration data %s" % str(unique_id)) self.driver, self.vol_name = unique_id self.ext_params = params self.major = self.minor = None self.uris = [] self.Attach() @classmethod def Create(cls, unique_id, children, size, spindles, params, excl_stor, dyn_params, *args): """Create a new extstorage device. Provision a new volume using an extstorage provider, which will then be mapped to a block device. """ (name, uuid) = args if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2: raise errors.ProgrammerError("Invalid configuration data %s" % str(unique_id)) if excl_stor: raise errors.ProgrammerError("extstorage device requested with" " exclusive_storage") # Call the External Storage's create script, # to provision a new Volume inside the External Storage _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id, params, size=size, name=name, uuid=uuid) return ExtStorageDevice(unique_id, children, size, params, dyn_params, *args) def Remove(self): """Remove the extstorage device. """ if not self.minor and not self.Attach(): # The extstorage device doesn't exist. return # First shutdown the device (remove mappings). self.Shutdown() # Call the External Storage's remove script, # to remove the Volume from the External Storage _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id, self.ext_params, name=self.name, uuid=self.uuid) def Rename(self, new_id): """Rename this device. """ pass def Attach(self): """Attach to an existing extstorage device. This method maps the extstorage volume that matches our name with a corresponding block device and then attaches to this device. """ self.attached = False # Call the External Storage's attach script, # to attach an existing Volume to a block device under /dev result = _ExtStorageAction(constants.ES_ACTION_ATTACH, self.unique_id, self.ext_params, name=self.name, uuid=self.uuid) # Attach script returns the block device path and optionally # the URIs to be used for userspace access (one URI for # each hypervisor supported). # If the provider doesn't support userspace access, then # the 'uris' variable will be an empty list. result = result.split("\n") self.dev_path = result[0] self.uris = result[1:] # Verify that dev_path exists and is a block device try: st = os.stat(self.dev_path) except OSError, err: logging.error("Error stat()'ing %s: %s", self.dev_path, str(err)) return False if not stat.S_ISBLK(st.st_mode): logging.error("%s is not a block device", self.dev_path) return False self.major = os.major(st.st_rdev) self.minor = utils.osminor(st.st_rdev) self.attached = True return True
def enumerateBlockDevices(self): print "[Harddisk] Enumerating block devices..." self.partitions.append( Partition(mountpoint="/", description=_("Internal flash"))) # Add the root device. # print "[Harddisk] DEBUG: Partition(mountpoint=%s, description=%s)" % ("/", _("Internal flash")) try: rootDev = os.stat("/").st_dev rootMajor = os.major(rootDev) rootMinor = os.minor(rootDev) except (IOError, OSError): rootMajor = None rootMinor = None # print "[Harddisk] DEBUG: rootMajor='%s', rootMinor='%s'" % (rootMajor, rootMinor) for device in sorted(os.listdir("/sys/block")): try: physicalDevice = os.path.realpath( os.path.join("/sys/block", device, "device")) except (IOError, OSError) as err: print "[Harddisk] Error: Couldn't determine physicalDevice for device '%s':" % device, err continue devicePath = os.path.join("/sys/block/", device) data = readFile(os.path.join( devicePath, "dev")) # This is the device's major and minor device numbers. if data is None: print "[Harddisk] Error: Device '%s' (%s) does not appear to have valid device numbers!" % ( device, physicalDevice) continue devMajor = int(data.split(":")[0]) if devMajor in blacklistedDisks: # print "[Harddisk] DEBUG: Major device number '%s' for device '%s' (%s) is blacklisted." % (devMajor, device, physicalDevice) continue if devMajor == 179 and not SystemInfo["HasSDnomount"]: # print "[Harddisk] DEBUG: Major device number '%s' for device '%s' (%s) doesn't have 'HasSDnomount' set." % (devMajor, device, physicalDevice) continue if devMajor == 179 and devMajor == rootMajor and not SystemInfo[ "HasSDnomount"][0]: # print "[Harddisk] DEBUG: Major device number '%s' for device '%s' (%s) is the root disk." % (devMajor, device, physicalDevice) continue if SystemInfo["HasSDnomount"] and device.startswith( "%s" % (SystemInfo["HasSDnomount"][1]) ) and SystemInfo["HasSDnomount"][0]: # print "[Harddisk] DEBUG: Major device number '%s' for device '%s' (%s) starts with 'mmcblk0' and has 'HasSDnomount' set." % (devMajor, device, physicalDevice) continue description = self.getUserfriendlyDeviceName( device, physicalDevice) isCdrom = devMajor in opticalDisks or device.startswith("sr") if isCdrom: self.cd = devicePath self.partitions.append( Partition(mountpoint=self.getMountpoint(device), description=description, force_mounted=True, device=device)) # print "[Harddisk] DEBUG: Partition(mountpoint=%s, description=%s, force_mounted=True, device=%s)" % (self.getMountpoint(device), description, device) print "[Harddisk] Found optical disk '%s' (%s)." % ( device, physicalDevice) data = readFile(os.path.join(devicePath, "removable")) removable = False if data is None else bool(int(data)) # if removable: # print "[Harddisk] DEBUG: Device '%s' (%s) has removable media." % (device, physicalDevice) try: open(os.path.join("/dev", device), "r").close() mediumFound = True # Check for medium. except (IOError, OSError) as err: if err.errno in ( 123, 159 ): # ENOMEDIUM - No medium found. (123=Common Linux, 159=MIPS Linux) mediumFound = False else: print "[Harddisk] Error: Device '%s' (%s) media availability test failed:" % ( device, physicalDevice), err continue # if mediumFound: # print "[Harddisk] DEBUG: Device '%s' (%s) has media." % (device, physicalDevice) # print "[Harddisk] DEBUG: device='%s', physicalDevice='%s', devMajor='%s', description='%s'" % (device, physicalDevice, devMajor, description) if not isCdrom and os.path.exists( devicePath): # Add HDD check for partitions. 
            partitions = [partition for partition in sorted(os.listdir(devicePath)) if partition.startswith(device)]
            if SystemInfo["HasHiSi"] and devMajor == 8 and len(partitions) >= 4:
                partitions = partitions[4:]
            print "[Harddisk] len partitions = %s, device = %s" % (len(partitions), device)
            if len(partitions) != 0:
                print "[Harddisk] Found storage device '%s' (Removable=%s) NoPartitions = %s." % (device, removable, len(partitions))
                self.hdd.append(Harddisk(device, removable))
                SystemInfo["Harddisk"] = True
                # self.partitions.append(Partition(mountpoint=self.getMountpoint(device), description=description, force_mounted, device=device))
                # print "[Harddisk] DEBUG: Partition(mountpoint=%s, description=%s, force_mounted=True, device=%s)" % (self.getMountpoint(device), description, device)
                for partition in partitions:
                    description = self.getUserfriendlyDeviceName(partition, physicalDevice)
                    print "[Harddisk] Found partition '%s', description='%s', device='%s'." % (partition, description, physicalDevice)
                    part = Partition(mountpoint=self.getMountpoint(partition), description=description, force_mounted=True, device=partition)
                    self.partitions.append(part)
                    # print "[Harddisk] DEBUG: Partition(mountpoint=%s, description=%s, force_mounted=True, device=%s)" % (self.getMountpoint(partition), description, partition)
                    self.on_partition_list_change("add", part)
                    # print "[Harddisk] DEBUG: on_partition_list_change('add', Partition(mountpoint=%s, description=%s, force_mounted=True, device=%s))" % (self.getMountpoint(partition), description, partition)
    self.hdd.sort()
    print "[Harddisk] Enumerating block devices complete."
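The enumeration above keys every decision (blacklisting, SD-card handling, optical-disk detection) off the major number read from /sys/block/<device>/dev, which holds the numbers as a "major:minor" string. A minimal sketch of that sysfs lookup in isolation, assuming a Linux /sys layout (the helper name sysfs_device_numbers is illustrative):

import os


def sysfs_device_numbers(device):
    """Read major:minor for a block device from sysfs, e.g. 'sda' -> (8, 0)."""
    try:
        with open(os.path.join("/sys/block", device, "dev")) as f:
            major, minor = f.read().strip().split(":")
        return int(major), int(minor)
    except (IOError, OSError, ValueError):
        return None


if __name__ == "__main__":
    if os.path.isdir("/sys/block"):
        for device in sorted(os.listdir("/sys/block")):
            print(device, sysfs_device_numbers(device))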
def metrics(step, approot):
    """Collect node and container metrics."""
    app_env = appmgr.AppEnvironment(root=approot)

    app_metrics_dir = os.path.join(app_env.metrics_dir, 'apps')
    core_metrics_dir = os.path.join(app_env.metrics_dir, 'core')
    fs.mkdir_safe(app_metrics_dir)
    fs.mkdir_safe(core_metrics_dir)

    interval = int(step) * 2

    rrdclient = rrdutils.RRDClient('/tmp/treadmill.rrd')

    # Initialize the set of monitored applications
    monitored_apps = set(
        os.path.basename(metric_name)[:-len('.rrd')]
        for metric_name in glob.glob('%s/*' % app_metrics_dir)
        if metric_name.endswith('.rrd')
    )

    sys_svcs = _core_svcs(approot)
    sys_svcs_no_metrics = set()

    sys_maj_min = '%s:0' % os.major(os.stat(approot).st_dev)
    logging.info('Device maj:min = %s for approot: %s', sys_maj_min, approot)

    core_rrds = ['treadmill.apps.rrd', 'treadmill.core.rrd',
                 'treadmill.system.rrd']

    for core_rrd in core_rrds:
        rrdfile = os.path.join(core_metrics_dir, core_rrd)
        if not os.path.exists(rrdfile):
            rrdclient.create(rrdfile, step, interval)

    while True:
        rrdclient.update(
            os.path.join(core_metrics_dir, 'treadmill.apps.rrd'),
            rrdutils.app_metrics('treadmill/apps', sys_maj_min))
        rrdclient.update(
            os.path.join(core_metrics_dir, 'treadmill.core.rrd'),
            rrdutils.app_metrics('treadmill/core', sys_maj_min))
        rrdclient.update(
            os.path.join(core_metrics_dir, 'treadmill.system.rrd'),
            rrdutils.app_metrics('treadmill/system', sys_maj_min))

        for svc in sys_svcs:
            if svc in sys_svcs_no_metrics:
                continue

            rrdfile = os.path.join(core_metrics_dir,
                                   '{svc}.rrd'.format(svc=svc))
            if not os.path.exists(rrdfile):
                rrdclient.create(rrdfile, step, interval)

            svc_cgrp = os.path.join('treadmill', 'core', svc)
            svc_metrics = rrdutils.app_metrics(svc_cgrp, sys_maj_min)
            rrdclient.update(rrdfile, svc_metrics)

        seen_apps = set()
        for app_dir in glob.glob('%s/*' % app_env.apps_dir):
            if not os.path.isdir(app_dir):
                continue

            app_unique_name = os.path.basename(app_dir)
            seen_apps.add(app_unique_name)
            try:
                localdisk = app_env.svc_localdisk.get(app_unique_name)
                blkio_major_minor = '{major}:{minor}'.format(
                    major=localdisk['dev_major'],
                    minor=localdisk['dev_minor'],
                )
            except (exc.TreadmillError, IOError, OSError):
                blkio_major_minor = None

            rrd_file = os.path.join(
                app_metrics_dir, '{app}.rrd'.format(app=app_unique_name))
            if not os.path.exists(rrd_file):
                rrdclient.create(rrd_file, step, interval)

            app_cgrp = os.path.join('treadmill', 'apps', app_unique_name)
            app_metrics = rrdutils.app_metrics(app_cgrp, blkio_major_minor)
            rrdclient.update(rrd_file, app_metrics)

        for app_unique_name in monitored_apps - seen_apps:
            # Remove metrics for apps that are no longer present
            rrd_file = os.path.join(
                app_metrics_dir, '{app}.rrd'.format(app=app_unique_name))
            logging.info('removing %r', rrd_file)
            rrdclient.forget(rrd_file)
            os.unlink(rrd_file)

        monitored_apps = seen_apps
        time.sleep(step)

    # Graceful shutdown.
    logging.info('service shutdown.')
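Note that sys_maj_min above is built from st_dev of the approot directory (the device its filesystem lives on) and pins the minor half to 0. If both halves of the device number are wanted, a generic helper might look like the sketch below; the name dev_major_minor is illustrative, not part of the treadmill API.

import os


def dev_major_minor(path):
    """Return the 'major:minor' string for the device backing path.

    Uses st_dev because path is a regular file or directory on a
    filesystem; for a device node, st_rdev would be the right field.
    """
    st = os.stat(path)
    return '%d:%d' % (os.major(st.st_dev), os.minor(st.st_dev))


if __name__ == '__main__':
    # e.g. '8:1' when the current directory lives on /dev/sda1
    print(dev_major_minor('.'))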