def get_free_space(dir=None):
    if dir is None:
        dir = os.getenv('PANDIR')
    _, _, free_space = shutil.disk_usage(dir)
    free_space = (free_space * u.byte).to(u.gigabyte)
    return free_space
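# Usage sketch (not from the original source): assumes astropy is installed,
# `u` is astropy.units, and the PANDIR environment variable names an existing
# directory. get_free_space() then returns an astropy Quantity in gigabytes.
import os
os.environ.setdefault('PANDIR', '/tmp')  # illustrative value only
free = get_free_space()
print(free)        # e.g. "42.5 Gbyte" -- a Quantity, not a plain float
print(free.value)  # the underlying float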
def install_progress():
    """
    Function to calculate progress percentage of install.

    :return:
    """
    from . import progressbar
    try:
        usb_details = details(config.usb_disk)
    except PartitionNotMounted as e:
        log(str(e))
        return
    config.usb_mount = usb_details['mount_point']
    usb_size_used = usb_details['size_used']
    thrd = threading.Thread(target=install_distro, name="install_progress")
    # thrd.daemon()
    # install_size = usb_size_used / 1024
    # install_size = iso_size(config.image_path) / 1024
    final_size = (usb_size_used + iso_size(config.image_path)) + config.persistence
    thrd.start()
    pbar = progressbar.ProgressBar(maxval=100).start()
    # bar = progressbar.ProgressBar(redirect_stdout=True)
    while thrd.is_alive():
        current_size = shutil.disk_usage(usb_details['mount_point'])[1]
        percentage = int((current_size / final_size) * 100)
        if percentage > 100:
            percentage = 100
        config.percentage = percentage
        pbar.update(percentage)
        time.sleep(0.1)
def test_disk_usage(self):
    usage = shutil.disk_usage(os.getcwd())
    self.assertGreater(usage.total, 0)
    self.assertGreater(usage.used, 0)
    self.assertGreaterEqual(usage.free, 0)
    self.assertGreaterEqual(usage.total, usage.used)
    self.assertGreater(usage.total, usage.free)
def copyfiles(destid, dirid):
    dest = tabledest.getdest(destid)
    directory = tabledir.getdir(dirid)
    srcdirpath = str(PurePath(directory['Location']).joinpath(directory['DirName']))
    destdirpath = str(PurePath(dest['DiskPath']).joinpath(directory['DirName']))
    makedirs(destdirpath)
    while True:
        if not (os.path.exists(srcdirpath) and os.path.exists(destdirpath)):
            raise OSError('No such directory')
        # Keep 1 GiB of headroom when picking the next file to copy
        freeusage = disk_usage(destdirpath).free - pow(2, 30)
        file = tablefile.getfilefrom(dirid, database.CopyState.idle, freeusage)
        if file:
            srcfilepath = getfilepath(file)
            destfilepath = str(PurePath(destdirpath).joinpath(file['FileName'] + file['ExtName']))
            try:
                copyfile(srcfilepath, destfilepath)
            except OSError as why:
                print(why)
                tablefile.updatecopystate(file['FileID'], destid, database.CopyState.failed, str(why))
                os.remove(destfilepath)
            else:
                tablefile.updatecopystate(file['FileID'], destid, database.CopyState.finished)
                log(destid, directory['DirName'], file['FileName'], file['ExtName'], srcfilepath)
        else:
            tabledir.updatecopystate(dirid, database.CopyState.finished)
            break
def disk_usage_info(wd, keep_free: int, warn=True, quiet=0):
    """Display information about disk usage and maybe a warning

    :param wd: Working directory
    :param keep_free: Disk space in bytes to keep free
    :param warn: Show warning when keep_free seems too low
    :param quiet: Show disk usage information
    """
    # We create a directory here to prevent FileNotFoundError
    # if someone specified --free without --download
    makedirs(wd, exist_ok=True)
    free = disk_usage(wd).free

    if quiet < 1:
        print('free space: {:} MiB, minimum limit: {:} MiB'.format(free // 1024**2, keep_free // 1024**2),
              file=stderr)

    if warn and free < keep_free:
        msg = '\nWarning:\n' \
              'The disk usage currently exceeds the limit by {} MiB.\n' \
              'If the limit was set too high, many or ALL videos may get deleted.\n' \
              'Press Enter to proceed or Ctrl+D to abort... '
        print(msg.format((keep_free - free) // 1024**2), file=stderr)
        try:
            input()
        except EOFError:
            exit(1)
def diagnostic(self, request):  # `self` added: the body calls self.template_path()
    # Load version number
    with open('./package.json') as f:
        package = json.load(f)
        version = package['version']

    # Disk space
    total_disk_space, used_disk_space, free_disk_space = shutil.disk_usage('./')

    template_args = {
        'title': 'Diagnostic',
        'version': version,
        'total_disk_space': total_disk_space,
        'used_disk_space': used_disk_space,
        'free_disk_space': free_disk_space
    }

    # Memory (Linux only)
    memory_stats = get_memory_stats()
    if 'free' in memory_stats:
        template_args['free_memory'] = memory_stats['free']
        template_args['used_memory'] = memory_stats['used']
        template_args['total_memory'] = memory_stats['total']

    return render(request, self.template_path("diagnostic.html"), template_args)
def freespace(self):
    """Get the amount of free space available.
    """
    self.log.info("freespace")
    freebytes = shutil.disk_usage(self.s3_dir).free
    self.log.info("returning:" + str(freebytes))
    return freebytes
def diskfree_sigterm(disk: Union[str, Path], pid: list, freethres: int, verbose: bool = False):

    def _stop(pid: int):
        if verbose:
            print('sending', SIG, 'to', pid)
        os.kill(pid, SIG)

    disk = Path(disk).expanduser().resolve().anchor
    du = shutil.disk_usage(disk)
    freerat = du.free / du.total

    if freerat < freethres:
        for p in pid:
            if isinstance(p, str):
                try:
                    pstr = subprocess.check_output(['pgrep', '-f', p],
                                                   timeout=10, universal_newlines=True)
                except Exception:
                    logging.error(f'did not find PID for {p}')
                    continue  # pstr is undefined here, so skip to the next entry
                for s in pstr.split():
                    _stop(int(s))
            else:
                _stop(p)

    if verbose:
        print(f'{disk} free percentage {freerat*100:.1f}')
def get_disk_free(path):
    '''
    @type path: str
    @rtype tuple
    '''
    return (path, floor(float(shutil.disk_usage(path).free) / MB))
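# `MB` and `floor` come from the surrounding module; a plausible sketch of
# those assumed definitions, so the helper above is self-contained:
from math import floor
MB = 1024 * 1024  # assumed binary megabyte; the original may use 10**6 instead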
def create_download_instructions(self):
    self.instlObj.batch_accum.set_current_section('sync')
    already_synced_num_files, already_synced_num_bytes = self.instlObj.info_map_table.get_not_to_download_num_files_and_size()
    to_sync_num_files, bytes_to_sync = self.instlObj.info_map_table.get_to_download_num_files_and_size()
    var_stack.add_const_config_variable("__NUM_FILES_TO_DOWNLOAD__", "create_download_instructions", to_sync_num_files)
    var_stack.add_const_config_variable("__NUM_BYTES_TO_DOWNLOAD__", "create_download_instructions", bytes_to_sync)

    # notify user how many files and bytes to sync
    self.instlObj.progress("{} of {} files to sync".format(to_sync_num_files, to_sync_num_files + already_synced_num_files))
    self.instlObj.progress("{} of {} bytes to sync".format(bytes_to_sync, bytes_to_sync + already_synced_num_bytes))
    if already_synced_num_files > 0:
        self.instlObj.batch_accum += self.instlObj.platform_helper.progress(
            "{} files already in cache".format(already_synced_num_files),
            math.ceil(already_synced_num_files / 4))

    if to_sync_num_files == 0:
        return to_sync_num_files

    file_list = self.instlObj.info_map_table.get_download_items_sync_info()
    if False:  # need to rethink how to calc mount point sizes efficiently
        mount_points_to_size = total_sizes_by_mount_point(file_list)
        for m_p in sorted(mount_points_to_size):
            free_bytes = shutil.disk_usage(m_p).free
            print(mount_points_to_size[m_p], "bytes to sync to drive",
                  "".join(("'", m_p, "'")),
                  free_bytes - mount_points_to_size[m_p], "bytes will remain")

    self.create_sync_folders()
    self.create_sync_urls(file_list)
    self.create_curl_download_instructions()
    self.instlObj.create_sync_folder_manifest_command("after-sync", back_ground=True)
    self.create_check_checksum_instructions(to_sync_num_files)
    return to_sync_num_files
def __init__(self, path):
    self.path = os.path.normpath(path)
    self.free = disk_usage(path)[2]
    self.folders = {}
    self.big = {}
    self.targets = self.settings()['folders']
    self.set_folders()
def test_disk_usage(self):
    usage = psutil.disk_usage(os.getcwd())
    assert usage.total > 0, usage
    assert usage.used > 0, usage
    assert usage.free > 0, usage
    assert usage.total > usage.used, usage
    assert usage.total > usage.free, usage
    assert 0 <= usage.percent <= 100, usage.percent

    if hasattr(shutil, 'disk_usage'):
        # py >= 3.3, see: http://bugs.python.org/issue12442
        shutil_usage = shutil.disk_usage(os.getcwd())
        tolerance = 5 * 1024 * 1024  # 5MB
        self.assertEqual(usage.total, shutil_usage.total)
        self.assertAlmostEqual(usage.free, shutil_usage.free, delta=tolerance)
        self.assertAlmostEqual(usage.used, shutil_usage.used, delta=tolerance)

    # if path does not exist OSError ENOENT is expected across
    # all platforms
    fname = tempfile.mktemp()
    try:
        psutil.disk_usage(fname)
    except OSError as err:
        if err.args[0] != errno.ENOENT:
            raise
    else:
        self.fail("OSError not raised")
def test_disk_usage(self):
    usage = psutil.disk_usage(os.getcwd())
    self.assertEqual(usage._fields, ('total', 'used', 'free', 'percent'))
    assert usage.total > 0, usage
    assert usage.used > 0, usage
    assert usage.free > 0, usage
    assert usage.total > usage.used, usage
    assert usage.total > usage.free, usage
    assert 0 <= usage.percent <= 100, usage.percent

    if hasattr(shutil, 'disk_usage'):
        # py >= 3.3, see: http://bugs.python.org/issue12442
        shutil_usage = shutil.disk_usage(os.getcwd())
        tolerance = 5 * 1024 * 1024  # 5MB
        self.assertEqual(usage.total, shutil_usage.total)
        self.assertAlmostEqual(usage.free, shutil_usage.free, delta=tolerance)
        self.assertAlmostEqual(usage.used, shutil_usage.used, delta=tolerance)

    # if path does not exist OSError ENOENT is expected across
    # all platforms
    fname = tempfile.mktemp()
    with self.assertRaises(OSError) as exc:
        psutil.disk_usage(fname)
    self.assertEqual(exc.exception.errno, errno.ENOENT)
def details_udisks2(usb_disk_part):
    """
    Get details of a USB disk partition via UDisks2.

    usb_disk_part: a partition of a USB removable disk.
    """
    import dbus
    bus = dbus.SystemBus()

    mount_point = ''
    uuid = ''
    file_system = ''
    vendor = ''
    model = ''
    label = ''
    devtype = "disk"

    bd = bus.get_object('org.freedesktop.UDisks2',
                        '/org/freedesktop/UDisks2/block_devices%s' % usb_disk_part[4:])
    device = bd.Get('org.freedesktop.UDisks2.Block', 'Device',
                    dbus_interface='org.freedesktop.DBus.Properties')
    device = bytearray(device).replace(b'\x00', b'').decode('utf-8')

    if device[-1].isdigit() is True:
        uuid = bd.Get('org.freedesktop.UDisks2.Block', 'IdUUID',
                      dbus_interface='org.freedesktop.DBus.Properties')
        file_system = bd.Get('org.freedesktop.UDisks2.Block', 'IdType',
                             dbus_interface='org.freedesktop.DBus.Properties')
        mount_point = bd.Get('org.freedesktop.UDisks2.Filesystem', 'MountPoints',
                             dbus_interface='org.freedesktop.DBus.Properties')
        devtype = 'partition'
    else:
        devtype = 'disk'
        file_system = 'No_File_System'

    if mount_point:
        # mount_point = str(bytearray(mount_point[0]).decode('utf-8').replace(b'\x00', b''))
        mount_point = bytearray(mount_point[0]).replace(b'\x00', b'').decode('utf-8')
    else:
        try:
            mount_point = UDISKS.mount(usb_disk_part)
            config.add_remounted(usb_disk_part)
        except:
            mount_point = "No_Mount"

    try:
        label = bd.Get('org.freedesktop.UDisks2.Block', 'IdLabel',
                       dbus_interface='org.freedesktop.DBus.Properties')
    except:
        label = "No_Label"

    usb_drive_id = bd.Get('org.freedesktop.UDisks2.Block', 'Drive',
                          dbus_interface='org.freedesktop.DBus.Properties')
    bd1 = bus.get_object('org.freedesktop.UDisks2', usb_drive_id)
    try:
        vendor = bd1.Get('org.freedesktop.UDisks2.Drive', 'Vendor',
                         dbus_interface='org.freedesktop.DBus.Properties')
    except:
        vendor = str('No_Vendor')
    try:
        model = bd1.Get('org.freedesktop.UDisks2.Drive', 'Model',
                        dbus_interface='org.freedesktop.DBus.Properties')
    except:
        model = str('No_Model')

    if not mount_point == "No_Mount":
        size_total, size_used, size_free = shutil.disk_usage(mount_point)[:3]
    else:
        raise PartitionNotMounted(usb_disk_part)

    return {'uuid': uuid, 'file_system': file_system, 'label': label,
            'mount_point': mount_point, 'size_total': size_total,
            'size_used': size_used, 'size_free': size_free,
            'vendor': vendor, 'model': model, 'devtype': devtype}
def get_info_from_folder(name, folder_path):
    info = {}
    folder_usage = shutil.disk_usage(folder_path)
    info[name + 'folder'] = folder_path
    info[name + 'disktotal'] = folder_usage[0]
    info[name + 'diskused'] = folder_usage[1]
    info[name + 'used'] = get_folder_size(folder_path)
    return info
def DiskSpace():
    DiskSpace = shutil.disk_usage('/')
    tkinter.messagebox.showinfo(
        'Disk space',
        'Free: %f Gbytes' % (DiskSpace.free / 1e9) + '\n' +
        'Used: %f Gbytes' % (DiskSpace.used / 1e9) + '\n' +
        'Total: %f Gbytes' % (DiskSpace.total / 1e9))
    Logger(Trace=MyTrace(GFI(CF())), Message='DiskSpace')
def __init__(self, top, min_size):
    self.top = top
    self.min_size = min_size
    # disk usage in bytes (3.3+)
    (self.total, self.used, z) = shutil.disk_usage(top)
    manager = mp.Manager()
    self.files = manager.list()
    self.worker_size = manager.Value('i', 0)
def test_disk_usage(self):
    self.skip_real_fs()
    file_path = self.make_path('foo', 'bar')
    self.fs.create_file(file_path, st_size=400)
    # root = self.os.path.splitdrive(file_path)[0] + self.fs.path_separator
    disk_usage = shutil.disk_usage(file_path)
    self.assertEqual(1000, disk_usage.total)
    self.assertEqual(400, disk_usage.used)
    self.assertEqual(600, disk_usage.free)
    self.assertEqual((1000, 400, 600), disk_usage)

    mount_point = self.create_mount_point()
    dir_path = self.os.path.join(mount_point, 'foo')
    file_path = self.os.path.join(dir_path, 'bar')
    self.fs.create_file(file_path, st_size=400)
    disk_usage = shutil.disk_usage(dir_path)
    self.assertEqual((500, 400, 100), disk_usage)
def get_free_space_bytes(folder):
    """ Return folder/drive total and free space (in bytes) """
    try:
        total, _, free = shutil.disk_usage(folder)
        return total, free
    except:
        return 0, 0
def getdisk_usage(self):
    """only works on Python 3. returns 0 for Python 2"""
    if disk_usage:
        # noinspection PyCallingNonCallable
        spaces = disk_usage(self.serverpath)
        frsp = spaces.free
        return frsp
    return int(0)
def _read(self, path, fields="all", return_header=False):
    tmpdira = config.conf["main"]["tmpdir"]
    tmpdirb = config.conf["main"]["tmpdirb"]
    tmpdir = (tmpdira
              if shutil.disk_usage(tmpdira).free > self.minspace
              else tmpdirb)

    with tempfile.NamedTemporaryFile(mode="wb", dir=tmpdir, delete=True) as tmpfile:
        with gzip.open(str(path), "rb") as gzfile:
            logging.debug("Decompressing {!s}".format(path))
            gzcont = gzfile.read()
            logging.debug("Writing decompressed file to {!s}".format(tmpfile.name))
            tmpfile.write(gzcont)
            del gzcont

        # All the hard work is in coda
        logging.debug("Reading {!s}".format(tmpfile.name))
        cfp = coda.open(tmpfile.name)
        c = coda.fetch(cfp)
        logging.debug("Sorting info...")
        n_scanlines = c.MPHR.TOTAL_MDR
        start = datetime.datetime(*coda.time_double_to_parts_utc(c.MPHR.SENSING_START))
        has_mdr = numpy.array([hasattr(m, 'MDR') for m in c.MDR],
                              dtype=numpy.bool)
        bad = numpy.array([
            (m.MDR.DEGRADED_PROC_MDR | m.MDR.DEGRADED_INST_MDR)
            if hasattr(m, 'MDR') else True
            for m in c.MDR], dtype=numpy.bool)
        dlt = numpy.concatenate(
            [m.MDR.OnboardUTC[:, numpy.newaxis]
             for m in c.MDR if hasattr(m, 'MDR')], 1) - c.MPHR.SENSING_START
        M = numpy.ma.zeros(
            dtype=self._dtype,
            shape=(n_scanlines, 30))
        M["time"][has_mdr] = (numpy.datetime64(start, "ms") +
                              numpy.array(dlt * 1e3, "m8[ms]").T)
        specall = self.__obtain_from_mdr(c, "GS1cSpect")
        M["spectral_radiance"][has_mdr] = specall
        locall = self.__obtain_from_mdr(c, "GGeoSondLoc")
        M["lon"][has_mdr] = locall[:, :, :, 0]
        M["lat"][has_mdr] = locall[:, :, :, 1]
        satangall = self.__obtain_from_mdr(c, "GGeoSondAnglesMETOP")
        M["satellite_zenith_angle"][has_mdr] = satangall[:, :, :, 0]
        M["satellite_azimuth_angle"][has_mdr] = satangall[:, :, :, 1]
        solangall = self.__obtain_from_mdr(c, "GGeoSondAnglesSUN")
        M["solar_zenith_angle"][has_mdr] = solangall[:, :, :, 0]
        M["solar_azimuth_angle"][has_mdr] = solangall[:, :, :, 1]
        for fld in M.dtype.names:
            M.mask[fld][~has_mdr, ...] = True
            M.mask[fld][bad, ...] = True
        m = c.MDR[0].MDR
        wavenumber = (m.IDefSpectDWn1b *
                      numpy.arange(m.IDefNsfirst1b, m.IDefNslast1b + 0.1) *
                      (1 / ureg.metre))
        if self.wavenumber is None:
            self.wavenumber = wavenumber
        elif abs(self.wavenumber - wavenumber).max() > (0.05 * 1 / (ureg.centimetre)):
            raise ValueError("Inconsistent wavenumbers")
        return M
def download_all(self, wd):
    """Download/check media files

    :param wd: directory where files will be saved
    """
    exclude = self.exclude_category.split(',')
    media_list = [x for cat in self.result
                  if cat.key not in exclude or cat.home
                  for x in cat.content if not x.iscategory]
    media_list = sorted(media_list, key=lambda x: x.date or 0, reverse=True)

    # Trim down the list of files that need to be downloaded
    download_list = []
    checked_files = []
    for media in media_list:
        # Only run this check once per filename
        base = urllib.parse.urlparse(media.url).path
        base = os.path.basename(base)
        if base in checked_files:
            continue
        checked_files.append(base)

        # Skip previously deleted files
        f = urllib.parse.urlparse(media.url).path
        f = os.path.basename(f)
        f = os.path.join(wd, f + '.deleted')
        if os.path.exists(f):
            continue

        # Search for local media and delete broken files
        media.file = self.download_media(media, wd, check_only=True)
        if not media.file:
            download_list.append(media)

    if not self.download:
        return

    # Download all files
    for media in download_list:
        # Clean up until there is enough space
        while self.keep_free > 0:
            space = shutil.disk_usage(wd).free
            needed = media.size + self.keep_free
            if space > needed:
                break
            if self.quiet < 1:
                msg('free space: {:} MiB, needed: {:} MiB'.format(space // 1024**2, needed // 1024**2))
            delete_oldest(wd, media.date, self.quiet)

        # Download the video
        if self.quiet < 2:
            print('[{}/{}]'.format(download_list.index(media) + 1, len(download_list)),
                  end=' ', file=stderr)
        media.file = self.download_media(media, wd)
def get_disk_space_status(total=None, used=None, free=None):
    if None in [total, used, free]:
        total, used, free = shutil.disk_usage('/')
    percentage_free = (free / total) * 100
    if not percentage_free > 10.0:
        raise ServiceUnavailable("Only {} % free space available".format(percentage_free))
    if not percentage_free > 30.0:
        raise ServiceReturnedUnexpectedResult("Only {} % free space available".format(percentage_free))
    return True
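# Hypothetical caller sketch: one way a health-check view might consume
# get_disk_space_status(). The two exception classes are assumed to come from
# a health-check library (e.g. django-health-check); names are illustrative.
def disk_health_summary():
    try:
        get_disk_space_status()
        return 'OK'
    except ServiceReturnedUnexpectedResult as warn:   # free space <= 30 %
        return 'WARNING: {}'.format(warn)
    except ServiceUnavailable as crit:                # free space <= 10 %
        return 'CRITICAL: {}'.format(crit)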
def preparing(filesToTimeLapse, tmpDir=tmpDir):
    if os.path.getsize(filesToTimeLapse[0]) * len(filesToTimeLapse) * 1.5 > shutil.disk_usage('/tmp/').free:
        raise Exception('not enough space')
    # create tmpdir and copy images to it
    if os.path.exists(tmpDir):
        shutil.rmtree(tmpDir)  # os.removedirs() would fail on a non-empty directory
    os.mkdir(tmpDir)
    length = len(filesToTimeLapse)
    for i in range(length):
        shutil.copyfile(filesToTimeLapse[i], tmpDir + 'img-' + normalize(i, length) + ".jpg")
    zeroCount = len(str(length))
def update(self):
    disk = shutil.disk_usage(self.config['path'])
    data = {
        'free': '%7s' % self.format_size(disk.free),
        'used': '%7s' % self.format_size(disk.used),
        'total': '%7s' % self.format_size(disk.total),
        'path': self.config['path']
    }
    data['percent_used'] = int((disk.used / disk.total) * 100)
    # Mutating locals() is unreliable in CPython; format from the dict directly.
    self.set_text(self.config['format'] % data)
async def handle_put(target_dir: str, min_free_space: int, request: web.Request):
    target_name = request.headers.get('Arch-Name')
    enc_passwd = request.headers.get('Enc-Password')

    if request.content_length is None or target_name is None or enc_passwd is None:
        return web.HTTPBadRequest(reason="Some required header(s) missing (Arch-Name/Enc-Password/Content-Length)")

    # explicit redundancy with parse_file_name to be sure that no malicious file would ever pass into
    if not allowed_file_name(target_name):
        return web.HTTPBadRequest(reason=f"Incorrect file name: {target_name}")

    finfo = parse_file_name(target_name)
    if finfo is None or finfo.ftype != FileType.enc or finfo.ref_ftype != FileType.report_arch:
        return web.HTTPBadRequest(reason=f"Incorrect file name: {target_name}")

    if request.content_length > MAX_FILE_SIZE:
        return web.HTTPBadRequest(reason=f"File too large, max {MAX_FILE_SIZE} size allowed")

    if len(enc_passwd) > MAX_PASSWORD_LEN:
        return web.HTTPBadRequest(reason=f"Password too large, max {MAX_PASSWORD_LEN} size allowed")

    if shutil.disk_usage("/").free - request.content_length - len(enc_passwd) < min_free_space:
        return web.HTTPBadRequest(reason="No space left on device")

    target_path = os.path.join(target_dir, finfo.ref_name)
    key_file = target_path + ".key"
    meta_file = target_path + ".meta"

    if any(map(os.path.exists, [target_path, key_file, meta_file])):
        return web.HTTPBadRequest(reason="File exists")

    print(f"Receiving {request.content_length} bytes from {request.remote} to file {target_name}")

    try:
        with open(target_path, "wb") as fd:
            while True:
                data = await request.content.read(1 << 20)
                if len(data) == 0:
                    break
                fd.write(data)

        with open(key_file, "wb") as fd:
            fd.write(enc_passwd.encode("utf8"))

        with open(meta_file, "w") as fd:
            fd.write(json.dumps({'upload_time': time.time(), 'src_addr': request.remote}))
    except:
        os.unlink(target_path)
        raise

    return web.Response(text="Saved ")
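# Hypothetical client for the handler above, written with aiohttp. The header
# names mirror the server code; the URL and payload are placeholders.
import aiohttp

async def upload_archive(url: str, arch_name: str, enc_password: str, payload: bytes) -> int:
    async with aiohttp.ClientSession() as sess:
        # aiohttp sets Content-Length automatically for a bytes payload
        async with sess.put(url, data=payload,
                            headers={'Arch-Name': arch_name,
                                     'Enc-Password': enc_password}) as resp:
            return resp.status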
def attach():
    mount = hookenv.storage_get()['location']
    pgdata = os.path.join(mount, postgresql.version(), 'main')
    unitdata.kv().set(pgdata_mount_key, mount)
    unitdata.kv().set(pgdata_path_key, pgdata)

    hookenv.log('PGDATA storage attached at {}'.format(mount))

    # Never happens with Juju 2.0 as we can't reuse an old mount. This
    # check is here for the future.
    existingdb = os.path.exists(pgdata)

    required_space = shutil.disk_usage(postgresql.data_dir()).used
    free_space = shutil.disk_usage(mount).free

    if required_space > free_space and not existingdb:
        hookenv.status_set('blocked', 'Not enough free space in pgdata storage')
    else:
        apt.queue_install(['rsync'])
        coordinator.acquire('restart')
        reactive.set_state('postgresql.storage.pgdata.attached')
def filesystem_space():
    b_to_gb = 1024 * 1024 * 1024
    levels = json_data["filesystem_treshold"]
    paths = json_data["path"]
    for path in paths:
        (total, used, free) = shutil.disk_usage(path)
        percentage_free = ((free / b_to_gb) / (total / b_to_gb)) * 100
        # levels is assumed to be sorted ascending, e.g. [10, 20]: below the
        # lower threshold is DANGER, below the upper one a warning. The
        # original logged Warning for the lower band and DANGER for the upper,
        # which inverted the severities.
        if percentage_free <= levels[0]:
            logger.error("Path: %s is %.2f free status: DANGER" % (path, percentage_free))
        elif percentage_free <= levels[1]:
            logger.info("Path: %s is %.2f free status: Warning" % (path, percentage_free))
        else:
            logger.info("Path: %s is %.2f free status: OK" % (path, percentage_free))
def monitor_disk():
    # hd = {}
    # st = os.statvfs('/')
    # hd['avail'] = st.f_bsize * st.f_bavail
    # hd['total'] = st.f_bsize * st.f_blocks
    # hd['used'] = st.f_bsize * (st.f_blocks - st.f_bfree)
    # return hd
    hd = shutil.disk_usage('/')
    if hd.used / hd.total > 0.8:
        hostname = socket.gethostname()
        ip = socket.gethostbyname(hostname)
        body = "hostname = %s, ip = %s, total = %s, used = %s, free = %s" % (
            hostname, ip, bytes2human(hd.total), bytes2human(hd.used), bytes2human(hd.free))
        sendmail('[monitor] insufficient disk space', body)
def check_mountpoint(path):
    disk = shutil.disk_usage(path)
    state = (1 - disk.free / disk.total) * 100  # percent used
    if state <= 70:
        print(state)
        sys.exit(0)
    elif 70 < state <= 90:
        print("Alert: less than 30% of the disk is free")
        sys.exit(1)
    elif 90 < state <= 95:
        print("Alarm! Less than 10% of the disk is free on my storage disk")
        sys.exit(2)
    else:
        sys.exit(10)
def check_disk_usage():
    """Return True if free disk space is below 20%"""
    total, used, free = shutil.disk_usage(os.path.expanduser("~"))
    return free / total < 0.2
def get_usage(path, unit=Unit.BYTE):
    log.debug("getting usage for '{}' in {}".format(path, unit.name))
    usage = disk_usage(path)
    return convert(usage, unit)
try:
    blocks_found = 'tac %s | grep "Stage 3/3" -c' % (latest_log_file)
    blocks_found_count = subprocess.check_output(blocks_found, shell=True)
    print("Blocks found this session:", blocks_found_count.decode("utf-8").strip())
    message = "%s.Logs.Blocks_Found %s %d\n" % (
        friendly_name, blocks_found_count.decode("utf-8").strip(), int(time.time()))
except:
    print("Blocks found this session: 0")
    message = "%s.Logs.Blocks_Found 0 %d\n" % (friendly_name, int(time.time()))
send_msg(message)

# Specific disk metrics
total, used, free = shutil.disk_usage(arweave_directory)

print("Total Arweave Disk: %d GB" % (total // (2**30)))
message = "%s.Metrics.Disk.TotalGB %s %d\n" % (friendly_name, (total // (2**30)), int(time.time()))
send_msg(message)

print("Used Arweave Disk: %d GB" % (used // (2**30)))
message = "%s.Metrics.Disk.UsedGB %s %d\n" % (friendly_name, (used // (2**30)), int(time.time()))
send_msg(message)

print("Free Arweave Disk: %d GB" % (free // (2**30)))
message = "%s.Metrics.Disk.FreeGB %s %d\n" % (friendly_name, (free // (2**30)), int(time.time()))
send_msg(message)
def disk():
    global disk_size, disk_used, disk_free
    disk_size, disk_used, disk_free = shutil.disk_usage('/')
    return disk_size, disk_used, disk_free
def opt_check_disk_space(warnlimit_mb=200):
    # MB -> bytes; the original multiplied by 1024*2, which is off by a factor of 512
    if disk_usage('.').free < warnlimit_mb * 1024 * 1024:
        log.warning("Less than %sMB of free space remains on this device" % warnlimit_mb)
path = os.path.join(TMPPATH, filename)
tmppath = os.path.join(TMPPATH, "tmp-" + filename)
finalpath = os.path.join(DESTPATH, filename)

if not os.path.exists(finalpath):
    if args.boost:
        urlretrieve(url, path)
        print("Compress")
        os.system(f"ffmpeg -hide_banner -loglevel warning -i {path} "
                  f"-filter_complex \"compand=attacks=0:points=-80/-900|-45/-15|-27/-9|0/-7|20/-7:gain=5\" {tmppath}")
    else:
        urlretrieve(url, tmppath)

    # adding label
    tts = gTTS(arrow.utcnow().format('dddd DD.MMMM', locale="de_CH") + f" {pc['name']}. {p['title']}.", lang='de')
    tts.save(label_path)
    cmd = (f"ffmpeg -hide_banner -loglevel warning -i {label_path} -i {tmppath} "
           f"-filter_complex [0:a][1:a]concat=n=2:v=0:a=1 {finalpath}")
    os.system(cmd)
    os.unlink(tmppath)
else:
    print(f"{finalpath} already exists. Skipping.")

print("\n")
i += 1

print(f"Downloaded {round(total_duration)} minutes")
df = shutil.disk_usage(args.destpath)
print(f"{args.destpath} has {round(df.free / 1024 / 1024)}MB free")
#!/usr/bin/python3
import datetime
import shutil
import urllib.request as urlrequest

import boto3

disk_usage = shutil.disk_usage('/data')
disk_used_percent = disk_usage.used / disk_usage.total * 100
print('DiskUsedPercent {0}'.format(disk_used_percent))

timestamp = urlrequest.urlopen(
    'http://localhost/api/timestamp').readline().decode('utf-8')
parsed = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%SZ\n")
now = datetime.datetime.now()
replication_seconds_behind = (now - parsed).total_seconds()
print('ReplicationSecondsBehind {0}'.format(replication_seconds_behind))

boto3.client('cloudwatch', 'us-east-1').put_metric_data(
    Namespace="Overpass",
    MetricData=[{
        'MetricName': 'ReplicationSecondsBehind',
        'Value': replication_seconds_behind,
        'Unit': 'Seconds'
    }, {
        'MetricName': 'DiskUsedPercent',
        'Value': disk_used_percent,
        'Unit': 'Percent'
    }])
def check_disk_space():
    # raise if less than 0.5 GiB of free space remains on /
    if disk_usage('/')[2] / (2**30) < 0.5:
        raise LowDiskSpaceException
def has_sufficient_disk_space(needed_bytes, directory="."):
    try:
        free_bytes = disk_usage(os.path.abspath(directory)).free
    except OSError:
        return True
    return needed_bytes < free_bytes
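# Example call (illustrative): insist on 1 GiB of headroom before writing a
# large file. Note the helper deliberately returns True when the check itself
# fails, so callers treat "unknown" as "enough".
if not has_sufficient_disk_space(1 << 30, directory='/var/tmp'):
    raise RuntimeError('need at least 1 GiB free in /var/tmp')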
def get_free_disk_space():
    # https://stackoverflow.com/a/48929832/6943581
    return shutil.disk_usage("/")[-1] / (2 ** 30)  # free space in GiB
def check_disk_usage(disk):
    """Verifies that there's enough free space on disk"""
    du = shutil.disk_usage(disk)
    free = du.free / du.total * 100
    return free > 20
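# Illustrative test sketch (not from the original repo): stubbing out
# shutil.disk_usage lets check_disk_usage() be exercised without real disks.
import collections
from unittest import mock

FakeUsage = collections.namedtuple('FakeUsage', 'total used free')

with mock.patch('shutil.disk_usage', return_value=FakeUsage(100, 50, 50)):
    assert check_disk_usage('/') is True    # 50 % free
with mock.patch('shutil.disk_usage', return_value=FakeUsage(100, 90, 10)):
    assert check_disk_usage('/') is False   # 10 % free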
def main(args=None):
    '''Mount S3QL file system'''

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)

    # Save handler so that we can remove it when daemonizing
    stdout_log_handler = setup_logging(options)

    if not os.path.exists(options.mountpoint):
        raise QuietError('Mountpoint does not exist.', exitcode=36)

    if options.threads is None:
        options.threads = determine_threads(options)

    avail_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if avail_fd == resource.RLIM_INFINITY:
        avail_fd = 4096
    resource.setrlimit(resource.RLIMIT_NOFILE, (avail_fd, avail_fd))

    # Subtract some fd's for random things we forgot, and a fixed number for
    # each upload thread (because each thread is using at least one socket and
    # at least one temporary file)
    avail_fd -= 32 + 3 * options.threads

    if options.max_cache_entries is None:
        if avail_fd <= 64:
            raise QuietError("Not enough available file descriptors.",
                             exitcode=37)
        log.info('Autodetected %d file descriptors available for cache entries',
                 avail_fd)
        options.max_cache_entries = avail_fd
    else:
        if options.max_cache_entries > avail_fd:
            log.warning("Up to %d cache entries requested, but detected only %d "
                        "available file descriptors.", options.max_cache_entries, avail_fd)
            options.max_cache_entries = avail_fd

    if options.profile:
        import cProfile
        import pstats
        prof = cProfile.Profile()

    backend_factory = get_backend_factory(options.storage_url,
                                          options.backend_options,
                                          options.authfile,
                                          options.compress)
    backend_pool = BackendPool(backend_factory)
    atexit.register(backend_pool.flush)

    # Get paths
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)

    # Retrieve metadata
    with backend_pool() as backend:
        (param, db) = get_metadata(backend, cachepath)

    #if param['max_obj_size'] < options.min_obj_size:
    #    raise QuietError('Maximum object size must be bigger than minimum object size.',
    #                     exitcode=2)

    # Handle --cachesize
    rec_cachesize = options.max_cache_entries * param['max_obj_size'] / 2
    avail_cache = shutil.disk_usage(os.path.dirname(cachepath))[2] / 1024
    if options.cachesize is None:
        options.cachesize = min(rec_cachesize, 0.8 * avail_cache)
        log.info('Setting cache size to %d MB', options.cachesize / 1024)
    elif options.cachesize > avail_cache:
        log.warning('Requested cache size %d MB, but only %d MB available',
                    options.cachesize / 1024, avail_cache / 1024)

    if options.nfs:
        # NFS may try to look up '..', so we have to speed up this kind of query
        log.info('Creating NFS indices...')
        db.execute('CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)')
    else:
        db.execute('DROP INDEX IF EXISTS ix_contents_inode')

    metadata_upload_thread = MetadataUploadThread(backend_pool, param, db,
                                                  options.metadata_upload_interval)
    block_cache = BlockCache(backend_pool, db, cachepath + '-cache',
                             options.cachesize * 1024,
                             options.max_cache_entries)
    commit_thread = CommitThread(block_cache)
    operations = fs.Operations(block_cache, db,
                               max_obj_size=param['max_obj_size'],
                               inode_cache=InodeCache(db, param['inode_gen']),
                               upload_event=metadata_upload_thread.event)
    block_cache.fs = operations
    metadata_upload_thread.fs = operations

    with ExitStack() as cm:
        log.info('Mounting %s at %s...', options.storage_url, options.mountpoint)
        try:
            llfuse.init(operations, options.mountpoint, get_fuse_opts(options))
        except RuntimeError as exc:
            raise QuietError(str(exc), exitcode=39)

        unmount_clean = False

        def unmount():
            log.info("Unmounting file system...")
            llfuse.close(unmount=unmount_clean)
        cm.callback(unmount)

        if options.fg:
            faulthandler.enable()
            faulthandler.register(signal.SIGUSR1)
        else:
            if stdout_log_handler:
                logging.getLogger().removeHandler(stdout_log_handler)
            global crit_log_fh
            crit_log_fh = open(os.path.join(options.cachedir, 'mount.s3ql_crit.log'), 'a')
            faulthandler.enable(crit_log_fh)
            faulthandler.register(signal.SIGUSR1, file=crit_log_fh)
            daemonize(options.cachedir)

        mark_metadata_dirty(backend, cachepath, param)

        block_cache.init(options.threads)
        cm.callback(block_cache.destroy)

        metadata_upload_thread.start()
        cm.callback(metadata_upload_thread.join)
        cm.callback(metadata_upload_thread.stop)

        commit_thread.start()
        cm.callback(commit_thread.join)
        cm.callback(commit_thread.stop)

        if options.upstart:
            os.kill(os.getpid(), signal.SIGSTOP)
        if sd_notify is not None:
            sd_notify('READY=1')
            sd_notify('MAINPID=%d' % os.getpid())

        exc_info = setup_exchook()
        workers = 1 if options.single else None  # use default

        if options.profile:
            ret = prof.runcall(llfuse.main, workers)
        else:
            ret = llfuse.main(workers)

        if ret is not None:
            raise RuntimeError('Received signal %d, terminating' % (ret,))

        # Allow operations to terminate while block_cache is still available
        # (destroy() will be called again from llfuse.close(), but at that
        # point the block cache is no longer available).
        with llfuse.lock:
            operations.destroy()

        # Re-raise if main loop terminated due to exception in other thread
        if exc_info:
            (exc_inst, exc_tb) = exc_info
            raise exc_inst.with_traceback(exc_tb)

        log.info("FUSE main loop terminated.")

        unmount_clean = True

    # At this point, there should be no other threads left

    # Do not update .params yet, dump_metadata() may fail if the database is
    # corrupted, in which case we want to force an fsck.
    param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes')
    if operations.failsafe:
        log.warning('File system errors encountered, marking for fsck.')
        param['needs_fsck'] = True
    with backend_pool() as backend:
        seq_no = get_seq_no(backend)
        if metadata_upload_thread.db_mtime == os.stat(cachepath + '.db').st_mtime:
            log.info('File system unchanged, not uploading metadata.')
            del backend['s3ql_seq_no_%d' % param['seq_no']]
            param['seq_no'] -= 1
            save_params(cachepath, param)
        elif seq_no == param['seq_no']:
            param['last-modified'] = time.time()
            dump_and_upload_metadata(backend, db, param)
            save_params(cachepath, param)
        else:
            log.error('Remote metadata is newer than local (%d vs %d), '
                      'refusing to overwrite!', seq_no, param['seq_no'])
            log.error('The locally cached metadata will be *lost* the next time the file system '
                      'is mounted or checked and has therefore been backed up.')
            for name in (cachepath + '.params', cachepath + '.db'):
                for i in range(4)[::-1]:
                    if os.path.exists(name + '.%d' % i):
                        os.rename(name + '.%d' % i, name + '.%d' % (i + 1))
                os.rename(name, name + '.0')

    log.info('Cleaning up local metadata...')
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close()

    if options.profile:
        with tempfile.NamedTemporaryFile() as tmp, \
                open('s3ql_profile.txt', 'w') as fh:
            prof.dump_stats(tmp.name)
            p = pstats.Stats(tmp.name, stream=fh)
            p.strip_dirs()
            p.sort_stats('cumulative')
            p.print_stats(50)
            p.sort_stats('time')
            p.print_stats(50)

    log.info('All done.')
def station_get_json(security_token, station_info, ucontrollers):
    logger = logging.getLogger("Utils")
    logger.debug("Gathering station data...")

    station_json = {}
    if security_token != None:
        station_json['security_token'] = security_token
    station_json['timestamp'] = int(time.time())

    for key in station_info.get('station'):
        station_json[key] = station_info.get('station', key)

    components = []
    computer = {}
    computer['name'] = 'Computer'
    measurements = {}
    total_bytes, used_bytes, free_bytes = disk_usage(realpath('/'))
    measurements['Disk used'] = str(used_bytes / (1024**3)) + "GiB"
    measurements['Disk cap'] = str(total_bytes / (1024**3)) + "GiB"
    computer['measurements'] = measurements
    components.append(computer)

    measurements_list = ucontrollers.get_measurements_list()
    for measurement in measurements_list:
        component = {}
        component['name'] = measurement['name']
        component['measurements'] = measurement['data']
        components.append(component)

    i = 1
    while True:
        camera = station_info.get('camera' + str(i))
        if camera == None:
            break

        camera_data = {}
        if i == 1:
            camera_data['name'] = 'Camera (ϕ: {}°, λ: {}°)'.format(
                camera['azimuth'], camera['altitude'])
        else:
            camera_data['name'] = 'Camera {} (ϕ: {}°, λ: {}°)'.format(
                str(i), camera['azimuth'], camera['altitude'])
        if i == 2:
            components[-1]['name'] = components[-1]['name'][0:7] + "1 " + components[-1]['name'][7:]
        components.append(camera_data)
        i += 1

    station_json['components'] = components

    maintainers = []
    i = 1
    while True:
        maintainer = station_info.get('maintainer' + str(i))
        if maintainer == None:
            break

        maintainer_data = {}
        for key in maintainer:
            maintainer_data[key] = maintainer[key]
        maintainers.append(maintainer_data)
        i += 1

    station_json['maintainers'] = maintainers

    logger.debug("Status data gathered.")
    logger.debug("Status:\n" + pprint.pformat(station_json))
    return json.dumps(station_json)
async def get_free_modules_space(request):
    modules_dir = au.get_modules_dir()
    free_space = shutil.disk_usage(modules_dir).free
    return web.json_response(free_space)
def resource_usage(workdir='.', disk=True, ram=True, rlimit=True, rusage=False):
    """
    Get information about resource usage

    Args:
        workdir (str): path to directory to check disk space of.

    Returns:
        Dict: dictionary of resource info

    References:
        https://stackoverflow.com/questions/276052/how-to-get-current-cpu-and-ram-usage-in-python
        https://man7.org/linux/man-pages/man2/getrlimit.2.html

    Example:
        >>> from netharn.util.util_resources import *  # NOQA
        >>> info = resource_usage()
        >>> import ubelt as ub
        >>> print('info = {}'.format(ub.repr2(info, nl=2, precision=2)))

    Profiling:
        import timerit
        ti = timerit.Timerit(100, bestof=10, verbose=2)
        for timer in ti.reset('resource_usage'):
            with timer:
                resource_usage()

        import xdev
        _ = xdev.profile_now(resource_usage)()
    """
    def struct_to_dict(struct):
        attrs = {k for k in dir(struct) if not k.startswith('_')}
        dict_ = {k: getattr(struct, k) for k in attrs}
        dict_ = {k: v for k, v in dict_.items() if isinstance(v, (int, float))}
        return dict_

    errors = []
    info = {}
    try:
        import psutil
    except Exception as ex:
        errors.append(repr(ex))
    else:
        if ram:
            vmem = psutil.virtual_memory()
            # info['cpu_percent'] = psutil.cpu_percent()
            info['mem_details'] = dict(vmem._asdict())
            info['ram_percent'] = vmem.percent
            info['mem_percent'] = round(vmem.used * 100 / vmem.total, 2)
        if disk:
            import shutil
            disk_usage = shutil.disk_usage(workdir)
            info['disk_usage'] = struct_to_dict(disk_usage)

    try:
        import resource
    except Exception as ex:
        errors.append(repr(ex))
    else:
        if rlimit:
            rlimit_keys = [
                # 'RLIMIT_AS',
                # 'RLIMIT_CORE',
                # 'RLIMIT_CPU',
                # 'RLIMIT_DATA',
                # 'RLIMIT_FSIZE',
                # 'RLIMIT_MEMLOCK',
                # 'RLIMIT_MSGQUEUE',
                # 'RLIMIT_NICE',

                # RLIMIT_NOFILE - a value one greater than the maximum file
                # descriptor number that can be opened by this process
                'RLIMIT_NOFILE',

                # 'RLIMIT_NPROC',
                # 'RLIMIT_OFILE',
                # 'RLIMIT_RSS',
                # 'RLIMIT_RTPRIO',
                # 'RLIMIT_SIGPENDING',
                # 'RLIMIT_STACK',
            ]
            rlimits = {}
            for key in rlimit_keys:
                # each rlimit returns a (soft, hard) tuple with soft and hard limits
                rlimits[key] = resource.getrlimit(getattr(resource, key))
            info['rlimits'] = rlimits

        if rusage:
            rusage_keys = [
                # 'RUSAGE_CHILDREN',
                'RUSAGE_SELF',
                # 'RUSAGE_THREAD',
            ]
            rusages = {}
            for key in rusage_keys:
                # Returns a structure that we will convert to a dict
                val = resource.getrusage(getattr(resource, key))
                attrs = ({n for n in dir(val) if not n.startswith('_')} -
                         {'count', 'index'})
                val = {n: getattr(val, n) for n in attrs}
                rusages[key] = val
            info['rusages'] = rusages

    if errors:
        info['errors'] = errors
    return info
def check_disk_space():
    # Returns True when available disk space is lower than 20%
    usage = shutil.disk_usage("/")
    free = usage.free / usage.total * 100
    return free < 20
def disc_space_check():
    disk_usage = shutil.disk_usage("/")
    disk_total = disk_usage.total
    disk_free = disk_usage.free  # was disk_usage.used, which inverted the check
    threshold = disk_free / disk_total * 100
    return threshold > 20
def getDiskspace():
    # call disk_usage() once instead of three times
    usage = shutil.disk_usage("/")
    return DiskSize(usage.free, usage.used, usage.total)
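# DiskSize is defined elsewhere in that project; a namedtuple with the same
# field order is one plausible shape, shown only to make the snippet runnable.
from collections import namedtuple
DiskSize = namedtuple('DiskSize', ['free', 'used', 'total'])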
def check_disk_usage(disk):
    """Return True if more than 20% of the disk is free"""
    du = shutil.disk_usage(disk)
    # calculating percent of free disk
    free_disk = du.free / du.total * 100
    return free_disk > 20
def low_disk_space(path="/"):  # a literal "/" in the parameter list is a syntax error
    usage = shutil.disk_usage(path)
    THRESHOLD = usage.free / usage.total * 100  # % free
    if THRESHOLD < 20:  # 20%
        return True
    return False
def run_ffmpeg(progress_filename, uploaded_file_path, params, output_name):
    progress_file_path = os.path.join('ffmpeg-progress', progress_filename)
    params = params.split(' ')
    params.append(output_name)
    try:
        log.info(params)
    except Exception as e:
        log.info(e)

    ffmpeg_output_file = os.path.join('ffmpeg-output', f'{Path(uploaded_file_path).stem}.txt')
    with open(ffmpeg_output_file, 'w'):
        pass

    log.info(f'Converting {uploaded_file_path}...')
    start_time = time()
    process = subprocess.Popen(
        [
            ffmpeg_path, '-loglevel', 'debug', '-progress', '-', '-nostats', '-y',
            '-i', uploaded_file_path,
            '-metadata', 'comment=Transcoded using av-converter.com',
            '-metadata', 'encoded_by=av-converter.com',
            '-id3v2_version', '3',
            '-write_id3v1', 'true'
        ] + params,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    try:
        file_duration = float(probe(uploaded_file_path)['format']['duration'])
    except Exception:
        log.info(f'Unable to get the duration of {uploaded_file_path}')

    character_set = 'utf-8'
    while True:
        # The process is in progress.
        if process.poll() is None:
            try:
                output = process.stdout.readline().decode(character_set)
            except UnicodeDecodeError as e:
                character_set = 'latin-1'
                log.info(f'{e}\nCharacter set changed to latin-1.')
                continue  # skip this line and retry with the new character set

            with open(ffmpeg_output_file, 'a', encoding='utf-8') as f:
                f.write(output)

            if 'out_time_ms' in output:
                microseconds = int(output.strip()[12:])
                secs = microseconds / 1_000_000
                try:
                    percentage = (secs / file_duration) * 100
                except Exception:
                    percentage = 'unknown'
            elif "speed" in output:
                speed = output.strip()[6:]
                speed = 0 if ' ' in speed or 'N/A' in speed else float(speed[:-1])
                try:
                    eta = (file_duration - secs) / speed
                except Exception:
                    continue
                else:
                    minutes = round(eta / 60)
                    seconds = f'{round(eta % 60):02d}'
                    with open(progress_file_path, 'w') as f:
                        f.write(f'Progress: {round(percentage, 1)}% | Speed: {speed}x | ETA: {minutes}:{seconds} [M:S]')

        # The process has completed.
        else:
            # Empty the uploads folder if there is less than 2 GB free storage space.
            free_space = shutil.disk_usage('/')[2]
            free_space_gb = free_space / 1_000_000_000
            if free_space_gb < 2:
                empty_folder('uploads')

            # The return code is not 0 if an error occurred.
            if process.returncode != 0:
                log.info('Unable to convert.')
                return {
                    'error': 'Unable to convert',
                    'log_file': f'api/{ffmpeg_output_file}'
                }
            # The conversion was successful.
            else:
                log.info(f'Conversion took {round((time() - start_time), 1)} seconds.')
                delete_file(progress_file_path)
                delete_file(ffmpeg_output_file)
                return {
                    'error': None,
                    'ext': os.path.splitext(output_name)[1],
                    'download_path': f'api/{output_name}',
                    'log_file': f'api/{ffmpeg_output_file}'
                }
def prepare_working_directory(job, submission_path, validator_path):
    '''
    Based on two downloaded files in the working directory,
    the student submission and the validation package,
    the working directory is prepared.

    We unpack the student submission first, so that teacher files
    overwrite them in case. When the student submission is a single
    directory, we change the working directory and go directly into it,
    before dealing with the validator stuff.

    If unrecoverable errors happen, such as an empty student archive,
    a JobException is raised.
    '''
    # Safeguard for fail-fast in disk-full scenarios on the executor
    dusage = shutil.disk_usage(job.working_dir)
    if dusage.free < 1024 * 1024 * 50:  # 50 MB
        info_student = "Internal error with the validator. Please contact your course responsible."
        info_tutor = "Error: Execution cancelled, less than 50MB of disk space free on the executor."
        logger.error(info_tutor)
        raise JobException(info_student=info_student, info_tutor=info_tutor)

    submission_fname = os.path.basename(submission_path)
    validator_fname = os.path.basename(validator_path)

    # Un-archive student submission
    single_dir, did_unpack = unpack_if_needed(job.working_dir, submission_path)
    job.student_files = os.listdir(job.working_dir)
    if did_unpack:
        job.student_files.remove(submission_fname)

    # Fail automatically on empty student submissions
    if len(job.student_files) == 0:
        info_student = "Your compressed upload is empty - no files in there."
        info_tutor = "Submission archive file has no content."
        logger.error(info_tutor)
        raise JobException(info_student=info_student, info_tutor=info_tutor)

    # Handle student archives containing a single directory with all data
    if single_dir:
        logger.warning("The submission archive contains only one directory. Changing working directory.")
        # Set new working directory
        job.working_dir = job.working_dir + single_dir + os.sep
        # Move validator package there
        shutil.move(validator_path, job.working_dir)
        validator_path = job.working_dir + validator_fname
        # Re-scan for list of student files
        job.student_files = os.listdir(job.working_dir)

    # The working directory now only contains the student data and the
    # downloaded validator package. Update the file list accordingly.
    job.student_files.remove(validator_fname)
    logger.debug("Student files: {0}".format(job.student_files))

    # Unpack validator package
    single_dir, did_unpack = unpack_if_needed(job.working_dir, validator_path)
    if single_dir:
        info_student = "Internal error with the validator. Please contact your course responsible."
        info_tutor = "Error: Directories are not allowed in the validator archive."
        logger.error(info_tutor)
        raise JobException(info_student=info_student, info_tutor=info_tutor)

    if not os.path.exists(job.validator_script_name):
        if did_unpack:
            # The download was an archive, but the validator was not inside.
            # This is a failure of the tutor.
            info_student = "Internal error with the validator. Please contact your course responsible."
            info_tutor = "Error: Missing validator.py in the validator archive."
            logger.error(info_tutor)
            raise JobException(info_student=info_student, info_tutor=info_tutor)
        else:
            # The download is already the script, but has the wrong name
            logger.warning("Renaming {0} to {1}.".format(validator_path, job.validator_script_name))
            shutil.move(validator_path, job.validator_script_name)
def get_free_space(dir_path):
    total, used, free = shutil.disk_usage(dir_path)
    free_disk_space = Storage.standardize_bytes(free)
    return free_disk_space
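# `Storage.standardize_bytes` belongs to the surrounding project; a plausible
# stand-in that renders a byte count as a human-readable string, shown purely
# for illustration.
class Storage:
    @staticmethod
    def standardize_bytes(n: float) -> str:
        for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
            if n < 1024 or unit == 'TiB':
                return '{:.1f} {}'.format(n, unit)
            n /= 1024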
def is_enough_space(path, size):
    if hasattr(shutil, "disk_usage"):
        # python >= 3.3
        free = shutil.disk_usage(path).free
        return free >= size
    return True
def get_quota(self):
    if not self._namespace:
        raise ex.CloudDisconnectedError("namespace not set")
    ret = shutil.disk_usage(self._namespace.id)
    return {"used": ret.used, "limit": ret.total, "login": "******"}
from neurobooth_terra.fixes import OptionalSSHTunnelForwarder
from neurobooth_terra.dataflow import delete_files

from config import ssh_args, db_args

target_dir = '/autofs/nas/neurobooth/data_test/'
suitable_dest_dir = '/autofs/nas/neurobooth/data_test_backup/'

dry_run = True
table_id = 'log_file'

# don't run rsync on weekend.
# run this file on weekend.
if dry_run:
    stats = shutil.disk_usage(target_dir)
    threshold = stats.used / stats.total - 0.1  # ensure that it deletes
    older_than = 1
else:
    threshold = 0.9
    older_than = 30

with OptionalSSHTunnelForwarder(**ssh_args) as tunnel:
    with psycopg2.connect(port=tunnel.local_bind_port,
                          host=tunnel.local_bind_host, **db_args) as conn:
        if dry_run:
            copy_table(src_table_id=table_id,
                       target_table_id=table_id + '_copy',
                       conn=conn)
def df_b(d: str) -> int:
    'Return free space for directory (in bytes)'
    usage = shutil.disk_usage(d)
    return usage.free
def get_volume_size(disk_name):
    total, used, free = shutil.disk_usage(disk_name)
    # print(disk_name, total, used, free)
    return total
def main():
    COUNTER = 0
    MAX_FRAME = 4
    Flag_detect_face = 0

    # Load model
    interpreter = Interpreter("model/drowsiness_v1.tflite")
    interpreter.allocate_tensors()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']

    camera = WebcamVideoStream(src=1).start()
    time.sleep(5)
    cv2.namedWindow("Drowsiness")
    cv2.moveWindow('Drowsiness', 550, 0)
    sound_alarm("alarm_vn.mp3")
    mixer.music.pause()

    while True:
        try:
            image = camera.read()
            image = imutils.resize(image, width=460)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            bb = getLargestFaceBoundingBox(gray)
            if bb:
                Flag_detect_face = 1
                sx, sy, tx, ty = bb.left() - 10, bb.top() - 20, bb.right() + 10, bb.bottom() + 10
                roi = gray[sy:ty, sx:tx]
                roi = cv2.resize(roi, (width, height), interpolation=cv2.INTER_AREA)
                image_pre = prepare_image(roi)
                results = classify_image(interpreter, image_pre)
                if results < 0.5:
                    COUNTER += 1
                    if COUNTER >= MAX_FRAME:
                        if mixer.music.get_busy() == False:
                            filename = 'alarm_vn.mp3'
                            sound_alarm(filename)
                        try:
                            total, used, free = shutil.disk_usage("/home/bang/Bangau/sdcard")
                            total = total // (2**30)
                            free = free // (2**30)
                            if total > 28 and free < 1:
                                list_of_files = os.listdir('sdcard/drowsiness/')
                                full_path = ["sdcard/drowsiness/{0}".format(x) for x in list_of_files]
                                if len([name for name in list_of_files]) > 1:
                                    oldest_file = min(full_path, key=os.path.getctime)
                                    os.remove(oldest_file)  # os.rmdir() cannot delete a file
                            elif (total > 28) and (free > 1):
                                date_object = date.today()
                                time_object = datetime.now().time()
                                time_object = time_object.strftime("%H-%M-%S")
                                if not os.path.exists("sdcard/drowsiness/" + str(date_object)):
                                    # os.system('sudo mkdir sdcard/' + str(date_object))
                                    os.mkdir("sdcard/drowsiness/" + str(date_object))
                                # cv2.imwrite(os.path.join("/home/bang/Bangau/sdcard/2019-10-23", 'waka.jpg'), roi)
                                cv2.imwrite("sdcard/drowsiness/" + str(date_object) + "/" + str(time_object) + ".jpg", roi)
                        except Exception:
                            pass
                        # cv2.putText(image, "Drowsiness Alert", (50, 300), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 0, 0), 3, cv2.LINE_AA)
                else:
                    COUNTER = 0
                    mixer.music.stop()
                cv2.rectangle(image, (sx, sy), (tx, ty), (255, 255, 255), 2)
            elif Flag_detect_face == 1:
                COUNTER += 1
                if COUNTER >= (MAX_FRAME + 3):
                    if mixer.music.get_busy() == False:
                        filename = 'alarm_vn.mp3'
                        sound_alarm(filename)

            cv2.imshow("Drowsiness", image)
            key = cv2.waitKey(1) & 0xFF
            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
        except Exception:
            pass

    cv2.destroyAllWindows()
    camera.stop()
def check_disk_usage(disk):
    du = shutil.disk_usage(disk)
    free = du.free / du.total * 100
    return free > 20