def export_file(filepath):
    """
    Use filuxe to upload the file if it first matches the include regex and
    second doesn't match the exclude regex. If the include regex and the
    exclude regex are both empty strings then the file is exported.
    """
    if not FILUXE_WAN:
        # forwarding is disabled when no WAN server is configured
        return
    path = os.path.dirname(filepath)
    relpath = os.path.relpath(path, FILE_ROOT)
    file = os.path.basename(filepath)
    try:
        dir_rules = ACTIVE_RULES['dirs'][relpath]
        if not fwd_util.filename_is_included(file, dir_rules):
            inf(f'filename {file} is not in scope and will not be exported')
            return
        deb(f'forwarding {file}')
    # KeyError: no rules for this directory; TypeError: ACTIVE_RULES is None.
    # Was a bare 'except:' which also swallowed SystemExit/KeyboardInterrupt.
    except (KeyError, TypeError):
        inf(f'from {relpath} uploading file {file} (no rules)')
    try:
        deb(f'uploading {FILUXE_LAN.log_path(filepath)}')
        FILUXE_WAN.upload(filepath, os.path.join(relpath, file))
    except requests.ConnectionError:
        war('upload failed, WAN server is not reachable.')
    except FileNotFoundError:
        # the file was deleted between the event and the upload
        war(f'exception file not found, {os.path.join(relpath, file)} (internal race)')
def dump_rules():
    """Log the currently active per-directory rules (debug aid)."""
    try:
        if not ACTIVE_RULES['dirs'].items():
            war('this forwarder has no rules loaded ? Forwarding everything.')
        else:
            deb('dumping rules:')
            for _path, _rules in ACTIVE_RULES['dirs'].items():
                deb(f' "{_path}" {_rules}')
    # KeyError: no 'dirs' section; TypeError: ACTIVE_RULES is None.
    # Was a bare 'except:' which also swallowed SystemExit/KeyboardInterrupt.
    except (KeyError, TypeError):
        war('no dir rules found')
def delete_files(self, filegroup, group_name, use_http, rules):
    """
    Delete the files in 'filegroup' that exceed its 'maxfiles' limit, ordered by
    the group's 'deleteby' criteria (relies on Item sort order — assumed to
    implement comparison for 'deleteby'; defined elsewhere).

    Returns the list of filenames actually deleted (empty on dryrun or failure).
    """
    directory = filegroup['directory']
    filelist = filegroup['files']
    to_delete = len(filelist) - filegroup['maxfiles']
    deleted_files = []
    if to_delete > 0:
        delete_by = filegroup["deleteby"]
        inf(f'deleting {to_delete} files from {self.domain.domain} filestorage, '
            f'path="{filegroup["directory"]}" group "{group_name}". Deleteby={delete_by}')
        _items = [Item(x, directory, rules, delete_by) for x in filelist.items()]
        try:
            _sorted_items = sorted(_items)
        except Exception:
            first_filename = list(filelist.keys())[0]
            war(f'failed running delete by "{delete_by}" in group "{group_name}". Files of type "{first_filename}"')
            return deleted_files
        deb(f'http filelist sorted by: "{delete_by}", delete from top')
        for index, item in enumerate(_sorted_items):
            if index < to_delete:
                extra = 'DEL '
            else:
                extra = 'KEEP'
            fwd_util.print_file(item, extra)
        for item in _sorted_items[:to_delete]:
            filepath = os.path.join(directory, item.file)
            if self.dryrun:
                inf(f'dryrun: not deleting {filepath}')
            else:
                try:
                    if use_http:
                        fwd_util.delete_http_file(self.domain, filepath)
                    else:
                        fqn = os.path.join(self.domain.root(), filepath)
                        inf(f'deleting {self.domain.log_path(fqn)}')
                        os.remove(fqn)
                    deleted_files.append(item.file)
                except Exception:
                    # bugfix: the message used {fqn}, which is unbound on the
                    # http path and raised NameError inside the handler
                    war(f'failed to delete file {filepath} (http={use_http})')
    return deleted_files
def synchonize(lan_files):
    """Upload every file that is new or modified on the LAN server to the WAN server."""
    inf('synchonizing with WAN server, please wait')
    with Indent() as _:
        # If the WAN server is missing then the forwarder will not be able to do its job before
        # the WAN server can be reached.
        wan_files = fwd_util.get_http_filelist(FILUXE_WAN, rules=ACTIVE_RULES)
        if lan_files is None or wan_files is None:
            war('retrieving filelists failed, synchonization aborted')
            return
        inf(f'found {lan_files["info"]["files"]} files on LAN server and {wan_files["info"]["files"]} on WAN server')
        new_files = []
        modified_files = []
        copy_bytes = 0
        wan_filelist = wan_files['filelist']
        for directory, filelist in lan_files['filelist'].items():
            # a directory absent on the WAN side means every file in it is new
            wan_dir = wan_filelist.get(directory)
            for filename, metrics in filelist.items():
                pathname = os.path.join(directory, filename)
                if wan_dir is None or filename not in wan_dir:
                    new_files.append(pathname)
                    copy_bytes += metrics['size']
                elif metrics['time'] != wan_dir[filename]['time']:
                    modified_files.append(pathname)
                    copy_bytes += metrics['size']
        if not new_files and not modified_files:
            inf('WAN server is up-to-date')
        else:
            inf(f'synchonizing: uploading {human_file_size(copy_bytes)} in {len(new_files)} new files '
                f'and {len(modified_files)} modified files')
            for file in new_files + modified_files:
                export_file(os.path.join(FILE_ROOT, file))
            inf('synchonizing: complete')
def list_files(path):
    """Endpoint: return a json listing of the files and directories below 'path' in the filestorage."""
    recursive = request.args.get('recursive', type=inputs.boolean, default=False)
    path = safe_join(os.path.join(app.config['fileroot'], path))
    if not os.path.exists(path):
        err(f'filelist failed, path not found "{path}"')
        return 'path not found', 404
    fileroot = os.path.join(app.config['fileroot'], '')
    file_result = {}
    dir_result = []
    nof_files = 0
    for _root, dirs, _files in os.walk(path):
        relative = os.path.relpath(_root, fileroot)
        for file in _files:
            p = os.path.join(_root, file)
            # busy (open) files are left out of the listing
            # NOTE(review): they still count towards nof_files below — presumably intentional
            if not file_is_closed(p):
                war(f'skipping {p} since it is busy')
                continue
            file_result.setdefault(relative, {})[file] = {
                'size': os.path.getsize(p),
                'time': get_file_time(p)
            }
        dir_result.extend(
            os.path.normpath(os.path.join(relative, directory)) for directory in dirs)
        nof_files += len(_files)
        if not recursive:
            # only the top directory was requested
            break
    extra = "(recursive)" if recursive else ""
    inf(f'returning filelist at "{path}", {nof_files} files and {len(dir_result)} directories. {extra}')
    ret = {
        'info': {'fileroot': fileroot, 'files': nof_files, 'dirs': len(dir_result)},
        'filelist': file_result,
        'dirlist': dir_result
    }
    return jsonify(ret)
def get_http_filelist(filuxe_handle, path='/', recursive=True, rules=None):
    """
    Retrieve a filelist of files at path, or starting at path if recursive is True.
    If rules are given then the filelist will be (post) filtered to only contain
    entries covered by the rule set. It would make sense if the filtering were done
    on the server but this is not implemented yet.

    Returns the (optionally filtered) filelist, or None on any failure.
    """
    try:
        error_code, filelist = filuxe_handle.list(path, recursive=recursive)
        if error_code != ErrorCode.OK:
            err(f'get http filelist got error {error_code}')
            return None
        return filter_filelist(filelist, rules) if rules else filelist
    except requests.ConnectionError:
        war(f'unable to get file list from {filuxe_handle.domain} over http(s), server unreachable')
        return None
def new_file(filename):
    """
    Filesystem-listener callback for a new or changed file: first let the LAN
    deleter enforce max-files in the file's directory, then forward the file to
    the WAN server (unless the deleter just removed it).
    """
    if not os.path.exists(filename):
        # bugfix: the f-string had the literal "(unknown)" instead of the filename
        deb(f'listener: changed file "{filename}" does not exist anymore?')
        return
    inf(f'listener: new/changed file "{FILUXE_LAN.log_path(filename)}"')
    with Indent() as _:
        if LAN_FILE_DELETER:
            path = os.path.dirname(filename)
            filestorage_path = os.path.relpath(path, FILUXE_LAN.root())
            LAN_FILE_DELETER.enforce_max_files(filestorage_path, rules=ACTIVE_RULES, recursive=False)
            # the deleter may have deleted the very file that triggered the event
            if not os.path.exists(filename):
                war(f'listener: new file "{FILUXE_LAN.log_path(filename)}" already deleted and will not be forwarded')
                return
        export_file(filename)
def __init__(self, x, directory, rules, delete_by):
    """
    Wrap a (filename, attributes) pair for sorting by the deleter.

    delete_by == 'version': parse a three-part version from the filename using
    the 'version' regex in rules['dirs'][directory]; 'valid' is set only when
    exactly three dot-separated numbers were extracted.
    Any other delete_by: sort key is the file time from the attributes.
    """
    self.file, self.attr = x
    self.numbers = None
    self.valid = False
    self.delete_by = delete_by
    if delete_by != 'version':
        self.time = x[1]['time']
        self.valid = True
        return
    try:
        pattern = re.compile(rules['dirs'][directory]['version'])
        match = pattern.search(self.file)
        self.numbers = [int(part) for part in match.group(1).split('.')]
        if len(self.numbers) == 3:
            self.valid = True
        if not self.valid:
            war(f'sort by version but failed to parse 3 digits from "{directory}/{self.file}"')
    except KeyError as e:
        # no version regex configured for this directory
        war(f'sort by version but failed to parse 3 digits from "{directory}/{self.file}", key {e} not found')
    except AttributeError:
        # pattern.search returned None: the regex did not match the filename
        war(f'version regex failed on filename "{os.path.join(directory, self.file)}"')
    except Exception as e:
        deb(f'exception {e}')
# Sub-command argument definitions and dispatch. The top-level parser and
# 'parser_screenshot' sub-parser are created earlier in the file.
parser_screenshot.add_argument('screenshot', action='store_true', help='save screenshot')
# bugfix: help text read 'withtou siffix'
parser_screenshot.add_argument('-f', '--file', dest='file', action='store', default='img',
                               help='image name, without suffix')
parser_screenshot.add_argument('-c', '--color', dest='color', action='store', default='color',
                               choices=['color', 'gray', 'invert'], help='image colors')
parser_screenshot.add_argument('-d', '--date', dest='date', action='store_true', default=False,
                               help='add the current date before the name')

# autoscale
parser_autoscale = subparsers.add_parser('autoscale', help='autoscale oscilloscope')
parser_autoscale.add_argument('autoscale', action='store_true', help='run autoscale')

# function generator
parser_fgen = subparsers.add_parser('fgen', help='function generator')
parser_fgen.add_argument('fgen', action='store_true', help='function generator')
parser_fgen.add_argument('-f', '--frequency', dest='freq', action='store', default='1000',
                         help='output frequency')

args = parser.parse_args()

# Dispatch on which sub-command's attribute landed in the namespace.
if 'screenshot' in args:
    if args.date:
        # prefix the image name with a timestamp
        name = time.strftime('%Y.%m.%d-%H:%M:%S-', time.localtime()) + args.file
    else:
        name = args.file
    SCPI().screenshot(name=name, color=args.color)
elif 'autoscale' in args:
    SCPI().autoscale()
elif 'fgen' in args:
    SCPI().function_generator(freq=SI.si_to_exp(args.freq))
else:
    # bugfix: message read 'no any argument'
    log.war('no argument given')
def delete_http_file(filuxe_handle, filepath):
    """Ask the server behind 'filuxe_handle' to delete 'filepath'; an unreachable server is only warned about."""
    try:
        target = filuxe_handle.log_path(filepath)
        inf(f'http: deleting {target}')
        filuxe_handle.delete(filepath)
    except requests.ConnectionError:
        war('delete failed, server is not reachable.')
def filestorage_scan(root, path='', recursive=True):
    """
    Scan the filestorage below root/path and return a dict with 'filelist'
    (per-directory {filename: {'size', 'time'}} entries, open files skipped)
    and an 'info' summary block.
    """
    _filelist = {}
    total_directories = 0
    total_files = 0
    total_size = 0
    scan_root = os.path.join(root, path)
    if recursive:
        inf(f'recursively scanning "{scan_root}"')
    else:
        inf(f'rescanning directory "{scan_root}"')
    # bugfix: if os.walk yields nothing (nonexistent or empty scan_root),
    # 'relative_path' was unbound and the return statement raised NameError
    relative_path = os.path.normpath(path or '.')
    with Indent() as _:
        for _root, _dirs, _files in os.walk(scan_root):
            _path = os.path.relpath(_root, scan_root)
            size = 0
            relative_path = os.path.normpath(os.path.join(path, _path))
            if not _filelist.get(relative_path):
                _filelist[relative_path] = {}
            for _file in _files:
                try:
                    file = os.path.join(_root, _file)
                    if util.file_is_closed(os.path.abspath(file)):
                        _size = os.path.getsize(file)
                        epoch = util.get_file_time(file)
                        _filelist[relative_path][_file] = {'size': _size, 'time': epoch}
                        # reuse _size rather than calling getsize a second time
                        size += _size
                    else:
                        war(f'filestorage scan, ignoring open file {file}')
                except FileNotFoundError:
                    # file deleted while scanning
                    deb(f'filestorage scan: file not found {file}')
            total_directories += 1
            total_files += len(_files)
            total_size += size
            message = f'scanned "{relative_path}", {human_file_size(size)} in {len(_files)} files'
            if recursive:
                # If recursive it will be the first full scan so print what is happening
                inf(message)
            else:
                deb(message)
            if not recursive:
                break
    inf(f'found {total_directories} directories with {total_files} files occupying {human_file_size(total_size)}')
    # NOTE(review): 'fileroot' holds the last relative path scanned, not the
    # actual root — looks suspect but is preserved; confirm against consumers
    return {
        'filelist': _filelist,
        'info': {
            'dirs': total_directories,
            'fileroot': relative_path,
            'files': total_files,
            'size': total_size
        }
    }
def upload(self, filename, path=None, touch=False, force=False):
    """
    Upload 'filename' to the server as 'path' (default: same as filename).
    touch: use the current time rather than the file's own timestamp.
    force: request overwrite; always forced when self.force is set.

    Returns ErrorCode.OK, FILE_NOT_FOUND, FILE_ALREADY_EXIST or SERVER_ERROR.
    """
    if not path:
        path = filename
    if not os.path.exists(filename):
        # bugfix: the f-string had the literal "(unknown)" instead of the filename
        err(f'upload failed, file not found "{filename}"')
        return ErrorCode.FILE_NOT_FOUND
    if touch:
        epoch = time.time()
    else:
        epoch = get_file_time(filename)
    if self.force:
        force = True
    size = os.path.getsize(filename)
    inf(f'uploading "{os.path.normpath(filename)}" ({human_file_size(size)}) to {self.domain} server as "{path}"')
    if not size:
        # empty file: a single post with no payload
        response = requests.post(f'{self.server}/upload/{path}',
                                 headers={'key': self.write_key},
                                 data='',
                                 params={'time': epoch, 'force': force},
                                 verify=self.certificate)
    else:
        try:
            index = 0
            for chunk in chunked_reader(filename):
                offset = index + len(chunk)
                response = requests.post(
                    f'{self.server}/upload/{path}',
                    headers={
                        'key': self.write_key,
                        'Content-Type': 'application/octet-stream',
                        'Content-length': str(size),
                        'Content-Range': f'bytes {index}-{offset - 1}/{size}'
                    },
                    data=chunk,
                    params={'time': epoch, 'force': force},
                    verify=self.certificate)
                if response.status_code != 201:
                    # server rejected the chunk, stop and report below
                    break
                index = offset
        except Exception as e:
            war(f'upload failed with {e}')
    try:
        if response.status_code == 201:
            return ErrorCode.OK
        if response.status_code == 403:
            return ErrorCode.FILE_ALREADY_EXIST
    except NameError:
        # 'response' is unbound when the very first chunked post raised;
        # was a bare 'except:' which hid any other error as well
        pass
    return ErrorCode.SERVER_ERROR
def start(args, cfg, rules):
    """
    Bring the forwarder up: load rules, connect to the WAN (optional) and LAN
    (mandatory) servers, run the initial max-files enforcement and LAN->WAN
    sync, then start the filesystem observer and block in the event loop.
    """
    global LOADED_RULES, FILE_ROOT, FILUXE_WAN, FILUXE_LAN, CONFIG, LAN_FILE_DELETER, IDLE_DETECT
    lan_files = None
    FILE_ROOT = cfg['lan_filestorage']
    if not os.path.exists(FILE_ROOT):
        die(f'filestorage root {FILE_ROOT} not found. Giving up')
    inf(f'filestorage root {FILE_ROOT}')
    CONFIG = cfg
    if rules:
        LOADED_RULES = rules
        lan_files = fwd_util.get_local_filelist(FILE_ROOT)
        coldstart_rules(lan_files)
    else:
        war('running with default rules, forwarding everything')
    # bugfix throughout: bare 'except:' clauses replaced with 'except Exception:'
    # so SystemExit (from die()) and KeyboardInterrupt are not swallowed
    try:
        FILUXE_WAN = filuxe_api.Filuxe(CONFIG, lan=False, force=True)
    except Exception:
        war('no wan configuration found, forwarding disabled')
    if FILUXE_WAN:
        try:
            _, stats = FILUXE_WAN.get_stats()
            inf(f'connected to wan server version {stats["version"]}')
        except Exception:
            # forwarding is best-effort: continue without a WAN server
            err('wan server unreachable, forwarding disabled')
            FILUXE_WAN = None
    try:
        FILUXE_LAN = filuxe_api.Filuxe(CONFIG, lan=True)
    except Exception:
        die('no lan configuration found, can\'t continue')
    try:
        _, stats = FILUXE_LAN.get_stats()
        inf(f'connected to lan server version {stats["version"]}')
    except requests.exceptions.ConnectionError:
        war('lan server unreachable, continuing anyway')
    except Exception as e:
        die('unexpected exception while contacting lan server', e)
    if ACTIVE_RULES:
        LAN_FILE_DELETER = fwd_file_deleter.FileDeleter(FILUXE_LAN, args.dryrun)
        LAN_FILE_DELETER.enforce_max_files('', rules=ACTIVE_RULES, recursive=True, lan_files=lan_files)
    try:
        if FILUXE_WAN and cfg['sync_at_startup']:
            if not lan_files:
                lan_files = fwd_util.get_local_filelist(FILE_ROOT)
            synchonize(lan_files)
    except Exception as e:
        err(f'syncronizing lan to wan failed {e}')
    IDLE_DETECT = IdleDetect()
    try:
        run_filesystem_observer(FILE_ROOT)
    except Exception as e:
        die(f'unable to start file observer in {FILE_ROOT}', e)
    inf('filuxe forwarder is ready')
    try:
        LOOP.run_forever()
    except Exception as e:
        die('the fileobserver crashed. Perhaps the filestorage was deleted ?', e)
    return ErrorCode.OK
def calculate_rules(check_dirs):
    """
    1: check_dirs as list of directories: Starting from the root then recursively
       propagate rules matching the given directory structure.
    2: check_dirs as directory
       This can set rules on a new directory. If the directory already has rules
       assigned then it is a no-op.

    Rebuilds the module-global ACTIVE_RULES from LOADED_RULES.
    """
    global LOADED_RULES, ACTIVE_RULES
    if not LOADED_RULES:
        # nothing loaded: disable rule processing entirely
        ACTIVE_RULES = None
        return
    inf('calculating rules')
    with Indent() as _:
        default_rule = LOADED_RULES["default"]
        try:
            dir_rules = LOADED_RULES["dirs"]
            # snapshot of the loaded dir rules, used for the changed-check below
            entry_rules_json = json.dumps(dir_rules, sort_keys=True)
        except:
            war('no "dir" rules file section found, using "default" section only')
            return
        if isinstance(check_dirs, list):
            # full recalculation: start from an empty rule set
            check_dirs.sort()
            new_rules = {}
        else:
            # single directory: extend a copy of the currently active rules
            check_dirs = [check_dirs]
            new_rules = copy.deepcopy(ACTIVE_RULES['dirs'])
        try:
            for _key in check_dirs:
                _path_elements = _key.split(os.sep)
                # walk from the root towards _key so each level inherits its parent's rules
                for i in range(len(_path_elements)):
                    path_elements = _path_elements[:i + 1]
                    path = os.path.join(*path_elements)
                    if path == '.':
                        new_rules[path] = default_rule
                    else:
                        previous = os.path.relpath(os.path.join(path, os.pardir))
                        try:
                            # parent rules overridden by this directory's own entries
                            new_rules[path] = {**new_rules[previous], **dir_rules[path]}
                        except:
                            try:
                                # no explicit rules here: inherit the parent's unchanged
                                new_rules[path] = new_rules[previous]
                            except:
                                deb(f'no rules found for {path}, skipped')
                    deb(f'transient rule: "{path}" {new_rules[path]}')
        except:
            war(f'establishing rules for {_key} failed, check rules file')
        # purge rules that doesn't trigger any actions
        new_rules_copy = copy.deepcopy(new_rules)
        active_new_rules = {}
        active_new_rules['dirs'] = {}
        for path, path_rules in new_rules_copy.items():
            # keep a rule if it exports or carries anything beyond a single entry
            if path_rules.get('export') or len(path_rules) > 1:
                inf(f'adding rule for "{path}" : {path_rules}')
                active_new_rules['dirs'][path] = path_rules
        new_rules_json = json.dumps(active_new_rules, sort_keys=True)
        # NOTE(review): entry_rules_json was dumped from dir_rules (no 'dirs'
        # wrapper) while new_rules_json wraps in {'dirs': ...} — the two can
        # never be equal as-is, so 'changed' looks always-true; confirm intent
        changed = entry_rules_json != new_rules_json
        if changed:
            ACTIVE_RULES = active_new_rules
            extras = '. Rules were adjusted'
        else:
            extras = '. No changes ?'
    inf(f'rules calculated, {len(ACTIVE_RULES["dirs"])} active rules{extras}')