def __init__(self, nodedir=None, executable=None):
    self.executable = executable
    self.multi_folder_support = True
    if nodedir:
        self.nodedir = os.path.expanduser(nodedir)
    else:
        self.nodedir = os.path.join(os.path.expanduser('~'), '.tahoe')
    self.rootcap_path = os.path.join(self.nodedir, 'private', 'rootcap')
    self.servers_yaml_path = os.path.join(
        self.nodedir, 'private', 'servers.yaml')
    self.config = Config(os.path.join(self.nodedir, 'tahoe.cfg'))
    self.pidfile = os.path.join(self.nodedir, 'twistd.pid')
    self.nodeurl = None
    self.shares_happy = None
    self.name = os.path.basename(self.nodedir)
    self.api_token = None
    self.magic_folders_dir = os.path.join(self.nodedir, 'magic-folders')
    self.lock = DeferredLock()
    self.rootcap = None
    self.magic_folders = defaultdict(dict)
    self.remote_magic_folders = defaultdict(dict)
    self.use_tor = False
    self.monitor = Monitor(self)
    self._monitor_started = False
    self.state = Tahoe.STOPPED
def load_magic_folders(self):
    data = {}
    yaml_path = os.path.join(self.nodedir, 'private', 'magic_folders.yaml')
    try:
        with open(yaml_path) as f:
            data = yaml.safe_load(f)
    except OSError:
        pass
    folders_data = data.get('magic-folders')
    if folders_data:
        for key, value in folders_data.items():  # to preserve defaultdict
            self.magic_folders[key] = value
    for nodedir in get_nodedirs(self.magic_folders_dir):
        folder_name = os.path.basename(nodedir)
        if folder_name not in self.magic_folders:
            config = Config(os.path.join(nodedir, 'tahoe.cfg'))
            self.magic_folders[folder_name] = {
                'nodedir': nodedir,
                'directory': config.get('magic_folder', 'local.directory')
            }
    for folder in self.magic_folders:
        admin_dircap = self.get_admin_dircap(folder)
        if admin_dircap:
            self.magic_folders[folder]['admin_dircap'] = admin_dircap
    return self.magic_folders
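
# Usage sketch (illustrative only) for load_magic_folders() above, assuming an
# existing Tahoe instance `client`. The folder name, local path, and capability
# strings are hypothetical placeholders; the key names mirror what
# upgrade_legacy_config() and _create_magic_folder() write elsewhere in this
# codebase.
import os
import yaml


def write_example_magic_folders_yaml(client):
    yaml_path = os.path.join(client.nodedir, 'private', 'magic_folders.yaml')
    data = {
        'magic-folders': {
            'Documents': {  # hypothetical folder name
                'directory': '/home/user/Documents',  # placeholder path
                'collective_dircap': 'URI:DIR2-RO:xxxx:yyyy',  # placeholder
                'upload_dircap': 'URI:DIR2:xxxx:yyyy',  # placeholder
                'poll_interval': 60,
            }
        }
    }
    with open(yaml_path, 'w') as f:
        yaml.safe_dump(data, f, default_flow_style=False)
    # load_magic_folders() should now pick up the 'Documents' entry
    return client.load_magic_folders()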
def __init__(self, nodedir=None, executable=None, reactor=None):
    if reactor is None:
        from twisted.internet import reactor
    self.executable = executable
    self.multi_folder_support = True
    if nodedir:
        self.nodedir = os.path.expanduser(nodedir)
    else:
        self.nodedir = os.path.join(os.path.expanduser('~'), '.tahoe')
    self.rootcap_path = os.path.join(self.nodedir, 'private', 'rootcap')
    self.servers_yaml_path = os.path.join(
        self.nodedir, 'private', 'servers.yaml')
    self.config = Config(os.path.join(self.nodedir, 'tahoe.cfg'))
    self.pidfile = os.path.join(self.nodedir, 'twistd.pid')
    self.nodeurl = None
    self.shares_happy = None
    self.name = os.path.basename(self.nodedir)
    self.api_token = None
    self.magic_folders_dir = os.path.join(self.nodedir, 'magic-folders')
    self.lock = DeferredLock()
    self.rootcap = None
    self.magic_folders = defaultdict(dict)
    self.remote_magic_folders = defaultdict(dict)
    self.use_tor = False
    self.monitor = Monitor(self)
    streamedlogs_maxlen = None
    debug_settings = global_settings.get('debug')
    if debug_settings:
        log_maxlen = debug_settings.get('log_maxlen')
        if log_maxlen is not None:
            streamedlogs_maxlen = int(log_maxlen)
    self.streamedlogs = StreamedLogs(reactor, streamedlogs_maxlen)
    self.state = Tahoe.STOPPED
    self.newscap = ""
    self.newscap_checker = NewscapChecker(self)
def validate_grid(settings, parent=None):
    nickname = settings.get("nickname")
    while not nickname:
        nickname, _ = prompt_for_grid_name(nickname, parent)
    nodedir = os.path.join(config_dir, nickname)
    if os.path.isdir(nodedir):
        conflicting_introducer = False
        introducer = settings.get("introducer")
        if introducer:
            config = Config(os.path.join(nodedir, "tahoe.cfg"))
            existing_introducer = config.get("client", "introducer.furl")
            if introducer != existing_introducer:
                conflicting_introducer = True
        conflicting_servers = False
        servers = settings.get("storage")
        if servers:
            existing_servers = Tahoe(nodedir).get_storage_servers()
            if servers != existing_servers:
                conflicting_servers = True
        if conflicting_introducer or conflicting_servers:
            while os.path.isdir(os.path.join(config_dir, nickname)):
                nickname, _ = prompt_for_grid_name(nickname, parent)
            settings["nickname"] = nickname
    return settings
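
# A rough illustration of the settings mapping that validate_grid() inspects,
# assuming the module-level `config_dir` used above. The nickname, fURLs, and
# server entries are hypothetical placeholders; only the keys the function
# reads ("nickname", "introducer", "storage") are meaningful here.
example_settings = {
    "nickname": "TestGrid",
    "introducer": "pb://abc123@node.example.org:44800/introducer",  # placeholder
    "storage": {
        "server-1": {
            "anonymous-storage-FURL": "pb://def456@node.example.org:44801/storage",
            "nickname": "server-1",
        }
    },
}
# validated = validate_grid(example_settings)  # may prompt on name conflicts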
def __init__(self, args):
    self.args = args
    self.gateways = []
    self.sync_folders = []
    self.config = Config(self.args.config)
    self.servers_connected = 0
    self.servers_known = 0
    self.total_available_space = 0
    self.status_text = 'Status: '
    self.new_messages = []
def __init__(self, args):
    self.args = args
    self.gateways = []
    self.sync_folders = []
    self.config = Config(args.config[0] if args.config else None)
    self.servers_connected = 0
    self.servers_known = 0
    self.total_available_space = 0
    self.status_text = 'Status: '
    self.new_messages = []
    self.settings = {}
    self.tray = None
def get_storage_providers():
    providers_db = os.path.join(Config().config_dir, 'storage-providers.yml')
    try:
        with open(providers_db) as f:
            return yaml.safe_load(f)
    except OSError:
        create_storage_providers_db()
        return PROVIDERS
def __init__(self, nodedir=None, executable=None):
    self.executable = executable
    if nodedir:
        self.nodedir = os.path.expanduser(nodedir)
    else:
        self.nodedir = os.path.join(os.path.expanduser('~'), '.tahoe')
    self.rootcap_path = os.path.join(self.nodedir, 'private', 'rootcap')
    self.config = Config(os.path.join(self.nodedir, 'tahoe.cfg'))
    self.pidfile = os.path.join(self.nodedir, 'twistd.pid')
    self.nodeurl = None
    self.shares_happy = None
    self.name = os.path.basename(self.nodedir)
    self.api_token = None
    self.magic_folders_dir = os.path.join(self.nodedir, 'magic-folders')
    self.lock = DeferredLock()
    self.rootcap = None
    self.magic_folders = defaultdict(dict)
def test_config_dir():
    config = Config()
    if sys.platform == 'win32':
        assert config.config_dir == os.path.join(
            os.getenv('APPDATA'), 'Gridsync')
    elif sys.platform == 'darwin':
        assert config.config_dir == os.path.join(
            os.path.expanduser('~'), 'Library', 'Application Support',
            'Gridsync')
    else:
        assert config.config_dir == os.path.join(
            os.path.expanduser('~'), '.config', 'gridsync')
def create_storage_providers_db():
    providers_db = os.path.join(Config().config_dir, 'storage-providers.yml')
    with open(providers_db, 'w') as f:
        try:
            os.chmod(providers_db, 0o600)
        except OSError:
            pass
        yaml.safe_dump(PROVIDERS, f, encoding='utf-8', allow_unicode=True,
                       width=68, default_flow_style=False)
def __init__(self, location=None, settings=None):
    self.location = location  # introducer fURL, gateway URL, or local path
    self.settings = settings
    self.node_dir = None
    self.node_url = None
    self.status = {}
    if not location:
        pass
    elif location.startswith('pb://'):
        if not self.settings:
            self.settings = DEFAULT_SETTINGS
        self.settings['client']['introducer.furl'] = location
        _, connection_hints = decode_introducer_furl(location)
        first_hostname = connection_hints.split(',')[0].split(':')[0]
        self.node_dir = os.path.join(Config().config_dir, first_hostname)
    elif location.startswith('http://') or location.startswith('https://'):
        location += ('/' if not location.endswith('/') else '')
        self.node_url = location
    else:
        self.node_dir = os.path.join(Config().config_dir, location)
    if self.node_dir:
        self.name = os.path.basename(self.node_dir)
    else:
        self.name = location
def add_storage_provider(introducer_furl, name=None, description=None):
    providers_db = os.path.join(Config().config_dir, 'storage-providers.yml')
    storage_providers = get_storage_providers()
    if not name:
        _, connection_hints = decode_introducer_furl(introducer_furl)
        name = connection_hints.split(',')[0].split(':')[0]
    new_provider = {}
    new_provider[name] = {
        'introducer.furl': introducer_furl,
        'description': description
    }
    storage_providers.update(new_provider)
    with open(providers_db, 'w') as f:
        try:
            os.chmod(providers_db, 0o600)
        except OSError:
            pass
        yaml.safe_dump(storage_providers, f, encoding='utf-8',
                       allow_unicode=True, width=68,
                       default_flow_style=False)
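
# A small usage sketch for the two helpers above. The introducer fURL,
# provider name, and description are hypothetical placeholders, not a real
# grid; the sketch simply registers a provider and reads it back.
def example_register_provider():
    add_storage_provider(
        'pb://abc123@node.example.org:44800/introducer',  # placeholder fURL
        name='ExampleGrid',
        description='Hypothetical provider used for illustration only')
    providers = get_storage_providers()
    return providers.get('ExampleGrid')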
def test_config_dir_xdg_config_home(monkeypatch):
    monkeypatch.setattr("sys.platform", "linux")
    monkeypatch.setenv('XDG_CONFIG_HOME', '/test')
    assert Config().config_dir == os.path.join('/test', 'gridsync')
def test_config_file():
    config = Config('test')
    assert config.config_file == 'test'
def test_config_dir_darwin(monkeypatch):
    monkeypatch.setattr("sys.platform", "darwin")
    assert Config().config_dir == os.path.join(
        os.path.expanduser('~'), 'Library', 'Application Support', 'Gridsync')
def test_config_dir_other(monkeypatch):
    monkeypatch.setattr("sys.platform", "linux")
    assert Config().config_dir == os.path.join(
        os.path.expanduser('~'), '.config', 'gridsync')
def test_config_set(tmpdir):
    config = Config(os.path.join(str(tmpdir), 'test_set.ini'))
    config.set('test_section', 'test_option', 'test_value')
    with open(config.filename) as f:
        assert f.read() == '[test_section]\ntest_option = test_value\n\n'
def test_config_dir_win32(monkeypatch):
    monkeypatch.setattr("sys.platform", "win32")
    monkeypatch.setenv('APPDATA', 'C:\\Users\\test\\AppData\\Roaming')
    assert Config().config_dir == os.path.join(
        os.getenv('APPDATA'), 'Gridsync')
def test_config_load(tmpdir):
    config = Config(os.path.join(str(tmpdir), 'test_load.ini'))
    with open(config.filename, 'w') as f:
        f.write('[test_section]\ntest_option = test_value\n\n')
    assert config.load() == {'test_section': {'test_option': 'test_value'}}
def test_load(tmpdir):
    config = Config(os.path.join(str(tmpdir), "test.yml"))
    with open(config.config_file, "w") as f:
        f.write("test: test\n")
    assert config.load() == {"test": "test"}
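
# A companion sketch to test_load above: a save/load round trip. It assumes
# the YAML-backed Config exposes a save() method that takes a dict, as used by
# Server (self.config.save(self.settings)) later in this collection; if the
# actual signature differs, adjust accordingly. The settings dict is a
# hypothetical placeholder.
def test_save_load_roundtrip(tmpdir):
    config = Config(os.path.join(str(tmpdir), "roundtrip.yml"))
    settings = {"grid": {"nickname": "TestGrid"}}  # placeholder settings
    config.save(settings)
    assert config.load() == settings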
class Tahoe(): def __init__(self, nodedir=None, executable=None): self.executable = executable self.multi_folder_support = True if nodedir: self.nodedir = os.path.expanduser(nodedir) else: self.nodedir = os.path.join(os.path.expanduser('~'), '.tahoe') self.rootcap_path = os.path.join(self.nodedir, 'private', 'rootcap') self.servers_yaml_path = os.path.join(self.nodedir, 'private', 'servers.yaml') self.config = Config(os.path.join(self.nodedir, 'tahoe.cfg')) self.pidfile = os.path.join(self.nodedir, 'twistd.pid') self.nodeurl = None self.shares_happy = None self.name = os.path.basename(self.nodedir) self.api_token = None self.magic_folders_dir = os.path.join(self.nodedir, 'magic-folders') self.lock = DeferredLock() self.rootcap = None self.magic_folders = defaultdict(dict) self.remote_magic_folders = defaultdict(dict) self.use_tor = False self.monitor = Monitor(self) self._monitor_started = False def config_set(self, section, option, value): self.config.set(section, option, value) def config_get(self, section, option): return self.config.get(section, option) def get_settings(self, include_rootcap=False): settings = { 'nickname': self.name, 'shares-needed': self.config_get('client', 'shares.needed'), 'shares-happy': self.config_get('client', 'shares.happy'), 'shares-total': self.config_get('client', 'shares.total') } introducer = self.config_get('client', 'introducer.furl') if introducer: settings['introducer'] = introducer storage_servers = self.get_storage_servers() if storage_servers: settings['storage'] = storage_servers icon_path = os.path.join(self.nodedir, 'icon') icon_url_path = icon_path + '.url' if os.path.exists(icon_url_path): with open(icon_url_path) as f: settings['icon_url'] = f.read().strip() if include_rootcap and os.path.exists(self.rootcap_path): settings['rootcap'] = self.read_cap_from_file(self.rootcap_path) # TODO: Verify integrity? Support 'icon_base64'? 
return settings def export(self, dest, include_rootcap=False): log.debug("Exporting settings to '%s'...", dest) settings = self.get_settings(include_rootcap) if self.use_tor: settings['hide-ip'] = True with open(dest, 'w') as f: f.write(json.dumps(settings)) log.debug("Exported settings to '%s'", dest) def get_aliases(self): aliases = {} aliases_file = os.path.join(self.nodedir, 'private', 'aliases') try: with open(aliases_file) as f: for line in f.readlines(): if not line.startswith('#'): try: name, cap = line.split(':', 1) aliases[name + ':'] = cap.strip() except ValueError: pass return aliases except IOError: return aliases def get_alias(self, alias): if not alias.endswith(':'): alias = alias + ':' try: for name, cap in self.get_aliases().items(): if name == alias: return cap return None except AttributeError: return None def _set_alias(self, alias, cap=None): if not alias.endswith(':'): alias = alias + ':' aliases = self.get_aliases() if cap: aliases[alias] = cap else: try: del aliases[alias] except (KeyError, TypeError): return tmp_aliases_file = os.path.join(self.nodedir, 'private', 'aliases.tmp') with open(tmp_aliases_file, 'w') as f: data = '' for name, dircap in aliases.items(): data += '{} {}\n'.format(name, dircap) f.write(data) aliases_file = os.path.join(self.nodedir, 'private', 'aliases') shutil.move(tmp_aliases_file, aliases_file) def add_alias(self, alias, cap): self._set_alias(alias, cap) def remove_alias(self, alias): self._set_alias(alias) def _read_servers_yaml(self): try: with open(self.servers_yaml_path) as f: return yaml.safe_load(f) except OSError: return {} def get_storage_servers(self): yaml_data = self._read_servers_yaml() if not yaml_data: return {} storage = yaml_data.get('storage') if not storage or not isinstance(storage, dict): return {} results = {} for server, server_data in storage.items(): ann = server_data.get('ann') if not ann: continue results[server] = { 'anonymous-storage-FURL': ann.get('anonymous-storage-FURL') } nickname = ann.get('nickname') if nickname: results[server]['nickname'] = nickname return results def add_storage_server(self, server_id, furl, nickname=None): log.debug("Adding storage server: %s...", server_id) yaml_data = self._read_servers_yaml() if not yaml_data or not yaml_data.get('storage'): yaml_data['storage'] = {} yaml_data['storage'][server_id] = { 'ann': { 'anonymous-storage-FURL': furl } } if nickname: yaml_data['storage'][server_id]['ann']['nickname'] = nickname with open(self.servers_yaml_path + '.tmp', 'w') as f: f.write(yaml.safe_dump(yaml_data, default_flow_style=False)) shutil.move(self.servers_yaml_path + '.tmp', self.servers_yaml_path) log.debug("Added storage server: %s", server_id) def add_storage_servers(self, storage_servers): for server_id, data in storage_servers.items(): nickname = data.get('nickname') furl = data.get('anonymous-storage-FURL') if furl: self.add_storage_server(server_id, furl, nickname) else: log.warning("No storage fURL provided for %s!", server_id) def load_magic_folders(self): data = {} yaml_path = os.path.join(self.nodedir, 'private', 'magic_folders.yaml') try: with open(yaml_path) as f: data = yaml.safe_load(f) except OSError: pass folders_data = data.get('magic-folders') if folders_data: for key, value in folders_data.items(): # to preserve defaultdict self.magic_folders[key] = value for folder in self.magic_folders: admin_dircap = self.get_admin_dircap(folder) if admin_dircap: self.magic_folders[folder]['admin_dircap'] = admin_dircap return self.magic_folders def line_received(self, 
line): # TODO: Connect to Core via Qt signals/slots? log.debug("[%s] >>> %s", self.name, line) def _win32_popen(self, args, env, callback_trigger=None): # This is a workaround to prevent Command Prompt windows from opening # when spawning tahoe processes from the GUI on Windows, as Twisted's # reactor.spawnProcess() API does not allow Windows creation flags to # be passed to subprocesses. By passing 0x08000000 (CREATE_NO_WINDOW), # the opening of the Command Prompt window will be surpressed while # still allowing access to stdout/stderr. See: # https://twistedmatrix.com/pipermail/twisted-python/2007-February/014733.html import subprocess proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, creationflags=0x08000000) output = BytesIO() for line in iter(proc.stdout.readline, ''): output.write(line.encode('utf-8')) self.line_received(line.rstrip()) if callback_trigger and callback_trigger in line.rstrip(): return proc.pid proc.poll() if proc.returncode: raise TahoeCommandError(str(output.getvalue()).strip()) else: return str(output.getvalue()).strip() @inlineCallbacks def command(self, args, callback_trigger=None): exe = (self.executable if self.executable else which('tahoe')[0]) args = [exe] + ['-d', self.nodedir] + args env = os.environ env['PYTHONUNBUFFERED'] = '1' log.debug("Executing: %s", ' '.join(args)) if sys.platform == 'win32' and getattr(sys, 'frozen', False): from twisted.internet.threads import deferToThread output = yield deferToThread(self._win32_popen, args, env, callback_trigger) else: protocol = CommandProtocol(self, callback_trigger) reactor.spawnProcess(protocol, exe, args=args, env=env) output = yield protocol.done return output @inlineCallbacks def get_features(self): try: yield self.command(['magic-folder', 'list']) except TahoeCommandError as err: if str(err).strip().endswith('Unknown command: list'): # Has magic-folder support but no multi-magic-folder support return self.executable, True, False # Has no magic-folder support ('Unknown command: magic-folder') # or something else went wrong; consider executable unsupported return self.executable, False, False #if output: # Has magic-folder support and multi-magic-folder support return self.executable, True, True @inlineCallbacks def create_client(self, **kwargs): if os.path.exists(self.nodedir): raise FileExistsError("Nodedir already exists: {}".format( self.nodedir)) args = ['create-client', '--webport=tcp:0:interface=127.0.0.1'] for key, value in kwargs.items(): if key in ('nickname', 'introducer', 'shares-needed', 'shares-happy', 'shares-total'): args.extend(['--{}'.format(key), str(value)]) elif key in ['needed', 'happy', 'total']: args.extend(['--shares-{}'.format(key), str(value)]) elif key == 'hide-ip': args.append('--hide-ip') yield self.command(args) storage_servers = kwargs.get('storage') if storage_servers and isinstance(storage_servers, dict): self.add_storage_servers(storage_servers) def kill(self): try: with open(self.pidfile, 'r') as f: pid = int(f.read()) except (EnvironmentError, ValueError) as err: log.warning("Error loading pid from pidfile: %s", str(err)) return log.debug("Trying to kill PID %d...", pid) try: os.kill(pid, signal.SIGTERM) except OSError as err: if err.errno not in (errno.ESRCH, errno.EINVAL): log.error(err) @inlineCallbacks def stop(self): if not os.path.isfile(self.pidfile): log.error('No "twistd.pid" file found in %s', self.nodedir) return if sys.platform == 'win32': self.kill() else: try: yield self.command(['stop']) 
except TahoeCommandError: # Process already dead/not running pass try: os.remove(self.pidfile) except EnvironmentError: pass @inlineCallbacks def upgrade_legacy_config(self): log.debug("Upgrading legacy configuration layout..") nodedirs = get_nodedirs(self.magic_folders_dir) if not nodedirs: log.warning("No nodedirs found; returning.") return magic_folders = {} for nodedir in nodedirs: basename = os.path.basename(nodedir) log.debug("Migrating configuration for '%s'...", basename) tahoe = Tahoe(nodedir) directory = tahoe.config_get('magic_folder', 'local.directory') poll_interval = tahoe.config_get('magic_folder', 'poll_interval') collective_dircap = self.read_cap_from_file( os.path.join(nodedir, 'private', 'collective_dircap')) magic_folder_dircap = self.read_cap_from_file( os.path.join(nodedir, 'private', 'magic_folder_dircap')) magic_folders[basename] = { 'collective_dircap': collective_dircap, 'directory': directory, 'poll_interval': poll_interval, 'upload_dircap': magic_folder_dircap } db_src = os.path.join(nodedir, 'private', 'magicfolderdb.sqlite') db_fname = ''.join(['magicfolder_', basename, '.sqlite']) db_dest = os.path.join(self.nodedir, 'private', db_fname) log.debug("Copying %s to %s...", db_src, db_dest) shutil.copyfile(db_src, db_dest) collective_dircap_rw = tahoe.get_alias('magic') if collective_dircap_rw: alias = hashlib.sha256(basename.encode()).hexdigest() + ':' yield self.command(['add-alias', alias, collective_dircap_rw]) yaml_path = os.path.join(self.nodedir, 'private', 'magic_folders.yaml') log.debug("Writing magic-folder configs to %s...", yaml_path) with open(yaml_path, 'w') as f: f.write(yaml.safe_dump({'magic-folders': magic_folders})) log.debug("Backing up legacy configuration...") shutil.move(self.magic_folders_dir, self.magic_folders_dir + '.backup') log.debug("Enabling magic-folder for %s...", self.nodedir) self.config_set('magic_folder', 'enabled', 'True') log.debug("Finished upgrading legacy configuration") @inlineCallbacks def start(self): if not self._monitor_started: self.monitor.start() self._monitor_started = True tcp = self.config_get('connections', 'tcp') if tcp and tcp.lower() == 'tor': self.use_tor = True if os.path.isfile(self.pidfile): yield self.stop() if self.multi_folder_support and os.path.isdir(self.magic_folders_dir): yield self.upgrade_legacy_config() pid = yield self.command(['run'], 'client running') pid = str(pid) if sys.platform == 'win32' and pid.isdigit(): with open(self.pidfile, 'w') as f: f.write(pid) with open(os.path.join(self.nodedir, 'node.url')) as f: self.nodeurl = f.read().strip() token_file = os.path.join(self.nodedir, 'private', 'api_auth_token') with open(token_file) as f: self.api_token = f.read().strip() self.shares_happy = int(self.config_get('client', 'shares.happy')) self.load_magic_folders() @inlineCallbacks def restart(self): log.debug("Restarting %s client...", self.name) # Temporarily disable desktop notifications for (dis)connect events pref = get_preference('notifications', 'connection') set_preference('notifications', 'connection', 'false') yield self.stop() yield self.start() yield self.await_ready() yield deferLater(reactor, 1, lambda: None) set_preference('notifications', 'connection', pref) log.debug("Finished restarting %s client.", self.name) @inlineCallbacks def get_grid_status(self): if not self.nodeurl: return None try: resp = yield treq.get(self.nodeurl + '?t=json') except ConnectError: return None if resp.code == 200: content = yield treq.content(resp) content = json.loads(content.decode('utf-8')) 
servers_connected = 0 servers_known = 0 available_space = 0 if 'servers' in content: servers = content['servers'] servers_known = len(servers) for server in servers: if server['connection_status'].startswith('Connected'): servers_connected += 1 if server['available_space']: available_space += server['available_space'] return servers_connected, servers_known, available_space return None @inlineCallbacks def get_connected_servers(self): if not self.nodeurl: return None try: resp = yield treq.get(self.nodeurl) except ConnectError: return None if resp.code == 200: html = yield treq.content(resp) match = re.search('Connected to <span>(.+?)</span>', html.decode('utf-8')) if match: return int(match.group(1)) return None @inlineCallbacks def is_ready(self): if not self.shares_happy: return False connected_servers = yield self.get_connected_servers() return bool(connected_servers and connected_servers >= self.shares_happy) @inlineCallbacks def await_ready(self): # TODO: Replace with "readiness" API? # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2844 ready = yield self.is_ready() while not ready: yield deferLater(reactor, 0.2, lambda: None) ready = yield self.is_ready() @inlineCallbacks def mkdir(self, parentcap=None, childname=None): url = self.nodeurl + 'uri' params = {'t': 'mkdir'} if parentcap and childname: url += '/' + parentcap params['name'] = childname resp = yield treq.post(url, params=params) if resp.code == 200: content = yield treq.content(resp) return content.decode('utf-8').strip() raise TahoeWebError("Error creating Tahoe-LAFS directory: {}".format( resp.code)) @inlineCallbacks def create_rootcap(self): log.debug("Creating rootcap...") if os.path.exists(self.rootcap_path): raise OSError("Rootcap file already exists: {}".format( self.rootcap_path)) self.rootcap = yield self.mkdir() with open(self.rootcap_path, 'w') as f: f.write(self.rootcap) log.debug("Rootcap saved to file: %s", self.rootcap_path) return self.rootcap @inlineCallbacks def upload(self, local_path): log.debug("Uploading %s...", local_path) with open(local_path, 'rb') as f: resp = yield treq.put('{}uri'.format(self.nodeurl), f) if resp.code == 200: content = yield treq.content(resp) log.debug("Successfully uploaded %s", local_path) return content.decode('utf-8') content = yield treq.content(resp) raise TahoeWebError(content.decode('utf-8')) @inlineCallbacks def download(self, cap, local_path): log.debug("Downloading %s...", local_path) resp = yield treq.get('{}uri/{}'.format(self.nodeurl, cap)) if resp.code == 200: with open(local_path, 'wb') as f: yield treq.collect(resp, f.write) log.debug("Successfully downloaded %s", local_path) else: content = yield treq.content(resp) raise TahoeWebError(content.decode('utf-8')) @inlineCallbacks def link(self, dircap, childname, childcap): lock = yield self.lock.acquire() try: resp = yield treq.post('{}uri/{}/?t=uri&name={}&uri={}'.format( self.nodeurl, dircap, childname, childcap)) finally: yield lock.release() if resp.code != 200: content = yield treq.content(resp) raise TahoeWebError(content.decode('utf-8')) @inlineCallbacks def unlink(self, dircap, childname): lock = yield self.lock.acquire() try: resp = yield treq.post('{}uri/{}/?t=unlink&name={}'.format( self.nodeurl, dircap, childname)) finally: yield lock.release() if resp.code != 200: content = yield treq.content(resp) raise TahoeWebError(content.decode('utf-8')) @inlineCallbacks def link_magic_folder_to_rootcap(self, name): log.debug("Linking folder '%s' to rootcap...", name) rootcap = self.get_rootcap() admin_dircap = 
self.get_admin_dircap(name) if admin_dircap: yield self.link(rootcap, name + ' (admin)', admin_dircap) collective_dircap = self.get_collective_dircap(name) yield self.link(rootcap, name + ' (collective)', collective_dircap) personal_dircap = self.get_magic_folder_dircap(name) yield self.link(rootcap, name + ' (personal)', personal_dircap) log.debug("Successfully linked folder '%s' to rootcap", name) @inlineCallbacks def unlink_magic_folder_from_rootcap(self, name): log.debug("Unlinking folder '%s' from rootcap...", name) rootcap = self.get_rootcap() yield self.unlink(rootcap, name + ' (collective)') yield self.unlink(rootcap, name + ' (personal)') if 'admin_dircap' in self.remote_magic_folders[name]: yield self.unlink(rootcap, name + ' (admin)') del self.remote_magic_folders[name] log.debug("Successfully unlinked folder '%s' from rootcap", name) @inlineCallbacks def create_magic_folder(self, path, join_code=None, admin_dircap=None, poll_interval=60): # XXX See Issue #55 path = os.path.realpath(os.path.expanduser(path)) poll_interval = str(poll_interval) try: os.makedirs(path) except OSError: pass name = os.path.basename(path) alias = hashlib.sha256(name.encode()).hexdigest() + ':' if join_code: yield self.command([ 'magic-folder', 'join', '-p', poll_interval, '-n', name, join_code, path ]) if admin_dircap: self.add_alias(alias, admin_dircap) else: yield self.await_ready() yield self.command([ 'magic-folder', 'create', '-p', poll_interval, '-n', name, alias, 'admin', path ]) self.load_magic_folders() yield self.link_magic_folder_to_rootcap(name) def local_magic_folder_exists(self, folder_name): if folder_name in self.magic_folders: return True return False def remote_magic_folder_exists(self, folder_name): if folder_name in self.remote_magic_folders: return True return False def magic_folder_exists(self, folder_name): if self.local_magic_folder_exists(folder_name): return True if self.remote_magic_folder_exists(folder_name): return True return False @inlineCallbacks def magic_folder_invite(self, name, nickname): yield self.await_ready() admin_dircap = self.get_admin_dircap(name) if not admin_dircap: raise TahoeError( 'No admin dircap found for folder "{}"'.format(name)) created = yield self.mkdir(admin_dircap, nickname) code = '{}+{}'.format(self.get_collective_dircap(name), created) return code @inlineCallbacks def magic_folder_uninvite(self, name, nickname): log.debug('Uninviting "%s" from "%s"...', nickname, name) alias = hashlib.sha256(name.encode()).hexdigest() yield self.unlink(self.get_alias(alias), nickname) log.debug('Uninvited "%s" from "%s"...', nickname, name) @inlineCallbacks def remove_magic_folder(self, name): if name in self.magic_folders: del self.magic_folders[name] yield self.command(['magic-folder', 'leave', '-n', name]) self.remove_alias(hashlib.sha256(name.encode()).hexdigest()) @inlineCallbacks def get_magic_folder_status(self, name): if not self.nodeurl or not self.api_token: return None try: resp = yield treq.post(self.nodeurl + 'magic_folder', { 'token': self.api_token, 'name': name, 't': 'json' }) except ConnectError: return None if resp.code == 200: content = yield treq.content(resp) return json.loads(content.decode('utf-8')) return None @inlineCallbacks def get_json(self, cap): if not cap or not self.nodeurl: return None uri = '{}uri/{}/?t=json'.format(self.nodeurl, cap) try: resp = yield treq.get(uri) except ConnectError: return None if resp.code == 200: content = yield treq.content(resp) return json.loads(content.decode('utf-8')) return None @staticmethod def 
read_cap_from_file(filepath): try: with open(filepath) as f: cap = f.read().strip() except OSError: return None return cap def get_rootcap(self): if not self.rootcap: self.rootcap = self.read_cap_from_file(self.rootcap_path) return self.rootcap def get_admin_dircap(self, name): if name in self.magic_folders: try: return self.magic_folders[name]['admin_dircap'] except KeyError: pass cap = self.get_alias(hashlib.sha256(name.encode()).hexdigest()) self.magic_folders[name]['admin_dircap'] = cap return cap def _get_magic_folder_setting(self, folder_name, setting_name): if folder_name not in self.magic_folders: self.load_magic_folders() if folder_name in self.magic_folders: try: return self.magic_folders[folder_name][setting_name] except KeyError: return None return None def get_collective_dircap(self, name): return self._get_magic_folder_setting(name, 'collective_dircap') def get_magic_folder_dircap(self, name): return self._get_magic_folder_setting(name, 'upload_dircap') def get_magic_folder_directory(self, name): return self._get_magic_folder_setting(name, 'directory') @inlineCallbacks def get_magic_folders_from_rootcap(self, content=None): if not content: content = yield self.get_json(self.get_rootcap()) if content: folders = defaultdict(dict) for name, data in content[1]['children'].items(): data_dict = data[1] if name.endswith(' (collective)'): prefix = name.split(' (collective)')[0] folders[prefix]['collective_dircap'] = data_dict['ro_uri'] elif name.endswith(' (personal)'): prefix = name.split(' (personal)')[0] folders[prefix]['upload_dircap'] = data_dict['rw_uri'] elif name.endswith(' (admin)'): prefix = name.split(' (admin)')[0] folders[prefix]['admin_dircap'] = data_dict['rw_uri'] self.remote_magic_folders = folders return folders return None @inlineCallbacks def ensure_folder_links(self, _): yield self.await_ready() if not self.get_rootcap(): yield self.create_rootcap() if self.magic_folders: remote_folders = yield self.get_magic_folders_from_rootcap() for folder in self.magic_folders: if folder not in remote_folders: self.link_magic_folder_to_rootcap(folder) else: log.debug( 'Folder "%s" already linked to rootcap; ' 'skipping.', folder) @inlineCallbacks def get_magic_folder_members(self, name, content=None): if not content: content = yield self.get_json(self.get_collective_dircap(name)) if content: members = [] children = content[1]['children'] magic_folder_dircap = self.get_magic_folder_dircap(name) for member in children: readcap = children[member][1]['ro_uri'] if magic_folder_dircap: my_fingerprint = magic_folder_dircap.split(':')[-1] fingerprint = readcap.split(':')[-1] if fingerprint == my_fingerprint: self.magic_folders[name]['member'] = member members.insert(0, (member, readcap)) else: members.append((member, readcap)) else: members.append((member, readcap)) return members return None @staticmethod def _extract_metadata(metadata): try: deleted = metadata['metadata']['deleted'] except KeyError: deleted = False if deleted: cap = metadata['metadata']['last_downloaded_uri'] else: cap = metadata['ro_uri'] return { 'size': int(metadata['size']), 'mtime': float(metadata['metadata']['tahoe']['linkmotime']), 'deleted': deleted, 'cap': cap } @inlineCallbacks def get_magic_folder_state(self, name, members=None): total_size = 0 history_dict = {} if not members: members = yield self.get_magic_folder_members(name) if members: for member, dircap in members: json_data = yield self.get_json(dircap) try: children = json_data[1]['children'] except (TypeError, KeyError): continue for filenode, 
data in children.items(): if filenode.endswith('@_'): # Ignore subdirectories, due to Tahoe-LAFS bug #2924 # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2924 continue try: metadata = self._extract_metadata(data[1]) except KeyError: continue metadata['path'] = filenode.replace('@_', os.path.sep) metadata['member'] = member history_dict[metadata['mtime']] = metadata total_size += metadata['size'] history_od = OrderedDict(sorted(history_dict.items())) latest_mtime = next(reversed(history_od), 0) return members, total_size, latest_mtime, history_od
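
# A minimal usage sketch for the Tahoe wrapper class above (the variant
# without StreamedLogs): create a client nodedir if needed, start the daemon,
# and wait until enough storage servers are connected. The nodedir path and
# introducer fURL are hypothetical placeholders; error handling is omitted.
import os
from twisted.internet.defer import inlineCallbacks


@inlineCallbacks
def example_start_gateway():
    gateway = Tahoe(os.path.join(os.path.expanduser('~'), '.gridsync-example'))
    if not os.path.exists(gateway.nodedir):
        settings = {
            'nickname': 'ExampleGrid',
            'introducer': 'pb://abc123@node.example.org:44800/introducer',
            'shares-needed': '1',
            'shares-happy': '1',
            'shares-total': '1',
        }
        yield gateway.create_client(**settings)
    yield gateway.start()
    yield gateway.await_ready()  # blocks until shares.happy servers connect
    return gateway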
class Tahoe: STOPPED = 0 STARTING = 1 STARTED = 2 STOPPING = 3 def __init__(self, nodedir=None, executable=None, reactor=None): if reactor is None: from twisted.internet import reactor self.executable = executable self.multi_folder_support = True if nodedir: self.nodedir = os.path.expanduser(nodedir) else: self.nodedir = os.path.join(os.path.expanduser("~"), ".tahoe") self.rootcap_path = os.path.join(self.nodedir, "private", "rootcap") self.servers_yaml_path = os.path.join(self.nodedir, "private", "servers.yaml") self.config = Config(os.path.join(self.nodedir, "tahoe.cfg")) self.pidfile = os.path.join(self.nodedir, "twistd.pid") self.nodeurl = None self.shares_happy = 0 self.name = os.path.basename(self.nodedir) self.api_token = None self.magic_folders_dir = os.path.join(self.nodedir, "magic-folders") self.lock = DeferredLock() self.rootcap = None self.magic_folders = defaultdict(dict) self.remote_magic_folders = defaultdict(dict) self.use_tor = False self.monitor = Monitor(self) streamedlogs_maxlen = None debug_settings = global_settings.get("debug") if debug_settings: log_maxlen = debug_settings.get("log_maxlen") if log_maxlen is not None: streamedlogs_maxlen = int(log_maxlen) self.streamedlogs = StreamedLogs(reactor, streamedlogs_maxlen) self.state = Tahoe.STOPPED self.newscap = "" self.newscap_checker = NewscapChecker(self) @staticmethod def read_cap_from_file(filepath): try: with open(filepath) as f: cap = f.read().strip() except OSError: return None return cap def load_newscap(self): news_settings = global_settings.get("news:{}".format(self.name)) if news_settings: newscap = news_settings.get("newscap") if newscap: self.newscap = newscap return newscap = self.read_cap_from_file( os.path.join(self.nodedir, "private", "newscap")) if newscap: self.newscap = newscap def config_set(self, section, option, value): self.config.set(section, option, value) def config_get(self, section, option): return self.config.get(section, option) def get_settings(self, include_rootcap=False): settings = { "nickname": self.name, "shares-needed": self.config_get("client", "shares.needed"), "shares-happy": self.config_get("client", "shares.happy"), "shares-total": self.config_get("client", "shares.total"), } introducer = self.config_get("client", "introducer.furl") if introducer: settings["introducer"] = introducer storage_servers = self.get_storage_servers() if storage_servers: settings["storage"] = storage_servers icon_path = os.path.join(self.nodedir, "icon") icon_url_path = icon_path + ".url" if os.path.exists(icon_url_path): with open(icon_url_path) as f: settings["icon_url"] = f.read().strip() self.load_newscap() if self.newscap: settings["newscap"] = self.newscap if include_rootcap and os.path.exists(self.rootcap_path): settings["rootcap"] = self.read_cap_from_file(self.rootcap_path) # TODO: Verify integrity? Support 'icon_base64'? 
return settings def export(self, dest, include_rootcap=False): log.debug("Exporting settings to '%s'...", dest) settings = self.get_settings(include_rootcap) if self.use_tor: settings["hide-ip"] = True with atomic_write(dest, mode="w", overwrite=True) as f: f.write(json.dumps(settings)) log.debug("Exported settings to '%s'", dest) def get_aliases(self): aliases = {} aliases_file = os.path.join(self.nodedir, "private", "aliases") try: with open(aliases_file) as f: for line in f.readlines(): if not line.startswith("#"): try: name, cap = line.split(":", 1) aliases[name + ":"] = cap.strip() except ValueError: pass return aliases except IOError: return aliases def get_alias(self, alias): if not alias.endswith(":"): alias = alias + ":" try: for name, cap in self.get_aliases().items(): if name == alias: return cap return None except AttributeError: return None def _set_alias(self, alias, cap=None): if not alias.endswith(":"): alias = alias + ":" aliases = self.get_aliases() if cap: aliases[alias] = cap else: try: del aliases[alias] except (KeyError, TypeError): return tmp_aliases_file = os.path.join(self.nodedir, "private", "aliases.tmp") with atomic_write(tmp_aliases_file, mode="w", overwrite=True) as f: data = "" for name, dircap in aliases.items(): data += "{} {}\n".format(name, dircap) f.write(data) aliases_file = os.path.join(self.nodedir, "private", "aliases") shutil.move(tmp_aliases_file, aliases_file) def add_alias(self, alias, cap): self._set_alias(alias, cap) def remove_alias(self, alias): self._set_alias(alias) def _read_servers_yaml(self): try: with open(self.servers_yaml_path) as f: return yaml.safe_load(f) except OSError: return {} def get_storage_servers(self): yaml_data = self._read_servers_yaml() if not yaml_data: return {} storage = yaml_data.get("storage") if not storage or not isinstance(storage, dict): return {} results = {} for server, server_data in storage.items(): ann = server_data.get("ann") if not ann: continue results[server] = { "anonymous-storage-FURL": ann.get("anonymous-storage-FURL") } nickname = ann.get("nickname") if nickname: results[server]["nickname"] = nickname return results def add_storage_server(self, server_id, furl, nickname=None): log.debug("Adding storage server: %s...", server_id) yaml_data = self._read_servers_yaml() if not yaml_data or not yaml_data.get("storage"): yaml_data["storage"] = {} yaml_data["storage"][server_id] = { "ann": { "anonymous-storage-FURL": furl } } if nickname: yaml_data["storage"][server_id]["ann"]["nickname"] = nickname with atomic_write(self.servers_yaml_path, mode="w", overwrite=True) as f: f.write(yaml.safe_dump(yaml_data, default_flow_style=False)) log.debug("Added storage server: %s", server_id) def add_storage_servers(self, storage_servers): for server_id, data in storage_servers.items(): nickname = data.get("nickname") furl = data.get("anonymous-storage-FURL") if furl: self.add_storage_server(server_id, furl, nickname) else: log.warning("No storage fURL provided for %s!", server_id) def load_magic_folders(self): data = {} yaml_path = os.path.join(self.nodedir, "private", "magic_folders.yaml") try: with open(yaml_path) as f: data = yaml.safe_load(f) except OSError: pass folders_data = data.get("magic-folders") if folders_data: for key, value in folders_data.items(): # to preserve defaultdict self.magic_folders[key] = value for folder in self.magic_folders: admin_dircap = self.get_admin_dircap(folder) if admin_dircap: self.magic_folders[folder]["admin_dircap"] = admin_dircap return self.magic_folders def 
line_received(self, line): # TODO: Connect to Core via Qt signals/slots? log.debug("[%s] >>> %s", self.name, line) @inlineCallbacks def command(self, args, callback_trigger=None): from twisted.internet import reactor # Some args may contain sensitive information. Don't show them in logs. if args[0] == "magic-folder": first_args = args[0:2] else: first_args = args[0:1] exe = self.executable if self.executable else which("tahoe")[0] args = [exe] + ["-d", self.nodedir] + args logged_args = [exe] + ["-d", self.nodedir] + first_args env = os.environ env["PYTHONUNBUFFERED"] = "1" log.debug("Executing: %s...", " ".join(logged_args)) protocol = CommandProtocol(self, callback_trigger) reactor.spawnProcess(protocol, exe, args=args, env=env) output = yield protocol.done return output @inlineCallbacks def get_features(self): try: yield self.command(["magic-folder", "list"]) except TahoeCommandError as err: if str(err).strip().endswith("Unknown command: list"): # Has magic-folder support but no multi-magic-folder support return self.executable, True, False # Has no magic-folder support ('Unknown command: magic-folder') # or something else went wrong; consider executable unsupported return self.executable, False, False # if output: # Has magic-folder support and multi-magic-folder support return self.executable, True, True @inlineCallbacks def create_client(self, **kwargs): if os.path.exists(self.nodedir): raise FileExistsError("Nodedir already exists: {}".format( self.nodedir)) args = ["create-client", "--webport=tcp:0:interface=127.0.0.1"] for key, value in kwargs.items(): if key in ( "nickname", "introducer", "shares-needed", "shares-happy", "shares-total", ): args.extend(["--{}".format(key), str(value)]) elif key in ["needed", "happy", "total"]: args.extend(["--shares-{}".format(key), str(value)]) elif key == "hide-ip": args.append("--hide-ip") yield self.command(args) storage_servers = kwargs.get("storage") if storage_servers and isinstance(storage_servers, dict): self.add_storage_servers(storage_servers) def _win32_cleanup(self): # XXX A dirty hack to try to remove any stale magic-folder # sqlite databases that could not be removed earlier due to # being in-use by another process (i.e., Tahoe-LAFS). 
# See https://github.com/gridsync/gridsync/issues/294 and # https://github.com/LeastAuthority/magic-folder/issues/131 if not self.magic_folders: self.load_magic_folders() # XXX for p in Path(self.nodedir, "private").glob("magicfolder_*.sqlite"): folder_name = p.stem[12:] # len("magicfolder_") -> 12 if folder_name not in self.magic_folders: fullpath = p.resolve() log.debug("Trying to remove stale database %s...", fullpath) try: p.unlink() except OSError as err: log.warning("Error removing %s: %s", fullpath, str(err)) continue log.debug("Successfully removed %s", fullpath) def kill(self): try: with open(self.pidfile, "r") as f: pid = int(f.read()) except (EnvironmentError, ValueError) as err: log.warning("Error loading pid from pidfile: %s", str(err)) return log.debug("Trying to kill PID %d...", pid) try: os.kill(pid, signal.SIGTERM) except OSError as err: if err.errno not in (errno.ESRCH, errno.EINVAL): log.error(err) if sys.platform == "win32": self._win32_cleanup() @inlineCallbacks def stop(self): log.debug('Stopping "%s" tahoe client...', self.name) if not os.path.isfile(self.pidfile): log.error('No "twistd.pid" file found in %s', self.nodedir) return self.state = Tahoe.STOPPING self.streamedlogs.stop() if self.lock.locked: log.warning("Delaying stop operation; " "another operation is trying to modify the rootcap...") yield self.lock.acquire() yield self.lock.release() log.debug("Lock released; resuming stop operation...") if sys.platform == "win32": self.kill() else: try: yield self.command(["stop"]) except TahoeCommandError: # Process already dead/not running pass try: os.remove(self.pidfile) except EnvironmentError: pass self.state = Tahoe.STOPPED log.debug('Finished stopping "%s" tahoe client', self.name) @inlineCallbacks def upgrade_legacy_config(self): log.debug("Upgrading legacy configuration layout..") nodedirs = get_nodedirs(self.magic_folders_dir) if not nodedirs: log.warning("No nodedirs found; returning.") return magic_folders = {} for nodedir in nodedirs: basename = os.path.basename(nodedir) log.debug("Migrating configuration for '%s'...", basename) tahoe = Tahoe(nodedir) directory = tahoe.config_get("magic_folder", "local.directory") poll_interval = tahoe.config_get("magic_folder", "poll_interval") collective_dircap = self.read_cap_from_file( os.path.join(nodedir, "private", "collective_dircap")) magic_folder_dircap = self.read_cap_from_file( os.path.join(nodedir, "private", "magic_folder_dircap")) magic_folders[basename] = { "collective_dircap": collective_dircap, "directory": directory, "poll_interval": poll_interval, "upload_dircap": magic_folder_dircap, } db_src = os.path.join(nodedir, "private", "magicfolderdb.sqlite") db_fname = "".join(["magicfolder_", basename, ".sqlite"]) db_dest = os.path.join(self.nodedir, "private", db_fname) log.debug("Copying %s to %s...", db_src, db_dest) shutil.copyfile(db_src, db_dest) collective_dircap_rw = tahoe.get_alias("magic") if collective_dircap_rw: alias = hashlib.sha256(basename.encode()).hexdigest() + ":" yield self.command(["add-alias", alias, collective_dircap_rw]) yaml_path = os.path.join(self.nodedir, "private", "magic_folders.yaml") log.debug("Writing magic-folder configs to %s...", yaml_path) with atomic_write(yaml_path, mode="w", overwrite=True) as f: f.write(yaml.safe_dump({"magic-folders": magic_folders})) log.debug("Backing up legacy configuration...") shutil.move(self.magic_folders_dir, self.magic_folders_dir + ".backup") log.debug("Enabling magic-folder for %s...", self.nodedir) self.config_set("magic_folder", 
"enabled", "True") log.debug("Finished upgrading legacy configuration") def get_streamed_log_messages(self): """ Return a ``deque`` containing all buffered log messages. :return: A ``deque`` where each element is a UTF-8 & JSON encoded ``bytes`` object giving a single log event with older events appearing first. """ return self.streamedlogs.get_streamed_log_messages() def get_log(self, apply_filter=False, identifier=None): messages = [] if apply_filter: for line in self.streamedlogs.get_streamed_log_messages(): messages.append(filter_tahoe_log_message(line, identifier)) else: for line in self.streamedlogs.get_streamed_log_messages(): messages.append(json.dumps(json.loads(line), sort_keys=True)) return "\n".join(messages) @inlineCallbacks def start(self): log.debug('Starting "%s" tahoe client...', self.name) self.state = Tahoe.STARTING self.monitor.start() tcp = self.config_get("connections", "tcp") if tcp and tcp.lower() == "tor": self.use_tor = True if os.path.isfile(self.pidfile): yield self.stop() if self.multi_folder_support and os.path.isdir(self.magic_folders_dir): yield self.upgrade_legacy_config() pid = yield self.command(["run"], "client running") pid = str(pid) if sys.platform == "win32" and pid.isdigit(): with atomic_write(self.pidfile, mode="w", overwrite=True) as f: f.write(pid) with open(os.path.join(self.nodedir, "node.url")) as f: self.set_nodeurl(f.read().strip()) token_file = os.path.join(self.nodedir, "private", "api_auth_token") with open(token_file) as f: self.api_token = f.read().strip() self.shares_happy = int(self.config_get("client", "shares.happy")) self.load_magic_folders() self.streamedlogs.start(self.nodeurl, self.api_token) self.load_newscap() self.newscap_checker.start() self.state = Tahoe.STARTED log.debug('Finished starting "%s" tahoe client (pid: %s)', self.name, pid) def set_nodeurl(self, nodeurl): """ Specify the location of the Tahoe-LAFS web API. :param str nodeurl: A text string giving the URI root of the web API. 
""" self.nodeurl = nodeurl @inlineCallbacks def restart(self): from twisted.internet import reactor log.debug("Restarting %s client...", self.name) if self.state in (Tahoe.STOPPING, Tahoe.STARTING): log.warning( "Aborting restart operation; " 'the "%s" client is already (re)starting', self.name, ) return # Temporarily disable desktop notifications for (dis)connect events pref = get_preference("notifications", "connection") set_preference("notifications", "connection", "false") yield self.stop() if sys.platform == "win32": yield deferLater(reactor, 0.1, lambda: None) self._win32_cleanup() yield self.start() yield self.await_ready() yield deferLater(reactor, 1, lambda: None) set_preference("notifications", "connection", pref) log.debug("Finished restarting %s client.", self.name) @inlineCallbacks def get_grid_status(self): if not self.nodeurl: return None try: resp = yield treq.get(self.nodeurl + "?t=json") except ConnectError: return None if resp.code == 200: content = yield treq.content(resp) content = json.loads(content.decode("utf-8")) servers_connected = 0 servers_known = 0 available_space = 0 if "servers" in content: servers = content["servers"] servers_known = len(servers) for server in servers: if server["connection_status"].startswith("Connected"): servers_connected += 1 if server["available_space"]: available_space += server["available_space"] return servers_connected, servers_known, available_space return None @inlineCallbacks def get_connected_servers(self): if not self.nodeurl: return None try: resp = yield treq.get(self.nodeurl) except ConnectError: return None if resp.code == 200: html = yield treq.content(resp) match = re.search("Connected to <span>(.+?)</span>", html.decode("utf-8")) if match: return int(match.group(1)) return None @inlineCallbacks def is_ready(self): if not self.shares_happy: return False connected_servers = yield self.get_connected_servers() return bool(connected_servers and connected_servers >= self.shares_happy) @inlineCallbacks def await_ready(self): # TODO: Replace with "readiness" API? 
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2844 from twisted.internet import reactor ready = yield self.is_ready() if not ready: log.debug('Connecting to "%s"...', self.name) while not ready: yield deferLater(reactor, 0.2, lambda: None) ready = yield self.is_ready() if ready: log.debug('Connected to "%s"', self.name) @inlineCallbacks def mkdir(self, parentcap=None, childname=None): yield self.await_ready() url = self.nodeurl + "uri" params = {"t": "mkdir"} if parentcap and childname: url += "/" + parentcap params["name"] = childname resp = yield treq.post(url, params=params) if resp.code == 200: content = yield treq.content(resp) return content.decode("utf-8").strip() raise TahoeWebError("Error creating Tahoe-LAFS directory: {}".format( resp.code)) @inlineCallbacks def create_rootcap(self): log.debug("Creating rootcap...") if os.path.exists(self.rootcap_path): raise OSError("Rootcap file already exists: {}".format( self.rootcap_path)) self.rootcap = yield self.mkdir() with atomic_write(self.rootcap_path, mode="w") as f: f.write(self.rootcap) log.debug("Rootcap saved to file: %s", self.rootcap_path) return self.rootcap @inlineCallbacks def upload(self, local_path): log.debug("Uploading %s...", local_path) yield self.await_ready() with open(local_path, "rb") as f: resp = yield treq.put("{}uri".format(self.nodeurl), f) if resp.code == 200: content = yield treq.content(resp) log.debug("Successfully uploaded %s", local_path) return content.decode("utf-8") content = yield treq.content(resp) raise TahoeWebError(content.decode("utf-8")) @inlineCallbacks def download(self, cap, local_path): log.debug("Downloading %s...", local_path) yield self.await_ready() resp = yield treq.get("{}uri/{}".format(self.nodeurl, cap)) if resp.code == 200: with atomic_write(local_path, mode="wb", overwrite=True) as f: yield treq.collect(resp, f.write) log.debug("Successfully downloaded %s", local_path) else: content = yield treq.content(resp) raise TahoeWebError(content.decode("utf-8")) @inlineCallbacks def link(self, dircap, childname, childcap): dircap_hash = trunchash(dircap) childcap_hash = trunchash(childcap) log.debug( 'Linking "%s" (%s) into %s...', childname, childcap_hash, dircap_hash, ) yield self.await_ready() yield self.lock.acquire() try: resp = yield treq.post("{}uri/{}/?t=uri&name={}&uri={}".format( self.nodeurl, dircap, childname, childcap)) finally: yield self.lock.release() if resp.code != 200: content = yield treq.content(resp) raise TahoeWebError(content.decode("utf-8")) log.debug( 'Done linking "%s" (%s) into %s', childname, childcap_hash, dircap_hash, ) @inlineCallbacks def unlink(self, dircap, childname): dircap_hash = trunchash(dircap) log.debug('Unlinking "%s" from %s...', childname, dircap_hash) yield self.await_ready() yield self.lock.acquire() try: resp = yield treq.post("{}uri/{}/?t=unlink&name={}".format( self.nodeurl, dircap, childname)) finally: yield self.lock.release() if resp.code != 200: content = yield treq.content(resp) raise TahoeWebError(content.decode("utf-8")) log.debug('Done unlinking "%s" from %s', childname, dircap_hash) @inlineCallbacks def link_magic_folder_to_rootcap(self, name): log.debug("Linking folder '%s' to rootcap...", name) rootcap = self.get_rootcap() tasks = [] admin_dircap = self.get_admin_dircap(name) if admin_dircap: tasks.append(self.link(rootcap, name + " (admin)", admin_dircap)) collective_dircap = self.get_collective_dircap(name) tasks.append( self.link(rootcap, name + " (collective)", collective_dircap)) personal_dircap = 
self.get_magic_folder_dircap(name) tasks.append(self.link(rootcap, name + " (personal)", personal_dircap)) yield DeferredList(tasks) log.debug("Successfully linked folder '%s' to rootcap", name) @inlineCallbacks def unlink_magic_folder_from_rootcap(self, name): log.debug("Unlinking folder '%s' from rootcap...", name) rootcap = self.get_rootcap() tasks = [] tasks.append(self.unlink(rootcap, name + " (collective)")) tasks.append(self.unlink(rootcap, name + " (personal)")) if "admin_dircap" in self.remote_magic_folders[name]: tasks.append(self.unlink(rootcap, name + " (admin)")) del self.remote_magic_folders[name] yield DeferredList(tasks) log.debug("Successfully unlinked folder '%s' from rootcap", name) @inlineCallbacks def _create_magic_folder(self, path, alias, poll_interval=60): log.debug("Creating magic-folder for %s...", path) admin_dircap = yield self.mkdir() admin_dircap_json = yield self.get_json(admin_dircap) collective_dircap = admin_dircap_json[1]["ro_uri"] upload_dircap = yield self.mkdir() upload_dircap_json = yield self.get_json(upload_dircap) upload_dircap_ro = upload_dircap_json[1]["ro_uri"] yield self.link(admin_dircap, "admin", upload_dircap_ro) yaml_path = os.path.join(self.nodedir, "private", "magic_folders.yaml") try: with open(yaml_path) as f: yaml_data = yaml.safe_load(f) except OSError: yaml_data = {} folders_data = yaml_data.get("magic-folders", {}) folders_data[os.path.basename(path)] = { "directory": path, "collective_dircap": collective_dircap, "upload_dircap": upload_dircap, "poll_interval": poll_interval, } with atomic_write(yaml_path, mode="w", overwrite=True) as f: f.write(yaml.safe_dump({"magic-folders": folders_data})) self.add_alias(alias, admin_dircap) @inlineCallbacks def create_magic_folder(self, path, join_code=None, admin_dircap=None, poll_interval=60): # XXX See Issue #55 from twisted.internet import reactor path = os.path.realpath(os.path.expanduser(path)) poll_interval = str(poll_interval) try: os.makedirs(path) except OSError: pass name = os.path.basename(path) alias = hashlib.sha256(name.encode()).hexdigest() + ":" if join_code: yield self.command([ "magic-folder", "join", "-p", poll_interval, "-n", name, join_code, path, ]) if admin_dircap: self.add_alias(alias, admin_dircap) else: yield self.await_ready() # yield self.command(['magic-folder', 'create', '-p', poll_interval, # '-n', name, alias, 'admin', path]) try: yield self._create_magic_folder(path, alias, poll_interval) except Exception as e: # pylint: disable=broad-except log.debug( 'Magic-folder creation failed: "%s: %s"; retrying...', type(e).__name__, str(e), ) yield deferLater(reactor, 3, lambda: None) # XXX yield self.await_ready() yield self._create_magic_folder(path, alias, poll_interval) if not self.config_get("magic_folder", "enabled"): self.config_set("magic_folder", "enabled", "True") self.load_magic_folders() yield self.link_magic_folder_to_rootcap(name) @inlineCallbacks def restore_magic_folder(self, folder_name, dest): data = self.remote_magic_folders[folder_name] admin_dircap = data.get("admin_dircap") collective_dircap = data.get("collective_dircap") upload_dircap = data.get("upload_dircap") if not collective_dircap or not upload_dircap: raise TahoeError( 'The capabilities needed to restore the folder "{}" could ' "not be found. 
This probably means that the folder was " "never completely uploaded to begin with -- or worse, " "that your rootcap was corrupted somehow after the fact.\n" "\nYou will need to remove this folder and upload it " "again.".format(folder_name)) yield self.create_magic_folder( os.path.join(dest, folder_name), "{}+{}".format(collective_dircap, upload_dircap), admin_dircap, ) def local_magic_folder_exists(self, folder_name): if folder_name in self.magic_folders: return True return False def remote_magic_folder_exists(self, folder_name): if folder_name in self.remote_magic_folders: return True return False def magic_folder_exists(self, folder_name): if self.local_magic_folder_exists(folder_name): return True if self.remote_magic_folder_exists(folder_name): return True return False @inlineCallbacks def magic_folder_invite(self, name, nickname): yield self.await_ready() admin_dircap = self.get_admin_dircap(name) if not admin_dircap: raise TahoeError( 'No admin dircap found for folder "{}"; you do not have the ' "authority to create invites for this folder.".format(name)) created = yield self.mkdir(admin_dircap, nickname) code = "{}+{}".format(self.get_collective_dircap(name), created) return code @inlineCallbacks def magic_folder_uninvite(self, name, nickname): log.debug('Uninviting "%s" from "%s"...', nickname, name) alias = hashlib.sha256(name.encode()).hexdigest() yield self.unlink(self.get_alias(alias), nickname) log.debug('Uninvited "%s" from "%s"...', nickname, name) @inlineCallbacks def remove_magic_folder(self, name): if name in self.magic_folders: del self.magic_folders[name] yield self.command(["magic-folder", "leave", "-n", name]) self.remove_alias(hashlib.sha256(name.encode()).hexdigest()) @inlineCallbacks def get_magic_folder_status(self, name): if not self.nodeurl or not self.api_token: return None try: resp = yield treq.post( self.nodeurl + "magic_folder", { "token": self.api_token, "name": name, "t": "json" }, ) except ConnectError: return None if resp.code == 200: content = yield treq.content(resp) return json.loads(content.decode("utf-8")) return None @inlineCallbacks def get_json(self, cap): if not cap or not self.nodeurl: return None uri = "{}uri/{}/?t=json".format(self.nodeurl, cap) try: resp = yield treq.get(uri) except ConnectError: return None if resp.code == 200: content = yield treq.content(resp) return json.loads(content.decode("utf-8")) return None def get_rootcap(self): if not self.rootcap: self.rootcap = self.read_cap_from_file(self.rootcap_path) return self.rootcap def get_admin_dircap(self, name): if name in self.magic_folders: try: return self.magic_folders[name]["admin_dircap"] except KeyError: pass cap = self.get_alias(hashlib.sha256(name.encode()).hexdigest()) self.magic_folders[name]["admin_dircap"] = cap return cap def _get_magic_folder_setting(self, folder_name, setting_name): if folder_name not in self.magic_folders: self.load_magic_folders() if folder_name in self.magic_folders: try: return self.magic_folders[folder_name][setting_name] except KeyError: return None return None def get_collective_dircap(self, name): return self._get_magic_folder_setting(name, "collective_dircap") def get_magic_folder_dircap(self, name): return self._get_magic_folder_setting(name, "upload_dircap") def get_magic_folder_directory(self, name): return self._get_magic_folder_setting(name, "directory") @inlineCallbacks def get_magic_folders_from_rootcap(self, content=None): if not content: content = yield self.get_json(self.get_rootcap()) if content: folders = defaultdict(dict) for 
name, data in content[1]["children"].items(): data_dict = data[1] if name.endswith(" (collective)"): prefix = name.split(" (collective)")[0] folders[prefix]["collective_dircap"] = data_dict["ro_uri"] elif name.endswith(" (personal)"): prefix = name.split(" (personal)")[0] folders[prefix]["upload_dircap"] = data_dict["rw_uri"] elif name.endswith(" (admin)"): prefix = name.split(" (admin)")[0] folders[prefix]["admin_dircap"] = data_dict["rw_uri"] self.remote_magic_folders = folders return folders return None @inlineCallbacks def ensure_folder_links(self, _): yield self.await_ready() if not self.get_rootcap(): yield self.create_rootcap() if self.magic_folders: remote_folders = yield self.get_magic_folders_from_rootcap() for folder in self.magic_folders: if folder not in remote_folders: self.link_magic_folder_to_rootcap(folder) else: log.debug( 'Folder "%s" already linked to rootcap; ' "skipping.", folder, ) @inlineCallbacks def get_magic_folder_members(self, name, content=None): if not content: content = yield self.get_json(self.get_collective_dircap(name)) if content: members = [] children = content[1]["children"] magic_folder_dircap = self.get_magic_folder_dircap(name) for member in children: readcap = children[member][1]["ro_uri"] if magic_folder_dircap: my_fingerprint = magic_folder_dircap.split(":")[-1] fingerprint = readcap.split(":")[-1] if fingerprint == my_fingerprint: self.magic_folders[name]["member"] = member members.insert(0, (member, readcap)) else: members.append((member, readcap)) else: members.append((member, readcap)) return members return None @staticmethod def _extract_metadata(metadata): try: deleted = metadata["metadata"]["deleted"] except KeyError: deleted = False if deleted: cap = metadata["metadata"]["last_downloaded_uri"] else: cap = metadata["ro_uri"] return { "size": int(metadata["size"]), "mtime": float(metadata["metadata"]["tahoe"]["linkmotime"]), "deleted": deleted, "cap": cap, } @inlineCallbacks def get_magic_folder_state(self, name, members=None): total_size = 0 history_dict = {} if not members: members = yield self.get_magic_folder_members(name) if members: for member, dircap in members: json_data = yield self.get_json(dircap) try: children = json_data[1]["children"] except (TypeError, KeyError): continue for filenode, data in children.items(): if filenode.endswith("@_"): # Ignore subdirectories, due to Tahoe-LAFS bug #2924 # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2924 continue try: metadata = self._extract_metadata(data[1]) except KeyError: continue metadata["path"] = filenode.replace("@_", os.path.sep) metadata["member"] = member history_dict[metadata["mtime"]] = metadata total_size += metadata["size"] history_od = OrderedDict(sorted(history_dict.items())) latest_mtime = next(reversed(history_od), 0) return members, total_size, latest_mtime, history_od
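# Illustrative sketch (not part of the original sources): one way a caller
# might drive the magic-folder helpers above. "gateway" is assumed to be a
# started instance of the Tahoe class, and the "alice" nickname is purely
# hypothetical; error handling is elided for brevity.
from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def print_folder_summary(gateway, folder_name):
    # Invite a hypothetical member, then summarize the folder's state.
    code = yield gateway.magic_folder_invite(folder_name, "alice")
    print("Invite code for alice:", code)
    state = yield gateway.get_magic_folder_state(folder_name)
    members, total_size, latest_mtime, history = state
    print("{} members, {} bytes total".format(len(members), total_size))
    # history is an OrderedDict keyed by mtime, oldest entry first.
    for mtime, entry in history.items():
        print(mtime, entry["member"], entry["path"], entry["size"])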
class Server(): def __init__(self, args): self.args = args self.gateways = [] self.sync_folders = [] self.config = Config(self.args.config) self.servers_connected = 0 self.servers_known = 0 self.total_available_space = 0 self.status_text = 'Status: ' self.new_messages = [] def initialize_gateways(self): logging.debug("Initializing Tahoe-LAFS gateway(s)...") logging.debug(self.settings) for gateway in self.settings.keys(): try: t = Tahoe(gateway, self.settings[gateway]['tahoe.cfg']) except KeyError: t = Tahoe(gateway) self.gateways.append(t) for section, contents in self.settings[gateway].items(): if section == 'sync': for local_dir, dircap in contents.items(): self.add_sync_folder(local_dir, dircap, t) def add_sync_folder(self, local_dir, dircap=None, tahoe=None): logging.debug("Adding SyncFolder ({})...".format(local_dir)) # TODO: Add error handling if not os.path.isdir(local_dir): logging.debug("Directory {} doesn't exist; " "creating {}...".format(local_dir, local_dir)) os.makedirs(local_dir) if not dircap: logging.debug("No dircap associated with {}; " "creating new dircap...".format(local_dir)) dircap = tahoe.mkdir() self.settings[tahoe.name]['sync'][local_dir] = dircap self.config.save(self.settings) sync_folder = SyncFolder(local_dir, dircap, tahoe) self.sync_folders.append(sync_folder) def start_sync_folders(self): logging.debug("Starting SyncFolders...") for sync_folder in self.sync_folders: reactor.callInThread(sync_folder.start) def stop_sync_folders(self): logging.debug("Stopping SyncFolders...") for sync_folder in self.sync_folders: reactor.callInThread(sync_folder.stop) def handle_command(self, command): if command.lower().startswith('gridsync:'): logging.info('Got gridsync URI: {}'.format(command)) # TODO: Handle this elif command.lower() in ('stop', 'quit', 'exit'): reactor.stop() else: logging.info("Invalid command: {}".format(command)) def check_state(self): active_jobs = [] for sync_folder in self.sync_folders: if sync_folder.sync_state: active_jobs.append(sync_folder) for message in sync_folder.sync_log: self.new_messages.append(message) sync_folder.sync_log.remove(message) if active_jobs: if not self.args.no_gui and self.tray.animation.state() != 2: self.tray.animation.setPaused(False) self.tray.setToolTip("Gridsync - Syncing...") for sync_folder in self.sync_folders: for operation in sync_folder.tahoe.get_operations(): logging.debug(operation) else: if not self.args.no_gui and self.tray.animation.state() == 2: self.tray.animation.setPaused(True) self.tray.setToolTip("Gridsync - Up to date") self.tray.set_icon(":gridsync.png") if self.new_messages: message = '\n'.join(self.new_messages) self.notify("Sync complete", message) self.new_messages = [] def notify(self, title, message): if not self.args.no_gui: self.tray.showMessage(title, message) else: print(title, message) def start_gateways(self): logging.debug("Starting Tahoe-LAFS gateway(s)...") for gateway in self.gateways: reactor.callInThread(gateway.start) def first_run(self): from gridsync.wizard import Wizard w = Wizard(self) w.exec_() logging.debug("Got first run settings: ", self.settings) self.initialize_gateways() self.start_gateways() def start(self): reactor.listenTCP(52045, ServerFactory(self), interface='localhost') try: os.makedirs(self.config.config_dir) except OSError: pass if self.args.debug: logging.basicConfig( format='%(asctime)s %(funcName)s %(message)s', level=logging.DEBUG, stream=sys.stdout) else: logfile = os.path.join(self.config.config_dir, 'gridsync.log') logging.basicConfig( 
format='%(asctime)s %(funcName)s %(message)s', level=logging.INFO, filename=logfile) logging.info("Server started with args: {}".format((self.args))) logging.debug("$PATH is: {}".format(os.getenv('PATH'))) try: output = Tahoe().command(["--version-and-path"]) logging.info(output.split('\n')[0]) except Exception as e: logging.error('Error checking Tahoe-LAFS version: {}'.format(e)) # TODO: Notify user? try: self.settings = self.config.load() except IOError: self.settings = {} if not self.settings: reactor.callLater(0, self.first_run) else: self.initialize_gateways() reactor.callLater(0, self.start_gateways) if not self.args.no_gui: self.tray = SystemTrayIcon(self) self.tray.show() state_checker = LoopingCall(self.check_state) state_checker.start(1.0) connection_status_updater = LoopingCall( reactor.callInThread, self.update_connection_status) reactor.callLater(5, connection_status_updater.start, 60) reactor.callLater(1, self.start_sync_folders) reactor.addSystemEventTrigger("before", "shutdown", self.stop) reactor.suggestThreadPoolSize(20) # XXX Adjust? reactor.run() def update_connection_status(self): servers_connected = 0 servers_known = 0 available_space = 0 for gateway in self.gateways: try: prev_servers = gateway.status['servers_connected'] except KeyError: pass try: gateway.update_status() servers_connected += gateway.status['servers_connected'] servers_known += gateway.status['servers_known'] available_space += h2b(gateway.status['total_available_space']) except: pass try: if prev_servers != gateway.status['servers_connected']: # TODO: Notify on (dis)connects # FIXME: This should only be called if a Tahoe flag is set logging.debug("New storage node (dis)connected.") #reactor.callInThread(gateway.adjust) except UnboundLocalError: pass self.servers_connected = servers_connected self.total_available_space = b2h(available_space) self.servers_known = servers_known # XXX Add logic to check for paused state, etc. self.status_text = "Status: Connected ({} of {} servers)".format( self.servers_connected, self.servers_known) def stop(self): self.stop_sync_folders() self.stop_gateways() self.config.save(self.settings) logging.debug("Stopping reactor...") def stop_gateways(self): logging.debug("Stopping Tahoe-LAFS gateway(s)...") for gateway in self.gateways: reactor.callInThread(gateway.command, ['stop'])
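# Illustrative sketch (assumption: ServerFactory forwards received lines to
# Server.handle_command): a second process could ask a running instance to
# shut down by writing a command to the local TCP port opened in start().
import socket

def send_command(command, port=52045):
    with socket.create_connection(("localhost", port), timeout=5) as sock:
        sock.sendall(command.encode("utf-8") + b"\n")

# Example: send_command("stop") would trigger reactor.stop() via handle_command.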
def test_config_get(tmpdir):
    config = Config(os.path.join(str(tmpdir), 'test_get.ini'))
    with open(config.filename, 'w') as f:
        f.write('[test_section]\ntest_option = test_value\n\n')
    assert config.get('test_section', 'test_option') == 'test_value'
class Core(object):
    def __init__(self, args):
        self.args = args
        self.gateways = []
        self.sync_folders = []
        self.config = Config(args.config[0] if args.config else None)
        self.servers_connected = 0
        self.servers_known = 0
        self.total_available_space = 0
        self.status_text = 'Status: '
        self.new_messages = []
        self.settings = {}
        self.tray = None

    def initialize_gateways(self):
        logging.debug("Initializing Tahoe-LAFS gateway(s)...")
        logging.debug(self.settings)
        for gateway in self.settings:
            try:
                t = Tahoe(gateway, self.settings[gateway]['tahoe.cfg'])
            except KeyError:
                t = Tahoe(gateway)
            self.gateways.append(t)
            for section, contents in self.settings[gateway].items():
                if section == 'sync':
                    for local_dir, dircap in contents.items():
                        self.add_sync_folder(local_dir, dircap, t)

    def add_sync_folder(self, local_dir, dircap=None, tahoe=None):
        logging.debug("Adding SyncFolder (%s)...", local_dir)
        # TODO: Add error handling
        if not os.path.isdir(local_dir):
            logging.debug(
                "Directory %s doesn't exist; creating %s...",
                local_dir, local_dir)
            os.makedirs(local_dir)
        sync_folder = SyncFolder(self, local_dir, dircap, tahoe)
        self.sync_folders.append(sync_folder)

    def insert_new_dircap(self, sync_folder):
        # FIXME: Ugly hack. This should all probably move to SyncFolder:start
        local_dir = sync_folder.local_dir
        logging.debug(
            "No dircap associated with %s; creating new dircap...", local_dir)
        dircap = sync_folder.tahoe.command(['mkdir'], num_attempts=10)
        for gateway, settings in self.settings.items():
            for setting, value in settings.items():
                if setting == 'sync' and value[local_dir] is None:
                    sync_folder.remote_dircap = dircap
                    self.settings[gateway]['sync'][local_dir] = dircap
                    self.config.save(self.settings)
                    if gateway.startswith('pb://'):
                        introducer_furl = gateway
                    else:
                        client_settings = settings['tahoe.cfg']['client']
                        introducer_furl = client_settings['introducer.furl']
                    dircap_txt = os.path.join(
                        local_dir, 'Gridsync Invite Code.txt')
                    with open(dircap_txt, 'w') as f:
                        f.write(
                            'gridsync' + introducer_furl[2:] + '/' + dircap)
        self.notify("Sync Folder Initialized",
                    "Monitoring {}".format(local_dir))
        reactor.callInThread(sync_folder.start)

    def start_sync_folders(self):
        logging.debug("Starting SyncFolders...")
        for sync_folder in self.sync_folders:
            if not sync_folder.remote_dircap:
                #reactor.callInThread(
                #    reactor.callLater, 5, self.insert_new_dircap, sync_folder)
                reactor.callLater(5, self.insert_new_dircap, sync_folder)
            else:
                reactor.callInThread(sync_folder.start)

    def stop_sync_folders(self):
        logging.debug("Stopping SyncFolders...")
        for sync_folder in self.sync_folders:
            reactor.callInThread(sync_folder.stop)

    def check_state(self):
        active_jobs = []
        for sync_folder in self.sync_folders:
            if sync_folder.sync_state:
                active_jobs.append(sync_folder)
                for message in sync_folder.sync_log:
                    self.new_messages.append(message)
                    sync_folder.sync_log.remove(message)
        if active_jobs:
            # 2 == QMovie.Running
            if not self.args.no_gui and self.tray.animation.state() != 2:
                self.tray.animation.setPaused(False)
                self.tray.setToolTip("Gridsync - Syncing...")
            for sync_folder in self.sync_folders:
                for operation in sync_folder.tahoe.get_operations():
                    logging.debug(operation)
        else:
            if not self.args.no_gui and self.tray.animation.state() == 2:
                self.tray.animation.setPaused(True)
                self.tray.setToolTip("Gridsync - Up to date")
                self.tray.set_icon(":gridsync-checkmark.png")
            if self.new_messages:
                message = '\n'.join(self.new_messages)
                self.notify("Sync complete", message)
                self.new_messages = []

    def notify(self, title, message):
        if not self.args.no_gui:
            self.tray.showMessage(title, message, msecs=5000)
        else:
            print(title, message)

    def start_gateways(self):
        logging.debug("Starting Tahoe-LAFS gateway(s)...")
        for gateway in self.gateways:
            reactor.callInThread(gateway.start)

    def first_run(self):
        from gridsync.wizard import Wizard
        w = Wizard()
        w.exec_()
        if not w.introducer_furl or not w.folder:
            logging.debug("Setup wizard not completed; exiting")
            reactor.stop()
            return
        self.settings = {w.introducer_furl: {'tahoe.cfg': DEFAULT_SETTINGS}}
        self.settings[w.introducer_furl]['sync'] = {w.folder: None}
        logging.debug("Setup wizard finished. Using: %s", self.settings)
        self.initialize_gateways()
        self.start_gateways()

    def start(self):
        reactor.listenTCP(52045, CoreFactory(self), interface='localhost')
        try:
            os.makedirs(self.config.config_dir)
        except OSError:
            pass
        if self.args.debug:
            logging.basicConfig(
                format='%(asctime)s %(funcName)s %(message)s',
                level=logging.DEBUG, stream=sys.stdout)
        else:
            logfile = os.path.join(self.config.config_dir, 'gridsync.log')
            logging.basicConfig(
                format='%(asctime)s %(funcName)s %(message)s',
                level=logging.INFO, filename=logfile)
        logging.info("Core started with args: %s", self.args)
        logging.debug("$PATH is: %s", os.getenv('PATH'))
        try:
            self.settings = self.config.load()
        except IOError:
            self.settings = {}
        if not self.settings:
            reactor.callLater(0, self.first_run)
        else:
            self.initialize_gateways()
            reactor.callLater(0, self.start_gateways)
        if not self.args.no_gui:
            self.tray = SystemTrayIcon(self)
            self.tray.show()
        state_checker = LoopingCall(self.check_state)
        state_checker.start(1.0)
        connection_status_updater = LoopingCall(
            reactor.callInThread, self.update_connection_status)
        reactor.callLater(5, connection_status_updater.start, 60)
        reactor.callLater(1, self.start_sync_folders)
        reactor.addSystemEventTrigger("before", "shutdown", self.stop)
        reactor.suggestThreadPoolSize(20)  # XXX Adjust?
        reactor.run()

    def update_connection_status(self):
        servers_connected = 0
        servers_known = 0
        available_space = 0
        for gateway in self.gateways:
            try:
                prev_servers = gateway.status['servers_connected']
            except KeyError:
                pass
            try:
                gateway.update_status()
                servers_connected += gateway.status['servers_connected']
                servers_known += gateway.status['servers_known']
                available_space += h2b(
                    gateway.status['total_available_space'])
            except (OSError, IndexError):  # XXX
                pass
            try:
                if prev_servers != gateway.status['servers_connected']:
                    # TODO: Notify on (dis)connects
                    # FIXME: This should only be called if a Tahoe flag is set
                    logging.debug("New storage node (dis)connected.")
                    #reactor.callInThread(gateway.adjust)
            except UnboundLocalError:
                pass
        self.servers_connected = servers_connected
        self.total_available_space = b2h(available_space)
        self.servers_known = servers_known
        # XXX Add logic to check for paused state, etc.
        self.status_text = "Status: Connected ({} of {} servers)".format(
            self.servers_connected, self.servers_known)

    def stop(self):
        self.stop_sync_folders()
        self.stop_gateways()
        self.config.save(self.settings)
        logging.debug("Stopping reactor...")

    def stop_gateways(self):
        logging.debug("Stopping Tahoe-LAFS gateway(s)...")
        for gateway in self.gateways:
            reactor.callInThread(gateway.command, ['stop'])
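# Illustrative sketch (not part of the original sources): a minimal entry
# point that builds the argument namespace Core expects -- args.config as a
# list, plus the no_gui and debug flags referenced above -- and starts the
# daemon. The option names here are assumptions for the sketch.
import argparse

def main():
    parser = argparse.ArgumentParser(description="Gridsync core daemon")
    parser.add_argument("-c", "--config", nargs=1, help="Path to config file")
    parser.add_argument("--no-gui", dest="no_gui", action="store_true")
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    Core(args).start()  # blocks inside reactor.run()

if __name__ == "__main__":
    main()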
def test_save(tmpdir):
    config = Config(os.path.join(str(tmpdir), 'test.yml'))
    config.save({'test': 'test'})
    with open(config.config_file) as f:
        assert f.read() == 'test: test\n'
        'appid': 'lothar.com/wormhole/text-or-file-xfer',
        'relay': 'ws://relay.magic-wormhole.io:4000/v1'
    }
}

if getattr(sys, 'frozen', False):
    pkgdir = os.path.dirname(os.path.realpath(sys.executable))
    os.environ["PATH"] += os.pathsep + os.path.join(pkgdir, 'Tahoe-LAFS')
    try:
        del sys.modules['twisted.internet.reactor']  # PyInstaller workaround
    except KeyError:
        pass
else:
    pkgdir = os.path.dirname(os.path.realpath(__file__))

settings = Config(os.path.join(pkgdir, 'resources', 'config.txt')).load()
if not settings:
    settings = default_settings

APP_NAME = settings['application']['name']

if sys.platform == 'win32':
    config_dir = os.path.join(os.getenv('APPDATA'), APP_NAME)
elif sys.platform == 'darwin':
    config_dir = os.path.join(
        os.path.expanduser('~'), 'Library', 'Application Support', APP_NAME)
else:
    config_home = os.environ.get(
        'XDG_CONFIG_HOME',
        os.path.join(os.path.expanduser('~'), '.config'))
    config_dir = os.path.join(config_home, APP_NAME.lower())
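# For reference (derived from the branches above, and assuming APP_NAME
# resolves to "Gridsync"), config_dir lands at:
#
#   Windows: %APPDATA%\Gridsync
#   macOS:   ~/Library/Application Support/Gridsync
#   Linux:   $XDG_CONFIG_HOME/gridsync (or ~/.config/gridsync)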
def test_load(tmpdir):
    config = Config(os.path.join(str(tmpdir), 'test.yml'))
    with open(config.config_file, 'w') as f:
        f.write('test: test\n')
    assert config.load() == {'test': 'test'}
def test_save(tmpdir):
    config = Config(os.path.join(str(tmpdir), "test.yml"))
    config.save({"test": "test"})
    with open(config.config_file) as f:
        assert f.read() == "test: test\n"
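# Illustrative companion test (assumes Config.save and Config.load round-trip
# through the same YAML file, as exercised separately above):
def test_save_load_roundtrip(tmpdir):
    config = Config(os.path.join(str(tmpdir), "roundtrip.yml"))
    data = {"nickname": "TestGrid", "shares-needed": 1}
    config.save(data)
    assert config.load() == data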
def test_specified_config_file():
    config = Config(['test'])
    assert config.config_file == 'test'
def test_config_get_no_option_error(tmpdir):
    config = Config(os.path.join(str(tmpdir), 'test_get_no_option_error.ini'))
    with open(config.filename, 'w') as f:
        f.write('[test_section]\ntest_option = test_value\n\n')
    assert config.get('test_section', 'missing_option') is None
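# Illustrative companion test (assumes Config.set writes through to the .ini
# file so a subsequent Config.get sees the value, mirroring the
# config_set/config_get wrappers on the Tahoe class below):
def test_config_set_then_get(tmpdir):
    config = Config(os.path.join(str(tmpdir), 'test_set.ini'))
    config.set('test_section', 'test_option', 'test_value')
    assert config.get('test_section', 'test_option') == 'test_value'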
class Tahoe(object): # pylint: disable=too-many-public-methods def __init__(self, nodedir=None, executable=None): self.executable = executable if nodedir: self.nodedir = os.path.expanduser(nodedir) else: self.nodedir = os.path.join(os.path.expanduser('~'), '.tahoe') self.rootcap_path = os.path.join(self.nodedir, 'private', 'rootcap') self.config = Config(os.path.join(self.nodedir, 'tahoe.cfg')) self.pidfile = os.path.join(self.nodedir, 'twistd.pid') self.nodeurl = None self.shares_happy = None self.name = os.path.basename(self.nodedir) self.api_token = None self.magic_folders_dir = os.path.join(self.nodedir, 'magic-folders') self.lock = DeferredLock() self.rootcap = None self.magic_folders = defaultdict(dict) def config_set(self, section, option, value): self.config.set(section, option, value) def config_get(self, section, option): return self.config.get(section, option) def get_settings(self, include_rootcap=False): settings = { 'nickname': self.name, 'introducer': self.config_get('client', 'introducer.furl'), 'shares-needed': self.config_get('client', 'shares.needed'), 'shares-happy': self.config_get('client', 'shares.happy'), 'shares-total': self.config_get('client', 'shares.total') } icon_path = os.path.join(self.nodedir, 'icon') icon_url_path = icon_path + '.url' if os.path.exists(icon_url_path): with open(icon_url_path) as f: settings['icon_url'] = f.read().strip() if include_rootcap and os.path.exists(self.rootcap_path): settings['rootcap'] = self.read_cap_from_file(self.rootcap_path) # TODO: Verify integrity? Support 'icon_base64'? return settings def export(self, dest, include_rootcap=False): log.debug("Exporting settings to '%s'...", dest) settings = self.get_settings(include_rootcap) with open(dest, 'w') as f: f.write(json.dumps(settings)) log.debug("Exported settings to '%s'", dest) def get_aliases(self): aliases = {} aliases_file = os.path.join(self.nodedir, 'private', 'aliases') try: with open(aliases_file) as f: for line in f.readlines(): if not line.startswith('#'): try: name, cap = line.split(':', 1) aliases[name + ':'] = cap.strip() except ValueError: pass return aliases except IOError: return def get_alias(self, alias): if not alias.endswith(':'): alias = alias + ':' try: for name, cap in self.get_aliases().items(): if name == alias: return cap except AttributeError: return def load_magic_folders(self): data = None yaml_path = os.path.join(self.nodedir, 'private', 'magic_folders.yaml') try: with open(yaml_path) as f: data = yaml.safe_load(f) except OSError: pass if data: for key, value in data.items(): # to preserve defaultdict self.magic_folders[key] = value for nodedir in get_nodedirs(self.magic_folders_dir): folder_name = os.path.basename(nodedir) if folder_name not in self.magic_folders: config = Config(os.path.join(nodedir, 'tahoe.cfg')) self.magic_folders[folder_name] = { 'nodedir': nodedir, 'directory': config.get('magic_folder', 'local.directory') } return self.magic_folders def line_received(self, line): # TODO: Connect to Core via Qt signals/slots? log.debug("[%s] >>> %s", self.name, line) def _win32_popen(self, args, env, callback_trigger=None): # This is a workaround to prevent Command Prompt windows from opening # when spawning tahoe processes from the GUI on Windows, as Twisted's # reactor.spawnProcess() API does not allow Windows creation flags to # be passed to subprocesses. By passing 0x08000000 (CREATE_NO_WINDOW), # the opening of the Command Prompt window will be surpressed while # still allowing access to stdout/stderr. 
See: # https://twistedmatrix.com/pipermail/twisted-python/2007-February/014733.html import subprocess proc = subprocess.Popen( args, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, creationflags=0x08000000) output = BytesIO() for line in iter(proc.stdout.readline, ''): output.write(line.encode('utf-8')) self.line_received(line.rstrip()) if callback_trigger and callback_trigger in line.rstrip(): return proc.pid proc.poll() if proc.returncode: raise TahoeCommandError(str(output.getvalue()).strip()) else: return str(output.getvalue()).strip() @inlineCallbacks def command(self, args, callback_trigger=None): exe = (self.executable if self.executable else which('tahoe')[0]) args = [exe] + ['-d', self.nodedir] + args env = os.environ env['PYTHONUNBUFFERED'] = '1' log.debug("Executing: %s", ' '.join(args)) if sys.platform == 'win32' and getattr(sys, 'frozen', False): from twisted.internet.threads import deferToThread output = yield deferToThread( self._win32_popen, args, env, callback_trigger) else: protocol = CommandProtocol(self, callback_trigger) reactor.spawnProcess(protocol, exe, args=args, env=env) output = yield protocol.done returnValue(output) @inlineCallbacks def version(self): output = yield self.command(['--version']) returnValue((self.executable, output.split()[1])) @inlineCallbacks def create_client(self, **kwargs): if os.path.exists(self.nodedir): raise NodedirExistsError valid_kwargs = ('nickname', 'introducer', 'shares-needed', 'shares-happy', 'shares-total') args = ['create-client', '--webport=tcp:0:interface=127.0.0.1'] for key, value in kwargs.items(): if key in valid_kwargs: args.extend(['--{}'.format(key), str(value)]) elif key in ['needed', 'happy', 'total']: args.extend(['--shares-{}'.format(key), str(value)]) yield self.command(args) @inlineCallbacks def _stop_magic_folder_subclients(self): # For magic-folders created by '_create_magic_folder_subclient' below; # provides support for multiple magic-folders on older tahoe clients tasks = [] for nodedir in get_nodedirs(self.magic_folders_dir): tasks.append(Tahoe(nodedir, executable=self.executable).stop()) yield gatherResults(tasks) def kill(self): try: with open(self.pidfile, 'r') as f: pid = int(f.read()) except (EnvironmentError, ValueError) as err: log.warning("Error loading pid from pidfile: %s", str(err)) return log.debug("Trying to kill PID %d...", pid) try: os.kill(pid, signal.SIGTERM) except OSError as err: if err.errno not in (errno.ESRCH, errno.EINVAL): log.error(err) @inlineCallbacks def stop(self): if not os.path.isfile(self.pidfile): log.error('No "twistd.pid" file found in %s', self.nodedir) return elif sys.platform == 'win32': self.kill() else: try: yield self.command(['stop']) except TahoeCommandError: # Process already dead/not running pass try: os.remove(self.pidfile) except EnvironmentError: pass yield self._stop_magic_folder_subclients() @inlineCallbacks def _start_magic_folder_subclients(self): # For magic-folders created by '_create_magic_folder_subclient' below; # provides support for multiple magic-folders on older tahoe clients tasks = [] for folder, settings in self.magic_folders.items(): nodedir = settings.get('nodedir') if nodedir: client = Tahoe(nodedir, executable=self.executable) self.magic_folders[folder]['client'] = client tasks.append(client.start()) yield gatherResults(tasks) @inlineCallbacks def start(self): if os.path.isfile(self.pidfile): yield self.stop() pid = yield self.command(['run'], 'client running') pid = str(pid) if sys.platform == 'win32' and 
pid.isdigit(): with open(self.pidfile, 'w') as f: f.write(pid) with open(os.path.join(self.nodedir, 'node.url')) as f: self.nodeurl = f.read().strip() token_file = os.path.join(self.nodedir, 'private', 'api_auth_token') with open(token_file) as f: self.api_token = f.read().strip() self.shares_happy = int(self.config_get('client', 'shares.happy')) self.load_magic_folders() yield self._start_magic_folder_subclients() @staticmethod def _parse_welcome_page(html): # XXX: This can be removed once a new, stable version of # Tahoe-LAFS is released with Trac ticket #2476 resolved. # See: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2476 match = re.search('Connected to <span>(.+?)</span>', html) servers_connected = (int(match.group(1)) if match else 0) match = re.search("of <span>(.+?)</span> known storage servers", html) servers_known = (int(match.group(1)) if match else 0) available_space = 0 for s in re.findall('"service-available-space">(.+?)</td>', html): try: size = dehumanized_size(s) except ValueError: # "N/A" continue available_space += size return servers_connected, servers_known, available_space @inlineCallbacks # noqa: max-complexity=11 XXX def get_grid_status(self): if not self.nodeurl: return try: resp = yield treq.get(self.nodeurl + '?t=json') # not yet released except ConnectError: return if resp.code == 200: content = yield treq.content(resp) content = content.decode('utf-8') try: content = json.loads(content) except json.decoder.JSONDecodeError: # See: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2476 connected, known, space = self._parse_welcome_page(content) returnValue((connected, known, space)) servers_connected = 0 servers_known = 0 available_space = 0 if 'servers' in content: servers = content['servers'] servers_known = len(servers) for server in servers: if server['connection_status'].startswith('Connected'): servers_connected += 1 if server['available_space']: available_space += server['available_space'] returnValue((servers_connected, servers_known, available_space)) @inlineCallbacks def get_connected_servers(self): if not self.nodeurl: return try: resp = yield treq.get(self.nodeurl) except ConnectError: return if resp.code == 200: html = yield treq.content(resp) match = re.search( 'Connected to <span>(.+?)</span>', html.decode('utf-8')) if match: returnValue(int(match.group(1))) @inlineCallbacks def is_ready(self): if not self.shares_happy: returnValue(False) connected_servers = yield self.get_connected_servers() if not connected_servers: returnValue(False) elif connected_servers >= self.shares_happy: returnValue(True) else: returnValue(False) @inlineCallbacks def await_ready(self): # TODO: Replace with "readiness" API? 
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2844 ready = yield self.is_ready() while not ready: yield deferLater(reactor, 0.2, lambda: None) ready = yield self.is_ready() @inlineCallbacks def mkdir(self): resp = yield treq.post(self.nodeurl + 'uri', params={'t': 'mkdir'}) if resp.code == 200: content = yield treq.content(resp) returnValue(content.decode('utf-8').strip()) else: raise TahoeWebError( "Error creating Tahoe-LAFS directory: {}".format(resp.code)) @inlineCallbacks def create_rootcap(self): log.debug("Creating rootcap...") if os.path.exists(self.rootcap_path): raise OSError( "Rootcap file already exists: {}".format(self.rootcap_path)) self.rootcap = yield self.mkdir() with open(self.rootcap_path, 'w') as f: f.write(self.rootcap) log.debug("Rootcap saved to file: %s", self.rootcap_path) returnValue(self.rootcap) @inlineCallbacks def upload(self, local_path): log.debug("Uploading %s...", local_path) with open(local_path, 'rb') as f: resp = yield treq.put('{}uri'.format(self.nodeurl), f) if resp.code == 200: content = yield treq.content(resp) log.debug("Successfully uploaded %s", local_path) returnValue(content.decode('utf-8')) else: content = yield treq.content(resp) raise TahoeWebError(content.decode('utf-8')) @inlineCallbacks def download(self, cap, local_path): log.debug("Downloading %s...", local_path) resp = yield treq.get('{}uri/{}'.format(self.nodeurl, cap)) if resp.code == 200: with open(local_path, 'wb') as f: yield treq.collect(resp, f.write) log.debug("Successfully downloaded %s", local_path) else: content = yield treq.content(resp) raise TahoeWebError(content.decode('utf-8')) @inlineCallbacks def link(self, dircap, childname, childcap): lock = yield self.lock.acquire() try: resp = yield treq.post( '{}uri/{}/?t=uri&name={}&uri={}'.format( self.nodeurl, dircap, childname, childcap)) finally: yield lock.release() if resp.code != 200: content = yield treq.content(resp) raise TahoeWebError(content.decode('utf-8')) @inlineCallbacks def unlink(self, dircap, childname): lock = yield self.lock.acquire() try: resp = yield treq.post( '{}uri/{}/?t=unlink&name={}'.format( self.nodeurl, dircap, childname)) finally: yield lock.release() if resp.code != 200: content = yield treq.content(resp) raise TahoeWebError(content.decode('utf-8')) @inlineCallbacks def _create_magic_folder_subclient(self, path, join_code=None): # Because Tahoe-LAFS doesn't (yet) support having multiple # magic-folders per tahoe client, create the magic-folder inside # a new nodedir using the current nodedir's connection settings. 
# See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2792 basename = os.path.basename(path) subclient = Tahoe( os.path.join(self.magic_folders_dir, basename), executable=self.executable) self.magic_folders[basename] = { 'directory': path, 'client': subclient } settings = { 'nickname': self.config_get('node', 'nickname'), 'introducer': self.config_get('client', 'introducer.furl'), 'shares-needed': self.config_get('client', 'shares.needed'), 'shares-happy': self.config_get('client', 'shares.happy'), 'shares-total': self.config_get('client', 'shares.total') } yield subclient.create_client(**settings) yield subclient.start() yield subclient.await_ready() if join_code: # XXX collective_cap, personal_cap = join_code.split('+') if collective_cap.startswith('URI:DIR2:'): # is admin subclient.command(['add-alias', 'magic:', collective_cap]) data = yield self.get_json(collective_cap) collective_cap_ro = data[1]['ro_uri'] # diminish to readcap join_code = "{}+{}".format(collective_cap_ro, personal_cap) yield subclient.command( ['magic-folder', 'join', join_code, path]) yield subclient.stop() yield subclient.start() returnValue(subclient) yield subclient.command( ['magic-folder', 'create', 'magic:', 'admin', path]) yield subclient.stop() yield subclient.start() rootcap = self.read_cap_from_file(self.rootcap_path) yield self.link(rootcap, basename + ' (collective)', subclient.get_alias('magic')) yield self.link(rootcap, basename + ' (personal)', subclient.get_magic_folder_dircap()) @inlineCallbacks def create_magic_folder(self, path, join_code=None): try: os.makedirs(self.magic_folders_dir) except OSError: pass path = os.path.realpath(os.path.expanduser(path)) try: os.makedirs(path) except OSError: pass name = os.path.basename(path) try: yield self.command(['magic-folder', 'create', '-n', name, name + ':', 'admin', path]) except TahoeCommandError as err: if str(err).endswith('not recognized'): yield self._create_magic_folder_subclient(path, join_code) return yield self.stop() yield self.start() rootcap = self.read_cap_from_file(self.rootcap_path) yield self.link(rootcap, name + ' (collective)', self.get_alias(name)) yield self.link(rootcap, name + ' (personal)', self.get_magic_folder_dircap(name)) def get_magic_folder_client(self, name): for folder, settings in self.magic_folders.items(): if folder == name: return settings.get('client') @inlineCallbacks def magic_folder_invite(self, name, nickname): yield self.await_ready() client = self.get_magic_folder_client(name) if client: code = yield client.command( ['magic-folder', 'invite', 'magic:', nickname]) else: code = yield self.command( ['magic-folder', 'invite', '-n', name, name + ':', nickname]) returnValue(code.strip()) @inlineCallbacks def magic_folder_uninvite(self, name, nickname): client = self.get_magic_folder_client(name) if client: yield client.unlink(client.get_alias('magic'), nickname) else: yield self.unlink(self.get_alias(name), nickname) @inlineCallbacks def remove_magic_folder(self, name): if name in self.magic_folders: client = self.magic_folders[name].get('client') del self.magic_folders[name] if client: yield self.command(['magic-folder', 'leave']) yield client.stop() shutil.rmtree(client.nodedir, ignore_errors=True) else: yield self.command(['magic-folder', 'leave', '-n', name]) @inlineCallbacks def get_magic_folder_status(self, name=None): nodeurl = self.nodeurl token = self.api_token if name: gateway = self.get_magic_folder_client(name) if gateway: nodeurl = gateway.nodeurl token = gateway.api_token data = {'token': token, 't': 
'json'} else: data = {'token': token, 'name': name, 't': 'json'} else: data = {'token': token, 't': 'json'} if not nodeurl or not token: return try: resp = yield treq.post(nodeurl + 'magic_folder', data) except ConnectError: return if resp.code == 200: content = yield treq.content(resp) returnValue(json.loads(content.decode('utf-8'))) @inlineCallbacks def get_json(self, cap): if not cap or not self.nodeurl: return uri = '{}uri/{}/?t=json'.format(self.nodeurl, cap) try: resp = yield treq.get(uri) except ConnectError: return if resp.code == 200: content = yield treq.content(resp) returnValue(json.loads(content.decode('utf-8'))) @staticmethod def read_cap_from_file(filepath): try: with open(filepath) as f: cap = f.read().strip() except OSError: return return cap def get_rootcap(self): if not self.rootcap: self.rootcap = self.read_cap_from_file(self.rootcap_path) return self.rootcap def get_collective_dircap(self, name=None): if name in self.magic_folders: try: return self.magic_folders[name]['collective_dircap'] except KeyError: pass gateway = self.get_magic_folder_client(name) if gateway: path = os.path.join(self.magic_folders_dir, name, 'private', 'collective_dircap') else: path = os.path.join(self.nodedir, 'private', 'collective_dircap') name = 'default' cap = self.read_cap_from_file(path) self.magic_folders[name]['collective_dircap'] = cap return cap def get_magic_folder_dircap(self, name=None): if name in self.magic_folders: try: return self.magic_folders[name]['upload_dircap'] except KeyError: pass gateway = self.get_magic_folder_client(name) if gateway: path = os.path.join(self.magic_folders_dir, name, 'private', 'magic_folder_dircap') else: path = os.path.join(self.nodedir, 'private', 'magic_folder_dircap') name = 'default' cap = self.read_cap_from_file(path) if cap: self.magic_folders[name]['upload_dircap'] = cap return cap def get_magic_folder_directory(self, name=None): if name in self.magic_folders: try: return self.magic_folders[name]['directory'] except KeyError: pass gateway = self.get_magic_folder_client(name) if gateway: directory = gateway.config_get('magic_folder', 'local.directory') else: directory = self.config_get('magic_folder', 'local.directory') self.magic_folders[name]['directory'] = directory return directory @inlineCallbacks def get_magic_folders_from_rootcap(self, content=None): if not content: content = yield self.get_json(self.get_rootcap()) if content: folders = defaultdict(dict) for name, data in content[1]['children'].items(): data_dict = data[1] if name.endswith(' (collective)'): prefix = name.split(' (collective)')[0] if 'rw_uri' in data_dict: folders[prefix]['collective'] = data_dict['rw_uri'] else: folders[prefix]['collective'] = data_dict['ro_uri'] elif name.endswith(' (personal)'): prefix = name.split(' (personal)')[0] if 'rw_uri' in data_dict: folders[prefix]['personal'] = data_dict['rw_uri'] else: folders[prefix]['personal'] = data_dict['ro_uri'] returnValue(folders) @inlineCallbacks def get_magic_folder_members(self, name=None, content=None): if not content: content = yield self.get_json(self.get_collective_dircap(name)) if content: members = [] children = content[1]['children'] magic_folder_dircap = self.get_magic_folder_dircap(name) for member in children: readcap = children[member][1]['ro_uri'] if magic_folder_dircap: my_fingerprint = magic_folder_dircap.split(':')[-1] fingerprint = readcap.split(':')[-1] if fingerprint == my_fingerprint: self.magic_folders[name]['member'] = member members.insert(0, (member, readcap)) else: members.append((member, 
readcap)) else: members.append((member, readcap)) returnValue(members) @staticmethod def size_from_content(content): size = 0 filenodes = content[1]['children'] for filenode in filenodes: size += int(filenodes[filenode][1]['size']) return size @inlineCallbacks def get_magic_folder_size(self, name=None, content=None): if not content: content = yield self.get_json(self.get_magic_folder_dircap(name)) if content: returnValue(self.size_from_content(content)) @inlineCallbacks def get_magic_folder_info(self, name=None, members=None): total_size = 0 sizes_dict = {} latest_mtime = 0 if not members: members = yield self.get_magic_folder_members(name) if members: for member, dircap in reversed(members): sizes_dict[member] = {} json_data = yield self.get_json(dircap) children = json_data[1]['children'] for filenode, data in children.items(): filepath = filenode.replace('@_', os.path.sep) metadata = data[1] try: size = int(metadata['size']) except KeyError: # if linked manually continue sizes_dict[member][filepath] = size total_size += size try: mt = int(metadata['metadata']['tahoe']['linkmotime']) except KeyError: continue if mt > latest_mtime: latest_mtime = mt returnValue((members, total_size, latest_mtime, sizes_dict))
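# Illustrative sketch (not part of the original sources): the typical startup
# sequence for the legacy Tahoe wrapper above -- start the node, wait until
# enough storage servers are connected, then create a magic-folder. The
# nodedir and local_path arguments are assumptions for the sketch.
import os
from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def bootstrap_folder(nodedir, local_path):
    gateway = Tahoe(nodedir)
    yield gateway.start()
    yield gateway.await_ready()  # polls is_ready() until shares.happy is met
    yield gateway.create_magic_folder(local_path)
    status = yield gateway.get_magic_folder_status(
        os.path.basename(local_path))
    print(status)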
def test_default_config_file():
    config = Config()
    assert config.config_file == os.path.join(config.config_dir, 'config.yml')