def upload(self, path):
    if not utils.is_shared(path):
        editor.error_message('Cannot share %s because it is not in the shared path %s.\n\nPlease move it there and try again.' % (path, G.PROJECT_PATH))
        return
    ig = ignore.create_ignore_tree(G.PROJECT_PATH)
    G.IGNORE = ig
    is_dir = os.path.isdir(path)
    if ig.is_ignored(path, is_dir, True):
        editor.error_message('Cannot share %s because it is ignored.\n\nAdd an exclude rule (!%s) to your .flooignore file.' % (path, path))
        return
    rel_path = utils.to_rel_path(path)
    if not is_dir:
        self._upload_file_by_path(rel_path)
        return

    # Walk the ignore tree down to the node for this directory.
    for p in rel_path.split('/'):
        child = ig.children.get(p)
        if not child:
            break
        ig = child

    if ig.path != path:
        msg.warn(ig.path, ' is not the same as ', path)

    self._rate_limited_upload(ig.list_paths(), ig.total_size, upload_func=self._upload_file_by_path)

def stop_handler(self, handler):
    try:
        handler.proto.stop()
    except Exception as e:
        msg.warn('Error stopping connection: %s' % str(e))
    self._handlers.remove(handler)
    self._protos.remove(handler.proto)
    if not self._handlers and not self._protos:
        self.stop()

def stop_handler(self, handler):
    try:
        handler.proto.stop()
    except Exception as e:
        msg.warn('Error stopping connection: %s' % str_e(e))
    self._handlers.remove(handler)
    self._protos.remove(handler.proto)
    if hasattr(handler, 'listener_factory'):
        return handler.listener_factory.stop()
    if not self._handlers and not self._protos:
        msg.log('All handlers stopped. Stopping reactor.')
        self.stop()

def stop_handler(self, handler):
    try:
        handler.proto.stop()
    except Exception as e:
        msg.warn('Error stopping connection: %s' % str(e))
    self._handlers.remove(handler)
    self._protos.remove(handler.proto)
    if hasattr(handler, 'listener_factory'):
        return handler.listener_factory.stop()
    if not self._handlers and not self._protos:
        msg.log('All handlers stopped. Stopping reactor.')
        self.stop()

def get_buf(self, buf_id, view=None):
    self.send({
        'name': 'get_buf',
        'id': buf_id
    })
    buf = self.bufs[buf_id]
    msg.warn('Syncing buffer %s for consistency.' % buf['path'])
    if 'buf' in buf:
        del buf['buf']
    if view:
        view.set_read_only(True)
        view.set_status('Floobits locked this file until it is synced.')

def ticker_watcher(ticker):
    global ticker_errors
    if not G.AGENT:
        return
    ticker.poll()
    if ticker.returncode is None:
        return
    msg.warn('respawning new ticker')
    ticker_errors += 1
    if ticker_errors > 10:
        return fallback_to_feedkeys('Too much trouble with the floobits external ticker.')
    start_event_loop()
    utils.set_timeout(ticker_watcher, 2000, ticker)

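# Illustrative sketch (not from the plugin): the poll-and-respawn pattern that
# ticker_watcher relies on, shown with a plain subprocess. The command, the error
# cap, and respawn_ticker are made-up stand-ins for the plugin's external ticker.
import subprocess


def respawn_ticker():
    # Stand-in for whatever long-running helper process the plugin launches.
    return subprocess.Popen(['sleep', '60'])


def check_ticker(ticker, errors=0):
    ticker.poll()  # non-blocking; updates ticker.returncode
    if ticker.returncode is None:
        return ticker, errors  # still alive, nothing to do
    errors += 1
    if errors > 10:
        raise RuntimeError('Too much trouble with the external ticker.')
    return respawn_ticker(), errors
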
def is_modifiable(name_to_check=None):
    if not agent or not agent.protocol:
        return
    vim_buf = vim.current.buffer
    name = vim_buf.name
    if not name:
        return
    if name_to_check and name_to_check != name:
        msg.warn('Can not call readonly on file: %s' % name)
    if not utils.is_shared(name):
        return
    if 'patch' not in agent.protocol.perms:
        vim.command("call g:FlooSetReadOnly()")
        utils.set_timeout(is_modifiable, 0, name)

def get_buf(self, buf_id, view=None):
    self.send({'name': 'get_buf', 'id': buf_id})
    buf = self.bufs[buf_id]
    msg.warn('Syncing buffer ', buf['path'], ' for consistency.')
    if 'buf' in buf:
        del buf['buf']
    if view:
        view.set_read_only(True)
        view.set_status('Floobits locked this file until it is synced.')
        try:
            del G.VIEW_TO_HASH[view.native_id]
        except Exception:
            pass

def _get_host(self, context, cb):
    if not G.AUTH:
        msg.warn('no auth')
        return
    hosts = list(G.AUTH.keys())
    if len(hosts) == 1:
        host = hosts[0]
    else:
        little = ["%s on %s" % (a['username'], h) for h, a in G.AUTH.items()]
        (host, index) = yield self.user_select, context, 'Which Floobits account should be used?', hosts, little
        if not host:
            cb(None)
            return
    cb(host)

def floobits_part_workspace(self):
    if not G.AGENT:
        return msg.warn('Unable to leave workspace: You are not joined to a workspace.')
    floobits_stop_everything()
    msg.log('You left the workspace.')

def _on_get_buf(self, data):
    buf_id = data['id']
    buf = self.bufs.get(buf_id)
    if not buf:
        return msg.warn('no buf found: ', data, '. Hopefully you didn\'t need that.')
    timeout_id = buf.get('timeout_id')
    if timeout_id:
        utils.cancel_timeout(timeout_id)

    if data['encoding'] == 'base64':
        data['buf'] = base64.b64decode(data['buf'])

    self.bufs[buf_id] = data

    save = False
    if buf_id in self.save_on_get_bufs:
        self.save_on_get_bufs.remove(buf_id)
        save = True

    view = self.get_view(buf_id)
    if not view:
        msg.debug('No view for buf ', buf_id, '. Saving to disk.')
        return utils.save_buf(data)

    view.update(data)
    if save:
        view.save()

def floobits_say_something(self):
    if not G.AGENT:
        return msg.warn('Not connected to a workspace.')
    something = self.vim_input('Say something in %s: ' % (G.AGENT.workspace,), '')
    if something:
        G.AGENT.send_msg(something)

def floobits_users_in_workspace(self):
    if not G.AGENT:
        return msg.warn('Not connected to a workspace.')
    vim.command('echom "Users connected to %s"' % (G.AGENT.workspace,))
    for user in G.AGENT.workspace_info['users'].values():
        vim.command('echom " %s connected with %s on %s"' % (user['username'], user['client'], user['platform']))

def _on_get_buf(self, data):
    buf_id = data['id']
    buf = self.bufs.get(buf_id)
    if not buf:
        return msg.warn('no buf found: %s. Hopefully you didn\'t need that' % data)
    timeout_id = buf.get('timeout_id')
    if timeout_id:
        utils.cancel_timeout(timeout_id)

    if data['encoding'] == 'base64':
        data['buf'] = base64.b64decode(data['buf'])

    self.bufs[buf_id] = data

    save = False
    if buf_id in self.save_on_get_bufs:
        self.save_on_get_bufs.remove(buf_id)
        save = True

    view = self.get_view(buf_id)
    if not view:
        msg.debug('No view for buf %s. Saving to disk.' % buf_id)
        return utils.save_buf(data)

    view.update(data)
    if save:
        view.save()

def floobits_maybe_new_file():
    path = vim.current.buffer.name
    if path is None or path == '':
        msg.debug('get:buf buffer has no filename')
        return None

    if not os.path.exists(path):
        return None
    if not utils.is_shared(path):
        msg.debug('get_buf: %s is not shared' % path)
        return None

    buf = G.AGENT.get_buf_by_path(path)
    if not buf:
        is_dir = os.path.isdir(path)
        if not G.IGNORE:
            msg.warn('G.IGNORE is not set. Uploading anyway.')
            G.AGENT.upload(path)
        # Only upload paths that the ignore tree does not exclude.
        if G.IGNORE and not G.IGNORE.is_ignored(path, is_dir, True):
            G.AGENT.upload(path)

def part_workspace():
    if not agent:
        return msg.warn('Unable to leave workspace: You are not joined to a workspace.')
    stop_everything()
    msg.log('You left the workspace.')

def share_dir(dir_to_share):
    dir_to_share = os.path.expanduser(dir_to_share)
    dir_to_share = utils.unfuck_path(dir_to_share)
    dir_to_share = os.path.abspath(dir_to_share)
    workspace_name = os.path.basename(dir_to_share)
    floo_workspace_dir = os.path.join(G.COLAB_DIR, G.USERNAME, workspace_name)
    if os.path.isfile(dir_to_share):
        return msg.error('give me a directory please')
    if not os.path.isdir(dir_to_share):
        return msg.error('The directory %s doesn\'t appear to exist' % dir_to_share)

    floo_file = os.path.join(dir_to_share, '.floo')
    # look for the .floo file for hints about previous behavior
    info = {}
    try:
        floo_info = open(floo_file, 'rb').read().decode('utf-8')
        info = json.loads(floo_info)
    except (IOError, OSError):
        pass
    except Exception:
        msg.warn("couldn't read the floo_info file: %s" % floo_file)

    workspace_url = info.get('url')
    if workspace_url:
        try:
            result = utils.parse_url(workspace_url)
        except Exception as e:
            msg.error(str(e))
        else:
            workspace_name = result['workspace']
            floo_workspace_dir = os.path.join(G.COLAB_DIR, result['owner'], result['workspace'])
            # they have previously joined the workspace
            if os.path.realpath(floo_workspace_dir) == os.path.realpath(dir_to_share):
                # it could have been deleted, try to recreate it if possible
                # TODO: org or something here?
                if result['owner'] == G.USERNAME:
                    try:
                        api.create_workspace({'name': workspace_name})
                        msg.debug('Created workspace %s' % workspace_url)
                    except Exception as e:
                        msg.debug('Tried to create workspace' + str(e))
                # they wanted to share the dir, so always share it
                return join_workspace(workspace_url, lambda x: agent.protocol.create_buf(dir_to_share, force=True))

    # link to what they want to share
    try:
        utils.mkdir(os.path.dirname(floo_workspace_dir))
        os.symlink(dir_to_share, floo_workspace_dir)
    except OSError as e:
        if e.errno != 17:
            raise
    except Exception as e:
        return msg.error("Couldn't create symlink from %s to %s: %s" % (dir_to_share, floo_workspace_dir, str(e)))

    # make & join workspace
    create_workspace(workspace_name, floo_workspace_dir, dir_to_share)

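# Illustrative sketch (not from the plugin): how the ".floo" hint-file lookup that
# share_dir and floobits_share_dir perform behaves on its own. Only the stdlib is
# used; the helper name and the example URL in the comment are made up.
import json
import os


def read_floo_url(dir_to_share):
    floo_file = os.path.join(dir_to_share, '.floo')
    try:
        with open(floo_file, 'rb') as fd:
            info = json.loads(fd.read().decode('utf-8'))
    except (IOError, OSError, ValueError):
        return None  # no .floo file, or it isn't valid JSON
    return info.get('url')  # e.g. 'https://floobits.com/owner/workspace'
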
def _on_patch(self, data):
    buf_id = data['id']
    buf = self.bufs[buf_id]
    if 'buf' not in buf:
        msg.debug('buf %s not populated yet. not patching' % buf['path'])
        return
    if buf['encoding'] == 'base64':
        # TODO apply binary patches
        return self.get_buf(buf_id, None)

    if len(data['patch']) == 0:
        msg.debug('wtf? no patches to apply. server is being stupid')
        return

    msg.debug('patch is', data['patch'])
    dmp_patches = DMP.patch_fromText(data['patch'])
    # TODO: run this in a separate thread
    old_text = buf['buf']

    view = self.get_view(buf_id)
    if view and not view.is_loading():
        view_text = view.get_text()
        if old_text == view_text:
            buf['forced_patch'] = False
        elif not buf.get('forced_patch'):
            patch = utils.FlooPatch(view_text, buf)
            # Update the current copy of the buffer
            buf['buf'] = patch.current
            buf['md5'] = hashlib.md5(patch.current.encode('utf-8')).hexdigest()
            buf['forced_patch'] = True
            msg.debug('forcing patch for %s' % buf['path'])
            self.send(patch.to_json())
            old_text = view_text
        else:
            msg.debug('forced patch is true. not sending another patch for buf %s' % buf['path'])

    md5_before = hashlib.md5(old_text.encode('utf-8')).hexdigest()
    if md5_before != data['md5_before']:
        msg.warn('starting md5s don\'t match for %s. this is dangerous!' % buf['path'])

    t = DMP.patch_apply(dmp_patches, old_text)
    clean_patch = True
    for applied_patch in t[1]:
        if not applied_patch:
            clean_patch = False
            break

    if G.DEBUG:
        if len(t[0]) == 0:
            try:
                msg.debug('OMG EMPTY!')
                msg.debug('Starting data:', buf['buf'])
                msg.debug('Patch:', data['patch'])
            except Exception as e:
                print(e)
        if '\x01' in t[0]:
            msg.debug('FOUND CRAZY BYTE IN BUFFER')
            msg.debug('Starting data:', buf['buf'])
            msg.debug('Patch:', data['patch'])

    timeout_id = buf.get('timeout_id')
    if timeout_id:
        utils.cancel_timeout(timeout_id)
        del buf['timeout_id']

    if not clean_patch:
        msg.log('Couldn\'t patch %s cleanly.' % buf['path'])
        return self.get_buf(buf_id, view)

    cur_hash = hashlib.md5(t[0].encode('utf-8')).hexdigest()
    if cur_hash != data['md5_after']:
        buf['timeout_id'] = utils.set_timeout(self.get_buf, 2000, buf_id, view)

    buf['buf'] = t[0]
    buf['md5'] = cur_hash

    if not view:
        msg.debug('No view. Saving buffer %s' % buf_id)
        utils.save_buf(buf)
        return

    view.apply_patches(buf, t, data['username'])

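# Illustrative sketch (not from the plugin): how the diff-match-patch calls used in
# _on_patch fit together. Assumes the `diff-match-patch` PyPI package; the buffer
# contents and the simulated server fields (patch text, md5_after) are made up.
import hashlib

from diff_match_patch import diff_match_patch

DMP = diff_match_patch()

old_text = 'def greet():\n    print("hello")\n'
new_text = 'def greet(name):\n    print("hello", name)\n'

# What a "patch" event would carry: the patch text plus a checksum of the result.
patch_text = DMP.patch_toText(DMP.patch_make(old_text, new_text))
md5_after = hashlib.md5(new_text.encode('utf-8')).hexdigest()

# What the client does with it: apply, check every hunk applied, verify the checksum.
patches = DMP.patch_fromText(patch_text)
result_text, applied = DMP.patch_apply(patches, old_text)
clean_patch = all(applied)
md5s_match = hashlib.md5(result_text.encode('utf-8')).hexdigest() == md5_after
print(clean_patch, md5s_match)  # True True
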
def fallback_to_feedkeys(warning):
    global using_feedkeys
    using_feedkeys = True
    warning += ' Falling back to f//e hack which will break some key commands. You may need to call FlooPause/FlooUnPause before some commands.'
    msg.warn(warning)
    floobits_unpause()

def floobits_list_messages(self):
    if not G.AGENT:
        return msg.warn('Not connected to a workspace.')
    vim.command('echom "Recent messages for %s"' % (G.AGENT.workspace,))
    for message in G.AGENT.get_messages():
        vim.command('echom " %s"' % (message,))

def floobits_share_dir(dir_to_share, perms):
    utils.reload_settings()
    workspace_name = os.path.basename(dir_to_share)
    G.PROJECT_PATH = os.path.realpath(dir_to_share)
    msg.debug('%s %s %s' % (G.USERNAME, workspace_name, G.PROJECT_PATH))

    file_to_share = None
    dir_to_share = os.path.expanduser(dir_to_share)
    dir_to_share = utils.unfuck_path(dir_to_share)
    dir_to_share = os.path.abspath(dir_to_share)
    dir_to_share = os.path.realpath(dir_to_share)

    workspace_name = os.path.basename(dir_to_share)

    if os.path.isfile(dir_to_share):
        file_to_share = dir_to_share
        dir_to_share = os.path.dirname(dir_to_share)

    try:
        utils.mkdir(dir_to_share)
    except Exception:
        return msg.error("The directory %s doesn't exist and I can't create it." % dir_to_share)

    if not os.path.isdir(dir_to_share):
        return msg.error('The directory %s doesn\'t appear to exist' % dir_to_share)

    floo_file = os.path.join(dir_to_share, '.floo')
    # look for the .floo file for hints about previous behavior
    info = {}
    try:
        floo_info = open(floo_file, 'rb').read().decode('utf-8')
        info = json.loads(floo_info)
    except (IOError, OSError):
        pass
    except Exception:
        msg.warn('couldn\'t read the floo_info file: %s' % floo_file)

    workspace_url = info.get('url')
    if workspace_url:
        parsed_url = api.prejoin_workspace(workspace_url, dir_to_share, {'perms': perms})
        if parsed_url:
            return floobits_join_workspace(workspace_url, dir_to_share, upload_path=file_to_share or dir_to_share)

    filter_func = lambda workspace_url: api.prejoin_workspace(workspace_url, dir_to_share, {'perms': perms})
    parsed_url = utils.get_workspace_by_path(dir_to_share, filter_func)
    if parsed_url:
        return floobits_join_workspace(workspace_url, dir_to_share, upload_path=file_to_share or dir_to_share)

    try:
        r = api.get_orgs_can_admin()
    except IOError as e:
        return editor.error_message('Error getting org list: %s' % str(e))
    if r.code >= 400 or len(r.body) == 0:
        workspace_name = vim_input('Workspace name:', workspace_name, "file")
        return create_workspace(workspace_name, dir_to_share, G.USERNAME, perms, upload_path=file_to_share or dir_to_share)

    orgs = r.body
    if len(orgs) == 0:
        return create_workspace(workspace_name, dir_to_share, G.USERNAME, perms, upload_path=file_to_share or dir_to_share)

    choices = []
    choices.append(G.USERNAME)
    for o in orgs:
        choices.append(o['name'])

    owner = vim_choice('Create workspace for:', G.USERNAME, choices)
    if owner:
        return create_workspace(workspace_name, dir_to_share, owner, perms, upload_path=file_to_share or dir_to_share)

def floobits_list_messages():
    if not G.AGENT:
        return msg.warn('Not connected to a workspace.')
    vim.command('echom "Recent messages for %s"' % (G.AGENT.workspace,))
    for message in G.AGENT.get_messages():
        vim.command('echom " %s"' % (message,))

def _on_patch(self, data):
    buf_id = data['id']
    buf = self.bufs[buf_id]
    if 'buf' not in buf:
        msg.debug('buf ', buf['path'], ' not populated yet. not patching')
        return
    if buf['encoding'] == 'base64':
        # TODO apply binary patches
        return self.get_buf(buf_id, None)

    if len(data['patch']) == 0:
        msg.debug('wtf? no patches to apply. server is being stupid')
        return

    msg.debug('patch is', data['patch'])
    dmp_patches = DMP.patch_fromText(data['patch'])
    # TODO: run this in a separate thread
    old_text = buf['buf']

    view = self.get_view(buf_id)
    if view and not view.is_loading():
        view_text = view.get_text()
        if old_text == view_text:
            buf['forced_patch'] = False
        elif not buf.get('forced_patch'):
            patch = utils.FlooPatch(view_text, buf)
            # Update the current copy of the buffer
            buf['buf'] = patch.current
            buf['md5'] = hashlib.md5(patch.current.encode('utf-8')).hexdigest()
            buf['forced_patch'] = True
            msg.debug('forcing patch for ', buf['path'])
            self.send(patch.to_json())
            old_text = view_text
        else:
            msg.debug('forced patch is true. not sending another force patch for buf ', buf['path'])

    md5_before = hashlib.md5(old_text.encode('utf-8')).hexdigest()
    if md5_before != data['md5_before']:
        msg.warn('starting md5s don\'t match for ', buf['path'], '. this is dangerous!')

    t = DMP.patch_apply(dmp_patches, old_text)
    clean_patch = True
    for applied_patch in t[1]:
        if not applied_patch:
            clean_patch = False
            break

    if G.DEBUG:
        if len(t[0]) == 0:
            try:
                msg.debug('OMG EMPTY!')
                msg.debug('Starting data:', buf['buf'])
                msg.debug('Patch:', data['patch'])
            except Exception as e:
                msg.error(e)
        if '\x01' in t[0]:
            msg.debug('FOUND CRAZY BYTE IN BUFFER')
            msg.debug('Starting data:', buf['buf'])
            msg.debug('Patch:', data['patch'])

    timeout_id = buf.get('timeout_id')
    if timeout_id:
        utils.cancel_timeout(timeout_id)
        del buf['timeout_id']

    if not clean_patch:
        msg.log('Couldn\'t patch ', buf['path'], ' cleanly.')
        return self.get_buf(buf_id, view)

    cur_hash = hashlib.md5(t[0].encode('utf-8')).hexdigest()
    if cur_hash != data['md5_after']:
        msg.debug('Ending md5s don\'t match for ', buf['path'], ' Setting get_buf timeout.')
        buf['timeout_id'] = utils.set_timeout(self.get_buf, 2000, buf_id, view)

    buf['buf'] = t[0]
    buf['md5'] = cur_hash

    if not view:
        msg.debug('No view. Not saving buffer ', buf_id)

        def _on_load():
            v = self.get_view(buf_id)
            if v and 'buf' in buf:
                v.update(buf, message=False)

        self.on_load[buf_id]['patch'] = _on_load
        return

    view.apply_patches(buf, t, data['username'])