def _on_get_buf(self, data):
    """Handle a get_buf response: store the authoritative buffer and sync any open view."""
    buf_id = data['id']
    known = self.bufs.get(buf_id)
    if not known:
        return msg.warn('no buf found: ', data, '. Hopefully you didn\'t need that.')
    # Any pending get_buf retry timer is now moot.
    pending = known.get('timeout_id')
    if pending:
        utils.cancel_timeout(pending)
    if data['encoding'] == 'base64':
        data['buf'] = base64.b64decode(data['buf'])
    self.bufs[buf_id] = data
    # Was a save deferred until this buffer arrived?
    needs_save = buf_id in self.save_on_get_bufs
    if needs_save:
        self.save_on_get_bufs.remove(buf_id)
    target = self.get_view(buf_id)
    if not target:
        msg.debug('No view for buf ', buf_id, '. Saving to disk.')
        return utils.save_buf(data)
    target.update(data)
    if needs_save:
        target.save()
def reset(self):
    """Drop all buffer bookkeeping and cancel any in-flight upload timer."""
    utils.cancel_timeout(self.upload_timeout)
    self.upload_timeout = None
    self.bufs = {}
    self.paths_to_ids = {}
    self.save_on_get_bufs = set()
    # Per-buffer callbacks to run once the buffer loads.
    self.on_load = collections.defaultdict(dict)
def stop(self):
    """Permanently stop this connection: disable reconnects, tear down, notify."""
    # Negative retry count signals "do not reconnect".
    self._retries = -1
    utils.cancel_timeout(self._reconnect_timeout)
    self._reconnect_timeout = None
    self.cleanup()
    # Let listeners react to the shutdown.
    self.emit('stop')
    msg.log('Disconnected.')
def connect(self, conn=None):
    """Open a fresh (non-blocking) socket connection, optionally via a local
    proxy and/or an outbound-filtering proxy, after cancelling any pending
    reconnect attempt.
    """
    utils.cancel_timeout(self._reconnect_timeout)
    self._reconnect_timeout = None
    self.cleanup()
    # Effective endpoint; may be rewritten below by proxy selection.
    host = self._host
    port = self._port
    self._empty_selects = 0
    # TODO: Horrible code here
    if self.proxy:
        if G.OUTBOUND_FILTERING:
            # Local proxy tunnels through the outbound filter proxy.
            port = self.start_proxy(G.OUTBOUND_FILTER_PROXY_HOST, G.OUTBOUND_FILTER_PROXY_PORT)
        else:
            port = self.start_proxy(self.host, self.port)
    elif G.OUTBOUND_FILTERING:
        # No local proxy: talk to the outbound filter proxy directly.
        host = G.OUTBOUND_FILTER_PROXY_HOST
        port = G.OUTBOUND_FILTER_PROXY_PORT
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Non-blocking: progress is driven by the caller's select loop.
    self._sock.setblocking(False)
    if self._secure:
        # Write the CA cert to disk so the SSL layer can verify the server.
        with open(self._cert_path, 'wb') as cert_fd:
            cert_fd.write(cert.CA_CERT.encode('utf-8'))
    conn_msg = 'Connecting to %s:%s' % (self.host, self.port)
    # NOTE(review): both of these conditions can be true at once, producing a
    # doubled "(proxying through ...)" suffix — presumably only one was meant;
    # confirm which comparison is authoritative.
    if self.port != self._port or self.host != self._host:
        conn_msg += ' (proxying through %s:%s)' % (self._host, self._port)
    if host != self._host:
        conn_msg += ' (proxying through %s:%s)' % (host, port)
    msg.log(conn_msg)
    editor.status_message(conn_msg)
    self._connect(host, port)
def shutdown():
    """Shut down this (superseded) plugin instance after an in-place update."""
    # old_time / interval are presumably module-level globals captured when this
    # instance started — TODO confirm against the enclosing module.
    print('Floobits plugin updated. Shutting down old instance.', old_time)
    # Best-effort: during a reload the utils module or the timer may already be
    # gone, so swallow any failure rather than break the update.
    try:
        utils.cancel_timeout(interval)
    except Exception:
        pass
def _on_get_buf(self, data):
    """Handle a get_buf response: record the fresh buffer and update/save any view."""
    buf_id = data['id']
    known = self.bufs.get(buf_id)
    if not known:
        return msg.warn('no buf found: %s. Hopefully you didn\'t need that' % data)
    # Cancel any retry timer waiting on this buffer.
    pending = known.get('timeout_id')
    if pending:
        utils.cancel_timeout(pending)
    if data['encoding'] == 'base64':
        data['buf'] = base64.b64decode(data['buf'])
    self.bufs[buf_id] = data
    # A save may have been deferred until this buffer arrived.
    needs_save = buf_id in self.save_on_get_bufs
    if needs_save:
        self.save_on_get_bufs.remove(buf_id)
    target = self.get_view(buf_id)
    if not target:
        msg.debug('No view for buf %s. Saving to disk.' % buf_id)
        return utils.save_buf(data)
    target.update(data)
    if needs_save:
        target.save()
def _run(self, edit, selections, r, data, view=None):
    """Replace the text in range ``r`` of this view with ``data`` using the
    smallest possible edit, then return the selections shifted accordingly.
    """
    global ignore_modified_timeout
    if not getattr(self, 'view', None):
        return selections
    # Suppress our own modified events while we mutate the buffer; re-enable
    # shortly afterwards via a timer.
    G.IGNORE_MODIFIED_EVENTS = True
    utils.cancel_timeout(ignore_modified_timeout)
    ignore_modified_timeout = utils.set_timeout(unignore_modified_events, 2)
    # Clamp the requested range to the view's actual bounds.
    start = max(int(r[0]), 0)
    stop = min(int(r[1]), self.view.size())
    region = sublime.Region(start, stop)
    if stop - start > 10000:
        # Large edit: skip the minimal-diff scan and replace wholesale.
        self.view.replace(edit, region, data)
        G.VIEW_TO_HASH[self.view.buffer_id()] = hashlib.md5(
            listener.get_text(self.view).encode('utf-8')).hexdigest()
        return transform_selections(selections, start, stop - start)
    existing = self.view.substr(region)
    # Trim the common prefix (i) and common suffix (j) so we only replace the
    # span that actually changed.
    i = 0
    data_len = len(data)
    existing_len = len(existing)
    length = min(data_len, existing_len)
    while (i < length):
        if existing[i] != data[i]:
            break
        i += 1
    j = 0
    while j < (length - i):
        if existing[existing_len - j - 1] != data[data_len - j - 1]:
            break
        j += 1
    region = sublime.Region(start + i, stop - j)
    replace_str = data[i:data_len - j]
    self.view.replace(edit, region, replace_str)
    # Remember the post-edit hash so the change isn't re-broadcast as ours.
    G.VIEW_TO_HASH[self.view.buffer_id()] = hashlib.md5(
        listener.get_text(self.view).encode('utf-8')).hexdigest()
    # Net size change of the replaced span, used to shift selections.
    new_offset = len(replace_str) - ((stop - j) - (start + i))
    return transform_selections(selections, start + i, new_offset)
def connect(self, conn=None):
    """Open a fresh non-blocking socket to the server, via a proxy if configured."""
    utils.cancel_timeout(self._reconnect_timeout)
    self._reconnect_timeout = None
    self.cleanup()
    self._empty_selects = 0
    if self.proxy:
        self.start_proxy()
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # The caller's select loop drives progress, so stay non-blocking.
    self._sock.setblocking(False)
    if self._secure:
        # Persist the CA cert so the SSL layer can verify the server.
        with open(self._cert_path, 'wb') as fd:
            fd.write(cert.CA_CERT.encode('utf-8'))
    conn_msg = 'Connecting to %s:%s' % (self.host, self.port)
    proxied = self.port != self._port or self.host != self._host
    if proxied:
        conn_msg += ' (proxying through %s:%s)' % (self._host, self._port)
    msg.log(conn_msg)
    editor.status_message(conn_msg)
    self._connect()
def connect(self, conn=None):
    """Open a fresh non-blocking socket; fall back to plaintext when ssl is missing."""
    utils.cancel_timeout(self._reconnect_timeout)
    self._reconnect_timeout = None
    self.cleanup()
    self._empty_selects = 0
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._sock.setblocking(False)
    # No ssl module available: downgrade to an unencrypted connection.
    if self.secure and not ssl:
        msg.log('No SSL module found. Connection will not be encrypted.')
        self.secure = False
        if self.port == G.DEFAULT_PORT:
            self.port = 3148  # plaintext port
    if self.secure:
        # Persist the CA cert so the SSL layer can verify the server.
        with open(self._cert_path, 'wb') as fd:
            fd.write(cert.CA_CERT.encode('utf-8'))
    conn_msg = 'Connecting to %s:%s' % (self.host, self.port)
    msg.log(conn_msg)
    editor.status_message(conn_msg)
    self._connect()
def _run(self, edit, selections, r, data, view=None):
    """Apply ``data`` to range ``r`` of this view with a minimal replacement,
    returning the selections shifted by the net size change.
    """
    global ignore_modified_timeout
    if not getattr(self, 'view', None):
        return selections
    # Ignore our own modified events while editing; a short timer re-enables them.
    G.IGNORE_MODIFIED_EVENTS = True
    utils.cancel_timeout(ignore_modified_timeout)
    ignore_modified_timeout = utils.set_timeout(unignore_modified_events, 2)
    # Clamp the requested range to the view's bounds.
    start = max(int(r[0]), 0)
    stop = min(int(r[1]), self.view.size())
    region = sublime.Region(start, stop)
    if stop - start > 10000:
        # Large edit: replace the whole region rather than diffing it.
        self.view.replace(edit, region, data)
        G.VIEW_TO_HASH[self.view.buffer_id()] = hashlib.md5(listener.get_text(self.view).encode('utf-8')).hexdigest()
        return transform_selections(selections, start, stop - start)
    existing = self.view.substr(region)
    # Find the length of the common prefix (i) and common suffix (j) so only
    # the genuinely-changed middle span gets replaced.
    i = 0
    data_len = len(data)
    existing_len = len(existing)
    length = min(data_len, existing_len)
    while (i < length):
        if existing[i] != data[i]:
            break
        i += 1
    j = 0
    while j < (length - i):
        if existing[existing_len - j - 1] != data[data_len - j - 1]:
            break
        j += 1
    region = sublime.Region(start + i, stop - j)
    replace_str = data[i:data_len - j]
    self.view.replace(edit, region, replace_str)
    # Record the post-edit hash so this change isn't re-broadcast as ours.
    G.VIEW_TO_HASH[self.view.buffer_id()] = hashlib.md5(listener.get_text(self.view).encode('utf-8')).hexdigest()
    new_offset = len(replace_str) - ((stop - j) - (start + i))
    return transform_selections(selections, start + i, new_offset)
def connect(self, conn=None):
    """Open a fresh non-blocking socket connection, downgrading to plaintext
    when no ssl module is available.
    """
    utils.cancel_timeout(self._reconnect_timeout)
    self._reconnect_timeout = None
    self.cleanup()
    self._empty_selects = 0
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Non-blocking: the caller's select loop drives progress.
    self._sock.setblocking(False)
    if self.secure:
        if ssl:
            # Persist the CA cert so the SSL layer can verify the server.
            with open(self._cert_path, 'wb') as cert_fd:
                cert_fd.write(cert.CA_CERT.encode('utf-8'))
        else:
            msg.log(
                'No SSL module found. Connection will not be encrypted.')
            self.secure = False
            if self.port == G.DEFAULT_PORT:
                self.port = 3148  # plaintext port
    conn_msg = 'Connecting to %s:%s' % (self.host, self.port)
    msg.log(conn_msg)
    editor.status_message(conn_msg)
    self._connect()
def stop(self):
    """Cancel any pending buffer upload, then run the base-class shutdown."""
    utils.cancel_timeout(self.upload_timeout)
    self.upload_timeout = None
    super(FlooHandler, self).stop()
def _on_patch(self, data):
    """Apply a server-sent patch to the buffer identified by ``data['id']``.

    Verifies md5s before and after applying; on a mismatch or unclean patch,
    falls back to re-fetching the whole buffer via get_buf. If the buffer has
    an open view, divergent view text is force-patched back to the server
    first so both sides converge.
    """
    buf_id = data['id']
    buf = self.bufs[buf_id]
    if 'buf' not in buf:
        msg.debug('buf %s not populated yet. not patching' % buf['path'])
        return
    if buf['encoding'] == 'base64':
        # TODO apply binary patches
        return self.get_buf(buf_id, None)
    if len(data['patch']) == 0:
        msg.debug('wtf? no patches to apply. server is being stupid')
        return
    msg.debug('patch is', data['patch'])
    dmp_patches = DMP.patch_fromText(data['patch'])
    # TODO: run this in a separate thread
    old_text = buf['buf']
    view = self.get_view(buf_id)
    if view and not view.is_loading():
        view_text = view.get_text()
        if old_text == view_text:
            buf['forced_patch'] = False
        elif not buf.get('forced_patch'):
            # View has diverged from our copy: push the view's text to the
            # server and patch against it instead.
            patch = utils.FlooPatch(view_text, buf)
            # Update the current copy of the buffer
            buf['buf'] = patch.current
            buf['md5'] = hashlib.md5(patch.current.encode('utf-8')).hexdigest()
            buf['forced_patch'] = True
            msg.debug('forcing patch for %s' % buf['path'])
            self.send(patch.to_json())
            old_text = view_text
        else:
            msg.debug('forced patch is true. not sending another patch for buf %s' % buf['path'])
    md5_before = hashlib.md5(old_text.encode('utf-8')).hexdigest()
    if md5_before != data['md5_before']:
        msg.warn('starting md5s don\'t match for %s. this is dangerous!' % buf['path'])
    t = DMP.patch_apply(dmp_patches, old_text)
    # t[1] is a per-patch success list; any failure means an unclean apply.
    clean_patch = True
    for applied_patch in t[1]:
        if not applied_patch:
            clean_patch = False
            break
    if G.DEBUG:
        if len(t[0]) == 0:
            try:
                msg.debug('OMG EMPTY!')
                msg.debug('Starting data:', buf['buf'])
                msg.debug('Patch:', data['patch'])
            except Exception as e:
                # Route through the plugin log like every other message in
                # this handler (was: print(e), which went to stdout).
                msg.error(e)
        if '\x01' in t[0]:
            msg.debug('FOUND CRAZY BYTE IN BUFFER')
            msg.debug('Starting data:', buf['buf'])
            msg.debug('Patch:', data['patch'])
    # A successful patch supersedes any pending get_buf retry.
    timeout_id = buf.get('timeout_id')
    if timeout_id:
        utils.cancel_timeout(timeout_id)
        del buf['timeout_id']
    if not clean_patch:
        msg.log('Couldn\'t patch %s cleanly.' % buf['path'])
        return self.get_buf(buf_id, view)
    cur_hash = hashlib.md5(t[0].encode('utf-8')).hexdigest()
    if cur_hash != data['md5_after']:
        # Result diverged from the server's expectation: schedule a re-fetch.
        buf['timeout_id'] = utils.set_timeout(self.get_buf, 2000, buf_id, view)
    buf['buf'] = t[0]
    buf['md5'] = cur_hash
    if not view:
        msg.debug('No view. Saving buffer %s' % buf_id)
        utils.save_buf(buf)
        return
    view.apply_patches(buf, t, data['username'])
def shutdown():
    """Shut down this (superseded) plugin instance after an in-place update."""
    # old_time / interval are presumably module-level globals captured when
    # this instance started — TODO confirm against the enclosing module.
    print('Floobits plugin updated. Shutting down old instance.', old_time)
    # Best-effort cancel, matching the guarded shutdown() variant in this
    # file: during a reload the utils module or the timer may already be
    # torn down, and that must not abort the update.
    try:
        utils.cancel_timeout(interval)
    except Exception:
        pass
def _on_patch(self, data):
    """Apply a server-sent patch to the buffer identified by ``data['id']``.

    Verifies md5s before and after applying; on an unclean patch the buffer
    is re-fetched via get_buf, and on an end-hash mismatch a delayed re-fetch
    is scheduled. Divergent view text is force-patched back to the server
    first so both sides converge.
    """
    buf_id = data['id']
    buf = self.bufs[buf_id]
    if 'buf' not in buf:
        msg.debug('buf ', buf['path'], ' not populated yet. not patching')
        return
    if buf['encoding'] == 'base64':
        # TODO apply binary patches
        return self.get_buf(buf_id, None)
    if len(data['patch']) == 0:
        msg.debug('wtf? no patches to apply. server is being stupid')
        return
    msg.debug('patch is', data['patch'])
    dmp_patches = DMP.patch_fromText(data['patch'])
    # TODO: run this in a separate thread
    old_text = buf['buf']
    view = self.get_view(buf_id)
    if view and not view.is_loading():
        view_text = view.get_text()
        if old_text == view_text:
            buf['forced_patch'] = False
        elif not buf.get('forced_patch'):
            # View has diverged from our copy: push the view's text to the
            # server and patch against it instead.
            patch = utils.FlooPatch(view_text, buf)
            # Update the current copy of the buffer
            buf['buf'] = patch.current
            buf['md5'] = hashlib.md5(
                patch.current.encode('utf-8')).hexdigest()
            buf['forced_patch'] = True
            msg.debug('forcing patch for ', buf['path'])
            self.send(patch.to_json())
            old_text = view_text
        else:
            msg.debug(
                'forced patch is true. not sending another force patch for buf ', buf['path'])
    md5_before = hashlib.md5(old_text.encode('utf-8')).hexdigest()
    if md5_before != data['md5_before']:
        msg.warn('starting md5s don\'t match for ', buf['path'], '. this is dangerous!')
    t = DMP.patch_apply(dmp_patches, old_text)
    # t[1] is a per-patch success list; any failure means an unclean apply.
    clean_patch = True
    for applied_patch in t[1]:
        if not applied_patch:
            clean_patch = False
            break
    if G.DEBUG:
        if len(t[0]) == 0:
            try:
                msg.debug('OMG EMPTY!')
                msg.debug('Starting data:', buf['buf'])
                msg.debug('Patch:', data['patch'])
            except Exception as e:
                msg.error(e)
        if '\x01' in t[0]:
            msg.debug('FOUND CRAZY BYTE IN BUFFER')
            msg.debug('Starting data:', buf['buf'])
            msg.debug('Patch:', data['patch'])
    # A successful patch supersedes any pending get_buf retry.
    timeout_id = buf.get('timeout_id')
    if timeout_id:
        utils.cancel_timeout(timeout_id)
        del buf['timeout_id']
    if not clean_patch:
        msg.log('Couldn\'t patch ', buf['path'], ' cleanly.')
        return self.get_buf(buf_id, view)
    cur_hash = hashlib.md5(t[0].encode('utf-8')).hexdigest()
    if cur_hash != data['md5_after']:
        msg.debug('Ending md5s don\'t match for ', buf['path'], ' Setting get_buf timeout.')
        buf['timeout_id'] = utils.set_timeout(self.get_buf, 2000, buf_id, view)
    buf['buf'] = t[0]
    buf['md5'] = cur_hash
    if not view:
        msg.debug('No view. Not saving buffer ', buf_id)

        # Defer the view update until the buffer's view actually loads.
        def _on_load():
            v = self.get_view(buf_id)
            if v and 'buf' in buf:
                v.update(buf, message=False)

        self.on_load[buf_id]['patch'] = _on_load
        return
    view.apply_patches(buf, t, data['username'])
def stop(self):
    """Permanently stop this connection and cancel any pending reconnect."""
    # Negative retry count signals "do not reconnect".
    self.retries = -1
    utils.cancel_timeout(self._reconnect_timeout)
    self._reconnect_timeout = None
    self.cleanup()
    msg.log("Disconnected.")