def main(*args):
    """sendmail(8)-compatible shim.

    Reads an RFC 2822 message from stdin, extracts recipients and headers,
    and forwards everything to the middleware 'mail.send' RPC over a local
    dispatcher connection.  Recipients come either from the command line or,
    with -t, from the message's To: header.
    """
    connection = Client()
    connection.connect('127.0.0.1')
    connection.login_service('smtp')

    parser = argparse.ArgumentParser(description='Process email')
    parser.add_argument('-i', dest='strip_leading_dot', action='store_false',
                        default=True, help='see sendmail(8) -i')
    parser.add_argument('-t', dest='parse_recipients', action='store_true',
                        default=False, help='parse recipients from message')
    # Rewrite the auto-generated usage string so the trailing positional
    # address list is shown.
    parser.usage = ' '.join(parser.format_usage().split(' ')[1:-1])
    parser.usage += ' [email_addr|user] ..'
    args, to_addrs = parser.parse_known_args()
    if not to_addrs and not args.parse_recipients:
        parser.exit(message=parser.format_usage())

    msg = sys.stdin.read()
    em_parser = email.parser.Parser()
    em = em_parser.parsestr(msg)
    if args.parse_recipients:
        # Strip away the comma based delimiters and whitespace.
        # BUG FIX: must be a list, not a lazy map object, so the emptiness
        # check and to_addrs[0] below work under Python 3.  Also guard
        # against a message with no To: header.
        to_addrs = [addr.strip() for addr in (em.get('To') or '').split(',')]
        if not to_addrs or not to_addrs[0]:
            to_addrs = ['root']

    margs = {}
    margs['extra_headers'] = dict(em)
    margs['extra_headers'].update({
        'X-Mailer': 'FreeNAS',
        'X-FreeNAS-Host': socket.gethostname(),
    })
    margs['subject'] = em.get('Subject')

    if em.is_multipart():
        # BUG FIX: materialize the filter so the payload survives being
        # serialized for the RPC call (a lazy filter object would not).
        margs['attachments'] = [
            part for part in em.walk()
            if part.get_content_maintype() != 'multipart'
        ]
        margs['message'] = (
            'This is a MIME formatted message. If you see '
            'this text it means that your email software '
            'does not support MIME formatted messages.')
    else:
        margs['message'] = ''.join(email.iterators.body_line_iterator(em))

    if to_addrs:
        margs['to'] = to_addrs

    connection.call_sync('mail.send', margs)
    connection.disconnect()
def test_unix_server(self):
    """Bring an RPC server up on a unix socket and exercise one round-trip."""
    sock_file = os.path.join(os.getcwd(), 'test.{0}.sock'.format(os.getpid()))
    url = 'unix://' + sock_file

    ctx = RpcContext()
    ctx.register_service('test', TestService)

    srv = Server()
    srv.rpc = ctx
    srv.start(url)
    threading.Thread(target=srv.serve_forever, daemon=True).start()

    # Spin until server is ready
    while not os.path.exists(sock_file):
        time.sleep(0.1)

    conn = Client()
    conn.connect(url)
    self.assertTrue(conn.connected)
    self.assertEqual(conn.call_sync('test.hello', 'freenas'), 'Hello World, freenas')

    conn.disconnect()
    srv.close()
    os.unlink(sock_file)
class FileWrapRemote(FileWrapBase):
    """File wrapper backed by a remote filesystem exposed over the middleware."""

    def __init__(self, uri, type=None, username='******', password=''):
        super(FileWrapRemote, self).__init__(uri, type)
        self.client = Client()
        self.username = username
        self.password = password

    def readdir(self):
        """Yield a FileWrapRemote for every entry of this remote directory."""
        if not self.is_dir:
            raise NotADirectoryError
        self.client.connect('ws://' + self.hostname)
        self.client.login_user(self.username, self.password)
        entries = self.client.call_sync('filesystem.list_dir', self.path)
        for entry in entries:
            child_uri = PurePath(self.uri).joinpath(entry['name'])
            yield FileWrapRemote(
                child_uri,
                type=entry['type'],
                username=self.username,
                password=self.password)

    def _map_type(self, val):
        # Translate middleware type names onto the local FileType enum.
        self._freenas_mappings = {
            'DIRECTORY': FileType.dir,
            'FILE': FileType.file,
        }
        return self._freenas_mappings[val]

    def _get_type(self):
        #self.client.connect('ws://'+self.hostname)
        #self.client.login_user(self.username, self.password)
        """ TODO """
        return self._map_type('DIRECTORY')

    def _get_parent(self):
        parent_uri = PurePath(self.uri).parent.as_posix()
        return FileWrapRemote(parent_uri, username=self.username, password=self.password)
def main(*args):
    """sendmail(8)-compatible shim.

    Reads an RFC 2822 message from stdin, extracts recipients and headers,
    and forwards everything to the middleware "mail.send" RPC over a local
    dispatcher connection.  Recipients come either from the command line or,
    with -t, from the message's To: header.
    """
    connection = Client()
    connection.connect("127.0.0.1")
    connection.login_service("smtp")

    parser = argparse.ArgumentParser(description="Process email")
    parser.add_argument("-i", dest="strip_leading_dot", action="store_false",
                        default=True, help="see sendmail(8) -i")
    parser.add_argument(
        "-t", dest="parse_recipients", action="store_true",
        default=False, help="parse recipients from message"
    )
    # Rewrite the auto-generated usage string so the trailing positional
    # address list is shown.
    parser.usage = " ".join(parser.format_usage().split(" ")[1:-1])
    parser.usage += " [email_addr|user] .."
    args, to_addrs = parser.parse_known_args()
    if not to_addrs and not args.parse_recipients:
        parser.exit(message=parser.format_usage())

    msg = sys.stdin.read()
    em_parser = email.parser.Parser()
    em = em_parser.parsestr(msg)
    if args.parse_recipients:
        # Strip away the comma based delimiters and whitespace.
        # BUG FIX: must be a list, not a lazy map object, so the emptiness
        # check and to_addrs[0] below work under Python 3.  Also guard
        # against a message with no To: header.
        to_addrs = [addr.strip() for addr in (em.get("To") or "").split(",")]
        if not to_addrs or not to_addrs[0]:
            to_addrs = ["root"]

    margs = {}
    margs["extra_headers"] = dict(em)
    margs["extra_headers"].update({"X-Mailer": "FreeNAS", "X-FreeNAS-Host": socket.gethostname()})
    margs["subject"] = em.get("Subject")

    if em.is_multipart():
        # BUG FIX: materialize the filter so the payload survives being
        # serialized for the RPC call (a lazy filter object would not).
        margs["attachments"] = [
            part for part in em.walk()
            if part.get_content_maintype() != "multipart"
        ]
        margs["message"] = (
            "This is a MIME formatted message. If you see "
            "this text it means that your email software "
            "does not support MIME formatted messages."
        )
    else:
        margs["message"] = "".join(email.iterators.body_line_iterator(em))

    if to_addrs:
        margs["to"] = to_addrs

    connection.call_sync("mail.send", margs)
    connection.disconnect()
class SyslogProvider(Provider):
    """Thin dispatcher-side facade over logd's logging query RPC."""

    def initialize(self, context):
        # logd listens on a local unix-domain control socket.
        self.client = Client()
        self.client.connect('unix:///var/run/logd.sock')

    @generator
    def query(self, filter=None, params=None):
        """Forward the query verbatim to logd and return its result."""
        result = self.client.call_sync('logd.logging.query', filter, params)
        return result
def test_back_to_back(self):
    """Join two clients over a socketpair; one serves, the other calls."""
    left, right = socket.socketpair()
    self.assertGreaterEqual(left.fileno(), 0)
    self.assertGreaterEqual(right.fileno(), 0)

    serving = Client()
    serving.standalone_server = True
    serving.enable_server()
    serving.register_service('test', TestService())
    serving.connect('fd://{0}'.format(left.fileno()))
    self.assertTrue(serving.connected)

    calling = Client()
    calling.connect('fd://{0}'.format(right.fileno()))
    self.assertTrue(calling.connected)
    self.assertEqual(calling.call_sync('test.hello', 'freenas'), 'Hello World, freenas')

    # Teardown order matters: caller first, then its fd, then the server side.
    calling.disconnect()
    left.close()
    serving.disconnect()
    right.close()
def run(self, peer, initial_credentials):
    """Pair this host with a remote FreeNAS peer.

    Establishes mutual trust: creates a peer entry for the remote host in the
    local datastore and, via a remote task, a matching entry for this host on
    the remote side.  Authentication is either by one-time auth code, by the
    shared 'freenas' SSH key, or by username/password over ws+ssh.

    Returns the id of the locally-created peer entry.  Raises TaskException
    on duplicate peers, connection failures or remote RPC errors.
    """
    hostid = self.dispatcher.call_sync('system.info.host_uuid')
    hostname = self.dispatcher.call_sync('system.general.get_config')['hostname']
    remote_peer_name = hostname
    credentials = peer['credentials']
    remote = credentials.get('address')
    port = credentials.get('port', 22)
    username = initial_credentials.get('username')
    password = initial_credentials.get('password')
    auth_code = initial_credentials.get('auth_code')
    key_auth = initial_credentials.get('key_auth')
    local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

    # Refuse to pair twice with the same address.
    if self.datastore.exists('peers', ('credentials.address', '=', remote), ('type', '=', 'freenas')):
        raise TaskException(
            errno.EEXIST,
            'FreeNAS peer entry for {0} already exists'.format(remote)
        )

    remote_client = Client()
    try:
        if auth_code:
            # --- One-time auth-code flow: plain websocket, no SSH ---
            try:
                remote_client.connect('ws://{0}'.format(wrap_address(remote)))
            except (AuthenticationException, OSError, ConnectionRefusedError):
                raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

            try:
                remote_host_uuid, pubkey = remote_client.call_sync(
                    'peer.freenas.auth_with_code',
                    auth_code,
                    hostname,
                    local_ssh_config['port']
                )
            except RpcException as err:
                raise TaskException(err.code, err.message)

            # The remote side will call back into us using this temporary
            # pubkey; wait (up to 30 s) for it to create our peer entry.
            try:
                self.dispatcher.call_sync('peer.freenas.put_temp_pubkey', pubkey)
                if not self.dispatcher.test_or_wait_for_event(
                    'peer.changed',
                    lambda ar: ar['operation'] == 'create' and remote_host_uuid in ar['ids'],
                    lambda: self.datastore.exists('peers', ('id', '=', remote_host_uuid)),
                    timeout=30
                ):
                    raise TaskException(
                        errno.EAUTH,
                        'FreeNAS peer creation failed. Check connection to host {0}.'.format(remote)
                    )
            finally:
                # Always drop the temporary key, success or not.
                self.dispatcher.call_sync('peer.freenas.remove_temp_pubkey', pubkey)
        else:
            # --- SSH flow: key-based ('freenas' user) or password-based ---
            try:
                if key_auth:
                    with io.StringIO() as f:
                        f.write(self.configstore.get('peer.freenas.key.private'))
                        f.seek(0)
                        pkey = RSAKey.from_private_key(f)

                    # The remote key may not be deployed yet; retry for ~50 s.
                    max_tries = 50
                    while True:
                        try:
                            remote_client.connect('ws+ssh://freenas@{0}'.format(
                                wrap_address(remote)), pkey=pkey, port=port
                            )
                            break
                        except AuthenticationException:
                            if max_tries:
                                max_tries -= 1
                                time.sleep(1)
                            else:
                                raise
                else:
                    remote_client.connect(
                        'ws+ssh://{0}@{1}'.format(username, wrap_address(remote)),
                        port=port,
                        password=password
                    )
                remote_client.login_service('replicator')
            except (AuthenticationException, OSError, ConnectionRefusedError):
                raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

        # Exchange SSH host/public keys; keep only the key material
        # (strip the trailing comment field of each host key).
        local_host_key, local_pub_key = self.dispatcher.call_sync('peer.freenas.get_ssh_keys')
        remote_host_key, remote_pub_key = remote_client.call_sync('peer.freenas.get_ssh_keys')
        ip_at_remote_side = remote_client.local_address[0]
        remote_hostname = remote_client.call_sync('system.general.get_config')['hostname']
        remote_host_key = remote_host_key.rsplit(' ', 1)[0]
        local_host_key = local_host_key.rsplit(' ', 1)[0]

        if remote_client.call_sync('peer.query', [('id', '=', hostid)]):
            raise TaskException(errno.EEXIST, 'Peer entry of {0} already exists at {1}'.format(hostname, remote))

        # Local entry describing the remote host.
        peer['credentials'] = {
            '%type': 'freenas-credentials',
            'pubkey': remote_pub_key,
            'hostkey': remote_host_key,
            'port': port,
            'address': remote_hostname
        }

        local_id = remote_client.call_sync('system.info.host_uuid')
        peer['id'] = local_id
        peer['name'] = remote_hostname
        ip = socket.gethostbyname(remote)

        created_id = self.run_subtask_sync(
            'peer.freenas.create_local',
            peer,
            ip,
            True
        )

        # Reuse the same dict, now describing *this* host, for the remote side.
        peer['id'] = hostid
        peer['name'] = remote_peer_name
        peer['credentials'] = {
            '%type': 'freenas-credentials',
            'pubkey': local_pub_key,
            'hostkey': local_host_key,
            'port': local_ssh_config['port'],
            'address': hostname
        }

        try:
            call_task_and_check_state(
                remote_client,
                'peer.freenas.create_local',
                peer,
                ip_at_remote_side
            )
        except TaskException:
            # Remote creation failed: roll back the local entry.
            self.datastore.delete('peers', local_id)
            self.dispatcher.dispatch_event('peer.changed', {
                'operation': 'delete',
                'ids': [local_id]
            })
            raise

        return created_id
    finally:
        remote_client.disconnect()
class Main(object):
    """neighbord daemon: advertises this host's services through discovery
    plugins (loaded from configurable plugin directories) and stays connected
    to the dispatcher."""

    def __init__(self):
        # BUG FIX: config and logger were each assigned twice; the second
        # logger assignment silently replaced the named 'neighbord' logger
        # with the root logger.  Keep the intended single assignments.
        self.logger = logging.getLogger('neighbord')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.plugins = {}

    def parse_config(self, filename):
        """Load the JSON config file and remember the plugin directories."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # BUG FIX: OSError/IOError has no .message attribute on Python 3.
            self.logger.error('Cannot read config file: %s', err)
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        """Open the datastore and the config store; exit on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every *.py file in `dir` as a plugin and call its _init hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                # Deliberately best-effort: a broken plugin must not take
                # the daemon down.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Called by plugins from their _init hook to register themselves."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        """Announce one service through every loaded discovery plugin."""
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        """Announce the standard set of services for this host."""
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            self.logger.error('Failed to register services: {0}'.format(str(err)))

    def connect(self):
        """Retry until connected, logged in and all RPC services resumed."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management', ManagementService(self))
                self.client.register_service('neighbord.discovery', DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def main(self):
        """Daemon entry point: parse args, bring everything up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c

        configure_logging('/var/log/neighbord.log', 'DEBUG')
        setproctitle.setproctitle('neighbord')

        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.client.wait_forever()
class Context(object):
    """Out-of-process task executor.

    Receives one task at a time (via the `task` queue fed by TaskProxyService),
    loads the task class from its source file, runs it, and reports status
    back to the dispatcher over a unix-socket RPC connection.
    """

    def __init__(self):
        self.service = TaskProxyService(self)
        # Capacity 1: this process executes exactly one task at a time.
        self.task = queue.Queue(1)
        self.datastore = None
        self.configstore = None
        self.conn = None
        self.instance = None
        self.running = Event()

    def put_status(self, state, result=None, exception=None):
        """Report task state (and optional result/serialized error) upstream."""
        obj = {
            'status': state,
            'result': None
        }

        if result is not None:
            obj['result'] = result

        if exception is not None:
            obj['error'] = serialize_error(exception)

        self.conn.call_sync('task.put_status', obj)

    def task_progress_handler(self, args):
        # Forward progress events to the running task instance, if any.
        if self.instance:
            self.instance.task_progress_handler(args)

    def collect_fds(self, obj):
        """Recursively yield every FileDescriptor found in nested dicts/lists."""
        if isinstance(obj, dict):
            for v in obj.values():
                if isinstance(v, FileDescriptor):
                    yield v
                else:
                    yield from self.collect_fds(v)

        if isinstance(obj, (list, tuple)):
            for o in obj:
                if isinstance(o, FileDescriptor):
                    yield o
                else:
                    yield from self.collect_fds(o)

    def close_fds(self, fds):
        """Best-effort close of all descriptors passed in with the task args."""
        for i in fds:
            try:
                os.close(i.fd)
            except OSError:
                pass

    def main(self):
        """Process entry point; sys.argv[1] is the check-in key from the
        dispatcher.  Loops forever, executing queued tasks."""
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        configure_logging(None, logging.DEBUG)

        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.register_event_handler('task.progress', self.task_progress_handler)
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                task = self.task.get()
                # Pick up the current log level each time a task starts.
                logging.root.setLevel(self.conn.call_sync('management.get_logging_level'))
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                if task['debugger']:
                    # Attach to a remote pydev debugger before running.
                    sys.path.append('/usr/local/lib/dispatcher/pydev')
                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                module = load_module_from_file(name, task['filename'])
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                # Track descriptors passed in the args so they get closed
                # no matter how the task ends.
                fds = list(self.collect_fds(task['args']))

                try:
                    self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.user = task['user']
                    self.instance.environment = task['environment']
                    self.running.set()
                    result = self.instance.run(*task['args'])
                except BaseException as err:
                    print("Task exception: {0}".format(str(err)), file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    # Attempt rollback if the task class provides one;
                    # rollback failures are logged but do not mask the
                    # original failure.
                    if hasattr(self.instance, 'rollback'):
                        self.put_status('ROLLBACK')
                        try:
                            self.instance.rollback(*task['args'])
                        except BaseException as rerr:
                            print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                            traceback.print_exc(file=sys.stderr)

                    self.put_status('FAILED', exception=err)
                else:
                    self.put_status('FINISHED', result=result)
                finally:
                    self.close_fds(fds)
                    self.running.clear()
            except RpcException as err:
                print("RPC failed: {0}".format(str(err)), file=sys.stderr)
                print(traceback.format_exc(), flush=True)
                sys.exit(errno.EBADMSG)
            except socket.error as err:
                print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.ETIMEDOUT)

            if task['debugger']:
                import pydevd
                pydevd.stoptrace()

            setproctitle.setproctitle('task executor (idle)')
def run(self, peer):
    """Pair this host with a remote replication peer over SSH.

    Validates the credentials, connects to the remote replicator service,
    exchanges SSH keys, then creates matching peer entries on both sides
    (rolling back the local entry if the remote creation fails).
    Raises TaskException on validation, connection or duplicate-peer errors.
    """
    if self.datastore.exists('peers', ('address', '=', peer['address']), ('type', '=', 'replication')):
        raise TaskException(errno.EEXIST, 'Replication peer entry for {0} already exists'.format(peer['address']))

    if peer['credentials']['type'] != 'ssh':
        raise TaskException(errno.EINVAL, 'SSH credentials type is needed to perform replication peer pairing')

    remote = peer.get('address')
    credentials = peer['credentials']
    username = credentials.get('username')
    port = credentials.get('port', 22)
    password = credentials.get('password')

    if not username:
        raise TaskException(errno.EINVAL, 'Username has to be specified')

    if not remote:
        raise TaskException(errno.EINVAL, 'Address of remote host has to be specified')

    if not password:
        raise TaskException(errno.EINVAL, 'Password has to be specified')

    remote_client = Client()
    try:
        try:
            remote_client.connect('ws+ssh://{0}@{1}'.format(username, remote), port=port, password=password)
            remote_client.login_service('replicator')
        except (AuthenticationException, OSError, ConnectionRefusedError):
            raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

        # Exchange key material; index 0 is the host key, index 1 the
        # public key (see the 'pubkey' entries built below).
        local_keys = self.dispatcher.call_sync('peer.get_ssh_keys')
        remote_keys = remote_client.call_sync('peer.get_ssh_keys')
        ip_at_remote_side = remote_client.call_sync('management.get_sender_address').split(',', 1)[0]

        # known_hosts-style entries: "<address> <key>" with the key's
        # trailing comment field stripped.
        remote_host_key = remote + ' ' + remote_keys[0].rsplit(' ', 1)[0]
        local_host_key = ip_at_remote_side + ' ' + local_keys[0].rsplit(' ', 1)[0]
        local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

        if remote_client.call_sync('peer.query', [('name', '=', peer['name'])]):
            raise TaskException(errno.EEXIST, 'Peer entry {0} already exists at {1}'.format(peer['name'], remote))

        # Local entry describing the remote host.
        peer['credentials'] = {
            'pubkey': remote_keys[1],
            'hostkey': remote_host_key,
            'port': port,
            'type': 'replication'
        }

        self.join_subtasks(self.run_subtask(
            'peer.replication.create_local',
            peer
        ))

        # Reuse the same dict, now describing *this* host, for the remote side.
        peer['address'] = ip_at_remote_side
        peer['credentials'] = {
            'pubkey': local_keys[1],
            'hostkey': local_host_key,
            'port': local_ssh_config['port'],
            'type': 'replication'
        }

        # NOTE(review): with select='id' this query presumably yields the
        # entry's id, but whether it is a scalar or an iterable is not
        # visible here — verify against the datastore API before relying
        # on the delete/dispatch below in the rollback path.
        id = self.datastore.query('peers', ('name', '=', peer['name']), select='id')
        try:
            call_task_and_check_state(
                remote_client,
                'peer.replication.create_local',
                peer
            )
        except TaskException:
            # Remote creation failed: roll back the local entry.
            self.datastore.delete('peers', id)
            self.dispatcher.dispatch_event('peer.changed', {
                'operation': 'delete',
                'ids': [id]
            })
            raise
    finally:
        remote_client.disconnect()
class Context(object):
    """schedulerd daemon context.

    Wraps an APScheduler BackgroundScheduler whose jobs submit middleware
    tasks through the dispatcher, and keeps the dispatcher connection alive.
    """

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        # Maps calendar-task job id -> currently running middleware task id.
        self.active_tasks = {}

    def init_datastore(self):
        """Open the datastore and config store; exit on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        # Persistent jobs live in the FreeNAS job store; 'temp' is in-memory.
        store = FreeNASJobStore()
        self.scheduler = BackgroundScheduler(jobstores={'default': store, 'temp': MemoryJobStore()}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        """Retry until connected, logged in and all RPC services resumed."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Scheduler job callback: submit the task, wait for it, record the
        run, and raise a CRITICAL alert if it did not finish cleanly."""
        tid = self.client.call_sync('task.submit_with_env', args[0], args[1:], {
            'RUN_AS_USER': '******',
            'CALENDAR_TASK_NAME': kwargs.get('name')
        })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            # Best-effort alert; a failure to emit must not break bookkeeping.
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']
                    ),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def checkin(self):
        checkin()

    def main(self):
        """Daemon entry point: parse args, bring everything up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')

        setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.checkin()
        self.client.wait_forever()
class Main(object):
    """clid daemon object: hosts CLI evaluation services on the dispatcher bus."""

    def __init__(self):
        self.logger = logging.getLogger('clid')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.config = None
        self.logger = logging.getLogger()
        self.plugin_dirs = []
        self.ml = None
        self.context = None

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Any closed/logged-out connection triggers a reconnect loop.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        """Assemble the CLI context and main loop around the live connection."""
        self.logger.info('Initializing CLI instance')
        self.context = ctx = Context()
        ctx.connection = self.client
        ctx.plugin_dirs = PLUGIN_DIRS
        ctx.discover_plugins()
        ctx.start_entity_subscribers()
        ctx.login_plugins()
        self.ml = MainLoop(ctx)
        self.logger.info('CLI instance ready')

    def connect(self):
        """Retry until connected; register then resume every clid service."""
        services = (
            ('clid.management', lambda: ManagementService(self)),
            ('clid.eval', lambda: EvalService(self)),
            ('clid.debug', DebugService),
        )
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                for svc_name, factory in services:
                    self.client.register_service(svc_name, factory())
                for svc_name, _ in services:
                    self.client.resume_service(svc_name)
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        """Daemon entry point: parse args, bring services up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parsed = parser.parse_args()
        self.config = parsed.c
        configure_logging('clid', 'DEBUG')

        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
class Main(object):
    """clid daemon object: exposes CLI evaluation over the dispatcher bus."""

    def __init__(self):
        self.logger = logging.getLogger('clid')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.config = None
        self.logger = logging.getLogger()
        self.plugin_dirs = []
        self.ml = None
        self.context = None

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Reconnect whenever the dispatcher drops us or logs us out.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        """Build the CLI context and main loop on top of the connection."""
        self.logger.info('Initializing CLI instance')
        self.context = cli_ctx = Context()
        cli_ctx.connection = self.client
        cli_ctx.plugin_dirs = PLUGIN_DIRS
        cli_ctx.discover_plugins()
        cli_ctx.start_entity_subscribers()
        cli_ctx.login_plugins()
        self.ml = MainLoop(cli_ctx)
        self.logger.info('CLI instance ready')

    def connect(self):
        """Keep retrying until fully registered with the dispatcher."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as exc:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(exc)))
                time.sleep(1)

    def main(self):
        """Daemon entry point: parse args, bring services up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parsed = parser.parse_args()
        self.config = parsed.c
        configure_logging('/var/log/clid.log', 'DEBUG')

        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
class BaseTestCase(unittest.TestCase):
    """Base class for middleware integration tests.

    Connects to the host under test (TESTHOST/TESTUSER/TESTPWD env vars),
    submits tasks over the dispatcher connection and tracks their completion
    through 'task.updated' / 'task.progress' events.
    """

    class TaskState(object):
        # Mutable record of one submitted task, updated from events.
        def __init__(self):
            self.tid = None
            self.state = None
            self.message = None
            self.result = None
            self.name = None
            # BUG FIX: assertTaskCompletion reads t.error, but no event
            # handler ever set it — default it so that read cannot raise
            # AttributeError.
            self.error = None
            self.ended = Event()

    def __init__(self, methodName):
        super(BaseTestCase, self).__init__(methodName)
        self.tasks = {}
        self.tasks_lock = Lock()
        self.conn = None
        self.task_timeout = 30

    def setUp(self):
        """Connect, authenticate and subscribe to all events."""
        # Cleanup: the previous try/except-raise wrapper was a no-op.
        self.conn = Client()
        self.conn.event_callback = self.on_event
        self.conn.connect(os.getenv('TESTHOST', '127.0.0.1'))
        self.conn.login_user(
            os.getenv('TESTUSER', 'root'),
            os.getenv('TESTPWD', ''),
            timeout=self.task_timeout)
        self.conn.subscribe_events('*')

    def tearDown(self):
        self.conn.disconnect()

    def submitTask(self, name, *args):
        """Submit task `name` with `args` and start tracking it; returns tid."""
        with self.tasks_lock:
            # Cleanup: the previous except-and-reraise clauses were no-ops.
            tid = self.conn.call_sync('task.submit', name, args)
            state = self.TaskState()
            state.tid = tid
            state.name = name
            self.tasks[tid] = state
        return tid

    def assertTaskCompletion(self, tid):
        """Block until task `tid` ends (or times out) and assert it FINISHED."""
        t = self.tasks[tid]
        if not t.ended.wait(self.task_timeout):
            self.fail('Task {0} timed out'.format(tid))

        # Pick the most useful message to show on failure.  NOTE(review):
        # t.error is never populated by the event handlers (always None);
        # the state/message cross-over below is kept as-is — verify the
        # intended semantics before relying on the failure text.
        if t.state.count('Executing...'):
            message = t.error
        elif t.message and t.message.count('Executing...'):
            message = t.state
        else:
            message = t.message
        if not message:
            self.query_task(tid)
        self.assertEqual(t.state, 'FINISHED', msg=message)

    def assertTaskFailure(self, tid):
        """Block until task `tid` ends and assert it did NOT finish cleanly."""
        t = self.tasks[tid]
        if not t.ended.wait(self.task_timeout):
            self.fail('Task {0} timed out'.format(tid))
        self.assertNotEqual(t.state, 'FINISHED', msg=t.message)

    def assertSeenEvent(self, name, func=None):
        # Not implemented yet; kept for API compatibility with subclasses.
        pass

    def skip(self, reason):
        raise unittest.SkipTest(str(reason))

    def getTaskResult(self, tid):
        t = self.tasks[tid]
        return t.result

    def on_event(self, name, args):
        """Event callback: routes task.updated/task.progress into TaskState."""
        with self.tasks_lock:
            if name == 'task.updated':
                if args['id'] not in self.tasks:
                    # Unknown task still executing: somebody else's task.
                    if args['state'] == 'EXECUTING':
                        return
                else:
                    t = self.tasks[args['id']]
                    t.state = args['state']
                    if t.state in ('FINISHED', 'FAILED'):
                        t.result = args['result'] if 'result' in args else None
                        t.ended.set()
            elif name == 'task.progress':
                if args['id'] not in self.tasks:
                    if args['state'] == 'EXECUTING':
                        return
                else:
                    t = self.tasks[args['id']]
                    t.message = args['message']

    def on_eventOrig(self, name, args):
        # Legacy variant of on_event kept for reference; manual lock
        # acquire/release instead of a with-block.
        self.tasks_lock.acquire()
        if name == 'task.updated':
            if args['id'] not in self.tasks:
                if args['state'] == 'EXECUTING':
                    self.tasks_lock.release()
                    return
            else:
                t = self.tasks[args['id']]
                t.state = args['state']
                if t.state in ('FINISHED', 'FAILED'):
                    t.result = args['result'] if 'result' in args else None
                    t.ended.set()
        elif name == 'task.progress':
            if args['id'] not in self.tasks:
                if args['state'] == 'EXECUTING':
                    self.tasks_lock.release()
                    return
            else:
                t = self.tasks[args['id']]
                t.message = args['message']
        self.tasks_lock.release()

    def pretty_print(self, res):
        """Dump `res` as indented JSON when tests run with -v."""
        if '-v' in sys.argv:
            # BUG FIX: was a Python 2 print statement (syntax error on
            # Python 3); the parenthesized call form works on both.
            print(json.dumps(res, indent=4, sort_keys=True))

    def query_task(self, tid):
        # Makes tests very slow, keep as debug
        query = self.conn.call_sync('task.query', [('id', '=', tid)])
        message = query[0]['error']
        self.pretty_print(message)
def run(self, peer, initial_credentials):
    """Pair this host with a remote FreeNAS peer.

    Establishes mutual trust: creates a peer entry for the remote host in
    the local datastore and, via a remote task, a matching entry for this
    host on the remote side.  Authentication is either by one-time auth
    code, by the shared 'freenas' SSH key, or by username/password.

    Returns the id of the locally-created peer entry.  Raises TaskException
    on duplicate peers, connection failures or remote RPC errors.
    """
    hostid = self.dispatcher.call_sync('system.info.host_uuid')
    hostname = self.dispatcher.call_sync(
        'system.general.get_config')['hostname']
    remote_peer_name = hostname
    credentials = peer['credentials']
    remote = credentials.get('address')
    port = credentials.get('port', 22)
    username = initial_credentials.get('username')
    password = initial_credentials.get('password')
    auth_code = initial_credentials.get('auth_code')
    key_auth = initial_credentials.get('key_auth')
    local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

    # Refuse to pair twice with the same address.
    if self.datastore.exists('peers', ('credentials.address', '=', remote),
                             ('type', '=', 'freenas')):
        raise TaskException(
            errno.EEXIST,
            'FreeNAS peer entry for {0} already exists'.format(remote))

    remote_client = Client()
    try:
        if auth_code:
            # --- One-time auth-code flow: plain websocket, no SSH ---
            try:
                remote_client.connect('ws://{0}'.format(
                    wrap_address(remote)))
            except (AuthenticationException, OSError,
                    ConnectionRefusedError):
                raise TaskException(
                    errno.ECONNABORTED,
                    'Cannot connect to {0}:{1}'.format(remote, port))

            try:
                remote_host_uuid, pubkey = remote_client.call_sync(
                    'peer.freenas.auth_with_code', auth_code, hostname,
                    local_ssh_config['port'])
            except RpcException as err:
                raise TaskException(err.code, err.message)

            # The remote side calls back into us using this temporary
            # pubkey; wait (up to 30 s) for it to create our peer entry.
            try:
                self.dispatcher.call_sync('peer.freenas.put_temp_pubkey',
                                          pubkey)
                if not self.dispatcher.test_or_wait_for_event(
                        'peer.changed',
                        lambda ar: ar['operation'] == 'create' and
                        remote_host_uuid in ar['ids'],
                        lambda: self.datastore.exists(
                            'peers', ('id', '=', remote_host_uuid)),
                        timeout=30):
                    raise TaskException(
                        errno.EAUTH,
                        'FreeNAS peer creation failed. Check connection to host {0}.'
                        .format(remote))
            finally:
                # Always drop the temporary key, success or not.
                self.dispatcher.call_sync(
                    'peer.freenas.remove_temp_pubkey', pubkey)
        else:
            # --- SSH flow: key-based ('freenas' user) or password-based ---
            try:
                if key_auth:
                    with io.StringIO() as f:
                        f.write(
                            self.configstore.get(
                                'peer.freenas.key.private'))
                        f.seek(0)
                        pkey = RSAKey.from_private_key(f)

                    # The remote key may not be deployed yet; retry ~50 s.
                    max_tries = 50
                    while True:
                        try:
                            remote_client.connect(
                                'ws+ssh://freenas@{0}'.format(
                                    wrap_address(remote)),
                                pkey=pkey,
                                port=port)
                            break
                        except AuthenticationException:
                            if max_tries:
                                max_tries -= 1
                                time.sleep(1)
                            else:
                                raise
                else:
                    remote_client.connect('ws+ssh://{0}@{1}'.format(
                        username, wrap_address(remote)),
                        port=port,
                        password=password)

                remote_client.login_service('replicator')
            except (AuthenticationException, OSError,
                    ConnectionRefusedError):
                raise TaskException(
                    errno.ECONNABORTED,
                    'Cannot connect to {0}:{1}'.format(remote, port))

        # Exchange SSH host/public keys; strip the trailing comment field
        # of each host key.
        local_host_key, local_pub_key = self.dispatcher.call_sync(
            'peer.freenas.get_ssh_keys')
        remote_host_key, remote_pub_key = remote_client.call_sync(
            'peer.freenas.get_ssh_keys')
        ip_at_remote_side = remote_client.local_address[0]
        remote_hostname = remote_client.call_sync(
            'system.general.get_config')['hostname']
        remote_host_key = remote_host_key.rsplit(' ', 1)[0]
        local_host_key = local_host_key.rsplit(' ', 1)[0]

        if remote_client.call_sync('peer.query', [('id', '=', hostid)]):
            raise TaskException(
                errno.EEXIST,
                'Peer entry of {0} already exists at {1}'.format(
                    hostname, remote))

        # Local entry describing the remote host.
        peer['credentials'] = {
            '%type': 'freenas-credentials',
            'pubkey': remote_pub_key,
            'hostkey': remote_host_key,
            'port': port,
            'address': remote_hostname
        }

        local_id = remote_client.call_sync('system.info.host_uuid')
        peer['id'] = local_id
        peer['name'] = remote_hostname
        ip = socket.gethostbyname(remote)

        created_ids = self.join_subtasks(
            self.run_subtask('peer.freenas.create_local', peer, ip, True))

        # Reuse the same dict, now describing *this* host, for the remote
        # side.
        peer['id'] = hostid
        peer['name'] = remote_peer_name
        peer['credentials'] = {
            '%type': 'freenas-credentials',
            'pubkey': local_pub_key,
            'hostkey': local_host_key,
            'port': local_ssh_config['port'],
            'address': hostname
        }

        try:
            call_task_and_check_state(remote_client,
                                      'peer.freenas.create_local', peer,
                                      ip_at_remote_side)
        except TaskException:
            # Remote creation failed: roll back the local entry.
            self.datastore.delete('peers', local_id)
            self.dispatcher.dispatch_event('peer.changed', {
                'operation': 'delete',
                'ids': [local_id]
            })
            raise

        return created_ids[0]
    finally:
        remote_client.disconnect()
class Main(object):
    """containerd daemon: bridges the middleware dispatcher with the local
    container/VM machinery (management network, NAT, EC2 metadata, consoles)."""

    def __init__(self):
        self.client = None                  # dispatcher Client; created in init_dispatcher()
        self.datastore = None
        self.configstore = None
        self.config = None                  # path to the middleware config file
        self.mgmt = None                    # ManagementNetwork; created in init_mgmt()
        self.vm_started = Event()
        self.containers = {}                # running containers/VMs
        self.tokens = {}
        self.logger = logging.getLogger('containerd')
        self.bridge_interface = None
        self.used_nmdms = []                # nmdm console device indices currently in use

    def init_datastore(self):
        """Open the datastore and config store; exit the process on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def allocate_nmdm(self):
        """Reserve and return the lowest free nmdm device index.

        NOTE(review): falls through and returns None once all 255 indices
        are taken -- confirm callers can never exhaust the pool.
        """
        for i in range(0, 255):
            if i not in self.used_nmdms:
                self.used_nmdms.append(i)
                return i

    def release_nmdm(self, index):
        """Return a previously allocated nmdm index to the pool."""
        self.used_nmdms.remove(index)

    def connect(self):
        """Connect to the dispatcher over the local unix socket and register
        containerd's RPC services; retries forever, once per second."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('containerd')
                self.client.enable_server()
                self.client.register_service('containerd.management', ManagementService(self))
                self.client.register_service('containerd.debug', DebugService(gevent=True, builtins={"context": self}))
                self.client.resume_service('containerd.management')
                self.client.resume_service('containerd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection;
        on connection loss or logout the client reconnects automatically."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.use_bursts = True
        self.client.on_error(on_error)
        self.connect()

    def init_mgmt(self):
        """Bring up the management network bridge and attach the EC2
        metadata address (169.254.169.254/32) to it."""
        self.mgmt = ManagementNetwork(self, MGMT_INTERFACE, MGMT_ADDR)
        self.mgmt.up()
        self.mgmt.bridge_if.add_address(netif.InterfaceAddress(
            netif.AddressFamily.INET,
            ipaddress.ip_interface('169.254.169.254/32')
        ))

    def init_nat(self):
        """Install a pf NAT rule translating management-subnet traffic out of
        the default-route interface; no-op when there is no default route."""
        default_if = self.client.call_sync('networkd.configuration.get_default_interface')
        if not default_if:
            self.logger.warning('No default route interface; not configuring NAT')
            return

        p = pf.PF()

        # Try to find and remove existing NAT rules for the same subnet
        oldrule = first_or_default(
            lambda r: r.src.address.address == MGMT_ADDR.network.network_address,
            p.get_rules('nat')
        )

        if oldrule:
            p.delete_rule('nat', oldrule.index)

        rule = pf.Rule()
        rule.src.address.address = MGMT_ADDR.network.network_address
        rule.src.address.netmask = MGMT_ADDR.netmask
        rule.action = pf.RuleAction.NAT
        rule.af = socket.AF_INET
        rule.ifname = default_if
        rule.redirect_pool.append(pf.Address(ifname=default_if))
        rule.proxy_ports = [50001, 65535]
        p.append_rule('nat', rule)

        try:
            p.enable()
        except OSError as err:
            # pf already being enabled is fine; anything else is fatal
            if err.errno != errno.EEXIST:
                raise err

    def init_ec2(self):
        """Start the EC2-compatible metadata HTTP server."""
        self.ec2 = EC2MetadataServer(self)
        self.ec2.start()

    def vm_by_mgmt_mac(self, mac):
        """Find the container/VM owning the tap interface with this MAC,
        or None when no tap interface matches."""
        for i in self.containers.values():
            for tapmac in i.tap_interfaces.values():
                if tapmac == mac:
                    return i

        return None

    def vm_by_mgmt_ip(self, ip):
        """Find the VM holding a management-network DHCP lease for this IP;
        implicitly returns None when no allocation matches."""
        for i in self.mgmt.allocations.values():
            if i.lease.client_ip == ip:
                return i.vm()

    def die(self):
        """Stop all containers, drop the dispatcher connection and exit."""
        self.logger.warning('Exiting')
        for i in self.containers.values():
            i.stop(True)

        self.client.disconnect()
        sys.exit(0)

    def generate_id(self):
        """Return a random 32-character alphanumeric token."""
        return ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])

    def dispatcher_error(self, error):
        # Any unrecoverable dispatcher error tears the daemon down.
        self.die()

    def main(self):
        """Daemon entry point: parse arguments, wire up subsystems in
        dependency order, then serve the console WebSocket endpoints."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-p', type=int, metavar='PORT', default=5500, help="WebSockets server port")
        args = parser.parse_args()
        configure_logging('/var/log/containerd.log', 'DEBUG')
        setproctitle.setproctitle('containerd')
        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGQUIT, self.die)
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_mgmt()
        self.init_nat()
        self.init_ec2()
        self.logger.info('Started')

        # WebSockets server: one IPv4 and one IPv6 listener on the same port
        kwargs = {}
        s4 = WebSocketServer(('', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)
        s6 = WebSocketServer(('::', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)
        serv_threads = [gevent.spawn(s4.serve_forever), gevent.spawn(s6.serve_forever)]
        gevent.joinall(serv_threads)
class Context(object):
    """Shared state for the interactive CLI: middleware connection, plugin
    registry, namespace tree, event handling and task submission."""

    def __init__(self):
        self.hostname = None
        self.connection = Client()          # middleware connection
        self.ml = None                      # MainLoop / readline owner; set externally
        self.logger = logging.getLogger('cli')
        self.plugin_dirs = []
        self.task_callbacks = {}            # task id -> completion callback
        self.plugins = {}                   # plugin path -> module
        self.variables = VariableStore()
        self.root_ns = RootNamespace('')
        self.event_masks = ['*']
        self.event_divert = False           # when True, events go to event_queue
        self.event_queue = six.moves.queue.Queue()
        self.keepalive_timer = None
        self.argparse_parser = None
        config.instance = self

    @property
    def is_interactive(self):
        """True when stdout is attached to a terminal."""
        return os.isatty(sys.stdout.fileno())

    def start(self):
        """Load plugins, then open the middleware connection."""
        self.discover_plugins()
        self.connect()

    def connect(self):
        """Connect to self.hostname; print usage and exit on failure."""
        try:
            self.connection.connect(self.hostname)
        except socket_error as err:
            output_msg(_(
                "Could not connect to host: {0} due to error: {1}".format(self.hostname, err)
            ))
            self.argparse_parser.print_help()
            sys.exit(1)

    def login(self, user, password):
        """Authenticate, subscribe to events and install handlers; exits on
        bad credentials (EACCES)."""
        try:
            self.connection.login_user(user, password)
            self.connection.subscribe_events(*EVENT_MASKS)
            self.connection.on_event(self.handle_event)
            self.connection.on_error(self.connection_error)
        except RpcException as e:
            if e.code == errno.EACCES:
                self.connection.disconnect()
                output_msg(_("Wrong username or password"))
                sys.exit(1)

        self.login_plugins()

    def keepalive(self):
        """Ping the server to keep the connection alive (if still open)."""
        if self.connection.opened:
            self.connection.call_sync('management.ping')

    def read_middleware_config_file(self, file):
        """
        If there is a cli['plugin-dirs'] in middleware.conf use that,
        otherwise use the default plugins dir within cli namespace.

        NOTE(review): plug_dirs is never set in the config branch, so the
        default plugins directory is appended even when the config supplies
        its own dirs -- confirm whether that is intended.
        """
        plug_dirs = None
        if file:
            with open(file, 'r') as f:
                data = json.load(f)

            if 'cli' in data and 'plugin-dirs' in data['cli']:
                if type(data['cli']['plugin-dirs']) != list:
                    return
                self.plugin_dirs += data['cli']['plugin-dirs']

        if plug_dirs is None:
            plug_dirs = os.path.dirname(os.path.realpath(__file__))
            plug_dirs = os.path.join(plug_dirs, 'plugins')
            self.plugin_dirs += [plug_dirs]

    def discover_plugins(self):
        """Scan every configured plugin directory for plugin modules."""
        for dir in self.plugin_dirs:
            self.logger.debug(_("Searching for plugins in %s"), dir)
            self.__discover_plugin_dir(dir)

    def login_plugins(self):
        """Give every loaded plugin a post-login hook (_login)."""
        for i in list(self.plugins.values()):
            if hasattr(i, '_login'):
                i._login(self)

    def __discover_plugin_dir(self, dir):
        for i in glob.glob1(dir, "*.py"):
            self.__try_load_plugin(os.path.join(dir, i))

    def __try_load_plugin(self, path):
        """Load a plugin module once and run its _init hook."""
        if path in self.plugins:
            return

        self.logger.debug(_("Loading plugin from %s"), path)
        name, ext = os.path.splitext(os.path.basename(path))
        plugin = imp.load_source(name, path)

        if hasattr(plugin, '_init'):
            plugin._init(self)
            self.plugins[path] = plugin

    def __try_reconnect(self):
        """Reconnect loop used after an unexpected connection drop; retries
        every 2 seconds and re-authenticates (token or local user)."""
        output_lock.acquire()
        self.ml.blank_readline()

        output_msg(_('Connection lost! Trying to reconnect...'))
        retries = 0
        while True:
            retries += 1
            try:
                time.sleep(2)
                self.connect()
                try:
                    if self.hostname == '127.0.0.1':
                        self.connection.login_user(getpass.getuser(), '')
                    else:
                        self.connection.login_token(self.connection.token)

                    self.connection.subscribe_events(*EVENT_MASKS)
                except RpcException:
                    output_msg(_("Reauthentication failed (most likely token expired or server was restarted)"))
                    sys.exit(1)
                break
            except Exception as e:
                output_msg(_('Cannot reconnect: {0}'.format(str(e))))

        self.ml.restore_readline()
        output_lock.release()

    def attach_namespace(self, path, ns):
        """Register namespace ns under the slash-separated path, walking the
        existing namespace tree; logs a warning on a missing intermediate."""
        splitpath = path.split('/')
        ptr = self.root_ns
        ptr_namespaces = ptr.namespaces()

        for n in splitpath[1:-1]:
            if n not in list(ptr_namespaces().keys()):
                self.logger.warn(_("Cannot attach to namespace %s"), path)
                return

            ptr = ptr_namespaces()[n]

        ptr.register_namespace(ns)

    def connection_error(self, event, **kwargs):
        """Handle client-level errors: clean exit on logout, reconnect loop
        on connection loss."""
        if event == ClientError.LOGOUT:
            output_msg('Logged out from server.')
            self.connection.disconnect()
            sys.exit(0)

        if event == ClientError.CONNECTION_CLOSED:
            time.sleep(1)
            self.__try_reconnect()
            return

    def handle_event(self, event, data):
        """Dispatch task completion callbacks, then print the event."""
        if event == 'task.updated':
            if data['id'] in self.task_callbacks:
                self.handle_task_callback(data)

        self.print_event(event, data)

    def handle_task_callback(self, data):
        # Only terminal states trigger the registered callback.
        if data['state'] in ('FINISHED', 'CANCELLED', 'ABORTED', 'FAILED'):
            self.task_callbacks[data['id']](data['state'])

    def print_event(self, event, data):
        """Render an event on the console, or queue it while diverted; task
        failures additionally fetch and print the task's error message."""
        if self.event_divert:
            self.event_queue.put((event, data))
            return

        if event == 'task.progress':
            return

        output_lock.acquire()
        self.ml.blank_readline()

        translation = events.translate(self, event, data)
        if translation:
            output_msg(translation)
            if 'state' in data:
                if data['state'] == 'FAILED':
                    status = self.connection.call_sync('task.status', data['id'])
                    output_msg(_(
                        "Task #{0} error: {1}".format(
                            data['id'],
                            status['error'].get('message', '') if status.get('error') else ''
                        )
                    ))

        sys.stdout.flush()
        self.ml.restore_readline()
        output_lock.release()

    def call_sync(self, name, *args, **kwargs):
        """Synchronous RPC call, result wrapped for CLI consumption."""
        return wrap(self.connection.call_sync(name, *args, **kwargs))

    def call_task_sync(self, name, *args, **kwargs):
        """Submit a task and block until it finishes, suppressing the prompt
        while waiting."""
        self.ml.skip_prompt_print = True
        wrapped_result = wrap(self.connection.call_task_sync(name, *args))
        self.ml.skip_prompt_print = False
        return wrapped_result

    def submit_task(self, name, *args, **kwargs):
        """Submit a task; when 'tasks_blocking' is set, divert events and
        drive a progress bar until the task reaches a terminal state.

        Returns the task id in both modes.
        """
        callback = kwargs.pop('callback', None)
        message_formatter = kwargs.pop('message_formatter', None)

        if not self.variables.get('tasks_blocking'):
            tid = self.connection.call_sync('task.submit', name, args)
            if callback:
                self.task_callbacks[tid] = callback
            return tid
        else:
            output_msg(_("Hit Ctrl+C to terminate task if needed"))
            self.event_divert = True
            tid = self.connection.call_sync('task.submit', name, args)
            progress = ProgressBar()
            try:
                while True:
                    event, data = self.event_queue.get()

                    if event == 'task.progress' and data['id'] == tid:
                        message = data['message']
                        if isinstance(message_formatter, collections.Callable):
                            message = message_formatter(message)
                        progress.update(percentage=data['percentage'], message=message)

                    if event == 'task.updated' and data['id'] == tid:
                        progress.update(message=data['state'])
                        if data['state'] == 'FINISHED':
                            progress.finish()
                            break
                        if data['state'] == 'FAILED':
                            print()
                            break
            except KeyboardInterrupt:
                print()
                output_msg(_("User requested task termination. Task abort signal sent"))
                self.call_sync('task.abort', tid)

        self.event_divert = False
        return tid
class Main(object):
    """neighbord daemon: advertises this host's services (mDNS-style) via
    pluggable discovery backends and serves discovery RPCs to the dispatcher.

    Fixes over the previous revision:
    - __init__ assigned self.config twice and then overwrote the named
      'neighbord' logger with the root logger; the duplicates are removed so
      log records keep the 'neighbord' logger name.
    - parse_config used err.message, which does not exist on Python 3
      exceptions; replaced with str(err).
    """

    def __init__(self):
        self.logger = logging.getLogger('neighbord')
        self.config = None          # parsed JSON config (dict) after parse_config()
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher Client; created in init_dispatcher()
        self.plugin_dirs = []
        self.plugins = {}           # plugin name -> plugin instance

    def parse_config(self, filename):
        """Load the JSON config file and remember the plugin directories;
        exits the daemon when the file is unreadable or malformed."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        """Open the datastore and config store; exit the process on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnects automatically
        on connection loss or logout."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        """Load plugins from every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import each *.py file in dir and run its _init(self) hook; a
        failing plugin is logged and skipped, never fatal."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Instantiate and register a discovery backend under name."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        """Announce one service through every registered backend."""
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        """Announce this host's well-known services; best-effort, failures
        are logged but never raised."""
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            self.logger.error('Failed to register services: {0}'.format(str(err)))

    def connect(self):
        """Connect to the dispatcher over the local unix socket and register
        neighbord's RPC services; retries forever, once per second."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management', ManagementService(self))
                self.client.register_service('neighbord.discovery', DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def checkin(self):
        """Notify service supervision that startup completed."""
        checkin()

    def main(self):
        """Daemon entry point: parse args, initialize subsystems in order,
        register services and block forever on the dispatcher connection."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/neighbord.log', 'DEBUG')
        setproctitle('neighbord')
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.checkin()
        self.client.wait_forever()
class BaseTestCase(unittest.TestCase):
    """Base class for middleware integration tests: connects to a test host,
    submits tasks and tracks their lifecycle via server events.

    Fixes over the previous revision:
    - pretty_print used the Python 2 print statement (a SyntaxError on
      Python 3); now uses the print() function, valid on both.
    - assertTaskCompletion read t.error, an attribute TaskState never
      defines; guarded with getattr so it degrades to the normal
      query_task path instead of raising AttributeError.
    - the dead, commented-out debug prints and the superseded manual-lock
      on_eventOrig duplicate were removed (on_event with the context
      manager is the live handler).
    """

    class TaskState(object):
        """Mutable record of one submitted task's observed progress."""

        def __init__(self):
            self.tid = None
            self.state = None
            self.message = None
            self.result = None
            self.name = None
            self.ended = Event()    # set when the task reaches a terminal state

    def __init__(self, methodName):
        super(BaseTestCase, self).__init__(methodName)
        self.tasks = {}             # tid -> TaskState
        self.tasks_lock = Lock()
        self.conn = None
        self.task_timeout = 30      # seconds to wait for task completion

    def setUp(self):
        """Connect and authenticate against the host named by TESTHOST /
        TESTUSER / TESTPWD environment variables, subscribing to all events."""
        self.conn = Client()
        self.conn.event_callback = self.on_event
        self.conn.connect(os.getenv('TESTHOST', '127.0.0.1'))
        self.conn.login_user(os.getenv('TESTUSER', 'root'), os.getenv('TESTPWD', ''), timeout=self.task_timeout)
        self.conn.subscribe_events('*')

    def tearDown(self):
        self.conn.disconnect()

    def submitTask(self, name, *args):
        """Submit a task and start tracking it; returns the task id."""
        with self.tasks_lock:
            tid = self.conn.call_sync('task.submit', name, args)
            self.tasks[tid] = self.TaskState()
            self.tasks[tid].tid = tid
            self.tasks[tid].name = name
        return tid

    def assertTaskCompletion(self, tid):
        """Wait for the task to end and assert it reached FINISHED."""
        t = self.tasks[tid]
        if not t.ended.wait(self.task_timeout):
            self.fail('Task {0} timed out'.format(tid))

        # Pick the most informative text available for the failure message.
        if t.state.count('Executing...'):
            message = getattr(t, 'error', None)
        elif t.message and t.message.count('Executing...'):
            message = t.state
        else:
            message = t.message
        if not message:
            self.query_task(tid)
        self.assertEqual(t.state, 'FINISHED', msg=message)

    def assertTaskFailure(self, tid):
        """Wait for the task to end and assert it did NOT finish cleanly."""
        t = self.tasks[tid]
        if not t.ended.wait(self.task_timeout):
            self.fail('Task {0} timed out'.format(tid))
        self.assertNotEqual(t.state, 'FINISHED', msg=t.message)

    def assertSeenEvent(self, name, func=None):
        pass

    def skip(self, reason):
        raise unittest.SkipTest(str(reason))

    def getTaskResult(self, tid):
        return self.tasks[tid].result

    def on_event(self, name, args):
        """Event callback: update the tracked TaskState for task.updated /
        task.progress events; unknown EXECUTING tasks are ignored."""
        with self.tasks_lock:
            if name == 'task.updated':
                if args['id'] not in self.tasks.keys():
                    if args['state'] == 'EXECUTING':
                        return
                else:
                    t = self.tasks[args['id']]
                    t.state = args['state']
                    if t.state in ('FINISHED', 'FAILED'):
                        t.result = args['result'] if 'result' in args else None
                        t.ended.set()
            elif name == 'task.progress':
                if args['id'] not in self.tasks.keys():
                    if args['state'] == 'EXECUTING':
                        return
                else:
                    t = self.tasks[args['id']]
                    t.message = args['message']

    def pretty_print(self, res):
        """Dump res as indented JSON when the suite runs verbosely (-v)."""
        if '-v' in sys.argv:
            print(json.dumps(res, indent=4, sort_keys=True))

    def query_task(self, tid):
        """Fetch and print a task's error record from the server.

        Makes tests very slow; keep for debugging only.
        """
        query = self.conn.call_sync('task.query', [('id', '=', tid)])
        message = query[0]['error']
        self.pretty_print(message)
class Context(object):
    """debugd daemon context: maintains an outbound support-tunnel (msock)
    connection to the support proxy plus a local dispatcher connection, and
    tracks connection state under a condition variable."""

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()          # multiplexed socket to the support proxy
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1                            # msock channel used for RPC
        self.connection_id = None                   # uuid identifying this support session
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None                       # background connect/keepalive thread
        self.connected_at = None
        self.cv = Condition()                       # guards self.state transitions
        self.rpc = RpcContext()
        self.client = Client()                      # RPC client over the msock channel
        self.server = Server()                      # local unix-socket control server
        self.middleware_endpoint = None             # dispatcher Client; see init_dispatcher()

    def start(self, configpath, sockpath):
        """Read config and start the local control server; SIGUSR2 triggers
        an on-demand support connection."""
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance('control', ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name='server thread', daemon=True).start()

    def init_dispatcher(self):
        """Create the dispatcher client; reconnects automatically on
        connection loss or logout."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        """Connect to the local dispatcher and register debugd's management
        service; retries forever, once per second."""
        while True:
            try:
                self.middleware_endpoint.connect('unix:')
                self.middleware_endpoint.login_service('debugd')
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service('debugd.management', ControlService(self))
                self.middleware_endpoint.resume_service('debugd.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        """Load the JSON config file; any read/parse error is fatal."""
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal('Cannot open config file: {0}'.format(str(err)))
            self.logger.fatal('Exiting.')
            sys.exit(1)

    def connect(self, discard=False):
        """Kick off the background support connection; discard=True forces a
        fresh connection id instead of resuming the previous session."""
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        """Connection loop: establish the msock tunnel, log in to the support
        proxy, then sleep until the connection is LOST (retry after 10 s) or
        OFFLINE (stop)."""
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info('Connecting to {0}'.format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                self.client = Client()
                self.client.connect('fd://', fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service('debug', DebugService(self))
                self.client.call_sync('server.login', str(self.connection_id), socket.gethostname(), get_version(), 'none')
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                self.logger.warning('Failed to initiate support connection: {0}'.format(err), exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    # Block until the state machine leaves CONNECTED.
                    self.cv.wait_for(lambda: self.state in (ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning('Support connection lost, retrying in 10 seconds')
            time.sleep(10)

    def disconnect(self):
        """Deliberately tear down the support connection and go OFFLINE."""
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        # Remote side dropped the tunnel -> LOST wakes connect_keepalive.
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        """Adopt and start a debug job in this context."""
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        """Transition the connection state and wake all waiters."""
        with self.cv:
            self.state = state
            self.cv.notify_all()
class RESTApi(object):
    """Falcon-based WSGI application exposing middleware RPCs and tasks as a
    REST API, with metadata pulled from the dispatcher's discovery service."""

    def __init__(self):
        self.logger = logging.getLogger('restd')
        self._cruds = []
        self._threads = []
        self._rpcs = {}                 # 'service.method' -> method metadata
        self._schemas = {}
        self._used_schemas = set()
        self._services = {}             # service name -> list of method dicts
        self._tasks = {}
        self.api = falcon.API(middleware=[
            AuthMiddleware(),
            JSONTranslator(),
        ])
        self.api.add_route('/', SwaggerResource(self))
        gevent.signal(signal.SIGINT, self.die)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnects automatically
        on connection loss or logout."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.dispatcher = Client()
        self.dispatcher.on_error(on_error)
        self.connect()

    def init_metadata(self):
        """Fetch task, schema and per-service RPC metadata via discovery."""
        self._tasks = self.dispatcher.call_sync('discovery.get_tasks')
        self._schemas = self.dispatcher.call_sync('discovery.get_schema')
        for service in self.dispatcher.call_sync('discovery.get_services'):
            self._services[service] = self.dispatcher.call_sync(
                'discovery.get_methods', service)
            for method in self._services[service]:
                self._rpcs['{0}.{1}'.format(service, method['name'])] = method

    def load_plugins(self):
        """Import each *.py file from ../plugins and run its _init(self)
        hook; a failed import is logged and re-raised (fatal)."""
        pluginsdir = os.path.realpath(
            os.path.join(os.path.dirname(__file__), '..', 'plugins'))
        for i in glob.glob1(pluginsdir, "*.py"):
            try:
                loader = importlib.machinery.SourceFileLoader(
                    i.split('.')[0], os.path.join(pluginsdir, i))
                mod = loader.load_module()
            except:
                self.logger.error('Failed to load plugin %s', i, exc_info=True)
                raise

            mod._init(self)

    def connect(self):
        """Connect to the local dispatcher; retries forever, once per second."""
        while True:
            try:
                self.dispatcher.connect('unix:')
                self.dispatcher.login_service('restd')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def __call__(self, environ, start_response):
        # Behind the reverse proxy (X-Real-IP present) strip the public
        # '/api/v2.0' prefix before falcon routing.
        if 'HTTP_X_REAL_IP' in environ:
            environ['PATH_INFO'] = environ.get('PATH_INFO', '').replace('/api/v2.0', '', 1)
        return self.api.__call__(environ, start_response)

    def register_crud(self, klass):
        """Instantiate and retain a CRUD resource bound to the dispatcher."""
        ins = klass(self, self.dispatcher)
        self._cruds.append(ins)

    def register_singleitem(self, klass):
        klass(self, self.dispatcher)

    def register_resource(self, klass):
        klass(self)

    def run(self):
        """Wire everything up, then serve HTTP on port 8889 until killed."""
        self.init_dispatcher()
        self.init_metadata()
        self.load_plugins()
        server4 = WSGIServer(('0.0.0.0', 8889), self, handler_class=RESTWSGIHandler)
        self._threads = [gevent.spawn(server4.serve_forever)]
        checkin()
        gevent.joinall(self._threads)

    def die(self, *args):
        gevent.killall(self._threads)
        sys.exit(0)
class Context(object):
    """schedulerd daemon context: runs calendar tasks from an APScheduler
    backed by the 'calendar_tasks' MongoDB collection, submitting each run to
    the dispatcher as a task."""

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None              # path to the middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None              # dispatcher Client; created in init_dispatcher()
        self.scheduler = None           # APScheduler BackgroundScheduler
        self.active_tasks = {}          # calendar job id -> running dispatcher task id

    def init_datastore(self):
        """Open the datastore and config store; exit the process on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnects automatically
        on connection loss or logout."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start a UTC-based background scheduler persisting its jobs in the
        'freenas.calendar_tasks' MongoDB collection."""
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def register_schemas(self):
        """Publish the calendar-task JSON schemas to the dispatcher."""
        self.client.register_schema('calendar-task', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'id': {'type': 'string'},
                'name': {'type': 'string'},
                'args': {'type': 'array'},
                'description': {'type': 'string'},
                'enabled': {'type': 'boolean'},
                'hidden': {'type': 'boolean'},
                'protected': {'type': 'boolean'},
                'status': {'$ref': 'calendar-task-status'},
                'schedule': {
                    'type': 'object',
                    'additionalProperties': False,
                    'properties': {
                        'coalesce': {'type': ['boolean', 'integer', 'null']},
                        'year': {'type': ['string', 'integer', 'null']},
                        'month': {'type': ['string', 'integer', 'null']},
                        'day': {'type': ['string', 'integer', 'null']},
                        'week': {'type': ['string', 'integer', 'null']},
                        'day_of_week': {'type': ['string', 'integer', 'null']},
                        'hour': {'type': ['string', 'integer', 'null']},
                        'minute': {'type': ['string', 'integer', 'null']},
                        'second': {'type': ['string', 'integer', 'null']},
                        'timezone': {'type': ['string', 'null']}
                    }
                }
            }
        })

        self.client.register_schema('calendar-task-status', {
            'type': 'object',
            'properties': {
                'next_run_time': {'type': 'string'},
                'last_run_status': {'type': 'string'},
                'current_run_status': {'type': ['string', 'null']},
                'current_run_progress': {'type': ['object', 'null']}
            }
        })

    def connect(self):
        """Connect to the dispatcher over the local unix socket and register
        schedulerd's RPC services; retries forever, once per second."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Scheduler callback: submit the task, wait for it to finish, emit a
        CRITICAL alert on failure (best effort) and record the run."""
        tid = self.client.submit_task(*args)
        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alerts.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(kwargs['name'], result['error']['message']),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def main(self):
        """Daemon entry point; note the '-f' flag is parsed but currently
        unused. Blocks forever on the dispatcher connection."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.register_schemas()
        self.client.wait_forever()
class Context(object):
    """Task executor process context: receives task descriptors from the
    dispatcher over a one-slot queue, loads the task class from its source
    file and runs it, reporting status (and rollback on failure) back."""

    def __init__(self):
        self.service = TaskProxyService(self)
        self.task = queue.Queue(1)          # at most one task in flight
        self.datastore = None
        self.configstore = None
        self.conn = None                    # dispatcher Client
        self.instance = None                # currently running task instance
        self.running = Event()              # set while instance.run() executes

    def put_status(self, state, result=None, exception=None):
        """Report task state back to the dispatcher, attaching the result
        and/or a serialized exception when provided."""
        obj = {
            'status': state,
            'result': None
        }

        if result is not None:
            obj['result'] = result

        if exception is not None:
            obj['error'] = serialize_error(exception)

        self.conn.call_sync('task.put_status', obj)

    def main(self):
        """Process entry point: argv[1] is the check-in key. Connects to the
        dispatcher, then loops forever executing one task at a time."""
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        logging.basicConfig(level=logging.DEBUG)
        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                task = self.task.get()
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                if task['debugger']:
                    # Optional remote-debugging hook (pydev).
                    sys.path.append('/usr/local/lib/dispatcher/pydev')
                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                module = load_module_from_file(name, task['filename'])
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                try:
                    self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.environment = task['environment']
                    self.running.set()
                    result = self.instance.run(*task['args'])
                except BaseException as err:
                    print("Task exception: {0}".format(str(err)), file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    # Best-effort rollback before reporting the failure.
                    if hasattr(self.instance, 'rollback'):
                        self.put_status('ROLLBACK')
                        try:
                            self.instance.rollback(*task['args'])
                        except BaseException as rerr:
                            print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                            traceback.print_exc(file=sys.stderr)

                    self.put_status('FAILED', exception=err)
                else:
                    self.put_status('FINISHED', result=result)
                finally:
                    self.running.clear()
            except RpcException as err:
                print("RPC failed: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.EBADMSG)
            except socket.error as err:
                print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.ETIMEDOUT)

            if task['debugger']:
                import pydevd
                pydevd.stoptrace()

            setproctitle.setproctitle('task executor (idle)')
class RESTApi(object):
    """WSGI application exposing dispatcher RPC services and tasks over REST.

    Wraps a falcon API behind gevent's WSGIServer, discovers available
    services/tasks/schemas from the dispatcher at startup, and loads CRUD
    plugins from the sibling 'plugins' directory.
    """

    def __init__(self):
        self.logger = logging.getLogger('restd')
        self._cruds = []
        self._threads = []
        self._rpcs = {}
        self._schemas = {}
        self._used_schemas = set()
        self._services = {}
        self._tasks = {}
        self.api = falcon.API(middleware=[
            AuthMiddleware(),
            JSONTranslator(),
        ])
        self.api.add_route('/', SwaggerResource(self))
        gevent.signal(signal.SIGINT, self.die)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on connection loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.dispatcher = Client()
        self.dispatcher.on_error(on_error)
        self.connect()

    def init_metadata(self):
        """Pull task, schema and per-service method metadata from the dispatcher."""
        self._tasks = self.dispatcher.call_sync('discovery.get_tasks')
        self._schemas = self.dispatcher.call_sync('discovery.get_schema')
        for service in self.dispatcher.call_sync('discovery.get_services'):
            self._services[service] = self.dispatcher.call_sync('discovery.get_methods', service)
            for method in self._services[service]:
                # Index methods by fully-qualified 'service.method' name.
                self._rpcs['{0}.{1}'.format(service, method['name'])] = method

    def load_plugins(self):
        """Load every *.py plugin from ../plugins and call its _init(self) hook.

        :raises Exception: re-raised after logging if a plugin fails to load
        """
        pluginsdir = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'plugins'))
        for i in glob.glob1(pluginsdir, "*.py"):
            try:
                loader = importlib.machinery.SourceFileLoader(i.split('.')[0], os.path.join(pluginsdir, i))
                mod = loader.load_module()
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt before re-raising; narrow to Exception.
            except Exception:
                self.logger.error('Failed to load plugin %s', i, exc_info=True)
                raise

            mod._init(self)

    def connect(self):
        """Connect (and log in) to the dispatcher, retrying forever on failure."""
        while True:
            try:
                self.dispatcher.connect('unix:')
                self.dispatcher.login_service('restd')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def __call__(self, environ, start_response):
        """WSGI entry point; delegates to the falcon app.

        When the request came through the reverse proxy (X-Real-IP set),
        strip the '/api/v2.0' prefix the proxy leaves in PATH_INFO.
        """
        if 'HTTP_X_REAL_IP' in environ:
            environ['PATH_INFO'] = environ.get('PATH_INFO', '').replace('/api/v2.0', '', 1)
        return self.api.__call__(environ, start_response)

    def register_crud(self, klass):
        """Instantiate a CRUD resource class and keep it alive in self._cruds."""
        ins = klass(self, self.dispatcher)
        self._cruds.append(ins)

    def register_singleitem(self, klass):
        """Instantiate a single-item resource class (registers itself on construction)."""
        klass(self, self.dispatcher)

    def register_resource(self, klass):
        """Instantiate a plain resource class (registers itself on construction)."""
        klass(self)

    def run(self):
        """Initialize everything and serve HTTP on port 8889 until killed."""
        self.init_dispatcher()
        self.init_metadata()
        self.load_plugins()
        server4 = WSGIServer(('', 8889), self, handler_class=RESTWSGIHandler)
        self._threads = [gevent.spawn(server4.serve_forever)]
        checkin()
        gevent.joinall(self._threads)

    def die(self, *args):
        """SIGINT handler: stop the server greenlets and exit cleanly."""
        gevent.killall(self._threads)
        sys.exit(0)
class Context(object):
    """schedulerd daemon context.

    Runs calendar tasks via APScheduler, submitting each run to the
    dispatcher as a task and recording the outcome in the datastore.
    """

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        # Maps calendar job id -> dispatcher task id while a run is active.
        self.active_tasks = {}

    def init_datastore(self):
        """Open the datastore from the config file; exit on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on connection loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start a background APScheduler backed by the 'calendar_tasks' collection."""
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        """Connect to the dispatcher and register our RPC services, retrying forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """APScheduler job callback: submit a dispatcher task and wait for it.

        args[0] is the task name, args[1:] its arguments; kwargs carries
        the calendar job 'id' and human-readable 'name'. Emits a CRITICAL
        alert when the task does not finish cleanly, and always records
        the run in 'schedulerd.runs'.
        """
        tid = self.client.call_sync('task.submit_with_env', args[0], args[1:], {
            'RUN_AS_USER': '******',
            'CALENDAR_TASK_NAME': kwargs.get('name')
        })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)

        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']
                    ),
                })
            # The bound name was unused ('as e'); exc_info captures it.
            except RpcException:
                self.logger.error('Failed to emit alert', exc_info=True)

        # pop() instead of del: don't raise if the entry already vanished.
        self.active_tasks.pop(kwargs['id'], None)
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Daemon entry point: parse CLI args, wire everything up, block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')

        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.client.wait_forever()
class Context(object):
    """Execution context for an out-of-process task executor (newer variant).

    Compared with the older executor this adds a per-filename module
    cache, file-descriptor passing in task arguments, before/after/error
    task hooks, progress-event forwarding, and streaming RPC responses.
    """

    def __init__(self):
        # RPC service instance registered with the dispatcher as 'taskproxy'.
        self.service = TaskProxyService(self)
        # Bounded queue: at most one task may be queued for this executor.
        self.task = queue.Queue(1)
        self.datastore = None
        self.configstore = None
        self.conn = None
        # Task class instance currently being executed (None when idle).
        self.instance = None
        # Cache of loaded task modules, keyed by source filename.
        self.module_cache = {}
        # Set while a task's run()/rollback() is in progress.
        self.running = Event()

    def put_status(self, state, result=None, exception=None):
        """Report task state ('ROLLBACK', 'FAILED', 'FINISHED') to the dispatcher."""
        obj = {'status': state, 'result': None}
        if result is not None:
            obj['result'] = result

        if exception is not None:
            obj['error'] = serialize_error(exception)

        self.conn.call_sync('task.put_status', obj)

    def task_progress_handler(self, args):
        """Forward 'task.progress' events to the running task instance, if any."""
        if self.instance:
            self.instance.task_progress_handler(args)

    def collect_fds(self, obj):
        """Recursively yield every FileDescriptor found in nested dicts/lists/tuples.

        Non-container leaves that are not FileDescriptors yield nothing.
        """
        if isinstance(obj, dict):
            for v in obj.values():
                if isinstance(v, FileDescriptor):
                    yield v
                else:
                    yield from self.collect_fds(v)

        if isinstance(obj, (list, tuple)):
            for o in obj:
                if isinstance(o, FileDescriptor):
                    yield o
                else:
                    yield from self.collect_fds(o)

    def close_fds(self, fds):
        """Best-effort close of the given FileDescriptor objects' raw fds."""
        for i in fds:
            try:
                os.close(i.fd)
            except OSError:
                pass

    def run_task_hooks(self, instance, task, type, **extra_env):
        """Run all hooks of the given type ('before'/'after'/'error') as subtasks.

        A hook's optional condition callable gates execution; errors while
        evaluating the condition skip that hook rather than failing the task.
        """
        for hook, props in task['hooks'].get(type, {}).items():
            try:
                if props['condition'] and not props['condition'](*task['args']):
                    continue
            except BaseException as err:
                print(err)
                continue

            instance.join_subtasks(instance.run_subtask(hook, *task['args'], **extra_env))

    def main(self):
        """Entry point: connect to the dispatcher, check in, then loop executing tasks.

        Expects exactly one CLI argument: the check-in key handed out by
        the dispatcher when it spawned this executor process.
        """
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        configure_logging(None, logging.DEBUG)

        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.call_sync('management.enable_features', ['streaming_responses'])
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.register_event_handler('task.progress', self.task_progress_handler)
        # Tell the dispatcher this executor is alive and ready for work.
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                # Blocks until the dispatcher hands us a task descriptor.
                task = self.task.get()
                # Mirror the dispatcher's current logging level per task.
                logging.root.setLevel(self.conn.call_sync('management.get_logging_level'))
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                if task['debugger']:
                    # Attach to a remote pydev debugger at (host, port).
                    sys.path.append('/usr/local/lib/dispatcher/pydev')
                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                # Reuse a previously loaded module for the same task file.
                module = self.module_cache.get(task['filename'])
                if not module:
                    module = load_module_from_file(name, task['filename'])
                    self.module_cache[task['filename']] = module

                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))
                # Snapshot fds passed in the arguments so they can be
                # closed after the task, success or failure.
                fds = list(self.collect_fds(task['args']))

                try:
                    dispatcher = DispatcherWrapper(self.conn)
                    self.instance = getattr(module, task['class'])(dispatcher, self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.user = task['user']
                    self.instance.environment = task['environment']
                    self.running.set()
                    self.run_task_hooks(self.instance, task, 'before')
                    result = self.instance.run(*task['args'])
                    self.run_task_hooks(self.instance, task, 'after', result=result)
                except BaseException as err:
                    # BaseException is deliberate: any failure in task code
                    # must be reported, never allowed to kill the loop silently.
                    print("Task exception: {0}".format(str(err)), file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    if hasattr(self.instance, 'rollback'):
                        self.put_status('ROLLBACK')
                        try:
                            self.instance.rollback(*task['args'])
                        except BaseException as rerr:
                            print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                            traceback.print_exc(file=sys.stderr)

                    # Main task is already failed at this point, so ignore hook errors
                    with contextlib.suppress(RpcException):
                        self.run_task_hooks(self.instance, task, 'error', error=serialize_error(err))

                    self.put_status('FAILED', exception=err)
                else:
                    self.put_status('FINISHED', result=result)
                finally:
                    self.close_fds(fds)
                    self.running.clear()

            except RpcException as err:
                # Status reporting itself failed — nothing left to do here.
                print("RPC failed: {0}".format(str(err)), file=sys.stderr)
                print(traceback.format_exc(), flush=True)
                sys.exit(errno.EBADMSG)
            except socket.error as err:
                print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.ETIMEDOUT)

            if task['debugger']:
                import pydevd
                pydevd.stoptrace()

            setproctitle.setproctitle('task executor (idle)')
class Context(object):
    """debugd daemon context.

    Maintains an outbound msock connection to the support proxy, exposes
    a local control socket, and keeps a dispatcher connection. Connection
    state transitions are coordinated through a condition variable.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        # Multiplexed socket to the support proxy; channel 0 carries RPC.
        self.msock = msock.client.Client()
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1
        # Stable id for this support session; regenerated on connect(discard=True).
        self.connection_id = None
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        # Background thread running connect_keepalive().
        self.keepalive = None
        self.connected_at = None
        # Guards self.state; keepalive loop waits on it for LOST/OFFLINE.
        self.cv = Condition()
        self.rpc = RpcContext()
        self.client = Client()
        self.server = Server()
        self.middleware_endpoint = None

    def start(self, configpath, sockpath):
        """Read config, start the local control RPC server, install SIGUSR2 handler.

        SIGUSR2 triggers an (re)connect to the support proxy.
        """
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance("control", ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name="server thread", daemon=True).start()

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on connection loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning("Connection to dispatcher lost")
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        """Connect to the dispatcher and register the management service, retrying forever."""
        while True:
            try:
                self.middleware_endpoint.connect("unix:")
                self.middleware_endpoint.login_service("debugd")
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service("debugd.management", ControlService(self))
                self.middleware_endpoint.resume_service("debugd.management")
                return
            except (OSError, RpcException) as err:
                self.logger.warning("Cannot connect to dispatcher: {0}, retrying in 1 second".format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        """Load the JSON config file into self.config; exit the process on failure."""
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal("Cannot open config file: {0}".format(str(err)))
            self.logger.fatal("Exiting.")
            sys.exit(1)

    def connect(self, discard=False):
        """Start the keepalive thread for the support connection.

        :param discard: when True, forget the previous connection id so a
            fresh one is generated on the next connect attempt.
        """
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        """Keepalive loop: connect to the support proxy, then wait until the link drops.

        Returns only when the state becomes OFFLINE (explicit disconnect);
        a LOST state triggers a reconnect after 10 seconds.
        """
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info("Connecting to {0}".format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                self.client = Client()
                self.client.connect("fd://", fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service("debug", DebugService(self))
                self.client.call_sync(
                    "server.login",
                    str(self.connection_id),
                    socket.gethostname(),
                    get_version(),
                    "none"
                )
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                # Catch everything: a failed attempt must loop, not kill the thread.
                self.logger.warning("Failed to initiate support connection: {0}".format(err), exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    # Block here while connected; woken by set_state().
                    self.cv.wait_for(lambda: self.state in (ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning("Support connection lost, retrying in 10 seconds")
            time.sleep(10)

    def disconnect(self):
        """Tear down the support connection and abandon all running jobs."""
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        """msock close callback: mark the connection LOST so keepalive retries."""
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        """Track and start a support job bound to this context."""
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        """Atomically update connection state and wake any waiters on the CV."""
        with self.cv:
            self.state = state
            self.cv.notify_all()