def test_unix_server(self):
    # End-to-end test of the RPC server/client pair over a UNIX domain socket.
    # The socket path is namespaced by PID so parallel runs do not collide.
    sockpath = os.path.join(os.getcwd(), 'test.{0}.sock'.format(os.getpid()))
    sockurl = 'unix://' + sockpath
    context = RpcContext()
    context.register_service('test', TestService)
    server = Server()
    server.rpc = context
    server.start(sockurl)
    threading.Thread(target=server.serve_forever, daemon=True).start()

    # Spin until server is ready
    while not os.path.exists(sockpath):
        time.sleep(0.1)

    client = Client()
    client.connect(sockurl)
    self.assertTrue(client.connected)
    self.assertEqual(client.call_sync('test.hello', 'freenas'), 'Hello World, freenas')
    client.disconnect()
    server.close()
    os.unlink(sockpath)
def main(name, *args):
    """Forward a UPS notification to the dispatcher as a service event.

    The notification type is taken from the NOTIFYTYPE environment variable
    (set by upsmon when invoking this script).
    """
    client = Client()
    client.connect('127.0.0.1')
    client.login_service('ups')
    payload = {
        'name': name,
        'type': os.environ['NOTIFYTYPE'],
    }
    client.emit_event('service.ups.signal', payload)
    client.disconnect()
def main(*args):
    """Sendmail-compatible entry point: read a message from stdin and submit
    it to the dispatcher's mail.send service.

    Understands sendmail's -i and -t flags; any remaining positional
    arguments are treated as recipient addresses.
    """
    connection = Client()
    connection.connect('127.0.0.1')
    connection.login_service('smtp')

    parser = argparse.ArgumentParser(description='Process email')
    parser.add_argument('-i', dest='strip_leading_dot', action='store_false',
                        default=True, help='see sendmail(8) -i')
    parser.add_argument('-t', dest='parse_recipients', action='store_true',
                        default=False, help='parse recipients from message')
    # Rewrite the auto-generated usage string so trailing addresses show up.
    parser.usage = ' '.join(parser.format_usage().split(' ')[1:-1])
    parser.usage += ' [email_addr|user] ..'
    args, to_addrs = parser.parse_known_args()
    if not to_addrs and not args.parse_recipients:
        parser.exit(message=parser.format_usage())

    msg = sys.stdin.read()
    em_parser = email.parser.Parser()
    em = em_parser.parsestr(msg)
    if args.parse_recipients:
        # Strip away the comma based delimiters and whitespace.
        # list() is required: map() is lazy on Python 3 and the result is
        # indexed below.  Default '' guards against a missing To: header.
        to_addrs = list(map(str.strip, em.get('To', '').split(',')))
        if not to_addrs or not to_addrs[0]:
            to_addrs = ['root']

    margs = {}
    margs['extra_headers'] = dict(em)
    margs['extra_headers'].update({
        'X-Mailer': 'FreeNAS',
        'X-FreeNAS-Host': socket.gethostname(),
    })
    margs['subject'] = em.get('Subject')

    if em.is_multipart():
        # Materialize the filter so the payload is a concrete, serializable
        # list instead of a one-shot lazy iterator.
        margs['attachments'] = list(filter(
            lambda part: part.get_content_maintype() != 'multipart',
            em.walk()
        ))
        margs['message'] = (
            'This is a MIME formatted message. If you see '
            'this text it means that your email software '
            'does not support MIME formatted messages.')
    else:
        margs['message'] = ''.join(email.iterators.body_line_iterator(em))

    if to_addrs:
        margs['to'] = to_addrs

    connection.call_sync('mail.send', margs)
    connection.disconnect()
def main(*args):
    """Sendmail-compatible entry point: read a message from stdin and submit
    it to the dispatcher's mail.send service.

    Understands sendmail's -i and -t flags; any remaining positional
    arguments are treated as recipient addresses.
    """
    connection = Client()
    connection.connect("127.0.0.1")
    connection.login_service("smtp")

    parser = argparse.ArgumentParser(description="Process email")
    parser.add_argument("-i", dest="strip_leading_dot", action="store_false",
                        default=True, help="see sendmail(8) -i")
    parser.add_argument(
        "-t", dest="parse_recipients", action="store_true", default=False,
        help="parse recipients from message"
    )
    # Rewrite the auto-generated usage string so trailing addresses show up.
    parser.usage = " ".join(parser.format_usage().split(" ")[1:-1])
    parser.usage += " [email_addr|user] .."
    args, to_addrs = parser.parse_known_args()
    if not to_addrs and not args.parse_recipients:
        parser.exit(message=parser.format_usage())

    msg = sys.stdin.read()
    em_parser = email.parser.Parser()
    em = em_parser.parsestr(msg)
    if args.parse_recipients:
        # Strip away the comma based delimiters and whitespace.
        # list() is required: map() is lazy on Python 3 and the result is
        # indexed below.  Default "" guards against a missing To: header.
        to_addrs = list(map(str.strip, em.get("To", "").split(",")))
        if not to_addrs or not to_addrs[0]:
            to_addrs = ["root"]

    margs = {}
    margs["extra_headers"] = dict(em)
    margs["extra_headers"].update({"X-Mailer": "FreeNAS", "X-FreeNAS-Host": socket.gethostname()})
    margs["subject"] = em.get("Subject")

    if em.is_multipart():
        # Materialize the filter so the payload is a concrete, serializable
        # list instead of a one-shot lazy iterator.
        margs["attachments"] = list(filter(lambda part: part.get_content_maintype() != "multipart", em.walk()))
        margs["message"] = (
            "This is a MIME formatted message. If you see "
            "this text it means that your email software "
            "does not support MIME formatted messages."
        )
    else:
        margs["message"] = "".join(email.iterators.body_line_iterator(em))

    if to_addrs:
        margs["to"] = to_addrs

    connection.call_sync("mail.send", margs)
    connection.disconnect()
class BaseTestCase(unittest.TestCase):
    """Base class for integration tests that talk to a dispatcher host.

    ``self.context`` must be injected by the test harness before setUp()
    runs; it supplies the hostname, credentials and a connected SSH client.
    """

    def __init__(self, methodName):
        super(BaseTestCase, self).__init__(methodName)
        # Assigned externally by the test runner before setUp().
        self.context = None

    def setUp(self):
        super(BaseTestCase, self).setUp()
        assert self.context is not None
        self.ssh_client = self.context.ssh_client
        self.client = Client()
        self.client.connect('ws://{0}'.format(self.context.hostname))
        self.client.login_user(self.context.username, self.context.password)
        load_schema_definitions(self.client)

    def tearDown(self):
        self.client.disconnect()

    def ssh_exec(self, command, output=False):
        # Run a command on the remote host and wait for its exit status;
        # optionally also return captured stdout and stderr.
        _, stdout, stderr = self.ssh_client.exec_command(command)
        exitcode = stdout.channel.recv_exit_status()
        if output:
            return exitcode, stdout.read(), stderr.read()

        return exitcode

    def get_params_schema(self, method):
        return get_methods(self.client, method).get('params-schema')

    def get_result_schema(self, method):
        return get_methods(self.client, method).get('results-schema')

    def assertConformsToSchema(self, obj, schema, strict=False):
        errors = verify_schema(schema, obj, strict)
        if errors:
            raise AssertionError(
                'Object {0} does not match {1} schema. Errors: {2}'.format(
                    obj, schema, errors))

    def assertConformsToNamedSchema(self, obj, schema_name, strict=False):
        # Look the schema up by name first so an unknown name fails loudly.
        schema = get_schema(schema_name)
        if not schema:
            raise AssertionError('Schema {0} is unknown'.format(schema_name))

        self.assertConformsToSchema(obj, schema, strict)
class BaseTestCase(unittest.TestCase):
    """Shared plumbing for dispatcher integration tests.

    A ``context`` object (hostname, credentials, SSH client) has to be
    assigned by the harness before ``setUp`` executes.
    """

    def __init__(self, methodName):
        super(BaseTestCase, self).__init__(methodName)
        self.context = None

    def setUp(self):
        super(BaseTestCase, self).setUp()
        assert self.context is not None
        self.ssh_client = self.context.ssh_client
        self.client = Client()
        self.client.connect('ws://{0}'.format(self.context.hostname))
        self.client.login_user(self.context.username, self.context.password)
        load_schema_definitions(self.client)

    def tearDown(self):
        self.client.disconnect()

    def ssh_exec(self, command, output=False):
        # Execute a command over SSH and wait for completion.
        _, out, err = self.ssh_client.exec_command(command)
        status = out.channel.recv_exit_status()
        if not output:
            return status
        return status, out.read(), err.read()

    def get_params_schema(self, method):
        methods = get_methods(self.client, method)
        return methods.get('params-schema')

    def get_result_schema(self, method):
        methods = get_methods(self.client, method)
        return methods.get('results-schema')

    def assertConformsToSchema(self, obj, schema, strict=False):
        errors = verify_schema(schema, obj, strict)
        if errors:
            detail = 'Object {0} does not match {1} schema. Errors: {2}'.format(obj, schema, errors)
            raise AssertionError(detail)

    def assertConformsToNamedSchema(self, obj, schema_name, strict=False):
        schema = get_schema(schema_name)
        if not schema:
            raise AssertionError('Schema {0} is unknown'.format(schema_name))
        self.assertConformsToSchema(obj, schema, strict)
def test_back_to_back(self):
    # Connect two clients back-to-back over a socketpair; c1 doubles as a
    # standalone RPC server that c2 calls into.
    a, b = socket.socketpair()
    self.assertGreaterEqual(a.fileno(), 0)
    self.assertGreaterEqual(b.fileno(), 0)

    c1 = Client()
    c1.standalone_server = True
    c1.enable_server()
    c1.register_service('test', TestService())
    c1.connect('fd://{0}'.format(a.fileno()))
    self.assertTrue(c1.connected)

    c2 = Client()
    c2.connect('fd://{0}'.format(b.fileno()))
    self.assertTrue(c2.connected)
    self.assertEqual(c2.call_sync('test.hello', 'freenas'), 'Hello World, freenas')

    # Tear down both ends and the underlying descriptors.
    c2.disconnect()
    a.close()
    c1.disconnect()
    b.close()
class LogdLogHandler(logging.Handler):
    """logging.Handler that forwards log records to the logd service via RPC.

    The connection is re-established lazily inside emit() if it was lost;
    failures during emission are routed to logging's handleError() hook.
    """

    def __init__(self, level=logging.NOTSET, address=None, ident=None):
        super(LogdLogHandler, self).__init__(level)
        self.address = address or 'unix:///var/run/logd.sock'
        self.ident = ident or os.path.basename(sys.executable)
        self.client = Client()
        self.client.connect(self.address)

    def emit(self, record):
        try:
            if not self.client.connected:
                self.client.connect(self.address)

            item = {
                'timestamp': datetime.utcfromtimestamp(record.created),
                'priority': PRIORITY_MAP.get(record.levelno, 'INFO'),
                'message': record.getMessage(),
                'identifier': self.ident,
                'thread': record.threadName,
                'tid': record.thread,
                'module_name': record.name,
                'source_language': 'python',
                'source_file': record.pathname,
                'source_line': record.lineno,
            }

            if record.exc_info:
                item['exception'] = ''.join(
                    traceback.format_exception(*record.exc_info))

            self.client.call_async('logd.logging.push', None, item)
        except Exception:
            # Was a bare except: narrowed so KeyboardInterrupt/SystemExit
            # propagate; logging failures still go through handleError().
            self.handleError(record)

    def close(self):
        super(LogdLogHandler, self).close()
        self.client.disconnect()
class LogdLogHandler(logging.Handler):
    """Forward log records to the logd daemon over its RPC socket.

    Reconnects on demand in emit(); any error while emitting is handed to
    the standard logging handleError() machinery.
    """

    def __init__(self, level=logging.NOTSET, address=None, ident=None):
        super(LogdLogHandler, self).__init__(level)
        self.address = address or 'unix:///var/run/logd.sock'
        self.ident = ident or os.path.basename(sys.executable)
        self.client = Client()
        self.client.connect(self.address)

    def emit(self, record):
        try:
            if not self.client.connected:
                self.client.connect(self.address)

            item = {
                'timestamp': datetime.utcfromtimestamp(record.created),
                'priority': PRIORITY_MAP.get(record.levelno, 'INFO'),
                'message': record.getMessage(),
                'identifier': self.ident,
                'thread': record.threadName,
                'tid': record.thread,
                'module_name': record.name,
                'source_language': 'python',
                'source_file': record.pathname,
                'source_line': record.lineno,
            }

            if record.exc_info:
                item['exception'] = ''.join(traceback.format_exception(*record.exc_info))

            self.client.call_async('logd.logging.push', None, item)
        except Exception:
            # Previously a bare except: that also caught KeyboardInterrupt
            # and SystemExit; narrowed to Exception.
            self.handleError(record)

    def close(self):
        super(LogdLogHandler, self).close()
        self.client.disconnect()
class Context(object):
    # serviced daemon state: owns the local RPC server, the dispatcher client
    # connection, the job table and the kqueue used for PID tracking.

    def __init__(self):
        self.server = None
        self.client = None
        self.jobs = {}
        self.provides = set()
        self.lock = RLock()
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management', ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        # Idempotent: keep the current connection if it is still alive.
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            # Transparently reconnect when the dispatcher drops us.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        # 0o777 socket permissions: any local user may talk to serviced.
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        # Record newly available dependency targets and start any stopped job
        # whose requirements are now fully satisfied.
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(
                ', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            # Small delay lets related targets arrive together.
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        # Main kqueue loop: dispatch PROC events to known jobs and adopt
        # unknown forked children as anonymous jobs.
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                # Child already gone; nothing to adopt.
                                continue

                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(
                                    job.label))

    def track_pid(self, pid):
        # Watch exit/exec/fork for pid; NOTE_TRACK follows its children too.
        ev = select.kevent(
            pid, select.KQ_FILTER_PROC,
            select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK | select.KQ_NOTE_TRACK,
            0, 0)
        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        ev = select.kevent(pid, select.KQ_FILTER_PROC, select.KQ_EV_DELETE, 0, 0, 0)
        # The kevent may already be gone if the process exited.
        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        # Fan out to local subscribers and, when connected, upstream too.
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        # Retry until the dispatcher accepts us; resume our services afterwards.
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        # Launch the one-shot bootstrap job in the background.
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })
                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
def run(self, peer):
    # Pair this host with a remote replication peer: validate credentials,
    # exchange SSH keys over a temporary ws+ssh connection and create
    # matching peer entries on both ends, rolling back on remote failure.
    if self.datastore.exists('peers', ('address', '=', peer['address']), ('type', '=', 'replication')):
        raise TaskException(errno.EEXIST, 'Replication peer entry for {0} already exists'.format(peer['address']))

    if peer['credentials']['type'] != 'ssh':
        raise TaskException(errno.EINVAL, 'SSH credentials type is needed to perform replication peer pairing')

    remote = peer.get('address')
    credentials = peer['credentials']
    username = credentials.get('username')
    port = credentials.get('port', 22)
    password = credentials.get('password')

    if not username:
        raise TaskException(errno.EINVAL, 'Username has to be specified')

    if not remote:
        raise TaskException(errno.EINVAL, 'Address of remote host has to be specified')

    if not password:
        raise TaskException(errno.EINVAL, 'Password has to be specified')

    remote_client = Client()
    try:
        try:
            # Password-authenticated bootstrap connection; replication proper
            # uses the keys exchanged below.
            remote_client.connect('ws+ssh://{0}@{1}'.format(username, remote), port=port, password=password)
            remote_client.login_service('replicator')
        except (AuthenticationException, OSError, ConnectionRefusedError):
            raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

        local_keys = self.dispatcher.call_sync('peer.get_ssh_keys')
        remote_keys = remote_client.call_sync('peer.get_ssh_keys')
        # The remote end sees our address; first element of the sender tuple.
        ip_at_remote_side = remote_client.call_sync('management.get_sender_address').split(',', 1)[0]

        remote_host_key = remote + ' ' + remote_keys[0].rsplit(' ', 1)[0]
        local_host_key = ip_at_remote_side + ' ' + local_keys[0].rsplit(' ', 1)[0]

        local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

        if remote_client.call_sync('peer.query', [('name', '=', peer['name'])]):
            raise TaskException(errno.EEXIST, 'Peer entry {0} already exists at {1}'.format(peer['name'], remote))

        # Local entry describes the remote host.
        peer['credentials'] = {
            'pubkey': remote_keys[1],
            'hostkey': remote_host_key,
            'port': port,
            'type': 'replication'
        }

        self.join_subtasks(self.run_subtask(
            'peer.replication.create_local', peer
        ))

        # Mirror entry created on the remote side points back at us.
        peer['address'] = ip_at_remote_side
        peer['credentials'] = {
            'pubkey': local_keys[1],
            'hostkey': local_host_key,
            'port': local_ssh_config['port'],
            'type': 'replication'
        }

        id = self.datastore.query('peers', ('name', '=', peer['name']), select='id')
        try:
            call_task_and_check_state(
                remote_client,
                'peer.replication.create_local',
                peer
            )
        except TaskException:
            # Remote creation failed: roll back the local entry and re-raise.
            self.datastore.delete('peers', id)
            self.dispatcher.dispatch_event('peer.changed', {
                'operation': 'delete',
                'ids': [id]
            })
            raise
    finally:
        remote_client.disconnect()
class Main(object):
    # fnstatd daemon: collects statistics into an HDF5 database and serves
    # them over the dispatcher RPC connection.

    def __init__(self):
        self.client = None
        self.server = None
        self.datastore = None
        self.hdf = None
        self.hdf_group = None
        self.config = None
        self.logger = logging.getLogger('statd')
        self.data_sources = {}

    def init_datastore(self):
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def init_database(self):
        # adding this try/except till system-dataset plugin is added back in in full fidelity
        # just a hack (since that directory's data will not persist)
        # Please remove this when system-dataset plugin is added back in
        try:
            directory = self.client.call_sync(
                'system_dataset.request_directory', 'statd')
        except RpcException:
            directory = '/var/tmp/statd'
            if not os.path.exists(directory):
                os.makedirs(directory)

        self.hdf = tables.open_file(os.path.join(directory, DEFAULT_DBFILE), mode='a')
        if not hasattr(self.hdf.root, 'stats'):
            self.hdf.create_group('/', 'stats')

        self.hdf_group = self.hdf.root.stats

    def request_table(self, name):
        # Return the existing HDF table for `name`, creating it on demand.
        try:
            if hasattr(self.hdf_group, name):
                return getattr(self.hdf_group, name)

            return self.hdf.create_table(self.hdf_group, name, DataPoint, name)
        except Exception as e:
            self.logger.error(str(e))

    def init_alert_config(self, name):
        # Fall back to the 'default' alert config when no per-source entry exists.
        config_name = name if self.datastore.exists('statd.alerts', ('id', '=', name)) else 'default'
        alert_config = self.datastore.get_by_id('statd.alerts', config_name)
        return alert_config

    def get_data_source(self, name):
        # Lazily create a DataSource and announce its pulse event type.
        if name not in list(self.data_sources.keys()):
            config = DataSourceConfig(self.datastore, name)
            alert_config = self.init_alert_config(name)
            ds = DataSource(self, name, config, alert_config)
            self.data_sources[name] = ds
            self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(name))

        return self.data_sources[name]

    def register_schemas(self):
        # Schemas used by the statd.output RPC interface.
        self.client.register_schema('GetStatsParams', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'start': {'type': 'datetime'},
                'end': {'type': 'datetime'},
                'timespan': {'type': 'integer'},
                'frequency': {'type': 'string'}
            }
        })

        self.client.register_schema('GetStatsResult', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'data': {
                    'type': 'array',
                }
            }
        })

    def connect(self):
        # Retry until the dispatcher accepts us, then (re)register and resume
        # all services and previously known event types.
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('statd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('statd.output', OutputService(self))
                self.client.register_service('statd.alert', AlertService(self))
                self.client.register_service('statd.debug', DebugService(gevent=True))
                self.client.resume_service('statd.output')
                self.client.resume_service('statd.alert')
                self.client.resume_service('statd.debug')
                for i in list(self.data_sources.keys()):
                    self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(i))

                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Reconnect automatically when the dispatcher drops us.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def die(self):
        self.logger.warning('Exiting')
        self.server.stop()
        self.client.disconnect()
        sys.exit(0)

    def dispatcher_error(self, error):
        self.die()

    def checkin(self):
        checkin()

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/fnstatd.log', 'DEBUG')
        setproctitle('fnstatd')

        # Signal handlers
        gevent.signal(signal.SIGQUIT, self.die)
        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGINT, self.die)

        self.server = InputServer(self)
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_database()
        self.server.start()
        self.logger.info('Started')
        self.checkin()
        self.client.wait_forever()
class BaseTestCase(unittest.TestCase):
    """Base class for task-driven dispatcher integration tests.

    Submits tasks over an RPC connection and tracks their lifecycle through
    'task.updated' / 'task.progress' events.
    """

    class TaskState(object):
        # Mutable record of one submitted task's lifecycle.
        def __init__(self):
            self.tid = None
            self.state = None
            self.message = None
            self.result = None
            self.name = None
            self.ended = Event()

    def __init__(self, methodName):
        super(BaseTestCase, self).__init__(methodName)
        self.tasks = {}
        self.tasks_lock = Lock()
        self.conn = None
        self.task_timeout = 30

    def setUp(self):
        # Connection parameters come from the environment, with local defaults.
        # (Removed a no-op `try: ... except: raise` wrapper.)
        self.conn = Client()
        self.conn.event_callback = self.on_event
        self.conn.connect(os.getenv('TESTHOST', '127.0.0.1'))
        self.conn.login_user(
            os.getenv('TESTUSER', 'root'),
            os.getenv('TESTPWD', ''),
            timeout=self.task_timeout
        )
        self.conn.subscribe_events('*')

    def tearDown(self):
        self.conn.disconnect()

    def submitTask(self, name, *args):
        """Submit a task and start tracking it; return the task id."""
        with self.tasks_lock:
            # Removed no-op `except RpcException: raise` wrappers.
            tid = self.conn.call_sync('task.submit', name, args)
            self.tasks[tid] = self.TaskState()
            self.tasks[tid].tid = tid
            self.tasks[tid].name = name
        return tid

    def assertTaskCompletion(self, tid):
        t = self.tasks[tid]
        if not t.ended.wait(self.task_timeout):
            self.fail('Task {0} timed out'.format(tid))

        # Pick the most meaningful text for the failure message; state and
        # message may still hold the 'Executing...' placeholder.
        if t.state.count('Executing...'):
            # NOTE(review): TaskState declares no 'error' attribute —
            # presumably set elsewhere at runtime; confirm before relying
            # on this branch.
            message = t.error
        elif t.__getattribute__('message') and t.message.count('Executing...'):
            message = t.state
        else:
            message = t.message
        if not message:
            self.query_task(tid)
        self.assertEqual(t.state, 'FINISHED', msg=message)

    def assertTaskFailure(self, tid):
        t = self.tasks[tid]
        if not t.ended.wait(self.task_timeout):
            self.fail('Task {0} timed out'.format(tid))
        self.assertNotEqual(t.state, 'FINISHED', msg=t.message)

    def assertSeenEvent(self, name, func=None):
        pass

    def skip(self, reason):
        raise unittest.SkipTest(str(reason))

    def getTaskResult(self, tid):
        t = self.tasks[tid]
        return t.result

    def on_event(self, name, args):
        # Event callback; the lock guards the shared task table.  Unknown
        # task ids are ignored (freshly submitted tasks may race the event).
        with self.tasks_lock:
            if name == 'task.updated':
                if args['id'] not in self.tasks.keys():
                    if args['state'] == 'EXECUTING':
                        return
                else:
                    t = self.tasks[args['id']]
                    t.state = args['state']
                    if t.state in ('FINISHED', 'FAILED'):
                        t.result = args['result'] if 'result' in args else None
                        t.ended.set()
            elif name == 'task.progress':
                if args['id'] not in self.tasks.keys():
                    if args['state'] == 'EXECUTING':
                        return
                else:
                    t = self.tasks[args['id']]
                    t.message = args['message']

    def on_eventOrig(self, name, args):
        # Legacy variant of on_event kept for reference: manual lock
        # acquire/release instead of a with-block.
        self.tasks_lock.acquire()
        if name == 'task.updated':
            if args['id'] not in self.tasks.keys():
                if args['state'] == 'EXECUTING':
                    self.tasks_lock.release()
                    return
            else:
                t = self.tasks[args['id']]
                t.state = args['state']
                if t.state in ('FINISHED', 'FAILED'):
                    t.result = args['result'] if 'result' in args else None
                    t.ended.set()
        elif name == 'task.progress':
            if args['id'] not in self.tasks.keys():
                if args['state'] == 'EXECUTING':
                    self.tasks_lock.release()
                    return
            else:
                t = self.tasks[args['id']]
                t.message = args['message']
        self.tasks_lock.release()

    def pretty_print(self, res):
        if '-v' in sys.argv:
            # Fixed: was a Python 2 print statement (SyntaxError on Python 3).
            print(json.dumps(res, indent=4, sort_keys=True))

    def query_task(self, tid):
        # Makes tests very slow, keep as debug
        query = self.conn.call_sync('task.query', [('id', '=', tid)])
        message = query[0]['error']
        self.pretty_print(message)
class Main(object):
    # containerd daemon: manages VM/container instances, the management
    # network with NAT, and the EC2-style metadata service.

    def __init__(self):
        self.client = None
        self.datastore = None
        self.configstore = None
        self.config = None
        self.mgmt = None
        self.vm_started = Event()
        self.containers = {}
        self.tokens = {}
        self.logger = logging.getLogger('containerd')
        self.bridge_interface = None
        self.used_nmdms = []

    def init_datastore(self):
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def allocate_nmdm(self):
        # Return the first free nmdm (null-modem console) index.
        for i in range(0, 255):
            if i not in self.used_nmdms:
                self.used_nmdms.append(i)
                return i

    def release_nmdm(self, index):
        self.used_nmdms.remove(index)

    def connect(self):
        # Retry until the dispatcher accepts the connection, then register
        # and resume our RPC services.
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('containerd')
                self.client.enable_server()
                self.client.register_service('containerd.management', ManagementService(self))
                self.client.register_service('containerd.debug', DebugService(gevent=True, builtins={"context": self}))
                self.client.resume_service('containerd.management')
                self.client.resume_service('containerd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Reconnect automatically when the dispatcher drops us.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.use_bursts = True
        self.client.on_error(on_error)
        self.connect()

    def init_mgmt(self):
        self.mgmt = ManagementNetwork(self, MGMT_INTERFACE, MGMT_ADDR)
        self.mgmt.up()
        # 169.254.169.254 is the conventional EC2 metadata endpoint address.
        self.mgmt.bridge_if.add_address(netif.InterfaceAddress(
            netif.AddressFamily.INET,
            ipaddress.ip_interface('169.254.169.254/32')
        ))

    def init_nat(self):
        # Set up a pf NAT rule so guests on the management network can reach
        # the outside world through the default route interface.
        default_if = self.client.call_sync('networkd.configuration.get_default_interface')
        if not default_if:
            self.logger.warning('No default route interface; not configuring NAT')
            return

        p = pf.PF()

        # Try to find and remove existing NAT rules for the same subnet
        oldrule = first_or_default(
            lambda r: r.src.address.address == MGMT_ADDR.network.network_address,
            p.get_rules('nat')
        )

        if oldrule:
            p.delete_rule('nat', oldrule.index)

        rule = pf.Rule()
        rule.src.address.address = MGMT_ADDR.network.network_address
        rule.src.address.netmask = MGMT_ADDR.netmask
        rule.action = pf.RuleAction.NAT
        rule.af = socket.AF_INET
        rule.ifname = default_if
        rule.redirect_pool.append(pf.Address(ifname=default_if))
        rule.proxy_ports = [50001, 65535]
        p.append_rule('nat', rule)

        try:
            p.enable()
        except OSError as err:
            # EEXIST means pf was already enabled, which is fine.
            if err.errno != errno.EEXIST:
                raise err

    def init_ec2(self):
        self.ec2 = EC2MetadataServer(self)
        self.ec2.start()

    def vm_by_mgmt_mac(self, mac):
        # Find the container owning a tap interface with the given MAC.
        for i in self.containers.values():
            for tapmac in i.tap_interfaces.values():
                if tapmac == mac:
                    return i

        return None

    def vm_by_mgmt_ip(self, ip):
        # Resolve a DHCP lease on the management network back to its VM.
        for i in self.mgmt.allocations.values():
            if i.lease.client_ip == ip:
                return i.vm()

    def die(self):
        self.logger.warning('Exiting')
        for i in self.containers.values():
            i.stop(True)

        self.client.disconnect()
        sys.exit(0)

    def generate_id(self):
        # 32-character random alphanumeric token.
        return ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])

    def dispatcher_error(self, error):
        self.die()

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-p', type=int, metavar='PORT', default=5500, help="WebSockets server port")
        args = parser.parse_args()
        configure_logging('/var/log/containerd.log', 'DEBUG')
        setproctitle.setproctitle('containerd')

        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGQUIT, self.die)

        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_mgmt()
        self.init_nat()
        self.init_ec2()
        self.logger.info('Started')

        # WebSockets server
        kwargs = {}
        s4 = WebSocketServer(('', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        s6 = WebSocketServer(('::', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        serv_threads = [gevent.spawn(s4.serve_forever), gevent.spawn(s6.serve_forever)]
        gevent.joinall(serv_threads)
class Context(object):
    # Daemon-wide state for serviced: local RPC server, upstream dispatcher
    # client, job registry and the kqueue used to follow process lifecycles.

    def __init__(self):
        self.server = None
        self.client = None
        self.jobs = {}
        self.provides = set()
        self.lock = RLock()
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management', ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        # No-op if a live dispatcher connection already exists.
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            # Reconnect on connection loss or forced logout.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        # World-accessible socket permissions (0o777).
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        # Add newly satisfied dependency targets; start any stopped job whose
        # requirements are now met.  Runs on a short timer so that related
        # targets can be batched.
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        # kqueue main loop: route PROC events to their jobs and adopt unknown
        # forked children as anonymous jobs.
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                # Process vanished before we could inspect it.
                                continue

                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(job.label))

    def track_pid(self, pid):
        # Watch exit/exec/fork; NOTE_TRACK extends tracking to children.
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK | select.KQ_NOTE_TRACK,
            0, 0
        )
        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_DELETE,
            0, 0, 0
        )
        # The kevent may already be gone if the process has exited.
        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        # Broadcast locally and forward upstream when connected.
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        # Loop until the dispatcher accepts us, then resume our services.
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        # Kick off the one-shot bootstrap job from a background thread.
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })
                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
class Context(object):
    """debugd support-connection context.

    Maintains the msock link to the remote support proxy, the local control
    server socket, and the client connection to the dispatcher.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1
        self.connection_id = None   # uuid identifying this support session
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None       # keepalive thread handle
        self.connected_at = None
        self.cv = Condition()       # guards/state-change notifications for self.state
        self.rpc = RpcContext()
        self.client = Client()
        self.server = Server()
        self.middleware_endpoint = None

    def start(self, configpath, sockpath):
        # SIGUSR2 triggers an on-demand support connection.
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance("control", ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name="server thread", daemon=True).start()

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning("Connection to dispatcher lost")
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        # Retry until the local dispatcher accepts the connection.
        while True:
            try:
                self.middleware_endpoint.connect("unix:")
                self.middleware_endpoint.login_service("debugd")
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service("debugd.management", ControlService(self))
                self.middleware_endpoint.resume_service("debugd.management")
                return
            except (OSError, RpcException) as err:
                self.logger.warning("Cannot connect to dispatcher: {0}, retrying in 1 second".format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        # Load the JSON config file or exit(1) with a fatal log message.
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal("Cannot open config file: {0}".format(str(err)))
            self.logger.fatal("Exiting.")
            sys.exit(1)

    def connect(self, discard=False):
        # Start (or restart) the keepalive thread. discard=True forces a
        # fresh connection id for the next session.
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        # Loop forever: establish the support channel, then wait on the
        # condition variable until the link is LOST (retry) or OFFLINE (stop).
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info("Connecting to {0}".format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                self.client = Client()
                self.client.connect("fd://", fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service("debug", DebugService(self))
                self.client.call_sync(
                    "server.login",
                    str(self.connection_id),
                    socket.gethostname(),
                    get_version(),
                    "none"
                )
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                # Deliberately broad: any failure tears down the socket and
                # falls through to the retry sleep below.
                self.logger.warning("Failed to initiate support connection: {0}".format(err), exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    self.cv.wait_for(lambda: self.state in (ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning("Support connection lost, retrying in 10 seconds")
            time.sleep(10)

    def disconnect(self):
        # Tear down the support session and discard queued jobs.
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        # msock callback: mark the connection LOST so keepalive reconnects.
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        # Publish a state change and wake any waiter (connect_keepalive).
        with self.cv:
            self.state = state
            self.cv.notify_all()
class Main(object):
    """containerd entry point.

    Owns the datastore handle, the dispatcher client connection and the
    WebSocket servers serving container consoles.
    """

    def __init__(self):
        self.client = None
        self.datastore = None
        self.config = None
        self.containers = {}
        self.tokens = {}
        self.logger = logging.getLogger('containerd')

    def parse_config(self, filename):
        """Load the JSON config file into self.config; exit(1) on failure."""
        try:
            # Context manager guarantees the handle is closed even when
            # json.load raises (the original leaked it in that case).
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Python 3 exceptions have no .message attribute (the original
            # would raise AttributeError here); use str(err) instead.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

    def init_datastore(self):
        """Open the datastore described by the config; exit(1) on failure."""
        try:
            self.datastore = get_datastore(self.config['datastore']['driver'], self.config['datastore']['dsn'])
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def connect(self):
        # Retry until the dispatcher accepts us, then register and resume
        # our RPC services over the connection.
        while True:
            try:
                self.client.connect('127.0.0.1')
                self.client.login_service('containerd')
                self.client.enable_server()
                self.client.register_service('containerd.management', ManagementService(self))
                self.client.register_service('containerd.debug', DebugService(gevent=True))
                self.client.resume_service('containerd.management')
                self.client.resume_service('containerd.debug')
                return
            except socket.error as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        # Create the dispatcher client and reconnect automatically on loss.
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.use_bursts = True
        self.client.on_error(on_error)
        self.connect()

    def die(self):
        self.logger.warning('Exiting')
        self.client.disconnect()
        sys.exit(0)

    def generate_id(self):
        """Return a random 32-character alphanumeric token.

        NOTE(review): uses `random`, which is not cryptographically secure;
        if these tokens guard console access, consider the `secrets` module.
        """
        return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(32))

    def dispatcher_error(self, error):
        self.die()

    def main(self):
        """Entry point: parse args, wire everything up and serve WebSockets."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-p', type=int, metavar='PORT', default=5500, help="WebSockets server port")
        args = parser.parse_args()
        configure_logging('/var/log/containerd.log', 'DEBUG')
        setproctitle.setproctitle('containerd')
        self.parse_config(args.c)
        self.init_datastore()
        self.init_dispatcher()
        self.logger.info('Started')

        # WebSockets servers: one IPv4 and one IPv6 listener on the same port.
        kwargs = {}
        s4 = WebSocketServer(('', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        s6 = WebSocketServer(('::', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        serv_threads = [gevent.spawn(s4.serve_forever), gevent.spawn(s6.serve_forever)]
        gevent.joinall(serv_threads)
class BaseTestCase(unittest.TestCase):
    """Base class for dispatcher integration tests.

    Manages a middleware connection (host/user/password taken from the
    TESTHOST/TESTUSER/TESTPWD environment variables) and tracks submitted
    tasks via task.updated / task.progress events.
    """

    class TaskState(object):
        # Mutable record of one submitted task's lifecycle.
        def __init__(self):
            self.tid = None
            self.state = None
            self.message = None
            self.result = None
            self.name = None
            # The original never defined `error`, so assertTaskCompletion
            # raised AttributeError when it tried to read it; default to None.
            self.error = None
            self.ended = Event()

    def __init__(self, methodName):
        super(BaseTestCase, self).__init__(methodName)
        self.tasks = {}            # task id -> TaskState
        self.tasks_lock = Lock()   # guards self.tasks
        self.conn = None
        self.task_timeout = 30     # seconds; also used as login timeout

    def setUp(self):
        # (The original wrapped this in `try/except: raise`, a no-op; removed.)
        self.conn = Client()
        self.conn.event_callback = self.on_event
        self.conn.connect(os.getenv('TESTHOST', '127.0.0.1'))
        self.conn.login_user(
            os.getenv('TESTUSER', 'root'),
            os.getenv('TESTPWD', ''),
            timeout=self.task_timeout
        )
        self.conn.subscribe_events('*')

    def tearDown(self):
        self.conn.disconnect()

    def submitTask(self, name, *args):
        """Submit task `name` with `args` and begin tracking it.

        Returns the task id. RpcException from task.submit propagates.
        """
        with self.tasks_lock:
            tid = self.conn.call_sync('task.submit', name, args)
            self.tasks[tid] = self.TaskState()
            self.tasks[tid].tid = tid
            self.tasks[tid].name = name

        return tid

    def assertTaskCompletion(self, tid):
        """Wait for task `tid` and assert it reached the FINISHED state."""
        t = self.tasks[tid]
        if not t.ended.wait(self.task_timeout):
            self.fail('Task {0} timed out'.format(tid))

        # Pick the most useful text for the failure message; state/message
        # can still hold a stale 'Executing...' progress string.
        if t.state.count('Executing...'):
            message = t.error
        elif t.message and t.message.count('Executing...'):
            message = t.state
        else:
            message = t.message

        if not message:
            self.query_task(tid)

        self.assertEqual(t.state, 'FINISHED', msg=message)

    def assertTaskFailure(self, tid):
        """Wait for task `tid` and assert it did NOT finish successfully."""
        t = self.tasks[tid]
        if not t.ended.wait(self.task_timeout):
            self.fail('Task {0} timed out'.format(tid))

        self.assertNotEqual(t.state, 'FINISHED', msg=t.message)

    def assertSeenEvent(self, name, func=None):
        pass

    def skip(self, reason):
        raise unittest.SkipTest(str(reason))

    def getTaskResult(self, tid):
        t = self.tasks[tid]
        return t.result

    def on_event(self, name, args):
        """Event callback: fold task.updated/task.progress into TaskState."""
        with self.tasks_lock:
            if name == 'task.updated':
                if args['id'] not in self.tasks:
                    # Ignore tasks we did not submit.
                    if args['state'] == 'EXECUTING':
                        return
                else:
                    t = self.tasks[args['id']]
                    t.state = args['state']
                    if t.state in ('FINISHED', 'FAILED'):
                        t.result = args['result'] if 'result' in args else None
                        t.ended.set()
            elif name == 'task.progress':
                if args['id'] not in self.tasks:
                    if args['state'] == 'EXECUTING':
                        return
                else:
                    t = self.tasks[args['id']]
                    t.message = args['message']

    def on_eventOrig(self, name, args):
        # Legacy manual-lock variant; superseded by on_event but kept for
        # backward compatibility with any external callers.
        self.tasks_lock.acquire()
        if name == 'task.updated':
            if args['id'] not in self.tasks:
                if args['state'] == 'EXECUTING':
                    self.tasks_lock.release()
                    return
            else:
                t = self.tasks[args['id']]
                t.state = args['state']
                if t.state in ('FINISHED', 'FAILED'):
                    t.result = args['result'] if 'result' in args else None
                    t.ended.set()
        elif name == 'task.progress':
            if args['id'] not in self.tasks:
                if args['state'] == 'EXECUTING':
                    self.tasks_lock.release()
                    return
            else:
                t = self.tasks[args['id']]
                t.message = args['message']

        self.tasks_lock.release()

    def pretty_print(self, res):
        # Only dump results when the test run was started with -v.
        # (Original used the Python 2 print statement, a SyntaxError on 3.)
        if '-v' in sys.argv:
            print(json.dumps(res, indent=4, sort_keys=True))

    def query_task(self, tid):
        # Makes tests very slow, keep as debug
        query = self.conn.call_sync('task.query', [('id', '=', tid)])
        message = query[0]['error']
        self.pretty_print(message)
class Context(object):
    """CLI application context.

    Holds the middleware connection, plugin registry, variable store,
    namespace tree and the event plumbing used by the interactive shell.
    """

    def __init__(self):
        self.hostname = None
        self.connection = Client()
        self.ml = None                    # main-loop/readline wrapper, set externally
        self.logger = logging.getLogger('cli')
        self.plugin_dirs = []
        self.task_callbacks = {}          # task id -> completion callback
        self.plugins = {}                 # plugin path -> module
        self.variables = VariableStore()
        self.root_ns = RootNamespace('')
        self.event_masks = ['*']
        self.event_divert = False         # True => route events to event_queue
        self.event_queue = six.moves.queue.Queue()
        self.keepalive_timer = None
        self.argparse_parser = None
        config.instance = self

    @property
    def is_interactive(self):
        return os.isatty(sys.stdout.fileno())

    def start(self):
        self.discover_plugins()
        self.connect()

    def connect(self):
        # Connect to self.hostname or print help and exit on failure.
        try:
            self.connection.connect(self.hostname)
        except socket_error as err:
            output_msg(_(
                "Could not connect to host: {0} due to error: {1}".format(self.hostname, err)
            ))
            self.argparse_parser.print_help()
            sys.exit(1)

    def login(self, user, password):
        # NOTE(review): RpcException with a code other than EACCES is
        # silently ignored and login_plugins() still runs — confirm intended.
        try:
            self.connection.login_user(user, password)
            self.connection.subscribe_events(*EVENT_MASKS)
            self.connection.on_event(self.handle_event)
            self.connection.on_error(self.connection_error)
        except RpcException as e:
            if e.code == errno.EACCES:
                self.connection.disconnect()
                output_msg(_("Wrong username or password"))
                sys.exit(1)

        self.login_plugins()

    def keepalive(self):
        if self.connection.opened:
            self.connection.call_sync('management.ping')

    def read_middleware_config_file(self, file):
        """
        If there is a cli['plugin-dirs'] in middleware.conf use that,
        otherwise use the default plugins dir within cli namespace
        """
        plug_dirs = None
        if file:
            with open(file, 'r') as f:
                data = json.load(f)

            if 'cli' in data and 'plugin-dirs' in data['cli']:
                if type(data['cli']['plugin-dirs']) != list:
                    return

                self.plugin_dirs += data['cli']['plugin-dirs']

        if plug_dirs is None:
            plug_dirs = os.path.dirname(os.path.realpath(__file__))
            plug_dirs = os.path.join(plug_dirs, 'plugins')
            self.plugin_dirs += [plug_dirs]

    def discover_plugins(self):
        for dir in self.plugin_dirs:
            self.logger.debug(_("Searching for plugins in %s"), dir)
            self.__discover_plugin_dir(dir)

    def login_plugins(self):
        # Give every loaded plugin a chance to run its post-login hook.
        for i in list(self.plugins.values()):
            if hasattr(i, '_login'):
                i._login(self)

    def __discover_plugin_dir(self, dir):
        for i in glob.glob1(dir, "*.py"):
            self.__try_load_plugin(os.path.join(dir, i))

    def __try_load_plugin(self, path):
        # Load a plugin module once; call its _init hook if present.
        if path in self.plugins:
            return

        self.logger.debug(_("Loading plugin from %s"), path)
        name, ext = os.path.splitext(os.path.basename(path))
        plugin = imp.load_source(name, path)

        if hasattr(plugin, '_init'):
            plugin._init(self)
            self.plugins[path] = plugin

    def __try_reconnect(self):
        # Reconnect loop after a dropped connection; reauthenticates with the
        # previous token (or as the local user on 127.0.0.1).
        output_lock.acquire()
        self.ml.blank_readline()

        output_msg(_('Connection lost! Trying to reconnect...'))
        retries = 0
        while True:
            retries += 1
            try:
                time.sleep(2)
                self.connect()
                try:
                    if self.hostname == '127.0.0.1':
                        self.connection.login_user(getpass.getuser(), '')
                    else:
                        self.connection.login_token(self.connection.token)

                    self.connection.subscribe_events(*EVENT_MASKS)
                except RpcException:
                    output_msg(_("Reauthentication failed (most likely token expired or server was restarted)"))
                    sys.exit(1)
                break
            except Exception as e:
                output_msg(_('Cannot reconnect: {0}'.format(str(e))))

        self.ml.restore_readline()
        output_lock.release()

    def attach_namespace(self, path, ns):
        # Walk the '/'-separated path down the namespace tree and register
        # `ns` at its parent.
        splitpath = path.split('/')
        ptr = self.root_ns
        ptr_namespaces = ptr.namespaces()

        for n in splitpath[1:-1]:
            if n not in list(ptr_namespaces().keys()):
                self.logger.warn(_("Cannot attach to namespace %s"), path)
                return

            ptr = ptr_namespaces()[n]

        ptr.register_namespace(ns)

    def connection_error(self, event, **kwargs):
        if event == ClientError.LOGOUT:
            output_msg('Logged out from server.')
            self.connection.disconnect()
            sys.exit(0)

        if event == ClientError.CONNECTION_CLOSED:
            time.sleep(1)
            self.__try_reconnect()
            return

    def handle_event(self, event, data):
        if event == 'task.updated':
            if data['id'] in self.task_callbacks:
                self.handle_task_callback(data)

        self.print_event(event, data)

    def handle_task_callback(self, data):
        # Invoke the registered callback when the task reaches a final state.
        if data['state'] in ('FINISHED', 'CANCELLED', 'ABORTED', 'FAILED'):
            self.task_callbacks[data['id']](data['state'])

    def print_event(self, event, data):
        # Either queue the event (when a blocking task is running) or render
        # it over the current readline prompt.
        if self.event_divert:
            self.event_queue.put((event, data))
            return

        if event == 'task.progress':
            return

        output_lock.acquire()
        self.ml.blank_readline()

        translation = events.translate(self, event, data)
        if translation:
            output_msg(translation)
            if 'state' in data:
                if data['state'] == 'FAILED':
                    status = self.connection.call_sync('task.status', data['id'])
                    output_msg(_(
                        "Task #{0} error: {1}".format(
                            data['id'],
                            status['error'].get('message', '') if status.get('error') else ''
                        )
                    ))

        sys.stdout.flush()
        self.ml.restore_readline()
        output_lock.release()

    def call_sync(self, name, *args, **kwargs):
        return wrap(self.connection.call_sync(name, *args, **kwargs))

    def call_task_sync(self, name, *args, **kwargs):
        self.ml.skip_prompt_print = True
        wrapped_result = wrap(self.connection.call_task_sync(name, *args))
        self.ml.skip_prompt_print = False
        return wrapped_result

    def submit_task(self, name, *args, **kwargs):
        """Submit a task; when tasks_blocking is set, follow its progress
        interactively until it finishes or fails. Returns the task id."""
        callback = kwargs.pop('callback', None)
        message_formatter = kwargs.pop('message_formatter', None)
        if not self.variables.get('tasks_blocking'):
            tid = self.connection.call_sync('task.submit', name, args)
            if callback:
                self.task_callbacks[tid] = callback

            return tid
        else:
            output_msg(_("Hit Ctrl+C to terminate task if needed"))
            self.event_divert = True
            tid = self.connection.call_sync('task.submit', name, args)
            progress = ProgressBar()
            try:
                while True:
                    event, data = self.event_queue.get()

                    if event == 'task.progress' and data['id'] == tid:
                        message = data['message']
                        # NOTE(review): collections.Callable was removed in
                        # Python 3.10 — collections.abc.Callable is the
                        # modern spelling; confirm target interpreter.
                        if isinstance(message_formatter, collections.Callable):
                            message = message_formatter(message)
                        progress.update(percentage=data['percentage'], message=message)

                    if event == 'task.updated' and data['id'] == tid:
                        progress.update(message=data['state'])
                        if data['state'] == 'FINISHED':
                            progress.finish()
                            break

                        if data['state'] == 'FAILED':
                            print()
                            break
            except KeyboardInterrupt:
                print()
                output_msg(_("User requested task termination. Task abort signal sent"))
                self.call_sync('task.abort', tid)

        self.event_divert = False
        return tid
def run(self, peer, initial_credentials):
    """Pair this host with a remote FreeNAS peer.

    Authenticates to the remote either via a one-time auth code or via
    SSH (key or username/password), exchanges SSH host/public keys, creates
    the local peer entry and then a mirror entry on the remote side. If the
    remote-side creation fails, the local entry is rolled back.

    Returns the id of the locally created peer. Raises TaskException on
    duplicate peers, connection failures or remote RPC errors.
    """
    hostid = self.dispatcher.call_sync('system.info.host_uuid')
    hostname = self.dispatcher.call_sync('system.general.get_config')['hostname']
    remote_peer_name = hostname
    credentials = peer['credentials']
    remote = credentials.get('address')
    port = credentials.get('port', 22)
    username = initial_credentials.get('username')
    password = initial_credentials.get('password')
    auth_code = initial_credentials.get('auth_code')
    key_auth = initial_credentials.get('key_auth')

    local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

    if self.datastore.exists('peers', ('credentials.address', '=', remote), ('type', '=', 'freenas')):
        raise TaskException(
            errno.EEXIST,
            'FreeNAS peer entry for {0} already exists'.format(remote)
        )

    remote_client = Client()
    try:
        if auth_code:
            # Auth-code flow: the remote side validates the code, hands back
            # its uuid and pubkey, and creates its own peer entry for us
            # asynchronously (observed via the peer.changed event below).
            try:
                remote_client.connect('ws://{0}'.format(wrap_address(remote)))
            except (AuthenticationException, OSError, ConnectionRefusedError):
                raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

            try:
                remote_host_uuid, pubkey = remote_client.call_sync(
                    'peer.freenas.auth_with_code',
                    auth_code,
                    hostname,
                    local_ssh_config['port']
                )
            except RpcException as err:
                raise TaskException(err.code, err.message)

            try:
                self.dispatcher.call_sync('peer.freenas.put_temp_pubkey', pubkey)
                # Wait up to 30s for the remote-initiated peer entry to appear.
                if not self.dispatcher.test_or_wait_for_event(
                    'peer.changed',
                    lambda ar: ar['operation'] == 'create' and remote_host_uuid in ar['ids'],
                    lambda: self.datastore.exists('peers', ('id', '=', remote_host_uuid)),
                    timeout=30
                ):
                    raise TaskException(
                        errno.EAUTH,
                        'FreeNAS peer creation failed. Check connection to host {0}.'.format(remote)
                    )
            finally:
                # The temporary pubkey is removed whether pairing succeeded or not.
                self.dispatcher.call_sync('peer.freenas.remove_temp_pubkey', pubkey)
        else:
            try:
                if key_auth:
                    # Key flow: use our stored private key. The remote may
                    # need time to install the matching public key, so retry
                    # authentication for up to ~50 seconds.
                    with io.StringIO() as f:
                        f.write(self.configstore.get('peer.freenas.key.private'))
                        f.seek(0)
                        pkey = RSAKey.from_private_key(f)

                    max_tries = 50
                    while True:
                        try:
                            remote_client.connect('ws+ssh://freenas@{0}'.format(
                                wrap_address(remote)), pkey=pkey, port=port
                            )
                            break
                        except AuthenticationException:
                            if max_tries:
                                max_tries -= 1
                                time.sleep(1)
                            else:
                                raise
                else:
                    # Password flow: plain username/password SSH transport.
                    remote_client.connect(
                        'ws+ssh://{0}@{1}'.format(username, wrap_address(remote)),
                        port=port,
                        password=password
                    )

                remote_client.login_service('replicator')
            except (AuthenticationException, OSError, ConnectionRefusedError):
                raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

        local_host_key, local_pub_key = self.dispatcher.call_sync('peer.freenas.get_ssh_keys')
        remote_host_key, remote_pub_key = remote_client.call_sync('peer.freenas.get_ssh_keys')
        ip_at_remote_side = remote_client.local_address[0]

        remote_hostname = remote_client.call_sync('system.general.get_config')['hostname']

        # Strip the last space-separated field of each host key before storing.
        remote_host_key = remote_host_key.rsplit(' ', 1)[0]
        local_host_key = local_host_key.rsplit(' ', 1)[0]

        if remote_client.call_sync('peer.query', [('id', '=', hostid)]):
            raise TaskException(errno.EEXIST, 'Peer entry of {0} already exists at {1}'.format(hostname, remote))

        peer['credentials'] = {
            '%type': 'freenas-credentials',
            'pubkey': remote_pub_key,
            'hostkey': remote_host_key,
            'port': port,
            'address': remote_hostname
        }

        local_id = remote_client.call_sync('system.info.host_uuid')
        peer['id'] = local_id
        peer['name'] = remote_hostname
        ip = socket.gethostbyname(remote)

        # Create our local entry describing the remote peer.
        created_id = self.run_subtask_sync(
            'peer.freenas.create_local',
            peer,
            ip,
            True
        )

        # Re-shape `peer` to describe *this* host for the remote side.
        peer['id'] = hostid
        peer['name'] = remote_peer_name
        peer['credentials'] = {
            '%type': 'freenas-credentials',
            'pubkey': local_pub_key,
            'hostkey': local_host_key,
            'port': local_ssh_config['port'],
            'address': hostname
        }

        try:
            call_task_and_check_state(
                remote_client,
                'peer.freenas.create_local',
                peer,
                ip_at_remote_side
            )
        except TaskException:
            # Roll back the local peer entry if remote creation failed.
            self.datastore.delete('peers', local_id)
            self.dispatcher.dispatch_event('peer.changed', {
                'operation': 'delete',
                'ids': [local_id]
            })
            raise

        return created_id
    finally:
        remote_client.disconnect()
class Context(object):
    """debugd support-connection context (single-quote formatting variant).

    Maintains the msock link to the remote support proxy, the local control
    server socket, and the client connection to the dispatcher.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1
        self.connection_id = None   # uuid identifying this support session
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None       # keepalive thread handle
        self.connected_at = None
        self.cv = Condition()       # state-change notifications for self.state
        self.rpc = RpcContext()
        self.client = Client()
        self.server = Server()
        self.middleware_endpoint = None

    def start(self, configpath, sockpath):
        # SIGUSR2 triggers an on-demand support connection.
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance('control', ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name='server thread', daemon=True).start()

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        # Retry until the local dispatcher accepts the connection.
        while True:
            try:
                self.middleware_endpoint.connect('unix:')
                self.middleware_endpoint.login_service('debugd')
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service(
                    'debugd.management', ControlService(self))
                self.middleware_endpoint.resume_service('debugd.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        # Load the JSON config file or exit(1) with a fatal log message.
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal('Cannot open config file: {0}'.format(str(err)))
            self.logger.fatal('Exiting.')
            sys.exit(1)

    def connect(self, discard=False):
        # Start (or restart) the keepalive thread. discard=True forces a
        # fresh connection id for the next session.
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        # Loop forever: establish the support channel, then wait on the
        # condition variable until the link is LOST (retry) or OFFLINE (stop).
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info(
                    'Connecting to {0}'.format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                self.client = Client()
                self.client.connect('fd://', fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service('debug', DebugService(self))
                self.client.call_sync('server.login', str(self.connection_id),
                                      socket.gethostname(), get_version(), 'none')
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                # Deliberately broad: any failure tears down the socket and
                # falls through to the retry sleep below.
                self.logger.warning(
                    'Failed to initiate support connection: {0}'.format(err), exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    self.cv.wait_for(lambda: self.state in (
                        ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning(
                'Support connection lost, retrying in 10 seconds')
            time.sleep(10)

    def disconnect(self):
        # Tear down the support session and discard queued jobs.
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        # msock callback: mark the connection LOST so keepalive reconnects.
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        # Publish a state change and wake any waiter (connect_keepalive).
        with self.cv:
            self.state = state
            self.cv.notify_all()
class Main(object):
    """fnstatd entry point.

    Collects statistics into an HDF5 file (PyTables) and exposes them to the
    middleware via the dispatcher connection.
    """

    def __init__(self):
        self.client = None
        self.server = None
        self.datastore = None
        self.hdf = None          # PyTables file handle
        self.hdf_group = None    # /stats group inside the HDF file
        self.config = None
        self.logger = logging.getLogger('statd')
        self.data_sources = {}   # data source name -> DataSource

    def parse_config(self, filename):
        """Load the JSON config file into self.config; exit(1) on failure."""
        try:
            # Context manager: the original leaked the handle if json.load raised.
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Python 3 exceptions have no .message attribute (the original
            # would raise AttributeError here); use str(err) instead.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

    def init_datastore(self):
        """Open the datastore described by the config; exit(1) on failure."""
        try:
            self.datastore = get_datastore(self.config['datastore']['driver'], self.config['datastore']['dsn'])
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def init_database(self):
        # adding this try/except till system-dataset plugin is added back in in full fidelity
        # just a hack (since that directory's data will not persist)
        # Please remove this when system-dataset plugin is added back in
        try:
            directory = self.client.call_sync('system_dataset.request_directory', 'statd')
        except RpcException:
            directory = '/var/tmp/statd'
            if not os.path.exists(directory):
                os.makedirs(directory)

        self.hdf = tables.open_file(os.path.join(directory, DEFAULT_DBFILE), mode='a')
        if not hasattr(self.hdf.root, 'stats'):
            self.hdf.create_group('/', 'stats')

        self.hdf_group = self.hdf.root.stats

    def request_table(self, name):
        """Return (creating if needed) the HDF table for data source `name`."""
        try:
            if hasattr(self.hdf_group, name):
                return getattr(self.hdf_group, name)

            return self.hdf.create_table(self.hdf_group, name, DataPoint, name)
        except Exception as e:
            self.logger.error(str(e))

    def get_data_source(self, name):
        """Return (creating and registering if needed) the DataSource `name`."""
        if name not in self.data_sources:
            config = DataSourceConfig(self.datastore, name)
            ds = DataSource(self, name, config)
            self.data_sources[name] = ds
            self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(name))

        return self.data_sources[name]

    def connect(self):
        # Retry until the dispatcher accepts us, then register our services
        # and re-announce the event types of all known data sources.
        while True:
            try:
                self.client.connect('127.0.0.1')
                self.client.login_service('statd')
                self.client.enable_server()
                self.client.register_service('statd.output', OutputService(self))
                self.client.register_service('statd.debug', DebugService(gevent=True))
                self.client.resume_service('statd.output')
                self.client.resume_service('statd.debug')
                for i in list(self.data_sources.keys()):
                    self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(i))

                return
            except socket.error as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        # Create the dispatcher client and reconnect automatically on loss.
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.use_bursts = True
        self.client.on_error(on_error)
        self.connect()

    def die(self):
        self.logger.warning('Exiting')
        self.server.stop()
        self.client.disconnect()
        sys.exit(0)

    def dispatcher_error(self, error):
        self.die()

    def main(self):
        """Entry point: parse args, wire everything up and serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/fnstatd.log', 'DEBUG')
        setproctitle.setproctitle('fnstatd')

        # Signal handlers (the original registered SIGQUIT twice — duplicate removed)
        gevent.signal(signal.SIGQUIT, self.die)
        gevent.signal(signal.SIGINT, self.die)

        self.server = InputServer(self)
        self.parse_config(args.c)
        self.init_datastore()
        self.init_dispatcher()
        self.init_database()
        self.server.start()
        self.logger.info('Started')
        self.client.wait_forever()
def run(self, peer, initial_credentials):
    """Pair this host with a remote FreeNAS peer (join_subtasks variant).

    Authenticates to the remote either via a one-time auth code or via SSH
    (key or username/password), exchanges SSH host/public keys, creates the
    local peer entry and a mirror entry on the remote side, rolling back the
    local entry if the remote creation fails.

    Returns the id of the locally created peer. Raises TaskException on
    duplicate peers, connection failures or remote RPC errors.
    """
    hostid = self.dispatcher.call_sync('system.info.host_uuid')
    hostname = self.dispatcher.call_sync(
        'system.general.get_config')['hostname']
    remote_peer_name = hostname
    credentials = peer['credentials']
    remote = credentials.get('address')
    port = credentials.get('port', 22)
    username = initial_credentials.get('username')
    password = initial_credentials.get('password')
    auth_code = initial_credentials.get('auth_code')
    key_auth = initial_credentials.get('key_auth')

    local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

    if self.datastore.exists('peers', ('credentials.address', '=', remote),
                             ('type', '=', 'freenas')):
        raise TaskException(
            errno.EEXIST,
            'FreeNAS peer entry for {0} already exists'.format(remote))

    remote_client = Client()
    try:
        if auth_code:
            # Auth-code flow: the remote validates the code, returns its uuid
            # and pubkey, and creates its own peer entry asynchronously
            # (observed via the peer.changed event below).
            try:
                remote_client.connect('ws://{0}'.format(
                    wrap_address(remote)))
            except (AuthenticationException, OSError,
                    ConnectionRefusedError):
                raise TaskException(
                    errno.ECONNABORTED,
                    'Cannot connect to {0}:{1}'.format(remote, port))

            try:
                remote_host_uuid, pubkey = remote_client.call_sync(
                    'peer.freenas.auth_with_code', auth_code, hostname,
                    local_ssh_config['port'])
            except RpcException as err:
                raise TaskException(err.code, err.message)

            try:
                self.dispatcher.call_sync('peer.freenas.put_temp_pubkey',
                                          pubkey)
                # Wait up to 30s for the remote-initiated peer entry to appear.
                if not self.dispatcher.test_or_wait_for_event(
                        'peer.changed',
                        lambda ar: ar['operation'] == 'create' and
                        remote_host_uuid in ar['ids'],
                        lambda: self.datastore.exists(
                            'peers', ('id', '=', remote_host_uuid)),
                        timeout=30):
                    raise TaskException(
                        errno.EAUTH,
                        'FreeNAS peer creation failed. Check connection to host {0}.'
                        .format(remote))
            finally:
                # The temporary pubkey is removed whether pairing succeeded or not.
                self.dispatcher.call_sync(
                    'peer.freenas.remove_temp_pubkey', pubkey)
        else:
            try:
                if key_auth:
                    # Key flow: use our stored private key; the remote may
                    # need time to install the matching public key, so retry
                    # authentication for up to ~50 seconds.
                    with io.StringIO() as f:
                        f.write(
                            self.configstore.get(
                                'peer.freenas.key.private'))
                        f.seek(0)
                        pkey = RSAKey.from_private_key(f)

                    max_tries = 50
                    while True:
                        try:
                            remote_client.connect(
                                'ws+ssh://freenas@{0}'.format(
                                    wrap_address(remote)),
                                pkey=pkey,
                                port=port)
                            break
                        except AuthenticationException:
                            if max_tries:
                                max_tries -= 1
                                time.sleep(1)
                            else:
                                raise
                else:
                    # Password flow: plain username/password SSH transport.
                    remote_client.connect('ws+ssh://{0}@{1}'.format(
                        username, wrap_address(remote)),
                        port=port,
                        password=password)

                remote_client.login_service('replicator')
            except (AuthenticationException, OSError,
                    ConnectionRefusedError):
                raise TaskException(
                    errno.ECONNABORTED,
                    'Cannot connect to {0}:{1}'.format(remote, port))

        local_host_key, local_pub_key = self.dispatcher.call_sync(
            'peer.freenas.get_ssh_keys')
        remote_host_key, remote_pub_key = remote_client.call_sync(
            'peer.freenas.get_ssh_keys')
        ip_at_remote_side = remote_client.local_address[0]

        remote_hostname = remote_client.call_sync(
            'system.general.get_config')['hostname']

        # Strip the last space-separated field of each host key before storing.
        remote_host_key = remote_host_key.rsplit(' ', 1)[0]
        local_host_key = local_host_key.rsplit(' ', 1)[0]

        if remote_client.call_sync('peer.query', [('id', '=', hostid)]):
            raise TaskException(
                errno.EEXIST,
                'Peer entry of {0} already exists at {1}'.format(
                    hostname, remote))

        peer['credentials'] = {
            '%type': 'freenas-credentials',
            'pubkey': remote_pub_key,
            'hostkey': remote_host_key,
            'port': port,
            'address': remote_hostname
        }

        local_id = remote_client.call_sync('system.info.host_uuid')
        peer['id'] = local_id
        peer['name'] = remote_hostname
        ip = socket.gethostbyname(remote)

        # Create our local entry describing the remote peer.
        created_ids = self.join_subtasks(
            self.run_subtask('peer.freenas.create_local', peer, ip, True))

        # Re-shape `peer` to describe *this* host for the remote side.
        peer['id'] = hostid
        peer['name'] = remote_peer_name
        peer['credentials'] = {
            '%type': 'freenas-credentials',
            'pubkey': local_pub_key,
            'hostkey': local_host_key,
            'port': local_ssh_config['port'],
            'address': hostname
        }

        try:
            call_task_and_check_state(remote_client,
                                      'peer.freenas.create_local', peer,
                                      ip_at_remote_side)
        except TaskException:
            # Roll back the local peer entry if remote creation failed.
            self.datastore.delete('peers', local_id)
            self.dispatcher.dispatch_event('peer.changed', {
                'operation': 'delete',
                'ids': [local_id]
            })
            raise

        return created_ids[0]
    finally:
        remote_client.disconnect()
class Main(object):
    """fnstatd daemon core.

    Owns the dispatcher client connection, the datastore handle, the HDF5
    statistics database and the registry of active data sources.  `main()`
    is the process entry point: it parses arguments, installs signal
    handlers and blocks forever serving statistics requests.
    """

    def __init__(self):
        self.client = None          # dispatcher Client; created in init_dispatcher()
        self.server = None          # InputServer; created in main()
        self.datastore = None       # datastore handle; created in init_datastore()
        self.hdf = None             # PyTables HDF5 file handle
        self.hdf_group = None       # the '/stats' group inside the HDF5 file
        self.config = None          # path to the middleware config file
        self.logger = logging.getLogger('statd')
        self.data_sources = {}      # data source name -> DataSource

    def init_datastore(self):
        """Open the configured datastore; terminate the daemon on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def init_database(self):
        """Open (or create) the HDF5 stats database under the statd directory.

        Ensures the '/stats' group exists and remembers it in self.hdf_group.
        """
        # adding this try/except till system-dataset plugin is added back in in full fidelity
        # just a hack (since that directory's data will not persist)
        # Please remove this when system-dataset plugin is added back in
        try:
            directory = self.client.call_sync('system_dataset.request_directory', 'statd')
        except RpcException:
            directory = '/var/tmp/statd'
            if not os.path.exists(directory):
                os.makedirs(directory)

        self.hdf = tables.open_file(os.path.join(directory, DEFAULT_DBFILE), mode='a')
        if not hasattr(self.hdf.root, 'stats'):
            self.hdf.create_group('/', 'stats')

        self.hdf_group = self.hdf.root.stats

    def request_table(self, name):
        """Return the HDF5 table `name`, creating it on first use.

        Best-effort: any error is logged and None is returned instead of
        propagating (callers treat a missing table as "no data").
        """
        try:
            if hasattr(self.hdf_group, name):
                return getattr(self.hdf_group, name)

            return self.hdf.create_table(self.hdf_group, name, DataPoint, name)
        except Exception as e:
            self.logger.error(str(e))

    def init_alert_config(self, name):
        """Load the alert config for statistic `name` (falling back to
        'default') and register its too-high/too-low alert types."""
        config_name = name if self.datastore.exists('statd.alerts', ('id', '=', name)) else 'default'
        alert_config = self.datastore.get_by_id('statd.alerts', config_name)
        self.client.call_sync(
            'alert.register_alert',
            'stat.{0}.too_high'.format(name),
            '{0} statistic value is too high'.format(name)
        )
        self.client.call_sync(
            'alert.register_alert',
            'stat.{0}.too_low'.format(name),
            '{0} statistic value is too low'.format(name)
        )
        return alert_config

    def get_data_source(self, name):
        """Return the DataSource for `name`, lazily creating and announcing
        it (event type registration) on first access."""
        # Plain dict membership test -- no need to materialize the key list.
        if name not in self.data_sources:
            config = DataSourceConfig(self.datastore, name)
            alert_config = self.init_alert_config(name)
            ds = DataSource(self, name, config, alert_config)
            self.data_sources[name] = ds
            self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(name))

        return self.data_sources[name]

    def register_schemas(self):
        """Publish the 'stat' and 'stat-alert' JSON schemas to the dispatcher."""
        self.client.register_schema('stat', {
            'type': 'object',
            'additionalProperties': True,
            'properties': {
                'name': {'type': 'string'},
                'last_value': {'type': ['integer', 'number', 'null']},
                'alerts': {'$ref': 'stat-alert'},
            }
        })

        self.client.register_schema('stat-alert', {
            'type': 'object',
            'additionalProperties': True,
            'properties': {
                'alert_high': {'type': ['integer', 'number', 'null']},
                'alert_high_enabled': {'type': 'boolean'},
                'alert_low': {'type': ['integer', 'number', 'null']},
                'alert_low_enabled': {'type': 'boolean'}
            }
        })

    def connect(self):
        """Connect to the dispatcher and (re)register all statd services.

        Retries forever with a 1 second delay; also re-announces the pulse
        event type of every already-known data source, which makes this
        safe to call again after a connection loss.
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('statd')
                self.client.enable_server()
                self.client.register_service('statd.output', OutputService(self))
                self.client.register_service('statd.alert', AlertService(self))
                self.client.register_service('statd.debug', DebugService(gevent=True))
                self.client.resume_service('statd.output')
                self.client.resume_service('statd.alert')
                self.client.resume_service('statd.debug')
                # Iterate the dict directly; nothing mutates it in this loop.
                for name in self.data_sources:
                    self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(name))

                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client, wire the auto-reconnect handler and
        establish the initial connection."""
        def on_error(reason, **kwargs):
            # Reconnect transparently when the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.use_bursts = True
        self.client.on_error(on_error)
        self.connect()

    def die(self):
        """Shut down cleanly and exit the process.

        Guarded against partially-initialized state: a signal may arrive
        before main() has created the server or the client.
        """
        self.logger.warning('Exiting')
        if self.server is not None:
            self.server.stop()

        if self.client is not None:
            self.client.disconnect()

        sys.exit(0)

    def dispatcher_error(self, error):
        # Any unrecoverable dispatcher error terminates the daemon.
        self.die()

    def main(self):
        """Daemon entry point: parse arguments, set up logging, signals and
        all subsystems, then block serving requests forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/fnstatd.log', 'DEBUG')
        setproctitle.setproctitle('fnstatd')

        # Signal handlers
        gevent.signal(signal.SIGQUIT, self.die)
        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGINT, self.die)

        self.server = InputServer(self)
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_database()
        self.register_schemas()
        self.server.start()
        self.logger.info('Started')
        self.client.wait_forever()