class FileWrapRemote(FileWrapBase):
    """Remote file wrapper backed by a FreeNAS dispatcher connection.

    Connects to the remote host over a websocket dispatcher link and
    lists/describes filesystem entries via the 'filesystem' RPC service.
    """

    # FreeNAS 'filesystem.list_dir' type name -> FileType value.
    # Hoisted to a class-level constant: the original rebuilt this dict (and
    # re-assigned it to the instance) on every _map_type() call.
    _FREENAS_MAPPINGS = {
        'DIRECTORY': FileType.dir,
        'FILE': FileType.file,
    }

    def __init__(self, uri, type=None, username='******', password=''):
        super(FileWrapRemote, self).__init__(uri, type)
        self.client = Client()
        self.username = username
        self.password = password

    def readdir(self):
        """Yield a FileWrapRemote for every entry in this directory.

        Raises:
            NotADirectoryError: when this wrapper does not point at a directory.
        """
        if not self.is_dir:
            raise NotADirectoryError

        self.client.connect('ws://' + self.hostname)
        self.client.login_user(self.username, self.password)
        for e in self.client.call_sync('filesystem.list_dir', self.path):
            yield FileWrapRemote(
                PurePath(self.uri).joinpath(e['name']),
                type=e['type'],
                username=self.username,
                password=self.password)

    def _map_type(self, val):
        """Translate a FreeNAS type name into a FileType member."""
        # Keep the instance attribute the original code exposed, in case any
        # external caller relied on it existing after a _map_type() call.
        self._freenas_mappings = self._FREENAS_MAPPINGS
        return self._FREENAS_MAPPINGS[val]

    def _get_type(self):
        # TODO: query the remote host for the real node type (the original had
        # the connect/login calls commented out); currently always reports a
        # directory.
        return self._map_type('DIRECTORY')

    def _get_parent(self):
        """Return a wrapper for the parent directory, sharing credentials."""
        return FileWrapRemote(
            PurePath(self.uri).parent.as_posix(),
            username=self.username,
            password=self.password)
def get_replication_client(dispatcher, remote):
    """Open an authenticated 'ws+ssh' dispatcher connection to a replication peer.

    Looks up the 'replication' peer entry for *remote*, authenticates with the
    local replication private key against the peer's recorded host key and
    returns a logged-in Client.

    Raises:
        TaskException: on unknown peer, authentication failure, connection
            failure, or invalid host key.
    """
    host = dispatcher.call_sync(
        'peer.query',
        [('address', '=', remote), ('type', '=', 'replication')],
        {'single': True}
    )
    if not host:
        raise TaskException(errno.ENOENT, 'There are no known keys to connect to {0}'.format(remote))

    with open('/etc/replication/key') as f:
        pkey = RSAKey.from_private_key(f)

    credentials = host['credentials']

    try:
        client = Client()
        # The peer's host key goes into a temp file because Client.connect()
        # takes a host key *file* path, not the key material itself.
        with tempfile.NamedTemporaryFile('w') as host_key_file:
            host_key_file.write(credentials['hostkey'])
            host_key_file.flush()
            client.connect(
                'ws+ssh://replication@{0}'.format(remote),
                port=credentials['port'],
                host_key_file=host_key_file.name,
                pkey=pkey
            )

        client.login_service('replicator')
        return client
    except (AuthenticationException, SSHException):
        raise TaskException(errno.EAUTH, 'Cannot connect to {0}'.format(remote))
    except (OSError, ConnectionRefusedError):
        # NOTE(review): ConnectionRefusedError is already a subclass of
        # OSError, and in Python 3 IOError is an alias of OSError -- so the
        # IOError handler below is unreachable and invalid host keys surface
        # as ECONNREFUSED instead of EINVAL. Confirm intended mapping.
        raise TaskException(errno.ECONNREFUSED, 'Cannot connect to {0}'.format(remote))
    except IOError:
        raise TaskException(errno.EINVAL, 'Provided host key is not valid')
def process_request(self, req, resp):
    """Falcon middleware hook: authenticate the request via HTTP Basic auth.

    On success a logged-in dispatcher Client is stored in
    req.context['client'].

    Raises:
        falcon.HTTPUnauthorized: when the header is missing/malformed or the
            credentials are rejected.
    """
    # Do not require auth to access index
    if req.relative_uri == '/':
        return

    auth = req.get_header("Authorization")
    if auth is None or not auth.startswith('Basic '):
        raise falcon.HTTPUnauthorized(
            'Authorization token required',
            'Provide a Basic Authentication header',
            ['Basic realm="FreeNAS"'],
        )

    try:
        username, password = base64.b64decode(auth[6:]).decode('utf8').split(':', 1)
    except (binascii.Error, UnicodeDecodeError, ValueError):
        # binascii.Error: malformed base64. UnicodeDecodeError: payload is not
        # UTF-8. ValueError: no ':' separator in the decoded credentials.
        # The original caught only binascii.Error, so the other two escaped
        # the middleware as a 500 instead of a 401.
        raise falcon.HTTPUnauthorized(
            'Invalid Authorization token',
            'Provide a valid Basic Authentication header',
            ['Basic realm="FreeNAS"'],
        )

    try:
        client = Client()
        client.connect('unix:')
        client.login_user(username, password, check_password=True)
        req.context['client'] = client
    except RpcException as e:
        if e.code == errno.EACCES:
            raise falcon.HTTPUnauthorized(
                'Invalid credentials',
                'Verify your credentials and try again.',
                ['Basic realm="FreeNAS"'],
            )

        raise falcon.HTTPUnauthorized('Unknown authentication error', str(e), ['Basic realm="FreeNAS"'])
class SyslogProvider(Provider):
    """RPC provider that proxies log queries to the logd daemon."""

    def initialize(self, context):
        # Dedicated client to logd's local control socket.
        self.client = Client()
        self.client.connect('unix:///var/run/logd.sock')

    @generator
    def query(self, filter=None, params=None):
        # Forward the query verbatim; logd applies filter/params itself.
        return self.client.call_sync('logd.logging.query', filter, params)
def setUp(self):
    """Connect and authenticate a dispatcher client against the test host.

    Requires self.context to be injected by the test harness before the test
    runs; also loads the remote schema definitions used by schema assertions.
    """
    super(BaseTestCase, self).setUp()
    assert self.context is not None
    self.ssh_client = self.context.ssh_client
    self.client = Client()
    self.client.connect('ws://{0}'.format(self.context.hostname))
    self.client.login_user(self.context.username, self.context.password)
    load_schema_definitions(self.client)
def init_dispatcher(self):
    """Create the dispatcher client, install a reconnect-on-error hook, connect."""
    def handle_client_error(reason, **kwargs):
        # Reconnect whenever the link drops or we get logged out.
        if reason not in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
            return
        self.logger.warning('Connection to dispatcher lost')
        self.connect()

    client = Client()
    client.on_error(handle_client_error)
    self.client = client
    self.connect()
def main(name, *args):
    """Forward a UPS daemon notification to the dispatcher as an event.

    The notification type is taken from the NOTIFYTYPE environment variable
    set by the UPS monitoring daemon.
    """
    payload = {
        'name': name,
        'type': os.environ['NOTIFYTYPE'],
    }

    connection = Client()
    connection.connect('127.0.0.1')
    connection.login_service('ups')
    connection.emit_event('service.ups.signal', payload)
    connection.disconnect()
def setUp(self):
    """Connect an authenticated dispatcher client for the test run.

    Host and credentials come from the TESTHOST/TESTUSER/TESTPWD environment
    variables, defaulting to root@127.0.0.1 with an empty password.
    """
    # The original wrapped this in `try: ... except: raise` -- a no-op wrapper
    # built around a bare except; it has been removed.
    self.conn = Client()
    self.conn.event_callback = self.on_event
    self.conn.connect(os.getenv('TESTHOST', '127.0.0.1'))
    self.conn.login_user(os.getenv('TESTUSER', 'root'), os.getenv('TESTPWD', ''), timeout=self.task_timeout)
    self.conn.subscribe_events('*')
class Context(object):
    """Collectd plugin state: dispatcher client plus entity subscribers."""

    def __init__(self, *args, **kwargs):
        # client: freenas dispatcher Client, created in init_dispatcher().
        self.client = None
        # entity_subscribers: entity name -> EntitySubscriber.
        self.entity_subscribers = {}

    def start_entity_subscribers(self):
        # (Re)create a subscriber for every tracked entity, stopping any
        # existing one first so reconnects do not leak subscriptions.
        for i in ENTITY_SUBSCRIBERS:
            if i in self.entity_subscribers:
                self.entity_subscribers[i].stop()
                del self.entity_subscribers[i]

            e = EntitySubscriber(self.client, i)
            e.start()
            self.entity_subscribers[i] = e

    def wait_entity_subscribers(self):
        # Block until every subscriber has received its initial data set.
        for i in self.entity_subscribers.values():
            i.wait_ready()

    def connect(self):
        # Retry forever: the plugin should outlive dispatcher restarts.
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('collectd_{0}'.format(PLUGIN_NAME))
                # Enable streaming responses -- needed by EntitySubscriber for
                # reliable performance.
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.start_entity_subscribers()
                self.wait_entity_subscribers()
                return
            except (OSError, RpcException) as err:
                collectd.warning(
                    "{0} collectd plugin could not connect to server retrying in 5 seconds"
                    .format(PLUGIN_NAME))
                time.sleep(5)

    def connection_error(self, event, **kwargs):
        # on_error hook: reconnect on connection loss or logout.
        if event in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
            collectd.info(
                '{0} collectd plugin connection to dispatcher lost'.format(
                    PLUGIN_NAME))
            self.connect()

    def init_dispatcher(self):
        self.client = Client()
        self.client.on_error(self.connection_error)
        self.connect()

    def disk_temps(self):
        """Yield (disk name, temperature) for disks reporting a SMART temperature."""
        for disk in self.entity_subscribers['disk'].query(
                ('status.smart_info.temperature', '!=', None)):
            yield (disk['name'], disk['status']['smart_info']['temperature'])
def main(*args):
    """sendmail(8)-compatible shim: read a message from stdin and hand it to
    the dispatcher's 'mail.send' service.

    Recipients come from the command line, or from the message's To: header
    when -t is given; falls back to 'root' when none are found.
    """
    connection = Client()
    connection.connect('127.0.0.1')
    connection.login_service('smtp')

    parser = argparse.ArgumentParser(description='Process email')
    parser.add_argument('-i', dest='strip_leading_dot', action='store_false',
                        default=True, help='see sendmail(8) -i')
    parser.add_argument('-t', dest='parse_recipients', action='store_true',
                        default=False, help='parse recipients from message')
    parser.usage = ' '.join(parser.format_usage().split(' ')[1:-1])
    parser.usage += ' [email_addr|user] ..'
    args, to_addrs = parser.parse_known_args()
    if not to_addrs and not args.parse_recipients:
        parser.exit(message=parser.format_usage())

    msg = sys.stdin.read()
    em_parser = email.parser.Parser()
    em = em_parser.parsestr(msg)
    if args.parse_recipients:
        # Strip away the comma based delimiters and whitespace.
        # BUG FIX: map() returns a lazy iterator in Python 3, so the original
        # `to_addrs[0]` check below raised TypeError; materialize the list.
        to_addrs = list(map(str.strip, em.get('To').split(',')))
        if not to_addrs or not to_addrs[0]:
            to_addrs = ['root']

    margs = {}
    margs['extra_headers'] = dict(em)
    margs['extra_headers'].update({
        'X-Mailer': 'FreeNAS',
        'X-FreeNAS-Host': socket.gethostname(),
    })
    margs['subject'] = em.get('Subject')

    if em.is_multipart():
        # Materialized for the same reason as to_addrs above: a lazy filter
        # object is unlikely to survive RPC serialization.
        margs['attachments'] = list(filter(
            lambda part: part.get_content_maintype() != 'multipart',
            em.walk()
        ))
        margs['message'] = (
            'This is a MIME formatted message. If you see '
            'this text it means that your email software '
            'does not support MIME formatted messages.')
    else:
        margs['message'] = ''.join(email.iterators.body_line_iterator(em))

    if to_addrs:
        margs['to'] = to_addrs

    connection.call_sync('mail.send', margs)
    connection.disconnect()
class Context(object):
    """Collectd plugin state: dispatcher client plus entity subscribers."""

    def __init__(self, *args, **kwargs):
        # client: freenas dispatcher Client, created in init_dispatcher().
        self.client = None
        # entity_subscribers: entity name -> EntitySubscriber.
        self.entity_subscribers = {}

    def start_entity_subscribers(self):
        # (Re)create a subscriber for every tracked entity, stopping any
        # existing one first so reconnects do not leak subscriptions.
        for i in ENTITY_SUBSCRIBERS:
            if i in self.entity_subscribers:
                self.entity_subscribers[i].stop()
                del self.entity_subscribers[i]

            e = EntitySubscriber(self.client, i)
            e.start()
            self.entity_subscribers[i] = e

    def wait_entity_subscribers(self):
        # Block until every subscriber has received its initial data set.
        for i in self.entity_subscribers.values():
            i.wait_ready()

    def connect(self):
        # Retry forever: the plugin should outlive dispatcher restarts.
        while True:
            try:
                self.client.connect("unix:")
                self.client.login_service("collectd_{0}".format(PLUGIN_NAME))
                # Enable streaming responses -- needed by EntitySubscriber for
                # reliable performance.
                self.client.call_sync("management.enable_features", ["streaming_responses"])
                self.start_entity_subscribers()
                self.wait_entity_subscribers()
                return
            except (OSError, RpcException) as err:
                collectd.warning(
                    "{0} collectd plugin could not connect to server retrying in 5 seconds".format(PLUGIN_NAME)
                )
                time.sleep(5)

    def connection_error(self, event, **kwargs):
        # on_error hook: reconnect on connection loss or logout.
        if event in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
            collectd.info("{0} collectd plugin connection to dispatcher lost".format(PLUGIN_NAME))
            self.connect()

    def init_dispatcher(self):
        self.client = Client()
        self.client.on_error(self.connection_error)
        self.connect()

    def disk_temps(self):
        """Yield (disk name, temperature) for disks reporting a SMART temperature."""
        for disk in self.entity_subscribers["disk"].query(("status.smart_info.temperature", "!=", None)):
            yield (disk["name"], disk["status"]["smart_info"]["temperature"])
def __init__(self):
    """Initialize connection bookkeeping; no I/O happens here."""
    self.logger = logging.getLogger(self.__class__.__name__)
    self.msock = msock.client.Client()   # multiplexed transport socket to the support proxy
    self.msock.on_closed = self.on_msock_close
    self.rpc_fd = -1                     # msock channel fd carrying RPC traffic
    self.connection_id = None            # uuid assigned on first connect
    self.jobs = []
    self.state = ConnectionState.OFFLINE
    self.config = None
    self.keepalive = None                # keepalive thread -- presumably started elsewhere; verify
    self.connected_at = None             # datetime of last successful login
    self.cv = Condition()                # guards state transitions
    self.rpc = RpcContext()
    self.client = Client()               # RPC client over the msock channel
    self.server = Server()
    self.middleware_endpoint = None
def connect_keepalive(self):
    """Maintain the support-proxy connection.

    Connects, then blocks until the link is lost and retries every 10
    seconds, forever; returns only when the state is explicitly set to
    OFFLINE.
    """
    while True:
        try:
            if not self.connection_id:
                self.connection_id = uuid.uuid4()

            self.msock.connect(SUPPORT_PROXY_ADDRESS)
            self.logger.info("Connecting to {0}".format(SUPPORT_PROXY_ADDRESS))
            self.rpc_fd = self.msock.create_channel(0)
            time.sleep(1)  # FIXME
            self.client = Client()
            self.client.connect("fd://", fobj=self.rpc_fd)
            self.client.channel_serializer = MSockChannelSerializer(self.msock)
            self.client.standalone_server = True
            self.client.enable_server()
            self.client.register_service("debug", DebugService(self))
            self.client.call_sync(
                "server.login", str(self.connection_id), socket.gethostname(), get_version(), "none"
            )
            self.set_state(ConnectionState.CONNECTED)
        except BaseException as err:
            # NOTE(review): BaseException also catches KeyboardInterrupt and
            # SystemExit here -- confirm that is intended.
            self.logger.warning("Failed to initiate support connection: {0}".format(err), exc_info=True)
            self.msock.disconnect()
        else:
            self.connected_at = datetime.now()
            with self.cv:
                # Sleep until the connection drops (LOST) or we are told to stop.
                self.cv.wait_for(lambda: self.state in (ConnectionState.LOST, ConnectionState.OFFLINE))
                if self.state == ConnectionState.OFFLINE:
                    return

        self.logger.warning("Support connection lost, retrying in 10 seconds")
        time.sleep(10)
def test_unix_server(self):
    """End-to-end check of Server/Client RPC over a unix domain socket."""
    sockpath = os.path.join(os.getcwd(), 'test.{0}.sock'.format(os.getpid()))
    sockurl = 'unix://' + sockpath

    context = RpcContext()
    context.register_service('test', TestService)

    server = Server()
    server.rpc = context
    server.start(sockurl)
    threading.Thread(target=server.serve_forever, daemon=True).start()

    try:
        # Spin until server is ready -- but fail the test instead of hanging
        # forever if the socket never appears (the original looped with no
        # deadline).
        deadline = time.monotonic() + 10
        while not os.path.exists(sockpath):
            if time.monotonic() > deadline:
                self.fail('server socket {0} was not created'.format(sockpath))
            time.sleep(0.1)

        client = Client()
        client.connect(sockurl)
        self.assertTrue(client.connected)
        self.assertEqual(client.call_sync('test.hello', 'freenas'), 'Hello World, freenas')
        client.disconnect()
    finally:
        # Always clean up, even when an assertion fails mid-test.
        server.close()
        if os.path.exists(sockpath):
            os.unlink(sockpath)
def main(self):
    """Task executor entry point.

    Expects one CLI argument (the check-in key). Registers a task proxy
    service with the dispatcher, checks in, then serves tasks from self.task
    forever, reporting FINISHED/FAILED status for each one.
    """
    if len(sys.argv) != 2:
        print("Invalid number of arguments", file=sys.stderr)
        sys.exit(errno.EINVAL)

    key = sys.argv[1]
    logging.basicConfig(level=logging.DEBUG)

    self.datastore = get_default_datastore()
    self.configstore = ConfigStore(self.datastore)

    self.conn = Client()
    self.conn.connect('unix:')
    self.conn.login_service('task.{0}'.format(os.getpid()))
    self.conn.enable_server()
    self.conn.rpc.register_service_instance('taskproxy', self.service)
    self.conn.call_sync('task.checkin', key)
    setproctitle.setproctitle('task executor (idle)')

    while True:
        try:
            task = self.task.get()
            setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

            if task['debugger']:
                # Attach to a waiting pydev remote debugger before running.
                sys.path.append('/usr/local/lib/dispatcher/pydev')
                import pydevd
                host, port = task['debugger']
                pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

            module = imp.load_source('plugin', task['filename'])
            setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

            try:
                self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
                self.instance.configstore = self.configstore
                self.running.set()
                result = self.instance.run(*task['args'])
            except BaseException as err:
                # Task failures (including SystemExit) are reported, not fatal
                # to the executor.
                print("Task exception: {0}".format(str(err)), file=sys.stderr)
                traceback.print_exc(file=sys.stderr)
                self.put_status('FAILED', exception=err)
            else:
                self.put_status('FINISHED', result=result)
        except RpcException as err:
            print("RPC failed: {0}".format(str(err)), file=sys.stderr)
            sys.exit(errno.EBADMSG)
        except socket.error as err:
            print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
            sys.exit(errno.ETIMEDOUT)

        if task['debugger']:
            import pydevd
            pydevd.stoptrace()

        setproctitle.setproctitle('task executor (idle)')
def setUp(self):
    """Connect an authenticated dispatcher client for the test run.

    Host and credentials come from the TESTHOST/TESTUSER/TESTPWD environment
    variables, defaulting to root@127.0.0.1 with an empty password.
    """
    # The original wrapped this in `try: ... except: raise` -- a no-op wrapper
    # built around a bare except; it has been removed.
    self.conn = Client()
    self.conn.event_callback = self.on_event
    self.conn.connect(os.getenv('TESTHOST', '127.0.0.1'))
    self.conn.login_user(os.getenv('TESTUSER', 'root'), os.getenv('TESTPWD', ''), timeout=self.task_timeout)
    self.conn.subscribe_events('*')
def get_freenas_peer_client(parent, remote):
    """Open an authenticated 'ws+ssh' dispatcher connection to a FreeNAS peer.

    Resolves *remote*, finds the matching 'freenas' peer entry (by name or
    resolved address), authenticates with the locally stored private key
    against the peer's recorded host key, and returns a logged-in Client.

    Raises:
        TaskException: on resolution, lookup or connection failure.
    """
    try:
        address = socket.gethostbyname(remote)
    except socket.error as err:
        raise TaskException(err.errno, '{0} is unreachable'.format(remote))

    host = parent.dispatcher.call_sync(
        'peer.query', [
            ('or', [
                ('credentials.address', '=', remote),
                ('credentials.address', '=', address),
            ]),
            ('type', '=', 'freenas')
        ],
        {'single': True}
    )
    if not host:
        raise TaskException(errno.ENOENT, 'There are no known keys to connect to {0}'.format(remote))

    # The private key is stored as a string; RSAKey wants a file-like object.
    with io.StringIO() as f:
        f.write(parent.configstore.get('peer.freenas.key.private'))
        f.seek(0)
        pkey = RSAKey.from_private_key(f)

    credentials = host['credentials']

    try:
        client = Client()
        # known_hosts-style temp file ("<host> <key>") as expected by the
        # ssh transport.
        with tempfile.NamedTemporaryFile('w') as host_key_file:
            host_key_file.write(remote + ' ' + credentials['hostkey'])
            host_key_file.flush()
            client.connect(
                'ws+ssh://freenas@{0}'.format(wrap_address(remote)),
                port=credentials['port'],
                host_key_file=host_key_file.name,
                pkey=pkey
            )

        client.login_service('replicator')
        return client
    except (AuthenticationException, SSHException):
        raise TaskException(errno.EAUTH, 'Cannot connect to {0}'.format(remote))
    except OSError as err:
        raise TaskException(errno.ECONNREFUSED, 'Cannot connect to {0}: {1}'.format(remote, err))
def main(*args):
    """sendmail(8)-compatible shim: read a message from stdin and hand it to
    the dispatcher's 'mail.send' service.

    Recipients come from the command line, or from the message's To: header
    when -t is given; falls back to 'root' when none are found.
    """
    connection = Client()
    connection.connect("127.0.0.1")
    connection.login_service("smtp")

    parser = argparse.ArgumentParser(description="Process email")
    parser.add_argument("-i", dest="strip_leading_dot", action="store_false",
                        default=True, help="see sendmail(8) -i")
    parser.add_argument(
        "-t", dest="parse_recipients", action="store_true",
        default=False, help="parse recipients from message"
    )
    parser.usage = " ".join(parser.format_usage().split(" ")[1:-1])
    parser.usage += " [email_addr|user] .."
    args, to_addrs = parser.parse_known_args()
    if not to_addrs and not args.parse_recipients:
        parser.exit(message=parser.format_usage())

    msg = sys.stdin.read()
    em_parser = email.parser.Parser()
    em = em_parser.parsestr(msg)
    if args.parse_recipients:
        # Strip away the comma based delimiters and whitespace.
        # BUG FIX: map() returns a lazy iterator in Python 3, so the original
        # `to_addrs[0]` check below raised TypeError; materialize the list.
        to_addrs = list(map(str.strip, em.get("To").split(",")))
        if not to_addrs or not to_addrs[0]:
            to_addrs = ["root"]

    margs = {}
    margs["extra_headers"] = dict(em)
    margs["extra_headers"].update({"X-Mailer": "FreeNAS", "X-FreeNAS-Host": socket.gethostname()})
    margs["subject"] = em.get("Subject")

    if em.is_multipart():
        # Materialized for the same reason as to_addrs above: a lazy filter
        # object is unlikely to survive RPC serialization.
        margs["attachments"] = list(filter(lambda part: part.get_content_maintype() != "multipart", em.walk()))
        margs["message"] = (
            "This is a MIME formatted message. If you see "
            "this text it means that your email software "
            "does not support MIME formatted messages."
        )
    else:
        margs["message"] = "".join(email.iterators.body_line_iterator(em))

    if to_addrs:
        margs["to"] = to_addrs

    connection.call_sync("mail.send", margs)
    connection.disconnect()
def connect_keepalive(self):
    """Maintain the support-proxy connection.

    Connects, then blocks until the link is lost and retries every 10
    seconds, forever; returns only when the state is explicitly set to
    OFFLINE.
    """
    while True:
        try:
            if not self.connection_id:
                self.connection_id = uuid.uuid4()

            self.msock.connect(SUPPORT_PROXY_ADDRESS)
            self.logger.info(
                'Connecting to {0}'.format(SUPPORT_PROXY_ADDRESS))
            self.rpc_fd = self.msock.create_channel(0)
            time.sleep(1)  # FIXME
            self.client = Client()
            self.client.connect('fd://', fobj=self.rpc_fd)
            self.client.channel_serializer = MSockChannelSerializer(
                self.msock)
            self.client.standalone_server = True
            self.client.enable_server()
            self.client.register_service('debug', DebugService(self))
            self.client.call_sync('server.login', str(self.connection_id),
                                  socket.gethostname(), get_version(),
                                  'none')
            self.set_state(ConnectionState.CONNECTED)
        except BaseException as err:
            # NOTE(review): BaseException also catches KeyboardInterrupt and
            # SystemExit here -- confirm that is intended.
            self.logger.warning(
                'Failed to initiate support connection: {0}'.format(err),
                exc_info=True)
            self.msock.disconnect()
        else:
            self.connected_at = datetime.now()
            with self.cv:
                # Sleep until the connection drops (LOST) or we are told to stop.
                self.cv.wait_for(lambda: self.state in (
                    ConnectionState.LOST, ConnectionState.OFFLINE))
                if self.state == ConnectionState.OFFLINE:
                    return

        self.logger.warning(
            'Support connection lost, retrying in 10 seconds')
        time.sleep(10)
def __init__(self):
    """Initialize CLI context state and register it as the global config instance."""
    self.hostname = None
    self.connection = Client()              # dispatcher connection
    self.ml = None                          # main loop / line reader -- presumably; verify
    self.logger = logging.getLogger('cli')
    self.plugin_dirs = []
    self.task_callbacks = {}                # presumably task id -> callback; verify against callers
    self.plugins = {}
    self.variables = VariableStore()
    self.root_ns = RootNamespace('')
    self.event_masks = ['*']                # event name globs subscribed to
    self.event_divert = False               # when True, events are routed into event_queue
    self.event_queue = six.moves.queue.Queue()
    self.keepalive_timer = None
    self.argparse_parser = None
    config.instance = self                  # module-global singleton registration
class LogdLogHandler(logging.Handler):
    """logging.Handler that forwards records to the logd daemon over RPC."""

    def __init__(self, level=logging.NOTSET, address=None, ident=None):
        super(LogdLogHandler, self).__init__(level)
        self.address = address or 'unix:///var/run/logd.sock'
        self.ident = ident or os.path.basename(sys.executable)
        self.client = Client()
        self.client.connect(self.address)

    def emit(self, record):
        """Translate *record* into a logd item and push it asynchronously."""
        try:
            if not self.client.connected:
                # Lazily reconnect: logd may have restarted since the handler
                # was created.
                self.client.connect(self.address)

            item = {
                'timestamp': datetime.utcfromtimestamp(record.created),
                'priority': PRIORITY_MAP.get(record.levelno, 'INFO'),
                'message': record.getMessage(),
                'identifier': self.ident,
                'thread': record.threadName,
                'tid': record.thread,
                'module_name': record.name,
                'source_language': 'python',
                'source_file': record.pathname,
                'source_line': record.lineno,
            }

            if record.exc_info:
                item['exception'] = ''.join(
                    traceback.format_exception(*record.exc_info))

            self.client.call_async('logd.logging.push', None, item)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; a logging handler should only
            # trap ordinary errors and route them to handleError().
            self.handleError(record)

    def close(self):
        """Disconnect from logd, then run base-class cleanup."""
        super(LogdLogHandler, self).close()
        self.client.disconnect()
class LogdLogHandler(logging.Handler):
    """logging.Handler that forwards records to the logd daemon over RPC."""

    def __init__(self, level=logging.NOTSET, address=None, ident=None):
        super(LogdLogHandler, self).__init__(level)
        self.address = address or 'unix:///var/run/logd.sock'
        self.ident = ident or os.path.basename(sys.executable)
        self.client = Client()
        self.client.connect(self.address)

    def emit(self, record):
        """Translate *record* into a logd item and push it asynchronously."""
        try:
            if not self.client.connected:
                # Lazily reconnect: logd may have restarted since the handler
                # was created.
                self.client.connect(self.address)

            item = {
                'timestamp': datetime.utcfromtimestamp(record.created),
                'priority': PRIORITY_MAP.get(record.levelno, 'INFO'),
                'message': record.getMessage(),
                'identifier': self.ident,
                'thread': record.threadName,
                'tid': record.thread,
                'module_name': record.name,
                'source_language': 'python',
                'source_file': record.pathname,
                'source_line': record.lineno,
            }

            if record.exc_info:
                item['exception'] = ''.join(traceback.format_exception(*record.exc_info))

            self.client.call_async('logd.logging.push', None, item)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; a logging handler should only
            # trap ordinary errors and route them to handleError().
            self.handleError(record)

    def close(self):
        """Disconnect from logd, then run base-class cleanup."""
        super(LogdLogHandler, self).close()
        self.client.disconnect()
class BaseTestCase(unittest.TestCase):
    """Base class for integration tests run against a live FreeNAS host.

    Subclasses get a connected dispatcher client (self.client), an ssh client
    (self.ssh_client) and schema-aware assertion helpers. The test harness
    must set self.context before setUp runs.
    """

    def __init__(self, methodName):
        super(BaseTestCase, self).__init__(methodName)
        self.context = None  # injected by the test runner before setUp

    def setUp(self):
        super(BaseTestCase, self).setUp()
        assert self.context is not None
        self.ssh_client = self.context.ssh_client
        self.client = Client()
        self.client.connect('ws://{0}'.format(self.context.hostname))
        self.client.login_user(self.context.username, self.context.password)
        load_schema_definitions(self.client)

    def tearDown(self):
        self.client.disconnect()

    def ssh_exec(self, command, output=False):
        """Run *command* on the remote host; return exit code, plus stdout and
        stderr bytes when *output* is true."""
        _, stdout, stderr = self.ssh_client.exec_command(command)
        exitcode = stdout.channel.recv_exit_status()
        if output:
            return exitcode, stdout.read(), stderr.read()

        return exitcode

    def get_params_schema(self, method):
        return get_methods(self.client, method).get('params-schema')

    def get_result_schema(self, method):
        return get_methods(self.client, method).get('results-schema')

    def assertConformsToSchema(self, obj, schema, strict=False):
        """Assert *obj* validates against the given schema document."""
        errors = verify_schema(schema, obj, strict)
        if errors:
            raise AssertionError(
                'Object {0} does not match {1} schema. Errors: {2}'.format(
                    obj, schema, errors))

    def assertConformsToNamedSchema(self, obj, schema_name, strict=False):
        """Like assertConformsToSchema, but looks the schema up by name first."""
        schema = get_schema(schema_name)
        if not schema:
            raise AssertionError('Schema {0} is unknown'.format(schema_name))

        self.assertConformsToSchema(obj, schema, strict)
class BaseTestCase(unittest.TestCase):
    """Base class for integration tests run against a live FreeNAS host.

    Subclasses get a connected dispatcher client (self.client), an ssh client
    (self.ssh_client) and schema-aware assertion helpers. The test harness
    must set self.context before setUp runs.
    """

    def __init__(self, methodName):
        super(BaseTestCase, self).__init__(methodName)
        self.context = None  # injected by the test runner before setUp

    def setUp(self):
        super(BaseTestCase, self).setUp()
        assert self.context is not None
        self.ssh_client = self.context.ssh_client
        self.client = Client()
        self.client.connect('ws://{0}'.format(self.context.hostname))
        self.client.login_user(self.context.username, self.context.password)
        load_schema_definitions(self.client)

    def tearDown(self):
        self.client.disconnect()

    def ssh_exec(self, command, output=False):
        """Run *command* on the remote host; return exit code, plus stdout and
        stderr bytes when *output* is true."""
        _, stdout, stderr = self.ssh_client.exec_command(command)
        exitcode = stdout.channel.recv_exit_status()
        if output:
            return exitcode, stdout.read(), stderr.read()

        return exitcode

    def get_params_schema(self, method):
        return get_methods(self.client, method).get('params-schema')

    def get_result_schema(self, method):
        return get_methods(self.client, method).get('results-schema')

    def assertConformsToSchema(self, obj, schema, strict=False):
        """Assert *obj* validates against the given schema document."""
        errors = verify_schema(schema, obj, strict)
        if errors:
            raise AssertionError('Object {0} does not match {1} schema. Errors: {2}'.format(obj, schema, errors))

    def assertConformsToNamedSchema(self, obj, schema_name, strict=False):
        """Like assertConformsToSchema, but looks the schema up by name first."""
        schema = get_schema(schema_name)
        if not schema:
            raise AssertionError('Schema {0} is unknown'.format(schema_name))

        self.assertConformsToSchema(obj, schema, strict)
def process_request(self, req, resp):
    """Falcon middleware hook: authenticate the request via HTTP Basic auth.

    On success a logged-in dispatcher Client is stored in
    req.context['client'].

    Raises:
        falcon.HTTPUnauthorized: when the header is missing/malformed or the
            credentials are rejected.
    """
    # Do not require auth to access index
    if req.relative_uri == '/':
        return

    auth = req.get_header("Authorization")
    if auth is None or not auth.startswith('Basic '):
        raise falcon.HTTPUnauthorized(
            'Authorization token required',
            'Provide a Basic Authentication header',
            ['Basic realm="FreeNAS"'],
        )

    try:
        username, password = base64.b64decode(
            auth[6:]).decode('utf8').split(':', 1)
    except (binascii.Error, UnicodeDecodeError, ValueError):
        # binascii.Error: malformed base64. UnicodeDecodeError: payload is not
        # UTF-8. ValueError: no ':' separator in the decoded credentials.
        # The original caught only binascii.Error, so the other two escaped
        # the middleware as a 500 instead of a 401.
        raise falcon.HTTPUnauthorized(
            'Invalid Authorization token',
            'Provide a valid Basic Authentication header',
            ['Basic realm="FreeNAS"'],
        )

    try:
        client = Client()
        client.connect('unix:')
        client.login_user(username, password, check_password=True)
        req.context['client'] = client
    except RpcException as e:
        if e.code == errno.EACCES:
            raise falcon.HTTPUnauthorized(
                'Invalid credentials',
                'Verify your credentials and try again.',
                ['Basic realm="FreeNAS"'],
            )

        raise falcon.HTTPUnauthorized('Unknown authentication error', str(e), ['Basic realm="FreeNAS"'])
class RESTApi(object):
    """Falcon-based REST frontend bridging HTTP to the dispatcher RPC bus."""

    def __init__(self):
        self.logger = logging.getLogger('restd')
        self._cruds = []         # registered CRUD resource instances
        self._threads = []
        self._rpcs = {}          # 'service.method' -> method metadata
        self._schemas = {}
        self._used_schemas = set()
        self._services = {}      # service name -> method list
        self._tasks = {}
        self.api = falcon.API(middleware=[
            AuthMiddleware(),
            JSONTranslator(),
        ])
        self.api.add_route('/', SwaggerResource(self))

        gevent.signal(signal.SIGINT, self.die)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Reconnect whenever the dispatcher link drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.dispatcher = Client()
        self.dispatcher.on_error(on_error)
        self.connect()

    def init_metadata(self):
        # Pull the task/schema/method metadata used to build REST routes.
        self._tasks = self.dispatcher.call_sync('discovery.get_tasks')
        self._schemas = self.dispatcher.call_sync('discovery.get_schema')
        for service in self.dispatcher.call_sync('discovery.get_services'):
            self._services[service] = self.dispatcher.call_sync(
                'discovery.get_methods', service)
            for method in self._services[service]:
                self._rpcs['{0}.{1}'.format(service, method['name'])] = method

    def load_plugins(self):
        # Load every .py file from the sibling 'plugins' directory and let it
        # register itself via its _init(self) hook.
        pluginsdir = os.path.realpath(
            os.path.join(os.path.dirname(__file__), '..', 'plugins'))
        for i in glob.glob1(pluginsdir, "*.py"):
            try:
                loader = importlib.machinery.SourceFileLoader(
                    i.split('.')[0], os.path.join(pluginsdir, i))
                mod = loader.load_module()
            except:
                self.logger.error('Failed to load plugin %s', i, exc_info=True)
                raise

            mod._init(self)

    def connect(self):
        # Retry until the dispatcher accepts us.
        while True:
            try:
                self.dispatcher.connect('unix:')
                self.dispatcher.login_service('restd')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def __call__(self, environ, start_response):
        # When fronted by a proxy (X-Real-IP set), strip the API prefix
        # before routing.
        if 'HTTP_X_REAL_IP' in environ:
            environ['PATH_INFO'] = environ.get('PATH_INFO', '').replace('/api/v2.0', '', 1)
        return self.api.__call__(environ, start_response)

    def register_crud(self, klass):
        ins = klass(self, self.dispatcher)
        self._cruds.append(ins)

    def register_singleitem(self, klass):
        klass(self, self.dispatcher)

    def register_resource(self, klass):
        klass(self)

    def run(self):
        """Bring up the dispatcher link and serve HTTP until killed."""
        self.init_dispatcher()
        self.init_metadata()
        self.load_plugins()
        server4 = WSGIServer(('0.0.0.0', 8889), self, handler_class=RESTWSGIHandler)
        self._threads = [gevent.spawn(server4.serve_forever)]
        checkin()
        gevent.joinall(self._threads)

    def die(self, *args):
        gevent.killall(self._threads)
        sys.exit(0)
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ##################################################################### from threading import Lock from freenas.dispatcher.client import Client from freenas.dispatcher.rpc import RpcException SERVICED_SOCKET = 'unix:///var/run/serviced.sock' _client = Client() _lock = Lock() class ServicedException(RpcException): pass def checkin(): with _lock: try: _client.connect(SERVICED_SOCKET) return _client.call_sync('serviced.job.checkin') except RpcException as err: raise ServicedException(err.code, err.message, err.extra) finally:
class Context(object):
    """debugd daemon state: local control server, dispatcher link and the
    outbound connection to the remote support proxy."""

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()   # multiplexed socket to the support proxy
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1                     # msock channel fd carrying RPC traffic
        self.connection_id = None            # uuid identifying this support session
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None                # keepalive thread, started in connect()
        self.connected_at = None
        self.cv = Condition()                # guards self.state
        self.rpc = RpcContext()
        self.client = Client()
        self.server = Server()
        self.middleware_endpoint = None

    def start(self, configpath, sockpath):
        # SIGUSR2 triggers an on-demand support connection.
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance('control', ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name='server thread', daemon=True).start()

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Reconnect whenever the dispatcher link drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        # Retry until the dispatcher accepts the debugd service registration.
        while True:
            try:
                self.middleware_endpoint.connect('unix:')
                self.middleware_endpoint.login_service('debugd')
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service(
                    'debugd.management', ControlService(self))
                self.middleware_endpoint.resume_service('debugd.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        """Load the JSON config file; exit the process on any failure."""
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal('Cannot open config file: {0}'.format(str(err)))
            self.logger.fatal('Exiting.')
            sys.exit(1)

    def connect(self, discard=False):
        # Launch (or relaunch) the keepalive thread that owns the proxy link;
        # discard=True forces a fresh connection id.
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        # Maintain the support-proxy connection; retries every 10 seconds and
        # returns only when state is set to OFFLINE.
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info(
                    'Connecting to {0}'.format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                self.client = Client()
                self.client.connect('fd://', fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(
                    self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service('debug', DebugService(self))
                self.client.call_sync('server.login', str(self.connection_id),
                                      socket.gethostname(), get_version(),
                                      'none')
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                self.logger.warning(
                    'Failed to initiate support connection: {0}'.format(err),
                    exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    # Sleep until the connection drops or we are told to stop.
                    self.cv.wait_for(lambda: self.state in (
                        ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning(
                'Support connection lost, retrying in 10 seconds')
            time.sleep(10)

    def disconnect(self):
        """Tear down the support connection and drop all tracked jobs."""
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        # Transport closed underneath us: mark LOST so keepalive retries.
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        # Wake any waiter (e.g. connect_keepalive) on state change.
        with self.cv:
            self.state = state
            self.cv.notify_all()
class Main(object):
    """etcd daemon: renders configuration-file templates into the /etc
    mountpoint and serves file-generation RPCs to the dispatcher."""

    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None             # /etc mountpoint rendered into
        self.configfile = None
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None           # dispatcher client, set in init_dispatcher()
        self.plugin_dirs = []
        self.renderers = {}          # template extension -> renderer instance
        self.managed_files = {}      # relative name -> absolute template path

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Reconnect whenever the dispatcher link drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        # Retry until the dispatcher accepts our service registrations.
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation', FileGenerationService(self))
                self.client.register_service('etcd.management', ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        """Load the JSON config; exit the process on read/parse failure."""
        try:
            f = open(filename, 'r')
            self.config = json.load(f)
            f.close()
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        # Register every template (by known renderer extension) under *dir*.
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)
                # First template found for a given name wins.
                if name in self.managed_files.keys():
                    continue

                if ext in TEMPLATE_RENDERERS.keys():
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        """Render the template behind *file_path*; on renderer failure return
        a comment string describing the error rather than raising."""
        if file_path not in self.managed_files.keys():
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers.keys():
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            self.logger.warn('Cannot generate file {0}: {1}'.format(
                file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def main(self):
        """Daemon entry point: parse args, wire everything up and serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False,
                            help='Run in foreground')
        parser.add_argument('mountpoint', metavar='MOUNTPOINT', default='/etc',
                            help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')
        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
class Main(object):
    """networkd daemon: manages network interfaces and DHCP clients.

    Tracks dhclient-style per-interface DHCP state, applies leases
    (addresses, default route, DNS), watches the routing socket and exposes
    RPC services over the dispatcher connection.
    """

    def __init__(self):
        self.config = None              # path to middleware config file (set in main())
        self.client = None              # dispatcher Client connection
        self.datastore = None
        self.configstore = None
        self.rtsock_thread = None       # RoutingSocketEventSource instance
        self.dhcp_clients = {}          # interface name -> dhcp.client.Client
        self.dhcp_lock = threading.RLock()
        self.logger = logging.getLogger('networkd')
        self.default_interface = None
        self.cv = Condition()

    def dhclient_pid(self, interface):
        """Return the PID recorded in dhclient's pidfile for *interface*, or None."""
        path = os.path.join('/var/run', 'dhclient.{0}.pid'.format(interface))
        if not os.path.exists(path):
            return None

        try:
            with open(path) as f:
                pid = int(f.read().strip())
                return pid
        except (IOError, ValueError):
            return None

    def dhclient_running(self, interface):
        """True if the pidfile's process still exists (probed with signal 0)."""
        pid = self.dhclient_pid(interface)
        if not pid:
            return False

        try:
            os.kill(pid, 0)
            return True
        except OSError:
            return False

    def configure_dhcp(self, interface, block=False, timeout=None):
        """Start a DHCP client on *interface*.

        When *block* is True, wait up to *timeout* for a lease and return
        whether one was obtained; otherwise return True immediately after
        starting the client.
        """
        if interface in self.dhcp_clients:
            self.logger.info('Interface {0} already configured by DHCP'.format(interface))
            return True

        def bind(old_lease, lease):
            # Called by the DHCP client on every (re)acquired lease.
            # NOTE(review): the trailing 'interface' argument is unused by
            # the format string (4 placeholders, 5 arguments).
            self.logger.info('{0} DHCP lease on {1} from {2}, valid for {3} seconds'.format(
                'Renewed' if old_lease else 'Acquired',
                interface,
                client.server_address,
                lease.lifetime,
                interface
            ))

            if old_lease is None or lease.client_ip != old_lease.client_ip:
                self.logger.info('Assigning IP address {0} to interface {1}'.format(lease.client_ip, interface))
                alias = lease.client_interface
                iface = netif.get_interface(interface)
                if old_lease:
                    # Drop the address from the previous lease before adding
                    # the new one; failures are logged but non-fatal.
                    try:
                        addr = first_or_default(lambda a: a.address == old_lease.client_ip, iface.addresses)
                        if addr:
                            iface.remove_address(addr)
                    except OSError as err:
                        self.logger.error('Cannot remove alias {0}: {1}'.format(old_lease.client_ip, err.strerror))

                try:
                    iface.add_address(netif.InterfaceAddress(netif.AddressFamily.INET, alias))
                except OSError as err:
                    self.logger.error('Cannot add alias to {0}: {1}'.format(interface, err.strerror))

            if lease.router and self.configstore.get('network.dhcp.assign_gateway'):
                # Install/replace the IPv4 default route from the lease.
                try:
                    rtable = netif.RoutingTable()
                    newroute = default_route(lease.router)
                    if rtable.default_route_ipv4 != newroute:
                        if rtable.default_route_ipv4:
                            self.logger.info('DHCP default route changed from {0} to {1}'.format(
                                rtable.default_route_ipv4, newroute
                            ))
                            rtable.delete(rtable.default_route_ipv4)
                            rtable.add(default_route(lease.router))
                        else:
                            self.logger.info('Adding default route via {0}'.format(lease.router))
                            rtable.add(default_route(lease.router))
                except OSError as err:
                    self.logger.error('Cannot configure default route: {0}'.format(err.strerror))

            if lease.dns_addresses and self.configstore.get('network.dhcp.assign_dns'):
                # Feed nameserver/search lines to resolvconf(8) via stdin.
                inp = []
                addrs = []
                proc = subprocess.Popen(
                    ['/sbin/resolvconf', '-a', interface],
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE
                )

                for i in lease.dns_addresses:
                    # Filter out bogus DNS server addresses
                    if str(i) in ('127.0.0.1', '0.0.0.0', '255.255.255.255'):
                        continue

                    inp.append('nameserver {0}'.format(i))
                    addrs.append(i)

                if lease.domain_name:
                    inp.append('search {0}'.format(lease.domain_name))

                proc.communicate('\n'.join(inp).encode('ascii'))
                proc.wait()
                self.client.emit_event('network.dns.configured', {
                    'addresses': addrs,
                })
                self.logger.info('Updated DNS configuration')
            else:
                # No DNS in the lease (or assignment disabled): clear ours.
                subprocess.call(['/sbin/resolvconf', '-d', interface])
                self.client.emit_event('network.dns.configured', {
                    'addresses': [],
                })
                self.logger.info('Deleted DNS configuration')

        def reject(reason):
            # Server rejected our request: tear down and, in non-blocking
            # mode, retry in 60 seconds.
            self.logger.info('DHCP request rejected on {0}: {1}'.format(interface, reason))
            self.deconfigure_dhcp(interface)
            if not block:
                t = threading.Timer(60, self.configure_dhcp, args=(interface,))
                t.start()

        def unbind(lease, reason):
            reasons = {
                dhcp.client.UnbindReason.EXPIRE: 'expired',
                dhcp.client.UnbindReason.REVOKE: 'revoked'
            }
            self.logger.info('DHCP lease on {0}: {1}'.format(interface, reasons.get(reason, 'revoked')))

        def state_change(state):
            # Propagate any client state change as dispatcher events.
            self.client.emit_event('network.interface.changed', {
                'operation': 'update',
                'ids': [interface]
            })
            self.client.emit_event('network.changed', {
                'operation': 'update'
            })

        with self.dhcp_lock:
            # The lambda supplies the short hostname to the DHCP client.
            client = dhcp.client.Client(interface, lambda: socket.gethostname().split('.')[0])
            client.on_bind = bind
            client.on_unbind = unbind
            client.on_reject = reject
            client.on_state_change = state_change
            client.start()
            self.dhcp_clients[interface] = client

            if block:
                ret = client.wait_for_bind(timeout)
                if ret is None:
                    # No lease within the timeout: stop and deregister.
                    client.stop()
                    del self.dhcp_clients[interface]

                return ret is not None

            return True

    def deconfigure_dhcp(self, interface):
        """Release the lease, stop the client and forget the interface."""
        with self.dhcp_lock:
            client = self.dhcp_clients[interface]
            client.release()
            client.stop()
            del self.dhcp_clients[interface]

    def renew_dhcp(self, interface):
        """Force a lease renewal; raises RpcException if not configured/bound."""
        if interface not in self.dhcp_clients:
            raise RpcException(errno.ENXIO, 'Interface {0} is not configured for DHCP'.format(interface))

        if not self.dhcp_clients[interface].lease:
            raise RpcException(errno.ENOENT, 'Cannot renew without a lease')

        self.dhcp_clients[interface].request(renew=True, timeout=30)

    def interface_detached(self, name):
        self.logger.warn('Interface {0} detached from the system'.format(name))

    def interface_attached(self, name):
        self.logger.warn('Interface {0} attached to the system'.format(name))

    def using_dhcp_for_gateway(self):
        # True when any interface is on DHCP and gateway assignment is enabled.
        for i in self.datastore.query('network.interfaces'):
            if i.get('dhcp') and self.configstore.get('network.dhcp.assign_gateway'):
                return True

        return False

    def scan_interfaces(self):
        """Reconcile the datastore with the interfaces currently present."""
        self.logger.info('Scanning available network interfaces...')
        existing = []

        # Add newly plugged NICs to DB
        for i in list(netif.list_interfaces().values()):
            existing.append(i.name)

            # We want only physical NICs
            if i.cloned:
                continue

            # Skip management/NAT and ephemeral tap/bridge interfaces.
            if i.name in ('mgmt0', 'nat0'):
                continue

            if i.name.startswith(('tap', 'brg')):
                continue

            if not self.datastore.exists('network.interfaces', ('id', '=', i.name)):
                self.logger.info('Found new interface {0} ({1})'.format(i.name, i.type.name))
                self.datastore.insert('network.interfaces', {
                    'enabled': False,
                    'id': i.name,
                    'name': None,
                    'cloned': False,
                    'type': i.type.name,
                    'dhcp': False,
                    'noipv6': False,
                    'rtadv': False,
                    'mtu': i.mtu,
                    'media': None,
                    'mediaopts': [],
                    'aliases': [],
                    'capabilities': {
                        'add': [],
                        'del': []
                    }
                })

        # Remove unplugged NICs from DB
        for i in self.datastore.query('network.interfaces', ('id', 'nin', existing), ('cloned', '=', False)):
            self.datastore.delete('network.interfaces', i['id'])

    def init_datastore(self):
        # Exit outright if the datastore is unreachable.
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        # Retry forever; the dispatcher may come up after us. *resume* is set
        # when reconnecting so registered services are resumed again.
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('networkd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('networkd.configuration', ConfigurationService(self))
                self.client.register_service('networkd.debug', DebugService())
                if resume:
                    self.client.resume_service('networkd.configuration')
                    self.client.resume_service('networkd.debug')

                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Transparently reconnect when the dispatcher link drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_routing_socket(self):
        # Background thread translating routing-socket messages into events.
        self.rtsock_thread = RoutingSocketEventSource(self)
        self.rtsock_thread.start()

    def register_schemas(self):
        """Register all JSON schemas this daemon's RPC interface refers to."""
        self.client.register_schema('network-aggregation-protocols', {
            'type': 'string',
            'enum': list(netif.AggregationProtocol.__members__.keys())
        })

        self.client.register_schema('network-lagg-port-flags', {
            'type': 'array',
            'items': {'$ref': 'network-lagg-port-flags-items'}
        })

        self.client.register_schema('network-lagg-port-flags-items', {
            'type': 'string',
            'enum': list(netif.LaggPortFlags.__members__.keys())
        })

        self.client.register_schema('network-interface-flags', {
            'type': 'array',
            'items': {'$ref': 'network-interface-flags-items'}
        })

        self.client.register_schema('network-interface-flags-items', {
            'type': 'string',
            'enum': list(netif.InterfaceFlags.__members__.keys())
        })

        self.client.register_schema('network-interface-capabilities', {
            'type': 'array',
            'items': {'$ref': 'network-interface-capabilities-items'}
        })

        self.client.register_schema('network-interface-capabilities-items', {
            'type': 'string',
            'enum': list(netif.InterfaceCapability.__members__.keys())
        })

        self.client.register_schema('network-interface-mediaopts', {
            'type': 'array',
            'items': {'$ref': 'network-interface-mediaopts-items'}
        })

        self.client.register_schema('network-interface-mediaopts-items', {
            'type': 'string',
            'enum': list(netif.InterfaceMediaOption.__members__.keys())
        })

        self.client.register_schema('network-interface-nd6-flag', {
            'type': 'array',
            'items': {'$ref': 'network-interface-nd6-flag-items'}
        })

        self.client.register_schema('network-interface-nd6-flag-items', {
            'type': 'string',
            'enum': list(netif.NeighborDiscoveryFlags.__members__.keys())
        })

        self.client.register_schema('network-interface-type', {
            'type': 'string',
            'enum': [
                'LOOPBACK',
                'ETHER',
                'VLAN',
                'BRIDGE',
                'LAGG'
            ]
        })

        self.client.register_schema('network-interface-dhcp-state', {
            'type': 'string',
            'enum': [
                'INIT',
                'SELECTING',
                'REQUESTING',
                'INIT_REBOOT',
                'REBOOTING',
                'BOUND',
                'RENEWING',
                'REBINDING'
            ]
        })

        self.client.register_schema('network-interface-status', {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'link_state': {'$ref': 'network-interface-status-linkstate'},
                'link_address': {'type': 'string'},
                'mtu': {'type': 'integer'},
                'media_type': {'type': 'string'},
                'media_subtype': {'type': 'string'},
                'active_media_type': {'type': 'string'},
                'active_media_subtype': {'type': 'string'},
                'media_options': {'$ref': 'network-interface-mediaopts'},
                'cloned': {'type': 'boolean'},
                'capabilities': {'$ref': 'network-interface-capabilities'},
                'flags': {'$ref': 'network-interface-flags'},
                'dhcp': {
                    'type': 'object',
                    'properties': {
                        'state': {'$ref': 'network-interface-dhcp-state'},
                        'server_address': {'type': 'string'},
                        'server_name': {'type': 'string'},
                        'lease_starts_at': {'type': 'datetime'},
                        'lease_ends_at': {'type': 'datetime'}
                    }
                },
                'aliases': {
                    'type': 'array',
                    'items': {'$ref': 'network-interface-alias'}
                },
                'nd6_flags': {
                    'type': 'array',
                    'items': {'$ref': 'network-interface-nd6-flag'}
                },
                'ports': {
                    'oneOf': [
                        {'type': 'null'},
                        {
                            'type': 'array',
                            'members': {
                                'type': 'object',
                                'properties': {
                                    'name': {'type': 'string'},
                                    'flags': {'$ref': 'network-lagg-port-flags'}
                                }
                            }
                        }
                    ]
                },
                'members': {
                    'oneOf': [
                        {'type': 'null'},
                        {
                            'type': 'array',
                            'members': {'type': 'string'}
                        }
                    ]
                },
                'parent': {'type': ['string', 'null']},
                'tag': {'type': ['integer', 'null']}
            }
        })

        self.client.register_schema('network-interface-status-linkstate', {
            'type': 'string',
            'enum': list(netif.InterfaceLinkState.__members__.keys())
        })

    def main(self):
        # Daemon entry point: connect everything, scan interfaces, serve forever.
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/networkd.log', 'DEBUG')
        setproctitle.setproctitle('networkd')
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.scan_interfaces()
        self.init_routing_socket()
        self.client.resume_service('networkd.configuration')
        self.client.resume_service('networkd.debug')
        self.logger.info('Started')
        self.client.wait_forever()
class Context(object):
    """Task executor process: runs one dispatcher task at a time.

    Connects back to the dispatcher over the local socket, checks in with a
    key passed on the command line, then loops: receive a task description,
    load its module, instantiate its class and run it, reporting status
    (FINISHED/FAILED/ROLLBACK) via 'task.put_status'.
    """

    def __init__(self):
        self.service = TaskProxyService(self)
        self.task = queue.Queue(1)      # hand-off slot: exactly one pending task
        self.datastore = None
        self.configstore = None
        self.conn = None                # dispatcher Client connection
        self.instance = None            # currently running task instance
        self.running = Event()          # set while a task's run() is executing

    def put_status(self, state, result=None, exception=None):
        """Report task state back to the dispatcher, with optional result/error."""
        obj = {
            'status': state,
            'result': None
        }

        if result is not None:
            obj['result'] = result

        if exception is not None:
            obj['error'] = serialize_error(exception)

        self.conn.call_sync('task.put_status', obj)

    def task_progress_handler(self, args):
        # Forward progress events to the running task instance, if any.
        if self.instance:
            self.instance.task_progress_handler(args)

    def collect_fds(self, obj):
        """Yield every FileDescriptor found recursively in *obj* (dicts/lists/tuples)."""
        if isinstance(obj, dict):
            for v in obj.values():
                if isinstance(v, FileDescriptor):
                    yield v
                else:
                    yield from self.collect_fds(v)

        if isinstance(obj, (list, tuple)):
            for o in obj:
                if isinstance(o, FileDescriptor):
                    yield o
                else:
                    yield from self.collect_fds(o)

    def close_fds(self, fds):
        # Best-effort close of all descriptors passed with the task arguments.
        for i in fds:
            try:
                os.close(i.fd)
            except OSError:
                pass

    def main(self):
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        configure_logging(None, logging.DEBUG)

        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.register_event_handler('task.progress', self.task_progress_handler)
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                task = self.task.get()
                logging.root.setLevel(self.conn.call_sync('management.get_logging_level'))
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                if task['debugger']:
                    # Optional remote debugging hook (pydev).
                    sys.path.append('/usr/local/lib/dispatcher/pydev')
                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                module = load_module_from_file(name, task['filename'])
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                fds = list(self.collect_fds(task['args']))

                try:
                    self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.user = task['user']
                    self.instance.environment = task['environment']
                    self.running.set()
                    result = self.instance.run(*task['args'])
                except BaseException as err:
                    print("Task exception: {0}".format(str(err)), file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    if hasattr(self.instance, 'rollback'):
                        # Attempt rollback before reporting the failure;
                        # rollback errors are logged but do not mask it.
                        self.put_status('ROLLBACK')
                        try:
                            self.instance.rollback(*task['args'])
                        except BaseException as rerr:
                            print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                            traceback.print_exc(file=sys.stderr)

                    self.put_status('FAILED', exception=err)
                else:
                    self.put_status('FINISHED', result=result)
                finally:
                    self.close_fds(fds)
                    self.running.clear()

            except RpcException as err:
                print("RPC failed: {0}".format(str(err)), file=sys.stderr)
                print(traceback.format_exc(), flush=True)
                sys.exit(errno.EBADMSG)
            except socket.error as err:
                print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.ETIMEDOUT)

            if task['debugger']:
                import pydevd
                pydevd.stoptrace()

            setproctitle.setproctitle('task executor (idle)')
class Context(object):
    """schedulerd daemon: runs calendar tasks via an APScheduler instance.

    Jobs are persisted in MongoDB; each firing submits a dispatcher task,
    waits for it, and records the run (emitting an alert on failure).
    """

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None              # path to middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None              # dispatcher Client connection
        self.scheduler = None           # APScheduler BackgroundScheduler
        self.active_tasks = {}          # job id -> dispatcher task id

    def init_datastore(self):
        # Exit outright if the datastore is unreachable.
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Transparently reconnect when the dispatcher link drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        # Persist jobs in the same MongoDB instance the datastore uses;
        # all schedules are interpreted in UTC.
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        # Retry forever; the dispatcher may come up after us.
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Scheduler callback: submit task args[0] with args[1:], wait for it,
        alert on failure and record the run in 'schedulerd.runs'."""
        tid = self.client.call_sync(
            'task.submit_with_env',
            args[0],
            args[1:], {
                'RUN_AS_USER': '******',
                'CALENDAR_TASK_NAME': kwargs.get('name')
            })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            # Alert emission is best-effort; a failure here must not prevent
            # bookkeeping below.
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        # Forward an event to the dispatcher.
        self.client.emit_event(name, params)

    def main(self):
        # Daemon entry point.
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False,
                            help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.client.wait_forever()
    def run(self, peer, initial_credentials):
        """Create a bidirectional FreeNAS peering between this host and a remote.

        Exchanges SSH host/public keys with the remote system (authenticating
        with a one-time auth code, the stored private key, or an explicit
        username/password), creates the local peer entry, then asks the remote
        to create the mirror entry — rolling back the local entry if that
        fails. Returns the id of the locally created peer.
        Raises TaskException on connection, authentication or duplicate-peer
        errors.
        """
        hostid = self.dispatcher.call_sync('system.info.host_uuid')
        hostname = self.dispatcher.call_sync(
            'system.general.get_config')['hostname']
        remote_peer_name = hostname
        credentials = peer['credentials']
        remote = credentials.get('address')
        port = credentials.get('port', 22)
        username = initial_credentials.get('username')
        password = initial_credentials.get('password')
        auth_code = initial_credentials.get('auth_code')
        key_auth = initial_credentials.get('key_auth')

        local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

        if self.datastore.exists('peers', ('credentials.address', '=', remote), ('type', '=', 'freenas')):
            raise TaskException(
                errno.EEXIST,
                'FreeNAS peer entry for {0} already exists'.format(remote))

        remote_client = Client()
        try:
            if auth_code:
                # One-time auth-code flow: plain websocket connection, the
                # remote pushes its key and we wait for the peer entry to
                # appear locally.
                try:
                    remote_client.connect('ws://{0}'.format(
                        wrap_address(remote)))
                except (AuthenticationException, OSError, ConnectionRefusedError):
                    raise TaskException(
                        errno.ECONNABORTED,
                        'Cannot connect to {0}:{1}'.format(remote, port))

                try:
                    remote_host_uuid, pubkey = remote_client.call_sync(
                        'peer.freenas.auth_with_code',
                        auth_code, hostname, local_ssh_config['port'])
                except RpcException as err:
                    raise TaskException(err.code, err.message)

                try:
                    self.dispatcher.call_sync('peer.freenas.put_temp_pubkey', pubkey)
                    # The remote creates our peer entry; wait (max 30s) for it.
                    if not self.dispatcher.test_or_wait_for_event(
                            'peer.changed',
                            lambda ar: ar['operation'] == 'create' and remote_host_uuid in ar['ids'],
                            lambda: self.datastore.exists(
                                'peers', ('id', '=', remote_host_uuid)),
                            timeout=30):
                        raise TaskException(
                            errno.EAUTH,
                            'FreeNAS peer creation failed. Check connection to host {0}.'
                            .format(remote))
                finally:
                    # The temporary public key must never outlive the exchange.
                    self.dispatcher.call_sync(
                        'peer.freenas.remove_temp_pubkey', pubkey)
            else:
                try:
                    if key_auth:
                        # Authenticate with our stored private key, retrying
                        # for up to ~50 seconds while the remote side installs it.
                        with io.StringIO() as f:
                            f.write(
                                self.configstore.get(
                                    'peer.freenas.key.private'))
                            f.seek(0)
                            pkey = RSAKey.from_private_key(f)

                        max_tries = 50
                        while True:
                            try:
                                remote_client.connect(
                                    'ws+ssh://freenas@{0}'.format(
                                        wrap_address(remote)),
                                    pkey=pkey, port=port)
                                break
                            except AuthenticationException:
                                if max_tries:
                                    max_tries -= 1
                                    time.sleep(1)
                                else:
                                    raise
                    else:
                        # Username/password flow.
                        remote_client.connect('ws+ssh://{0}@{1}'.format(
                            username, wrap_address(remote)),
                            port=port, password=password)

                    remote_client.login_service('replicator')
                except (AuthenticationException, OSError, ConnectionRefusedError):
                    raise TaskException(
                        errno.ECONNABORTED,
                        'Cannot connect to {0}:{1}'.format(remote, port))

            local_host_key, local_pub_key = self.dispatcher.call_sync(
                'peer.freenas.get_ssh_keys')
            remote_host_key, remote_pub_key = remote_client.call_sync(
                'peer.freenas.get_ssh_keys')
            ip_at_remote_side = remote_client.local_address[0]

            remote_hostname = remote_client.call_sync(
                'system.general.get_config')['hostname']

            # Strip the trailing comment field from the host keys.
            remote_host_key = remote_host_key.rsplit(' ', 1)[0]
            local_host_key = local_host_key.rsplit(' ', 1)[0]

            if remote_client.call_sync('peer.query', [('id', '=', hostid)]):
                raise TaskException(
                    errno.EEXIST,
                    'Peer entry of {0} already exists at {1}'.format(
                        hostname, remote))

            # Entry describing the remote system, stored locally.
            peer['credentials'] = {
                '%type': 'freenas-credentials',
                'pubkey': remote_pub_key,
                'hostkey': remote_host_key,
                'port': port,
                'address': remote_hostname
            }

            local_id = remote_client.call_sync('system.info.host_uuid')
            peer['id'] = local_id
            peer['name'] = remote_hostname
            ip = socket.gethostbyname(remote)

            created_ids = self.join_subtasks(
                self.run_subtask('peer.freenas.create_local', peer, ip, True))

            # Mirror entry describing this host, created on the remote side.
            peer['id'] = hostid
            peer['name'] = remote_peer_name
            peer['credentials'] = {
                '%type': 'freenas-credentials',
                'pubkey': local_pub_key,
                'hostkey': local_host_key,
                'port': local_ssh_config['port'],
                'address': hostname
            }

            try:
                call_task_and_check_state(remote_client, 'peer.freenas.create_local', peer, ip_at_remote_side)
            except TaskException:
                # Remote creation failed: roll back the local entry so the
                # two sides stay consistent.
                self.datastore.delete('peers', local_id)
                self.dispatcher.dispatch_event('peer.changed', {
                    'operation': 'delete',
                    'ids': [local_id]
                })
                raise

            return created_ids[0]
        finally:
            remote_client.disconnect()
class Main(object):
    """dscached daemon: caching directory-service lookups (users/groups/hosts).

    Loads directory plugins, keeps TTL caches for account/group/host queries,
    and serves them over both a local RPC server socket and the dispatcher
    connection.
    """

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None                  # parsed JSON config (set via parse_config)
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None                  # dispatcher Client connection
        self.server = None                  # local RPC server (init_server)
        self.plugin_dirs = []               # plugin search path from config
        self.plugins = {}                   # plugin name -> class
        self.directories = []               # configured Directory instances
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.home_directory_root = None
        self.account_service = AccountService(self)
        self.group_service = GroupService(self)
        self.rpc.register_service_instance('dscached.account', self.account_service)
        self.rpc.register_service_instance('dscached.group', self.group_service)
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.idmap', IdmapService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_active_directories(self):
        """Return all directories currently in the BOUND state."""
        return list(filter(lambda d: d and d.state == DirectoryState.BOUND, self.directories))

    def get_searched_directories(self):
        """Return BOUND directories, in the configured search order."""
        return list(filter(
            lambda d: d and d.state == DirectoryState.BOUND,
            (self.get_directory_by_name(n) for n in self.get_search_order())
        ))

    def get_search_order(self):
        return self.search_order

    def get_directory_by_domain(self, domain_name):
        """Return the directory serving *domain_name*, or None."""
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        """Return the directory named *name*, or None."""
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        """Return the directory whose configured id range contains uid/gid.

        uid/gid 0 are special-cased to the local directory.
        """
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories
            )

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories
            )

    def get_home_directory(self, directory, username):
        """Return the home path for *username* in *directory*.

        Falls back to '/nonexistent' when no home root is configured.
        """
        if not self.home_directory_root:
            return '/nonexistent'

        return os.path.join(self.home_directory_root, f'{username}@{directory.domain_name}')

    def wait_for_etcd(self):
        # Block until the etcd.generation service is available.
        self.client.test_or_wait_for_event(
            'plugin.service_resume',
            lambda args: args['name'] == 'etcd.generation',
            lambda: 'etcd.generation' in self.client.call_sync('discovery.get_services')
        )

    def init_datastore(self):
        # Exit outright if the datastore is unreachable.
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Transparently reconnect when the dispatcher link drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        # Local streaming RPC server (world-accessible unix socket).
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        """Load the JSON middleware config; exit(1) on any read/parse error."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Python 3 exceptions have no .message attribute; log the
            # exception itself (the old err.message raised AttributeError).
            self.logger.error('Cannot read config file: %s', err)
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        # Retry forever; the dispatcher may come up after us.
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.idmap')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        # Walk every configured plugin directory.
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load and initialize every .py plugin found in *dir* (best effort)."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt are not swallowed; a broken plugin is
                # logged and skipped.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        self.client.register_schema(name, schema)

    def register_schemas(self):
        # Publish every locally declared JSON schema object to the dispatcher.
        from freenas.dispatcher.model import context
        for name, schema in (s.__named_json_schema__() for s in context.local_json_schema_objects):
            self.logger.debug(f'Registering schema: {name}')
            self.client.register_schema(name, schema)

    def init_directories(self):
        # Configure every persisted directory; skip any that fails.
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                self.directories.append(directory)
                directory.configure()
            except Exception:
                # Narrowed from a bare 'except:'; a misconfigured directory
                # must not prevent the rest from loading.
                continue

    def load_config(self):
        """Pull cache/search settings from the configstore."""
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')
        self.home_directory_root = self.configstore.get('system.home_directory_root')

    def checkin(self):
        checkin()

    def main(self):
        # Daemon entry point.
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS,
                            help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('dscached', 'DEBUG')
        setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.register_schemas()
        self.wait_for_etcd()
        self.init_directories()
        self.checkin()
        self.client.wait_forever()
class Main(object):
    """etcd daemon: renders managed configuration files from templates.

    Scans plugin directories for template files, maps each managed path to
    its template, and serves file-generation RPCs over the dispatcher
    connection.
    """

    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None                # /etc mount point (set in main())
        self.configfile = None          # path to middleware config file
        self.config = None              # parsed JSON config
        self.datastore = None
        self.configstore = None
        self.client = None              # dispatcher Client connection
        self.plugin_dirs = []           # template search path from config
        self.renderers = {}             # extension -> renderer instance
        self.managed_files = {}         # managed path -> template abspath

    def init_datastore(self):
        # Exit outright if the datastore is unreachable.
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            # Transparently reconnect when the dispatcher link drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        # Retry forever; the dispatcher may come up after us.
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation', FileGenerationService(self))
                self.client.register_service('etcd.management', ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        # Instantiate one renderer object per registered template extension.
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        """Load the JSON middleware config; exit(1) on any read/parse error."""
        try:
            # Context manager closes the descriptor even when json.load()
            # raises; the old open()/close() pair leaked it on parse errors.
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Python 3 exceptions have no .message attribute; log the
            # exception itself (the old err.message raised AttributeError).
            self.logger.error('Cannot read config file: %s', err)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        # Walk every configured plugin directory for template files.
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Register every template under *dir* whose extension has a renderer.

        The managed file name is the template path relative to *dir*, minus
        its extension; the first directory scanned wins for duplicates.
        """
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)
                if name in self.managed_files.keys():
                    continue

                if ext in TEMPLATE_RENDERERS.keys():
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        """Render the template behind *file_path*.

        Raises RpcException(ENOENT) for unmanaged paths; renderer failures
        are logged and yield a comment placeholder instead of propagating.
        """
        if file_path not in self.managed_files.keys():
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers.keys():
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            # logger.warning: warn() is a deprecated alias.
            self.logger.warning('Cannot generate file {0}: {1}'.format(file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        # Forward an event to the dispatcher.
        self.client.emit_event(name, params)

    def main(self):
        # Daemon entry point: parse arguments, load config, scan templates,
        # connect to datastore and dispatcher, then serve forever.
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False,
                            help='Run in foreground')
        parser.add_argument('mountpoint', metavar='MOUNTPOINT', default='/etc',
                            help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')
        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
def __init__(self, level=logging.NOTSET, address=None, ident=None): super(LogdLogHandler, self).__init__(level) self.address = address or 'unix:///var/run/logd.sock' self.ident = ident or os.path.basename(sys.executable) self.client = Client() self.client.connect(self.address)
class Context(object):
    """Task-executor process context: receives one task at a time from the
    dispatcher, runs it (with before/after/error hooks), and reports status."""

    def __init__(self):
        self.service = TaskProxyService(self)
        # Capacity-1 queue: the executor handles exactly one task at a time.
        self.task = queue.Queue(1)
        self.datastore = None
        self.configstore = None
        self.conn = None            # dispatcher connection
        self.instance = None        # currently running task instance
        self.module_cache = {}      # task filename -> loaded module
        self.running = Event()

    def put_status(self, state, result=None, exception=None):
        """Report task state back to the dispatcher, with optional result/error."""
        obj = {'status': state, 'result': None}
        if result is not None:
            obj['result'] = result
        if exception is not None:
            obj['error'] = serialize_error(exception)
        self.conn.call_sync('task.put_status', obj)

    def task_progress_handler(self, args):
        # Forward progress events to the running task instance, if any.
        if self.instance:
            self.instance.task_progress_handler(args)

    def collect_fds(self, obj):
        """Recursively yield every FileDescriptor found in nested dicts/lists."""
        if isinstance(obj, dict):
            for v in obj.values():
                if isinstance(v, FileDescriptor):
                    yield v
                else:
                    yield from self.collect_fds(v)

        if isinstance(obj, (list, tuple)):
            for o in obj:
                if isinstance(o, FileDescriptor):
                    yield o
                else:
                    yield from self.collect_fds(o)

    def close_fds(self, fds):
        """Best-effort close of the fds passed in with the task arguments."""
        for i in fds:
            try:
                os.close(i.fd)
            except OSError:
                pass

    def run_task_hooks(self, instance, task, type, **extra_env):
        """Run all hooks of *type* registered on *task* as subtasks.

        A hook whose condition callable is present and returns False is
        skipped; a condition that itself raises is printed and skipped.
        """
        for hook, props in task['hooks'].get(type, {}).items():
            try:
                if props['condition'] and not props['condition'](*task['args']):
                    continue
            except BaseException as err:
                print(err)
                continue

            instance.join_subtasks(instance.run_subtask(hook, *task['args'], **extra_env))

    def main(self):
        """Executor entry point: connect to the dispatcher, check in with the
        launch key (argv[1]), then loop running tasks from the queue."""
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        configure_logging(None, logging.DEBUG)

        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.call_sync('management.enable_features', ['streaming_responses'])
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.register_event_handler('task.progress', self.task_progress_handler)
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                task = self.task.get()
                logging.root.setLevel(self.conn.call_sync('management.get_logging_level'))
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                if task['debugger']:
                    sys.path.append('/usr/local/lib/dispatcher/pydev')

                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                # Reuse the already-loaded module for repeat tasks from the same file.
                module = self.module_cache.get(task['filename'])
                if not module:
                    module = load_module_from_file(name, task['filename'])
                    self.module_cache[task['filename']] = module

                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))
                fds = list(self.collect_fds(task['args']))

                try:
                    dispatcher = DispatcherWrapper(self.conn)
                    self.instance = getattr(module, task['class'])(dispatcher, self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.user = task['user']
                    self.instance.environment = task['environment']
                    self.running.set()
                    self.run_task_hooks(self.instance, task, 'before')
                    result = self.instance.run(*task['args'])
                    self.run_task_hooks(self.instance, task, 'after', result=result)
                except BaseException as err:
                    print("Task exception: {0}".format(str(err)), file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    # Rollback is optional; its own failures are reported but
                    # do not change the final FAILED status.
                    if hasattr(self.instance, 'rollback'):
                        self.put_status('ROLLBACK')
                        try:
                            self.instance.rollback(*task['args'])
                        except BaseException as rerr:
                            print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                            traceback.print_exc(file=sys.stderr)

                    # Main task is already failed at this point, so ignore hook errors
                    with contextlib.suppress(RpcException):
                        self.run_task_hooks(self.instance, task, 'error', error=serialize_error(err))

                    self.put_status('FAILED', exception=err)
                else:
                    self.put_status('FINISHED', result=result)
                finally:
                    self.close_fds(fds)
                    self.running.clear()
            except RpcException as err:
                # Status reporting itself failed; the executor cannot continue.
                print("RPC failed: {0}".format(str(err)), file=sys.stderr)
                print(traceback.format_exc(), flush=True)
                sys.exit(errno.EBADMSG)
            except socket.error as err:
                print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.ETIMEDOUT)

            if task['debugger']:
                import pydevd
                pydevd.stoptrace()

            setproctitle.setproctitle('task executor (idle)')
def main(self):
    """Task-executor entry point (variant without hook support or module
    caching): connect to the dispatcher, check in with the launch key
    (argv[1]), then loop running tasks from the queue."""
    if len(sys.argv) != 2:
        print("Invalid number of arguments", file=sys.stderr)
        sys.exit(errno.EINVAL)

    key = sys.argv[1]
    configure_logging(None, logging.DEBUG)

    self.datastore = get_datastore()
    self.configstore = ConfigStore(self.datastore)
    self.conn = Client()
    self.conn.connect('unix:')
    self.conn.login_service('task.{0}'.format(os.getpid()))
    self.conn.enable_server()
    self.conn.rpc.register_service_instance('taskproxy', self.service)
    self.conn.register_event_handler('task.progress', self.task_progress_handler)
    self.conn.call_sync('task.checkin', key)
    setproctitle.setproctitle('task executor (idle)')

    while True:
        try:
            task = self.task.get()
            logging.root.setLevel(self.conn.call_sync('management.get_logging_level'))
            setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

            if task['debugger']:
                sys.path.append('/usr/local/lib/dispatcher/pydev')

                import pydevd
                host, port = task['debugger']
                pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

            # Module is reloaded for every task here (no cache in this variant).
            name, _ = os.path.splitext(os.path.basename(task['filename']))
            module = load_module_from_file(name, task['filename'])
            setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))
            fds = list(self.collect_fds(task['args']))

            try:
                self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
                self.instance.configstore = self.configstore
                self.instance.user = task['user']
                self.instance.environment = task['environment']
                self.running.set()
                result = self.instance.run(*task['args'])
            except BaseException as err:
                print("Task exception: {0}".format(str(err)), file=sys.stderr)
                traceback.print_exc(file=sys.stderr)

                # Optional rollback; its failure is reported but the final
                # status is FAILED either way.
                if hasattr(self.instance, 'rollback'):
                    self.put_status('ROLLBACK')
                    try:
                        self.instance.rollback(*task['args'])
                    except BaseException as rerr:
                        print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                        traceback.print_exc(file=sys.stderr)

                self.put_status('FAILED', exception=err)
            else:
                self.put_status('FINISHED', result=result)
            finally:
                self.close_fds(fds)
                self.running.clear()
        except RpcException as err:
            # Status reporting itself failed; the executor cannot continue.
            print("RPC failed: {0}".format(str(err)), file=sys.stderr)
            print(traceback.format_exc(), flush=True)
            sys.exit(errno.EBADMSG)
        except socket.error as err:
            print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
            sys.exit(errno.ETIMEDOUT)

        if task['debugger']:
            import pydevd
            pydevd.stoptrace()

        setproctitle.setproctitle('task executor (idle)')
class Main(object):
    """containerd daemon: manages VMs/containers, their management network,
    NAT, EC2-style metadata service, and WebSocket console access."""

    def __init__(self):
        self.client = None          # dispatcher RPC client
        self.datastore = None
        self.configstore = None
        self.config = None          # path to middleware config file
        self.mgmt = None            # ManagementNetwork instance
        self.vm_started = Event()
        self.containers = {}        # container/VM id -> instance
        self.tokens = {}
        self.logger = logging.getLogger('containerd')
        self.bridge_interface = None
        self.used_nmdms = []        # nmdm (null-modem console) unit numbers in use

    def init_datastore(self):
        """Connect to the datastore; exit, since containerd cannot run without it."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def allocate_nmdm(self):
        """Allocate the lowest free nmdm unit number (0-254).

        NOTE(review): if all 255 units are in use this falls through and
        returns None implicitly — callers presumably never exhaust the pool;
        confirm before relying on the return value.
        """
        for i in range(0, 255):
            if i not in self.used_nmdms:
                self.used_nmdms.append(i)
                return i

    def release_nmdm(self, index):
        # Raises ValueError if index was never allocated.
        self.used_nmdms.remove(index)

    def connect(self):
        """Connect to the dispatcher and register containerd services; retry forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('containerd')
                self.client.enable_server()
                self.client.register_service('containerd.management', ManagementService(self))
                self.client.register_service('containerd.debug', DebugService(gevent=True, builtins={"context": self}))
                self.client.resume_service('containerd.management')
                self.client.resume_service('containerd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.use_bursts = True
        self.client.on_error(on_error)
        self.connect()

    def init_mgmt(self):
        """Bring up the management bridge and attach the EC2 metadata address."""
        self.mgmt = ManagementNetwork(self, MGMT_INTERFACE, MGMT_ADDR)
        self.mgmt.up()
        self.mgmt.bridge_if.add_address(netif.InterfaceAddress(
            netif.AddressFamily.INET,
            ipaddress.ip_interface('169.254.169.254/32')
        ))

    def init_nat(self):
        """Install a pf NAT rule translating the management subnet out of the
        default-route interface; replaces any stale rule for the same subnet."""
        default_if = self.client.call_sync('networkd.configuration.get_default_interface')
        if not default_if:
            self.logger.warning('No default route interface; not configuring NAT')
            return

        p = pf.PF()

        # Try to find and remove existing NAT rules for the same subnet
        oldrule = first_or_default(
            lambda r: r.src.address.address == MGMT_ADDR.network.network_address,
            p.get_rules('nat')
        )

        if oldrule:
            p.delete_rule('nat', oldrule.index)

        rule = pf.Rule()
        rule.src.address.address = MGMT_ADDR.network.network_address
        rule.src.address.netmask = MGMT_ADDR.netmask
        rule.action = pf.RuleAction.NAT
        rule.af = socket.AF_INET
        rule.ifname = default_if
        rule.redirect_pool.append(pf.Address(ifname=default_if))
        rule.proxy_ports = [50001, 65535]
        p.append_rule('nat', rule)

        try:
            p.enable()
        except OSError as err:
            # pf already enabled is fine; anything else is a real failure.
            if err.errno != errno.EEXIST:
                raise err

    def init_ec2(self):
        """Start the EC2-compatible metadata server for guests."""
        self.ec2 = EC2MetadataServer(self)
        self.ec2.start()

    def vm_by_mgmt_mac(self, mac):
        """Return the VM owning the tap interface with MAC *mac*, or None."""
        for i in self.containers.values():
            for tapmac in i.tap_interfaces.values():
                if tapmac == mac:
                    return i

        return None

    def vm_by_mgmt_ip(self, ip):
        """Return the VM that holds the DHCP lease for *ip* (None if unknown)."""
        for i in self.mgmt.allocations.values():
            if i.lease.client_ip == ip:
                return i.vm()

    def die(self):
        """Stop all containers, disconnect, and exit cleanly."""
        self.logger.warning('Exiting')
        for i in self.containers.values():
            i.stop(True)

        self.client.disconnect()
        sys.exit(0)

    def generate_id(self):
        """Return a random 32-character alphanumeric token."""
        return ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])

    def dispatcher_error(self, error):
        self.die()

    def main(self):
        """Daemon entry point: initialize subsystems, then serve the console
        WebSocket endpoints on IPv4 and IPv6 forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-p', type=int, metavar='PORT', default=5500, help="WebSockets server port")
        args = parser.parse_args()
        configure_logging('/var/log/containerd.log', 'DEBUG')
        setproctitle.setproctitle('containerd')

        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGQUIT, self.die)

        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_mgmt()
        self.init_nat()
        self.init_ec2()
        self.logger.info('Started')

        # WebSockets server
        kwargs = {}
        s4 = WebSocketServer(('', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        s6 = WebSocketServer(('::', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        serv_threads = [gevent.spawn(s4.serve_forever), gevent.spawn(s6.serve_forever)]
        gevent.joinall(serv_threads)
class Main(object):
    """neighbord daemon: advertises this host's services (mDNS-style) via
    pluggable discovery backends and answers discovery queries."""

    def __init__(self):
        # NOTE: the original assigned self.config and self.logger twice; the
        # second logger assignment silently replaced the named 'neighbord'
        # logger with the root logger. The duplicates are removed and the
        # named logger kept.
        self.logger = logging.getLogger('neighbord')
        self.config = None          # path to middleware config file, then parsed dict users see self.config reassigned in parse_config
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher RPC client
        self.plugin_dirs = []       # discovery plugin directories from config
        self.plugins = {}           # plugin name -> instance

    def parse_config(self, filename):
        """Load the JSON middleware config file; exit on I/O or syntax error.

        Bug fix: the previous version logged ``err.message``, an attribute
        that does not exist on Python 3 exceptions, so a missing/unreadable
        config raised AttributeError instead of logging the real problem.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        """Connect to the datastore; exit, since neighbord cannot run without it."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        """Load discovery plugins from every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every *.py file in *dir* as a plugin; failures are logged,
        not fatal, so one broken plugin does not stop the others."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Called by plugins from _init() to register their backend class."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        """Announce one service through every loaded backend."""
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        """Announce this host's standard services (freenas/http/ssh/sftp-ssh)."""
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            self.logger.error('Failed to register services: {0}'.format(str(err)))

    def connect(self):
        """Connect to the dispatcher and register neighbord services; retry forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management', ManagementService(self))
                self.client.register_service('neighbord.discovery', DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        """Daemon entry point: parse arguments, initialize, then serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c

        configure_logging('/var/log/neighbord.log', 'DEBUG')
        setproctitle.setproctitle('neighbord')
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.client.wait_forever()
class Context(object):
    """schedulerd daemon context: runs calendar tasks via APScheduler backed
    by the middleware datastore, and reports failures as alerts."""

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None          # path to middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher RPC client
        self.scheduler = None       # APScheduler BackgroundScheduler
        self.active_tasks = {}      # job id -> dispatcher task id currently running

    def init_datastore(self):
        """Connect to the datastore; exit, since schedulerd cannot run without it."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start APScheduler with jobs persisted in the datastore's MongoDB
        (all schedules interpreted in UTC)."""
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def register_schemas(self):
        """Publish the calendar-task JSON schemas to the dispatcher."""
        self.client.register_schema('calendar-task', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'id': {'type': 'string'},
                'name': {'type': 'string'},
                'args': {'type': 'array'},
                'description': {'type': 'string'},
                'enabled': {'type': 'boolean'},
                'hidden': {'type': 'boolean'},
                'protected': {'type': 'boolean'},
                'status': {'$ref': 'calendar-task-status'},
                'schedule': {
                    'type': 'object',
                    'additionalProperties': False,
                    'properties': {
                        'coalesce': {'type': ['boolean', 'integer', 'null']},
                        'year': {'type': ['string', 'integer', 'null']},
                        'month': {'type': ['string', 'integer', 'null']},
                        'day': {'type': ['string', 'integer', 'null']},
                        'week': {'type': ['string', 'integer', 'null']},
                        'day_of_week': {'type': ['string', 'integer', 'null']},
                        'hour': {'type': ['string', 'integer', 'null']},
                        'minute': {'type': ['string', 'integer', 'null']},
                        'second': {'type': ['string', 'integer', 'null']},
                        'timezone': {'type': ['string', 'null']}
                    }
                }
            }
        })

        self.client.register_schema('calendar-task-status', {
            'type': 'object',
            'properties': {
                'next_run_time': {'type': 'string'},
                'last_run_status': {'type': 'string'},
                'current_run_status': {'type': ['string', 'null']},
                'current_run_progress': {'type': ['object', 'null']}
            }
        })

    def connect(self):
        """Connect to the dispatcher and register scheduler services; retry forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """APScheduler job callback: submit the task, wait for completion,
        raise an alert on failure, and record the run in the datastore.

        kwargs must carry 'id' (job id) and 'name' (task name).
        """
        tid = self.client.submit_task(*args)
        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alerts.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(kwargs['name'], result['error']['message']),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Daemon entry point: initialize datastore, scheduler and dispatcher
        connection, then serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.register_schemas()
        self.client.wait_forever()
class Main(object):
    """alertd daemon: matches emitted alerts against user-defined filters and
    delivers them through pluggable emitters, re-sending periodic reminders."""

    def __init__(self):
        self.logger = logging.getLogger('alertd')
        self.config = None          # path to middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher RPC client
        self.plugin_dirs = []       # emitter plugin directories from config
        self.emitters = {}          # emitter name -> instance

    def init_datastore(self):
        """Connect to the datastore; exit, since alertd cannot run without it."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_reminder(self):
        """Start the background reminder thread (daemon so it dies with us)."""
        t = threading.Thread(target=self.reminder_thread)
        t.daemon = True
        t.start()

    def parse_config(self, filename):
        """Load the JSON middleware config file; exit on I/O or syntax error.

        Bug fix: the previous version logged ``err.message``, an attribute
        that does not exist on Python 3 exceptions, so a missing/unreadable
        config raised AttributeError instead of logging the real problem.
        The file is now also opened via a context manager so it is always
        closed.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['alertd']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher and register alertd services; retry forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('alertd')
                self.client.enable_server()
                self.client.register_service('alertd.management', ManagementService(self))
                self.client.register_service('alertd.alert', AlertService(self))
                self.client.register_service('alertd.debug', DebugService())
                self.client.resume_service('alertd.management')
                self.client.resume_service('alertd.alert')
                self.client.resume_service('alertd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Load emitter plugins from every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every *.py file in *dir* as a plugin; failures are logged,
        not fatal, so one broken plugin does not stop the others."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def emit_alert(self, alert):
        """Deliver *alert* through every filter whose predicates all match,
        then bump its send count and persist it.

        The inner for/else fires the emitter only when no predicate broke out
        of the loop (i.e. all predicates matched); unknown operators are
        skipped rather than treated as mismatches.
        """
        self.logger.debug('Emitting alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))
        for i in self.datastore.query('alert.filters'):
            for predicate in i.get('predicates', []):
                if predicate['operator'] not in operators_table:
                    continue

                if not operators_table[predicate['operator']](alert[predicate['property']], predicate['value']):
                    break
            else:
                try:
                    emitter = self.emitters.get(i['emitter'])
                    if not emitter:
                        self.logger.warning('Invalid emitter {0} for alert filter {1}'.format(i['emitter'], i['id']))
                        continue

                    self.logger.debug('Alert <id:{0}> matched filter {1}'.format(alert['id'], i['id']))
                    if alert['send_count'] > 0:
                        emitter.emit_again(alert, i['parameters'])
                    else:
                        emitter.emit_first(alert, i['parameters'])
                except BaseException as err:
                    # Failed to emit alert using alert emitter
                    # XXX: generate another alert about that
                    self.logger.error('Cannot emit alert <id:{0}> using {1}: {2}'.format(
                        alert['id'], i['emitter'], str(err))
                    )

        alert['send_count'] += 1
        alert['last_emitted_at'] = datetime.utcnow()

        if alert['one_shot']:
            alert['active'] = False

        self.datastore.update('alerts', alert['id'], alert)

    def cancel_alert(self, alert):
        """Deactivate *alert* and record the cancellation time."""
        self.logger.debug('Cancelling alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))

        alert.update({
            'active': False,
            'cancelled': datetime.utcnow()
        })

        self.datastore.update('alerts', alert['id'], alert)

    def register_emitter(self, name, cls):
        """Called by plugins from _init() to register their emitter class."""
        self.emitters[name] = cls(self)
        self.logger.info('Registered emitter {0} (class {1})'.format(name, cls))

    def reminder_thread(self):
        """Periodically re-emit active, undismissed alerts whose severity
        schedule says they are due again."""
        while True:
            time.sleep(REMINDER_SECONDS)
            for i in self.datastore.query('alerts'):
                if not i['active'] or i['dismissed']:
                    continue

                last_emission = i.get('last_emitted_at') or i['created_at']
                interval = REMINDER_SCHEDULE[i['severity']]

                if not interval:
                    continue

                if last_emission + timedelta(seconds=interval) <= datetime.utcnow():
                    self.emit_alert(i)

    def checkin(self):
        checkin()

    def main(self):
        """Daemon entry point: parse arguments, initialize, then serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/alertd.log', 'DEBUG')
        setproctitle('alertd')

        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.init_reminder()
        self.checkin()
        self.client.wait_forever()
class RESTApi(object):
    """restd WSGI application: exposes dispatcher RPCs/tasks as a REST API
    built from CRUD/single-item/resource plugins on top of falcon."""

    def __init__(self):
        self.logger = logging.getLogger('restd')
        self._cruds = []            # instantiated CRUD resources
        self._threads = []          # gevent server greenlets
        self._rpcs = {}             # 'service.method' -> method metadata
        self._schemas = {}          # schemas fetched from discovery
        self._used_schemas = set()
        self._services = {}         # service name -> method list
        self._tasks = {}            # task metadata from discovery
        self.api = falcon.API(middleware=[
            AuthMiddleware(),
            JSONTranslator(),
        ])
        self.api.add_route('/', SwaggerResource(self))

        gevent.signal(signal.SIGINT, self.die)

    def init_dispatcher(self):
        """Create the dispatcher client and reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.dispatcher = Client()
        self.dispatcher.on_error(on_error)
        self.connect()

    def init_metadata(self):
        """Fetch task/schema/service metadata from the dispatcher's discovery
        service and index RPC methods by 'service.method'."""
        self._tasks = self.dispatcher.call_sync('discovery.get_tasks')
        self._schemas = self.dispatcher.call_sync('discovery.get_schema')
        for service in self.dispatcher.call_sync('discovery.get_services'):
            self._services[service] = self.dispatcher.call_sync('discovery.get_methods', service)
            for method in self._services[service]:
                self._rpcs['{0}.{1}'.format(service, method['name'])] = method

    def load_plugins(self):
        """Load every plugin module from ../plugins and call its _init(self).

        Unlike the daemons' plugin scanners, a plugin failure here re-raises
        and aborts startup.
        """
        pluginsdir = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'plugins'))
        for i in glob.glob1(pluginsdir, "*.py"):
            try:
                loader = importlib.machinery.SourceFileLoader(i.split('.')[0], os.path.join(pluginsdir, i))
                mod = loader.load_module()
            except:
                self.logger.error('Failed to load plugin %s', i, exc_info=True)
                raise

            mod._init(self)

    def connect(self):
        """Connect to the dispatcher; retry forever."""
        while True:
            try:
                self.dispatcher.connect('unix:')
                self.dispatcher.login_service('restd')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def __call__(self, environ, start_response):
        # NOTE(review): the /api/v2.0 prefix is stripped only for proxied
        # requests (X-Real-IP present) — presumably the front-end proxy keeps
        # the prefix while direct requests don't carry it; confirm against the
        # proxy configuration.
        if 'HTTP_X_REAL_IP' in environ:
            environ['PATH_INFO'] = environ.get('PATH_INFO', '').replace('/api/v2.0', '', 1)
        return self.api.__call__(environ, start_response)

    def register_crud(self, klass):
        """Instantiate and retain a CRUD resource class (adds its own routes)."""
        ins = klass(self, self.dispatcher)
        self._cruds.append(ins)

    def register_singleitem(self, klass):
        # Instantiation registers the routes; no reference is kept.
        klass(self, self.dispatcher)

    def register_resource(self, klass):
        klass(self)

    def run(self):
        """Start the WSGI server on port 8889 and block until it exits."""
        self.init_dispatcher()
        self.init_metadata()
        self.load_plugins()
        server4 = WSGIServer(('', 8889), self, handler_class=RESTWSGIHandler)
        self._threads = [gevent.spawn(server4.serve_forever)]
        checkin()
        gevent.joinall(self._threads)

    def die(self, *args):
        """SIGINT handler: stop server greenlets and exit."""
        gevent.killall(self._threads)
        sys.exit(0)
def initialize(self, context):
    """Open a client connection to the local logd daemon's UNIX socket.

    context: framework-supplied initialization context (unused here).
    """
    self.client = Client()
    self.client.connect('unix:///var/run/logd.sock')
def run(self, peer):
    """Pair this host with a remote replication peer.

    Validates the SSH credentials in *peer*, connects to the remote
    replicator service, exchanges SSH keys, creates the local peer entry,
    then creates the mirror entry on the remote side — rolling the local
    entry back if the remote creation fails.
    """
    if self.datastore.exists('peers', ('address', '=', peer['address']), ('type', '=', 'replication')):
        raise TaskException(errno.EEXIST, 'Replication peer entry for {0} already exists'.format(peer['address']))
    if peer['credentials']['type'] != 'ssh':
        raise TaskException(errno.EINVAL, 'SSH credentials type is needed to perform replication peer pairing')

    remote = peer.get('address')
    credentials = peer['credentials']
    username = credentials.get('username')
    port = credentials.get('port', 22)
    password = credentials.get('password')

    if not username:
        raise TaskException(errno.EINVAL, 'Username has to be specified')
    if not remote:
        raise TaskException(errno.EINVAL, 'Address of remote host has to be specified')
    if not password:
        raise TaskException(errno.EINVAL, 'Password has to be specified')

    remote_client = Client()
    try:
        try:
            remote_client.connect('ws+ssh://{0}@{1}'.format(username, remote), port=port, password=password)
            remote_client.login_service('replicator')
        except (AuthenticationException, OSError, ConnectionRefusedError):
            raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

        local_keys = self.dispatcher.call_sync('peer.get_ssh_keys')
        remote_keys = remote_client.call_sync('peer.get_ssh_keys')
        # The sender address may carry a ',port' suffix; keep only the IP.
        ip_at_remote_side = remote_client.call_sync('management.get_sender_address').split(',', 1)[0]

        # known_hosts-style entries: '<address> <keytype> <key>' with the
        # trailing comment field of the original key dropped.
        remote_host_key = remote + ' ' + remote_keys[0].rsplit(' ', 1)[0]
        local_host_key = ip_at_remote_side + ' ' + local_keys[0].rsplit(' ', 1)[0]

        local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

        if remote_client.call_sync('peer.query', [('name', '=', peer['name'])]):
            raise TaskException(errno.EEXIST, 'Peer entry {0} already exists at {1}'.format(peer['name'], remote))

        # Local entry describes the REMOTE side (its pubkey/hostkey/port).
        peer['credentials'] = {
            'pubkey': remote_keys[1],
            'hostkey': remote_host_key,
            'port': port,
            'type': 'replication'
        }

        self.join_subtasks(self.run_subtask('peer.replication.create_local', peer))

        # Remote entry describes US as seen from the remote side.
        peer['address'] = ip_at_remote_side
        peer['credentials'] = {
            'pubkey': local_keys[1],
            'hostkey': local_host_key,
            'port': local_ssh_config['port'],
            'type': 'replication'
        }

        # NOTE(review): 'id' shadows the builtin, and datastore.query with
        # select='id' presumably returns a list rather than a scalar — verify
        # that delete()/the event payload accept that shape.
        id = self.datastore.query('peers', ('name', '=', peer['name']), select='id')
        try:
            call_task_and_check_state(remote_client, 'peer.replication.create_local', peer)
        except TaskException:
            # Compensate: remove the local entry created above and announce it.
            self.datastore.delete('peers', id)
            self.dispatcher.dispatch_event('peer.changed', {
                'operation': 'delete',
                'ids': [id]
            })
            raise
    finally:
        remote_client.disconnect()
class Main(object):
    """clid daemon: hosts a persistent server-side CLI context and exposes
    command evaluation over the dispatcher."""

    def __init__(self):
        # NOTE: the original assigned self.config and self.logger twice; the
        # second logger assignment silently replaced the named 'clid' logger
        # with the root logger. The duplicates are removed and the named
        # logger kept.
        self.logger = logging.getLogger('clid')
        self.config = None          # path to middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher RPC client
        self.plugin_dirs = []
        self.ml = None              # CLI MainLoop instance
        self.context = None         # CLI Context instance

    def init_dispatcher(self):
        """Create the dispatcher client and reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        """Build the CLI context on top of the dispatcher connection and load
        its plugins."""
        self.logger.info('Initializing CLI instance')
        self.context = Context()
        self.context.connection = self.client
        self.context.plugin_dirs = PLUGIN_DIRS
        self.context.discover_plugins()
        self.context.start_entity_subscribers()
        self.context.login_plugins()
        self.ml = MainLoop(self.context)
        self.logger.info('CLI instance ready')

    def connect(self):
        """Connect to the dispatcher and register clid services; retry forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        """Daemon entry point: parse arguments, initialize, then serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c

        configure_logging('/var/log/clid.log', 'DEBUG')
        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
class Context(object):
    """serviced daemon context: supervises service jobs via a kqueue process
    event loop and bridges job management over a local RPC server and the
    dispatcher."""

    def __init__(self):
        self.server = None          # local RPC server (UNIX socket)
        self.client = None          # dispatcher RPC client (connected lazily)
        self.jobs = {}              # job id -> Job
        self.provides = set()       # dependency targets currently satisfied
        self.lock = RLock()         # guards jobs/provides
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management', ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        """Connect to the dispatcher (idempotent) with automatic reconnect."""
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local RPC server on *address* in a daemon thread
        (socket world-accessible via permissions 0o777)."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        """Mark *targets* as satisfied and start jobs whose requirements are
        now fully met — after a 2-second settle delay."""
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        """Return the job whose main pid is *pid*, or None."""
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        """Main kqueue loop: dispatch process events to their jobs and adopt
        forked children of tracked processes as anonymous jobs."""
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            # A child that exited before we saw it needs no tracking.
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            # ev.data is the parent pid for NOTE_CHILD events.
                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                continue

                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(job.label))

    def track_pid(self, pid):
        """Register kqueue process tracking (exit/exec/fork, following children)."""
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK | select.KQ_NOTE_TRACK,
            0, 0)

        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        """Remove kqueue tracking for *pid*; ignore if already gone."""
        ev = select.kevent(pid, select.KQ_FILTER_PROC, select.KQ_EV_DELETE, 0, 0, 0)
        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        """Broadcast an event locally, and to the dispatcher if connected."""
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        """Connect to the dispatcher and resume serviced services; retry forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        """Launch the one-shot bootstrap job in a background thread."""
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })

                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        """Disconnect, close the RPC server, and exit."""
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        """Daemon entry point: start the RPC server, run bootstrap, then enter
        the kqueue event loop (never returns)."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
class Main(object):
    """Global state and entry point for the dscached directory-cache daemon.

    Hosts the RPC services (account/group/host/management/debug), loads
    directory plugins from configured plugin directories, and maintains
    caches plus the directory search order.
    """

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None                 # path to the JSON config file
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None                 # dispatcher Client connection
        self.server = None                 # local RPC Server
        self.plugin_dirs = []
        self.plugins = {}                  # plugin name -> class
        self.directories = []              # configured Directory instances
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.rpc.register_service_instance('dscached.account', AccountService(self))
        self.rpc.register_service_instance('dscached.group', GroupService(self))
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_enabled_directories(self):
        """Return configured Directory objects in search order, skipping unknown names."""
        return list(filter(None, (self.get_directory_by_name(n) for n in self.get_search_order())))

    def get_search_order(self):
        """Return the directory search order; 'local' and 'system' always come first."""
        return ['local', 'system'] + self.search_order

    def get_directory_by_domain(self, domain_name):
        """Return the first directory matching `domain_name`, or None."""
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        """Return the first directory named `name`, or None."""
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        """Return the directory whose configured id range contains `uid` or `gid`.

        uid/gid 0 are special-cased to the local directory. Returns None when
        no directory's range matches (or when both arguments are None).
        """
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories
            )

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories
            )

    def init_datastore(self):
        """Open the datastore and config store; exit(1) on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect on connection loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local RPC server on `address` in a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.start(address)
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        """Load the JSON config file; exit(1) if unreadable or malformed.

        Sets self.config and self.plugin_dirs.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # BUG FIX: IOError has no `.message` attribute in Python 3; using it
            # raised AttributeError and hid the real error. Log the exception itself.
            self.logger.error('Cannot read config file: %s', err)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher over the unix socket, retrying forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every .py file in `dir` as a plugin module and call its _init().

        Plugin failures are logged and skipped (best effort by design).
        """
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                # Was a bare `except:`; made explicit while keeping the same
                # deliberate catch-everything, best-effort behavior.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Register a plugin class under `name` (called by plugin _init hooks)."""
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        """Register a JSON schema with the dispatcher."""
        self.client.register_schema(name, schema)

    def init_directories(self):
        """Configure every directory found in the datastore; skip failing ones."""
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                directory.configure()
                self.directories.append(directory)
            except BaseException as err:
                # Previously swallowed silently; still best-effort, but log the
                # failure so broken directory configs are diagnosable.
                self.logger.warning(
                    'Cannot initialize directory {0}: {1}'.format(i.get('name', i), err),
                    exc_info=True
                )
                continue

    def load_config(self):
        """Pull cache/search-order settings from the config store."""
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')

    def main(self):
        """Entry point: parse args, initialize everything and block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('/var/log/dscached.log', 'DEBUG')

        setproctitle.setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.init_directories()
        self.client.wait_forever()
def run(self, peer, initial_credentials):
    """Pair this host with a remote FreeNAS peer.

    Two modes, selected by `initial_credentials`:
      * auth_code: connect over plain websocket, exchange keys via
        `peer.freenas.auth_with_code`, then wait for the remote side to
        create the peer entry on this host.
      * key/password auth: connect over ws+ssh, create the peer entry
        locally, then create the mirror entry on the remote; the local
        entry is rolled back if the remote creation fails.

    NOTE(review): only the key/password branch returns `created_id`; the
    auth_code branch returns None — confirm callers expect that.

    Raises TaskException on duplicate peers, connection failures, or
    remote RPC errors. Always disconnects the remote client.
    """
    hostid = self.dispatcher.call_sync('system.info.host_uuid')
    hostname = self.dispatcher.call_sync('system.general.get_config')['hostname']
    remote_peer_name = hostname
    credentials = peer['credentials']
    remote = credentials.get('address')
    port = credentials.get('port', 22)
    username = initial_credentials.get('username')
    password = initial_credentials.get('password')
    auth_code = initial_credentials.get('auth_code')
    key_auth = initial_credentials.get('key_auth')

    local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

    # Refuse to pair twice with the same address.
    if self.datastore.exists('peers', ('credentials.address', '=', remote), ('type', '=', 'freenas')):
        raise TaskException(
            errno.EEXIST,
            'FreeNAS peer entry for {0} already exists'.format(remote)
        )

    remote_client = Client()
    try:
        if auth_code:
            # --- auth-code pairing: remote side drives the peer creation ---
            try:
                remote_client.connect('ws://{0}'.format(wrap_address(remote)))
            except (AuthenticationException, OSError, ConnectionRefusedError):
                raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

            try:
                remote_host_uuid, pubkey = remote_client.call_sync(
                    'peer.freenas.auth_with_code',
                    auth_code,
                    hostname,
                    local_ssh_config['port']
                )
            except RpcException as err:
                raise TaskException(err.code, err.message)

            try:
                # Temporarily trust the remote's pubkey, then wait (max 30s)
                # for the remote to create its peer entry on this host.
                self.dispatcher.call_sync('peer.freenas.put_temp_pubkey', pubkey)
                if not self.dispatcher.test_or_wait_for_event(
                    'peer.changed',
                    lambda ar: ar['operation'] == 'create' and remote_host_uuid in ar['ids'],
                    lambda: self.datastore.exists('peers', ('id', '=', remote_host_uuid)),
                    timeout=30
                ):
                    raise TaskException(
                        errno.EAUTH,
                        'FreeNAS peer creation failed. Check connection to host {0}.'.format(remote)
                    )
            finally:
                # Always drop the temporary key, success or failure.
                self.dispatcher.call_sync('peer.freenas.remove_temp_pubkey', pubkey)
        else:
            # --- key or password pairing: this side drives both creations ---
            try:
                if key_auth:
                    # Load our private key from the config store.
                    with io.StringIO() as f:
                        f.write(self.configstore.get('peer.freenas.key.private'))
                        f.seek(0)
                        pkey = RSAKey.from_private_key(f)

                    # Retry auth for up to ~50s — presumably to ride out the
                    # remote installing our key; confirm before shortening.
                    max_tries = 50
                    while True:
                        try:
                            remote_client.connect('ws+ssh://freenas@{0}'.format(
                                wrap_address(remote)), pkey=pkey, port=port
                            )
                            break
                        except AuthenticationException:
                            if max_tries:
                                max_tries -= 1
                                time.sleep(1)
                            else:
                                raise
                else:
                    remote_client.connect(
                        'ws+ssh://{0}@{1}'.format(username, wrap_address(remote)),
                        port=port,
                        password=password
                    )

                remote_client.login_service('replicator')
            except (AuthenticationException, OSError, ConnectionRefusedError):
                raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

            local_host_key, local_pub_key = self.dispatcher.call_sync('peer.freenas.get_ssh_keys')
            remote_host_key, remote_pub_key = remote_client.call_sync('peer.freenas.get_ssh_keys')
            ip_at_remote_side = remote_client.local_address[0]

            remote_hostname = remote_client.call_sync('system.general.get_config')['hostname']

            # Strip the trailing comment field from the host keys.
            remote_host_key = remote_host_key.rsplit(' ', 1)[0]
            local_host_key = local_host_key.rsplit(' ', 1)[0]

            if remote_client.call_sync('peer.query', [('id', '=', hostid)]):
                raise TaskException(errno.EEXIST, 'Peer entry of {0} already exists at {1}'.format(hostname, remote))

            # First: create the remote's entry in OUR datastore.
            peer['credentials'] = {
                '%type': 'freenas-credentials',
                'pubkey': remote_pub_key,
                'hostkey': remote_host_key,
                'port': port,
                'address': remote_hostname
            }

            local_id = remote_client.call_sync('system.info.host_uuid')
            peer['id'] = local_id
            peer['name'] = remote_hostname
            ip = socket.gethostbyname(remote)

            created_id = self.run_subtask_sync(
                'peer.freenas.create_local',
                peer,
                ip,
                True
            )

            # Second: reuse the same dict, rewritten to describe US, and
            # create the mirror entry on the remote host.
            peer['id'] = hostid
            peer['name'] = remote_peer_name
            peer['credentials'] = {
                '%type': 'freenas-credentials',
                'pubkey': local_pub_key,
                'hostkey': local_host_key,
                'port': local_ssh_config['port'],
                'address': hostname
            }

            try:
                call_task_and_check_state(
                    remote_client,
                    'peer.freenas.create_local',
                    peer,
                    ip_at_remote_side
                )
            except TaskException:
                # Roll back the local entry so the two sides stay consistent.
                self.datastore.delete('peers', local_id)
                self.dispatcher.dispatch_event('peer.changed', {
                    'operation': 'delete',
                    'ids': [local_id]
                })
                raise

            return created_id
    finally:
        remote_client.disconnect()
def main(self):
    """Task-executor entry point.

    Expects exactly one argv argument: the check-in key. Connects to the
    dispatcher, registers the task proxy service, then loops forever
    pulling tasks from self.task, running them, and reporting status
    (FINISHED / FAILED, with optional ROLLBACK) back to the dispatcher.
    """
    if len(sys.argv) != 2:
        print("Invalid number of arguments", file=sys.stderr)
        sys.exit(errno.EINVAL)

    key = sys.argv[1]
    configure_logging(None, logging.DEBUG)

    self.datastore = get_datastore()
    self.configstore = ConfigStore(self.datastore)
    self.conn = Client()
    self.conn.connect('unix:')
    self.conn.login_service('task.{0}'.format(os.getpid()))
    self.conn.enable_server()
    self.conn.call_sync('management.enable_features', ['streaming_responses'])
    self.conn.rpc.register_service_instance('taskproxy', self.service)
    self.conn.register_event_handler('task.progress', self.task_progress_handler)
    self.conn.call_sync('task.checkin', key)
    setproctitle.setproctitle('task executor (idle)')

    while True:
        try:
            # Block until the dispatcher hands us a task descriptor.
            task = self.task.get()
            logging.root.setLevel(
                self.conn.call_sync('management.get_logging_level'))
            setproctitle.setproctitle('task executor (tid {0})'.format(
                task['id']))

            if task['debugger']:
                # Optional remote pydev debugging, attached per task.
                sys.path.append('/usr/local/lib/dispatcher/pydev')
                import pydevd
                host, port = task['debugger']
                pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

            name, _ = os.path.splitext(os.path.basename(task['filename']))
            # Task modules are cached by filename across runs.
            module = self.module_cache.get(task['filename'])
            if not module:
                module = load_module_from_file(name, task['filename'])
                self.module_cache[task['filename']] = module

            setproctitle.setproctitle('task executor (tid {0})'.format(
                task['id']))
            # File descriptors passed along with the task args; closed in finally.
            fds = list(self.collect_fds(task['args']))

            try:
                dispatcher = DispatcherWrapper(self.conn)
                self.instance = getattr(module, task['class'])(dispatcher, self.datastore)
                self.instance.configstore = self.configstore
                self.instance.user = task['user']
                self.instance.environment = task['environment']
                self.running.set()
                self.run_task_hooks(self.instance, task, 'before')
                result = self.instance.run(*task['args'])
                self.run_task_hooks(self.instance, task, 'after', result=result)
            except BaseException as err:
                print("Task exception: {0}".format(str(err)), file=sys.stderr)
                traceback.print_exc(file=sys.stderr)

                # Best-effort rollback if the task class provides one.
                if hasattr(self.instance, 'rollback'):
                    self.put_status('ROLLBACK')
                    try:
                        self.instance.rollback(*task['args'])
                    except BaseException as rerr:
                        print("Task exception during rollback: {0}".format(
                            str(rerr)), file=sys.stderr)
                        traceback.print_exc(file=sys.stderr)

                # Main task is already failed at this point, so ignore hook errors
                with contextlib.suppress(RpcException):
                    self.run_task_hooks(self.instance, task, 'error', error=serialize_error(err))

                self.put_status('FAILED', exception=err)
            else:
                self.put_status('FINISHED', result=result)
            finally:
                self.close_fds(fds)
                self.running.clear()
        except RpcException as err:
            print("RPC failed: {0}".format(str(err)), file=sys.stderr)
            print(traceback.format_exc(), flush=True)
            sys.exit(errno.EBADMSG)
        except socket.error as err:
            print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
            sys.exit(errno.ETIMEDOUT)

        if task['debugger']:
            import pydevd
            pydevd.stoptrace()

        setproctitle.setproctitle('task executor (idle)')
class Main(object):
    """Global state and entry point for the fnstatd statistics daemon.

    Collects data points into an HDF5 database (via pytables), exposes
    statd.output/alert/debug RPC services, and manages per-source alert
    configuration.
    """

    def __init__(self):
        self.client = None            # dispatcher Client connection
        self.server = None            # InputServer receiving data points
        self.datastore = None
        self.hdf = None               # open pytables file
        self.hdf_group = None         # /stats group inside the HDF file
        self.config = None            # path to the middleware config file
        self.logger = logging.getLogger('statd')
        self.data_sources = {}        # source name -> DataSource

    def init_datastore(self):
        """Open the datastore; exit(1) on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def init_database(self):
        """Open (or create) the HDF5 stats database and its /stats group."""
        # adding this try/except till system-dataset plugin is added back in in full fidelity
        # just a hack (since that directory's data will not persist)
        # Please remove this when system-dataset plugin is added back in
        try:
            directory = self.client.call_sync(
                'system_dataset.request_directory', 'statd')
        except RpcException:
            directory = '/var/tmp/statd'
            if not os.path.exists(directory):
                os.makedirs(directory)

        self.hdf = tables.open_file(os.path.join(directory, DEFAULT_DBFILE), mode='a')
        if not hasattr(self.hdf.root, 'stats'):
            self.hdf.create_group('/', 'stats')

        self.hdf_group = self.hdf.root.stats

    def request_table(self, name):
        """Return the HDF table `name`, creating it if needed.

        Returns None (after logging) if pytables raises — callers must
        tolerate a missing table.
        """
        try:
            if hasattr(self.hdf_group, name):
                return getattr(self.hdf_group, name)

            return self.hdf.create_table(self.hdf_group, name, DataPoint, name)
        except Exception as e:
            self.logger.error(str(e))
            return None  # explicit: was an implicit None on error

    def init_alert_config(self, name):
        """Return the alert config for `name`, falling back to 'default'."""
        config_name = name if self.datastore.exists('statd.alerts', ('id', '=', name)) else 'default'
        alert_config = self.datastore.get_by_id('statd.alerts', config_name)
        return alert_config

    def get_data_source(self, name):
        """Return the DataSource for `name`, creating and registering it lazily."""
        # Idiom fix: direct dict membership instead of building a key list.
        if name not in self.data_sources:
            config = DataSourceConfig(self.datastore, name)
            alert_config = self.init_alert_config(name)
            ds = DataSource(self, name, config, alert_config)
            self.data_sources[name] = ds
            self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(name))

        return self.data_sources[name]

    def register_schemas(self):
        """Register the stats query parameter/result schemas with the dispatcher."""
        self.client.register_schema('GetStatsParams', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'start': {'type': 'datetime'},
                'end': {'type': 'datetime'},
                'timespan': {'type': 'integer'},
                'frequency': {'type': 'string'}
            }
        })

        self.client.register_schema('GetStatsResult', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'data': {
                    'type': 'array',
                }
            }
        })

    def connect(self):
        """Connect to the dispatcher over the unix socket, retrying forever.

        Re-registers schemas, services and already-known data source event
        types on every (re)connect.
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('statd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('statd.output', OutputService(self))
                self.client.register_service('statd.alert', AlertService(self))
                self.client.register_service('statd.debug', DebugService(gevent=True))
                self.client.resume_service('statd.output')
                self.client.resume_service('statd.alert')
                self.client.resume_service('statd.debug')
                # Idiom fix: iterate the dict directly (no mutation here).
                for i in self.data_sources:
                    self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(i))

                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect on connection loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def die(self):
        """Stop the input server, disconnect and exit cleanly."""
        self.logger.warning('Exiting')
        self.server.stop()
        self.client.disconnect()
        sys.exit(0)

    def dispatcher_error(self, error):
        self.die()

    def checkin(self):
        checkin()

    def main(self):
        """Entry point: parse args, initialize everything and block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/fnstatd.log', 'DEBUG')
        setproctitle('fnstatd')

        # Signal handlers
        gevent.signal(signal.SIGQUIT, self.die)
        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGINT, self.die)

        self.server = InputServer(self)
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_database()
        self.server.start()
        self.logger.info('Started')
        self.checkin()
        self.client.wait_forever()