def extend_cluster(master_ip, new_ip, cluster_name, exclude_ports):
    """
    Adds a new node to an existing arakoon cluster.

    The current cluster configuration is loaded from the master node, the new
    node is appended to it (unless a node with the same machine id is already
    present) and the updated configuration is deployed.

    :param master_ip: IP of a node that already carries the cluster configuration
    :param new_ip: IP of the node that should join the cluster
    :param cluster_name: Name of the cluster to extend
    :param exclude_ports: Ports that may not be allocated on the new node
    :return: dict with the 'client_port' and 'messaging_port' reserved for the new node
    """
    logger.debug('Extending cluster {0} from {1} to {2}'.format(cluster_name, master_ip, new_ip))
    # Load the existing cluster configuration from the master node
    config = ArakoonClusterConfig(cluster_name)
    config.load_config(SSHClient(master_ip))
    # Gather settings and reserve two free ports on the joining node
    new_node_client = SSHClient(new_ip)
    base_dir = new_node_client.config_read('ovs.arakoon.location').rstrip('/')
    port_range = new_node_client.config_read('ovs.ports.arakoon')
    ports = System.get_free_ports(port_range, exclude_ports, 2, new_node_client)
    node_name = System.get_my_machine_id(new_node_client)
    # Only append when this machine is not yet part of the cluster
    if not any(node.name == node_name for node in config.nodes):
        new_node = ArakoonNodeConfig(name=node_name,
                                     ip=new_ip,
                                     client_port=ports[0],
                                     messaging_port=ports[1],
                                     log_dir=ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name),
                                     home=ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name),
                                     tlog_dir=ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name))
        config.nodes.append(new_node)
    ArakoonInstaller._deploy(config)
    logger.debug('Extending cluster {0} from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))
    return {'client_port': ports[0], 'messaging_port': ports[1]}
def create_cluster(cluster_name, ip, exclude_ports, plugins=None):
    """
    Bootstraps a brand-new arakoon cluster with a single node on the given machine.

    :param cluster_name: Name of the cluster to create
    :param ip: IP of the machine that will host the first node
    :param exclude_ports: Ports that may not be allocated for the node
    :param plugins: Optional plugins to configure on the cluster
    :return: dict with the 'client_port' and 'messaging_port' reserved for the node
    """
    logger.debug('Creating cluster {0} on {1}'.format(cluster_name, ip))
    client = SSHClient(ip)
    # Resolve the arakoon base directory and reserve two free ports on the host
    base_dir = client.config_read('ovs.arakoon.location').rstrip('/')
    ports = System.get_free_ports(client.config_read('ovs.ports.arakoon'), exclude_ports, 2, client)
    node_name = System.get_my_machine_id(client)
    config = ArakoonClusterConfig(cluster_name, plugins)
    # Defensive: skip the append if this machine is somehow already configured
    if node_name not in [node.name for node in config.nodes]:
        node = ArakoonNodeConfig(name=node_name,
                                 ip=ip,
                                 client_port=ports[0],
                                 messaging_port=ports[1],
                                 log_dir=ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name),
                                 home=ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name),
                                 tlog_dir=ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name))
        config.nodes.append(node)
    ArakoonInstaller._deploy(config)
    logger.debug('Creating cluster {0} on {1} completed'.format(cluster_name, ip))
    return {'client_port': ports[0], 'messaging_port': ports[1]}
def extend_cluster(master_ip, new_ip, cluster_name, exclude_ports):
    """
    Joins a new node into an already-running arakoon cluster.

    :param master_ip: IP of a node holding the current cluster configuration
    :param new_ip: IP of the node being added
    :param cluster_name: Name of the cluster to extend
    :param exclude_ports: Ports that must not be handed out on the new node
    :return: dict with the 'client_port' and 'messaging_port' for the new node
    """
    logger.debug('Extending cluster {0} from {1} to {2}'.format(cluster_name, master_ip, new_ip))
    # Fetch the cluster configuration as it exists on the master
    config = ArakoonClusterConfig(cluster_name)
    config.load_config(SSHClient(master_ip))
    # Collect directories, ports and the machine id on the joining node
    client = SSHClient(new_ip)
    base_dir = client.config_read('ovs.arakoon.location').rstrip('/')
    ports = System.get_free_ports(client.config_read('ovs.ports.arakoon'), exclude_ports, 2, client)
    node_name = System.get_my_machine_id(client)
    existing_names = [node.name for node in config.nodes]
    if node_name not in existing_names:
        node_kwargs = {'name': node_name,
                       'ip': new_ip,
                       'client_port': ports[0],
                       'messaging_port': ports[1],
                       'log_dir': ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name),
                       'home': ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name),
                       'tlog_dir': ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name)}
        config.nodes.append(ArakoonNodeConfig(**node_kwargs))
    ArakoonInstaller._deploy(config)
    logger.debug('Extending cluster {0} from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))
    return {'client_port': ports[0], 'messaging_port': ports[1]}
def create_cluster(cluster_name, ip, exclude_ports, base_dir, plugins=None):
    """
    Creates a single-node arakoon cluster on the given machine.

    :param cluster_name: Name of the cluster to create
    :param ip: IP of the machine that will host the first node
    :param exclude_ports: Ports that may not be allocated for the node
    :param base_dir: Base directory under which home/tlog directories are placed
    :param plugins: Optional plugins to configure on the cluster
    :return: dict with the 'client_port' and 'messaging_port' reserved for the node
    """
    logger.debug("Creating cluster {0} on {1}".format(cluster_name, ip))
    client = SSHClient(ip)
    # Normalize the base directory and reserve two free ports on the host
    cluster_base_dir = base_dir.rstrip("/")
    port_range = client.config_read("ovs.ports.arakoon")
    ports = System.get_free_ports(port_range, exclude_ports, 2, client)
    node_name = System.get_my_machine_id(client)
    config = ArakoonClusterConfig(cluster_name, plugins)
    # Defensive: skip the append if this machine is somehow already configured
    if node_name not in [node.name for node in config.nodes]:
        node = ArakoonNodeConfig(name=node_name,
                                 ip=ip,
                                 client_port=ports[0],
                                 messaging_port=ports[1],
                                 log_dir=ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name),
                                 home=ArakoonInstaller.ARAKOON_HOME_DIR.format(cluster_base_dir, cluster_name),
                                 tlog_dir=ArakoonInstaller.ARAKOON_TLOG_DIR.format(cluster_base_dir, cluster_name))
        config.nodes.append(node)
    ArakoonInstaller._deploy(config)
    logger.debug("Creating cluster {0} on {1} completed".format(cluster_name, ip))
    return {"client_port": ports[0], "messaging_port": ports[1]}
def up_and_running(mountpoint, storagedriver_id):
    """
    Post-start hook: the volumedriver reports that its service is fully started.

    Bumps the startup counter on the matching StorageDriver model object and,
    for VMware hypervisors running in 'classic' mode, re-exports the NFS
    mountpoint so the hypervisor picks it up again.

    :param mountpoint: Mountpoint to (re-)export over NFS when applicable
    :param storagedriver_id: Identifier of the storage driver that started
    :raises RuntimeError: When no StorageDriver with the given id exists
    """
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    if storagedriver is None:
        raise RuntimeError('A Storage Driver with id {0} could not be found.'.format(storagedriver_id))
    storagedriver.startup_counter += 1
    storagedriver.save()
    # NFS re-export is only relevant on VMware hosts in classic mode
    if storagedriver.storagerouter.pmachine.hvtype != 'VMWARE':
        return
    client = SSHClient(storagedriver.storagerouter)
    if client.config_read('ovs.storagedriver.vmware_mode') != 'classic':
        return
    nfs = Nfsexports()
    nfs.unexport(mountpoint)
    nfs.export(mountpoint)
    nfs.trigger_rpc_mountd()
def prepare_mds_service(storagerouter, vpool, fresh_only, reload_config):
    """
    Prepares an MDS service:
    * Creates the required configuration
    * Sets up the service files

    Assumes the StorageRouter and VPool are already configured with a StorageDriver and that all model-wise
    configuration regarding both is completed.
    :param storagerouter: Storagerouter on which MDS service will be created
    :param vpool: The vPool for which the MDS service will be created
    :param fresh_only: If True and no current mds services exist for this vpool on this storagerouter, a new 1 will be created
    :param reload_config: If True, the volumedriver's updated configuration will be reloaded
    :return: The MDSService model object (None when aborted because services already exist)
    """
    # Fetch service sequence number based on MDS services for current vPool and current storage router
    service_number = -1
    for mds_service in vpool.mds_services:
        if mds_service.service.storagerouter_guid == storagerouter.guid:
            service_number = max(mds_service.number, service_number)

    if fresh_only is True and service_number >= 0:
        return  # There is already 1 or more MDS services running, aborting

    # VALIDATIONS
    # 1. Find free port based on MDS services for all vPools on current storage router
    client = SSHClient(storagerouter)
    mdsservice_type = ServiceTypeList.get_by_name('MetadataServer')
    occupied_ports = []
    for service in mdsservice_type.services:
        if service.storagerouter_guid == storagerouter.guid:
            occupied_ports.extend(service.ports)
    mds_port_range = client.config_read('ovs.ports.mds')
    free_ports = System.get_free_ports(selected_range=mds_port_range, exclude=occupied_ports, nr=1, client=client)
    if not free_ports:
        raise RuntimeError('Failed to find an available port on storage router {0} within range {1}'.format(storagerouter.name, mds_port_range))

    # 2. Partition check
    # NOTE(review): the 'break' only exits the inner partition loop; when multiple
    # disks carry a DB-role partition, the last disk's partition wins — confirm intended.
    db_partition = None
    for disk in storagerouter.disks:
        for partition in disk.partitions:
            if DiskPartition.ROLES.DB in partition.roles:
                db_partition = partition
                break
    if db_partition is None:
        raise RuntimeError('Could not find DB partition on storage router {0}'.format(storagerouter.name))

    # 3. Verify storage driver configured
    storagedrivers = [sd for sd in vpool.storagedrivers if sd.storagerouter_guid == storagerouter.guid]
    if not storagedrivers:
        raise RuntimeError('Expected to find a configured storagedriver for vpool {0} on storage router {1}'.format(vpool.name, storagerouter.name))

    # MODEL UPDATES
    # 1. Service: persist a new Service + MDSService pair with the next sequence number
    service_number += 1
    service = Service()
    service.name = 'metadataserver_{0}_{1}'.format(vpool.name, service_number)
    service.type = mdsservice_type
    service.ports = [free_ports[0]]
    service.storagerouter = storagerouter
    service.save()
    mds_service = MDSService()
    mds_service.vpool = vpool
    mds_service.number = service_number
    mds_service.service = service
    mds_service.save()

    # 2. Storage driver partitions
    # Local import — presumably to avoid a circular import at module level; verify.
    from ovs.lib.storagedriver import StorageDriverController
    sdp = StorageDriverController.add_storagedriverpartition(storagedrivers[0], {'size': None,
                                                                                'role': DiskPartition.ROLES.DB,
                                                                                'sub_role': StorageDriverPartition.SUBROLE.MDS,
                                                                                'partition': db_partition,
                                                                                'mds_service': mds_service})

    # CONFIGURATIONS
    # 1. Volumedriver: rebuild the MDS node list for this vpool on this storage router
    # NOTE(review): 'mds_service' is reused as the loop target below, so after this loop
    # it may reference a pre-existing MDS service rather than the one created above —
    # confirm the returned value is the intended one.
    mds_nodes = []
    for service in mdsservice_type.services:
        if service.storagerouter_guid == storagerouter.guid:
            mds_service = service.mds_service
            if mds_service.vpool_guid == vpool.guid:
                mds_nodes.append({'host': service.storagerouter.ip,
                                  'port': service.ports[0],
                                  'db_directory': sdp.path,
                                  'scratch_directory': sdp.path})

    # Generate the correct section in the Storage Driver's configuration
    storagedriver_config = StorageDriverConfiguration('storagedriver', vpool.name)
    storagedriver_config.load(client)
    storagedriver_config.clean()  # Clean out obsolete values
    storagedriver_config.configure_metadata_server(mds_nodes=mds_nodes)
    storagedriver_config.save(client, reload_config=reload_config)
    return mds_service
def get(self, request, *args, **kwargs):
    """
    Fetches metadata about the cluster and the calling user's authentication state.

    Builds a metadata dict (plugins, identification, registration, authentication
    metadata) and then validates the request's Bearer token, enriching the dict
    with user/role information when authenticated.

    NOTE(review): every exit returns the tuple ``HttpResponse, dict(...)`` —
    presumably unpacked by a framework decorator into (response class, payload);
    confirm against the decorator on this view.
    """
    _ = args, kwargs
    # Baseline payload; fields are filled in progressively below
    data = {'authenticated': False,
            'authentication_state': None,
            'authentication_metadata': {},
            'username': None,
            'userguid': None,
            'roles': [],
            'identification': {},
            'storagerouter_ips': [sr.ip for sr in StorageRouterList.get_storagerouters()],
            'versions': list(settings.VERSION),
            'plugins': {},
            'registration': {'registered': False, 'remaining': None}}
    try:
        # Gather plugin metadata
        plugins = {}
        # - Backends. BackendType plugins must set the has_plugin flag on True
        for backend_type in BackendTypeList.get_backend_types():
            if backend_type.has_plugin is True:
                if backend_type.code not in plugins:
                    plugins[backend_type.code] = []
                plugins[backend_type.code] += ['backend', 'gui']
        # - Generic plugins, as added to the configuration file(s)
        generic_plugins = Configuration.get('ovs.plugins.generic')
        for plugin_name in generic_plugins:
            if plugin_name not in plugins:
                plugins[plugin_name] = []
            plugins[plugin_name] += ['gui']
        data['plugins'] = plugins

        # Fill identification
        data['identification'] = {'cluster_id': Configuration.get('ovs.support.cid')}

        # Registration data: when unregistered, compute remaining trial time from
        # the oldest install time found across all storage routers
        registered = Configuration.get('ovs.core.registered')
        data['registration']['registered'] = registered
        if registered is False:
            cluster_install_time = None
            for storagerouter in StorageRouterList.get_storagerouters():
                client = SSHClient(storagerouter)
                install_time = client.config_read('ovs.core.install_time')
                if cluster_install_time is None or (install_time is not None and install_time < cluster_install_time):
                    cluster_install_time = install_time
            if cluster_install_time is not None:
                # NOTE(review): despite its name this value is in *seconds*
                # (30 days worth); 'remaining' is then converted back to days.
                timeout_days = 30 * 24 * 60 * 60
                data['registration']['remaining'] = (timeout_days - time.time() + cluster_install_time) / 24 / 60 / 60

        # Get authentication metadata (OAuth2 settings, when present)
        authentication_metadata = {'ip': System.get_my_storagerouter().ip}
        for key in ['mode', 'authorize_uri', 'client_id', 'scope']:
            if Configuration.exists('ovs.webapps.oauth2.{0}'.format(key)):
                authentication_metadata[key] = Configuration.get('ovs.webapps.oauth2.{0}'.format(key))
        data['authentication_metadata'] = authentication_metadata

        # Gather authorization metadata: validate the Bearer token step by step,
        # returning early with a specific authentication_state on each failure
        if 'HTTP_AUTHORIZATION' not in request.META:
            return HttpResponse, dict(data.items() + {'authentication_state': 'unauthenticated'}.items())
        authorization_type, access_token = request.META['HTTP_AUTHORIZATION'].split(' ')
        if authorization_type != 'Bearer':
            return HttpResponse, dict(data.items() + {'authentication_state': 'invalid_authorization_type'}.items())
        tokens = BearerTokenList.get_by_access_token(access_token)
        if len(tokens) != 1:
            return HttpResponse, dict(data.items() + {'authentication_state': 'invalid_token'}.items())
        token = tokens[0]
        if token.expiration < time.time():
            # Expired token: clean up its role junctions and the token itself
            for junction in token.roles.itersafe():
                junction.delete()
            token.delete()
            return HttpResponse, dict(data.items() + {'authentication_state': 'token_expired'}.items())

        # Gather user metadata
        user = token.client.user
        if not user.is_active:
            return HttpResponse, dict(data.items() + {'authentication_state': 'inactive_user'}.items())
        roles = [j.role.code for j in token.roles]

        return HttpResponse, dict(data.items() + {'authenticated': True,
                                                  'authentication_state': 'authenticated',
                                                  'username': user.username,
                                                  'userguid': user.guid,
                                                  'roles': roles,
                                                  'plugins': plugins}.items())
    except Exception as ex:
        # Boundary handler: log and report a generic failure state
        logger.exception('Unexpected exception: {0}'.format(ex))
        return HttpResponse, dict(data.items() + {'authentication_state': 'unexpected_exception'}.items())