def config_changed():
    """Juju config-changed hook: validate config, render ceph.conf,
    prepare OSD devices and bootstrap a single-node monitor cluster.

    Exits the hook (status 1) when 'fsid' or 'monitor-secret' is unset,
    since the cluster cannot be formed without them.
    """
    utils.juju_log('INFO', 'Begin config-changed hook.')
    utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts()))

    fsid = utils.config_get('fsid')
    if fsid == '':
        utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.')
        sys.exit(1)

    monitor_secret = utils.config_get('monitor-secret')
    if monitor_secret == '':
        utils.juju_log('CRITICAL',
                       'No monitor-secret supplied, cannot proceed.')
        sys.exit(1)

    emit_cephconf()

    # split() with no argument tolerates repeated whitespace and an
    # empty 'osd-devices' value, where split(' ') would yield '' entries
    # and call osdize('') needlessly.
    for dev in utils.config_get('osd-devices').split():
        osdize(dev)

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and
            int(utils.config_get('monitor-count')) == 1):
        bootstrap_monitor_cluster()
        ceph.wait_for_bootstrap()

    if ceph.is_bootstrapped():
        ceph.rescan_osd_devices()

    utils.juju_log('INFO', 'End config-changed hook.')
def deploy_run(cloud, data_s3, products_list, vm_service_offers,
               canned_offer_name, timeout):
    """Deploy the mapper/reducer processing module and watch it.

    :param cloud: target cloud identifier
    :param data_s3: dict with 's3host'/'s3bucket' of the input data
    :param products_list: products to process (one mapper per product)
    :param vm_service_offers: dict with 'mapper' and 'reducer' offers
    :param canned_offer_name: offer name handed to the watcher thread
    :param timeout: in seconds
    :return: run URL on success, or a warning message when no suitable
        service offers were provided
    """
    server_ip = config_get('dmm_ip')
    server_hostname = config_get('dmm_hostname')

    mapper_so = vm_service_offers['mapper']
    reducer_so = vm_service_offers['reducer']
    if mapper_so and reducer_so:
        mapper_params = {
            'service-offer': mapper_so,
            'product-list': ' '.join(products_list),
            's3-host': data_s3['s3host'],
            's3-bucket': data_s3['s3bucket'],
            'server_hn': server_hostname,
            'server_ip': server_ip
        }
        reducer_params = {
            'service-offer': reducer_so,
            'server_hn': server_hostname,
            'server_ip': server_ip
        }
        # NOTE(review): result_s3_creds is not defined in this function;
        # presumably a module-level global — confirm it is always bound
        # before this hook runs.
        if result_s3_creds:
            reducer_params['s3-host'] = result_s3_creds.get('host', '')
            reducer_params['s3-bucket'] = result_s3_creds.get('bucket', '')
            reducer_params['s3-access-key'] = result_s3_creds.get('key', '')
            reducer_params['s3-secret-key'] = result_s3_creds.get('secret', '')
        comps_params = {'mapper': mapper_params, 'reducer': reducer_params}
        comps_counts = {'mapper': len(products_list), 'reducer': 1}
        proc_module = config_get('ss_module_proc_sar')
        comps_clouds = {'mapper': cloud, 'reducer': cloud}
        logger.info(
            'Deploying: on "%s" with params "%s" and multiplicity "%s".' %
            (comps_clouds, comps_params, comps_counts))
        deployment_id = ss_api.deploy(proc_module,
                                      cloud=comps_clouds,
                                      parameters=comps_params,
                                      multiplicity=comps_counts,
                                      tags='EOproc',
                                      keep_running='never')
        daemon_watcher = Thread(target=wait_product,
                                args=(deployment_id, cloud,
                                      canned_offer_name, timeout))
        # .daemon replaces the deprecated setDaemon(); must be set
        # before start().
        daemon_watcher.daemon = True
        daemon_watcher.start()
        return '%s/run/%s' % (ss_api.endpoint, deployment_id)
    else:
        msg = "No suitable instance types found for mapper and reducer on cloud %s" % cloud
        # logger.warning replaces the deprecated logger.warn alias.
        logger.warning(msg)
        return msg
def emit_cephconf():
    """Render /etc/ceph/ceph.conf from the charm's template and config."""
    context = {
        'auth_supported': utils.config_get('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': utils.config_get('fsid'),
    }
    with open('/etc/ceph/ceph.conf', 'w') as conf_file:
        conf_file.write(utils.render_template('ceph.conf', context))
def notify_osds():
    """Publish fsid, OSD bootstrap key and auth mode on every 'osd'
    relation so attached OSD units can join the cluster."""
    utils.juju_log('INFO', 'Begin notify_osds.')
    for relation_id in utils.relation_ids('osd'):
        utils.relation_set(
            fsid=utils.config_get('fsid'),
            osd_bootstrap_key=ceph.get_osd_bootstrap_key(),
            auth=utils.config_get('auth-supported'),
            rid=relation_id)
    utils.juju_log('INFO', 'End notify_osds.')
def osd_relation():
    """osd relation hook: hand out fsid and bootstrap key, but only
    once the monitor cluster has reached quorum."""
    utils.juju_log('INFO', 'Begin osd-relation hook.')
    if not ceph.is_quorum():
        # Not safe to issue keys yet; a later hook run will provide them.
        utils.juju_log('INFO',
                       'mon cluster not in quorum - deferring fsid provision')
    else:
        utils.juju_log('INFO', 'mon cluster in quorum - providing fsid & keys')
        utils.relation_set(fsid=utils.config_get('fsid'),
                           osd_bootstrap_key=ceph.get_osd_bootstrap_key(),
                           auth=utils.config_get('auth-supported'))
    utils.juju_log('INFO', 'End osd-relation hook.')
def identity_joined(relid=None):
    """identity-service relation-joined: register the radosgw Swift
    endpoint (admin/internal/public URLs) with keystone.

    :param relid: relation id to set data on (None = current relation)
    """
    # NOTE(review): this compares version strings lexicographically,
    # which misorders multi-digit components (e.g. "0.100" < "0.55") —
    # confirm get_ceph_version()'s return format before relying on it.
    if ceph.get_ceph_version('radosgw') < "0.55":
        utils.juju_log('ERROR',
                       'Integration with keystone requires ceph >= 0.55')
        sys.exit(1)
    hostname = utils.unit_get('private-address')
    # Internal and public endpoints share the versioned /swift/v1 path;
    # the admin endpoint is the bare /swift root.
    admin_url = 'http://{}:80/swift'.format(hostname)
    internal_url = public_url = '{}/v1'.format(admin_url)
    utils.relation_set(service='swift',
                       region=utils.config_get('region'),
                       public_url=public_url,
                       internal_url=internal_url,
                       admin_url=admin_url,
                       requested_roles=utils.config_get('operator-roles'),
                       rid=relid)
def get_cert():
    """Return an (ssl_cert, ssl_key) pair.

    Charm config takes precedence; otherwise the identity-service
    relations are scanned. Either element may be None if not found.
    """
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if cert and key:
        return (cert, key)
    juju_log('INFO',
             "Inspecting identity-service relations for SSL certificate.")
    cert = key = None
    for relation_id in relation_ids('identity-service'):
        for unit in relation_list(relation_id):
            # Keep the first value found for each of cert and key.
            cert = cert or relation_get('ssl_cert',
                                        rid=relation_id, unit=unit)
            key = key or relation_get('ssl_key',
                                      rid=relation_id, unit=unit)
    return (cert, key)
def __init__(self) -> None:
    """Create the main window, load the icon, set up fonts and the
    database connection."""
    self.root = Tk()
    self.root.geometry("300x370+0+0")
    self.root.eval("tk::PlaceWindow . center")
    self.root.title("Чеботарёв Степан ПКС-405")
    self.root.iconphoto(False, PhotoImage(file=config_get("icon_path")))

    self.Frame = None
    self.database_manager = DatabaseManager()

    # Palette
    self.font_color = "#37474F"
    self.font_color_complementary = "#78909C"
    self.font_color_accent = "#FF9C1A"

    # Fonts
    self.font_captcha = ("Curlz MT", 24)
    self.font_20 = ("Arial", 20)
    self.font_24 = ("Arial", 24)
    self.font_28 = ("Arial", 28)
def bootstrap_monitor_cluster():
    """Initialise and start the local ceph monitor, exactly once.

    A 'done' marker file makes the operation idempotent across hook
    invocations. The temporary keyring is removed whether or not the
    bootstrap succeeds.
    """
    hostname = utils.get_unit_hostname()
    done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname)
    secret = utils.config_get('monitor-secret')
    keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)
    if os.path.exists(done):
        utils.juju_log('INFO',
                       'bootstrap_monitor_cluster: mon already initialized.')
        return
    # The original wrapped this in a redundant bare 'except: raise';
    # try/finally alone guarantees the keyring cleanup on any outcome.
    try:
        subprocess.check_call(['ceph-authtool', keyring,
                               '--create-keyring', '--name=mon.',
                               '--add-key={}'.format(secret),
                               '--cap', 'mon', 'allow *'])
        subprocess.check_call(['ceph-mon', '--mkfs',
                               '-i', hostname,
                               '--keyring', keyring])
        # Touch the marker so later hook runs skip the bootstrap.
        with open(done, 'w'):
            pass
        subprocess.check_call(['start', 'ceph-mon-all-starter'])
    finally:
        os.unlink(keyring)
def get_keystone_conf():
    """Collect keystone auth settings from the identity-service relation.

    :return: dict of auth settings for the first unit that provides a
        complete set of values, else None.
    """
    for relid in utils.relation_ids('identity-service'):
        for unit in utils.relation_list(relid):
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': 'http',
                'auth_host': utils.relation_get('auth_host', unit, relid),
                'auth_port': utils.relation_get('auth_port', unit, relid),
                'admin_token': utils.relation_get('admin_token', unit, relid),
                'user_roles': utils.config_get('operator-roles'),
                'cache_size': utils.config_get('cache-size'),
                'revocation_check_interval':
                    utils.config_get('revocation-check-interval'),
            }
            # values() works on both Python 2 and 3; the original
            # itervalues() is Python-2 only.
            if None not in ks_auth.values():
                return ks_auth
    return None
def configure_gmetad():
    """Render gmetad.conf from master relation data and restart gmetad
    when the file content actually changed."""
    juju_log("INFO", "Configuring gmetad for master unit")
    data_sources = {"self": ["localhost"]}
    for rid in relation_ids("master"):
        for unit in relation_list(rid):
            # The 'datasource' flag is set by the ganglia-node
            # subordinate to indicate that gmond should not be used
            # as a datasource.
            if relation_get('datasource', unit, rid) == "true":
                service = unit.split('/')[0]
                data_sources.setdefault(service, []).append(
                    relation_get('private-address', unit, rid))
    context = {
        "data_sources": data_sources,
        "gridname": config_get("gridname"),
    }
    previous = checksum(GMETAD_CONF)
    with open(GMETAD_CONF, "w") as conf:
        conf.write(render_template("gmetad.conf", context))
    # Only bounce the daemon when the rendered config differs.
    if previous != checksum(GMETAD_CONF):
        control(GMETAD, RESTART)
def notify_radosgws():
    """Publish the radosgw key and auth mode on every 'radosgw'
    relation."""
    utils.juju_log('INFO', 'Begin notify_radosgws.')
    for relation_id in utils.relation_ids('radosgw'):
        utils.relation_set(
            radosgw_key=ceph.get_radosgw_key(),
            auth=utils.config_get('auth-supported'),
            rid=relation_id)
    utils.juju_log('INFO', 'End notify_radosgws.')
def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    # A relation unit must supply the full certificate bundle.
    required = ('https_keystone', 'ssl_cert', 'ssl_key', 'ca_cert')
    for rid in relation_ids('identity-service'):
        for unit in relation_list(rid):
            if all(relation_get(setting, rid=rid, unit=unit)
                   for setting in required):
                return True
    return False
def get_logger(name=__name__, log_level=None):
    """Return a logger configured with the module's file and format
    settings.

    When log_level is None the 'log_level' config option is consulted,
    falling back to LOG_LEVEL on any failure.
    """
    if log_level is None:
        try:
            configured = config_get('log_level').strip()
            log_level = logging.getLevelName(configured)
        except Exception:
            log_level = LOG_LEVEL
    logging.basicConfig(filename=LOG_FILE, format=FORMAT,
                        datefmt=FORMAT_DATE, level=log_level)
    return logging.getLogger(name)
def notify_client():
    """Send a service-specific cephx key to every 'client' relation."""
    utils.juju_log('INFO', 'Begin notify_client.')
    for relation_id in utils.relation_ids('client'):
        # The key is named after the remote service (unit name prefix).
        remote_service = utils.relation_list(relation_id)[0].split('/')[0]
        utils.relation_set(key=ceph.get_named_key(remote_service),
                           auth=utils.config_get('auth-supported'),
                           rid=relation_id)
    utils.juju_log('INFO', 'End notify_client.')
def client_relation():
    """client relation hook: provide a named cephx key once the monitor
    cluster has quorum; otherwise defer to a later hook run."""
    utils.juju_log('INFO', 'Begin client-relation hook.')
    if ceph.is_quorum():
        # Single clean message; the original used a backslash
        # continuation that embedded indentation whitespace in the
        # emitted log line.
        utils.juju_log('INFO',
                       'mon cluster in quorum - providing client with keys')
        service_name = os.environ['JUJU_REMOTE_UNIT'].split('/')[0]
        utils.relation_set(key=ceph.get_named_key(service_name),
                           auth=utils.config_get('auth-supported'))
    else:
        utils.juju_log('INFO',
                       'mon cluster not in quorum - deferring key provision')
    utils.juju_log('INFO', 'End client-relation hook.')
def radosgw_relation():
    """radosgw relation hook: install admin tooling and provide the
    radosgw key once the monitor cluster has quorum."""
    utils.juju_log('INFO', 'Begin radosgw-relation hook.')
    utils.install('radosgw')  # Install radosgw for admin tools
    if ceph.is_quorum():
        # Single clean message; the original used a backslash
        # continuation that embedded indentation whitespace in the
        # emitted log line.
        utils.juju_log('INFO',
                       'mon cluster in quorum - providing radosgw with keys')
        utils.relation_set(radosgw_key=ceph.get_radosgw_key(),
                           auth=utils.config_get('auth-supported'))
    else:
        utils.juju_log('INFO',
                       'mon cluster not in quorum - deferring key provision')
    utils.juju_log('INFO', 'End radosgw-relation hook.')
def osdize(dev):
    """Prepare block device `dev` as a ceph OSD, skipping devices that
    are already OSDs, currently mounted, or absent."""
    ephemeral_mount = utils.config_get('ephemeral-unmount')
    if ephemeral_mount != "":
        subprocess.call(['umount', ephemeral_mount])

    if ceph.is_osd_disk(dev):
        utils.juju_log('INFO',
                       'Looks like {} is already an OSD, skipping.'
                       .format(dev))
        return

    # First partition appearing in /proc/mounts means the device is
    # in use.
    if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0:
        utils.juju_log('INFO',
                       'Looks like {} is in use, skipping.'.format(dev))
        return

    if os.path.exists(dev):
        subprocess.call(['ceph-disk-prepare', dev])
def mon_relation():
    """mon relation hook: render config, then bootstrap the cluster and
    notify consumers once enough monitor peers are present."""
    utils.juju_log('INFO', 'Begin mon-relation hook.')
    emit_cephconf()

    required_mons = int(utils.config_get('monitor-count'))
    if len(get_mon_hosts()) >= required_mons:
        bootstrap_monitor_cluster()
        ceph.wait_for_bootstrap()
        ceph.rescan_osd_devices()
        notify_osds()
        notify_radosgws()
        notify_client()
    else:
        utils.juju_log('INFO', 'Not enough mons ({}), punting.'.format(
            len(get_mon_hosts())))

    utils.juju_log('INFO', 'End mon-relation hook.')
def configure_gmond():
    """Render gmond.conf — as a head unit forwarding to masters when
    'head' relations exist — and restart gmond on content change."""
    juju_log("INFO", "Configuring ganglia monitoring daemon")
    service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]

    masters = []
    # When 'head' relations exist, configure as head unit and send
    # data to every master.
    for rid in relation_ids("head") or []:
        for master_unit in relation_list(rid):
            masters.append(
                relation_get('private-address', master_unit, rid))

    context = {
        "service_name": service_name,
        "masters": masters,
        "dead_host_timeout": config_get("dead_host_timeout"),
    }
    previous = checksum(GMOND_CONF)
    with open(GMOND_CONF, "w") as conf:
        conf.write(render_template("gmond.conf", context))
    # Only bounce the daemon when the rendered config differs.
    if previous != checksum(GMOND_CONF):
        control(GMOND, RESTART)
def summarize_run(duid, cloud, canned_offer):
    """Aggregate a finished run's node data into time records and
    publish the resulting run document and benchmarks."""
    logger.info("Running summarizer: %s, %s, %s" % (duid, cloud, canned_offer))

    run = _query_run(duid, cloud)
    mappers, reducer = _div_node(run['hits'])
    logger.info('summarize_run mappers: %s' % mappers)
    logger.info('summarize_run reducer: %s' % reducer)

    mappers_data, reducer_data = _extract_node_data(mappers, reducer, duid)
    logger.info('summarize_run mappers_data: %s' % mappers_data)
    logger.info('summarize_run reducer_data: %s' % reducer_data)

    time_records = _compute_time_records(mappers_data.values(),
                                         reducer_data, duid)
    products = _get_products_list(mappers_data.values())
    service_offers = _get_service_offer(mappers, reducer)

    _create_run_doc(cloud, canned_offer, time_records, products,
                    service_offers)
    _publish_benchmarks(cloud, canned_offer, time_records, products,
                        service_offers)
    logger.info("Done summarizer: %s, %s, %s" % (duid, cloud, canned_offer))


if __name__ == '__main__':
    run_duid, run_cloud, run_offer = sys.argv[1:4]
    ss_api.login_internal(config_get('ss_username'),
                          config_get('ss_password'))
    summarize_run(run_duid, run_cloud, run_offer)