def process_data():
    identity = ""
    data = OrderedDict()
    all_strings = {}
    for rid in relation_ids('master'):
        for unit in related_units(rid):
            identity = relation_get(attribute='identity',
                                    unit=unit, rid=rid)
            raw_private_string = relation_get(attribute='private-string',
                                              unit=unit, rid=rid)
            raw_public_string = relation_get(attribute='public-string',
                                             unit=unit, rid=rid)
            raw_vip_string = relation_get(attribute='vip-string',
                                          unit=unit, rid=rid)
            raw_ssh_key = relation_get(attribute='host-ssh-key',
                                       unit=unit, rid=rid)
            juju_log('Relation confirmed from {}'.format(identity))
            if identity:
                with open(NODE_DATA_FILE) as f:
                    data = json.load(f)
                # Only store data for nodes we have not seen before.
                if identity not in data:
                    all_strings['private'] = pickle.loads(raw_private_string)
                    all_strings['public'] = pickle.loads(raw_public_string)
                    all_strings['vip'] = pickle.loads(raw_vip_string)
                    all_strings['ssh_pub_key'] = pickle.loads(raw_ssh_key)
                    data[identity] = all_strings
                    juju_log('Storing node {} data {}'.format(
                        identity, data[identity]))
                    with open(NODE_DATA_FILE, 'w') as f:
                        json.dump(data, f)

def wait_for_boot_images(cluster_uuid):
    """Waits for the boot images to appear on the cluster."""
    # Load all of the images that need to go from the region to the cluster.
    conf = config()
    resources = get_boot_resources()
    needed_images = set()
    for resource in resources:
        # 'osystem' rather than 'os' to avoid shadowing the os module.
        osystem, series = resource['name'].split('/')
        arch, subarch = resource['architecture'].split('/')
        image_name = '%s/%s/%s/%s' % (osystem, arch, subarch, series)
        needed_images.add(image_name)
    # Wait for those images to be reported by the cluster.
    juju_log('Waiting for all boot images to be reported by the cluster.')
    while True:
        images = check_output_load([
            'maas', conf['username'], 'boot-images', 'read', cluster_uuid])
        images = {
            '%s/%s/%s/%s' % (
                image['osystem'], image['architecture'],
                image['subarchitecture'], image['release'])
            for image in images
        }
        if needed_images.issubset(images):
            juju_log("Have Needed Images")
            break
        else:
            juju_log("Needed: %s" % needed_images)
            juju_log("Have: %s" % images)
            time.sleep(10)
    juju_log('Finished importing of boot images on the cluster.')

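# check_output_load() is called above but not shown here; a minimal sketch,
# assuming it simply runs the MAAS CLI command and JSON-decodes whatever the
# command prints (the real helper may differ):
import json
import subprocess


def check_output_load(cmd):
    """Run cmd and return its stdout parsed as JSON."""
    return json.loads(subprocess.check_output(cmd))
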
def trove_dashboard_relation_departed():
    """remove-relation has been run."""
    juju_log('trove-plugin-relation-departed hook run')
    juju_log('relation_ids: {}'.format(relation_ids('trove-plugin')))
    for rel_id in relation_ids('trove-plugin'):
        relation_clear(rel_id)

def set_cp_agent():
    juju_log('Setting cp-agentd configuration for {} hook'
             .format(hook_name()))

    def set_config(key):
        subprocess.check_call(['cp-agentd', 'set-config', key])

    mport = 0
    for rid in relation_ids('cplane-controller'):
        for unit in related_units(rid):
            mport = relation_get(attribute='mport', unit=unit, rid=rid)
            uport = relation_get(attribute='uport', unit=unit, rid=rid)
            unicast_mode = config('enable-unicast')
            cplane_controller = relation_get(attribute='private-address',
                                             unit=unit, rid=rid)
            if mport:
                set_config('mcast-port=' + mport)
                set_config('mgmt-iface=' + config('mgmt-int'))
                if unicast_mode is True:
                    set_config('ucast-ip=' + cplane_controller)
                else:
                    os.system(
                        "sed -i '/ucast-ip/d' /etc/cplane/cp-config.json")
                set_config('ucast-port=' + uport)
                # Only push the log level if it is not already in the config.
                with open('/etc/cplane/cp-config.json', 'r') as config_file:
                    filedata = config_file.read()
                if '"{}"'.format(config('cp-agent-log-level')) not in filedata:
                    set_config('log-level=' +
                               str(config('cp-agent-log-level')))
                set_config('vm-mtu=' + str(config('cp-vm-mtu')))
                return
    # No controller relation data yet: fall back to charm config values.
    set_config('mcast-port=' + str(config('cp-controller-mport')))
    set_config('mgmt-iface=' + config('mgmt-int'))
    set_config('ucast-ip=' + config('cplane-controller-ip'))
    set_config('ucast-port=' + str(config('cp-controller-uport')))
    with open('/etc/cplane/cp-config.json', 'r') as config_file:
        filedata = config_file.read()
    if '"{}"'.format(config('cp-agent-log-level')) not in filedata:
        set_config('log-level=' + str(config('cp-agent-log-level')))
    set_config('vm-mtu=' + str(config('cp-vm-mtu')))

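# The cp-agentd keys set above end up in /etc/cplane/cp-config.json; a
# hypothetical example of the resulting file (key names taken from the
# set-config calls above, values purely illustrative):
#
#     {
#         "mcast-port": "7878",
#         "mgmt-iface": "eth0",
#         "ucast-ip": "192.0.2.10",
#         "ucast-port": "7879",
#         "log-level": "debug",
#         "vm-mtu": "1500"
#     }
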
def send_notification(relation, state):
    juju_log('Sending notification')
    hostname = socket.gethostname()
    for rid in relation_ids(relation):
        relation_info = {'identity': hostname, 'state': state}
        relation_set(rid, relation_settings=relation_info)

def master_state_relation_changed():
    if not relation_get('identity'):
        juju_log('Relationship with slave-state not yet complete')
        return
    process_clustered_data()
    state = relation_get('state')
    if check_all_clustered_nodes(state):
        if state == 'install':
            status_set('maintenance', 'Installing grid')
            if install_grid():
                install_root_scripts()
                send_notification("master-state", "cluster")
        elif state == 'clustered':
            if install_db():
                status_set('maintenance', 'Installing Database')
                install_db_root_scripts()
                send_notification("master-state", "database")
        elif state == 'final':
            send_notification("master-state", "final")
            set_oracle_env()
            create_db()
            configure_database()
            for rid in relation_ids('oracle'):
                oracle_relation_changed(relation_id=rid)
            juju_log("Oracle RAC 12c installation succeeded on master")
            status_set('active', 'Unit is ready')

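# State handshake between the master-state and slave-state relations, as
# encoded above and in slave_state_relation_changed() further below (a
# reading of these hooks, not an authoritative protocol description):
#
#     master: 'install'   -> grid installed   -> notify 'cluster'
#     slave:  'cluster'   -> root scripts     -> notify 'clustered'
#     master: 'clustered' -> db installed     -> notify 'database'
#     slave:  'database'  -> db root scripts  -> notify 'final'
#     master: 'final'     -> create/configure db, unit ready
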
def ensure_client_connectivity(f):
    """Ensure a client can successfully call the server's API.

    This is needed because remote service restarts are async. This could be
    removed if we can make restart_on_change() take an optional post-restart
    action it executes to ensure the API is up before considering it
    restarted.

    :param f: A callable from a fully instantiated/configured client lib.
    """
    i = 0
    while i <= CLIENT_RETRY_MAX:
        try:
            f()
            juju_log(
                'Confirmed remote API connectivity /w %s after %s attempts'
                % (f, i))
            return
        except Exception as e:
            juju_log(
                'Failed to connect to remote API /w %s, retrying (%s/%s): %s'
                % (f, i, CLIENT_RETRY_MAX, e))
            i += 1
            time.sleep(1)
    raise Exception(
        'Failed to connect to remote API /w %s after %s retries.'
        % (f, CLIENT_RETRY_MAX))

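# A minimal usage sketch for ensure_client_connectivity(): any zero-argument
# callable that exercises the remote API can be passed in. In the charm this
# is a bound client-library method; the stand-in below is hypothetical.
def _example_api_ping():
    # In real code: something like client.list_networks() or
    # keystone.services.list() on a fully configured client object.
    pass

# ensure_client_connectivity(_example_api_ping)
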
def remove_source_files():
    """Remove the directories and files from the dist-packages python
    install location.

    Since the trove dashboard was installed from source, pip uninstall will
    not uninstall the trove dashboard since there was no package for the
    install.
    """
    juju_log('getsitepackages: {}'.format(site.getsitepackages()[0]))
    py_install_dir = site.getsitepackages()[0]
    for directory in OPSMGR_UNINSTALL_DIRS:
        uninstall_dir = os.path.join(py_install_dir, directory)
        if os.path.exists(uninstall_dir):
            shutil.rmtree(uninstall_dir)
        # Make sure to remove egg directories and pth files
        uninstall_dir = os.path.join(py_install_dir, directory + '*')
        uninstall_dir_list = glob.glob(uninstall_dir)
        juju_log('uninstall_dir_list: {}'.format(uninstall_dir_list))
        for path in uninstall_dir_list:
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
    # Remove the opsmgr bin files
    if os.path.exists('/usr/local/bin/opsmgr'):
        os.remove('/usr/local/bin/opsmgr')
    if os.path.exists('/usr/local/bin/opsmgr-admin'):
        os.remove('/usr/local/bin/opsmgr-admin')

def config_changed():
    if openstack_upgrade_available('glance-common'):
        juju_log('Upgrading OpenStack release')
        do_openstack_upgrade(CONFIGS)
    open_port(9292)
    configure_https()

def trove_dashboard_git_clone(config_yaml):
    """Clone from the git repository specified in the config.yaml.

    Assuming here the trove dashboard is not supplied in a normal distro
    package, meaning the only install option is to specify the git url in
    the config.yaml. (No default location is specified here either in the
    code.)
    """
    config = _git_yaml_load(config_yaml)
    git_repository = None
    for c in config['repositories']:
        if c['name'] == TROVE_DASHBOARD:
            git_repository = c['repository']
            git_branch = c['branch']
    if git_repository is None:
        error_out('Missing repository in config.yaml')
    juju_log('Git repository: {} branch: {}'.format(git_repository,
                                                    git_branch))
    depth = '1'
    parent_dir = GIT_CLONE_PARENT_DIR
    clone_dir = install_remote(git_repository, dest=parent_dir,
                               branch=git_branch, depth=depth)
    juju_log('Cloned into directory: {}'.format(clone_dir))
    return clone_dir

def set_cp_agent():
    juju_log('Setting cp-agentd configuration for {} hook'
             .format(hook_name()))

    def set_config(key):
        subprocess.check_call(['cp-agentd', 'set-config', key])

    mport = 0
    for rid in relation_ids('cplane-controller'):
        for unit in related_units(rid):
            mport = relation_get(attribute='mport', unit=unit, rid=rid)
            uport = relation_get(attribute='uport', unit=unit, rid=rid)
            unicast_mode = config('enable-unicast')
            cplane_controller = relation_get('private-address')
            if mport:
                set_config('mcast-port=' + mport)
                set_config('mgmt-iface=' + config('mgmt-int'))
                if unicast_mode is True:
                    set_config('ucast-ip=' + cplane_controller)
                else:
                    os.system(
                        "sed -i '/ucast-ip/d' /etc/cplane/cp-config.json")
                set_config('ucast-port=' + uport)
                # Only push the log level if it is not already in the config.
                with open('/etc/cplane/cp-config.json', 'r') as config_file:
                    filedata = config_file.read()
                if '"{}"'.format(config('cp-agent-log-level')) not in filedata:
                    set_config('log-level=' +
                               str(config('cp-agent-log-level')))
                set_config('vm-mtu=' + str(config('cp-vm-mtu')))
                return
    # No controller relation data yet: fall back to charm config values.
    set_config('mcast-port=' + str(config('cp-controller-mport')))
    set_config('mgmt-iface=' + config('mgmt-int'))
    set_config('ucast-ip=' + config('cplane-controller-ip'))
    set_config('ucast-port=' + str(config('cp-controller-uport')))
    with open('/etc/cplane/cp-config.json', 'r') as config_file:
        filedata = config_file.read()
    if '"{}"'.format(config('cp-agent-log-level')) not in filedata:
        set_config('log-level=' + str(config('cp-agent-log-level')))
    set_config('vm-mtu=' + str(config('cp-vm-mtu')))

def data_relation_changed():
    juju_log("data_relation_changed")
    if volume_get_id_for_storage_subordinate() is None:
        juju_log("mountpoint from storage subordinate not ready, let's wait")
        return True
    config_changed()

def get_db_status():
    cmd = "su - oracle -c 'lsnrctl status'"
    res = commands.getoutput(cmd)
    juju_log('lsnrctl status: {}'.format(res))
    return config('db-service') in res

def enable_arbiter(master_node=None, host=None):
    juju_log("enable_arbiter: master_node: %s, host: %s"
             % (master_node, host))
    if master_node is None or host is None:
        retVal = False
    else:
        retVal = mongo_client(master_node, "rs.addArb(\"%s\")" % host)
    juju_log("enable_arbiter returns: %s" % retVal)
    return retVal

def join_replset(master_node=None, host=None):
    juju_log("join_replset: master_node: %s, host: %s"
             % (master_node, host))
    if master_node is None or host is None:
        retVal = False
    else:
        retVal = mongo_client(master_node, "rs.add(\"%s\")" % host)
    juju_log("join_replset returns: %s" % retVal)
    return retVal

def init_replset(master_node=None):
    if master_node is None:
        juju_log("init_replset: master_node must be defined.")
        retVal = False
    else:
        retVal = mongo_client(master_node, 'rs.initiate()')
    juju_log("init_replset returns: %s" % retVal)
    return retVal

def do_pip_installs(plugin_yaml, clone_dir):
    """Run pip install for the source code downloaded from the git clone."""
    for plugin in _git_yaml_load(plugin_yaml):
        plugin_dir = os.path.join(clone_dir, plugin)
        juju_log('pip install from: {}'.format(plugin_dir))
        pip_install(plugin_dir)

def stop_hook():
    juju_log("stop_hook")
    try:
        retVal = service('stop', 'mongodb')
        # FIXME: need to check if removing the lock file is still needed
        os.remove('/var/lib/mongodb/mongod.lock')
    except Exception as e:
        juju_log(str(e))
        retVal = False
    return retVal

def mongo_client(host=None, command=None):
    if host is None or command is None:
        return False
    cmd_line = 'mongo'
    cmd_line += ' --host %s' % host
    cmd_line += " --eval '%s'" % command
    juju_log("Executing: %s" % cmd_line)
    return subprocess.call(cmd_line, shell=True) == 0

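# A minimal sketch of the replica-set bootstrap order using the helpers
# above (init_replset, join_replset, enable_arbiter). The hostnames are
# hypothetical; in the charm they come from relation data.
def _bootstrap_replset_example():
    init_replset('mongo-0.example')                       # initiate on master
    join_replset('mongo-0.example', 'mongo-1.example')    # add a data member
    enable_arbiter('mongo-0.example', 'mongo-2.example')  # add an arbiter
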
def pgsql_db_joined():
    if is_relation_made('shared-db'):
        # raise error
        e = ('Attempting to associate a postgresql database when'
             ' a mysql one is already associated')
        juju_log(e, level=ERROR)
        raise Exception(e)
    relation_set(database=config('database'))

def manage_fip():
    for rid in relation_ids('cplane-controller-ovs'):
        for unit in related_units(rid):
            fip_set = relation_get(attribute='fip-set', unit=unit, rid=rid)
            if fip_set:
                if check_interface(config('fip-interface')):
                    add_bridge('br-fip', config('fip-interface'))
                else:
                    juju_log('FIP interface does not exist; the default '
                             'interface will be used by the CPlane '
                             'controller')

def manage_fip():
    for rid in relation_ids('cplane-controller'):
        for unit in related_units(rid):
            fip_mode = relation_get(attribute='fip-mode', unit=unit, rid=rid)
            if fip_mode == 'True':
                if check_interface(config('fip-interface')):
                    add_bridge('br-fip', config('fip-interface'))
                else:
                    juju_log('FIP interface does not exist; the default '
                             'interface will be used by the CPlane '
                             'controller')

def manage_fip():
    for rid in relation_ids('cplane-controller'):
        for unit in related_units(rid):
            fip_mode = relation_get(attribute='fip-mode', unit=unit, rid=rid)
            if fip_mode == 'True':
                if check_interface(config('fip-interface')):
                    create_br_fip(config('fip-interface'))
                else:
                    juju_log('FIP interface does not exist; the default '
                             'interface will be used by the CPlane '
                             'controller')

def image_service_joined(relation_id=None):
    relation_data = {
        'glance-api-server':
            "{}:9292".format(canonical_url(CONFIGS, INTERNAL))
    }
    juju_log("%s: image-service_joined: To peer glance-api-server=%s"
             % (CHARM, relation_data['glance-api-server']))
    relation_set(relation_id=relation_id, **relation_data)

def regex_sub(pat_replace=None, data=None):
    juju_log("regex_sub")
    if not pat_replace or not data:
        raise Exception("pat_replace or data not defined")
    if not isinstance(pat_replace, list):
        raise Exception("pat_replace must be a list of (pat, replace) tuples")
    new_data = data
    for (pattern, replace) in pat_replace:
        # Substitute into the running result so successive replacements
        # chain rather than each applying to the unmodified input.
        new_data = re.sub(pattern, replace, new_data, 0, re.MULTILINE)
    return new_data

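# A small usage sketch for regex_sub(); the patterns and the input string
# are illustrative only.
def _regex_sub_example():
    new_conf = regex_sub([(r'bind_ip = .*', 'bind_ip = 0.0.0.0'),
                          (r'#?rest = .*', 'rest = true')],
                         'bind_ip = 127.0.0.1\nrest = false\n')
    return new_conf  # 'bind_ip = 0.0.0.0\nrest = true\n'
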
def volume_get_id_for_storage_subordinate():
    # The storage charm is a subordinate, so we should only ever have one
    # relation_id for the data relation.
    ids = relation_ids('data')
    if len(ids) > 0:
        mountpoint = relation_get('mountpoint',
                                  os.environ['JUJU_UNIT_NAME'],
                                  ids[0])
        juju_log('mountpoint: %s' % (mountpoint,))
        if mountpoint and os.path.exists(mountpoint):
            return mountpoint.split('/')[-1]

def ha_relation_changed():
    clustered = relation_get('clustered')
    if not clustered or clustered in [None, 'None', '']:
        juju_log('ha_changed: hacluster subordinate is not fully clustered.')
        return
    # Reconfigure the endpoint in keystone to point to the clustered VIP.
    [keystone_joined(rid) for rid in relation_ids('identity-service')]
    # Notify glance client services of the reconfigured URL.
    [image_service_joined(rid) for rid in relation_ids('image-service')]

def restart_services():
    cmd = ['service', 'openvswitch-switch', 'restart']
    subprocess.check_call(cmd)
    juju_log('Restarting cp-agentd service for {} hook'.format(hook_name()))
    cmd = ['service', 'cp-agentd', 'stop']
    subprocess.check_call(cmd)
    cmd = ['service', 'cp-agentd', 'start']
    subprocess.check_call(cmd)
    cmd = ['update-rc.d', 'cp-agentd', 'enable']
    subprocess.check_call(cmd)

def install():
    apt_update(fatal=True)
    # disable_neutron_agent()
    pkgs = determine_packages()
    apt_install(pkgs, fatal=True)
    install_cplane_packages()
    add_bridge('br-ext', config('data-interface'))
    if check_interface(config('tun-interface')):
        add_bridge('br-tun', config('tun-interface'))
    else:
        juju_log('Tunnel interface does not exist; the default interface '
                 'will be used by the CPlane controller')

def configsvr_relation_joined():
    juju_log("configsvr_relation_joined")
    my_hostname = unit_get('public-address')
    my_port = config('config_server_port')
    my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1]
    relation_set(
        relation_id(),
        {
            'hostname': my_hostname,
            'port': my_port,
            'install-order': my_install_order,
            'type': 'configsvr',
        })

def object_store_joined():
    if 'identity-service' not in CONFIGS.complete_contexts():
        juju_log('Deferring swift storage configuration until '
                 'an identity-service relation exists')
        return
    if 'object-store' not in CONFIGS.complete_contexts():
        juju_log('swift relation incomplete')
        return
    CONFIGS.write(GLANCE_API_CONF)

def update_file(filename=None, new_data=None, old_data=None):
    juju_log("update_file: %s" % filename)
    if filename is None or new_data is None:
        return False
    retVal = False
    try:
        if old_data != new_data:
            with open(filename, 'w') as f:
                f.write(new_data)
            retVal = True
    except Exception as e:
        juju_log(str(e))
        retVal = False
    return retVal

def image_service_joined(relation_id=None):
    if not eligible_leader(CLUSTER_RES):
        return
    relation_data = {
        'glance-api-server': canonical_url(CONFIGS) + ":9292"
    }
    juju_log("%s: image-service_joined: To peer glance-api-server=%s"
             % (CHARM, relation_data['glance-api-server']))
    relation_set(relation_id=relation_id, **relation_data)

def upgrade_charm():
    apt_install(filter_installed_packages(determine_packages()), fatal=True)
    packages_removed = remove_old_packages()
    reinstall_paste_ini(force_reinstall=packages_removed)
    configure_https()
    update_nrpe_config()
    update_image_location_policy()
    CONFIGS.write_all()
    if packages_removed:
        juju_log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)

def process_check(pid=None):
    try:
        if pid is not None:
            cmd_line = subprocess.check_output('ps -p %d -o cmd h'
                                               % int(pid), shell=True)
            retVal = (pid, cmd_line)
        else:
            juju_log("process_check: pid not defined.")
            retVal = (None, None)
    except Exception as e:
        juju_log("process_check exception: %s" % str(e))
        retVal = (None, None)
    return retVal

def volume_get_volid_from_volume_map():
    config_data = config()
    volume_map = {}
    try:
        volume_map = yaml.load(config_data['volume-map'].strip())
        if volume_map:
            juju_unit_name = os.environ['JUJU_UNIT_NAME']
            volid = volume_map.get(juju_unit_name)
            juju_log("Juju unit name: %s Volid: %s"
                     % (juju_unit_name, volid))
            return volid
    except ConstructorError as e:
        juju_log("invalid YAML in 'volume-map': {}".format(e))
        return None

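# The 'volume-map' config option parsed above is a YAML mapping of unit name
# to volume id; a hypothetical example value (unit names and volume ids are
# illustrative only):
#
#     volume-map: |
#         mongodb/0: vol-0000010e
#         mongodb/1: vol-0000010f
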
def enable_web_admin_ui(port=None):
    if port is None:
        juju_log("enable_web_admin_ui: port not defined.")
        return False
    try:
        mongodb_init_config = open(default_mongodb_init_config).read()
        if re.search(' --rest ', mongodb_init_config, re.MULTILINE) is None:
            mongodb_init_config = regex_sub([(' -- ', ' -- --rest ')],
                                            mongodb_init_config)
            retVal = update_file(default_mongodb_init_config,
                                 mongodb_init_config)
        else:
            # --rest is already enabled; nothing to do.
            retVal = True
    except Exception as e:
        juju_log(str(e))
        retVal = False
    return retVal

def start():
    if not config('slave-units-number'):
        status_set('maintenance',
                   'Performing Oracle standalone Installation')
        download_cplane_packages()
        copy_oracle_package()
        modify_oracle_db_response_file()
        if install_db():
            install_db_root_scripts()
            set_oracle_env()
            create_db()
            juju_log('Database is created and the listener is started')
            status_set('active', 'Unit is ready')

def start():
    if not config('slave-units-number'):
        status_set('maintenance',
                   'Performing Oracle standalone Installation')
        download_cplane_packages()
        copy_oracle_package()
        modify_oracle_db_response_file()
        if install_db():
            install_db_root_scripts()
            set_oracle_env()
            create_db()
            configure_database()
            juju_log('Database is created and the listener is started')
            status_set('active', 'Unit is ready')

def oracle_relation_changed(relation_id=None):
    if config('slave-units-number'):
        if check_all_clustered_nodes('final'):
            relation_info = {
                'oracle-host': '{}-scan'.format(config('scan-name')),
                'db-service': '{}'.format(config('db-service')),
                'scan-string': pickle.dumps(get_scan_str()),
                'db-password': '{}'.format(config('db-password')),
                'db-path': '+DATA'
            }
            juju_log('Sending relation info to Cplane Controller')
            relation_set(relation_id=relation_id,
                         relation_settings=relation_info)
    else:
        hostname = socket.gethostname()
        relation_info = {
            'oracle-host': hostname,
            'db-service': '{}'.format(config('db-service')),
            'db-password': '{}'.format(config('db-password')),
            'db-path': '/u01/app/oracle/oradata/CPLANE/'
        }
        for _ in range(5):
            if get_db_status() is False:
                juju_log("Service is not registered with listener... "
                         "Retry checking it after 60 sec")
                time.sleep(60)
            else:
                juju_log("Service is registered with listener")
                juju_log('Sending relation info to Cplane Controller')
                relation_set(relation_id=relation_id,
                             relation_settings=relation_info)
                break

def db_changed():
    rel = os_release('glance-common')
    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return
    CONFIGS.write(GLANCE_REGISTRY_CONF)
    # Since folsom, a db connection setting in glance-api.conf is required.
    if rel != "essex":
        CONFIGS.write(GLANCE_API_CONF)
    if is_elected_leader(CLUSTER_RES):
        # Bugs 1353135 & 1187508. Dbs can appear to be ready before the
        # unit's acl entry has been added. So, if the db supports passing a
        # list of permitted units then check if we're in the list.
        allowed_units = relation_get('allowed_units')
        if allowed_units and local_unit() in allowed_units.split():
            if rel == "essex":
                status = call(['glance-manage', 'db_version'])
                if status != 0:
                    juju_log('Setting version_control to 0')
                    cmd = ["glance-manage", "version_control", "0"]
                    check_call(cmd)
            juju_log('Cluster leader, performing db sync')
            migrate_database()
        else:
            juju_log('allowed_units either not presented, or local unit '
                     'not in acl list: %s' % allowed_units)

def process_clustered_data():
    identity = ""
    for rid in relation_ids('master-state'):
        for unit in related_units(rid):
            identity = relation_get(attribute='identity',
                                    unit=unit, rid=rid)
            state = relation_get(attribute='state', unit=unit, rid=rid)
            juju_log('Relation confirmed from {}'.format(identity))
            if identity:
                change_cluster_state(identity, state)

def get_or_create_subnet(client, cidr, network_id):
    juju_log('get_or_create_subnet: %s (net: %s)' % (cidr, network_id))
    for sn in client.list_subnets(network_id=network_id)['subnets']:
        if sn['cidr'] == cidr:
            juju_log('- found existing subnet: %s' % sn['id'])
            return sn
    subnet = netaddr.IPNetwork(cidr)
    if subnet.version == 6:
        subnet_args = {
            'ip_version': 6,
            'ipv6_address_mode': 'slaac',
        }
    else:
        subnet_args = {
            'ip_version': 4,
        }
    subnet_args.update({
        'cidr': cidr,
        'network_id': network_id,
        'enable_dhcp': True,
    })
    juju_log('- creating new subnet: %s' % cidr)
    res = client.create_subnet({'subnet': subnet_args})['subnet']
    juju_log('- created new subnet: %s' % res['id'])
    return res

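# A usage sketch for get_or_create_subnet(). The network name and CIDR are
# illustrative; any configured neutronclient v2 Client should work as the
# `neutron_client` argument (an assumption, not confirmed by this module).
def _subnet_example(neutron_client):
    net = neutron_client.list_networks(name='private')['networks'][0]
    return get_or_create_subnet(neutron_client, '10.42.0.0/24', net['id'])
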
def object_store_joined():
    if 'identity-service' not in CONFIGS.complete_contexts():
        juju_log('Deferring swift storage configuration until '
                 'an identity-service relation exists')
        return
    if 'object-store' not in CONFIGS.complete_contexts():
        juju_log('swift relation incomplete')
        return
    [image_service_joined(rid) for rid in relation_ids('image-service')]
    update_image_location_policy()
    CONFIGS.write(GLANCE_API_CONF)

def keystone_joined(relation_id=None):
    if not eligible_leader(CLUSTER_RES):
        juju_log('Deferring keystone_joined() to service leader.')
        return
    url = canonical_url(CONFIGS) + ":9292"
    relation_data = {
        'service': 'glance',
        'region': config('region'),
        'public_url': url,
        'admin_url': url,
        'internal_url': url,
    }
    relation_set(relation_id=relation_id, **relation_data)

def import_key(keyid):
    key = keyid.strip()
    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
        juju_log("PGP key found (looks like ASCII Armor format)",
                 level=DEBUG)
        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
        with tempfile.NamedTemporaryFile() as keyfile:
            with open(keyfile.name, 'w') as fd:
                fd.write(key)
                fd.write("\n")
            cmd = ['apt-key', 'add', keyfile.name]
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError:
                error_out("Error importing PGP key '%s'" % key)
    else:
        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
        juju_log("Importing PGP key from keyserver", level=DEBUG)
        cmd = ['apt-key', 'adv', '--keyserver',
               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            error_out("Error importing PGP key '%s'" % key)

def download_package(self, package_name, version):
    version = int(version)
    if package_name not in self.package_data:
        msg = ("Invalid Package: Package {} is not found in the "
               "Cplane repo".format(package_name))
        status_set('blocked', msg)
        raise ErrorException(msg)
    package_list = self.package_data.get(package_name)
    version_exist = False
    package_dwnld_link = ""
    file_checksum = ""
    if version != -1:
        for package in package_list:
            if package.get("build_nr", 0) == version:
                package_dwnld_link = package.get("dwd_link", "")
                file_checksum = package.get("checksum", "")
                version_exist = True
                logging.info("Package download link %s" % package_dwnld_link)
                break
    else:
        # -1 means "latest": take the last entry in the package list.
        package_dwnld_link = package_list[-1].get("dwd_link", "")
        file_checksum = package_list[-1].get("checksum", "")
        version_exist = True
        logging.info("Package download link %s" % package_dwnld_link)
    if not version_exist:
        msg = ("Invalid Version: Version {} doesn't exist for "
               "package {}".format(version, package_name))
        status_set('blocked', msg)
        raise ErrorException(msg)
    mkdir(CHARM_LIB_DIR)
    filename = urlparse.urlsplit(package_dwnld_link).path
    dwnld_package_name = os.path.join(CHARM_LIB_DIR,
                                      os.path.basename(filename))
    urllib.urlretrieve(package_dwnld_link, dwnld_package_name)
    if self.verify_file_checksum(dwnld_package_name, file_checksum):
        juju_log("Package %s downloaded successfully" % dwnld_package_name)
    else:
        msg = ("Invalid Checksum: Package {} checksum "
               "mismatch".format(dwnld_package_name))
        status_set('blocked', msg)
        raise ErrorException(msg)
    return dwnld_package_name

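# verify_file_checksum() is called above as a method but not shown here; a
# minimal standalone sketch, assuming the repo publishes MD5 hex digests
# (the real helper may use a different algorithm):
import hashlib


def verify_file_checksum(path, expected_checksum):
    """Return True if the file at `path` matches the expected hex digest."""
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected_checksum
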
def slave_state_relation_changed():
    if not relation_get('identity'):
        juju_log('Relationship with master-state not yet complete')
        return
    if relation_get('state') == 'cluster':
        status_set('maintenance', 'Installing Root scripts')
        install_root_scripts()
        send_notification("slave-state", "clustered")
    if relation_get('state') == 'database':
        status_set('maintenance', 'Installing DB scripts')
        install_db_root_scripts()
        send_notification("slave-state", "final")
    if relation_get('state') == 'final':
        juju_log("Oracle RAC 12c installation succeeded on slave")
        status_set('active', 'Unit is ready')

def install_hook():
    juju_log('Installing glance packages')
    execd_preinstall()
    src = config('openstack-origin')
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and
            src == 'distro'):
        src = 'cloud:precise-folsom'
    configure_installation_source(src)
    apt_update()
    apt_install(PACKAGES)
    for service in SERVICES:
        service_stop(service)

def keystone_changed():
    if 'identity-service' not in CONFIGS.complete_contexts():
        juju_log('identity-service relation incomplete. Peer not ready?')
        return
    CONFIGS.write(GLANCE_API_CONF)
    CONFIGS.write(GLANCE_REGISTRY_CONF)
    # Configure any object-store / swift relations now that we have an
    # identity-service.
    if relation_ids('object-store'):
        object_store_joined()
    # Possibly configure HTTPS for API and registry.
    configure_https()