def _add_ingress_rule(self, args, security_group):
    """ Add a local -> PostgreSQL ingress rule to a security group """
    ec2 = self._get_aws_client('ec2', args)
    ip = args.public_ip if args.public_ip else \
        '{}/32'.format(get_my_ip())
    port = args.db_port or 5432
    ip_ranges = []

    ip = ip.split(',')
    for i in ip:
        ip_ranges.append({
            'CidrIp': i,
            'Description': 'pgcloud client {}'.format(i)
        })

    try:
        output({'Adding': 'Adding ingress rule for: {}...'.format(ip)})
        debug(args, 'Adding ingress rule for: {}...'.format(ip))
        ec2.authorize_security_group_ingress(
            GroupId=security_group,
            IpPermissions=[
                {
                    'FromPort': port,
                    'ToPort': port,
                    'IpProtocol': 'tcp',
                    'IpRanges': ip_ranges
                },
            ])
    except Exception as e:
        error(args, e)

def _create_rds_instance(self, args, security_group):
    """ Create an RDS instance """
    ec2 = self._get_aws_client('ec2', args)
    rds = self._get_aws_client('rds', args)

    db_password = self._database_pass if self._database_pass is not None \
        else args.db_password

    try:
        debug(args, 'Creating RDS instance: {}...'.format(args.name))
        rds.create_db_instance(DBInstanceIdentifier=args.name,
                               AllocatedStorage=args.storage_size,
                               DBName=args.db_name,
                               Engine='postgres',
                               Port=args.db_port,
                               EngineVersion=args.db_version,
                               StorageType=args.storage_type,
                               StorageEncrypted=True,
                               Iops=args.storage_iops,
                               AutoMinorVersionUpgrade=True,
                               MultiAZ=False,
                               MasterUsername=args.db_username,
                               MasterUserPassword=db_password,
                               DBInstanceClass=args.instance_type,
                               VpcSecurityGroupIds=[
                                   security_group,
                               ])
    except rds.exceptions.DBInstanceAlreadyExistsFault:
        try:
            debug(args, DEL_SEC_GROUP_MSG.format(security_group))
            ec2.delete_security_group(GroupId=security_group)
        except Exception:
            pass
        error(args, 'RDS instance {} already exists.'.format(args.name))
    except Exception as e:
        try:
            debug(args, DEL_SEC_GROUP_MSG.format(security_group))
            ec2.delete_security_group(GroupId=security_group)
        except Exception:
            pass
        error(args, str(e))

    # Wait for completion
    running = True
    while running:
        response = rds.describe_db_instances(
            DBInstanceIdentifier=args.name)

        db_instance = response['DBInstances'][0]
        status = db_instance['DBInstanceStatus']

        if status != 'creating' and status != 'backing-up':
            running = False

        if running:
            time.sleep(5)

    return response['DBInstances']

def _delete_security_group(self, args, id):
    """ Delete a security group """
    ec2 = self._get_aws_client('ec2', args)
    debug(args, 'Deleting security group: {}...'.format(id))

    try:
        ec2.delete_security_group(GroupId=id)
    except Exception as e:
        error(args, str(e))

def _delete_azure_instance(self, args):
    """ Delete an Azure instance """
    # Obtain the management client object
    postgresql_client = self._get_azure_client('postgresql')

    # Delete the server and wait for the result
    debug('Deleting Azure instance: {}...'.format(args.name))

    try:
        poller = postgresql_client.servers.begin_delete(
            args.resource_group,
            args.name
        )
    except Exception as e:
        error(str(e))

    poller.result()

def _create_security_group(self, args):
    """ Create a new security group for the instance """
    ec2 = self._get_aws_client('ec2', args)
    ip = args.public_ip if args.public_ip else get_my_ip()
    ip = ip.split(',')

    # Deploy the security group
    try:
        name = 'pgacloud_{}_{}_{}'.format(args.name,
                                          ip[0].replace('.', '-'),
                                          get_random_id())
        debug(args, 'Creating security group: {}...'.format(name))
        output({'Creating': 'Creating security group: {}...'.format(name)})
        response = ec2.create_security_group(
            Description='Inbound access for {} to RDS instance {}'.format(
                ip[0], args.name),
            GroupName=name
        )
    except Exception as e:
        error(args, str(e))

    return response['GroupId']

def _delete_rds_instance(self, args, name):
    """ Delete an RDS instance """
    rds = self._get_aws_client('rds', args)

    debug(args, 'Deleting RDS instance: {}...'.format(name))
    try:
        rds.delete_db_instance(DBInstanceIdentifier=name,
                               SkipFinalSnapshot=True,
                               DeleteAutomatedBackups=True)
    except Exception as e:
        error(args, str(e))

    # Wait for completion; poll until the instance can no longer be found.
    while True:
        try:
            rds.describe_db_instances(DBInstanceIdentifier=name)
        except rds.exceptions.DBInstanceNotFoundFault:
            return
        except Exception as e:
            error(args, str(e))

        time.sleep(5)

def get_instance_status(self, instance_id):
    """ Get the BigAnimal cluster status """
    running = True
    status = None

    while running:
        _url = "{0}/{1}/{2}".format(self.BASE_URL, 'clusters', instance_id)
        _headers = {
            "accept": "application/json",
            'authorization': 'Bearer {0}'.format(self._access_key)
        }

        cluster_resp = requests.get(_url, headers=_headers)

        if cluster_resp.status_code == 200 and cluster_resp.content:
            cluster_info = json.loads(cluster_resp.content)

            self._cluster_info = cluster_info[0]

            if self._cluster_info['instance'] != 0 and \
                    self._cluster_info['phase'] not in [
                        'Cluster creation request received',
                        'Setting up primary',
                        'Creating CNP cluster'
                    ]:
                running = False

            if status != self._cluster_info['phase']:
                status = self._cluster_info['phase']
                debug('BigAnimal cluster status: {}...'.format(status))
        else:
            running = False
            error(str(cluster_resp.text))

        if running:
            time.sleep(5)

    return self._cluster_info

def _create_azure_instance(self, args):
    """ Create an Azure instance """
    # Obtain the management client object
    postgresql_client = self._get_azure_client('postgresql')

    # Check if the server already exists
    svr = None
    try:
        svr = postgresql_client.servers.get(args.resource_group, args.name)
    except ResourceNotFoundError:
        pass
    except Exception as e:
        error(str(e))

    if svr is not None:
        error('Azure Database for PostgreSQL instance {} already '
              'exists.'.format(args.name))

    db_password = self._database_pass if self._database_pass is not None \
        else args.db_password

    # Provision the server and wait for the result
    debug('Creating Azure instance: {}...'.format(args.name))

    try:
        poller = postgresql_client.servers.begin_create(
            resource_group_name=args.resource_group,
            server_name=args.name,
            parameters=Server(
                sku=Sku(name=args.instance_type,
                        tier=SkuTier(args.instance_tier_type)),
                high_availability=HighAvailability(
                    mode=args.high_availability),
                administrator_login=args.db_username,
                administrator_login_password=db_password,
                version=args.db_major_version,
                storage=Storage(storage_size_gb=args.storage_size),
                location=args.region,
                create_mode=CreateMode("Default")))
    except Exception as e:
        error(str(e))

    server = poller.result()

    return server.__dict__

# then the user must name one...
if not ARGS.deployment:
    if len(deployments) > 1:
        print('ERROR: You need to supply the name of a deployment.'
              ' The following are available:')
        for deployment in deployments:
            # Display the deployment without the path
            # and removing the '.yaml' suffix.
            print(os.path.basename(deployment)[:-5])
        sys.exit(1)
    deployment_file = os.path.basename(deployments[0])[:-5]
else:
    deployment_file = ARGS.deployment

# Load the deployment's configuration file...
config_file = 'deployments/{}.yaml'.format(deployment_file)
if not os.path.exists(config_file):
    io.error('No config file ({}) for an "{}" deployment'.format(
        config_file, deployment_file))
    sys.exit(1)

# Go...
success = _main(ARGS, deployment_file)

# Done
# ...or failed and exhausted retry attempts!
if not success:
    io.error('Failed to start cluster')
    # Return non-zero exit value to the shell...
    sys.exit(1)

# We must have a deployment defined if we get here.
# Even if there is just one, it's safe to force the
# user to specify the deployment.
if not ARGS.deployment:
    print('ERROR: You need to supply the name of a deployment.'
          ' The following are available:')
    for deployment in deployments:
        # Display the deployment without the path
        # and removing the '.yaml' suffix.
        print(os.path.basename(deployment)[:-5])
    sys.exit(1)
else:
    deployment_file = ARGS.deployment

# Load the deployment's configuration file...
config_file = 'deployments/{}.yaml'.format(deployment_file)
if not os.path.exists(config_file):
    io.error('No config file ({}) for an "{}" deployment'.format(
        config_file, deployment_file))
    sys.exit(1)

# Go...
success = _main(ARGS, deployment_file)

# Done
# ...or failed and exhausted retry attempts!
if not success:
    io.error('Failed to destroy the cluster.')
    # Return non-zero exit value to the shell...
    sys.exit(1)

PARSER.add_argument('-n', '--now',
                    help="Destroy without confirmation",
                    action='store_true')
PARSER.add_argument('-sr', '--skip-rendering',
                    help='Skip the Jinja2 rendering stage',
                    action='store_true')
PARSER.add_argument('deployment', metavar='DEPLOYMENT',
                    type=str, nargs='?',
                    help='The name of the deployment')
ARGS = PARSER.parse_args()

# Go...
deployment_name = io.get_deployment_config_name(ARGS.deployment,
                                                ARGS.display_deployments)
success = _main(ARGS, deployment_name)

# Done
# ...or failed and exhausted retry attempts!
if not success:
    io.error('Failed to destroy the cluster.')
    # Return non-zero exit value to the shell...
    sys.exit(1)

def _main(cli_args, deployment_name):
    """Deployment entry point.

    :param cli_args: The command-line arguments
    :type cli_args: ``list``
    :param deployment_name: The deployment file (excluding the extension)
    :type deployment_name: ``str``
    :returns: True on success
    :rtype: ``bool``
    """
    deployment_file = 'deployments/{}.yaml'.format(deployment_name)
    if not os.path.exists(deployment_file):
        io.error('No config file ({}) for an "{}" deployment'.format(
            deployment_file, deployment_name))
        return False
    with open(deployment_file, 'r') as stream:
        deployment = yaml.load(stream)

    # There must be an openshift/inventories/<deployment> directory
    if not os.path.isdir('openshift/inventories/{}'.format(deployment_name)):
        io.error('Missing "openshift/inventories" directory')
        print('Expected to find the directory "{}" but it was not'
              ' there.'.format(deployment_name))
        print('Every deployment must have a matching "inventories" directory')
        return False

    # -----
    # Hello
    # -----
    io.banner(deployment['name'], full_heading=True, quiet=False)
    if not cli_args.auto_approve:

        target = 'Bastion machine' if cli_args.bastion else 'OpenShift Cluster'

        confirmation_word = io.get_confirmation_word()
        confirmation = raw_input('Enter "{}" to CREATE the {}: '.format(
            confirmation_word, target))
        if confirmation != confirmation_word:
            print('Phew! That was close!')
            return True

    # ------
    # Render (jinja2 files)
    # ------
    # Translate content of jinja2 template files.
    if not cli_args.skip_rendering:

        cmd = './render.py {}'.format(deployment_name)
        cwd = '.'
        rv = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

    # ---------
    # Terraform
    # ---------
    # Create compute instances for the cluster.
    if not cli_args.skip_terraform:

        # The 'terraform' sub-directory is
        # the sub-directory where execution material is located.
        # For terraform the files either relate to the bastion or cluster
        # and are in subdirectories 'bastion' and 'cluster'.
        # The same applies to the ansible playbook - one will be
        # in 'bastion' and one will be in 'cluster'.
        tf_sub_dir = 'bastion' if cli_args.bastion else 'cluster'

        t_dir = deployment['terraform']['dir']
        cmd = '~/bin/terraform init'
        cwd = 'terraform/{}/{}'.format(t_dir, tf_sub_dir)
        rv = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

        cmd = '~/bin/terraform apply' \
              ' -auto-approve' \
              ' -state=.terraform.{}'.format(deployment_name)
        cwd = 'terraform/{}/{}'.format(t_dir, tf_sub_dir)
        rv = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

    if cli_args.bastion:

        cmd = 'ansible-playbook' \
              ' ../ansible/bastion/site.yaml' \
              ' -e keypair_name={}' \
              ' -e deployment={}'.format(deployment['cluster']['keypair_name'],
                                         deployment_name)
        cwd = 'openshift'
        rv = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

        # Done
        return True

    # -------
    # Ansible (A specific version)
    # -------
    # Install the ansible version named in the deployment file
    if not cli_args.skip_initialisation:

        cmd = 'pip install --upgrade pip --user'
        rv = io.run(cmd, '.', cli_args.quiet)
        if not rv:
            return False

        cmd = 'pip install ansible=={} --user'. \
            format(deployment['ansible']['version'])
        rv = io.run(cmd, '.', cli_args.quiet)
        if not rv:
            return False

    # --------
    # Checkout (OpenShift Ansible)
    # --------
    # Updates our OpenShift-Ansible sub-module
    # and checks out the revision defined by the deployment tag.
    if not cli_args.skip_initialisation:

        # Git sub-module initialisation
        cmd = 'git submodule update --init --remote'
        cwd = '.'
        rv = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

        # OpenShift Ansible
        cmd = 'git checkout tags/{}'. \
            format(deployment['openshift']['ansible_tag'])
        cwd = 'openshift-ansible'
        rv = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

    # -----------------
    # Pre-Ansible Pause
    # -----------------
    # Pause before the Pre-OpenShift playbook.
    if not cli_args.skip_terraform and not cli_args.skip_openshift:
        io.banner('time.sleep({})'.format(_PRE_ANSIBLE_PAUSE_S),
                  cli_args.quiet)
        time.sleep(_PRE_ANSIBLE_PAUSE_S)

    # -------
    # Ansible (Pre-OpenShift)
    # -------
    if ('play' in deployment['ansible'] and
            'pre_os_create' in deployment['ansible']['play']):
        pre_os_create = deployment['ansible']['play']['pre_os_create']
        if not cli_args.skip_pre_openshift and pre_os_create:

            cmd = 'ansible-playbook {}.yaml'.format(pre_os_create)
            cwd = 'ansible/pre-os'
            rv = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

    # -------
    # Ansible (OpenShift)
    # -------
    # Deploy OpenShift using the playbooks named in the deployment
    # from the checked-out version.
    if not cli_args.skip_openshift:

        for play in deployment['openshift']['play']:
            cmd = 'ansible-playbook ../openshift-ansible/playbooks/{}'.\
                format(play)
            cwd = 'openshift'
            rv = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

    # -------
    # Ansible (Post-OpenShift)
    # -------
    if not cli_args.skip_post_openshift:

        cmd = 'ansible-playbook site.yaml'
        cwd = 'ansible/post-os'
        rv = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

    # -------
    # Success
    # -------
    # OK if we get here...
    return True

def _main(cli_args, chosen_deployment_name):
    """Destruction entry point.

    :param cli_args: The command-line arguments
    :type cli_args: ``list``
    :param chosen_deployment_name: The deployment file
                                   (excluding the extension)
    :type chosen_deployment_name: ``str``
    :return: True on success
    :rtype: ``bool``
    """
    config_file = os.path.join(
        OKD_DEPLOYMENTS_DIRECTORY,
        chosen_deployment_name,
        io.get_deployment_config_filename(chosen_deployment_name))
    if not os.path.isfile(config_file):
        io.error(
            'Configuration file does not exist in the deployment ({})'.format(
                chosen_deployment_name))
        return False
    with codecs.open(config_file, 'r', 'utf8') as stream:
        deployment = Munch.fromDict(yaml.load(stream))

    # There must be an okd/inventories directory
    inventory_dir = deployment.okd.inventory_dir
    if not os.path.isdir('okd/inventories/{}'.format(inventory_dir)):
        io.error('Missing "okd/inventories" directory')
        print('Expected to find the directory "{}" but it was not'
              ' there.'.format(chosen_deployment_name))
        print('Every deployment must have a matching "inventories" directory')
        return False

    # If the cluster SSH user is not defined,
    # insert it.
    if 'ssh_user' not in deployment.cluster:
        print('Setting default SSH user "{}"'.format(
            OKD_DEFAULT_CLUSTER_SSH_USER))
        deployment.cluster.ssh_user = OKD_DEFAULT_CLUSTER_SSH_USER

    # -----
    # Hello
    # -----
    io.banner(deployment.name, full_heading=True, quiet=False)
    if not cli_args.now:

        # Display the orchestration description
        # (if there is one)
        if deployment.description:
            io.description(deployment.description)

        # The user did not say "now", so ask for confirmation
        print('CAUTION You are about to destroy the cluster.')
        print('======= Are you sure you want to do this?')
        print()

        confirmation_word = io.get_confirmation_word()
        confirmation = input('Enter "{}" to DESTROY this deployment: '.format(
            confirmation_word))
        if confirmation != confirmation_word:
            print('Phew! That was close!')
            return True

    # ------
    # Render
    # ------
    if not cli_args.skip_rendering:

        cmd = './render.py {} --ssh-user {}'.\
            format(chosen_deployment_name, deployment.cluster.ssh_user)
        cwd = '.'
        rv, _ = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

    # ---------
    # Terraform
    # ---------
    # Destroy the cluster.
    t_dir = deployment.cluster.terraform_dir
    cmd = 'terraform init'
    cwd = 'terraform/{}'.format(t_dir)
    rv, _ = io.run(cmd, cwd, cli_args.quiet)
    if not rv:
        return False

    cmd = 'terraform destroy -force -state=.terraform.{}'.\
        format(chosen_deployment_name)
    cwd = 'terraform/{}'.format(t_dir)
    rv, _ = io.run(cmd, cwd, cli_args.quiet)

    # ----------------------
    # Ansible (Post-Destroy)
    # ----------------------
    # If there's an 'ansible/post-destroy/<t_dir>' directory
    # we run the site.yaml file in it. There is no inventory,
    # the ansible script runs locally.
    if os.path.exists('ansible/post-destroy/{}'.format(t_dir)):

        cmd = 'ansible-playbook site.yaml'
        cwd = 'ansible/post-destroy/{}'.format(t_dir)
        rv, _ = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

    return rv

def _main(cli_args, chosen_deployment_name):
    """Deployment entry point.

    :param cli_args: The command-line arguments
    :type cli_args: ``list``
    :param chosen_deployment_name: The deployment file
    :type chosen_deployment_name: ``str``
    :returns: True on success
    :rtype: ``bool``
    """
    config_file = os.path.join(
        OKD_DEPLOYMENTS_DIRECTORY,
        chosen_deployment_name,
        io.get_deployment_config_filename(chosen_deployment_name))
    if not os.path.isfile(config_file):
        print('Config file does not exist ({})'.
              format(chosen_deployment_name))
        return False
    with codecs.open(config_file, 'r', 'utf8') as stream:
        deployment = DefaultMunch.fromDict(yaml.load(stream))

    # First check:
    # is the version present
    # and do we support it?
    if 'version' not in deployment:
        print('The deployment configuration has no version.')
        return False
    if deployment.version not in SUPPORTED_DEPLOYMENT_VERSIONS:
        supported_versions = str(SUPPORTED_DEPLOYMENT_VERSIONS[0])
        for version in SUPPORTED_DEPLOYMENT_VERSIONS[1:]:
            supported_versions += ', {}'.format(version)
        print('The deployment configuration file version ({})'
              ' is not supported.'.format(deployment.version))
        print('Supported versions are: {}'.format(supported_versions))
        return False

    # There must be an okd/inventories directory
    inventory_dir = deployment.okd.inventory_dir
    if not os.path.isdir('okd/inventories/{}'.format(inventory_dir)):
        print('Missing "okd/inventories" directory')
        print('Expected to find the inventory directory "{}"'
              ' but it was not there.'.format(inventory_dir))
        print('Every deployment must have an "inventories" directory')
        return False

    # If the cluster SSH user is not defined,
    # insert it.
    if 'ssh_user' not in deployment.cluster:
        print('Setting default SSH user "{}"'.format(
            OKD_DEFAULT_CLUSTER_SSH_USER))
        deployment.cluster.ssh_user = OKD_DEFAULT_CLUSTER_SSH_USER

    # -----
    # Hello
    # -----
    io.banner(deployment.name, full_heading=True, quiet=False)
    if not cli_args.auto_acknowledge and not cli_args.just_plan:

        # Display the orchestration description
        # (if there is one)
        if deployment.description:
            io.description(deployment.description)

        confirmation_word = io.get_confirmation_word()
        target = 'CREATE the Cluster' \
            if cli_args.cluster else 'INSTALL OpenShift/OKD'
        confirmation = input('Enter "{}" to {}: '.format(
            confirmation_word, target))
        if confirmation != confirmation_word:
            print('Phew! That was close!')
            return True

    # Some key information...
    okd_admin_password = os.environ.get(OKD_ADMIN_PASSWORD_ENV)
    if not okd_admin_password:
        io.error('You must define {}'.format(OKD_ADMIN_PASSWORD_ENV))
    okd_api_hostname = deployment.cluster.public_hostname
    okd_api_port = deployment.cluster.api_port

    # -------
    # Ansible (A specific version)
    # -------
    # Install the ansible version named in the deployment file
    cmd = 'pip install --upgrade pip setuptools --user'
    rv, _ = io.run(cmd, '.', cli_args.quiet)
    if not rv:
        return False

    cmd = 'pip install ansible=={} --user'. \
        format(deployment.okd.ansible_version)
    rv, _ = io.run(cmd, '.', cli_args.quiet)
    if not rv:
        return False

    t_dir = deployment.cluster.terraform_dir
    if cli_args.cluster:

        # ------
        # Render (jinja2 files)
        # ------
        # Translate content of Jinja2 template files
        # using the deployment configuration's YAML file content.
        if not cli_args.skip_rendering:

            cmd = './render.py {} --ssh-user {}'.\
                format(chosen_deployment_name,
                       deployment.cluster.ssh_user)
            cwd = '.'
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

        # If the deployment file has a 'my_machines' section
        # then we assume the user's provided their own cluster
        # and the Terraform step is not needed.
        if 'my_machines' in deployment:

            # -----------------
            # Manual Templating
            # -----------------
            # The user has provided their own cluster
            # and defined it in the my_machines section
            # of their deployment configuration.
            #
            # Here we process the rendered inventory files
            # just as Terraform would do.
            io.banner('Templating ...')

            print('inventory')
            if not templater.render(deployment):
                return False

            print('bastion/inventory')
            file_name = 'ansible/bastion/inventory.yaml.tpl'
            if not templater.\
                    render(deployment, template_file_name=file_name):
                return False

            print('post-okd/inventory')
            file_name = 'ansible/post-okd/inventory.yaml.tpl'
            if not templater. \
                    render(deployment, template_file_name=file_name,
                           admin_password=okd_admin_password):
                return False

        else:

            # ---------
            # Terraform
            # ---------
            # Create compute instances for the cluster.
            cmd = 'terraform init'
            cwd = 'terraform/{}'.format(t_dir)
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

            # Plan or Apply?
            action = 'plan' if cli_args.just_plan else 'apply -auto-approve'
            cmd = 'terraform {}' \
                  ' -state=.terraform.{}'.format(action,
                                                 chosen_deployment_name)
            cwd = 'terraform/{}'.format(t_dir)
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

        if cli_args.just_plan:
            # Just plan means just that...
            return True

        # -------
        # Ansible
        # -------
        # Run the bastion site file.
        if not cli_args.skip_pre_okd:

            extra_env = ''
            if deployment.okd.certificates:
                if deployment.okd.certificates.generate_api_cert:

                    certbot_email = os.environ.get(OKD_CERTBOT_EMAIL_ENV)
                    if not certbot_email:
                        io.error(
                            'You must define {}'.format(OKD_CERTBOT_EMAIL_ENV))
                        return False

                    extra_env += ' -e master_cert_email="{}"'.\
                        format(certbot_email)
                    extra_env += ' -e public_hostname="{}"'. \
                        format(deployment.cluster.public_hostname)

                elif (deployment.okd.certificates.wildcard_cert or
                      deployment.okd.certificates.master_api_cert):

                    # User-supplied certificates -
                    # expect a vault password file
                    # in the deployment directory
                    extra_env += ' --vault-password-file' \
                                 ' {}/{}/vault-pass.txt'.\
                        format(OKD_DEPLOYMENTS_DIRECTORY,
                               chosen_deployment_name)

            if OKD_DEPLOYMENTS_DIRECTORY != 'deployments':
                extra_env += ' -e deployments_directory="{}"'.\
                    format(OKD_DEPLOYMENTS_DIRECTORY)
            else:
                extra_env += ' -e deployments_directory="../../deployments"'

            keypair_name = os.environ.get(OKD_KEYPAIR_NAME_ENV)
            if not keypair_name:
                io.error('You must define {}'.format(OKD_KEYPAIR_NAME_ENV))
                return False

            cmd = 'ansible-playbook site.yaml' \
                  ' {}' \
                  ' -e keypair_name={}' \
                  ' -e inventory_dir={}' \
                  ' -e cluster_ssh_user={}' \
                  ' -e deployment_name={}'.format(extra_env,
                                                  keypair_name,
                                                  deployment.okd.inventory_dir,
                                                  deployment.cluster.ssh_user,
                                                  chosen_deployment_name)
            cwd = 'ansible/bastion'
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

        # Now expose the Bastion's IP...
        if 'my_machines' in deployment:
            # Simulate the final step in Terraform,
            # i.e. exposing the bastion.
            # Doing this simplifies things for the user
            # i.e. "it looks and feels the same"
            io.banner('terraform output ...')
            print('bastion_ip = {}'.format(deployment.my_machines.bastion))
        else:
            cmd = 'terraform output' \
                  ' -state=.terraform.{}'.format(chosen_deployment_name)
            cwd = 'terraform/{}'.format(t_dir)
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

        # Leave.
        return True

    # If we get here we're installing OpenShift/OKD
    # (on a cluster that is assumed to exist)...

    # -----
    # Clone (OpenShift Ansible Repo)
    # -----
    # ...and checkout the revision defined by the deployment tag.
    if not cli_args.skip_okd:

        # If the expected clone directory does not exist
        # then clone OpenShift Ansible.
        if not os.path.exists('openshift-ansible'):
            cmd = 'git clone' \
                  ' https://github.com/openshift/openshift-ansible.git' \
                  ' --no-checkout'
            cwd = '.'
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

        # Checkout the required OpenShift Ansible TAG
        cmd = 'git checkout tags/{}'. \
            format(deployment.okd.ansible_tag)
        cwd = 'openshift-ansible'
        rv, _ = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

    # -------
    # Ansible (Pre-OKD)
    # -------
    if not cli_args.skip_pre_okd:

        extra_env = ''
        if deployment.okd.certificates and\
                deployment.okd.certificates.generate_api_cert:
            extra_env += ' -e public_hostname={}'. \
                format(deployment.cluster.public_hostname)

        cmd = 'ansible-playbook site.yaml' \
              ' {}' \
              ' -i ../../okd/inventories/{}/inventory.yaml'.\
            format(extra_env, inventory_dir)
        cwd = 'ansible/pre-okd'
        rv, _ = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

    # -------
    # Ansible (OKD)
    # -------
    # Deploy using the playbooks named in the deployment
    # (from the checked-out version).
    if not cli_args.skip_okd:

        for play in deployment.okd.play:
            cmd = 'ansible-playbook ../openshift-ansible/playbooks/{}.yml' \
                  ' -i inventories/{}/inventory.yaml'.\
                format(play, inventory_dir)
            cwd = 'okd'
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

    # -------
    # Ansible (Post-OKD)
    # -------
    if not cli_args.skip_post_okd:

        # Always run the 'site' playbook.
        # This adds the OKD admin and (optional) developer user accounts
        # and other common things like template deployment.
        #
        # The following variables are made available to all the playbooks: -
        #
        # - okd_api_hostname
        # - okd_admin
        # - okd_admin_password
        extra_env = ''
        dev_password = os.environ.get(OKD_DEVELOPER_PASSWORD_ENV)
        if dev_password:
            extra_env += ' -e okd_developer_password={}'.format(dev_password)
        # The template namespace
        # (optionally defined in the configuration)
        if deployment.okd.template and deployment.okd.template.namespace:
            template_namespace = deployment.okd.template.namespace
            extra_env += ' -e template_namespace={}'.format(
                template_namespace)
        cmd = 'ansible-playbook site.yaml' \
              '{}' \
              ' -e okd_api_hostname=https://{}:{}' \
              ' -e okd_admin=admin' \
              ' -e okd_admin_password={}' \
              ' -e okd_deployment={}'. \
            format(extra_env, okd_api_hostname, okd_api_port,
                   okd_admin_password, chosen_deployment_name)
        cwd = 'ansible/post-okd'
        rv, _ = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

        # Now iterate through the plays listed in the cluster's
        # 'post_okd' list...
        if deployment.okd.post_okd:
            for play in deployment.okd.post_okd:
                # Any user-defined 'extra' variables?
                play_vars = ''
                if play.vars:
                    for var in play.vars:
                        play_vars += '-e {} '.format(var)
                    play_vars = play_vars[:-1]
                # Run the user playbook...
                cmd = 'ansible-playbook playbooks/{}/deploy.yaml' \
                      ' -e okd_api_hostname=https://{}:{}' \
                      ' -e okd_admin=admin' \
                      ' -e okd_admin_password={}' \
                      ' -e okd_deployment={}' \
                      ' {}'.\
                    format(play.play, okd_api_hostname, okd_api_port,
                           okd_admin_password, chosen_deployment_name,
                           play_vars)
                cwd = 'ansible/post-okd'
                rv, _ = io.run(cmd, cwd, cli_args.quiet)
                if not rv:
                    return False

    # -------
    # Success
    # -------
    # OK if we get here.
    # Cluster created and OKD installed.
    return True

                    nargs='?',
                    help='The name of the deployment')
ARGS = PARSER.parse_args()

# Go...
deployment_name = io.get_deployment_config_name(ARGS.deployment,
                                                ARGS.display_deployments)

# User must have specified 'cluster' or 'open-shift'
if not ARGS.cluster and not ARGS.okd:
    # If 'destroy' exists then we need to know whether
    # we're creating a cluster or an OKD deployment
    # (on an existing cluster)
    if os.path.isfile('destroy.py'):
        io.error('Must specify --cluster or --okd')
        sys.exit(1)
    # Otherwise we assume --okd
    ARGS.okd = True
if ARGS.just_plan and not ARGS.cluster:
    io.error('Must specify --cluster if using --just-plan')
    sys.exit(1)

# The OKD admin password must be set.
if not os.environ.get(OKD_ADMIN_PASSWORD_ENV):
    io.error('You must define the "{}" environment variable'.format(
        OKD_ADMIN_PASSWORD_ENV))
    sys.exit(1)

success = _main(ARGS, deployment_name)

def cmd_create_instance(self, args):
    """ Create a BigAnimal cluster """
    try:
        private_network = args.private_network == '1'
        ip = args.public_ip if args.public_ip else '0.0.0.0/0'
        ip_ranges = []

        ip = ip.split(',')
        for i in ip:
            ip_ranges.append([i, 'pgcloud client {}'.format(i)])

        debug('Creating BigAnimal cluster: {}...'.format(args.name))
        _url = "{0}/{1}".format(self.BASE_URL, 'clusters')
        _headers = {
            "content-type": "application/json",
            "accept": "application/json",
            'authorization': 'Bearer {0}'.format(self._access_key)
        }
        _data = {
            'clusterName': args.name,
            'instanceTypeId': args.instance_type,
            'password': self._database_pass,
            'postgresTypeId': args.db_type,
            'postgresVersion': args.db_version,
            'privateNetworking': private_network,
            'providerId': 'azure',
            'regionId': args.region,
            'replicas': 3,
            'volumePropertiesId': args.volume_properties,
            'volumeTypeId': args.volume_type,
            'clusterArch': {
                'id': args.cluster_arch,
                'nodes': int(args.nodes)
            },
            'pgConfigMap': [],
        }

        if not private_network:
            _data['allowIpRangeMap'] = ip_ranges

        cluster_resp = requests.post(_url,
                                     headers=_headers,
                                     data=json.dumps(_data))

        if cluster_resp.status_code == 202 and cluster_resp.content:
            cluster_info = json.loads(cluster_resp.content)
            instance_id = cluster_info['pgId']
            instance = self.get_instance_status(instance_id)
            data = {
                'instance': {
                    'ImageName': instance['imageName'],
                    'Database Type': instance['pgType']['name'],
                    'Hostname':
                        instance['clusterConnectionInfo']['serviceName'],
                    'Port': instance['clusterConnectionInfo']['port'],
                    'Database':
                        instance['clusterConnectionInfo']['databaseName'],
                    'Username':
                        instance['clusterConnectionInfo']['username']
                }
            }
            output(data)
        else:
            error(str(cluster_resp.text))
    except Exception as e:
        debug(str(e))

def _main(cli_args, deployment_name):
    """Destruction entry point.

    :param cli_args: The command-line arguments
    :type cli_args: ``list``
    :param deployment_name: The deployment file (excluding the extension)
    :type deployment_name: ``str``
    :return: True on success
    :rtype: ``bool``
    """
    deployment_file = 'deployments/{}.yaml'.format(deployment_name)
    if not os.path.exists(deployment_file):
        io.error('No config file ({}) for an "{}" deployment'.format(
            deployment_file, deployment_name))
        return False
    with open(deployment_file, 'r') as stream:
        deployment = yaml.load(stream)

    # There must be an openshift/inventories/<deployment> directory
    if not os.path.isdir('openshift/inventories/{}'.format(deployment_name)):
        io.error('Missing "openshift/inventories" directory')
        print('Expected to find the directory "{}" but it was not'
              ' there.'.format(deployment_name))
        print('Every deployment must have a matching "inventories" directory')
        return False

    # -----
    # Hello
    # -----
    io.banner(deployment['name'], full_heading=True, quiet=False)

    # Caution if bastion
    if cli_args.bastion:
        print()
        print('CAUTION You are about to destroy the bastion.')
        print('------- Have you destroyed all the clusters created from it?')
        print('        If not you risk leaving a large number of cloud')
        print('        objects orphaned that might otherwise be difficult to')
        print('        delete.')
        print('        Are you sure you want to destroy the bastion?')
        print()

    confirmation_word = io.get_confirmation_word()
    confirmation = raw_input(
        'Enter "{}" to DESTROY this deployment: '.format(confirmation_word))
    if confirmation != confirmation_word:
        print('Phew! That was close!')
        return True

    # -------
    # Ansible
    # -------
    if ('play' in deployment['ansible'] and
            'pre_os_destroy' in deployment['ansible']['play']):
        # Undo local (bastion) /etc/hosts?
        pre_os_destroy = deployment['ansible']['play']['pre_os_destroy']
        if pre_os_destroy:

            cmd = 'ansible-playbook {}.yaml'.format(pre_os_destroy)
            cwd = 'ansible/pre-os'
            rv = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

    # ---------
    # Terraform
    # ---------
    # Destroy the cluster.

    # The 'action' sub-directory is
    # the sub-directory where execution material is located.
    # For terraform the files either relate to the bastion or cluster
    # and are in subdirectories 'bastion' and 'cluster'.
    # The same applies to the ansible playbook - one will be
    # in 'bastion' and one will be in 'cluster'.
    tf_sub_dir = 'bastion' if cli_args.bastion else 'cluster'

    t_dir = deployment['terraform']['dir']
    cmd = '~/bin/terraform init'
    cwd = 'terraform/{}/{}'.format(t_dir, tf_sub_dir)
    rv = io.run(cmd, cwd, cli_args.quiet)
    if not rv:
        return False

    cmd = '~/bin/terraform destroy -force -state=.terraform.{}'.\
        format(deployment_name)
    cwd = 'terraform/{}/{}'.format(t_dir, tf_sub_dir)
    rv = io.run(cmd, cwd, cli_args.quiet)

    return rv