def configure_secondary_interface(self, args, extra_vars, subnet_cidr):
    """Configure a second NIC on the remote host and verify it survives a reboot.

    Copies configure_nic.sh to the host, runs it with the subnet details parsed
    from subnet_cidr, reboots the instance, and then checks for the dhclient
    script artifacts to confirm the NIC came up on startup.
    """
    logging.info("[app] Configuring second NIC")
    host = extra_vars["ssh_host"]
    port = extra_vars["ssh_port"]
    user = extra_vars["ssh_user"]
    key_file = args.private_key_file
    self.wait_for_ssh_port(host, args.search_pattern, port)
    network, netmask = subnet_cidr.split('/')
    # Copy and run script to configure routes
    scp_to_tmp(get_datafile_path('configure_nic.sh'), host, user, port, key_file)
    nic_cmd = ("sudo /tmp/configure_nic.sh "
               "--subnet_network {} --subnet_netmask {} --cloud {}").format(
                   network, netmask, self.name)
    rc, stdout, stderr = remote_exec_command(host, port, user, key_file, nic_cmd)
    if rc:
        raise YBOpsRuntimeError(
            "Could not configure second nic {} {}".format(stdout, stderr))
    # Since this is on start, wait for ssh on default port
    # Reboot instance
    remote_exec_command(host, port, user, key_file, 'sudo reboot')
    self.wait_for_ssh_port(host, args.search_pattern, port)
    # Verify that the command ran successfully:
    rc, stdout, stderr = remote_exec_command(host, port, user, key_file,
                                             'ls /tmp/dhclient-script-*')
    if rc:
        raise YBOpsRuntimeError("Second nic not configured at start up")
def execute_boot_script(self, args, extra_vars):
    """Copy the user-supplied boot script to /tmp on the host and run it via sudo.

    Raises YBOpsRuntimeError if the remote execution returns a non-zero code.
    """
    script = args.boot_script
    remote_path = os.path.join("/tmp", os.path.basename(script))
    # Make it executable, in case it isn't one.
    current_mode = os.stat(script).st_mode
    os.chmod(script, current_mode | stat.S_IEXEC)
    scp_to_tmp(script,
               extra_vars["ssh_host"],
               extra_vars["ssh_user"],
               extra_vars["ssh_port"],
               args.private_key_file)
    rc, stdout, stderr = remote_exec_command(extra_vars["ssh_host"],
                                             extra_vars["ssh_port"],
                                             extra_vars["ssh_user"],
                                             args.private_key_file,
                                             "sudo {}".format(remote_path))
    if rc:
        raise YBOpsRuntimeError(
            "[app] Could not run bootscript {} {}".format(stdout, stderr))
def callback(self, args):
    """Run preflight checks against a remote instance and print results as JSON.

    Verifies SSH connectivity, ansible reachability, then executes
    preflight_checks.sh on the host and merges its JSON output (string
    "true"/"false" values coerced to booleans) into the results dict.

    Raises:
        YBOpsRuntimeError: if the instance cannot be found in the cloud.
    """
    host_info = self.cloud.get_host_info(args)
    if not host_info:
        raise YBOpsRuntimeError("Instance: {} does not exist, cannot run preflight checks"
                                .format(args.search_pattern))
    results = {}
    logging.info("Running {} preflight checks for instance: {}".format(
        args.precheck_type, args.search_pattern))
    self.update_ansible_vars_with_args(args)
    self.update_ansible_vars_with_host_info(host_info, args.custom_ssh_port)
    try:
        # "configure" prechecks run before provisioning, so the host may still
        # be listening on the default ssh port rather than the custom one.
        is_configure = args.precheck_type == "configure"
        self.wait_for_host(args, default_port=is_configure)
    except YBOpsRuntimeError as e:
        logging.info("Failed to connect to node {}: {}".format(args.search_pattern, e))
        # No point continuing test if ssh fails.
        results["SSH Connection"] = False
        print(json.dumps(results, indent=2))
        return
    scp_result = scp_to_tmp(get_datafile_path('preflight_checks.sh'),
                            self.extra_vars["private_ip"],
                            self.extra_vars["ssh_user"],
                            self.extra_vars["ssh_port"],
                            args.private_key_file)
    results["SSH Connection"] = scp_result == 0
    ansible_status = self.cloud.setup_ansible(args).run("test_connection.yml",
                                                        self.extra_vars,
                                                        host_info,
                                                        print_output=False)
    results["Try Ansible Command"] = ansible_status == 0
    cmd = "/tmp/preflight_checks.sh --type {} --yb_home_dir {} --mount_points {}".format(
        args.precheck_type, YB_HOME_DIR, self.cloud.get_mount_points_csv(args))
    if args.install_node_exporter:
        cmd += " --install_node_exporter"
    if args.air_gap:
        cmd += " --airgap"
    self.update_ansible_vars_with_args(args)
    self.update_ansible_vars_with_host_info(host_info, args.custom_ssh_port)
    rc, stdout, stderr = remote_exec_command(self.extra_vars["private_ip"],
                                             self.extra_vars["ssh_port"],
                                             self.extra_vars["ssh_user"],
                                             args.private_key_file,
                                             cmd)
    if rc != 0:
        results["Preflight Script Error"] = stderr
    else:
        # stdout will be returned as a list of lines, which should just be one line of json.
        stdout = json.loads(stdout[0])
        # Fix: dict.iteritems() is Python-2-only and raises AttributeError on
        # Python 3. Use the compat iteritems() helper, matching the other
        # preflight callback in this file.
        stdout = {k: v == "true" for k, v in iteritems(stdout)}
        results.update(stdout)
    output = json.dumps(results, indent=2)
    print(output)
def callback(self, args):
    """Configure a YugaByte server instance: gather vars, copy artifacts, run ansible.

    Populates self.extra_vars from the CLI args (ports, master addresses, gflags,
    certificates, package), validates the target instance exists and is of the
    expected server type, copies the software package over scp when needed, sets
    up TLS/encryption-at-rest material, and finally runs the configure playbook
    (skipped when only rotating certs).

    Raises:
        YBOpsRuntimeError: on missing/invalid arguments or when the target
            instance does not exist or has a mismatched server type.
    """
    if args.type == self.YB_SERVER_TYPE:
        # master_addresses_for_tserver is mandatory for configuring a YB server.
        if args.master_addresses_for_tserver is None:
            raise YBOpsRuntimeError("Missing argument for YugaByte configure")
        self.extra_vars.update({
            "instance_name": args.search_pattern,
            "master_addresses_for_tserver": args.master_addresses_for_tserver,
            "master_http_port": args.master_http_port,
            "master_rpc_port": args.master_rpc_port,
            "tserver_http_port": args.tserver_http_port,
            "tserver_rpc_port": args.tserver_rpc_port,
            "cql_proxy_rpc_port": args.cql_proxy_rpc_port,
            "redis_proxy_rpc_port": args.redis_proxy_rpc_port,
            "cert_valid_duration": args.cert_valid_duration,
            "org_name": args.org_name,
            "certs_node_dir": args.certs_node_dir,
            "encryption_key_dir": args.encryption_key_target_dir
        })
        # Optional vars: only set when the corresponding arg was provided.
        if args.master_addresses_for_master is not None:
            self.extra_vars[
                "master_addresses_for_master"] = args.master_addresses_for_master
        if args.server_broadcast_addresses is not None:
            self.extra_vars[
                "server_broadcast_addresses"] = args.server_broadcast_addresses
        if args.yb_process_type:
            self.extra_vars[
                "yb_process_type"] = args.yb_process_type.lower()
    else:
        raise YBOpsRuntimeError(
            "Supported types for this command are only: {}".format(
                self.supported_types))
    # Make sure we set server_type so we pick the right configure.
    self.update_ansible_vars_with_args(args)
    # gflags and package are mutually exclusive: a gflags change must not also
    # trigger a software download.
    if args.gflags is not None:
        if args.package:
            raise YBOpsRuntimeError(
                "When changing gflags, do not set packages info.")
        self.extra_vars["gflags"] = json.loads(args.gflags)
    if args.package is not None:
        self.extra_vars["package"] = args.package
    if args.extra_gflags is not None:
        self.extra_vars["extra_gflags"] = json.loads(args.extra_gflags)
    if args.gflags_to_remove is not None:
        self.extra_vars["gflags_to_remove"] = json.loads(
            args.gflags_to_remove)
    # Certificate/key payloads and paths are stripped of surrounding whitespace
    # before being handed to ansible.
    if args.rootCA_cert is not None:
        self.extra_vars["rootCA_cert"] = args.rootCA_cert.strip()
    if args.rootCA_key is not None:
        self.extra_vars["rootCA_key"] = args.rootCA_key.strip()
    if args.client_cert is not None:
        self.extra_vars["client_cert"] = args.client_cert.strip()
    if args.client_key is not None:
        self.extra_vars["client_key"] = args.client_key.strip()
    if args.root_cert_path is not None:
        self.extra_vars["root_cert_path"] = args.root_cert_path.strip()
    if args.node_cert_path is not None:
        self.extra_vars["node_cert_path"] = args.node_cert_path.strip()
    if args.node_key_path is not None:
        self.extra_vars["node_key_path"] = args.node_key_path.strip()
    if args.client_cert_path is not None:
        self.extra_vars["client_cert_path"] = args.client_cert_path.strip()
    if args.client_key_path is not None:
        self.extra_vars["client_key_path"] = args.client_key_path.strip()
    host_info = None
    # 'localhost' skips cloud lookup and all remote copy/cert steps.
    if args.search_pattern != 'localhost':
        host_info = self.cloud.get_host_info(args)
        if not host_info:
            raise YBOpsRuntimeError(
                "Instance: {} does not exists, cannot configure".format(
                    args.search_pattern))
        if host_info['server_type'] != args.type:
            raise YBOpsRuntimeError(
                "Instance: {} is of type {}, not {}, cannot configure".
                format(args.search_pattern, host_info['server_type'],
                       args.type))
        self.update_ansible_vars_with_host_info(host_info, args.custom_ssh_port)
        # If we have a package, then manually copy it over using scp instead of going over
        # ansible, so we do not have issues such as ENG-3424.
        # Python based paramiko seemed to have the same problems as ansible copy module!
        #
        # NOTE: we should only do this if we have to download the package...
        # NOTE 2: itest should download package from s3 to improve speed for instances in AWS.
        # TODO: Add a variable to specify itest ssh_user depending on VM users.
        start_time = time.time()
        if args.package and (args.tags is None or args.tags == "download-software"):
            if args.itest_s3_package_path and args.type == self.YB_SERVER_TYPE:
                itest_extra_vars = self.extra_vars.copy()
                itest_extra_vars[
                    "itest_s3_package_path"] = args.itest_s3_package_path
                itest_extra_vars["ssh_user"] = "******"
                # Runs all itest-related tasks (e.g. download from s3 bucket).
                itest_extra_vars["tags"] = "itest"
                self.cloud.setup_ansible(args).run(
                    "configure-{}.yml".format(args.type),
                    itest_extra_vars, host_info)
                logging.info(
                    ("[app] Running itest tasks including S3 " +
                     "package download {} to {} took {:.3f} sec").format(
                        args.itest_s3_package_path,
                        args.search_pattern, time.time() - start_time))
            else:
                scp_to_tmp(args.package,
                           self.extra_vars["private_ip"],
                           self.extra_vars["ssh_user"],
                           self.extra_vars["ssh_port"],
                           args.private_key_file)
                logging.info(
                    "[app] Copying package {} to {} took {:.3f} sec".
                    format(args.package, args.search_pattern,
                           time.time() - start_time))
        logging.info("Configuring Instance: {}".format(args.search_pattern))
        ssh_options = {
            # TODO: replace with args.ssh_user when it's setup in the flow
            "ssh_user": self.get_ssh_user(),
            "private_key_file": args.private_key_file
        }
        ssh_options.update(get_ssh_host_port(host_info, args.custom_ssh_port))
        if args.use_custom_certs:
            if args.rotating_certs:
                logging.info("Verifying root certs are the same.")
                self.cloud.compare_root_certs(self.extra_vars, ssh_options)
            logging.info("Copying custom certificates to {}.".format(
                args.search_pattern))
            self.cloud.copy_certs(self.extra_vars, ssh_options)
        else:
            # NOTE(review): mixed truthiness / `is not None` check — a falsy
            # rootCA_cert (e.g. "") skips cert generation even when the key is
            # set; presumably intentional, verify against callers.
            if args.rootCA_cert and args.rootCA_key is not None:
                logging.info(
                    "Creating and copying over client TLS certificate to {}".
                    format(args.search_pattern))
                self.cloud.generate_client_cert(self.extra_vars, ssh_options)
        if args.encryption_key_source_file is not None:
            self.extra_vars[
                "encryption_key_file"] = args.encryption_key_source_file
            logging.info(
                "Copying over encryption-at-rest certificate from {} to {}".
                format(args.encryption_key_source_file,
                       args.encryption_key_target_dir))
            self.cloud.create_encryption_at_rest_file(self.extra_vars,
                                                      ssh_options)
    # If we are just rotating certs, we don't need to do any configuration changes.
    if not args.rotating_certs:
        self.cloud.setup_ansible(args).run(
            "configure-{}.yml".format(args.type), self.extra_vars, host_info)
def callback(self, args):
    """Run extended preflight checks (certs, ports, sudo) and print results as JSON.

    Verifies SSH connectivity, validates server/client TLS certificates when
    their paths are supplied, pushes the sudo password via ansible, then runs
    preflight_checks.sh remotely with the full list of YB ports to check and
    merges its JSON output (string "true"/"false" coerced to booleans) into
    the results dict.

    Raises:
        YBOpsRuntimeError: if the instance cannot be found in the cloud.
    """
    host_info = self.cloud.get_host_info(args)
    if not host_info:
        raise YBOpsRuntimeError(
            "Instance: {} does not exist, cannot run preflight checks".
            format(args.search_pattern))
    results = {}
    logging.info("Running {} preflight checks for instance: {}".format(
        args.precheck_type, args.search_pattern))
    self.update_ansible_vars_with_args(args)
    self.update_ansible_vars_with_host_info(host_info, args.custom_ssh_port)
    try:
        # "configure" prechecks run pre-provisioning, so fall back to the
        # default ssh port while waiting for the host.
        is_configure = args.precheck_type == "configure"
        self.wait_for_host(args, default_port=is_configure)
    except YBOpsRuntimeError as e:
        logging.info("Failed to connect to node {}: {}".format(
            args.search_pattern, e))
        # No point continuing test if ssh fails.
        results["SSH Connection"] = False
        print(json.dumps(results, indent=2))
        return
    scp_result = scp_to_tmp(get_datafile_path('preflight_checks.sh'),
                            self.extra_vars["private_ip"],
                            self.extra_vars["ssh_user"],
                            self.extra_vars["ssh_port"],
                            args.private_key_file)
    results["SSH Connection"] = scp_result == 0
    ssh_options = {
        "ssh_user": "******",
        "ssh_host": self.extra_vars["private_ip"],
        "ssh_port": self.extra_vars["ssh_port"],
        "private_key_file": args.private_key_file
    }
    # Validate the node-to-node (server) certificate chain if provided.
    if args.root_cert_path is not None:
        self.verify_certificates("Server", args.root_cert_path,
                                 args.server_cert_path,
                                 args.server_key_path,
                                 ssh_options,
                                 args.skip_cert_validation,
                                 results)
    # Validate the client-to-server root CA chain if provided separately.
    if args.root_cert_path_client_to_server is not None:
        self.verify_certificates("Server clientRootCA",
                                 args.root_cert_path_client_to_server,
                                 args.server_cert_path_client_to_server,
                                 args.server_key_path_client_to_server,
                                 ssh_options,
                                 args.skip_cert_validation,
                                 results)
    if args.client_cert_path is not None:
        # Prefer the client-to-server root CA when one is set; otherwise fall
        # back to the node-to-node root CA.
        root_cert_path = args.root_cert_path_client_to_server \
            if args.root_cert_path_client_to_server is not None else args.root_cert_path
        self.verify_certificates(
            "Client", root_cert_path,
            args.client_cert_path,
            args.client_key_path,
            ssh_options,
            'HOSTNAME',  # not checking hostname for those certs
            results)
    sudo_pass_file = '/tmp/.yb_sudo_pass.sh'
    self.extra_vars['sudo_pass_file'] = sudo_pass_file
    ansible_status = self.cloud.setup_ansible(args).run(
        "send_sudo_pass.yml", self.extra_vars, host_info, print_output=False)
    results["Try Ansible Command"] = ansible_status == 0
    # Comma-separated list of every configured YB port; None entries (ports
    # not supplied on the CLI) are dropped.
    ports_to_check = ",".join([
        str(p) for p in [
            args.master_http_port, args.master_rpc_port,
            args.tserver_http_port, args.tserver_rpc_port,
            args.cql_proxy_http_port, args.cql_proxy_rpc_port,
            args.ysql_proxy_http_port, args.ysql_proxy_rpc_port,
            args.redis_proxy_http_port, args.redis_proxy_rpc_port,
            args.node_exporter_http_port
        ] if p is not None
    ])
    cmd = "/tmp/preflight_checks.sh --type {} --yb_home_dir {} --mount_points {} " \
          "--ports_to_check {} --sudo_pass_file {} --cleanup".format(
              args.precheck_type, YB_HOME_DIR,
              self.cloud.get_mount_points_csv(args), ports_to_check,
              sudo_pass_file)
    if args.install_node_exporter:
        cmd += " --install_node_exporter"
    if args.air_gap:
        cmd += " --airgap"
    self.update_ansible_vars_with_args(args)
    self.update_ansible_vars_with_host_info(host_info, args.custom_ssh_port)
    rc, stdout, stderr = remote_exec_command(self.extra_vars["private_ip"],
                                             self.extra_vars["ssh_port"],
                                             self.extra_vars["ssh_user"],
                                             args.private_key_file, cmd)
    if rc != 0:
        results["Preflight Script Error"] = stderr
    else:
        # stdout will be returned as a list of lines, which should just be one line of json.
        stdout = json.loads(stdout[0])
        stdout = {k: v == "true" for k, v in iteritems(stdout)}
        results.update(stdout)
    output = json.dumps(results, indent=2)
    print(output)