def test_generate_crash_log_on_unsupported_locale(self):
  """Ensures generate_crash_log still writes a crash log file and returns
  its path even when the system locale cannot be determined.

  locale.setlocale is mocked to raise locale.Error, so the code under test
  must tolerate the failure and carry on writing the log.
  """
  # Pin the uuid so the crash log file name is deterministic.
  crashlog_suffix = '123456'
  flexmock(uuid)
  uuid.should_receive('uuid4').and_return(crashlog_suffix)

  exception_class = 'Exception'
  exception_message = 'baz message'
  exception = Exception(exception_message)
  stacktrace = "\n".join([
    'Traceback (most recent call last):',
    ' File "<stdin>", line 2, in <module>',
    '{0}: {1}'.format(exception_class, exception_message)])

  # Mock out grabbing our system's information.
  flexmock(platform)
  platform.should_receive('platform').and_return("MyOS")
  platform.should_receive('python_implementation').and_return("MyPython")

  # Simulate a system whose locale cannot be set.
  flexmock(locale)
  locale.should_receive('setlocale').and_raise(locale.Error)

  # Mock out writing it to the crash log file.
  expected = '{0}log-{1}'.format(LocalState.LOCAL_APPSCALE_PATH,
                                 crashlog_suffix)
  fake_file = flexmock(name='fake_file')
  fake_file.should_receive('write').with_args(str)

  fake_builtins = flexmock(sys.modules['__builtin__'])
  fake_builtins.should_call('open')  # set the fall-through
  fake_builtins.should_receive('open').with_args(
    expected, 'w').and_return(fake_file)

  # Mock out printing the crash log message.
  flexmock(AppScaleLogger)
  AppScaleLogger.should_receive('warn')

  actual = LocalState.generate_crash_log(exception, stacktrace)
  # assertEqual instead of the deprecated assertEquals alias.
  self.assertEqual(expected, actual)
def test_generate_crash_log_on_unsupported_locale(self):
  """Checks that a crash log is still produced when locale detection fails.

  locale.setlocale is forced to raise locale.Error; the crash log should
  nonetheless be written to the expected path, which is returned.
  """
  # Fix the uuid suffix so the expected file name is predictable.
  suffix = '123456'
  flexmock(uuid)
  uuid.should_receive('uuid4').and_return(suffix)

  error_text = 'baz message'
  raised_error = Exception(error_text)
  trace_lines = [
    'Traceback (most recent call last):',
    ' File "<stdin>", line 2, in <module>',
    '{0}: {1}'.format('Exception', error_text)]
  stack = "\n".join(trace_lines)

  # Pretend to be on a known OS / Python implementation.
  flexmock(platform)
  platform.should_receive('platform').and_return("MyOS")
  platform.should_receive('python_implementation').and_return("MyPython")

  # And on a system whose locale cannot be set.
  flexmock(locale)
  locale.should_receive('setlocale').and_raise(locale.Error)

  # The crash log should land in the standard AppScale log location.
  expected = '{0}log-{1}'.format(LocalState.LOCAL_APPSCALE_PATH, suffix)
  fake_file = flexmock(name='fake_file')
  fake_file.should_receive('write').with_args(str)

  fake_builtins = flexmock(sys.modules['__builtin__'])
  fake_builtins.should_call('open')  # set the fall-through
  fake_builtins.should_receive('open').with_args(expected, 'w') \
    .and_return(fake_file)

  # Swallow the warning printed for the user.
  flexmock(AppScaleLogger)
  AppScaleLogger.should_receive('warn')

  self.assertEquals(expected,
                    LocalState.generate_crash_log(raised_error, stack))
def terminate_virtualized_cluster(cls, keyname, clean, is_verbose):
  """Stops all API services running on all nodes in the currently running
  AppScale deployment.

  Asks the shadow node's AppController to terminate the deployment, polls
  until every node reports its shutdown status, and finally stops the
  AppController on the head node itself.

  Args:
    keyname: The name of the SSH keypair used for this AppScale deployment.
    is_verbose: A bool that indicates if we should print the commands
      executed to stdout.
    clean: A bool representing whether clean should be ran on the nodes.
  Raises:
    AppScaleException: If the secret key cannot be found (AppScale is most
      likely not running), or if one or more nodes failed to stop.
  """
  AppScaleLogger.log("Stopping appscale deployment with keyname {0}"
                     .format(keyname))
  # Brief pause so the user can see the message before work starts.
  time.sleep(2)
  shadow_host = LocalState.get_host_with_role(keyname, 'shadow')
  try:
    secret = LocalState.get_secret_key(keyname)
  except IOError:
    # We couldn't find the secret key: AppScale is most likely not
    # running.
    raise AppScaleException("Couldn't find AppScale secret key.")

  acc = AppControllerClient(shadow_host, secret)
  try:
    # Number of non-head nodes we expect to report a successful stop.
    machines = len(acc.get_all_public_ips()) - 1
    acc.run_terminate(clean)
    terminated_successfully = True
    log_dump = u""
    while not acc.is_appscale_terminated():
      # For terminate receive_server_message will return a JSON string that
      # is a list of dicts with keys: ip, status, output
      try:
        output_list = yaml.safe_load(acc.receive_server_message())
      except Exception as e:
        # Best-effort: record the error text and keep polling.
        log_dump += e.message
        continue
      for node in output_list:
        if node.get("status"):
          machines -= 1
          AppScaleLogger.success("Node at {node_ip}: {status}".format(
            node_ip=node.get("ip"), status="Stopping AppScale finished"))
        else:
          AppScaleLogger.warn("Node at {node_ip}: {status}".format(
            node_ip=node.get("ip"), status="Stopping AppScale failed"))
          terminated_successfully = False
          log_dump += u"Node at {node_ip}: {status}\nNode Output:"\
                      u"{output}".format(node_ip=node.get("ip"),
                                         status="Stopping AppScale failed",
                                         output=node.get("output"))
        # Per-node output is only shown in verbose mode.
        AppScaleLogger.verbose(u"Output of node at {node_ip}:\n"
                               u"{output}".format(node_ip=node.get("ip"),
                                                  output=node.get("output")),
                               is_verbose)
    if not terminated_successfully or machines > 0:
      # NOTE(review): the exception *class* (not an instance) is passed to
      # generate_crash_log here — confirm that is the intended contract.
      LocalState.generate_crash_log(AppControllerException, log_dump)
      raise AppScaleException("{0} node(s) failed stopping AppScale, "
                              "head node is still running AppScale services."
                              .format(machines))

    cls.stop_remote_appcontroller(shadow_host, keyname, is_verbose, clean)
  except socket.error as socket_error:
    AppScaleLogger.warn(u'Unable to talk to AppController: {}'.
                        format(socket_error.message))
    raise
  except Exception as exception:
    AppScaleLogger.verbose(u'Saw Exception while stopping AppScale {0}'.
                           format(str(exception)), is_verbose)
    raise
def run_upgrade_script(cls, options, node_layout):
  """ Runs the upgrade script which checks for any upgrades needed to be
  performed.

  Builds the upgrade command from the deployment's database and ZooKeeper
  nodes, launches it over SSH in a background thread, then polls a remote
  JSON status file until the script reports completion or an error.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
    node_layout: A NodeLayout object for the deployment.
  Raises:
    AppScaleException: If the remote status file is malformed or reports
      a status other than 'complete' or 'inProgress'.
  """
  # Timestamp identifies this run's log/status files on the head node.
  timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
  db_ips = [node.private_ip for node in node_layout.nodes
            if node.is_role('db_master') or node.is_role('db_slave')]
  zk_ips = [node.private_ip for node in node_layout.nodes
            if node.is_role('zookeeper')]

  upgrade_script_command = '{script} --keyname {keyname} '\
      '--log-postfix {timestamp} '\
      '--db-master {db_master} '\
      '--zookeeper {zk_ips} '\
      '--database {db_ips} '\
      '--replication {replication}'.format(
    script=cls.UPGRADE_SCRIPT,
    keyname=options.keyname,
    timestamp=timestamp,
    db_master=node_layout.db_master().private_ip,
    zk_ips=' '.join(zk_ips),
    db_ips=' '.join(db_ips),
    replication=node_layout.replication
  )
  master_public_ip = node_layout.head_node().public_ip

  AppScaleLogger.log("Running upgrade script to check if any other upgrade is needed.")
  # Run the upgrade command as a background process.
  error_bucket = Queue.Queue()
  threading.Thread(
    target=async_layout_upgrade,
    args=(master_public_ip, options.keyname, upgrade_script_command,
          error_bucket, options.verbose)
  ).start()

  last_message = None
  while True:
    # Check if the SSH thread has crashed.
    try:
      ssh_error = error_bucket.get(block=False)
      AppScaleLogger.warn('Error executing upgrade script')
      LocalState.generate_crash_log(ssh_error, traceback.format_exc())
    except Queue.Empty:
      pass

    # Read the script's JSON status file from the head node.
    upgrade_status_file = cls.UPGRADE_STATUS_FILE_LOC + timestamp + ".json"
    command = 'cat' + " " + upgrade_status_file
    upgrade_status = RemoteHelper.ssh(
      master_public_ip, options.keyname, command, options.verbose)
    json_status = json.loads(upgrade_status)

    if 'status' not in json_status or 'message' not in json_status:
      raise AppScaleException('Invalid status log format')

    if json_status['status'] == 'complete':
      AppScaleLogger.success(json_status['message'])
      break

    if json_status['status'] == 'inProgress':
      # Only echo the progress message when it changes.
      if json_status['message'] != last_message:
        AppScaleLogger.log(json_status['message'])
        last_message = json_status['message']
      time.sleep(cls.SLEEP_TIME)
      continue

    # Assume the message is an error.
    AppScaleLogger.warn(json_status['message'])
    raise AppScaleException(json_status['message'])
def run_upgrade_script(cls, options, node_layout):
  """ Runs the upgrade script which checks for any upgrades needed to be
  performed.

  Assembles the upgrade command from the deployment's database and
  ZooKeeper nodes, launches it over SSH in a background thread, then
  repeatedly reads the remote JSON status file until it reports completion
  or an error.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
    node_layout: A NodeLayout object for the deployment.
  Raises:
    AppScaleException: If the remote status file is malformed or reports
      a status other than 'complete' or 'inProgress'.
  """
  started_at = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")

  database_ips = [
      node.private_ip
      for node in node_layout.nodes
      if node.is_role("db_master") or node.is_role("db_slave")
  ]
  zookeeper_ips = [
      node.private_ip
      for node in node_layout.nodes
      if node.is_role("zookeeper")
  ]

  upgrade_command = (
      "{0} --keyname {1} --log-postfix {2} --db-master {3} "
      "--zookeeper {4} --database {5} --replication {6}"
  ).format(
      cls.UPGRADE_SCRIPT,
      options.keyname,
      started_at,
      node_layout.db_master().private_ip,
      " ".join(zookeeper_ips),
      " ".join(database_ips),
      node_layout.replication,
  )
  head_node_ip = node_layout.head_node().public_ip

  AppScaleLogger.log("Running upgrade script to check if any other upgrade is needed.")
  # Kick the script off over SSH in the background; any exception the
  # worker thread hits is handed back through this queue.
  error_queue = Queue.Queue()
  worker = threading.Thread(
      target=async_layout_upgrade,
      args=(head_node_ip, options.keyname, upgrade_command,
            error_queue, options.verbose),
  )
  worker.start()

  previous_message = None
  while True:
    # Surface any crash from the background SSH thread.
    try:
      thread_error = error_queue.get(block=False)
    except Queue.Empty:
      pass
    else:
      AppScaleLogger.warn("Error executing upgrade script")
      LocalState.generate_crash_log(thread_error, traceback.format_exc())

    # Fetch and parse the script's JSON status file from the head node.
    status_path = cls.UPGRADE_STATUS_FILE_LOC + started_at + ".json"
    raw_status = RemoteHelper.ssh(
        head_node_ip, options.keyname, "cat " + status_path, options.verbose)
    status_doc = json.loads(raw_status)

    if "status" not in status_doc or "message" not in status_doc:
      raise AppScaleException("Invalid status log format")

    state = status_doc["status"]
    message = status_doc["message"]
    if state == "complete":
      AppScaleLogger.success(message)
      break
    elif state == "inProgress":
      # Only echo progress messages when they change.
      if message != previous_message:
        AppScaleLogger.log(message)
        previous_message = message
      time.sleep(cls.SLEEP_TIME)
    else:
      # Anything else is treated as a fatal error from the script.
      AppScaleLogger.warn(message)
      raise AppScaleException(message)