def get_log(batch_client, blob_client, cluster_id: str, application_name: str, tail=False, current_bytes: int = 0):
    """Fetch the log of a spark application submitted to a cluster.

    If the node the task ran on no longer exists, fall back to reading the
    copy of the log persisted in blob storage. When *tail* is True, only the
    bytes past *current_bytes* are downloaded from the task.

    Args:
        batch_client: Azure Batch service client.
        blob_client: Azure blob storage client (used for the storage fallback).
        cluster_id: id of the cluster; doubles as the Batch job id.
        application_name: name of the application; doubles as the Batch task id.
        tail: when True, request only the byte range [current_bytes, end).
        current_bytes: number of bytes the caller has already read.

    Returns:
        models.ApplicationLog describing the (possibly partial) log content.
    """
    job_id = cluster_id
    task_id = application_name
    task = __wait_for_app_to_be_running(batch_client, cluster_id, application_name)

    # The node may have been deallocated; the persisted copy in storage is
    # then the only remaining source for the log.
    if not __check_task_node_exist(batch_client, cluster_id, task):
        return get_log_from_storage(blob_client, cluster_id, application_name, task)

    file = __get_output_file_properties(batch_client, cluster_id, application_name)
    target_bytes = file.content_length

    if target_bytes == current_bytes:
        # No new bytes since the caller's last read -- return an empty chunk.
        content = ""
    else:
        ocp_range = None
        if tail:
            # HTTP/Batch byte ranges are inclusive, hence the -1.
            ocp_range = "bytes={0}-{1}".format(current_bytes, target_bytes - 1)
        # NOTE: `output_file` is a name defined elsewhere in this module
        # (presumably the task's stdout/log file path) -- confirm at file level.
        stream = batch_client.file.get_from_task(
            job_id, task_id, output_file, batch_models.FileGetFromTaskOptions(ocp_range=ocp_range))
        content = helpers.read_stream_as_string(stream)

    # Single construction site replaces the duplicated per-branch builders.
    base_model = base_models.ApplicationLog(
        name=application_name,
        cluster_id=cluster_id,
        application_state=task.state.name,
        log=content,
        total_bytes=target_bytes,
        exit_code=task.execution_info.exit_code,
    )
    return models.ApplicationLog(base_model)
def get_application_log(core_base_operations, cluster_id: str, application_name: str, tail=False, current_bytes: int = 0):
    """Retrieve an application's log and wrap it in the spark-level model.

    Delegates the actual fetch to the core operations layer, then converts
    the returned base model into a ``models.ApplicationLog``.

    Args:
        core_base_operations: core operations object exposing get_application_log.
        cluster_id: id of the cluster the application ran on.
        application_name: name of the application.
        tail: when True, fetch only bytes past *current_bytes*.
        current_bytes: number of bytes the caller has already read.

    Returns:
        models.ApplicationLog wrapping the base application log.
    """
    base_log = core_base_operations.get_application_log(cluster_id, application_name, tail, current_bytes)
    return models.ApplicationLog(base_log)
def get_job_application_log(core_job_operations, spark_job_operations, job_id, application_name):
    """Get the log of an application that ran as part of a job.

    Args:
        core_job_operations: core job operations object.
        spark_job_operations: spark job operations object.
        job_id: id of the job the application belongs to.
        application_name: name of the application whose log to fetch.

    Returns:
        models.ApplicationLog for the requested application.

    Raises:
        error.AztkError: if the Batch service reports an error.
    """
    try:
        base_log = _get_application_log(core_job_operations, spark_job_operations, job_id, application_name)
        return models.ApplicationLog(base_log)
    except BatchErrorException as e:
        # Re-raise Batch failures as the package's own error type.
        raise error.AztkError(helpers.format_batch_exception(e))
def get_log(batch_client, cluster_id: str, application_name: str, tail=False, current_bytes: int = 0):
    """Fetch the log of a spark application submitted to a cluster.

    Unlike the storage-backed variant, this raises when the node the
    application ran on no longer exists.

    Args:
        batch_client: Azure Batch service client.
        cluster_id: id of the cluster; doubles as the Batch job id.
        application_name: name of the application; doubles as the Batch task id.
        tail: when True, request only the byte range [current_bytes, end).
        current_bytes: number of bytes the caller has already read.

    Returns:
        models.ApplicationLog with the new log content (empty when unchanged).

    Raises:
        error.AztkError: if the node the application ran on was removed.
    """
    job_id = cluster_id
    task_id = application_name
    task = __wait_for_app_to_be_running(batch_client, cluster_id, application_name)

    if not __check_task_node_exist(batch_client, cluster_id, task):
        raise error.AztkError("The node the app ran on doesn't exist anymore!")

    file = __get_output_file_properties(batch_client, cluster_id, application_name)
    target_bytes = file.content_length

    if target_bytes == current_bytes:
        # No new bytes since the caller's last read.
        content = ''
    else:
        ocp_range = None
        if tail:
            # HTTP/Batch byte ranges are inclusive, hence the -1.
            ocp_range = "bytes={0}-{1}".format(current_bytes, target_bytes - 1)
        # NOTE: `output_file` is a name defined elsewhere in this module
        # (presumably the task's stdout/log file path) -- confirm at file level.
        stream = batch_client.file.get_from_task(
            job_id, task_id, output_file, batch_models.FileGetFromTaskOptions(ocp_range=ocp_range))
        content = helpers.read_stream_as_string(stream)

    # Single construction site replaces the duplicated per-branch builders.
    return models.ApplicationLog(
        name=application_name,
        cluster_id=cluster_id,
        application_state=task.state.value,  # public accessor, same as private _value_
        log=content,
        total_bytes=target_bytes)
def get_log_from_storage(blob_client, container_name, application_name, task):
    """Read a persisted application log from blob storage.

    Args:
        blob_client: Azure blob storage client.
        container_name: storage container name (the cluster id).
        application_name: name of the application whose log to fetch.
        task: the Batch task the application ran as (supplies state/exit code).

    Returns:
        models.ApplicationLog built from the stored blob.

    Raises:
        error.AztkError: if the log blob was deleted or never existed.
    """
    blob_path = application_name + '/' + constants.SPARK_SUBMIT_LOGS_FILE
    try:
        blob = blob_client.get_blob_to_text(container_name, blob_path)
    except azure.common.AzureMissingResourceHttpError:
        raise error.AztkError(
            "Logs not found in your storage account. They were either deleted or never existed."
        )
    return models.ApplicationLog(
        name=application_name,
        cluster_id=container_name,
        application_state=task.state.value,  # public accessor, same as private _value_
        log=blob.content,
        total_bytes=blob.properties.content_length,
        exit_code=task.execution_info.exit_code)