示例#1
0
def verify_signature(signed_file_path, output_file_path):
    """Verify the GPG signature of a signed file.

    Args:
        signed_file_path  : path of the signed input file.
        output_file_path  : path where gpg writes the verified payload.

    Returns:
        True when the signature is valid, False otherwise.
    """
    command = ["gpg", "-d"]
    keyring_path = configuration.get_gpg_public_keyring_path()

    # a keyring configured in the conf file overrides the default one
    if keyring_path != "":
        command.extend([GPG_NO_DEFAULT_KEYRING_OPTION, GPG_KEYRING_ARG, keyring_path])
    command.extend(["--output", output_file_path, signed_file_path])

    # temporary workaround for the omi/gpg bug causing gpg to create a .gpg folder in the wrong home dir
    # only apply the workaround for oms installation
    env = None
    if "nxOMSAutomationWorkerResource" in os.path.abspath(__file__):
        env = os.environ.copy()
        env["HOME"] = "/var/opt/microsoft/omsagent/run"

    gpg_process = subprocessfactory.create_subprocess(cmd=command,
                                                      env=env,
                                                      stdout=subprocess.PIPE,
                                                      stderr=subprocess.PIPE)
    stdout, stderr = gpg_process.communicate()

    if gpg_process.poll() != 0:
        tracer.log_sandbox_job_runbook_signature_validation_failed(stderr)
        return False

    tracer.log_debug_trace("Signature is valid.")
    return True
示例#2
0
 def unload_job(self):
     """Unloads the job."""
     subscription_id = self.job_data["subscriptionId"]
     is_draft = self.job_updatable_data["isDraft"]
     self.jrds_client.unload_job(subscription_id, self.sandbox_id,
                                 self.job_id, is_draft, datetime.now(), 2)
     tracer.log_debug_trace("Unloading job.")
示例#3
0
    def routine(self):
        """Poll JRDS for sandbox actions and spawn one sandbox subprocess per new action.

        Side effects: registers each spawned process in self.running_sandboxes
        and starts output monitoring for it.
        """
        self.stop_tracking_terminated_sandbox()

        sandbox_actions = self.jrds_client.get_sandbox_actions()
        tracer.log_debug_trace("Get sandbox action. Found " + str(len(sandbox_actions)) + " action(s).")

        for action in sandbox_actions:
            tracer.log_worker_sandbox_action_found(len(sandbox_actions))
            sandbox_id = str(action["SandboxId"])

            # prevent duplicate sandbox from running
            # NOTE(review): `return` also skips the remaining actions in this
            # batch (they are picked up on the next routine pass) - confirm intended.
            if sandbox_id in self.running_sandboxes:
                return

            # create sandboxes folder if needed
            sandboxes_base_path = "sandboxes"
            sandbox_working_dir = os.path.join(configuration.get_working_directory_path(), sandboxes_base_path,
                                               sandbox_id)

            try:
                iohelper.assert_or_create_path(sandbox_working_dir)
            except OSError:
                # best-effort: the sandbox subprocess fails on its own if the
                # working directory is genuinely unusable (dead `pass` removed)
                tracer.log_debug_trace("Failed to create sandbox folder.")

            cmd = ["python", os.path.join(configuration.get_source_directory_path(), "sandbox.py")]
            process_env_variables = {"sandbox_id": sandbox_id}
            sandbox_process = subprocessfactory.create_subprocess(cmd=cmd,
                                                                  env=process_env_variables,
                                                                  stdout=subprocess.PIPE,
                                                                  stderr=subprocess.PIPE,
                                                                  cwd=sandbox_working_dir)
            self.running_sandboxes[sandbox_id] = sandbox_process
            self.monitor_sandbox_process_outputs(sandbox_id, sandbox_process)
            tracer.log_worker_starting_sandbox(sandbox_id, str(sandbox_process.pid))
示例#4
0
def generate_state_file():
    """Write the worker state file (pid, worker version, optional workspace/resource info).

    The file lands in the configured state directory, or in the working
    directory when the default state directory is in use. Any pre-existing
    state file is replaced.
    """
    state_file_name = "state.conf"
    if configuration.get_state_directory_path() == configuration.DEFAULT_STATE_DIRECTORY_PATH:
        state_file_path = os.path.join(configuration.get_working_directory_path(), state_file_name)
    else:
        state_file_path = os.path.join(configuration.get_state_directory_path(), state_file_name)

    tracer.log_debug_trace("State file path : " + str(state_file_path))

    if os.path.isfile(state_file_path):
        os.remove(state_file_path)

    section = "state"
    config = ConfigParser.ConfigParser()
    config.add_section(section)
    config.set(section, configuration.STATE_PID, str(os.getpid()))
    config.set(section, configuration.WORKER_VERSION, str(configuration.get_worker_version()))

    # for OMS scenarios, optional for DIY
    if len(sys.argv) >= 3:
        config.set(section, configuration.STATE_WORKSPACE_ID, str(sys.argv[2]))

    if len(sys.argv) >= 4:
        config.set(section, configuration.STATE_RESOURCE_VERSION, str(sys.argv[3]))

    # with-statement guarantees the handle is closed even if write() raises
    # (the original left the file open on an exception)
    with open(state_file_path, 'wb') as conf_file:
        config.write(conf_file)
示例#5
0
def verify_signature(signed_file_path, output_file_path):
    """Verifies the signed file's signature.

    Returns:
        True    : If the signature is valid.
        False   : If the signature is invalid.
    """
    # Copy the base command: `cmd +=` on the module-level list would append
    # the keyring/output arguments to GPG_DECRYPT_BASE_CMD itself, corrupting
    # every subsequent call.
    cmd = list(GPG_DECRYPT_BASE_CMD)
    keyring_path = configuration.get_gpg_public_keyring_path()

    # if a keyring is specified in the conf, use it, else use the default one
    if keyring_path != "":
        cmd += [GPG_NO_DEFAULT_KEYRING_OPTION, GPG_KEYRING_ARG, keyring_path]
    cmd += ["--output", output_file_path, signed_file_path]

    proc = subprocessfactory.create_subprocess(cmd=cmd,
                                               stdout=subprocess.PIPE,
                                               stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    if proc.poll() == 0:
        tracer.log_debug_trace("Signature is valid.")
        return True

    tracer.log_debug_trace("Signature is invalid.[exception=" + str(stderr) +
                           "]")
    return False
    def start_runbook_subprocess(self):
        """Creates the runbook subprocess based on the script language and using properties set by the derived class.

        Requires self.base_cmd & self.runbook_file_path to be set by derived class.
        """
        cmd = self.base_cmd + [self.runbook.runbook_file_path]
        job_parameters = self.job_data.parameters
        if job_parameters:  # None or an empty list means nothing to forward
            for parameter in job_parameters:
                tracer.log_debug_trace("Parameter is: \n" + str(parameter))
                if self.runbook.definition_kind_str == "PowerShell" and parameter["Name"]:
                    # Handle named parameters for PowerShell arriving out of order
                    cmd += ["-%s" % parameter["Name"]]
                cmd += [str(json.loads(parameter["Value"]))]

        # Start from a copy of the current process env vars and overlay the
        # job-specific values. (The previous comment claimed the env was NOT
        # copied, contradicting the code - the copy is what actually happens.)
        env = os.environ.copy()
        env.update({"AUTOMATION_JOB_ID": str(self.job_data.job_id),
                    "AUTOMATION_ACTIVITY_ID": str(tracer.u_activity_id),
                    "PYTHONPATH": str(
                        configuration.get_source_directory_path())})  # windows env have to be str (not unicode)
        self.runbook_subprocess = subprocessfactory.create_subprocess(cmd=cmd,
                                                                      env=env,
                                                                      stdout=subprocess.PIPE,
                                                                      stderr=subprocess.PIPE)
    def start_runbook_subprocess(self):
        """Creates the runbook subprocess based on the script language and using properties set by the derived class.

        Requires self.base_cmd & self.runbook_file_path to be set by derived class.
        """
        cmd = self.base_cmd + [self.runbook.runbook_file_path]
        job_parameters = self.job_data.parameters
        if job_parameters is not None and len(job_parameters) > 0:
            for parameter in job_parameters:
                tracer.log_debug_trace("Parameter is: \n" + str(parameter))
                if (self.runbook.definition_kind_str == "PowerShell" or self.runbook.definition_kind_str == "PowerShell7") and parameter["Name"]:
                    # Handle named parameters for PowerShell arriving out of order
                    cmd += ["-%s" % parameter["Name"]]
                try:
                    cmd += [str(json.loads(parameter["Value"]))]
                except (ValueError, TypeError):
                    # Value is not valid JSON (or not a string): pass it through verbatim.
                    # Narrowed from a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    cmd += [str(parameter["Value"])]

        # Start from a copy of the current process env vars and overlay the
        # job-specific values (the old comment claiming no copy was wrong).
        env = os.environ.copy()
        env.update({"AUTOMATION_JOB_ID": str(self.job_data.job_id),
                    "AUTOMATION_ACTIVITY_ID": str(tracer.u_activity_id),
                    "PYTHONPATH": str(configuration.get_source_directory_path()),
                    "HOME": str(os.getcwd())})  # windows env have to be str (not unicode)
        self.runbook_subprocess = subprocessfactory.create_subprocess(cmd=cmd,
                                                                      env=env,
                                                                      stdout=subprocess.PIPE,
                                                                      stderr=subprocess.PIPE)
示例#8
0
def generate_state_file():
    """Write the worker state file (pid, worker version, optional workspace/resource info).

    Replaces any pre-existing state file at the resolved location.
    """
    state_file_name = "state.conf"
    if configuration.get_state_directory_path(
    ) == configuration.DEFAULT_STATE_DIRECTORY_PATH:
        state_file_path = os.path.join(
            configuration.get_working_directory_path(), state_file_name)
    else:
        state_file_path = os.path.join(
            configuration.get_state_directory_path(), state_file_name)

    tracer.log_debug_trace("State file path : " + str(state_file_path))

    if os.path.isfile(state_file_path):
        os.remove(state_file_path)

    section = "state"
    config = ConfigParser.ConfigParser()
    config.add_section(section)
    config.set(section, configuration.STATE_PID, str(os.getpid()))
    config.set(section, configuration.WORKER_VERSION,
               str(configuration.get_worker_version()))

    # for OMS scenarios, optional for DIY
    if len(sys.argv) >= 3:
        config.set(section, configuration.STATE_WORKSPACE_ID, str(sys.argv[2]))

    if len(sys.argv) >= 4:
        config.set(section, configuration.STATE_RESOURCE_VERSION,
                   str(sys.argv[3]))

    # with-statement closes the handle even if write() raises
    # (the original leaked the file object on an exception)
    with open(state_file_path, 'wb') as conf_file:
        config.write(conf_file)
示例#9
0
    def execute_runbook(self):
        """Executes the job runtime and performs runtime operation (stream upload / status change).

        Flow: mark the job RUNNING, spawn the runbook subprocess, stream its
        output on a daemon StreamHandler thread, poll for a pending STOP
        action while it runs, then report the terminal status (STOPPED,
        COMPLETED or FAILED) back through the JRDS client.
        """
        # set status to running
        tracer.log_debug_trace("Starting runbook.")
        self.jrds_client.set_job_status(self.sandbox_id, self.job_id,
                                        jobstatus.RUNNING, False)

        # create runbook subprocess
        self.runtime.start_runbook_subprocess()

        # monitor runbook output for streams
        stream_handler = StreamHandler(self.job_data,
                                       self.runtime.runbook_subprocess,
                                       self.jrds_client)
        # daemon thread: interpreter shutdown is never blocked on the reader
        stream_handler.daemon = True
        stream_handler.start()

        # wait for runbook execution to complete; keep looping while either
        # the stream reader is alive or the subprocess has not exited
        pending_action = None
        while stream_handler.isAlive(
        ) or self.runtime.runbook_subprocess.poll() is None:
            try:
                # non-blocking read of the pending-action queue; Queue.Empty
                # simply means no action was requested this tick
                pending_action = self.msg_queue.get(block=False)
                tracer.log_debug_trace("Pending action detected. " +
                                       str(pending_action))
                if pending_action == pendingactions.STOP_ENUM_INDEX:
                    self.jrds_client.set_job_status(self.sandbox_id,
                                                    self.job_id,
                                                    jobstatus.STOPPING, False)
                    self.runtime.kill_runbook_subprocess()
                    break
            except Queue.Empty:
                pass
            # 200 ms polling cadence
            time.sleep(0.2)

        # handle terminal state changes: STOP wins, then clean exit, else FAILED
        if pending_action == pendingactions.STOP_ENUM_INDEX:
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id,
                                            jobstatus.STOPPED, True)
            tracer.log_debug_trace("Completed - Stopped")
        elif self.runtime.runbook_subprocess.poll(
        ) is not None and self.runtime.runbook_subprocess.poll(
        ) == EXIT_SUCCESS:
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id,
                                            jobstatus.COMPLETED, True)
            tracer.log_debug_trace("Completed - Without error")
        else:
            # non-zero exit: forward the whole stderr content as the exception
            full_error_output = self.get_full_stderr_content(
                self.runtime.runbook_subprocess.stderr)
            self.jrds_client.set_job_status(self.sandbox_id,
                                            self.job_id,
                                            jobstatus.FAILED,
                                            True,
                                            exception=full_error_output)
            tracer.log_debug_trace("Completed - With error")
示例#10
0
    def run(self):
        """Monitor the job's subprocess for output (which will be uploaded as streams).

        Notes:
        PowerShell stdout : http://stackoverflow.com/questions/22349139/utf-8-output-from-powershell

        IMPORTANT: Do not log streams to cloud.
        """
        stream_count = 0
        while True:
            try:
                output = codecs.getwriter('utf8')(
                    self.runtime_process.stdout).readline().decode()
                error_output = codecs.getwriter('utf8')(
                    self.runtime_process.stderr).readline().decode()
                # both pipes drained and the process exited -> done
                if output == '' and error_output == '' and self.runtime_process.poll(
                ) is not None:
                    break
                if output:
                    # dispatch on the stream prefix; startswith() accepts a
                    # tuple, covering the lower/UPPER/Capitalized spellings in
                    # one call instead of three-way `or` chains
                    if output.startswith((PREFIX_DEBUG.lower(), PREFIX_DEBUG.upper(), PREFIX_DEBUG.capitalize())):
                        self.process_debug_stream(stream_count, output)
                    elif output.startswith((PREFIX_ERROR.lower(), PREFIX_ERROR.upper(), PREFIX_ERROR.capitalize())):
                        self.process_error_stream(stream_count, output)
                    elif output.startswith((PREFIX_VERBOSE.lower(), PREFIX_VERBOSE.upper(), PREFIX_VERBOSE.capitalize())):
                        self.process_verbose_stream(stream_count, output)
                    elif output.startswith((PREFIX_WARNING.lower(), PREFIX_WARNING.upper(), PREFIX_WARNING.capitalize())):
                        self.process_warning_stream(stream_count, output)
                    else:
                        self.process_output_stream(stream_count, output)
                    stream_count += 1

                    # leave trace at the end to prevent encoding issue from pushing streams to cloud
                    # leave this as debug trace to prevent logging customer streams to automation logs
                    tracer.log_debug_trace("STDOUT : " + str(output.strip()))
                if error_output:
                    self.process_error_stream(stream_count, error_output)
                    stream_count += 1
                    tracer.log_debug_trace("STDERR : " +
                                           str(error_output.strip()))
            except Exception:
                # narrowed from a bare except (which also caught
                # KeyboardInterrupt/SystemExit); keep the reader thread alive
                # on per-line failures such as decode errors
                tracer.log_sandbox_job_streamhandler_unhandled_exception(
                    self.job_data.job_id, traceback.format_exc())
                continue
        tracer.log_sandbox_job_streamhandler_processing_complete(
            self.job_data.job_id)
示例#11
0
    def stop_tracking_terminated_sandbox(self):
        """Drop every sandbox whose subprocess has exited from the tracking map."""
        finished = [sandbox_id
                    for sandbox_id, sandbox_process in self.running_sandboxes.items()
                    if sandbox_process.poll() is not None]

        for sandbox_id in finished:
            if self.running_sandboxes.pop(sandbox_id, None) is not None:
                tracer.log_debug_trace("Worker stopped tracking sandbox : " + str(sandbox_id))
示例#12
0
    def routine(self):
        """Start newly activated jobs and relay stop requests to running job threads."""
        # clean up finished jobs
        self.stop_tracking_terminated_jobs()

        # get job actions; a terminated sandbox ends the main loop
        try:
            job_actions = self.jrds_client.get_job_actions(self.sandbox_id)
        except JrdsSandboxTerminated:
            tracer.log_debug_trace("Terminating sandbox.")
            global routine_loop
            routine_loop = False
            return

        for job_action in job_actions:
            job_id = job_action["JobId"]
            job_data = self.jrds_client.get_job_data(job_id)
            pending = job_data["pendingAction"]
            status = job_data["jobStatus"]

            needs_activation = (pending == pendingactions.ACTIVATE_ENUM_INDEX or
                                (pending is None and status == jobstatus.ACTIVATING_ENUM_INDEX) or
                                (pending is None and status == jobstatus.RUNNING_ENUM_INDEX))

            if needs_activation:
                # already tracked -> avoid starting a duplicate thread
                if job_id in job_map:
                    continue

                message_queue = Queue()
                exception_queue = Queue()
                new_job = Job(self.sandbox_id, job_id, message_queue,
                              self.jrds_client, exception_queue)
                job_map[job_id] = (new_job, message_queue, exception_queue)
                new_job.start()
                tracer.log_debug_trace(
                    "Pending action activate detected.[pendingaction=" +
                    str(status) + "]")
            elif pending == pendingactions.STOP_ENUM_INDEX:
                # only stop jobs this sandbox is actually tracking
                if job_id not in job_map:
                    continue

                job_map[job_id][1].put(pending)
                tracer.log_debug_trace("Pending action detected")
            elif pending is None:
                tracer.log_debug_trace("No pending action detected")
            else:
                tracer.log_debug_trace(
                    "Unsupported pending action / job action")
示例#13
0
    def stop_tracking_terminated_sandbox(self):
        """Remove sandboxes whose subprocess has exited from the tracking dict."""
        # collect first: never mutate the dict while iterating it
        done = []
        for sb_id, proc in self.running_sandboxes.items():
            if proc.poll() is not None:
                done.append(sb_id)

        for sb_id in done:
            if self.running_sandboxes.pop(sb_id, None) is not None:
                tracer.log_debug_trace("Worker stopped tracking sandbox : " +
                                       str(sb_id))
示例#14
0
    def routine(self):
        """Fetch sandbox actions and start a sandbox subprocess for each new action."""
        self.stop_tracking_terminated_sandbox()

        sandbox_actions = self.jrds_client.get_sandbox_actions()
        tracer.log_debug_trace("Get sandbox action. Found " +
                               str(len(sandbox_actions)) + " action(s).")

        for action in sandbox_actions:
            tracer.log_worker_sandbox_action_found(len(sandbox_actions))
            sandbox_id = str(action["SandboxId"])

            # prevent duplicate sandbox from running
            # NOTE(review): `return` also defers the rest of this batch to the
            # next routine pass - confirm that is intended.
            if sandbox_id in self.running_sandboxes:
                return

            # create sandboxes folder if needed
            sandbox_working_dir = os.path.join(
                configuration.get_working_directory_path(),
                sandboxes_root_folder_name, sandbox_id)

            try:
                iohelper.assert_or_create_path(sandbox_working_dir)
            except OSError as exception:
                # "except OSError, exception" is Python-2-only syntax; "as"
                # parses on 2.6+ and 3.x (dead `pass` also removed)
                tracer.log_worker_failed_to_create_sandbox_root_folder(
                    sandbox_id, exception)

            # copy current process env variables (contains configuration) and add the sandbox_id key
            process_env_variables = os.environ.copy()
            process_env_variables["sandbox_id"] = sandbox_id

            cmd = [
                "python",
                os.path.join(configuration.get_source_directory_path(),
                             "sandbox.py")
            ]
            tracer.log_worker_starting_sandbox(sandbox_id)
            sandbox_process = subprocessfactory.create_subprocess(
                cmd=cmd,
                env=process_env_variables,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=sandbox_working_dir)
            self.running_sandboxes[sandbox_id] = sandbox_process
            tracer.log_worker_started_tracking_sandbox(sandbox_id)

            self.monitor_sandbox_process_outputs(sandbox_id, sandbox_process)
            tracer.log_worker_sandbox_process_started(sandbox_id,
                                                      str(sandbox_process.pid))
示例#15
0
    def routine(self):
        """Process job actions for this sandbox: start new jobs, forward stop requests."""
        # clean up finished jobs
        self.stop_tracking_terminated_jobs()

        # get job actions
        try:
            job_actions = self.jrds_client.get_job_actions(self.sandbox_id)
        except JrdsSandboxTerminated:
            tracer.log_debug_trace("Terminating sandbox.")
            global routine_loop
            routine_loop = False
            return

        for job_action in job_actions:
            job_id = job_action["JobId"]
            job_data = self.jrds_client.get_job_data(job_id)
            job_pending_action = job_data["pendingAction"]
            job_status = job_data["jobStatus"]

            activate = (
                job_pending_action == pendingactions.ACTIVATE_ENUM_INDEX
                or (job_pending_action is None
                    and job_status in (jobstatus.ACTIVATING_ENUM_INDEX,
                                       jobstatus.RUNNING_ENUM_INDEX)))

            if activate:
                if job_id in job_map:
                    # already running - do not start a duplicate
                    continue

                msg_queue = Queue()
                exc_queue = Queue()
                job = Job(self.sandbox_id, job_id, msg_queue, self.jrds_client, exc_queue)
                job_map[job_id] = (job, msg_queue, exc_queue)
                job.start()
                tracer.log_debug_trace("Pending action activate detected.[pendingaction=" +
                                       str(job_status) + "]")
            elif job_pending_action == pendingactions.STOP_ENUM_INDEX:
                if job_id not in job_map:
                    # not tracked here - nothing to stop
                    continue

                job_map[job_id][1].put(job_pending_action)
                tracer.log_debug_trace("Pending action detected")
            elif job_pending_action is None:
                tracer.log_debug_trace("No pending action detected")
            else:
                tracer.log_debug_trace("Unsupported pending action / job action")
示例#16
0
    def run(self):
        """Main job execution logic. This method returns when the job execution is completed.

        Throws:
            WorkerUnsupportedRunbookType  : If the language isn't supported by the worker.
            OSUnsupportedRunbookType      : If the language isn't supported by the host.
            Exception                     : Any unhandled exception.
        """
        try:
            self.load_job()
            self.initialize_runtime()
            self.execute_runbook()
            self.unload_job()
        except (WorkerUnsupportedRunbookType, OSUnsupportedRunbookType) as e:
            # "except (...), e" is Python-2-only syntax; "as" parses on 2.6+ and 3.x
            # NOTE(review): e.message is removed in Python 3 - str(e) is the portable spelling
            tracer.log_debug_trace("Runbook type not supported.")
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.FAILED, True, exception=e.message)
            self.unload_job()
示例#17
0
    def stop_tracking_terminated_jobs():
        """Forget finished job threads; surface any exception a job thread queued."""
        finished_ids = []

        for job_id, job_tuple in job_map.items():
            if job_tuple[0].isAlive():
                continue
            try:
                job_tuple[2].get(block=False)
            except Empty:
                # no queued exception - a clean finish
                finished_ids.append(job_id)
            else:
                # the job thread pushed an exception - escalate it
                raise SandboxRuntimeException()

        for job_id in finished_ids:
            if job_map.pop(job_id, None) is not None:
                tracer.log_debug_trace("Sandbox stopped tracking job : " + str(job_id))
示例#18
0
    def stop_tracking_terminated_jobs():
        """Drop finished job threads from job_map; re-raise queued job exceptions."""
        dead_jobs = []

        for job_id, job_tuple in job_map.items():
            if job_tuple[0].isAlive():
                continue
            try:
                job_tuple[2].get(block=False)
            except Empty:
                # exception queue empty - the job finished cleanly
                dead_jobs.append(job_id)
            else:
                raise SandboxRuntimeException()

        for job_id in dead_jobs:
            if job_map.pop(job_id, None) is not None:
                tracer.log_debug_trace("Sandbox stopped tracking job : " +
                                       str(job_id))
示例#19
0
    def monitor_sandbox_process_outputs(self, sandbox_id, process):
        """Relay the sandbox process stdout to the trace log until it exits.

        On a non-zero exit code, drains stderr and logs the content as a crash.
        """
        while process.poll() is None:
            output = process.stdout.readline().replace("\n", "")
            if output == '':
                continue
            # non-empty line: relay it (the original re-tested output != ''
            # redundantly right after the `continue`)
            tracer.log_sandbox_stdout(output)

        if process.poll() != 0:
            # gather the complete stderr for diagnostics
            full_error_output = ""
            while True:
                error_output = process.stderr.readline()
                if error_output is None or error_output == '':
                    break
                full_error_output += error_output
            tracer.log_debug_trace("Sandbox crashed : " + str(full_error_output))

        tracer.log_worker_sandbox_process_exited(sandbox_id, str(process.pid), process.poll())
示例#20
0
    def routine(self):
        """Poll JRDS for sandbox actions and spawn one sandbox subprocess per new action."""
        self.stop_tracking_terminated_sandbox()

        sandbox_actions = self.jrds_client.get_sandbox_actions()
        tracer.log_debug_trace("Get sandbox action. Found " +
                               str(len(sandbox_actions)) + " action(s).")

        for action in sandbox_actions:
            tracer.log_worker_sandbox_action_found(len(sandbox_actions))
            sandbox_id = str(action["SandboxId"])

            # prevent duplicate sandbox from running
            # NOTE(review): `return` also skips the remaining actions in this
            # batch (picked up on the next pass) - confirm intended.
            if sandbox_id in self.running_sandboxes:
                return

            # create sandboxes folder if needed
            sandboxes_base_path = "sandboxes"
            sandbox_working_dir = os.path.join(
                configuration.get_working_directory_path(),
                sandboxes_base_path, sandbox_id)

            try:
                iohelper.assert_or_create_path(sandbox_working_dir)
            except OSError:
                # best-effort: sandbox startup will surface a real failure
                # (dead `pass` removed)
                tracer.log_debug_trace("Failed to create sandbox folder.")

            cmd = [
                "python",
                os.path.join(configuration.get_source_directory_path(),
                             "sandbox.py")
            ]
            process_env_variables = {"sandbox_id": sandbox_id}
            sandbox_process = subprocessfactory.create_subprocess(
                cmd=cmd,
                env=process_env_variables,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=sandbox_working_dir)
            self.running_sandboxes[sandbox_id] = sandbox_process
            self.monitor_sandbox_process_outputs(sandbox_id, sandbox_process)
            tracer.log_worker_starting_sandbox(sandbox_id,
                                               str(sandbox_process.pid))
    def run(self):
        """Monitor the job's subprocess for output (which will be uploaded as streams).

        Notes:
        PowerShell stdout : http://stackoverflow.com/questions/22349139/utf-8-output-from-powershell

        IMPORTANT: Do not log streams to cloud.
        """
        stream_count = 0
        while True:
            try:
                output = codecs.getwriter('utf8')(self.runtime_process.stdout).readline()
                # empty read and exited process -> stdout is fully drained
                if output == '' and self.runtime_process.poll() is not None:
                    break
                elif output:
                    # startswith() takes a tuple of prefixes, replacing the
                    # three-way lower/UPPER/Capitalized `or` chains
                    if output.startswith((PREFIX_DEBUG.lower(), PREFIX_DEBUG.upper(), PREFIX_DEBUG.capitalize())):
                        self.process_debug_stream(stream_count, output)
                    elif output.startswith((PREFIX_ERROR.lower(), PREFIX_ERROR.upper(), PREFIX_ERROR.capitalize())):
                        self.process_error_stream(stream_count, output)
                    elif output.startswith((PREFIX_VERBOSE.lower(), PREFIX_VERBOSE.upper(), PREFIX_VERBOSE.capitalize())):
                        self.process_verbose_stream(stream_count, output)
                    elif output.startswith((PREFIX_WARNING.lower(), PREFIX_WARNING.upper(), PREFIX_WARNING.capitalize())):
                        self.process_warning_stream(stream_count, output)
                    else:
                        self.process_output_stream(stream_count, output)
                    stream_count += 1

                    # leave trace at the end to prevent encoding issue from pushing streams to cloud
                    # leave this as debug trace to prevent logging customer streams to automation logs
                    tracer.log_debug_trace("STDOUT : " + str(output.strip()))
            except Exception:
                # narrowed from a bare except (which also caught
                # KeyboardInterrupt/SystemExit)
                tracer.log_exception_trace(traceback.format_exc())
                continue
        tracer.log_debug_trace("Stream processing complete.")
示例#22
0
    def monitor_sandbox_process_outputs(self, sandbox_id, process):
        """Relay sandbox stdout to the trace log until the process exits.

        On a non-zero exit code, drains stderr and logs it as a crash.
        """
        while process.poll() is None:
            output = process.stdout.readline().replace("\n", "")
            if output == '':
                continue
            # non-empty line (the original redundantly re-tested output != '')
            tracer.log_sandbox_stdout(output)

        if process.poll() != 0:
            # gather the complete stderr content for the crash trace
            full_error_output = ""
            while True:
                error_output = process.stderr.readline()
                if error_output is None or error_output == '':
                    break
                full_error_output += error_output
            tracer.log_debug_trace("Sandbox crashed : " +
                                   str(full_error_output))

        tracer.log_worker_sandbox_process_exited(sandbox_id, str(process.pid),
                                                 process.poll())
示例#23
0
    def run(self):
        """Main job execution logic. This method returns when the job execution is completed.

        Throws:
            WorkerUnsupportedRunbookType  : If the language isn't supported by the worker.
            OSUnsupportedRunbookType      : If the language isn't supported by the host.
            Exception                     : Any unhandled exception.
        """
        try:
            self.load_job()
            self.initialize_runtime()
            self.execute_runbook()
            self.unload_job()
        except (WorkerUnsupportedRunbookType, OSUnsupportedRunbookType) as e:
            # "except (...), e" is Python-2-only syntax; "as" parses on 2.6+ and 3.x
            # NOTE(review): e.message is removed in Python 3 - str(e) is the portable spelling
            tracer.log_debug_trace("Runbook type not supported.")
            self.jrds_client.set_job_status(self.sandbox_id,
                                            self.job_id,
                                            jobstatus.FAILED,
                                            True,
                                            exception=e.message)
            self.unload_job()
示例#24
0
    def execute_runbook(self):
        """Executes the job runtime and performs runtime operation (stream upload / status change).

        Flow: mark the job RUNNING, spawn the runbook subprocess, stream its
        output on a daemon StreamHandler thread, poll for a pending STOP
        action while it runs, then report the terminal status (STOPPED,
        COMPLETED or FAILED) back through the JRDS client.
        """
        # set status to running
        tracer.log_debug_trace("Starting runbook.")
        self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.RUNNING, False)

        # create runbook subprocess
        self.runtime.start_runbook_subprocess()

        # monitor runbook output for streams
        stream_handler = StreamHandler(self.job_data, self.runtime.runbook_subprocess, self.jrds_client)
        # daemon thread: interpreter shutdown is never blocked on the reader
        stream_handler.daemon = True
        stream_handler.start()

        # wait for runbook execution to complete; loop while either the stream
        # reader is alive or the subprocess has not exited
        pending_action = None
        while stream_handler.isAlive() or self.runtime.runbook_subprocess.poll() is None:
            try:
                # non-blocking read; Queue.Empty simply means no action this tick
                pending_action = self.msg_queue.get(block=False)
                tracer.log_debug_trace("Pending action detected. " + str(pending_action))
                if pending_action == pendingactions.STOP_ENUM_INDEX:
                    self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.STOPPING, False)
                    self.runtime.kill_runbook_subprocess()
                    break
            except Queue.Empty:
                pass
            # 200 ms polling cadence
            time.sleep(0.2)

        # handle terminal state changes: STOP wins, then clean exit, else FAILED
        if pending_action == pendingactions.STOP_ENUM_INDEX:
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.STOPPED, True)
            tracer.log_debug_trace("Completed - Stopped")
        elif self.runtime.runbook_subprocess.poll() is not None and self.runtime.runbook_subprocess.poll() == EXIT_SUCCESS:
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.COMPLETED, True)
            tracer.log_debug_trace("Completed - Without error")
        else:
            # non-zero exit: forward the whole stderr content as the exception
            full_error_output = self.get_full_stderr_content(self.runtime.runbook_subprocess.stderr)
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.FAILED, True,
                                            exception=full_error_output)
            tracer.log_debug_trace("Completed - With error")
示例#25
0
def verify_signature(signed_file_path, output_file_path):
    """Verifies the signed file's signature.

    Args:
        signed_file_path : string, path of the gpg-signed input file.
        output_file_path : string, path where gpg writes the verified payload.

    Returns:
        True    : If the signature is valid.
        False   : If the signature is invalid.
    """
    # Copy the base command before appending: "cmd += [...]" on the shared
    # module-level list would mutate GPG_DECRYPT_BASE_CMD in place and make
    # every subsequent call inherit the previous call's arguments.
    cmd = list(GPG_DECRYPT_BASE_CMD)
    keyring_path = configuration.get_gpg_public_keyring_path()

    # if a keyring is specified in the conf, use it, else use default one
    if keyring_path != "":
        cmd += [GPG_NO_DEFAULT_KEYRING_OPTION, GPG_KEYRING_ARG, keyring_path]
    cmd += ["--output", output_file_path, signed_file_path]

    proc = subprocessfactory.create_subprocess(cmd=cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    # communicate() waits for process exit, so poll() returns the final code
    if proc.poll() == 0:
        tracer.log_debug_trace("Signature is valid.")
        return True

    tracer.log_debug_trace("Signature is invalid.[exception=" + str(stderr) + "]")
    return False
示例#26
0
 def unload_job(self):
     """Signals JRDS that this job is done and can be unloaded."""
     subscription_id = self.job_data["subscriptionId"]
     is_draft = self.job_updatable_data["isDraft"]
     self.jrds_client.unload_job(subscription_id, self.sandbox_id, self.job_id,
                                 is_draft, datetime.now(), 2)
     tracer.log_debug_trace("Unloading job.")
示例#27
0
            self.jrds_client.set_job_status(self.sandbox_id,
                                            self.job_id,
                                            jobstatus.FAILED,
                                            True,
                                            exception=e.message)
            self.unload_job()
        except InvalidRunbookSignature, e:
            tracer.log_debug_trace("Runbook signature is invalid.")
            self.jrds_client.set_job_status(self.sandbox_id,
                                            self.job_id,
                                            jobstatus.FAILED,
                                            True,
                                            exception=e.message)
            self.unload_job()
        except Exception:
            tracer.log_debug_trace("Job runtime unhandled exception.")
            tracer.log_exception_trace(traceback.format_exc())
            self.job_thread_exception_queue.put(sys.exc_info())

    def execute_runbook(self):
        """Executes the job runtime and performs runtime operation (stream upload / status change)."""
        # set status to running
        tracer.log_debug_trace("Starting runbook.")
        self.jrds_client.set_job_status(self.sandbox_id, self.job_id,
                                        jobstatus.RUNNING, False)

        # create runbook subprocess
        self.runtime.start_runbook_subprocess()

        # monitor runbook output for streams
        stream_handler = StreamHandler(self.job_data,
示例#28
0
class Job(Thread):
    """Job class."""
    def __init__(self, sandbox_id, job_id, msg_queue, jrds_client, job_thread_exception_queue):
        """Initializes the job thread; runs as a daemon so it never blocks shutdown."""
        super(Job, self).__init__()
        self.daemon = True

        # identifiers and channels handed in by the owning sandbox
        self.sandbox_id = sandbox_id
        self.job_id = job_id
        self.msg_queue = msg_queue
        self.jrds_client = jrds_client
        self.job_thread_exception_queue = job_thread_exception_queue

        # populated later by load_job() / initialize_runtime()
        self.job_data = None
        self.job_updatable_data = None
        self.runbook_data = None
        self.runbook = None
        self.runtime = None

    def load_job(self):
        """Load all required artifact for the job to be executed."""
        self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.ACTIVATING, False)

        # fetch the job metadata, its mutable state, and the runbook it targets
        self.job_data = self.jrds_client.get_job_data(self.job_id)
        self.job_updatable_data = self.jrds_client.get_job_updatable_data(self.job_id)
        runbook_version_id = self.job_data["runbookVersionId"]
        self.runbook_data = self.jrds_client.get_runbook_data(runbook_version_id)

    def initialize_runtime(self):
        """Initializes the runtime component for the job. The runtime component is language specific."""
        runbook, runtime = runtimefactory.create_runtime(self.job_data, self.runbook_data)
        self.runbook = runbook
        self.runtime = runtime

    def run(self):
        """Main job execution logic. This methods returns when the job execution is completed.

        Throws:
            WorkerUnsupportedRunbookType  : If the language isn't supported by by the worker.
            OSUnsupportedRunbookType      : If the language isn't supported by by the host.
            Exception                     : Any unhandled exception.
        """
        try:
            self.load_job()
            self.initialize_runtime()
            self.execute_runbook()
            self.unload_job()
        except (WorkerUnsupportedRunbookType, OSUnsupportedRunbookType), e:
            tracer.log_debug_trace("Runbook type not supported.")
            self.jrds_client.set_job_status(self.sandbox_id,
                                            self.job_id,
                                            jobstatus.FAILED,
                                            True,
                                            exception=e.message)
            self.unload_job()
        except InvalidRunbookSignature, e:
            tracer.log_debug_trace("Runbook signature is invalid.")
            self.jrds_client.set_job_status(self.sandbox_id,
                                            self.job_id,
                                            jobstatus.FAILED,
                                            True,
                                            exception=e.message)
            self.unload_job()
示例#29
0
def generate_state_file():
    """Writes the worker state file (pid, worker version, optional OMS data).

    The file goes to the configured state directory when one is set,
    otherwise to the working directory. On non-Windows hosts running as
    the nxautomation user, ownership and permissions are adjusted so the
    OMS agent can read it.
    """
    state_file_name = "state.conf"
    if configuration.get_state_directory_path() == configuration.DEFAULT_STATE_DIRECTORY_PATH:
        state_file_path = os.path.join(configuration.get_working_directory_path(), state_file_name)
    else:
        state_file_path = os.path.join(configuration.get_state_directory_path(), state_file_name)

    tracer.log_debug_trace("State file path : " + str(state_file_path))

    # always start from a fresh file
    if os.path.isfile(state_file_path):
        os.remove(state_file_path)

    section = "state"
    config = ConfigParser.ConfigParser()
    config.add_section(section)
    config.set(section, configuration.STATE_PID, str(os.getpid()))
    config.set(section, configuration.WORKER_VERSION, str(configuration.get_worker_version()))

    # for OMS scenarios, optional for DIY
    if len(sys.argv) >= 3:
        config.set(section, configuration.STATE_WORKSPACE_ID, str(sys.argv[2]))

    if len(sys.argv) >= 4:
        config.set(section, configuration.STATE_RESOURCE_VERSION, str(sys.argv[3]))

    # context manager guarantees the handle is closed even if write() raises
    # (the original open/close pair leaked the descriptor on failure)
    with open(state_file_path, 'wb') as conf_file:
        config.write(conf_file)

    # OMS integration
    # set the ownership of the state file to nxautomation:omiusers
    # set the permission of the state file to 660
    if os.name.lower() != "nt":
        import pwd
        try:
            nxautomation_uid = int(pwd.getpwnam('nxautomation').pw_uid)
            if os.getuid() == nxautomation_uid:
                retval = subprocess.call(["sudo", "chown", "nxautomation:omiusers", state_file_path])
                if retval != 0:
                    exit_on_error("Could not change owner of state file %s to nxautomation:omiusers"
                                  % (state_file_path))

                retval = subprocess.call(["sudo", "chmod", "660", state_file_path])
                if retval != 0:
                    exit_on_error("Could not change permission of state file %s " % (state_file_path))
        except KeyError:
            # nxautomation user was not found on the system, skip this step
            tracer.log_debug_trace("State file permission change skipped. nxautomation user not found.")
            pass
示例#30
0
        """
        try:
            self.load_job()
            self.initialize_runtime()
            self.execute_runbook()
            self.unload_job()
        except (WorkerUnsupportedRunbookType, OSUnsupportedRunbookType), e:
            tracer.log_debug_trace("Runbook type not supported.")
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.FAILED, True, exception=e.message)
            self.unload_job()
        except InvalidRunbookSignature, e:
            tracer.log_debug_trace("Runbook signature is invalid.")
            self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.FAILED, True, exception=e.message)
            self.unload_job()
        except Exception:
            tracer.log_debug_trace("Job runtime unhandled exception.")
            tracer.log_exception_trace(traceback.format_exc())
            self.job_thread_exception_queue.put(sys.exc_info())

    def execute_runbook(self):
        """Executes the job runtime and performs runtime operation (stream upload / status change)."""
        # set status to running
        tracer.log_debug_trace("Starting runbook.")
        self.jrds_client.set_job_status(self.sandbox_id, self.job_id, jobstatus.RUNNING, False)

        # create runbook subprocess
        self.runtime.start_runbook_subprocess()

        # monitor runbook output for streams
        stream_handler = StreamHandler(self.job_data, self.runtime.runbook_subprocess, self.jrds_client)
        stream_handler.daemon = True
示例#31
0
def validate_and_setup_path():
    """Validates every configured path the worker depends on.

    Checks the JRDS certificate/key files, the working directory (existence
    and write permission), any non-default gpg public keyrings, and the state
    directory; each failed check terminates the process via exit_on_error().
    On non-Windows hosts running as the nxautomation user, the working
    directory is also chowned to nxautomation:omiusers for OMS integration.
    """
    # default to user dir for exception logs to be written to disk
    test_file_name = "test_file"

    # test certificate and key path
    if not os.path.isfile(configuration.get_jrds_cert_path()) or \
            not os.path.isfile(configuration.get_jrds_key_path()):
        # message typo fixed: "certificate of key" -> "certificate or key"
        exit_on_error("Invalid certificate or key file path (absolute path is required).")

    # test working directory for existence and permissions
    working_directory_path = configuration.get_working_directory_path()
    if not os.path.exists(working_directory_path):
        exit_on_error("Invalid working directory path (absolute path is required).")

    file_creation = test_file_creation(os.path.join(working_directory_path, test_file_name))
    if file_creation is False:
        exit_on_error("Invalid working directory permission (read/write permissions are required).")

    # test keyring paths (the default keyring may legitimately not exist yet)
    keyrings = configuration.get_gpg_public_keyrings_path()
    for keyring_path in keyrings:
        if keyring_path != configuration.DEFAULT_GPG_PUBLIC_KEYRING_PATH and not os.path.isfile(keyring_path):
            exit_on_error("Invalid gpg public keyring path (absolute path is required).")

    # test state file path
    if configuration.get_state_directory_path() != configuration.DEFAULT_STATE_DIRECTORY_PATH:
        if not os.path.exists(configuration.get_state_directory_path()):
            exit_on_error("Invalid state directory path (absolute path is required).")

        file_creation = test_file_creation(
            os.path.join(configuration.get_state_directory_path(), test_file_name))
        if file_creation is False:
            exit_on_error("Invalid state directory permission (read/write permissions are required).")

    # OMS integration
    # set the working directory owner to be nxautomation:omiusers
    if os.name.lower() != "nt":
        import pwd
        try:
            nxautomation_uid = int(pwd.getpwnam('nxautomation').pw_uid)
            if os.getuid() == nxautomation_uid:
                retval = subprocess.call(["sudo", "chown", "-R", "nxautomation:omiusers",
                                          working_directory_path])
                if retval != 0:
                    exit_on_error("Could not change owner of working directory %s to nxautomation:omiusers"
                                  % (working_directory_path))
        except KeyError:
            # nxautomation user was not found on the system, skip this step
            tracer.log_debug_trace("Ownership change of working directory skipped. nxautomation user not found.")
            pass