Example #1
    def _add_to_job_object(self):
        global _global_process_job_handle
        if _global_process_job_handle is not None:
            #This means that we are creating another process family - we'll all be in the same job
            return

        already_in_job = win32job.IsProcessInJob(win32api.GetCurrentProcess(), None)

        #Create a new job and put us in it before we create any children
        logger.debug("Creating job object and adding parent process to it")
        security_attrs = win32security.SECURITY_ATTRIBUTES()
        security_attrs.bInheritHandle = 0
        _global_process_job_handle = win32job.CreateJobObject(security_attrs, self.get_job_object_name())
        extended_info = win32job.QueryInformationJobObject(_global_process_job_handle, win32job.JobObjectExtendedLimitInformation)
        extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
        win32job.SetInformationJobObject(_global_process_job_handle, win32job.JobObjectExtendedLimitInformation, extended_info)
        try:
            win32job.AssignProcessToJobObject(_global_process_job_handle, win32api.GetCurrentProcess())
        except Exception as e:
            winv = sys.getwindowsversion()
            logger.error("Error raised during assignment of the current process to a new job object. " +\
                         "The process %s already in a job. " +\
                         "The windows version is %d.%d.\nError: %s",
                            "is" if already_in_job else "is not",
                            winv.major,
                            winv.minor,
                            _exception_str())

            if already_in_job and (winv.major, winv.minor) < (6, 2):
                raise JobObjectAssignError("On Windows versions older than Windows 8 / Windows Server 2012, ProcessFamily relies on the parent process NOT being in a job already", e, already_in_job)

            raise JobObjectAssignError("Error raised during assignment of the current process to a new job object.", e, already_in_job)


        logger.debug("Added to job object")
Example #2
    def _start(self, argv):
        """In most cases, just call subprocess.Popen(). On windows,
        add the started process to a new Job Object, so that any
        child processes of this process can be killed with a single
        call to TerminateJobObject (see self.stop()).
        """
        proc = Popen(argv)

        if os.sys.platform == "win32":
            # Create a job object with the "kill on job close"
            # flag; this is inherited by child processes (ie
            # the mongod started on our behalf by buildlogger)
            # and lets us terminate the whole tree of processes
            # rather than orphaning the mongod.
            import win32job

            self.job_object = win32job.CreateJobObject(None, '')

            job_info = win32job.QueryInformationJobObject(
                self.job_object, win32job.JobObjectExtendedLimitInformation)
            job_info['BasicLimitInformation'][
                'LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(
                self.job_object, win32job.JobObjectExtendedLimitInformation,
                job_info)

            win32job.AssignProcessToJobObject(self.job_object, proc._handle)

        return proc
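
The docstring above points at self.stop(); that method is not shown here, but a minimal sketch of it, assuming the Popen object returned by _start() and the job_object saved on self, could look like this:

    def stop(self, proc):
        # Hypothetical counterpart to _start(): on Windows, terminating the
        # job object kills proc and every descendant it spawned.
        if os.sys.platform == "win32":
            import win32job
            win32job.TerminateJobObject(self.job_object, 1)
        else:
            proc.terminate()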
Example #3
    def limit_memory(self, memory: int) -> None:
        """Limit's the memory of this process."""
        # https://stackoverflow.com/a/54958772/5332072

        # First try to import stuff, an easy exception to catch and give good
        # information about
        try:
            import win32job
        except ModuleNotFoundError as e:
            raise ModuleNotFoundError(_win32_import_error_msg) from e

        job = self.job()

        # Get the information for the job object we created
        enum_for_info = win32job.JobObjectExtendedLimitInformation
        info = win32job.QueryInformationJobObject(job, enum_for_info)

        # This appears to be in bytes, judging by Python code on GitHub that
        # uses "JOB_OBJECT_LIMIT_JOB_MEMORY"
        info["ProcessMemoryLimit"] = memory

        # Mask of which limit flag to set
        mask_limit_memory = win32job.JOB_OBJECT_LIMIT_PROCESS_MEMORY
        info["BasicLimitInformation"]["LimitFlags"] |= mask_limit_memory

        # Finally set the new information
        win32job.SetInformationJobObject(job, enum_for_info, info)
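
The self.job() helper is not shown above; a hedged sketch of what it presumably does (create a job object once, assign the wrapped process to it, and cache the handle), with self.pid standing in for however the target process id is stored:

    def job(self):
        # Hypothetical helper: lazily create and cache a job object holding
        # the wrapped process, so limit_memory()/limit_cpu_time() can adjust it.
        import win32api, win32con, win32job

        if getattr(self, "_job", None) is None:
            self._job = win32job.CreateJobObject(None, "")
            perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
            hproc = win32api.OpenProcess(perms, False, self.pid)
            win32job.AssignProcessToJobObject(self._job, hproc)
            win32api.CloseHandle(hproc)
        return self._job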
Example #4
    def run(self):
        logger.info("Started App usage tracker thread")
        file_loc = os.path.join(os.path.dirname(__file__), "apps-usage",
                                "apps-usage.exe")
        try:
            hJob = win32job.CreateJobObject(None, "")
            extended_info = win32job.QueryInformationJobObject(
                hJob, win32job.JobObjectExtendedLimitInformation)
            extended_info['BasicLimitInformation'][
                'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(
                hJob, win32job.JobObjectExtendedLimitInformation,
                extended_info)

            p = subprocess.Popen(f"{file_loc} \"{ROOT_DIR}\"", shell=False)
            perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
            hProcess = win32api.OpenProcess(perms, False, p.pid)

            win32job.AssignProcessToJobObject(hJob, hProcess)
            while not self.kill:
                time.sleep(0.5)
            p.terminate()
            p.kill()
        except Exception as e:
            logger.error(f"Error Starting Apps-Usage. {e} ")
        logger.warning("Stopped app usage tracking thread")
Example #5
    def get_response_object(self):
        if self.path.startswith('/injob'):
            return json.dumps(win32job.IsProcessInJob(
                win32api.GetCurrentProcess(), None),
                              indent=3)
        if self.path.startswith('/job'):
            extended_info = win32job.QueryInformationJobObject(
                None, win32job.JobObjectExtendedLimitInformation)

            return json.dumps(extended_info, indent=3)
        if self.path.startswith('/close_file_and_delete_it'):
            try:
                if self.funkyserver._open_file_handle is not None:
                    f = os.path.join(os.path.dirname(__file__), 'tmp',
                                     'testfile.txt')
                    logging.info("Closing test file handle")
                    self.funkyserver._open_file_handle.close()
                    self.funkyserver._open_file_handle = None
                    assert os.path.exists(f)
                    os.remove(f)
                    assert not os.path.exists(f)
                    return "OK"
            except Exception as e:
                logging.error(
                    "Failed to close file handle and delete file: %s\n%s", e,
                    _traceback_str())
                return "FAIL"
        if self.path.startswith('/stop'):
            logging.info("Returning child_processes_terminated: %r",
                         self.funkyserver.child_processes_terminated)
            return repr(self.funkyserver.child_processes_terminated)
        return "OK"
Example #6
    def SvcDoRun(self):
        if hasattr(sys, "frozen"):
            this_dir = os.path.dirname(win32api.GetModuleFileName(None))
        else:
            this_dir = os.path.dirname(os.path.abspath(__file__))
        # TODO: maybe it is better to run this in a job object too
        with open(os.path.join(this_dir, 'npm.log'), 'w') as npm_log:
            subprocess.check_call('npm install',
                                  cwd=this_dir,
                                  shell=True,
                                  stdin=None,
                                  stdout=npm_log,
                                  stderr=subprocess.STDOUT)

        security_attributes = win32security.SECURITY_ATTRIBUTES()
        security_attributes.bInheritHandle = True
        startup = win32process.STARTUPINFO()
        startup.dwFlags |= win32process.STARTF_USESTDHANDLES
        startup.hStdInput = None
        startup.hStdOutput = win32file.CreateFile(
            os.path.join(this_dir, "service_stdout.log"),
            win32file.GENERIC_WRITE, win32file.FILE_SHARE_READ,
            security_attributes, win32file.CREATE_ALWAYS, 0, None)
        startup.hStdError = win32file.CreateFile(
            os.path.join(this_dir, "service_stderr.log"),
            win32file.GENERIC_WRITE, win32file.FILE_SHARE_READ,
            security_attributes, win32file.CREATE_ALWAYS, 0, None)
        (hProcess, hThread, processId, threadId) = win32process.CreateProcess(
            None, r'"C:\Program Files\nodejs\node.exe" node_worker.js', None,
            None, True, win32process.CREATE_SUSPENDED
            | win32process.CREATE_BREAKAWAY_FROM_JOB, None, this_dir, startup)

        assert not win32job.IsProcessInJob(hProcess, None)

        self.hJob = win32job.CreateJobObject(None, "")
        extended_info = win32job.QueryInformationJobObject(
            self.hJob, win32job.JobObjectExtendedLimitInformation)
        extended_info['BasicLimitInformation'][
            'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE | win32job.JOB_OBJECT_LIMIT_BREAKAWAY_OK
        win32job.SetInformationJobObject(
            self.hJob, win32job.JobObjectExtendedLimitInformation,
            extended_info)
        win32job.AssignProcessToJobObject(self.hJob, hProcess)

        win32process.ResumeThread(hThread)
        win32api.CloseHandle(hThread)

        signalled = win32event.WaitForMultipleObjects(
            [self.hWaitStop, hProcess], False, win32event.INFINITE)
        if signalled == win32event.WAIT_OBJECT_0 + 1 and win32process.GetExitCodeProcess(
                hProcess) != 0:
            servicemanager.LogErrorMsg(
                self._svc_name_ + " process exited with non-zero status " +
                str(win32process.GetExitCodeProcess(hProcess)))
        win32api.CloseHandle(hProcess)
        win32api.CloseHandle(self.hJob)
        win32api.CloseHandle(self.hWaitStop)
        win32api.CloseHandle(startup.hStdOutput)
        win32api.CloseHandle(startup.hStdError)
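
The matching SvcStop is not shown; a minimal sketch of it, assuming the usual hWaitStop event created in __init__, simply wakes the WaitForMultipleObjects call above, after which the kill-on-close job tears down node_worker.js:

    def SvcStop(self):
        # Hedged sketch: signal the stop event so SvcDoRun's wait returns and
        # closing self.hJob (kill-on-close) terminates the node process tree.
        win32event.SetEvent(self.hWaitStop)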
Example #7
def create_job(hProcess):
    hJob = win32job.CreateJobObject(None, "")
    extended_info = win32job.QueryInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation)
    extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    win32job.SetInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation, extended_info)
    win32job.AssignProcessToJobObject(hJob, hProcess)

    return hJob
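
A hedged sketch of a typical call site for create_job(): the process handle passed in must have been opened with PROCESS_SET_QUOTA and PROCESS_TERMINATE access for AssignProcessToJobObject to succeed.

import subprocess
import win32api, win32con, win32job

child = subprocess.Popen(["notepad.exe"])
perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
hProcess = win32api.OpenProcess(perms, False, child.pid)
hJob = create_job(hProcess)   # keep hJob alive for as long as the child should live
win32api.CloseHandle(hProcess)
# Later, win32job.TerminateJobObject(hJob, 1) kills the child and its descendants.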
Example #8
def limit_memory(memory_limit):
    if g_hjob is None:
        return
    info = win32job.QueryInformationJobObject(
        g_hjob, win32job.JobObjectExtendedLimitInformation)
    info['ProcessMemoryLimit'] = memory_limit
    info['BasicLimitInformation']['LimitFlags'] |= (
        win32job.JOB_OBJECT_LIMIT_PROCESS_MEMORY)
    win32job.SetInformationJobObject(
        g_hjob, win32job.JobObjectExtendedLimitInformation, info)
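
g_hjob is initialized elsewhere; a hedged sketch of how it might be set up, so the memory limit applies to the current process and anything it spawns:

import win32api, win32con, win32job

def assign_job():
    # Sketch only: create an anonymous job, put the current process in it,
    # and publish the handle in the module-level g_hjob checked above.
    global g_hjob
    hjob = win32job.CreateJobObject(None, "")
    perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
    hprocess = win32api.OpenProcess(perms, False, win32api.GetCurrentProcessId())
    try:
        win32job.AssignProcessToJobObject(hjob, hprocess)
        g_hjob = hjob
    finally:
        win32api.CloseHandle(hprocess)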
Example #9
 def __init__(self, suffix):
     self.hJob = win32job.CreateJobObject(None,
                                          "SupervisorJob{}".format(suffix))
     extended_info = win32job.QueryInformationJobObject(
         self.hJob, win32job.JobObjectExtendedLimitInformation)
     extended_info['BasicLimitInformation'][
         'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
     win32job.SetInformationJobObject(
         self.hJob, win32job.JobObjectExtendedLimitInformation,
         extended_info)
Example #10
 def _create_job_object(self):
     """Create a new anonymous job object"""
     hjob = win32job.CreateJobObject(None, "")
     extended_info = win32job.QueryInformationJobObject(
         hjob, win32job.JobObjectExtendedLimitInformation)
     extended_info['BasicLimitInformation'][
         'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
     win32job.SetInformationJobObject(
         hjob, win32job.JobObjectExtendedLimitInformation, extended_info)
     return hjob
Example #11
    def attach_queue_to_stdout(self):
        start_ts = time.time()
        while time.time() - start_ts < self.START_SECONDS_DEFAULT:
            if self.is_suprocess_started:

                hJob = win32job.CreateJobObject(None, "")
                extended_info = win32job.QueryInformationJobObject(
                    hJob, win32job.JobObjectExtendedLimitInformation)
                extended_info['BasicLimitInformation'][
                    'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
                win32job.SetInformationJobObject(
                    hJob, win32job.JobObjectExtendedLimitInformation,
                    extended_info)

                perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
                hProcess = win32api.OpenProcess(perms, False, self._pipe.pid)
                win32job.AssignProcessToJobObject(hJob, hProcess)

                self.log.debug("attaching queue to stdout")
                while True:
                    try:
                        if self._stopped:
                            break
                        if self._start_failed:
                            break
                        gc.disable()
                        nextline = self._pipe.stdout.readline()
                        if nextline == '':  # and self._pipe.poll() is not None:
                            time.sleep(0.05)
                            continue
                        self.log.debug("got from stdout: %s" %
                                       nextline.strip())
                        try:
                            self._stdout_queue.put(nextline.strip())
                        except Exception as e:
                            self.log.exception(
                                "could not put result to stdout queue, reason: %s"
                                % e)
                        gc.enable()

                    except AttributeError:
                        self.log.exception("stdout queue broken")
                        break
                    finally:
                        gc.enable()
                #if self._pipe:
                #    self._pipe.stdout.close()
            else:
                if not self._stopped:
                    self.log.warning(
                        "pipe is None; can't attach queue to stdout")

            time.sleep(0.2)
Example #12
    def start(self):

        argv, env = [self.executable] + self.arguments, self.env

        if self.env_vars:
            if not env:
                env = os.environ.copy()
            env.update(self.env_vars)

        creation_flags = 0
        if os.sys.platform == "win32":
            # Magic number needed to allow job reassignment in Windows 7
            # see: MSDN - Process Creation Flags - ms684863
            CREATE_BREAKAWAY_FROM_JOB = 0x01000000
            creation_flags = CREATE_BREAKAWAY_FROM_JOB

        stdout = sys.stdout if not self.logger else subprocess.PIPE
        stderr = sys.stderr if not self.logger else subprocess.PIPE

        self.subprocess = subprocess.Popen(argv, env=env, creationflags=creation_flags,
                                           stdout=stdout, stderr=stderr)

        if stdout == subprocess.PIPE:
            self.stdout_logger = LoggerPipe(self.logger, logging.INFO, self.subprocess.stdout)
            self.stdout_logger.wait_until_started()
        if stderr == subprocess.PIPE:
            self.stderr_logger = LoggerPipe(self.logger, logging.ERROR, self.subprocess.stderr)
            self.stderr_logger.wait_until_started()

        if os.sys.platform == "win32":

            # Create a job object with the "kill on job close" flag
            # This is inherited by child processes (i.e. the mongod started on our behalf by
            # buildlogger) and lets us terminate the whole tree of processes rather than
            # orphaning the mongod.
            import win32job

            job_object = win32job.CreateJobObject(None, '')

            job_info = win32job.QueryInformationJobObject(
                job_object,
                win32job.JobObjectExtendedLimitInformation)
            job_info['BasicLimitInformation']['LimitFlags'] |= \
                win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(job_object,
                                             win32job.JobObjectExtendedLimitInformation,
                                             job_info)
            win32job.AssignProcessToJobObject(job_object, self.subprocess._handle)

            self.subprocess_job_object = job_object
Example #13
    def _createParentJob(self):
        # Create a new job that this process will be assigned to.
        job_name = ''  # must be anonymous otherwise we'd get conflicts
        security_attrs = win32security.SECURITY_ATTRIBUTES()
        security_attrs.bInheritHandle = 1
        job = win32job.CreateJobObject(security_attrs, job_name)
        extended_limits = win32job.QueryInformationJobObject(
            job, win32job.JobObjectExtendedLimitInformation)
        extended_limits['BasicLimitInformation'][
            'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE

        win32job.SetInformationJobObject(
            job, win32job.JobObjectExtendedLimitInformation, extended_limits)
        return job
Example #14
def create_job(job_name='', breakaway='silent'):
    hjob = win32job.CreateJobObject(None, job_name)
    if breakaway:
        info = win32job.QueryInformationJobObject(
            hjob, win32job.JobObjectExtendedLimitInformation)
        if breakaway == 'silent':
            info['BasicLimitInformation']['LimitFlags'] |= (
                win32job.JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK)
        else:
            info['BasicLimitInformation']['LimitFlags'] |= (
                win32job.JOB_OBJECT_LIMIT_BREAKAWAY_OK)
        win32job.SetInformationJobObject(
            hjob, win32job.JobObjectExtendedLimitInformation, info)
    return hjob
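
A hedged usage sketch distinguishing the two breakaway modes: 'silent' keeps new children of job members out of the job automatically, while any other truthy value only lets children escape when they are explicitly created with CREATE_BREAKAWAY_FROM_JOB.

import subprocess
import win32api, win32con, win32job

hjob = create_job(breakaway=True)   # sets JOB_OBJECT_LIMIT_BREAKAWAY_OK
perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
hproc = win32api.OpenProcess(perms, False, win32api.GetCurrentProcessId())
win32job.AssignProcessToJobObject(hjob, hproc)
win32api.CloseHandle(hproc)

CREATE_BREAKAWAY_FROM_JOB = 0x01000000   # same magic number as in Example #12
escaped = subprocess.Popen("notepad.exe",
                           creationflags=CREATE_BREAKAWAY_FROM_JOB)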
Example #15
    def _init_job_object():
        job_object = win32job.CreateJobObject(None, "")

        # Get the limit and job state information of the newly-created job object.
        job_info = win32job.QueryInformationJobObject(
            job_object, win32job.JobObjectExtendedLimitInformation)

        # Set up the job object so that closing the last handle to the job object
        # will terminate all associated processes and destroy the job object itself.
        job_info["BasicLimitInformation"]["LimitFlags"] |= \
                win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE

        # Update the limits of the job object.
        win32job.SetInformationJobObject(
            job_object, win32job.JobObjectExtendedLimitInformation, job_info)

        return job_object
Example #16
    def start(self):
        if self._state != 'stopped':
            return

        LOG.debug('Starting Executor')

        self._state = 'starting'

        self._push_server = PushServer(self._push_queue,
            self._push_server_address,
            self._ipc_authkey,
            self._callback_launcher)
        self._pull_server = PullServer(self._pull_server_address,
            self._ipc_authkey,
            self._callback_launcher)
        self._push_server.start()
        self._pull_server.start()

        if sys.platform == 'win32':
            self._job_obj = win32job.CreateJobObject(None, 'Bollard')
            ex_info = win32job.QueryInformationJobObject(self._job_obj,
                win32job.JobObjectExtendedLimitInformation)
            ex_info['BasicLimitInformation']['LimitFlags'] = \
                win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(self._job_obj,
                win32job.JobObjectExtendedLimitInformation,
                ex_info)

        self._validate_running_tasks()

        for task in Task.load_tasks(state='pending'):
            self._push_queue.put(task)

        # wait for push and pull server before start workers
        while not self._push_server.is_alive() or not self._pull_server.is_alive():
            time.sleep(0.1)

        self._poll_thread = threading.Thread(target=self._poll)
        self._poll_thread.daemon = True
        self._poll_thread.start()

        __node__['periodical_executor'].add_task(tasks_cleanup, 3600, title='Bollard cleanup')

        self._state = 'started'
        LOG.debug('Executor started')
Example #17
def create_job(hProcess):
    '''
    create_job(hProcess) creates win32job with correct flags:

    Note on JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE:
        https://msdn.microsoft.com/en-us/library/windows/desktop/ms684161(v=vs.85).aspx
        However, if the job has the
        JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE flag specified,
        closing the last job object handle terminates all
        associated processes and then destroys the job
        object itself.
    '''
    hJob = win32job.CreateJobObject(None, "")
    extended_info = win32job.QueryInformationJobObject(
        hJob, win32job.JobObjectExtendedLimitInformation)
    extended_info['BasicLimitInformation'][
        'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    win32job.SetInformationJobObject(
        hJob, win32job.JobObjectExtendedLimitInformation, extended_info)
    win32job.AssignProcessToJobObject(hJob, hProcess)

    return hJob
Example #18
    def limit_cpu_time(self, cpu_time: int) -> None:
        """Limit's the cpu time of this process.

        Note
        ----
        Unfortunately, when Windows terminates a process it doesn't use signalling
        like Unix-based systems do; it just kills it and gives no chance to clean
        up. Pynisher detects this in the parent process by looking for an empty
        response from the pipe, checking that the process was killed non-gracefully,
        and then seeing whether cpu_time was set.

        Parameters
        ----------
        cpu_time: int
            How many seconds to limit the process for
        """
        # First try to import stuff, an easy exception to catch and give good
        # information about
        try:
            import win32job
        except ModuleNotFoundError as e:
            raise ModuleNotFoundError(_win32_import_error_msg) from e

        job = self.job()

        # Get the information for the job object we created
        enum_for_info = win32job.JobObjectBasicLimitInformation
        info = win32job.QueryInformationJobObject(job, enum_for_info)

        # Set the time limit
        time = round(cpu_time * 10_000_000)  # In 100ns units (1e+9 / 100)
        info["PerProcessUserTimeLimit"] = time

        # Activate the flag to turn on the limiting of cput time
        flag = win32job.JOB_OBJECT_LIMIT_PROCESS_TIME
        info["LimitFlags"] |= flag

        # Finally set the new information
        win32job.SetInformationJobObject(job, enum_for_info, info)
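
A hedged end-to-end sketch of the same CPU-time limit applied outside of Pynisher: once the child exceeds PerProcessUserTimeLimit, Windows terminates it abruptly, which is exactly the behaviour the note above describes.

import subprocess
import win32api, win32con, win32job

hjob = win32job.CreateJobObject(None, "")
info = win32job.QueryInformationJobObject(hjob, win32job.JobObjectBasicLimitInformation)
info["PerProcessUserTimeLimit"] = 2 * 10_000_000   # 2 seconds in 100 ns units
info["LimitFlags"] |= win32job.JOB_OBJECT_LIMIT_PROCESS_TIME
win32job.SetInformationJobObject(hjob, win32job.JobObjectBasicLimitInformation, info)

busy = subprocess.Popen(["python", "-c", "while True: pass"])
perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
hproc = win32api.OpenProcess(perms, False, busy.pid)
win32job.AssignProcessToJobObject(hjob, hproc)
win32api.CloseHandle(hproc)
busy.wait()   # returns once the job's CPU-time limit kills the process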
Example #19
 def __register_job_object(self):
     if self.is_windows8_or_above_flag or not self.is_cloud_mode_flag:
         self.child = None
         self.hJob = None
         self.hProcess = None
         self.hJob = win32job.CreateJobObject(None, self.win32_job_name)
         extended_info = win32job.QueryInformationJobObject(
             self.hJob, win32job.JobObjectExtendedLimitInformation)
         extended_info['BasicLimitInformation'][
             'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
         win32job.SetInformationJobObject(
             self.hJob, win32job.JobObjectExtendedLimitInformation,
             extended_info)
         # Convert process id to process handle:
         perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
         self.hProcess = win32api.OpenProcess(perms, False, self.__p.pid)
         try:
             win32job.AssignProcessToJobObject(self.hJob, self.hProcess)
             self.__user_job_object_flag = True
         except Exception as e:
             self.__user_job_object_flag = False
     else:
         self.__user_job_object_flag = False
Example #20
    def SvcDoRun(self):
        import servicemanager
        servicemanager.LogInfoMsg(self._svc_name_ + " Start Requested")
        try:
            hJob = win32job.CreateJobObject(None, "")
            extended_info = win32job.QueryInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation)
            extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation, extended_info)
            command = "resilient-circuits.exe run " + self._resilient_args_
            command_args = shlex.split(command)
            self.process_handle = subprocess.Popen(command_args)
            # Convert process id to process handle:
            perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
            hProcess = win32api.OpenProcess(perms, False, self.process_handle.pid)
            win32job.AssignProcessToJobObject(hJob, hProcess)
        except:
            servicemanager.LogErrorMsg(self._svc_name_ + " failed to launch resilient-circuits.exe")
            raise
        servicemanager.LogInfoMsg(self._svc_name_ + " Started")

        while self.isAlive:
            if self.process_handle.poll() is not None:
                self.SvcStop()
            win32api.SleepEx(10000, True)
Example #21
    def _start(self, argv):
        """In most cases, just call subprocess.Popen(). On windows,
        add the started process to a new Job Object, so that any
        child processes of this process can be killed with a single
        call to TerminateJobObject (see self.stop()).
        """
        if valgrind:
            argv = [
                'buildscripts/valgrind.bash', '--show-reachable=yes',
                '--leak-check=full', '--suppressions=valgrind.suppressions'
            ] + argv
        elif drd:
            argv = ['buildscripts/valgrind.bash', '--tool=drd'] + argv
        proc = Popen(argv, stdout=self.outfile)

        if os.sys.platform == "win32":
            # Create a job object with the "kill on job close"
            # flag; this is inherited by child processes (ie
            # the mongod started on our behalf by buildlogger)
            # and lets us terminate the whole tree of processes
            # rather than orphaning the mongod.
            import win32job

            self.job_object = win32job.CreateJobObject(None, '')

            job_info = win32job.QueryInformationJobObject(
                self.job_object, win32job.JobObjectExtendedLimitInformation)
            job_info['BasicLimitInformation'][
                'LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(
                self.job_object, win32job.JobObjectExtendedLimitInformation,
                job_info)

            win32job.AssignProcessToJobObject(self.job_object, proc._handle)

        return proc
Example #22
def main():
    # escape list of arguments
    command = _win32_arglist_to_string(sys.argv[1:])

    # create job
    hJob = win32job.CreateJobObject(None, '')
    extended_info = win32job.QueryInformationJobObject(
        hJob, win32job.JobObjectExtendedLimitInformation)
    extended_info['BasicLimitInformation'][
        'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    win32job.SetInformationJobObject(
        hJob, win32job.JobObjectExtendedLimitInformation, extended_info)

    # associate job with completion port
    hIoPort = win32file.CreateIoCompletionPort(win32file.INVALID_HANDLE_VALUE,
                                               None, 0, 1)
    # pywin32 is missing support for JOBOBJECT_ASSOCIATE_COMPLETION_PORT, therefore
    #   we call it through ctypes
    port = JOBOBJECT_ASSOCIATE_COMPLETION_PORT()
    port.CompletionKey = hJob.handle
    port.CompletionPort = hIoPort.handle
    assert bool(
        ctypes.windll.kernel32.SetInformationJobObject(
            ctypes.wintypes.HANDLE(hJob.handle),
            ctypes.c_int(JobObjectAssociateCompletionPortInformation),
            ctypes.byref(port),
            ctypes.sizeof(JOBOBJECT_ASSOCIATE_COMPLETION_PORT)))

    # create process suspended
    si = win32process.STARTUPINFO()
    hProcess, hThread, processId, threadId = win32process.CreateProcess(
        None, command, None, None, True,
        win32process.CREATE_BREAKAWAY_FROM_JOB | win32process.CREATE_SUSPENDED,
        None, None, si)

    # add process to job
    win32job.AssignProcessToJobObject(hJob, hProcess)

    # resume process
    win32process.ResumeThread(hThread)
    win32api.CloseHandle(hThread)
    win32api.CloseHandle(hProcess)

    # wait for job termination
    numberOfBytes = ctypes.wintypes.DWORD(0)
    completionKey = ctypes.wintypes.HANDLE(0)
    overlapped = OVERLAPPED()
    while True:
        # calling this through pywin32 crashes the program, therefore we call it through ctypes
        res = bool(
            ctypes.windll.kernel32.GetQueuedCompletionStatus(
                ctypes.wintypes.HANDLE(hIoPort.handle),
                ctypes.byref(numberOfBytes), ctypes.byref(completionKey),
                ctypes.byref(overlapped),
                ctypes.wintypes.DWORD(win32event.INFINITE)))
        if not res or (bytes(completionKey) == bytes(
                ctypes.c_void_p(hJob.handle))
                       and bytes(numberOfBytes) == bytes(
                           ctypes.c_ulong(
                               win32job.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO))):
            break
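
The ctypes definitions that main() relies on are not shown above; a hedged sketch of what they typically look like (JobObjectAssociateCompletionPortInformation is 7 in the JOBOBJECTINFOCLASS enumeration):

import ctypes
import ctypes.wintypes

JobObjectAssociateCompletionPortInformation = 7
ULONG_PTR = ctypes.wintypes.WPARAM   # pointer-sized unsigned integer

class JOBOBJECT_ASSOCIATE_COMPLETION_PORT(ctypes.Structure):
    _fields_ = [('CompletionKey', ctypes.wintypes.LPVOID),
                ('CompletionPort', ctypes.wintypes.HANDLE)]

class OVERLAPPED(ctypes.Structure):
    # Simplified: the Offset/OffsetHigh pair stands in for the union in the
    # real Win32 structure, which is enough for GetQueuedCompletionStatus here.
    _fields_ = [('Internal', ULONG_PTR),
                ('InternalHigh', ULONG_PTR),
                ('Offset', ctypes.wintypes.DWORD),
                ('OffsetHigh', ctypes.wintypes.DWORD),
                ('hEvent', ctypes.wintypes.HANDLE)]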
Example #23
    def __init__(self,
                 script: str = "",
                 ahk_path: Path = None,
                 execute_from: Path = None) -> None:
        self.file = None
        self.script = script

        if ahk_path is None:
            ahk_path = PACKAGE_PATH / r'lib\AutoHotkey\AutoHotkey.exe'
        assert ahk_path and ahk_path.is_file()

        # Windows notification area relies on consistent exe path
        if execute_from is not None:
            execute_from_dir = Path(execute_from)
            assert execute_from_dir.is_dir()
            ahk_into_folder = execute_from_dir / ahk_path.name

            try:
                if os.path.getmtime(ahk_into_folder) != os.path.getmtime(
                        ahk_path):
                    os.remove(ahk_into_folder)
            except FileNotFoundError:
                pass

            try:
                os.link(ahk_path, ahk_into_folder)
            except FileExistsError:
                pass
            except OSError as ex:
                # 5: "Access is denied"
                # 17: "The system cannot move the file to a different disk drive"
                if ex.winerror in (5, 17):
                    shutil.copyfile(ahk_path, ahk_into_folder)
            ahk_path = ahk_into_folder

        self.pid = os.getpid()

        # if we exit, exit AutoHotkey
        atexit.register(self.exit)

        # if we terminate, terminate AutoHotkey
        self.job = win32job.CreateJobObject(None, "")
        extended_info = win32job.QueryInformationJobObject(
            self.job, win32job.JobObjectExtendedLimitInformation)
        extended_info['BasicLimitInformation'][
            'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
        win32job.SetInformationJobObject(
            self.job, win32job.JobObjectExtendedLimitInformation,
            extended_info)
        # add ourselves and subprocess will inherit job membership
        handle = win32api.OpenProcess(
            win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA, False,
            self.pid)
        win32job.AssignProcessToJobObject(self.job, handle)
        win32api.CloseHandle(handle)

        # user script exceptions are already caught and sent to stderr, so /ErrorStdOut would only affect debugging CORE
        # self.cmd = [str(ahk_path), "/ErrorStdOut=utf-16-raw", "/CP65001", "*"]
        self.cmd = [str(ahk_path), "/CP65001", "*"]
        # must pipe all three within a PyInstaller bundled exe
        self.popen = subprocess.Popen(self.cmd,
                                      bufsize=Script.BUFFER_SIZE,
                                      executable=str(ahk_path),
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)

        # keep grandchild processes from inheriting job membership
        extended_info['BasicLimitInformation'][
            'LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK
        win32job.SetInformationJobObject(
            self.job, win32job.JobObjectExtendedLimitInformation,
            extended_info)

        self.popen.stdin.write(Script.CORE.encode('utf-8'))
        self.popen.stdin.write(self.script.encode('utf-8'))
        self.popen.stdin.close()

        self.hwnd = int(self._read_response(), 16)
        assert self._read_response() == "Initialized"
Example #24
SINK = object()

# get default args from subprocess.Popen to use in subproc.Popen
a = inspect.getargspec(subprocess.Popen.__init__)
_Popen_defaults = zip(a.args[-len(a.defaults):], a.defaults)
del a
if mswindows:
    # required for os.kill() to work
    _Popen_creationflags = subprocess.CREATE_NEW_PROCESS_GROUP

    if _kill_children_on_death:
        _chJob = win32job.CreateJobObject(None, "")
        if not _chJob:
            raise WinError()

        chJeli = win32job.QueryInformationJobObject(
            _chJob, win32job.JobObjectExtendedLimitInformation)
        # JOB_OBJECT_LIMIT_BREAKAWAY_OK allows children to assign grandchildren
        # to their own jobs
        chJeli['BasicLimitInformation']['LimitFlags'] |= (
            win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            | win32job.JOB_OBJECT_LIMIT_BREAKAWAY_OK)

        if win32job.SetInformationJobObject(
                _chJob, win32job.JobObjectExtendedLimitInformation,
                chJeli) == 0:
            raise WinError()
        del chJeli

        # If we already belong to a JobObject, our children are auto-assigned
        # to that and AssignProcessToJobObject(ch, _chJob) fails. This flag
        # prevents this auto-assignment, as long as the parent JobObject has
        # JOB_OBJECT_LIMIT_BREAKAWAY_OK set.

old_print = print
def print(x):
    old_print('\n'.join(json.dumps(y)[1:-1] for y in x.splitlines()))
    sys.stdout.flush()


if is_win:
    # Create a job object and assign ourselves to it, so that
    # all remaining test subprocesses get killed off on completion.
    # This prevents AppVeyor from waiting an hour
    # https://stackoverflow.com/a/23587108 (and its first comment)
    import win32api, win32con, win32job  # noqa
    hJob = win32job.CreateJobObject(None, "")
    extended_info = win32job.QueryInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation)
    extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    win32job.SetInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation, extended_info)
    perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
    hProcess = win32api.OpenProcess(perms, False, os.getpid())
    win32job.AssignProcessToJobObject(hJob, hProcess)


def find_test_keys():
    if os.environ.get('CONDA_BUILD'):
        # The current version of conda build manually adds the activation
        # directories to the PATH---and then calls the standard conda
        # activation script, which does it again. This frustrates conda's
        # ability to deactivate this environment. Most package builds are
        # not affected by this, but we are, because our tests need to do
        # environment activation and deactivation. To fix this, we remove