Example #1
    def run(self):
        logger.info("Started App usage tracker thread")
        file_loc = os.path.join(os.path.dirname(__file__), "apps-usage",
                                "apps-usage.exe")
        try:
            hJob = win32job.CreateJobObject(None, "")
            extended_info = win32job.QueryInformationJobObject(
                hJob, win32job.JobObjectExtendedLimitInformation)
            extended_info['BasicLimitInformation'][
                'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(
                hJob, win32job.JobObjectExtendedLimitInformation,
                extended_info)

            p = subprocess.Popen(f"{file_loc} \"{ROOT_DIR}\"", shell=False)
            perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
            hProcess = win32api.OpenProcess(perms, False, p.pid)

            win32job.AssignProcessToJobObject(hJob, hProcess)
            while not self.kill:
                time.sleep(0.5)
            p.terminate()
            p.kill()
        except Exception as e:
            logger.error(f"Error Starting Apps-Usage. {e} ")
        logger.warning("Stopped app usage tracking thread")
Example #2
    def _start(self, argv):
        """In most cases, just call subprocess.Popen(). On windows,
        add the started process to a new Job Object, so that any
        child processes of this process can be killed with a single
        call to TerminateJobObject (see self.stop()).
        """
        proc = Popen(argv)

        if os.sys.platform == "win32":
            # Create a job object with the "kill on job close"
            # flag; this is inherited by child processes (ie
            # the mongod started on our behalf by buildlogger)
            # and lets us terminate the whole tree of processes
            # rather than orphaning the mongod.
            import win32job

            self.job_object = win32job.CreateJobObject(None, '')

            job_info = win32job.QueryInformationJobObject(
                self.job_object, win32job.JobObjectExtendedLimitInformation)
            job_info['BasicLimitInformation'][
                'LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(
                self.job_object, win32job.JobObjectExtendedLimitInformation,
                job_info)

            win32job.AssignProcessToJobObject(self.job_object, proc._handle)

        return proc
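
The docstring above refers to a self.stop() that is not included in the snippet. A minimal sketch of what that counterpart could look like, assuming the job handle is kept in self.job_object (the exit code passed to TerminateJobObject is arbitrary):

    def stop(self):
        # Hypothetical counterpart to _start(): terminating the job kills the
        # started process and every child that was assigned to the job.
        if os.sys.platform == "win32" and getattr(self, "job_object", None):
            win32job.TerminateJobObject(self.job_object, 1)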
Example #3
    def _add_to_job_object(self):
        global _global_process_job_handle
        if _global_process_job_handle is not None:
            #This means that we are creating another process family - we'll all be in the same job
            return

        already_in_job = win32job.IsProcessInJob(win32api.GetCurrentProcess(), None)

        #Create a new job and put us in it before we create any children
        logger.debug("Creating job object and adding parent process to it")
        security_attrs = win32security.SECURITY_ATTRIBUTES()
        security_attrs.bInheritHandle = 0
        _global_process_job_handle = win32job.CreateJobObject(security_attrs, self.get_job_object_name())
        extended_info = win32job.QueryInformationJobObject(_global_process_job_handle, win32job.JobObjectExtendedLimitInformation)
        extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
        win32job.SetInformationJobObject(_global_process_job_handle, win32job.JobObjectExtendedLimitInformation, extended_info)
        try:
            win32job.AssignProcessToJobObject(_global_process_job_handle, win32api.GetCurrentProcess())
        except Exception as e:
            winv = sys.getwindowsversion()
            logger.error("Error raised during assignment of the current process to a new job object. " +\
                         "The process %s already in a job. " +\
                         "The windows version is %d.%d.\nError: %s",
                            "is" if already_in_job else "is not",
                            winv.major,
                            winv.minor,
                            _exception_str())

            if already_in_job and (winv.major, winv.minor) < (6, 2):
                raise JobObjectAssignError("On Windows versions older than Windows 8 / Windows Server 2012, ProcessFamily relies on the parent process NOT being in a job already", e, already_in_job)

            raise JobObjectAssignError("Error raised during assignment of the current process to a new job object.", e, already_in_job)


        logger.debug("Added to job object")
Example #4
    def SvcDoRun(self):
        if hasattr(sys, "frozen"):
            this_dir = os.path.dirname(win32api.GetModuleFileName(None))
        else:
            this_dir = os.path.dirname(os.path.abspath(__file__))
        # TODO: maybe it is better to run this in a job object too
        with open(os.path.join(this_dir, 'npm.log'), 'w') as npm_log:
            subprocess.check_call('npm install',
                                  cwd=this_dir,
                                  shell=True,
                                  stdin=None,
                                  stdout=npm_log,
                                  stderr=subprocess.STDOUT)

        security_attributes = win32security.SECURITY_ATTRIBUTES()
        security_attributes.bInheritHandle = True
        startup = win32process.STARTUPINFO()
        startup.dwFlags |= win32process.STARTF_USESTDHANDLES
        startup.hStdInput = None
        startup.hStdOutput = win32file.CreateFile(
            os.path.join(this_dir, "service_stdout.log"),
            win32file.GENERIC_WRITE, win32file.FILE_SHARE_READ,
            security_attributes, win32file.CREATE_ALWAYS, 0, None)
        startup.hStdError = win32file.CreateFile(
            os.path.join(this_dir, "service_stderr.log"),
            win32file.GENERIC_WRITE, win32file.FILE_SHARE_READ,
            security_attributes, win32file.CREATE_ALWAYS, 0, None)
        (hProcess, hThread, processId, threadId) = win32process.CreateProcess(
            None, r'"C:\Program Files\nodejs\node.exe" node_worker.js', None,
            None, True, win32process.CREATE_SUSPENDED
            | win32process.CREATE_BREAKAWAY_FROM_JOB, None, this_dir, startup)

        assert not win32job.IsProcessInJob(hProcess, None)

        self.hJob = win32job.CreateJobObject(None, "")
        extended_info = win32job.QueryInformationJobObject(
            self.hJob, win32job.JobObjectExtendedLimitInformation)
        extended_info['BasicLimitInformation'][
            'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE | win32job.JOB_OBJECT_LIMIT_BREAKAWAY_OK
        win32job.SetInformationJobObject(
            self.hJob, win32job.JobObjectExtendedLimitInformation,
            extended_info)
        win32job.AssignProcessToJobObject(self.hJob, hProcess)

        win32process.ResumeThread(hThread)
        win32api.CloseHandle(hThread)

        signalled = win32event.WaitForMultipleObjects(
            [self.hWaitStop, hProcess], False, win32event.INFINITE)
        if signalled == win32event.WAIT_OBJECT_0 + 1 and win32process.GetExitCodeProcess(
                hProcess) != 0:
            servicemanager.LogErrorMsg(
                self._svc_name_ + " process exited with non-zero status " +
                str(win32process.GetExitCodeProcess(hProcess)))
        win32api.CloseHandle(hProcess)
        win32api.CloseHandle(self.hJob)
        win32api.CloseHandle(self.hWaitStop)
        win32api.CloseHandle(startup.hStdOutput)
        win32api.CloseHandle(startup.hStdError)
Example #5
def create_job(hProcess):
    hJob = win32job.CreateJobObject(None, "")
    extended_info = win32job.QueryInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation)
    extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    win32job.SetInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation, extended_info)
    win32job.AssignProcessToJobObject(hJob, hProcess)

    return hJob
Example #6
    def _create_job_object(self):
        """Create a new anonymous job object"""
        hjob = win32job.CreateJobObject(None, "")
        extended_info = win32job.QueryInformationJobObject(
            hjob, win32job.JobObjectExtendedLimitInformation)
        extended_info['BasicLimitInformation'][
            'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
        win32job.SetInformationJobObject(
            hjob, win32job.JobObjectExtendedLimitInformation, extended_info)
        return hjob
Example #7
    def __init__(self, suffix):
        self.hJob = win32job.CreateJobObject(None,
                                             "SupervisorJob{}".format(suffix))
        extended_info = win32job.QueryInformationJobObject(
            self.hJob, win32job.JobObjectExtendedLimitInformation)
        extended_info['BasicLimitInformation'][
            'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
        win32job.SetInformationJobObject(
            self.hJob, win32job.JobObjectExtendedLimitInformation,
            extended_info)
Example #8
    def attach_queue_to_stdout(self):
        start_ts = time.time()
        while time.time() - start_ts < self.START_SECONDS_DEFAULT:
            if self.is_suprocess_started:

                hJob = win32job.CreateJobObject(None, "")
                extended_info = win32job.QueryInformationJobObject(
                    hJob, win32job.JobObjectExtendedLimitInformation)
                extended_info['BasicLimitInformation'][
                    'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
                win32job.SetInformationJobObject(
                    hJob, win32job.JobObjectExtendedLimitInformation,
                    extended_info)

                perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
                hProcess = win32api.OpenProcess(perms, False, self._pipe.pid)
                win32job.AssignProcessToJobObject(hJob, hProcess)

                self.log.debug("attaching queue to stdout")
                while True:
                    try:
                        if self._stopped:
                            break
                        if self._start_failed:
                            break
                        gc.disable()
                        nextline = self._pipe.stdout.readline()
                        if nextline == '':  # and self._pipe.poll() is not None:
                            time.sleep(0.05)
                            continue
                        self.log.debug("got from stdout: %s" %
                                       nextline.strip())
                        try:
                            self._stdout_queue.put(nextline.strip())
                        except Exception as e:
                            self.log.exception(
                                "could not put result to stdout queue, reason: %s"
                                % e)
                        gc.enable()

                    except AttributeError:
                        self.log.exception("stdout queue broken")
                        break
                    finally:
                        gc.enable()
                #if self._pipe:
                #    self._pipe.stdout.close()
            else:
                if not self._stopped:
                    self.log.warning(
                        "pipe is None; can't attach queue to stdout")

            time.sleep(0.2)
Example #9
    def _createParentJob(self):
        # Create a new job that this process will be assigned to.
        job_name = ''  # must be anonymous otherwise we'd get conflicts
        security_attrs = win32security.SECURITY_ATTRIBUTES()
        security_attrs.bInheritHandle = 1
        job = win32job.CreateJobObject(security_attrs, job_name)
        extended_limits = win32job.QueryInformationJobObject(
            job, win32job.JobObjectExtendedLimitInformation)
        extended_limits['BasicLimitInformation'][
            'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE

        win32job.SetInformationJobObject(
            job, win32job.JobObjectExtendedLimitInformation, extended_limits)
        return job
Example #10
    def start(self):

        argv, env = [self.executable] + self.arguments, self.env

        if self.env_vars:
            if not env:
                env = os.environ.copy()
            env.update(self.env_vars)

        creation_flags = 0
        if os.sys.platform == "win32":
            # Magic number needed to allow job reassignment in Windows 7
            # see: MSDN - Process Creation Flags - ms684863
            CREATE_BREAKAWAY_FROM_JOB = 0x01000000
            creation_flags = CREATE_BREAKAWAY_FROM_JOB

        stdout = sys.stdout if not self.logger else subprocess.PIPE
        stderr = sys.stderr if not self.logger else subprocess.PIPE

        self.subprocess = subprocess.Popen(argv, env=env, creationflags=creation_flags,
                                           stdout=stdout, stderr=stderr)

        if stdout == subprocess.PIPE:
            self.stdout_logger = LoggerPipe(self.logger, logging.INFO, self.subprocess.stdout)
            self.stdout_logger.wait_until_started()
        if stderr == subprocess.PIPE:
            self.stderr_logger = LoggerPipe(self.logger, logging.ERROR, self.subprocess.stderr)
            self.stderr_logger.wait_until_started()

        if os.sys.platform == "win32":

            # Create a job object with the "kill on job close" flag
            # This is inherited by child processes (i.e. the mongod started on our behalf by
            # buildlogger) and lets us terminate the whole tree of processes rather than
            # orphaning the mongod.
            import win32job

            job_object = win32job.CreateJobObject(None, '')

            job_info = win32job.QueryInformationJobObject(
                job_object,
                win32job.JobObjectExtendedLimitInformation)
            job_info['BasicLimitInformation']['LimitFlags'] |= \
                win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(job_object,
                                             win32job.JobObjectExtendedLimitInformation,
                                             job_info)
            win32job.AssignProcessToJobObject(job_object, self.subprocess._handle)

            self.subprocess_job_object = job_object
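
As an aside, the literal 0x01000000 used above is the same value that pywin32 exposes as win32process.CREATE_BREAKAWAY_FROM_JOB (Examples #4 and #23 pass it by name), so the flag does not need to be spelled as a magic number:

import win32process

creation_flags = win32process.CREATE_BREAKAWAY_FROM_JOB  # same value as 0x01000000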
Example #11
def create_job(job_name='', breakaway='silent'):
    hjob = win32job.CreateJobObject(None, job_name)
    if breakaway:
        info = win32job.QueryInformationJobObject(
            hjob, win32job.JobObjectExtendedLimitInformation)
        if breakaway == 'silent':
            info['BasicLimitInformation']['LimitFlags'] |= (
                win32job.JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK)
        else:
            info['BasicLimitInformation']['LimitFlags'] |= (
                win32job.JOB_OBJECT_LIMIT_BREAKAWAY_OK)
        win32job.SetInformationJobObject(
            hjob, win32job.JobObjectExtendedLimitInformation, info)
    return hjob
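
A usage sketch for the helper above (the child command is illustrative). With breakaway='silent', processes assigned to the job do not pass job membership on to their children; with the non-silent flag, a child can only leave the job when it is created with CREATE_BREAKAWAY_FROM_JOB:

import os
import subprocess
import win32api
import win32con
import win32job

hjob = create_job(breakaway='silent')
perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
hproc = win32api.OpenProcess(perms, False, os.getpid())
win32job.AssignProcessToJobObject(hjob, hproc)
# Because of JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK, this child is not placed
# into hjob even though its parent is in the job.
subprocess.Popen(["notepad.exe"])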
Example #12
    def _init_job_object():
        job_object = win32job.CreateJobObject(None, "")

        # Get the limit and job state information of the newly-created job object.
        job_info = win32job.QueryInformationJobObject(
            job_object, win32job.JobObjectExtendedLimitInformation)

        # Set up the job object so that closing the last handle to the job object
        # will terminate all associated processes and destroy the job object itself.
        job_info["BasicLimitInformation"]["LimitFlags"] |= \
                win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE

        # Update the limits of the job object.
        win32job.SetInformationJobObject(
            job_object, win32job.JobObjectExtendedLimitInformation, job_info)

        return job_object
Example #13
    def job(self) -> WIN32JOB:
        """Get the job associated with this process.

        Caches between calls for safety.

        Returns
        -------
        WIN32JOB
            A Windows job which consists of one or more processes. In this case
            we just have the one process
        """
        if not hasattr(self, "_job"):

            # First try to import stuff, an easy exception to catch and give good
            # information about
            try:
                import win32api
                import win32job
                import winerror
            except ModuleNotFoundError as e:
                raise ModuleNotFoundError(_win32_import_error_msg) from e

            # Here, we assign it to a windows "Job", whatever that is
            # If the process is already assigned to a job, then we have
            # to check if it's less than Windows 8 because apparently
            # nested jobs aren't supported there
            try:
                job = win32job.CreateJobObject(None, "")

                process = win32api.GetCurrentProcess()
                win32job.AssignProcessToJobObject(job, process)

                self._job = job

            except win32job.error as e:
                if (e.winerror != winerror.ERROR_ACCESS_DENIED
                        or sys.getwindowsversion() >= (6, 2)  # type: ignore
                        or not win32job.IsProcessInJob(process, None)):
                    raise e

                msg = ("The process is already in a job."
                       " Nested jobs are not supported prior to Windows 8.")
                raise RuntimeError(msg) from e

        return self._job
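
The pre-Windows 8 limitation mentioned in the comments can also be checked up front; a small sketch in the same spirit (not part of the original class):

import sys
import win32api
import win32job

process = win32api.GetCurrentProcess()
# Nested jobs only work on Windows 8 / Server 2012 (version 6.2) and later.
if win32job.IsProcessInJob(process, None) and sys.getwindowsversion() < (6, 2):
    raise RuntimeError("Already in a job and nested jobs are unsupported here")
job = win32job.CreateJobObject(None, "")
win32job.AssignProcessToJobObject(job, process)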
Example #14
    def start(self):
        if self._state != 'stopped':
            return

        LOG.debug('Starting Executor')

        self._state = 'starting'

        self._push_server = PushServer(self._push_queue,
            self._push_server_address,
            self._ipc_authkey,
            self._callback_launcher)
        self._pull_server = PullServer(self._pull_server_address,
            self._ipc_authkey,
            self._callback_launcher)
        self._push_server.start()
        self._pull_server.start()

        if sys.platform == 'win32':
            self._job_obj = win32job.CreateJobObject(None, 'Bollard')
            ex_info = win32job.QueryInformationJobObject(self._job_obj,
                win32job.JobObjectExtendedLimitInformation)
            ex_info['BasicLimitInformation']['LimitFlags'] = \
                win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(self._job_obj,
                win32job.JobObjectExtendedLimitInformation,
                ex_info)

        self._validate_running_tasks()

        for task in Task.load_tasks(state='pending'):
            self._push_queue.put(task)

        # wait for push and pull server before start workers
        while not self._push_server.is_alive() or not self._pull_server.is_alive():
            time.sleep(0.1)

        self._poll_thread = threading.Thread(target=self._poll)
        self._poll_thread.daemon = True
        self._poll_thread.start()

        __node__['periodical_executor'].add_task(tasks_cleanup, 3600, title='Bollard cleanup')

        self._state = 'started'
        LOG.debug('Executor started')
Example #15
def create_job(hProcess):
    '''
    create_job(hProcess) creates win32job with correct flags:

    Note on JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE:
        https://msdn.microsoft.com/en-us/library/windows/desktop/ms684161(v=vs.85).aspx
        However, if the job has the
        JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE flag specified,
        closing the last job object handle terminates all
        associated processes and then destroys the job
        object itself.
    '''
    hJob = win32job.CreateJobObject(None, "")
    extended_info = win32job.QueryInformationJobObject(
        hJob, win32job.JobObjectExtendedLimitInformation)
    extended_info['BasicLimitInformation'][
        'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    win32job.SetInformationJobObject(
        hJob, win32job.JobObjectExtendedLimitInformation, extended_info)
    win32job.AssignProcessToJobObject(hJob, hProcess)

    return hJob
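
A hedged usage sketch for create_job (the spawned command is illustrative): the process handle is opened with exactly the access rights AssignProcessToJobObject requires, and, per the MSDN note quoted in the docstring, closing the last handle to the job later tears down the whole process tree:

import subprocess
import win32api
import win32con

child = subprocess.Popen(["notepad.exe"])
perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
hProcess = win32api.OpenProcess(perms, False, child.pid)
hJob = create_job(hProcess)
# ... later, closing the last job handle kills child and any processes it started
win32api.CloseHandle(hJob)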
Example #16
    def __register_job_object(self):
        if self.is_windows8_or_above_flag or not self.is_cloud_mode_flag:
            self.child = None
            self.hJob = None
            self.hProcess = None
            self.hJob = win32job.CreateJobObject(None, self.win32_job_name)
            extended_info = win32job.QueryInformationJobObject(
                self.hJob, win32job.JobObjectExtendedLimitInformation)
            extended_info['BasicLimitInformation'][
                'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(
                self.hJob, win32job.JobObjectExtendedLimitInformation,
                extended_info)
            # Convert process id to process handle:
            perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
            self.hProcess = win32api.OpenProcess(perms, False, self.__p.pid)
            try:
                win32job.AssignProcessToJobObject(self.hJob, self.hProcess)
                self.__user_job_object_flag = True
            except Exception as e:
                self.__user_job_object_flag = False
        else:
            self.__user_job_object_flag = False
Example #17
    def _start(self, argv):
        """In most cases, just call subprocess.Popen(). On windows,
        add the started process to a new Job Object, so that any
        child processes of this process can be killed with a single
        call to TerminateJobObject (see self.stop()).
        """
        if valgrind:
            argv = [
                'buildscripts/valgrind.bash', '--show-reachable=yes',
                '--leak-check=full', '--suppressions=valgrind.suppressions'
            ] + argv
        elif drd:
            argv = ['buildscripts/valgrind.bash', '--tool=drd'] + argv
        proc = Popen(argv, stdout=self.outfile)

        if os.sys.platform == "win32":
            # Create a job object with the "kill on job close"
            # flag; this is inherited by child processes (ie
            # the mongod started on our behalf by buildlogger)
            # and lets us terminate the whole tree of processes
            # rather than orphaning the mongod.
            import win32job

            self.job_object = win32job.CreateJobObject(None, '')

            job_info = win32job.QueryInformationJobObject(
                self.job_object, win32job.JobObjectExtendedLimitInformation)
            job_info['BasicLimitInformation'][
                'LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(
                self.job_object, win32job.JobObjectExtendedLimitInformation,
                job_info)

            win32job.AssignProcessToJobObject(self.job_object, proc._handle)

        return proc
Example #18
    def SvcDoRun(self):
        import servicemanager
        servicemanager.LogInfoMsg(self._svc_name_ + " Start Requested")
        try:
            hJob = win32job.CreateJobObject(None, "")
            extended_info = win32job.QueryInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation)
            extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation, extended_info)
            command = "resilient-circuits.exe run " + self._resilient_args_
            command_args = shlex.split(command)
            self.process_handle = subprocess.Popen(command_args)
            # Convert process id to process handle:
            perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
            hProcess = win32api.OpenProcess(perms, False, self.process_handle.pid)
            win32job.AssignProcessToJobObject(hJob, hProcess)
        except:
            servicemanager.LogErrorMsg(self._svc_name_ + " failed to launch resilient-circuits.exe")
            raise
        servicemanager.LogInfoMsg(self._svc_name_ + " Started")

        while self.isAlive:
            if self.process_handle.poll() is not None:
                self.SvcStop()
            win32api.SleepEx(10000, True)
Example #19
def _get_null_value_for_win32():
    # https://stackoverflow.com/questions/46800142/in-python-with-pywin32-win32job-the-createjobobject-function-how-do-i-pass-nu  # noqa: E501
    return win32job.CreateJobObject(None, "")
Example #20
provider = CondaKernelProvider()


old_print = print
def print(x):
    old_print('\n'.join(json.dumps(y)[1:-1] for y in x.splitlines()))
    sys.stdout.flush()


if is_win:
    # Create a job object and assign ourselves to it, so that
    # all remaining test subprocesses get killed off on completion.
    # This prevents AppVeyor from waiting an hour
    # https://stackoverflow.com/a/23587108 (and its first comment)
    import win32api, win32con, win32job  # noqa
    hJob = win32job.CreateJobObject(None, "")
    extended_info = win32job.QueryInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation)
    extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    win32job.SetInformationJobObject(hJob, win32job.JobObjectExtendedLimitInformation, extended_info)
    perms = win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA
    hProcess = win32api.OpenProcess(perms, False, os.getpid())
    win32job.AssignProcessToJobObject(hJob, hProcess)


def find_test_keys():
    if os.environ.get('CONDA_BUILD'):
        # The current version of conda build manually adds the activation
        # directories to the PATH---and then calls the standard conda
        # activation script, which does it again. This frustrates conda's
        # ability to deactivate this environment. Most package builds are
        # not affected by this, but we are, because our tests need to do
Example #21
    def __init__(self,
                 script: str = "",
                 ahk_path: Path = None,
                 execute_from: Path = None) -> None:
        self.file = None
        self.script = script

        if ahk_path is None:
            ahk_path = PACKAGE_PATH / r'lib\AutoHotkey\AutoHotkey.exe'
        assert ahk_path and ahk_path.is_file()

        # Windows notification area relies on consistent exe path
        if execute_from is not None:
            execute_from_dir = Path(execute_from)
            assert execute_from_dir.is_dir()
            ahk_into_folder = execute_from_dir / ahk_path.name

            try:
                if os.path.getmtime(ahk_into_folder) != os.path.getmtime(
                        ahk_path):
                    os.remove(ahk_into_folder)
            except FileNotFoundError:
                pass

            try:
                os.link(ahk_path, ahk_into_folder)
            except FileExistsError:
                pass
            except OSError as ex:
                # 5: "Access is denied"
                # 17: "The system cannot move the file to a different disk drive"
                if ex.winerror in (5, 17):
                    shutil.copyfile(ahk_path, ahk_into_folder)
            ahk_path = ahk_into_folder

        self.pid = os.getpid()

        # if we exit, exit AutoHotkey
        atexit.register(self.exit)

        # if we terminate, terminate AutoHotkey
        self.job = win32job.CreateJobObject(None, "")
        extended_info = win32job.QueryInformationJobObject(
            self.job, win32job.JobObjectExtendedLimitInformation)
        extended_info['BasicLimitInformation'][
            'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
        win32job.SetInformationJobObject(
            self.job, win32job.JobObjectExtendedLimitInformation,
            extended_info)
        # add ourselves and subprocess will inherit job membership
        handle = win32api.OpenProcess(
            win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA, False,
            self.pid)
        win32job.AssignProcessToJobObject(self.job, handle)
        win32api.CloseHandle(handle)

        # user script exceptions are already caught and sent to stderr, so /ErrorStdOut would only affect debugging CORE
        # self.cmd = [str(ahk_path), "/ErrorStdOut=utf-16-raw", "/CP65001", "*"]
        self.cmd = [str(ahk_path), "/CP65001", "*"]
        # must pipe all three within a PyInstaller bundled exe
        self.popen = subprocess.Popen(self.cmd,
                                      bufsize=Script.BUFFER_SIZE,
                                      executable=str(ahk_path),
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)

        # keep grandchild processes from inheriting job membership
        extended_info['BasicLimitInformation'][
            'LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK
        win32job.SetInformationJobObject(
            self.job, win32job.JobObjectExtendedLimitInformation,
            extended_info)

        self.popen.stdin.write(Script.CORE.encode('utf-8'))
        self.popen.stdin.write(self.script.encode('utf-8'))
        self.popen.stdin.close()

        self.hwnd = int(self._read_response(), 16)
        assert self._read_response() == "Initialized"
Example #22
securityAttributes.Length = sizeof(securityAttributes)
securityAttributes.SecDescriptior = None
securityAttributes.InheritHandle = True

hDesktop = windll.user32.CreateDesktopA(DESKTOP_NAME, None, None, 0,
                                        GENERIC_ALL, securityAttributes)
if hDesktop == 0:
    print('CreateDesktop failed, err=%s' % windll.kernel32.GetLastError())
    sys.exit(20)

#### Create a job

JOB_NAME = 'UniqueJob-%s' % os.getpid()

try:
    hJob = win32job.CreateJobObject(None, JOB_NAME)
except pywintypes.error as e:
    print('CreateJobObject failed, err=%s' % e.winerror)
    sys.exit(25)

#### Create the process

startupInfo = win32process.STARTUPINFO()
startupInfo.lpDesktop = DESKTOP_NAME

try:
    processInfo = win32process.CreateProcess(None, childCmdLine, None, None,
                                             True, win32con.CREATE_SUSPENDED,
                                             None, None, startupInfo)
except pywintypes.error as e:
    print('CreateProcess failed, err=%s' % e.winerror)
Example #23
def main():
    # escape list of arguments
    command = _win32_arglist_to_string(sys.argv[1:])

    # create job
    hJob = win32job.CreateJobObject(None, '')
    extended_info = win32job.QueryInformationJobObject(
        hJob, win32job.JobObjectExtendedLimitInformation)
    extended_info['BasicLimitInformation'][
        'LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    win32job.SetInformationJobObject(
        hJob, win32job.JobObjectExtendedLimitInformation, extended_info)

    # associate job with completion port
    hIoPort = win32file.CreateIoCompletionPort(win32file.INVALID_HANDLE_VALUE,
                                               None, 0, 1)
    # pywin32 is missing support for JOBOBJECT_ASSOCIATE_COMPLETION_PORT, therefore
    #   we call it through ctypes
    port = JOBOBJECT_ASSOCIATE_COMPLETION_PORT()
    port.CompletionKey = hJob.handle
    port.CompletionPort = hIoPort.handle
    assert bool(
        ctypes.windll.kernel32.SetInformationJobObject(
            ctypes.wintypes.HANDLE(hJob.handle),
            ctypes.c_int(JobObjectAssociateCompletionPortInformation),
            ctypes.byref(port),
            ctypes.sizeof(JOBOBJECT_ASSOCIATE_COMPLETION_PORT)))

    # create process suspended
    si = win32process.STARTUPINFO()
    hProcess, hThread, processId, threadId = win32process.CreateProcess(
        None, command, None, None, True,
        win32process.CREATE_BREAKAWAY_FROM_JOB | win32process.CREATE_SUSPENDED,
        None, None, si)

    # add process to job
    win32job.AssignProcessToJobObject(hJob, hProcess)

    # resume process
    win32process.ResumeThread(hThread)
    win32api.CloseHandle(hThread)
    win32api.CloseHandle(hProcess)

    # wait for job termination
    numberOfBytes = ctypes.wintypes.DWORD(0)
    completionKey = ctypes.wintypes.HANDLE(0)
    overlapped = OVERLAPPED()
    while True:
        # calling this through pywin32 crashes the program, therefore we call it through ctypes
        res = bool(
            ctypes.windll.kernel32.GetQueuedCompletionStatus(
                ctypes.wintypes.HANDLE(hIoPort.handle),
                ctypes.byref(numberOfBytes), ctypes.byref(completionKey),
                ctypes.byref(overlapped),
                ctypes.wintypes.DWORD(win32event.INFINITE)))
        if not res or (bytes(completionKey) == bytes(
                ctypes.c_void_p(hJob.handle))
                       and bytes(numberOfBytes) == bytes(
                           ctypes.c_ulong(
                               win32job.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO))):
            break
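
The example above relies on ctypes definitions for JOBOBJECT_ASSOCIATE_COMPLETION_PORT, OVERLAPPED and the JobObjectAssociateCompletionPortInformation info class that live elsewhere in the original file. A plausible reconstruction, offered only as an assumption about what those definitions look like:

import ctypes
import ctypes.wintypes

# JOBOBJECTINFOCLASS value for JobObjectAssociateCompletionPortInformation
JobObjectAssociateCompletionPortInformation = 7

class JOBOBJECT_ASSOCIATE_COMPLETION_PORT(ctypes.Structure):
    _fields_ = [("CompletionKey", ctypes.wintypes.LPVOID),
                ("CompletionPort", ctypes.wintypes.HANDLE)]

class OVERLAPPED(ctypes.Structure):
    # Used here only as scratch space for GetQueuedCompletionStatus.
    _fields_ = [("Internal", ctypes.wintypes.LPVOID),
                ("InternalHigh", ctypes.wintypes.LPVOID),
                ("Offset", ctypes.wintypes.DWORD),
                ("OffsetHigh", ctypes.wintypes.DWORD),
                ("hEvent", ctypes.wintypes.HANDLE)]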
Example #24
def proc(args, time_limit, mem_size, time_elapsed, return_val, _stdin_name,
         _stdout_name, _stderr_name):
    """测试的 runner 函数

    Args:
        args: 待测程序及其参数的列表
        time_limit: float,秒为单位的运行时间限制
        mem_size: int,byte 为单位的运行内存限制,零值为不限制
        time_elapsed: double 类型的 multiprocessing 中的 shared_ctypes 对象
            用以存储并返回实际运行时间
        return_val: int 类型的 multiprocessing 中的 shared_ctypes 对象
            用以存储并返回待测程序的返回值
        _stdin_name, _stdout_name, _stderr_name: 为文件路径
            分别为待测程序的标准输入、输出、错误输出的重定向目标
    """
    if mem_size:
        try:
            if _mswindows:
                import win32api
                import win32job
                job = win32job.CreateJobObject(None, "judge_mem_limiter")
                win32job.SetInformationJobObject(
                    job, win32job.JobObjectExtendedLimitInformation, {
                        "BasicLimitInformation": {
                            "PerProcessUserTimeLimit": 0,
                            "PerJobUserTimeLimit": 0,
                            "LimitFlags":
                            win32job.JOB_OBJECT_LIMIT_PROCESS_MEMORY,
                            "MinimumWorkingSetSize": 0,
                            "MaximumWorkingSetSize": 0,
                            "ActiveProcessLimit": 0,
                            "Affinity": 0,
                            "PriorityClass": 0,
                            "SchedulingClass": 0
                        },
                        "IoInfo": {
                            "ReadOperationCount": 0,
                            "WriteOperationCount": 0,
                            "OtherOperationCount": 0,
                            "ReadTransferCount": 0,
                            "WriteTransferCount": 0,
                            "OtherTransferCount": 0,
                        },
                        "JobMemoryLimit": 0,
                        "PeakProcessMemoryUsed": 0,
                        "PeakJobMemoryUsed": 0,
                        "ProcessMemoryLimit": mem_size
                    })
                win32job.AssignProcessToJobObject(job,
                                                  win32api.GetCurrentProcess())
            else:
                import resource
                resource.setrlimit(resource.RLIMIT_DATA, (mem_size, mem_size))
        except Exception:
            _logger.error("unable to set memory limit: %s",
                          traceback.format_exc())
    with open(_stdin_name, "rb") as stdin,\
            open(_stdout_name, "wb") as stdout,\
            open(_stderr_name, "wb") as stderr:
        proc = subprocess.Popen(args,
                                stdin=stdin,
                                stdout=stdout,
                                stderr=stderr)
        t_start = time.perf_counter()
        try:
            proc.wait(timeout=time_limit + 1.)
        except subprocess.TimeoutExpired:
            proc.kill()
            proc.returncode = -1
        t_end = time.perf_counter()
        time_elapsed.value = t_end - t_start
        return_val.value = proc.returncode
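
Following the pattern used elsewhere on this page, the same per-process memory limit can also be set by querying the extended-limit record and changing only the relevant fields, instead of building the whole dictionary by hand; a sketch assuming an illustrative 256 MiB limit:

import win32api
import win32job

mem_size = 256 * 1024 * 1024  # illustrative limit in bytes
job = win32job.CreateJobObject(None, "judge_mem_limiter")
info = win32job.QueryInformationJobObject(
    job, win32job.JobObjectExtendedLimitInformation)
info["BasicLimitInformation"]["LimitFlags"] |= \
    win32job.JOB_OBJECT_LIMIT_PROCESS_MEMORY
info["ProcessMemoryLimit"] = mem_size
win32job.SetInformationJobObject(
    job, win32job.JobObjectExtendedLimitInformation, info)
win32job.AssignProcessToJobObject(job, win32api.GetCurrentProcess())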