Example 1
def get_lastinput(self):
    """Get the time since the last user input.

    Returns True when more than 5 seconds have elapsed since the last
    input event (i.e. the user is idle).
    ------------
    GetTickCount()    : milliseconds since system startup
    GetLastInputInfo(): tick count of the last input event
    """
    last_input = round((GetTickCount() - GetLastInputInfo()) / 1000,
                       0)  # idle time in whole seconds
    return (last_input > 5)
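The snippet above omits its imports. Below is a minimal standalone sketch of the same idle-time check in a polling loop, assuming GetTickCount and GetLastInputInfo come from pywin32's win32api module (an assumption; the original does not show where they come from).

import time
from win32api import GetTickCount, GetLastInputInfo  # assumed source of these names (pywin32)

def idle_seconds():
    # GetTickCount(): milliseconds since system startup
    # GetLastInputInfo(): tick count of the last input event
    return (GetTickCount() - GetLastInputInfo()) / 1000.0

if __name__ == '__main__':
    while True:
        print('idle for %.1f s' % idle_seconds())
        time.sleep(1)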
Example 2
def stop_idle(seconds=1, debug=False, debug_statement='user inactive'):
    """
    Detects user inactivity and generates a mouse event to keep Windows awake.

    :param seconds: numeric - idle time in seconds before generating an event
    :param debug: bool - if True, prints debug_statement
    :param debug_statement: statement to print when the user is idle
    """

    last_input = (GetTickCount() - GetLastInputInfo()) / 1000
    if last_input >= seconds:
        if debug:
            print(debug_statement)
        windll.user32.mouse_event(1, 1, 1, 0, 0)  # MOUSEEVENTF_MOVE: nudge the pointer by (1, 1)
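Called once, stop_idle performs only a single check. A hypothetical caller would poll it on a timer so the mouse nudge fires whenever the idle threshold is crossed; the loop below is only a usage sketch, not part of the original snippet, and assumes the same GetTickCount/GetLastInputInfo/windll imports as the function itself.

import time

# Hypothetical driver loop: check once per second and nudge the mouse
# whenever the user has been idle for 60 seconds or more.
if __name__ == '__main__':
    while True:
        stop_idle(seconds=60, debug=True, debug_statement='user inactive, sending mouse event')
        time.sleep(1)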
Example 3
def get_states(upath, repo=None):
    """
    Get the states of a given path in source control.
    """
    global overlay_cache, cache_tick_count
    global cache_root, cache_pdir
    global enabled, localonly
    global includepaths, excludepaths

    #debugf("called: _get_state(%s)", path)
    tc = GetTickCount()

    try:
        # handle some Asian charsets
        path = upath.encode('mbcs')
    except:
        path = upath
    # check if path is cached
    pdir = os.path.dirname(path)
    status = overlay_cache.get(path, '')
    if overlay_cache and (cache_pdir == pdir or cache_pdir and status
                          not in ' r' and path.startswith(cache_pdir)):
        # use cached data when pdir has not changed, or when the cached state is a repo state
        if tc - cache_tick_count < CACHE_TIMEOUT:
            if not status:
                if os.path.isdir(os.path.join(path, '.hg')):
                    add(path, ROOT)
                    status = ROOT
                else:
                    status = overlay_cache.get(pdir + '*', NOT_IN_REPO)
                    add(path, status)
                debugf("%s: %s (cached~)", (path, status))
            else:
                debugf("%s: %s (cached)", (path, status))
            return status
        else:
            debugf("Timed out!!")
            overlay_cache.clear()
            cache_tick_count = GetTickCount()
    # path is a drive
    if path.endswith(":\\"):
        add(path, NOT_IN_REPO)
        return NOT_IN_REPO
    # open repo
    if cache_pdir == pdir:
        root = cache_root
    else:
        debugf("find new root")
        root = paths.find_root(path)
        if root == path:
            if not overlay_cache:
                cache_root = pdir
            add(path, ROOT)
            debugf("%s: r", path)
            return ROOT
        cache_root = root
        cache_pdir = pdir

    if root is None:
        debugf("_get_state: not in repo")
        overlay_cache = {None: None}
        cache_tick_count = GetTickCount()
        return NOT_IN_REPO
    debugf("_get_state: root = " + root)
    hgdir = os.path.join(root, '.hg', '')
    if pdir == hgdir[:-1] or pdir.startswith(hgdir):
        add(pdir, NOT_IN_REPO)
        return NOT_IN_REPO
    try:
        if not enabled:
            overlay_cache = {None: None}
            cache_tick_count = GetTickCount()
            debugf("overlayicons disabled")
            return NOT_IN_REPO
        if localonly and paths.netdrive_status(path):
            debugf("%s: is a network drive", path)
            overlay_cache = {None: None}
            cache_tick_count = GetTickCount()
            return NOT_IN_REPO
        if includepaths:
            for p in includepaths:
                if path.startswith(p):
                    break
            else:
                debugf("%s: is not in an include path", path)
                overlay_cache = {None: None}
                cache_tick_count = GetTickCount()
                return NOT_IN_REPO
        for p in excludepaths:
            if path.startswith(p):
                debugf("%s: is in an exclude path", path)
                overlay_cache = {None: None}
                cache_tick_count = GetTickCount()
                return NOT_IN_REPO
        tc1 = GetTickCount()
        real = os.path.realpath  #only test if necessary (symlink in path)
        if not repo or (repo.root != root and repo.root != real(root)):
            repo = hg.repository(ui.ui(), path=root)
            debugf("hg.repository() took %g ticks", (GetTickCount() - tc1))
    except error.RepoError:
        # We aren't in a working tree
        debugf("%s: not in repo", pdir)
        add(pdir + '*', IGNORED)
        return IGNORED
    except Exception, e:
        debugf("error while handling %s:", pdir)
        debugf(e)
        add(pdir + '*', UNKNOWN)
        return UNKNOWN
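The GetTickCount() usage worth noting here is the cache expiry: cached overlay states live until the tick delta exceeds CACHE_TIMEOUT, at which point the whole cache is flushed. The fragment below is a reduced, self-contained sketch of that pattern; the timeout value and the cached_status helper are illustrative and not taken from the snippet.

from win32api import GetTickCount  # pywin32; windll.kernel32.GetTickCount() would work as well

CACHE_TIMEOUT = 5000  # milliseconds; illustrative value only

overlay_cache = {}
cache_tick_count = 0

def cached_status(path, compute):
    """Return the cached value for path, flushing the cache once it goes stale."""
    global cache_tick_count
    now = GetTickCount()
    if now - cache_tick_count >= CACHE_TIMEOUT:
        overlay_cache.clear()  # drop every entry once the timeout has elapsed
        cache_tick_count = now
    if path not in overlay_cache:
        overlay_cache[path] = compute(path)
    return overlay_cache[path]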
Example 4
        if not repo or (repo.root != root and repo.root != real(root)):
            repo = hg.repository(ui.ui(), path=root)
            debugf("hg.repository() took %g ticks", (GetTickCount() - tc1))
    except error.RepoError:
        # We aren't in a working tree
        debugf("%s: not in repo", pdir)
        add(pdir + '*', IGNORED)
        return IGNORED
    except Exception, e:
        debugf("error while handling %s:", pdir)
        debugf(e)
        add(pdir + '*', UNKNOWN)
        return UNKNOWN

    # get file status
    tc1 = GetTickCount()

    try:
        matcher = scmutil.match(repo[None], [pdir])
        repostate = repo.status(match=matcher,
                                ignored=True,
                                clean=True,
                                unknown=True)
    except util.Abort, inst:
        debugf("abort: %s", inst)
        debugf("treat as unknown : %s", path)
        return UNKNOWN

    debugf("status() took %g ticks", (GetTickCount() - tc1))
    mergestate = repo.dirstate.parents()[1] != node.nullid and \
              hasattr(merge, 'mergestate')
Example 5
def measure(arg,
            commandline,
            delay,
            maxtime,
            outFile=None,
            errFile=None,
            inFile=None,
            logger=None,
            affinitymask=None):

    m = Record(arg)

    # For % CPU usage
    cpu0 = taskManagerCpuTimes()

    ### Use a JobObject so we capture data for child processes as well
    hJob = CreateJobObject(None, 'proctree')

    # For elapsed time try QueryPerformanceCounter otherwise use GetTickCount
    freq = LARGE_INTEGER()
    isCounter = windll.kernel32.QueryPerformanceFrequency(byref(freq))
    if isCounter:
        t0 = LARGE_INTEGER()
        t = LARGE_INTEGER()
        windll.kernel32.QueryPerformanceCounter(byref(t0))
    else:  # the number of milliseconds since windows started
        t0 = GetTickCount()

    try:
        # spawn the program in a separate process
        p = Popen(commandline, stdout=outFile, stderr=errFile, stdin=inFile)
        hProcess = int(p._handle)
        AssignProcessToJobObject(hJob, hProcess)

        # wait for program exit status - time out in milliseconds
        waitexit = WaitForSingleObject(hProcess, maxtime * 1000)

        # For elapsed time try QueryPerformanceCounter otherwise use GetTickCount
        if isCounter:
            windll.kernel32.QueryPerformanceCounter(byref(t))
            m.elapsed = (t.value - t0.value) / float(freq.value)
        else:  # the number of milliseconds since windows started
            t = GetTickCount()
            m.elapsed = (t - t0) / 1000.0

        if waitexit != 0:
            # terminate any child processes as well
            TerminateJobObject(hJob, -1)
            m.setTimedout()
        elif p.poll() == 0:
            m.setOkay()

            ### Use a JobObject so we capture data for child processes as well
            times = QueryInformationJobObject(
                hJob, JobObjectBasicAccountingInformation)
            #ten million - the number of 100-nanosecond units in one second
            totalusr = times['TotalUserTime'] / nanosecs100
            totalsys = times['TotalKernelTime'] / nanosecs100
            m.userSysTime = totalusr + totalsys

            ### "VM Size" seems the more appropriate measure
            ###
            # corresponds to Peak Mem Usage in Task Manager
            # mem = GetProcessMemoryInfo(hProcess)
            # m.maxMem = mem['PeakWorkingSetSize'] / 1024

            # corresponds to VM Size in Task Manager
            mem = QueryInformationJobObject(hJob,
                                            JobObjectExtendedLimitInformation)
            m.maxMem = mem['PeakJobMemoryUsed'] / 1024

            m.cpuLoad = taskManagerCpuLoad(cpu0, taskManagerCpuTimes(),
                                           totalusr)

        elif p.poll() == 2:
            m.setMissing()
        else:
            m.setError()

    except (OSError, ValueError), (e, err):
        if logger: logger.error('%s %s', e, err)
        m.setError()
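The timing logic above (QueryPerformanceCounter when a high-resolution counter exists, GetTickCount otherwise) can be isolated into a small helper. The sketch below uses only ctypes and is an illustration of the pattern, not code from the original; a caller would obtain elapsed = elapsed_timer() before starting the work and read elapsed() afterwards.

from ctypes import windll, byref
from ctypes.wintypes import LARGE_INTEGER

def elapsed_timer():
    """Return a zero-argument function reporting seconds elapsed since this call."""
    freq = LARGE_INTEGER()
    # Prefer the high-resolution performance counter when the hardware provides one.
    if windll.kernel32.QueryPerformanceFrequency(byref(freq)):
        start = LARGE_INTEGER()
        windll.kernel32.QueryPerformanceCounter(byref(start))
        def elapsed():
            now = LARGE_INTEGER()
            windll.kernel32.QueryPerformanceCounter(byref(now))
            return (now.value - start.value) / float(freq.value)
    else:
        # Fall back to GetTickCount(): milliseconds since Windows started.
        start = windll.kernel32.GetTickCount()
        def elapsed():
            return (windll.kernel32.GetTickCount() - start) / 1000.0
    return elapsed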