Example 1
    def duringMeasureHook(self, elapsed):
        if self._pol_move:
            if self._attached_polarizer.status(0)[0] != status.BUSY:
                self._pol_move = False
                self._start_measurement()
            return

        # start new measurement when needed
        spstatus = self._attached_status_in.read(0)
        if spstatus == BUSY:
            return
        self._nfinished += 1

        # after 1 measurement we know approximately how long it takes
        if self._nfinished == 1:
            self._duration = currenttime() - self._started
        # if we have less than an approximate duration to go, stop
        if currenttime() > self._started + self._presettime - self._duration:
            self.log.info('made %d FTIR measurement(s)', self._nfinished)
            self._measuring = False
            return
        # move to next polarizer position if wanted, else start measuring
        if self.polvalues:
            polvalue = self.polvalues[self._polindex % len(self.polvalues)]
            self._polindex += 1
            self.log.debug('moving polarizer to %s', polvalue)
            self._pol_move = True
            self._attached_polarizer.start(polvalue)
        else:
            self._start_measurement()
Example 2
    def doIsCompleted(self):
        if self._started is None:
            return True
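        # if reading the hardware poll time failed, fall back to elapsed wall-clock time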
        if self._dont_stop_flag is True:
            return (currenttime() - self._started) >= self._preset['value']

        if self._stop is not None:
            if currenttime() >= self._stop:
                return True

        if 'TrueTime' in self._preset:
            return self._dev.TrueTime >= self._preset['TrueTime']
        elif 'LiveTime' in self._preset:
            return self._dev.LiveTime >= self._preset['LiveTime']
        elif 'counts' in self._preset:
            return self.doRead(0)[2] >= self._preset['counts']
        try:
            # self.log.warning('poll')
            stop = self.poll[0]
            return stop < 0
        except NicosError:
            self._dont_stop_flag = True
            # self.log.warning('read poll time failed, waiting for other '
            #                  'detector(s)...')
            return False
Example 3
async def check_on_restart():
    with open('dotabase/ongoing_challenges.txt', 'r') as f:
        ongoings = f.readlines()
    for ongoing in ongoings[:]:  # iterate over a copy, since entries may be removed below
        chal = dict(eval(ongoing))
        chalchan = client.get_channel(chal['chnl_id'])
        if chal['start_time'] + chal['comp_time'] >= int(currenttime()):
            checkchallenge(chal, chalchan)
            ongoings.remove(ongoing)
            with open('dotabase/ongoing_challenges.txt', 'w') as f:
                for el in ongoings:
                    f.write(el)
        elif chal['start_time'] + chal['comp_time'] < int(currenttime()):
            reset = 1
            task = asyncio.create_task(
                waitchallenge(
                    int(currenttime()) -
                    (chal['start_time'] + chal['comp_time']), chalchan, reset))

            def check(reaction, user):
                return user.id == chal['discord_user']

            react = await client.wait_for("reaction_add", check=check)
            if str(react[0]) == "<a:Emoticon_obs:593495627342676040>":
                task.cancel()
Example 4
 def isCompleted(self):
     if session.mode == SIMULATION:
         return True
     if not self.stopflag and self.endtime > currenttime():
         # arbitrary choice of max 5s
         session.delay(min(5, self.endtime - currenttime()))
         return False
     if self.duration > 3:
         session.endActionScope()
     return True
Example 5
    def handle_eta(self, data):
        """Handles the "eta" signal."""
        state, eta = data

        if state in (SIM_STATES['pending'], SIM_STATES['running']):
            self.cur_eta = '<calculation in progress>'
        elif state == SIM_STATES['failed']:
            self.cur_eta = '<calculation failed>'
        elif state == SIM_STATES['success'] and eta > currenttime():
            self.cur_eta = formatEndtime(eta - currenttime())
Example 6
def calculate_new_rate():
    all_ids = scrape_all_ids()
    new_rates = {}

    # Look for a page of the category rates.
    for single_id in all_ids:
        start_time = currenttime()
        driver.get(
            f"https://first-website.com/catalog/shops.html?idpat={single_id}")
        WebDriverWait(driver, 20).until(
            EC.presence_of_element_located((By.TAG_NAME, "tr")))
        all_competitors = tuple(driver.find_elements(By.TAG_NAME, "tr")[1:])

        # Scrape rates of the category.
        child_number = 2
        competitors_rates = {}
        for competitor in all_competitors:
            seller = driver.find_element(
                By.CSS_SELECTOR,
                f"body > table > tbody > tr:nth-child({child_number}) > td:nth-child(2) > a"
            ).text
            seller_rate = driver.find_element(
                By.CSS_SELECTOR,
                f"body > table > tbody > tr:nth-child({child_number}) > td:nth-child(4)"
            ).text
            competitors_rates.update(
                {seller.lower(): float(f"+{seller_rate}")})
            child_number += 1

        # Calculate new rate and put it into the overall dictionary.
        rates_after_cut = {
            key: value
            for key, value in competitors_rates.items() if value < MAX_RATE
        }
        rates_of_main_competitors = {
            key: value
            for key, value in rates_after_cut.items()
            if key in main_competitors
        }
        new_rate = round(
            (max(rates_of_main_competitors.values(), default=0) + 0.01), 2)
        new_rates.update({f"rate{single_id}": new_rate})
        end_time = currenttime()

        # Reduce the CPU load by limiting the speed.
        time_difference = end_time - start_time
        if time_difference < 2:
            wait = 2 - time_difference
            sleep(wait)

    return new_rates
Example 7
 def doStatus(self, maxage=0):
     if self.started:
         if self.mode == MASTER and self._attached_input.read(maxage):
             self.started = 0
             self.triggered = currenttime()
             return status.OK, 'triggered'
         else:
             return status.BUSY, 'waiting'
     elif self.triggered:
         if self.mode == MASTER and currenttime() > self.triggered + 5:
             self.triggered = 0
         return status.OK, 'triggered'
     return status.OK, ''
Example 8
    def _put_pv_blocking(self, pvparam, value, update_rate=0.1, timeout=60):
        if epics.ca.current_context() is None:
            epics.ca.use_initial_context()

        pv = self._pvs[pvparam]

        pv.put(value, use_complete=True)

        start = currenttime()
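        # poll until the put reports completion, raising if the timeout is exceeded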
        while not pv.put_complete:
            if currenttime() - start > timeout:
                raise CommunicationError('Timeout in setting %s' % pv.pvname)
            session.delay(update_rate)
Example 9
 def preparePoint(self, num, xvalues):
     if session.mode == SIMULATION and num > self._simpoints:
         session.log.info('skipping %d points...',
                          self._numpoints - self._simpoints)
         duration = session.clock.time - self._sim_start
         session.clock.tick(duration * (self._numpoints - self._simpoints))
         if self._numpoints < 0 and not self._sweepdevices:
             session.log.info('would scan indefinitely, skipping...')
         raise StopScan
     if num == 1:
         self._etime.started = currenttime()
         if self._sweeptargets:
             try:
                 self.moveDevices(self._sweepdevices,
                                  self._sweeptargets,
                                  wait=False)
             except SkipPoint:
                 raise StopScan
     elif self._delay:
         # wait between points, but only from the second point on
         session.action('Delay')
         session.delay(self._delay)
     Scan.preparePoint(self, num, xvalues)
     if session.mode == SIMULATION:
         self._sim_start = session.clock.time
Example 10
    def _handle_msg(self, time, ttlop, ttl, tsop, key, op, value):
        if op not in (OP_TELL, OP_TELLOLD):
            return
        try:
            time = float(time)
        except (ValueError, TypeError):
            time = currenttime()
        try:
            value = cache_load(value)
        except ValueError:
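            # treat an unparseable value as missing; it is flagged as expired below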
            value = None

        if key == 'watchdog/warnings' and self.showwatchdog:
            self._process_warnings(value)
            return

        # self.log.debug('processing %s', [time, ttl, key, op, value])

        if key == self._prefix + 'session/master':
            self._masteractive = value and op != OP_TELLOLD

        if key == self._prefix + 'session/mastersetup':
            self._setups = set(value)
            # reconfigure displayed blocks
            self.reconfigureBoxes()
            self.log.info('reconfigured display for setups %s',
                          ', '.join(self._setups))

        expired = value is None or op == OP_TELLOLD

        # now check if we need to update something
        objs = self._keymap.get(key, [])
        for obj in objs:
            self.signalKeyChange(obj, key, value, time, expired)
Example 11
def getFollowing(user, chan):
    try:
        try:
            url = ("https://api.twitch.tv/kraken/users/" + user +
                   "/follows/channels/" + chan)
            req = urllib2.Request(url)
            req.add_header("Client-ID", tclientid)
            resp = urllib2.urlopen(req)
            page = json.load(resp)
            dateFollowed = page['created_at']
        except:
            return "0"
        #timediff = currenttime()-mktime(strptime(dateFollowed, "%Y-%m-%dT%H:%M:%SZ"))
        #delta = timedelta(seconds=timediff-7200)
        dif = rd.relativedelta(
            datetime.fromtimestamp(currenttime() - 7200),
            datetime.fromtimestamp(
                mktime(strptime(dateFollowed, "%Y-%m-%dT%H:%M:%SZ"))))
        if dif.years != 0:
            return "{0} years, {1} months, {2} days, {3} hrs".format(
                dif.years, dif.months, dif.days, dif.hours)
        if dif.months != 0:
            return "{0} months, {1} days, {2} hrs".format(
                dif.months, dif.days, dif.hours)
        if dif.days != 0:
            return "{0} days, {1} hrs, {2} min".format(dif.days, dif.hours,
                                                       dif.minutes)
        if dif.hours != 0:
            return "{0} hours, {1} minutes".format(dif.hours, dif.minutes)
        return "{0} minutes, {1} seconds".format(dif.minutes, dif.seconds)
    except Exception, e:
        print "getFollowing error at Api.py, info:", e
Example 12
 def add_value(self, vtime, value, real=True, use_scale=True):
     if not isinstance(value, number_types):
         if isinstance(value, string_types):
             value = self.string_mapping.setdefault(
                 value, len(self.string_mapping))
             self.info = ', '.join(
                 '%g=%s' % (v * self.scale + self.offset, k)
                 for (k, v) in sorted(iteritems(self.string_mapping),
                                      key=lambda x: x[1]))
         else:
             return
     elif use_scale:
         value = value * self.scale + self.offset
     n, real_n = self.n, self.real_n
     arrsize = self.data.shape[0]
     self.last_y = value
     # do not add value if it comes too fast
     if real_n > 0 and self.data[real_n - 1, 0] > vtime - self.interval:
         return
     self._last_update_time = currenttime()
     # double array size if array is full
     if n >= arrsize:
         # keep array around the size of maxsize
         if arrsize >= self.maxsize:
             # don't add more points, make existing ones more sparse
             data = self.data[:real_n]
             new_data = lttb.downsample(data[data[:, 0].argsort()],
                                        n_out=arrsize // 2)
             n = self.n = self.real_n = new_data.shape[0]
             # can resize in place here
             new_data.resize((n * 2, 2))
             self.data = new_data
         else:
             # can't resize in place
             self.data = np.resize(self.data, (2 * arrsize, 2))
     # fill next entry
     if not real and real_n < n - 1:
         # do not generate endless amounts of synthesized points,
         # two are enough (one at the beginning, one at the end of
         # the interval without real points)
         self.data[n - 1] = vtime, value
     else:
         self.data[n] = vtime, value
         self.n += 1
         if real:
             self.real_n = self.n
     # check sliding window
     if self.window:
         i = -1
         threshold = vtime - self.window
         while self.data[i + 1, 0] < threshold and i < n:
             if self.data[i + 2, 0] > threshold:
                 self.data[i + 1, 0] = threshold
                 break
             i += 1
         if i >= 0:
             self.data[0:n - i] = self.data[i + 1:n + 1].copy()
             self.n -= i + 1
             self.real_n -= i + 1
     self.signal_obj.timeSeriesUpdate.emit(self)
Example 13
 def ask_wc(self, key, ts, time, ttl):
     ret = set()
     with self._db_lock:
         # look for matching keys
         for dbkey, entries in iteritems(self._db):
             if key not in dbkey:
                 continue
             lastent = entries[-1]
             # check for removed keys
             if lastent.value is None:
                 continue
             if dbkey.startswith('nocat/'):
                 dbkey = dbkey[6:]
             # check for expired keys
             if lastent.ttl:
                 remaining = lastent.time + lastent.ttl - currenttime()
                 op = remaining > 0 and OP_TELL or OP_TELLOLD
                 if ts:
                     ret.add('%r+%s@%s%s%s\n' % (lastent.time, lastent.ttl,
                                                 dbkey, op, lastent.value))
                 else:
                     ret.add(dbkey + op + lastent.value + '\n')
             elif ts:
                 ret.add('%r@%s%s%s\n' %
                         (lastent.time, dbkey, OP_TELL, lastent.value))
             else:
                 ret.add(dbkey + OP_TELL + lastent.value + '\n')
     return ret
Example 14
 def __repr__(self):
     if self.endtime:
         # already started, __repr__ is used for updating status strings.
         return str(timedelta(
             seconds=round(self.endtime - currenttime())))
     else:
         return '%g s' % self.duration
Example 15
 def doStart(self):
     self._enable_gates()
     try:
         self._dev.Stop()
         self._dev.Clear()
         self._dev.Start()
     except NicosError:
         try:
             self._dev.Stop()
             self._dev.Init()
             self._dev.Clear()
             self._dev.Start()
         except NicosError:
             pass
     self._started = currenttime()
     self._lastread = currenttime()
Example 16
    def read(self, maxage=0):
        rois = self.addfile("")
        msg = json.loads(rois)
        # the value is not put in the cache by Tango, so cache it here
        self._cache.put(self, 'value', msg, currenttime(), self.maxage)
        return msg
Example 17
 def lock(self, key, value, time, ttl):
     """Lock handling code, common to both subclasses."""
     with self._lock_lock:
         entry = self._locks.get(key)
         # want to lock?
         req, client_id = value[0], value[1:]
         if req == OP_LOCK_LOCK:
             if entry and entry.value != client_id and \
                (not entry.ttl or entry.time + entry.ttl >= currenttime()):
                 # still locked by different client, deny (tell the client
                 # the current client_id though)
                 self.log.debug(
                     'lock request %s=%s, but still locked by %s',
                     key, client_id, entry.value)
                 return [key + OP_LOCK + entry.value + '\n']
             else:
                 # not locked, expired or locked by same client, overwrite
                 ttl = ttl or 600  # set a maximum time to live
                 self.log.debug('lock request %s=%s ttl %s, accepted',
                                key, client_id, ttl)
                 self._locks[key] = CacheEntry(time, ttl, client_id)
                 return [key + OP_LOCK + '\n']
         # want to unlock?
         elif req == OP_LOCK_UNLOCK:
             if entry and entry.value != client_id:
                 # locked by different client, deny
                 self.log.debug('unlock request %s=%s, but locked by %s',
                                key, client_id, entry.value)
                 return [key + OP_LOCK + entry.value + '\n']
             else:
                 # unlocked or locked by same client, allow
                 self.log.debug('unlock request %s=%s, accepted',
                                key, client_id)
                 self._locks.pop(key, None)
                 return [key + OP_LOCK + '\n']
Example 18
 def _set_next_update(self, message):
     if 'update_interval' in message:
         self._setROParam('statusinterval',
                          message['update_interval'] // 1000)
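         # never move the scheduled update earlier, only later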
         next_update = currenttime() + self.statusinterval
         if next_update > self.nextupdate:
             self._setROParam('nextupdate', next_update)
Example 19
 def resetTimeout(self, target):
     """Method called to reset the timeout when the device is started to
     a new target.
     """
     self._timeoutActionCalled = False
     timesout = self._getTimeoutTimes(self.read(), target, currenttime())
     self._setROParam('_timesout', timesout)
Example 20
 def ask(self, key, ts, time, ttl):
     dbkey = key if '/' in key else 'nocat/' + key
     with self._db_lock:
         if dbkey not in self._db:
             return [key + OP_TELLOLD + '\n']
         lastent = self._db[dbkey][-1]
     # check for already removed keys
     if lastent.value is None:
         return [key + OP_TELLOLD + '\n']
     # check for expired keys
     if lastent.ttl:
         remaining = lastent.time + lastent.ttl - currenttime()
         op = remaining > 0 and OP_TELL or OP_TELLOLD
         if ts:
             return [
                 '%r+%s@%s%s%s\n' %
                 (lastent.time, lastent.ttl, key, op, lastent.value)
             ]
         else:
             return [key + op + lastent.value + '\n']
     if ts:
         return [
             '%r@%s%s%s\n' % (lastent.time, key, OP_TELL, lastent.value)
         ]
     else:
         return [key + OP_TELL + lastent.value + '\n']
Example 21
 def _history(self):
     if self._cache:
         self._cache.addCallback(self, 'value', self._cacheCB)
         self._subscriptions.append(('value', self._cacheCB))
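         # return the cached history covering the last 'window' seconds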
         t = currenttime()
         return self._cache.history(self, 'value', t - self.window, t)
     return []
Example 22
    def put(self, dev, key, value, time=None, ttl=None, flag=''):
        """Put a value for a given device and subkey.

        The value is serialized by this method using `cache_dump()`.
        """
        if ttl == 0:
            # no need to process immediately-expired values
            return
        if time is None:
            time = currenttime()
        ttlstr = ttl and '+%s' % ttl or ''
        dbkey = ('%s/%s' % (dev, key)).lower()
        with self._dblock:
            self._db[dbkey] = (value, time)
        dvalue = cache_dump(value)
        msg = '%r%s@%s%s%s%s%s\n' % (time, ttlstr, self._prefix, dbkey, flag,
                                     OP_TELL, dvalue)
        # self.log.debug('putting %s=%s', dbkey, value)
        self._queue.put(msg)
        self._propagate((time, dbkey, OP_TELL, dvalue))
        if key == 'value' and session.experiment:
            session.experiment.data.cacheCallback(dbkey, value, time)
        # we have to check rewrites here, since the cache server won't send
        # us updates for a rewritten key if we sent the original key
        if str(dev).lower() in self._rewrites:
            for newprefix in self._rewrites[str(dev).lower()]:
                rdbkey = ('%s/%s' % (newprefix, key)).lower()
                with self._dblock:
                    self._db[rdbkey] = (value, time)
                self._propagate((time, rdbkey, OP_TELL, dvalue))
                if key == 'value' and session.experiment:
                    session.experiment.data.cacheCallback(rdbkey, value, time)
Example 23
    def __init__(self,
                 name,
                 interval,
                 scale,
                 offset,
                 window,
                 signal_obj,
                 info=None,
                 mapping=None):
        self.name = name
        self.disabled = False
        self.signal_obj = signal_obj
        self.info = info
        self.interval = interval
        self.window = window
        self.scale = scale
        self.offset = offset

        # [[x, y], [x, y]] array of data points
        self.data = None
        # number of actual data points in the array (the array is larger to
        # be able to add new data efficiently)
        self.n = 0
        # number of real datapoints, not considering "synthesized" ones that
        # extend the last value when no updates are coming
        self.real_n = 0
        # the last value to use for synthesized points
        self.last_y = None
        self.string_mapping = mapping or {}
        self._last_update_time = currenttime()
Example 24
    def new_messages_callback(self, messages):
        json_messages = {}
        for timestamp, msg in messages.items():
            try:
                if isinstance(msg, str):
                    # handle "old style" messages
                    js = json.loads(msg)
                    if 'next_message_eta_ms' in js:
                        self._setROParam('statusinterval',
                                         js['next_message_eta_ms'] // 1000)
                else:
                    message = deserialise_x5f2(msg)
                    js = json.loads(message.status_json)
                    js['update_interval'] = message.update_interval
                    self._setROParam('statusinterval',
                                     message.update_interval // 1000)
                json_messages[timestamp] = js
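                # push the expected time of the next status message forward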
                next_update = currenttime() + self.statusinterval
                if next_update > self.nextupdate:
                    self._setROParam('nextupdate', next_update)
            except Exception as e:
                self.log.warning(
                    'Could not decode message from status topic: %s', e)

        if json_messages:
            self._status_update_callback(json_messages)
Example 25
 def synthesize_value(self):
     if not self.n:
         return
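     # if no real update arrived within one interval, extend the curve with a synthesized point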
     delta = currenttime() - self._last_update_time
     if delta > self.interval:
         self.add_value(self.data[self.n - 1, 0] + delta, self.last_y,
                        real=False, use_scale=False)
Example 26
 def getHTML(self):
     if not self.enabled:
         return ''
     if not self.data or not self.curves:
         return '<span>No data or curves found</span>'
     with self.lock:
         for i, (d, c) in enumerate(zip(self.data, self.curves)):
             try:
                 # add a point "current value" at "right now" to avoid curves
                 # not updating if the value doesn't change
                 now = currenttime()
                 if d[0][-1] < now - 10:
                     self.updatevalues(i, now, d[1][-1])
                 c.x, c.y = self.maybeDownsamplePlotdata(d)
             except IndexError:
                 # no data (yet)
                 pass
     c = self.axes.getCurves()
     self.axes.setWindow(c.xmin, c.xmax, c.ymin, c.ymax)
     if os.path.isfile(self.tempfile):
         os.unlink(self.tempfile)
     gr.beginprint(self.tempfile)
     gr.setwsviewport(0, self.width * 0.0022, 0, self.height * 0.0022)
     try:
         self.plot.drawGR()
     except Exception as err:
         return html.escape('Error generating plot: %s' % err)
     finally:
         gr.endprint()
         gr.clearws()
     with open(self.tempfile, 'rb') as fp:
         imgbytes = fp.read()
     return ('<img src="data:image/svg+xml;base64,%s" '
             'style="width: %sex; height: %sex">' %
             (b64encode(imgbytes).decode(), self.width, self.height))
Example 27
    def _handle_line(self, line):
        # self.log.debug('handling line: %s', line)
        match = msg_pattern.match(line)
        if not match:
            # disconnect on trash lines (for now)
            if line:
                self.log.warning('garbled line: %r', line)
            self.closedown()
            return []
        # extract and clean up individual values
        time, ttlop, ttl, tsop, key, op, value = match.groups()
        key = key.lower()
        value = value or None  # no value -> value gets deleted
        try:
            time = float(time)
        except (TypeError, ValueError):
            # some timestamp is required -- note that this assumes clocks not
            # to be too far out of sync between server and clients
            time = currenttime()
        try:
            ttl = float(ttl)
        except (TypeError, ValueError):
            ttl = None
        # acceptable syntax: either time1-time2 and time1+ttl; convert to ttl
        if ttlop == '-' and ttl:
            ttl = ttl - time

        # dispatch operations to database object
        if op == OP_TELL:
            self.db.tell(key, value, time, ttl, self)
        elif op == OP_ASK:
            if ttl:
                return self.db.ask_hist(key, time, time + ttl)
            else:
                # although passed, time and ttl are ignored here
                return self.db.ask(key, tsop, time, ttl)
        elif op == OP_WILDCARD:
            # time and ttl are currently ignored for wildcard requests
            return self.db.ask_wc(key, tsop, time, ttl)
        elif op == OP_SUBSCRIBE:
            # both time and ttl are ignored for subscription requests,
            # but the return format changes when the @ is included
            if tsop:
                self.ts_updates_on.add(key)
            else:
                self.updates_on.add(key)
        elif op == OP_UNSUBSCRIBE:
            if tsop:
                self.ts_updates_on.discard(key)  # note: discard does not raise
            else:
                self.updates_on.discard(key)
        elif op == OP_TELLOLD:
            # the server shouldn't get TELLOLD, ignore it
            pass
        elif op == OP_LOCK:
            return self.db.lock(key, value, time, ttl)
        elif op == OP_REWRITE:
            self.db.rewrite(key, value)
        return []
Example 28
 def message(self, s):
     global glo_logging
     global glo_log
     if glo_logging:
         lo=open(glo_log,"a")
         lo.write(str(currenttime())+"\n")
         lo.write("### "+s+"\n")
         lo.close()
Example 29
 def doStart(self):
     self._measuring = True
     self._polindex = 0
     self._pol_move = False
     self._nfinished = -1
     self._started = currenttime()
     self._duration = 0
     self._firstfile = self._attached_fileno_in.read(0) + 1
Example 30
 def doReset(self):
     speed = sum(self._readspeeds())
     if speed > 0.5:
         raise NicosError(
             self, 'Attention: It is strictly forbidden to '
             'reset the chopper system if one or more discs '
             'are running!')
     self._setROParam('changetime', currenttime())
Example 31
 def _set_seq_status(self, newstatus=status.OK, newstatusstring='unknown'):
     """Set the current sequence status."""
     oldstatus = self.status()
     self._seq_status = (newstatus, newstatusstring.strip())
     self.log.debug(self._seq_status[1])
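     # push the new status to the cache only if it actually changed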
     if self._cache and oldstatus != self._seq_status:
         self._cache.put(self, 'status', self._seq_status,
                         currenttime(), self.maxage)
Example 32
def getTimeOffset(region="US", path="/enrollment/time.htm"):
	"""
	Calculates the time difference in seconds as
	a floating point number between the local
	host and a Blizzard server
	"""
	from struct import unpack

	host = ENROLL_HOSTS.get(region, ENROLL_HOSTS["default"]) # get the host, or fallback to default
	response = getServerResponse(None, host, path)

	time = int(unpack(">Q", response)[0])
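	# the server replies with milliseconds since the epoch; convert the offset back to seconds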
	difference = time - int(currenttime() * 1000)
	return difference / 1000.0
Example 33
 def run(self):
     global glo_stillalive
     while (glo_stillalive):
         ctime=currenttime()
         while (glo_stillalive):
             #timer
             sleep(1.0)
             if currenttime()-ctime>=self.interval:
                 break
         if glo_stillalive:#don't do this if told to quit
             #do this the simple and dirty way - convert all the filenames to torrents
             # and see if those files are listed in the torrent dir.
             filelist=os.listdir(self.filedir)
             if self.ignoredirs:
                 f=0
                 while f<len(filelist):
                     if os.path.isdir(os.path.join(self.filedir,filelist[f])):filelist=filelist[:f]+filelist[f+1:]
                     else:f+=1
             tfilelist=[make_torrent_filename(fname) for fname in filelist]
             torrlist=os.listdir(self.torrentdir)
             mlist=[]
             for f in range(len(filelist)):
                 if not tfilelist[f] in torrlist:
                     mlist.append(filelist[f])
             for i in mlist:
                 try:
                     if not make_torrent(self.host,self.port,os.path.join(self.filedir,i),self.torrentdir):
                         if self.logging:
                             lo=open(self.log,"a")
                             lo.write(str(currenttime())+":")
                             lo.write("TorrentMakerTimer - unknown error making torrent in '"+self.torrentdir+"' from file '"+i+"'.\n")
                             lo.close()
                 except Exception, e:
                     lo=open(self.log,"a")
                     lo.write(str(currenttime())+":")
                     lo.write("TorrentMakerTimer - "+str(e)+'\n')
                     lo.close()
Example 34
 def display(self, data):
     global glo_logging
     global glo_log
     if glo_logging:
         lo=open(glo_log,"a")
         lo.write(str(currenttime())+"\n")
         if not data:
             self.message('no torrents')
         for x in data:
             ( name, status, progress, peers, seeds, seedsmsg, dist,
               uprate, dnrate, upamt, dnamt, size, t, msg ) = x
             lo.write( '"%s": "%s" (%s) - %sP%s%s%.3fD u%0.1fK/s-d%0.1fK/s u%dK-d%dK "%s" \n' % (
                         name, status, progress, peers, seeds, seedsmsg, dist,
                         uprate/1000, dnrate/1000, upamt/1024, dnamt/1024, msg) )
         lo.close()
     return False
Example 35
def getToken(secret, digits=8, seconds=30, time=None):
	"""
	Computes the token for a given secret
	Returns the token, and the seconds remaining
	for that token
	"""
	from struct import pack, unpack

	if time is None:
		time = currenttime()

	t = int(time)
	msg = pack(">Q", int(t / seconds))
	r = hmac.new(secret, msg, sha1).digest()
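	# dynamic truncation (RFC 4226): the low nibble of the last digest byte selects a 4-byte window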
	k = r[19]

	# Python2 compat
	if isinstance(k, str):
		k = ord(k)

	idx = k & 0x0f
	h = unpack(">L", r[idx:idx+4])[0] & 0x7fffffff
	return h % (10 ** digits), -(t % seconds - seconds)
Example 36
	def timedigest():
		return sha1(str(currenttime()).encode()).digest()
Example 37
def daemonize(program,log):
    """ 
      Turns the currently running program into a daemon, detaching it from
      the console so that it runs in the background.  

      program:
              Filename (not including path) of the program being turned 
              into a daemon.  Used for logging, error reporting, and to 
              write the pid of the daemon to `/var/run/PROGRAM.pid'. 
      log:
              Filename of the log to be used.
     Raises TypeError if a bad parameter is detected.
     Terminates the program if the command fails.
     Returns the pid of the daemon.
    """

    if log is not None and log == "": log = None  # an empty log name disables file logging
    flog = None
    if log and os.path.isdir(os.path.split(log)[0]):    
        flog = open(log,"a")
        # log what we're about to do and fork
        flog.write(str(currenttime())+"\nDaemonizing:now\n")
        flog.flush()
    pid = os.fork()

    # if fork was successful, exit the parent process so it returns 
    try:
        if pid > 0:
            os._exit(0) 
    except OSError:
        if not log is None:
            flog.write(str(currenttime())+"\nERROR: fork failed, daemon not started")
            flog.flush()
            flog.close()
            sys.exit(1) 
            
    # Print my pid into /var/run/PROGRAM.pid
    #unnecessary for testing:
    pid = str(os.getpid())
    filename = "/var/run/" + program + ".pid"
    try:
        out_file = open(filename, "w")
        out_file.write(pid)
        out_file.close()
    except IOError:
        if not log is None:
            flog.write(str(currenttime())+"\nERROR: IOError writing pid "+filename)
            flog.flush()

    # close any open files 
    sys.stdin.close()
    sys.stdout.close()
    sys.stderr.close()
    for i in range(1023):
        try:
            os.close(i)
        except OSError:
            pass
         
    # redirect stdin to /dev/null
    # redirect stdout/err to log files.
    sys.stdin = open('/dev/null')       # fd 0
    #sys.stdout = open(program+'.pyout', 'w') # fd 1
    #sys.stderr = open(program+'.pyerr', 'w') # fd 2
    sys.stdout = open('/dev/null') # fd 1
    sys.stderr = open('/dev/null') # fd 2
    
   
    # disassociate from parent 
    os.chdir(os.environ["HOME"]) #must maintain the home directory to keep the logs together
    os.setsid()
    os.umask(0) 
    try:
        if not log is None:flog.close() # might have been closed by the os.close(#) for loop
    except:pass
    return pid
Example 38
    try:
        if len(sys.argv) < 2:
            printHelp(uiname, defaults)
            sys.exit(1)
        config, args = configfile.parse_configuration_and_args(defaults,
                                      uiname, sys.argv[1:], 0, 1)
        if args:
            config['torrent_dir'] = args[0]
        if not os.path.isdir(config['torrent_dir']):
            raise BTFailure("Warning: "+args[0]+" is not a directory")
    except BTFailure, e:
        #print 'error: ' + str(e) + '\nrun with no args for parameter explanations'
        if glo_logging:
            lo=open(glo_log,"a")
            lo.write(str(currenttime())+"\n")
            lo.write('error: ' + str(e) + '\n')
            lo.close()
        glo_stillalive=False
        sys.exit(1)
    tmt=TorrentMakerTimer(set[0],set[1],set[2],set[5],set[6],glo_logging,glo_log,get_option("arizonabittorrentignoredirs","True").lower()=="true")
    tmt.start()

    LaunchMany(config, HeadlessDisplayer(), 'btlaunchmany')
    glo_stillalive=False
    if exceptions:
        #print '\nEXCEPTION:'
        #print exceptions[0]
        if glo_logging:
            lo=open(glo_log,"a")
            lo.write(str(currenttime())+"\n")