Example #1
    def scandir(self, path, namespaces=None, page=None):
        _path = abspath(normpath(path))
        cache_key = (_path, frozenset(namespaces or ()))
        if cache_key not in self._cache:
            _scan_result = self._wrap_fs.scandir(
                path,
                namespaces=namespaces,
                page=page
            )
            _dir = {info.name: info for info in _scan_result}
            self._cache[cache_key] = {'time': gettime(), 'data': _dir}
        else:
            if self.livetime >= 0:
                if self._cache[cache_key]['time'] + self.livetime < gettime():
                    _scan_result = self._wrap_fs.scandir(
                        path,
                        namespaces=namespaces,
                        page=page
                    )
                    _dir = {info.name: info for info in _scan_result}
                    self._cache[cache_key] = {'time': gettime(), 'data': _dir}
                else:
                    if self.speedup:
                        self._cache[cache_key]['time'] = gettime()

        # Iterate over the cached Info objects, not the entry's bookkeeping dict
        gen_scandir = iter(self._cache[cache_key]['data'].values())
        return gen_scandir
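The example above caches scandir results under a (path, namespaces) key and refetches them once a configurable livetime has passed. A minimal standalone sketch of the same time-stamped cache pattern, assuming gettime is time.time (TTLCache and its methods are illustrative names, not part of the original API):

import os
from time import time as gettime

class TTLCache:
    def __init__(self, livetime=5.0):
        self.livetime = livetime
        self._cache = {}

    def get(self, key, build):
        entry = self._cache.get(key)
        # Rebuild when missing or when the entry has outlived its livetime
        if entry is None or gettime() - entry['time'] > self.livetime:
            entry = {'time': gettime(), 'data': build(key)}
            self._cache[key] = entry
        return entry['data']

cache = TTLCache(livetime=2.0)
print(cache.get('.', os.listdir))  # a second call within 2s hits the cache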
Example #2
 def get_stats(cache):
     now = int(gettime())
     return dict(curr_items=len(cache._dict), total_items=cache._stat_total_items,
                 bytes=cache._data_size, limit_maxbytes=cache.max_data_size,
                 get_hits=cache._stat_get_hits, get_misses=cache._stat_get_misses, evictions=cache._stat_evictions,
                 oldest_item_age=int(gettime())-cache._list.prev.access,
                 cmd_get=cache._stat_cmd_get, cmd_set=cache._stat_cmd_set,
                 time=now, uptime=now-cache._start_time)
Example #3
 def get_stats(cache):
     now = int(gettime())
     return dict(curr_items=len(cache._dict),
                 total_items=cache._stat_total_items,
                 bytes=cache._data_size,
                 limit_maxbytes=cache.max_data_size,
                 get_hits=cache._stat_get_hits,
                 get_misses=cache._stat_get_misses,
                 evictions=cache._stat_evictions,
                 oldest_item_age=int(gettime()) - cache._list.prev.access,
                 cmd_get=cache._stat_cmd_get,
                 cmd_set=cache._stat_cmd_set,
                 time=now,
                 uptime=now - cache._start_time)
Example #4
    def STBTVADSHOWTIME(self, _strValue, _socketClient):
        pf = PublicFunc()
        objParam = {}
        pf.GetStrValue(_strValue, objParam)
        try:
            ip = objParam[0]
            adid = objParam[1]
            roomstatus = objParam[2]
            ad = ktv_tvad_log()
            ad.ad_id = adid
            ad.roomip = ip
            ad.roomstatus = roomstatus
            ad.time = gettime()
            info = localCachManage.GetKtv_tvadinfoByID(adid)
            if info is not None:
                ad.ad_name = info.ad_name

            Ktv_tvadinfoAccess.Ins().Addktv_tvadlog()
            _count = ktv_tvad_count()
            _count.ad_id = adid
            _count.ad_name = ad.ad_name
            _count.roomstatus = ad.roomstatus
            Ktv_tvadinfoAccess.Ins().AddKtv_tvadTimes()
            _socketClient.request.sendall(1)
        except Exception:
            logger.error('ktv_tvadinfoService.STBTVADSHOWTIME caught an exception')
            _socketClient.request.sendall(0)
Example #5
    def _write(self):
        if self.output:
            self.output.Freeze()
            self.lock.acquire()
            for s, style in self.stack:
                if style is None:
                    style = self.black_white
                if style != self.black_white:
                    self.output.StartStyling(self.output.GetLength(), 0xFF)

                # Temporarily deactivate read-only mode on the StyledTextCtrl
                # while adding text: text modifications, even programmatic
                # ones, are disabled when read-only is active
                self.output.SetReadOnly(False)
                self.output.AppendText(s)
                self.output.SetReadOnly(True)

                if style != self.black_white:
                    self.output.SetStyling(len(s), style)
            self.stack = []
            self.lock.release()
            self.output.Thaw()
            self.LastRefreshTime = gettime()
            try:
                self.RefreshLock.release()
            except Exception:
                pass
            newtime = time.time()
            if newtime - self.rising_timer > 1:
                self.risecall(self.output)
            self.rising_timer = newtime
Example #6
    def update(self, time, particles, modules):
        if self.particle_sets == []:
            massgroups = getKeyMasses(particles.mass)
            self.particle_sets = [particles.select(lambda x: x <= massgroups[0], ["mass"])]
            for i in range(1, len(massgroups)):
                self.particle_sets.append(particles.select(
                    lambda x: x > massgroups[i-1] and x <= massgroups[i],
                    ["mass"]
                    ))
            self.particle_sets.append(particles)
        LR = np.asarray(get_lagrangians(self.particle_sets, 10))
        self.datadict["LR"].append(LR[-1,:])
        self.datadict["HalfMass"].append(LR[:,5])
        self.datadict["t"].append(self.convert_nbody.to_nbody(time).number)
        self.datadict["KE"].append(self.convert_nbody.to_nbody(modules[-1].kinetic_energy).number)
        self.datadict["PE"].append(self.convert_nbody.to_nbody(modules[-1].potential_energy).number)
        self.datadict["TE"].append(self.datadict["PE"][-1] + self.datadict["KE"][-1])
        #print LR
        #print LR[:,5]
        
#        self.parent.diagnosticUpdated.emit()

        curt = gettime()
        if curt > self.lasttime + self.UpdateInterval:
            self.parent.diagnosticUpdated.emit()
Example #7
    def __init__(self, producer, debug, subscribe_tick=True):
        """
        Constructor
        @param producer: Object receiving debug values and dispatching them
        to consumers
        @param debug: Flag indicating that Viewer is debugging
        @param subscribe_tick: Flag indicating that viewer needs tick value
        to synchronize
        """
        self.Debug = debug
        self.SubscribeTick = subscribe_tick

        # Flag indicating that consumer value update inhibited
        # (DebugViewer is refreshing)
        self.Inhibited = False

        # List of data consumers subscribed to DataProducer
        self.DataConsumers = {}

        # Time stamp indicating when the last refresh was initiated
        self.LastRefreshTime = gettime()
        # Flag indicating that DebugViewer has acquired the common debug lock
        self.HasAcquiredLock = False
        # Lock for access to the two preceding variables
        self.AccessLock = Lock()

        # Timer to refresh DebugViewer one last time in case a new value
        # was received while refresh was inhibited and no refresh was
        # triggered afterwards
        self.LastRefreshTimer = None
        # Lock for access to the timer
        self.TimerAccessLock = Lock()

        # Set DataProducer and subscribe tick if needed
        self.SetDataProducer(producer)
Example #8
 def __init__(self, producer, debug, subscribe_tick=True):
     """
     Constructor
     @param producer: Object receiving debug values and dispatching them
     to consumers
     @param debug: Flag indicating that Viewer is debugging
     @param subscribe_tick: Flag indicating that viewer needs tick value
     to synchronize
     """
     self.Debug = debug
     self.SubscribeTick = subscribe_tick
     
     # Flag indicating that consumer value update inhibited
     # (DebugViewer is refreshing)
     self.Inhibited = False
     
     # List of data consumers subscribed to DataProducer
     self.DataConsumers = {}
     
     # Time stamp indicating when the last refresh was initiated
     self.LastRefreshTime = gettime()
     # Flag indicating that DebugViewer has acquired the common debug lock
     self.HasAcquiredLock = False
     # Lock for access to the two preceding variables
     self.AccessLock = Lock()
     
     # Timer to refresh DebugViewer one last time in case a new value
     # was received while refresh was inhibited and no refresh was
     # triggered afterwards
     self.LastRefreshTimer = None
     # Lock for access to the timer
     self.TimerAccessLock = Lock()
     
     # Set DataProducer and subscribe tick if needed
     self.SetDataProducer(producer)
Example #9
    def _write(self):
        if self.output:
            self.output.Freeze()
            self.lock.acquire()
            for s, style in self.stack:
                if style is None:
                    style = self.black_white
                if style != self.black_white:
                    self.output.StartStyling(self.output.GetLength(), 0xff)

                # Temporarily deactivate read-only mode on the StyledTextCtrl
                # while adding text: text modifications, even programmatic
                # ones, are disabled when read-only is active
                start_pos = self.output.GetLength()
                self.output.SetReadOnly(False)
                self.output.AppendText(s)
                self.output.SetReadOnly(True)
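                # Measure the appended length from the control itself: STC
                # positions are byte-based, so this stays correct when the
                # appended byte count differs from len(s)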
                text_len = self.output.GetLength() - start_pos

                if style != self.black_white:
                    self.output.SetStyling(text_len, style)
            self.stack = []
            self.lock.release()
            self.output.Thaw()
            self.LastRefreshTime = gettime()
            try:
                self.RefreshLock.release()
            except Exception:
                pass
            newtime = time.time()
            if newtime - self.rising_timer > 1:
                self.risecall(self.output)
            self.rising_timer = newtime
Example #10
 def __init__(cache, max_data_size=DEFAULT_MAX_DATA_SIZE):
     cache._lock = Lock()
     cache._heap = []
     cache._dict = {}
     cache._list = list = Node()
     list.prev = list.next = list.expire = list.key = list.value = None
     list.prev = list.next = list
     list.access = int(gettime())
     cache._data_size = 0
     if not isinstance(max_data_size, int):
         raise TypeError('Max data size must be int. Got: %s' % type(max_data_size).__name__)
     cache.max_data_size = max_data_size
     cache._stat_get_hits = cache._stat_get_misses = cache._stat_evictions = 0
     cache._stat_cmd_get = cache._stat_cmd_set = 0
     cache._stat_total_items = 0
     cache._start_time = int(gettime())
Example #11
 def NewDataAvailable(self, ticks):
     """
     Called by DataProducer for each tick captured
     @param ticks: PLC ticks captured
     All other parameters are passed to the refresh function
     """
     # Stop last refresh timer
     self.TimerAccessLock.acquire()
     if self.LastRefreshTimer is not None:
         self.LastRefreshTimer.cancel()
         self.LastRefreshTimer = None
     self.TimerAccessLock.release()
     
     # Only try to refresh DebugViewer if it is visible on screen and not
     # already refreshing
     if self.IsShown() and not self.Inhibited:
         
         # Try to acquire the common refresh lock if the minimum period
         # between two refreshes has elapsed
         if gettime() - self.LastRefreshTime > REFRESH_PERIOD and \
            DEBUG_REFRESH_LOCK.acquire(False):
             self.StartRefreshing()
         
         # If common lock wasn't acquired for any reason, restart last
         # refresh timer
         else:
             self.StartLastRefreshTimer()
     
     # In the case that DebugViewer isn't visible on screen and has already
     # acquired common refresh lock, reset DebugViewer
     elif not self.IsShown() and self.HasAcquiredLock:
         DebugViewer.RefreshNewData(self)
Example #12
    def NewDataAvailable(self, ticks):
        """
        Called by DataProducer for each tick captured
        @param ticks: PLC ticks captured
        All other parameters are passed to the refresh function
        """
        # Stop last refresh timer
        self.TimerAccessLock.acquire()
        if self.LastRefreshTimer is not None:
            self.LastRefreshTimer.cancel()
            self.LastRefreshTimer = None
        self.TimerAccessLock.release()

        # Only try to refresh DebugViewer if it is visible on screen and not
        # already refreshing
        if self.IsShown() and not self.Inhibited:

            # Try to acquire the common refresh lock if the minimum period
            # between two refreshes has elapsed
            if gettime() - self.LastRefreshTime > REFRESH_PERIOD and \
               DEBUG_REFRESH_LOCK.acquire(False):
                self.StartRefreshing()

            # If common lock wasn't acquired for any reason, restart last
            # refresh timer
            else:
                self.StartLastRefreshTimer()

        # In the case that DebugViewer isn't visible on screen and has already
        # acquired common refresh lock, reset DebugViewer
        elif not self.IsShown() and self.HasAcquiredLock:
            DebugViewer.RefreshNewData(self)
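Both copies of NewDataAvailable above use the same throttling idea: refresh only when a minimum period has elapsed and a shared lock can be taken without blocking, otherwise fall back to a one-shot timer. A condensed sketch of that pattern, assuming gettime is time.time (class and constant names are illustrative):

from threading import Lock, Timer
from time import time as gettime

REFRESH_PERIOD = 0.1   # minimum seconds between two refreshes
REFRESH_LOCK = Lock()  # shared by all viewers

class ThrottledViewer:
    def __init__(self):
        self.last_refresh = gettime()

    def new_data(self):
        # Refresh only if the period elapsed and the shared lock is free;
        # acquire(False) never blocks, it just reports success or failure
        if gettime() - self.last_refresh > REFRESH_PERIOD and \
           REFRESH_LOCK.acquire(False):
            try:
                self.last_refresh = gettime()
                self.refresh()
            finally:
                REFRESH_LOCK.release()
        else:
            # Otherwise retry once after the period with a one-shot timer
            Timer(REFRESH_PERIOD, self.new_data).start()

    def refresh(self):
        print('refreshed at %.3f' % self.last_refresh)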
Example #13
 def OnScrollTimer(self, event):
     if self.ScrollSpeed != 0.:
         speed_norm = abs(self.ScrollSpeed)
         period = REFRESH_PERIOD / speed_norm
         self.ScrollMessagePanel(-speed_norm / self.ScrollSpeed)
         self.LastStartTime = gettime()
         self.ScrollTimer.Start(int(period * 1000), True)
     event.Skip()
Example #14
 def OnScrollTimer(self, event):
     if self.ScrollSpeed != 0.:
         speed_norm = abs(self.ScrollSpeed)
         period = REFRESH_PERIOD / speed_norm
         self.ScrollMessagePanel(-speed_norm / self.ScrollSpeed)
         self.LastStartTime = gettime()
         self.ScrollTimer.Start(int(period * 1000), True)
     event.Skip()
Example #15
 def __init__(cache, max_data_size=DEFAULT_MAX_DATA_SIZE):
     cache._lock = Lock()
     cache._heap = []
     cache._dict = {}
     cache._list = list = Node()
     list.prev = list.next = list.expire = list.key = list.value = None
     list.prev = list.next = list
     list.access = int(gettime())
     cache._data_size = 0
     if not isinstance(max_data_size, int):
         raise TypeError('Max data size must be int. Got: %s' %
                         type(max_data_size).__name__)
     cache.max_data_size = max_data_size
     cache._stat_get_hits = cache._stat_get_misses = cache._stat_evictions = 0
     cache._stat_cmd_get = cache._stat_cmd_set = 0
     cache._stat_total_items = 0
     cache._start_time = int(gettime())
Example #16
 def _find_node(cache, key):
     node = cache._dict.get(key)
     if node is None: return None
     prev, next = node.prev, node.next
     prev.next = next
     next.prev = prev
     expire = node.expire
     if expire is None or expire > int(gettime()): return node
     cache._delete_node(node, unlink=False)
     return None
Example #17
 def _find_node(cache, key):
     node = cache._dict.get(key)
     if node is None: return None
     prev, next = node.prev, node.next
     prev.next = next
     next.prev = prev
     expire = node.expire
     if expire is None or expire > int(gettime()): return node
     cache._delete_node(node, unlink=False)
     return None
Example #18
 def _delete_expired_nodes(cache):
     now = int(gettime())
     heap = cache._heap
     while heap:
         expire, node_ref = heap[0]
         if expire > now: break
         heappop(heap)
         node = node_ref()
         if node is not None:
             expire = node.expire
             if expire is not None and expire <= now: cache._delete_node(node)
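Together, __init__, _find_node and _delete_expired_nodes above sketch a cache that expires entries lazily: a min-heap of (expire, weakref) records tracks deadlines, and heap records whose node is already gone or was re-set are simply skipped. A condensed standalone version of that idea, assuming gettime is time.time (ExpiringStore is an illustrative name; the counter tiebreaker is added so equal deadlines stay orderable):

from heapq import heappush, heappop
from itertools import count
from time import time as gettime
from weakref import ref

class Node(object):
    __slots__ = ('key', 'value', 'expire', '__weakref__')

class ExpiringStore(object):
    def __init__(self):
        self._dict = {}
        self._heap = []          # (expire, tiebreaker, weakref-to-node)
        self._counter = count()

    def set(self, key, value, ttl):
        node = Node()
        node.key, node.value, node.expire = key, value, int(gettime()) + ttl
        self._dict[key] = node
        heappush(self._heap, (node.expire, next(self._counter), ref(node)))

    def sweep(self):
        # Pop heap records whose deadline has passed; a dead weakref means
        # the node is already gone, a later node.expire means it was re-set
        now = int(gettime())
        while self._heap and self._heap[0][0] <= now:
            _, _, node_ref = heappop(self._heap)
            node = node_ref()
            if node is not None and node.expire <= now:
                self._dict.pop(node.key, None)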
Example #19
    def RefreshNewData(self):
        """
        Called to refresh DebugViewer according to values received by data
        consumers
        May be overridden by inherited classes
        Can receive any parameters depending on what is needed by the
        inherited class
        """
        if self:
            # Activate data consumer value update
            self.Inhibit(False)

            # Release common refresh lock if acquired and update
            # last refresh time
            self.AccessLock.acquire()
            if self.HasAcquiredLock:
                DEBUG_REFRESH_LOCK.release()
                self.HasAcquiredLock = False
            if gettime() - self.LastRefreshTime > REFRESH_PERIOD:
                self.LastRefreshTime = gettime()
            self.AccessLock.release()
Example #20
 def RefreshNewData(self):
     """
     Called to refresh DebugViewer according to values received by data
     consumers
     May be overridden by inherited classes
      Can receive any parameters depending on what is needed by the
      inherited class
     """
     if self:
         # Activate data consumer value update
         self.Inhibit(False)
         
         # Release common refresh lock if acquired and update
         # last refresh time
         self.AccessLock.acquire()
         if self.HasAcquiredLock:
             DEBUG_REFRESH_LOCK.release()
             self.HasAcquiredLock = False
         if gettime() - self.LastRefreshTime > REFRESH_PERIOD:
             self.LastRefreshTime = gettime()
         self.AccessLock.release()
Example #21
 def items(cache):
     now = int(gettime())
     result = []
     append = result.append
     list = cache._list
     node = list.next
     while node is not list:
         expire = node.expire
         if expire is not None and expire <= now: cache._delete_node(node)
         elif node.value is not None: append((node.key, node.value))
         node = node.next
     return result
Example #22
 def items(cache):
     now = int(gettime())
     result = []
     append = result.append
     list = cache._list
     node = list.next
     while node is not list:
         expire = node.expire
         if expire is not None and expire <= now: cache._delete_node(node)
         elif node.value is not None: append((node.key, node.value))
         node = node.next
     return result
Example #23
 def _delete_expired_nodes(cache):
     now = int(gettime())
     heap = cache._heap
     while heap:
         expire, node_ref = heap[0]
         if expire > now: break
         heappop(heap)
         node = node_ref()
         if node is not None:
             expire = node.expire
             if expire is not None and expire <= now:
                 cache._delete_node(node)
Example #24
 def __init__(self, output, risecall):
     self.red_white = 1
     self.red_yellow = 2
     self.black_white = wx.stc.STC_STYLE_DEFAULT
     self.output = output
     self.risecall = risecall
     # to prevent rapid fire on rising log panel
     self.rising_timer = 0
     self.lock = Lock()
     self.YieldLock = Lock()
     self.RefreshLock = Lock()
     self.TimerAccessLock = Lock()
     self.stack = []
     self.LastRefreshTime = gettime()
     self.LastRefreshTimer = None
Example #25
 def __init__(self, output, risecall):
     self.red_white = 1
     self.red_yellow = 2
     self.black_white = wx.stc.STC_STYLE_DEFAULT
     self.output = output
     self.risecall = risecall
     # to prevent rapid fire on rising log panel
     self.rising_timer = 0
     self.lock = Lock()
     self.YieldLock = Lock()
     self.RefreshLock = Lock()
     self.TimerAccessLock = Lock()
     self.stack = []
     self.LastRefreshTime = gettime()
     self.LastRefreshTimer = None
Example #26
def normalize(key, value="", expire=None):
    if isinstance(key, tuple): hash_value, key = key
    elif not isinstance(key, str): raise ValueError('Key must be tuple or string. Got: %s' % key.__class__.__name__)
    if len(key) > MAX_KEY_LENGTH: raise ValueError('Key size too big: %d' % len(key))
    if key.translate(char_table, noncontrol_chars): raise ValueError('Key cannot contain spaces or control characters')

    if not isinstance(value, str): raise ValueError('Value must be string. Got: %s' % value.__class__.__name__)
    if len(value) > MAX_VALUE_LENGTH: raise ValueError('Value size too big: %d' % len(value))
    if expire is not None:
        expire = int(expire)
        if expire == 0: expire = None
        elif expire < 0: raise ValueError('Expiration must not be negative')
        elif expire <= MONTH: expire = int(gettime()) + expire
        elif expire <= MONTH * 100: raise ValueError('Invalid expire value: %d' % expire)
    return key, value, expire
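normalize follows the memcached expire convention: 0 means never expire, a value up to MONTH is a relative number of seconds (converted to an absolute timestamp by adding gettime()), values between MONTH and MONTH * 100 are rejected, and anything larger is assumed to already be an absolute timestamp. For example, assuming MONTH is about 30 days of seconds:

key, value, expire = normalize('k', 'v', 60)   # expire == int(gettime()) + 60
key, value, expire = normalize('k', 'v', 0)    # expire is None: never expires
key, value, expire = normalize('k', 'v', 2 * 10 ** 9)   # beyond MONTH * 100: kept as an absolute timestamp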
Example #27
 def write(self, s, style=None):
     if self.lock.acquire():
         self.stack.append((s, style))
         self.lock.release()
         current_time = gettime()
         self.TimerAccessLock.acquire()
         if self.LastRefreshTimer:
             self.LastRefreshTimer.cancel()
             self.LastRefreshTimer = None
         self.TimerAccessLock.release()
         if current_time - self.LastRefreshTime > REFRESH_PERIOD and self.RefreshLock.acquire(False):
             self._should_write()
         else:
             self.TimerAccessLock.acquire()
             self.LastRefreshTimer = Timer(REFRESH_PERIOD, self._timer_expired)
             self.LastRefreshTimer.start()
             self.TimerAccessLock.release()
Example #28
    def StartRefreshing(self):
        """
        Called to initiate a refresh of DebugViewer
        All parameters are passed to refresh function
        """
        # Update last refresh time stamp and flag for common refresh
        # lock acquired
        self.AccessLock.acquire()
        self.HasAcquiredLock = True
        self.LastRefreshTime = gettime()
        self.AccessLock.release()

        # Inhibit data consumer value update
        self.Inhibit(True)

        # Initiate DebugViewer refresh
        wx.CallAfter(self.RefreshNewData)
Example #29
 def StartRefreshing(self):
     """
     Called to initiate a refresh of DebugViewer
     All parameters are passed to refresh function
     """
     # Update last refresh time stamp and flag for common refresh
     # lock acquired
     self.AccessLock.acquire()
     self.HasAcquiredLock = True
     self.LastRefreshTime = gettime()
     self.AccessLock.release()
     
     # Inhibit data consumer value update
     self.Inhibit(True)
     
     # Initiate DebugViewer refresh
     wx.CallAfter(self.RefreshNewData)
Example #30
 def SetScrollSpeed(self, speed):
     if speed == 0.:
         self.ScrollTimer.Stop()
     else:
         speed_norm = abs(speed)
         period = REFRESH_PERIOD / speed_norm
         current_time = gettime()
         if self.LastStartTime is not None:
             elapsed_time = current_time - self.LastStartTime
             if elapsed_time > period:
                 self.ScrollMessagePanel(-speed_norm / speed)
                 self.LastStartTime = current_time
             else:
                 period -= elapsed_time
         else:
             self.LastStartTime = current_time
         self.ScrollTimer.Start(int(period * 1000), True)
     self.ScrollSpeed = speed
Example #31
 def SetScrollSpeed(self, speed):
     if speed == 0.:
         self.ScrollTimer.Stop()
     else:
         speed_norm = abs(speed)
         period = REFRESH_PERIOD / speed_norm
         current_time = gettime()
         if self.LastStartTime is not None:
             elapsed_time = current_time - self.LastStartTime
             if elapsed_time > period:
                 self.ScrollMessagePanel(-speed_norm / speed)
                 self.LastStartTime = current_time
             else:
                 period -= elapsed_time
         else:
             self.LastStartTime = current_time
         self.ScrollTimer.Start(int(period * 1000), True)
     self.ScrollSpeed = speed
Example #32
 def write(self, s, style=None):
     if self.lock.acquire():
         self.stack.append((s, style))
         self.lock.release()
         current_time = gettime()
         self.TimerAccessLock.acquire()
         if self.LastRefreshTimer:
             self.LastRefreshTimer.cancel()
             self.LastRefreshTimer = None
         self.TimerAccessLock.release()
         if current_time - self.LastRefreshTime > REFRESH_PERIOD and self.RefreshLock.acquire(
                 False):
             self._should_write()
         else:
             self.TimerAccessLock.acquire()
             self.LastRefreshTimer = Timer(REFRESH_PERIOD,
                                           self._timer_expired)
             self.LastRefreshTimer.start()
             self.TimerAccessLock.release()
Example #33
    def point_control(self, j, f):
        """Responsável por descobrir quando aconteceu um ponto, de acordo
        com a quantidade de juízes que estão de acordo com o mesmo """

        if self.stopcounting or self.n_of_judge == 0:
            return True

        # Record the moment the click occurred
        time = gettime()

        # Compute the number of judges required for the point to be valid
        j_to_ok = int(math.log(self.n_of_judge, 2)) + 1

        self.sem.acquire()

        # If this judge has not already voted for this point
        if j not in self.flist[f]['j']:

            if self.flist[f]['t'] == 0:
                self.flist[f]['t'] = time

            self.flist[f]['j'].append(j)

            # If the time limit has not expired...
            if time - self.flist[f]['t'] <= self.timeout:
                # If the required number of judges was reached,
                # point for fighter f! (the judge was already recorded
                # above, so no second append is needed)
                if len(self.flist[f]['j']) >= j_to_ok:
                    self.Fight.point_to(f)
                    self.flist[f]['j'] = []
                    self.flist[f]['t'] = 0
            # If the time limit expired, treat this as a new point
            else:
                self.flist[f]['j'] = [j]
                self.flist[f]['t'] = time

        self.sem.release()
Example #34
def normalize(key, value="", expire=None):
    if isinstance(key, tuple): hash_value, key = key
    elif not isinstance(key, str):
        raise ValueError('Key must be tuple or string. Got: %s' %
                         key.__class__.__name__)
    if len(key) > MAX_KEY_LENGTH:
        raise ValueError('Key size too big: %d' % len(key))
    if key.translate(char_table, noncontrol_chars):
        raise ValueError('Key cannot contain spaces or control characters')

    if not isinstance(value, str):
        raise ValueError('Value must be string. Got: %s' %
                         value.__class__.__name__)
    if len(value) > MAX_VALUE_LENGTH:
        raise ValueError('Value size too big: %d' % len(value))
    if expire is not None:
        expire = int(expire)
        if expire == 0: expire = None
        elif expire < 0: raise ValueError('Expiration must not be negative')
        elif expire <= MONTH: expire = int(gettime()) + expire
        elif expire <= MONTH * 100:
            raise ValueError('Invalid expire value: %d' % expire)
    return key, value, expire
Example #35
    def update(self, time, particles, modules):
        '''This function needs to be overridden by subclasses'''
        self.time.append(time.number)
        self.KE.append( self.convert_nbody.to_nbody(modules[0].kinetic_energy).number )   #get_kinetic( particles ) )
        self.PE.append( self.convert_nbody.to_nbody(modules[0].potential_energy).number ) #.get_potential(particles) )
        self.TE.append( self.KE[-1] + self.PE[-1] )
        self.VR.append( self.KE[-1] / self.PE[-1] )
        print time, modules[0].kinetic_energy
        
        #Add xdata
        self.pKE.set_xdata(self.time)
        self.pPE.set_xdata(self.time)
        self.pTE.set_xdata(self.time)
        self.pVR.set_xdata(self.time)

        #Add ydata
        self.pKE.set_ydata(self.KE)
        self.pPE.set_ydata(self.PE)
        self.pTE.set_ydata(self.TE)
        self.pVR.set_ydata(self.VR)

        self.p1min = min(self.p1min, self.PE[-1], self.KE[-1])
        self.p1max = max(self.p1max, self.PE[-1], self.KE[-1])
        self.p2min = min(self.p2min, self.TE[-1], self.VR[-1])
        self.p2max = max(self.p2max, self.TE[-1], self.VR[-1])

        #Set the Limits
        self.p1.set_xlim(self.time[0],self.time[-1])
        self.p1.set_ylim(self.p1min*1.1,self.p1max*1.1)
        self.p2.set_xlim(self.time[0],self.time[-1])
        self.p2.set_ylim(self.p2min*1.1,self.p2max*1.1)
        sleep(33.0/1000.0)
        
        #pylab.draw()
        curt = gettime()
        if curt > self.lasttime + self.interval:
            self.parent.diagnosticUpdated.emit()
Example #36
    def redraw(self):
        if not self.graphVisible:
            self.graphVisible = True
            pylab.show()

#        self.pKE.set_xdata(self.datadict["t"])
#        self.pKE.set_ydata(self.datadict["KE"])
#        self.pTE.set_xdata(self.datadict["t"])
#        self.pTE.set_ydata(self.datadict["TE"])
#        self.pPE.set_xdata(self.datadict["t"])
#        self.pPE.set_ydata(self.datadict["PE"])
    #    pylab.subplot(211)

        curt = gettime()
        if curt > self.lasttime + self.UpdateInterval:
            nplr = np.asarray(self.datadict["LR"])
            nphm = np.asarray(self.datadict["HalfMass"])
            pylab.clf()
            for i in xrange(10):
#                self.lrlines[i].set_xdata(self.datadict["t"])
#                self.lrlines[i].set_ydata(nplr[:,i]) 
#                print dir(self.lrlines[i])
#                print self.lrlines[i]._y
#                pylab.subplot(211)
                pylab.plot(self.datadict["t"], nplr[:,i])
#                pylab.subplot(212)
#                print nplr[i,5]
#                print nphm.shape
 #               pylab.plot(self.datadict["t"], nphm[:,i])
#            print dir(self.AXES)
#            import sys
#            sys.exit()
#                self.lrlines[i].autoscale()
#                pylab.margins()
            
            pylab.draw()
            self.lasttime = curt
Example #37
def timel(thunk):
    start = gettime()
    res = thunk()
    return (gettime() - start, res)
Example #38
 def _build(self, key, builder):
     start = gettime()
     val = builder()
     end = gettime()
     return WeightedCountingEntry(val, end - start)
Example #39
 def isexpired(self):
     t = gettime()
     return t >= self.weight
Example #40
 def _build(self, key, builder):
     val = builder()
     entry = AgingEntry(val, gettime() + self.maxseconds)
     return entry
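The two _build variants above belong to different cache policies: WeightedCountingEntry weights an entry by how long it took to build, while AgingEntry stamps it with an absolute deadline that isexpired compares against gettime(). A minimal sketch of the aging policy, assuming gettime is time.time (AgingCache and getorbuild are illustrative names):

from time import time as gettime

class AgingEntry(object):
    def __init__(self, value, expirationtime):
        self.value = value
        self.weight = expirationtime   # absolute deadline used by isexpired

    def isexpired(self):
        return gettime() >= self.weight

class AgingCache(object):
    def __init__(self, maxseconds=10.0):
        self.maxseconds = maxseconds
        self._entries = {}

    def getorbuild(self, key, builder):
        # Rebuild when the entry is missing or past its deadline
        entry = self._entries.get(key)
        if entry is None or entry.isexpired():
            entry = AgingEntry(builder(), gettime() + self.maxseconds)
            self._entries[key] = entry
        return entry.value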
Example #41
 def _build(self, key, builder):
     start = gettime()
     val = builder()
     end = gettime()
     return WeightedCountingEntry(val, end-start)
Example #42
def timel(thunk):
    start = gettime()
    res = thunk()
    return (gettime()-start, res)
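timel returns an (elapsed, result) pair for a zero-argument thunk; the time wrapper shown in a later example does the same while forwarding arguments. Usage might look like:

elapsed, result = timel(lambda: sum(range(10 ** 6)))
print('summed to %d in %.3f seconds' % (result, elapsed))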
Example #43
def get_manually_installed_packages(date=None):
    """Get list of manually installed packages
    """

    all_packages = get_all_packages()

    if date is None:
        # Get time of fresh install
        t0 = os.path.getctime(initial_status)
        #print ctime(t0)
    else:
        # Use supplied date as starting point
        t0 = float(date)

    start_date = strftime('%Y%m%d', localtime(t0))
    end_date = strftime('%Y%m%d', localtime(gettime()))

    # Remove packages prior to initial system status
    new_packages = []
    for package in all_packages:
        if package[0] > t0:
            new_packages.append(package)

    # Drop the raw timestamp field used for filtering
    new_packages = [p[1:] for p in new_packages]

    # Remove automatically installed packages and add
    # detailed description to each entry
    packages = []
    for package in new_packages:
        date = package[0]
        time = package[1]
        name = package[2]
        cmd = 'aptitude show %s' % name
        p = Popen(cmd, shell=True,
                  stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)

        # Extract Auto flag and description
        if p.stdout is not None:
            info = p.stdout.read().strip()
            lines = info.split('\n')
            auto = False
            description = 'None'
            for line in lines:
                fields = line.split(':')
                if fields[0].startswith('Automatically'):
                    auto = fields[1].lower().strip() == 'yes'
                if fields[0].startswith('Description'):
                    description = fields[1].replace(',', ' -')
            if not auto:
                package.append(description)
                packages.append(package)
                print '%s %s %s: %s' % (date, time, name, description)
        else:
            msg = 'Package %s had no description' % name
            raise Exception(msg)

    Nall = len(all_packages)
    Nnew = len(new_packages)
    Nman = len(packages)
    print('----------------------------')
    print('Total number of packages: %i' % Nall)
    print('Number of new packages: %i' % Nnew)
    print('Number of packages installed manually: %i' % Nman)
    print('')

    return packages, start_date, end_date
Example #44
        else:
            username, password, recipient = getAccount()
            saveAccount(args.file, username, password, recipient)
    else:
        if (args.file is None):
            username, password, recipient = getAccount()
        else:
            username, password, recipient = loadAccount(args.file)

    sleeptime = args.minutes * 60
    if (args.port is None):
        serverPort = args.server
    else:
        serverPort = args.server + ":" + str(args.port)
    external_ip = ""
    hostname = gethostname()
    logger.info("Server Started")
    signal.signal(signal.SIGINT, exit_handler)
    print 'Press Ctrl+C to exit'
    while True:
        timestamp = gettime()
        logger.info('STUN request')
        nat_type, new_external_ip, external_port = stun.get_ip_info()
        if (new_external_ip is None):
            logger.error('STUN request failed')
        elif (new_external_ip != external_ip):
            logger.info('New IP %s' % new_external_ip)
            if (mailIP(new_external_ip, timestamp)):
                external_ip = new_external_ip
        sleep(sleeptime)
Example #45
def main():

  # pull in the hyperparameters for the neural network
  parser = argparse.ArgumentParser()
  parser.add_argument('config_file')
  parser.add_argument('--retrain',help='retrain model',action='store_true')
  parser.add_argument('--val_only',help='generate validation output only',action='store_true')
  parser.add_argument('--train_fullres',help='train on full resolution spectra',action='store_true')
  parser.add_argument('--disable_wp',help='disable weight propagation',action='store_true')

  args       = parser.parse_args()
  retrain    = args.retrain
  use_wp     = (not args.disable_wp)
  val_only   = args.val_only
  full_res   = args.train_fullres

  configname     = pathsplit(args.config_file)[1].replace('.json','')
  config         = json_load_ascii(args.config_file)
  wavelengthfile = config["wavelength_file"]
  names          = config['inputvector_order']
  trainfile      = config['trainfile']
  trainfile_rs   = config.get('trainfile_rs',None)

  if 'alb' not in names:
    names.append('alb')

  # assume outdir is always the same as trainfile (not trainfile_rs)
  outdir,trainf = pathsplit(trainfile)
  
  wl_inst, fwhm_inst = load_wavelength_file(wavelengthfile)

  # prm20151026t173213_libRadtran.mat (full res) shape = (9072, 7101)
  # prm20151026t173213_libRadtran_PRISM_rs.mat (instrument) shape = (9072, 246)

  if full_res:
    # Train on full-resolution RTM channels, rather than (downsampled)
    # instrument channels
    print('loading trainfile: "%s"'%str((trainfile)))
    D = loadmat(trainfile)
    wl =  D['wl'].squeeze()
    assert(len(wl)>len(wl_inst))
  else:
    # Load the resampled instrument channels
    print('loading trainfile_rs: "%s"'%str((trainfile_rs)))
    D  = loadmat(trainfile_rs)
    wl =  D['wl'].squeeze()
    assert(len(wl)==len(wl_inst))
    assert((wl==wl_inst).all())
    
  inputs = s.float32(D['input'])
  print('inputs.shape: "%s"'%str((inputs.shape)))
    
  tgts = D['rho']
  
  # dimensionality of state space = n_inputs
  n_inputs = len(names)
  print('names: "%s"'%str((names)))
  assert(n_inputs == inputs.shape[1])
  
  n_samples = len(inputs)
  n_wl = len(wl)

  # construct train/test partitions
  random.seed(42)    
  samp_idx = s.arange(n_samples)    
  tr_mask = np.zeros(n_samples,dtype=np.bool8)
  stratify_inputs=True
  if stratify_inputs:
    uniq = []
    trinputs,teinputs = [],[]
    for i in range(n_inputs):
      # hold out central unique value of each variable, train on rest
      holdi = np.unique(inputs[:,i])
      teinputs.append(np.array([holdi[len(holdi)//2]]))
      trinputs.append(np.setdiff1d(holdi,teinputs[-1]))
      print('i,trinputs[i],teinputs[i]: "%s"'%str((i,trinputs[i],
                                                   teinputs[i])))
      uniq.append(holdi)
    
    # NOTE: leave all albedo values in training set
    # partition on remaining (4) states
    for i in range(n_inputs):
      if not names[i].startswith('alb'):
        tr_mask |= np.isin(inputs[:,i],teinputs[i])
    # invert mask to get training indices 
    tr_mask = ~tr_mask
  else:
    #  validate on (100*p)% of the data
    p = 0.2
    random.shuffle(samp_idx)
    tr_mask[:int(n_samples*(1-p))] = 1
    
  tr_idx = samp_idx[tr_mask]
  val_idx = samp_idx[~tr_mask]
  n_samplesv = len(val_idx)
  print('n_samples: %d'%(n_samples))
  print('n_train:   %d'%(len(tr_idx)))
  print('n_val:     %d'%(n_samplesv))
  print('n_inputs:  %d'%(n_inputs))
  print('n_wl:      %d'%(n_wl))
  
  # initialize model
  # n_layers = n_hidden_layers + 1 (output)
  n_layers = 2
  n_hidden = 55
  if n_layers==2:
    weight_labs = ['input','output']
    layers = (n_hidden,)
  else:
    weight_labs = ['input','hidden','output']
    layers = (n_hidden,n_hidden,)

  # halfwidth of overlapping channel input range
  # n_half == 0 -> monochromatic
  # n_half == 1->3 channel averaging, 2->5 channel averaging, ...
  n_half = 0
  average_over = True # only valid if n_half > 0
  n_over = 2*n_half+1

  # 'auto'== 200 samples/batch
  batch_size = 'auto' 

  # train first subnetwork for many more epochs to ensure convergence
  init_max_iter = 500
  max_iter = 500

  long_train = False
  if long_train:
    init_max_iter *= 4
    max_iter *= 4

  # compute validation accuracy every val_step epochs
  init_step = 100
  val_step = 25

  # early stopping for val models, init via weight propagation
  es_val = True
  # early stopping for final models, initialized with val weights
  es_fin = True 

  # tol == -1 -> train until max_iter 
  tol_init = -1 
  tol_val = 1e-200 if es_val else -1
  tol_fin = 1e-200 if es_fin else -1

  # use a small percentage of training data for early stopping
  es_percent = 0.1 

  # set early stopping=True and disable on case-by-case basis
  mlpparms = dict(hidden_layer_sizes=layers, activation='relu', solver='adam',
                  alpha=1e-5, batch_size=batch_size, learning_rate='adaptive',
                  learning_rate_init=0.001, power_t=0.5, max_iter=max_iter,
                  random_state=42, early_stopping=True, tol=tol_init,
                  warm_start=False, momentum=0.9, nesterovs_momentum=True,
                  shuffle=False, validation_fraction=es_percent,
                  beta_1=0.9, beta_2=0.999, epsilon=1e-10, verbose=False,
                  n_iter_no_change=10)
  model = MLP(**mlpparms) 

  # map state parameter values to unit range
  inputs = normalize_inputs(inputs,names,clip=False)
  
  val_loss = dict(mse=[],mae=[])

  # pick a few validation toa spectra at varying mad to show in detail
  val_tgts = tgts[val_idx]
  val_dtgts = np.diff(val_tgts,1)
  val_dtgts_med = np.median(val_dtgts,axis=0)
  val_dtgts_mad = abs(val_dtgts-val_dtgts_med).sum(axis=1)
  sorti = np.argsort(val_dtgts_mad)
  val_qper = [0.25,0.5,0.75]
  val_toaidx = [sorti[int(n_samplesv*qp)] for qp in val_qper] 
  val_toalab = ['argq%d'%(qi*100) for qi in val_qper]
  val_toalab = dict(zip([sorti[0]]+val_toaidx+[sorti[-1]],
                        ['argmin']+val_toalab+['argmax']))
  for ridx,rlab in val_toalab.items():
    print('val_dtgts_mad[%s]: "%s"'%(rlab,str((val_dtgts_mad[ridx]))))
    print('inputs[%s]: "%s"'%(rlab,str((inputs[val_idx[ridx]]))))

  # bookkeeping for selected val_toa spectra + predictions
  val_toatrue = dict([(idx,s.zeros(n_wl)) for idx in val_toaidx])
  val_toapred = dict([(idx,s.zeros(n_wl)) for idx in val_toaidx])
  val_toamse = dict([(idx,s.zeros(n_wl)) for idx in val_toaidx])

  plot_toadat=False
  if plot_toadat:
    dwl=wl[:val_dtgts.shape[1]]
    fig0,ax0 = pl.subplots(2,1,sharex=True,sharey=False)
    for ridx,rlab in val_toalab.items():
      ax0[0].plot(wl,val_tgts[ridx],label=rlab)
      ax0[1].plot(dwl,val_dtgts[ridx],label='diff(%s)'%rlab)

    ax0[0].legend()
    ax0[1].legend()
    pl.show()
  
  accum = s.zeros(n_wl)

  modelclass =  '_'.join(['mlp']+list(map(str,layers))+[str(n_over)])
  if long_train:
    modelclass += '_longtr'
  if not use_wp:
    modelclass += '_nowp'
    
  modeldir = pathjoin(outdir,modelclass)
  print('modeldir: "%s"'%str((modeldir)))  
  if not pathexists(modeldir):
    os.makedirs(modeldir)

  log_filename = 'train_pid%s.log'%str(os.getpid())
  log_file = pathjoin(modeldir,log_filename)
  print('Writing log_file=%s'%log_file)
  sleep(1)

  log_fid = open(log_file,'w')
  print('# c, iter, mse, mae, time',file=log_fid)
      
  modelprefix = pathjoin(modeldir,splitext(trainf)[0])
    
  modelbase = modelprefix+'_c%s.pkl'
  init_modelbase = modelprefix+'_init_c%s.pkl'
  fin_modelbase = modelprefix+'_fin_c%s.pkl'

  # weight/bias data bookkeeping
  W,b = {},{}
  
  # weight/bias output files
  Wf,bf = {},{}
  
  # validation weights/biases constructed on training set,
  # assessed on validation set
  W['val'] = [np.zeros([n_wl,n_inputs,n_hidden]),
              np.zeros([n_wl,n_hidden,n_over])]
  b['val'] = [np.zeros([n_wl,n_hidden]),
              np.zeros([n_wl,n_over])]
  
  # "final" weights/biases constructed using *all* states
  # NOTE: use these during deployment to isofit/rt_nn.py
  W['fin'] = [np.zeros([n_wl,n_inputs,n_hidden]),
              np.zeros([n_wl,n_hidden,n_over])]
  b['fin'] = [np.zeros([n_wl,n_hidden]),
              np.zeros([n_wl,n_over])]

  # output file paths for validation/final outputs
  # TODO (BDB, 04/17/19): make multi_layer consistent 
  Wf['val'] = [modelprefix+'_W%dv.npy'%l for l in [1,2]]
  bf['val'] = [modelprefix+'_b%dv.npy'%l for l in [1,2]]
  
  Wf['fin'] = [modelprefix+'_W%d.npy'%l for l in [1,2]]
  bf['fin'] = [modelprefix+'_b%d.npy'%l for l in [1,2]]

  if n_layers==3:
    Wf['val'] = [Wf['val'][0],modelprefix+'_Wmv.npy',Wf['val'][1]]
    bf['val'] = [bf['val'][0],modelprefix+'_bmv.npy',bf['val'][1]]
    Wf['fin'] = [Wf['fin'][0],modelprefix+'_Wm.npy',Wf['fin'][1]]
    bf['fin'] = [bf['fin'][0],modelprefix+'_bm.npy',bf['fin'][1]]
    
    W['val'] = [W['val'][0], np.zeros([n_wl,n_hidden,n_hidden]), W['val'][1]]
    b['val'] = [b['val'][0], np.zeros([n_wl,n_hidden]), b['val'][1]]
    W['fin'] = [W['fin'][0], np.zeros([n_wl,n_hidden,n_hidden]), W['fin'][1]]
    b['fin'] = [b['fin'][0], np.zeros([n_wl,n_hidden]), b['fin'][1]]
  
  abserr = dict(fin=np.zeros([n_samples,len(wl)]), val=np.zeros([n_samplesv,len(wl)]))
  sqderr = dict(fin=np.zeros([n_samples,len(wl)]), val=np.zeros([n_samplesv,len(wl)]))

  # initialize model on initial channel(s)
  cr = s.arange(0,n_over)

  tr_input = inputs[tr_idx]
  val_input = inputs[val_idx]
  tr_tgts = tgts[tr_idx,cr]
  val_tgts = tgts[val_idx,cr]

  init_model_file = init_modelbase%str(0)
  model_params = model.get_params(deep=True)
  model_init = None
  model_init_time = gettime()
  if not pathexists(init_model_file) or retrain:
    fit_init = fit(model, init_model_file, tr_input, tr_tgts, val_input,
                   val_tgts, init_max_iter, es_tol=tol_init,
                   val_step=init_step)
                   
    model_init,model_init_err = fit_init
    model_init_mse,model_init_mae = model_init_err
    model_init_time = gettime()-model_init_time
    model_init_iter = model_init.n_iter_
    print('%d, %d, %.16f, %.16f, %d'%(-1,model_init_iter,model_init_mse,
                                      model_init_mae,model_init_time),
          file=log_fid)
    log_fid.flush()
  else:
    model_init = joblib.load(init_model_file)
    print('loaded',init_model_file)
    
  models = [model_init]
  model_file = modelbase%str(0)
  
  # train channelwise subnetworks
  for c in range(n_wl):    
    # define channel range (cmin==cmax for monochromatic)
    n_off = n_wl-c
    if c < n_half+1:
      cmin,cmax = 0,n_over-1
    elif n_off < n_half+1:
      cmin,cmax = n_wl-n_over,n_wl-1
    else:
      cmin,cmax = c-n_half,c+n_half
      
    cr = s.arange(cmin,cmax+1) if (n_half==0 or average_over) else c
    
    print('\n##### Training subnetwork for center channel %d, wl=[%.2f,%.2f] ##########'%(c,wl[cmin],wl[cmax]))
    # grab the training/validation targets for the current channel(s)  
    fin_tgts = tgts[:,cr].reshape([-1,n_over])
    tr_tgts = tgts[tr_idx,cr].reshape([-1,n_over])
    val_tgts = tgts[val_idx,cr].reshape([-1,n_over])

    # update model file after storing the previous file
    prev_model_file = None
    if use_wp:
      prev_model_file = model_file if c!=0 else init_model_file
    model_file = modelbase%str(c)

    model_c_time = gettime()
    if not pathexists(model_file) or retrain:
      # train new model, save to model_file
      model_c = clone(model)      
      model_c.set_params(**model_params)
      fit_c = fit(model_c, model_file, tr_input, tr_tgts, val_input,
                  val_tgts, max_iter, val_step=val_step,
                  es_tol=tol_val, prev_model_file=prev_model_file)
      model_c,model_c_err = fit_c
      model_c_mse,model_c_mae = model_c_err
      model_c_iter = model_c.n_iter_
      model_c_time = gettime()-model_c_time
    else:
      # restore from model_file
      model_c = joblib.load(model_file)
      val_preds = model_c.predict(val_input).reshape(val_tgts.shape)
      model_c_iter = model_c.n_iter_
      model_c_mse = mse(val_tgts,val_preds)
      model_c_mae = mae(val_tgts,val_preds)
      model_c_time = gettime()-model_c_time

    print('%d, %d, %.16f, %.16f, %d'%(c,model_c_iter,model_c_mse,model_c_mae,
                                      model_c_time),file=log_fid)
    log_fid.flush()

    if c==0 and model_init is not None:
      # replace initial model with refined version
      models[0] = model_c
    else:
      models.append(model_c)

    # save the output validation weights / biases
    for l in range(n_layers):
      W['val'][l][c] = np.array(model_c.coefs_[l])
      b['val'][l][c] = np.array(model_c.intercepts_[l])


    val_preds = model_c.predict(val_input).reshape(val_tgts.shape)
    #print('cr,val_preds.shape,val_tgts.shape: "%s"'%str((cr,val_preds.shape,
    #                                                    val_tgts.shape)))
    
    # generate detailed summaries for val_toaidx spectra
    for ri in val_toaidx:
      if n_half==0 or average_over:
        # model generates predictions for 0 or more adjacent channels and
        # we average the predictions for all generated channels
        predi,truei = val_preds[ri],val_tgts[ri]
      else:
        # model generates predictions for 1 or more adjacent channels but
        # we only consider the target channel in generating output
        # the target channel is centered unless c is a first/last channel
        predi,truei = val_preds[ri][c-cmin],val_tgts[ri][c-cmin]

      val_toaabserri = np.abs(predi-truei)
      val_toatrue[ri][cr] = truei
      val_toapred[ri][cr] += predi
      val_toamse[ri][cr] += (val_toaabserri*val_toaabserri)

    # keep track of how many times we generate a prediction for each channel
    # (accum == ones(n_wl) for the monochromatic case)
    accum[cr] += 1
    val_abserr = np.abs(val_tgts-val_preds)
    val_sqderr = val_abserr*val_abserr

    sqderr['val'][:,cr] += val_sqderr
    abserr['val'][:,cr] += val_abserr

    # track the mean/std/median/mad of the mse and mae
    val_loss['mse'].append([np.mean(val_sqderr),np.std(val_sqderr),
                            np.median(val_sqderr),mad(val_sqderr)])
    val_loss['mae'].append([np.mean(val_abserr),np.std(val_abserr),
                            np.median(val_abserr),mad(val_abserr)])

    if val_only: # skip training "final" model
      print('val_only==True, skipping production model fit')
      continue

    print('\n##### Training full subnetwork for center channel %d, wl=[%.3f,%.3f] ##########'%(c,wl[cmin],wl[cmax]))
    fin_model_file = fin_modelbase%(str(c))
    fin_model_filename = pathsplit(fin_model_file)[1]
    model_f_time = gettime()
    if not pathexists(fin_model_file) or retrain:
      # train "final" production model on *all* inputs
      # start with converged validation model for this channel
      model_f = deepcopy(model_c)
      model_f.fit(inputs, fin_tgts)
      model_f_time = gettime()-model_f_time
      joblib.dump(model_f, fin_model_file)
      print('saved',fin_model_filename)
    else:
      model_f = joblib.load(fin_model_file)
      print('loaded',fin_model_filename)

    fin_preds = model_f.predict(inputs).reshape(fin_tgts.shape)
    fin_abserr = np.abs(fin_tgts-fin_preds)
    fin_sqderr = fin_abserr*fin_abserr
    sqderr['fin'][:,cr] += fin_sqderr
    abserr['fin'][:,cr] += fin_abserr

    # extract the final model weights / biases
    for l in range(n_layers):
      W['fin'][l][c] = np.array(model_f.coefs_[l])
      b['fin'][l][c] = np.array(model_f.intercepts_[l])

  # end of channelwise training loop
  
  evalkeys = ['val'] if val_only else ['val','fin']
  for evalkey in evalkeys:
    abserr[evalkey] = abserr[evalkey] / accum 
    sqderr[evalkey] = sqderr[evalkey] / accum 

    outmat = modelprefix+'_%s_sqderr.mat'%evalkey
    savemat(outmat,{'training_idx':tr_idx,
                    'validation_idx':val_idx,
                    'sqderr':sqderr[evalkey],
                    'abserr':abserr[evalkey],
                    'wl':wl})
    print('saved',outmat)
    Woutf,boutf = Wf[evalkey],bf[evalkey]
    Wout,bout = W[evalkey],b[evalkey]    
    for l,(Wl,bl) in enumerate(zip(Woutf,boutf)):
      np.save(Wl,Wout[l])
      np.save(bl,bout[l])
      
    print('saved %s weights to: "%s"'%(evalkey,str(((Woutf[0],boutf[0]),(Woutf[1],boutf[1])))))
    
  # plot the selected val_toaidx predictions vs. actuals
  for idx in val_toaidx:
    toatitle = ', '.join('%s=%.2f'%(n,v) for n,v in zip(names,inputs[idx]))

    toatrue = val_toatrue[idx]
    toapred = val_toapred[idx] / accum
    toamse  = val_toamse[idx] / accum

    toafig = modelprefix+'_toa%d_%s_pred.pdf'%(val_idx[idx],
                                               val_toalab[idx])
    fig,ax = pl.subplots(3,1,sharex=True,sharey=False)
    ax[0].plot(wl,toatrue)
    ax[0].plot(wl,toapred,c='r',ls=':')
    ax[1].plot(wl[:-1],s.diff(toatrue))
    ax[1].plot(wl[:-1],s.diff(toapred),c='r',ls=':')
    ax[2].plot(wl,toamse)
    pl.suptitle(toatitle)
    pl.savefig(toafig)
    print('saved toafig: "%s"'%str((toafig)))

  # plot mse/mae error curves    
  for errkey in val_loss:
    errdat = np.array(val_loss[errkey])
    errmean,errstd = errdat[:,0],errdat[:,1]
    errstr='mean val_%s: %g, std: %g'%(errkey,s.mean(errdat[:,0]),s.std(errdat[:,0]))
    print(errstr)
  
    errfig = modelprefix+'_%s.pdf'%errkey
    fig,ax = pl.subplots(2,1,sharex=True,sharey=False)
    plotmeanstd(ax[0],wl,errmean,errstd)
    plotmeanstd(ax[1],wl,errmean,errstd,diff=True)  
    ax[0].set_ylabel(errkey)
    ax[1].set_ylabel('diff(%s)'%errkey)
    ax[0].set_title(errstr)
    pl.savefig(errfig)
    print('saved errfig: "%s"'%str((errfig)))
    
  # free some memory
  del D
Example #46
	try:
		import colorama
		colorama.init()
		COLORAMA = ('\x1b[0m', '\x1b[31m', '\x1b[32m', '\x1b[33m', '\x1b[34m', '\x1b[35m', '\x1b[36m', '\x1b[37m')
	except ImportError:
		COLORAMA = ('', '', '', '', '', '', '', '')

from compiler import *



write_id_files = "ID_%s.py" # Where the compiler will write new iteration ID-files.
show_performance_data = False # Set to true to display compiler performance data by default.
export_filename = '%s.txt' # How to name export files (only used for some debugging purposes).

WRECK.time_started = gettime()


print
print '{2}*** Warband Refined & Enhanced Compiler Kit (W.R.E.C.K.) version {version!s} ***{0}'.format(*COLORAMA, version = WRECK_VERSION)
print 'Please report errors, problems and suggestions at {5}http://lav.lomskih.net/wreck/{0}'.format(*COLORAMA)
print

try:

#   +-----------------------------------------------------------------------------------------------
#  /
# +
# |

	print 'Loading module...',
Example #47
 def redraw(self):
     curt = gettime()
     if(curt > self.lasttime+self.interval):
         pylab.draw()
         self.lasttime = curt
Example #48
def time(fn, *args, **kwargs):
    start = gettime()
    res = fn(*args, **kwargs)
    return (gettime()-start, res)
Example #49
 def _place_on_top(cache, node):
     list = cache._list
     old_top = list.next
     node.prev, node.next = list, old_top
     list.next = old_top.prev = node
     node.access = int(gettime())
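_place_on_top splices a node to the head of a circular doubly-linked list whose sentinel is cache._list, so the most recently used entry is always first and the least recently used sits just before the sentinel. A self-contained sketch of the splice, assuming gettime is time.time:

from time import time as gettime

class Node(object):
    __slots__ = ('prev', 'next', 'key', 'value', 'access')

def make_sentinel():
    # The sentinel links to itself, so an empty list needs no special cases
    sentinel = Node()
    sentinel.prev = sentinel.next = sentinel
    return sentinel

def place_on_top(sentinel, node):
    # Insert node between the sentinel and the current head
    old_top = sentinel.next
    node.prev, node.next = sentinel, old_top
    sentinel.next = old_top.prev = node
    node.access = int(gettime())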
Example #50
 def isexpired(self):
     t = gettime()
     return t >= self.weight 
Example #51
def time(fn, *args, **kwargs):
    start = gettime()
    res = fn(*args, **kwargs)
    return (gettime() - start, res)
Example #52
def importPosterior(hdf, posteriorTableFile, alignmentNumber, listNumber, coords, numberOfPostProbs=4):
    if "posteriors" not in hdf.root:
        hdf.createGroup(hdf.root, "posteriors")
    
    table = None
    if decodeChrName(alignmentNumber) in hdf.root.posteriors:
        table = hdf.root.posteriors[decodeChrName(alignmentNumber)] #assume well shaped
    else:
        scheme = dict()
        for i in range(numberOfPostProbs):
            scheme["V" + str(i)] = Float64Col(pos=(i+1))
        scheme["maxstate"] = UInt16Col(pos=(numberOfPostProbs+1))
        scheme["maxP"] = Float64Col(pos=(numberOfPostProbs+2))
        scheme["chunk"] = UInt32Col(pos=(numberOfPostProbs+3))
        scheme["alignmentPosition"] = Int64Col(pos=(numberOfPostProbs+4))
        scheme["alignmentNumber"] = UInt16Col(pos=(numberOfPostProbs+5))
        scheme["speciesPosition"] = Int64Col(pos=(numberOfPostProbs+6))
        ## print scheme
        table = hdf.create_table(hdf.root.posteriors, decodeChrName(alignmentNumber), scheme, filters=Filters(complevel=9, complib='blosc', shuffle=True, fletcher32=False))
    data = open(posteriorTableFile)

    data.readline() #discard header
    buff = []
    
    qtime = 0.0
    starttime = gettime()
    processed = 0
    
    for x in hdf.root.lists.where("(alignmentNumber==%i)&(listNumber==%i)" % (alignmentNumber, listNumber)):
        chunk = x["chunk"]
        q = [(x2["begin"],x2["end"], x2["colId"]) for x2 in hdf.root.maps.main.where("(alignmentNumber==%i)&(chunk==%i)"% (alignmentNumber, chunk))]
        assert len(q) >= 1
        prev_end = q[0][0]
        for (alignmentStart, alignmentEnd, colid) in q:
            prev_coords = (-1,-1,-1,-1)
            for alignmentPosition in range(prev_end, alignmentEnd):
                tmp = data.readline().split(" ")
                
                states = [float(tmp[i]) for i in range(1, numberOfPostProbs+1)]
                tmpstates = [(float(tmp[i]), random(),  i-1) for i in range(1, numberOfPostProbs+1)]
                tmpstates.sort()
                (albegin, alend, spbegin, spend) = prev_coords
                if not (albegin <= alignmentPosition <= alend):  # a small optimization
                    (albegin, alend, spbegin, spend) = coords.query(alignmentPosition)
                    prev_coords = (albegin, alend, spbegin, spend)
                    
                speciesPosition = spbegin + copysign(abs(alignmentPosition - albegin), spend-spbegin)
                
                if spend == -1:
                    speciesPosition = -1
                    
                statescopy = [float(tmp[i]) for i in range(1, numberOfPostProbs+1)]
                buff.append(tuple(statescopy) + (tmpstates[numberOfPostProbs-1][2], tmpstates[numberOfPostProbs-1][0], chunk, alignmentPosition, alignmentNumber, speciesPosition))
                
                if len(buff) >= 10000:
                    table.append(buff)
                    processed = processed + len(buff)
                    ## print "processed", processed, "per sec", (processed/(gettime() - starttime))
                    del buff[:]
                    
            prev_end = alignmentEnd
            
    if len(buff) > 0:
        table.append(buff)
        processed = processed + len(buff)
#        print "processed", processed, "per sec", (processed/(gettime() - starttime))
        buff = []
Example #53
	try:
		import colorama
		colorama.init()
		COLORAMA = ('\x1b[0m', '\x1b[31m', '\x1b[32m', '\x1b[33m', '\x1b[34m', '\x1b[35m', '\x1b[36m', '\x1b[37m')
	except ImportError:
		COLORAMA = ('', '', '', '', '', '', '', '')

from compiler import *



write_id_files = "ID_%s.py" # Where the compiler will write new iteration ID-files.
show_performance_data = False # Set to true to display compiler performance data by default.
export_filename = '%s.txt' # How to name export files (only used for some debugging purposes).

WRECK.time_started = gettime()


print
print '{2}*** Warband Refined & Enhanced Compiler Kit (W.R.E.C.K.) version {version!s} ***{0}'.format(*COLORAMA, version = WRECK_VERSION)
print 'Please report errors, problems and suggestions at {5}http://lav.lomskih.net/wreck/{0}'.format(*COLORAMA)
print

try:

#   +-----------------------------------------------------------------------------------------------
#  /
# +
# |

	print 'Loading module...',
Example #54
     else:
         username,password,recipient = getAccount()
         saveAccount(args.file,username,password,recipient)
 else:
     if(args.file is None):
         username,password,recipient = getAccount()
     else:
         username,password,recipient = loadAccount(args.file)
 
 sleeptime = args.minutes*60
 if(args.port is None):
     serverPort = args.server
 else:
     serverPort = args.server+":"+str(args.port)
 external_ip = ""
 hostname = gethostname()
 logger.info("Server Started")
 signal.signal(signal.SIGINT, exit_handler)
 print 'Press Ctrl+C to exit'
 while True:
     timestamp = gettime()
     logger.info('STUN request')
     nat_type, new_external_ip, external_port = stun.get_ip_info()
     if(new_external_ip is None):
         logger.error('STUN request failed')
     elif(new_external_ip != external_ip):
         logger.info('New IP %s' % new_external_ip)
         if(mailIP(new_external_ip,timestamp)):
             external_ip = new_external_ip
     sleep(sleeptime)
 
Example #55
 def _place_on_top(cache, node):
     list = cache._list
     old_top = list.next
     node.prev, node.next = list, old_top
     list.next = old_top.prev = node
     node.access = int(gettime())
Example #56
	def getClock(self):
		return gettime()
Example #57
 def _build(self, key, builder):
     val = builder()
     entry = AgingEntry(val, gettime() + self.maxseconds)
     return entry