Example no. 1
 def __init__(self):
     self.cond = Condition()
     self.readers = 0
     self.writers = 0
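
The fragment above only sets up the shared state; a minimal sketch of how a readers/writer lock is commonly completed around such counters and a single Condition (the acquire/release method names are illustrative assumptions, not from the original source):

from threading import Condition

class RWLock:
    def __init__(self):
        self.cond = Condition()
        self.readers = 0
        self.writers = 0

    def acquire_read(self):
        with self.cond:
            # wait until no writer is active
            self.cond.wait_for(lambda: self.writers == 0)
            self.readers += 1

    def release_read(self):
        with self.cond:
            self.readers -= 1
            if self.readers == 0:
                self.cond.notify_all()

    def acquire_write(self):
        with self.cond:
            self.cond.wait_for(lambda: self.readers == 0 and self.writers == 0)
            self.writers += 1

    def release_write(self):
        with self.cond:
            self.writers -= 1
            self.cond.notify_all()
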
Example no. 2
    def __init__(self,
                 max_workers,
                 build,
                 artefact,
                 build_id,
                 printer,
                 inline_log_lines=10,
                 debug=False):
        self.config = {
            'build-directory': build,
            'artefact-directory': artefact,
        }

        self.printer = printer

        self.start_timestamp = time.time()
        self.start_date = datetime.datetime.now(datetime.timezone.utc).replace(
            microsecond=0).astimezone().isoformat(' ')

        # Parent task controller
        self.ctl = TaskController('scheduler', {}, build, artefact,
                                  self.printer, inline_log_lines, debug)

        # Start the log file
        self.report_file = self.ctl.open_downloadable_file('report.xml', 'w')
        self.report_file.write("<?xml version=\"1.0\"?>\n")
        self.report_file.write("<build number=\"%s\">\n" % build_id)

        # The following attributes (up to the self.guard declaration) are
        # guarded by the self.guard mutex, and self.guard is also used to
        # notify about changes in any of them.
        # Finer-grained locking would be possible but would overly complicate
        # the conditions inside queue processing, where we need to react to
        # multiple events (new task added vs. some task terminated vs.
        # selecting the right task to be executed).

        # Known tasks (regardless of their state). The mapping is between
        # a task id and TaskWrapper class.
        self.tasks = {}

        # Queue of tasks not yet run (uses task ids only). We insert mutex
        # tasks at the beginning of the queue (instead of appending) as a
        # heuristic to prevent mutex tasks from accumulating at the end of the
        # run, where they could hurt concurrent execution.
        self.queue = []

        # Number of currently running (executing) tasks. Used solely to
        # control the number of concurrently running tasks.
        self.running_tasks_count = 0

        # Flag telling the queue-processing loop to terminate, allowing a
        # clean shutdown of the executor.
        self.terminate = False

        # Here we record which mutexes are held by executing tasks. Mutexes
        # are identified by their (string) name, which is used as the index.
        # When the value is True, the mutex is held (i.e. do not run any other
        # task claiming the same mutex); the mutex is not held when the value
        # is False or when the key is not present at all.
        self.task_mutexes = {}

        # Condition variable guarding the above attributes.
        # We initialize the CV without attaching a lock, as it creates one
        # automatically and the CV then serves as a lock too.
        #
        # Always use notify_all, as we wait on it in multiple functions
        # (e.g. while processing the queue or in barrier).
        self.guard = Condition()

        # Lock guarding output synchronization
        self.output_lock = Lock()

        # Executor for running individual tasks
        from concurrent.futures import ThreadPoolExecutor
        self.max_workers = max_workers
        self.executor = ThreadPoolExecutor(max_workers=max_workers + 2)

        # Start the queue processor
        self.executor.submit(BuildScheduler.process_queue_wrapper, self)
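
The comments above describe a single coarse-grained guard Condition that is always notified with notify_all. A minimal sketch of the kind of queue-processing loop such a design implies; the class, method names, and scheduling logic below are illustrative assumptions, not the scheduler's actual implementation:

from threading import Condition, Thread

class MiniScheduler:
    def __init__(self, max_workers):
        self.guard = Condition()    # guards queue, counters and terminate flag
        self.queue = []
        self.running_tasks_count = 0
        self.terminate = False
        self.max_workers = max_workers

    def submit(self, task):
        with self.guard:
            self.queue.append(task)
            self.guard.notify_all()

    def shutdown(self):
        with self.guard:
            self.terminate = True
            self.guard.notify_all()

    def process_queue(self):
        while True:
            with self.guard:
                # Wake up on any state change (task added, task finished,
                # termination requested) and re-evaluate the whole condition.
                self.guard.wait_for(
                    lambda: self.terminate
                    or (self.queue and self.running_tasks_count < self.max_workers))
                if self.terminate:
                    return
                task = self.queue.pop(0)
                self.running_tasks_count += 1
            Thread(target=self._run_task, args=(task,)).start()

    def _run_task(self, task):
        task()    # run outside the guard
        with self.guard:
            self.running_tasks_count -= 1
            self.guard.notify_all()
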
Example no. 3
 def __init__(self, events):
     self.conds = dict()
     for evt in events:
         self.conds[evt] = Condition(Lock())
     self.data = None
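
A sketch of how such a per-event Condition map is commonly used, assuming a waiter blocks until a poster stores data for its event (the wait_for/post method names are assumptions):

from threading import Condition, Lock

class EventData:
    def __init__(self, events):
        self.conds = {evt: Condition(Lock()) for evt in events}
        self.data = None

    def wait_for(self, evt):
        cond = self.conds[evt]
        with cond:
            cond.wait()          # block until someone posts this event
            return self.data

    def post(self, evt, data):
        cond = self.conds[evt]
        with cond:
            self.data = data
            cond.notify_all()    # wake everyone waiting on this event
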
Example no. 4
        word = run_through_module(Text('integral'))
        self.assertEqual(word.get_text(), 'integral')
        self.assertEqual(
            word.get_phonotypes(),
            [VOWEL, NASAL, OBSTR, VOWEL, OBSTR, LIQUID, VOWEL, LIQUID])


def run_through_module(word):
    pin.acquire()
    pin.put(word)
    pin.notify()
    pin.release()
    pout.acquire()
    while pout.empty():
        pout.wait()
    word = pout.get()
    pout.release()
    return word


print(path_to_scripts + '/configs/conf_hr_lat.json')

data = ConfigData(path_to_scripts + '/configs/conf_hr_lat.json')

pin = Pipe(Queue(), Condition())
pout = Pipe(Queue(), Condition())
mod = PhonotypeModule([pin, pout], data)

if __name__ == '__main__':
    unittest.main()
Example no. 5
        cmd = config['source'][len('pipe:'):]
        (child_out, source) = os.popen2(cmd, 'b')
    else:
        # File source
        source = open(config['source'], "rb")
        dscfg.set_video_ratelimit(tdef.get_bitrate())

    restartstatefilename = config['name'] + '.restart'
    dscfg.set_video_source(source, restartstatefilename=restartstatefilename)

    dscfg.set_max_uploads(config['nuploads'])

    d = s.start_download(tdef, dscfg)
    d.set_state_callback(state_callback)

    # A condition variable would be prettier, but it doesn't listen to
    # KeyboardInterrupt
    # time.sleep(sys.maxint/2048)
    # try:
    #    while True:
    #        x = sys.stdin.read()
    # except:
    #    print_exc()
    cond = Condition()
    cond.acquire()
    cond.wait()

    s.shutdown()
    time.sleep(3)
    shutil.rmtree(statedir)
Example no. 6
 def __init__(self):
     self.cv = Condition()
     self.num = 0
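
A sketch of the usual counter pattern behind a fragment like this: one side bumps the number under the Condition, another waits for it to reach a threshold (the method names and threshold are assumptions):

from threading import Condition

class Counter:
    def __init__(self):
        self.cv = Condition()
        self.num = 0

    def increment(self):
        with self.cv:
            self.num += 1
            self.cv.notify_all()

    def wait_until(self, target):
        with self.cv:
            self.cv.wait_for(lambda: self.num >= target)
            return self.num
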
Example no. 7
 def __init__(self, correlation_id):
     self.correlation_id = correlation_id
     self._result = None
     self._condition = Condition()
Example no. 8
 def __init__(self, max_retry_time=60 * 60, sleep_time=5):
     self._condvar = Condition()
     self._retrying = False
     self._max_retry_time = max_retry_time
     self._sleep_time = sleep_time
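
A sketch of how such a retry helper typically uses its Condition: wait(timeout) doubles as an interruptible sleep between attempts, and notify_all cuts the remaining sleep short. The retry loop itself and the retry/stop method names are assumptions:

import time
from threading import Condition

class Retrier:
    def __init__(self, max_retry_time=60 * 60, sleep_time=5):
        self._condvar = Condition()
        self._retrying = False
        self._max_retry_time = max_retry_time
        self._sleep_time = sleep_time

    def retry(self, func):
        deadline = time.time() + self._max_retry_time
        self._retrying = True
        while self._retrying and time.time() < deadline:
            try:
                return func()
            except Exception:
                with self._condvar:
                    # Sleep between attempts; stop() can cut the sleep short.
                    self._condvar.wait(self._sleep_time)
        raise TimeoutError('gave up retrying')

    def stop(self):
        with self._condvar:
            self._retrying = False
            self._condvar.notify_all()
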
Example no. 9
def send(s, host, port, window, lock):

    # Shared variables
    global max_packages
    max_packages = 2
    global aux_time
    aux_time = time()
    global aux_lock
    aux_lock = Condition()
    global print_lock
    print_lock = Condition()

    # Variable used to signal the end of the program
    global end
    end = False

    # Packet sequence number
    seqNumber = 0
    # Packets sent
    i = 0
    payloads = [
        '1', '12', '123', '1234', '12345', '123456', '1234567', '12345678',
        '123456789'
    ]

    # Send 15 packets to the server
    while i != 15:

        # Wait a random time between 0.6 and 1.2 seconds before sending a new packet
        sleep(random.uniform(0.6, 1.2))
        # Send packets with a random size between 1 and 9 according to the index into the "payloads" list
        data = payloads[random.randint(0, 8)]
        # Build the packet to be sent
        packet = p_client(data, seqNumber)
        # Serialize the packet into a bytes object
        packet = pickle.dumps(packet)

        lock.acquire()
        # Check whether the number of packets sent is greater than max_packages
        if i > max_packages:
            lock.wait()
        lock.release()

        s.sendto(packet, (host, port))

        print_lock.acquire()
        print("Pacote com número de sequência {} enviado".format(seqNumber))
        print_lock.release()

        # Update the sequence number
        seqNumber += len(data)

        # Add the packet to the window
        lock.acquire()
        window[i % 7] = (packet, seqNumber)
        lock.release()

        # Increment the number of packets sent
        i += 1

    # Update end
    end = True
Example no. 10
		print >>cleaningOut, u'\t\t\t<creation-date>'+str(datetime.datetime.now().date())+'</creation-date>'
		print >>cleaningOut, u'\t\t</extraInfo>\n'
		
		print >>cleaningOut, u'\t</doc>\n'
		
		prevTitle = cleaned["title"]
		# sync end
		if multithread: writeCondition.release()
		
		log(u"Obrađeno (" + str(id-1) + "/" + str(linksLen) + ") stranica - " + ("%2.2f" % ((id-1)*100.0/linksLen)) + "%")
						
if __name__ == '__main__':
	
	# Synchronization monitors
	if multithread: 
		writeCondition = Condition()
		logCondition = Condition()
	
	# Output - start
	# Output file
	#cleaningOut = sys.__stdout__ # stdout
	now = datetime.datetime.now()
	year = now.year
	month = now.month
	day = now.day
	cleaningOut = codecs.open("jutarnji-kolumne-arhiva-"+str(year)+"-"+('%02d' % month)+"-"+('%02d' % day)+".xml", "w", "utf-8")
	
	print >>cleaningOut, u'<?xml version="1.0" encoding="utf-8"?>'
	print >>cleaningOut, u'<documentSet name="jutarnji-kolumne-arhiva-'+str(year)+'-'+("%02d" % month)+'-'+("%02d" % day)+'" type="" description="Arhiva kolumni Jutarnjeg lista do '+str(year)+'-'+("%02d" % month)+'-'+("%02d" % day)+'" xmlns="http://ktlab.fer.hr" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://ktlab.fer.hr http://ktlab.fer.hr/download/documentSet.xsd">'
	# Output - end
	
Example no. 11
from threading import Thread, Lock, Condition
import time
import random

queue = []
lock = Lock()
cv = Condition()

class Producer(Thread):
    def __init__(self, id):
        Thread.__init__(self)
        self.id = id
    def run(self):
        global queue
        while True:
            cv.acquire()
            val = random.randint(1,1000)
            print "Producer %d generated : %d" %(self.id, val)
            queue.append(val)
            cv.notifyAll()
            cv.release()
            time.sleep(random.random())

class Consumer(Thread):
    def __init__(self, id):
        Thread.__init__(self)
        self.id = id

    def run(self):
        global queue
        while True:
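
The excerpt cuts off inside Consumer.run. A plausible completion, under the assumption that the consumer mirrors the producer above and pops values from the shared queue while holding cv:

    def run(self):
        global queue
        while True:
            cv.acquire()
            while not queue:
                cv.wait()               # releases cv while blocked
            val = queue.pop(0)
            print("Consumer %d consumed : %d" % (self.id, val))
            cv.release()
            time.sleep(random.random())
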
Example no. 12
 def __init__(self):
     self.reject_calls = 0
     self.condition = Condition()
Example no. 13
 def __init__(self):
     self.receiver_num = self.next_number()
     self.condition = Condition()
     self.messages = []
Example no. 14
 def __init__(self, max_downloads=2):
     self._cv = Condition(lock=Lock())
     self._active_downloads = set()
     self._max_downloads = max_downloads
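
A sketch of how such a download limiter is typically driven: callers block on the Condition until a slot frees up (the acquire/release method names are assumptions):

from threading import Condition, Lock

class DownloadLimiter:
    def __init__(self, max_downloads=2):
        self._cv = Condition(lock=Lock())
        self._active_downloads = set()
        self._max_downloads = max_downloads

    def acquire(self, download_id):
        with self._cv:
            # block until there is room for another concurrent download
            self._cv.wait_for(
                lambda: len(self._active_downloads) < self._max_downloads)
            self._active_downloads.add(download_id)

    def release(self, download_id):
        with self._cv:
            self._active_downloads.discard(download_id)
            self._cv.notify_all()
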
Example no. 15
 def __init__(self):
     self._deque = deque()
     self._closed = False
     self._lock = Lock()
     self._cond = Condition(self._lock)
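
A sketch of the closeable queue this state suggests: put/get run under the shared lock, and close() wakes every blocked reader (the put/get/close method names are assumptions):

from collections import deque
from threading import Condition, Lock

class ClosableQueue:
    def __init__(self):
        self._deque = deque()
        self._closed = False
        self._lock = Lock()
        self._cond = Condition(self._lock)

    def put(self, item):
        with self._lock:
            if self._closed:
                raise RuntimeError('queue is closed')
            self._deque.append(item)
            self._cond.notify()

    def get(self):
        with self._lock:
            self._cond.wait_for(lambda: self._deque or self._closed)
            if self._deque:
                return self._deque.popleft()
            return None          # closed and drained

    def close(self):
        with self._lock:
            self._closed = True
            self._cond.notify_all()
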
Example no. 16
def recv_ack(s, host, port, window, lock, lasted_ack, packages_time):

    # Shared variables
    global max_packages
    global aux_time
    global aux_lock
    aux_lock = Condition()
    global print_lock
    print_lock = Condition()

    rtt = 2.0

    while True:

        try:
            # Time the client waits for the ack confirmation
            s.settimeout(rtt)

            # Receive the packet and the server address
            data, add = s.recvfrom(4096)

            # Record the time spent sending the packets
            aux_lock.acquire()
            packages_time.append(time() - aux_time)
            aux_time = time()
            aux_lock.release()

            # Deserialize the bytes object back into the original packet
            packet = pickle.loads(data)

            print_lock.acquire()
            print("Ack com número de sequência {} recebido".format(packet.ack))
            print_lock.release()

            # Flag indicating whether the received ack is the one expected in the window
            ack_awaited = False

            # Walk through the transmission window
            for i in range(lasted_ack, max_packages + 1):

                # Store the (packet, seqnumber) element in a variable
                lock.acquire()
                index = window[i % 7]
                lock.release()

                # Check whether the received ack equals the sequence number of the packet stored in the window
                if (packet.ack == index[1]):
                    # The received ack is the expected one
                    ack_awaited = True
                    lock.acquire()

                    # Walk through the window, setting confirmed packets to None
                    for j in range(lasted_ack, i + 1):

                        window[j % 7] = (None, None)
                        # Update the last ack received
                        lasted_ack += 1
                        max_packages += 1

                    lock.notify()
                    lock.release()

            # The ack is not the expected one
            if (ack_awaited == False):
                print_lock.acquire()
                print("Ack duplicado")
                print_lock.release()

                # Since duplicate acks were received, resend the window
                resend_window(s, host, port, window, lock, lasted_ack)

        except socket.error as error:
            print_lock.acquire()
            print("Timeout")
            print_lock.release()

            # If a timeout occurs, the window is resent
            resend_window(s, host, port, window, lock, lasted_ack)

        # Variable indicating whether all packets in the window have been acknowledged
        empty_window = True

        # Walk through the window checking for unacknowledged packets
        for i in range(lasted_ack, max_packages + 1):
            if window[i % 7] != (None, None):
                # If there are any, set the variable to False
                empty_window = False

        # Check whether the window is empty and the sender has stopped sending packets
        if end == True and empty_window == True:
            break
Example no. 17
 def __init__(self):
     Thread.__init__(self)
     self.on_error = lambda err: print(err)
     self._running = False
     self._paused = False
     self._condition = Condition()
Example no. 18
 def __init__(self):
     self.last_frame = datetime.datetime.now()
     self.condition = Condition()
     self.screen = bytes()
Example no. 19
 def __init__(self):
   self.length = 0
   self.nodes = []
   self.lock = Lock()
   self.was_added = Condition(self.lock)
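
A sketch of the producer/consumer pair this list-and-condition layout usually supports (the add/take method names are assumptions):

from threading import Condition, Lock

class NodeList:
    def __init__(self):
        self.length = 0
        self.nodes = []
        self.lock = Lock()
        self.was_added = Condition(self.lock)

    def add(self, node):
        with self.lock:
            self.nodes.append(node)
            self.length += 1
            self.was_added.notify()     # wake one waiter

    def take(self):
        with self.lock:
            self.was_added.wait_for(lambda: self.nodes)
            self.length -= 1
            return self.nodes.pop(0)
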
Example no. 20
    def __init__(self, Qf, Phi, smin, smax, alpha, nmax=200):
        # Parameters:
        # $\mathbf{Qf}$ -- $\mathcal{Q}f$, sampled on an $r\theta$ grid.
        # $\mathbf{Phi}$ ($\phi$) -- Scattering angle
        # $\mathbf{rmin}$ -- $r_{\min}$, defaults to $1$.
        # $\mathbf{rmax}$ -- $r_{\max}$, defaults to $6$.
        # $\mathbf{D}$ -- Numerical implementation of $\frac{\partial}{\partial r}$.
        # $\mathbf{nmax}$ -- $n_{\max}$, reconstructs $\tilde{f}\left(r,n\right)$
        # for $\left|n\right| \le n_{\max}$. Defaults to $200$.

        # This reconstruction will assume that $\mathcal{Q}f$ is real and exploit
        # conjugate symmetry in the Fourier series.

        # Initialize variables.
        self.Qf = Qf
        self.Phi = Phi
        self.smin = smin
        self.smax = smax

        H, W = Qf.shape

        self.thetamin = thetamin = -pi
        self.thetamax = thetamax = pi * (1 - 2.0 / H)
        self.nmax = nmax

        self.F = None
        self.F_cartesian = None

        self.lock = Lock()
        self.status = Condition(self.lock)
        self.jobsdone = 0
        self.jobcount = nmax + 1
        self.running = False
        self.projectioncount = 0
        self.projecting = False

        self.dr = dr = ds = (smax - smin) / float(W - 1)
        self.dtheta = dtheta = (thetamax - thetamin) / float(H)

        # Compute $\widetilde{\mathcal{Q}f}$.
        self.FQf = FQf = fft(Qf, axis=0)

        # Perform differentiation of $\widetilde{\mathcal{Q}f}$.
        D = Sderiv(alpha)
        try:
            clip_left, clip_right, self.DFQf = D(FQf, ds)
        except:
            clip_left, clip_right, self.DFQf = D(float64(FQf), ds)

        # Initialize array that will store $\tilde{f}$.
        self.Ff = zeros(self.DFQf.shape, dtype=complex128)

        # Initialize $rs$ grid.
        self.rmin = self.smin + clip_left * ds
        self.rmax = self.smax - clip_right * ds
        R = linspace(self.rmin, self.rmax, W - clip_left - clip_right)
        self.R, self.S = meshgrid(R, R)

        # Compute $q$, $u$, $v$, $w$, and $v^{2}r*\csc(\phi)*{\Delta}r/s^2$.
        self.Q = self.S / self.R

        args = dict(q=self.Q, r=self.R, s=self.S, phi=self.Phi, dr=dr)
        args["u"] = self.U = self._u(**args)
        args["v"] = self.V = self._v(**args)
        self.W = self._w(**args)
        self.Factor = self._cf(**args)
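
The jobsdone/jobcount counters guarded by self.status suggest a wait-for-completion pattern. A minimal sketch of how worker threads and a waiting caller might use such a pair; the class and method names are assumptions, not the original reconstruction code:

from threading import Condition, Lock

class JobTracker:
    def __init__(self, jobcount):
        self.lock = Lock()
        self.status = Condition(self.lock)
        self.jobsdone = 0
        self.jobcount = jobcount

    def job_done(self):
        with self.lock:
            self.jobsdone += 1
            self.status.notify_all()    # let waiters re-check progress

    def wait_until_done(self):
        with self.lock:
            self.status.wait_for(lambda: self.jobsdone >= self.jobcount)
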
Example no. 21
    def __init__(self, action: Callable, **kwargs):
        """
        Mandatory positional arguments

          action
            object which will be executed (e.g. a function).
            Must be a callable or a task object
            (e.g. Task, Repeated, Periodic).

        Optional keyword only arguments

          args: tuple=()
            argument list of action
          kwargs: dict={}
            keyword arguments of action
          action_stop: Callable=None
            object (e.g. a function), called when Repeated is stopped.
          args_stop: tuple=()
            argument list of action_stop
          kwargs_stop: dict={}
            keyword arguments of action_stop
          action_cont: Callable=None
            object (e.g. a function), called when Repeated is continued.
          args_cont: tuple=()
            argument list of action_cont
          kwargs_cont: dict={}
            keyword arguments of action_cont
          duration: Number=None
            duration of task (if recursions end earlier, Repeated will wait)
          num: int
            maximum number of iterations
          netto_time: bool=False
            flag indicating that the waiting time is netto (execution of action counts extra)
          exc_handler: Callable=None
            user defined handler of exceptions
        """
        assert isinstance(kwargs, dict), \
            'kwargs needs to be a dictionary'
        self._action = action
        self._args = kwargs.pop('args', ())
        self._kwargs = kwargs.pop('kwargs', {})
        self._duration = kwargs.pop('duration', None)
        self._duration_rest: bool = False  # stopped while sleeping
        self._gap = None  # stopped while sleeping
        self._num = kwargs.pop('num', None)
        self._exc_handler = kwargs.pop('exc_handler', None)

        self._next = None  # next task in linked list
        self._root = self  # root task
        self._netto_time = kwargs.pop('netto_time', False)
        self._cnt = 0  # number of action executions

        # the following are root only attributes
        self._action_stop = kwargs.pop('action_stop', None)
        self._args_stop = kwargs.pop('args_stop', ())
        self._kwargs_stop = kwargs.pop('kwargs_stop', {})
        self._action_cont = kwargs.pop('action_cont', None)
        self._args_cont = kwargs.pop('args_cont', ())
        self._kwargs_cont = kwargs.pop('kwargs_cont', {})

        self._state = STATE_INIT
        self._activity = ACTIVITY_NONE
        self._thread = None
        self._thread_start = None
        self._thread_cont = None
        self._restart = False
        self._lock = Lock()
        self._cond = Condition(self._lock)
        self._current = None  # current task object
        self._current_scheduled = None  # scheduled time
        self._last = self  # last task object in chain
        self._time_called_start = None  # time of last starting
        self._time_called_cont = None  # time of last continuing
        self._time_called_stop = None  # time of last stopping
        self._children = []  # child tasks
        self._cont_join = None  # child to join, when continuing
        self._threadless_child = None  # child, started with thread=False
        self._parent = None  # parent task
        self._exc = None  # exception occurred
        self._delay = None  # additional timespan in start or cont
        
        self._cont_data = None  # data used for continuation

        assert not kwargs, 'unknown keyword arguments: ' + str(kwargs.keys())

        assert isinstance(self._action, Callable) or \
            isinstance(self._action, Repeated), \
            "action needs to be a callable or a task"
        assert isinstance(self._args, tuple), 'args needs to be a tuple'
        assert isinstance(self._action, Callable) or \
            len(self._args) <= 1, \
            "only one argument for Repeated"
        assert isinstance(self._action, Callable) or \
            len(self._args) == 0, \
            "no args for tasks"
        assert isinstance(self._kwargs, dict), \
            'kwargs needs to be a dictionary'
        assert isinstance(self._action, Callable) or \
            not self._kwargs, \
            "no kwargs for tasks"

        if isinstance(self._action, Repeated):
            # if action is a Repeated, start it as a threadless child
            self._action = self._action.start
            self._kwargs['thread'] = False

        assert (
            self._action_stop is None or
            isinstance(self._action_stop, Callable)
        ), "action_stop needs to be a callable"
        assert isinstance(self._args_stop, tuple), \
            'args_stop needs to be a tuple'
        assert isinstance(self._kwargs_stop, dict), \
            'kwargs_stop needs to be a dictionary'

        assert (
            self._action_cont is None or
            isinstance(self._action_cont, Callable)
            ), "action_cont needs to be a callable"
        assert isinstance(self._args_cont, tuple), \
            'args_cont needs to be a tuple'
        assert isinstance(self._kwargs_cont, dict), \
            'kwargs_cont needs to be a dictionary'

        assert (
            self._duration is None or
            isinstance(self._duration, Number)
        ), 'duration needs to be a number'
        assert self._duration is None or self._duration >= 0, \
            'duration needs to be non-negative'

        assert self._num is None or isinstance(self._num, int), \
            'num must be an integer'
        assert self._num is None or self._num > 0, 'num must be positive'

        assert isinstance(self._netto_time, bool), \
            'netto_time must be a bool value'

        assert (
            self._exc_handler is None or
            isinstance(self._exc_handler, Callable)
        ), 'exc_handler needs to be a callable'
Example no. 22
 def __init__(self, interval):
     self._interval = interval
     self._flag = 0
     # self._cv = threading.Condition()
     self._cv = Condition()
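
A sketch of the tick/flag pattern this fragment hints at: one side waits up to _interval seconds for the flag, the other sets it and notifies (the set/wait method names are assumptions):

from threading import Condition

class IntervalFlag:
    def __init__(self, interval):
        self._interval = interval
        self._flag = 0
        self._cv = Condition()

    def set(self):
        with self._cv:
            self._flag = 1
            self._cv.notify_all()

    def wait(self):
        with self._cv:
            # True if the flag was set before the interval elapsed
            signalled = self._cv.wait_for(lambda: self._flag,
                                          timeout=self._interval)
            self._flag = 0
            return bool(signalled)
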
Example no. 23
from tkinter import *
from threading import Thread
from threading import Condition
import seeker
import exact_learning
import shapes
import math

condition = Condition()


class Game(Thread):
    def __init__(self, arena, movements, player_positions, rounds):
        Thread.__init__(self)
        self.arena = arena
        self.movements = movements
        self.player_positions = player_positions
        self.finished = False
        self.rounds = rounds

    def forget(Q, beta):
        for k in Q:
            Q[k] = beta * Q[k]

    def play(self):
        print("Catching a Moving Prey")
        # CS - Start position
        condition.acquire()
        self.player_positions.append(
            (self.arena.seeker.get_representation(), self.arena.prey))
        condition.notify()
Example no. 24
 def __init__(self, size=10):
     self.empty_slots = size
     self.queue = deque()
     self.empty = Condition()
     self.full = Condition()
     self.lock = Lock()
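
A bounded-buffer sketch built from this state, in the common variant where both Conditions share the single lock so producers wait for empty slots and consumers wait for items. The put/get names and the lock-sharing choice are assumptions, not necessarily the original design:

from collections import deque
from threading import Condition, Lock

class BoundedBuffer:
    def __init__(self, size=10):
        self.lock = Lock()
        self.empty = Condition(self.lock)   # signalled when a slot frees up
        self.full = Condition(self.lock)    # signalled when an item arrives
        self.empty_slots = size
        self.queue = deque()

    def put(self, item):
        with self.lock:
            self.empty.wait_for(lambda: self.empty_slots > 0)
            self.queue.append(item)
            self.empty_slots -= 1
            self.full.notify()

    def get(self):
        with self.lock:
            self.full.wait_for(lambda: self.queue)
            item = self.queue.popleft()
            self.empty_slots += 1
            self.empty.notify()
            return item
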
Example no. 25
 def __init__(self, prog):
     self.program = prog
     self.condition = Condition()
Example no. 26
class TrashPurgeScheduleHandler:
    MODULE_OPTION_NAME = "trash_purge_schedule"
    SCHEDULE_OID = "rbd_trash_trash_purge_schedule"

    lock = Lock()
    condition = Condition(lock)
    thread = None

    def __init__(self, module):
        self.module = module
        self.log = module.log
        self.last_refresh_pools = datetime(1970, 1, 1)

        self.init_schedule_queue()

        self.thread = Thread(target=self.run)
        self.thread.start()

    def run(self):
        try:
            self.log.info("TrashPurgeScheduleHandler: starting")
            while True:
                self.refresh_pools()
                with self.lock:
                    (ns_spec, wait_time) = self.dequeue()
                    if not ns_spec:
                        self.condition.wait(min(wait_time, 60))
                        continue
                pool_id, namespace = ns_spec
                self.trash_purge(pool_id, namespace)
                with self.lock:
                    self.enqueue(datetime.now(), pool_id, namespace)

        except Exception as ex:
            self.log.fatal("Fatal runtime error: {}\n{}".format(
                ex, traceback.format_exc()))

    def trash_purge(self, pool_id, namespace):
        try:
            with self.module.rados.open_ioctx2(int(pool_id)) as ioctx:
                ioctx.set_namespace(namespace)
                rbd.RBD().trash_purge(ioctx, datetime.now())
        except Exception as e:
            self.log.error("exception when purgin {}/{}: {}".format(
                pool_id, namespace, e))


    def init_schedule_queue(self):
        self.queue = {}
        self.pools = {}
        self.refresh_pools()
        self.log.debug("scheduler queue is initialized")

    def load_schedules(self):
        self.log.info("TrashPurgeScheduleHandler: load_schedules")

        schedules = Schedules(self)
        schedules.load()
        with self.lock:
            self.schedules = schedules

    def refresh_pools(self):
        if (datetime.now() - self.last_refresh_pools).seconds < 60:
            return

        self.log.debug("TrashPurgeScheduleHandler: refresh_pools")

        self.load_schedules()

        with self.lock:
            if not self.schedules:
                self.images = {}
                self.queue = {}
                self.last_refresh_pools = datetime.now()
                return

        pools = {}

        for pool_id, pool_name in get_rbd_pools(self.module).items():
            with self.module.rados.open_ioctx2(int(pool_id)) as ioctx:
                self.load_pool(ioctx, pools)

        with self.lock:
            self.refresh_queue(pools)
            self.pools = pools

        self.last_refresh_pools = datetime.now()

    def load_pool(self, ioctx, pools):
        pool_id = str(ioctx.get_pool_id())
        pool_name = ioctx.get_pool_name()
        pools[pool_id] = {}
        pool_namespaces = ['']

        try:
            pool_namespaces += rbd.RBD().namespace_list(ioctx)
        except rbd.OperationNotSupported:
            self.log.debug("namespaces not supported")
        except Exception as e:
            self.log.error("exception when scanning pool {}: {}".format(
                pool_name, e))

        for namespace in pool_namespaces:
            pools[pool_id][namespace] = pool_name

    def rebuild_queue(self):
        with self.lock:
            now = datetime.now()

            # don't remove from queue "due" images
            now_string = datetime.strftime(now, "%Y-%m-%d %H:%M:00")

            for schedule_time in list(self.queue):
                if schedule_time > now_string:
                    del self.queue[schedule_time]

            if not self.schedules:
                return

            for pool_id, namespaces in self.pools.items():
                for namespace in namespaces:
                    self.enqueue(now, pool_id, namespace)

            self.condition.notify()

    def refresh_queue(self, current_pools):
        now = datetime.now()

        for pool_id, namespaces in self.pools.items():
            for namespace in namespaces:
                if pool_id not in current_pools or \
                   namespace not in current_pools[pool_id]:
                    self.remove_from_queue(pool_id, namespace)

        for pool_id, namespaces in current_pools.items():
            for namespace in namespaces:
                if pool_id not in self.pools or \
                   namespace not in self.pools[pool_id]:
                    self.enqueue(now, pool_id, namespace)

        self.condition.notify()

    def enqueue(self, now, pool_id, namespace):

        schedule = self.schedules.find(pool_id, namespace)
        if not schedule:
            return

        schedule_time = schedule.next_run(now)
        if schedule_time not in self.queue:
            self.queue[schedule_time] = []
        self.log.debug("schedule image {}/{} at {}".format(
            pool_id, namespace, schedule_time))
        ns_spec = (pool_id, namespace)
        if ns_spec not in self.queue[schedule_time]:
            self.queue[schedule_time].append((pool_id, namespace))

    def dequeue(self):
        if not self.queue:
            return None, 1000

        now = datetime.now()
        schedule_time = sorted(self.queue)[0]

        if datetime.strftime(now, "%Y-%m-%d %H:%M:%S") < schedule_time:
            wait_time = (datetime.strptime(schedule_time,
                                           "%Y-%m-%d %H:%M:%S") - now)
            return None, wait_time.total_seconds()

        namespaces = self.queue[schedule_time]
        namespace = namespaces.pop(0)
        if not namespaces:
            del self.queue[schedule_time]
        return namespace, 0

    def remove_from_queue(self, pool_id, namespace):
        empty_slots = []
        for schedule_time, namespaces in self.queue.items():
            if (pool_id, namespace) in namespaces:
                namespaces.remove((pool_id, namespace))
                if not namespaces:
                    empty_slots.append(schedule_time)
        for schedule_time in empty_slots:
            del self.queue[schedule_time]

    def add_schedule(self, level_spec, interval, start_time):
        self.log.debug(
            "add_schedule: level_spec={}, interval={}, start_time={}".format(
                level_spec.name, interval, start_time))

        with self.lock:
            self.schedules.add(level_spec, interval, start_time)

        # TODO: optimize to rebuild only affected part of the queue
        self.rebuild_queue()
        return 0, "", ""

    def remove_schedule(self, level_spec, interval, start_time):
        self.log.debug(
            "remove_schedule: level_spec={}, interval={}, start_time={}".format(
                level_spec.name, interval, start_time))

        with self.lock:
            self.schedules.remove(level_spec, interval, start_time)

        # TODO: optimize to rebuild only affected part of the queue
        self.rebuild_queue()
        return 0, "", ""

    def list(self, level_spec):
        self.log.debug("list: level_spec={}".format(level_spec.name))

        with self.lock:
            result = self.schedules.to_list(level_spec)

        return 0, json.dumps(result, indent=4, sort_keys=True), ""

    def status(self, level_spec):
        self.log.debug("status: level_spec={}".format(level_spec.name))

        scheduled = []
        with self.lock:
            for schedule_time in sorted(self.queue):
                for pool_id, namespace in self.queue[schedule_time]:
                    if not level_spec.matches(pool_id, namespace):
                        continue
                    pool_name = self.pools[pool_id][namespace]
                    scheduled.append({
                        'schedule_time' : schedule_time,
                        'pool_id' : pool_id,
                        'pool_name' : pool_name,
                        'namespace' : namespace
                    })
        return 0, json.dumps({'scheduled' : scheduled}, indent=4,
                             sort_keys=True), ""

    def handle_command(self, inbuf, prefix, cmd):
        level_spec_name = cmd.get('level_spec', "")

        try:
            level_spec = LevelSpec.from_name(self, level_spec_name,
                                             allow_image_level=False)
        except ValueError as e:
            return -errno.EINVAL, '', "Invalid level spec {}: {}".format(
                level_spec_name, e)

        if prefix == 'add':
            return self.add_schedule(level_spec, cmd['interval'],
                                     cmd.get('start_time'))
        elif prefix == 'remove':
            return self.remove_schedule(level_spec, cmd.get('interval'),
                                        cmd.get('start_time'))
        elif prefix == 'list':
            return self.list(level_spec)
        elif prefix == 'status':
            return self.status(level_spec)

        raise NotImplementedError(cmd['prefix'])
Example no. 27
 def __init__(self):
   self._cond = Condition()
   self._running = False
   self._queue = deque()
   self._terminate_events = {}
   self._threads = []
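
A sketch of the worker loop such state typically drives: jobs are appended to the deque under _cond, and worker threads block until work arrives or the pool stops (the submit/stop/_worker names are assumptions):

from collections import deque
from threading import Condition, Thread

class WorkerPool:
    def __init__(self, num_threads=2):
        self._cond = Condition()
        self._running = True
        self._queue = deque()
        self._threads = [Thread(target=self._worker) for _ in range(num_threads)]
        for t in self._threads:
            t.start()

    def submit(self, job):
        with self._cond:
            self._queue.append(job)
            self._cond.notify()

    def stop(self):
        with self._cond:
            self._running = False
            self._cond.notify_all()
        for t in self._threads:
            t.join()

    def _worker(self):
        while True:
            with self._cond:
                self._cond.wait_for(lambda: self._queue or not self._running)
                if not self._queue and not self._running:
                    return
                job = self._queue.popleft()
            job()    # run outside the lock
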
Example no. 28
 def __init__(self, port=9500):
     Thread.__init__(self)
     self.app = Flask(__name__)
     self.port = port
     self.cv = Condition()
     self.document = None
Example no. 29
 def __init__(self):
     self.frame = None
     self.buffer = io.BytesIO()
     self.condition = Condition()
Example no. 30
 def __init__(self):
   self._current_packet_id = 0
   self._request_response_map = {}
   self._new_response_condition = Condition()
   self._packet_id_lock = Lock()
   self._notification_callbacks = []
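
A sketch of the request/response handshake this state implies: a sender allocates a packet id, waits on the Condition, and the receive path stores the response and notifies (the method names and timeout are assumptions):

from threading import Condition, Lock

class PacketRouter:
    def __init__(self):
        self._current_packet_id = 0
        self._request_response_map = {}
        self._new_response_condition = Condition()
        self._packet_id_lock = Lock()

    def _next_packet_id(self):
        with self._packet_id_lock:
            self._current_packet_id += 1
            return self._current_packet_id

    def wait_for_response(self, packet_id, timeout=5.0):
        with self._new_response_condition:
            ok = self._new_response_condition.wait_for(
                lambda: packet_id in self._request_response_map, timeout=timeout)
            if not ok:
                raise TimeoutError('no response for packet %d' % packet_id)
            return self._request_response_map.pop(packet_id)

    def on_response(self, packet_id, response):
        with self._new_response_condition:
            self._request_response_map[packet_id] = response
            self._new_response_condition.notify_all()
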