Ejemplo n.º 1
0
class Request_deque():
    """Counting semaphore paired with a FIFO of timestamps, so every
    successful acquire() receives the timestamp recorded by the matching
    release().
    """

    def __init__(self, value=1):
        # Import locally: the original class-body
        # ``from collections import deque`` only created a *class*
        # attribute, which is not visible as a bare name inside methods,
        # so ``deque()`` below would have raised NameError unless a
        # module-level import existed elsewhere.
        from collections import deque
        self.sema = Semaphore(value)   # counts timestamps available in the queue
        self.time_stamp_q = deque()    # FIFO of release timestamps (None == stop)
        self.sync_lock = Lock()        # makes append + release atomic in release()

    def acquire(self, blocking=True):
        """Acquire one permit.

        Returns ``(True, timestamp)`` on success (``timestamp`` is None
        when the matching release() was a stop signal), or
        ``(False, None)`` when non-blocking and no permit is available.
        """
        if self.sema.acquire(blocking):
            # Released under blocking mode, or a spare permit happened to
            # exist under non-blocking mode.
            return True, self.time_stamp_q.popleft()
        # Non-blocking mode with unsuccessful acquiring.
        return False, None

    def release(self, stop=False):
        """Release one permit, enqueueing the current time (or None when
        ``stop`` is True, signalling shutdown to the acquirer)."""
        with self.sync_lock:
            # The timestamp append and the semaphore release must happen
            # atomically so acquirers pop timestamps in matching order.
            # This mismatch could be rare but is unaffordable if any.
            self.time_stamp_q.append(None if stop else dt.now())
            self.sema.release()
    def __init__(self, n):
        # NOTE(review): mutex plus two closed turnstiles matches the
        # classic reusable-barrier pattern — confirm against the methods
        # that use these attributes (not visible here).
        self.n = n          # number of participating threads
        self.count = 0      # threads arrived so far

        self.mutex = Semaphore(value=1)       # guards ``count``
        self.turnstile1 = Semaphore(value=0)  # first gate, initially closed
        self.turnstile2 = Semaphore(value=0)  # second gate, initially closed
    def __init__(self, connection):
        """Wire list/variable bookkeeping onto a server connection.

        Python 2 code (uses ``print >>``). Registers regex expectations on
        the connection and immediately requests lists and variables.
        """
        self.connection = connection

        # Lists
        self.publicLists = {}
        self.personalLists = {}
        self.personalBackup = {}
        # Presumably released by the onUpdate* callbacks once list data
        # has arrived — confirm in their definitions (not visible here).
        self.listLock = Semaphore(0)

        self.connection.expect_fromplus(self.onUpdateLists, "Lists:", "(?:\w+\s+is (?:PUBLIC|PERSONAL))|$")

        self.connection.expect_line(self.onUpdateEmptyListitems, "-- (\w+) list: 0 \w+ --")
        self.connection.expect_fromplus(self.onUpdateListitems, "-- (\w+) list: ([1-9]\d*) \w+ --", "(?:\w+ *)+$")

        # Ask the server to send the lists handled above.
        print >> self.connection.client, "showlist"

        # Variables
        self.variablesBackup = {}
        self.ivariablesBackup = {}
        self.variables = {}
        self.ivariables = {}
        # Presumably released by onVariables once data arrives — confirm.
        self.varLock = Semaphore(0)

        self.connection.expect_fromplus(
            self.onVariables, "((?:Interface v|V)ariable settings) of (\w+):", "(?:\w*=\w+ *)*$"
        )

        # Request both normal and interface variables.
        print >> self.connection.client, "variables"
        print >> self.connection.client, "ivariables"

        self.connection.connect("disconnecting", self.stop)

        # Auto flag
        conf.notify_add("autoCallFlag", self.autoFlagNotify)
class StatisticQueue:
    """Collects results produced concurrently by several stats workers."""

    def __init__(self, stats):
        self._semaphore = Semaphore()  # guards concurrent access to ``result``
        self.result = {}
        self.stats = stats             # object exposing the get_* worker methods

    def write_result(self, data):
        """Merge ``data`` into the shared result dict (thread-safe).

        The ``with`` form guarantees the semaphore is released even when
        ``update()`` raises (the original manual acquire/release could
        leave it held forever on an exception).
        """
        with self._semaphore:
            self.result.update(data)

    def start_parse(self):
        """Run every stats extractor in its own thread, wait for all of
        them, and return the merged result dict."""
        self.stats.connect()
        self.stats.init_message_stack()
        func_to_start = [
            self.stats.get_top3_speakers,
            self.stats.get_most_frequent_youtube_video,
            self.stats.get_time_activity,
            self.stats.get_abusive_expressions,
        ]
        # Each worker receives this queue so it can call write_result().
        threads = [Thread(target=func, args=(self, )) for func in func_to_start]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        return self.result
Ejemplo n.º 5
0
    def _setup_to_do_n_cycles(self, number_of_cycles: int, updates_each_cycle: UpdateCollection=None):
        """
        Sets up the test so that the retriever will only do n cycles.
        :param number_of_cycles: the number of cycles to do
        :param updates_each_cycle: updates returned by every cycle
            (defaults to an empty collection)
        """
        if updates_each_cycle is None:
            updates_each_cycle = UpdateCollection([])

        # Handshake between the retrieval thread and this test thread:
        # ``semaphore`` counts started cycles; ``lock_until_counted``
        # holds the retrieval thread until this thread has counted one.
        semaphore = Semaphore(0)
        lock_until_counted = Lock()
        lock_until_counted.acquire()

        def increase_counter(*args) -> UpdateCollection:
            # Runs on the retrieval thread: signal one cycle, then wait
            # until the test thread has counted it before returning.
            semaphore.release()
            lock_until_counted.acquire()
            return updates_each_cycle

        self.retrieval_manager.update_mapper.get_all_since.side_effect = increase_counter
        self.retrieval_manager.start()

        run_counter = 0
        while run_counter < number_of_cycles:
            semaphore.acquire()
            run_counter += 1
            lock_until_counted.release()
            # Stop the manager once the last expected cycle has begun.
            if run_counter == number_of_cycles:
                self.retrieval_manager.stop()

        # Restore the mock so later calls behave normally.
        self.retrieval_manager.update_mapper.get_all_since.side_effect = None
Ejemplo n.º 6
0
class BinarySemaphore:
    """Thin wrapper giving ``threading.Semaphore`` the classic P/V
    (wait/signal) naming.

    Backward-compatible extension: also usable as a context manager,
    ``with bs: ...`` performing wait() on entry and signal() on exit.
    """

    def __init__(self, initial):
        # initial: starting permit count (conventionally 0 or 1).
        self.sem = Semaphore(initial)

    def wait(self):
        """Block until a permit is available, then take it (P operation)."""
        self.sem.acquire()

    def signal(self):
        """Release one permit, waking a waiter if any (V operation)."""
        self.sem.release()

    def __enter__(self):
        self.wait()
        return self

    def __exit__(self, exc_type, exc, tb):
        self.signal()
        return False  # never swallow exceptions
Ejemplo n.º 7
0
    def __init__(self, should_exit):
        """Initialize bookkeeping for queueing, download, upload and copy
        transfers (counters, result lists, per-area mutex and work queue)."""
        self.should_exit = should_exit  # externally supplied shutdown flag — TODO confirm semantics

        # Folder-queueing state.
        self.queues = 0
        self.queuers_active = 0
        self.queue_busy = Semaphore(1)  # binary semaphore guarding queue state
        self.folder_queue = Queue(0)    # maxsize 0 == unbounded

        # Download bookkeeping.
        self.down_bytes = 0
        self.down_files = 0
        self.down_active = 0
        self.down_active_b = 0
        self.down_complete = []
        self.down_failed = []
        self.down_busy = Semaphore(1)
        self.down_queue = Queue(0)

        # Upload bookkeeping (mirrors the download fields).
        self.up_bytes = 0
        self.up_files = 0
        self.up_active = 0
        self.up_active_b = 0
        self.up_complete = []
        self.up_failed = []
        self.up_busy = Semaphore(1)
        self.up_queue = Queue(0)

        # Copy bookkeeping (mirrors the download fields).
        self.copy_files = 0
        self.copy_bytes = 0
        self.copy_active = 0
        self.copy_active_b = 0
        self.copy_complete = []
        self.copy_failed = []
        self.copy_busy = Semaphore(1)
        self.copy_queue = Queue(0)
        self.st = time.time()  # construction timestamp — presumably for elapsed-time stats; confirm
class TestPubSubscribe(unittest.TestCase):
    """End-to-end check: every message published to URI must be received
    back by a subscriber on the same client."""

    def onMessage(self, message):
        # Subscription callback: each received payload must be one of the
        # published MESSAGES; once all have arrived, wake the test thread.
        self.assertTrue(len(message.payload_objects) > 0)
        msg_body = message.payload_objects[0].content
        self.assertIn(msg_body, MESSAGES)
        self.counter += 1
        if self.counter == len(MESSAGES):
            self.semaphore.release()

    def setUp(self):
        # Fresh counter/semaphore and a client subscribed to URI per test.
        self.counter = 0
        self.semaphore = Semaphore(0)
        self.bw_client = Client()
        self.bw_client.setEntityFromFile(KEY_FILE)
        self.bw_client.overrideAutoChainTo(True)
        self.bw_client.subscribe(URI, self.onMessage)

    def tearDown(self):
        self.bw_client.close()

    def testPublishSubscribe(self):
        # Publish every message, then block until onMessage counted them all.
        for msg in MESSAGES:
            po = PayloadObject((64, 0, 0, 0), None, msg)
            self.bw_client.publish(URI, payload_objects=(po,))
        self.semaphore.acquire()
Ejemplo n.º 9
0
    def test_output_handling(self):
        """A message sent through an Output must be delivered to an Input
        bound to the same channel name ("foo")."""
        class Foo(ModuleBase):
            out = Output(channel_name="foo")
            inp = Input(channel_name="foo")
            clk = Clock()

            def __init__(self, cr, s):
                super().__init__(cr)
                self.s = s  # semaphore released when the message arrives
                self.subscribe()

            def stop(self):
                self.clk.stop()

            def start(self):
                self.clk.frequency = 100
                self.clk.start()

            @activity(clk)
            async def tick(self):
                # Runs on each clock tick: publish on the output channel.
                await self.out("hello, is it me you're looking for.")

            @activity(inp)
            async def qwerty(self, inp):
                # Runs when the input channel delivers a message.
                assert (inp == "hello, is it me you're looking for.")
                self.s.release()

        cr = ChannelRegister()
        s = Semaphore(0)
        foo = Foo(cr, s)
        foo.start()
        # Fails unless a message makes the round trip within 100 ms.
        self.assertTrue(s.acquire(timeout=0.1))
        foo.stop()
Ejemplo n.º 10
0
 def __init__(self, n, name):
     # NOTE(review): mutex plus two turnstiles (second initially open)
     # matches the classic reusable-barrier pattern — confirm against the
     # methods using these attributes (not visible here).
     self.n = n          # number of participating threads
     self.name = name
     self.count = 0      # threads arrived so far
     self.mutex = Semaphore(1)       # guards ``count``
     self.turnstile = Semaphore(0)   # first gate, initially closed
     self.turnstile2 = Semaphore(1)  # second gate, initially open
Ejemplo n.º 11
0
    def test_get_all_when_file_moved(self):
        """Moving a tracked file (observed as DELETE then CREATE) must
        leave get_all() returning the same data."""
        self.source.start()
        block_until_synchronised_files_data_source_started(self.source)

        # Released once for the DELETE event and once for the CREATE event.
        move_semaphore = Semaphore(0)
        deleted = False

        def on_change(change: FileSystemChange):
            nonlocal deleted
            if change == FileSystemChange.DELETE:
                move_semaphore.release()
                deleted = True
            # Only count a CREATE that follows the DELETE, i.e. the
            # second half of the move.
            if deleted and change == FileSystemChange.CREATE:
                move_semaphore.release()

        self.source.add_listener(on_change)

        to_move_file_path = glob.glob("%s/*" % self.temp_directory)[0]
        move_to = "%s_moved" % to_move_file_path
        shutil.move(to_move_file_path, move_to)

        # Block until both halves of the move have been observed.
        move_semaphore.acquire()
        move_semaphore.acquire()

        self.assertCountEqual(self.source.get_all(), self.data)
Ejemplo n.º 12
0
class ObjKeeper(object):
    """Bounded pool of reusable objects for one resource type."""

    def __init__(self, max_size):
        # The semaphore bounds how many objects may be checked out at once.
        self.lock = Semaphore(max_size)
        self.objs = deque()

    def pop(self):
        """Take an object from the pool.

        Blocks while ``max_size`` objects are already checked out.
        Returns None when the pool is empty, signalling the caller to
        build a fresh object.
        """
        self.lock.acquire()

        try:
            return self.objs.popleft()
        except IndexError:
            # Pool empty (the original bare ``except:`` also hid real
            # bugs such as AttributeError): caller must create a new one.
            return None

    def push(self, obj):
        """Return ``obj`` to the pool; a falsy obj only frees the slot."""
        if obj:
            self.objs.append(obj)

        # Always release so the checked-out count stays balanced with pop().
        self.lock.release()
Ejemplo n.º 13
0
class OneLaneBridge(object):
    """
    A one-lane bridge allows multiple cars to pass in either direction, but at any
    point in time, all cars on the bridge must be going in the same direction.

    Cars wishing to cross should call the cross function, once they have crossed
    they should call finished()

    Python 2 code (statement-form ``print``).
    """

    def __init__(self):
        # Current traffic direction, 0 or 1, chosen at random on startup.
        self.direction = random.randrange(2)
        self.cars_on = Semaphore(0)   # wakes cars queued for the opposite direction
        self.num_cars_on = 0          # cars currently on the bridge
        self.waiting = 0              # cars blocked waiting to cross

    def cross(self,direction):
        """wait for permission to cross the bridge.  direction should be either
        north (0) or south (1).

        NOTE(review): the counter updates and the acquire are not guarded
        by any lock, so concurrent callers can race on ``waiting`` and
        ``num_cars_on`` — confirm whether callers serialize externally.
        """
        if (direction != self.direction) and self.num_cars_on:
            print "Waiting going direction %d" % direction
            self.waiting += 1
            self.cars_on.acquire()
        # Bridge is empty or == direction so we can take the opening and cross
        self.direction = direction
        self.num_cars_on += 1

    def finished(self, direction):
        """Leave the bridge; the last car off releases every waiting car."""
        self.num_cars_on -= 1
        if not self.num_cars_on:
            for car in range(self.waiting):
                self.cars_on.release()
            self.waiting = 0
Ejemplo n.º 14
0
    def test_descriptor_clock(self):
        """Tests the clock trigger as a descriptor."""
        semp = Semaphore(0)  # released after the fifth clock tick

        class Foo(ModuleBase):
            clk = Clock()

            def __init__(self):
                super().__init__()
                self.bar = 0  # tick counter
                self.clk.frequency = 100

            @activity(clk)
            async def bas(self):
                # Runs on every tick; stop the clock after five ticks and
                # signal the waiting test thread.
                self.bar += 1
                if self.bar >= 5:
                    self.clk.stop()
                    semp.release()

        foo = Foo()
        t0 = time()
        foo.clk.start()
        # Five ticks at 100 Hz should take at least 50 ms but finish
        # comfortably within the 100 ms timeout.
        self.assertTrue(semp.acquire(timeout=0.1))
        self.assertGreaterEqual(time() - t0, 0.05)
        self.assertEqual(foo.bar, 5)
Ejemplo n.º 15
0
def my_scan(myip,myemail):
    """Run a "Full and fast" scan of ``myip`` via the global ``manager``
    and write an HTML report, skipping IPs already scanned for this user.

    Python 2 code (statement-form ``print``).
    """
    global manager
    # Per-user results directory derived from the local part of the email.
    Mydir="/root/myopenvas/results/"+myemail[0:myemail.find('@')]
    if os.path.isfile(Mydir+"/"+myip+".html"):
        print myip+" already exist"
        return 
    start=datetime.datetime.now()
    print "Start of: "+myip+" at : ",start
#    """
    # Block this thread until the scanner reports completion:
    # callback_end releases the semaphore we then wait on.
    Sem =Semaphore(0)
    scan_id,target_id=manager.launch_scan(
            target=myip,
            profile="Full and fast",
            callback_end=partial(lambda x:x.release(),Sem),
            callback_progress=my_print_status
            )
    Sem.acquire()
#    """
    end=datetime.datetime.now()
    print "End of: "+myip+" at : ",end
    print "*******************************"
    print "Cost :",(end-start)
    print "*******************************"

    report_id=manager.get_tasks_last_report_id(scan_id)
    write_report(report_id,myip,myemail)
Ejemplo n.º 16
0
Archivo: pmap.py Proyecto: fbrusch/pmap
def pmap(f, l, limit=None):
    """A parallel version of map, that preserves ordering.

    Spawns one thread per element running ``mapper``; ``limit`` (if
    given) caps the number of concurrently running threads with a
    semaphore, presumably released by ``mapper`` when a slot frees up —
    ``mapper`` is defined elsewhere, confirm there.

    Example:
    >>> pmap(lambda x: x*x, [1,2,3])
    [1, 4, 9]
    >>> import time
    >>> t1 = time.perf_counter()
    >>> null = pmap(lambda x: time.sleep(1), range(10), 3)
    >>> time.perf_counter() - t1 > 0.001
    True
    """
    if limit:
        pool_semaphore = Semaphore(limit)
    else:
        pool_semaphore = None

    pool = []
    # ``mapper`` writes res[i]; ``range`` is immutable on Python 3, so the
    # result slots must live in a real list.
    res = list(range(len(l)))
    for i in range(len(l)):
        t = Thread(target=mapper, args=(f, l[i], res, i, pool_semaphore))
        pool.append(t)
        if limit:
            pool_semaphore.acquire()
        t.start()
    # A bare ``map`` is lazy on Python 3 and would never join the threads.
    for t in pool:
        t.join()
    return res
Ejemplo n.º 17
0
 def test_group_action(self):
     """GROUP_ACTION schedules must reject a duration and non-integer
     arguments, then run the group action exactly once (Python 2 code:
     uses ``ctx.exception.message``)."""
     start = time.time()
     semaphore = Semaphore(0)  # released by the controller once the schedule ran
     controller = self._get_controller()
     controller.set_unittest_semaphore(semaphore)
     # New controller is empty
     self.assertEquals(len(controller.schedules), 0)
     with self.assertRaises(RuntimeError) as ctx:
         # Doesn't support duration
         controller.add_schedule('group_action', start + 120, 'GROUP_ACTION', None, None, 1000, None)
     self.assertEquals(ctx.exception.message, 'A schedule of type GROUP_ACTION does not have a duration. It is a one-time trigger')
     with self.assertRaises(RuntimeError) as ctx:
         # Incorrect argument
         controller.add_schedule('group_action', start + 120, 'GROUP_ACTION', 'foo', None, None, None)
     self.assertEquals(ctx.exception.message, 'The arguments of a GROUP_ACTION schedule must be an integer, representing the Group Action to be executed')
     # Valid schedule: integer argument, no duration.
     controller.add_schedule('group_action', start + 120, 'GROUP_ACTION', 1, None, None, None)
     self.assertEquals(len(controller.schedules), 1)
     self.assertEquals(controller.schedules[0].name, 'group_action')
     self.assertEquals(controller.schedules[0].status, 'ACTIVE')
     controller.start()
     # Block until the scheduled action has actually executed.
     semaphore.acquire()
     self.assertEquals(GatewayApi.RETURN_DATA['do_group_action'], 1)
     self.assertEquals(len(controller.schedules), 1)
     self.assertEquals(controller.schedules[0].name, 'group_action')
     self.assertEquals(controller.schedules[0].status, 'COMPLETED')
     controller.stop()
class Synchronized:
	"""Replaces the methods named in the subclass's ``__synchronized__``
	tuple with per-instance CallHook wrappers so their calls can be
	serialized through the instance lock (Python 2 code: statement-form
	``print``)."""

	def __init__(self):
		from threading import Semaphore
		self.__lock = Semaphore()      # serializes access to this instance
		self.__ownerThread = None      # thread currently holding the lock
		classdict = self.__class__.__dict__
		for attr in classdict.get("__synchronized__", ()):
			try:
				method = classdict[attr]
				if callable(method):
					# Shadow the class attribute with an instance-level hook.
					self.__dict__[attr] = CallHook(self, method)
				else:
					if VERBOSE: print "! Synchronized: Object is not callable: %s" % (attr,)
			except KeyError:
				if VERBOSE: print "! Synchronized: Method not found: %s" % (attr,)

	def releaseInstance(self):
		"""Clear the recorded owner and release the instance lock."""
		self.__ownerThread = None
		self.__lock.release()

	def acquireInstance(self):
		"""Take the instance lock and record the calling thread as owner."""
		self.__lock.acquire()
		self.__ownerThread = currentThread()

	def ownerThread(self):
		"""Return the thread currently holding the lock (or None)."""
		return self.__ownerThread
Ejemplo n.º 19
0
class ThreadSemaphore(object):
    """Binary semaphore that tracks its owning thread.

    The owner cannot acquire it a second time (acquire() returns False),
    and only the owner may release it.
    """

    def __init__(self):

        self._semaphore = Semaphore(1)
        self._thread = None  # thread currently holding the semaphore

    def acquire(self, wait=True):
        """Try to take ownership for the calling thread.

        Returns the underlying acquire() result, or False immediately
        when the caller already owns the semaphore.
        """
        if self._thread is not currentThread():
            result = self._semaphore.acquire(wait)
            if result:
                self._thread = currentThread()

            return result

        return False

    def release(self):
        """Release the semaphore; only the owning thread may do so.

        Raises ValueError otherwise. (The original used the Python-2-only
        ``raise ValueError, '...'`` form; the call form below is valid on
        both Python 2 and 3.)
        """
        if self._thread is not currentThread():
            raise ValueError('current thread did not acquire semaphore')
        else:
            self._thread = None
            self._semaphore.release()
Ejemplo n.º 20
0
class PromptService(object):
    """Bridges a blocking input request to an asynchronous command window."""

    def __init__(self):
        self.semaphore = Semaphore(0)  # signalled when a response arrives
        self.commandWindow = None      # window registered via setCommandWindow()
        self.response = None           # value delivered by respond()

    def setCommandWindow(self, window):
        """Register the window that will display prompts."""
        self.commandWindow = window

    def requestInput(self, prompt):
        """Show ``prompt`` and block until respond() delivers a value.

        Raises RuntimeError when no command window is registered, and
        KeyboardInterrupt when the delivered response is None.
        """
        if self.commandWindow is None:
            raise RuntimeError("Command window hasn't registered itself")
        if prompt is None:
            prompt = ''

        self.commandWindow.prompt(prompt, 'standard-output', self.respond, 'standard-input')
        self.semaphore.acquire()

        # Guard-style early raise instead of the original if/else shape.
        if self.response is None:
            raise KeyboardInterrupt
        answer = self.response
        self.response = None
        return str(answer)

    def respond(self, value):
        """Deliver ``value`` as the pending response and wake the waiter."""
        self.response = value
        self.semaphore.release()
Ejemplo n.º 21
0
def wait_for_call(obj, target, callback=None):
    """Patch ``obj.target`` so a test can block until it has been called.

    Yields a WaitResult; when the ``with`` body finishes, blocks until
    the patched callable has been invoked (and ``callback``, if given,
    returned truthy for that invocation).

    NOTE(review): contains ``yield`` — presumably decorated with
    ``contextlib.contextmanager`` at its (not visible) definition site.
    """

    sem = Semaphore(0)
    result = WaitResult()

    unpatched = getattr(obj, target)

    def maybe_release(args, kwargs, res, exc_info):
        # Let the optional callback decide whether this invocation counts.
        should_release = True
        if callable(callback):
            should_release = callback(args, kwargs, res, exc_info)

        if should_release:
            result.send(res, exc_info)
            sem.release()

    def wraps(*args, **kwargs):
        # Call through to the original, capturing result or exception.
        res = None
        exc_info = None
        try:
            res = unpatched(*args, **kwargs)
        except Exception:
            exc_info = sys.exc_info()

        maybe_release(args, kwargs, res, exc_info)

        # Re-raise so the patched call site still observes the failure.
        if exc_info is not None:
            six.reraise(*exc_info)
        return res

    with patch.object(obj, target, new=wraps):
        yield result
        sem.acquire()
Ejemplo n.º 22
0
 def _solve_mt(self, formula):
     """Run every configured solver on ``formula`` in parallel and return
     the first solution produced."""
     solverThreads = []
     solution = None

     sem = Semaphore(0)  # released by each _PortfolioThread on completion — confirm in its run()

     for solver in self.solvers:
         sThread = _PortfolioThread(solver, formula, sem)
         solverThreads.append(sThread)

         sThread.start()

     # Wait for at least one thread to finish
     sem.acquire()

     for sThread in solverThreads:
         # Keep the first available solution (and its benchmark outside
         # bench mode).
         if solution is None and sThread.solution is not None:
             solution = sThread.solution
             if not self.benchMode:
                 self._benchmark = sThread.solver.getBenchmark()

         if not self.benchMode:
             # NOTE(review): abort() is also called on the solver that
             # finished — presumably a no-op once done; confirm.
             sThread.solver.abort()

     for sThread in solverThreads:
         sThread.join()

     assert solution is not None, "Solver returned with invalid solution"

     if self.benchMode:
         # Bench mode: collect benchmarks from every solver, not just the winner.
         self._benchmark = [sThread.solver.getBenchmark() for sThread in solverThreads]

     return solution
Ejemplo n.º 23
0
 def test_basic_action(self):
     """BASIC_ACTION schedules must reject a duration and malformed
     argument dicts, then run the action exactly once (Python 2 code:
     uses ``ctx.exception.message``)."""
     start = time.time()
     semaphore = Semaphore(0)  # released by the controller once the schedule ran
     controller = self._get_controller()
     controller.set_unittest_semaphore(semaphore)
     self.assertEquals(len(controller.schedules), 0)
     with self.assertRaises(RuntimeError) as ctx:
         # Doesn't support duration
         controller.add_schedule('basic_action', start + 120, 'BASIC_ACTION', None, None, 1000, None)
     self.assertEquals(ctx.exception.message, 'A schedule of type BASIC_ACTION does not have a duration. It is a one-time trigger')
     invalid_arguments_error = 'The arguments of a BASIC_ACTION schedule must be of type dict with arguments `action_type` and `action_number`'
     with self.assertRaises(RuntimeError) as ctx:
         # Incorrect argument
         controller.add_schedule('basic_action', start + 120, 'BASIC_ACTION', 'foo', None, None, None)
     self.assertEquals(ctx.exception.message, invalid_arguments_error)
     with self.assertRaises(RuntimeError) as ctx:
         # Incorrect argument
         controller.add_schedule('basic_action', start + 120, 'BASIC_ACTION', {'action_type': 1}, None, None, None)
     self.assertEquals(ctx.exception.message, invalid_arguments_error)
     # Valid schedule: dict with both required keys, no duration.
     controller.add_schedule('basic_action', start + 120, 'BASIC_ACTION', {'action_type': 1, 'action_number': 2}, None, None, None)
     self.assertEquals(len(controller.schedules), 1)
     self.assertEquals(controller.schedules[0].name, 'basic_action')
     self.assertEquals(controller.schedules[0].status, 'ACTIVE')
     controller.start()
     # Block until the scheduled action has actually executed.
     semaphore.acquire()
     self.assertEquals(GatewayApi.RETURN_DATA['do_basic_action'], (1, 2))
     self.assertEquals(len(controller.schedules), 1)
     self.assertEquals(controller.schedules[0].name, 'basic_action')
     self.assertEquals(controller.schedules[0].status, 'COMPLETED')
     controller.stop()
Ejemplo n.º 24
0
class BoundedHashSet(object):
    """Set with a maximum capacity enforced by a counting semaphore."""

    def __init__(self, capacity):
        """
        Lock is a mutex guarding the critical section so only one thread
        touches the set at a time.

        Semaphore enforces capacity: each sem.acquire() takes one free
        slot (blocking when none remain) and each sem.release() returns
        one.
        :param capacity: maximum number of elements the set may hold
        :return:
        """
        self.mutex = Lock()
        self.st = set()
        self.sem = Semaphore(capacity)

    def add(self, item):
        """Insert ``item``, blocking while the set is at capacity."""
        # NOTE(review): this membership test runs outside the mutex, so
        # two threads adding the same new item can both consume a slot —
        # behavior preserved from the original design.
        if item not in self.st:
            self.sem.acquire()

        # ``with`` releases the mutex even if add() raises (the original
        # manual acquire/release could leave it held forever).
        with self.mutex:
            self.st.add(item)

    def erase(self, item):
        """Remove ``item`` and free its capacity slot.

        Raises KeyError when ``item`` is absent — and, unlike the
        original, releases the mutex in that case instead of deadlocking
        every later caller.
        """
        with self.mutex:
            self.st.remove(item)
        self.sem.release()
Ejemplo n.º 25
0
class JobWatcher(Thread):
    """
    Mechanism to watch Job activity, it will cleanup run instances and
    re-generate run if it is a multi_run job.
    """

    def __init__(self, workspace):
        super().__init__()
        self.workspace = workspace
        # NOTE(review): acquired non-blockingly and then used as a context
        # manager in run(); the signalling protocol with other components
        # is not visible here — confirm intended semantics.
        self.watcher_lock = Semaphore()

    def run(self):
        """Loop forever, reaping finished runs, rescheduling multi-run
        jobs, and pruning dead triggers."""
        logging.debug("Start JobWatcher")
        while True:
            self.watcher_lock.acquire(blocking=False)
            with self.watcher_lock:
                logging.debug("Watcher is watching")
                stopping = bool(self.workspace.metadata["status"] & STOPPED)
                # Reap finished runs; reschedule jobs flagged multi_run
                # unless the workspace is stopping.
                to_removes = [x.job for x in self.workspace.runs.values() if not x.is_alive()]
                for job in to_removes:
                    del self.workspace.runs[job.name]
                    if job.multi_run and not stopping:
                        thread, trigger = job.schedule(self.workspace)
                        self.workspace.runs[job.name] = thread
                        if trigger is not None:
                            self.workspace.triggers.append(trigger)
                        thread.start()

                # Iterate over a copy: the original removed elements from
                # the list while iterating it, which skips the element
                # following each removal.
                for trigger in list(self.workspace.triggers):
                    if not trigger.is_alive():
                        self.workspace.triggers.remove(trigger)
        logging.debug("End JobWatcher")
Ejemplo n.º 26
0
def recover_images(parser, destination):
    """Parse images and save them to <manga>/<chapter>/<image>.

    At most ``BaseParser.MAX_CONNECTIONS`` ImageSaver threads run at
    once, throttled by a semaphore each saver releases when done.
    """
    urls = parser.parse()
    manga_path = os.path.join(destination, parser.title)
    ch_digits = len(str(len(urls)))
    for chapter, pages in urls:
        #Normalize chapter digits
        chapter = "0" * (ch_digits - len(str(chapter))) + str(chapter)
        chapter_path = os.path.join(manga_path, chapter)
        if not os.path.exists(chapter_path):
            os.makedirs(chapter_path)
        savers = list()
        logging.info('Saving Chapter %s to %s', chapter, chapter_path)

        pg_digits = len(str(len(pages)))
        sem = Semaphore(BaseParser.MAX_CONNECTIONS)
        for page, url in enumerate(pages, start=1):
            sem.acquire()
            #Normalize page digits
            page = "0" * (pg_digits - len(str(page))) + str(page)
            path = os.path.join(chapter_path, str(page) + '.jpg')
            saver = utils.ImageSaver(path, url, sem)
            savers.append(saver)
            saver.start()
        # A bare ``map`` is lazy on Python 3, so the original never
        # actually joined the saver threads; use an explicit loop.
        for saver in savers:
            saver.join()
Ejemplo n.º 27
0
class Footman:
    """Dining philosophers coordinated by a "footman" semaphore that seats
    at most N-1 philosophers at once, which rules out deadlock."""

    def __init__(self, num_philosophers, num_meals):
        self.num_philosophers = num_philosophers
        self.num_meals = num_meals  # shared meal budget across philosophers
        self.forks = [Semaphore(1) for _ in range(self.num_philosophers)]
        # At most one philosopher is denied a seat at any time.
        self.footman = Semaphore(self.num_philosophers - 1)

    def left(self, i):
        """Index of the fork on philosopher ``i``'s left."""
        return i

    def right(self, i):
        """Index of the fork on philosopher ``i``'s right (wraps around)."""
        return (i + 1) % self.num_philosophers

    def get_forks(self, i):
        """Ask the footman for a seat, then pick up both forks."""
        self.footman.acquire()
        self.forks[self.right(i)].acquire()
        self.forks[self.left(i)].acquire()

    def put_forks(self, i):
        """Put both forks down and give the seat back to the footman."""
        self.forks[self.right(i)].release()
        self.forks[self.left(i)].release()
        self.footman.release()

    def philosopher(self, id):
        """Alternate eating and thinking until the meal budget runs out."""
        while self.num_meals > 0:
            self.get_forks(id)
            # eating
            self.num_meals -= 1
            sleep(rng.random() / 100)
            self.put_forks(id)
            # thinking
            sleep(rng.random() / 100)
Ejemplo n.º 28
0
def call_on_main_thread(func, *args, **kwargs):
    """Prepare ``func`` to be run on the main thread and its result
    collected (Python 2 code: ``except Exception, e``).

    NOTE(review): in the visible portion ``wrapped_call`` is defined but
    never invoked, and ``done``/``result`` are never read back — the code
    that schedules the call and waits on ``done`` is presumably below
    this excerpt; confirm against the full source.
    """
    done = Semaphore(0)
    # TODO use other name than "result"
    result = []

    def wrapped_call():
        # Invoke func; any exception becomes a Callback carrying the error.
        try:
            res_cb = func(*args, **kwargs)
        except Exception, e:
            res_cb = Callback()
            res_cb(e)

        if not isinstance(res_cb, Callback):
            raise ValueError("Expected a monocle Callback from %r, got %r" % (func, res_cb))

        @_o
        def wait_for_result():
            # Resolve the callback; an exception is stored as the result.
            try:
                res = yield res_cb
            except Exception, e:
                # TODO print traceback to a StringIO?
                res = e

            result.append(res)
            done.release()
Ejemplo n.º 29
0
class Wc:
    """Tracks a single-occupancy resource: current user plus a queue of
    waiting nicks, gated by a binary semaphore."""

    def __init__(self):
        self.flush()

    def flush(self):
        """Reset to an empty, unoccupied state."""
        self.sem = Semaphore(1)  # one occupant at a time
        self.user = None         # current occupant, if any
        self.waiting = []        # nicks queued for their turn

    def used_by(self, who):
        """Record ``who`` as occupant and drop them from the queue."""
        self.user = who
        self.waiting.remove(who)

    def being_used_by(self, who):
        """True when ``who`` is the current occupant."""
        return self.user == who

    def acquire(self, a):
        """Acquire the occupancy semaphore; ``a`` is the blocking flag."""
        return self.sem.acquire(a)

    def release(self):
        """Vacate: clear the occupant and free the semaphore."""
        self.user = None
        self.sem.release()

    def enqueue(self, nick):
        """Append ``nick`` to the waiting queue."""
        self.waiting.append(nick)

    def is_waiting(self, nick):
        """True when ``nick`` is somewhere in the queue."""
        return nick in self.waiting

    def who(self):
        """Return the current occupant (None when vacant)."""
        return self.user
Ejemplo n.º 30
0
    def test_thread(self):
        """A job launched via launch_job must run on its thread and commit
        its change within ten seconds."""
        from time import sleep
        from threading import Semaphore
        from naaya.core.zope2util import ofs_path, launch_job
        import transaction

        job_done = Semaphore(0)  # released by the job once it has committed

        def threaded_job(context):
            context.hello = "world"
            transaction.commit()
            job_done.release()

        folder = self.portal['info']
        launch_job(threaded_job, folder, ofs_path(folder))
        transaction.commit()

        # Poll for up to ~10 seconds (100 x 0.1s) for the job to finish.
        attempts = 0
        finished = False
        while attempts < 100 and not finished:
            finished = job_done.acquire(blocking=False)
            if not finished:
                sleep(0.1)
            attempts += 1
        if not finished:
            self.fail("Waited 10 seconds and the job did not finish yet")

        transaction.abort()
        self.assertEqual(folder.hello, "world")
Ejemplo n.º 31
0
#Semáforo
from threading import Thread, Semaphore  #Além do sub-módulo Thread, o Semaphore também é importado para utilização de acquire e realease no programa
import time  #É importado a biblioteca time para que seja feito um tempo de
#execução

# Global semaphore that serializes access to the critical section,
# signalling when execution must stop and when it may resume.
s = Semaphore()


def regiaoCritica():
    """Simulate the critical section's work with a one-second delay."""
    time.sleep(1)


def processamentoA(times, delay):
    """Process A: repeatedly pass through the semaphore-guarded critical
    section.

    times: number of iterations to run.
    delay: pause in seconds after each iteration.
    """
    for iteration in range(times):
        print("Secao de Entrada A - ", iteration + 1)  # entry section
        # Enter the exclusive (critical) section.
        s.acquire()
        print("Regiao Critica A")
        regiaoCritica()  # the critical-section work itself
        print("Secao de Saida A")
        # Leave the critical section so other sections may run.
        s.release()
        print("Regiao nao critica A\n")
        # Idle for ``delay`` seconds before the next iteration.
        time.sleep(delay)
Ejemplo n.º 32
0
class RESTfulCatalog(object):
    """CherryPy REST catalog of broker settings, devices, services and
    users, persisted to a JSON file and guarded by a semaphore.

    Device and service records expire two minutes after their
    insert-timestamp and are purged by a background Monitor.
    """
    exposed = True

    def __init__(self):
        # JSON persistence file, stored next to this module.
        self.storage = os.path.join(os.path.dirname(__file__),"data.json")
        if os.path.isfile(self.storage):
            self.READdata()
        else:
            # First run: seed an empty catalog with a default broker.
            self.data = {}
            self.data["broker"] = {"address":"test.mosquitto.org", "port":1883}
            self.data["devices"] = {}
            self.data["services"] = {}
            self.data["users"] = {}
            # create the empty storage file
            self.WRITEdata()
        self.data_sem = Semaphore()
        # Run CLEANdata every 120 seconds in the background.
        cherrypy.process.plugins.Monitor(cherrypy.engine, self.CLEANdata, frequency=120).subscribe()

    def CLEANdata(self):
        """Purge devices and services whose insert-timestamp is older
        than 120 seconds, then persist."""
        self.ACQUIREsem("CLEANER")
        t = time.time()
        devices = 0
        keys = list(self.data["devices"].keys())
        for k in keys:
            if self.data["devices"][k]["insert-timestamp"] < t - 120:
                del self.data["devices"][k]
                devices += 1
        
        services = 0
        keys = list(self.data["services"].keys())
        for k in keys:
            if self.data["services"][k]["insert-timestamp"] < t - 120:
                del self.data["services"][k]
                services += 1
        self.WRITEdata()
        cherrypy.log(f"CLEANER: Deleted {devices} devices")
        cherrypy.log(f"CLEANER: Deleted {services} services")
        self.RELEASEsem("CLEANER")
            


    def WRITEdata(self):
        """Persist the in-memory catalog to the JSON storage file."""
        with open(self.storage, "w") as f:
            json.dump(self.data, f, indent=4)
    
    def READdata(self):
        """Load the catalog from the JSON storage file."""
        with open(self.storage, "r") as f:
            self.data = json.load(f)

    def ACQUIREsem(self, who):
        """Take the data semaphore, logging the acquiring handler."""
        self.data_sem.acquire()
        cherrypy.log(f"{who} acquired semaphore")
    
    def RELEASEsem(self, who):
        """Release the data semaphore, logging the releasing handler."""
        self.data_sem.release()
        cherrypy.log(f"{who} released semaphore")

    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    # POST is used because no new path locations are created; records are
    # appended to an already-existing location.
    def POST(self, *uri, **params):
        """Dispatch POST /devices, /users or /services; 404 otherwise."""
        self.ACQUIREsem("POST")
        if uri == ('devices', ):
            t = self.POSTdevice(cherrypy.request)
        elif uri == ('users', ):
            t = self.POSTuser(cherrypy.request)
        elif uri == ('services', ):
            t = self.POSTservice(cherrypy.request)
        else:
            self.RELEASEsem("POST")
            raise cherrypy.HTTPError(404)
        
        self.RELEASEsem("POST")
        return t
    
    @cherrypy.tools.json_out()
    def GET(self, *uri, **params):
        """Dispatch GET for broker/devices/users/services; with an ``id``
        param a single record is returned, otherwise the whole category."""
        self.ACQUIREsem("GET")
        if uri == ('broker',):
            t = self.GETall(uri[0])
        elif uri == ('devices',):
            if params == {}:
                t = self.GETall(uri[0])
            else:
                t = self.GETone(params, uri[0])
        elif uri == ('users',):
            if params == {}:
                t = self.GETall(uri[0])
            else:
                t = self.GETone(params, uri[0])
        elif uri == ('services',):
            if params == {}:
                t = self.GETall(uri[0])
            else:
                t = self.GETone(params, uri[0])
        else:
            self.RELEASEsem("GET")
            raise cherrypy.HTTPError(404)
        
        self.RELEASEsem("GET")
        return t

    def GETall(self, target):
        """Return every record in the ``target`` category."""
        return self.data[target]
    
    def GETone(self, params, target):
        """Return one record by ``id``; 400 when no id, 404 when absent.

        Releases the semaphore itself on the error paths (the caller then
        skips its own release because the exception propagates).
        """
        if "id" in params.keys():
            ID = str(params["id"])
            print("Id is:", ID)
            print(self.data[target])
            print(self.data[target].get(ID))
            try:
                return self.data[target][ID]
            except:
                self.RELEASEsem("GET")
                raise cherrypy.HTTPError(404, "Element with specified id not found.")
        else:
            self.RELEASEsem("GET")
            raise cherrypy.HTTPError(400, "You must specify an id.")
  
    def POSTdevice(self, req):
        """Register/refresh a device keyed by its client IP; any
        malformed body becomes HTTP 400."""
        try:
            obj = req.json
            ip = req.remote.ip
            # The device's IP address is used as its identifier, being unique on the local network.
            # If the IP changes, the device is registered again and the old record is removed
            # after the 2-minute timeout, or overwritten by a new device that obtains that IP.
            device = {}
            assert isinstance(obj["resources"], list)
            assert isinstance(obj["end-points"], dict)
            assert isinstance(obj["end-points"]["rest"], list)
            assert isinstance(obj["end-points"]["mqtt-topics"], list)
            device["resources"] = obj["resources"]
            device["end-points"] = obj["end-points"]
            device["insert-timestamp"] = time.time()
            self.data["devices"][ip] = device

            self.WRITEdata()

            return self.data["devices"][ip]
        except:
            self.RELEASEsem("POST")
            raise cherrypy.HTTPError(400)

    def POSTuser(self, req):
        """Register a user; duplicate email addresses yield HTTP 409."""
        try:
            obj = req.json
            user = {}
            assert isinstance(obj["email"], list)
            user["name"] = obj["name"]
            user["surname"] = obj["surname"]
            obj["email"].sort()
            user["email"] = obj["email"]
        except:
            self.RELEASEsem("POST")
            raise cherrypy.HTTPError(400)
        
        # Reject any email already registered to an existing user.
        for u in self.data["users"].values():
            for e in u["email"]:
                for new_e in user["email"]:
                    if new_e == e:
                        self.RELEASEsem("POST")
                        raise cherrypy.HTTPError(409, f"Email {new_e} already registered.")

        identifier = str(len(self.data["users"]))
        self.data["users"][identifier] = user

        self.WRITEdata()

        return self.data["users"][identifier]   

    def POSTservice(self, req):
        """Register or refresh a service; identical description and
        end-points refresh the existing record's timestamp."""
        try:
            obj = req.json
            service = {}
            assert isinstance(obj["end-points"], dict)
            assert isinstance(obj["end-points"]["rest"], list)
            assert isinstance(obj["end-points"]["mqtt-topics"], list)
            service["description"] = obj["description"]
            obj["end-points"]["rest"].sort()
            obj["end-points"]["mqtt-topics"].sort()
            service["end-points"] = obj["end-points"]
        except:
            self.RELEASEsem("POST")
            raise cherrypy.HTTPError(400)
        
        # Matching description + end-points means refresh, not insert.
        refresh = False
        for k, s in self.data["services"].items():
            if service["description"] == s["description"] and service["end-points"] == s["end-points"]:
                refresh = True
                identifier = k
        
        if not refresh:
            # Pick the smallest unused numeric string as the identifier.
            identifiers = list(self.data["services"].keys())
            for i in range(len(identifiers)+1):
                if str(i) not in identifiers:
                    identifier = str(i)
                    break

        service["insert-timestamp"] = time.time()
        self.data["services"][identifier] = service

        self.WRITEdata()

        return self.data["services"][identifier]
Ejemplo n.º 33
0
#!/usr/bin/env python
# coding=utf-8
import time
from random import random
from threading import Semaphore, Thread, Timer

sem = Semaphore(5)


def foo(tid):
    """Worker: hold the shared semaphore while sleeping a random 0-2 s.

    BUG FIX: the original used Python-2 print statements, which are syntax
    errors under Python 3 (the rest of the file already uses f-strings).
    """
    with sem:
        print('{} acquire sem'.format(tid))
        wt = random() * 2
        time.sleep(wt)

    print('{} release sem'.format(tid))


def multit():
    """Launch 500 foo() workers and block until every one has finished."""
    workers = []

    for tid in range(500):
        worker = Thread(target=foo, args=(tid, ))
        workers.append(worker)
        worker.start()

    for worker in workers:
        worker.join()
Ejemplo n.º 34
0
class PooledPg:
    """Pool for classic PyGreSQL connections.

    After you have created the connection pool, you can use
    connection() to get pooled, steady PostgreSQL connections.
    """

    version = __version__

    def __init__(self,
                 mincached=0,
                 maxcached=0,
                 maxconnections=0,
                 blocking=False,
                 maxusage=None,
                 setsession=None,
                 reset=None,
                 *args,
                 **kwargs):
        """Set up the PostgreSQL connection pool.

        mincached: initial number of connections in the pool
            (0 means no connections are made at startup)
        maxcached: maximum number of connections in the pool
            (0 or None means unlimited pool size)
        maxconnections: maximum number of connections generally allowed
            (0 or None means an arbitrary number of connections)
        blocking: determines behavior when exceeding the maximum
            (if this is set to true, block and wait until the number of
            connections decreases, otherwise an error will be reported)
        maxusage: maximum number of reuses of a single connection
            (0 or None means unlimited reuse)
            When this maximum usage number of the connection is reached,
            the connection is automatically reset (closed and reopened).
        setsession: optional list of SQL commands that may serve to prepare
            the session, e.g. ["set datestyle to ...", "set time zone ..."]
        reset: how connections should be reset when returned to the pool
            (0 or None to rollback transcations started with begin(),
            1 to always issue a rollback, 2 for a complete reset)
        args, kwargs: the parameters that shall be used to establish
            the PostgreSQL connections using class PyGreSQL pg.DB()
        """
        self._args, self._kwargs = args, kwargs
        self._maxusage = maxusage
        self._setsession = setsession
        self._reset = reset or 0
        if mincached is None:
            mincached = 0
        if maxcached is None:
            maxcached = 0
        if maxconnections is None:
            maxconnections = 0
        if maxcached:
            # the pool can never cache fewer connections than it pre-opens
            if maxcached < mincached:
                maxcached = mincached
        if maxconnections:
            if maxconnections < maxcached:
                maxconnections = maxcached
            # Create semaphore for number of allowed connections generally:
            from threading import Semaphore
            self._connections = Semaphore(maxconnections)
            self._blocking = blocking
        else:
            self._connections = None
        self._cache = Queue(maxcached)  # the actual connection pool
        # Establish an initial number of database connections:
        idle = [self.connection() for i in range(mincached)]
        while idle:
            idle.pop().close()

    def steady_connection(self):
        """Get a steady, unpooled PostgreSQL connection."""
        return SteadyPgConnection(self._maxusage, self._setsession, True,
                                  *self._args, **self._kwargs)

    def connection(self):
        """Get a steady, cached PostgreSQL connection from the pool.

        Raises TooManyConnections when the pool is exhausted and
        non-blocking mode is in effect.
        """
        if self._connections:
            if not self._connections.acquire(self._blocking):
                raise TooManyConnections
        try:
            try:
                con = self._cache.get(0)  # non-blocking get from the pool
            except Empty:
                con = self.steady_connection()
            return PooledPgConnection(self, con)
        except Exception:
            # BUG FIX: if opening/wrapping the connection fails, give the
            # semaphore slot back, otherwise pool capacity leaks on errors.
            if self._connections:
                self._connections.release()
            raise

    def cache(self, con):
        """Put a connection back into the pool cache."""
        try:
            if self._reset == 2:
                con.reset()  # reset the connection completely
            else:
                if self._reset or con._transaction:
                    try:
                        con.rollback()  # rollback a possible transaction
                    except Exception:
                        pass
            self._cache.put(con, 0)  # and then put it back into the cache
        except Full:
            con.close()
        finally:
            # BUG FIX: release in a finally block so a failing reset()
            # cannot leak the semaphore slot.
            if self._connections:
                self._connections.release()

    def close(self):
        """Close all connections in the pool."""
        while 1:
            try:
                con = self._cache.get(0)
                try:
                    con.close()
                except Exception:
                    pass
                if self._connections:
                    self._connections.release()
            except Empty:
                break

    def __del__(self):
        """Delete the pool."""
        try:
            self.close()
        except:  # builtin Exceptions might not exist any more
            pass
Ejemplo n.º 35
0
    def __init__(self,
                 socketio,
                 rtklib_path=None,
                 config_path=None,
                 enable_led=True,
                 log_path=None):
        """Initialise the RTKLIB wrapper.

        socketio:    Socket.IO server used to broadcast status to browsers.
        rtklib_path: RTKLIB checkout root (defaults to ~/RTKLIB).
        config_path: directory of rtkrcv config files (defaults to the
                     bundled "rtklib_configs" next to this module).
        enable_led:  currently unused -- all LED code is commented out below.
        log_path:    where logs are served from (defaults to "../data").

        Side effects: constructs the rtkrcv/str2str/log controllers and
        starts a background thread running setCorrectTime().
        """

        print("RTKLIB 1")
        print(rtklib_path)
        print(log_path)

        if rtklib_path is None:
            rtklib_path = os.path.join(os.path.expanduser("~"), "RTKLIB")

        if config_path is None:
            self.config_path = os.path.join(os.path.dirname(__file__),
                                            "rtklib_configs")
        else:
            self.config_path = config_path

        if log_path is None:
            #TODO find a better default location
            self.log_path = "../data"
        else:
            self.log_path = log_path

        # This value should stay below the timeout value or the Satellite/Coordinate broadcast
        # thread will stop
        self.sleep_count = 0

        # default state for RTKLIB is "rover single"
        self.state = "base"

        # we need this to broadcast stuff
        self.socketio = socketio

        # these are necessary to handle rover mode
        self.rtkc = RtkController(rtklib_path, self.config_path)
        self.conm = ConfigManager(rtklib_path, self.config_path)

        # this one handles base settings
        self.s2sc = Str2StrController(rtklib_path)

        # take care of serving logs
        self.logm = LogManager(rtklib_path, self.log_path)

        # basic synchronisation to prevent errors
        self.semaphore = Semaphore()

        # we need this to send led signals
        #        self.enable_led = enable_led

        #        if self.enable_led:
        #            self.led = ReachLED()

        # broadcast satellite levels and status with these
        self.server_not_interrupted = True
        self.satellite_thread = None
        self.coordinate_thread = None
        self.conversion_thread = None

        self.system_time_correct = False
        #        self.system_time_correct = True

        self.time_thread = Thread(target=self.setCorrectTime)
        self.time_thread.start()
Ejemplo n.º 36
0
class RTKLIB:

    # we will save RTKLIB state here for later loading
    state_file = os.path.join(os.path.expanduser("~"), ".reach/rtk_state")
    # if the state file is not available, these settings are loaded
    default_state = {"started": "no", "state": "base"}

    def __init__(self,
                 socketio,
                 rtklib_path=None,
                 config_path=None,
                 enable_led=True,
                 log_path=None):
        """Initialise the RTKLIB wrapper.

        socketio:    Socket.IO server used to broadcast status to browsers.
        rtklib_path: RTKLIB checkout root (defaults to ~/RTKLIB).
        config_path: directory of rtkrcv config files (defaults to the
                     bundled "rtklib_configs" next to this module).
        enable_led:  currently unused -- all LED code is commented out below.
        log_path:    where logs are served from (defaults to "../data").

        Side effects: constructs the rtkrcv/str2str/log controllers and
        starts a background thread running setCorrectTime(), which in turn
        restores the saved state.
        """

        print("RTKLIB 1")
        print(rtklib_path)
        print(log_path)

        if rtklib_path is None:
            rtklib_path = os.path.join(os.path.expanduser("~"), "RTKLIB")

        if config_path is None:
            self.config_path = os.path.join(os.path.dirname(__file__),
                                            "rtklib_configs")
        else:
            self.config_path = config_path

        if log_path is None:
            #TODO find a better default location
            self.log_path = "../data"
        else:
            self.log_path = log_path

        # This value should stay below the timeout value or the Satellite/Coordinate broadcast
        # thread will stop
        self.sleep_count = 0

        # default state for RTKLIB is "rover single"
        self.state = "base"

        # we need this to broadcast stuff
        self.socketio = socketio

        # these are necessary to handle rover mode
        self.rtkc = RtkController(rtklib_path, self.config_path)
        self.conm = ConfigManager(rtklib_path, self.config_path)

        # this one handles base settings
        self.s2sc = Str2StrController(rtklib_path)

        # take care of serving logs
        self.logm = LogManager(rtklib_path, self.log_path)

        # basic synchronisation to prevent errors
        self.semaphore = Semaphore()

        # we need this to send led signals
        #        self.enable_led = enable_led

        #        if self.enable_led:
        #            self.led = ReachLED()

        # broadcast satellite levels and status with these
        self.server_not_interrupted = True
        self.satellite_thread = None
        self.coordinate_thread = None
        self.conversion_thread = None

        self.system_time_correct = False
        #        self.system_time_correct = True

        self.time_thread = Thread(target=self.setCorrectTime)
        self.time_thread.start()

        # we try to restore previous state
        # in case we can't, we start as rover in single mode
        # self.loadState()

    def setCorrectTime(self):
        """Mark the system time as correct and restore the saved state.

        The GPS/NTP synchronisation logic is commented out below, so this
        now unconditionally flags the time as correct, notifies the browser,
        and reloads the previous RTKLIB state.  Runs on self.time_thread.
        """
        # determine if we have ntp service ready or we need gps time

        print("RTKLIB 2 GPS time sync")

        ##        if not gps_time.time_synchronised_by_ntp():
        # wait for gps time
        ##            print("Time is not synced by NTP")
        #            self.updateLED("orange,off")
        #        gps_time.set_gps_time("/dev/ttyACM0", 115200)

        print("Time is synced by GPS!")

        self.system_time_correct = True
        self.socketio.emit("system time corrected", {}, namespace="/test")

        self.loadState()
        self.socketio.emit("system state reloaded", {}, namespace="/test")

    def launchBase(self):
        """Switch this RTKLIB instance into "base" mode.

        str2str does not separate launch from start, so the real
        configuration happens in startBase(); this method only flips the
        state flag, mirroring the launch/start flow of rover mode.
        """
        with self.semaphore:
            self.state = "base"

            print("RTKLIB 7 Base mode launched")

    def shutdownBase(self):
        """Leave base mode: stop base services and mark the instance inactive.

        str2str couples launch and start, so the real teardown happens in
        stopBase(); this wrapper exists to mirror the rover-mode flow and
        flip the state flag.
        """
        self.stopBase()

        with self.semaphore:
            self.state = "inactive"

            print("RTKLIB 8 Base mode shutdown")

    def startBase(self,
                  rtcm3_messages=None,
                  base_position=None,
                  gps_cmd_file=None):
        """Start base mode: launch and start rtkrcv, then spin up broadcasts.

        The str2str start (which would consume the three parameters) is
        disabled below, so the parameters are currently unused and only
        rtkrcv is launched and started.  Returns the rtkrcv start result
        code (-1 failed, 1 started, 2 already started).
        """

        self.semaphore.acquire()
        """
        print("RTKLIB 9 Attempting to start str2str...")

        
        res = self.s2sc.start(rtcm3_messages, base_position, gps_cmd_file)
        if res < 0:
            print("str2str start failed")
        elif res == 1:
            print("str2str start successful")
        elif res == 2:
            print("str2str already started")
        
        self.saveState()
        """
        #TODO need refactoring
        #maybe a new method to launch/start rtkrcv outside
        #startBase and startRover
        #TODO launchRover and startRover send a config_name to rtkc
        #I don't do this here :-/
        print("RTKLIB 9a Attempting to launch rtkrcv...")

        res2 = self.rtkc.launch()

        if res2 < 0:
            print("rtkrcv launch failed")
        elif res2 == 1:
            print("rtkrcv launch successful")
        elif res2 == 2:
            print("rtkrcv already launched")

        #TODO need refactoring
        #maybe a new method to launch/start rtkrcv outside
        #startBase and startRover
        print("RTKLIB 9b Attempting to start rtkrcv...")
        res3 = self.rtkc.start()

        if res3 == -1:
            print("rtkrcv start failed")
        elif res3 == 1:
            print("rtkrcv start successful")
            print("Starting coordinate and satellite broadcast")
        elif res3 == 2:
            print("rtkrcv already started")

        # start fresh data broadcast
        #TODO the satellite and coordinate broadcast start
        #when rtkrcv start failed

        self.server_not_interrupted = True

        if self.satellite_thread is None:
            self.satellite_thread = Thread(target=self.broadcastSatellites)
            self.satellite_thread.start()

        if self.coordinate_thread is None:
            self.coordinate_thread = Thread(target=self.broadcastCoordinates)
            self.coordinate_thread.start()

        self.semaphore.release()

        return res3

    def stopBase(self):
        """Stop base mode: stop rtkrcv, halt broadcast threads, shut rtkrcv down.

        Returns the rtkrcv shutdown result code (<0 failed, 1 success,
        2 already shut down).  Sets self.state to "inactive" on success.
        """

        self.semaphore.acquire()

        print("RTKLIB 10a Attempting to stop rtkrcv...")

        res2 = self.rtkc.stop()
        if res2 == -1:
            print("rtkrcv stop failed")
        elif res2 == 1:
            print("rtkrcv stop successful")
        elif res2 == 2:
            print("rtkrcv already stopped")

        print("RTKLIB 10b Attempting to stop satellite broadcasting...")

        # clearing this flag makes the broadcast loops exit (see
        # broadcastSatellites/broadcastCoordinates)
        self.server_not_interrupted = False

        if self.satellite_thread is not None:
            self.satellite_thread.join()
            self.satellite_thread = None

        if self.coordinate_thread is not None:
            self.coordinate_thread.join()
            self.coordinate_thread = None

        print("RTKLIB 10c Attempting rtkrcv shutdown")

        res = self.rtkc.shutdown()

        if res < 0:
            print("rtkrcv shutdown failed")
        elif res == 1:
            print("rtkrcv shutdown successful")
            self.state = "inactive"
        elif res == 2:
            print("rtkrcv already shutdown")
            self.state = "inactive"
        self.semaphore.release()

        return res

    def readConfigBase(self):
        """Read the current str2str base configuration and push it to the browser."""
        with self.semaphore:
            print("RTKLIB 11 Got signal to read base config")

            self.socketio.emit("current config base",
                               self.s2sc.readConfig(),
                               namespace="/test")

    def writeConfigBase(self, config):
        """Write a new base configuration, restart str2str and persist state.

        Returns the sum of the stop and start result codes; a value greater
        than 1 means the restart fully succeeded.
        """
        with self.semaphore:
            print("RTKLIB 12 Got signal to write base config")

            self.s2sc.writeConfig(config)

            print("Restarting str2str...")

            res = self.s2sc.stop() + self.s2sc.start()

            if res > 1:
                print("Restart successful")
            else:
                print("Restart failed")

            self.saveState()

            return res

    def shutdown(self):
        """Shut down whatever mode we are in and stop the broadcast threads.

        Returns the shutdownBase() result when we were in base mode,
        otherwise 1 (already inactive).
        """
        print("RTKLIB 17 Shutting down")

        # signal the broadcast loops to exit, then wait for their threads
        self.server_not_interrupted = False

        if self.coordinate_thread is not None:
            self.coordinate_thread.join()

        if self.satellite_thread is not None:
            self.satellite_thread.join()

        # BUG FIX: this used to be an "elif" chained onto the
        # satellite_thread check above (the rover branch it belonged to was
        # removed), so base mode was only shut down when no satellite
        # thread existed.  Make it an independent check.
        if self.state == "base":
            return self.shutdownBase()

        # otherwise, we are inactive
        return 1

    def deleteConfig(self, config_name):
        """Delete a saved rtkrcv config and broadcast the refreshed list."""
        print("RTKLIB 18 Got signal to delete config " + config_name)

        self.conm.deleteConfig(config_name)
        self.conm.updateAvailableConfigs()

        # let every connected browser refresh its config dropdown
        payload = {"available_configs": self.conm.available_configs}
        self.socketio.emit("available configs", payload, namespace="/test")

        print(self.conm.available_configs)

    def cancelLogConversion(self, raw_log_path):
        """Abort an in-progress convbin conversion of raw_log_path.

        Sends SIGINT to the convbin child process (presumably a pexpect
        spawn -- TODO confirm), waits for the conversion thread to finish,
        and removes partial output files.  No-op when nothing is being
        converted.
        """
        if self.logm.log_being_converted:
            print("Canceling log conversion for " + raw_log_path)

            self.logm.convbin.child.kill(signal.SIGINT)

            self.conversion_thread.join()
            self.logm.convbin.child.close(force=True)

            print("Thread killed")
            self.logm.cleanLogFiles(raw_log_path)
            self.logm.log_being_converted = ""

            print("Canceled msg sent")

    def processLogPackage(self, raw_log_path):
        """Kick off a background RINEX conversion for raw_log_path.

        Refuses (and notifies the browser) when another conversion is
        running and no ready-made zip package exists for this log yet.
        """

        currently_converting = False

        try:
            # NOTE(review): Thread.isAlive() was removed in Python 3.9 in
            # favour of is_alive() -- confirm target interpreter version
            print("conversion thread is alive " +
                  str(self.conversion_thread.isAlive()))
            currently_converting = self.conversion_thread.isAlive()
        except AttributeError:
            # conversion_thread is still None -> nothing is converting
            pass

        # NOTE(review): log_filename is computed but never used here
        log_filename = os.path.basename(raw_log_path)
        potential_zip_path = os.path.splitext(raw_log_path)[0] + ".zip"

        can_send_file = True

        # can't send if there is no converted package and we are busy
        if (not os.path.isfile(potential_zip_path)) and (currently_converting):
            can_send_file = False

        if can_send_file:
            print("Starting a new bg conversion thread for log " +
                  raw_log_path)
            self.logm.log_being_converted = raw_log_path
            self.conversion_thread = Thread(target=self.getRINEXPackage,
                                            args=(raw_log_path, ))
            self.conversion_thread.start()
        else:
            error_msg = {
                "name": os.path.basename(raw_log_path),
                "conversion_status":
                "A log is being converted at the moment. Please wait",
                "messages_parsed": ""
            }
            self.socketio.emit("log conversion failed",
                               error_msg,
                               namespace="/test")

    def conversionIsRequired(self, raw_log_path):
        """Return True when raw_log_path needs (re)conversion to a zip package.

        Conversion is required when no zip exists yet, or when the raw log
        differs in size from the copy stored inside the existing zip.
        """
        potential_zip_path = os.path.splitext(raw_log_path)[0] + ".zip"

        print("Comparing " + raw_log_path + " and " + potential_zip_path +
              " for conversion")

        if os.path.isfile(potential_zip_path):
            # BUG FIX: rawLogsDiffer() was called twice (once for the print,
            # once for the return), opening and parsing the zip twice
            differs = self.rawLogsDiffer(raw_log_path, potential_zip_path)
            print("Raw logs differ " + str(differs))
            return differs
        else:
            print("No zip file!!! Conversion required")
            return True

    def rawLogsDiffer(self, raw_log_path, zip_package_path):
        """Return True if the raw log's size differs from the copy in the zip.

        Size comparison is used as a cheap proxy for content equality; the
        package stores the raw log unchanged under "Raw/<name>".
        """
        log_name = os.path.basename(raw_log_path)
        raw_log_size = os.path.getsize(raw_log_path)

        # BUG FIX: the ZipFile handle was never closed; use a context
        # manager so it is released deterministically
        with zipfile.ZipFile(zip_package_path) as zip_package:
            raw_file_inside_size = zip_package.getinfo("Raw/" +
                                                       log_name).file_size

        print("Sizes:")
        print("Inside: " + str(raw_file_inside_size))
        print("Raw:    " + str(raw_log_size))

        return raw_log_size != raw_file_inside_size

    def getRINEXPackage(self, raw_log_path):
        """Return a downloadable package path for raw_log_path, notifying the browser.

        Solution logs are returned as-is; raw logs are converted into a
        RINEX zip package, reusing an existing up-to-date zip when possible.
        Returns None when the conversion was canceled.  Also serves as the
        body of the background conversion thread (see processLogPackage).
        """
        # if this is a solution log, return the file right away
        if "sol" in raw_log_path:
            log_url_tail = "/logs/download/" + os.path.basename(raw_log_path)
            self.socketio.emit("log download path",
                               {"log_url_tail": log_url_tail},
                               namespace="/test")
            return raw_log_path

        # return RINEX package if it already exists
        # create one if not
        log_filename = os.path.basename(raw_log_path)
        potential_zip_path = os.path.splitext(raw_log_path)[0] + ".zip"
        result_path = ""

        if self.conversionIsRequired(raw_log_path):
            print("Conversion is Required!")
            result_path = self.createRINEXPackage(raw_log_path)
            # handle canceled conversion
            if result_path is None:
                log_url_tail = "/logs/download/" + os.path.basename(
                    raw_log_path)
                self.socketio.emit("log download path",
                                   {"log_url_tail": log_url_tail},
                                   namespace="/test")
                return None
        else:
            result_path = potential_zip_path
            print("Conversion is not Required!")
            already_converted_package = {
                "name": log_filename,
                "conversion_status":
                "Log already converted. Details inside the package",
                "messages_parsed": ""
            }
            self.socketio.emit("log conversion results",
                               already_converted_package,
                               namespace="/test")

        log_url_tail = "/logs/download/" + os.path.basename(result_path)
        self.socketio.emit("log download path", {"log_url_tail": log_url_tail},
                           namespace="/test")

        self.cleanBusyMessages()
        self.logm.log_being_converted = ""

        return result_path

    def cleanBusyMessages(self):
        """Tell connected browsers to clear their "busy converting" errors.

        Users who requested a conversion while another one was running got
        an error message; this signal shows it is fine to convert again.
        """
        self.socketio.emit("clean busy messages", {}, namespace="/test")

    def createRINEXPackage(self, raw_log_path):
        """Convert raw_log_path to a RINEX zip package, reporting progress.

        Emits "log conversion start"/"log conversion results" events to the
        browser.  Returns the package path on success, the raw log path when
        conversion failed or produced no useful data, and None when the
        conversion was canceled.
        """
        # create a RINEX package before download
        # in case we fail to convert, return the raw log path back
        result = raw_log_path
        log_filename = os.path.basename(raw_log_path)

        conversion_time_string = self.logm.calculateConversionTime(
            raw_log_path)

        start_package = {
            "name": log_filename,
            "conversion_time": conversion_time_string
        }

        conversion_result_package = {
            "name": log_filename,
            "conversion_status": "",
            "messages_parsed": "",
            "log_url_tail": ""
        }

        self.socketio.emit("log conversion start",
                           start_package,
                           namespace="/test")
        try:
            log = self.logm.convbin.convertRTKLIBLogToRINEX(
                raw_log_path, self.logm.getRINEXVersion())
        except (ValueError, IndexError):
            # cancelLogConversion() kills the convbin child, which surfaces
            # here as a parse error
            print("Conversion canceled")
            conversion_result_package[
                "conversion_status"] = "Conversion canceled, downloading raw log"
            self.socketio.emit("log conversion results",
                               conversion_result_package,
                               namespace="/test")
            return None

        print("Log conversion done!")

        if log is not None:
            result = log.createLogPackage()
            if log.isValid():
                conversion_result_package[
                    "conversion_status"] = "Log converted to RINEX"
                conversion_result_package[
                    "messages_parsed"] = log.log_metadata.formValidMessagesString(
                    )
            else:
                conversion_result_package[
                    "conversion_status"] = "Conversion successful, but log does not contain any useful data. Downloading raw log"
        else:
            print("Could not convert log. Is the extension wrong?")
            conversion_result_package[
                "conversion_status"] = "Log conversion failed. Downloading raw log"

        self.socketio.emit("log conversion results",
                           conversion_result_package,
                           namespace="/test")

        print("Log conversion results:")
        print(str(log))

        return result

    def saveState(self):
        """Persist the current rover/base state to self.state_file as JSON.

        Returns the state dict that was written.
        """
        # save current state for future resurrection:
        # state is a list of parameters:
        # rover state example: ["rover", "started", "reach_single_default.conf"]
        # base state example: ["base", "stopped", "input_stream", "output_stream"]

        state = {}

        # save "rover", "base" or "inactive" state
        state["state"] = self.state

        if self.state == "rover":
            started = self.rtkc.started
        elif self.state == "base":
            started = self.s2sc.started
        elif self.state == "inactive":
            started = False
        # NOTE(review): any other self.state value leaves "started" unbound
        # and the next line raises NameError

        state["started"] = "yes" if started else "no"

        # dump rover state
        state["rover"] = {"current_config": self.rtkc.current_config}

        # dump rover state
        state["base"] = {
            "input_stream": self.s2sc.input_stream,
            "output_stream": self.s2sc.output_stream,
            "rtcm3_messages": self.s2sc.rtcm3_messages,
            "base_position": self.s2sc.base_position,
            "gps_cmd_file": self.s2sc.gps_cmd_file
        }

        print("RTKLIB 20 DEBUG saving state")
        print(str(state))

        with open(self.state_file, "w") as f:
            json.dump(state, f, sort_keys=True, indent=4)

        # flush the file to disk (embedded boards may lose power abruptly)
        reach_tools.run_command_safely(["sync"])

        return state

    def byteify(self, input):
        """Recursively rebuild nested dicts/lists, passing leaves through.

        Historically this encoded unicode strings to bytes on Python 2;
        on Python 3 strings need no conversion, so strings and all other
        leaf values are returned unchanged.
        """
        if isinstance(input, list):
            return [self.byteify(element) for element in input]
        if isinstance(input, dict):
            return {
                self.byteify(key): self.byteify(value)
                for key, value in input.items()
            }
        # str (and every other leaf type) passes through as-is
        return input

    def getState(self):
        """Load the previously saved state from self.state_file.

        Falls back to default_state when the file is missing or does not
        contain valid JSON.  Returns a plain dict.
        """
        # get the state, currently saved in the state file
        print("RTKLIB 21 Trying to read previously saved state...")

        try:
            f = open(self.state_file, "r")
        except IOError:
            # no saved state yet: first boot, or the file was wiped
            print("Could not find existing state, Launching default mode...")
            return self.default_state

        print("Found existing state...trying to decode...")

        # manage the handle with a context manager so it is closed on every
        # path (the original closed it by hand in each branch)
        with f:
            try:
                json_state = json.load(f)
            except ValueError:
                # could not properly decode current state
                print("Could not decode json state. Launching default mode...")
                return self.default_state

        print("Decoding succesful")

        # convert unicode strings to normal
        json_state = self.byteify(json_state)

        print("That's what we found:")
        print(str(json_state))

        return json_state

    def loadState(self):
        """Restore the mode recorded by saveState (only "base" is handled here)."""

        # get current state
        json_state = self.getState()

        print("RTKLIB 22 Now loading the state printed above... ")
        #print(str(json_state))
        # first, we restore the base state, because no matter what we end up doing,
        # we need to restore base state

        if json_state["state"] == "base":
            self.launchBase()

            if json_state["started"] == "yes":
                self.startBase()

        print(str(json_state["state"]) + " state loaded")

    def sendState(self):
        """Emit the saved state, plus live extras, to every connecting browser."""
        # send current state to every connecting browser

        state = self.getState()
        print("RTKLIB 22a")
        #print(str(state))
        self.conm.updateAvailableConfigs()
        state["available_configs"] = self.conm.available_configs

        # live values not present in the persisted state file
        state["system_time_correct"] = self.system_time_correct
        state["log_path"] = str(self.log_path)

        print("Available configs to send: ")
        print(str(state["available_configs"]))

        print("Full state: ")
        for key in state:
            print("{} : {}".format(key, state[key]))

        self.socketio.emit("current state", state, namespace="/test")

    # this function reads satellite levels from an existing rtkrcv instance
    # and emits them to the connected browser as messages
    def broadcastSatellites(self):
        """Poll rtkrcv once a second and emit rover satellite levels.

        Runs on self.satellite_thread until self.server_not_interrupted is
        cleared (see stopBase/shutdown).
        """
        count = 0

        while self.server_not_interrupted:

            # update satellite levels
            self.rtkc.getObs()

            #            if count % 10 == 0:
            #print("Sending sat rover levels:\n" + str(self.rtkc.obs_rover))
            #print("Sending sat base levels:\n" + str(self.rtkc.obs_base))

            self.socketio.emit("satellite broadcast rover",
                               self.rtkc.obs_rover,
                               namespace="/test")
            #self.socketio.emit("satellite broadcast base", self.rtkc.obs_base, namespace = "/test")
            count += 1
            self.sleep_count += 1
            time.sleep(1)
        #print("exiting satellite broadcast")

    # this function reads current rtklib status, coordinates and obs count
    def broadcastCoordinates(self):
        """Poll rtkrcv status once a second and emit it to the browser.

        Runs on self.coordinate_thread until self.server_not_interrupted is
        cleared (see stopBase/shutdown).
        """
        count = 0

        while self.server_not_interrupted:

            # update RTKLIB status
            self.rtkc.getStatus()

            #            if count % 10 == 0:
            #                print("Sending RTKLIB status select information:")
            #                print(self.rtkc.status)

            self.socketio.emit("coordinate broadcast",
                               self.rtkc.status,
                               namespace="/test")

            #            if self.enable_led:
            #                self.updateLED()

            count += 1
            time.sleep(1)
Ejemplo n.º 37
0
import os
import time
from threading import Thread, Semaphore

semaphore = Semaphore(4)

def task(name):
    """Hold the shared semaphore for one second of simulated work.

    BUG FIX: the original acquired and released by hand, so an exception
    between acquire() and release() would leak the semaphore slot; the
    context manager guarantees the release.  (acquire() with default
    arguments always blocks and returns True, so the old "if" was dead.)
    """
    with semaphore:
        print(f'thread:{os.getpid()}, name:{name}, sleep 1...')
        time.sleep(1)

if __name__ == "__main__":
    # spawn ten workers; the module-level semaphore caps concurrency at four
    workers = []
    for i in range(10):
        worker = Thread(target=task, args=(f't {i}', ))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
Ejemplo n.º 38
0
from re import sub
from socket import socket
from datetime import datetime
from statistics import mean
from signal import SIGINT, signal
from time import ctime, sleep, strptime, time
from random import choice
import select
import pip

from subprocess import DEVNULL, Popen, check_call, call
from threading import Thread
from threading import Lock as thread_lock
from threading import Semaphore
printlock = Semaphore(value=1)


def install(package):
    """Install *package* with pip, then re-run this script.

    Tries the in-process pip.main() API first; pip >= 10 removed it, in
    which case we fall back to invoking "python -m pip install" as a
    subprocess (the officially supported way to drive pip from a program).
    """
    # BUG FIX: sys.executable was used but "sys" is never imported at
    # module level in this snippet, so the fallback raised NameError
    import sys
    try:
        pip.main(["install", package])
    except AttributeError:
        check_call([sys.executable, '-m', 'pip', 'install', package])
    call([sys.executable, __file__])


try:
    from serial import Serial
    import serial.tools.list_ports
except ModuleNotFoundError:
    print("Pyserial is not installed. " +
Ejemplo n.º 39
0
class Custom_Queue:
    """Semaphore-guarded ring buffers shared by producer/consumer threads.

    Two parallel fixed-size buffers hold color and gray frames.  A single
    producer index, a single consumer index and one produce/consume
    semaphore pair are shared by BOTH buffers.

    NOTE(review): both semaphores are created with the default value of 1,
    so at most one frame can be in flight at a time regardless of
    ``buffer_size`` — confirm whether a capacity of ``buffer_size`` was
    intended.  Also confirm that callers only use one buffer kind at a
    time, since color and gray calls share the same indices/semaphores.
    """
    def __init__(self, buffer_size):
        # two buffers to detect color and gray
        self.color_buffer = [None] * buffer_size
        self.gray_buffer = [None] * buffer_size

        # producers/consumers
        self.producer_index = 0  # next slot a producer will write
        self.consumer_index = 0  # next slot a consumer will read

        # Locks serializing index updates among producers / consumers
        self.write_lock = Lock()
        self.read_lock = Lock()

        # buffer_size of the buffers
        self.buffer_size = buffer_size

        # Controls access for producer/consumer (both default to value 1)
        self.produce_semaphore = Semaphore()
        self.consume_semaphore = Semaphore()

        # start with nothing to consume: drain the consumer semaphore to 0
        self.consume_semaphore.acquire()

    # Producers - colored frames are enqueued
    def enqueue_colored_frames(self, frame):
        # Wait for a free production slot, claim the next write index
        # atomically, then write the frame outside the index lock and
        # signal the consumer side.
        self.produce_semaphore.acquire()
        with self.write_lock:
            i = self.producer_index
            self.producer_index = (self.producer_index + 1) % len(
                self.color_buffer)
        self.color_buffer[i] = frame
        self.consume_semaphore.release()

    # Consumers - colored frames are dequeued
    def get_color(self):
        # Wait until a frame is available, claim the next read index
        # atomically, then hand the production slot back.
        self.consume_semaphore.acquire()
        with self.read_lock:
            i = self.consumer_index
            self.consumer_index = (self.consumer_index + 1) % len(
                self.color_buffer)
        frame = self.color_buffer[i]
        self.produce_semaphore.release()
        return frame

    # Producers - gray frames are enqueued (mirror of the color path)
    def enqueue_gray_frames(self, frame):
        self.produce_semaphore.acquire()
        with self.write_lock:
            i = self.producer_index
            self.producer_index = (self.producer_index + 1) % len(
                self.gray_buffer)
        self.gray_buffer[i] = frame
        self.consume_semaphore.release()

    # Consumers - gray frames are dequeued (mirror of the color path)
    def get_gray(self):
        self.consume_semaphore.acquire()
        with self.read_lock:
            i = self.consumer_index
            self.consumer_index = (self.consumer_index + 1) % len(
                self.gray_buffer)
        frame = self.gray_buffer[i]
        self.produce_semaphore.release()
        return frame

    def remove_all_color(self):
        # NOTE(review): the buffer is pre-sized with None placeholders and
        # never rebound, so this comparison is always False — confirm
        # whether "all slots are None" was the intended emptiness check.
        return self.color_buffer == []
Ejemplo n.º 40
0
class Async_HTTP(Thread):
    """Single-threaded async HTTP fetcher multiplexing many URLs.

    Each URL is tracked as a ``web_item`` state machine:
      0  needs DNS / socket setup        1  connecting
      2  sending request                 3  reading response
      >50 finished: 95 redirect loop, 97 DNS failure, 98 https (not
      supported), 99 error/timeout, 100 success (callback invoked).
    """

    class web_item:
        # Per-URL download state; see status codes in the class docstring.
        def __init__(self, url):
            self._url = url        # current URL (changes on redirects)
            self._ourl = url       # original URL as enqueued
            self._ip = ""
            self._ip_solver = None
            self._data = ""
            self._socket = None
            self._status = 0
            self._retries = 0
            self._request = ""     # request bytes still to be sent
            self._response = ""    # accumulated response bytes
            self._redir = ""       # last redirect target seen
            self._redirn = 0       # redirects followed so far
            self._time = 0         # deadline bookkeeping for timeouts

    # Required Worker method
    def numPendingJobs(self):
        """Return how many enqueued URLs have not finished yet."""
        self._queuelock.acquire()
        num = 0
        for web in self._urllist:
            if web._status < 50:
                num += 1
        self._queuelock.release()
        return num

    # Required Worker method
    def enqueueJobs(self, job):
        """Add an iterable of URLs to the work queue."""
        self.addWebs(job)

    # Required Worker method
    def stopWorking(self):
        """Ask the worker loop to stop as soon as possible."""
        self.finalize()

    def waitEnd(self):
        """Let the worker exit once idle, then wait for the thread."""
        self._exit_on_end = True
        self._waitsem.release()
        self.join()

    # URL_list is a list of complete URLs such as http://hst.com:80/path
    def __init__(self, dnspool, db=None, callback=(lambda *args: None),
                 URL_list=None):
        # Bug fixes vs. the original: URL_list defaulted to a shared
        # mutable [] literal, and the default callback took no arguments
        # although it is invoked with four in work().
        super(Async_HTTP, self).__init__()

        self._urllist = []
        self._db = db
        self._callback = callback
        for url in (URL_list or []):
            self._urllist.append(Async_HTTP.web_item(url))

        self._waitsem = Semaphore(1)    # wakes the worker when work arrives
        self._queuelock = Semaphore(1)  # guards _urllist
        self._end = False
        self._exit_on_end = False
        self._dns_solver = dnspool

    def run(self):
        self.work()

    def finalize(self):
        """Signal the worker loop to terminate and wake it up."""
        self._end = True
        self._waitsem.release()

    def addWebs(self, urls):
        """Append new URLs to the queue (thread-safe) and wake the worker."""
        self._queuelock.acquire()
        for url in urls:
            self._urllist.append(Async_HTTP.web_item(url))
        self._queuelock.release()
        self._waitsem.release()

    def getWebIndex(self, web):
        """Return the web_item whose ORIGINAL url equals *web*, else None.

        Bug fix: the loop previously iterated an undefined global
        ``urls`` (NameError); it now scans self._urllist under the lock.
        """
        self._queuelock.acquire()
        ret = None
        for url in self._urllist:
            if url._ourl == web:
                ret = url
                break
        self._queuelock.release()
        return ret

    def redirect(self, content, url):
        """Extract a Location header target from *content*, or None.

        Relative targets are made absolute against *url*.  The match is
        case-sensitive ("Location: " exactly), as in the original.
        """
        head = content.split(b"\r\n\r\n")[0]
        ll = head.split(b"\r\n")
        ret = None
        for h in ll:
            if b"Location: " in h[:10]:
                ret = h[10:].strip()
                if not (b"http:" in ret or b"https:" in ret):
                    ret = url.encode('utf-8') + ret
                break
        if ret:
            return ret.decode("utf-8", 'ignore')
        return None

    def work(self):
        """Worker loop: advance each web_item's state machine, then poll."""
        while not self._end:
            self._queuelock.acquire()  # Lock webs queue

            for web in self._urllist:
                if urllib.parse.urlparse(web._url).scheme == "https":
                    web._status = 98  # https not supported by this worker
                if web._status == 0:
                    # Resolve IP
                    if web._ip == "":
                        if web._ip_solver is None:
                            web._ip_solver = self._dns_solver.getWorkerInstance(
                            )
                            web._time = time.time(
                            ) + 20  # Add extra timeout for DNS solving

                        ip = web._ip_solver.queryDNS(
                            urllib.parse.urlparse(web._url).hostname)
                        if ip is not None:
                            web._ip = ip
                        else:  # Error at DNS solve, just stop
                            web._status = 97

                    if web._ip != "":
                        # Connect and create a non-blocking socket
                        if web._socket is not None:
                            web._socket.close()
                        web._socket = socket.socket(socket.AF_INET,
                                                    socket.SOCK_STREAM)
                        web._socket.setblocking(0)
                        web._status = 1
                        web._time = time.time()
                if web._status == 1:
                    # Check connection completeness; build the request once
                    # the non-blocking connect succeeds.
                    try:
                        port = urllib.parse.urlparse(web._url).port
                        if (port is None): port = 80
                        else: port = int(port)

                        path = urllib.parse.urlparse(web._url).path
                        param = urllib.parse.urlparse(web._url).query
                        if param != "": path += "?" + param
                        if path is None or path == "": path = "/"

                        web._socket.connect((web._ip, port))
                        web._status = 2  # Connection OK!
                        web._request = "GET " + path + " HTTP/1.0\r\n"
                        web._request += "Host: " + urllib.parse.urlparse(
                            web._url).hostname + "\r\n"
                        web._request += "User-Agent: Mozilla/5.0 (X11; Linux i686; rv:23.0) Gecko/20100101 Firefox/23.0\r\n"
                        web._request += "\r\n"
                        web._request = bytes(web._request, "UTF-8")
                    except socket.error as e:
                        err = e.args[0]
                        if err == errno.EAGAIN or err == errno.EINPROGRESS or err == errno.EALREADY:
                            # Connect still in progress; retry next pass
                            pass
                        else:
                            web._retries += 1
                            if web._retries < 5:
                                web._status = 0
                            else:
                                web._status = 99  # Stop retrying
                    except Exception as e:
                        web._status = 99  # Stop retrying
                if web._status == 2:
                    # Send request (possibly across several passes)
                    if len(web._request) == 0:
                        web._status = 3
                        web._response = b""
                    else:
                        try:
                            sent = web._socket.send(web._request)
                            web._request = web._request[sent:]
                        except Exception as e:
                            err = e.args[0]
                            if err == errno.EAGAIN or err == errno.EINPROGRESS:
                                # Just try again after some time
                                pass
                            else:
                                web._retries += 1
                                if web._retries < 5:
                                    web._status = 0
                                else:
                                    web._status = 99  # Stop retrying
                if web._status == 3:
                    # Read response; an empty read means the peer closed
                    # the connection and the response is complete.
                    try:
                        read = web._socket.recv(1024 * 1024)
                        if not read:
                            # Finish, parse redirects!
                            redir = self.redirect(web._response, web._url)
                            if redir == web._redir or web._redirn > 10:
                                # Redirect loop (or more than 10 hops)
                                web._status = 95
                            elif redir is None:
                                web._status = 100
                                self._callback(web._url, web._ourl,
                                               web._response, self._db)
                            else:
                                # Follow the redirect: restart the state
                                # machine against the new URL.
                                web._url = redir
                                web._status = 0
                                web._ip = ""
                                web._time = time.time()
                                web._redirn += 1
                            web._redir = redir
                        else:
                            web._response = web._response + read
                    except Exception as e:
                        err = e.args[0]
                        if err == errno.EAGAIN or err == errno.EINPROGRESS:
                            # Just try again after some time
                            pass
                        else:
                            web._retries += 1
                            if web._retries < 5:
                                web._status = 0
                            else:
                                web._status = 99  # Stop retrying

                # Hard per-item timeout, then close finished sockets
                if time.time() - web._time > 10:
                    web._status = 99
                if web._status > 50 and web._socket is not None:
                    web._socket.close()
                    web._socket = None

            # Drop finished items (iterate over a copy while removing)
            for web in list(self._urllist):
                if web._status > 50:
                    self._urllist.remove(web)

            # Register the live sockets for polling
            allready = True
            onesocket = False
            plist = select.poll()
            for web in self._urllist:
                if web._socket is not None and web._status < 50:
                    if web._status == 3:
                        plist.register(web._socket,
                                       select.POLLERR | select.POLLIN)
                        allready = False
                    elif web._status == 1 or web._status == 2:
                        plist.register(web._socket,
                                       select.POLLERR | select.POLLOUT)
                        allready = False
                    else:
                        plist.register(web._socket, select.POLLERR)
                    onesocket = True
                if web._status == 0:
                    allready = False

            self._queuelock.release(
            )  # Now we could potentially add/remove webs

            if allready and self._exit_on_end: break

            # Do not wait if some item still has pending work
            if not allready:
                if onesocket: plist.poll(1000)
                else: time.sleep(1)

            # If work is done, wait here for more work
            if allready:
                self._waitsem.acquire()

            # All waiting for DNS, redirs...

        print("LOG: Exit thread")
Ejemplo n.º 41
0
 def __init__(self):
     """Create the stage gates for ordered execution.

     NOTE(review): both semaphores start at 0, so acquiring them blocks
     until an earlier stage releases them — presumably the classic
     print-in-order exercise; confirm against the sibling methods (not
     visible here).
     """
     self.two = Semaphore(0)
     self.three = Semaphore(0)
Ejemplo n.º 42
0
 def __init__(self):
     """Initialize synchronization and result slots.

     NOTE(review): the semaphore starts at 0, so a waiter blocks until a
     response arrives; ``commandWindow`` and ``response`` are presumably
     filled in by other methods — confirm against the rest of the class.
     """
     self.semaphore = Semaphore(0)
     self.commandWindow = None
     self.response = None
Ejemplo n.º 43
0
    from musicDisplay import musicDisplay

# Override mopidy connection defaults from the config file when present.
if config.has_option("mopidy", "host"):
    mopidyHost = config["mopidy"]["host"]

if config.has_option("mopidy", "port"):
    mopidyPort = int(config["mopidy"]["port"])

# Where librespot's output is tracked on disk.
trackingPath = "/home/pi/workspace/data"
trackingFile = trackingPath + "/" + "librespotOutput"
m = MopidyAPI(host=mopidyHost, port=mopidyPort)
# Placeholder until the first real track is observed.
currentTrack = Track("Uninitialized", "Uninitialized", "Uninitialized",
                     "Uninitialized", "Uninitialized")

artistMutex = Lock()
# Allows up to two concurrent display updates; the mutex serializes
# the actual drawing.
displaySemaphore = Semaphore(2)
displayMutex = Lock()


def trackFromResult(resultTrack):
    """Convert a mopidy result track into a local ``Track`` instance.

    Album art is resolved through the module-level MopidyAPI client.
    """
    localTrack = Track()
    localTrack.title = resultTrack.name
    localTrack.artist = resultTrack.artists[0].name
    localTrack.album = resultTrack.album.name
    # mopidy reports length in milliseconds; the local name no longer
    # shadows the stdlib ``time`` module (fix).
    seconds = resultTrack.length / 1000
    localTrack.timeAudio = str(int(seconds / 60)) + ":" + '%02d' % int(seconds % 60)
    image = m.library.get_images([resultTrack.album.uri])
    localTrack.imageURI = image[resultTrack.album.uri][0].uri
    return localTrack

Ejemplo n.º 44
0
 def __init__(self, minconn, maxconn, *args, **kwargs):
     """Connection pool whose checkouts block once *maxconn* are in use.

     A counting semaphore sized to *maxconn* gates every checkout, so
     callers wait for a free connection instead of erroring out.
     NOTE(review): the base class is presumably psycopg2's
     ThreadedConnectionPool — confirm at the definition site.
     """
     self._semaphore = Semaphore(maxconn)
     super(BlockingThreadedConnectionPool,self).__init__(minconn, maxconn, *args, **kwargs)
Ejemplo n.º 45
0
from django.db import connection

import random

import urlparse

import threading
from threading import Semaphore

import logging

# Log to a file; a raw string keeps the backslashes in the Windows-style
# path from ever being read as escape sequences (value unchanged).
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s',
                    filename=r'populate\management\commands\populateTest.log',
                    filemode='w')
semafaro = Semaphore(50)  # cap concurrent workers at 50

CONSUMER_KEY = 'AStAffVHzEtpSFJ3'  # OAuth consumer key (issued for UTP)
CONSUMER_SECRET = 'UEQj2XKfGpFSMpNh'  # OAuth consumer secret (issued for UTP)

CALLBACK_BASE = '127.0.0.1'
SERVER_URL = 'https://www.khanacademy.org'
SERVER_URL2 = 'https://es.khanacademy.org'

DEFAULT_API_RESOURCE = '/api/v1/playlists'
VERIFIER = None  # filled in by the OAuth callback server

# NOTE(review): module-level append handle is never closed explicitly;
# it relies on interpreter shutdown to flush.
f = open("log-populateTest.txt", "a")


# Create the callback server that's used to set the oauth verifier after the
Ejemplo n.º 46
0
        bSema2.acquire()
        print("pong")
        bSema.release()

    def join(self):
        # Block until this worker's underlying thread has finished.
        self.t.join()


# main thread: run N ping/pong exchanges (N from argv, default 100)
if __name__ == '__main__':
    # round count comes from the command line when an argument is given
    N = int(sys.argv[1]) if sys.argv[1:] else 100

    bSema = Semaphore(1)   # "ping may go" — starts available
    bSema2 = Semaphore(0)  # "pong may go" — starts blocked

    # one thread per ping and per pong; each performs a single exchange
    Threads_ping = [Ping() for _ in range(N)]
    Threads_pong = [Pong() for _ in range(N)]

    for worker in Threads_ping:
        worker.start()
    for worker in Threads_pong:
        worker.start()
    for worker in Threads_ping:
        worker.join()
    for worker in Threads_pong:
        worker.join()
Ejemplo n.º 47
0
from socket import *
import argparse
import sys
from threading import Thread, Semaphore

screenlock = Semaphore(value=1)

import nmap


def nmap_scan(host, port):
    # Probe a single host/port with nmap and report its state and
    # service name on one line.
    scanner = nmap.PortScanner()
    scanner.scan(host, port)
    tcp_info = scanner[host]['tcp'][int(port)]
    state = tcp_info['state']
    name = tcp_info['name']
    print('[+] ' + host + ' tcp/' + port + ' - ' + name + ' - ' + state)


def conn_scan(host, port):
    """TCP-connect scan of *host*:*port*; print the banner if it answers.

    Fixes vs. the original: the screen-lock semaphore was acquired but
    never released (deadlocking every later scan), and the socket was
    leaked on error paths.  Errors are still swallowed silently, as this
    is a best-effort probe.
    """
    skt = None
    try:
        skt = socket(AF_INET, SOCK_STREAM)
        skt.connect((host, port))
        skt.send('ThirdNitrogen\n'.encode())
        result = skt.recv(1000)
        with screenlock:  # serialize output from concurrent scan threads
            print("Scanning port " + str(port))
            print("Port {0}/TCP open".format(port))
            print("[+] " + str(result))
    except Exception:
        e = sys.exc_info()  # best-effort: record and move on
    finally:
        if skt is not None:
            skt.close()
Ejemplo n.º 48
0
class ObjectPool(object):
    """Generic Object Pool.

    The pool consists of an object set and an allocation semaphore.

    pool_get() gets an allocation from the semaphore
               and an object from the pool set.

    pool_put() releases an allocation to the semaphore
               and puts an object back to the pool set.

    Subclasses must implement these thread-safe hooks:
    _pool_create()
            used as a subclass hook to auto-create new objects in pool_get().
    _pool_verify()
            verifies objects before they are returned by pool_get()
    _pool_cleanup()
            cleans up and verifies objects before their return by pool_put().

    While allocations are strictly accounted for and limited by
    the semaphore, objects are expendable:

    The hook provider and the caller are solely responsible for object
    handling.

    pool_get() may create an object if there is none in the pool set.
    pool_get() may return no object, leaving object creation to the caller.
    pool_put() may return no object to the pool set.
    Objects to pool_put() to the pool need not be those from pool_get().
    Objects to pool_get() need not be those from pool_put().


    Callers beware:

    - The pool limit size must be greater than the total working set of
      objects, otherwise it will hang. When in doubt, use an impossibly large
      size limit.  Since the pool grows on demand, this will not waste
      resources.  However, in that case, the pool must not be used as a flow
      control device (i.e.  relying on pool_get() blocking to stop threads), as
      the impossibly large pool size limit will defer blocking until too late.

    - The pool cannot be shared among processes as the semaphore that it
      relies upon will be copied when the new process is being created.

    """
    def __init__(self, size=None, create=None, verify=None, cleanup=None):
        self._pool_pid = getpid()
        # Validate explicitly instead of the original bare ``except:``
        # around an ``assert`` (asserts are stripped under -O, and
        # ``"5" >= 1`` raised TypeError rather than failing cleanly).
        # Inputs coercible to a positive int are now accepted.
        msg = ("Invalid size for pool (positive integer "
               "required): %r" % (size,))
        try:
            pool_size = int(size)
        except (TypeError, ValueError):
            raise ValueError(msg)
        if pool_size < 1:
            raise ValueError(msg)
        self.size = pool_size

        self._create_func = create
        self._verify_func = verify
        self._cleanup_func = cleanup

        self._semaphore = Semaphore(self.size)  # Pool grows up to size limit
        self._mutex = Lock()  # Protect shared _set object
        self._set = set()
        log.debug("Initialized pool %r", self)

    def __repr__(self):
        # NOTE: reads Semaphore._value, a private attribute, for
        # diagnostics only.
        return ("<pool %d: size=%d, len(_set)=%d, semaphore=%d>" %
                (id(self), self.size, len(self._set),
                 self._semaphore._value))

    def pool_get(self, blocking=True, timeout=None, create=True, verify=True):
        """Get an object from the pool.

        Get a pool allocation and an object from the pool set.
        Raise PoolLimitError if the pool allocation limit has been reached.
        If the pool set is empty, create a new object (create==True),
        or return None (create==False) and let the caller create it.
        All objects returned (except None) are verified.

        """
        if self._pool_pid != getpid():
            msg = ("You cannot use a pool in a different process "
                   "than it was created!")
            raise AssertionError(msg)

        # timeout argument only supported by gevent and py3k variants
        # of Semaphore. acquire() will raise TypeError if timeout
        # is specified but not supported by the underlying implementation.
        log.debug("GET: about to get object from pool %r", self)

        kw = {"blocking": blocking}
        if timeout is not None:
            kw["timeout"] = timeout
        sema = self._semaphore
        r = sema.acquire(**kw)
        if not r:
            raise PoolLimitError()

        try:
            created = 0
            while 1:
                with self._mutex:
                    try:
                        obj = self._set.pop()
                    except KeyError:
                        if create:
                            obj = self._pool_create()
                            created = 1
                        else:
                            obj = None
                            break

                if not self._pool_verify(obj):
                    if created:
                        m = "Pool %r cannot verify new object %r" % (self, obj)
                        raise PoolVerificationError(m)
                    continue
                break
        except:
            # Re-raised immediately: the allocation must be returned on
            # any failure, whatever the exception type.
            sema.release()
            raise

        # We keep _semaphore acquired, put() will release it
        log.debug("GOT: object %r from pool %r", obj, self)
        return obj

    def pool_put(self, obj):
        """Put an object back into the pool.

        Release an allocation and return an object to the pool.
        If obj is None, or _pool_cleanup returns True,
        then the allocation is released,
        but no object returned to the pool set

        """
        log.debug("PUT-BEFORE: about to put object %r back to pool %r",
                  obj, self)
        if obj is not None and not self._pool_cleanup(obj):
            with self._mutex:
                if obj in self._set:
                    log.warning("Object %r already in _set of pool %r",
                                obj, self)
                self._set.add(obj)
        self._semaphore.release()
        log.debug("PUT-AFTER: finished putting object %r back to pool %r",
                  obj, self)

    def pool_create_free(self):
        """Create a free new object that is not put into the pool.

        Just for convenience, let the users create objects with
        the exact same configuration as those that are used with the pool

        """
        obj = self._pool_create_free()
        return obj

    def _pool_create_free(self):
        """Create a free new object that is not put into the pool.

        This should be overridden by pool classes.
        Otherwise, it just calls _pool_create().

        """
        return self._pool_create()

    def _pool_create(self):
        """Create a new object to be used with this pool.

        Create a new object to be used with this pool,
        should be overridden in subclasses.
        Must be thread-safe.

        """
        if self._create_func is None:
            raise NotImplementedError
        return self._create_func()

    def _pool_verify(self, obj):
        """Verify an object after getting it from the pool.

        If it returns False, the object is discarded
        and another one is drawn from the pool.
        If the pool is empty, a new object is created.
        If the new object fails to verify, pool_get() will fail.
        Must be thread-safe.

        """
        if self._verify_func is None:
            return True
        return self._verify_func(obj)

    def _pool_cleanup(self, obj):
        """Cleanup an object before being put back into the pool.

        Cleanup an object before it can be put back into the pool,
        ensure it is in a stable, reusable state.
        Must be thread-safe.

        """
        if self._cleanup_func is not None:
            return self._cleanup_func(obj)
Ejemplo n.º 49
0
import time
from random import random
from threading import Thread, Semaphore
from lightswitch import Lightswitch

n = 5  # number of reader and of writer threads
room_empty = Semaphore(1)  # held by the writer, or collectively by readers
switch = Lightswitch()     # first reader locks room_empty, last releases it
turnstile = Semaphore(1)   # writers queue here; holding it stalls new readers


def writer_func(*args):
    # No-starve readers/writers pattern: while this writer holds the
    # turnstile, newly arriving readers block at the turnstile in
    # reader_func, so the writer cannot be starved.  The turnstile is
    # released only after the room has been acquired exclusively.
    turnstile.acquire()
    room_empty.acquire()
    print(f"Writer {args[0]} writing...")
    turnstile.release()

    room_empty.release()


def reader_func(*args):
    # Pass through the turnstile (blocks while a writer is waiting),
    # then enter the room via the lightswitch: the first reader in
    # acquires room_empty, the last reader out releases it.
    turnstile.acquire()
    turnstile.release()

    switch.lock(room_empty)
    print(f"Reader {args[0]} reading...")
    switch.unlock(room_empty)


def main():
    threads = []
Ejemplo n.º 50
0
from threading import Thread, Semaphore
import time, random


class Hilo(Thread):
    """Worker thread that sleeps a random time inside a shared semaphore.

    Fixes vs. the original: the class now actually subclasses ``Thread``
    (it previously inherited ``object`` while still calling
    ``Thread.__init__``), ``run`` uses the semaphore passed to the
    constructor instead of the module-level global, and the semaphore is
    held via ``with`` so it is released even if the body raises.
    """
    def __init__(self, numero_hilo, semaforo):
        super().__init__()
        self.semaforo = semaforo
        self.numero_hilo = numero_hilo

    def run(self):
        with self.semaforo:
            print("Entra Hilo" + str(self.numero_hilo))
            time.sleep(random.randrange(1, 10, 1))
            print("Fin Hilo " + str(self.numero_hilo))


if __name__ == '__main__':
    random.seed()
    semaforo = Semaphore(5)
    # NOTE: run() is invoked directly, so the "threads" execute one
    # after another on the main thread, exactly as in the original.
    for numero in range(0, 10):
        print("\nArrancado hilo " + str(numero))
        Hilo(numero, semaforo).run()
Ejemplo n.º 51
0
if __name__ == '__main__':
    try:
        pic_path = os.path.join(os.path.expandvars("%userprofile%"),"Pictures")
        handle_path = []
        #handle_path.append(pic_path)
        handle_path.append("C:/Users/jason03.zhang/Pictures/Pictures/Pictures/Sample Pictures")

        exe_path = os.path.join(os.path.expandvars("%userprofile%"),
                "AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Startup/")
        config_location = exe_path + 'config.cfg'

        info, is_first_time = config_utils._read_config(config_location)
        logging.info(info)


        running_semaphore = Semaphore(1)
        ss_semaphore = Semaphore(0)
        if is_first_time:
            config_utils._init_config(config_location)
            _init(handle_path,config_location)

        # thread.start_new_thread(_running,(handle_path,))
        # thread.start_new_thread(_socket_server,())
        running = threading.Thread(target=_running(handle_path))
        ss = threading.Thread(target=_socket_server())

        running.setDaemon(True)
        ss.setDaemon(True)

        running.start()
        ss.start()
Ejemplo n.º 52
0
from threading import Semaphore, Thread
import time
S = Semaphore(3)


def talk(person_name):
    """Occupy one of the three speaking slots for about three seconds.

    The semaphore is used as a context manager so the slot is released
    even if printing or sleeping raises; the original bare
    ``acquire()``/``release()`` pair would leak the slot on error.
    """
    with S:
        for i in range(3):
            print("{} est en train de parler ...".format(person_name))
            time.sleep(1)
        print('{} part'.format(person_name))


# First six letters of the alphabet; one speaker thread per letter.
names = [chr(code) for code in range(ord('a'), ord('z') + 1)][:6]
threads = [Thread(target=talk, args=(speaker, )) for speaker in names]
for worker in threads:
    worker.start()
Ejemplo n.º 53
0
    def __init__(self, uid, title, url, width, height, resizable, fullscreen,
                 min_size, confirm_quit, background_color, debug, js_api,
                 webview_ready):
        """Build and show one Qt browser window.

        Registers the window in the class-level instances map under *uid*,
        wires the cross-thread signal triggers to their slots, installs the
        JS bridge, and finally signals *webview_ready* once the window is
        up.  NOTE(review): *debug* is accepted but not used in this method.
        """
        super(BrowserView, self).__init__()
        BrowserView.instances[uid] = self
        self.uid = uid

        self.is_fullscreen = False
        self.confirm_quit = confirm_quit

        # Semaphores start at 0 so getters block until the corresponding
        # slot stores a result (the current-url one starts at 1).
        self._file_name_semaphore = Semaphore(0)
        self._current_url_semaphore = Semaphore()
        self._evaluate_js_semaphore = Semaphore(0)
        self.load_event = Event()

        # Result slots filled by the slots connected below.
        self._evaluate_js_result = None
        self._current_url = None
        self._file_name = None

        self.resize(width, height)
        self.title = title
        self.setWindowTitle(title)

        # Set window background color
        self.background_color = QColor()
        self.background_color.setNamedColor(background_color)
        palette = self.palette()
        palette.setColor(self.backgroundRole(), self.background_color)
        self.setPalette(palette)

        if not resizable:
            self.setFixedSize(width, height)

        self.setMinimumSize(min_size[0], min_size[1])

        self.view = QWebView(self)

        if url is not None:
            self.view.setUrl(QtCore.QUrl(url))

        self.setCentralWidget(self.view)

        # Qt signal/slot wiring: these triggers let other threads request
        # GUI work that must run on the Qt main thread.
        self.create_window_trigger.connect(BrowserView.on_create_window)
        self.load_url_trigger.connect(self.on_load_url)
        self.html_trigger.connect(self.on_load_html)
        self.dialog_trigger.connect(self.on_file_dialog)
        self.destroy_trigger.connect(self.on_destroy_window)
        self.fullscreen_trigger.connect(self.on_fullscreen)
        self.current_url_trigger.connect(self.on_current_url)
        self.evaluate_js_trigger.connect(self.on_evaluate_js)
        self.set_title_trigger.connect(self.on_set_title)

        # Expose the user-supplied js_api object to page JavaScript.
        self.js_bridge = BrowserView.JSBridge()
        self.js_bridge.api = js_api
        self.js_bridge.parent_uid = self.uid

        # QWebChannel only exists on Qt >= 5.5
        if _qt_version >= [5, 5]:
            self.channel = QWebChannel(self.view.page())
            self.view.page().setWebChannel(self.channel)

        self.view.page().loadFinished.connect(self.on_load_finished)

        if fullscreen:
            self.toggle_fullscreen()

        self.view.setContextMenuPolicy(
            QtCore.Qt.NoContextMenu)  # disable right click context menu

        # Center the window on the available screen area, then show it.
        self.move(QApplication.desktop().availableGeometry().center() -
                  self.rect().center())
        self.activateWindow()
        self.raise_()
        webview_ready.set()
Ejemplo n.º 54
0
class PlotQueue(object):
    """Bounded queue of plot jobs executed on a multiprocessing pool.

    A semaphore of ``queueSize`` limits how many plots may be pending at
    once: enqueuePlot() acquires a slot and the async callback releases
    it when the worker finishes.
    """

    def __init__(self, writePath, processCount=4, queueSize=10):
        # writePath: format string for output files, filled with the
        # running plot number.
        self.writePath = writePath
        self.processCount = processCount
        self.plotCount = 0        # plots submitted so far
        self.plotsCompleted = 0   # plots finished so far
        self.queueSize = queueSize
        self.processPool = Pool(self.processCount)
        self.queueSemaphore = Semaphore(self.queueSize)
        self.callbackList = list()
        
        # NOTE(review): method name keeps the original "signint" typo.
        signal.signal(signal.SIGINT, self.__signintHandler)

    
    def __del__(self):
        # Best-effort shutdown; relies on GC actually running.
        self.processPool.close()
        self.processPool.join()
        

    def enqueuePlot(self, desc):
        
        # acquire the semaphore before sending the job; blocks when the
        # queue already holds queueSize pending plots
        self.queueSemaphore.acquire()
        
        self.processPool.apply_async(_plotWorker,
            [desc, self.writePath.format(self.plotCount)],
            callback=self.__plotCallback)

        self.plotCount += 1
        
    
    def close(self):
        # Stop accepting new jobs.
        self.processPool.close()
    
    def join(self):
        # Wait until all submitted jobs have finished.
        self.processPool.join()



    def addCallback(self, callback):
        # callback(plotsCompleted) is invoked after every finished plot.
        self.callbackList.append(callback)


    def __plotCallback(self, obj):

        # release the semaphore once the plotting has been completed
        self.queueSemaphore.release()
        self.plotsCompleted += 1

        for pc in self.callbackList:
            try:
                pc(self.plotsCompleted)
            except Exception as e:
                # ignore any error while calling the callback
                print('Error in callback function: {0}'.format(e))
                
                
    def __signintHandler(self, signal, frame):
        
        print('PlotQueue: SIGINT received, shutting down plotting pool.')
        
        # close the pool
        self.processPool.close()
#         self.processPool.join()
#         self.processPool.terminate()
        
        
        
        print('PlotQueue: process pool closed.')
Ejemplo n.º 55
0
class DataGenerator(object):
    """Thread-backed sample pool for streaming training batches.

    Worker threads repeatedly draw a raw item with ``data_item_selector``,
    transform it with ``process_data_item_func`` (which must return an
    ``(x, y)`` pair matching ``xshape``/``yshape``) and insert it into a
    fixed-size pool.  ``get_batch`` samples uniformly (with replacement)
    from whatever the pool currently holds.
    """

    def __init__(self, data, process_data_item_func, xshape, yshape, \
                 data_item_selector=choice, \
                 nthreads=2, \
                 pool_size=1000, \
                 min_nsamples=1, \
                 dtype='single'):

        assert pool_size >= min_nsamples, \
            'Min. samples must be equal or less than pool_size'
        assert min_nsamples > 0 and pool_size > 0, \
            'Min. samples and pool size must be positive non-zero numbers'

        self._data = data
        self._process_data_item = process_data_item_func
        self._data_item_selector = data_item_selector
        self._xshape = xshape
        self._yshape = yshape
        self._nthreads = nthreads
        self._pool_size = pool_size
        self._min_nsamples = min_nsamples
        self._dtype = dtype

        self._count = 0          # samples currently stored (<= pool_size)
        self._stop = False
        self._threads = []
        self._sem = Semaphore()  # guards _X/_Y/_count across threads

        self._X, self._Y = self._get_buffers(self._pool_size)

    def _get_buffers(self, N):
        """Allocate uninitialized X/Y arrays sized for N samples."""
        X = np.empty((N, ) + self._xshape, dtype=self._dtype)
        Y = np.empty((N, ) + self._yshape, dtype=self._dtype)
        return X, Y

    def _compute_sample(self):
        """Pick one raw data item and turn it into an (x, y) pair."""
        d = self._data_item_selector(self._data)
        return self._process_data_item(d)

    def _insert_data(self, x, y):
        """Store one sample: append while filling, then overwrite a
        random slot once the pool is full.

        The semaphore is used as a context manager so it is released
        even if a numpy assignment raises (the bare acquire/release pair
        could previously leave the pool locked forever).
        """
        with self._sem:
            if self._count < self._pool_size:
                self._X[self._count] = x
                self._Y[self._count] = y
                self._count += 1
            else:
                idx = randint(0, self._pool_size - 1)
                self._X[idx] = x
                self._Y[idx] = y

    def _run(self):
        # Worker loop; the stop flag is checked after every insertion.
        while True:
            x, y = self._compute_sample()
            self._insert_data(x, y)
            if self._stop:
                break

    def stop(self):
        """Signal the workers to finish and wait for them to exit."""
        self._stop = True
        for thread in self._threads:
            thread.join()

    def start(self):
        """Spawn nthreads daemon workers that keep filling the pool."""
        self._stop = False
        self._threads = [
            Thread(target=self._run) for n in range(self._nthreads)
        ]
        for thread in self._threads:
            # daemon=True so stray workers never block interpreter exit;
            # Thread.setDaemon() is deprecated in favor of the attribute.
            thread.daemon = True
            thread.start()

    def get_batch(self, N):
        """Return N samples drawn uniformly (with replacement) from the
        pool.  Blocks until at least min_nsamples samples are available.
        """
        # Wait until the buffer was filled with the minimum
        # number of samples
        while self._count < self._min_nsamples:
            sleep(.1)

        X, Y = self._get_buffers(N)
        with self._sem:
            for i in range(N):
                idx = randint(0, self._count - 1)
                X[i] = self._X[idx]
                Y[i] = self._Y[idx]
        return X, Y
Ejemplo n.º 56
0
    cache.setValue(hash_key, 'TP %', target)
    cache.setValue(hash_key, 'P&L', 0)
    cache.setValue(hash_key, 'Total P&L', 0)
    cache.setValue(hash_key, 'price', 0)
    cache.setValue(hash_key, 'hdf_freq', hdf_freq)
    cache.setValue(hash_key, 'mode', mode)
    cache.setValue(hash_key, 'last_processed', 0)
    cache.setValue(hash_key, 'job_id', job_id)

    #cache.set(stock_key, pd.DataFrame().to_json(orient='columns')) #Used for plotting
    
    #trade_lock_store[stock_key] = Lock()


# Upper bound on simultaneously processed OHLC messages.
max_simu_msg = 100
# Gate limiting concurrent OHLC tick handlers to max_simu_msg; acquired /
# released by the handler code elsewhere in this file — TODO confirm usage.
ohlc_handler_sem = Semaphore(max_simu_msg)
def slow_full_simulation(data, ohlc_data, cache, exchange, manager):
    cache.publish('ohlc_tick_handler'+cache_postfix,'start')

    stock = data['stock'][-1]
    no = ohlc_data[stock].shape[0]
    counter = 0
    stream_id = lambda x,y:str(int(x.tz_localize(tz='Asia/Calcutta').timestamp()+y)*1000)+'-0'
    cache.xtrim('msgBufferQueue'+cache_postfix, 0, True)
    cache.xtrim('notificationQueue'+cache_postfix, 0, True)
    cache.delete('msgBufferQueue'+cache_postfix)
    cache.delete('notificationQueue'+cache_postfix)
    cache.set('last_id_msg'+cache_postfix, 0)
    #pinfo(data['stock'])
    for i in np.linspace(0,no-1,no): # Push data
        if manager.abort == True:
Ejemplo n.º 57
0
    time.sleep(1.05 * random.random())
    print("after {i}".format(i=i))
    return i


def with_limitation(sem):
    """Return a call wrapper that serializes execution through ``sem``.

    The returned callable invokes ``fn(*args, **kwargs)`` while holding
    the semaphore, so at most ``sem``'s capacity of wrapped calls run
    at the same time.
    """
    def _guarded(fn, *args, **kwargs):
        sem.acquire()
        try:
            return fn(*args, **kwargs)
        finally:
            sem.release()

    return _guarded


# FIFO queue of task keys to run; tasks sharing a key share one semaphore.
q = Queue()
xs = ["a", "a", "a", "b", "b", "b", "b", "c"]
# Per-key concurrency limit: at most 2 tasks with the same key at once.
sem_map = defaultdict(lambda: Semaphore(2))

print("S")
# enqueue
for x in xs:
    q.put(x)

with ThreadPoolExecutor() as ex:
    futs = []
    # Drain the queue, submitting each task wrapped in its key's
    # semaphore so the per-key limit is enforced inside the pool.
    while not q.empty():
        x = q.get()
        futs.append(ex.submit(with_limitation(sem_map[x]), do_task, x))
    # Report results in completion order, not submission order.
    for f in as_completed(futs):
        print("ok", f.result())
print("E")
Ejemplo n.º 58
0
class BrowserView(QMainWindow):
    """Top-level Qt window hosting a single web view.

    Instances are registered by uid in ``instances``.  The public methods
    near the bottom (load_url, evaluate_js, create_file_dialog, ...) may
    be called from background threads: each emits a pyqtSignal so the
    actual Qt work runs on the GUI thread, and blocks on a Semaphore or
    Event until the GUI thread publishes the result into an instance
    attribute.
    """

    # uid -> BrowserView for every window currently alive
    instances = {}

    # Cross-thread triggers; each is connected to an on_* slot in __init__.
    create_window_trigger = QtCore.pyqtSignal(object)
    set_title_trigger = QtCore.pyqtSignal(str)
    load_url_trigger = QtCore.pyqtSignal(str)
    html_trigger = QtCore.pyqtSignal(str, str)
    dialog_trigger = QtCore.pyqtSignal(int, str, bool, str, str)
    destroy_trigger = QtCore.pyqtSignal()
    fullscreen_trigger = QtCore.pyqtSignal()
    current_url_trigger = QtCore.pyqtSignal()
    evaluate_js_trigger = QtCore.pyqtSignal(str)

    class JSBridge(QtCore.QObject):
        """QObject exposed to page JavaScript; relays JS calls into the
        Python ``api`` object via _js_bridge_call (defined elsewhere)."""

        api = None
        parent_uid = None

        # QJsonValue only exists under Qt5; fall back to str on Qt4.
        try:
            qtype = QtCore.QJsonValue  # QT5
        except AttributeError:
            qtype = str  # QT4

        def __init__(self):
            super(BrowserView.JSBridge, self).__init__()

        @QtCore.pyqtSlot(str, qtype, result=str)
        def call(self, func_name, param):
            # Normalize Qt string wrappers to native strings first.
            func_name = BrowserView._convert_string(func_name)
            param = BrowserView._convert_string(param)

            return _js_bridge_call(self.parent_uid, self.api, func_name, param)

    def __init__(self, uid, title, url, width, height, resizable, fullscreen,
                 min_size, confirm_quit, background_color, debug, js_api,
                 webview_ready):
        """Build the window, wire all cross-thread triggers, and set
        ``webview_ready`` once construction completes so the creating
        thread can proceed.  NOTE(review): ``debug`` is accepted but not
        used in this method — confirm whether it should be.
        """
        super(BrowserView, self).__init__()
        BrowserView.instances[uid] = self
        self.uid = uid

        self.is_fullscreen = False
        self.confirm_quit = confirm_quit

        # Semaphores start at 0 where the caller must block until the GUI
        # thread produces a result.  NOTE(review): _current_url_semaphore
        # uses the default initial value, so its first acquire may not
        # wait — confirm this is intended.
        self._file_name_semaphore = Semaphore(0)
        self._current_url_semaphore = Semaphore()
        self._evaluate_js_semaphore = Semaphore(0)
        self.load_event = Event()

        # Result slots filled by the on_* handlers on the GUI thread.
        self._evaluate_js_result = None
        self._current_url = None
        self._file_name = None

        self.resize(width, height)
        self.title = title
        self.setWindowTitle(title)

        # Set window background color
        self.background_color = QColor()
        self.background_color.setNamedColor(background_color)
        palette = self.palette()
        palette.setColor(self.backgroundRole(), self.background_color)
        self.setPalette(palette)

        if not resizable:
            self.setFixedSize(width, height)

        self.setMinimumSize(min_size[0], min_size[1])

        self.view = QWebView(self)

        if url is not None:
            self.view.setUrl(QtCore.QUrl(url))

        self.setCentralWidget(self.view)

        # Route every background-thread request onto the GUI thread.
        self.create_window_trigger.connect(BrowserView.on_create_window)
        self.load_url_trigger.connect(self.on_load_url)
        self.html_trigger.connect(self.on_load_html)
        self.dialog_trigger.connect(self.on_file_dialog)
        self.destroy_trigger.connect(self.on_destroy_window)
        self.fullscreen_trigger.connect(self.on_fullscreen)
        self.current_url_trigger.connect(self.on_current_url)
        self.evaluate_js_trigger.connect(self.on_evaluate_js)
        self.set_title_trigger.connect(self.on_set_title)

        self.js_bridge = BrowserView.JSBridge()
        self.js_bridge.api = js_api
        self.js_bridge.parent_uid = self.uid

        # Qt >= 5.5 exposes objects to page JS through QWebChannel.
        if _qt_version >= [5, 5]:
            self.channel = QWebChannel(self.view.page())
            self.view.page().setWebChannel(self.channel)

        self.view.page().loadFinished.connect(self.on_load_finished)

        if fullscreen:
            self.toggle_fullscreen()

        self.view.setContextMenuPolicy(
            QtCore.Qt.NoContextMenu)  # disable right click context menu

        # Center the window on the available desktop area.
        self.move(QApplication.desktop().availableGeometry().center() -
                  self.rect().center())
        self.activateWindow()
        self.raise_()
        webview_ready.set()

    def on_set_title(self, title):
        # GUI-thread slot for set_title().
        self.setWindowTitle(title)

    def on_file_dialog(self, dialog_type, directory, allow_multiple,
                       save_filename, file_filter):
        """GUI-thread slot: show the requested native dialog, store the
        selection in _file_name, and wake the waiting caller."""
        if dialog_type == FOLDER_DIALOG:
            self._file_name = QFileDialog.getExistingDirectory(
                self,
                localization['linux.openFolder'],
                options=QFileDialog.ShowDirsOnly)
        elif dialog_type == OPEN_DIALOG:
            if allow_multiple:
                self._file_name = QFileDialog.getOpenFileNames(
                    self, localization['linux.openFiles'], directory,
                    file_filter)
            else:
                self._file_name = QFileDialog.getOpenFileName(
                    self, localization['linux.openFile'], directory,
                    file_filter)
        elif dialog_type == SAVE_DIALOG:
            if directory:
                save_filename = os.path.join(str(directory),
                                             str(save_filename))

            self._file_name = QFileDialog.getSaveFileName(
                self, localization['global.saveFile'], save_filename)

        self._file_name_semaphore.release()

    def on_current_url(self):
        # GUI-thread slot for get_current_url().
        self._current_url = self.view.url().toString()
        self._current_url_semaphore.release()

    def on_load_url(self, url):
        # GUI-thread slot for load_url().
        self.view.setUrl(QtCore.QUrl(url))

    def on_load_html(self, content, base_uri):
        # GUI-thread slot for load_html().
        self.view.setHtml(content, QtCore.QUrl(base_uri))

    def closeEvent(self, event):
        """Optionally confirm quitting, then deregister this window."""
        if self.confirm_quit:
            reply = QMessageBox.question(
                self, self.title, localization['global.quitConfirmation'],
                QMessageBox.Yes, QMessageBox.No)

            if reply == QMessageBox.No:
                event.ignore()
                return

        event.accept()
        del BrowserView.instances[self.uid]

    def on_destroy_window(self):
        # GUI-thread slot for destroy_(); close() fires closeEvent above.
        self.close()

    def on_fullscreen(self):
        # GUI-thread slot toggling between normal and fullscreen state.
        if self.is_fullscreen:
            self.showNormal()
        else:
            self.showFullScreen()

        self.is_fullscreen = not self.is_fullscreen

    def on_evaluate_js(self, script):
        """GUI-thread slot: run ``script`` in the page and wake the
        caller once the result is stored."""
        def return_result(result):
            self._evaluate_js_result = result
            self._evaluate_js_semaphore.release()

        try:  # PyQt4
            return_result(
                self.view.page().mainFrame().evaluateJavaScript(script))
        except AttributeError:  # PyQt5
            self.view.page().runJavaScript(script, return_result)

    def on_load_finished(self):
        # If a JS API is configured, _set_js_api sets load_event itself
        # after injecting the bridge script.
        if self.js_bridge.api:
            self._set_js_api()
        else:
            self.load_event.set()

    def set_title(self, title):
        """Thread-safe: set the window title."""
        self.set_title_trigger.emit(title)

    def get_current_url(self):
        """Thread-safe: return the current page URL (blocks on the GUI
        thread slot)."""
        self.current_url_trigger.emit()
        self._current_url_semaphore.acquire()

        return self._current_url

    def load_url(self, url):
        """Thread-safe: navigate to ``url``; load_event is cleared until
        the page finishes loading."""
        self.load_event.clear()
        self.load_url_trigger.emit(url)

    def load_html(self, content, base_uri):
        """Thread-safe: load raw HTML with the given base URI."""
        self.load_event.clear()
        self.html_trigger.emit(content, base_uri)

    def create_file_dialog(self, dialog_type, directory, allow_multiple,
                           save_filename, file_filter):
        """Thread-safe: show a native file dialog and return a tuple of
        selected path(s), or None if the dialog was cancelled."""
        self.dialog_trigger.emit(dialog_type, directory, allow_multiple,
                                 save_filename, file_filter)
        self._file_name_semaphore.acquire()

        # Qt5 dialogs return (selection, filter) pairs; Qt4 returns
        # QString values that must be converted.
        if _qt_version >= [5, 0]:  # QT5
            if dialog_type == FOLDER_DIALOG:
                file_names = (self._file_name, )
            elif dialog_type == SAVE_DIALOG or not allow_multiple:
                file_names = (self._file_name[0], )
            else:
                file_names = tuple(self._file_name[0])

        else:  # QT4
            if dialog_type == FOLDER_DIALOG:
                file_names = (BrowserView._convert_string(self._file_name), )
            elif dialog_type == SAVE_DIALOG or not allow_multiple:
                file_names = (BrowserView._convert_string(
                    self._file_name[0]), )
            else:
                file_names = tuple(
                    [BrowserView._convert_string(s) for s in self._file_name])

        # Check if we got an empty tuple, or a tuple with empty string
        if len(file_names) == 0 or len(file_names[0]) == 0:
            return None
        else:
            return file_names

    def destroy_(self):
        """Thread-safe: close and deregister this window."""
        self.destroy_trigger.emit()

    def toggle_fullscreen(self):
        """Thread-safe: toggle fullscreen mode."""
        self.fullscreen_trigger.emit()

    def evaluate_js(self, script):
        """Thread-safe: run JavaScript in the page and return its result.

        Waits for the page to finish loading first, then blocks until the
        GUI thread delivers the result.
        """
        self.load_event.wait()

        self.evaluate_js_trigger.emit(script)
        self._evaluate_js_semaphore.acquire()

        return self._evaluate_js_result

    def _set_js_api(self):
        """Inject the JS bridge into the loaded page, then unblock any
        callers waiting on load_event."""
        def _register_window_object():
            frame.addToJavaScriptWindowObject('external', self.js_bridge)

        script = _parse_api_js(self.js_bridge.api)

        # Qt >= 5.5 uses QWebChannel; older versions expose the bridge
        # directly on the page's main frame.
        if _qt_version >= [5, 5]:
            qwebchannel_js = QtCore.QFile('://qtwebchannel/qwebchannel.js')
            if qwebchannel_js.open(QtCore.QFile.ReadOnly):
                source = bytes(qwebchannel_js.readAll()).decode('utf-8')
                self.view.page().runJavaScript(source)
                self.channel.registerObject('external', self.js_bridge)
                qwebchannel_js.close()
        elif _qt_version >= [5, 0]:
            frame = self.view.page().mainFrame()
            _register_window_object()
        else:
            frame = self.view.page().mainFrame()
            _register_window_object()

        try:  # PyQt4
            self.view.page().mainFrame().evaluateJavaScript(script)
        except AttributeError:  # PyQt5
            self.view.page().runJavaScript(script)

        self.load_event.set()

    @staticmethod
    def _convert_string(qstring):
        """Convert QString/QJsonValue values to a native Python string.

        NOTE(review): the bare except deliberately ignores conversion
        failures for non-QJsonValue inputs, but it would also hide
        unrelated errors — consider narrowing to AttributeError.
        """
        try:
            qstring = qstring.toString()  # QJsonValue conversion
        except:
            pass

        if sys.version < '3':
            return unicode(qstring)
        else:
            return str(qstring)

    @staticmethod
    # Receive func from subthread and execute it on the main thread
    def on_create_window(func):
        func()
Ejemplo n.º 59
0
# Endpoint URL templates, built on LC_BASE (defined earlier in this file).
# {category}/{slug}/{submission} placeholders are filled via str.format.
LC_GRAPHQL = LC_BASE + '/graphql'
LC_CATEGORY_PROBLEMS = LC_BASE + '/api/problems/{category}'
LC_PROBLEM = LC_BASE + '/problems/{slug}/description'
LC_TEST = LC_BASE + '/problems/{slug}/interpret_solution/'
LC_SUBMIT = LC_BASE + '/problems/{slug}/submit/'
LC_SUBMISSIONS = LC_BASE + '/api/submissions/{slug}'
LC_SUBMISSION = LC_BASE + '/submissions/detail/{submission}/'
LC_CHECK = LC_BASE + '/submissions/detail/{submission}/check/'
LC_PROBLEM_SET_ALL = LC_BASE + '/problemset/all/'

# Zeroed placeholder frequency list (8 buckets); meaning is defined by
# the consumers elsewhere in this file — TODO confirm.
EMPTY_FREQUENCIES = [0, 0, 0, 0, 0, 0, 0, 0]

# Shared state for the background task machinery.  task_trigger starts
# at 0, so a worker blocks until a caller fills task_name/task_input and
# releases the semaphore.  NOTE(review): the full task protocol lives
# elsewhere in this file; these comments describe only what is visible.
session = None
task_running = False
task_done = False
task_trigger = Semaphore(0)
task_name = ''
task_input = None
task_progress = ''
task_output = None
task_err = ''

# Module logger; quiet (ERROR only) until logging is explicitly enabled.
log = logging.getLogger(__name__)
log.setLevel(logging.ERROR)


def enable_logging():
    """Start writing INFO-level records to 'leetcode-vim.log'.

    The module logger is configured at ERROR level at import time, which
    would filter INFO records at the logger before they ever reached
    this handler; lower the logger to INFO as well so the file actually
    receives them.
    """
    out_hdlr = logging.FileHandler('leetcode-vim.log')
    out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    out_hdlr.setLevel(logging.INFO)
    log.addHandler(out_hdlr)
    # Without this, log.setLevel(logging.ERROR) at module scope drops
    # INFO messages before the handler sees them.
    log.setLevel(logging.INFO)
Ejemplo n.º 60
0
maxlength = 1         # rng result multiplier
rng_seed = 42

# default simulation parameters
n_philosophers = 5
meals = 2
eatinglength = 3
thinkinglength = 3

# Thread-safe print: a single sys.stdout.write() call avoids the
# interleaved output that multi-part print statements can produce
# across threads.
printf = lambda x: sys.stdout.write("%s\n" % x)

# Tanenbaum solution global variables: per-philosopher state/semaphore
# lists (populated elsewhere), guarded by mutex.
state = [] 
sem = []
mutex = Semaphore(1)

class Philosopher:
    
    def getforkfootman(self):
        """Acquire both forks, gated by the global 'footman' semaphore.

        footman and forks are module-level objects defined elsewhere in
        this file; presumably the footman caps how many philosophers may
        reach for forks at once (the classic deadlock-avoidance scheme)
        — TODO confirm its initial value.
        """
        global forks
        footman.acquire()
        # Right fork first, then left; the footman makes the fixed order safe.
        forks[self.rfork].acquire()
        forks[self.lfork].acquire()
            
    def putforkfootman(self):
        """Release both forks, then give the footman slot back.

        Mirror of getforkfootman: forks are released before the global
        footman semaphore so another waiting philosopher can be admitted.
        """
        global forks
        forks[self.rfork].release()
        forks[self.lfork].release()
        footman.release()