Example 1
 def testVoid(self):
     result = yield From(self.client.testVoid())
     self.assertEqual(result, None)
Example 2
def calls_decorated():
  yield From(duration_decorated())
Example 3
    def test_schedule_job_executor_fallback(self):
        EphemeralBuilderManager.EXECUTORS["primary"] = TestExecutor
        EphemeralBuilderManager.EXECUTORS["secondary"] = TestExecutor

        self.manager.initialize({
            "EXECUTORS": [
                {
                    "NAME": "primary",
                    "EXECUTOR": "primary",
                    "NAMESPACE_WHITELIST": ["something"],
                    "MINIMUM_RETRY_THRESHOLD": 3,
                },
                {
                    "NAME": "secondary",
                    "EXECUTOR": "secondary",
                    "MINIMUM_RETRY_THRESHOLD": 2,
                },
            ],
            "ALLOWED_WORKER_COUNT":
            5,
            "ORCHESTRATOR": {
                "MEM_CONFIG": None
            },
        })

        # Try a job not matching the primary's namespace filter. Should schedule on secondary.
        build_job = self._create_build_job(namespace="somethingelse")
        result = yield From(self.manager.schedule(build_job))
        self.assertTrue(result[0])

        self.assertIsNone(self.manager.registered_executors[0].job_started)
        self.assertIsNotNone(self.manager.registered_executors[1].job_started)

        self.manager.registered_executors[0].job_started = None
        self.manager.registered_executors[1].job_started = None

        # Try a job not matching the primary's retry minimum. Should schedule on secondary.
        build_job = self._create_build_job(namespace="something", retries=2)
        result = yield From(self.manager.schedule(build_job))
        self.assertTrue(result[0])

        self.assertIsNone(self.manager.registered_executors[0].job_started)
        self.assertIsNotNone(self.manager.registered_executors[1].job_started)

        self.manager.registered_executors[0].job_started = None
        self.manager.registered_executors[1].job_started = None

        # Try a job matching the primary. Should schedule on the primary.
        build_job = self._create_build_job(namespace="something", retries=3)
        result = yield From(self.manager.schedule(build_job))
        self.assertTrue(result[0])

        self.assertIsNotNone(self.manager.registered_executors[0].job_started)
        self.assertIsNone(self.manager.registered_executors[1].job_started)

        self.manager.registered_executors[0].job_started = None
        self.manager.registered_executors[1].job_started = None

        # Try a job not matching either's restrictions.
        build_job = self._create_build_job(namespace="somethingelse",
                                           retries=1)
        result = yield From(self.manager.schedule(build_job))
        self.assertFalse(result[0])

        self.assertIsNone(self.manager.registered_executors[0].job_started)
        self.assertIsNone(self.manager.registered_executors[1].job_started)

        self.manager.registered_executors[0].job_started = None
        self.manager.registered_executors[1].job_started = None
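
The comments in this test spell out the fallback rule: an executor is skipped when the job's namespace is not in its whitelist, or when the job has fewer retries remaining than the executor's minimum threshold, and the first executor that passes both checks wins. A minimal standalone sketch of that rule (the `pick_executor` name is hypothetical; the executor API it calls is the one used by `schedule()` in Example 25 below):

def pick_executor(executors, namespace, retries_remaining):
    # Hypothetical helper illustrating the fallback rule exercised above.
    for executor in executors:
        if not executor.allowed_for_namespace(namespace):
            continue  # namespace whitelist filter
        if executor.minimum_retry_threshold > retries_remaining:
            continue  # too few retries remaining for this executor
        return executor  # first executor whose restrictions all pass
    return None  # nothing matched; the job cannot be scheduled
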
Example 4
def handle_visual(win):
    '''
    This flips the window, draws the stuff to be drawn, and calls
    functions to be called from the stop task. It is supposed to be
    run in the asyncio loop.
    '''
    
    
    mainClock = clock.Clock()  # our main clock...
    frameCounter = 0
    previousShapes = []
    mainCurrentTime = 0

    # this will run the entire length of the visual...
    # within this time, the stop signal task will (hopefully) finish.
    # OR... we can also just use a counter.
    while mainCurrentTime < 340.0:

        # the workflow
        # 1) Prepare everything + draw
        # 2) Prepare markers
        # 3) win.flip() + the callOnFlip routines to be done.

        # all the visual stuff:
        frameIndex, visContents, markers = fd_with_markers[frameCounter]
        frameCounter += 1
        # deal with the visuals -- using vstims which should be accessible
        # we get the list...
        # create the shapes to be drawn using list comprehension
        if len(visContents) > 0:
            # collect the stimuli listed for this frame from vstims
            shapes = [stim for ind in visContents for stim in vstims[ind]]
        else:
            shapes = []

        # add the gonogo stimuli to them:
        for stim in goNogoStimContainer[0]:
            shapes.append(stim)

        # draw them on our little canvas.
        for shape in shapes:
            shape.draw()

        # prepare the calls for the next iteration, including markers;
        # deal with visual markers
        if len(markers) > 0:
            for marker in markers:
                win.callOnFlip(eventHandler.handle, marker)

        # schedule the one-shot tasks queued by the gonogo coroutine;
        # each task is a list: [callable] or [callable, argument]
        for task in nextfliptasks:
            if len(task) == 1:
                win.callOnFlip(task[0])
            elif len(task) == 2:
                win.callOnFlip(task[0], task[1])
        del nextfliptasks[:]  # the tasks are one-shot, so clear the queue

        # we flip the screen here - this will take ~ 16.66667 msec.
        win.flip()

        # keep track of elapsed time so the 340-second loop above can end
        mainCurrentTime = mainClock.getTime()

        # sleep for a little while:
        yield From(asyncio.sleep(ASYNC_SLEEPTIME))
Example 5
        def _async_execute_process_pty(protocol_class,
                                       cmd,
                                       cwd,
                                       env,
                                       shell,
                                       stderr_to_stdout=True):
            loop = get_loop()
            # Create the PTYs
            stdout_master, stdout_slave = pty.openpty()
            if stderr_to_stdout:
                stderr_master, stderr_slave = stdout_master, stdout_slave
            else:
                stderr_master, stderr_slave = pty.openpty()

            def protocol_factory():
                return protocol_class(stdin=None,
                                      stdout=stdout_master,
                                      stderr=stderr_master)

            # Start the subprocess
            if shell is True:
                transport, protocol = yield From(
                    loop.subprocess_shell(protocol_factory,
                                          " ".join(cmd),
                                          cwd=cwd,
                                          env=env,
                                          stdout=stdout_slave,
                                          stderr=stderr_slave,
                                          close_fds=False))
            else:
                transport, protocol = yield From(
                    loop.subprocess_exec(protocol_factory,
                                         *cmd,
                                         cwd=cwd,
                                         env=env,
                                         stdout=stdout_slave,
                                         stderr=stderr_slave,
                                         close_fds=False))

            # Close our copies of the slaves;
            # the child's copies remain open until it terminates
            os.close(stdout_slave)
            if not stderr_to_stdout:
                os.close(stderr_slave)

            # Create Protocol classes
            class PtyStdoutProtocol(asyncio.Protocol):
                def connection_made(self, transport):
                    if hasattr(protocol, 'on_stdout_open'):
                        protocol.on_stdout_open()

                def data_received(self, data):
                    if hasattr(protocol, 'on_stdout_received'):
                        protocol.on_stdout_received(data)

                def connection_lost(self, exc):
                    if hasattr(protocol, 'on_stdout_close'):
                        protocol.on_stdout_close(exc)

            class PtyStderrProtocol(asyncio.Protocol):
                def connection_made(self, transport):
                    if hasattr(protocol, 'on_stderr_open'):
                        protocol.on_stderr_open()

                def data_received(self, data):
                    if hasattr(protocol, 'on_stderr_received'):
                        protocol.on_stderr_received(data)

                def connection_lost(self, exc):
                    if hasattr(protocol, 'on_stderr_close'):
                        protocol.on_stderr_close(exc)

            # Add the PTYs to the read loop
            # Also store the transport, protocol tuple for each call to
            # connect_read_pipe, to prevent the destruction of the protocol
            # class instance, otherwise no data is received.
            protocol.stdout_tuple = yield From(
                loop.connect_read_pipe(PtyStdoutProtocol,
                                       os.fdopen(stdout_master, 'rb', 0)))
            if not stderr_to_stdout:
                protocol.stderr_tuple = yield From(
                    loop.connect_read_pipe(PtyStderrProtocol,
                                           os.fdopen(stderr_master, 'rb', 0)))
            # Return the protocol and transport
            raise Return(transport, protocol)
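
A rough usage sketch for the PTY helper above, assuming a trollius event loop and that the helper is importable at module level; `EchoProtocol` and the `ls -l` command are illustrative only, and the protocol implements just the optional hooks the helper probes with `hasattr`:

import sys
import trollius
from trollius import From

class EchoProtocol(trollius.SubprocessProtocol):
    # Hypothetical caller-supplied protocol; only the optional hooks need exist.
    def __init__(self, stdin=None, stdout=None, stderr=None):
        pass

    def on_stdout_received(self, data):
        sys.stdout.write(data.decode('utf-8', 'replace'))

@trollius.coroutine
def demo():
    transport, protocol = yield From(
        _async_execute_process_pty(EchoProtocol, ['ls', '-l'],
                                   cwd=None, env=None, shell=False))
    yield From(trollius.sleep(1.0))  # crude: give the child time to finish
    transport.close()

trollius.get_event_loop().run_until_complete(demo())
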
Example 6
 def test():
     try:
         res = yield From(foo())
         raise Return(res)
     except ZeroDivisionError:
         non_local['CHK'] += 1
Example 7
def my_coroutine(task_name, seconds_to_sleep=3):
    print('{0} sleeping for: {1} seconds'.format(task_name, seconds_to_sleep))
    yield From(asyncio.sleep(seconds_to_sleep))
    print('{0} is finished'.format(task_name))
Example 8
 def testOneway(self):
     yield From(self.client.testOneway(2))
     start, end, seconds = yield From(self.handler.onewaysQueue.get())
     self.assertAlmostEqual(seconds, (end - start), places=1)
Example 9
 def testString(self, s):
     yield From(asyncio.sleep(0))
     raise Return(s)
Example 10
 def testI64(self):
     result = yield From(self.client.testI64(-34359738368))
     self.assertEqual(result, -34359738368)
Example 11
 def testDouble(self):
     result = yield From(self.client.testDouble(-5.235098235))
     self.assertAlmostEqual(result, -5.235098235)
Example 12
 def testI32(self):
     result = yield From(self.client.testI32(-1))
     self.assertEqual(result, -1)
     result = yield From(self.client.testI32(0))
     self.assertEqual(result, 0)
Example 13
 def testByte(self):
     result = yield From(self.client.testByte(63))
     self.assertEqual(result, 63)
Example 14
 def testString(self):
     result = yield From(self.client.testString('Python'))
     self.assertEqual(result, 'Python')
Example 15
 def test():
     res = yield From(foo())
     raise Return(res)
Example 16
 def testOneway(self, seconds):
     t = time.time()
     yield From(asyncio.sleep(seconds))
     yield From(self.onewaysQueue.put((t, time.time(), seconds)))
Example 17
 def bar():
     yield From(None)
     yield From(None)
     1/0
Example 18
 def my_dummy_provider(self):
     """
         just a dummy provider for when it's the first time
         the trigger is handled
     """
     yield From(q2.put(1))
Example 19
 def bar():
     yield From(None)
     raise Return(5)
Example 20
    def my_consumer(self, service_consumer, token, service_id, date_triggered):
        """
            service_consumer : the name of the consumer 'service' class
            token : is the token of the service consumer
            service_id : is the service id from the database
            date_triggered : date_triggered is the data from the database
        """
        count_new_data = 0
        while q.empty() is not True:
            data = yield From(q.get())

            consumer = getattr(service_consumer, 'save_data')

            published = ''
            which_date = ''

            # flag to know if we can push data to the consumer

            # 2) for each piece of data:
            # if one of them in the pool does not have a date,
            # take the previous date for this one;
            # if it's the first one, set it to 00:00:00
            # let's try to determine the date contained in the data...
            published = to_datetime(data)
            if published is not None:
                # get the published date of the provider
                published = arrow.get(str(published),
                                      'YYYY-MM-DD HH:mm:ss').to(
                                          settings.TIME_ZONE)
                # store the date for the next loop
                # if published became 'None'
                which_date = published
            #... otherwise set it to 00:00:00 of the current date
            if which_date == '':
                # current date
                which_date = arrow.utcnow().replace(hour=0, minute=0, second=0)
                published = which_date
            if published is None and which_date != '':
                published = which_date
            # 3) check if the previous trigger is older than the
            # date of the data we retrieved
            # if yes, process the consumer

            # add the TIME_ZONE settings
            my_date_triggered = arrow.get(str(date_triggered),
                                          'YYYY-MM-DD HH:mm:ss').to(
                                              settings.TIME_ZONE)

            # if the published date is greater than or equal to the last
            # triggered event... :
            if date_triggered is not None and \
               published is not None and \
               published >= my_date_triggered:

                if 'title' in data:
                    logger.info("date {} >= date triggered {} title {}".format(
                        published, date_triggered, data['title']))
                else:
                    logger.info("date {} >= date triggered {} ".format(
                        published, my_date_triggered))

                consumer(token, service_id, **data)

                count_new_data += 1
            # otherwise do nothing
            else:
                if 'title' in data:
                    logger.debug("data outdated skipped : [{}] {}".format(
                        published, data['title']))
                else:
                    logger.debug(
                        "data outdated skipped : [{}] ".format(published))

        # return the number of updates ( to be displayed in the log )
        yield From(q2.put(count_new_data))
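
A sketch of how this consumer might be driven, assuming, as these examples suggest, that `q` and `q2` are shared trollius queues, that some provider has already filled `q`, and that `trigger` is the object exposing the coroutine methods above; this wiring is hypothetical and not part of the original code:

import trollius
from trollius import From

@trollius.coroutine
def run_consumer(trigger, service_consumer, token, service_id, date_triggered):
    # my_consumer drains `q`, then reports the number of pushed items on `q2`
    yield From(trigger.my_consumer(service_consumer, token,
                                   service_id, date_triggered))
    count_new_data = yield From(q2.get())
    logger.info('%d new data pushed to the consumer', count_new_data)
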
Example 21
def handle_gonogo(nextfliptasks, myClocks, continueRoutineContainer, goNogoStimContainer, eventHandler):
    '''
    This contains the experimental logic of the Stop Task. A lot of work
    went into constructing the stimuli. Stimuli parameters are loaded
    into variables in the code above. Runs 136 trials of Go-Nogo.
    This function is to be run within the asyncio loop.
    '''
 

    # if the time it took to respond is smaller than this time --> invalid.
    tooSoonTime = 0.025
    allResponses = []
    numberOfResponses = 0
    # set the visual contents here...
    # INITIAL SETTING
    goNogoStimContainer[0] = sstims['fix']

    # yeah, do all kinds of init here.
    for trialNumber in range(len(SStopgo)):

        
        thisDirection=random.choice(('al','ar')) # obtain this from the file!!
        thisTrialType = SStopgo[trialNumber] # this is a 0 (GO) or 1 (STOP)
        thisISIWaitTime = ISIwaitTime[trialNumber]
        
        correctResponseSide = correctResponseSides[trialNumber]
        wrongResponseSide = wrongResponseSides[trialNumber]
        
        responded = False  # subj responded?
        tooManyResponses = False
        trialHandled = False
        triggerSentGo = False
        buttonpressed = False  # set to True once a key press is recorded this trial
        allResponses = []      # responses recorded for the current trial
        numberOfResponses = 0

        # figure out these:
            # BGoL
            # BGoR
            # BStopL
            # BStopR
        
        
        if taskType is STOP:
            # this should be called only 40 times, since there are 40 stop trials...
            thisSSD, thisCondition = myMultiStairs.next()  # I defined the myMultiStairs above.

        # this code tells the loop to only continue when continueRoutine is not False
        # otherwise it'll just keep yielding.
        # let winflipper make new clock
        continueRoutineContainer[0] = False
        nextfliptasks.append([makeNewClock])  # the makeNewClock automatically makes things continue
        while continueRoutineContainer[0] is False:
            yield From(asyncio.sleep(0))
        cl = clockContainer[0]  # obtain the clock that was just made.


        # ok, we can proceed -- the clock has been set.
        while cl.getTime() < 0.5:
            goNogoStimContainer[0]=sstims['pre']
            yield From(asyncio.sleep(0))

        # obtain our next clock...
        # this code tells the loop to only continue when continueRoutine is not False
        # otherwise it'll just keep yielding.
        # let winflipper make new clock
        continueRoutineContainer[0] = False
        nextfliptasks.append([makeNewClock])  # the makeNewClock automatically makes things continue
        # send the trigger regarding the arrow, as soon as the window flips
        nextfliptasks.append([eventHandler.handle,
                              ['BGo', 'BStop'][thisTrialType]
                              + {BUTTONS[0]: 'L', BUTTONS[1]: 'R'}[correctResponseSide]])
        while continueRoutineContainer[0] is False:
            yield From(asyncio.sleep(0))
        cl = clockContainer[0]  # obtain the clock that was just made.
        


        
        currentTime = 0.0
        while currentTime < 1.0:
            currentTime = cl.getTime()
            
            # set the stimulus to the proper direction (it's a choice, for now... -- but it's much much better to hard-code it)
            # make the arrow (+ circle)
            goNogoStimContainer[0] = sstims[thisDirection]

            evs = event.getKeys(timeStamped=cl)
            if len(evs) > 0:
                buttonsPressed, timesPressed = zip(*evs)
                # it's highly unlikely that two buttons are pressed in a single
                # frame, but control for that anyway.
                allResponses.append((buttonsPressed[0], timesPressed[0]))
                numberOfResponses += 1
                buttonpressed = True
                # LOG this event... (i.e. send trigger)

                # handle event:
                if buttonsPressed[0] == BUTTONS[0]:
                    send_event('RR')
                elif buttonsPressed[0] == BUTTONS[1]:
                    send_event('RL')

            # once a button is pressed -- display fixation point again.
            if len(allResponses) > 0 and not responded:
                # 'clear' the visual window --> fixation cross, again:
                goNogoStimContainer[0] = sstims['fix']
                responded = True

            # if it's a stop trial, then make the arrow red after X time
            if thisTrialType == STOP and not responded:
                if currentTime > thisSSD:
                    goNogoStimContainer[0] = sstims[thisDirection + 'r']

        
            # taking care of the button press itself, as soon as a button is pressed:
            if not trialHandled and buttonpressed:
                RTime = allResponses[0][1]
                buttonPressed = allResponses[0][0]

                if RTime < tooSoonTime:
                    trialOutcome = 'PressedTooSoon'
                    trialHandled = True
                    myMultiStairs.addResponse(0)
                else:
                    if thisTrialType == STOP:
                        if buttonPressed == correctResponseSide:
                            trialOutcome = 'ErrorCommission'
                            trialHandled = True
                            # ...aaand... of course, add the response to the Staircase Handler.
                            myMultiStairs.addResponse(0)
                        elif buttonPressed == wrongResponseSide:
                            trialOutcome = 'WrongSideErrorCommission'
                            trialHandled = True
                            myMultiStairs.addResponse(0)

                    elif thisTrialType == GO:
                        if buttonPressed == correctResponseSide:
                            trialOutcome = 'Go' + correctResponseSide
                            trialHandled = True
                            triggerSentGo = False  # not yet...
                        elif buttonPressed == wrongResponseSide:
                            trialOutcome = 'WrongSideGo'
                            trialHandled = True
                            myMultiStairs.addResponse(0)

                # something happened --> so send an event!
                eventHandler.handle(trialOutcome)

            # here we wait...
            yield From(asyncio.sleep(0))
    
        
        # AFTER 1.0 seconds we should be here... out of the while loop:
        # handle the 'response' if the button was NOT pressed:
        if not trialHandled and not buttonpressed:
            if thisTrialType == GO:
                trialOutcome = 'ErrorOmission'
                trialHandled = True
                myMultiStairs.addResponse(0)

            if thisTrialType == STOP:
                trialOutcome = 'Stop' + correctResponseSide
                trialHandled = True
                # ...aaand... of course, add the response to the Staircase Handler.
                myMultiStairs.addResponse(1)

        # only when 1 button was pressed, and trialOutcome = 'Go'
        if numberOfResponses > 1:
            tooManyResponses = True

        if trialOutcome == 'Go' + correctResponseSide and not triggerSentGo:
            if numberOfResponses == 1:
                myMultiStairs.addResponse(1)
            else:
                myMultiStairs.addResponse(0)
            triggerSentGo = True

        # obtain our next clock...
        # this code tells the loop to only continue when continueRoutine is not False
        # otherwise it'll just keep yielding.
        # let winflipper make new clock
        continueRoutineContainer[0] = False
        nextfliptasks.append([makeNewClock])  # the makeNewClock automatically makes things continue
        while continueRoutineContainer[0] is False:
            yield From(asyncio.sleep(0))
        cl = clockContainer[0]  # obtain the clock that was just made.

        # this is a nice place to save it to the logfile:
        # send a report about the STOP trial, write a nice line:
        logging.data('messa')
        logging.flush()

        # ok, we can proceed -- the clock has been set.
        while cl.getTime() < thisISIWaitTime:
            goNogoStimContainer[0] = sstims['fix']
            yield From(asyncio.sleep(0))
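
Both `handle_visual` and `handle_gonogo` state in their docstrings that they are meant to run inside the asyncio loop. A minimal sketch of how they might be scheduled together, assuming `asyncio` here is the trollius module used throughout these examples and that `win`, the shared containers, and the other arguments are set up elsewhere:

# Hypothetical driver; all argument names are taken from the examples above.
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(
    handle_visual(win),
    handle_gonogo(nextfliptasks, myClocks, continueRoutineContainer,
                  goNogoStimContainer, eventHandler),
))
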
Example 22
 def _get_tshark_process(self, packet_count=None):
     proc = yield From(
         super(PipeCapture,
               self)._get_tshark_process(packet_count=packet_count,
                                         stdin=self._pipe))
     raise Return(proc)
Example 23
def run_server():
    conf = parser.parse_args()
    conf.enable_light_sensor = False
    conf.output_directory = None
    conf.max_lifetime = 999999
    conf.initial_age_mu = 500
    conf.initial_age_sigma = 500

    world = yield From(World.create(conf))
    yield From(world.pause(False))

    trees, bboxes = yield From(world.generate_population(30))
    insert_queue = zip(trees, bboxes)

    for tree, bbox in insert_queue[:15]:
        fut = yield From(birth(world, tree, bbox, None))
        yield From(fut)
        yield From(sleep_sim_time(world, 1.0))

    sim_time_sec = 5.0

    while True:
        bots = []
        for tree, bbox in insert_queue[15:]:
            fut = yield From(birth(world, tree, bbox, None))
            bot = yield From(fut)
            bots.append(bot)
            yield From(sleep_sim_time(world, 1.0))

        print("Inserted all robots")

        before = time.time()
        yield From(sleep_sim_time(world, sim_time_sec))
        after = time.time()

        diff = after - before
        print(sim_time_sec / diff)

        futs = []
        for robot in bots:
            fut = yield From(world.delete_robot(robot))
            futs.append(fut)

        yield From(multi_future(futs))
        yield From(trollius.sleep(0.1))
        print("Deleted all robots")
Example 24
    def append_log(self, log_message, extra_data=None):
        if log_message is None:
            return

        yield From(self._append_log_message(log_message, log_data=extra_data))
Example 25
    def schedule(self, build_job):
        build_uuid = build_job.job_details["build_uuid"]
        logger.debug("Calling schedule with job: %s", build_uuid)

        # Check if there are worker slots available by checking the number of jobs in the orchestrator
        allowed_worker_count = self._manager_config.get("ALLOWED_WORKER_COUNT", 1)
        try:
            active_jobs = yield From(self._orchestrator.get_prefixed_keys(self._job_prefix))
            workers_alive = len(active_jobs)
        except KeyError:
            workers_alive = 0
        except OrchestratorConnectionError:
            logger.exception(
                "Could not read job count from orchestrator for job due to orchestrator being down"
            )
            raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
        except OrchestratorError:
            logger.exception(
                "Exception when reading job count from orchestrator for job: %s", build_uuid
            )
            raise Return(False, RETRY_IMMEDIATELY_SLEEP_DURATION)

        logger.debug("Total jobs (scheduling job %s): %s", build_uuid, workers_alive)

        if workers_alive >= allowed_worker_count:
            logger.info(
                "Too many workers alive, unable to start new worker for build job: %s. %s >= %s",
                build_uuid,
                workers_alive,
                allowed_worker_count,
            )
            raise Return(False, TOO_MANY_WORKERS_SLEEP_DURATION)

        job_key = self._job_key(build_job)

        # First try to take a lock for this job, meaning we will be responsible for its lifeline
        realm = str(uuid.uuid4())
        token = str(uuid.uuid4())
        nonce = str(uuid.uuid4())

        machine_max_expiration = self._manager_config.get("MACHINE_MAX_TIME", 7200)
        max_expiration = datetime.utcnow() + timedelta(seconds=machine_max_expiration)

        payload = {
            "max_expiration": calendar.timegm(max_expiration.timetuple()),
            "nonce": nonce,
            "had_heartbeat": False,
            "job_queue_item": build_job.job_item,
        }

        lock_payload = json.dumps(payload)
        logger.debug(
            "Writing key for job %s with expiration in %s seconds",
            build_uuid,
            EPHEMERAL_SETUP_TIMEOUT,
        )

        try:
            yield From(
                self._orchestrator.set_key(
                    job_key, lock_payload, overwrite=False, expiration=EPHEMERAL_SETUP_TIMEOUT
                )
            )
        except KeyError:
            logger.warning(
                "Job: %s already exists in orchestrator, timeout may be misconfigured", build_uuid
            )
            raise Return(False, EPHEMERAL_API_TIMEOUT)
        except OrchestratorConnectionError:
            logger.exception(
                "Exception when writing job %s to orchestrator; could not connect", build_uuid
            )
            raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
        except OrchestratorError:
            logger.exception("Exception when writing job %s to orchestrator", build_uuid)
            raise Return(False, RETRY_IMMEDIATELY_SLEEP_DURATION)

        # Got a lock, now lets boot the job via one of the registered executors.
        started_with_executor = None
        execution_id = None

        logger.debug("Registered executors are: %s", [ex.name for ex in self._ordered_executors])
        for executor in self._ordered_executors:
            # Check if we can use this executor based on its whitelist, by namespace.
            namespace = build_job.namespace
            if not executor.allowed_for_namespace(namespace):
                logger.debug(
                    "Job %s (namespace: %s) cannot use executor %s",
                    build_uuid,
                    namespace,
                    executor.name,
                )
                continue

            # Check if we can use this executor based on the retries remaining.
            if executor.minimum_retry_threshold > build_job.retries_remaining:
                metric_queue.builder_fallback.Inc()
                logger.debug(
                    "Job %s cannot use executor %s as it is below retry threshold %s (retry #%s)",
                    build_uuid,
                    executor.name,
                    executor.minimum_retry_threshold,
                    build_job.retries_remaining,
                )
                continue

            logger.debug(
                "Starting builder for job %s with selected executor: %s", build_uuid, executor.name
            )

            try:
                execution_id = yield From(executor.start_builder(realm, token, build_uuid))
            except:
                try:
                    metric_queue.build_start_failure.Inc(labelvalues=[executor.name])
                    metric_queue.put_deprecated(
                        ("ExecutorFailure-%s" % executor.name), 1, unit="Count"
                    )
                except:
                    logger.exception(
                        "Exception when writing failure metric for execution %s for job %s",
                        execution_id,
                        build_uuid,
                    )

                logger.exception("Exception when starting builder for job: %s", build_uuid)
                continue

            try:
                metric_queue.build_start_success.Inc(labelvalues=[executor.name])
            except:
                logger.exception(
                    "Exception when writing success metric for execution %s for job %s",
                    execution_id,
                    build_uuid,
                )

            try:
                metric_queue.ephemeral_build_workers.Inc()
            except:
                logger.exception(
                    "Exception when writing start metrics for execution %s for job %s",
                    execution_id,
                    build_uuid,
                )

            started_with_executor = executor

            # Break out of the loop now that we've started a builder successfully.
            break

        # If we didn't start the job, cleanup and return it to the queue.
        if started_with_executor is None:
            logger.error("Could not start ephemeral worker for build %s", build_uuid)

            # Delete the associated build job record.
            yield From(self._orchestrator.delete_key(job_key))
            raise Return(False, EPHEMERAL_API_TIMEOUT)

        # Job was started!
        logger.debug(
            "Started execution with ID %s for job: %s with executor: %s",
            execution_id,
            build_uuid,
            started_with_executor.name,
        )

        # Store metric data
        metric_spec = json.dumps(
            {"executor_name": started_with_executor.name, "start_time": time.time(),}
        )

        try:
            yield From(
                self._orchestrator.set_key(
                    self._metric_key(realm),
                    metric_spec,
                    overwrite=False,
                    expiration=machine_max_expiration + 10,
                )
            )
        except KeyError:
            logger.error(
                "Realm %s already exists in orchestrator for job %s "
                + "UUID collision or something is very very wrong.",
                realm,
                build_uuid,
            )
        except OrchestratorError:
            logger.exception(
                "Exception when writing realm %s to orchestrator for job %s", realm, build_uuid
            )

        # Store the realm spec which will allow any manager to accept this builder when it connects
        realm_spec = json.dumps(
            {
                "realm": realm,
                "token": token,
                "execution_id": execution_id,
                "executor_name": started_with_executor.name,
                "job_queue_item": build_job.job_item,
            }
        )

        try:
            setup_time = started_with_executor.setup_time or self.overall_setup_time()
            logger.debug(
                "Writing job key for job %s using executor %s with ID %s and ttl %s",
                build_uuid,
                started_with_executor.name,
                execution_id,
                setup_time,
            )
            yield From(
                self._orchestrator.set_key(
                    self._realm_key(realm), realm_spec, expiration=setup_time
                )
            )
        except OrchestratorConnectionError:
            logger.exception(
                "Exception when writing realm %s to orchestrator for job %s", realm, build_uuid
            )
            raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
        except OrchestratorError:
            logger.exception(
                "Exception when writing realm %s to orchestrator for job %s", realm, build_uuid
            )
            raise Return(False, setup_time)

        logger.debug(
            "Builder spawn complete for job %s using executor %s with ID %s ",
            build_uuid,
            started_with_executor.name,
            execution_id,
        )
        raise Return(True, None)
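
`schedule` reports its outcome as a `(scheduled, sleep_duration)` pair via `raise Return(...)`, which is the tuple the test in Example 3 unpacks through `result[0]`. A hedged sketch of how a polling caller might consume it (`schedule_with_retry`, `manager`, and `build_job` are illustrative names, not part of the original code):

import trollius
from trollius import From, coroutine

@coroutine
def schedule_with_retry(manager, build_job):
    # Hypothetical caller loop: back off for the suggested duration whenever
    # schedule() declines the job, and stop as soon as it is accepted.
    while True:
        scheduled, sleep_duration = yield From(manager.schedule(build_job))
        if scheduled:
            return
        yield From(trollius.sleep(sleep_duration or 1))
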
Example 26
def test1(connection):
    """ Del/get/set of keys """
    yield From(connection.delete([u'key']))
    yield From(connection.set(u'key', u'value'))
    result = yield From(connection.get(u'key'))
    assert result == u'value'
Example 27
 def _close_async(self):
     for process in self.running_processes:
         yield From(self._cleanup_subprocess(process))
     self.running_processes.clear()
Example 28
 def query_ipv6():
     answers = yield From(
         event_loop.run_in_executor(None, self._query, host, 'AAAA'))
     results.extend((socket.AF_INET6, (answer.address, port))
                    for answer in answers)
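
The snippet above resolves AAAA records off the event-loop thread via `run_in_executor`; a matching IPv4 variant would look like the sketch below, under the same assumptions about the enclosing scope (`event_loop`, `self._query`, `host`, `port`, `results`, and an imported `socket` module):

def query_ipv4():
    # Hypothetical IPv4 counterpart, mirroring query_ipv6 above.
    answers = yield From(
        event_loop.run_in_executor(None, self._query, host, 'A'))
    results.extend((socket.AF_INET, (answer.address, port))
                   for answer in answers)
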
Example 29
 def putter():
     for i in range(3):
         yield From(q.put(i))
         have_been_put.append(i)
     raise Return(True)
Example 30
def get_stats(agent):
    yield From(agent.run_event.wait())
    """
    After plugin installation, copy the configuration file
    from sentinella-plugin-template/conf/ to /etc/sentinella/conf.d/
    """
    config = agent.config['test']
    """
    The plugin Key is a unique UUID provided by Sentinel.la.
    If not valid, the plugin metrics will never be registered.
    """

    plugin_key = config['plugin_key']

    logger.info(
        'starting "get_stats" task for plugin_key "{}" and host "{}"'.format(
            plugin_key, hostname))

    while agent.run_event.is_set():
        yield From(asyncio.sleep(frequency))
        try:
            data = {'server_name': hostname, 'plugins': {}}
            logger.debug('connecting to data source')

            # [START] To be completed with plugin code
            # Here goes your logic
            """
            Add dict into list plugins:
            value : Metric value
            type : Metric type (integer,percent,binary)

            Replace metric_1, metric_2, metric_3 to your metrics or add more :)
            """
            data['plugins'].update({"{}".format(plugin_key): {}})

            data['plugins'][plugin_key].update({
                "metric_1": {
                    "row_name": "Row name 1",
                    "metric_name": "metric1",
                    "value": 100,
                    "type": "integer"
                }
            })

            data['plugins'][plugin_key].update({
                "metric_2": {
                    "row_name": "Row name 2",
                    "metric_name": "metric2",
                    "value": 3.5,
                    "type": "percent"
                }
            })

            data['plugins'][plugin_key].update({
                "metric_3": {
                    "row_name": "Row name 3",
                    "metric_name": "metric3",
                    "value": 1,
                    "type": "binary"
                }
            })

            logger.debug('{}: myplugin={}%'.format(hostname, data))

            yield From(agent.async_push(data))

        except:

            logger.exception('cannot get data source information')

    logger.info('get_stats terminated')