def get_imu_data_single(self, expected_params=10):
        """
        Receives IMU-data from the Arduino and extracts it
        :param expected_params: number of comma-separated values expected per line
        :return: accelerometer_data, gyroscope_data, magnetometer_data, timestamp
        """
        self.last_time = time.time()
        while self.serial_port.in_waiting < 1 and not rospy.is_shutdown():
            if time.time() - self.last_time > self.timeout:
                raise TimeoutError("Arduino timed out")
        items = []
        while len(items) != expected_params and not rospy.is_shutdown():
            if time.time() - self.last_time > self.timeout:
                raise TimeoutError("Arduino timed out")

            data = list(bytearray(self.serial_port.readline()))
            string = ''.join(chr(i) for i in data)
            items = string.strip().split(',')
        raw = [float(s) for s in items]

        acc_data = raw[0:3]
        gyro_data = raw[3:6]
        mag_data = raw[6:9]
        t = raw[9]

        acc_data = [acc_data[1], -acc_data[0], acc_data[2]]
        gyro_data = [gyro_data[1], -gyro_data[0], gyro_data[2]]
        mag_data = [mag_data[1], -mag_data[0], mag_data[2]]
        return acc_data, gyro_data, mag_data, t
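For reference, the parsing step above can be exercised on its own without the serial port or ROS. The sample line below is hypothetical, assuming the Arduino sends 3 accelerometer, 3 gyroscope and 3 magnetometer values plus a timestamp, comma separated:

# Minimal sketch of the parsing done above, without serial I/O or ROS.
# The sample line is illustrative, not taken from the Arduino sketch.
sample_line = b"0.01,-0.02,9.81,0.001,0.002,0.003,25.0,-12.0,40.0,1234.5\n"

items = sample_line.decode("ascii").strip().split(",")
raw = [float(s) for s in items]

acc_data, gyro_data, mag_data, t = raw[0:3], raw[3:6], raw[6:9], raw[9]
# Axis remap as in the method above: swap x/y and negate the new y component.
acc_data = [acc_data[1], -acc_data[0], acc_data[2]]
print(acc_data, t)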
Example No. 2
    def __init__(self, mul_var_distribution, return_period=25, state_duration=3, timeout=1e6,
                 *args, **kwargs):
        """

        Parameters
        ----------
        mul_var_distribution : MultivariateDistribution
            The distribution to be used to calculate the contour.
        Raises
        ------
        TimeoutError,
            If the calculation takes too long and the given value for timeout is exceeded.
        """
        self.distribution = mul_var_distribution
        self.coordinates = None

        self.state_duration = state_duration
        self.return_period = return_period
        self.alpha = state_duration / (return_period * 365.25 * 24)

        # Use multiprocessing to define a timeout
        pool = Pool(processes=1)
        res = pool.apply_async(self._setup, args, kwargs)
        try:
            computed = res.get(timeout=timeout)
        except TimeoutError:
            err_msg = "The calculation takes too long. " \
                      "It takes longer than the given value for" \
                      " a timeout, which is '{} seconds'.".format(timeout)
            raise TimeoutError(err_msg)
        # Save the results separated
        self._save(computed)
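The timeout mechanism in this constructor reduces to a small standalone pattern: run the work in a worker process via multiprocessing.Pool.apply_async and bound the wait with AsyncResult.get(timeout=...). A minimal sketch, where slow_setup is an illustrative stand-in for self._setup:

from multiprocessing import Pool, TimeoutError
import time

def slow_setup(duration):
    """Stand-in for an expensive contour calculation."""
    time.sleep(duration)
    return "done"

if __name__ == "__main__":
    pool = Pool(processes=1)
    res = pool.apply_async(slow_setup, (5,))
    try:
        # Raises multiprocessing.TimeoutError once 1 second has elapsed.
        computed = res.get(timeout=1)
    except TimeoutError:
        raise TimeoutError("The calculation takes longer than the 1 second timeout.")
    finally:
        pool.terminate()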
Example No. 3
 def test_list_ex_timeout(self, mock_pool):
     """test TimeoutError exception with list operation"""
     config = {'apply_async.return_value.get.side_effect': TimeoutError()}
     mock_pool.return_value.configure_mock(**config)
     msg = 'fakeregion: Timeout while retrieving image list.'
     with self.assertRaisesRegexp(GlanceFacadeException, msg):
         self.facade.get_imagelist(self.region_obj)
Example No. 4
    def wait_until_exists(self, namespace, app_name, delay=10, timeout=600):
        """Wait until exist.

        Waits until the deployment called 'app_name' exists in
        Kubernetes regardless of state.

        :param int delay: the time in seconds between each pull
        :param int timeout: timeout in seconds until time out exception will raised
        :param str namespace:
        :param str app_name:
        """
        query_selector = self._prepare_deployment_default_label_selector(app_name)

        start_time = time.time()

        while True:
            result = self._clients.apps_api.list_namespaced_deployment(
                namespace=namespace, label_selector=query_selector
            ).items
            if not result:
                return
            if time.time() - start_time >= timeout:
                raise TimeoutError(
                    "Timeout: Waiting for deployment {} to be deleted".format(app_name)
                )
            time.sleep(delay)
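The poll/compare-elapsed-time/sleep loop above recurs in several of these examples; a generic sketch of the pattern, with illustrative names that are not part of the Kubernetes client:

import time

def wait_for(condition, delay=10, timeout=600, what="condition"):
    """Poll condition() every `delay` seconds until it returns True,
    raising TimeoutError after `timeout` seconds."""
    start_time = time.time()
    while True:
        if condition():
            return
        if time.time() - start_time >= timeout:
            raise TimeoutError("Timeout: waiting for {}".format(what))
        time.sleep(delay)

# Usage sketch (list_deployments is hypothetical):
# wait_for(lambda: not list_deployments(), what="deployment to be deleted")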
Example No. 5
 def move_other_frame(self, target_pose, frame='camera_link', retry=True):
     if self.enabled:
         target_pose = self.cam_pose_to_base_pose(target_pose, frame)
         target_pose = transform_pose('map', target_pose)
         target_pose.pose.position.z = 0
         self.goal_pub.publish(target_pose)
         goal = MoveBaseGoal()
         goal.target_pose = target_pose
         if self.knowrob is not None:
             self.knowrob.start_base_movement(self.knowrob.pose_to_prolog(target_pose))
         while True:
             self.client.send_goal(goal)
             wait_result = self.client.wait_for_result(rospy.Duration(self.timeout))
             result = self.client.get_result()
             state = self.client.get_state()
             if wait_result and state == GoalStatus.SUCCEEDED:
                 break
             if retry:
                 cmd = raw_input('base movement did not finish in time, retry? [y/n]')
                 retry = cmd == 'y'
             if not retry:
                 print('movement did not finish in time')
                 if self.knowrob is not None:
                     self.knowrob.finish_action()
                 raise TimeoutError()
         if self.knowrob is not None:
             self.knowrob.finish_action()
         return result
Example No. 6
    def multi_wait(self, instances, state, cancellation_context=None):
        """# noqa
        Will sync wait for the change of state of the instance
        :param instances:
        :param str state:
        :param CancellationContext cancellation_context:
        :return:
        """
        if not instances:
            raise ValueError("Instance cannot be null")
        if state not in self.INSTANCE_STATES:
            raise ValueError("Unsupported instance state")

        instance_ids = {str(instance.id) for instance in instances}

        start_time = time.time()
        last_item = 0
        while len(instances) - last_item:
            instance = instances[last_item]
            if instance.state["Name"] != state:
                self._reload_instance(instance)
                if time.time() - start_time >= self.timeout:
                    raise TimeoutError(
                        f"Timeout: Waiting for instance to be {state}, "
                        f"current state: {instance.state['Name']}"
                    )
                time.sleep(self.delay)
            else:
                last_item += 1

            self.cancellation_service.check_if_cancelled(
                cancellation_context, {"instance_ids": instance_ids}
            )

        return instances
Example No. 7
    def multi_wait(self, instances, state, cancellation_context=None):
        """
        Will sync wait for the change of state of the instance
        :param instances:
        :param str state:
        :param CancellationContext cancellation_context:
        :return:
        """
        if not instances:
            raise ValueError('Instance cannot be null')
        if state not in self.INSTANCE_STATES:
            raise ValueError('Unsupported instance state')

        instance_ids = [str(instance.id) for instance in instances]

        start_time = time.time()
        last_item = 0
        while len(instances) - last_item:
            instance = instances[last_item]
            if instance.state['Name'] != state:
                self._reload_instance(instance)
                if time.time() - start_time >= self.timeout:
                    raise TimeoutError(
                        'Timeout: Waiting for instance to be {0}, '
                        'current state: {1}'.format(state, instance.state['Name']))
                time.sleep(self.delay)
            else:
                last_item += 1

            self.cancellation_service.check_if_cancelled(
                cancellation_context, {'instance_ids': instance_ids})

        return instances
Example No. 8
    def run(self, command):
        """Method to run an arbitrary command and pipe the log output to a file.
        Uses subprocess.check_call to properly execute and catch if any errors
        occur.

        :param command: Arbitary command to run
        """
        with open(self._logf, 'a+b') as logf:
            logf.write("\n%s : %s\n" %
                       (datetime.datetime.now(), " ".join(command)))
            logf.flush()
        proc = Process(target=fork, args=(self._logf, command))
        proc.start()
        proc.join(self.timeout)
        if proc.is_alive():
            with open(self._logf, 'a+b') as logf:
                logf.write("\n%s : Command still running after %s seconds" %
                           (datetime.datetime.now(), self.timeout))
                logf.flush()

            kill_comm = copy(self.killbase)
            kill_comm.append(" ".join(command))
            self.run(kill_comm)
            proc.terminate()
            proc.join(self.timeout)

            raise TimeoutError("Command was still running after %s "\
                               "seconds" % self.timeout)
        elif proc.exitcode != 0:
            raise sub.CalledProcessError(
                int(proc.exitcode),
                "Command returned non 0 exit code %s" % proc.exitcode)
Example No. 9
 def wrap_future_result(future, timeout=None):
     """Wrapper for Future.result to implement the same behaviour as
     AsyncResults.get from multiprocessing."""
     try:
         return future.result(timeout=timeout)
     except LokyTimeoutError:
         raise TimeoutError()
Example No. 10
 def _check(self):
     with AmqpConnection() as channel:
         try:
             msg = channel.basic_get(queue=self.result_queue, no_ack=True)
         except amqp.exceptions.NotFound:
             raise KeyError(self.result_queue)
         if msg is not None:
             return msg
     # No message was available yet, so treat it as an immediate timeout.
     raise TimeoutError(0.0)
Example No. 11
    def join(self, timeout=None):
        """Wait until the thread terminates.

                This blocks the calling thread until the thread whose join() method is
                called terminates -- either normally or through an unhandled exception
                or until the optional timeout occurs.

                When the timeout argument is present and not None, it should be a
                floating point number specifying a timeout for the operation in seconds
                (or fractions thereof). As join() always returns None, you must call
                isAlive() after join() to decide whether a timeout happened -- if the
                thread is still alive, the join() call timed out.

                When the timeout argument is not present or None, the operation will
                block until the thread terminates.
                """
        if timeout is not None:
            logger.debug(
                f'Waiting for {self.name} thread {self.target} target for {timeout} seconds...'
            )
        else:
            logger.warning(
                f'WARNING: joining {self.name} thread without timeout')

        threading.Thread.join(self, timeout)
        if self.is_alive():
            raise TimeoutError(
                f'Timeout of {timeout}s for thread {self.name} join occurred. '
                f'Target: {self.target}, args: {self.args}, kwargs: {self.kwargs}'
            )
        self.join_with_exception()
        return self.result
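The docstring's point that join() never signals a timeout by itself can be shown with a plain threading.Thread; a minimal sketch with an illustrative worker that overruns its deadline:

import threading
import time

def worker():
    time.sleep(5)   # simulate a task that overruns its deadline

t = threading.Thread(target=worker, name="worker")
t.start()
t.join(timeout=1)           # join() itself never raises on timeout
if t.is_alive():            # so the caller must check is_alive() afterwards
    raise TimeoutError("Timeout of 1s for thread %s join occurred." % t.name)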
Example No. 12
 def score(game, player):
     if timer.time_left() < 0:
         raise TimeoutError("Timer expired during search. You must " +
                            "return an answer before the timer reaches 0.")
     if limit == game.counts[0]:
         timer.time_limit = 0
     return 0
Example No. 13
 def score(self, game, player):
     # print self.limit
     if self.limit == game.counts[0]:
         self.dv.val = 0
     elif self.timer() < 0:
         raise TimeoutError("Timer expired during search. You must " + \
                         "return an answer before the timer reaches 0.")
     return 0
Example No. 14
def signalHandler(signum, frame):
    """
  def signalHandler
  Signal handler for SIGARLM/SIGINT during timeout
  """

    if signum == signal.SIGALRM:
        raise TimeoutError("SIGALRM received - process has timed out")
    elif signum == signal.SIGINT:
        logging.critical("SIGINT received - shutting down worker")
Example No. 15
    def get(self, timeout=0):  # @UnusedVariable
        if self.result is None:
            try:
                self.result = self.result_queue.get(block=True,
                                                    timeout=timeout)
            except Empty as e:
                raise TimeoutError(e)

        check_isinstance(self.result, dict)
        result_dict_raise_if_error(self.result)
        return self.result
Example No. 16
def query_with_timeout(dbc, timeout, query, *a, **k):
    conn1, conn2 = multiprocessing.Pipe(False)
    subproc = multiprocessing.Process(target=do_query,
                                      args=(dbc, query, conn2) + a,
                                      kwargs=k)
    subproc.start()
    subproc.join(timeout)
    if conn1.poll():
        return conn1.recv()
    subproc.terminate()
    raise TimeoutError("Query %r ran for >%r" % (query, str(timeout)))
Example No. 17
    def get(self, timeout=0):
        if not self.direct_view or not self.uuid:
            raise TimeoutError()

        if self.results:
            return self.results

        self.results = self.direct_view.apply(get_result, self.uuid,
                                              timeout).get()
        self.finished = True
        return self.results
Example No. 18
 def quick_get(self, timeout=None):
     begin = time()
     while True:
         if timeout is not None and time() - begin > timeout:
             raise TimeoutError("Timeout")
         for queue in self.queues:
             try:
                 obj = queue.get(timeout=1e-3)
                 return obj
             except TimeoutError:
                 continue
Example No. 19
    def recv_bytes(self, timeout=None):
        """
        Receive bytes from the connection pipe.

        Raises:
            TimeoutError if timeout.
        """
        if self.conn.poll(timeout=timeout):
            return self.conn.recv_bytes()
        else:
            raise TimeoutError("Timeout")
Example No. 20
def _send_multi(project_id, destinations, snap_src, snap_dest, timeout, force):
    if snap_dest == snap_src:
        log.debug("no update to %s needed", destinations)
        return

    dataset = dataset_name(project_id)
    t0 = time.time()

    tmp = '/tmp/.storage-%s-%s' % (project_id, uuid4())

    if force:
        force = "-F"
    else:
        force = ''
    try:
        cmd("sudo zfs send -RD %s %s %s  %s > %s" %
            ('-i' if snap_dest else '', snap_dest, snap_src, compress1, tmp))
        diff_size = os.path.getsize(tmp)
        diff_size_mb = diff_size / 1000000.0
        send_timeout = 60 + int(diff_size_mb * 2)
        log.info("%sM of data to send (send_timeout=%s seconds)", diff_size_mb,
                 send_timeout)
        work = []
        for dest in destinations:
            if ip_address(dest) == dest:
                log.info("send to self: nothing to do")
            else:
                work.append((tmp, dest, force, dataset))
        if len(work) > 0:
            pool = Pool(processes=len(work))
            x = pool.imap(mp_send_multi_helper, work)
            start = time.time()
            while True:
                try:
                    elapsed_time = time.time() - start
                    t = timeout - elapsed_time
                    if t > 0:
                        x.next(timeout=t)
                    else:
                        raise TimeoutError("ran out of time before next fetch")
                except TimeoutError as mesg:
                    log.info("timed out connecting to some destination -- %s",
                             mesg)
                    pool.terminate()
                    break
                except StopIteration:
                    break
    finally:
        try:
            os.unlink(tmp)
        except:
            pass
    log.info("done (time=%s seconds)", time.time() - t0)
Example No. 21
 def __init__(self, sld, is_robots_compliant, proxy_manager_client,
              fetcher_id):
     self.proxy_manager_client = proxy_manager_client
     self.is_robots_compliant = is_robots_compliant
     self.fetcher_id = fetcher_id
     self.domain = sld
     self.session = None
     self.proxy_id = None
     self.proxy_endpoint_id = None
     self.timeout = Timeout(
         RESPONSE_TIMEOUT,
         TimeoutError(
             "Proxy failed to complete request within %s seconds." %
             RESPONSE_TIMEOUT))
Example No. 22
    def wait_until_all_replicas_ready(
        self, namespace, app_name, deployed_app_name, delay=10, timeout=120
    ):
        """Wait until ready.

        :param str namespace:
        :param str app_name:
        :param str deployed_app_name:
        :param int delay:
        :param int timeout:
        :return:
        """
        start_time = time.time()
        while True:
            deployment = self.get_deployment_by_name(namespace, app_name)

            if not deployment:
                raise ValueError("Something went wrong. Deployment {} not found.")

            # check if all replicas are ready
            if deployment.spec.replicas == deployment.status.ready_replicas:
                # all replicas are ready - success
                return

            if time.time() - start_time >= timeout:
                try:
                    query_selector = self._prepare_deployment_default_label_selector(
                        app_name
                    )
                    pods = self._clients.core_api.list_namespaced_pod(
                        namespace=namespace, label_selector=query_selector
                    ).items
                    self._logger.error("Deployment dump:")
                    self._logger.error(str(deployment))
                    self._logger.error("Pods dump:")
                    self._logger.error(str(pods))
                except Exception:
                    self._logger.exception(
                        "Failed to get more data about pods and "
                        "deployment for deployed app {}".format(deployed_app_name)
                    )

                raise TimeoutError(
                    "Timeout waiting for {} replicas to be ready for deployed app {}. "
                    "Please look at the logs for more information".format(
                        deployment.status.replicas, deployed_app_name
                    )
                )

            time.sleep(delay)
Example No. 23
def multiprocessing_run(func: Callable,
                        func_args: list,
                        func_log_name: str,
                        timeout: Union[int, None] = None):
    """
    Wraps callable object to a separate process using multiprocessing module
    :param func: callable object
    :param func_args: list of arguments for callable
    :param func_log_name: name of callable used for logging
    :param timeout: positive int to limit execution time
    :return: return value (or values) from callable object
    """
    queue = Queue()
    logger_queue = Queue(-1)
    process = Process(target=_mp_wrapped_func,
                      args=(func, func_args, queue, logger_queue))
    process.start()
    try:
        error_message, *ret_args = queue.get(timeout=timeout)
    except QueueEmpty:
        raise TimeoutError(
            "{func} running timed out!".format(func=func_log_name))
    finally:
        queue.close()

        # Extract logs from Queue and pass to root logger
        while not logger_queue.empty():
            rec = logger_queue.get()
            log.getLogger().handle(rec)
        logger_queue.close()

        if process.is_alive():
            process.terminate()
            process.join()
        else:
            exit_signal = multiprocessing_exitcode_to_signal(process.exitcode)
            if exit_signal:
                raise ProcessError(
                    "{func} was killed with a signal {signal}".format(
                        func=func_log_name, signal=exit_signal))

    if error_message:
        raise ProcessError("\n{func} running failed: \n{msg}".format(
            func=func_log_name, msg=error_message))

    ret_args = ret_args[0] if len(
        ret_args
    ) == 1 else ret_args  # unwrap from list if only 1 item is returned
    return ret_args
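A possible usage sketch for the wrapper above, assuming _mp_wrapped_func ends up calling func(*func_args); slow_add and the two-second limit are illustrative:

import time

def slow_add(a, b):
    time.sleep(1)
    return a + b

if __name__ == "__main__":
    # Finishes well inside the limit and returns 3; a timeout of less than
    # one second would raise TimeoutError instead.
    result = multiprocessing_run(slow_add, [1, 2], "slow_add", timeout=2)
    print(result)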
Example No. 24
    def run(self, timeout=None, **kwargs):
        def target(**kwargs):
            self.process = subprocess.Popen(self.cmd, **kwargs)
            self.process.communicate()

        thread = threading.Thread(target=target, kwargs=kwargs)
        thread.start()

        thread.join(timeout)
        if thread.is_alive():
            self.process.terminate()
            thread.join()
            raise TimeoutError()

        return self.process.returncode
Example No. 25
		def testWrapper(self):
			queue = Queue()

			try:
				p = Thread(target=handler, args=(self, testcase, queue))
				p.daemon = True
				p.start()
				err, res = queue.get(timeout=time_limit)
				p.join()
				if err:
					raise err[0](err[1]).with_traceback(err[2])
				return res
			except QueueEmptyError:
				raise TimeoutError("Test aborted due to timeout. Test was " +
				                   "expected to finish in less than {} second(s).".format(time_limit))
Example No. 26
def render(html):
    """Perform render in a new process to prevent hangs."""

    tries = 3

    for _ in range(tries):
        pool = Pool(1)
        try:
            return pool.apply_async(_render, args=(html, )).get(timeout=120)
        except TimeoutError:
            continue
        finally:
            pool.terminate()

    raise TimeoutError('Timed out attempting to render HTML %d times' % tries)
Example No. 27
 def tsschecker(ecid, product_type, hw_model, ios_version, ios_build,
                ap_nonce, is_ota):
     cmd = 'tsschecker'
     cmd = cmd + ' -e ' + ecid
     cmd = cmd + ' -d ' + product_type
     cmd = cmd + ' -i ' + ios_version
     if len(hw_model) > 0:
         cmd = cmd + ' -B ' + hw_model
     if len(ios_build) > 0:
         cmd = cmd + ' --buildid ' + ios_build
     if len(ap_nonce) > 0:
         cmd = cmd + ' --apnonce ' + ap_nonce
     if is_ota:
         cmd = cmd + ' -o'
     cmd = cmd + ' -s'
     save_prefix = 'resources/'
     save_path = 'shsh2/' + ecid + '/'
     try:
          os.mkdir(save_prefix + save_path, 0o755)
     except:
         pass
     if len(ios_build) > 0:
         save_path = save_path + ios_version + '-' + ios_build + '/'
     else:
         save_path = save_path + ios_version + '/'
     try:
          os.mkdir(save_prefix + save_path, 0o755)
     except:
         pass
     cmd = cmd + ' --save-path ' + save_prefix + save_path
     print('[Execute] ' + cmd)
     timeout = 30
     res = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
      t_beginning = time.time()
      while True:
          if res.poll() is not None:
              break
          seconds_passed = time.time() - t_beginning
          if timeout and seconds_passed > timeout:
              res.terminate()
              raise TimeoutError(cmd, timeout)
          time.sleep(0.1)
      # Read the output only after the process has exited; calling
      # communicate() before the loop would block until completion and
      # bypass the timeout check above.
      (sout, serr) = res.communicate()
      return res.returncode, sout, serr, res.pid, save_path
Example No. 28
 def move_absolute(self, target_pose):
     if self.enabled:
         self.goal_pub.publish(target_pose)
         goal = MoveBaseGoal()
         goal.target_pose = target_pose
         if self.knowrob is not None:
             self.knowrob.start_base_movement(self.knowrob.pose_to_prolog(target_pose))
         self.client.send_goal(goal)
         wait_result = self.client.wait_for_result(rospy.Duration(self.timeout))
         result = self.client.get_result()
         state = self.client.get_state()
         if self.knowrob is not None:
             self.knowrob.finish_action()
         if not wait_result or state != GoalStatus.SUCCEEDED:
             print('movement did not finish in time')
             # self.STOP()
             raise TimeoutError()
         return result
Example No. 29
    def join(self, timeout=None):
        if timeout is not None:
            logger.debug(
                'Waiting for %s thread %s target for %s seconds...' % (
                    self.name, self.target, timeout))
        else:
            logger.debug(
                'WARNING: joining %s thread without timeout' % self.name)

        threading.Thread.join(self, timeout)
        if self.is_alive():
            raise TimeoutError(
                'Timeout of %ss for thread %s join occurred. '
                'Target: %s, args: %s, kwargs: %s' % (
                    timeout, self.name, self.target,
                    self.args, self.kwargs))
        self.join_with_exception()
        return self.result
Example No. 30
    def _wait(self, timeout):
        start_time = time.time()
        msgs = deque()

        def msg_callback(msg):
            msgs.append(msg)

        with AmqpConnection() as channel:
            channel.basic_qos(0, 1, False)
            try:
                tag = channel.basic_consume(queue=self.result_queue,
                                            callback=msg_callback)
            except amqp.exceptions.NotFound:
                raise KeyError(self.result_queue)
            while channel.callbacks:
                elapsed = time.time() - start_time
                cur_timeout = 10.0
                if timeout is not None:
                    remaining = timeout - elapsed
                    cur_timeout = max(min(remaining, 10.0), 0.0)
                try:
                    channel.connection.drain_events(timeout=cur_timeout)
                except socket_timeout:
                    channel.connection.send_heartbeat()
                except socket_error as exc:
                    if exc.errno != errno.EAGAIN:
                        raise
                while len(msgs):
                    msg = msgs.popleft()
                    try:
                        yield msg
                    except GeneratorExit:
                        channel.basic_cancel(tag)
                        raise
                    finally:
                        channel.basic_ack(msg.delivery_tag)
                if timeout is not None and remaining <= 0.0:
                    break
        raise TimeoutError(timeout)
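The deadline arithmetic in the loop above (wait at most 10 seconds per drain_events call, but never past the overall timeout) can be illustrated on its own:

import time

def remaining_wait(start_time, timeout, cap=10.0):
    """How long the next blocking call may wait without overshooting `timeout`."""
    if timeout is None:
        return cap
    remaining = timeout - (time.time() - start_time)
    return max(min(remaining, cap), 0.0)

start = time.time()
print(remaining_wait(start, timeout=25.0))   # 10.0 (capped per iteration)
time.sleep(0.2)
print(remaining_wait(start, timeout=0.1))    # 0.0 (overall deadline already passed)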