Example #1
 def test_wait_combine(self):
     r = Retrying(wait=tenacity.wait_combine(tenacity.wait_random(0, 3),
                                             tenacity.wait_fixed(5)))
     # Test it a few times since it's random
     for i in six.moves.range(1000):
         w = r.wait(1, 5)
         self.assertLess(w, 8)
         self.assertGreaterEqual(w, 5)
Example #2
 def test_exponential(self):
     r = Retrying(wait=tenacity.wait_exponential(max=100))
     self.assertEqual(r.wait(1, 0), 2)
     self.assertEqual(r.wait(2, 0), 4)
     self.assertEqual(r.wait(3, 0), 8)
     self.assertEqual(r.wait(4, 0), 16)
     self.assertEqual(r.wait(5, 0), 32)
     self.assertEqual(r.wait(6, 0), 64)
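A hedged continuation of the test above (these extra attempt numbers are my own, not part of the original): under the same legacy wait(attempt_number, delay) call style, the max=100 argument clamps the doubling once 2 ** n passes it.

     # Sketch only: later attempts with wait_exponential(max=100)
     self.assertEqual(r.wait(7, 0), 100)  # 2 ** 7 = 128 exceeds max, so it is clamped
     self.assertEqual(r.wait(8, 0), 100)  # and stays at the cap from then on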
Example #3
 def test_wait_triple_sum(self):
     r = Retrying(wait=tenacity.wait_fixed(1) + tenacity.wait_random(0, 3) +
                  tenacity.wait_fixed(5))
     # Test it a few times since it's random
     for i in six.moves.range(1000):
         w = r.wait(1, 5)
         self.assertLess(w, 9)
         self.assertGreaterEqual(w, 6)
Example #4
 def test_stop_backward_compat(self):
     r = Retrying(stop=lambda attempt, delay: attempt == delay)
     with reports_deprecation_warning():
         self.assertFalse(r.stop(make_retry_state(1, 3)))
     with reports_deprecation_warning():
         self.assertFalse(r.stop(make_retry_state(100, 99)))
     with reports_deprecation_warning():
         self.assertTrue(r.stop(make_retry_state(101, 101)))
Example #5
    def test_stop_func_with_retry_state(self):
        def stop_func(retry_state):
            rs = retry_state
            return rs.attempt_number == rs.seconds_since_start

        r = Retrying(stop=stop_func)
        self.assertFalse(r.stop(make_retry_state(1, 3)))
        self.assertFalse(r.stop(make_retry_state(100, 99)))
        self.assertTrue(r.stop(make_retry_state(101, 101)))
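A minimal sketch of my own (not taken from either example): instead of hand-rolling a stop callback as in Examples #4 and #5, tenacity's built-in stop conditions can be combined with the | operator, which stops as soon as either condition triggers.

from tenacity import Retrying, stop_after_attempt, stop_after_delay

# Stop after 100 attempts or after 100 seconds, whichever comes first
r = Retrying(stop=(stop_after_attempt(100) | stop_after_delay(100)))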
Example #6
async def test_upgrading_rules_provider_also_updates_rule_files(
        ops_test, prometheus_tester_charm):
    """Ensure scrape alert rules can be updated.

    This test upgrades the metrics provider charm and checks that
    updates to alert rules are propagated correctly.
    """
    await ops_test.model.deploy(prometheus_tester_charm,
                                resources=tester_resources,
                                application_name=TESTER_APP_NAME)
    await ops_test.model.wait_for_idle(apps=[TESTER_APP_NAME], status="active")
    await ops_test.model.block_until(
        lambda: len(ops_test.model.applications[TESTER_APP_NAME].units) > 0)
    assert ops_test.model.applications[TESTER_APP_NAME].units[
        0].workload_status == "active"

    await ops_test.model.add_relation(PROMETHEUS_APP_NAME, TESTER_APP_NAME)
    await ops_test.model.wait_for_idle(
        apps=[PROMETHEUS_APP_NAME, TESTER_APP_NAME], status="active")

    # Check only one alert rule exists
    tester_rules = []
    for attempt in Retrying(retry=retry_if_exception_type(AssertionError),
                            stop=stop_after_attempt(3)):
        try:
            with attempt:
                rules_with_relation = await get_prometheus_rules(
                    ops_test, PROMETHEUS_APP_NAME, 0)
                tester_rules = get_rules_for(TESTER_APP_NAME,
                                             rules_with_relation)
                assert len(tester_rules) == 1
        except RetryError:
            pass

    # Add new alert rule, rebuild and refresh prometheus tester charm
    write_tester_alert_rule_file(MISSING_TARGET_RULE, "target_missing.rule")
    tester_charm = await rebuild_prometheus_tester(ops_test)
    await ops_test.model.applications[TESTER_APP_NAME].refresh(
        path=tester_charm, resources=tester_resources)
    remove_tester_alert_rule_file("target_missing.rule")

    await ops_test.model.wait_for_idle(
        apps=[PROMETHEUS_APP_NAME, TESTER_APP_NAME], status="active")

    # Check there are now two alert rules
    tester_rules = []
    for attempt in Retrying(retry=retry_if_exception_type(AssertionError),
                            stop=stop_after_attempt(3)):
        try:
            with attempt:
                rules_with_relation = await get_prometheus_rules(
                    ops_test, PROMETHEUS_APP_NAME, 0)
                tester_rules = get_rules_for(TESTER_APP_NAME,
                                             rules_with_relation)
                assert len(tester_rules) == 2
        except RetryError:
            pass
Example #7
 def test_wait_arbitrary_sum(self):
     r = Retrying(wait=sum([tenacity.wait_fixed(1),
                            tenacity.wait_random(0, 3),
                            tenacity.wait_fixed(5),
                            tenacity.wait_none()]))
     # Test it a few times since it's random
     for i in six.moves.range(1000):
         w = r.wait(1, 5)
         self.assertLess(w, 9)
         self.assertGreaterEqual(w, 6)
Example #8
 def __init__(self, strand_api_client):
     self.strand_api_client = strand_api_client
     self.logger = get_logger('StrandApiClientWrapper')
     self.standard_retrier = Retrying(
         reraise=True,
         wait=wait_fixed(2),
         stop=stop_after_attempt(5),
         after=after_log(logger=self.logger,
                         log_level=self.logger.getEffectiveLevel()),
         retry=retry_if_exception_type(StrandApiClientException))
Example #9
 def __init__(self, SlackClientClass):
     self.SlackClientClass = SlackClientClass
     self.logger = get_logger('SlackClientWrapper')
     self.standard_retrier = Retrying(
         reraise=True,
         wait=wait_fixed(2),
         stop=stop_after_attempt(5),
         after=after_log(logger=self.logger,
                         log_level=self.logger.getEffectiveLevel()),
         retry=(retry_if_exception_type(ConnectionError)))
Example #10
    def test_random_sleep(self):
        r = Retrying(wait=tenacity.wait_random(min=1, max=20))
        times = set()
        for x in six.moves.range(1000):
            times.add(r.wait(1, 6546))

        # this is kind of non-deterministic...
        self.assertTrue(len(times) > 1)
        for t in times:
            self.assertTrue(t >= 1)
            self.assertTrue(t < 20)
Example #11
    def test_alibi_detect_cifar10_rclone(self, namespace):
        spec = "../resources/adserver-cifar10-od-rclone.yaml"
        name = "cifar10-od-server-rclone"
        vs_prefix = name

        retry_run(f"kubectl apply -f {spec} -n {namespace}")

        wait_for_deployment(name, namespace)

        time.sleep(AFTER_WAIT_SLEEP)

        with open(self.truck_json) as f:
            data = json.load(f)

        for attempt in Retrying(
                wait=wait_fixed(TENACITY_WAIT),
                stop=stop_after_attempt(TENACITY_STOP_AFTER_ATTEMPT),
        ):
            with attempt:
                r = requests.post(
                    f"http://localhost:8004/{vs_prefix}/",
                    json=data,
                    headers=self.HEADERS,
                )
                j = r.json()

        assert j["data"]["is_outlier"][0] == 0
        assert j["meta"]["name"] == "OutlierVAE"
        assert j["meta"]["detector_type"] == "offline"
        assert j["meta"]["data_type"] == "image"

        with open(self.truck_json_outlier) as f:
            data = json.load(f)

        for attempt in Retrying(
                wait=wait_fixed(TENACITY_WAIT),
                stop=stop_after_attempt(TENACITY_STOP_AFTER_ATTEMPT),
        ):
            with attempt:
                r = requests.post(
                    f"http://localhost:8004/{vs_prefix}/",
                    json=data,
                    headers=self.HEADERS,
                )
                j = r.json()

        assert j["data"]["is_outlier"][0] == 1
        assert j["meta"]["name"] == "OutlierVAE"
        assert j["meta"]["detector_type"] == "offline"
        assert j["meta"]["data_type"] == "image"

        run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #12
    def test_retry_function_object(self):
        """Test that six.wraps doesn't cause problems with callable objects.

        Wrapping such an object raised an error on Py2 because the __name__
        attribute is missing; this was fixed in Py3 but never backported.
        """
        class Hello(object):
            def __call__(self):
                return "Hello"
        retrying = Retrying(wait=tenacity.wait_fixed(0.01),
                            stop=tenacity.stop_after_attempt(3))
        h = retrying.wraps(Hello())
        self.assertEqual(h(), "Hello")
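A tiny hedged illustration of the problem the docstring describes: a callable instance has no __name__ attribute, which is what used to break six.wraps on Py2.

class Hello(object):
    def __call__(self):
        return "Hello"

hasattr(Hello, "__name__")    # True: the class itself has a name
hasattr(Hello(), "__name__")  # False: the instance does not, hence the Py2 failure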
Example #13
 def get_engagement(self, url):
     r = Retrying(
         retry=retry_if_exception_type((
             RateLimitError,
             OSError,
             requests.exceptions.ConnectionError,
             urllib3.exceptions.MaxRetryError,
             urllib3.exceptions.NewConnectionError,
         )),
         before_sleep=self._switch_token,
         wait=wait_fixed(5),
     )
     return r.call(self.api_call, url)
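A side note, as a hedged sketch assuming a reasonably recent tenacity release: Retrying.call() is the deprecated legacy entry point, and calling the retrier object directly is the current form, so the last line above could also read:

     return r(self.api_call, url)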
Example #14
    def test_random_sleep_without_min(self):
        r = Retrying(wait=tenacity.wait_random(max=2))
        times = set()
        times.add(r.wait(1, 6546))
        times.add(r.wait(1, 6546))
        times.add(r.wait(1, 6546))
        times.add(r.wait(1, 6546))

        # this is kind of non-deterministic...
        self.assertTrue(len(times) > 1)
        for t in times:
            self.assertTrue(t >= 0)
            self.assertTrue(t <= 2)
Example #15
    def test_wait_chain(self):
        r = Retrying(wait=tenacity.wait_chain(
            *[tenacity.wait_fixed(1) for i in six.moves.range(2)] +
            [tenacity.wait_fixed(4) for i in six.moves.range(2)] +
            [tenacity.wait_fixed(8) for i in six.moves.range(1)]))

        for i in six.moves.range(10):
            w = r.wait(i + 1, 1)
            if i < 2:
                self._assert_range(w, 1, 2)
            elif i < 4:
                self._assert_range(w, 4, 5)
            else:
                self._assert_range(w, 8, 9)
Example #16
 def _wait_for_ready_cni(self, timeout=900):
     self.logging.info(
         "Waiting up to %.2f minutes for ready CNI on the Windows agents",
         timeout / 60.0)
     win_node_addresses = self.deployer.windows_private_addresses
     local_script_path = os.path.join(self.e2e_runner_dir,
                                      "scripts/confirm-ready-cni.ps1")
     remote_script_path = "/tmp/confirm-ready-cni.ps1"
     self._upload_to(local_script_path, remote_script_path,
                     win_node_addresses)
     for attempt in Retrying(stop=stop_after_delay(timeout),
                             wait=wait_exponential(max=30),
                             retry=retry_if_exception_type(AssertionError),
                             reraise=True):
         with attempt:
             cni_ready = True
             for node_address in win_node_addresses:
                 try:
                     stdout = subprocess.check_output(
                         ["ssh", node_address, remote_script_path],
                         timeout=30)
                 except Exception:
                     cni_ready = False
                     break
                 cni_ready = strtobool(stdout.decode().strip())
                 if not cni_ready:
                     break
             assert cni_ready
     self.logging.info("The CNI is ready on all the Windows agents")
Example #17
    def test_alibi_explain_anchor_image_tensorflow_protocol(self, namespace):
        spec = "../resources/tf_cifar_anchor_image_explainer.yaml"
        name = "cifar10-default-explainer"
        vs_prefix = (f"seldon/{namespace}/cifar10-explainer/default/v1/models/"
                     f"cifar10-classifier:explain")
        retry_run(f"kubectl apply -f {spec} -n {namespace}")

        wait_for_deployment(name, namespace)

        # note: we add a batch dimension, but it should really be just one image
        test_data = np.random.randn(1, 32, 32, 3)
        inference_request = {"instances": test_data.tolist()}

        for attempt in Retrying(
                wait=wait_fixed(TENACITY_WAIT),
                stop=stop_after_attempt(TENACITY_STOP_AFTER_ATTEMPT),
        ):
            with attempt:
                r = requests.post(
                    f"http://localhost:8004/{vs_prefix}",
                    json=inference_request,
                )
                explanation = r.json()

        assert explanation["meta"]["name"] == "AnchorImage"
        assert "anchor" in explanation["data"]
        assert "precision" in explanation["data"]
        assert "coverage" in explanation["data"]

        run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #18
def test_change_batch_qty_leading_to_reallocation():
    '''
    Test that changing a batch's quantity leads to reallocation.
    '''
    orderid, sku = random_orderid(), random_sku()
    earlier_batch, later_batch = random_batchref('old'), random_batchref('new')
    api_client.post_to_add_batch(earlier_batch, sku, 10, str(today))
    api_client.post_to_add_batch(later_batch, sku, 10, str(tomorrow))
    api_client.post_to_allocte(orderid, sku, 10)
    r = api_client.get_allocation(orderid)

    assert r.json()[0]['batchref'] == earlier_batch

    subscription = redis_client.subscribe_to('line_allocated')

    # Publish a command; the event consumer receives it and calls the handler, which successfully reallocates the order line and publishes a line_allocated event
    redis_client.publish_message('change_batch_quantity', {
        'batchref': earlier_batch,
        'qty': 5
    })

    messages = []
    for attempt in Retrying(stop=stop_after_delay(3), reraise=True):
        with attempt:
            message = subscription.get_message(timeout=1)
            if message:
                messages.append(message)
                print(message)
            data = json.loads(messages[-1]['data'])
            assert data['orderid'] == orderid
            assert data['batchref'] == later_batch
Example #19
    def ingest_from_stream(self, stream_descriptor: Union[StreamDescriptor, IO[AnyStr]], ingestion_properties: IngestionProperties) -> IngestionResult:
        stream_descriptor = BaseIngestClient._prepare_stream(stream_descriptor, ingestion_properties)
        stream = stream_descriptor.stream

        buffered_stream = read_until_size_or_end(stream, self.MAX_STREAMING_SIZE_IN_BYTES + 1)

        if len(buffered_stream.getbuffer()) > self.MAX_STREAMING_SIZE_IN_BYTES:
            stream_descriptor.stream = chain_streams([buffered_stream, stream])
            return self.queued_client.ingest_from_stream(stream_descriptor, ingestion_properties)

        stream_descriptor.stream = buffered_stream

        try:
            for attempt in Retrying(
                stop=stop_after_attempt(self._num_of_attempts), wait=wait_random_exponential(max=self._max_seconds_per_retry), reraise=True
            ):
                with attempt:
                    stream.seek(0, SEEK_SET)
                    client_request_id = ManagedStreamingIngestClient._get_request_id(stream_descriptor.source_id, attempt.retry_state.attempt_number - 1)
                    return self.streaming_client._ingest_from_stream_with_client_request_id(stream_descriptor, ingestion_properties, client_request_id)
        except KustoApiError as ex:
            error = ex.get_api_error()
            if error.permanent:
                raise

        return self.queued_client.ingest_from_stream(stream_descriptor, ingestion_properties)
Example #20
def create_flavors(nova_client=None):
    """Create basic flavors.

    :param nova_client: Authenticated nova client
    :type nova_client: novaclient.v2.client.Client
    """
    if not nova_client:
        keystone_session = openstack_utils.get_overcloud_keystone_session()
        nova_client = openstack_utils.get_nova_session_client(keystone_session)
    cli_utils.setup_logging()

    for attempt in Retrying(stop=stop_after_attempt(3),
                            wait=wait_exponential(multiplier=1, min=2,
                                                  max=10)):
        with attempt:
            existing_flavors = nova_client.flavors.list()

    names = [flavor.name for flavor in existing_flavors]
    for flavor in nova_utils.FLAVORS.keys():
        if flavor not in names:
            nova_flavor = nova_client.flavors.create(
                name=flavor,
                ram=nova_utils.FLAVORS[flavor]['ram'],
                vcpus=nova_utils.FLAVORS[flavor]['vcpus'],
                disk=nova_utils.FLAVORS[flavor]['disk'],
                flavorid=nova_utils.FLAVORS[flavor]['flavorid'])
            if 'extra-specs' in nova_utils.FLAVORS[flavor]:
                nova_flavor.set_keys(nova_utils.FLAVORS[flavor]['extra-specs'])
Example #21
    def test_retry_child_class_with_override_backward_compat(self):
        class MyStop(tenacity.stop_after_attempt):
            def __init__(self):
                super(MyStop, self).__init__(1)

            def __call__(self, attempt_number, seconds_since_start):
                return super(MyStop, self).__call__(attempt_number,
                                                    seconds_since_start)

        retrying = Retrying(wait=tenacity.wait_fixed(0.01), stop=MyStop())

        def failing():
            raise NotImplementedError()

        with pytest.raises(RetryError):
            retrying.call(failing)
Example #22
def test_change_batch_quantity_leading_to_reallocation():
    # start with two batches and an order allocated to one of them
    orderid, sku = random_orderid(), random_sku()
    earlier_batch, later_batch = random_batchref("old"), random_batchref(
        "newer")
    api_client.post_to_add_batch(earlier_batch, sku, qty=10, eta="2011-01-02")
    api_client.post_to_add_batch(later_batch, sku, qty=10, eta="2011-01-03")
    r = api_client.post_to_allocate(orderid, sku, 10)
    assert r.ok
    response = api_client.get_allocation(orderid)
    assert response.json()[0]["batchref"] == earlier_batch

    subscription = redis_client.subscribe_to("line_allocated")

    # change quantity on allocated batch so it's less than our order
    redis_client.publish_message("change_batch_quantity", {
        "batchref": earlier_batch,
        "qty": 5
    })

    # wait until we see a message saying the order has been reallocated
    messages = []
    for attempt in Retrying(stop=stop_after_delay(3), reraise=True):
        with attempt:
            message = subscription.get_message(timeout=1)
            if message:
                messages.append(message)
                print(messages)
            data = json.loads(messages[-1]["data"])
            assert data["orderid"] == orderid
            assert data["batchref"] == later_batch
Example #23
    def test_alibi_explain_anchor_tabular(self, namespace):
        spec = "../resources/iris_anchor_tabular_explainer.yaml"
        name = "iris-default-explainer"
        vs_prefix = f"seldon/{namespace}/iris-explainer/default/api/v1.0/explain"

        inference_request = {
            "data": {
                "names": ["text"],
                "ndarray": [[5.964, 4.006, 2.081, 1.031]],
            }
        }

        retry_run(f"kubectl apply -f {spec} -n {namespace}")

        wait_for_deployment(name, namespace)

        time.sleep(AFTER_WAIT_SLEEP)

        for attempt in Retrying(
                wait=wait_fixed(TENACITY_WAIT),
                stop=stop_after_attempt(TENACITY_STOP_AFTER_ATTEMPT),
        ):
            with attempt:
                r = requests.post(
                    f"http://localhost:8004/{vs_prefix}",
                    json=inference_request,
                )
                explanation = r.json()

        assert explanation["meta"]["name"] == "AnchorTabular"
        assert "anchor" in explanation["data"]
        assert "precision" in explanation["data"]
        assert "coverage" in explanation["data"]

        run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
Example #24
def test_change_batch_quantity_leading_to_reallocation():
    # start with two batches and an order allocated to one of them
    orderid, sku = random_orderid(), random_sku()
    earlier_batch, later_batch = random_batchref('old'), random_batchref(
        'newer')
    api_client.post_to_add_batch(earlier_batch, sku, qty=10, eta='2011-01-01')
    api_client.post_to_add_batch(later_batch, sku, qty=10, eta='2011-01-02')

    response = api_client.post_to_allocate(orderid, sku, 10)
    assert response.ok
    response = api_client.get_allocation(orderid)
    assert response.json()[0]['batchref'] == earlier_batch

    subscription = redis_client.subscribe_to('line_allocated')

    # change quantity on allocated batch so it's less than our order
    redis_client.publish_message('change_batch_quantity', {
        'batchref': earlier_batch,
        'qty': 5
    })

    # wait until we see a message saying the order has been reallocated
    messages = []
    for attempt in Retrying(stop=stop_after_delay(3), reraise=True):
        with attempt:
            message = subscription.get_message(timeout=1)
            if message:
                messages.append(message)
                logger.info("%s", messages)
            data = json.loads(messages[-1]['data'])
            assert data['orderid'] == orderid
            assert data['batchref'] == later_batch
Example #25
    def tearDown(cls):
        """Remove test resources."""
        logging.info('Running teardown')
        for attempt in Retrying(
                stop=stop_after_attempt(8),
                wait=wait_exponential(multiplier=1, min=2, max=60)):
            with attempt:
                volumes = list(cls.cinder_client.volumes.list())
                snapped_volumes = [v for v in volumes
                                   if v.name.endswith("-from-snap")]
                if snapped_volumes:
                    logging.info("Removing volumes from snapshot")
                    cls._remove_volumes(snapped_volumes)
                    volumes = list(cls.cinder_client.volumes.list())

                snapshots = list(cls.cinder_client.volume_snapshots.list())
                if snapshots:
                    logging.info("tearDown - snapshots: {}".format(
                        ", ".join(s.name for s in snapshots)))
                    cls._remove_snapshots(snapshots)

                if volumes:
                    logging.info("tearDown - volumes: {}".format(
                        ", ".join(v.name for v in volumes)))
                    cls._remove_volumes(volumes)
Example #26
def background_event_listening(pubsub, event_name) -> None:
    for attempt in Retrying(stop=stop_after_delay(5), reraise=True):
        with attempt:
            msg = pubsub.get_message()
            if msg:
                data = parse_encoded_message(msg)["data"]
                assert data["name"] == event_name
Example #27
def minio_service(minio_config: Dict[str, str]) -> Iterator[Minio]:

    client = Minio(**minio_config["client"])

    for attempt in Retrying(
        wait=tenacity.wait_fixed(5),
        stop=tenacity.stop_after_attempt(60),
        before_sleep=tenacity.before_sleep_log(log, logging.WARNING),
        reraise=True,
    ):
        with attempt:
            # TODO: improve as https://docs.min.io/docs/minio-monitoring-guide.html
            if not client.bucket_exists("pytest"):
                client.make_bucket("pytest")
            client.remove_bucket("pytest")

    bucket_name = minio_config["bucket_name"]

    # cleans up in case a failing tests left this bucket
    _ensure_remove_bucket(client, bucket_name)

    client.make_bucket(bucket_name)
    assert client.bucket_exists(bucket_name)

    yield client

    # cleanup upon tear-down
    _ensure_remove_bucket(client, bucket_name)
Example #28
def driver(request, driver_class, driver_kwargs):
    """Returns a WebDriver instance based on options and capabilities"""

    retries = int(request.config.getini("max_driver_init_attempts"))
    for retry in Retrying(stop=stop_after_attempt(retries),
                          wait=wait_exponential(),
                          reraise=True):
        with retry:
            LOGGER.info(
                f"Driver init, attempt {retry.retry_state.attempt_number}/{retries}"
            )
            driver = driver_class(**driver_kwargs)

    event_listener = request.config.getoption("event_listener")
    if event_listener is not None:
        # Import the specified event listener and wrap the driver instance
        mod_name, class_name = event_listener.rsplit(".", 1)
        mod = __import__(mod_name, fromlist=[class_name])
        event_listener = getattr(mod, class_name)
        if not isinstance(driver, EventFiringWebDriver):
            driver = EventFiringWebDriver(driver, event_listener())

    request.node._driver = driver
    yield driver
    driver.quit()
Example #29
    def sign_up(
        self,
        email: str,
        password: str,
        phone_number: str = None,
        name: str = None,
        photo_url: str = None,
        user_id: str = None,
        project_id: str = None,
    ):
        if phone_number and not phone_number.startswith("+"):
            phone_number = f"+{phone_number}"

        data = {
            "email": email,
            "password": password,
            "displayName": name,
            "photoUrl": photo_url,
            "disabled": False,
            "localId": user_id,
            "phoneNumber": phone_number,
            "targetProjectId": project_id or self.project_id,
        }
        self._execute(method=self.client.accounts().signUp, body=data)

        for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(1)):
            with attempt:
                return self.find(email=email, project_id=project_id)
Example #30
        def exists():
            count = max_attempts
            if not is_retry:
                count = 1

            def air_retry_exists():
                return loop_find(self.airtest_ui,
                                 timeout=UI_TIME_OUT,
                                 **kwargs)

            def poco_retry_exists():
                if self.poco_ui is None:
                    self.get_ui()
                    return False
                return self.poco_ui.attr('visible')

            r = Retrying(retry=retry_if_exception_type(TargetNotFoundError),
                         wait=wait_fixed(sleeps),
                         stop=stop_after_attempt(count),
                         reraise=True)
            try:
                res = r(air_retry_exists if self.is_airtest
                        else poco_retry_exists)
            except (PocoTargetRemovedException, PocoNoSuchNodeException,
                    TargetNotFoundError):
                res = False

            return res