Example No. 1
    def tabs(self):
        bot = self.bot
        ProductID = [
            'B09CGG1JZ1', 'B09B5VHWJT', 'B09CG181X1', 'B09CCSXFKV',
            'B09CCY1D1F', 'B09C34VF11', 'B09B4WWF75', 'B09B3K28VK',
            'B09B2QQL95', 'B09B233DM6'
        ]

        # Assumed imports: sleep (time), retry (retrying), Keys (selenium).
        for i in range(len(ProductID)):
            # Work in the most recently opened tab.
            handles = bot.window_handles
            bot.switch_to.window(handles[-1])
            sleep(2)
            # Retry the lookup while the page loads; retrying's
            # stop_max_delay is measured in milliseconds.
            productSearch = retry(stop_max_delay=3000)(
                bot.find_element_by_name)("field-keywords")
            productSearch.clear()
            sleep(0.5)
            productSearch.send_keys(ProductID[i])
            productSearch.send_keys(Keys.RETURN)
            self.click()

        # Close every tab, running the final search in the original window.
        handles = bot.window_handles
        for j in reversed(handles):
            bot.switch_to.window(j)
            if j == handles[0]:
                self.search()
            bot.close()

        sleep(0.5)
Example No. 2
    def tabs(self):
        bot = self.bot
        ProductID = [
            'ACCG6YJPGZFFHRZS', 'MICG5ZDSZHFDYDEE', 'ACCG6YKD5XX9WYAW',
            'ACCG6FHAJPYERRY9', 'ACCG5EAZGXVQDZF4', 'ACCG5QSB5WYGBKTY',
            'PHCG5GZDUHKAXX9Y', 'SBMG5NC8HEYAYHYK', 'MIAG5ZD3JZFHPJHS'
        ]

        # Assumed imports: sleep (time), retry (retrying), Keys (selenium).
        for i in range(len(ProductID)):
            # Work in the most recently opened tab.
            handles = bot.window_handles
            bot.switch_to.window(handles[-1])
            sleep(2)
            # Retry the lookup while the page loads; retrying's
            # stop_max_delay is measured in milliseconds.
            productSearch = retry(stop_max_delay=3000)(
                bot.find_element_by_name)("field-keywords")
            productSearch.clear()
            sleep(0.5)
            productSearch.send_keys(ProductID[i])
            productSearch.send_keys(Keys.RETURN)
            self.click()

        # Close every tab, running the final search in the original window.
        handles = bot.window_handles
        for j in reversed(handles):
            bot.switch_to.window(j)
            if j == handles[0]:
                self.search()
            bot.close()

        sleep(0.5)
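Both snippets above build a retry decorator and apply it inline to a single flaky call instead of decorating a function at definition time. Below is a minimal, self-contained sketch of that pattern; find_element is a hypothetical stand-in for the Selenium lookup.

import random
from retrying import retry

def find_element(name):
    # Hypothetical stand-in for bot.find_element_by_name; fails while loading.
    if random.random() < 0.5:
        raise LookupError("element %r not ready yet" % name)
    return "element:" + name

# retry() returns a decorator; applying it inline wraps just this one call.
# stop_max_delay is measured in milliseconds.
element = retry(stop_max_delay=3000)(find_element)("field-keywords")
print(element)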
Example No. 3
def assert_initial_conditions(scheduler_commands,
                              num_static_nodes,
                              num_dynamic_nodes,
                              partition,
                              cancel_job_id=None):
    """Assert cluster is in expected state before test starts; return list of compute nodes."""
    logging.info(
        "Assert initial condition, expect cluster to have {num_nodes} idle nodes"
        .format(num_nodes=num_static_nodes + num_dynamic_nodes))
    wait_for_num_nodes_in_scheduler(scheduler_commands,
                                    num_static_nodes + num_dynamic_nodes,
                                    filter_by_partition=partition)
    nodes_in_scheduler = scheduler_commands.get_compute_nodes(partition)
    static_nodes = []
    dynamic_nodes = []
    for node in nodes_in_scheduler:
        if "-st-" in node:
            static_nodes.append(node)
        elif "-dy-" in node:
            dynamic_nodes.append(node)
    assert_that(len(static_nodes)).is_equal_to(num_static_nodes)
    assert_that(len(dynamic_nodes)).is_equal_to(num_dynamic_nodes)
    assert_compute_node_states(scheduler_commands,
                               nodes_in_scheduler,
                               expected_states=["idle", "mixed", "allocated"])
    if cancel_job_id:
        # Cancel the warm-up job so that no extra scaling happens
        scheduler_commands.cancel_job(cancel_job_id)
        retry(wait_fixed=seconds(20),
              stop_max_delay=minutes(2))(assert_compute_node_states)(
                  scheduler_commands,
                  nodes_in_scheduler,
                  expected_states=["idle"])

    return static_nodes, dynamic_nodes
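The retry(wait_fixed=seconds(20), stop_max_delay=minutes(2))(assert_compute_node_states)(...) call above re-runs an assertion until it passes or the deadline expires. A minimal sketch of that polling idiom, with a hypothetical cluster_is_idle check standing in for assert_compute_node_states:

from retrying import retry

state = {"checks": 0}

def cluster_is_idle():
    # Hypothetical stand-in: fails until the third check, like nodes draining.
    state["checks"] += 1
    assert state["checks"] >= 3, "nodes still busy"

# Times are in milliseconds: re-check every 100 ms, give up after 5 s.
retry(wait_fixed=100, stop_max_delay=5000)(cluster_is_idle)()
print("idle after", state["checks"], "checks")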
Example No. 4
    def add_and_fetch_remote(
        repo_info: Struct, root_repo: Repo = None, branch_name: str = ""
    ):
        """
        Deprecated function, not use anymore git submodule
        :param repo_info:
        :param root_repo:
        :param branch_name:
        :return:
        """
        try:
            working_repo = Repo(repo_info.relative_path)
            if repo_info.organization in [
                a.name for a in working_repo.remotes
            ]:
                print(
                    f'Remote "{repo_info.organization}" already exists '
                    f"in {repo_info.relative_path}"
                )
                return
        except git.NoSuchPathError:
            print(f"New repo {repo_info.relative_path}")
            if not root_repo:
                print(
                    f"Missing git repository to root for repo {repo_info.path}"
                )
                return
            if branch_name:
                submodule_repo = retry(
                    wait_exponential_multiplier=1000, stop_max_delay=15000
                )(root_repo.create_submodule)(
                    repo_info.path,
                    repo_info.path,
                    url=repo_info.url_https,
                    branch=branch_name,
                )
                # The remote step below needs a repo object; use the
                # freshly created submodule's repository.
                working_repo = submodule_repo.module()
            else:
                submodule_repo = retry(
                    wait_exponential_multiplier=1000, stop_max_delay=15000
                )(root_repo.create_submodule)(
                    repo_info.path, repo_info.path, url=repo_info.url_https
                )
                return
        # Add remote
        upstream_remote = retry(
            wait_exponential_multiplier=1000, stop_max_delay=15000
        )(working_repo.create_remote)(
            repo_info.organization, repo_info.url_https
        )
        print(
            'Remote "%s" created for %s'
            % (repo_info.organization, repo_info.url_https)
        )

        # Fetch the remote
        retry(wait_exponential_multiplier=1000, stop_max_delay=15000)(
            upstream_remote.fetch
        )()
        print('Remote "%s" fetched' % repo_info.organization)
Example No. 5
def retry(*dargs, **dkw):
    defaults = dict(
        retry_on_exception=retry_filter,
        wait_exponential_multiplier=500,
        wait_exponential_max=15000,
    )

    if len(dargs) == 1 and callable(dargs[0]):
        return retrying.retry(**defaults)(dargs[0])
    else:
        dkw = dict(defaults, **dkw)
        return retrying.retry(*dargs, **dkw)
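The helper above works both as a bare decorator and as a decorator factory, merging caller overrides into its defaults. A usage sketch, restating the helper with a hypothetical retry_filter so it runs on its own:

import retrying

def retry_filter(exc):
    # Hypothetical filter: only network-ish errors are worth retrying.
    return isinstance(exc, (IOError, OSError))

def retry(*dargs, **dkw):
    defaults = dict(retry_on_exception=retry_filter,
                    wait_exponential_multiplier=500,
                    wait_exponential_max=15000)
    if len(dargs) == 1 and callable(dargs[0]):
        return retrying.retry(**defaults)(dargs[0])
    return retrying.retry(*dargs, **dict(defaults, **dkw))

@retry                              # bare form: defaults apply as-is
def ping():
    return "pong"

@retry(stop_max_attempt_number=5)   # factory form: merged with the defaults
def fetch():
    return "data"

print(ping(), fetch())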
Example No. 6
    def sync_to(result, checkout_when_diff=False):
        lst_compare_repo_info, lst_missing_info, lst_over_info = result
        total = len(lst_missing_info)
        if total:
            print(f"\nList of missing : {total}")
            i = 0
            for info in lst_missing_info:
                i += 1
                print(f"Nb element {i}/{total}")
                print(f"Missing '{info}'")

        total = len(lst_over_info)
        if total:
            print(f"\nList of over : {total}")
            i = 0
            for info in lst_over_info:
                i += 1
                print(f"Nb element {i}/{total}")
                print(f"Missing '{info}'")

        total = len(lst_compare_repo_info)
        print(f"\nList of normalize : {total}")
        lst_same = []
        lst_diff = []
        i = 0
        for original, compare_to in lst_compare_repo_info:
            i += 1
            print(f"Nb element {i}/{total}")
            repo_original = Repo(original.get("relative_path"))
            commit_original = repo_original.head.object.hexsha
            repo_compare = Repo(compare_to.get("relative_path"))
            commit_compare = repo_compare.head.object.hexsha
            if commit_original != commit_compare:
                print(
                    f"DIFF - {original.get('name')} - O {commit_original} - "
                    f"R {commit_compare}"
                )
                lst_diff.append((original, compare_to))
                if checkout_when_diff:
                    # Update all remote
                    for remote in repo_original.remotes:
                        retry(
                            wait_exponential_multiplier=1000,
                            stop_max_delay=15000,
                        )(remote.fetch)()
                    repo_original.git.checkout(commit_compare)
            else:
                print(f"SAME - {original.get('name')}")
                lst_same.append((original, compare_to))
        print(f"finish same {len(lst_same)}, diff {len(lst_diff)}")
Example No. 7
def test_multiple_jobs_submission(scheduler, region, pcluster_config_reader,
                                  clusters_factory, test_datadir):
    scaledown_idletime = 4
    # Test jobs should take at most 9 minutes to be executed.
    # This guarantees that the jobs are executed in parallel.
    max_jobs_execution_time = 9

    cluster_config = pcluster_config_reader(
        scaledown_idletime=scaledown_idletime)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler,
                                                remote_command_executor)

    logging.info("Executing sleep job to start a dynamic node")
    result = scheduler_commands.submit_command("sleep 1")
    job_id = scheduler_commands.assert_job_submitted(result.stdout)
    retry(wait_fixed=seconds(30),
          stop_max_delay=seconds(500))(_assert_job_state)(
              scheduler_commands, job_id, job_state="COMPLETED")

    logging.info("Executing test jobs on cluster")
    remote_command_executor.run_remote_script(test_datadir /
                                              "cluster-check.sh",
                                              args=["submit", scheduler])

    logging.info("Monitoring ec2 capacity and compute nodes")
    ec2_capacity_time_series, compute_nodes_time_series, timestamps = get_compute_nodes_allocation(
        scheduler_commands=scheduler_commands,
        region=region,
        stack_name=cluster.cfn_name,
        max_monitoring_time=minutes(max_jobs_execution_time) +
        minutes(scaledown_idletime) + minutes(5),
    )

    logging.info(
        "Verifying test jobs completed successfully and in the expected time")
    _assert_test_jobs_completed(remote_command_executor,
                                max_jobs_execution_time * 60)

    logging.info("Verifying auto-scaling worked correctly")
    _assert_scaling_works(
        ec2_capacity_time_series=ec2_capacity_time_series,
        compute_nodes_time_series=compute_nodes_time_series,
        expected_ec2_capacity=(0, 3),
        expected_compute_nodes=(0, 3),
    )

    logging.info("Verifying no error in logs")
    assert_no_errors_in_logs(remote_command_executor, scheduler)
Example No. 8
def save_concept_data(seconds=100, way='byboot'):
    configger.init()
    engine = configger.engine
    wait_days = configger.constant_variables['low_ferquency_update_days']
    maintable = 'tb_ak_concept_names'
    indextable = 'tb_ak_concept_index'
    infotable = 'tb_ak_concept_infos'
    if not utils.timeUtil.tableNeedUpdate(maintable):
        return
    names = query_concept_names()
    names.to_sql(maintable, con=engine, if_exists='replace', index=False)
    print(names)
    if not utils.timeUtil.tableNeedUpdate(infotable) or way == 'byhand':
        flag = True
        dataList = pd.DataFrame()
        for name in names['name']:
            print(name)
            try:
                data = retry(query_concept_infos, name)
            except Exception:
                continue
            if flag and not data.empty:
                dataList = data
                flag = False
            else:
                # DataFrame.append was removed in pandas 2.x; use concat.
                dataList = pd.concat([dataList, data], ignore_index=True)
            if data_need_update(infotable + '_his', 'update_time', '概念', name):
                data.to_sql(infotable + '_his', con=engine, if_exists='append', index=False)
            print(dataList)
        dataList.to_sql(infotable, con=engine, if_exists='replace', index=False)
        utils.timeUtil.saveOperationTime(infotable)
    if not utils.timeUtil.tableNeedUpdate(indextable, wait_days) or way == 'byhand':
        for name in names['name']:
            if not data_need_update(indextable, 'update_time', '概念', name):
                continue
            data = retry(query_concept_index, name)
            try:
                sql = 'select * from {} where 概念="{}"'.format(indextable, name)
                saved_data = pd.read_sql(sql=sql, con=engine)
                data = data.loc[~data['日期'].isin(saved_data['日期'])]
            except Exception:
                traceback.print_exc()
            data.to_sql(indextable, con=engine, if_exists='append', index=False)
            print(data)
        utils.timeUtil.saveOperationTime(indextable)
    utils.timeUtil.saveOperationTime(maintable)
Example No. 9
def retry_on_stale_data_error(func):
    def is_staledata_error(ex):
        return isinstance(ex, exc.StaleDataError)

    wrapper = retrying.retry(stop_max_attempt_number=3,
                             retry_on_exception=is_staledata_error)
    return wrapper(func)
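A usage sketch of the decorator above, with a local stand-in for SQLAlchemy's StaleDataError so the snippet runs on its own:

import retrying

class StaleDataError(Exception):
    # Hypothetical stand-in for sqlalchemy.orm.exc.StaleDataError.
    pass

def retry_on_stale_data_error(func):
    def is_staledata_error(ex):
        return isinstance(ex, StaleDataError)
    return retrying.retry(stop_max_attempt_number=3,
                          retry_on_exception=is_staledata_error)(func)

attempts = []

@retry_on_stale_data_error
def save_row():
    attempts.append(1)
    if len(attempts) < 3:
        raise StaleDataError("row version changed under us")
    return "saved"

print(save_row(), "after", len(attempts), "attempts")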
Example No. 10
 def retry_on_timeout(cls, fn):
     """Infinitely retry for timed-out exceptions."""
     if not cls.enabled:
         return fn
     cls.logger.debug("Trying to call %s with infinite retry.", fn)
     return retry(wait_exponential_multiplier=1e3, wait_exponential_max=180e3,
                  retry_on_exception=cls.exception_filter)(fn)
Example No. 11
def _safe_mongo_call(max_retries, retry_interval):
    return retrying.retry(
        retry_on_exception=lambda e: isinstance(
            e, pymongo.errors.AutoReconnect),
        wait_fixed=retry_interval * 1000,
        stop_max_attempt_number=max_retries if max_retries >= 0 else None
    )
Example No. 12
 def inner(f):
     __tracebackhide__ = True
     return retry(
         wait_exponential_multiplier=wait_exponential_multiplier,
         wait_exponential_max=wait_exponential_max,
         stop_max_attempt_number=tries,
         retry_on_exception=_retry_on_exception(retry_exceptions))(f)
Example No. 13
def retry_database(fn):
    """Decorator to keep probing the database untill you succeed."""
    _check_exception = functools.partial(check_exception, valid_exc=sa.exc.OperationalError)
    r = retry(
        retry_on_exception=_check_exception, wait_exponential_multiplier=1000,
        wait_exponential_max=60000, stop_max_attempt_number=7)
    return r(fn)
Example No. 15
def retry_on_conflict(func):
    def is_conflict(ex):
        return isinstance(ex, exception.ConcurrentTransaction)
    wrapper = retrying.retry(stop_max_attempt_number=11,
                             wait_random_min=0.0, wait_random_max=2.0,
                             retry_on_exception=is_conflict)
    return wrapper(func)
Example No. 16
def retry_subprocess(fn):
    r = retry(
        retry_on_exception=lambda exc:
            _check_exception(exc, valid_exc=MySubprocessError),
        wait_exponential_multiplier=1000,
        wait_exponential_max=60000,
        stop_max_attempt_number=7)
    return r(fn)
Example No. 17
 def open_shm_block(self, security):
     shm_path = self._cfg.get_day_shm_path(security.code)
     try:
         attach = retry(stop_max_attempt_number=3, wait_fixed=1000)(SharedArray.attach)
         array = attach("file://" + shm_path, readonly=True)
     except OSError:
         array = np.empty((0, len(security.day_column_names) + 1))
     block = _ShmBlock(array, ('date',) + security.day_column_names)
     return block
Example No. 18
 def wrapped(*args, **kwargs):
     self = args[0]
     new_fn = retrying.retry(retry_on_result=_ofport_result_pending,
                             stop_max_delay=self.vsctl_timeout * 1000,
                             wait_exponential_multiplier=10,
                             wait_exponential_max=1000,
                             retry_on_exception=lambda _: False)(fn)
     return new_fn(*args, **kwargs)
Example No. 19
    def test_backoff(self):
        """ Retries with exponential backoff. """
        r = retrying.retry(wait_exponential_multiplier=1000)(fail_n(9))

        fake_time = FakeTime()
        with fake_time:
            r()
        self.assertGreaterEqual(fake_time.mock_sleep.total, 2**9 - 1)
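The asserted bound is loose by design: with wait_exponential_multiplier=1000, retrying sleeps about 2**i * 1000 ms before the i-th retry, so nine failed attempts accumulate roughly 2 + 4 + ... + 512 = 1022 seconds of sleep, well above the asserted 2**9 - 1 = 511. A quick check of that arithmetic, assuming the usual 2**attempt * multiplier schedule:

# Waits (ms) before retries 1..9 under wait_exponential_multiplier=1000.
waits_ms = [2 ** i * 1000 for i in range(1, 10)]
total_seconds = sum(waits_ms) / 1000.0
assert total_seconds >= 2 ** 9 - 1
print(total_seconds)  # 1022.0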
Example No. 20
    def test_n_retry(self):
        """ Retries a fixed number of times. """
        r = retrying.retry(stop_max_attempt_number=10)(fail_n(9))

        fake_time = FakeTime()
        with fake_time:
            r()
        self.assertEqual(fake_time.mock_sleep.calls, 9)
Example No. 21
def mark(f):
    """Marks an entire test as eventually consistent and retries."""
    __tracebackhide__ = True
    return retry(wait_exponential_multiplier=WAIT_EXPONENTIAL_MULTIPLIER,
                 wait_exponential_max=WAIT_EXPONENTIAL_MAX_DEFAULT,
                 stop_max_attempt_number=STOP_MAX_ATTEMPT_NUMBER_DEFAULT,
                 retry_on_exception=_retry_on_exception(
                     (AssertionError, exceptions.GoogleCloudError)))(f)
Example No. 22
def retry_upon_exception(exc,
                         delay=500,
                         max_delay=4000,
                         max_attempts=cfg.CONF.nsxv.retries):
    return retrying.retry(retry_on_exception=lambda e: isinstance(e, exc),
                          wait_exponential_multiplier=delay,
                          wait_exponential_max=max_delay,
                          stop_max_attempt_number=max_attempts)
Example No. 23
def mark(f):
    """Marks an entire test as eventually consistent and retries."""
    __tracebackhide__ = True
    return retry(
        wait_exponential_multiplier=100,
        wait_exponential_max=1500,
        stop_max_attempt_number=5,
        retry_on_exception=_retry_on_exception(AssertionError))(f)
Example No. 24
    def test_deadline(self):
        """ Retry limit based on total time. """
        r = retrying.retry(stop_max_delay=1000, wait_fixed=200)(fail_n(5))

        fake_time = FakeTime()
        with fake_time:
            r()
        self.assertGreaterEqual(fake_time.mock_sleep.total, 1.0)
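Here the arithmetic is exact: five failures with wait_fixed=200 ms sleep for 5 * 0.2 = 1.0 s in total, which is precisely the asserted lower bound:

# Five failures, each followed by a fixed 200 ms sleep.
total_sleep_s = 5 * 200 / 1000.0
assert total_sleep_s >= 1.0
print(total_sleep_s)  # 1.0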
Example No. 28
    def test_retry(self):
        a = [1, 2]
        k = {'a': 3, 'b': 4, 'cc': 5}

        self.mox.StubOutWithMock(retrying, 'retry')
        retrying.retry(
            *a,
            retry_on_exception=mox.IgnoreArg(),
            wait_random_min=mox.IgnoreArg(),
            wait_random_max=mox.IgnoreArg(),
            **k
        ).AndReturn(lambda func: func)
        func = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(functools, 'wraps')
        functools.wraps(func).AndReturn(lambda func: func)
        func()
        self.mox.ReplayAll()
        orm.retry(*a, **k)(func)()
Example No. 29
def retry_ssh(fn):
    """Retry doing something over an ssh connection."""
    _check_exception = functools.partial(check_exception, valid_exc=paramiko.SSHException)
    wrapper = retry(
        retry_on_exception=_check_exception,
        wait_exponential_multiplier=1_000,
        wait_exponential_max=60_000,
        stop_max_attempt_number=7)
    return wrapper(fn)
Example No. 30
def retry_archive(fn):
    """Decorator to keep probing the database untill you succeed."""
    from retrying import retry
    from elaspic import errors
    r = retry(
        retry_on_exception=lambda exc:
            _check_exception(exc, valid_exc=errors.Archive7zipError),
        wait_fixed=2000,
        stop_max_attempt_number=2)
    return r(fn)
Example No. 31
def verifyNamespaceCreation(api_client, name):
    # Verifies that a namespace was created for the profile 'name'.
    coreV1 = k8s_client.CoreV1Api(api_client)
    retry_read_namespace = retry(
        wait_exponential_multiplier=1000,  # wait 2^i * 1000 ms on the i-th retry
        wait_exponential_max=60000,        # cap waits at 60 s
    )(coreV1.read_namespace)
    resp = retry_read_namespace(name)
    logging.info("found namespace: %s", resp)
Example No. 32
def process_volumes(context):
    if len(context['ebs']['volume_ids']) == 0:
        count = context['ebs']['count']
        recovery = False
    else:
        logging.debug("recovering volumes: {0}".format(
            context['ebs']['volume_ids']))
        count = len(context['ebs']['volume_ids'])
        context['ebs']['strict'] = True
        recovery = True
    attached = get_attached(context)
    device = next_device("/dev/xvdf")
    while len(attached) < count:
        if len(context['ebs']['volume_ids']) == 0:
            volumes = get_volumes(context)
        else:
            volumes = get_volumes(context,
                                  volume_ids=context['ebs']['volume_ids'])
        try:
            volume = volumes.pop()
            test = True  # only fsck reattached volumes
        except IndexError:
            if recovery or context['ebs']['strict']:
                raise Exception("failed to attach volumes: {0}".format(
                    ", ".join(context['ebs']['volume_ids'])))
            else:
                volume = create_volume(context)
                test = False
        if not recovery and "blacklist" in volume.tags:
            continue
        if attach_volume(volume, device, context):
            wait_for_volume_state(volume, "attached")
            _volume_add_tags = retry(wait_exponential_multiplier=1000,
                                     stop_max_delay=60000,
                                     retry_on_exception=retry_if_throttled)(
                                         volume.add_tags)
            _volume_add_tags(context['tags'])
            logging.info("{0} tagged".format(volume.id))
            if test:
                if not test_volume(volume, device, context):
                    continue
            else:
                mkfs(volume, context)
            mount_volume(volume, context)
            logging.info("{0} mounted ".format(volume.id))
            device = next_device(device)
            attached.append(volume)
        elif not test:
            logging.error("failed to attach newly created {0}".format(
                volume.id))
            raise Exception("failed to attach newly created {0}".format(
                volume.id))
        else:
            # Maybe another instance attached this volume first; try the next one.
            pass
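The snippet wraps a single bound method (volume.add_tags) in a retry decorator on the spot, so only that call is retried when AWS throttles. A minimal sketch of wrapping a bound method this way, with hypothetical Volume and ThrottleError stand-ins (and small waits so it runs quickly):

from retrying import retry

class ThrottleError(Exception):
    # Hypothetical stand-in for a boto throttling error.
    pass

def retry_if_throttled(exc):
    return isinstance(exc, ThrottleError)

class Volume(object):
    # Hypothetical stand-in for a boto EBS volume object.
    def __init__(self):
        self.calls = 0
        self.tags = {}

    def add_tags(self, tags):
        self.calls += 1
        if self.calls < 2:
            raise ThrottleError("Request limit exceeded")
        self.tags.update(tags)

volume = Volume()
# Wrap just the bound method; only this call gets the retry behavior.
_volume_add_tags = retry(wait_exponential_multiplier=10,
                         stop_max_delay=1000,
                         retry_on_exception=retry_if_throttled)(volume.add_tags)
_volume_add_tags({"Name": "data-volume"})
print(volume.tags)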
Example No. 33
    def decorator(func):
        retried_func = retry(*args, **kwargs, wrap_exception=True)(func)

        @functools.wraps(retried_func)
        def wrapper(*modified_args, **modified_kwargs):
            try:
                return retried_func(*modified_args, **modified_kwargs)
            except RetryError:
                logging.warning("Max retries exceeded.")

        return wrapper
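With wrap_exception=True, retrying raises RetryError once attempts are exhausted instead of re-raising the last exception, which is what lets the wrapper above log and swallow the failure. A self-contained usage sketch; the factory name retrying_swallow is hypothetical:

import functools
import logging
from retrying import RetryError, retry

def retrying_swallow(*args, **kwargs):
    # Hypothetical factory around the decorator sketched above.
    def decorator(func):
        retried_func = retry(*args, **kwargs, wrap_exception=True)(func)

        @functools.wraps(retried_func)
        def wrapper(*a, **kw):
            try:
                return retried_func(*a, **kw)
            except RetryError:
                logging.warning("Max retries exceeded.")
        return wrapper
    return decorator

@retrying_swallow(stop_max_attempt_number=2)
def always_fails():
    raise ValueError("boom")

print(always_fails())  # logs a warning and returns None instead of raising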
Example No. 34
def retry_database(fn):
    """Decorator to keep probing the database untill you succeed."""
    from retrying import retry
    import sqlalchemy as sa
    r = retry(
        retry_on_exception=lambda exc:
            _check_exception(exc, valid_exc=sa.exc.OperationalError),
        wait_exponential_multiplier=1000,
        wait_exponential_max=60000,
        stop_max_attempt_number=7)
    return r(fn)
Example No. 35
    def decorator(f):
        decorated = retry(*args, **kwargs)(f)

        @wraps(decorated)
        def wrapper(*args, **kwargs):
            try:
                return decorated(*args, **kwargs)
            except RetryError:
                raise RetryError("Maximo reintento de conecciones excedido")

        return wrapper
Example No. 36
    def reply(self, reply=None, failure=None):
        """Send back reply to the RPC client
        :param reply: Dictionary, reply. In case of exception should be None
        :param failure: Tuple, should be a sys.exc_info() tuple.
            Should be None if RPC request was successfully processed.

        :return: RpcReplyPikaIncomingMessage, message with reply
        """

        if self.reply_q is None:
            return

        reply_outgoing_message = RpcReplyPikaOutgoingMessage(
            self._pika_engine, self.msg_id, reply=reply, failure_info=failure,
            content_type=self._content_type,
            content_encoding=self._content_encoding
        )

        def on_exception(ex):
            if isinstance(ex, pika_drv_exc.ConnectionException):
                LOG.warning(
                    "Connectivity related problem during reply sending. %s",
                    ex
                )
                return True
            else:
                return False

        retrier = retrying.retry(
            stop_max_attempt_number=(
                None if self._pika_engine.rpc_reply_retry_attempts == -1
                else self._pika_engine.rpc_reply_retry_attempts
            ),
            retry_on_exception=on_exception,
            wait_fixed=self._pika_engine.rpc_reply_retry_delay * 1000,
        ) if self._pika_engine.rpc_reply_retry_attempts else None

        try:
            timeout = (None if self.expiration_time is None else
                       max(self.expiration_time - time.time(), 0))
            with timeutils.StopWatch(duration=timeout) as stopwatch:
                reply_outgoing_message.send(
                    reply_q=self.reply_q,
                    stopwatch=stopwatch,
                    retrier=retrier
                )
            LOG.debug(
                "Message [id:'%s'] replied to '%s'.", self.msg_id, self.reply_q
            )
        except Exception:
            LOG.exception(
                "Message [id:'%s'] wasn't replied to : %s", self.msg_id,
                self.reply_q
            )
Example No. 37
 def create_image(self, name):
     """
     Create a new image based on this instance.
     """
     image_id = self._ec2_instance.create_image(name)
     # Sometimes, the image takes a while to register, so retry a few times
     # if the image cannot be found
     retry_decorator = retry(retry_on_result=lambda result: result is None,
                             stop_max_attempt_number=3, wait_fixed=1000)
     image = retry_decorator(self._provider.compute.images.get)(image_id)
     return image
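retry_on_result retries based on the return value rather than on exceptions: here, as long as the image lookup returns None. A minimal sketch of that result-driven retry with a hypothetical lookup that succeeds on the second call:

from retrying import retry

calls = {"n": 0}

def get_image(image_id):
    # Hypothetical lookup: None on the first call, the image afterwards.
    calls["n"] += 1
    return None if calls["n"] < 2 else "image:" + image_id

retry_decorator = retry(retry_on_result=lambda result: result is None,
                        stop_max_attempt_number=3, wait_fixed=100)
print(retry_decorator(get_image)("ami-123"))  # retried once, then returned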
Example No. 38
def apply_retrying(obj, cfg):
    for attr_name, attr in inspect.getmembers(obj):
        if not (inspect.ismethod(attr) or inspect.isfunction(attr)):
            continue
        if attr_name in _RETRY_METHODS:
            setattr(
                obj, attr_name,
                retry(wait_fixed=cfg.matchmaker_redis.wait_timeout,
                      stop_max_delay=cfg.matchmaker_redis.check_timeout,
                      retry_on_exception=retry_if_connection_error,
                      retry_on_result=retry_if_empty)(attr))
Example No. 39
 def __configure_retry(self, retry_config):
     retry_dec = retry(
         wait_exponential_multiplier=retry_config.wait_exponential_multiplier,
         wait_exponential_max=retry_config.wait_exponential_max,
         stop_max_attempt_number=retry_config.stop_max_attempt_number,
         retry_on_exception=retry_if_connection_not_established)
     self.__ping_connection = retry_dec(self.__ping_connection)
     self.create_store = retry_dec(self.create_store)
     self.is_store_exists = retry_dec(self.is_store_exists)
     self.delete_store = retry_dec(self.delete_store)
Example No. 40
 def retryable(self, function):
     @wraps(function)
     def wrapper(*args, **kwargs):
         self.maybe_wait()
         response = function(*args, **kwargs)
         backoff_interval = response.data.get('backoff', 0)
         self.set_next_request_minimum_time(backoff_interval)
         return response
     return retry(
         retry_on_exception=BackoffStrategy._retry_on_throttle_error,
         **self._retry_kwargs
     )(wrapper)
Example No. 41
def verifyProfileCreation(api_client, group, version, name):
    k8s_co = k8s_client.CustomObjectsApi(api_client)
    retry_read_profile = retry(
        wait_exponential_multiplier=1000,  # wait 2^i * 1000 ms on the i-th retry
        wait_exponential_max=60000,        # 60 sec max
    )(k8s_co.get_cluster_custom_object)
    resp = retry_read_profile(group=group,
                              version=version,
                              plural=PLURAL,
                              name=name)
    logging.info(resp)
Example No. 42
    def reply(self, reply=None, failure=None, log_failure=True):
        """Send back reply to the RPC client
        :param reply: Dictionary, reply. In case of exception should be None
        :param failure: Tuple, should be a sys.exc_info() tuple.
            Should be None if RPC request was successfully processed.
        :param log_failure: Boolean, not used in this implementation.
            It present here to be compatible with driver API

        :return: RpcReplyPikaIncomingMessage, message with reply
        """

        if self.reply_q is None:
            return

        reply_outgoing_message = RpcReplyPikaOutgoingMessage(
            self._pika_engine, self.msg_id, reply=reply, failure_info=failure,
            content_type=self._content_type,
            content_encoding=self._content_encoding
        )

        def on_exception(ex):
            if isinstance(ex, pika_drv_exc.ConnectionException):
                LOG.warn(str(ex))
                return True
            else:
                return False

        retrier = retrying.retry(
            stop_max_attempt_number=(
                None if self._pika_engine.rpc_reply_retry_attempts == -1
                else self._pika_engine.rpc_reply_retry_attempts
            ),
            retry_on_exception=on_exception,
            wait_fixed=self._pika_engine.rpc_reply_retry_delay * 1000,
        ) if self._pika_engine.rpc_reply_retry_attempts else None

        try:
            reply_outgoing_message.send(
                reply_q=self.reply_q,
                expiration_time=self.expiration_time,
                retrier=retrier
            )
            LOG.debug(
                "Message [id:'{}'] replied to '{}'.".format(
                    self.msg_id, self.reply_q
                )
            )
        except Exception:
            LOG.exception(
                "Message [id:'{}'] wasn't replied to : {}".format(
                    self.msg_id, self.reply_q
                )
            )
Example No. 43
class NeiHan:
    def __init__(self):
        self.url = 'http://neihanshequ.com/'
        self.headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
        }

    @retry(stop_max_attempt_number=3)  # reconnect up to 3 times on timeout
    def _parse_url(self):
        response = requests.get(self.url, headers=self.headers, timeout=3)
        assert response.status_code == 200  # raise if the response is not OK
        return response.content.decode()

    def parse_url(self):
        try:
            html = self._parse_url()
        except Exception as e:
            print(e)
            html = None
        return html

    def get_content_list(self, html_str):
        '''
        The HTML fragment to scrape:
        <div class="upload-txt  no-mb">
            <h1 class="title">
            <p>有一个盲了的女孩,她一无所有,只剩下她男朋友,男朋友问她,如果你眼睛好了,能和我结婚吗?女孩答应了。很快女孩可以移植眼角膜,也很快恢复视力,但她发现她男朋友也是盲的。男朋友向她求婚,女孩拒绝了,最后男孩直说了一句话:“take care of my eyes,”谁给我翻译下他说的什么话。</p>
            </h1>
        </div>
        '''
        content_list = re.findall(
            r'<div class="upload-txt\s+no-mb">.*?<h1 class="title">.*?<p>(.*?)</p>.*?</h1>.*?</div>',
            html_str, re.DOTALL)
        print(content_list)
        return content_list

    def save_content_list(self, content_list):
        with open('./neihan.txt', 'w', encoding='utf-8') as f:
            for content in content_list:
                f.write(str(content_list.index(content) + 1))
                f.write(" " + content)
                f.write('\n')

    def run(self):
        # 1. find the url
        # 2. send the request and get the response
        html_str = self.parse_url()
        # 3. extract the data
        content_list = self.get_content_list(html_str)
        # 4. save the results
        self.save_content_list(content_list)
Example No. 44
def create_volume(context):
    logging.debug("Creating volume")
    _create_volume = retry(wait_exponential_multiplier=1000,
                           stop_max_delay=60000,
                           retry_on_exception=retry_if_throttled)(
                               context["ec2_connection"].create_volume)
    volume = _create_volume(context["ebs"]["size"],
                            context["az"],
                            volume_type=context["ebs"]["type"])
    logging.info("Created volume {0}".format(volume.id))
    wait_for_volume_state(volume, "available")
    return volume
Example No. 46
    def get_publishers_retrying(self):
        """Retry until at least one publisher appears"""

        def retry_if_empty(publishers):
            return not publishers

        _retry = retrying.retry(retry_on_result=retry_if_empty)

        @_retry
        def _get_publishers():
            return self.get_publishers()

        return _get_publishers()
Example No. 48
def with_retry(cls, methods):
    """
    Wraps the given list of methods in a class with an exponential-backoff
    retry mechanism.
    """
    retry_with_backoff = retry(
        retry_on_exception=lambda e: isinstance(e, BotoServerError),
        wait_exponential_multiplier=1000,
        wait_exponential_max=10000
    )
    for method in methods:
        m = getattr(cls, method, None)
        # collections.Callable was removed in Python 3.10; callable() suffices.
        if callable(m):
            setattr(cls, method, retry_with_backoff(m))
    return cls
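A usage sketch of with_retry, wrapping selected methods of a class so every call site gets backoff for free; BotoServerError and Client are hypothetical stand-ins, and the waits are shrunk so the sketch runs fast:

from retrying import retry

class BotoServerError(Exception):
    # Hypothetical stand-in for boto.exception.BotoServerError.
    pass

def with_retry(cls, methods):
    retry_with_backoff = retry(
        retry_on_exception=lambda e: isinstance(e, BotoServerError),
        wait_exponential_multiplier=10,
        wait_exponential_max=100)
    for method in methods:
        m = getattr(cls, method, None)
        if callable(m):
            setattr(cls, method, retry_with_backoff(m))
    return cls

class Client(object):
    attempts = 0

    def describe(self):
        Client.attempts += 1
        if Client.attempts < 3:
            raise BotoServerError("throttled")
        return "ok"

with_retry(Client, ["describe"])
print(Client().describe(), "after", Client.attempts, "attempts")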
Example No. 49
    def __init__(self, url, http_method='GET', parser=None, stop_max_attempt_number=RETRY_COUNT, **kwargs):
        self.url = url
        for name, value in kwargs.items():
            if isinstance(value, (list, tuple)):
                kwargs[name] = ','.join(str(i) for i in value)
            elif isinstance(value, datetime):
                kwargs[name] = value.isoformat()
        self.parser = parser
        self.params = kwargs
        self.http_method = http_method
        self._data = None
        self.error = None

        # Retry only if SOURCE_NOT_AVAILABLE error
        self._fetch_data = retry(
            stop_max_attempt_number=stop_max_attempt_number,
            retry_on_exception=lambda ex: isinstance(ex, RequestError) and ex.code == 504
        )(self._fetch_data)
Example No. 50
    def send_notification(self, target, ctxt, message, version, retry=None):
        if retry is None:
            retry = self._pika_engine.default_notification_retry_attempts

        def on_exception(ex):
            if isinstance(ex, (pika_drv_exc.ExchangeNotFoundException,
                               pika_drv_exc.RoutingException)):
                LOG.warning("Problem during sending notification. %s", ex)
                try:
                    self._declare_notification_queue_binding(target)
                except pika_drv_exc.ConnectionException as e:
                    LOG.warning("Problem during declaring notification queue "
                                "binding. %s", e)
                return True
            elif isinstance(ex, (pika_drv_exc.ConnectionException,
                                 pika_drv_exc.MessageRejectedException)):
                LOG.warning("Problem during sending notification. %s", ex)
                return True
            else:
                return False

        retrier = retrying.retry(
            stop_max_attempt_number=(None if retry == -1 else retry),
            retry_on_exception=on_exception,
            wait_fixed=self._pika_engine.notification_retry_delay * 1000,
        )

        msg = pika_drv_msg.PikaOutgoingMessage(self._pika_engine, message,
                                               ctxt)
        return msg.send(
            exchange=(
                target.exchange or
                self._pika_engine.default_notification_exchange
            ),
            routing_key=target.topic,
            confirm=True,
            mandatory=True,
            persistent=self._pika_engine.notification_persistence,
            retrier=retrier
        )
Example No. 51
    def start(self, timeout=None):
        """Overrides default behaviour of start method. Base start method
        does not create connection to RabbitMQ during start method (uses
        lazy connecting during first poll method call). This class should be
        connected after start call to ensure that exchange and queue for reply
        delivery are created before RPC request sending
        """
        super(RpcReplyPikaPoller, self).start()

        def on_exception(ex):
            LOG.warn(str(ex))

            return True

        retrier = retrying.retry(
            stop_max_attempt_number=self._pika_engine.rpc_reply_retry_attempts,
            stop_max_delay=None if timeout is None else timeout * 1000,
            wait_fixed=self._pika_engine.rpc_reply_retry_delay * 1000,
            retry_on_exception=on_exception,
        )

        retrier(self.reconnect)()
Example No. 52
    def send(self, target, ctxt, message, wait_for_reply=None, timeout=None,
             retry=None):
        expiration_time = None if timeout is None else time.time() + timeout

        if retry is None:
            retry = self._pika_engine.default_rpc_retry_attempts

        def on_exception(ex):
            if isinstance(ex, (pika_drv_exc.ConnectionException,
                               exceptions.MessageDeliveryFailure)):
                LOG.warn(str(ex))
                return True
            else:
                return False

        retrier = (
            None if retry == 0 else
            retrying.retry(
                stop_max_attempt_number=(None if retry == -1 else retry),
                retry_on_exception=on_exception,
                wait_fixed=self._pika_engine.rpc_retry_delay * 1000,
            )
        )

        msg = pika_drv_msg.RpcPikaOutgoingMessage(self._pika_engine, message,
                                                  ctxt)
        reply = msg.send(
            target,
            reply_listener=self._reply_listener if wait_for_reply else None,
            expiration_time=expiration_time,
            retrier=retrier
        )

        if reply is not None:
            if reply.failure is not None:
                raise reply.failure

            return reply.result
Example No. 53
def call(f, exceptions=AssertionError, tries=STOP_MAX_ATTEMPT_NUMBER_DEFAULT):
    """Call a given function and treat it as eventually consistent.

    The function will be called immediately and retried with exponential
    backoff up to the listed amount of times.

    By default, it only retries on AssertionErrors, but can be told to retry
    on other errors.

    For example:

        @eventually_consistent.call
        def _():
            results = client.query().fetch(10)
            assert len(results) == 10

    """
    __tracebackhide__ = True
    return retry(
        wait_exponential_multiplier=WAIT_EXPONENTIAL_MULTIPLIER,
        wait_exponential_max=WAIT_EXPONENTIAL_MAX_DEFAULT,
        stop_max_attempt_number=tries,
        retry_on_exception=_retry_on_exception(exceptions))(f)()
Example No. 54
def retry(*args, **kwargs):
    extra = {}

    def log_exception(exc):
        if RETRY_ON_EXCEPTION:
            log.exception('Got exception, retrying %r %r %r %r', extra['func'], extra['args'], extra['kwargs'], exc)
        return RETRY_ON_EXCEPTION
    kwargs.setdefault('retry_on_exception', log_exception)
    kwargs.setdefault('wait_random_min', 100)
    kwargs.setdefault('wait_random_max', 5000)
    retry_func = retrying.retry(*args, **kwargs)

    def deco(func):
        if not RETRY_ORM:
            return func
        extra['func'] = func

        @functools.wraps(func)
        def capture_func(*args, **kwargs):
            extra['args'] = args
            extra['kwargs'] = kwargs
            return func(*args, **kwargs)
        return retry_func(capture_func)
    return deco
Example No. 55
#!/usr/bin/env python
# -*- coding: utf8 -*-

# Copyright (C) 2013-2014 Craig Phillips.  All rights reserved.

"""The GSync Drive module that provides an interface to the Google Drive"""

import os, sys, re, datetime, shelve, time, retrying

from dateutil.tz import tzutc
from contextlib import contextmanager

# Setup default retryer.
retryer = retrying.retry( # pylint: disable-msg=C0103
    wait='fixed_sleep', wait_fixed=60000,
    stop='stop_after_attempt', stop_max_attempt_number=2
)

try:
    import simplejson as json
except ImportError: # pragma: no cover
    import json

import oauth2client.util
oauth2client.util.positional_parameters_enforcement = \
    oauth2client.util.POSITIONAL_IGNORE

from oauth2client.client import OAuth2Credentials
from apiclient.http import MediaUploadProgress
from libgsync.output import verbose, debug
from libgsync.drive.mimetypes import MimeTypes
Example No. 56
from dart.model.exception import DartConditionalUpdateFailedException


def patch_difference(dao, src_model, dest_model, commit=True, conditional=None):
    patch = jsonpatch.make_patch(src_model.to_dict(), dest_model.to_dict())
    return patch_data(dao, src_model.id, patch, commit, conditional)


def _retry_stale_data_error(exception):
    if isinstance(exception, StaleDataError):
        db.session.rollback()
        return True
    return False


retry_stale_data = retry(wait_random_min=1, wait_random_max=500, retry_on_exception=_retry_stale_data_error)


@retry_stale_data
def patch_data(dao, model_id, patch, commit=True, conditional=None):
    dao_instance = dao.query.get(model_id)
    model = dao_instance.to_model()
    if conditional and not conditional(model):
        raise DartConditionalUpdateFailedException('specified conditional failed')
    patched_dict = patch.apply(model.to_dict())
    # dict.iteritems() is Python 2 only; items() works everywhere.
    for k, v in patched_dict.items():
        setattr(dao_instance, k, v)
    if commit:
        db.session.commit()
    return dao_instance.to_model()
Example No. 57
def retry(f):
    return retrying.retry(
        retry_on_exception=retry_if_retry_raised,
        wait='exponential_sleep', wait_exponential_max=1)(f)
Example No. 58
def retry_upon_exception(exc, delay=500, max_delay=2000,
                         max_attempts=cfg.CONF.nsxv.retries):
    return retrying.retry(retry_on_exception=lambda e: isinstance(e, exc),
                          wait_exponential_multiplier=delay,
                          wait_exponential_max=max_delay,
                          stop_max_attempt_number=max_attempts)
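A usage sketch of the factory above: each call builds a decorator specialized for one exception type. max_attempts is hard-coded here because cfg.CONF.nsxv.retries is not available outside the plugin:

import retrying

def retry_upon_exception(exc, delay=500, max_delay=2000, max_attempts=3):
    return retrying.retry(retry_on_exception=lambda e: isinstance(e, exc),
                          wait_exponential_multiplier=delay,
                          wait_exponential_max=max_delay,
                          stop_max_attempt_number=max_attempts)

calls = []

@retry_upon_exception(ConnectionError, delay=10, max_delay=50)
def sync_edge():
    calls.append(1)
    if len(calls) < 2:
        raise ConnectionError("backend busy")
    return "synced"

print(sync_edge(), "after", len(calls), "calls")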
Example No. 59
    def send(self, target, ctxt, message, wait_for_reply=None, timeout=None,
             retry=None):
        with timeutils.StopWatch(duration=timeout) as stopwatch:
            if retry is None:
                retry = self._pika_engine.default_rpc_retry_attempts

            exchange = self._pika_engine.get_rpc_exchange_name(
                target.exchange
            )

            def on_exception(ex):
                if isinstance(ex, pika_drv_exc.ExchangeNotFoundException):
                    # we want to create the exchange: sending to an exchange
                    # that does not exist raises ChannelClosed and forces a
                    # reconnect
                    try:
                        self._declare_rpc_exchange(exchange, stopwatch)
                    except pika_drv_exc.ConnectionException as e:
                        LOG.warning("Problem during declaring exchange. %s", e)
                    return True
                elif isinstance(ex, (pika_drv_exc.ConnectionException,
                                     exceptions.MessageDeliveryFailure)):
                    LOG.warning("Problem during message sending. %s", ex)
                    return True
                else:
                    return False

            retrier = (
                None if retry == 0 else
                retrying.retry(
                    stop_max_attempt_number=(None if retry == -1 else retry),
                    retry_on_exception=on_exception,
                    wait_fixed=self._pika_engine.rpc_retry_delay * 1000,
                )
            )

            if target.fanout:
                return self.cast_all_workers(
                    exchange, target.topic, ctxt, message, stopwatch, retrier
                )

            routing_key = self._pika_engine.get_rpc_queue_name(
                target.topic, target.server, retrier is None
            )

            msg = pika_drv_msg.RpcPikaOutgoingMessage(self._pika_engine,
                                                      message, ctxt)
            try:
                reply = msg.send(
                    exchange=exchange,
                    routing_key=routing_key,
                    reply_listener=(
                        self._reply_listener if wait_for_reply else None
                    ),
                    stopwatch=stopwatch,
                    retrier=retrier
                )
            except pika_drv_exc.ExchangeNotFoundException as ex:
                try:
                    self._declare_rpc_exchange(exchange, stopwatch)
                except pika_drv_exc.ConnectionException as e:
                    LOG.warning("Problem during declaring exchange. %s", e)
                raise ex

            if reply is not None:
                if reply.failure is not None:
                    raise reply.failure

                return reply.result