Example #1
def check_lost(cqtask, *args):
    running_task_ids = backend.get_running_tasks()
    cqtask.log('Running tasks: {}'.format(running_task_ids), logging.DEBUG)
    queued_task_ids = backend.get_queued_tasks()
    cqtask.log('Queued tasks: {}'.format(queued_task_ids), logging.DEBUG)
    queued_tasks = Task.objects.filter(status=Task.STATUS_QUEUED)
    running_tasks = Task.objects.filter(status=Task.STATUS_RUNNING)
    for task in queued_tasks:
        if str(task.id) not in queued_task_ids:
            with cache.lock(str(task.id), timeout=2):
                if task.at_risk == Task.AT_RISK_QUEUED:
                    cqtask.log('Lost in queue: {}'.format(task.id))
                    task.status = Task.STATUS_LOST
                    task._store_logs()
                    task.save(update_fields=['status', 'details'])
                else:
                    task.at_risk = Task.AT_RISK_QUEUED
                    task.save(update_fields=['at_risk'])
    for task in running_tasks:
        if str(task.id) not in running_task_ids:
            with cache.lock(str(task.id), timeout=2):
                if task.at_risk == Task.AT_RISK_RUNNING:
                    cqtask.log('Lost on worker: {}'.format(task.id))
                    task.status = Task.STATUS_LOST
                    task._store_logs()
                    task.save(update_fields=['status', 'details'])
                else:
                    task.at_risk = Task.AT_RISK_RUNNING
                    task.save(update_fields=['at_risk'])
Example #2
def clean_captcha(self):
    cleaned_captcha = int(self.cleaned_data['captcha'])
    if cleaned_captcha != self.result:
        # note: cache.lock() only constructs a lock object here; it is
        # never acquired, so this call does not actually lock anything
        cache.lock('contact_us_captcha')
        raise forms.ValidationError(_('The result is invalid'),
                                    code='invalid_result')
    return cleaned_captcha
Example #3
def websocket_disconnect(message):
    if message.content['code'] in rejection_codes:
        # The connection was closed due to rejection.
        # Nothing was set up for it, so there is nothing to clean up.
        return

    if message.user.is_authenticated:
        session_cache_key = 'session:' + message.user.username
        with cache.lock('lock:' + session_cache_key):
            session = cache.get(session_cache_key)
            if session == message.reply_channel.name:
                cache.delete(session_cache_key)

        player_room_cache_key = 'player-room:' + message.user.username
        with cache.lock('lock:' + player_room_cache_key):
            room_id = cache.get(player_room_cache_key)

            if room_id is not None:
                data = {
                    'nonce': '',
                    'username': message.user.username,
                    'disconnected': True,
                    'reply': message.reply_channel.name,
                }

                Channel('room-leave').send(data)
Example #4
def room_ready_consumer(message):
    data = message.content
    reply_channel = Channel(data['reply'])
    nonce = data['nonce']
    username = data['username']
    ready = data.get('ready', None)

    if ready is None:
        reply_channel.send(
            reply_error('No ready', nonce=nonce, type='room-ready'))
        return

    player_room_cache_key = 'player-room:' + username

    with cache.lock('lock:' + player_room_cache_key):
        room_id = cache.get(player_room_cache_key)

        if room_id is None:
            reply_channel.send(
                reply_error('You are currently not in the room',
                            nonce=nonce,
                            type='room-ready'))
            return

    room_cache_key = 'room:' + room_id
    with cache.lock('lock:' + room_cache_key):
        room_cache = cache.get(room_cache_key)
        if room_cache is None:
            return

        found = False
        for i, player in enumerate(room_cache['players']):
            if player['username'] == username:
                room_cache['players'][i]['ready'] = ready
                found = True
                break

        if not found:
            reply_channel.send(
                reply_error('You are currently not in the room',
                            nonce=nonce,
                            type='room-ready'))
            return

        cache.set(room_cache_key, room_cache)

        response_data = {
            'ready': ready,
        }

        event_data = {
            'player': username,
            'ready': ready,
        }

        reply_channel.send(response(response_data, nonce=nonce))
        Group(room_id).send(event('room-ready', event_data))
Example #5
def test_reset_all(redis_server):
    lock1 = cache.lock("foobar1")
    lock2 = cache.lock("foobar2")
    lock1.acquire(blocking=False)
    lock2.acquire(blocking=False)
    cache.reset_all()
    lock1 = cache.lock("foobar1")
    lock2 = cache.lock("foobar2")
    lock1.acquire(blocking=False)
    lock2.acquire(blocking=False)
    lock1.release()
    lock2.release()
Example #6
    def clean_command_cache():
        """Clean up all command information.

        Removes commands that have timed out without responding,
        and returns their details.

        :return:
        """
        timeout_command = {}
        alive_command_lock = cache.lock(ALIVE_COMMANDS_LOCK)
        # Retrieve and remove timeout command instance record
        try:
            alive_command_lock.acquire()

            alive_commands = cache.get(ALIVE_COMMANDS)
            if alive_commands is None:
                alive_commands = dict()
            alive_commands_backup = copy.deepcopy(alive_commands)
            now_timestamp = time.time()
            for command_uuid, timestamps in alive_commands_backup.items():
                for t in timestamps:
                    if now_timestamp - t > adapter_conf.COMMAND_TIMEOUT:
                        # remove t from alive_commands
                        alive_commands[command_uuid].remove(t)

                        if command_uuid not in timeout_command:
                            timeout_command[command_uuid] = dict()
                        timeout_command[command_uuid][t] = None

            cache.set(ALIVE_COMMANDS, alive_commands, timeout=None)
        finally:
            alive_command_lock.release()

        # clean all timeout command instance
        for command_uuid, command_set in list(timeout_command.items()):
            timestamps = list(command_set.keys())
            for t in timestamps:
                command_info_lock_name = COMMAND_WAIT_TERMINAL_LOCK_TEMPLATE.format(
                    command_id=command_uuid, timestamp=t)
                command_info_name = COMMAND_WAIT_TERMINAL_TEMPLATE.format(
                    command_id=command_uuid, timestamp=t)
                command_info_lock = cache.lock(command_info_lock_name)
                try:
                    command_info_lock.acquire()
                    command_info = cache.get(command_info_name)
                    if command_info is not None:
                        timeout_command[command_uuid][t] = command_info

                    cache.delete(command_info_name)
                finally:
                    command_info_lock.release()

        return timeout_command
Example #7
    def run(self):
        log_metric('transmission.info', tags={'method': 'async_task.run', 'module': __name__})
        from .models import JobState
        if self.async_job.state not in (JobState.RUNNING, JobState.COMPLETE):
            LOG.debug(f'Job {self.async_job.id} running with status {self.async_job.state}')
            self.async_job.last_try = datetime.utcnow().replace(tzinfo=pytz.UTC)
            self.async_job.save()

            wallet_id = self.async_job.parameters['signing_wallet_id']
            wallet_lock = cache.lock(wallet_id, timeout=settings.WALLET_TIMEOUT)
            if wallet_lock.acquire(blocking=False):  # Only one concurrent tx per wallet
                # Generate, sign, and send tx via RPC
                try:
                    LOG.debug(f'Lock on {wallet_id} acquired, attempting to send transaction')
                    self.async_job.wallet_lock_token = wallet_lock.local.token.decode()
                    self._send_transaction(*self._sign_transaction(self._get_transaction()))
                except Exception as exc:
                    # If there was an exception, release the lock and re-raise
                    wallet_lock.release()
                    raise exc
                LOG.debug(f'Transaction submitted via AsyncJob {self.async_job.id}')
                log_metric('transmission.info', tags={'method': 'async_job_fire', 'module': __name__})
            else:  # Could not lock on wallet, transaction already in progress
                log_metric('transmission.error', tags={'method': 'async_task.run', 'module': __name__,
                                                       'code': 'wallet_in_use'})
                raise WalletInUseException(f'Wallet {wallet_id} is currently already in use.')
Example #8
def get_squarelet_access_token():
    """Get an access token for squarelet"""

    # if not in cache, lock, acquire token, put in cache
    access_token = cache.get("squarelet_access_token")
    if access_token is None:
        with cache.lock("squarelt_access_token"):
            access_token = cache.get("squarelet_access_token")
            if access_token is None:
                token_url = f"{settings.SQUARELET_URL}/openid/token"
                auth = (
                    settings.SOCIAL_AUTH_SQUARELET_KEY,
                    settings.SOCIAL_AUTH_SQUARELET_SECRET,
                )
                data = {"grant_type": "client_credentials"}
                headers = {
                    "X-Bypass-Rate-Limit": settings.BYPASS_RATE_LIMIT_SECRET
                }
                logger.info(token_url)
                resp = requests.post(token_url,
                                     data=data,
                                     auth=auth,
                                     headers=headers)
                resp.raise_for_status()
                resp_json = resp.json()
                access_token = resp_json["access_token"]
                # expire a few seconds early to ensure it's not expired
                # when we try to use it
                expires_in = int(resp_json["expires_in"]) - 10
                cache.set("squarelet_access_token", access_token, expires_in)
    return access_token
Example #9
def viewcountupdate():
    """ This task runs hourly and pushes the cached view
    counters into the database using a single transaction.
    """

    # This is the prefix we are going to use to distinguish the cache keys
    # we need for the view counters
    PREFIX = settings.CACHE_PREFIX
    logger.warn('Entering viewcountupdate...')
    with redis_cache.lock('lock'):
        keys = redis_cache.keys(PREFIX + "*")
        if keys:
            try:
                with transaction.atomic():
                    for key in keys:
                        post_id = key.split('_')[1]
                        hourly_viewcount = redis_cache.get(key)
                        try:
                            post = Posts.objects.get(id=post_id)
                        except ObjectDoesNotExist:
                            continue
                        old_viewcount = post.viewcount
                        post.viewcount = post.viewcount + hourly_viewcount
                        new_viewcount = post.viewcount
                        logger.warn('Updated: id = {0}. Oldcount = {1} -> Newcount = {2} '.format(post_id, old_viewcount, new_viewcount))
                        post.save(update_fields=['viewcount'])
            except IntegrityError:
                # transaction.atomic() rolls back automatically on exception
                logger.warn('Rolling back...')
            redis_cache.delete_pattern(PREFIX + "*")
    logger.warn('Exiting viewcountupdate...')
Example #10
def requeue_tasks(*args, **kwargs):
    from cq.models import Task
    lock = 'RETRY_QUEUED_TASKS'
    with cache.lock(lock, timeout=2):
        # Find all Queued tasks and set them to Retry, since they get stuck after a reboot
        Task.objects.filter(status=Task.STATUS_QUEUED).update(
            status=Task.STATUS_RETRY)
Example #11
def update_bubble_on_cache(user_id, bubble_id, content):
    with cache.lock("bubblelock" + str(bubble_id)):
        editor = cache.get(key_blh(bubble_id))
        if editor is None or editor != str(user_id):
            return False
        cache.set(key_bcon(bubble_id), content, timeout=None)
        return True
Example #12
def handle(self, **kwargs):
    with cache.lock(self.cache_lock_key()):
        self.read_model(**kwargs)
        self.model.handle_event(self.event)
        self.post_handle()
        cache.set(self.model_cache_key(), self.model)
    return self.event
Example #13
    def test_lock(self):
        lock = cache.lock("foobar")
        lock.acquire(blocking=True)

        assert cache.has_key("foobar")
        lock.release()
        assert not cache.has_key("foobar")
Example #14
def create_tools_access_iam_role_task(user_id):
    with cache.lock(
            "create_tools_access_iam_role_task",
            blocking_timeout=0,
            timeout=360,
    ):
        _do_create_tools_access_iam_role(user_id)
Example #15
def get_article_by_id(request, article_id):

    article = Article.objects.filter(id=article_id).first()
    if article:

        comments = article.comment_set.all()
        comment_size = comments.count()
        reply_size = 0
        for comment in comments:
            reply_size += comment.replies.all().count()

        key = 'lock:%s:%s' % ('article', article_id)
        with cache.lock(key):
            article.viewTime += 1
            if article.viewTime > 100:
                article.recommend = True
            article.save(update_fields=['viewTime', 'recommend'])

    else:
        return None

    context = {
        'article': article,
        'total_num': comment_size + reply_size,
        'inclusion_tag': article.tags.all()
    }

    return render(request, 'blog/article/detail.html', context)
Example #16
    def run_pending(self):
        """
        Run all jobs that are scheduled to run.

        Please note that it is *intended behavior that run_pending()
        does not run missed jobs*. For example, if you've registered a job
        that should run every minute and you only call run_pending()
        in one hour increments then your job won't be run 60 times in
        between but only once.
        """
        with cache.lock("qa_test_platform_get"):
            runnable_jobs = (job for job in self.getJobs() if job.should_run)
            with self.mutex:
                for job in sorted(runnable_jobs):
                    now = datetime.datetime.now()
                    if job.last_run is None:
                        self._run_job(job)
                        job.last_run = datetime.datetime.now()
                        # print("上次执行时间为空,执行!")
                        continue
                    if (now - job.last_run).total_seconds() > 50:
                        job.last_run = datetime.datetime.now()
                        self._run_job(job)
                        # print("执行任务")
                    else:
                        # print("上次执行时间非常近,跳过")
                        pass
Example #17
def automate():
    lock = cache.lock(PURLOVIA_LOCK_NAME)

    acquired = lock.acquire(blocking=True, timeout=1)

    if acquired:
        try:
            # make sure config is loaded at runtime
            # pylint: disable=import-outside-toplevel
            from automate.run import run
            import ue.context

            ue.context.disable_metadata()

            config = get_global_config()
            logger.info("DEV mode enabled")
            config.git.UseIdentity = False
            config.git.SkipCommit = True
            config.git.SkipPush = True
            config.errors.SendNotifications = False
            config.dev.DevMode = True

            if DO_SIMPLE_RUN:
                config.run_sections = {"asb.species": True}

            run(config)
        finally:
            lock.release()
    else:
        raise AlreadyRunning()
Example #18
def screenshot(
    task,
    url,
    width=settings.BASE_WEBPAGE_PREVIEW_WIDTH,
    height=settings.BASE_WEBPAGE_PREVIEW_HEIGHT,
    lifetime=settings.BASE_WEBPAGE_PREVIEW_LIFETIME,
):
    url_id = sha256()
    url_id.update(url.encode("utf-8"))
    url_id.update(bytes(width))
    url_id.update(bytes(height))
    key = url_id.hexdigest()
    logger.info(f"Screenshot for {url} @ {width}x{height}: {key}")
    if key in cache:
        logger.info(f"Found {key} in cache.")
        return key
    logger.info(f"Locking {key}")
    # use a lock key distinct from the cache key: the lock itself is stored
    # under its name, so locking on `key` would make the `key in cache` check
    # above succeed for concurrent callers before the image exists
    lock = cache.lock("lock:" + key)
    lock.acquire()
    try:
        if key in cache:  # another worker may have rendered it while we waited
            return key
        logger.info("Starting WebEngineScreenshot app")
        parent_conn, child_conn = Pipe()
        p = Process(target=WebpageTasks.worker, args=(url, width, height, child_conn))
        p.start()
        image = parent_conn.recv()
        p.join()
        if not image:
            logger.info("WebEngineScreenshot app returned nothing")
            return None
        logger.info("Writing WebEngineScreenshot app result to cache")
        cache.set(key, image, timeout=lifetime)
        return key
    finally:
        # release the lock on all exit paths
        logger.info("Removing WebEngineScreenshot app singleton")
        lock.release()
Example #19
def load_bubble_to_cache(bubble, del_flag=False):
    with cache.lock("bubblelock" + str(bubble.id)):
        cache.set(key_blh(bubble.id),
                  str(bubble.edit_lock_holder.id),
                  timeout=None)
        cache.set(key_bcon(bubble.id), bubble.content, timeout=None)
        cache.set(key_bdel(bubble.id), str(del_flag), timeout=None)
Example #20
def notify_players_round_start(round_, **kwargs):
    im_msg = 'You have been paired for Round {round} in {season}.\n' \
           + '<@{white}> (_white pieces_, {white_tz}) vs <@{black}> (_black pieces_, {black_tz})\n' \
           + 'Send a direct message to your opponent, <@{opponent}>, within {contact_period}.\n' \
           + 'When you have agreed on a time, post it in {scheduling_channel_link}.'

    mp_msg = 'You have been paired for Round {round} in {season}.\n' \
           + '<@{white}> (_white pieces_, {white_tz}) vs <@{black}> (_black pieces_, {black_tz})\n' \
           + 'Message your opponent here within {contact_period}.\n' \
           + 'When you have agreed on a time, post it in {scheduling_channel_link}.'

    li_subject = 'Round {round} - {league}'
    li_msg = 'You have been paired for Round {round} in {season}.\n' \
           + '@{white} (white pieces, {white_tz}) vs @{black} (black pieces, {black_tz})\n' \
           + 'Message your opponent on Slack within {contact_period}.\n' \
           + '{slack_url}\n' \
           + 'When you have agreed on a time, post it in {scheduling_channel}.'

    season = round_.season
    league = season.league
    if not league.enable_notifications:
        return
    if not round_.publish_pairings or round_.is_completed:
        logger.error('Could not send round start notifications due to incorrect round state: %s' % round_)
        return
    unavailable_players = {pa.player for pa in PlayerAvailability.objects.filter(round=round_, is_available=False)
                                                      .select_related('player').nocache()}

    with cache.lock('round_start'):
        for pairing in round_.pairings.select_related('white', 'black'):
            if season.alternates_manager_enabled() and (pairing.white in unavailable_players or pairing.black in unavailable_players):
                # Don't send a notification, since the alternates manager will handle it
                continue
            send_pairing_notification('round_started', pairing, im_msg, mp_msg, li_subject, li_msg)
            time.sleep(1)
Example #21
    def generate_batches(self, worker_count, batch_by):
        batches = []
        with cache.lock(self.start_lock_key, timeout=10):
            start_datetime = self.get_next_batch_start()
            if not start_datetime:
                if AuditcareMigrationMeta.objects.count() != 0:
                    raise MissingStartTimeError()
                # For first run set the start_datetime to the event_time of the first record
                # in the SQL. If there are no records in SQL, start_time would be set as
                # current time
                start_datetime = get_sql_start_date()
                if not start_datetime:
                    start_datetime = datetime.now()

            if start_datetime < CUTOFF_TIME:
                logger.info("Migration Successfull")
                return

            start_time = start_datetime
            end_time = None

            for index in range(worker_count):
                end_time = _get_end_time(start_time, batch_by)
                if end_time < CUTOFF_TIME:
                    break
                batches.append([start_time, end_time])
                start_time = end_time
            self.set_next_batch_start(end_time)

        return batches
Example #22
def initialize_in_judge(self):
    lock = cache.lock("testcase_{}_{}_{}_initialize_in_judge".format(
        self.problem.problem.pk, self.problem.pk, self.pk), timeout=60)
    if lock.acquire(blocking=False):
        try:
            refreshed_obj = type(self).objects.with_transaction(self._transaction).get(pk=self.pk)
            if refreshed_obj.judge_initialization_successful:
                return
            if self.judge_initialization_task_id:
                result = AsyncResult(self.judge_initialization_task_id)
                if result.failed() or result.successful():
                    self.judge_initialization_task_id = None
                    self.save()
                elif result.state == "PENDING":
                    result.revoke()
                    self.judge_initialization_task_id = None
                    self.save()
                else:
                    logger.debug("Waiting for task {} in state {}".format(
                        self.judge_initialization_task_id, result.state
                    ))
            if not self.judge_initialization_task_id:
                self.judge_initialization_task_id = TestCaseJudgeInitialization().delay(self).id
                self.save()
        finally:
            lock.release()
Example #23
def restart_room(room_id):
    with cache.lock('lock:room:' + room_id):
        room = cache.get('room:' + room_id)
        if room is None:
            return
        room = reset_room_data(room)
        cache.set('room:' + room_id, room)
Example #24
def try_regenerate_cache(
    key: str,
    old_key: str,
    generator: Callable,
    timeout: int,
    version=None,
) -> Any:
    with cache.lock(f"lock.cachegenerate.{key}",
                    timeout=CACHE_LOCK_TIMEOUT,
                    blocking_timeout=None):
        generated = generator()
        if generated is None:
            # TODO: Use some empty object instead which can be used to
            #       recognize None was cached
            warnings.warn(
                "Attempted to set 'None' to cache, replacing with empty string",
            )
            generated = ""
        cache.set(key, generated, timeout=timeout, version=version)
        cache.set(
            old_key,
            generated,
            timeout=None,
            version=version,
        )
        return generated
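A minimal usage sketch for try_regenerate_cache above (the key names and the render_front_page generator are illustrative assumptions, not from the original source): the lock ensures only one caller regenerates the value at a time, while the never-expiring copy under old_key remains available as a stale fallback.

# Hypothetical usage; key names and the generator are illustrative.
front_page = try_regenerate_cache(
    key="front-page.rendered",
    old_key="front-page.rendered.stale",
    generator=render_front_page,  # assumed expensive builder function
    timeout=300,  # fresh copy lives five minutes; stale copy has no timeout
)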
Example #25
def process_block_queue():
    """
    Process block queue

    - this is for primary validators only
    """
    with cache.lock(BLOCK_QUEUE_CACHE_LOCK_KEY):
        block_queue = cache.get(BLOCK_QUEUE) or []  # avoid iterating over None below

        if block_queue:
            cache.set(BLOCK_QUEUE, [], None)

    for block in block_queue:
        is_valid, sender_account_balance = is_block_valid(block=block)

        if not is_valid:
            continue

        existing_accounts, new_accounts = get_updated_accounts(
            sender_account_balance=sender_account_balance,
            validated_block=block)
        update_accounts_cache(existing_accounts=existing_accounts,
                              new_accounts=new_accounts)
        update_accounts_table(existing_accounts=existing_accounts,
                              new_accounts=new_accounts)
        confirmation_block, head_block_hash = sign_block_to_confirm_and_update_head_block_hash(
            block=block,
            existing_accounts=existing_accounts,
            new_accounts=new_accounts)
        add_valid_confirmation_block(confirmation_block=confirmation_block)
        send_confirmation_block_to_all_confirmation_validators.delay(
            confirmation_block=confirmation_block)
Example #26
def run(self, routine_id: int):
    lock_key = f"lock-{self.__class__.__name__}-{routine_id}"
    routine_lock = cache.lock(key=lock_key,
                              timeout=self.LOCK_EXPIRATION,
                              blocking_timeout=self.WAIT_FOR_LOCK)
    with routine_lock:
        self._run(routine_id=routine_id)
Example #27
def message_post_save(sender, instance, **kwargs):
    LOG.debug(f'Message post save with message {instance.id}.')
    log_metric('transmission.info',
               tags={
                   'method': 'jobs.message_post_save',
                   'module': __name__
               })
    try:
        wallet_lock = cache.lock(
            instance.async_job.parameters['signing_wallet_id'])
        wallet_lock.local.token = instance.async_job.wallet_lock_token
        wallet_lock.release()
    except LockError:
        LOG.warning(
            f'Wallet {instance.async_job.parameters["signing_wallet_id"]} was not locked when '
            f'job {instance.async_job.id} received message {instance.id}')
    if instance.type == MessageType.ERROR:
        # Generic error handling
        LOG.error(
            f"Transaction failure for AsyncJob {instance.async_job.id}: {instance.body}"
        )
        instance.async_job.state = JobState.FAILED
        instance.async_job.save()

    # Update has been received, send signal to listener
    LOG.debug(
        f'Update has been received, and signal sent to listener {instance.id}.'
    )
    job_update.send(sender=Shipment,
                    message=instance,
                    shipment=instance.async_job.shipment)
Example #28
def room_reset_consumer(message):
    data = message.content
    room_id = data['room_id']

    room_cache_key = 'room:' + room_id
    with cache.lock('lock:' + room_cache_key):
        room_cache = cache.get(room_cache_key)

        if room_cache is None:
            return

        if len(room_cache['players']) == 0:
            return

        new_room_data = reset_room_data(room_cache)

        cache.set(room_cache_key, new_room_data)

        event_data = {
            'room_id':
            room_id,
            'players': [{
                'username': p['username'],
                'ready': p['ready']
            } for p in new_room_data['players']],
        }

        Group(room_id).send(event('room-reset', event_data))
Example #29
@contextmanager  # required for the yield-based with-helper below
def cache_based_lock(lock_name, timeout=3600, blocking_timeout=None):
    if hasattr(cache, 'lock'):
        with cache.lock(lock_name,
                        timeout=timeout,
                        blocking_timeout=blocking_timeout):
            yield None
    else:
        # cache does not support locking, so we use a poor man's solution
        is_free = cache.add(lock_name, 1, timeout)
        start_time = time()
        if not is_free:
            logger.warning(
                "Task is locked. Waiting for lock release of '{}'".format(
                    lock_name))
        while not is_free:
            if blocking_timeout and time() - start_time > blocking_timeout:
                raise LockError('Blocking timeout')
            sleep(0.5)
            is_free = cache.add(lock_name, 1, timeout)
        logger.info("Task is free to run")
        lock_time = time()
        try:
            yield None
        finally:
            if time() < lock_time + timeout:
                # the lock has not yet timed out, so it should be ours
                cache.delete(lock_name)
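A brief usage sketch for cache_based_lock (the lock name and the update_rates function are illustrative assumptions): with the contextmanager decorator applied as above, the body runs only once the lock is held, whether the backend supports native locks or falls back to cache.add().

# Hypothetical usage; the lock name and update_rates() are illustrative.
def refresh_exchange_rates():
    with cache_based_lock('refresh-exchange-rates', timeout=600,
                          blocking_timeout=30):
        # Only one worker at a time reaches this point; others wait up to
        # blocking_timeout seconds before a LockError is raised.
        update_rates()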
Example #30
    def set_dns_record(self, attempts=4, retry_delay=1, **record):
        """
        Set a DNS record - Automatically create a new version, update with the change & activate
        """
        if 'ttl' not in record.keys():
            record['ttl'] = 1200

        with cache.lock(
                'gandi_set_dns_record'):  # Only do one DNS update at a time
            for i in range(1, attempts + 1):
                try:
                    logger.info(
                        'Setting DNS record: %s (attempt %d out of %d)',
                        record, i, attempts)
                    new_zone_version = self.create_new_zone_version()
                    self.delete_dns_record(new_zone_version, record['name'])
                    returned_record = self.add_dns_record(
                        new_zone_version, record)
                    self.set_zone_version(new_zone_version)
                    break
                except xmlrpc.client.Fault:
                    if i == attempts:
                        raise
                    time.sleep(retry_delay)
                    retry_delay *= 2
        return returned_record
Example #31
def _dns_operation(self,
                   callback,
                   domain,
                   log_msg,
                   attempts=4,
                   retry_delay=1):
    """
    Encapsulate logic that is common to high-level DNS operations: grab the global lock, get the
    zone_id for a domain, create a new zone version, activate the zone version after successful
    update, and retry the whole procedure multiple times if necessary.
    """
    with cache.lock(
            'gandi_set_dns_record'):  # Only do one DNS update at a time
        for i in range(1, attempts + 1):
            try:
                logger.info('%s (attempt %d out of %d)', log_msg, i,
                            attempts)
                zone_id = self.get_zone_id(domain)
                new_zone_version = self.create_new_zone_version(zone_id)
                result = callback(zone_id, new_zone_version)
                self.set_zone_version(zone_id, new_zone_version)
                break
            except (xmlrpc.client.Fault, TimeoutError):
                if i == attempts:
                    raise
                time.sleep(retry_delay)
                retry_delay *= 2
    return result
Example #32
def _initialize_in_judge(self):
    if self.judge_initialization_successful:  # Optimization
        return
    lock = cache.lock("problem_{}_{}_initialize_in_judge".format(
        self.problem.pk, self.pk),
                      timeout=60)
    if lock.acquire(blocking=False):
        try:
            refreshed_obj = type(self).objects.with_transaction(
                self._transaction).get(pk=self.pk)
            if refreshed_obj.judge_initialization_successful:
                return
            self.judge_initialization_successful, self.judge_initialization_message = \
                self.get_task_type().initialize_problem(
                    problem_code=self._get_judge_code(),
                    code_name=self.problem_data.name,
                    time_limit=self.problem_data.time_limit,
                    memory_limit=self.problem_data.memory_limit,
                    task_type_parameters=self.problem_data.task_type_parameters,
                    helpers=[
                        (grader.name, grader.code) for grader in self.grader_set.all()
                    ],
                )
            self.judge_initialization_task_id = None
            self.save()
        finally:
            lock.release()
    else:
        raise Retry()
Example #33
def inner(*args, **kwargs):
    key = func.__qualname__
    if key_maker:
        key += key_maker(*args, **kwargs)
    else:
        key += str(args) + str(kwargs)
    lock_key = hashlib.md5(force_bytes(key)).hexdigest()
    with cache.lock(lock_key):
        return func(*args, **kwargs)
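The inner wrapper above closes over func and key_maker, so it presumably sits inside a decorator factory. A minimal sketch of that assumed outer shape (the name locked and its structure are illustrative, not from the original source):

# Hypothetical decorator factory around the `inner` wrapper shown above.
def locked(key_maker=None):
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            key = func.__qualname__
            if key_maker:
                key += key_maker(*args, **kwargs)
            else:
                key += str(args) + str(kwargs)
            # hash so arbitrary arguments still form a valid lock key
            lock_key = hashlib.md5(force_bytes(key)).hexdigest()
            with cache.lock(lock_key):
                return func(*args, **kwargs)
        return inner
    return decorator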
Example #34
    def set_dns_record(self, **record):
        """
        Set a DNS record - Automatically create a new version, update with the change & activate
        """
        if 'ttl' not in record.keys():
            record['ttl'] = 1200

        with cache.lock('gandi_set_dns_record'): # Only do one DNS update at a time
            logger.info('Setting DNS record: %s', record)
            new_zone_version = self.create_new_zone_version()
            self.delete_dns_record(new_zone_version, record['name'])
            returned_record = self.add_dns_record(new_zone_version, record)
            self.set_zone_version(new_zone_version)
        return returned_record
Example #35
def increasecnt(post_id):
    """ This is being called on each article request
    and asynchronously increases a view counter by 1.
    """
    # A lock is being used to make this "process" thread-safe
    PREFIX = settings.CACHE_PREFIX
    endkey = prefixing(PREFIX, post_id)
    with redis_cache.lock('lock'):
        if redis_cache.get(endkey):
            redis_cache.incr(endkey)
        else:
            # Initialize the counter with a 5400-second TTL
            redis_cache.set(endkey, 1, timeout=5400)
        logger.warn("New View Count For id = {0} is {1} with TTL = {2}".format(post_id, redis_cache.get(endkey), redis_cache.ttl(endkey)))
Example #36
def resolve_host(self, host):
    if self.search_ec2_public_dns:
        # We don't want all celery workers to go crazy and update the cache
        # at the same time, so we use a distributed lock here
        with cache.lock('japper:consul:search_ec2_public_dns',
                expire=settings.EC2_DNS_LOCK_EXPIRE):
            # Look in cache
            host_cache_key = self.ec2_cache_key(host)
            resolved_host = cache.get(host_cache_key)
            # Update cache if host is not in it
            if resolved_host is None:
                self.update_ec2_names_cache()
            # Search again in cache
            resolved_host = cache.get(host_cache_key)
            if resolved_host is not None:
                return resolved_host
    return host
Example #37
def get_chrome_resource():
    """
    Return chrome resource lock and user data path
    If no resource just return a temp path
    """
    for idx, lock_name in enumerate(settings.CHROME_RESOURCE_LOCKS):
        lock = cache.lock(lock_name, expire=settings.CHROME_RESOURCE_LOCK_TIME)
        if lock.acquire(blocking=False):
            return lock, settings.CHROME_USER_DATA_DIR_POOL[idx]

    while True:
        tmp_name = "chrome_user_data_" + str(random.randint(0, 999999)) + str(int(time.time()))
        tmp_path = os.path.join("/tmp/", tmp_name)
        if os.path.exists(tmp_path):
            continue

        os.mkdir(tmp_path)
        return None, tmp_path
Example #38
    def run_playbook(self, ansible_vars):
        """
        Run the playbook to perform the server reconfiguration.

        This is factored out into a separate method so it can be mocked out in the tests.
        """
        playbook_path = pathlib.Path(settings.SITE_ROOT) / "playbooks/load_balancer_conf/load_balancer_conf.yml"
        with cache.lock("load_balancer_reconfigure:{}".format(self.domain), timeout=900):
            returncode = ansible.capture_playbook_output(
                requirements_path=str(playbook_path.parent / "requirements.txt"),
                inventory_str=self.domain,
                vars_str=ansible_vars,
                playbook_path=str(playbook_path),
                username=self.ssh_username,
                logger_=self.logger,
            )
        if returncode != 0:
            self.logger.error("Playbook to reconfigure load-balancing server %s failed.", self)
            raise ReconfigurationFailed
Example #39
def _dns_operation(self, callback, domain, log_msg, attempts=4, retry_delay=1):
    """
    Encapsulate logic that is common to high-level DNS operations: grab the global lock, get the
    zone_id for a domain, create a new zone version, activate the zone version after successful
    update, and retry the whole procedure multiple times if necessary.
    """
    with cache.lock('gandi_set_dns_record'): # Only do one DNS update at a time
        for i in range(1, attempts + 1):
            try:
                logger.info('%s (attempt %d out of %d)', log_msg, i, attempts)
                zone_id = self.get_zone_id(domain)
                new_zone_version = self.create_new_zone_version(zone_id)
                result = callback(zone_id, new_zone_version)
                self.set_zone_version(zone_id, new_zone_version)
                break
            except xmlrpc.client.Fault:
                if i == attempts:
                    raise
                time.sleep(retry_delay)
                retry_delay *= 2
    return result
Example #40
    def set_dns_record(self, attempts=4, retry_delay=1, **record):
        """
        Set a DNS record - Automatically create a new version, update with the change & activate
        """
        if 'ttl' not in record.keys():
            record['ttl'] = 1200

        with cache.lock('gandi_set_dns_record'): # Only do one DNS update at a time
            for i in range(1, attempts + 1):
                try:
                    logger.info('Setting DNS record: %s (attempt %d out of %d)', record, i, attempts)
                    new_zone_version = self.create_new_zone_version()
                    self.delete_dns_record(new_zone_version, record['name'])
                    returned_record = self.add_dns_record(new_zone_version, record)
                    self.set_zone_version(new_zone_version)
                    break
                except xmlrpc.client.Fault:
                    if i == attempts:
                        raise
                    time.sleep(retry_delay)
                    retry_delay *= 2
        return returned_record
Example #41
def search_public_dns(private_dns, aws_region, aws_access_key_id,
                      aws_secret_access_key):
    '''
    Retrieve the public DNS name of an EC2 instance from its private DNS name.

    Return the public DNS name, or None if it can't be found for these AWS
    credentials.
    '''
    # Multiple threads/processes might call this function at the same time; we
    # use a distributed lock here to avoid querying the EC2 API more than once
    with cache.lock('japper:search_ec2_public_dns',
                    expire=settings.EC2_DNS_LOCK_EXPIRE):
        # Look in cache
        host_cache_key = _get_cache_key(aws_region, private_dns)
        resolved_host = cache.get(host_cache_key)
        # Update cache if host is not in it
        if resolved_host is None:
            _update_ec2_names_cache(aws_region, aws_access_key_id,
                                    aws_secret_access_key)
        # Search again in cache
        resolved_host = cache.get(host_cache_key)
        if resolved_host is not None:
            return resolved_host
Example #42
def test_django_works(redis_server):
    with cache.lock('whateva'):
        pass