def process(self, version):
    """Process a single version, figuring out if it should be auto-approved
    and calling the approval code if necessary.

    Updates ``self.stats`` counters ('auto_approved', 'error', plus the
    per-check counters returned in ``info``) as a side effect.
    """
    already_locked = AutoApprovalSummary.check_is_locked(version)
    if not already_locked:
        # Lock the addon for ourselves if possible. Even though
        # AutoApprovalSummary.create_summary_for_version() will
        # call check_is_locked() again later when calculating the verdict,
        # we have to do it now to prevent overwriting an existing lock with
        # our own.
        set_reviewing_cache(version.addon.pk, settings.TASK_USER_ID)
    try:
        log.info('Processing %s version %s...',
                 unicode(version.addon.name),
                 unicode(version.version))
        summary, info = AutoApprovalSummary.create_summary_for_version(
            version, dry_run=self.dry_run)
        log.info('Auto Approval for %s version %s: %s',
                 unicode(version.addon.name),
                 unicode(version.version),
                 summary.get_verdict_display())
        # info values are booleans describing each check; coerce to ints so
        # they can be summed into the stats counters.
        self.stats.update({k: int(v) for k, v in info.items()})
        if summary.verdict == self.successful_verdict:
            self.stats['auto_approved'] += 1
            # Only actually approve when the verdict is the real
            # AUTO_APPROVED one (in dry-run mode the successful verdict is
            # a different constant and nothing is approved).
            if summary.verdict == amo.AUTO_APPROVED:
                self.approve(version)
    except (AutoApprovalNotEnoughFilesError,
            AutoApprovalNoValidationResultError):
        log.info(
            'Version %s was skipped either because it had no '
            'file or because it had no validation attached.', version)
        self.stats['error'] += 1
    finally:
        # Always clear our own lock no matter what happens (but only ours).
        if not already_locked:
            clear_reviewing_cache(version.addon.pk)
def process_addon(self, *, addon, now):
    """Auto-reject the pending-rejection versions of a single add-on.

    The add-on is skipped entirely when its latest listed version is still
    awaiting review (reviewers should decide on it first) or when someone
    currently holds the review lock; otherwise the task-user lock is taken,
    the candidate versions are rejected, and the lock is released again.
    """
    newest = addon.find_latest_version(
        channel=amo.RELEASE_CHANNEL_LISTED)
    # If latest version is unreviewed and not pending
    # rejection, we want to put the delayed rejection of all
    # versions of this addon on hold until a decision has been
    # made by reviewers on the latest one.
    hold_for_review = (
        newest and newest.is_unreviewed and not newest.pending_rejection)
    if hold_for_review:
        log.info(
            'Skipping rejections for add-on %s until version %s '
            'has been reviewed', addon.pk, newest.pk)
        return
    candidates = self.fetch_version_candidates_for_addon(
        addon=addon, now=now)
    if not candidates.exists():
        log.info('Somehow no versions to auto-reject for add-on %s',
                 addon.pk)
        return
    current_lock = get_reviewing_cache(addon.pk)
    if current_lock:
        # Don't auto-reject something that has been locked, even by the
        # task user - wait until it's free to avoid any conflicts.
        log.info(
            'Skipping rejections for add-on %s until lock from user %s '
            'has expired', addon.pk, current_lock)
        return
    set_reviewing_cache(addon.pk, settings.TASK_USER_ID)
    try:
        self.reject_versions(
            addon=addon, versions=candidates, latest_version=newest)
    finally:
        # Always clear our lock no matter what happens.
        clear_reviewing_cache(addon.pk)
def process(self, version):
    """Process a single version, figuring out if it should be auto-approved
    and calling the approval code if necessary.

    The whole summary creation + approval runs inside a single database
    transaction so a failure (e.g. during signing) can't leave the version
    and its AutoApprovalSummary in conflicting states. Updates
    ``self.stats`` counters as a side effect.
    """
    already_locked = AutoApprovalSummary.check_is_locked(version)
    if not already_locked:
        # Lock the addon for ourselves if possible. Even though
        # AutoApprovalSummary.create_summary_for_version() will
        # call check_is_locked() again later when calculating the verdict,
        # we have to do it now to prevent overwriting an existing lock with
        # our own.
        set_reviewing_cache(version.addon.pk, settings.TASK_USER_ID)
    try:
        with transaction.atomic():
            log.info('Processing %s version %s...',
                     six.text_type(version.addon.name),
                     six.text_type(version.version))
            summary, info = AutoApprovalSummary.create_summary_for_version(
                version, dry_run=self.dry_run)
            # info values are booleans describing each check; coerce to
            # ints so they can be summed into the stats counters.
            self.stats.update({k: int(v) for k, v in info.items()})
            if summary.verdict == self.successful_verdict:
                # Only actually approve when the verdict is the real
                # AUTO_APPROVED one (in dry-run mode the successful verdict
                # is a different constant and nothing is approved).
                if summary.verdict == amo.AUTO_APPROVED:
                    self.approve(version)
                self.stats['auto_approved'] += 1
                verdict_string = summary.get_verdict_display()
            else:
                # Unsuccessful verdict: include the human-readable reasons
                # in the log line.
                verdict_string = '%s (%s)' % (
                    summary.get_verdict_display(),
                    ', '.join(
                        summary.verdict_info_prettifier(info)))
            log.info('Auto Approval for %s version %s: %s',
                     six.text_type(version.addon.name),
                     six.text_type(version.version),
                     verdict_string)
        # At this point, any exception should have rolled back the transaction,
        # so even if we did create/update an AutoApprovalSummary instance that
        # should have been rolled back. This ensures that, for instance, a
        # signing error doesn't leave the version and its autoapprovalsummary
        # in conflicting states.
    except (AutoApprovalNotEnoughFilesError,
            AutoApprovalNoValidationResultError):
        log.info(
            'Version %s was skipped either because it had no '
            'files or because it had no validation attached.', version)
        self.stats['error'] += 1
    except SigningError:
        log.info('Version %s was skipped because of a signing error',
                 version)
        self.stats['error'] += 1
    finally:
        # Always clear our own lock no matter what happens (but only ours).
        if not already_locked:
            clear_reviewing_cache(version.addon.pk)
def process(self, version):
    """Process a single version, figuring out if it should be auto-approved
    and calling the approval code if necessary.

    The summary creation + approval runs inside a single database
    transaction so a failure (e.g. during signing) rolls everything back.
    Updates ``self.stats`` counters as a side effect.
    """
    already_locked = AutoApprovalSummary.check_is_locked(version)
    if not already_locked:
        # Lock the addon for ourselves if possible. Even though
        # AutoApprovalSummary.create_summary_for_version() will
        # call check_is_locked() again later when calculating the verdict,
        # we have to do it now to prevent overwriting an existing lock with
        # our own.
        set_reviewing_cache(version.addon.pk, settings.TASK_USER_ID)
    try:
        with transaction.atomic():
            log.info('Processing %s version %s...',
                     six.text_type(version.addon.name),
                     six.text_type(version.version))
            summary, info = AutoApprovalSummary.create_summary_for_version(
                version, dry_run=self.dry_run)
            log.info('Auto Approval for %s version %s: %s',
                     six.text_type(version.addon.name),
                     six.text_type(version.version),
                     summary.get_verdict_display())
            # info values are booleans describing each check; coerce to
            # ints so they can be summed into the stats counters.
            self.stats.update({k: int(v) for k, v in info.items()})
            if summary.verdict == self.successful_verdict:
                # Only actually approve when the verdict is the real
                # AUTO_APPROVED one (in dry-run mode the successful verdict
                # is a different constant and nothing is approved).
                if summary.verdict == amo.AUTO_APPROVED:
                    self.approve(version)
                self.stats['auto_approved'] += 1
        # At this point, any exception should have rolled back the transaction,
        # so even if we did create/update an AutoApprovalSummary instance that
        # should have been rolled back. This ensures that, for instance, a
        # signing error doesn't leave the version and its autoapprovalsummary
        # in conflicting states.
    except (AutoApprovalNotEnoughFilesError,
            AutoApprovalNoValidationResultError):
        log.info(
            'Version %s was skipped either because it had no '
            'files or because it had no validation attached.', version)
        self.stats['error'] += 1
    except SigningError:
        log.info(
            'Version %s was skipped because of a signing error', version)
        self.stats['error'] += 1
    finally:
        # Always clear our own lock no matter what happens (but only ours).
        if not already_locked:
            clear_reviewing_cache(version.addon.pk)
def review_viewing(request):
    """Try to take/refresh the review lock on the add-on given in
    ``request.POST['addon_id']`` for the current user, and report who
    currently holds it.

    Returns a dict with: 'current' (user id holding the lock),
    'current_name' (display name or a lock-limit message), 'is_user'
    (1 = current user holds it, 2 = lock limit reached, 0 = someone else),
    and 'interval_seconds' (polling interval).
    """
    if 'addon_id' not in request.POST:
        return {}
    addon_id = request.POST['addon_id']
    user_id = request.user.id
    current_name = ''
    is_user = 0
    key = get_reviewing_cache_key(addon_id)
    # Per-user cache entry tracking the set of review-lock keys this user
    # currently holds (used to enforce the lock limit).
    user_key = '%s:review_viewing_user:%s' % (settings.CACHE_PREFIX, user_id)
    interval = amo.REVIEWER_VIEWING_INTERVAL

    # Check who is viewing.
    currently_viewing = get_reviewing_cache(addon_id)

    # If nobody is viewing or current user is, set current user as viewing
    if not currently_viewing or currently_viewing == user_id:
        # Get a list of all the reviews this user is locked on.
        review_locks = cache.get_many(cache.get(user_key, {}))
        can_lock_more_reviews = (
            len(review_locks) < amo.REVIEWER_REVIEW_LOCK_LIMIT or
            acl.action_allowed(request,
                               amo.permissions.REVIEWER_ADMIN_TOOLS_VIEW))
        if can_lock_more_reviews or currently_viewing == user_id:
            set_reviewing_cache(addon_id, user_id)
            # Give it double expiry just to be safe.
            cache.set(user_key, set(review_locks) | {key}, interval * 4)
            currently_viewing = user_id
            current_name = request.user.name
            is_user = 1
        else:
            # Lock limit reached: report the task user as the holder so the
            # UI shows the add-on as locked.
            currently_viewing = settings.TASK_USER_ID
            current_name = ugettext('Review lock limit reached')
            is_user = 2
    else:
        # Someone else holds the lock: just report their name.
        current_name = UserProfile.objects.get(pk=currently_viewing).name

    return {
        'current': currently_viewing, 'current_name': current_name,
        'is_user': is_user, 'interval_seconds': interval}
def review_viewing(request):
    """Try to take/refresh the review lock on the add-on given in
    ``request.POST['addon_id']`` for the current user, and report who
    currently holds it.

    Returns a dict with: 'current' (user id holding the lock),
    'current_name' (display name or a lock-limit message), 'is_user'
    (1 = current user holds it, 2 = lock limit reached, 0 = someone else),
    and 'interval_seconds' (polling interval).
    """
    if 'addon_id' not in request.POST:
        return {}
    addon_id = request.POST['addon_id']
    user_id = request.user.id
    viewer_name = ''
    viewer_flag = 0
    lock_key = get_reviewing_cache_key(addon_id)
    user_key = '%s:review_viewing_user:%s' % (settings.CACHE_PREFIX, user_id)
    interval = amo.REVIEWER_VIEWING_INTERVAL
    # Check who is viewing.
    viewer = get_reviewing_cache(addon_id)
    if viewer and viewer != user_id:
        # Someone else already holds the lock: just report their name.
        viewer_name = UserProfile.objects.get(pk=viewer).name
    else:
        # Nobody is viewing (or the current user is) - attempt to take or
        # refresh the lock for the current user.
        # Get a list of all the reviews this user is locked on.
        review_locks = cache.get_many(cache.get(user_key, {}))
        can_lock = (
            len(review_locks) < amo.REVIEWER_REVIEW_LOCK_LIMIT or
            acl.action_allowed(request, amo.permissions.REVIEWS_ADMIN))
        if can_lock or viewer == user_id:
            set_reviewing_cache(addon_id, user_id)
            # Give it double expiry just to be safe.
            cache.set(user_key, set(review_locks) | {lock_key},
                      interval * 4)
            viewer = user_id
            viewer_name = request.user.name
            viewer_flag = 1
        else:
            # Lock limit reached: report the task user as the holder.
            viewer = settings.TASK_USER_ID
            viewer_name = ugettext('Review lock limit reached')
            viewer_flag = 2
    return {'current': viewer,
            'current_name': viewer_name,
            'is_user': viewer_flag,
            'interval_seconds': interval}
def process(self, version):
    """Process a single version, figuring out if it should be auto-approved
    and calling the approval code if necessary.

    Runs inside a single database transaction, with celery tasks queued
    during the transaction and only released on commit, so a failure
    (e.g. during signing) rolls everything back and triggers no tasks.
    Updates ``self.stats`` counters as a side effect.
    """
    already_locked = AutoApprovalSummary.check_is_locked(version)
    if not already_locked:
        # Lock the addon for ourselves if possible. Even though
        # AutoApprovalSummary.create_summary_for_version() will
        # call check_is_locked() again later when calculating the verdict,
        # we have to do it now to prevent overwriting an existing lock with
        # our own.
        set_reviewing_cache(version.addon.pk, settings.TASK_USER_ID)
    # Discard any existing celery tasks that may have been queued before:
    # If there are any left at this point, it means the transaction from
    # the previous loop iteration was not committed and we shouldn't
    # trigger the corresponding tasks.
    _discard_tasks()
    # Queue celery tasks for this version, avoiding triggering them too
    # soon...
    _start_queuing_tasks()
    try:
        with transaction.atomic():
            # ...and release the queued tasks to celery once transaction
            # is committed.
            transaction.on_commit(_send_tasks_and_stop_queuing)
            log.info(
                'Processing %s version %s...',
                str(version.addon.name),
                str(version.version),
            )
            if waffle.switch_is_active('run-action-in-auto-approve'):
                # We want to execute `run_action()` only once.
                summary_exists = AutoApprovalSummary.objects.filter(
                    version=version).exists()
                if summary_exists:
                    log.info('Not running run_action() because it has '
                             'already been executed')
                else:
                    ScannerResult.run_action(version)
            summary, info = AutoApprovalSummary.create_summary_for_version(
                version, dry_run=self.dry_run)
            # info values are booleans describing each check; coerce to
            # ints so they can be summed into the stats counters.
            self.stats.update({k: int(v) for k, v in info.items()})
            if summary.verdict == self.successful_verdict:
                # Only actually approve when the verdict is the real
                # AUTO_APPROVED one (in dry-run mode the successful verdict
                # is a different constant and nothing is approved).
                if summary.verdict == amo.AUTO_APPROVED:
                    self.approve(version)
                self.stats['auto_approved'] += 1
                verdict_string = summary.get_verdict_display()
            else:
                # Unsuccessful verdict: include the human-readable reasons
                # in the log line.
                verdict_string = '%s (%s)' % (
                    summary.get_verdict_display(),
                    ', '.join(summary.verdict_info_prettifier(info)),
                )
            log.info(
                'Auto Approval for %s version %s: %s',
                str(version.addon.name),
                str(version.version),
                verdict_string,
            )
        # At this point, any exception should have rolled back the transaction,
        # so even if we did create/update an AutoApprovalSummary instance that
        # should have been rolled back. This ensures that, for instance, a
        # signing error doesn't leave the version and its autoapprovalsummary
        # in conflicting states.
    except (AutoApprovalNotEnoughFilesError,
            AutoApprovalNoValidationResultError):
        log.info(
            'Version %s was skipped either because it had no '
            'files or because it had no validation attached.',
            version,
        )
        self.stats['error'] += 1
    except SigningError:
        statsd.incr('reviewers.auto_approve.approve.failure')
        log.info('Version %s was skipped because of a signing error',
                 version)
        self.stats['error'] += 1
    finally:
        # Always clear our own lock no matter what happens (but only ours).
        if not already_locked:
            clear_reviewing_cache(version.addon.pk)
        # Stop post request task queue before moving on (useful in tests to
        # leave a fresh state for the next test. Note that we don't want to
        # send or clear queued tasks (they may belong to a transaction that
        # has been rolled back, or they may not have been processed by the
        # on commit handler yet).
        _stop_queuing_tasks()