def test_lock_timeout():
    """A lock's timeout decides whether we outwait a competing holder."""
    with _run_lock_holding_process('test-lock-lock3', sleep=2):
        # The child holds the lock for 2 seconds; a 3-second timeout is
        # long enough for the parent process to wait it out and acquire.
        with utils.lock(
                settings.TMP_PATH, 'test-lock-lock3', timeout=3) as acquired:
            assert acquired
    with _run_lock_holding_process('test-lock-lock3', sleep=2):
        # A 1-second timeout expires before the 2-second holder releases,
        # so acquisition fails.
        with utils.lock(
                settings.TMP_PATH, 'test-lock-lock3', timeout=1) as acquired:
            assert not acquired
def test_prevent_multiple_runs_in_parallel(self):
    """The command exits immediately when its lock is already held."""
    # Grab the lock ourselves first; while we hold it the command must
    # bail out without touching anything.
    with lock(settings.TMP_PATH, auto_approve.LOCK_NAME):
        call_command('auto_approve')
        assert self.log_final_summary_mock.call_count == 0
        assert self.file.reload().status == amo.STATUS_AWAITING_REVIEW
def handle(self, *args, **kwargs):
    """Command entry point."""
    self.dry_run = kwargs.get('dry_run', False)
    current_time = datetime.now()
    # Single-instance guard: acquire the lock before doing anything so
    # two copies of this command can't run concurrently.
    with lock(settings.TMP_PATH, LOCK_NAME) as acquired:
        if not acquired:
            log.error('auto-reject lock present, aborting')
            return
        for addon in self.fetch_addon_candidates(now=current_time):
            self.process_addon(addon=addon, now=current_time)
def extract(self):
    """
    Will make all the directories and extract the files.
    Raises error on nasty files.

    :returns: `True` if successfully extracted,
              `False` in case of an existing lock.
    """
    # One lock per file so two workers never extract the same file at
    # the same time. A short timeout means a concurrent extraction in
    # progress makes us return False quickly instead of blocking.
    lock_name = f'file-viewer-{self.file.pk}'
    with lock(settings.TMP_PATH, lock_name, timeout=2) as lock_attained:
        if lock_attained:
            if self.is_extracted():
                # Be vigilant with existing files. It's better to delete
                # and re-extract than to trust whatever we have
                # lying around.
                task_log.warning(
                    'cleaning up %s as there were files lying around'
                    % self.dest)
                self.cleanup()
            try:
                os.makedirs(self.dest)
            except OSError as err:
                task_log.error(
                    'Error (%s) creating directories %s'
                    % (err, self.dest))
                raise
            if self.is_search_engine() and self.src.endswith('.xml'):
                # Use context managers so both handles are closed
                # deterministically — the previous version leaked them
                # (left them open until garbage collection).
                with storage.open(self.src, 'rb') as src_file:
                    dest_path = os.path.join(self.dest, self.file.filename)
                    with open(dest_path, 'wb') as dest_file:
                        shutil.copyfileobj(src_file, dest_file)
            else:
                try:
                    extracted_files = extract_xpi(self.src, self.dest)
                    self._verify_files(extracted_files)
                except Exception as err:
                    task_log.error(
                        'Error (%s) extracting %s' % (err, self.src))
                    raise
        # False when the lock could not be attained (another process is
        # already extracting this file).
        return lock_attained
def handle(self, *args, **options):
    """Command entry point."""
    # Feature-flagged: do nothing unless the waffle switch is on.
    if not waffle.switch_is_active(SWITCH_NAME):
        log.info(
            'Not running git_extraction command because switch "{}" is '
            'not active.'.format(SWITCH_NAME))
        return
    # Single-instance guard: acquire the lock before doing anything so
    # two copies of this command can't run concurrently.
    with lock(settings.TMP_PATH, LOCK_NAME) as acquired:
        if not acquired:
            # Another instance holds the lock.
            log.error('{} lock present, aborting.'.format(LOCK_NAME))
            return
        # If an add-on ID appears more than once, extract_addon() skips
        # all but the first occurrence because the add-on gets locked
        # for git extraction.
        for entry in GitExtractionEntry.objects.order_by('created').all():
            self.extract_addon(entry)
def handle(self, *args, **options):
    """Command entry point."""
    self.dry_run = options.get('dry_run', False)
    if self.dry_run:
        self.successful_verdict = amo.WOULD_HAVE_BEEN_AUTO_APPROVED
    else:
        self.successful_verdict = amo.AUTO_APPROVED
    self.stats = Counter()
    # Single-instance guard: acquire the lock before doing anything so
    # two copies of this command can't run concurrently.
    with lock(settings.TMP_PATH, LOCK_NAME) as acquired:
        if not acquired:
            # Another instance holds the lock.
            log.error('auto-approve lock present, aborting.')
            return
        candidates = self.fetch_candidates()
        self.stats['total'] = len(candidates)
        for version in candidates:
            self.process(version)
        self.log_final_summary(self.stats)
def _other_process_holding_lock():
    """Acquire the lock, then keep holding it for `sleep` seconds."""
    with utils.lock(settings.TMP_PATH, lock_name) as acquired:
        assert acquired
        time.sleep(sleep)
def test_lock_with_lock_attained():
    """An uncontested lock is acquired and yields a truthy value."""
    with utils.lock(settings.TMP_PATH, 'test-lock-lock2') as acquired:
        assert acquired