def __enter__(self):
    if self.options.storage_path and not self.options.storage_path.endswith('/'):
        self.options.storage_path = self.options.storage_path + '/'
    if self.options.splay > 0:
        splay = randint(0, self.options.splay)
        self.logger.debug('Sleeping for %d seconds (splay=%d)' % (splay, self.options.splay))
        time.sleep(splay)
    self.start_time = DT.datetime.today()
    if not self.options.nolock:
        self.logger.debug('Attempting to acquire lock %s (timeout %s)',
                          self.options.lockfile, self.options.locktimeout)
        self.lock = FileLock(self.options.lockfile)
        try:
            self.lock.acquire(timeout=self.options.locktimeout)
        except LockFailed as e:
            self.logger.error("Lock could not be acquired.")
            self.logger.error(str(e))
            sys.exit(1)
        except LockTimeout as e:
            msg = "Lock could not be acquired. Timeout exceeded."
            self.logger.error(msg)
            sys.exit(1)
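# A minimal usage sketch for the __enter__ above, assuming it belongs to a
# script-harness class (called "LockedScript" here purely for illustration)
# whose matching __exit__ releases self.lock. The option parsing and body
# function are hypothetical, not part of the original code.
if __name__ == '__main__':
    options = parse_options()        # hypothetical argparse-style options object
    with LockedScript(options):      # splay sleep and lock acquisition happen here
        run_script_body()            # only one instance runs at a time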
def atomic_write(content, filename, overwrite=True, mode=0600):
    """Write `content` into the file `filename` in an atomic fashion.

    This requires write permissions to the directory that `filename` is in.
    It creates a temporary file in the same directory (so that it will be on
    the same filesystem as the destination) and then renames it to replace
    the original, if any.  Such a rename is atomic in POSIX.

    :param overwrite: Overwrite `filename` if it already exists?  Default
        is True.
    :param mode: Access permissions for the file, if written.
    """
    temp_file = _write_temp_file(content, filename)
    os.chmod(temp_file, mode)
    try:
        if overwrite:
            os.rename(temp_file, filename)
        else:
            lock = FileLock(filename)
            lock.acquire()
            try:
                if not os.path.isfile(filename):
                    os.rename(temp_file, filename)
            finally:
                lock.release()
    finally:
        if os.path.isfile(temp_file):
            os.remove(temp_file)
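# Usage sketch for atomic_write, assuming the module-level helper
# _write_temp_file(content, filename) creates a temporary file next to
# `filename` and returns its path (it is referenced above but not shown).
# The paths and contents below are illustrative only.
atomic_write('listen 8080\n', '/etc/myapp/app.conf')                 # replace unconditionally
atomic_write('defaults\n', '/etc/myapp/app.conf', overwrite=False)   # create only if missing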
def start_up():
    """Start up this MAAS server.

    This is used to:
    - make sure the singletons required by the application are created
    - sync the configuration of the external systems driven by MAAS

    This method is called when the MAAS application starts up.  In
    production, it's called from the WSGI script, so it shouldn't block at
    any cost.  It should simply call very simple methods or Celery tasks.

    The method will be executed multiple times if multiple processes are
    used, but it uses file-based locking to ensure that the methods it calls
    internally are not run concurrently.
    """
    lock = FileLock(LOCK_FILE_NAME)
    # In case this process gets shut down, clean up the lock.
    atexit.register(lock.break_lock)

    lock.acquire(timeout=LOCK_TIMEOUT)
    try:
        inner_start_up()
    finally:
        lock.release()

    post_start_up()
def write_hostsfile(hostmapping, path='/etc/hosts'):
    with FileLock("/tmp/docker-update-hosts.lock"):
        f = open(path, 'r')
        fn = open(path + "-docker-update-hosts", 'w')
        r = f.readline()
        in_chunk = False
        chunk_found = False
        while r != "":
            if not in_chunk:
                fn.write(r)
                if re.match(r"\s*#DOCKER_UPDATE_HOSTS_START", r):
                    in_chunk = True
                    fn.write(format_for_hostsfile(hostmapping))
                    chunk_found = True
            if in_chunk:
                if re.match(r"\s*#DOCKER_UPDATE_HOSTS_END", r):
                    in_chunk = False
                    fn.write(r)
            r = f.readline()
        f.close()
        fn.close()
        if not chunk_found:
            print(
                "not updating hostsfile, did not find the two necessary comments "
                "#DOCKER_UPDATE_HOSTS_START and #DOCKER_UPDATE_HOSTS_END"
            )
        else:
            import shutil
            shutil.move(path + "-docker-update-hosts", path)
            print("updated")
def __init__(self, expt_dir, variables=None, grid_size=None, grid_seed=1):
    self.expt_dir = expt_dir
    self.jobs_pkl = os.path.join(expt_dir, 'expt-grid.pkl')
    self.locker = FileLock(self.jobs_pkl)

    # Only one process at a time is allowed to have access to this.
    sys.stderr.write("Waiting to lock grid...")
    self.locker.acquire()
    sys.stderr.write("...acquired\n")

    # Does this exist already?
    if variables is not None and not os.path.exists(self.jobs_pkl):
        # Set up the grid for the first time.
        self.seed = grid_seed
        self.vmap = GridMap(variables, grid_size)
        self.grid = self._hypercube_grid(self.vmap.card(), grid_size)
        self.status = np.zeros(grid_size, dtype=int) + CANDIDATE_STATE
        self.values = np.zeros(grid_size) + np.nan
        self.durs = np.zeros(grid_size) + np.nan
        self.sgeids = np.zeros(grid_size, dtype=int)

        # Save this out.
        self._save_jobs()
    else:
        # Load in from the pickle.
        self._load_jobs()
def run(self, args):
    if args:
        self.config.verbose = True
    repodir = osp.normpath(_repo_path(self.config, '.'))
    for path in os.listdir(repodir):
        if args and path not in args:
            continue
        try:
            repo = self._check_repository(osp.join(repodir, path))
        except cli.CommandError:
            continue

        distribs = set()
        self.debian_changes = {}

        # we have to launch the publication sequentially
        lockfile = osp.join(repo.directory, 'ldi.lock')
        with FileLock(lockfile):
            changes_files = repo.incoming_changes_files([])
            if changes_files:
                self.logger.warning('There are incoming packages in %s', path)
                if self.config.verbose:
                    self.logger.debug(
                        'The following changes files are ready '
                        'to be published:\n%s', '\n'.join(changes_files))
def send_smses(send_deferred=False, backend=None):
    # Get lock so there is only one sms sender at the same time.
    if send_deferred:
        lock = FileLock('send_sms_deferred')
    else:
        lock = FileLock('send_sms')

    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logger.info('Could not acquire lock.')
        return
    except LockTimeout:
        logger.info('Lock timed out.')
        return

    successes, failures = 0, 0
    try:
        # Get SMSes that need to be sent (deferred or non-deferred)
        if send_deferred:
            to_send = QueuedSMS.objects.filter(priority=PRIORITY_DEFERRED)
        else:
            to_send = QueuedSMS.objects.exclude(priority=PRIORITY_DEFERRED)

        logger.info("Trying to send %i messages." % to_send.count())

        # Send each SMS
        for sms in to_send:
            if backend:
                sms_using = backend
            else:
                sms_using = None if sms.using == '__none__' else sms.using
            if send(sms.to, sms.content, sms.signature, sms_using, sms.reliable):
                # Successfully sent, remove from queue
                logger.info("SMS to %s sent." % sms.to)
                sms.delete()
                successes += 1
            else:
                # Failed to send, defer SMS
                logger.info("SMS to %s failed." % sms.to)
                sms.defer()
                failures += 1
    finally:
        lock.release()

    if successes and failures:
        statsd.gauge('smsgateway.success_rate', successes / failures)
    else:
        statsd.gauge('smsgateway.success_rate', 1)
def pickle_write(filename, value, protocol=pickle.HIGHEST_PROTOCOL):
    """Store value as a pickle without creating corruption."""
    with FileLock(filename, timeout=DEFAULT_TIMEOUT):
        # Be as defensive as possible: dump the pickle data to a temporary
        # file first, then move the data to the requested filename second.
        with NamedTemporaryFile(delete=False) as fp:
            pickle.dump(value, fp, protocol)
        shutil.move(fp.name, filename)
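# Usage sketch for pickle_write, assuming DEFAULT_TIMEOUT is defined in the
# surrounding module and that a matching reader simply unpickles the file
# under the same lock. The path and payload below are illustrative only.
state = {'last_run': '2024-01-01', 'count': 42}
pickle_write('/var/lib/myapp/state.pkl', state)   # lock-protected, temp-file-then-move write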
def _cleanup_lock(config):
    ''' Release locks, if set. '''
    if config.getboolean("Execution", "serialize"):
        lock = FileLock(config.get("Execution", "pidfile"))
        logger.debug("Releasing lock")
        lock.release()
def flush(self):
    if not self._buffer:
        return

    if sys.platform.startswith('win'):
        return

    items = []
    for b in self._buffer:
        try:
            d = DictUtils.merge(self._meta, b['data'])
            item = b['prefix'] + ' ' + JSON.asString(d)
        except Exception as err:
            item = '>> EXCEPTION: JSON ENCODING FAILED >> ' + str(err).replace('\n', '\t')

        try:
            item = item.encode('utf8', 'ignore')
        except Exception as err:
            item = '>> EXCEPTION: UNICODE DECODING FAILED >> ' + str(err).replace('\n', '\t')

        items.append(item)

    count = self._fileCount
    offset = random.randint(0, count - 1)
    success = False
    path = self.getReportFolder() + self._timeCode + '/'
    if not os.path.exists(path):
        os.makedirs(path)

    for i in range(count):
        index = (i + offset) % count
        p = path + str(index) + '.report'
        lock = FileLock(p)
        if lock.i_am_locking() and i < count - 1:
            continue

        try:
            lock.acquire()
        except Exception:
            continue

        try:
            out = StringUtils.toUnicode('\n'.join(items) + '\n')
            f = open(p, 'a+')
            f.write(out.encode('utf8'))
            f.close()
            success = True
        except Exception as err:
            print("REPORTER ERROR: Unable to write report file.")
            print(err)
        lock.release()

        if success:
            break

    self.clear()
    return success
def getFileLock(location, filename):
    lock = FileLock("%s/%s" % (location, filename))
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=60)
        except:
            lock.break_lock()
            lock.acquire()
    return lock
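# Usage sketch for getFileLock, assuming the caller is responsible for
# releasing the returned lock when finished. The directory, lock name, and
# critical-section function are illustrative only.
lock = getFileLock('/tmp', 'my-job.lock')
try:
    do_exclusive_work()   # hypothetical critical section guarded by the lock
finally:
    lock.release()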
def handle_noargs(self, **options):
    """Handle working on a single project or looping over several."""
    project = options.get("project")
    del options["project"]

    cron_batches = options.get("cron_batches")

    if options.get("view_batches"):
        if project or cron_batches:
            raise CommandError(
                "view_batches can not be used with project or cron_batch")
        # print out each batch that is in use, and the projects
        # that belong to it
        batches = PerformanceTestModel.get_projects_by_cron_batch()
        for key in sorted(batches.keys()):
            self.stdout.write(
                "{0}: {1}\n".format(key, ", ".join(batches[key])),
            )
        return

    if not (project or cron_batches):
        raise CommandError(
            "You must provide either a project or cron_batch value.")

    if project and cron_batches:
        raise CommandError(
            "You must provide either project or cron_batch, but not both.")

    if cron_batches:
        projects = PerformanceTestModel.get_cron_batch_projects(cron_batches)
    else:
        projects = [project]

    lock = FileLock(self.LOCK_FILE + '_' + str(project))

    timeout_sec = 10
    try:
        lock.acquire(timeout=timeout_sec)
        try:
            self.stdout.write("Starting for projects: {0}\n".format(
                ", ".join(projects)))

            for p in projects:
                self.handle_project(p, **options)

            self.stdout.write("Completed for {0} project(s).\n".format(
                len(projects)))
        finally:
            lock.release()
    except AlreadyLocked:
        self.stdout.write("This command is already being run elsewhere. "
                          "Please try again later.\n")
    except LockTimeout:
        self.stdout.write("Lock timeout of {0} seconds exceeded. "
                          "Please try again later.\n".format(str(timeout_sec)))
def send_all():
    """
    Send all eligible messages in the queue.
    """
    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        for message in prioritize():
            if DontSendEntry.objects.has_address(message.to_address):
                logging.info("skipping email to %s as on don't send list " % message.to_address)
                MessageLog.objects.log(message, 2)  # @@@ avoid using literal result code
                message.delete()
                dont_send += 1
            else:
                try:
                    logging.info("sending message '%s' to %s" % (
                        message.subject.encode("utf-8"),
                        message.to_address.encode("utf-8")))
                    if not message.message_body_html:
                        core_send_mail(message.subject, message.message_body,
                                       message.from_address, [message.to_address])
                    else:
                        email = EmailMultiAlternatives(
                            message.subject, message.message_body,
                            message.from_address, [message.to_address])
                        email.attach_alternative(message.message_body_html, 'text/html')
                        email.send()
                    MessageLog.objects.log(message, 1)  # @@@ avoid using literal result code
                    message.delete()
                    sent += 1
                except (socket_error, smtplib.SMTPSenderRefused,
                        smtplib.SMTPRecipientsRefused,
                        smtplib.SMTPAuthenticationError) as err:
                    message.defer()
                    logging.info("message deferred due to failure: %s" % err)
                    MessageLog.objects.log(message, 3, log_message=str(err))  # @@@ avoid using literal result code
                    deferred += 1
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred; %s don't send" % (sent, deferred, dont_send))
    logging.info("done in %.2f seconds" % (time.time() - start_time))