Example #1
def atomic_write(content, filename, overwrite=True, mode=0o600):
    """Write `content` into the file `filename` in an atomic fashion.

    This requires write permissions to the directory that `filename` is in.
    It creates a temporary file in the same directory (so that it will be
    on the same filesystem as the destination) and then renames it to
    replace the original, if any.  Such a rename is atomic in POSIX.

    :param overwrite: Overwrite `filename` if it already exists?  Default
        is True.
    :param mode: Access permissions for the file, if written.
    """
    temp_file = _write_temp_file(content, filename)
    os.chmod(temp_file, mode)
    try:
        if overwrite:
            os.rename(temp_file, filename)
        else:
            lock = FileLock(filename)
            lock.acquire()
            try:
                if not os.path.isfile(filename):
                    os.rename(temp_file, filename)
            finally:
                lock.release()
    finally:
        if os.path.isfile(temp_file):
            os.remove(temp_file)
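The `_write_temp_file` helper is not shown in this example. A minimal sketch of what it must do, going by the docstring (create the temporary file in the destination's own directory so the final os.rename() never crosses a filesystem boundary), might look like this; the body is an assumption, not the original project's code:

import os
import tempfile

def _write_temp_file(content, filename):
    # Hypothetical reconstruction: put the temp file next to the destination
    # so the later os.rename() stays on one filesystem and remains atomic.
    dirname = os.path.dirname(os.path.abspath(filename))
    fd, temp_name = tempfile.mkstemp(dir=dirname)
    with os.fdopen(fd, 'wb') as f:
        f.write(content)
    return temp_name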
Example #2
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, total_sent = 0, 0
    start_time = time.time()

    try:
        for queued_batch in NoticeQueueBatch.objects.order_by('-id'):
            sent = emit_batch(queued_batch)
            total_sent += sent
            if sent > 0:
                batches += 1
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #3
class ManagementLock():
    def __init__(self):
        self.lock = None

    def acquire(self):
        self.lock = FileLock(LOCK_PATH)
        reported = False

        # Attempt to obtain the lock, retrying every 10 seconds for at most five minutes.
        # The retrying is necessary so we can report on stderr that we are waiting
        # for a lock. Otherwise, a user trying to run the command manually might
        # get confused why the command execution is delayed.
        for idx in range(0, 30):  # @UnusedVariable
            try:
                self.lock.acquire(10)
                return
            except LockTimeout:
                if not reported:
                    print(
                        "Another management command is running, waiting for lock...",
                        file=sys.stderr)
                    reported = True

        raise RuntimeError("Failed to acquire lock.")

    def release(self):
        if self.lock:
            self.lock.release()
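A sketch of how a caller might pair the two methods; run_command() is a hypothetical stand-in for the protected work:

lock = ManagementLock()
lock.acquire()
try:
    run_command()  # hypothetical: the management command body
finally:
    lock.release()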
Example #4
    def __init__(self, expt_dir, variables=None, grid_size=None, grid_seed=1):
        self.expt_dir = expt_dir
        self.jobs_pkl = os.path.join(expt_dir, 'expt-grid.pkl')
        self.locker = FileLock(self.jobs_pkl)

        # Only one process at a time is allowed to have access to this.
        sys.stderr.write("Waiting to lock grid...")
        self.locker.acquire()
        sys.stderr.write("...acquired\n")

        # Does this exist already?
        if variables is not None and not os.path.exists(self.jobs_pkl):

            # Set up the grid for the first time.
            self.seed = grid_seed
            self.vmap = GridMap(variables, grid_size)
            self.grid = self._hypercube_grid(self.vmap.card(), grid_size)
            self.status = np.zeros(grid_size, dtype=int) + CANDIDATE_STATE
            self.values = np.zeros(grid_size) + np.nan
            self.durs = np.zeros(grid_size) + np.nan
            self.sgeids = np.zeros(grid_size, dtype=int)

            # Save this out.
            self._save_jobs()
        else:

            # Load in from the pickle.
            self._load_jobs()
Example #5
    def tearDown(self):
        self.del_lock()
        fl = FileLock(smtp2gs_locker.LOCK_NAME)
        fl.break_lock()
        smtp2gs_locker.LOCK_NAME = self.oldLockName
        smtp2gs_locker.BREAK_LOCK_AGE = self.oldBreakLockTimeout
        smtp2gs_locker.MAX_LOCK_TIMEOUT = self.maxLockTimeout
Example #6
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statements to be Python 2.4 compatible
        try:
            for queued_batch in NoticeQueueBatch.objects.all():
                notices = pickle.loads(str(queued_batch.pickled_data).decode("base64"))
                batch_sent = 0
                for user, label, extra_context, on_site, sender in notices:
                    try:
                        user = User.objects.get(pk=user)
                        logging.info("emitting notice to %s" % user)
                        # call this once per user to be atomic and allow for logging to
                        # accurately show how long each takes.
                        notification.send_now([user], label, extra_context, on_site, sender)
                        sent += 1
                        batch_sent += 1
                    except:
                        # get the exception
                        _, e, _ = sys.exc_info()
                        # log it as critical
                        logging.critical("an exception occurred: %r" % e)
                        # update the queued_batch, removing notices that had been successfully sent
                        queued_batch.pickled_data = pickle.dumps(notices[batch_sent:]).encode("base64")
                        queued_batch.save()
                queued_batch.delete()
                batches += 1
        except:
            # get the exception
            exc_class, e, t = sys.exc_info()
            # email people
            current_site = Site.objects.get_current()
            subject = "[%s emit_notices] %r" % (current_site.name, e)
            message = "%s" % ("\n".join(traceback.format_exception(*sys.exc_info())),)
            mail_admins(subject, message, fail_silently=True)
            # log it as critical
            logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #7
    def run(self):
        self.busy = True
        key = datetime.now().strftime('%Y%m%d%H%M%S') + str(randint(100, 199))
        self.processing = key
        print "--ID " + self.processing + " Running FilterInput "
        self.output = None

        lock_file = FileLock(self.__filename)
        status = lock_file.is_locked()

        while status:
            status = lock_file.is_locked()

        file_open = open(self.__filename, "rb")
        self.__plain_data = file_open.read()
        file_open.close()

        if len(self.__plain_data) < 1:
            print "Error 400! File input is empty. [File - " + self.__filename + "]\n--Terminating ID " + \
                  self.processing + " in FilterInput."
            self.taken = True
        else:
            self.output = [self.__plain_data, self.__filename]

        self.busy = False
Example #8
    def operate(self):
        self.busy = True
        print "--ID " + self.processing + " Running FilterInput "
        self.output = None

        lock_file = FileLock(self.__filename)
        status = lock_file.is_locked()

        while status:
            status = lock_file.is_locked()
        if os.path.isfile(self.__filename):
            file_open = open(self.__filename, "rb")
            self.__plain_data = file_open.read()
            file_open.close()

            if len(self.__plain_data) < 1:
                print "Error 400! File input is empty. [File - " + self.__filename + \
                  "]\n--Terminating ID " + self.processing + " in FilterInput."
            else:
                self.next_pipe.add_storage([
                    self.mode, self.processing,
                    [self.__plain_data, self.__filename]
                ])

        self.busy = False
Example #9
def daemonize(args, callback):
    with DaemonContext():
        from Pyflix.utils.logger import log_set_up
        log_set_up(True)
        log = logging.getLogger('pyflix.daemon')
        log.info("running daemon")
        create_process = False
        lock = Lock(LOCKFILE, os.getpid(), args.name, args.sea_ep[0],
                    args.sea_ep[1], args.port)
        if lock.is_locked():
            log.debug("lock active")
            lock_pid = lock.get_pid()
            if not lock.is_same_file(args.name, args.sea_ep[0],
                                     args.sea_ep[1]) \
                    or not is_process_running(lock_pid):
                try:
                    log.debug("killing process %s" % lock_pid)
                    os.kill(lock_pid, signal.SIGQUIT)
                except OSError:
                    pass
                except TypeError:
                    pass
                lock.break_lock()
                create_process = True
        else:
            create_process = True

        if create_process:
            log.debug("creating proccess")
            lock.acquire()
            callback()
            lock.release()
        else:
            log.debug("same daemon process")
Example #11
def start_up():
    """Start up this MAAS server.

    This is used to:
    - make sure the singletons required by the application are created
    - sync the configuration of the external systems driven by MAAS

    This method is called when the MAAS application starts up.
    In production, it's called from the WSGI script, so it must not block
    at any cost.  It should simply call very simple methods or Celery tasks.

    The method will be executed multiple times if multiple processes are used,
    but file-based locking ensures that the methods it calls internally are
    not run concurrently.
    """
    lock = FileLock(LOCK_FILE_NAME)
    # In case this process gets shut down, clean up the lock.
    atexit.register(lock.break_lock)

    lock.acquire(timeout=LOCK_TIMEOUT)
    try:
        inner_start_up()
    finally:
        lock.release()
    post_start_up()
Example #12
    def __enter__(self):
        if self.options.storage_path and not self.options.storage_path.endswith('/'):
            self.options.storage_path = self.options.storage_path + '/'

        if self.options.splay > 0:
            splay = randint(0, self.options.splay)
            self.logger.debug('Sleeping for %d seconds (splay=%d)' %
                              (splay, self.options.splay))
            time.sleep(splay)
        self.start_time = DT.datetime.today()
        if not self.options.nolock:
            self.logger.debug('Attempting to acquire lock %s (timeout %s)',
                              self.options.lockfile,
                              self.options.locktimeout)
            self.lock = FileLock(self.options.lockfile)
            try:
                self.lock.acquire(timeout=self.options.locktimeout)
            except LockFailed as e:
                self.logger.error("Lock could not be acquired.")
                self.logger.error(str(e))
                sys.exit(1)
            except LockTimeout as e:
                msg = "Lock could not be acquired. Timeout exceeded."
                self.logger.error(msg)
                sys.exit(1)
Example #13
    def break_lock(self):
        from lockfile import FileLock

        lock = FileLock(self.lock_path)

        if lock.is_locked():
            lock.break_lock()
Example #14
    def _handle(self, *args, **kwargs):
        stdout_backup = None
        if COMMAND_LOG_ROOT and self.OUTPUT_LOG:
            pass # redirect output to file, not implemented yet
        lock = None
        if COMMAND_LOCK_ROOT and (COMMAND_USE_LOCK or self.USE_LOCK):
            lock = FileLock(os.path.join(COMMAND_LOCK_ROOT, self.COMMAND_NAME))
            try:
                lock.acquire(0)
            except:
                print("Command `%s` already running" % self.COMMAND_NAME)
                return

        print("\n======\nRunning `%s` command at %s\n======\n" % (self.COMMAND_NAME, str(datetime.now())))
        try:
            # This calls the handle(...) method defined in the subclass, which
            # BaseCommandMeta renamed to _handle.
            self._handle(*args, **kwargs)
        except Exception as e:
            if COMMAND_HANDLE_EXCEPTIONS or self.HANDLE_EXCEPTIONS:
                print("Unexpected crash:")
                print(traceback.format_exc())
                if (COMMAND_EMAIL_EXCEPTIONS or self.EMAIL_EXCEPTIONS) and not settings.DEBUG:
                    mailer.send_mail("Command %s crash" % self.COMMAND_NAME, traceback.format_exc(), settings.DEFAULT_FROM_EMAIL, [email for name, email in settings.ADMINS ])
                    print("Emails were sent to admins of the website about this crash")
            else:
                raise
        finally:
            if lock is not None:
                lock.release()
Example #15
    def lock(self):
        """Create an external file lock for the bundle database."""

        from lockfile import FileLock, LockTimeout, AlreadyLocked
        import os
        import time
        import traceback
        from ..dbexceptions import LockedFailed

        if self._lock:
            tb = traceback.extract_stack()[-5:-4][0]
            global_logger.debug("Already has bundle lock from {}:{}".format(
                tb[0], tb[1]))
            return

        self._lock = FileLock(self.lock_path)

        for i in range(10):
            try:
                tb = traceback.extract_stack()[-5:-4][0]
                self._lock.acquire(-1)
                global_logger.debug("Acquired bundle lock from {}:{}".format(
                    tb[0], tb[1]))
                return
            except AlreadyLocked as e:
                global_logger.debug("Waiting for bundle lock")
                time.sleep(1)

        self._lock = None
        raise LockedFailed("Failed to acquire lock on {}".format(
            self.lock_path))
Example #16
    def __init__(self, file_, pid, name, season, episode, port):
        FileLock.__init__(self, file_)
        self.pid = str(pid)
        self.name = name
        self.season = season
        self.episode = episode
        self.port = port
Example #18
def main():
    logger.info('Starting DiKBM python client')

    lock = FileLock("dikbm")
    try:
        lock.acquire(0)
    except AlreadyLocked:
        logger.info('lock %s already locked' % lock.unique_name)
    except LockFailed:
        logger.error('lock %s cant be locked' % lock.unique_name)
    else:
        logger.debug('lock %s acquired' % lock.unique_name)

        try:
            client = DiKBMClient()
        except:
            logger.exception('Connect Error')
        else:
            try:
                client.proceed_in()
                client.proceed_status()
            except:
                logger.exception('Proceed Error')
        finally:
            lock.release()
            logger.debug('lock %s released' % lock.unique_name)
    finally:
        logger.info('Finished DiKBM python client')
Example #19
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(NOTIFICATION_LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statements to be Python 2.4 compatible
        try:
            for queued_batch in NoticeQueueBatch.objects.all():
                notices = pickle.loads(
                    str(queued_batch.pickled_data).decode("base64"))
                for user, label, extra_context, on_site in notices:
                    user = User.objects.get(pk=user)
                    logging.info("emitting notice to %s" % user)
                    # call this once per user to be atomic and allow for 
                    # logging to accurately show how long each takes.
                    notification.send_now([user], label, extra_context, on_site)
                    sent += 1
                queued_batch.delete()
                batches += 1
        except:
            # get the exception
            exc_class, e, t = sys.exc_info()
            # email people
            
            if NOTIFICATION_USE_SITE:
                name = Site.objects.get_current().name
            elif NOTIFICATION_DEFAULT_SITE_NAME:
                name = NOTIFICATION_DEFAULT_SITE_NAME
            else:
                # don't display None; fall back to an empty name
                name = ""

            subject = "[%s emit_notices] %r" % (name, e)
               
            message = "%s" % ("\n".join(
                    traceback.format_exception(*sys.exc_info())),)
            mail_admins(subject, message, fail_silently=True)
            # log it as critical
            logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #20
def send_all():
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock(getattr(settings, "MAILER_LOCKFILE", "send_mail"))

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                    # In order for Django to reuse the connection, it has to
                    # already be open(), so it sees new_conn_created as False
                    # and does not try to close the connection.
                    connection.open()
                logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
                email = message.email
                if not email:
                    # We likely had a decoding problem when pulling it back out
                    # of the database. We should pass on this one.
                    mark_as_deferred(message, "message.email was None")
                    deferred += 1
                    continue
                email.connection = connection
                email.send()
                mark_as_sent(message)
                sent += 1
            except Exception, err:
                mark_as_deferred(message, err)
                deferred += 1
                # Get a new connection, in case the connection itself has an error.
                connection = None
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #21
def send_all():
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        for message in prioritize():
            if DontSendEntry.objects.has_address(message.to_address):
                logging.info("skipping email to %s as on don't send list " % message.to_address.encode("utf-8"))
                MessageLog.objects.log(message, 2)  # @@@ avoid using literal result code
                message.delete()
                dont_send += 1
            else:
                try:
                    logging.info(
                        "sending message '%s' to %s"
                        % (message.subject.encode("utf-8"), message.to_address.encode("utf-8"))
                    )
                    core_send_mail(message.subject, message.message_body, message.from_address, [message.to_address])
                    MessageLog.objects.log(message, 1)  # @@@ avoid using literal result code
                    message.delete()
                    sent += 1
                except (
                    socket_error,
                    smtplib.SMTPSenderRefused,
                    smtplib.SMTPRecipientsRefused,
                    smtplib.SMTPAuthenticationError,
                ), err:
                    message.defer()
                    logging.info("message deferred due to failure: %s" % err)
                    MessageLog.objects.log(message, 3, log_message=str(err))  # @@@ avoid using literal result code
                    deferred += 1
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred; %s don't send" % (sent, deferred, dont_send))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #22
def send_all(block_size=500):
    """
    Send all non-deferred messages in the queue.
    
    A lock file is used to ensure that this process can not be started again
    while it is already running.
    
    The ``block_size`` argument allows for queued messages to be iterated in
    blocks, allowing new prioritised messages to be inserted during iteration
    of a large number of queued messages.
    
    """
    lock = FileLock(LOCK_PATH)

    logger.debug("Acquiring lock...")
    try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (the
        # default if it's not provided) on systems which use a LinkFileLock,
        # so ensure that it is never a negative number.
        lock.acquire(LOCK_WAIT_TIMEOUT and max(0, LOCK_WAIT_TIMEOUT))
    except AlreadyLocked:
        logger.debug("Lock already in place. Exiting.")
        return
    except LockTimeout:
        logger.debug("Waiting for the lock timed out. Exiting.")
        return
    logger.debug("Lock acquired.")

    start_time = time.time()

    sent = deferred = skipped = 0

    connection = None

    try:
        connection = SMTPConnection()
        blacklist = models.Blacklist.objects.values_list("email", flat=True)
        connection.open()
        for message in _message_queue(block_size):
            result = send_message(message, smtp_connection=connection, blacklist=blacklist)
            if result == constants.RESULT_SENT:
                sent += 1
            elif result == constants.RESULT_FAILED:
                deferred += 1
            elif result == constants.RESULT_SKIPPED:
                skipped += 1
        connection.close()
    finally:
        logger.debug("Releasing lock...")
        lock.release()
        logger.debug("Lock released.")

    logger.debug("")
    if sent or deferred or skipped:
        log = logger.warning
    else:
        log = logger.info
    log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
    logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
Example #23
    def handle_noargs(self, **options):
        """Handle working on a single project or looping over several."""
        project = options.get("project")
        del options["project"]
        cron_batches = options.get("cron_batches")

        if options.get("view_batches"):
            if project or cron_batches:
                raise CommandError(
                    "view_batches can not be used with project or cron_batch"
                )
            # print out each batch that is in use, and the projects
            # that belong to it
            batches = PerformanceTestModel.get_projects_by_cron_batch()
            for key in sorted(batches.keys()):
                self.stdout.write("{0}: {1}\n".format(
                    key,
                    ", ".join(batches[key])),
                    )
            return

        if not (project or cron_batches):
            raise CommandError(
                "You must provide either a project or cron_batch value."
            )

        if project and cron_batches:
            raise CommandError(
                "You must provide either project or cron_batch, but not both.")

        if cron_batches:
            projects = PerformanceTestModel.get_cron_batch_projects(cron_batches)
        else:
            projects = [project]

        lock = FileLock(self.LOCK_FILE + '_' + str(project))

        timeout_sec = 10
        try:
            lock.acquire(timeout=timeout_sec)
            try:
                self.stdout.write(
                    "Starting for projects: {0}\n".format(", ".join(projects)))

                for p in projects:
                    self.handle_project(p, **options)

                self.stdout.write(
                    "Completed for {0} project(s).\n".format(len(projects)))
            finally:
                lock.release()

        except AlreadyLocked:
            self.stdout.write("This command is already being run elsewhere.  "
            "Please try again later.\n")

        except LockTimeout:
            self.stdout.write("Lock timeout of {0} seconds exceeded. "
                "Please try again later.\n".format(str(timeout_sec)) )
Example #24
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statements to be Python 2.4 compatible
        try:
            for queued_batch in NoticeQueueBatch.objects.all():
                notices = pickle.loads(str(queued_batch.pickled_data).decode("base64"))
                try:
                    for user, label, extra_context, on_site, sender in notices:
                        try:
                            user = User.objects.get(pk=user)
                            logging.info("emitting notice %s to %s" % (label, user))
                            # call this once per user to be atomic and allow for logging to
                            # accurately show how long each takes.
                            notification.send_now([user], label, extra_context, on_site, sender)
                        except User.DoesNotExist:
                            # Ignore deleted users, just warn about them
                            logging.warning("not emitting notice %s to user %s since it does not exist" % (label, user))
                        sent += 1
                except:
                    # If we sent half the batch, we don't want to resend notices
                    # to the first half next time we run it, so just throw away
                    # this (apparently faulty) queued_batch.
                    queued_batch.delete()
                    raise
                queued_batch.delete()
                batches += 1
        except:
            # get the exception
            exc_class, e, t = sys.exc_info()
            # email people
            current_site = Site.objects.get_current()
            subject = "[%s emit_notices] %r" % (current_site.name, e)
            message = "%s" % ("\n".join(traceback.format_exception(*sys.exc_info())),)
            mail_admins(subject, message, fail_silently=True)
            # log it as critical
            logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #25
    def __init__(self, file_, pid=None, name=None, season=None, episode=None,
                 port=None):
        FileLock.__init__(self, file_)
        self.pid = str(pid)
        self.name = name
        self.season = season
        self.episode = episode
        self.port = port
Example #26
def _cleanup_lock(config):
    '''
        Release locks, if set.
    '''
    if config.getboolean("Execution", "serialize"):
        lock = FileLock(config.get("Execution", "pidfile"))
        logger.debug("Releasing lock")
        lock.release()
Example #27
def send_all(batch_size=None):
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
                email = message.email
                email.connection = connection
                email.send()
                MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
                message.delete()
                sent += 1
            except (socket_error, smtplib.SMTPSenderRefused, smtplib.SMTPRecipientsRefused, smtplib.SMTPAuthenticationError), err:
                message.defer()
                logging.info("message deferred due to failure: %s" % err)
                MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
                deferred += 1
                # Get a new connection, in case the connection itself has an error.
                connection = None

            # stop when batch size is reached
            if batch_size is not None and sent + deferred >= batch_size:
                break

    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #28
def send_all(block_size=500, backend=None):
    """
    Send all non-deferred messages in the queue.

    A lock file is used to ensure that this process can not be started again
    while it is already running.

    The ``block_size`` argument allows for queued messages to be iterated in
    blocks, allowing new prioritised messages to be inserted during iteration
    of a large number of queued messages.

    """
    lock = FileLock(LOCK_PATH)

    logger.debug("Acquiring lock...")
    try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (the
        # default if it's not provided) on systems which use a LinkFileLock,
        # so ensure that it is never a negative number.
        lock.acquire(settings.LOCK_WAIT_TIMEOUT or 0)
    except AlreadyLocked:
        logger.debug("Lock already in place. Exiting.")
        return
    except LockTimeout:
        logger.debug("Waiting for the lock timed out. Exiting.")
        return
    logger.debug("Lock acquired.")

    start_time = time.time()

    sent = deferred = skipped = 0

    try:
        if constants.EMAIL_BACKEND_SUPPORT:
            connection = get_connection(backend=backend)
        else:
            connection = get_connection()
        blacklist = models.Blacklist.objects.values_list('email', flat=True)
        connection.open()
        for message in _message_queue(block_size):
            try:
                result = send_queued_message(message, smtp_connection=connection,
                                  blacklist=blacklist)
            except Exception, e:
                result = constants.RESULT_FAILED
                logger.error(e)
            
            if result == constants.RESULT_SENT:
                sent += 1
            elif result == constants.RESULT_FAILED:
                deferred += 1
            elif result == constants.RESULT_SKIPPED:
                skipped += 1
        connection.close()
    finally:
        logger.debug("Releasing lock...")
        lock.release()
        logger.debug("Lock released.")
Example #29
def send_all():
    """
    Send all eligible messages in the queue.
    """
    
    lock = FileLock("send_mail")
    
    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")
    
    start_time = time.time()
    
    dont_send = 0
    deferred = 0
    sent = 0

    def defer_msg(message, err):
        """ Defer, log and count a message """
        message.defer()
        logging.info('message deferred due to failure: %s' % err)
        MessageLog.objects.log(message, 3, log_message=str(err))
        
    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
                email = message.email
                email.connection = connection
                email.send()
                MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
                message.delete()
                sent += 1
                
            except (socket_error, smtplib.SMTPSenderRefused, smtplib.SMTPRecipientsRefused, smtplib.SMTPAuthenticationError), err:
                defer_msg(message,err)
                deferred += 1
                
                # Get a new connection, in case the connection itself has an error.
                connection = None

            except Exception, err:
                defer_msg(message,err)
                deferred += 1
                raise
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
Example #30
    def handle(self, *args, **options):
        """ Store pushlog data in the database. """

        repo_host = options.get("repo_host")
        enddate = options.get("enddate")
        numdays = options.get("numdays")
        hours = options.get("hours")
        branch = options.get("branch")
        verbosity = options.get("verbosity")
        project = options.get("project")

        if not repo_host:
            raise CommandError("You must supply a host name for the repo pushlogs " +
                         "to store: --repo_host hostname")

        if not numdays and not hours:
            raise CommandError("You must supply the number of days or hours of data.")
        else:
            if numdays:
                try:
                    numdays = int(numdays)
                except ValueError:
                    raise CommandError("numdays must be an integer.")

            if hours:

                try:
                    hours = int(hours)
                except ValueError:
                    raise CommandError("hours must be an integer.")

        lock = FileLock(self.LOCK_FILE)
        try:
            lock.acquire(timeout=0)
            try:
                plm = PushLogModel(project=project, out=self.stdout, verbosity=verbosity)

                # store the pushlogs for the branch specified, or all branches
                summary = plm.store_pushlogs(repo_host, numdays, hours, enddate, branch)
                self.println(("Branches: {0}\nPushlogs stored: {1}, skipped: {2}\n" +
                              "Changesets stored: {3}, skipped: {4}").format(
                        summary["branches"],
                        summary["pushlogs_stored"],
                        summary["pushlogs_skipped"],
                        summary["changesets_stored"],
                        summary["changesets_skipped"],
                        ))
                plm.disconnect()

            finally:
                lock.release()

        except AlreadyLocked:
            self.println("This command is already being run elsewhere.  Please try again later.")
Example #31
    def handle(self, *args, **options):
        # Check whether it is already running or not
        lock = FileLock(os.path.join(settings.LOCK_ROOT, LOCK_FILE))
        try:
            lock.acquire(0)
        except:
            print("It seems the command is already running.")
            return

        try:
            import_translations_from_po()
        finally:
            lock.release()
Example #32
class SingleProcessEngine(BaseEngine):
    """
    An engine that ensures only one process can run at the same time. Useful
    when being fired off by a cron and you need to ensure a lock is made so
    other processes won't handle a queue at the same time.
    """
    def __init__(self, *args, **kwargs):
        self._lock_wait_timeout = kwargs.pop("lock_wait_timeout", -1)
        super(SingleProcessEngine, self).__init__(*args, **kwargs)
    
    def run(self):
        """
        Executes the runner using a lock file to prevent race conditions.
        """
        self._create_lock()
        if not self._acquire_lock():
            raise SystemExit
        try:
            super(SingleProcessEngine, self).run()
        finally:
            self._release_lock()
    
    def _create_lock(self):
        """
        Create the lock.
        """
        from lockfile import FileLock
        # Note: the lock name embeds this process's pid, so it cannot exclude
        # other processes; a fixed, shared path would be needed for that.
        self._lock = FileLock("%d.lock" % os.getpid())
    
    def _acquire_lock(self):
        """
        Attempt to acquire a lock. Returns False on failure or True on
        success.
        """
        from lockfile import AlreadyLocked, LockTimeout
        logging.debug("acquiring lock...")
        try:
            self._lock.acquire(self._lock_wait_timeout)
        except AlreadyLocked:
            logging.debug("lock already in place. quitting.")
            return False
        except LockTimeout:
            logging.debug("waiting for the lock timed out. quitting.")
            return False
        logging.debug("lock acquired.")
        return True
    
    def _release_lock(self):
        """
        Release the lock.
        """
        logging.debug("releasing lock...")
        self._lock.release()
        logging.debug("lock released.")
Example #33
def rotate_log_file(path):
    try:
        lockfile = FileLock(path + '.lock')
        lockfile.acquire(timeout=0)
    except LockError:
        return

    try:
        if os.path.exists(path) and os.stat(path).st_size > 1024 * 1024:
            os.rename(path, path + '.1')
    finally:
        lockfile.release()
Example #34
    def __enter__(self):
        if self.options.splay > 0:
            splay = randint(0, self.options.splay)
            self.logger.debug('Sleeping for %d seconds (splay=%d)' %
                              (splay, self.options.splay))
            time.sleep(splay)
        self.start_time = DT.datetime.today()
        if not self.options.nolock:
            self.logger.debug('Attempting to acquire lock %s (timeout %s)',
                              self.options.lockfile, self.options.locktimeout)
            self.lock = FileLock(self.options.lockfile)
            self.lock.acquire(timeout=self.options.locktimeout)
Example #35
class Transaction():
    def __init__(self, model):
        self._model = model
        self._lock = FileLock('.lock')

    def __enter__(self):
        self._lock.acquire()
        return self

    def __exit__(self, *exc_info):
        self._lock.release()
        self._model.sync()
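A usage sketch: any object exposing a sync() method works as the model here (the model below is hypothetical):

model = SomeModel()        # hypothetical object with a sync() method
with Transaction(model):
    model.data["key"] = 1  # mutations happen while '.lock' is held
# on exit the lock is released first, then model.sync() persists the state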
Example #37
    def actionStopFCGI(self):
        from lockfile import FileLock
        from os import kill
        from time import sleep

        kill(int(open("wl-fcgi.pid", "r").read()), 15)
        lock = FileLock("/tmp/wl-fcgi")
        countdown = 15
        while lock.is_locked() and countdown > 0:
            sleep(1)
            countdown -= 1
        if lock.is_locked():
            exit(1)
Example #38
    def wrapper(self, *args, **options):
        def on_interrupt(signum, frame):
            # exit cleanly so the lock file below gets released
            sys.exit()
        signal.signal(signal.SIGTERM, on_interrupt)

        start_time = time.time()
        try:
            verbosity = int(options.get('verbosity', 0))
        except ValueError:
            verbosity = 0
        logger = logging.getLogger(self.__module__)
        if verbosity == 0:
            logger.level = logging.WARNING
        elif verbosity == 1:
            logger.level = logging.INFO
        else:
            logger.level = logging.DEBUG
       
        logger.debug("-" * 72)
        
        lock_name = self.__module__.split('.').pop()
        lock = FileLock(os.path.join(LOCK_ROOT, lock_name))
        
        logger.debug("%s - acquiring lock..." % lock_name)
        try:
            lock.acquire(LOCK_WAIT_TIMEOUT)
        except AlreadyLocked:
            logger.debug("lock already in place. quitting.")
            return
        except LockTimeout:
            logger.debug("waiting for the lock timed out. quitting.")
            return
        logger.debug("acquired.")
        
        try:
            handle(self, logger, *args, **options)
        except (KeyboardInterrupt, SystemExit):
            pass
        except:
            import traceback
            logging.warn("Command Failed")
            logging.warn('=' * 72)
            logging.warn(traceback.format_exc())
            logging.warn('=' * 72)
        
        logger.debug("releasing lock...")
        lock.release()
        logger.debug("released.")
        
        logger.info("done in %.2f seconds" % (time.time() - start_time))
        return
Example #39
def getFileLock(location, filename):
    lock = FileLock("%s/%s" % (location, filename))
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=60)
        except:
            lock.break_lock()
            lock.acquire()
    return lock
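A hypothetical call site; getFileLock blocks until it holds the lock (breaking locks that outlive the 60-second timeout), so the caller is responsible for releasing it:

lock = getFileLock("/tmp", "myjob.lock")  # illustrative location and name
try:
    do_work()  # stand-in for the protected section
finally:
    lock.release()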
Example #40
def my_lock(LockFile, TimeOut=3):
    lock = FileLock(settings.ROOT_PATH + LockFile)
    IsLock = False
    try:
    lock.acquire(timeout=TimeOut)  # wait up to TimeOut seconds
        IsLock = True
    except LockTimeout:
        IsLock = False

    if IsLock:
        return lock
    else:
        raise LockBusyException("Lock is busy")
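my_lock returns a lock that is already held, so callers must pair it with release(); a hypothetical caller:

lock = my_lock("import.lock")  # illustrative lock file name
try:
    run_import()  # stand-in for the critical section
finally:
    lock.release()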
Example #42
    def handle(self, **options):
        """Acquire the lock before running the method.
        """
        lock = FileLock(lockname)
        try:
            lock.acquire(timeout=-1)
        except AlreadyLocked:
            print "Lock is already set, aborting."
            return
        try:
            handler(self, **options)
        finally:
            lock.release()
Example #43
    def handle(*args):
        """Acquire the lock before running the method.
        """
        lock = FileLock(lockname)
        try:
            lock.acquire(timeout=-1)
        except AlreadyLocked: # pragma: no cover
            print('Lock is already set, aborting.')
            return
        try:
            handler(*args)
        finally:
            lock.release()
Example #44
    def wrapper(*args, **kwargs):
        lock = FileLock(lockfile_name)
        try:
            lock.acquire(lock_wait_timeout)
        except AlreadyLocked:
            return
        except LockTimeout:
            return
        try:
            result = func(*args, **kwargs)
        finally:
            lock.release()
        return result
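This wrapper closes over lockfile_name, lock_wait_timeout, and func, so it presumably lives inside a decorator factory. A hypothetical reconstruction of that factory (the name `locked` is invented):

def locked(lockfile_name, lock_wait_timeout=-1):
    # Assumed factory: the inner wrapper matches the example above.
    def decorator(func):
        def wrapper(*args, **kwargs):
            lock = FileLock(lockfile_name)
            try:
                lock.acquire(lock_wait_timeout)
            except (AlreadyLocked, LockTimeout):
                return
            try:
                return func(*args, **kwargs)
            finally:
                lock.release()
        return wrapper
    return decorator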
Example #45
    def run(self):
        self.busy = True
        filename = self._filter_input[0]

        data = self._filter_input[1]
        print "--ID " + self.processing + " Running FilterWrite"
        if self.mode == "encode":
            output_file = filename + ".d2f"

        else:
            temp_pos = filename.rfind('.')
            output_file = filename[:temp_pos]

        lock_file = None

        if os.path.isfile(output_file):
            lock_file = FileLock(output_file)
            status = lock_file.is_locked()
            while status:
                status = lock_file.is_locked()
            lock_file.acquire()

        file_open = open(output_file, "wb")
        file_open.write(data)
        file_open.close()
        if lock_file:
            lock_file.release()

        self.taken = True
        print "--ID " + self.processing + " Finish"
        self.busy = False
        self.output = "Ada isinya"
Example #46
    def subprocess_loop(self):
        """
            The internal loop the subprocess runs to flush queued output.
        """

        # convert to a full path and make a lock
        lock = FileLock(os.path.realpath(self.path))

        while True:

            time.sleep(DELAY_TIME)

            if not self.Q.empty():
                lock.acquire()  # get the lock (or wait till we do)
                with open(self.path, 'a') as o:
                    while not self.Q.empty():  # dump the entire queue
                        x = self.Q.get()
                        if x is None:  # this is our signal we are done with input
                            lock.release()
                            return
                        else:  # this
                            for xi in x:
                                print >> o, xi,
                            # No newline by default now
                            #print >>o, "\n",
                lock.release()
Example #47
    def __init__(self,
                 model_path=DEFAULT_MODEL_PATH,
                 num_topics=DEFAULT_NUM_TOPICS,
                 lock=threading.Lock()):
        self.save_model_lock = lock

        if os.path.isfile(model_path):
            raise Exception("Invalid Model Path; Should Be a Directory")
        if not os.path.exists(model_path):
            os.makedirs(model_path)
        self._lda_model_path = os.path.join(model_path, "lda.model")
        self._dictionary_path = os.path.join(model_path, "tokens.dict")
        self.num_topics = num_topics
        self.model_folder_lock = FileLock(model_path)
Example #48
def my_lock(LockFile, TimeOut=3):
    lock = FileLock(LockFile)
    print "I locked", lock.path

    IsLock = False
    try:
        lock.acquire(timeout=TimeOut)  # wait up to TimeOut seconds
        IsLock = True
    except LockTimeout:
        IsLock = False

    if IsLock:
        return lock
    else:
        raise LockBusyException("Lock is busy")
Example #49
    def __init__(self,
                 expt_dir,
                 covar="Matern52",
                 mcmc_iters=10,
                 pending_samples=100,
                 noiseless=False,
                 burnin=100,
                 grid_subset=20):
        self.cov_func = getattr(gp, covar)
        self.state_pkl = os.path.join(expt_dir, self.__module__ + ".pkl")
        self.state_lock = FileLock(self.state_pkl)
        self.stats_file = os.path.join(
            expt_dir, self.__module__ + "_hyperparameters.txt")
        self.mcmc_iters = int(mcmc_iters)
        self.burnin = int(burnin)
        self.needs_burnin = True
        self.pending_samples = int(pending_samples)
        self.D = -1
        self.hyper_iters = 1
        # Number of points to optimize EI over
        self.grid_subset = int(grid_subset)
        self.noiseless = bool(int(noiseless))
        self.hyper_samples = []

        self.noise_scale = 0.1  # horseshoe prior
        self.amp2_scale = 1  # zero-mean log normal prior
        self.max_ls = 10  # top-hat prior on length scales
Example #50
    def file_composer(self, host, path, output_path):
        """Collects the file content of the specified path in the desired host and append it to the output_path file.
        Simulates the behavior of tail -f and redirect the output to output_path.

        Parameters
        ----------
        host : str
            Hostname.
        path : str
            Host file path to collect.
        output_path : str
            Output path of the content collected from the remote host path.
        """
        try:
            truncate_file(os.path.join(self._tmp_path, output_path))
        except FileNotFoundError:
            pass
        logger.debug(f'Starting file composer for {host} and path: {path}. '
                     f'Composite file in {os.path.join(self._tmp_path, output_path)}')
        tmp_file = os.path.join(self._tmp_path, output_path)
        while True:
            with FileLock(tmp_file):
                with open(tmp_file, "r+") as file:
                    content = self.host_manager.get_file_content(host, path).split('\n')
                    file_content = file.read().split('\n')
                    for new_line in content:
                        if new_line == '':
                            continue
                        if new_line not in file_content:
                            file.write(f'{new_line}\n')
            time.sleep(self._time_step)
Example #51
    def process_IN_CLOSE_WRITE(self, event):
        if "irdisp0.fits" in event.pathname:
            with FileLock("/tmp/irdisp0.fits"):
                try:
                    self.img = pf.getdata("/tmp/irdisp0.fits")
                except:
                    self.img = None
Example #52
    def run(self, args):
        if args:
            self.config.verbose = True
        repodir = osp.normpath(_repo_path(self.config, '.'))
        for path in os.listdir(repodir):
            if args and path not in args:
                continue
            try:
                repo = self._check_repository(osp.join(repodir, path))
            except cli.CommandError:
                continue

            distribs = set()
            self.debian_changes = {}
            # we have to launch the publication sequentially
            lockfile = osp.join(repo.directory, 'ldi.lock')
            with FileLock(lockfile):
                changes_files = repo.incoming_changes_files([])
                if changes_files:
                    self.logger.warning('There are incoming packages in %s',
                                        path)
                    if self.config.verbose:
                        self.logger.debug(
                            'The following changes files are ready '
                            'to be published:\n%s', '\n'.join(changes_files))
Example #53
def dataliststore(temp_data_list, TwitterWorkDB, Data_DB_Path):
    datadblock = FileLock(Data_DB_Path)
    rowlist = []
    worklist = []
    finishlist = []
    for temp_data in temp_data_list:
        buff = ''
        buff += str(temp_data[1]) + '\t'
        if temp_data[2] is not None: buff += str(temp_data[2])
        buff += '\t'
        if temp_data[3] is not None: buff += str(temp_data[3])
        buff += '\t'
        if temp_data[4] is not None: buff += str(temp_data[4])
        buff += '\t'
        if temp_data[5] is not None:
            buff += temp_data[5].encode('ascii', 'replace')
        buff += '\t'
        buff += json.dumps(temp_data[6]) + '\t'
        buff += json.dumps(temp_data[7])
        rowlist.append([temp_data[0], buff])
        worklist.extend(temp_data[6].keys())
        finishlist.append(temp_data[0])
    if len(rowlist) != 0:
        with datadblock:
            DataDB = bsddb.hashopen(Data_DB_Path, 'c')
            for row in rowlist:
                DataDB[row[0]] = row[1]
            DataDB.close()
    for workitem in set(worklist):
        TwitterWorkDB.put(workitem)
    for finishitem in finishlist:
        TwitterWorkDB.finish(finishitem)
    return
Example #54
def write_hostsfile(hostmapping, path='/etc/hosts'):
    with FileLock("/tmp/docker-update-hosts.lock"):
        f = open(path, 'r')
        fn = open(path + "-docker-update-hosts", 'w')
        r = f.readline()
        in_chunk = False
        chunk_found = False
        while r != "":
            if not in_chunk:
                fn.write(r)
                if re.match("\s*#DOCKER_UPDATE_HOSTS_START", r):
                    in_chunk = True
                    fn.write(format_for_hostsfile(hostmapping))
                    chunk_found = True
            if in_chunk:
                if re.match("\s*#DOCKER_UPDATE_HOSTS_END", r):
                    in_chunk = False
                    fn.write(r)
            r = f.readline()
        f.close()
        fn.close()
        if not chunk_found:
            print(
                "not updating hosts file: did not find the required markers "
                "#DOCKER_UPDATE_HOSTS_START and #DOCKER_UPDATE_HOSTS_END"
            )
        else:
            import shutil
            shutil.move(path + "-docker-update-hosts", path)
            print("updated")
Example #55
    def run(self):
        self.busy = True
        print "--ID " + self.processing + " Running FilterDictionary"
        filename = self._filter_input[1]
        temp_pos = filename.rfind('.')
        filename = filename[:temp_pos]
        filename += ".d2c"
        temp = ""

        lock_file = FileLock(filename)
        status = lock_file.is_locked()

        while status:
            status = lock_file.is_locked()

        if os.path.isfile(filename):
            file_open = open(filename, "rb")
            temp = file_open.read()
            file_open.close()

        temp_pos = temp.find('_')
        size = int(temp[:temp_pos])
        temp = temp[temp_pos + 1:]
        temp_list = temp.split("=*")
        dict_binary = {}
        for i in range(0, len(temp_list)):
            temp = temp_list[i]
            temp_list2 = temp.split("/|")
            temp_list3 = temp_list2[0].split('-')
            temp_binary = "{0:b}".format(int(temp_list3[1]))
            temp_binary = temp_binary.replace(" ", "")
            temp_length = int(temp_list3[0])
            if temp_length < len(temp_binary):
                range_binary = len(temp_binary) - temp_length
                bin_val = temp_binary[range_binary:]
            elif temp_length > len(temp_binary):
                range_binary = temp_length - len(temp_binary)
                bin_val = "0" * range_binary
                bin_val += temp_binary
            else:
                bin_val = temp_binary

            dict_binary[bin_val] = temp_list2[1]

        self.output = [filename, size, dict_binary, self._filter_input[0]]
        self.busy = False
Example #56
def pickle_write(filename, value, protocol=pickle.HIGHEST_PROTOCOL):
    """Store value as a pickle without creating corruption."""
    with FileLock(filename, timeout=DEFAULT_TIMEOUT):
        # Be as defensive as possible: dump the pickle data to a temporary file
        # first, then move the data to the requested filename second.
        with NamedTemporaryFile(delete=False) as fp:
            pickle.dump(value, fp, protocol)
        shutil.move(fp.name, filename)
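A round-trip sketch; DEFAULT_TIMEOUT is assumed to be defined next to pickle_write, and the path is illustrative:

pickle_write("state.pkl", {"step": 1})
with open("state.pkl", "rb") as fp:   # readers never see a half-written file
    state = pickle.load(fp)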
Example #57
def getfitsdata(imname):
    '''Quick interface to read the current image from the temp dir.'''
    with FileLock(imname):
        try:
            im = pf.getdata(imname)
        except:
            im = None
    return im
Example #58
from contextlib import contextmanager

@contextmanager
def database():
    dbfilename = app.config['DATABASE']
    with FileLock(dbfilename):
        db = shelve.open(dbfilename)
        try:
            yield db
        finally:
            db.close()
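With the contextmanager decorator restored above, a caller uses it like any context manager (the key name is hypothetical):

with database() as db:
    db["visits"] = db.get("visits", 0) + 1  # shelve allows dict-style access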
Example #59
class NoseLock(Plugin):
    name = 'lock'

    def options(self, parser, env=os.environ):
        super(NoseLock, self).options(parser, env=env)
        # getting reasonable defaults
        app_dir = os.getcwd()
        app_name = os.path.basename(app_dir)
        default_lock_file = os.path.join('/tmp', app_name)

        parser.add_option(
            '--lock-file', action='store',
            default=default_lock_file,
            dest='lock_file',
            help='Use this file to acquire lock (default: {0})'.format(
                default_lock_file))

    def configure(self, options, conf):
        super(NoseLock, self).configure(options, conf)
        if not self.enabled:
            self.lock = None
        else:
            lock_file = options.lock_file
            self.lock = FileLock(lock_file)

            if self.lock.is_locked():
                owner = get_owner(lock_file + '.lock')
                if owner:
                    print ('User {0} already running the tests, '
                           'please keep calm.').format(owner)
            try:
                self.lock.acquire()
                log.info('File {0} locked.'.format(self.lock.lock_file))
                print 'LOCK:', lock_file
            except KeyboardInterrupt:
                print '\nYou are so impatient today!\nBye.'
                sys.exit(1)


    def finalize(self, result):
        if self.lock:
            print 'UNLOCK', self.lock.lock_file
            log.info('Unlocking {0}.'.format(self.lock.lock_file))
            self.lock.release()
Example #60
    def run(self):
        self.busy = True
        key = datetime.now().strftime('%Y%m%d%H%M%S') + str(randint(100, 199))
        self.processing = key
        print "--ID " + self.processing + " Running FilterInput "
        self.output = None

        lock_file = FileLock(self.__filename)
        status = lock_file.is_locked()

        while status:
            status = lock_file.is_locked()
        if os.path.isfile(self.__filename):
            file_open = open(self.__filename, "rb")
            self.__plain_data = file_open.read()
            file_open.close()
            self.output = [self.__plain_data, self.__filename]
        self.busy = False