def subprocess_loop(self):
		"""
		The internal loop the subprocess runs to flush queued output to the file.
		"""

		# convert to a full path and make a lock on it
		path = os.path.realpath(self.path)
		lock = FileLock(path)

		while True:

			time.sleep(DELAY_TIME)
			if not self.Q.empty():
				lock.acquire() # get the lock (or wait till we do)
				with open(path, 'a') as o:
					while not self.Q.empty(): # dump the entire queue
						x = self.Q.get()
						if x is None: # this is our signal we are done with input
							lock.release()
							return
						else: # write one row, items space-separated (Python 2 print)
							for xi in x: print >>o, xi,
							# No newline by default now
							#print >>o, "\n",
				lock.release()
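
For context, a minimal sketch of the producer side this loop assumes; the method names (write and close) are hypothetical, only self.Q and the None sentinel come from the snippet above:

def write(self, *cols):
	# enqueue one row; subprocess_loop writes the items space-separated
	self.Q.put(cols)

def close(self):
	# None is the shutdown sentinel: subprocess_loop drains the queue,
	# releases the lock, and returns
	self.Q.put(None)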
Example #2
def main(argv=None):
    global argparser, lockfilepath
    global session, server, db
    if argv is None:
        args = argparser.parse_args()
    else:
        args = argparser.parse_args(argv)
    try:
        session, server, db = getCheshire3Env(args)
    except (EnvironmentError, ObjectDoesNotExistException):
        return 1
    with db.get_path(session, 'defaultLogger') as session.logger:
        mp = db.get_path(session, 'metadataPath')
        lock = FileLock(mp)
        if lock.is_locked() and args.unlock:
            # Forcibly unlock
            session.logger.log_warning(session, "Unlocking Database")
            lock.break_lock()
        try:
            lock.acquire(timeout=30)    # wait up to 30 seconds
        except LockTimeout:
            msg = ("The database is locked. It is possible that another"
                   "user is currently indexing this database. Please wait at "
                   "least 10 minutes and then try again. If you continue to "
                   "get this message and you are sure no one is reindexing "
                   "the database please contact the archives hub team for "
                   "advice."
                   )
            session.logger.log_critical(session, msg)
            return 1
        try:
            return args.func(args)
        finally:
            lock.release()
Example #3
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, total_sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statement to be Python 2.4 compatible
        for queued_batch in NoticeQueueBatch.objects.order_by('-id'):
            sent = emit_batch(queued_batch)
            total_sent += sent
            if sent > 0:
                batches += 1
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #4
	def MakeGraphPercent(self, Data, FileName):
		"""
		Funzione che produce un grafico percentuale sotto forma di pieplot.

		:param Data: Dato da dover graficare.
		:param FileName: Nome file da assegnare al grafico prodotto.
		:returns: *nulla*

		"""
		global Interval
		Labels = ["%IN USO","TOT"]
		FileName = 'extra/MonitorGraph/'+FileName

		#print "**Data Graph**"
		#print Data

		# select the colour combination for the percentage graphs, with thresholds [0,33], [34,66], [67,100]
		if Data <= 33: PercentColors = ["lime", "gray"]
		elif Data <= 66: PercentColors = ["yellow", "light_gray"]
		else: PercentColors = ["red", "white"]
		Data = [int(Data),100-int(Data)]

		# each time, try to acquire the lock to create a new image; if that fails, break the lock
		TempLock = FileLock(FileName)
		try:
			TempLock.acquire(timeout=Interval)
		except LockTimeout:
			TempLock.break_lock()
		else:
			cairoplot.pie_plot(FileName, dict(zip(Labels,Data)), 185, 130, colors = PercentColors)
			TempLock.release()
Example #5
def start_up():
    """Start up this MAAS server.

    This is used to:
    - make sure the singletons required by the application are created
    - sync the configuration of the external systems driven by MAAS

    This method is called when the MAAS application starts up.
    In production, it's called from the WSGI script, so it must not block;
    it should simply call very simple methods or Celery tasks.

    The method will be executed multiple times if multiple processes are used,
    but it uses file-based locking to ensure that the methods it calls
    internally are not run concurrently.
    """
    lock = FileLock(LOCK_FILE_NAME)
    # In case this process gets shut down, clean up the lock.
    atexit.register(lock.break_lock)

    lock.acquire(timeout=LOCK_TIMEOUT)
    try:
        inner_start_up()
    finally:
        lock.release()
    post_start_up()
Example #6
	def MakeGraphTop3(self, Data, FileName):
		"""
		Funzione che produce un grafico dei nodi a maggiore latenza sotto forma di istogram.

		:param Data: Serie di dati da dover graficare.
		:param FileName: Nome file da assegnare al grafico prodotto.
		:returns: *nulla*

		"""
		global Interval
		FileName = 'extra/MonitorGraph/'+FileName

		ordered = sorted(Data.iteritems(), key=operator.itemgetter(1), reverse=True)
		first3 = []
		colors3 = []
		for item in ordered:
			if (len(first3) < 3) and (item[0] in self.ProbeList):
				colors3.append(self.Colors[sorted(self.ProbeList).index(item[0])])
				first3.append(item[1])

		# each time, try to acquire the lock to create a new image; if that fails, break the lock
		TempLock = FileLock(FileName)
		try:
			TempLock.acquire(timeout=Interval)
		except LockTimeout:
			TempLock.break_lock()
		else:
			cairoplot.vertical_bar_plot(FileName, first3, 170, 130, display_values=True, colors=colors3)
			TempLock.release()
Example #7
def atomic_write(content, filename, overwrite=True, mode=0600):
    """Write `content` into the file `filename` in an atomic fashion.

    This requires write permissions to the directory that `filename` is in.
    It creates a temporary file in the same directory (so that it will be
    on the same filesystem as the destination) and then renames it to
    replace the original, if any.  Such a rename is atomic in POSIX.

    :param overwrite: Overwrite `filename` if it already exists?  Default
        is True.
    :param mode: Access permissions for the file, if written.
    """
    temp_file = _write_temp_file(content, filename)
    os.chmod(temp_file, mode)
    try:
        if overwrite:
            os.rename(temp_file, filename)
        else:
            lock = FileLock(filename)
            lock.acquire()
            try:
                if not os.path.isfile(filename):
                    os.rename(temp_file, filename)
            finally:
                lock.release()
    finally:
        if os.path.isfile(temp_file):
            os.remove(temp_file)
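
A usage sketch for atomic_write (path hypothetical); with overwrite=False, the FileLock makes the existence check and the rename race-free across processes:

atomic_write("key=value\n", "/tmp/example.conf", overwrite=False, mode=0600)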
Example #8
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statement to be Python 2.4 compatible
        try:
            for queued_batch in NoticeQueueBatch.objects.all():
                notices = pickle.loads(str(queued_batch.pickled_data).decode("base64"))
                batch_sent = 0
                for user, label, extra_context, on_site, sender in notices:
                    try:
                        user = User.objects.get(pk=user)
                        logging.info("emitting notice to %s" % user)
                        # call this once per user to be atomic and allow for logging to
                        # accurately show how long each takes.
                        notification.send_now([user], label, extra_context, on_site, sender)
                        sent += 1
                        batch_sent += 1
                    except:
                        # get the exception
                        _, e, _ = sys.exc_info()
                        # log it as critical
                        logging.critical("an exception occurred: %r" % e)
                        # update the queued_batch, removing notices that were successfully sent
                        queued_batch.pickled_data = pickle.dumps(notices[batch_sent:]).encode("base64")
                        queued_batch.save()
                queued_batch.delete()
                batches += 1
        except:
            # get the exception
            exc_class, e, t = sys.exc_info()
            # email people
            current_site = Site.objects.get_current()
            subject = "[%s emit_notices] %r" % (current_site.name, e)
            message = "%s" % ("\n".join(traceback.format_exception(*sys.exc_info())),)
            mail_admins(subject, message, fail_silently=True)
            # log it as critical
            logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #9
	def MakeGraph(self, Data, FileName):
		"""
		Funzione che produce un grafico temporale.

		:param Data: Serie di dati da dover graficare.
		:param FileName: Nome file da assegnare al grafico prodotto.
		:returns: *nulla*

		"""
		global Interval,TimeStep

		Markers=[]
		FileName = 'extra/MonitorGraph/'+FileName

		for x in range((TimeStep-1)*Interval,-1,-Interval): Markers.append(str(x))

		# each time, try to acquire the lock to create a new image; if that fails, break the lock
		TempLock = FileLock(FileName)
		try:
			TempLock.acquire(timeout=Interval)
		except LockTimeout:
			TempLock.break_lock()
		else:
			cairoplot.dot_line_plot(FileName, dict(zip(self.ProbeList, Data[:])),
				600, 200, axis=True, grid=True, series_legend=False,
				x_labels=Markers, series_colors=self.Colors)
			TempLock.release()
Example #10
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(NOTIFICATION_LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statement to be Python 2.4 compatible
        try:
            for queued_batch in NoticeQueueBatch.objects.all():
                notices = pickle.loads(
                    str(queued_batch.pickled_data).decode("base64"))
                for user, label, extra_context, on_site in notices:
                    user = User.objects.get(pk=user)
                    logging.info("emitting notice to %s" % user)
                    # call this once per user to be atomic and allow for 
                    # logging to accurately show how long each takes.
                    notification.send_now([user], label, extra_context, on_site)
                    sent += 1
                queued_batch.delete()
                batches += 1
        except:
            # get the exception
            exc_class, e, t = sys.exc_info()
            # email people
            
            if NOTIFICATION_USE_SITE:
                name = Site.objects.get_current().name
            elif NOTIFICATION_DEFAULT_SITE_NAME:
                name = NOTIFICATION_DEFAULT_SITE_NAME
            else:
                # don't display None; fall back to an empty name
                name = ""

            subject = "[%s emit_notices] %r" % (name, e)
               
            message = "%s" % ("\n".join(
                    traceback.format_exception(*sys.exc_info())),)
            mail_admins(subject, message, fail_silently=True)
            # log it as critical
            logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #11
def main():
    logger.info('Starting DiKBM python client')

    lock = FileLock("dikbm")
    try:
        lock.acquire(0)
    except AlreadyLocked:
        logger.info('lock %s already locked' % lock.unique_name)
    except LockFailed:
        logger.error("lock %s can't be locked" % lock.unique_name)
    else:
        logger.debug('lock %s acquired' % lock.unique_name)

        try:
            client = DiKBMClient()
        except:
            logger.exception('Connect Error')
        else:
            try:
                client.proceed_in()
                client.proceed_status()
            except:
                logger.exception('Proceed Error')
        finally:
            lock.release()
            logger.debug('lock %s released' % lock.unique_name)
    finally:
        logger.info('Finished DiKBM python client')
Example #12
    def handle(self, *args, **kwargs):
        stdout_backup = None
        if COMMAND_LOG_ROOT and self.OUTPUT_LOG:
            pass # redirect output to file, not implemented yet
        lock = None
        if COMMAND_LOCK_ROOT and (COMMAND_USE_LOCK or self.USE_LOCK):
            lock = FileLock(os.path.join(COMMAND_LOCK_ROOT, self.COMMAND_NAME))
            try:
                lock.acquire(0)
            except:
                print("Command `%s` already running" % self.COMMAND_NAME)
                return

        print("\n======\nRunning `%s` command at %s\n======\n" % (self.COMMAND_NAME, str(datetime.now())))
        try:
            # This calls the handle(...) method defined in the inherited
            # class, which was renamed to _handle by BaseCommandMeta.
            self._handle(*args, **kwargs)
        except Exception as e:
            if COMMAND_HANDLE_EXCEPTIONS or self.HANDLE_EXCEPTIONS:
                print("Unexpected crash:")
                print(traceback.format_exc())
                if (COMMAND_EMAIL_EXCEPTIONS or self.EMAIL_EXCEPTIONS) and not settings.DEBUG:
                    mailer.send_mail("Command %s crash" % self.COMMAND_NAME, traceback.format_exc(), settings.DEFAULT_FROM_EMAIL, [email for name, email in settings.ADMINS ])
                    print("Emails were sent to admins of the website about this crash")
            else:
                raise
        finally:
            if lock is not None:
                lock.release()
Example #13
class ManagementLock():
    def __init__(self):
        self.lock = None
        
    def acquire(self):
        self.lock = FileLock(LOCK_PATH)
        reported = False
        
        # Attempt to obtain a lock, retrying every 10 seconds for at most 5
        # minutes (30 attempts). The retrying is necessary so we can report on
        # stderr that we are waiting for a lock. Otherwise, a user trying to
        # run the command manually might get confused why the command
        # execution is delayed.
        for idx in range(30):  # @UnusedVariable
            try:
                self.lock.acquire(10)
                return
            except LockTimeout:
                if not reported:
                    print("Another management command is running, waiting for lock...", file=sys.stderr)
                    reported = True
        
        raise RuntimeError("Failed to aquire lock.")
    
    def release(self):
        if self.lock:
            self.lock.release()
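
A usage sketch for the class above (run_command is a hypothetical stand-in for the guarded work):

lock = ManagementLock()
lock.acquire()
try:
    run_command()  # hypothetical: the work to serialize across invocations
finally:
    lock.release()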
Example #14
    def handle_noargs(self, **options):
        """Handle working on a single project or looping over several."""
        project = options.get("project")
        del options["project"]
        cron_batches = options.get("cron_batches")

        if options.get("view_batches"):
            if project or cron_batches:
                raise CommandError(
                    "view_batches can not be used with project or cron_batch"
                )
            # print out each batch that is in use, and the projects
            # that belong to it
            batches = PerformanceTestModel.get_projects_by_cron_batch()
            for key in sorted(batches.keys()):
                self.stdout.write("{0}: {1}\n".format(
                    key,
                    ", ".join(batches[key])),
                    )
            return

        if not (project or cron_batches):
            raise CommandError(
                "You must provide either a project or cron_batch value."
            )

        if project and cron_batches:
            raise CommandError(
                "You must provide either project or cron_batch, but not both.")

        if cron_batches:
            projects = PerformanceTestModel.get_cron_batch_projects(cron_batches)
        else:
            projects = [project]

        lock = FileLock(self.LOCK_FILE + '_' + str(project))

        timeout_sec = 10
        try:
            lock.acquire(timeout=timeout_sec)
            try:
                self.stdout.write(
                    "Starting for projects: {0}\n".format(", ".join(projects)))

                for p in projects:
                    self.handle_project(p, **options)

                self.stdout.write(
                    "Completed for {0} project(s).\n".format(len(projects)))
            finally:
                lock.release()

        except AlreadyLocked:
            self.stdout.write("This command is already being run elsewhere.  "
            "Please try again later.\n")

        except LockTimeout:
            self.stdout.write("Lock timeout of {0} seconds exceeded. "
                "Please try again later.\n".format(str(timeout_sec)) )
Example #15
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statement to be Python 2.4 compatible
        try:
            for queued_batch in NoticeQueueBatch.objects.all():
                notices = pickle.loads(str(queued_batch.pickled_data).decode("base64"))
                try:
                    for user, label, extra_context, on_site, sender in notices:
                        try:
                            user = User.objects.get(pk=user)
                            logging.info("emitting notice %s to %s" % (label, user))
                            # call this once per user to be atomic and allow for logging to
                            # accurately show how long each takes.
                            notification.send_now([user], label, extra_context, on_site, sender)
                        except User.DoesNotExist:
                            # Ignore deleted users, just warn about them
                            logging.warning("not emitting notice %s to user %s since it does not exist" % (label, user))
                        sent += 1
                except:
                    # If we sent half the batch, we don't want to resend those
                    # notices next time we run, so just throw away this
                    # (apparently faulty) queued_batch.
                    queued_batch.delete()
                    raise
                queued_batch.delete()
                batches += 1
        except:
            # get the exception
            exc_class, e, t = sys.exc_info()
            # email people
            current_site = Site.objects.get_current()
            subject = "[%s emit_notices] %r" % (current_site.name, e)
            message = "%s" % ("\n".join(traceback.format_exception(*sys.exc_info())),)
            mail_admins(subject, message, fail_silently=True)
            # log it as critical
            logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #16
def send_all():
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock(getattr(settings, "MAILER_LOCKFILE", "send_mail"))

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                    # In order for Django to reuse the connection, it has to
                    # already be open() so it sees new_conn_created as False
                    # and does not try and close the connection anyway.
                    connection.open()
                logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
                email = message.email
                if not email:
                    # We likely had a decoding problem when pulling it back out
                    # of the database. We should pass on this one.
                    mark_as_deferred(message, "message.email was None")
                    deferred += 1
                    continue
                email.connection = connection
                email.send()
                mark_as_sent(message)
                sent += 1
            except Exception, err:
                mark_as_deferred(message, err)
                deferred += 1
                # Get a new connection, in case the connection itself has an error.
                connection = None
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #17
def send_all():
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        for message in prioritize():
            if DontSendEntry.objects.has_address(message.to_address):
                logging.info("skipping email to %s as on don't send list " % message.to_address.encode("utf-8"))
                MessageLog.objects.log(message, 2)  # @@@ avoid using literal result code
                message.delete()
                dont_send += 1
            else:
                try:
                    logging.info(
                        "sending message '%s' to %s"
                        % (message.subject.encode("utf-8"), message.to_address.encode("utf-8"))
                    )
                    core_send_mail(message.subject, message.message_body, message.from_address, [message.to_address])
                    MessageLog.objects.log(message, 1)  # @@@ avoid using literal result code
                    message.delete()
                    sent += 1
                except (
                    socket_error,
                    smtplib.SMTPSenderRefused,
                    smtplib.SMTPRecipientsRefused,
                    smtplib.SMTPAuthenticationError,
                ), err:
                    message.defer()
                    logging.info("message deferred due to failure: %s" % err)
                    MessageLog.objects.log(message, 3, log_message=str(err))  # @@@ avoid using literal result code
                    deferred += 1
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred; %s don't send" % (sent, deferred, dont_send))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #18
def send_all(block_size=500):
    """
    Send all non-deferred messages in the queue.
    
    A lock file is used to ensure that this process can not be started again
    while it is already running.
    
    The ``block_size`` argument allows for queued messages to be iterated in
    blocks, allowing new prioritised messages to be inserted during iteration
    of a large number of queued messages.
    
    """
    lock = FileLock(LOCK_PATH)

    logger.debug("Acquiring lock...")
    try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) on systems which use a
        # LinkFileLock, so ensure that it is never a negative number.
        lock.acquire(LOCK_WAIT_TIMEOUT and max(0, LOCK_WAIT_TIMEOUT))
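        # Concretely, the guard above maps: None -> None (block until
        # acquired), negative -> 0 (raise AlreadyLocked at once if held),
        # positive -> unchanged (LockTimeout after that many seconds).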
    except AlreadyLocked:
        logger.debug("Lock already in place. Exiting.")
        return
    except LockTimeout:
        logger.debug("Waiting for the lock timed out. Exiting.")
        return
    logger.debug("Lock acquired.")

    start_time = time.time()

    sent = deferred = skipped = 0

    connection = None

    try:
        connection = SMTPConnection()
        blacklist = models.Blacklist.objects.values_list("email", flat=True)
        connection.open()
        for message in _message_queue(block_size):
            result = send_message(message, smtp_connection=connection, blacklist=blacklist)
            if result == constants.RESULT_SENT:
                sent += 1
            elif result == constants.RESULT_FAILED:
                deferred += 1
            elif result == constants.RESULT_SKIPPED:
                skipped += 1
        connection.close()
    finally:
        logger.debug("Releasing lock...")
        lock.release()
        logger.debug("Lock released.")

    logger.debug("")
    if sent or deferred or skipped:
        log = logger.warning
    else:
        log = logger.info
    log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
    logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
Example #19
    def flush(self):
        if not self._buffer:
            return

        if sys.platform.startswith('win'):
            return

        items = []
        for b in self._buffer:
            try:
                d    = DictUtils.merge(self._meta, b['data'])
                item = b['prefix'] + ' ' + JSON.asString(d)
            except Exception as err:
                item = '>> EXCEPTION: JSON ENCODING FAILED >> ' + str(err).replace('\n', '\t')

            try:
                item = item.encode('utf8', 'ignore')
            except Exception as err:
                item = '>> EXCEPTION: UNICODE DECODING FAILED >> ' + str(err).replace('\n', '\t')

            items.append(item)

        count   = self._fileCount
        offset  = random.randint(0, count - 1)
        success = False
        path    = self.getReportFolder() + self._timeCode + '/'
        if not os.path.exists(path):
            os.makedirs(path)

        for i in range(count):
            index = (i + offset) % count
            p     = path + str(index) + '.report'
            lock  = FileLock(p)
            if lock.i_am_locking() and i < count - 1:
                continue

            try:
                lock.acquire()
            except Exception:
                continue

            try:
                out = StringUtils.toUnicode('\n'.join(items) + '\n')
                f   = open(p, 'a+')
                f.write(out.encode('utf8'))
                f.close()
                success = True
            except Exception as err:
                print("REPORTER ERROR: Unable to write report file.")
                print(err)

            lock.release()
            if success:
                break

        self.clear()
        return success
Example #20
def send_all(batch_size=None):
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
                email = message.email
                email.connection = connection
                email.send()
                MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
                message.delete()
                sent += 1
            except (socket_error, smtplib.SMTPSenderRefused, smtplib.SMTPRecipientsRefused, smtplib.SMTPAuthenticationError), err:
                message.defer()
                logging.info("message deferred due to failure: %s" % err)
                MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
                deferred += 1
                # Get a new connection, in case the connection itself has an error.
                connection = None

            # stop when batch size is reached
            if batch_size is not None and sent + deferred >= batch_size:
                break

    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #21
def getFileLock(location, filename):
    lock = FileLock("%s/%s" % (location, filename))
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=60)
        except LockTimeout:
            # Waited a full minute: assume the holder is stale, break the
            # lock and take it ourselves.
            lock.break_lock()
            lock.acquire()
    return lock
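
A usage sketch for the helper above (paths hypothetical):

lock = getFileLock("/var/tmp", "shared.dat")
try:
    with open("/var/tmp/shared.dat", "a") as f:
        f.write("appended while holding the lock\n")
finally:
    lock.release()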
Example #22
def send_all(block_size=500, backend=None):
    """
    Send all non-deferred messages in the queue.

    A lock file is used to ensure that this process can not be started again
    while it is already running.

    The ``block_size`` argument allows for queued messages to be iterated in
    blocks, allowing new prioritised messages to be inserted during iteration
    of a large number of queued messages.

    """
    lock = FileLock(LOCK_PATH)

    logger.debug("Acquiring lock...")
    try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) on systems which use a
        # LinkFileLock, so ensure that it is never a negative number.
        lock.acquire(settings.LOCK_WAIT_TIMEOUT or 0)
        #lock.acquire(settings.LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logger.debug("Lock already in place. Exiting.")
        return
    except LockTimeout:
        logger.debug("Waiting for the lock timed out. Exiting.")
        return
    logger.debug("Lock acquired.")

    start_time = time.time()

    sent = deferred = skipped = 0

    try:
        if constants.EMAIL_BACKEND_SUPPORT:
            connection = get_connection(backend=backend)
        else:
            connection = get_connection()
        blacklist = models.Blacklist.objects.values_list('email', flat=True)
        connection.open()
        for message in _message_queue(block_size):
            try:
                result = send_queued_message(message, smtp_connection=connection,
                                  blacklist=blacklist)
            except Exception, e:
                result = constants.RESULT_FAILED
                logger.error(e)
            
            if result == constants.RESULT_SENT:
                sent += 1
            elif result == constants.RESULT_FAILED:
                deferred += 1
            elif result == constants.RESULT_SKIPPED:
                skipped += 1
        connection.close()
    finally:  # as in the otherwise-identical send_all example above
        logger.debug("Releasing lock...")
        lock.release()
        logger.debug("Lock released.")
Example #23
def send_all():
    """
    Send all eligible messages in the queue.
    """
    
    lock = FileLock("send_mail")
    
    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")
    
    start_time = time.time()
    
    dont_send = 0
    deferred = 0
    sent = 0

    def defer_msg(message,err):
        """ Defer, log and count a message """
        message.defer()
        logging.info('message deferred due to failure: %s' % err)
        MessageLog.objects.log(message, 3, log_message=str(err))
        
    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
                email = message.email
                email.connection = connection
                email.send()
                MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
                message.delete()
                sent += 1
                
            except (socket_error, smtplib.SMTPSenderRefused, smtplib.SMTPRecipientsRefused, smtplib.SMTPAuthenticationError), err:
                defer_msg(message,err)
                deferred += 1
                
                # Get a new connection, in case the connection itself has an error.
                connection = None

            except Exception, err:
                defer_msg(message,err)
                deferred += 1
                raise
    finally:  # as in the other send_all examples above
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
Example #24
    def handle(self, *args, **options):
        # Check whether it is already running or not
        lock = FileLock(os.path.join(settings.LOCK_ROOT, LOCK_FILE))
        try:
            lock.acquire(0)
        except:
            print ("It seems the command is processing already.")
            return

        try:
            import_translations_from_po()
        finally:
            lock.release()
Example #25
    def handle(self, *args, **options):
        """ Store pushlog data in the database. """

        repo_host = options.get("repo_host")
        enddate = options.get("enddate")
        numdays = options.get("numdays")
        hours = options.get("hours")
        branch = options.get("branch")
        verbosity = options.get("verbosity")
        project = options.get("project")

        if not repo_host:
            raise CommandError("You must supply a host name for the repo pushlogs " +
                         "to store: --repo_host hostname")

        if not numdays and not hours:
            raise CommandError("You must supply the number of days or hours of data.")
        else:
            if numdays:
                try:
                    numdays = int(numdays)
                except ValueError:
                    raise CommandError("numdays must be an integer.")

            if hours:

                try:
                    hours = int(hours)
                except ValueError:
                    raise CommandError("hours must be an integer.")

        lock = FileLock(self.LOCK_FILE)
        try:
            lock.acquire(timeout=0)
            try:
                plm = PushLogModel(project=project, out=self.stdout, verbosity=verbosity)

                # store the pushlogs for the branch specified, or all branches
                summary = plm.store_pushlogs(repo_host, numdays, hours, enddate, branch)
                self.println(("Branches: {0}\nPushlogs stored: {1}, skipped: {2}\n" +
                              "Changesets stored: {3}, skipped: {4}").format(
                        summary["branches"],
                        summary["pushlogs_stored"],
                        summary["pushlogs_skipped"],
                        summary["changesets_stored"],
                        summary["changesets_skipped"],
                        ))
                plm.disconnect()

            finally:
                lock.release()

        except AlreadyLocked:
            self.println("This command is already being run elsewhere.  Please try again later.")
Example #26
class SingleProcessEngine(BaseEngine):
    """
    An engine that ensures only one process can run at the same time. Useful
    when being fired off by a cron and you need to ensure a lock is made so
    other processes won't handle a queue at the same time.
    """
    def __init__(self, *args, **kwargs):
        self._lock_wait_timeout = kwargs.pop("lock_wait_timeout", -1)
        super(SingleProcessEngine, self).__init__(*args, **kwargs)
    
    def run(self):
        """
        Executes the runner using a lock file to prevent race conditions.
        """
        self._create_lock()
        if not self._acquire_lock():
            raise SystemExit
        try:
            super(SingleProcessEngine, self).run()
        finally:
            self._release_lock()
    
    def _create_lock(self):
        """
        Create the lock.
        """
        from lockfile import FileLock
        # The lock name must be the same in every process for the mutual
        # exclusion to work; a name derived from os.getpid() would never
        # contend, so a fixed name is used here.
        self._lock = FileLock("engine.lock")
    
    def _acquire_lock(self):
        """
        Attempt to acquire a lock. Returns False on failure or True on
        success.
        """
        from lockfile import AlreadyLocked, LockTimeout
        logging.debug("acquiring lock...")
        try:
            self._lock.acquire(self._lock_wait_timeout)
        except AlreadyLocked:
            logging.debug("lock already in place. quitting.")
            return False
        except LockTimeout:
            logging.debug("waiting for the lock timed out. quitting.")
            return False
        logging.debug("lock acquired.")
        return True
    
    def _release_lock(self):
        """
        Release the lock.
        """
        logging.debug("releasing lock...")
        self._lock.release()
        logging.debug("lock released.")
Example #27
def rotate_log_file(path):
    try:
        lockfile = FileLock(path + '.lock')
        lockfile.acquire(timeout=0)
    except LockError:
        return

    try:
        if os.path.exists(path) and os.stat(path).st_size > 1024 * 1024:
            os.rename(path, path + '.1')
    finally:
        lockfile.release()
Example #28
class Transaction():
    def __init__(self, model):
        self._model = model
        self._lock = FileLock('.lock')

    def __enter__(self):
        self._lock.acquire()
        return self

    def __exit__(self, *exc_info):
        self._lock.release()
        self._model.sync()
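
A usage sketch for the context manager above (the model object and its mutator are hypothetical; only the .sync() call is assumed by __exit__):

with Transaction(model):
    model.set('key', 'value')  # hypothetical mutation, done under the file lock

Note that __exit__ releases the lock before calling sync(), so the persistence step itself runs outside the critical section.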
Example #29
    def wrapper(self, *args, **options):
        def on_interrupt(signum, frame):
            # raise SystemExit so the lock file still gets released below
            sys.exit()
        signal.signal(signal.SIGTERM, on_interrupt)

        start_time = time.time()
        try:
            verbosity = int(options.get('verbosity', 0))
        except ValueError:
            verbosity = 0
        logger = logging.getLogger(self.__module__)
        if verbosity == 0:
            logger.level = logging.WARNING
        elif verbosity == 1:
            logger.level = logging.INFO
        else:
            logger.level = logging.DEBUG
       
        logger.debug("-" * 72)
        
        lock_name = self.__module__.split('.').pop()
        lock = FileLock(os.path.join(LOCK_ROOT, lock_name))
        
        logger.debug("%s - acquiring lock..." % lock_name)
        try:
            lock.acquire(LOCK_WAIT_TIMEOUT)
        except AlreadyLocked:
            logger.debug("lock already in place. quitting.")
            return
        except LockTimeout:
            logger.debug("waiting for the lock timed out. quitting.")
            return
        logger.debug("acquired.")
        
        try:
            handle(self, logger, *args, **options)
        except (KeyboardInterrupt, SystemExit):
            pass
        except:
            import traceback
            logger.warning("Command Failed")
            logger.warning('=' * 72)
            logger.warning(traceback.format_exc())
            logger.warning('=' * 72)
        
        logger.debug("releasing lock...")
        lock.release()
        logger.debug("released.")
        
        logger.info("done in %.2f seconds" % (time.time() - start_time))
        return
Example #30
def handle(*args):
    """Acquire the lock before running the method."""
    lock = FileLock(lockname)
    try:
        lock.acquire(timeout=-1)
    except AlreadyLocked:  # pragma: no cover
        print('Lock is already set, aborting.')
        return
    try:
        handler(*args)
    finally:
        lock.release()
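
A plausible enclosing decorator for the snippet above, reconstructed from its free variables lockname and handler (the factory name locked is hypothetical):

def locked(lockname):
    def decorator(handler):
        def handle(*args):
            # body exactly as shown above: acquire FileLock(lockname),
            # call handler(*args), release in finally
            pass
        return handle
    return decorator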
Example #31
def main(argv=None):
    '''Command line options.'''

    # setup argparser
    parser = argparse.ArgumentParser()

    parser.add_argument('--version',
                        action='version',
                        version='%s v%s (%s)' %
                        (os.path.basename(__file__), __version__, __updated__))

    # Actions
    action_group = parser.add_argument_group(
        "Actions", "A single action must be selected.")
    actions = action_group.add_mutually_exclusive_group(required=True)
    actions.add_argument("--report",
                         dest="report",
                         type=str,
                         help="Submit the given textual report",
                         metavar="TEXT")
    actions.add_argument("--report-from-file",
                         dest="report_file",
                         type=str,
                         help="Submit the given file as textual report",
                         metavar="FILE")
    actions.add_argument("--cycle",
                         dest="cycle",
                         type=str,
                         help="Cycle the pool with the given ID",
                         metavar="ID")

    # Options
    parser.add_argument(
        "--keep-reporting",
        dest="keep_reporting",
        default=0,
        type=int,
        help="Keep reporting from the specified file with specified interval",
        metavar="SECONDS")
    parser.add_argument("--random-offset",
                        dest="random_offset",
                        default=0,
                        type=int,
                        help="Random offset for the reporting interval (+/-)",
                        metavar="SECONDS")

    # Settings
    parser.add_argument("--serverhost",
                        dest="serverhost",
                        help="Server hostname for remote signature management",
                        metavar="HOST")
    parser.add_argument("--serverport",
                        dest="serverport",
                        type=int,
                        help="Server port to use",
                        metavar="PORT")
    parser.add_argument("--serverproto",
                        dest="serverproto",
                        help="Server protocol to use (default is https)",
                        metavar="PROTO")
    parser.add_argument("--serverauthtokenfile",
                        dest="serverauthtokenfile",
                        help="File containing the server authentication token",
                        metavar="FILE")
    parser.add_argument("--clientid",
                        dest="clientid",
                        help="Client ID to use when submitting issues",
                        metavar="ID")

    # process options
    opts = parser.parse_args(argv)

    if opts.keep_reporting and not opts.report_file:
        print("Error: --keep-reporting is only valid with --report-from-file",
              file=sys.stderr)
        return 2

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    reporter = EC2Reporter(opts.serverhost, opts.serverport, opts.serverproto,
                           serverauthtoken, opts.clientid)
    report = None

    if opts.cycle:
        reporter.cycle(opts.cycle)
        return 0
    elif opts.report_file:
        if opts.keep_reporting:
            if opts.random_offset > 0:
                random.seed(reporter.clientId)

            lock = FileLock(opts.report_file)
            while True:
                try:
                    if os.path.exists(opts.report_file):
                        lock.acquire(opts.keep_reporting)
                        with open(opts.report_file) as f:
                            report = f.read()
                        try:
                            reporter.report(report)
                        except RuntimeError as e:
                            # Ignore errors if the server is temporarily unavailable
                            print("Failed to contact server: %s" % e,
                                  file=sys.stderr)
                        lock.release()

                    random_offset = 0
                    if opts.random_offset:
                        random_offset = random.randint(-opts.random_offset,
                                                       opts.random_offset)
                    time.sleep(opts.keep_reporting + random_offset)
                except LockTimeout:
                    continue
        else:
            with open(opts.report_file) as f:
                report = f.read()
    else:
        report = opts.report

    reporter.report(report)
    return 0
Example #32
        """Handle stop signal cleanly"""
        syslog.syslog(syslog.LOG_INFO, "Received signal to stop Interceptor Daemon")

        return self.stop()


if __name__ == '__main__':
    # Must not be executed simultaneously (c.f. #265)
    lock = FileLock("/tmp/interceptord")

    try:
        options = Options()
        options.parseOptions()

        # Ensure there are no parallel runs of this script
        lock.acquire(timeout=2)

        # Prepare to start
        in_d = InterceptorDaemon(options)
        # Setup signal handlers
        signal.signal(signal.SIGINT, in_d.sighandler_stop)
        # Start InterceptorDaemon
        in_d.start()

        reactor.run()
    except usage.UsageError as errortext:
        print('%s: %s' % (sys.argv[0], errortext))
        print('%s: Try --help for usage details.' % (sys.argv[0]))
    except LockTimeout:
        print("Lock not acquired ! exiting")
    except AlreadyLocked:
Example #33
def write_aggregated_stats(base_dirs, outfile):
    '''
    Generate aggregated statistics from the given base directories
    and write them to the specified output file.
    
    @type base_dirs: list
    @param base_dirs: List of AFL base directories

    @type outfile: str
    @param outfile: Output file for aggregated statistics
    '''

    # Which fields to add
    wanted_fields_total = [
        'execs_done', 'execs_per_sec', 'pending_favs', 'pending_total',
        'variable_paths', 'unique_crashes', 'unique_hangs'
    ]

    # Which fields to aggregate by mean
    wanted_fields_mean = ['exec_timeout']

    # Which fields should be displayed per fuzzer instance
    wanted_fields_all = ['cycles_done', 'bitmap_cvg']

    # Which fields should be aggregated by max
    wanted_fields_max = ['last_path']

    aggregated_stats = {}

    for field in wanted_fields_total:
        aggregated_stats[field] = 0

    for field in wanted_fields_mean:
        aggregated_stats[field] = (0, 0)

    for field in wanted_fields_all:
        aggregated_stats[field] = []

    def convert_num(num):
        if '.' in num:
            return float(num)
        return int(num)

    for base_dir in base_dirs:
        stats_path = os.path.join(base_dir, "fuzzer_stats")

        if os.path.exists(stats_path):
            with open(stats_path, 'r') as stats_file:
                stats = stats_file.read()

            for line in stats.splitlines():
                (field_name, field_val) = line.split(':', 1)
                field_name = field_name.strip()
                field_val = field_val.strip()

                if field_name in wanted_fields_total:
                    aggregated_stats[field_name] += convert_num(field_val)
                elif field_name in wanted_fields_mean:
                    (val, cnt) = aggregated_stats[field_name]
                    aggregated_stats[field_name] = (val +
                                                    convert_num(field_val),
                                                    cnt + 1)
                elif field_name in wanted_fields_all:
                    aggregated_stats[field_name].append(field_val)
                elif field_name in wanted_fields_max:
                    num_val = convert_num(field_val)
                    if (field_name not in aggregated_stats
                            or aggregated_stats[field_name] < num_val):
                        aggregated_stats[field_name] = num_val

    # If we don't have any data here, then the fuzzers haven't written any statistics yet
    if not aggregated_stats:
        return

    # Mean conversion
    for field_name in wanted_fields_mean:
        (val, cnt) = aggregated_stats[field_name]
        if cnt:
            aggregated_stats[field_name] = float(val) / float(cnt)
        else:
            aggregated_stats[field_name] = val

    # Write out data
    fields = []
    fields.extend(wanted_fields_total)
    fields.extend(wanted_fields_mean)
    fields.extend(wanted_fields_all)
    fields.extend(wanted_fields_max)

    max_keylen = max([len(x) for x in fields])

    lock = FileLock(outfile)
    lock.acquire()
    with open(outfile, 'w') as f:
        for field in fields:
            if field not in aggregated_stats:
                continue

            val = aggregated_stats[field]

            if isinstance(val, list):
                val = " ".join(val)

            f.write("%s%s: %s\n" % (field, " " *
                                    (max_keylen + 1 - len(field)), val))
    lock.release()

    return
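
A usage sketch (sync-directory paths hypothetical); the FileLock around the write lets another process read the output file safely:

write_aggregated_stats(["/fuzz/sync/afl-0", "/fuzz/sync/afl-1"],
                       "/fuzz/sync/aggregated_stats.txt")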
Example #34
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statement to be Python 2.4 compatible
        for queued_batch in NoticeQueueBatch.objects.order_by('-id'):
            try:
                notices = pickle.loads(
                    str(queued_batch.pickled_data).decode("base64"))
                for user, label, extra_context, on_site, sender in notices:
                    try:
                        user = User.objects.get(pk=user)
                        logging.info("emitting notice %s to %s" %
                                     (label, user))
                        # call this once per user to be atomic and allow for logging to
                        # accurately show how long each takes.
                        notification.send_now([user], label, extra_context,
                                              on_site, sender)
                    except User.DoesNotExist:
                        # Ignore deleted users, just warn about them
                        logging.warning(
                            "not emitting notice %s to user %s since it does not exist"
                            % (label, user))
                    sent += 1
                queued_batch.delete()
                batches += 1
            except:
                # get the exception
                exc_class, e, t = sys.exc_info()
                # email people
                current_site = Site.objects.get_current()
                subject = "[%s emit_notices] %r" % (current_site.name, e)
                message = "%s" % ("\n".join(
                    traceback.format_exception(*sys.exc_info())), )
                mail_admins(subject, message, fail_silently=True)
                # log it as critical
                logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s batches, %s sent" % (
        batches,
        sent,
    ))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #35
class ExperimentGrid:
    @staticmethod
    def job_running(expt_dir, id):
        expt_grid = ExperimentGrid(expt_dir)
        expt_grid.set_running(id)

    @staticmethod
    def job_complete(expt_dir, id, value, duration):
        expt_grid = ExperimentGrid(expt_dir)
        expt_grid.set_complete(id, value, duration)

    @staticmethod
    def job_broken(expt_dir, id):
        expt_grid = ExperimentGrid(expt_dir)
        expt_grid.set_broken(id)

    def __init__(self, expt_dir, variables=None, grid_size=None, grid_seed=1):
        self.expt_dir = expt_dir
        self.jobs_pkl = os.path.join(expt_dir, 'expt-grid.pkl')
        self.locker = FileLock(self.jobs_pkl)

        # Only one process at a time is allowed to have access to this.
        sys.stderr.write("Waiting to lock grid...")
        self.locker.acquire()
        sys.stderr.write("...acquired\n")

        # Does this exist already?
        if variables is not None and not os.path.exists(self.jobs_pkl):

            # Set up the grid for the first time.
            self.seed = grid_seed
            self.vmap = GridMap(variables, grid_size)
            self.grid = self._hypercube_grid(self.vmap.card(), grid_size)
            self.status = np.zeros(grid_size, dtype=int) + CANDIDATE_STATE
            self.values = np.zeros(grid_size) + np.nan
            self.durs = np.zeros(grid_size) + np.nan
            self.sgeids = np.zeros(grid_size, dtype=int)

            # Save this out.
            self._save_jobs()
        else:

            # Load in from the pickle.
            self._load_jobs()

    def __del__(self):
        self._save_jobs()
        self.locker.release()
        sys.stderr.write("Released lock on job grid.\n")

    def get_grid(self):
        return self.grid, self.values, self.durs

    def get_candidates(self):
        return np.nonzero(self.status == CANDIDATE_STATE)[0]

    def get_pending(self):
        return np.nonzero((self.status == SUBMITTED_STATE)
                          | (self.status == RUNNING_STATE))[0]

    def get_complete(self):
        return np.nonzero(self.status == COMPLETE_STATE)[0]

    def get_broken(self):
        return np.nonzero(self.status == BROKEN_STATE)[0]

    def get_params(self, index):
        return self.vmap.get_params(self.grid[index, :])

    def get_best(self):
        finite = self.values[np.isfinite(self.values)]
        if len(finite) > 0:
            cur_min = np.min(finite)
            index = np.nonzero(self.values == cur_min)[0][0]
            return cur_min, index
        else:
            return np.nan, -1

    def get_sgeid(self, id):
        return np.asscalar(self.sgeids[id])

    def add_to_grid(self, candidate):
        # Set up the grid
        self.grid = np.vstack((self.grid, candidate))
        self.status = np.append(self.status,
                                np.zeros(1, dtype=int) + int(CANDIDATE_STATE))

        self.values = np.append(self.values, np.zeros(1) + np.nan)
        self.durs = np.append(self.durs, np.zeros(1) + np.nan)
        self.sgeids = np.append(self.sgeids, np.zeros(1, dtype=int))

        # Save this out.
        self._save_jobs()
        return self.grid.shape[0] - 1

    def set_candidate(self, id):
        self.status[id] = CANDIDATE_STATE
        self._save_jobs()

    def set_submitted(self, id, sgeid):
        self.status[id] = SUBMITTED_STATE
        self.sgeids[id] = sgeid
        self._save_jobs()

    def set_running(self, id):
        self.status[id] = RUNNING_STATE
        self._save_jobs()

    def set_complete(self, id, value, duration):
        self.status[id] = COMPLETE_STATE
        self.values[id] = value
        self.durs[id] = duration
        self._save_jobs()

    def set_broken(self, id):
        self.status[id] = BROKEN_STATE
        self._save_jobs()

    def _load_jobs(self):
        # Pickles must be read back in binary mode.
        with open(self.jobs_pkl, 'rb') as fh:
            jobs = cPickle.load(fh)

        self.vmap = jobs['vmap']
        self.grid = jobs['grid']
        self.status = jobs['status']
        self.values = jobs['values']
        self.durs = jobs['durs']
        self.sgeids = jobs['sgeids']

    def _save_jobs(self):

        # Write everything to a temporary file first.
        fh = tempfile.NamedTemporaryFile(mode='wb', delete=False)
        cPickle.dump(
            {
                'vmap': self.vmap,
                'grid': self.grid,
                'status': self.status,
                'values': self.values,
                'durs': self.durs,
                'sgeids': self.sgeids
            }, fh)
        fh.close()

        # Use an atomic move for better NFS happiness.
        if os.name == 'nt':
            cmd = 'move "%s" "%s"' % (fh.name, self.jobs_pkl)
        else:
            cmd = 'mv "%s" "%s"' % (fh.name, self.jobs_pkl)
        status = os.system(cmd)
        if status != 0:
            raise IOError("Failed to move %s to %s (exit status %d)"
                          % (fh.name, self.jobs_pkl, status))

    def _hypercube_grid(self, dims, size):
        # Generate candidate points from a low-discrepancy Sobol sequence,
        # which covers the unit hypercube more evenly than uniform sampling.
        sobol_grid = np.transpose(i4_sobol_generate(dims, size, self.seed))

        return sobol_grid
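
A brief usage sketch for the grid above. The experiment directory and the
GridMap variable specification are hypothetical placeholders; the point is
the lock lifecycle: the constructor blocks until the file lock is acquired,
and __del__ saves the pickle and releases it, so the object's lifetime
should be kept short.

# Hypothetical experiment directory and GridMap variable specification.
expt_dir = '/tmp/my-experiment'
variables = {'x': {'type': 'float', 'min': 0.0, 'max': 1.0, 'size': 1}}

# The constructor blocks until the file lock is acquired.
grid = ExperimentGrid(expt_dir, variables=variables, grid_size=100)
candidates = grid.get_candidates()
if len(candidates) > 0:
    job_id = candidates[0]
    params = grid.get_params(job_id)
    grid.set_submitted(job_id, sgeid=12345)
del grid  # __del__ saves the pickle and releases the lock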
Exemple #36
def send_all(block_size=500, backend=None):
    """
    Send all non-deferred messages in the queue.

    A lock file is used to ensure that this process can not be started again
    while it is already running.

    The ``block_size`` argument allows for queued messages to be iterated in
    blocks, allowing new prioritised messages to be inserted during iteration
    of a large number of queued messages.

    """
    lock = FileLock(LOCK_PATH)

    logger.debug("Acquiring lock...")
    try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) on systems which use a
        # LinkFileLock, so ensure that it is never a negative number.
        lock.acquire(settings.LOCK_WAIT_TIMEOUT or 0)
    except AlreadyLocked:
        logger.debug("Lock already in place. Exiting.")
        return
    except LockTimeout:
        logger.debug("Waiting for the lock timed out. Exiting.")
        return
    logger.debug("Lock acquired.")

    start_time = time.time()

    sent = deferred = skipped = 0

    try:
        if constants.EMAIL_BACKEND_SUPPORT:
            connection = get_connection(backend=backend)
        else:
            connection = get_connection()
        blacklist = models.Blacklist.objects.values_list('email', flat=True)
        connection.open()
        for message in _message_queue(block_size):
            result = send_queued_message(message,
                                         smtp_connection=connection,
                                         blacklist=blacklist)
            if result == constants.RESULT_SENT:
                sent += 1
            elif result == constants.RESULT_FAILED:
                deferred += 1
            elif result == constants.RESULT_SKIPPED:
                skipped += 1
        connection.close()
    finally:
        logger.debug("Releasing lock...")
        lock.release()
        logger.debug("Lock released.")

    logger.debug("")
    if sent or deferred or skipped:
        log = logger.warning
    else:
        log = logger.info
    log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
    logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
class CronScript(object):
    ''' Convenience class for writing cron scripts '''

    def __init__(self, args=None, options=None, usage=None,
                 disable_interspersed_args=False):
        self.lock = None
        self.start_time = None
        self.end_time = None

        if options is None:
            options = []

        if args is None:
            args = sys.argv[1:]

        prog = os.path.basename(main.__file__)
        logfile = os.path.join('/var/log/', "%s.log" % prog)
        lockfile = os.path.join('/var/lock/', "%s" % prog)
        stampfile = os.path.join('/var/tmp/', "%s.success" % prog)
        options.append(make_option("--debug", "-d", action="store_true",
                                   help="Minimum log level of DEBUG"))
        options.append(make_option("--quiet", "-q", action="store_true",
                                   help="Only WARN and above to stdout"))
        options.append(make_option("--nolog", action="store_true",
                                   help="Do not log to LOGFILE"))
        options.append(make_option("--logfile", type="string",
                                   default=logfile,
                                   help="File to log to, default %default"))
        options.append(make_option("--syslog", action="store_true",
                                   help="Log to syslog instead of a file"))
        options.append(make_option("--nolock", action="store_true",
                                   help="Do not use a lockfile"))
        options.append(make_option("--lockfile", type="string",
                                   default=lockfile,
                                   help="Lock file, default %default"))
        options.append(make_option("--nostamp", action="store_true",
                                   help="Do not use a success stamp file"))
        options.append(make_option("--stampfile", type="string",
                                   default=stampfile,
                                   help="Success stamp file, default %default"))
        helpmsg = "Lock timeout in seconds, default %default"
        options.append(make_option("--locktimeout", default=90, type="int",
                                   help=helpmsg))
        helpmsg = "Sleep a random time between 0 and N seconds before starting, default %default"
        options.append(make_option("--splay", default=0, type="int",
                                   help=helpmsg))

        parser = OptionParser(option_list=options, usage=usage)
        if disable_interspersed_args:
            # Stop option parsing at first non-option
            parser.disable_interspersed_args()
        (self.options, self.args) = parser.parse_args(args)

        self.logger = logging.getLogger(main.__name__)

        if self.options.debug:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)

        # Log to syslog
        if self.options.syslog:
            syslog_formatter = logging.Formatter("%s: %%(levelname)s %%(message)s" % prog)
            handler = logging.handlers.SysLogHandler(
                    address="/dev/log",
                    facility=logging.handlers.SysLogHandler.LOG_LOCAL3
                    )
            handler.setFormatter(syslog_formatter)
            self.logger.addHandler(handler)

        default_formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s",
                                              "%Y-%m-%d-%H:%M:%S")
        if not self.options.nolog:
            # Log to file
            try:
                handler = MultiProcessingLog(
                    "%s" % (self.options.logfile),
                    maxBytes=(50 * 1024 * 1024),
                    backupCount=10)
            except IOError:
                sys.stderr.write("Fatal: Could not open log file: %s\n"
                                 % self.options.logfile)
                sys.exit(1)

            handler.setFormatter(default_formatter)
            self.logger.addHandler(handler)

        # If quiet, only WARNING and above go to STDERR; otherwise all
        # logging goes to stderr
        handler2 = MultiProcessingLogStream(sys.stderr)
        if self.options.quiet:
            err_filter = StdErrFilter()
            handler2.addFilter(err_filter)
        handler2.setFormatter(default_formatter)
        self.logger.addHandler(handler2)

        self.logger.debug(self.options)

    def __enter__(self):
        if self.options.splay > 0:
            splay = randint(0, self.options.splay)
            self.logger.debug('Sleeping for %d seconds (splay=%d)' %
                              (splay, self.options.splay))
            time.sleep(splay)
        self.start_time = DT.datetime.today()
        if not self.options.nolock:
            self.logger.debug('Attempting to acquire lock %s (timeout %s)',
                              self.options.lockfile,
                              self.options.locktimeout)
            self.lock = FileLock(self.options.lockfile)
            try:
                self.lock.acquire(timeout=self.options.locktimeout)
            except LockFailed as e:
                self.logger.error("Lock could not be acquired.")
                self.logger.error(str(e))
                sys.exit(1)
            except LockTimeout:
                msg = "Lock could not be acquired. Timeout exceeded."
                self.logger.error(msg)
                sys.exit(1)

    def __exit__(self, etype, value, traceback):
        self.end_time = DT.datetime.today()
        self.logger.debug('Run time: %s', self.end_time - self.start_time)
        if not self.options.nolock:
            self.logger.debug('Attempting to release lock %s',
                              self.options.lockfile)
            self.lock.release()
        if etype is None:
            if not self.options.nostamp:
                # Touch the success stamp file; close the handle explicitly
                # rather than leaking it.
                open(self.options.stampfile, "w").close()
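
A minimal sketch of how CronScript might be driven from a cron job. Note
that __enter__ returns None, so the instance is created first and then used
as the context manager; all option values here come from the defaults above.

def run():
    # Option parsing and logging setup happen in __init__; the splay sleep
    # and lock acquisition happen in __enter__.
    script = CronScript(usage="usage: %prog [options]")
    with script:
        # Anything here runs under the lock; the success stamp file is
        # written only if this block exits without an exception.
        script.logger.info("starting nightly maintenance")

if __name__ == '__main__':
    run()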
class LockTest(TestCase):
    """
    Tests for Django Mailer trying to send mail when the lock is already in
    place.
    """
    def setUp(self):
        # Create somewhere to store the log debug output.
        self.output = StringIO()
        # Create a log handler which can capture the log debug output.
        self.handler = logging.StreamHandler(self.output)
        self.handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(message)s')
        self.handler.setFormatter(formatter)
        # Add the log handler.
        logger = logging.getLogger('django_yubin')
        logger.addHandler(self.handler)

        # Set the LOCK_WAIT_TIMEOUT to the default value.
        self.original_timeout = settings.LOCK_WAIT_TIMEOUT
        settings.LOCK_WAIT_TIMEOUT = 0

        # Use a test lock-file name in case something goes wrong, then emulate
        # that the lock file has already been acquired by another process.
        self.original_lock_path = engine.LOCK_PATH
        engine.LOCK_PATH += '.mailer-test'
        self.lock = FileLock(engine.LOCK_PATH)
        self.lock.unique_name += '.mailer_test'
        self.lock.acquire(0)

    def tearDown(self):
        # Remove the log handler.
        logger = logging.getLogger('django_yubin')
        logger.removeHandler(self.handler)

        # Revert the LOCK_WAIT_TIMEOUT to its original value.
        settings.LOCK_WAIT_TIMEOUT = self.original_timeout

        # Revert the lock file path and release the test lock.
        engine.LOCK_PATH = self.original_lock_path
        self.lock.release()

    def test_locked(self):
        # Acquire the lock so that send_all will fail.
        engine.send_all()
        self.output.seek(0)
        self.assertEqual(self.output.readlines()[-1].strip(),
                         'Lock already in place. Exiting.')
        # Try with a timeout.
        settings.LOCK_WAIT_TIMEOUT = .1
        engine.send_all()
        self.output.seek(0)
        self.assertEqual(self.output.readlines()[-1].strip(),
                         'Waiting for the lock timed out. Exiting.')

    def test_locked_timeoutbug(self):
        # We want to emulate lock acquisition taking no time, so the first
        # three calls to time.time() return 0 (after which the real function
        # is restored).
        original_time = time.time
        global time_call_count
        time_call_count = 0

        def fake_time():
            global time_call_count
            time_call_count = time_call_count + 1
            if time_call_count >= 3:
                time.time = original_time
            return 0

        time.time = fake_time
        try:
            engine.send_all()
            self.output.seek(0)
            self.assertEqual(self.output.readlines()[-1].strip(),
                             'Lock already in place. Exiting.')
        finally:
            time.time = original_time
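
Outside the test harness, the contention scenario these tests emulate can be
reproduced with the lockfile API alone. The path below is illustrative, and
the unique_name suffix mirrors the trick used in setUp above to make one
FileLock object look like a different process.

from lockfile import FileLock, AlreadyLocked

lock_path = '/tmp/mailer-demo'  # illustrative path

holder = FileLock(lock_path)
holder.unique_name += '.other-process'  # pretend another process owns it
holder.acquire(0)

contender = FileLock(lock_path)
try:
    contender.acquire(0)
except AlreadyLocked:
    print('Lock already in place. Exiting.')
finally:
    holder.release()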