Example #1
1
	def subprocess_loop(self):
		"""
			An internal loop the subprocess runs to write queued output
		"""
		
		# convert to a full path and make a lock
		path = os.path.realpath(self.path)
		lock = FileLock(path)
		
		while True:
			
			time.sleep(DELAY_TIME)
			if not self.Q.empty():
				lock.acquire() # get the lock (or wait until we do)
				with open(path, 'a') as o:
					while not self.Q.empty(): # dump the entire queue
						x = self.Q.get()
						if x is None: # this is our signal we are done with input 
							lock.release()
							return
						else: # write out this batch of items
							for xi in x: print >>o, xi,
							# No newline by default now
							#print >>o, "\n",
				lock.release()
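A note on Example #1: the lock is released manually on both exit paths, so an exception while writing would leave it held. FileLock objects in the lockfile package also support the context-manager protocol, which guarantees release; a minimal sketch of the same drain loop in that style (the queue, path, and DELAY_TIME follow the example above; the method name is illustrative):

	def subprocess_loop_ctx(self):
		"""Same drain loop, but the lock is released even if writing fails."""
		path = os.path.realpath(self.path)
		lock = FileLock(path)
		while True:
			time.sleep(DELAY_TIME)
			if self.Q.empty():
				continue
			with lock: # acquire() on entry, release() on exit, even on return
				with open(path, 'a') as o:
					while not self.Q.empty():
						x = self.Q.get()
						if x is None: # sentinel: input is finished
							return
						o.write(' '.join(str(xi) for xi in x))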
Example #2
0
def main(argv=None):
    global argparser, lockfilepath
    global session, server, db
    if argv is None:
        args = argparser.parse_args()
    else:
        args = argparser.parse_args(argv)
    try:
        session, server, db = getCheshire3Env(args)
    except (EnvironmentError, ObjectDoesNotExistException):
        return 1
    with db.get_path(session, 'defaultLogger') as session.logger:
        mp = db.get_path(session, 'metadataPath')
        lock = FileLock(mp)
        if lock.is_locked() and args.unlock:
            # Forcibly unlock
            session.logger.log_warning(session, "Unlocking Database")
            lock.break_lock()
        try:
            lock.acquire(timeout=30)    # wait up to 30 seconds
        except LockTimeout:
            msg = ("The database is locked. It is possible that another"
                   "user is currently indexing this database. Please wait at "
                   "least 10 minutes and then try again. If you continue to "
                   "get this message and you are sure no one is reindexing "
                   "the database please contact the archives hub team for "
                   "advice."
                   )
            session.logger.log_critical(session, msg)
            return 1
        try:
            return args.func(args)
        finally:
            lock.release()
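Example #2's forcible unlock combines is_locked() with break_lock(); note that break_lock() removes the lock file regardless of which process holds it. A sketch of the pattern in isolation (the function and flag names are illustrative):

def acquire_or_break(path, wait=30, force=False):
    """Acquire the lock at `path`, optionally breaking a stale one first."""
    lock = FileLock(path)
    if force and lock.is_locked():
        lock.break_lock()  # removes the lock file no matter who owns it
    lock.acquire(timeout=wait)  # raises LockTimeout if still held after `wait`
    return lock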
Example #3
0
def start_up():
    """Start up this MAAS server.

    This is used to:
    - make sure the singletons required by the application are created
    - sync the configuration of the external systems driven by MAAS

    This method is called when the MAAS application starts up.
    In production, it's called from the WSGI script, so it must not block
    at any cost.  It should simply call very simple methods or Celery tasks.

    The method will be executed multiple times if multiple processes are used,
    but it uses file-based locking to ensure that the methods it calls
    internally are not run concurrently.
    """
    lock = FileLock(LOCK_FILE_NAME)
    # In case this process gets shut down, clean up the lock.
    atexit.register(lock.break_lock)

    lock.acquire(timeout=LOCK_TIMEOUT)
    try:
        inner_start_up()
    finally:
        lock.release()
    post_start_up()
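One caveat in Example #3: registering break_lock() with atexit removes the lock file unconditionally, even if another process has since acquired it. A more conservative cleanup (a sketch, not MAAS's actual code) releases the lock only if this process holds it:

def register_lock_cleanup(lock):
    """At exit, release `lock` only if this process is the one holding it."""
    def cleanup():
        if lock.i_am_locking():
            lock.release()
    atexit.register(cleanup)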
Example #4
0
class ManagementLock():
    def __init__(self):
        self.lock = None
        
    def acquire(self):
        self.lock = FileLock(LOCK_PATH)
        reported = False
        
        # Attempt to obtain the lock, retrying every 10 seconds for at most
        # 5 minutes. The retrying is necessary so we can report on stderr that
        # we are waiting for the lock; otherwise, a user running the command
        # manually might be confused about why execution is delayed.
        for idx in range(30):  # @UnusedVariable
            try:
                self.lock.acquire(10)
                return
            except LockTimeout:
                if not reported:
                    print("Another management command is running, waiting for lock...", file=sys.stderr)
                    reported = True
        
        raise RuntimeError("Failed to aquire lock.")
    
    def release(self):
        if self.lock:
            self.lock.release()
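Since ManagementLock pairs acquire() with release(), it maps naturally onto the context-manager protocol; a sketch of that extension, building on the class above (the subclass name is illustrative):

class ManagementLockContext(ManagementLock):
    """A ManagementLock usable in a `with` statement."""

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.release()  # runs even if the managed block raises

# Usage sketch:
# with ManagementLockContext():
#     run_management_command()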
Example #5
0
def atomic_write(content, filename, overwrite=True, mode=0600):
    """Write `content` into the file `filename` in an atomic fashion.

    This requires write permissions to the directory that `filename` is in.
    It creates a temporary file in the same directory (so that it will be
    on the same filesystem as the destination) and then renames it to
    replace the original, if any.  Such a rename is atomic in POSIX.

    :param overwrite: Overwrite `filename` if it already exists?  Default
        is True.
    :param mode: Access permissions for the file, if written.
    """
    temp_file = _write_temp_file(content, filename)
    os.chmod(temp_file, mode)
    try:
        if overwrite:
            os.rename(temp_file, filename)
        else:
            lock = FileLock(filename)
            lock.acquire()
            try:
                if not os.path.isfile(filename):
                    os.rename(temp_file, filename)
            finally:
                lock.release()
    finally:
        if os.path.isfile(temp_file):
            os.remove(temp_file)
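A short usage sketch for atomic_write (the _write_temp_file helper is assumed to create the temporary file in the destination's directory, as the docstring describes; the paths and contents are illustrative):

# Readers see either the old or the new contents, never a partial write.
atomic_write("max_connections = 100\n", "/etc/example/app.conf", mode=0600)

# Create-only mode: the lock prevents two processes from both passing the
# isfile() check and renaming over each other's file.
atomic_write("seed data\n", "/var/lib/example/seed", overwrite=False)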
Example #6
0
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(NOTIFICATION_LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statement to be Python 2.4 compatible
        try:
            for queued_batch in NoticeQueueBatch.objects.all():
                notices = pickle.loads(
                    str(queued_batch.pickled_data).decode("base64"))
                for user, label, extra_context, on_site in notices:
                    user = User.objects.get(pk=user)
                    logging.info("emitting notice to %s" % user)
                    # call this once per user to be atomic and allow for 
                    # logging to accurately show how long each takes.
                    notification.send_now([user], label, extra_context, on_site)
                    sent += 1
                queued_batch.delete()
                batches += 1
        except:
            # get the exception
            exc_class, e, t = sys.exc_info()
            # email people
            
            if NOTIFICATION_USE_SITE:
                name = Site.objects.get_current().name
            elif NOTIFICATION_DEFAULT_SITE_NAME:
                name = NOTIFICATION_DEFAULT_SITE_NAME
            else:
                # don't display None; fall back to an empty name
                name = ""

            subject = "[%s emit_notices] %r" % (name, e)
               
            message = "%s" % ("\n".join(
                    traceback.format_exception(*sys.exc_info())),)
            mail_admins(subject, message, fail_silently=True)
            # log it as critical
            logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #7
0
	def MakeGraph(self, Data, FileName):
		"""
		Produces a time-series graph.

		:param Data: Data series to plot.
		:param FileName: File name to assign to the generated graph.
		:returns: *nothing*

		"""
		global Interval,TimeStep

		Markers=[]
		FileName = 'extra/MonitorGraph/'+FileName

		for x in range((TimeStep-1)*Interval,-1,-Interval): Markers.append(str(x))

		#each time, try to acquire the lock to create a new image; if that fails, break the lock
		TempLock = FileLock(FileName)
		try:
			TempLock.acquire(timeout=Interval)
		except LockTimeout:
			TempLock.break_lock()
		else:
			cairoplot.dot_line_plot(FileName, dict(zip(self.ProbeList, Data[:])),
				600, 200, axis=True, grid=True, series_legend=False,
				x_labels=Markers, series_colors=self.Colors)
			TempLock.release()
Example #8
0
	def MakeGraphPercent(self, Data, FileName):
		"""
		Produces a percentage graph as a pie plot.

		:param Data: Value to plot.
		:param FileName: File name to assign to the generated graph.
		:returns: *nothing*

		"""
		global Interval
		Labels = ["%IN USO","TOT"]
		FileName = 'extra/MonitorGraph/'+FileName

		#print "**Data Graph**"
		#print Data

		#select the color combination for the percentage graphs, with thresholds [0,33], [34,66], [67,100]
		if  (Data <= 33): PercentColors = ["lime","gray"]
		elif (Data <= 66): PercentColors = ["yellow","light_gray"]
		else : PercentColors = ["red","white"]
		Data = [int(Data),100-int(Data)]

		#each time, try to acquire the lock to create a new image; if that fails, break the lock
		TempLock = FileLock(FileName)
		try:
			TempLock.acquire(timeout=Interval)
		except LockTimeout:
			TempLock.break_lock()
		else:
			cairoplot.pie_plot(FileName, dict(zip(Labels,Data)), 185, 130, colors = PercentColors)
			TempLock.release()
Example #9
0
	def MakeGraphTop3(self, Data, FileName):
		"""
		Produces a histogram of the highest-latency nodes.

		:param Data: Data series to plot.
		:param FileName: File name to assign to the generated graph.
		:returns: *nothing*

		"""
		global Interval
		FileName = 'extra/MonitorGraph/'+FileName

		ordered = sorted(Data.iteritems(), key=operator.itemgetter(1), reverse=True)
		first3 = []
		colors3 = []
		for item in ordered:
			if (len(first3) < 3) and (item[0] in self.ProbeList):
				colors3.append(self.Colors[sorted(self.ProbeList).index(item[0])])
				first3.append(item[1])

		#each time, try to acquire the lock to create a new image; if that fails, break the lock
		TempLock = FileLock(FileName)
		try:
			TempLock.acquire(timeout=Interval)
		except LockTimeout:
			TempLock.break_lock()
		else:
			cairoplot.vertical_bar_plot(FileName, first3, 170, 130, display_values=True, colors=colors3)
			TempLock.release()
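Examples #7-#9 repeat the same acquire-or-break idiom: try to take the graph file's lock for one interval, break a presumably stale lock on timeout, and otherwise plot and release. Factored out as a sketch (the `draw` callback is illustrative):

def draw_with_lock(FileName, timeout, draw):
    """Run draw() under FileName's lock, breaking a stale lock on timeout."""
    TempLock = FileLock(FileName)
    try:
        TempLock.acquire(timeout=timeout)
    except LockTimeout:
        TempLock.break_lock()  # assume the holder died; the next cycle retries
    else:
        try:
            draw()
        finally:
            TempLock.release()  # release even if plotting fails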
Example #10
0
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statement to be Python 2.4 compatible
        try:
            for queued_batch in NoticeQueueBatch.objects.all():
                notices = pickle.loads(str(queued_batch.pickled_data).decode("base64"))
                batch_sent = 0
                for user, label, extra_context, on_site, sender in notices:
                    try:
                        user = User.objects.get(pk=user)
                        logging.info("emitting notice to %s" % user)
                        # call this once per user to be atomic and allow for logging to
                        # accurately show how long each takes.
                        notification.send_now([user], label, extra_context, on_site, sender)
                        sent += 1
                        batch_sent += 1
                    except:
                        # get the exception
                        _, e, _ = sys.exc_info()
                        # log it as critical
                        logging.critical("an exception occurred: %r" % e)
                        # update the queued_batch, removing notices that had been successfully sent
                        queued_batch.pickled_data = pickle.dumps(notices[batch_sent:]).encode("base64")
                        queued_batch.save()
                queued_batch.delete()
                batches += 1
        except:
            # get the exception
            exc_class, e, t = sys.exc_info()
            # email people
            current_site = Site.objects.get_current()
            subject = "[%s emit_notices] %r" % (current_site.name, e)
            message = "%s" % ("\n".join(traceback.format_exception(*sys.exc_info())),)
            mail_admins(subject, message, fail_silently=True)
            # log it as critical
            logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #11
0
def main():
    logger.info('Starting DiKBM python client')

    lock = FileLock("dikbm")
    try:
        lock.acquire(0)
    except AlreadyLocked:
        logger.info('lock %s already locked' % lock.unique_name)
    except LockFailed:
        logger.error('lock %s cant be locked' % lock.unique_name)
    else:
        logger.debug('lock %s acquired' % lock.unique_name)

        try:
            client = DiKBMClient()
        except:
            logger.exception('Connect Error')
        else:
            try:
                client.proceed_in()
                client.proceed_status()
            except:
                logger.exception('Proceed Error')
        finally:
            lock.release()
            logger.debug('lock %s released' % lock.unique_name)
    finally:
        logger.info('Finished DiKBM python client')
Example #12
0
    def _handle(self, *args, **kwargs):
        stdout_backup = None
        if COMMAND_LOG_ROOT and self.OUTPUT_LOG:
            pass # redirect output to file, not implemented yet
        lock = None
        if COMMAND_LOCK_ROOT and (COMMAND_USE_LOCK or self.USE_LOCK):
            lock = FileLock(os.path.join(COMMAND_LOCK_ROOT, self.COMMAND_NAME))
            try:
                lock.acquire(0)
            except:
                print("Command `%s` already running" % self.COMMAND_NAME)
                return

        print("\n======\nRunning `%s` command at %s\n======\n" % (self.COMMAND_NAME, str(datetime.now())))
        try:
            # This calls the handle(...) method of the subclass, which BaseCommandMeta renamed to _handle
            self._handle(*args, **kwargs)
        except Exception as e:
            if COMMAND_HANDLE_EXCEPTIONS or self.HANDLE_EXCEPTIONS:
                print("Unexpected crash:")
                print(traceback.format_exc())
                if (COMMAND_EMAIL_EXCEPTIONS or self.EMAIL_EXCEPTIONS) and not settings.DEBUG:
                    mailer.send_mail("Command %s crash" % self.COMMAND_NAME, traceback.format_exc(), settings.DEFAULT_FROM_EMAIL, [email for name, email in settings.ADMINS ])
                    print("Emails were sent to admins of the website about this crash")
            else:
                raise
        finally:
            if lock is not None:
                lock.release()
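Example #12 relies on acquire(0): with a zero timeout, lockfile raises AlreadyLocked immediately instead of waiting, which is what turns the lock into an "already running" check. A tighter sketch that catches the specific lock exceptions rather than a bare except (names are illustrative):

def try_command_lock(lock_root, command_name):
    """Return a held FileLock, or None if the command is already running."""
    lock = FileLock(os.path.join(lock_root, command_name))
    try:
        lock.acquire(0)  # zero timeout: fail fast instead of blocking
    except (AlreadyLocked, LockFailed):
        return None
    return lock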
Example #13
0
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, total_sent = 0, 0
    start_time = time.time()

    try:
        for queued_batch in NoticeQueueBatch.objects.order_by('-id'):
            sent = emit_batch(queued_batch)
            total_sent += sent
            if sent > 0:
                batches += 1
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #14
0
def send_all(block_size=500):
    """
    Send all non-deferred messages in the queue.
    
    A lock file is used to ensure that this process can not be started again
    while it is already running.
    
    The ``block_size`` argument allows for queued messages to be iterated in
    blocks, allowing new prioritised messages to be inserted during iteration
    of a large number of queued messages.
    
    """
    lock = FileLock(LOCK_PATH)

    logger.debug("Acquiring lock...")
    try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) on systems which use a
        # LinkFileLock, so ensure that it is never a negative number.
        lock.acquire(LOCK_WAIT_TIMEOUT and max(0, LOCK_WAIT_TIMEOUT))
    except AlreadyLocked:
        logger.debug("Lock already in place. Exiting.")
        return
    except LockTimeout:
        logger.debug("Waiting for the lock timed out. Exiting.")
        return
    logger.debug("Lock acquired.")

    start_time = time.time()

    sent = deferred = skipped = 0

    connection = None

    try:
        connection = SMTPConnection()
        blacklist = models.Blacklist.objects.values_list("email", flat=True)
        connection.open()
        for message in _message_queue(block_size):
            result = send_message(message, smtp_connection=connection, blacklist=blacklist)
            if result == constants.RESULT_SENT:
                sent += 1
            elif result == constants.RESULT_FAILED:
                deferred += 1
            elif result == constants.RESULT_SKIPPED:
                skipped += 1
        connection.close()
    finally:
        logger.debug("Releasing lock...")
        lock.release()
        logger.debug("Lock released.")

    logger.debug("")
    if sent or deferred or skipped:
        log = logger.warning
    else:
        log = logger.info
    log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
    logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
Example #15
0
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statement to be Python 2.4 compatible
        try:
            for queued_batch in NoticeQueueBatch.objects.all():
                notices = pickle.loads(str(queued_batch.pickled_data).decode("base64"))
                try:
                    for user, label, extra_context, on_site, sender in notices:
                        try:
                            user = User.objects.get(pk=user)
                            logging.info("emitting notice %s to %s" % (label, user))
                            # call this once per user to be atomic and allow for logging to
                            # accurately show how long each takes.
                            notification.send_now([user], label, extra_context, on_site, sender)
                        except User.DoesNotExist:
                            # Ignore deleted users, just warn about them
                            logging.warning("not emitting notice %s to user %s since it does not exist" % (label, user))
                        sent += 1
                except:
                    # if we sent half the batch, we don't want to resend notices to the first half
                    # next time we run it, so just throw away this (apparently faulty) queued_batch
                    queued_batch.delete()
                    raise
                queued_batch.delete()
                batches += 1
        except:
            # get the exception
            exc_class, e, t = sys.exc_info()
            # email people
            current_site = Site.objects.get_current()
            subject = "[%s emit_notices] %r" % (current_site.name, e)
            message = "%s" % ("\n".join(traceback.format_exception(*sys.exc_info())),)
            mail_admins(subject, message, fail_silently=True)
            # log it as critical
            logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #16
0
    def handle_noargs(self, **options):
        """Handle working on a single project or looping over several."""
        project = options.get("project")
        del options["project"]
        cron_batches = options.get("cron_batches")

        if options.get("view_batches"):
            if project or cron_batches:
                raise CommandError(
                    "view_batches can not be used with project or cron_batch"
                )
            # print out each batch that is in use, and the projects
            # that belong to it
            batches = PerformanceTestModel.get_projects_by_cron_batch()
            for key in sorted(batches.keys()):
                self.stdout.write("{0}: {1}\n".format(
                    key,
                    ", ".join(batches[key])),
                    )
            return

        if not (project or cron_batches):
            raise CommandError(
                "You must provide either a project or cron_batch value."
            )

        if project and cron_batches:
            raise CommandError(
                "You must provide either project or cron_batch, but not both.")

        if cron_batches:
            projects = PerformanceTestModel.get_cron_batch_projects(cron_batches)
        else:
            projects = [project]

        lock = FileLock(self.LOCK_FILE + '_' + str(project))

        timeout_sec = 10
        try:
            lock.acquire(timeout=timeout_sec)
            try:
                self.stdout.write(
                    "Starting for projects: {0}\n".format(", ".join(projects)))

                for p in projects:
                    self.handle_project(p, **options)

                self.stdout.write(
                    "Completed for {0} project(s).\n".format(len(projects)))
            finally:
                lock.release()

        except AlreadyLocked:
            self.stdout.write("This command is already being run elsewhere.  "
            "Please try again later.\n")

        except LockTimeout:
            self.stdout.write("Lock timeout of {0} seconds exceeded. "
                "Please try again later.\n".format(str(timeout_sec)) )
Example #17
0
def send_all():
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock(getattr(settings, "MAILER_LOCKFILE", "send_mail"))

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                    # In order for Django to reuse the connection, it has to
                    # already be open() so that it sees new_conn_created as
                    # False and does not try to close the connection anyway.
                    connection.open()
                logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
                email = message.email
                if not email:
                    # We likely had a decoding problem when pulling it back out
                    # of the database. We should pass on this one.
                    mark_as_deferred(message, "message.email was None")
                    deferred += 1
                    continue
                email.connection = connection
                email.send()
                mark_as_sent(message)
                sent += 1
            except Exception, err:
                mark_as_deferred(message, err)
                deferred += 1
                # Get a new connection, in case the connection itself has an error.
                connection = None
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #18
0
def send_all():
    lock = FileLock("send_notices")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    batches, sent = 0, 0
    start_time = time.time()

    try:
        # nesting the try statement to be Python 2.4 compatible
        try:
            for queued_batch in NoticeQueueBatch.objects.all():
                data = str(queued_batch.pickled_data).decode("base64")
                notices = pickle.loads(data)
                for user, label, extra_context, on_site, sender in notices:
                    try:
                        user = User.objects.get(pk=user)
                        logging.info("emitting notice %s to %s" %
                                                                (label, user))
                        # call this once per user to be atomic and allow for
                        # logging to accurately show how long each takes.
                        notification.send_now([user], label, extra_context,
                                              on_site, sender)
                    except User.DoesNotExist:
                        # Ignore deleted users, just warn about them
                        logging.warning("not emitting notice %s to user %s"
                                        " since it does not exist" %
                                                                (label, user))
                    sent += 1
                queued_batch.delete()
                batches += 1
        except:
            # get the exception
            exc_class, e, t = sys.exc_info()
            # email people
            current_site = Site.objects.get_current()
            subject = "[%s emit_notices] %r" % (current_site.name, e)
            message = "%s" % ("\n".join(
                                traceback.format_exception(*sys.exc_info())),)
            mail_admins(subject, message, fail_silently=True)
            # log it as critical
            logging.critical("an exception occurred: %r" % e)
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s batches, %s sent" % (batches, sent,))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #19
0
def send_all():
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        for message in prioritize():
            if DontSendEntry.objects.has_address(message.to_address):
                logging.info("skipping email to %s as on don't send list " % message.to_address.encode("utf-8"))
                MessageLog.objects.log(message, 2)  # @@@ avoid using literal result code
                message.delete()
                dont_send += 1
            else:
                try:
                    logging.info(
                        "sending message '%s' to %s"
                        % (message.subject.encode("utf-8"), message.to_address.encode("utf-8"))
                    )
                    core_send_mail(message.subject, message.message_body, message.from_address, [message.to_address])
                    MessageLog.objects.log(message, 1)  # @@@ avoid using literal result code
                    message.delete()
                    sent += 1
                except (
                    socket_error,
                    smtplib.SMTPSenderRefused,
                    smtplib.SMTPRecipientsRefused,
                    smtplib.SMTPAuthenticationError,
                ), err:
                    message.defer()
                    logging.info("message deferred due to failure: %s" % err)
                    MessageLog.objects.log(message, 3, log_message=str(err))  # @@@ avoid using literal result code
                    deferred += 1
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred; %s don't send" % (sent, deferred, dont_send))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #20
0
def send_all():
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        from boto.exception import BotoServerError
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                logging.info("sending message '%s' to %s" %
                             (message.subject.encode("utf-8"), u", ".join(
                                 message.to_addresses).encode("utf-8")))
                email = message.email
                email.connection = connection
                email.send()
                MessageLog.objects.log(
                    message, 1)  # @@@ avoid using literal result code
                message.delete()
                sent += 1
            except (socket_error, smtplib.SMTPSenderRefused,
                    smtplib.SMTPRecipientsRefused,
                    smtplib.SMTPAuthenticationError, BotoServerError), err:
                message.defer()
                logging.info("message deferred due to failure: %s" % err)
                MessageLog.objects.log(message, 3, log_message=str(
                    err))  # @@@ avoid using literal result code
                deferred += 1
                # Get a new connection, in case the connection itself has an error.
                connection = None
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #21
0
def _cleanup_lock(config):
    '''
        Release locks, if set.
    '''
    if config.getboolean("Execution", "serialize"):
        lock = FileLock(config.get("Execution", "pidfile"))
        logger.debug("Releasing lock")
        lock.release()
Example #22
0
    def wrapper(self, *args, **options):
        lock_prefix = ''

        try:
            lock_prefix = settings.SITE_URL.split('//')[1].replace('/', '').replace('.', '-')
        except AttributeError:
            try:
                lock_prefix = settings.ALLOWED_HOSTS[0].replace('.', '-')
            except IndexError:
                lock_prefix = 'pdk_lock'

        lock_prefix = slugify(lock_prefix)

        start_time = time.time()
        verbosity = options.get('verbosity', 0)
        if verbosity == 0:
            level = logging.ERROR
        elif verbosity == 1:
            level = logging.WARN
        elif verbosity == 2:
            level = logging.INFO
        else:
            level = logging.DEBUG

        logging.basicConfig(level=level, format="%(message)s")
        logging.debug("-" * 72)

        lock_name = self.__module__.split('.').pop()
        lock = FileLock('%s/%s__%s' % (tempfile.gettempdir(), lock_prefix, lock_name))

        logging.debug("%s - acquiring lock...", lock_name)

        try:
            lock.acquire(LOCK_WAIT_TIMEOUT)
        except AlreadyLocked:
            logging.debug("lock already in place. quitting.")
            return
        except LockTimeout:
            logging.debug("waiting for the lock timed out. quitting.")
            return

        logging.debug("acquired.")

        try:
            handle(self, *args, **options)
        except: # pylint: disable=bare-except
            import traceback
            logging.error("Command Failed")
            logging.error('==' * 72)
            logging.error(traceback.format_exc())
            logging.error('==' * 72)

        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

        logging.info("done in %.2f seconds", (time.time() - start_time))
        return
Example #23
0
def send_all():
    """
    Send all eligible messages in the queue.
    """
    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(settings.JABBER_JID,
                                                settings.JABBER_PASSWORD)
                logging.info("sending message '%s' to %s" %
                             (message.subject.encode("utf-8"), u", ".join(
                                 message.to_addresses).encode("utf-8")))
                jabber = message.message
                jabber.connection = connection
                jabber.send()
                MessageLog.objects.log(
                    message, 1)  # @@@ avoid using literal result code
                message.delete()
                sent += 1
            except (socket_error, XMPPAuthenticationFailure,
                    XMPPConnectionError), err:
                message.defer()
                logging.info("message deferred due to failure: %s" % err)
                MessageLog.objects.log(message, 3, log_message=str(
                    err))  # @@@ avoid using literal result code
                deferred += 1
                # Get a new connection, in case the connection itself has an error.
                connection = None
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #24
0
def send_all(batch_size=None):
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
                email = message.email
                email.connection = connection
                email.send()
                MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
                message.delete()
                sent += 1
            except (socket_error, smtplib.SMTPSenderRefused, smtplib.SMTPRecipientsRefused, smtplib.SMTPAuthenticationError), err:
                message.defer()
                logging.info("message deferred due to failure: %s" % err)
                MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
                deferred += 1
                # Get a new connection, in case the connection itself has an error.
                connection = None

            # stop when batch size is reached
            if batch_size is not None and sent + deferred >= batch_size:
                break

    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #25
0
    def handle_noargs(self, **options):
        """Handle working on a single project or looping over several."""
        project = options.get("project")
        del options["project"]
        cron_batches = options.get("cron_batches")

        if options.get("view_batches"):
            if project or cron_batches:
                raise CommandError(
                    "view_batches can not be used with project or cron_batch")
            # print out each batch that is in use, and the projects
            # that belong to it
            batches = PerformanceTestModel.get_projects_by_cron_batch()
            for key in sorted(batches.keys()):
                self.stdout.write(
                    "{0}: {1}\n".format(key, ", ".join(batches[key])), )
            return

        if not (project or cron_batches):
            raise CommandError(
                "You must provide either a project or cron_batch value.")

        if project and cron_batches:
            raise CommandError(
                "You must provide either project or cron_batch, but not both.")

        if cron_batches:
            projects = PerformanceTestModel.get_cron_batch_projects(
                cron_batches)
        else:
            projects = [project]

        lock = FileLock(self.LOCK_FILE + '_' + str(project))

        timeout_sec = 10
        try:
            lock.acquire(timeout=timeout_sec)
            try:
                self.stdout.write("Starting for projects: {0}\n".format(
                    ", ".join(projects)))

                for p in projects:
                    self.handle_project(p, **options)

                self.stdout.write("Completed for {0} project(s).\n".format(
                    len(projects)))
            finally:
                lock.release()

        except AlreadyLocked:
            self.stdout.write("This command is already being run elsewhere.  "
                              "Please try again later.\n")

        except LockTimeout:
            self.stdout.write("Lock timeout of {0} seconds exceeded. "
                              "Please try again later.\n".format(
                                  str(timeout_sec)))
Example #26
0
    def flush(self):
        if not self._buffer:
            return

        if sys.platform.startswith('win'):
            return

        items = []
        for b in self._buffer:
            try:
                d    = DictUtils.merge(self._meta, b['data'])
                item = b['prefix'] + ' ' + JSON.asString(d)
            except Exception as err:
                item = '>> EXCEPTION: JSON ENCODING FAILED >> ' + str(err).replace('\n', '\t')

            try:
                item = item.encode('utf8', 'ignore')
            except Exception as err:
                item = '>> EXCEPTION: UNICODE DECODING FAILED >> ' + str(err).replace('\n', '\t')

            items.append(item)

        count   = self._fileCount
        offset  = random.randint(0, count - 1)
        success = False
        path    = self.getReportFolder() + self._timeCode + '/'
        if not os.path.exists(path):
            os.makedirs(path)

        for i in range(count):
            index = (i + offset) % count
            p     = path + str(index) + '.report'
            lock  = FileLock(p)
            if lock.i_am_locking() and i < count - 1:
                continue

            try:
                lock.acquire()
            except Exception:
                continue

            try:
                out = StringUtils.toUnicode('\n'.join(items) + '\n')
                f   = open(p, 'a+')
                f.write(out.encode('utf8'))
                f.close()
                success = True
            except Exception as err:
                print("REPORTER ERROR: Unable to write report file.")
                print(err)

            lock.release()
            if success:
                break

        self.clear()
        return success
Example #27
0
def send_all():
    """
    Send all eligible messages in the queue.
    """
    
    lock = FileLock("send_mail")
    
    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")
    
    start_time = time.time()
    
    dont_send = 0
    deferred = 0
    sent = 0
    
    try:
        for message in prioritize():
            if DontSendEntry.objects.has_address(message.to_address):
                logging.info("skipping email to %s as on don't send list " % message.to_address)
                MessageLog.objects.log(message, 2) # @@@ avoid using literal result code
                message.delete()
                dont_send += 1
            else:
                try:
                    logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), message.to_address.encode("utf-8")))
                    if not message.message_body_html:
                        core_send_mail(message.subject, message.message_body, message.from_address, [message.to_address])
                    else:
                        email = EmailMultiAlternatives(message.subject, message.message_body, message.from_address, [message.to_address])
                        email.attach_alternative(message.message_body_html, 'text/html')
                        email.send()
                    MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
                    message.delete()
                    sent += 1
                except (socket_error, smtplib.SMTPSenderRefused, smtplib.SMTPRecipientsRefused, smtplib.SMTPAuthenticationError), err:
                    message.defer()
                    logging.info("message deferred due to failure: %s" % err)
                    MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
                    deferred += 1
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s sent; %s deferred; %s don't send" % (sent, deferred, dont_send))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #28
0
    def handle(self, *args, **options):
        # Check whether it is already running or not
        lock = FileLock(os.path.join(settings.LOCK_ROOT, LOCK_FILE))
        try:
            lock.acquire(0)
        except:
            print ("It seems the command is processing already.")
            return

        import_translations_from_po()
        lock.release()
Example #29
0
    def handle(self, *args, **options):
        """ Store pushlog data in the database. """

        repo_host = options.get("repo_host")
        enddate = options.get("enddate")
        numdays = options.get("numdays")
        hours = options.get("hours")
        branch = options.get("branch")
        verbosity = options.get("verbosity")
        project = options.get("project")

        if not repo_host:
            raise CommandError("You must supply a host name for the repo pushlogs " +
                         "to store: --repo_host hostname")

        if not numdays and not hours:
            raise CommandError("You must supply the number of days or hours of data.")
        else:
            if numdays:
                try:
                    numdays = int(numdays)
                except ValueError:
                    raise CommandError("numdays must be an integer.")

            if hours:

                try:
                    hours = int(hours)
                except ValueError:
                    raise CommandError("hours must be an integer.")

        lock = FileLock(self.LOCK_FILE)
        try:
            lock.acquire(timeout=0)
            try:
                plm = PushLogModel(project=project, out=self.stdout, verbosity=verbosity)

                # store the pushlogs for the branch specified, or all branches
                summary = plm.store_pushlogs(repo_host, numdays, hours, enddate, branch)
                self.println(("Branches: {0}\nPushlogs stored: {1}, skipped: {2}\n" +
                              "Changesets stored: {3}, skipped: {4}").format(
                        summary["branches"],
                        summary["pushlogs_stored"],
                        summary["pushlogs_skipped"],
                        summary["changesets_stored"],
                        summary["changesets_skipped"],
                        ))
                plm.disconnect()

            finally:
                lock.release()

        except AlreadyLocked:
            self.println("This command is already being run elsewhere.  Please try again later.")
Example #30
0
class SingleProcessEngine(BaseEngine):
    """
    An engine that ensures only one process can run at the same time. Useful
    when being fired off by a cron and you need to ensure a lock is made so
    other processes won't handle a queue at the same time.
    """
    def __init__(self, *args, **kwargs):
        self._lock_wait_timeout = kwargs.pop("lock_wait_timeout", -1)
        super(SingleProcessEngine, self).__init__(*args, **kwargs)
    
    def run(self):
        """
        Executes the runner using a lock file to prevent race conditions.
        """
        self._create_lock()
        if not self._acquire_lock():
            raise SystemExit
        try:
            super(SingleProcessEngine, self).run()
        finally:
            self._release_lock()
    
    def _create_lock(self):
        """
        Create the lock.
        """
        from lockfile import FileLock
        # The lock name must be shared by all processes: a per-PID name
        # (e.g. "%d.lock" % os.getpid()) would give each process its own
        # lock and never exclude anything. The name here is illustrative.
        self._lock = FileLock("single_process_engine.lock")
    
    def _acquire_lock(self):
        """
        Attempt to acquire a lock. Returns False on failure or True on
        success.
        """
        from lockfile import AlreadyLocked, LockTimeout
        logging.debug("acquiring lock...")
        try:
            self._lock.acquire(self._lock_wait_timeout)
        except AlreadyLocked:
            logging.debug("lock already in place. quitting.")
            return False
        except LockTimeout:
            logging.debug("waiting for the lock timed out. quitting.")
            return False
        logging.debug("lock acquired.")
        return True
    
    def _release_lock(self):
        """
        Release the lock.
        """
        logging.debug("releasing lock...")
        self._lock.release()
        logging.debug("lock released.")
Example #31
0
def rotate_log_file(path):
    try:
        lockfile = FileLock(path + '.lock')
        lockfile.acquire(timeout=0)
    except LockError:
        return

    try:
        if os.path.exists(path) and os.stat(path).st_size > 1024 * 1024:
            os.rename(path, path + '.1')
    finally:
        lockfile.release()
Example #32
0
class Transaction():
    def __init__(self, model):
        self._model = model
        self._lock = FileLock('.lock')

    def __enter__(self):
        self._lock.acquire()
        return self

    def __exit__(self, *exc_info):
        self._lock.release()
        self._model.sync()
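A usage sketch for Transaction (the model object and its sync() method are assumed from the surrounding project). Note the order in __exit__: the lock is released before sync() runs, so the sync itself happens outside the lock:

with Transaction(model):
    model.update(record)  # illustrative model API; runs under the file lock
# the lock is released here, then model.sync() persists the changes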
Example #33
0
    def wrapper(self, *args, **options):
        def on_interrupt(signum, frame):
            # Raise SystemExit so the lock file still gets released
            sys.exit()

        signal.signal(signal.SIGTERM, on_interrupt)

        start_time = time.time()
        try:
            verbosity = int(options.get('verbosity', 0))
        except ValueError:
            verbosity = 0
        logger = logging.getLogger(self.__module__)
        if verbosity == 0:
            logger.level = logging.WARNING
        elif verbosity == 1:
            logger.level = logging.INFO
        else:
            logger.level = logging.DEBUG

        logger.debug("-" * 72)

        lock_name = self.__module__.split('.').pop()
        lock = FileLock(os.path.join(LOCK_ROOT, lock_name))

        logger.debug("%s - acquiring lock..." % lock_name)
        try:
            lock.acquire(LOCK_WAIT_TIMEOUT)
        except AlreadyLocked:
            logger.debug("lock already in place. quitting.")
            return
        except LockTimeout:
            logger.debug("waiting for the lock timed out. quitting.")
            return
        logger.debug("acquired.")

        try:
            handle(self, logger, *args, **options)
        except (KeyboardInterrupt, SystemExit):
            pass
        except:
            import traceback
            logger.warning("Command Failed")
            logger.warning('=' * 72)
            logger.warning(traceback.format_exc())
            logger.warning('=' * 72)

        logger.debug("releasing lock...")
        lock.release()
        logger.debug("released.")

        logger.info("done in %.2f seconds" % (time.time() - start_time))
        return
Example #34
0
class Transaction():
    def __init__(self, model):
        self._model = model
        self._lock = FileLock('.lock')

    def __enter__(self):
        self._lock.acquire()
        return self

    def __exit__(self, *exc_info):
        self._lock.release()
        self._model.sync()
Example #35
0
    def wrapper(self, *args, **options):
        def on_interrupt(signum, frame):
            # Raise SystemExit so the lock file still gets released
            sys.exit()
        signal.signal(signal.SIGTERM, on_interrupt)

        start_time = time.time()
        try:
            verbosity = int(options.get('verbosity', 0))
        except ValueError:
            verbosity = 0
        logger = logging.getLogger(self.__module__)
        if verbosity == 0:
            logger.level = logging.WARNING
        elif verbosity == 1:
            logger.level = logging.INFO
        else:
            logger.level = logging.DEBUG
       
        logger.debug("-" * 72)
        
        lock_name = self.__module__.split('.').pop()
        lock = FileLock(os.path.join(LOCK_ROOT, lock_name))
        
        logger.debug("%s - acquiring lock..." % lock_name)
        try:
            lock.acquire(LOCK_WAIT_TIMEOUT)
        except AlreadyLocked:
            logger.debug("lock already in place. quitting.")
            return
        except LockTimeout:
            logger.debug("waiting for the lock timed out. quitting.")
            return
        logger.debug("acquired.")
        
        try:
            handle(self, logger, *args, **options)
        except (KeyboardInterrupt, SystemExit):
            pass
        except:
            import traceback
            logger.warning("Command Failed")
            logger.warning('=' * 72)
            logger.warning(traceback.format_exc())
            logger.warning('=' * 72)
        
        logger.debug("releasing lock...")
        lock.release()
        logger.debug("released.")
        
        logger.info("done in %.2f seconds" % (time.time() - start_time))
        return
Example #36
0
 def handle(*args):
     """Acquire the lock before running the method.
     """
     lock = FileLock(lockname)
     try:
         lock.acquire(timeout=-1)
     except AlreadyLocked: # pragma: no cover
         print('Lock is already set, aborting.')
         return
     try:
         handler(*args)
     finally:
         lock.release()
Example #37
0
 def handle(self, **options):
     """Acquire the lock before running the method.
     """
     lock = FileLock(lockname)
     try:
         lock.acquire(timeout=-1)
     except AlreadyLocked:
         print "Lock is already set, aborting."
         return
     try:
         handler(self, **options)
     finally:
         lock.release()
Example #38
0
 def wrapper(*args, **kwargs):
     lock = FileLock(lockfile_name)
     try:
         lock.acquire(lock_wait_timeout)
     except AlreadyLocked:
         return
     except LockTimeout:
         return
     try:
         result = func(*args, **kwargs)
     finally:
         lock.release()
     return result
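Examples #38 and #39 show only the inner wrapper; the enclosing decorator factory that supplies lockfile_name and lock_wait_timeout presumably looks something like this sketch:

import functools
from lockfile import FileLock, AlreadyLocked, LockTimeout

def run_with_lock(lockfile_name, lock_wait_timeout=-1):
    """Decorator: run the function only if lockfile_name can be acquired."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            lock = FileLock(lockfile_name)
            try:
                lock.acquire(lock_wait_timeout)
            except (AlreadyLocked, LockTimeout):
                return None  # another run is in progress; skip silently
            try:
                return func(*args, **kwargs)
            finally:
                lock.release()
        return wrapper
    return decorator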
Example #39
0
 def wrapper(*args, **kwargs):
     lock = FileLock(lockfile_name)
     try:
         lock.acquire(lock_wait_timeout)
     except AlreadyLocked:
         return
     except LockTimeout:
         return
     try:
         result = func(*args, **kwargs)
     finally:
         lock.release()
     return result
Example #40
0
def send_all():
    """
    Send all eligible messages in the queue.
    """
    lock = FileLock("send_mail")
    
    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")
    
    start_time = time.time()
    
    dont_send = 0
    deferred = 0
    sent = 0
    
    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(settings.JABBER_JID, settings.JABBER_PASSWORD)
                logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
                jabber = message.message
                jabber.connection = connection
                jabber.send()
                MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
                message.delete()
                sent += 1
            except (socket_error, XMPPAuthenticationFailure, XMPPConnectionError), err:
                message.defer()
                logging.info("message deferred due to failure: %s" % err)
                MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
                deferred += 1
                # Get a new connection, in case the connection itself has an error.
                connection = None
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")
    
    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #41
0
    def handle(self, *args, **options):
        # Check whether it is already running or not
        lock = FileLock(os.path.join(settings.MEDIA_ROOT, LOCK_FILE))
        try:
            lock.acquire(0)
        except:
            print 'It seems the command is already running.'
            return

        feeds = Feed.objects.all()
        for feed in feeds:
            print "Processing `%s` feed..." % feed.feed_url
            self.process_feed(feed)

        lock.release()
Example #42
0
    def wrapper(self, *args, **options):
        start_time = time.time()
        verbosity = options.get('verbosity', 0)
        if verbosity == 0:
            level = logging.ERROR
        elif verbosity == 1:
            level = logging.WARN
        elif verbosity == 2:
            level = logging.INFO
        else:
            level = logging.DEBUG

        logging.basicConfig(level=level, format="%(message)s")
        logging.debug("-" * 72)

        lock_name = self.__module__.split('.').pop()
        lock = FileLock('%s/purple_robot_lock_%s' %
                        (tempfile.gettempdir(), lock_name))

        logging.debug("%s - acquiring lock...", lock_name)

        try:
            lock.acquire(LOCK_WAIT_TIMEOUT)
        except AlreadyLocked:
            logging.debug("lock already in place. quitting.")
            return
        except LockTimeout:
            logging.debug("waiting for the lock timed out. quitting.")
            return

        logging.debug("acquired.")

        try:
            handle(self, *args, **options)
        except:  # pylint: disable=bare-except
            import traceback
            logging.error("Command Failed")
            logging.error('==' * 72)
            logging.error(traceback.format_exc())
            logging.error('==' * 72)

        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

        logging.info("done in %.2f seconds", (time.time() - start_time))
        return
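Deriving the lock name from the command's module, as the wrapper above does, gives each management command its own lock file in the system temp directory. The naming scheme, pulled out as a tiny helper (the function name is illustrative):

    import os
    import tempfile

    def command_lock_path(module_name):
        """Per-command lock path, e.g. purple_robot_lock_<command>."""
        lock_name = module_name.split('.').pop()
        return os.path.join(tempfile.gettempdir(),
                            'purple_robot_lock_%s' % lock_name)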
Example #43
0
def send_smses(send_deferred=False, backend=None):
    # Get lock so there is only one sms sender at the same time.
    if send_deferred:
        lock = FileLock('send_sms_deferred')
    else:
        lock = FileLock('send_sms')
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logger.info('Could not acquire lock.')
        return
    except LockTimeout:
        logger.info('Lock timed out.')
        return

    successes, failures = 0, 0
    try:
        # Get SMSes that need to be sent (deferred or non-deferred)
        if send_deferred:
            to_send = QueuedSMS.objects.filter(priority=PRIORITY_DEFERRED)
        else:
            to_send = QueuedSMS.objects.exclude(priority=PRIORITY_DEFERRED)

        logger.info("Trying to send %i messages." % to_send.count())

        # Send each SMS
        for sms in to_send:
            if backend:
                sms_using = backend
            else:
                sms_using = None if sms.using == '__none__' else sms.using
            if send(sms.to, sms.content, sms.signature, sms_using, sms.reliable):
                # Successfully sent, remove from queue
                logger.info("SMS to %s sent." % sms.to)
                sms.delete()
                successes += 1
            else:
                # Failed to send, defer SMS
                logger.info("SMS to %s failed." % sms.to)
                sms.defer()
                failures += 1
    finally:
        lock.release()
        if successes or failures:
            # Rate of successful sends over all attempts in this run
            statsd.gauge('smsgateway.success_rate',
                         float(successes) / (successes + failures))
        else:
            statsd.gauge('smsgateway.success_rate', 1)
Example #44
0
class NoseLock(Plugin):
    name = 'lock'

    def options(self, parser, env=os.environ):
        super(NoseLock, self).options(parser, env=env)
        # getting reasonable defaults
        app_dir = os.getcwd()
        app_name = os.path.basename(app_dir)
        default_lock_file = os.path.join('/tmp', app_name)

        parser.add_option(
            '--lock-file', action='store',
            default=default_lock_file,
            dest='lock_file',
            help='Use this file to acquire lock (default: {0})'.format(
                default_lock_file))

    def configure(self, options, conf):
        super(NoseLock, self).configure(options, conf)
        if not self.enabled:
            self.lock = None
        else:
            lock_file = options.lock_file
            self.lock = FileLock(lock_file)

            if self.lock.is_locked():
                owner = get_owner(lock_file + '.lock')
                if owner:
                    print ('User {0} already running the tests, '
                           'please keep calm.').format(owner)
            try:
                self.lock.acquire()
                log.info('File {0} locked.'.format(self.lock.lock_file))
                print 'LOCK:', lock_file
            except KeyboardInterrupt:
                print '\nYou are so impatient today!\nBye.'
                sys.exit(1)


    def finalize(self, result):
        if self.lock:
            print 'UNLOCK', self.lock.lock_file
            log.info('Unlocking {0}.'.format(self.lock.lock_file))
            self.lock.release()
Example #45
0
    def handle(self, **options):
        """Command implementation.

        This implementation is pretty ugly, but does behave in the
        right way.
        """

        lock = FileLock('async_flush_queue')
        try:
            lock.acquire(timeout=-1)
        except AlreadyLocked:
            print 'Lock is already set, aborting.'
            return

        try:
            while True:
                now = datetime.now()
                by_priority = (Job.objects.filter(executed=None).exclude(
                    scheduled__gt=now).order_by('-priority'))
                number = by_priority.count()
                if number == 0:
                    print "No jobs found for execution"
                    return

                def run(jobs):
                    """Execute at most the first job in the queryset.

                    Return False if a job was executed, True if the
                    queryset was empty.
                    """
                    for job in jobs.iterator():
                        print "%s: %s" % (job.id, unicode(job))
                        job.execute()
                        return False
                    return True

                priority = by_priority[0].priority
                if run(
                        Job.objects.filter(executed=None,
                                           scheduled__lte=now,
                                           priority=priority).order_by(
                                               'scheduled', 'id')):
                    run(
                        Job.objects.filter(executed=None,
                                           scheduled=None,
                                           priority=priority).order_by('id'))
        finally:
            lock.release()
Example #46
0
def record_result(inj_mode, igid, bfm, app, kname, kcount, iid, opid, bid, cat,
                  pc, inst_type, tid, injBID, runtime, dmesg, value_str,
                  icount):
    res_fname = p.app_log_dir[app] + "/results-mode" + inj_mode + "-igid" + str(
        igid) + ".bfm" + str(bfm) + "." + str(p.NUM_INJECTIONS) + ".txt"
    result_str = icount + ";" + kname + ";" + kcount + ";" + iid + ";" + opid
    result_str += ";" + bid + ":" + str(pc) + ":" + str(inst_type) + ":" + str(
        tid)
    result_str += ":" + str(injBID) + ":" + str(runtime) + ":" + str(
        cat) + ":" + str(dmesg)
    result_str += ":" + str(value_str) + "\n"
    if p.verbose:
        print(result_str)

    has_filelock = False
    if pkgutil.find_loader('lockfile') is not None:
        from lockfile import FileLock
        has_filelock = True

    if has_filelock and p.use_filelock:
        lock = FileLock(res_fname)
        lock.acquire()  #acquire lock

    rf = open(res_fname, "a")
    rf.write(result_str)
    rf.close()

    if has_filelock and p.use_filelock:
        lock.release()  # release lock

    # Record the outputs if the injection produced a silent data corruption (SDC)
    if cat == p.OUT_DIFF or cat == p.STDOUT_ONLY_DIFF or cat == p.APP_SPECIFIC_CHECK_FAIL:
        if not os.path.isdir(p.app_log_dir[app] + "/sdcs"):
            os.system("mkdir -p " + p.app_log_dir[app] +
                      "/sdcs")  # create directory to store sdcs
        full_sdc_dir = p.app_log_dir[
            app] + "/sdcs/sdc-" + app + "-icount" + icount
        os.system("mkdir -p " + full_sdc_dir)  # create directory to store sdc
        # copy stdout, stderr, injection seeds and output diff
        # (a plain loop, so the copies also happen on Python 3 where map() is lazy)
        for fname in [
                stdout_fname, stderr_fname, injection_seeds_file,
                new_directory + "/" + p.output_diff_log
        ]:
            shutil.copy(fname, full_sdc_dir)
        shutil.make_archive(full_sdc_dir, 'gztar',
                            full_sdc_dir)  # archive the outputs
        shutil.rmtree(full_sdc_dir, True)  # remove the directory
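The conditional locking above (lock only when the lockfile package is importable and the flag is set) can be wrapped in a context manager so the result-writing code stays flat. A sketch, with use_filelock standing in for the p.use_filelock flag:

    import contextlib
    import pkgutil

    @contextlib.contextmanager
    def maybe_locked(path, use_filelock=True):
        """Hold a FileLock around the block when lockfile is available."""
        lock = None
        if use_filelock and pkgutil.find_loader('lockfile') is not None:
            from lockfile import FileLock
            lock = FileLock(path)
            lock.acquire()
        try:
            yield
        finally:
            if lock is not None:
                lock.release()

Usage would then be a plain with-block around the append:

    # with maybe_locked(res_fname):
    #     with open(res_fname, "a") as rf:
    #         rf.write(result_str)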
Example #47
0
 def wrapper(self, *args, **options):
     
     start_time = time.time()
     verbosity = options.get('verbosity', 0)
     if verbosity == 0:
         level = logging.WARNING
     elif verbosity == 1:
         level = logging.INFO
     else:
         level = logging.DEBUG
     
     logging.basicConfig(level=level, format="%(message)s")
     logging.debug("-" * 72)
     
     lock_name = self.__module__.split('.').pop()
     lock = FileLock(lock_name)
     
     logging.debug("%s - acquiring lock..." % lock_name)
     try:
         lock.acquire(LOCK_WAIT_TIMEOUT)
     except AlreadyLocked:
         logging.debug("lock already in place. quitting.")
         return
     except LockTimeout:
         logging.debug("waiting for the lock timed out. quitting.")
         return
     logging.debug("acquired.")
     
     try:
         handle(self, *args, **options)
     except Exception:
         import traceback
         logging.warning("Command Failed")
         logging.warning('==' * 72)
         logging.warning(traceback.format_exc())
         logging.warning('==' * 72)
     
     logging.debug("releasing lock...")
     lock.release()
     logging.debug("released.")
     
     logging.info("done in %.2f seconds" % (time.time() - start_time))
     return
Example #48
0
    def filewrite(self, filename, data):
        """Write the response data into a file.

        Parameters
        ----------
        filename
            a string containing the filename
        data
            file data to write into the file
        """
        try:
            filedata = data.decode("utf-8")
        except Exception:
            filedata = data
        lock = FileLock(filename)
        lock.acquire()
        try:
            with open(filename, 'w+') as f:
                f.write(filedata)
        finally:
            lock.release()
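lockfile's FileLock also supports the context-manager protocol, so the acquire/release pair can be tied to a with-block. The same method in that style (a sketch, written standalone rather than as a class method):

    from lockfile import FileLock

    def filewrite(filename, data):
        """Write response data to a file under an exclusive file lock."""
        try:
            filedata = data.decode("utf-8")
        except (AttributeError, UnicodeDecodeError):
            filedata = data
        with FileLock(filename):  # acquired on enter, released on exit
            with open(filename, 'w+') as f:
                f.write(filedata)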
Example #49
0
    def _handle(self, *args, **kwargs):
        stdout_backup = None
        if COMMAND_LOG_ROOT and self.OUTPUT_LOG:
            pass  # redirect output to file, not implemented yet
        lock = None
        if COMMAND_LOCK_ROOT and (COMMAND_USE_LOCK or self.USE_LOCK):
            lock = FileLock(os.path.join(COMMAND_LOCK_ROOT, self.COMMAND_NAME))
            try:
                lock.acquire(0)
            except (AlreadyLocked, LockTimeout):
                print("Command `%s` already running" % self.COMMAND_NAME)
                return

        print("\n======\nRunning `%s` command at %s\n======\n" %
              (self.COMMAND_NAME, str(datetime.now())))
        try:
            # This call should call handle(...) method in the inherited class, that was renamed to _handle by BaseCommandMeta
            self._handle(*args, **kwargs)
        except Exception as e:
            if COMMAND_HANDLE_EXCEPTIONS or self.HANDLE_EXCEPTIONS:
                print("Unexpected crash:")
                print(traceback.format_exc())
                if (COMMAND_EMAIL_EXCEPTIONS
                        or self.EMAIL_EXCEPTIONS) and not settings.DEBUG:
                    mailer.send_mail(
                        "Command %s crash" % self.COMMAND_NAME,
                        traceback.format_exc(), settings.DEFAULT_FROM_EMAIL,
                        [email for name, email in settings.ADMINS])
                    print(
                        "Emails were sent to admins of the website about this crash"
                    )
            else:
                raise e
        finally:
            if lock is not None:
                lock.release()
Example #50
0
def decrypt_values():
    from lockfile import FileLock
    from desktop.lib.obfuscator import Obfuscator
    lock = FileLock(os.path.join(KREDENTIALS_DIR.get(), "ENCR"))
    lock.acquire()
    OBFUSCATOR = Obfuscator()
    ENCRYPTED_VALUE_PATTERN = re.compile(r"\$\s?{ALIAS=(\w+)}")
    PASSWORD_VALUES = [DATABASE.PASSWORD, SMTP.PASSWORD, LDAP.BIND_PASSWORD
                       ] + [
                           LDAP.LDAP_SERVERS.get()[server].BIND_PASSWORD
                           for server in LDAP.LDAP_SERVERS
                       ]
    for val in PASSWORD_VALUES:
        if not val.get():
            continue
        match = ENCRYPTED_VALUE_PATTERN.match(val.get())
        if match:
            try:
                decrypted_value = OBFUSCATOR.get_value(match.group(1))
                val.bind_to[val.grab_key] = decrypted_value
            except Exception as ex:
                lock.release()
                sys.exit("Error occured!\n%s\n" % ex)
    lock.release()
Example #51
0
def send_all(block_size=500, backend=None, messages=None, message_limit=0):
    """
    Send all non-deferred messages in the queue.

    A lock file is used to ensure that this process can not be started again
    while it is already running.

    The ``block_size`` argument allows for queued messages to be iterated in
    blocks, allowing new prioritised messages to be inserted during iteration
    of a large number of queued messages.

    """
    lock = FileLock(LOCK_PATH)

    logger.debug("Acquiring lock...")
    try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) on systems which use a
        # LinkFileLock, so ensure that it is never a negative number.
        timeout = settings.LOCK_WAIT_TIMEOUT
        if not timeout or timeout < 0:
            timeout = 0
        lock.acquire(timeout)
    except AlreadyLocked:
        logger.debug("Lock already in place. Exiting.")
        return
    except LockTimeout:
        logger.debug("Waiting for the lock timed out. Exiting.")
        return
    logger.debug("Lock acquired.")

    start_time = time.time()

    sent = deferred = skipped = 0

    try:
        messages_queue = messages or _message_queue(block_size, message_limit)
        first_message, messages_list = peek(messages_queue)

        if not first_message:
            logger.info('No messages in queue.')
            return

        connection = get_connection(backend=backend)
        blacklist = models.Blacklist.objects.values_list('email', flat=True)
        connection.open()
        for message in messages_list:
            result = send_queued_message(message,
                                         smtp_connection=connection,
                                         blacklist=blacklist)
            if result == constants.RESULT_SENT:
                sent += 1
            elif result == constants.RESULT_FAILED:
                deferred += 1
            elif result == constants.RESULT_SKIPPED:
                skipped += 1
        connection.close()
    finally:
        logger.debug("Releasing lock...")
        lock.release()
        logger.debug("Lock released.")

    logger.debug("")
    if deferred or skipped:
        log = logger.warning
    else:
        log = logger.info
    log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
    logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
Example #52
0
    lock = None
    try:
        options = Options()
        options.parseOptions()

        # Must not be executed simultaneously (c.f. #265)
        lock = FileLock("/tmp/dlrd-%s" % options['id'])

        # Ensure there are no parallel runs of this script
        lock.acquire(timeout=2)

        # Prepare to start
        dlr_d = DlrDaemon(options)
        # Setup signal handlers
        signal.signal(signal.SIGINT, dlr_d.sighandler_stop)
        # Start DlrDaemon
        dlr_d.start()

        reactor.run()
    except usage.UsageError as errortext:
        print('%s: %s' % (sys.argv[0], errortext))
        print('%s: Try --help for usage details.' % (sys.argv[0]))
    except LockTimeout:
        print("Lock not acquired! Exiting.")
    except AlreadyLocked:
        print("There's another instance of dlrd running, exiting.")
    finally:
        # Release the lock
        if lock is not None and lock.i_am_locking():
            lock.release()
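The i_am_locking() guard in the finally clause is what keeps the release safe even when acquire() itself failed. The whole shape, condensed into a reusable runner (the function name and signature are illustrative):

    from lockfile import FileLock, LockTimeout, AlreadyLocked

    def run_single_instance(instance_id, main):
        """Run main() while holding /tmp/dlrd-<id>; skip if one is running."""
        lock = FileLock("/tmp/dlrd-%s" % instance_id)
        try:
            lock.acquire(timeout=2)
            main()
        except (LockTimeout, AlreadyLocked):
            print("There's another instance running, exiting.")
        finally:
            if lock.i_am_locking():
                lock.release()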
Example #53
0
def send_all():
    """
    Send all eligible messages in the queue.
    """

    lock = FileLock(settings.LOCK_FOLDER)

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    connection = get_connection(backend=EMAIL_BACKEND)
                logging.info("sending message %s" % message.id)
                #logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
                email = message.email
                email.connection = connection
                email.send()
                MessageLog.objects.log(
                    message, 1)  # @@@ avoid using literal result code
                message.delete()
                sent += 1
            except (socket_error, smtplib.SMTPSenderRefused,
                    smtplib.SMTPRecipientsRefused,
                    smtplib.SMTPAuthenticationError), err:
                message.defer()
                logging.info("message deferred due to failure: %s" % err)
                MessageLog.objects.log(message, 3, log_message=str(
                    err))  # @@@ avoid using literal result code
                deferred += 1
                # Get new connection, in case the connection itself has an error.
                connection = None

            if sent + deferred >= settings.EMAIL_MAX_EMAILS_PER_BATCH:
                logging.info("Warning: Sent %d emails, having a break" %
                             (sent + deferred))
                mail_admins(
                    subject="email queue overflow",
                    message=
                    "Warning: more than %d emails sent from the queue, consider checking network load"
                    % settings.EMAIL_MAX_EMAILS_PER_BATCH,
                    fail_silently=True)
                break

    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Example #54
0
def precompute_synthetic_grid(output_dirname,
                              ranges,
                              wavelengths,
                              to_resolution,
                              modeled_layers_pack,
                              atomic_linelist,
                              isotopes,
                              solar_abundances,
                              segments=None,
                              number_of_processes=1,
                              code="spectrum",
                              use_molecules=False,
                              steps=False,
                              tmp_dir=None):
    """
    Pre-compute a synthetic grid with some reference ranges (Teff, log(g) and
    MH combinations) and all the steps that iSpec will perform in the
    astrophysical parameter determination process.

    All the non-convolved spectra will be saved in a subdir and a complete
    grid file with the reference points already convolved will be saved in a
    FITS file for fast comparison.

    The output directory can be used by the routines 'model_spectrum' and
    'estimate_initial_ap'.
    """
    code = code.lower()
    if code not in ['spectrum', 'turbospectrum', 'moog', 'synthe', 'sme']:
        raise Exception("Unknown radiative transfer code: %s" % (code))

    reference_list_filename = output_dirname + "/parameters.tsv"
    if to_resolution is not None:
        reference_grid_filename = output_dirname + "/convolved_grid_%i.fits.gz" % to_resolution
    fits_dir = os.path.join(output_dirname, "grid/")
    mkdir_p(fits_dir)
    if steps:
        steps_fits_dir = os.path.join(output_dirname, "steps/")
        mkdir_p(steps_fits_dir)

    import dill  # To allow pickle of lambda functions (e.g., one element in modeled_layers_pack)
    import pickle
    pickled_modeled_layers_pack = pickle.dumps(modeled_layers_pack)

    # For code != "grid", ranges are always in position 7 (for grid it would be in position 8)
    valid_ranges = modeled_layers_pack[7]
    teff_range = valid_ranges['teff']
    logg_range = valid_ranges['logg']
    MH_range = valid_ranges['MH']
    alpha_range = valid_ranges.get(
        'alpha', (-1.5, 1.5)
    )  # Fallback range if 'alpha' is not a free parameter for atmosphere interpolation
    vmic_range = valid_ranges.get(
        'vmic', (0.0, 50.)
    )  # Fallback range if 'vmic' is not a free parameter for atmosphere interpolation

    # Parallelization pool
    if number_of_processes == 1:
        pool = None
    else:
        pool = Pool(number_of_processes)

    # Create grid binary file
    elapsed = 0  # seconds

    num_ref_spec = len(ranges)
    num_spec = num_ref_spec * 9  # Reference + 8 variations in Teff, logg, MH, alpha, vmic, vmac, vsini, limb darkening coeff

    i = 0
    for teff, logg, MH, alpha, vmic in ranges:
        if vmic is None:
            vmic = estimate_vmic(teff, logg, MH)
        vmac = 0.0  # This can be modified after synthesis if needed
        vsini = 0.0  # This can be modified after synthesis if needed
        limb_darkening_coeff = 0.00  # This can be modified after synthesis if needed
        resolution = 0  # This can be modified after synthesis if needed
        is_step = False
        if not valid_atmosphere_target(modeled_layers_pack, {
                'teff': teff,
                'logg': logg,
                'MH': MH,
                'alpha': alpha
        }):
            raise Exception(
                "Target parameters out of the valid ranges: teff={} logg={} MH={} alpha={}"
                .format(teff, logg, MH, alpha))
        points = [
            (teff, logg, MH, alpha, vmic, vmac, vsini, limb_darkening_coeff,
             is_step),
        ]
        if steps:
            is_step = True
            new_teff = teff + Constants.SYNTH_STEP_TEFF if teff + Constants.SYNTH_STEP_TEFF <= teff_range[
                -1] else teff - Constants.SYNTH_STEP_TEFF
            new_logg = logg + Constants.SYNTH_STEP_LOGG if logg + Constants.SYNTH_STEP_LOGG <= logg_range[
                -1] else logg - Constants.SYNTH_STEP_LOGG
            new_MH = MH + Constants.SYNTH_STEP_MH if MH + Constants.SYNTH_STEP_MH <= MH_range[
                -1] else MH - Constants.SYNTH_STEP_MH
            new_alpha = alpha + Constants.SYNTH_STEP_ALPHA if alpha + Constants.SYNTH_STEP_ALPHA <= alpha_range[
                -1] else alpha - Constants.SYNTH_STEP_ALPHA
            new_vmic = vmic + Constants.SYNTH_STEP_VMIC if vmic + Constants.SYNTH_STEP_VMIC <= vmic_range[
                -1] else vmic - Constants.SYNTH_STEP_VMIC
            # For each reference point, calculate also the variations that iSpec will perform in the first iteration
            points += [  # Final unconvolved spectra where vmic/vmac are free and do not follow vmic/vmac empirical relations
                (new_teff, logg, MH, alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, new_logg, MH, alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, logg, new_MH, alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, logg, MH, new_alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, logg, MH, alpha, new_vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
            ]
            points += [
                # Final unconvolved spectra where vmic is not free and does follow vmic empirical relations
                (new_teff, logg, MH, alpha, estimate_vmic(new_teff, logg, MH),
                 vmac, vsini, limb_darkening_coeff, is_step),
                (teff, new_logg, MH, alpha, estimate_vmic(teff, new_logg, MH),
                 vmac, vsini, limb_darkening_coeff, is_step),
                (teff, logg, new_MH, alpha, estimate_vmic(teff, logg, new_MH),
                 vmac, vsini, limb_darkening_coeff, is_step),
            ]

        for j, (teff, logg, MH, alpha, vmic, vmac, vsini, limb_darkening_coeff,
                is_step) in enumerate(points):
            if is_step:
                filename_out = steps_fits_dir + "{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
                    int(teff), logg, MH, alpha, vmic, vmac, vsini,
                    limb_darkening_coeff) + ".fits.gz"
            else:
                filename_out = fits_dir + "{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
                    int(teff), logg, MH, alpha, vmic, vmac, vsini,
                    limb_darkening_coeff) + ".fits.gz"

            if os.path.exists(filename_out):
                print("Skipping", teff, logg, MH, alpha, vmic, vmac, vsini,
                      limb_darkening_coeff, "already computed")
                continue

            if pool is None:
                if sys.platform == "win32":
                    # On Windows, the best timer is time.clock()
                    default_timer = time.clock
                else:
                    # On most other platforms the best timer is time.time()
                    default_timer = time.time

                lock = FileLock(filename_out + ".lock")
                try:
                    lock.acquire(timeout=-1)  # Don't wait
                except (LockTimeout, AlreadyLocked) as e:
                    # Some other process is computing this spectrum, do not continue
                    print("Skipping", teff, logg, MH, alpha, vmic, vmac, vsini,
                          limb_darkening_coeff, "already locked")
                    continue

                try:
                    tcheck = default_timer()
                    # Validate parameters
                    __generate_synthetic_fits(filename_out,
                                              wavelengths,
                                              segments,
                                              teff,
                                              logg,
                                              MH,
                                              alpha,
                                              vmic,
                                              vmac,
                                              vsini,
                                              limb_darkening_coeff,
                                              resolution,
                                              pickled_modeled_layers_pack,
                                              atomic_linelist,
                                              isotopes,
                                              solar_abundances,
                                              code=code,
                                              use_molecules=use_molecules,
                                              tmp_dir=tmp_dir,
                                              locked=True)
                    elapsed = default_timer() - tcheck

                    print(
                        "-----------------------------------------------------"
                    )
                    print("Remaining time:")
                    print("\t", (num_spec - i) * elapsed, "seconds")
                    print("\t", (num_spec - i) * (elapsed / 60), "minutes")
                    print("\t", (num_spec - i) * (elapsed / (60 * 60)),
                          "hours")
                    print("\t", (num_spec - i) * (elapsed / (60 * 60 * 24)),
                          "days")
                    print(
                        "-----------------------------------------------------"
                    )
                finally:
                    lock.release()

            else:
                pool.apply_async(__generate_synthetic_fits, [
                    filename_out, wavelengths, segments, teff, logg, MH, alpha,
                    vmic, vmac, vsini, limb_darkening_coeff, resolution,
                    pickled_modeled_layers_pack, atomic_linelist, isotopes,
                    solar_abundances
                ],
                                 kwds={
                                     'code': code,
                                     'use_molecules': use_molecules,
                                     'tmp_dir': tmp_dir,
                                     'locked': False
                                 })
            i += 1

    if pool is not None:
        pool.close()
        pool.join()

    # Create parameters.tsv
    reference_list = Table()
    if len(np.unique(ranges[['logg', 'MH', 'alpha', 'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_teff', dtype=int))
    else:
        reference_list.add_column(Column(name='teff', dtype=int))
    if len(np.unique(ranges[['teff', 'MH', 'alpha', 'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_logg', dtype=float))
    else:
        reference_list.add_column(Column(name='logg', dtype=float))
    if len(np.unique(ranges[['teff', 'logg', 'alpha',
                             'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_MH', dtype=float))
    else:
        reference_list.add_column(Column(name='MH', dtype=float))
    if len(np.unique(ranges[['teff', 'logg', 'MH', 'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_alpha', dtype=float))
    else:
        reference_list.add_column(Column(name='alpha', dtype=float))
    if len(np.unique(ranges[['teff', 'logg', 'MH', 'alpha']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_vmic', dtype=float))
    else:
        reference_list.add_column(Column(name='vmic', dtype=float))
    reference_list.add_column(Column(name='filename', dtype='|S100'))
    for teff, logg, MH, alpha, vmic in ranges:
        # Only use the first spectra generated for each combination
        zero_vmac = 0.0
        zero_vsini = 0.0
        zero_limb_darkening_coeff = 0.00
        reference_filename_out = "./grid/{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
            int(teff), logg, MH, alpha, vmic, zero_vmac, zero_vsini,
            zero_limb_darkening_coeff) + ".fits.gz"
        reference_list.add_row(
            (int(teff), logg, MH, alpha, vmic, reference_filename_out))

    if not os.path.exists(reference_list_filename):
        lock = FileLock(reference_list_filename + ".lock")
        try:
            lock.acquire(timeout=-1)  # Don't wait
        except (LockTimeout, AlreadyLocked) as e:
            # Some other process is writing this file, do not continue
            print("Skipping", reference_list_filename, "already locked")
        else:
            try:
                ascii.write(reference_list,
                            reference_list_filename,
                            delimiter='\t',
                            overwrite=True)
                print("Written", reference_list_filename)
            finally:
                lock.release()

    if to_resolution is not None:
        if not os.path.exists(reference_grid_filename):
            lock = FileLock(reference_grid_filename + ".lock")
            try:
                lock.acquire(timeout=-1)  # Don't wait
            except (LockTimeout, AlreadyLocked) as e:
                # Some other process is computing this spectrum, do not continue
                print("Skipping", reference_grid_filename, "already locked")
            else:
                try:
                    reference_grid = None
                    complete_reference_list = Table()
                    complete_reference_list.add_column(
                        Column(name='teff', dtype=int))
                    complete_reference_list.add_column(
                        Column(name='logg', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='MH', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='alpha', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='vmic', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='vmac', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='vsini', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='limb_darkening_coeff', dtype=float))
                    for teff, logg, MH, alpha, vmic in ranges:
                        # Only use the first spectra generated for each combination
                        zero_vmac = 0.0
                        zero_vsini = 0.0
                        zero_limb_darkening_coeff = 0.00
                        vmac = estimate_vmac(teff, logg, MH)
                        vsini = 1.6  # Sun
                        limb_darkening_coeff = 0.6
                        reference_filename_out = "{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
                            int(teff), logg, MH, alpha, vmic, zero_vmac,
                            zero_vsini, zero_limb_darkening_coeff) + ".fits.gz"
                        if not os.path.exists(fits_dir +
                                              reference_filename_out):
                            continue
                        complete_reference_list.add_row(
                            (int(teff), logg, MH, alpha, vmic, vmac, vsini,
                             limb_darkening_coeff))

                        # Spectra in the grid are convolved to the specified resolution for fast comparison
                        print("Quick grid:", reference_filename_out)
                        spectrum = read_spectrum(fits_dir +
                                                 reference_filename_out)

                        segments = None
                        vrad = (0, )
                        spectrum['flux'] = apply_post_fundamental_effects(spectrum['waveobs'], spectrum['flux'], segments, \
                                    macroturbulence=vmac, vsini=vsini, \
                                    limb_darkening_coeff=limb_darkening_coeff, R=to_resolution, vrad=vrad)

                        if reference_grid is None:
                            reference_grid = spectrum['flux']
                        else:
                            reference_grid = np.vstack(
                                (reference_grid, spectrum['flux']))

                    if len(ranges) == len(complete_reference_list):
                        # Generate FITS file with grid for fast comparison
                        primary_hdu = fits.PrimaryHDU(reference_grid)
                        wavelengths_hdu = fits.ImageHDU(wavelengths,
                                                        name="WAVELENGTHS")
                        params_bintable_hdu = fits.BinTableHDU(
                            complete_reference_list.as_array(), name="PARAMS")
                        fits_format = fits.HDUList([
                            primary_hdu, wavelengths_hdu, params_bintable_hdu
                        ])
                        fits_format.writeto(reference_grid_filename,
                                            overwrite=True)
                        print("Written", reference_grid_filename)
                finally:
                    lock.release()
Example #55
0
def __generate_synthetic_fits(filename_out,
                              wavelengths,
                              segments,
                              teff,
                              logg,
                              MH,
                              alpha,
                              vmic,
                              vmac,
                              vsini,
                              limb_darkening_coeff,
                              resolution,
                              pickled_modeled_layers_pack,
                              atomic_linelist,
                              isotopes,
                              solar_abundances,
                              code="spectrum",
                              use_molecules=False,
                              tmp_dir=None,
                              locked=False):
    multiprocessing.current_process().daemon = False

    import dill  # To allow pickle of lambda functions (e.g., one element in modeled_layers_pack)
    import pickle
    modeled_layers_pack = pickle.loads(pickled_modeled_layers_pack)

    if valid_atmosphere_target(modeled_layers_pack, {
            'teff': teff,
            'logg': logg,
            'MH': MH,
            'alpha': alpha
    }):
        if not locked:
            lock = FileLock(filename_out + ".lock")
            try:
                lock.acquire(timeout=-1)  # Don't wait
            except (LockTimeout, AlreadyLocked) as e:
                # Some other process is computing this spectrum, do not continue
                print("Skipping", teff, logg, MH, alpha, vmic, vmac, vsini,
                      limb_darkening_coeff, "already locked")
                return None

        try:
            print("[started]", teff, logg, MH, alpha, vmic, vmac, vsini,
                  limb_darkening_coeff, resolution)
            # Prepare atmosphere model
            atmosphere_layers = interpolate_atmosphere_layers(
                modeled_layers_pack, {
                    'teff': teff,
                    'logg': logg,
                    'MH': MH,
                    'alpha': alpha
                },
                code=code)
            fixed_abundances = None
            # Synthesis
            synth_spectrum = create_spectrum_structure(wavelengths)
            synth_spectrum['flux'] = ispec.synth.common.generate_spectrum(synth_spectrum['waveobs'], \
                    atmosphere_layers, teff, logg, MH, alpha, atomic_linelist, isotopes, solar_abundances, \
                    fixed_abundances, microturbulence_vel = vmic, \
                    macroturbulence=vmac, vsini=vsini, limb_darkening_coeff=limb_darkening_coeff, \
                    R=resolution, regions=segments, verbose=0, \
                    code=code, use_molecules=use_molecules, tmp_dir=tmp_dir)
            # FITS
            write_spectrum(synth_spectrum, filename_out)
            print("[finished]", teff, logg, MH, alpha, vmic, vmac, vsini,
                  limb_darkening_coeff, resolution)
        finally:
            if not locked:  # Not locked in this function
                lock.release()
    else:
        raise Exception("Not valid: %i %.2f %.2f" % (teff, logg, MH))
Example #56
0
class LockTest(TestCase):
    """
    Tests for Django Mailer trying to send mail when the lock is already in
    place.
    """
    def setUp(self):
        # Create somewhere to store the log debug output.
        self.output = StringIO()
        # Create a log handler which can capture the log debug output.
        self.handler = logging.StreamHandler(self.output)
        self.handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(message)s')
        self.handler.setFormatter(formatter)
        # Add the log handler.
        logger = logging.getLogger('django_yubin')
        logger.addHandler(self.handler)

        # Set the LOCK_WAIT_TIMEOUT to the default value.
        self.original_timeout = settings.LOCK_WAIT_TIMEOUT
        settings.LOCK_WAIT_TIMEOUT = 0

        # Use a test lock-file name in case something goes wrong, then emulate
        # that the lock file has already been acquired by another process.
        self.original_lock_path = engine.LOCK_PATH
        engine.LOCK_PATH += '.mailer-test'
        self.lock = FileLock(engine.LOCK_PATH)
        self.lock.unique_name += '.mailer_test'
        self.lock.acquire(0)

    def tearDown(self):
        # Remove the log handler.
        logger = logging.getLogger('django_yubin')
        logger.removeHandler(self.handler)

        # Revert the LOCK_WAIT_TIMEOUT to its original value.
        settings.LOCK_WAIT_TIMEOUT = self.original_timeout

        # Revert the lock file unique name
        engine.LOCK_PATH = self.original_lock_path
        self.lock.release()

    def test_locked(self):
        # Acquire the lock so that send_all will fail.
        engine.send_all()
        self.output.seek(0)
        self.assertEqual(self.output.readlines()[-1].strip(),
                         'Lock already in place. Exiting.')
        # Try with a timeout.
        settings.LOCK_WAIT_TIMEOUT = .1
        engine.send_all()
        self.output.seek(0)
        self.assertEqual(self.output.readlines()[-1].strip(),
                         'Waiting for the lock timed out. Exiting.')

    def test_locked_timeoutbug(self):
        # We want to emulate the lock acquiring taking no time, so the next
        # three calls to time.time() always return 0 (then set it back to the
        # real function).
        original_time = time.time
        global time_call_count
        time_call_count = 0

        def fake_time():
            global time_call_count
            time_call_count = time_call_count + 1
            if time_call_count >= 3:
                time.time = original_time
            return 0

        time.time = fake_time
        try:
            engine.send_all()
            self.output.seek(0)
            self.assertEqual(self.output.readlines()[-1].strip(),
                             'Lock already in place. Exiting.')
        finally:
            time.time = original_time
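Monkey-patching time.time by hand, as test_locked_timeoutbug does, works but is easy to leave half-undone if an assertion fails. The same emulation with unittest.mock might look like this (a sketch of the test method; engine is the module under test, as above):

    import time
    from unittest import mock

    def test_locked_timeoutbug(self):
        # time.time() returns 0 for the first three calls, then the real
        # clock resumes, emulating a lock acquisition that takes no time.
        real_time = time.time
        ticks = iter([0, 0, 0])

        def fake_time():
            try:
                return next(ticks)
            except StopIteration:
                return real_time()

        with mock.patch('time.time', side_effect=fake_time):
            engine.send_all()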
Example #57
0
class SqliteDatabase(RelationalDatabase):

    EXTENSION = '.db'
    SCHEMA_VERSION = 27

    _lock = None

    def __init__(self, dbname, memory=False, **kwargs):
        """"""
        import os

        # For database bundles, where we have to pass in the whole file path
        if memory:
            base_path = ':memory:'
        else:

            if not dbname:
                raise ValueError("Must have a dbname")

            if dbname[0] != '/':
                import os
                dbname = os.path.join(os.getcwd(), dbname)

            base_path, ext = os.path.splitext(dbname)

            if ext and ext != self.EXTENSION:
                raise Exception(
                    "Bad extension to file '{}': '{}'. Expected: {}".format(
                        dbname, ext, self.EXTENSION))

            self.base_path = base_path

        self._last_attach_name = None
        self._attachments = set()

        self.memory = memory

        if not 'driver' in kwargs:
            kwargs['driver'] = 'sqlite'

        super(SqliteDatabase, self).__init__(dbname=self.path, **kwargs)

    @property
    def path(self):
        if self.memory:
            return ':memory:'
        else:
            return (self.base_path + self.EXTENSION).replace('//', '/')

    @property
    def md5(self):
        from ambry.util import md5_for_file
        return md5_for_file(self.path)

    @property
    def lock_path(self):
        return self.base_path

    def lock(self):
        """Create an external file lock for the bundle database."""

        from lockfile import FileLock, LockTimeout, AlreadyLocked
        import os
        import time
        import traceback
        from ..dbexceptions import LockedFailed

        if self._lock:
            tb = traceback.extract_stack()[-5:-4][0]
            global_logger.debug("Already has bundle lock from {}:{}".format(
                tb[0], tb[1]))
            return

        self._lock = FileLock(self.lock_path)

        for i in range(10):
            try:
                tb = traceback.extract_stack()[-5:-4][0]
                self._lock.acquire(-1)
                global_logger.debug("Acquired bundle lock from {}:{}".format(
                    tb[0], tb[1]))
                return
            except AlreadyLocked as e:
                global_logger.debug("Waiting for bundle lock")
                time.sleep(1)

        self._lock = None
        raise LockedFailed("Failed to acquire lock on {}".format(
            self.lock_path))

    def unlock(self):
        """Release the external lock on the external database."""
        global_logger.debug("Released bundle lock")
        if self._lock is not None:
            self._lock.release()
            self._lock = None

    def break_lock(self):
        from lockfile import FileLock

        lock = FileLock(self.lock_path)

        if lock.is_locked():
            lock.break_lock()

    def require_path(self):
        if not self.memory:

            dir = os.path.dirname(self.base_path)

            if dir and not os.path.exists(dir):
                os.makedirs(os.path.dirname(self.base_path))

    @property
    def version(self):
        v = self.connection.execute('PRAGMA user_version').fetchone()[0]

        try:
            return int(v)
        except:
            return 0

    def _on_create_connection(self, connection):
        """Called from get_connection() to update the database."""
        pass

    def _on_create_engine(self, engine):
        """Called just after the engine is created."""
        pass

    def get_connection(self, check_exists=True):
        """Return an SqlAlchemy connection, but allow for existence check,
        which uses os.path.exists."""

        if not os.path.exists(self.path) and check_exists and not self.memory:
            from ..dbexceptions import DatabaseMissingError

            raise DatabaseMissingError(
                "Trying to make a connection to a sqlite database " +
                "that does not exist.  path={}".format(self.path))

        return super(SqliteDatabase, self).get_connection(check_exists)

    def _create(self):
        """Need to ensure the database exists before calling for the
        connection, but the connection expects the database to exist first, so
        we create it here."""

        from sqlalchemy import create_engine

        dir_ = os.path.dirname(self.path)

        if not os.path.exists(dir_):
            os.makedirs(dir_)

        engine = create_engine(self.dsn, echo=False)
        connection = engine.connect()
        try:
            connection.execute("PRAGMA user_version = {}".format(
                self.SCHEMA_VERSION))
        except Exception as e:
            e.args = ("Failed to open database {}".format(self.dsn), )
            raise e

        connection.close()
        engine.dispose()

    MIN_NUMBER_OF_TABLES = 1

    def is_empty(self):

        if not self.memory and not os.path.exists(self.path):
            return True

        if self.version >= 12:

            if not 'config' in self.inspector.get_table_names():
                return True
            else:
                return False
        else:

            tables = self.inspector.get_table_names()

            if tables and len(tables) < self.MIN_NUMBER_OF_TABLES:
                return True
            else:
                return False

    def clean(self):
        """Remove all files generated by the build process."""
        if os.path.exists(self.path):
            os.remove(self.path)
        self.unlock()

    def delete(self):

        if os.path.exists(self.path):

            self.unlock()

            files = [
                self.path, self.path + "-wal", self.path + "-shm",
                self.path + "-journal"
            ]

            for f in files:
                if os.path.exists(f):
                    os.remove(f)

    def add_view(self, name, sql):

        e = self.connection.execute

        e('DROP VIEW IF EXISTS {}'.format(name))

        e('CREATE VIEW {} AS {} '.format(name, sql))

    def load(self, a, table=None, encoding='utf-8', caster=None, logger=None):
        """Load the database from a CSV file."""

        # return self.load_insert(a,table, encoding=encoding, caster=caster,
        # logger=logger)
        return self.load_shell(a,
                               table,
                               encoding=encoding,
                               caster=caster,
                               logger=logger)

    def load_insert(self,
                    a,
                    table=None,
                    encoding='utf-8',
                    caster=None,
                    logger=None):
        from ..partition import PartitionInterface
        from ..database.csv import CsvDb
        from ..dbexceptions import ConfigurationError
        import time

        if isinstance(a, PartitionInterface):
            db = a.database
        elif isinstance(a, CsvDb):
            db = a
        else:
            raise ConfigurationError("Can't use this type: {}".format(type(a)))

        start = time.clock()
        count = 0
        with self.inserter(table, caster=caster) as ins:
            for row in db.reader(encoding=encoding):
                count += 1

                if logger:
                    logger("Load row {}:".format(count))

                ins.insert(row)

        diff = time.clock() - start
        return count, diff

    def load_shell(self, a, table, encoding='utf-8', caster=None, logger=None):
        from ..partition import PartitionInterface
        from ..database.csv import CsvDb
        from ..dbexceptions import ConfigurationError
        import time
        import subprocess
        import uuid
        from ..util import temp_file_name
        import os

        if isinstance(a, PartitionInterface):
            db = a.database
        elif isinstance(a, CsvDb):
            db = a
        else:
            raise ConfigurationError("Can't use this type: {}".format(type(a)))

        try:
            table_name = table.name
        except AttributeError:
            table_name = table

        sql_file = temp_file_name()

        sql = '''
.mode csv
.separator '|'
select 'Loading CSV file','{path}';
.import {path} {table}
'''.format(path=db.path, table=table_name)

        sqlite = subprocess.check_output(["which", "sqlite3"]).strip()

        start = time.clock()
        count = 0

        proc = subprocess.Popen([sqlite, self.path],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                stdin=subprocess.PIPE)

        (out, err) = proc.communicate(input=sql)

        if proc.returncode != 0:
            raise Exception("Database load failed: " + str(err))

        diff = time.clock() - start
        return count, diff

    def index_for_search(self, vid, topic, keywords):
        """
        Add a search document to the full-text search index.

        :param vid: Versioned ID for the object. Should be a dataset, partition table or column
        :param topic: A text document or description.
        :param keywords: A list of keywords
        :return:
        """

    def search(self, topic, keywords):
        """Search the full text search index.
Example #58
0
def write_aggregated_stats(base_dirs, outfile):
    '''
    Generate aggregated statistics from the given base directories
    and write them to the specified output file.
    
    @type base_dirs: list
    @param base_dirs: List of AFL base directories

    @type outfile: str
    @param outfile: Output file for aggregated statistics
    '''

    # Which fields to add
    wanted_fields_total = [
        'execs_done', 'execs_per_sec', 'pending_favs', 'pending_total',
        'variable_paths', 'unique_crashes', 'unique_hangs'
    ]

    # Which fields to aggregate by mean
    wanted_fields_mean = ['exec_timeout']

    # Which fields should be displayed per fuzzer instance
    wanted_fields_all = ['cycles_done', 'bitmap_cvg']

    # Which fields should be aggregated by max
    wanted_fields_max = ['last_path']

    aggregated_stats = {}

    for field in wanted_fields_total:
        aggregated_stats[field] = 0

    for field in wanted_fields_mean:
        aggregated_stats[field] = (0, 0)

    for field in wanted_fields_all:
        aggregated_stats[field] = []

    def convert_num(num):
        if '.' in num:
            return float(num)
        return int(num)

    total_stats_files = 0

    for base_dir in base_dirs:
        stats_path = os.path.join(base_dir, "fuzzer_stats")

        if os.path.exists(stats_path):
            total_stats_files += 1
            with open(stats_path, 'r') as stats_file:
                stats = stats_file.read()

            for line in stats.splitlines():
                (field_name, field_val) = line.split(':', 1)
                field_name = field_name.strip()
                field_val = field_val.strip()

                if field_name in wanted_fields_total:
                    aggregated_stats[field_name] += convert_num(field_val)
                elif field_name in wanted_fields_mean:
                    (val, cnt) = aggregated_stats[field_name]
                    aggregated_stats[field_name] = (val +
                                                    convert_num(field_val),
                                                    cnt + 1)
                elif field_name in wanted_fields_all:
                    aggregated_stats[field_name].append(field_val)
                elif field_name in wanted_fields_max:
                    num_val = convert_num(field_val)
                    if (not field_name in aggregated_stats
                        ) or aggregated_stats[field_name] < num_val:
                        aggregated_stats[field_name] = num_val

    # If we haven't read any stats files, the fuzzers haven't written any statistics yet
    # (aggregated_stats itself is pre-populated above, so it is never empty)
    if not total_stats_files:
        return

    # Mean conversion
    for field_name in wanted_fields_mean:
        (val, cnt) = aggregated_stats[field_name]
        if cnt:
            aggregated_stats[field_name] = float(val) / float(cnt)
        else:
            aggregated_stats[field_name] = val

    # Write out data
    fields = []
    fields.extend(wanted_fields_total)
    fields.extend(wanted_fields_mean)
    fields.extend(wanted_fields_all)
    fields.extend(wanted_fields_max)

    max_keylen = max([len(x) for x in fields])

    lock = FileLock(outfile)
    lock.acquire()
    try:
        with open(outfile, 'w') as f:
            for field in fields:
                if field not in aggregated_stats:
                    continue

                val = aggregated_stats[field]

                if isinstance(val, list):
                    val = " ".join(val)

                f.write("%s%s: %s\n" % (field, " " *
                                        (max_keylen + 1 - len(field)), val))
    finally:
        lock.release()

    return
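Since lockfile locks support the with statement, the guarded write can also be expressed as a context-managed block; a sketch with an illustrative function boundary around the same logic:

    from lockfile import FileLock

    def write_stats(outfile, fields, aggregated_stats, max_keylen):
        """Write aligned 'field: value' lines under an exclusive lock."""
        with FileLock(outfile):
            with open(outfile, 'w') as f:
                for field in fields:
                    if field not in aggregated_stats:
                        continue
                    val = aggregated_stats[field]
                    if isinstance(val, list):
                        val = " ".join(val)
                    f.write("%s%s: %s\n"
                            % (field, " " * (max_keylen + 1 - len(field)), val))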