Example #1
def main():
    args = _parse_args()
    completed_event = Event()
    _start_timer(args['timeout'], completed_event)
    logger.info('Starting up!')
    finder = PopularGithubRepoFinder(
        org=args['org'],
        client_id=args['client_id'],
        client_secret=args['client_secret'],
    )

    logger.info('Generating report')

    report = {
        'org': args['org'],
        'n': args['n'],
        'note': 'contribution_percentage == -1 means the repo has zero fork',
        'top_n_by_stars': finder.top_n_by_stars_count(args['n']),
        'top_n_by_forks': finder.top_n_by_forks_count(args['n']),
        'top_n_by_pull_requests': finder.top_n_by_pull_requests_count(args['n']),
        'top_n_by_contribution_percentage': finder.top_n_by_contribution_percentage(args['n']),
    }

    if args['output']:
        with open(args['output'], 'w') as output_file:
            json.dump(report, output_file)
    else:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(report)

    logger.info('Report generated')

    completed_event.set()
    logging.shutdown()
Example #2
def main():
    APPLIANCES = [
        ('a', ['fridge freezer', 'fridge'], 512),
        ('b', "'coffee maker'", 512),
        ('c', "'dish washer'", 2000),
        ('d', "'hair dryer'", 256),
        ('e', "'kettle'", 256),
        ('f', "'oven'", 2000),
        ('g', "'toaster'", 256),
        ('h', "'light'", 2000),
        ('i', ['washer dryer', 'washing machine'], 1504)
    ]
    for experiment, appliance, seq_length in APPLIANCES[-1:]:
        full_exp_name = NAME + experiment
        func_call = init_experiment(PATH, 'a', full_exp_name)
        func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
        logger = logging.getLogger(full_exp_name)
        try:
            net = eval(func_call)
            run_experiment(net, epochs=5000)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            logger.exception("Exception")
            # raise
        else:
            del net.source
            del net
            gc.collect()
        finally:
            logging.shutdown()
Example #3
	def __init__(self, config_path, dev=False):
		"""
		:param str config_path: The path to the config file containing json information about the job
		"""
		self._log = logging.getLogger("BOOT")
		self._log_accumulator = LogAccumulator()
		# add this to the root logger so it will capture EVERYTHING
		logging.getLogger().addHandler(self._log_accumulator)

		if not os.path.exists(config_path):
			self._log.error("ERROR, config path {} not found!".format(config_path))
			logging.shutdown()
			exit(1)

		with open(config_path, "r") as f:
			self._config = json.loads(f.read())

		self._job_id = self._config["id"]
		self._idx = self._config["idx"]
		self._tool = self._config["tool"]
		self._params = self._config["params"]
		self._fileset = self._config["fileset"]
		self._db_host = self._config["db_host"]
		self._debug = self._config["debug"]
		self._num_progresses = 0

		self.dev = dev
		self._host_comms = HostComms(self._on_host_msg_received, self._job_id, self._idx, self._tool, dev=dev)
Example #4
 def run(self):
     try:
         # Log startup of parent
         _log.info('Starting supervisor %s, pid %d',
                                     opendiamond.__version__, os.getpid())
         _log.info('Server IDs: %s', ', '.join(self.config.serverids))
         if self.config.cache_server:
             _log.info('Cache: %s:%d', *self.config.cache_server)
         while True:
             # Check for search logs that need to be pruned
             self._prune_child_logs()
             # Check for blob cache objects that need to be pruned
             self._prune_blob_cache()
             # Accept a new connection pair
             control, data = self._listener.accept()
             # Fork a child for this connection pair.  In the child, this
             # does not return.
             self._children.start(self._child, control, data)
             # Close the connection pair in the parent
             control.close()
             data.close()
     except _Signalled, s:
         _log.info('Supervisor exiting on %s', s.signame)
         # Stop listening for incoming connections
         self._listener.shutdown()
         # Kill our children and clean up after them
         self._children.kill_all()
         # Shut down logging
         logging.shutdown()
         # Ensure our exit status reflects that we died on the signal
         signal.signal(s.signal, signal.SIG_DFL)
         os.kill(os.getpid(), s.signal)
Example #5
def test():
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(BufferingSMTPHandler(MAILHOST, FROM, TO, SUBJECT, EMAIL_USERNAME, EMAIL_PASSWORD, 2000))
    for i in xrange(102):
        logger.info("Info index = %d", i)
    logging.shutdown()
Example #6
File: pipeline.py Project: kc-lab/dms2dfe
def pipeline(prj_dh,step=None,test=False):        
    from dms2dfe import configure, ana0_fastq2dplx,ana0_fastq2sbam,ana0_getfeats,ana1_sam2mutmat,ana2_mutmat2fit,ana3_fit2comparison,ana4_modeller,ana4_plotter
    if exists(prj_dh):
        if step==0 or step==None:
            configure.main(prj_dh,"deps")
            configure.main(prj_dh)          
        if step==0.1 or step==None:
            ana0_fastq2dplx.main(prj_dh)
        if step==0.2 or step==None:
            ana0_fastq2sbam.main(prj_dh,test)
        if step==0.3:
            ana0_getfeats.main(prj_dh)
        if step==1 or step==None:
            ana1_sam2mutmat.main(prj_dh)
        if step==2 or step==None:
            ana2_mutmat2fit.main(prj_dh,test)
        if step==3 or step==None:
            ana0_getfeats.main(prj_dh)
            ana4_modeller.main(prj_dh,test)
        if step==4 or step==None:
            ana3_fit2comparison.main(prj_dh,test)
        if step==5 or step==None:
            ana0_getfeats.main(prj_dh)
            ana4_plotter.main(prj_dh)
        if step==None:
            logging.info("Location of output data: %s/plots/aas/data_comparison" % (prj_dh))
            logging.info("Location of output visualizations: %s/plots/aas/" % (prj_dh))
            logging.info("For information about file formats of outputs, refer to http://kc-lab.github.io/dms2dfe/io .")
    else:
        configure.main(prj_dh)                  
    logging.shutdown()
Example #7
	def onMenuLogToFile( self, event ):
		if self.menuItemLogToFile.IsChecked():
			self.__isLoggingToFile__ = True
			self.__startLogToFile__()
		else:
			self.__isLoggingToFile__ = False
			logging.shutdown()
Example #8
File: netsav.py Project: Turgon37/Netsav
  def stop(self):
    """Stop properly the server after signal received

    It is call by start() and signal handling functions
    It says to all thread to exit themself properly and run
    some system routine to terminate the entire program
    """
    # Tell to all thread to stop them at the next second
    sys_log.debug('Send exit command to all thread')
    # send stop command via synchronised event
    self.__event_stop.set()

    # ensure that all of them have exited, serving any pending trigger events meanwhile
    sys_log.debug('Waiting for all subthreads to exit')
    while (threading.enumerate().__len__()) > 1:
      self.getTrigger().serve_once()
      time.sleep(0.5)

    # serve all events which have not already been served
    sys_log.debug('Purge and serve all events in the queue')
    while (self.getTrigger().serve_once()):
      pass

    # Close log
    logging.shutdown()
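Below is a stripped-down sketch of the same stop sequence using only the standard library; the worker loop, logger name, and timings are hypothetical stand-ins for the netsav threads and trigger queue shown above, not the project's actual code.

import logging
import threading
import time

logging.basicConfig(level=logging.DEBUG)
sys_log = logging.getLogger('netsav-sketch')   # hypothetical logger name
stop_event = threading.Event()

def worker():
    # Hypothetical worker: poll the shared event and exit once it is set
    while not stop_event.is_set():
        time.sleep(0.1)

thread = threading.Thread(target=worker)
thread.start()

# --- stop sequence, mirroring the example above ---
sys_log.debug('Send exit command to all threads')
stop_event.set()

sys_log.debug('Waiting for all subthreads to exit')
while threading.active_count() > 1:
    time.sleep(0.1)

# Close the log only after every thread has finished
logging.shutdown()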
Example #9
File: server.py Project: befks/odoo
    def stop(self):
        """ Shutdown the WSGI server. Wait for non deamon threads.
        """
        _logger.info("Initiating shutdown")
        _logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")

        if self.httpd:
            self.httpd.shutdown()
            self.close_socket(self.httpd.socket)

        # Manually join() all threads before calling sys.exit() to allow a second signal
        # to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
        # threading.Thread.join() should not mask signals (at least in python 2.5).
        me = threading.currentThread()
        _logger.debug('current thread: %r', me)
        for thread in threading.enumerate():
            _logger.debug('process %r (%r)', thread, thread.isDaemon())
            if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
                while thread.isAlive():
                    _logger.debug('join and sleep')
                    # Need a busyloop here as thread.join() masks signals
                    # and would prevent the forced shutdown.
                    thread.join(0.05)
                    time.sleep(0.05)

        _logger.debug('--')
        odoo.modules.registry.Registry.delete_all()
        logging.shutdown()
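The busy loop above exists so that a second signal can still force the shutdown while threads are being joined; a reduced sketch of that pattern, with a hypothetical helper name, might look like this.

import threading
import time

def join_interruptibly(thread, slice_seconds=0.05):
    # Hypothetical helper: join in short slices so that a signal (e.g. a
    # second Ctrl-C) can still be delivered between iterations instead of
    # being masked by a single blocking join().
    while thread.is_alive():
        thread.join(slice_seconds)
        time.sleep(slice_seconds)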
Example #10
    def shutdown(self):
        self.shutdownEvt.wait()
        self.logger.info('Initiating shutdown...')

        # stop services
        self.logger.info('Stopping services...')
        for service in [x for x in self.services.values()
                        if x is not self]:
            service.shutdown()

        self.running = False
        
        # shutdown session manager
        if SessionManager.sm:
            self.logger.info('Closing session manager...')
            SessionManager.close()
        
        # close database
        if _db.db_handle:
            self.logger.info('Closing database...')
            _db.close()

        self.logger.info('All services have been shut down successfully')
        # shutdown logging
        logging.shutdown()
        self.shutdowninprogress = False
Example #11
def _exit(shutdown_logger=True, code=0):
    if shutdown_logger:
        log.info("Exiting PID: %s" % autosubliminal.PID)
        # Shutdown the logger to make sure the logfile is released before exiting
        logging.shutdown()
    # Exit process
    os._exit(code)
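The explicit shutdown matters here because os._exit() terminates the process immediately, skipping atexit hooks (including the one the logging module registers to call logging.shutdown()), so handlers would otherwise not be flushed and the log file not released. A minimal sketch of that ordering, with a hypothetical log file name:

import logging
import os

logging.basicConfig(filename='app.log', level=logging.INFO)   # hypothetical log file
logging.getLogger(__name__).info('about to exit')

# Flush and close every handler first; os._exit() bypasses the atexit hook
# that would normally call logging.shutdown() for us.
logging.shutdown()
os._exit(0)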
Example #12
  def _setup_logging(self, log_level):
    """Reinitialize logging post-fork to clear all handlers, file descriptors, locks etc.

    This must happen first thing post-fork, before any further logging is emitted.
    """
    # Re-initialize the childs logging locks post-fork to avoid potential deadlocks if pre-fork
    # threads have any locks acquired at the time of fork.
    logging._lock = threading.RLock() if logging.thread else None
    for handler in logging.getLogger().handlers:
      handler.createLock()

    # Invoke a global teardown for all logging handlers created before now.
    logging.shutdown()

    # Reinitialize logging for the daemon context.
    setup_logging(log_level, console_stream=None, log_dir=self._log_dir, log_name=self.LOG_NAME)

    # Close out pre-fork file descriptors.
    self._close_fds()

    # Redirect stdio to the root logger.
    sys.stdout = _StreamLogger(logging.getLogger(), logging.INFO)
    sys.stderr = _StreamLogger(logging.getLogger(), logging.WARN)

    self._logger.debug('logging initialized')
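The same post-fork idea in isolation, as a sketch using only the standard library (POSIX only; the child log file name is hypothetical, and basicConfig(force=True) needs Python 3.8+). It stands in for the project-specific setup_logging and _StreamLogger helpers above.

import logging
import os

logging.basicConfig(level=logging.INFO)          # pre-fork configuration
log = logging.getLogger('worker')                # hypothetical logger name

pid = os.fork()
if pid == 0:
    # Child: recreate handler locks first, since a lock held by a pre-fork
    # thread at fork time would otherwise deadlock the next log call here.
    for handler in logging.getLogger().handlers:
        handler.createLock()

    # Tear down the inherited handlers, then configure fresh ones.
    logging.shutdown()
    logging.basicConfig(filename='child.log', level=logging.INFO, force=True)
    log.info('child logging reinitialized')
    os._exit(0)

os.waitpid(pid, 0)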
Example #13
def test_netreal():

  # Find the right path for files (win or unix)
  path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
  if os.name == 'nt':
    path += "\\"
  else:
    path += "/" 
  lr = [tuple(line.strip().split(':')) for line in (x for x in open(path + "hosts.txt", 'r') if not x.startswith('#'))]
  
  log.info('Testing TCP Ports...')
  for host in lr:
    for port in tcports:
      net(host,port,"tcp")
  
  log.info('Checking Hosts SSH Connectivity...')
  lr[:] = [host for host in lr if not ssh(host)]
  
  log.info('Testing UDP Ports...')
  for host in lr:
    err = 0
    for port in udports:
      err += udp(host,port)
    if err > 2:
      log.critical(host[1] + " (" + host[0] + ") has UDP ports closed")

  log.info('Done')
  # Send mail with log
  mh.flush()
  logging.shutdown()
Example #14
def send(input_text):
    #if severity.lower() == 'critical' or severity.lower()  == 'error' or severity.lower()  == 'warning' or severity.lower() == 'info' or severity.lower() == 'debug':
    logger_mail = logging.getLogger("")
    logger_mail.setLevel(logging.DEBUG)
    logger_mail.addHandler(BufferingSMTPHandler(MAILHOST, FROM, TO, SUBJECT, 10))
    logger_mail.info(input_text)
    logging.shutdown()
Example #15
def main():
    APPLIANCES = [
        ("a", ["fridge freezer", "fridge"], 512),
        ("b", "'coffee maker'", 512),
        ("c", "'dish washer'", 2000),
        ("d", "'hair dryer'", 256),
        ("e", "'kettle'", 256),
        ("f", "'oven'", 2000),
        ("g", "'toaster'", 256),
        ("h", "'light'", 2000),
        ("i", ["washer dryer", "washing machine"], 1504),
    ]
    for experiment, appliance, seq_length in APPLIANCES[-1:]:
        full_exp_name = NAME + experiment
        func_call = init_experiment(PATH, "a", full_exp_name)
        func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
        logger = logging.getLogger(full_exp_name)
        try:
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            logger.exception("Exception")
            # raise
        else:
            del net.source
            del net
            gc.collect()
        finally:
            logging.shutdown()
Example #16
def relaunch_on_timeout(crawler, endpoint, data, timeout=30, delay=10):
    """Receive data from the specified endpoint.

    This function handles all the intermediate operations, such as building
    the URL from the endpoint and handling log messages.

    """

    # Relaunch automatically on network error / broken pipe, ...
    while True:
        try:
            LOGGER.info("Requesting stream: %s. Params: %s", endpoint, data)
            start_time = datetime.datetime.now()
            stream = crawler.request_stream(endpoint, data, timeout)
            for count, tweet in enumerate(stream):
                if not count % 1000:
                    now = datetime.datetime.now()
                    delta = now - start_time
                    rate = float(count) / delta.total_seconds()
                    # Change this to logger TODO
                    print("Total tweets", count, "\tRate", rate)
                    start_time = datetime.datetime.now()
                #print(tweet) TODO (control)
        # Unless CTRL+C or exit
        except (KeyboardInterrupt, SystemExit):
            # TODO clean outfile?
            LOGGER.info("Shutting down (manual shutdown).")
            logging.shutdown()
            sys.exit(0)
        # Handle network timeout
        except requests.exceptions.Timeout:
            # TODO introduce exp backoff
            LOGGER.info("Request timed out (timeout=%s). Waiting and retrying (delay=%s).", timeout, delay)
            time.sleep(delay)
Example #17
	def __del__(self):
		print '%s is deleted.' % self.__class__.__name__
		del self.testPool
		del self.appTestEngine
		del self.ALTList
		self.logHandler.close()
		logging.shutdown()
Example #18
 def destroy(self):
     """
         Shuts down the logging framework. No logging can be done
         afterwards.
     """
     logging.getLogger(__name__).debug('shutting down logging')
     logging.shutdown()
Example #19
    def stop(self):
        """
        Stop the daemon
        """
        # Get the pid from the pidfile
        try:
            pf = file(self.pidfile, "r")
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None

        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return  # not an error in a restart

        logging.shutdown()

        # Try killing the daemon process
        try:
            while 1:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError, err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)
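The snippet above is Python 2 (file(), the old except syntax, print as a statement); a Python 3 restatement of the same stop logic, kept as a sketch with the same pidfile handling and messages, could read:

import errno
import logging
import os
import sys
import time
from signal import SIGTERM

def stop(pidfile):
    # Get the pid from the pidfile; a missing file means the daemon is not running
    try:
        with open(pidfile) as pf:
            pid = int(pf.read().strip())
    except OSError:
        sys.stderr.write("pidfile %s does not exist. Daemon not running?\n" % pidfile)
        return  # not an error in a restart

    logging.shutdown()

    # Try killing the daemon process until it disappears
    try:
        while True:
            os.kill(pid, SIGTERM)
            time.sleep(0.1)
    except OSError as err:
        if err.errno == errno.ESRCH:      # "No such process": it has exited
            if os.path.exists(pidfile):
                os.remove(pidfile)
        else:
            print(err)
            sys.exit(1)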
Example #20
def shutdown():
    logger.warning( "Let's stop everything before we exit" )

    global epgGrabber, contentProvider, vcasProvider, recorder, scheduler, resourceMonitor

    stopRtspServer()
    if epgGrabber:
        epgGrabber.stop()
    if contentProvider:
        contentProvider.stop()
    if vcasProvider:
        vcasProvider.shutdown()
    if recorder:
        recorder.stopAllRecordings()
    if scheduler:
        scheduler.stop()
    if resourceMonitor:
        resourceMonitor.stop()
    if watchdog:
        watchdog.stop()
    stopWebserver()
    logger.warning( "Everything has stopped, now exit" )
    logging.shutdown()
    # Remove PID file if created
    if const.CREATEPID and os.path.exists( const.PIDFILE ):
        os.unlink( const.PIDFILE )
    os._exit( 0 )
Example #21
File: daemonbase.py Project: UfSoft/EvAFM
    def _exit(self, ssignal, frame):
        import logging
        if not self.__exiting:
            def too_long(sig, frm):
                logging.getLogger(__name__).info(
                    "Taking too long to exit(>5 secs). Commit suicide!!!"
                )
                self.remove_pid()
                logging.shutdown()
                try:
                    os.kill(self.__pid, signal.SIGKILL)
                except OSError, err:
                    logging.getLogger(__name__).exception(
                        "Failed to commit suicide: %s", err
                    )
            # Setup an alarm signal so that if taking too long, commit suicide
            signal.signal(signal.SIGALRM, too_long)
            signal.alarm(5) # We have 5 secs to exit properly
            logging.getLogger(__name__).info("Exiting...")
            self.__exiting = True
            # Ignore any further signaling
            signal.signal(ssignal, signal.SIG_IGN)
            signal.signal(signal.SIGTERM, signal.SIG_IGN)
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            self.exit()

            logging.getLogger(__name__).info("Exited!!!")
            logging.shutdown()
            self.remove_pid()
            os._exit(1)
Example #22
 def on_moved(self, event):
     # <FileMovedEvent: src_path='/root/lzf/tmp/2', dest_path='/root/lzf/tmp/3'>
     super(MyHandler2, self).on_moved(event)
     what = 'directory' if event.is_directory else 'file'
     #logging.info("Moved %s: from %s to %s", what, event.src_path,event.dest_path)
     self.loger.info("Moved %s: from %s to %s", what, event.src_path,event.dest_path)
     logging.shutdown()
Example #23
File: mainwindow.py Project: KDE/kajongg
 def aboutToQuit():
     """now all connections to servers are cleanly closed"""
     mainWindow = Internal.mainWindow
     Internal.mainWindow = None
     if mainWindow:
         if Debug.quit:
             logDebug(u'aboutToQuit starting')
         if mainWindow.exitWaitTime > 1000.0 or Debug.quit:
             logDebug(
                 u'reactor stopped after %d ms' %
                 (mainWindow.exitWaitTime))
     else:
         if Debug.quit:
             logDebug(u'aboutToQuit: mainWindow is already None')
     StateSaver.saveAll()
     Internal.app.quit()
     try:
         # if we are killed while loading, Internal.db may not yet be
         # defined
         if Internal.db:
             Internal.db.close()
     except NameError:
         pass
     checkMemory()
     logging.shutdown()
     if Debug.quit:
         logDebug(u'aboutToQuit ending')
Example #24
def main(args=sys.argv):
    """This is an example program of how to use the PresentKey widget"""
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG,
                        format='%(name)s (%(levelname)s): %(message)s')
    try:
        arguments = parse_command_line(args)
        
        #if arguments.gpg:
        #    keyid = arguments.file
        #    keyring = Keyring()
        #    # this is a dict {fpr: key-instance}
        #    found_keys = keyring.get_keys(keyid)
        #    # We take the first item we found and export the actual keydata
        #    fpr = found_keys.items()[0][0]
        #    keyring.export_data(fpr=fpr, secret=False)
        #    keydata = keyring.context.stdout
        #else:
        #    keydata = open(arguments.file, 'r').read()
        fpr = arguments.fpr

        app = KeyPresent()
        try:
            GLib.unix_signal_add_full(GLib.PRIORITY_HIGH, signal.SIGINT, lambda *args : app.quit(), None)
        except AttributeError:
            pass
    
        exit_status = app.run(fpr)
    
        return exit_status

        
    finally:
        logging.shutdown()
Example #25
    def shutdown(self):
        self.shutdownEvt.wait()
        logger = serverLogging.logger
        logger.info('Initiating shutdown...')

        if self.running:
            logger.info('Shutting down main service...')
            asyncBaseServer.BaseServer.shutdown(self)

        # shutdown session manager
        if sessionManager.sm:
            logger.info('Closing session manager...')
            sessionManager.close()
        
        # close database
        if db.db_handle:
            logger.info('Closing database...')
            db.close()

        if Mgt.mgtServer:
            logger.info('Shutting down administration service')
            Mgt.mgtServer.shutdown()

        logger.info('All services have been shut down successfully')
        # shutdown logging
        logging.shutdown()
        self.shutdowninprogress = False
Example #26
def close_down() -> None:
    """Execute close down activities."""
    # pigjar-#352 Suppress legacy branch coverage failures
    if config.data:  # pragma: no cover
        control.save_program_data()
    save_config_dynamic_ini()
    logging.shutdown()
Example #27
 def shutdown(self):
     self.__init__()
     try:
         logging.shutdown()
         logging.captureWarnings(None)
     except:
         pass
Example #28
	def start(self):
		"""
		Start the receive thread - send will run only when needed.
		"""
		
		# Clear the packet queue
		self.queueIn  = deque()
		
		# Start the packet processing thread
		thread.start_new_thread(self.packetProcessor, ())
		
		# Setup the various sockets
		## Receive
		try:
			self.socketIn =  socket.socket(socket.AF_INET, socket.SOCK_STREAM)
			self.socketIn.bind((self.config['MESSAGEINHOST'], self.config['MESSAGEINPORT']))
			self.socketIn.listen(1)
			self.client = None
			
			self.lastPacket = time.time()
			
		except socket.error, err:
			code, e = err
			self.logger.critical('Cannot bind to listening port %i: %s', self.config['MESSAGEINPORT'], str(e))
			self.logger.critical('Exiting on previous error')
			logging.shutdown()
			sys.exit(1)
Example #29
def main():
    # create a HiSparc monitor object
    hsMonitor = HsMonitor()

    # start all threads
    # hsMonitor.startAll()

    # this to get the keyboard interruption
    # c = cmd.Cmd()

    # try:
    #     c.cmdloop()
    # except KeyboardInterrupt:
    #     # stop all threads
    #     hsMonitor.stopAll()

    # wait for all threads to finish
    # for thread in hsMonitor.hsThreads:
    #     thread.join()

    # DBG: test the nagios push
    # buffLis = hsMonitor.createBufferListener()
    # buffLis.test()

    # DBG: test the nagios push
    sm = StorageManager()
    it = Interpreter(sm)
    checkSched = hsMonitor.createCheckScheduler(it)
    checkSched.run()
    logging.shutdown()
Example #30
def main():
    """
    Launch the Framework application command line interface.
    """
    main = CommandLine()
    main.cmdloop()
    logging.shutdown()
Example #31
def shutdown(rc):
    """Exits the script, but first flushes all logging handles, etc."""
    logging.shutdown()
    sys.exit(rc)
Example #32
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts."""

        parser = argparse.ArgumentParser(usage="%(prog)s [options]")
        parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
                            help="Leave defids and test.* datadir on exit or error")
        parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
                            help="Don't stop defids after the test execution")
        parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                            help="Directory for caching pregenerated datadirs (default: %(default)s)")
        parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
                            help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                            help="Print out all RPC calls as they are made")
        parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
                            help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_argument("--coveragedir", dest="coveragedir",
                            help="Write tested RPC commands into this directory")
        parser.add_argument("--configfile", dest="configfile",
                            default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
                            help="Location of the test framework config file (default: %(default)s)")
        parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                            help="Attach a python debugger if test fails")
        parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
                            help="use defi-cli instead of RPC for all commands")
        parser.add_argument("--perf", dest="perf", default=False, action="store_true",
                            help="profile running nodes with perf for the duration of the test")
        parser.add_argument("--randomseed", type=int,
                            help="set a random seed for deterministically reproducing a previous test run")
        self.add_options(parser)
        self.options = parser.parse_args()

        PortSeed.n = self.options.port_seed

        check_json_precision()

        self.options.cachedir = os.path.abspath(self.options.cachedir)

        config = configparser.ConfigParser()
        config.read_file(open(self.options.configfile))
        self.config = config
        self.options.defid = os.getenv("DEFID", default=config["environment"]["BUILDDIR"] + '/src/defid' + config["environment"]["EXEEXT"])
        self.options.deficli = os.getenv("DEFICLI", default=config["environment"]["BUILDDIR"] + '/src/defi-cli' + config["environment"]["EXEEXT"])

        os.environ['PATH'] = os.pathsep.join([
            os.path.join(config['environment']['BUILDDIR'], 'src'),
            os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
            os.environ['PATH']
        ])

        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
        self._start_logging()

        # Seed the PRNG. Note that test runs are reproducible if and only if
        # a single thread accesses the PRNG. For more information, see
        # https://docs.python.org/3/library/random.html#notes-on-reproducibility.
        # The network thread shouldn't access random. If we need to change the
        # network thread to access randomness, it should instantiate its own
        # random.Random object.
        seed = self.options.randomseed

        if seed is None:
            seed = random.randrange(sys.maxsize)
        else:
            self.log.debug("User supplied random seed {}".format(seed))

        random.seed(seed)
        self.log.debug("PRNG seed is: {}".format(seed))

        self.log.debug('Setting up network thread')
        self.network_thread = NetworkThread()
        self.network_thread.start()

        success = TestStatus.FAILED

        try:
            if self.options.usecli:
                if not self.supports_cli:
                    raise SkipTest("--usecli specified but test does not support using CLI")
                self.skip_if_no_cli()
            self.skip_test_if_missing_module()
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError:
            self.log.exception("Assertion failed")
        except KeyError:
            self.log.exception("Key error")
        except Exception:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt:
            self.log.warning("Exiting after keyboard interrupt")

        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()

        self.log.debug('Closing down network thread')
        self.network_thread.close()
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: defids were not stopped and may still be running")

        should_clean_up = (
            not self.options.nocleanup and
            not self.options.noshutdown and
            success != TestStatus.FAILED and
            not self.options.perf
        )
        if should_clean_up:
            self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
            cleanup_tree_on_exit = True
        elif self.options.perf:
            self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
            cleanup_tree_on_exit = False
        else:
            self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
            cleanup_tree_on_exit = False

        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            exit_code = TEST_EXIT_FAILED
        logging.shutdown()
        if cleanup_tree_on_exit:
            shutil.rmtree(self.options.tmpdir)
        sys.exit(exit_code)
Example #33
                                format='%(asctime)s: %(message)s')

            def on_press(key):

                currentTime = time.time()
                logging.info(str(key))
                if key == Key.esc or currentTime > stoppingTime:
                    return False

            while currentTime < stoppingTime:
                currentTime = time.time()

                with Listener(on_press=on_press) as listener:
                    listener.join()

            logging.shutdown()
            print "Stopped Listening"

            fsize = os.path.getsize(str(path2))
            send_one_message(s, str(fsize).encode('utf-8'))

            key_logger_AES.encrypt_file(path2)

            BUFFER_SIZE = 1024
            with open(path2 + ".enc", 'rb') as fs:
                data = fs.read(BUFFER_SIZE)

                while data:
                    send_one_message(s, data)
                    data = fs.read(BUFFER_SIZE)
Example #34
    def main(self):

        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option(
            "--nocleanup",
            dest="nocleanup",
            default=False,
            action="store_true",
            help="Leave syncoinds and test.* datadir on exit or error")
        parser.add_option("--noshutdown",
                          dest="noshutdown",
                          default=False,
                          action="store_true",
                          help="Don't stop syncoinds after the test execution")
        parser.add_option(
            "--srcdir",
            dest="srcdir",
            default=os.path.normpath(
                os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
            help=
            "Source directory containing syncoind/syncoin-cli (default: %default)"
        )
        parser.add_option("--cachedir",
                          dest="cachedir",
                          default=os.path.normpath(
                              os.path.dirname(os.path.realpath(__file__)) +
                              "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir",
                          dest="tmpdir",
                          help="Root directory for datadirs")
        parser.add_option(
            "-l",
            "--loglevel",
            dest="loglevel",
            default="INFO",
            help=
            "log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory."
        )
        parser.add_option("--tracerpc",
                          dest="trace_rpc",
                          default=False,
                          action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option(
            "--portseed",
            dest="port_seed",
            default=os.getpid(),
            type='int',
            help=
            "The seed to use for assigning port numbers (default: current process id)"
        )
        parser.add_option("--coveragedir",
                          dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile",
                          dest="configfile",
                          help="Location of the test framework config file")
        parser.add_option("--pdbonfailure",
                          dest="pdbonfailure",
                          default=False,
                          action="store_true",
                          help="Attach a python debugger if test fails")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        PortSeed.n = self.options.port_seed

        os.environ[
            'PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ[
                'PATH']

        check_json_precision()

        # Set up temp directory and start logging
        if self.options.tmpdir:
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()

        success = TestStatus.FAILED

        try:
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")

        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print(
                "Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()

        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            self.log.info(
                "Note: syncoinds were not stopped and may still be running")

        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
            if os.getenv("PYTHON_DEBUG", ""):
                # Dump the end of the debug logs, to aid in debugging rare
                # travis failures.
                import glob
                filenames = [self.options.tmpdir + "/test_framework.log"]
                filenames += glob.glob(self.options.tmpdir +
                                       "/node*/regtest/debug.log")
                MAX_LINES_TO_PRINT = 1000
                for fn in filenames:
                    try:
                        with open(fn, 'r') as f:
                            print("From", fn, ":")
                            print("".join(deque(f, MAX_LINES_TO_PRINT)))
                    except OSError:
                        print("Opening file %s failed." % fn)
                        traceback.print_exc()

        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            sys.exit(TEST_EXIT_PASSED)
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            sys.exit(TEST_EXIT_SKIPPED)
        else:
            self.log.error(
                "Test failed. Test logging available at %s/test_framework.log",
                self.options.tmpdir)
            logging.shutdown()
            sys.exit(TEST_EXIT_FAILED)
Example #35
File: logger.py Project: mhpi/hydroDL
 def close(self):
     """
     Closes the logger
     :return:
     """
     logging.shutdown()
Example #36
 def close():
     if LogHelper._close_imme:
         if LogHelper._logger is None:
             return
         logging.shutdown()
Example #37
def main():
    w = GUI()
    try:
        w.start()
    except:
        logging.shutdown()
Example #38
 def __del__(self):
     logging.shutdown()  # Don't log after this call!
     del (self.logger)
Example #39
    def test1(self):
        # set the logging level to debug so that debug messages are printed
        logging.basicConfig(level=logging.DEBUG)

        proc = None
        try:
            # launch the server in a separate process
            env = dict()
            env["PYTHONPATH"] = ':'.join(sys.path)
            parent_port = avro.tether.util.find_port()

            pyfile = avro.test.mock_tether_parent.__file__
            proc = subprocess.Popen([sys.executable, pyfile, "start_server", "{0}".format(parent_port)])
            input_port = avro.tether.util.find_port()

            print("Mock server started process pid={0}".format(proc.pid))
            # Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
            # so we give the subprocess time to start up
            time.sleep(1)

            runner = avro.tether.tether_task_runner.TaskRunner(avro.test.word_count_task.WordCountTask())

            runner.start(outputport=parent_port, join=False)
            for _ in range(12):
                if runner.server is not None:
                    break
                time.sleep(1)
            else:
                raise RuntimeError("Server never started")

            # Test sending various messages to the server and ensuring they are processed correctly
            requestor = avro.tether.tether_task.HTTPRequestor(
                "localhost", runner.server.server_address[1], avro.tether.tether_task.inputProtocol)

            # TODO: We should validate that open worked by grabbing the STDOUT of the subproces
            # and ensuring that it outputted the correct message.

            # Test the mapper
            requestor.request("configure", {
                "taskType": avro.tether.tether_task.TaskType.MAP,
                "inSchema": unicode(str(runner.task.inschema)),
                "outSchema": unicode(str(runner.task.midschema))
            })

            # Serialize some data so we can send it to the input function
            datum = unicode("This is a line of text")
            writer = io.BytesIO()
            encoder = avro.io.BinaryEncoder(writer)
            datum_writer = avro.io.DatumWriter(runner.task.inschema)
            datum_writer.write(datum, encoder)

            writer.seek(0)
            data = writer.read()

            # Call input to simulate calling map
            requestor.request("input", {"data": data, "count": 1})

            # Test the reducer
            requestor.request("configure", {
                "taskType": avro.tether.tether_task.TaskType.REDUCE,
                "inSchema": unicode(str(runner.task.midschema)),
                "outSchema": unicode(str(runner.task.outschema))}
            )

            # Serialize some data so we can send it to the input function
            datum = {"key": unicode("word"), "value": 2}
            writer = io.BytesIO()
            encoder = avro.io.BinaryEncoder(writer)
            datum_writer = avro.io.DatumWriter(runner.task.midschema)
            datum_writer.write(datum, encoder)

            writer.seek(0)
            data = writer.read()

            # Call input to simulate calling reduce
            requestor.request("input", {"data": data, "count": 1})

            requestor.request("complete", {})

            runner.task.ready_for_shutdown.wait()
            runner.server.shutdown()
            # time.sleep(2)
            # runner.server.shutdown()

            sthread = runner.sthread

            # Possible race condition?
            time.sleep(1)

            # make sure the other thread terminated
            self.assertFalse(sthread.is_alive())

            # shutdown the logging
            logging.shutdown()

        except Exception as e:
            raise
        finally:
            # close the process
            if not(proc is None):
                proc.kill()
Example #40
def do_exit(exitcode):
  logging.shutdown()
  sys.exit(exitcode)
Example #41
def brx041(path):
    List1 = []
    try:
        infile = open(path, "r", encoding="utf8")
        soup = BeautifulSoup(infile, 'lxml')
        soup1 = str(soup)
        abstract = soup.find_all('ce:abstract')
        text = [item.get_text() for item in abstract]
        if (len(text) > 0):
            abs_text = ""
            for chars in text:
                abs_text = abs_text + chars
            main = soup.find_all('ce:sections')
            text2 = [item.get_text() for item in main]
            main_text = ""
            for chars in text2:
                main_text = main_text + chars
            reference = soup.find_all('ce:bibliography')
            text1 = [item.get_text() for item in reference]
            string = ""
            for chars in text1:
                string = string + chars
            ref = string.split('[')
            for ind, item in enumerate(ref):
                if (item[:-1] != "Reference"):
                    if (']' in item):
                        ref[ind] = item[item.index(']') + 1:]

            for i in range(len(abs_text)):
                s = ""
                mnop = 0
                if abs_text[i] == "[":
                    s = s + '['
                    for ind in range(i + 1, len(abs_text)):
                        s = s + abs_text[ind]
                        if (abs_text[ind] == ']'):
                            break
                    if s not in main_text:
                        lis1 = re.findall(r'[a-zA-Z]+\s', s)

                        if (len(lis1) != 0):
                            ih = soup1.index(s)
                            List1.append([
                                "BRX041",
                                "Bibliographic reference cross-references",
                                "For a numbered reference cited in full in the abstract and not cited anywhere else in the main body of the text replace the citation",
                                "error",
                                str(ih),
                                "Error:Sholud be replaced with name-date numbered cross-reference"
                            ])
                        else:
                            lis2 = re.findall(
                                '[a-zA-Z]+\s\(\d\d\d\d\)\s\[\d+\]',
                                abs_text[:ind + 1])
                            if len(lis2) > 0:
                                l1 = lis2[-1]
                                for references in ref:
                                    if l1 in references:
                                        mnop = 1
                                if (mnop == 0):
                                    ih = soup1.index(s)
                                    List1.append([
                                        "BRX041",
                                        "Bibliographic reference cross-references",
                                        "For a numbered reference cited in full in the abstract and not cited anywhere else in the main body of the text replace the citation",
                                        "error",
                                        str(ih),
                                        "ERROR:name date numbered cross reference not in References"
                                    ])
                            else:
                                ih = soup1.index(s)
                                List1.append([
                                    "BRX041",
                                    "Bibliographic reference cross-references",
                                    "For a numbered reference cited in full in the abstract and not cited anywhere else in the main body of the text replace the citation",
                                    "error",
                                    str(ih),
                                    "Error:citation shold be replaced with name date and numbered cross ref"
                                ])
        if len(List1) == 0:
            List1.append([
                "BRX041", "Bibliographic reference cross-references",
                "For a numbered reference cited in full in the abstract and not cited anywhere else in the main body of the text replace the citation",
                "no error"
            ])
        return List1
    except Exception as e:
        List2 = []
        List2.append([
            "BRX041", "Bibliographic reference cross-references",
            "For a numbered reference cited in full in the abstract and not cited anywhere else in the main body of the text replace the citation",
            "no error"
        ])
        logging.info('=' * 50)
        logging.exception(
            "Got exception on main handler in BRX041 : Bibliographic reference cross-references "
        )
        logging.shutdown()
        return List2
Example #42
def closelog():
    logging.shutdown()
Example #43
 def Shutdown(self):
     self.getAppLogger().debug("Logging shutdown")
     logging.shutdown()
Example #44
def main():
    """
    The performance of 5 models are compared:
        (1) ARIMA,
        (2) ARIMA with average day trend template,
        (3) day trend average,
        (4) day trend + ewma filtered detrending residual
        (5) dummy mean
    The results are save in a log file.

    """
    # Outliers are removed before being used for forecasting.
    # But outliers are included in evaluation, so the forecasts are compared with the real raw data.

    create_folders('output', ['forecasts'])
    # turn on logging
    fname = './output/' + "forecasts" + datetime.now().time().strftime(
        "%Hh%Mm") + ".txt"
    logging.basicConfig(filename=fname,
                        level=logging.INFO,
                        format='%(message)s')

    df_test = csv_reader(s.filename,
                         field_names=s.field_names,
                         desc=s.desc_names,
                         split=s.split - 1)

    df_test_original = df_test.copy(
    )  # keep the original data for performance evaluation

    # cleaned observations for forecasting
    # now rmvoutliers_fill is on an individual day series, while in training phase, it's on an aligned series of all the historical data.
    # use ewm with a larger ALPHAl, or a moving window with smaller window
    for f in s.field_names:
        rmvoutliers_fill(df_test[f], fill='ewm', n=1)

    mdl_list = [
        'ARIMA', 'ARIMA_trend', 'average_trend', 'ewma_trend', 'ewma',
        'dummy_means'
    ]
    err_mae = pd.DataFrame(None, mdl_list, s.field_names)
    err_quant = pd.DataFrame(None, mdl_list, s.field_names)
    for mdl in mdl_list:
        df_hat = forecast(df_test, s.field_names, mdlname=mdl)
        # mean absolute error and 75% quantile
        err_mae.loc[mdl, :] = (df_test_original[s.field_names] -
                               df_hat).abs().mean()
        err_quant.loc[mdl, :] = (df_test_original[s.field_names] -
                                 df_hat).abs().quantile(q=0.75)

    if VERBOSE >= 1:
        # plot an example day series for all the stops.
        mdl_list2 = ['ARIMA_trend', 'average_trend', 'ewma_trend']
        ts = pd.DataFrame(None, columns=mdl_list2)

        date = '2016-07-22'

        for f in s.field_names:
            ts['Original'] = df_test_original.loc[date, f]
            for mdl in mdl_list2:
                ts[mdl] = forecast(df_test[date], [f], mdlname=mdl)

            ax = ts.plot(title="Stop-travel time forecasting for " + f + ' (' +
                         date + ')')

            import matplotlib.dates as mdates
            ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
            plt.tight_layout()

            fig = ax.get_figure()
            fig.savefig('./output/forecasts/' + f + '.pdf')
            plt.close(fig)

    logging.info('\n' * 2)
    logging.info(
        '======================================================================================================'
    )
    logging.info(
        '         Performance Evaluation of Stop/travel Time Forecast for Next Bus (in seconds)                   '
    )
    logging.info(
        '======================================================================================================'
    )

    logging.info('\n' * 2)
    logging.info(
        '                                   Mean Absolute Error                                          '
    )
    logging.info(
        '--------------------------------------------------------------------------------------------------'
    )
    logging.info(err_mae.to_string(line_width=100))
    logging.info('\n' * 2)
    logging.info('The average mean absolute error over all the stops:\n')
    logging.info(err_mae.mean(axis=1))

    logging.info('\n' * 2)
    logging.info(
        '                           75% Quantile of Forecasting Error                                   '
    )
    logging.info(
        '--------------------------------------------------------------------------------------------------'
    )
    logging.info(err_quant.to_string(line_width=100))
    logging.info('\n' * 2)
    logging.info('The average 75% error quantile over all the stops:\n')
    logging.info(err_quant.mean(axis=1))

    i = 0
    k = 10
    methods = ['average_trend', 'ewma_trend', 'dummy_means']
    while i * k < s.field_names.__len__():
        ax = plot_bar_chart(err_mae.loc[methods].values[:, i * k:i * k + k],
                            err_quant.loc[methods].values[:, i:i + k],
                            'Absolute Forecast Error and 75% Quantile',
                            s.field_names[i * k:i * k + k],
                            legend=methods)
        i += 1
        fig = ax.get_figure()
        fig.savefig('./output/forecast_error_' + str(i) + '.pdf')
        plt.close(fig)

    logging.info('\n' * 2)
    logging.info(
        """\n ----------------------    The testing is over!    ------------------------------  """
    )

    logging.shutdown()
Example #45
 def test_tmpfile_goes_away(self):
     self.create('*****@*****.**', ('*****@*****.**',))
     os.remove(self.handler.filename)
     logging.shutdown()
     self.assertEqual(len(DummySMTP.sent), 1)
Example #46
File: logger.py Project: tendence/ssh
def shutdown():
    logging.shutdown()
Example #47
 def test_dont_send_empty(self):
     self.create('*****@*****.**', ('*****@*****.**',),
                 send_empty_entries=False)
     logging.shutdown()
     self.assertEqual(len(DummySMTP.sent), 0)
Example #48
 def closeLogger(self):
     logging.shutdown()
Example #49
 def test_reopen(self):
     self.create('*****@*****.**', ('*****@*****.**',))
     self.handler.reopen()
     logging.shutdown()
     self.assertEqual(len(DummySMTP.sent), 2)
Example #50
 def test_send_level_filters(self):
     self.create('*****@*****.**', ('*****@*****.**',),
                 send_level=logging.CRITICAL)
     self.logger.warning('This line will not be sent')
     logging.shutdown()
     self.assertEqual(len(DummySMTP.sent), 0)
Example #51
def run_mvm_processing(input_filename,
                       diagnostic_mode=False,
                       use_defaults_configs=True,
                       input_custom_pars_file=None,
                       output_custom_pars_file=None,
                       phot_mode="both",
                       log_level=logutil.logging.INFO):
    """
    Run the HST Advanced Products (HAP) generation code.  This routine is the sequencer or
    controller which invokes the high-level functionality to process the multi-visit data.

    Parameters
    ----------
    input_filename: string
        The 'poller file' where each line contains information regarding an exposure considered
        part of the multi-visit.

    diagnostic_mode : bool, optional
        Allows printing of additional diagnostic information to the log.  Also, can turn on
        creation and use of pickled information.

    use_defaults_configs: bool, optional
        If True, use the configuration parameters in the 'default' portion of the configuration
        JSON files.  If False, use the configuration parameters in the "parameters" portion of
        the file.  The default is True.

    input_custom_pars_file: string, optional
        Represents a fully specified input filename of a configuration JSON file which has been
        customized for specialized processing.  This file should contain ALL the input parameters
        necessary for processing.  If there is a filename present for this parameter, the
        'use_defaults_configs' parameter is ignored. The default is None.

    output_custom_pars_file: string, optional
        Fully specified output filename which contains all the configuration parameters
        available during the processing session.  The default is None.

    phot_mode : str, optional
        Which algorithm should be used to generate the sourcelists? 'aperture' for aperture (point) photometry;
        'segment' for isophotal photometry; 'both' for both 'segment' and 'aperture'. Default value is 'both'.

    log_level : int, optional
        The desired level of verboseness in the log statements displayed on the screen and written to the .log file.
        Default value is 20, or 'info'.


    RETURNS
    -------
    return_value: integer
        A return exit code used by the calling Condor/OWL workflow code: 0 (zero) for success, 1 for error
    """
    # This routine needs to return an exit code, return_value, for use by the calling
    # Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
    return_value = 0
    log.setLevel(log_level)
    # Define trailer file (log file) that will contain the log entries for all processing
    if isinstance(input_filename,
                  str):  # input file is a poller file -- easy case
        logname = input_filename.replace('.out', '.log')
    else:
        logname = 'mvm_process.log'

    # Initialize total trailer filename as temp logname
    logging.basicConfig(filename=logname,
                        format=SPLUNK_MSG_FORMAT,
                        datefmt=MSG_DATEFMT)
    # start processing
    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))
    total_obj_list = []
    product_list = []
    try:
        # Parse the MVM poller file and generate the obs_info_dict, as well as the total detection
        # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects
        # A poller file contains visit data for a single instrument.  The TotalProduct discriminant
        # is the detector.  A TotalProduct object is comprised of FilterProducts and ExposureProducts
        # where its FilterProduct is distinguished by the filter in use, and the ExposureProduct
        # is the atomic exposure data.
        log.info(
            "Parse the poller and determine what exposures need to be combined into separate products.\n"
        )
        obs_info_dict, total_obj_list = poller_utils.interpret_mvm_input(
            input_filename, log_level, layer_method='all')

        # Generate the name for the manifest file which is for the entire multi-visit.  It is fine
        # to use only one of the Total Products to generate the manifest name as the name is not
        # dependent on the detector.
        # Example: instrument_programID_obsetID_manifest.txt (e.g.,wfc3_b46_06_manifest.txt)
        manifest_name = total_obj_list[0].manifest_name
        log.info("\nGenerate the manifest name for this multi-visit.")
        log.info(
            "The manifest will contain the names of all the output products.")

        # The product_list is a list of all the output products which will be put into the manifest file
        product_list = []

        # Update the SkyCellProduct objects with their associated configuration information.
        for filter_item in total_obj_list:
            filter_item.generate_metawcs()
            filter_item.generate_footprint_mask()
            log.info(
                "Preparing configuration parameter values for filter product {}"
                .format(filter_item.drizzle_filename))
            filter_item.configobj_pars = config_utils.HapConfig(
                filter_item,
                log_level=log_level,
                use_defaults=use_defaults_configs,
                input_custom_pars_file=input_custom_pars_file,
                output_custom_pars_file=output_custom_pars_file)

        log.info(
            "The configuration parameters have been read and applied to the drizzle objects."
        )

        reference_catalog = run_align_to_gaia(total_obj_list,
                                              log_level=log_level,
                                              diagnostic_mode=diagnostic_mode)
        if reference_catalog:
            product_list += [reference_catalog]

        # Run AstroDrizzle to produce drizzle-combined products
        log.info("\n{}: Create drizzled imagery products.".format(
            str(datetime.datetime.now())))
        driz_list = create_drizzle_products(total_obj_list)
        product_list += driz_list

        # Store total_obj_list to a pickle file to speed up development
        if False:
            pickle_filename = "total_obj_list_full.pickle"
            if os.path.exists(pickle_filename):
                os.remove(pickle_filename)
            pickle_out = open(pickle_filename, "wb")
            pickle.dump(total_obj_list, pickle_out)
            pickle_out.close()
            log.info(
                "Successfully wrote total_obj_list to pickle file {}!".format(
                    pickle_filename))

        # Quality assurance portion of the processing - done only if the environment
        # variable, SVM_QUALITY_TESTING, is set to 'on', 'yes', or 'true'.
        qa_switch = _get_envvar_switch(envvar_qa_svm)

        # If requested, generate quality assessment statistics for the SVM products
        if qa_switch:
            log.info(
                "SVM Quality Assurance statistics have been requested for this dataset, {}."
                .format(input_filename))

            # Number of sources in Point and Segment catalogs
            total_catalog_list = [i for i in catalog_list if 'total' in i]
            fits_list = [i for i in driz_list if 'fits' in i]
            total_drizzle_list = [i for i in fits_list if 'total' in i]
            svm_qa.compare_num_sources(total_catalog_list,
                                       total_drizzle_list,
                                       log_level=log_level)

            # Get point/segment cross-match RA/Dec statistics
            for filter_obj in total_obj_list:
                svm_qa.compare_ra_dec_crossmatches(filter_obj,
                                                   log_level=log_level)

            # Identify the number of GAIA sources in final product footprints
            for filter_obj in total_obj_list:
                svm_qa.find_gaia_sources(filter_obj, log_level=log_level)

            # Photometry of cross-matched sources in Point and Segment catalogs for Filter products
            tot_len = len(total_obj_list)
            filter_drizzle_list = []
            temp_list = []
            for tot in total_obj_list:
                temp_list = [x.drizzle_filename for x in tot.fdp_list]
                filter_drizzle_list.extend(temp_list)
            svm_qa.compare_photometry(filter_drizzle_list, log_level=log_level)

        # 9: Compare results to HLA classic counterparts (if possible)
        # if diagnostic_mode:
        # run_sourcelist_comparison(total_obj_list, diagnostic_mode=diagnostic_mode, log_level=log_level)

        # Ensure the manifest file does not contain duplicate entries
        # Use of numpy.unique preserves the order of the entries in the product list
        product_list = np.unique(product_list).tolist()
        # Write out manifest file listing all products generated during processing
        log.info("Creating manifest file {}.".format(manifest_name))
        log.info(
            "  The manifest contains the names of products generated during processing."
        )
        with open(manifest_name, mode='w') as catfile:
            for name in product_list:
                catfile.write("{}\n".format(name))
        # 10: Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
        return_value = 0
    except Exception:
        return_value = 1
        print("\a\a\a")
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
        logging.exception("message")

    finally:
        end_dt = datetime.datetime.now()
        log.info('Processing completed at {}'.format(str(end_dt)))
        log.info('Total processing time: {} sec'.format(
            (end_dt - starting_dt).total_seconds()))
        log.info(
            "Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success, 1 for error "
        )
        log.info("Return condition {}".format(return_value))
        logging.shutdown()
        # Append total trailer file (from astrodrizzle) to all total log files
        if total_obj_list:
            for tot_obj in total_obj_list:
                proc_utils.append_trl_file(tot_obj.trl_filename,
                                           logname,
                                           clean=False)
        # Now remove single temp log file
        if os.path.exists(logname):
            os.remove(logname)
        else:
            print(
                "Master log file not found.  Please check logs to locate processing messages."
            )
        return return_value
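
A hedged example of how this sequencer might be driven; the poller file name below is a placeholder, and the snippet assumes run_mvm_processing has been imported from its host module rather than being run in-place:

import logging

# Placeholder poller file name; a real run would use the file produced
# for the multi-visit being processed.
return_code = run_mvm_processing('skycell_input.out',
                                 diagnostic_mode=False,
                                 phot_mode='both',
                                 log_level=logging.INFO)

# 0 means success; 1 means an exception was trapped and logged before
# logging.shutdown() ran in the finally block.
print('exit code:', return_code)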
Example #52
 def test_do_send_empty(self):
     self.create('*****@*****.**', ('*****@*****.**',))
     logging.shutdown()
     self.assertEqual(len(DummySMTP.sent), 1)
Example #53
File: logs.py Project: ipanova/pulp
def stop_logging():
    """
    Stop Pulp's logging.
    """
    # flush and close all handlers registered with the logging module
    logging.shutdown()
Example #54
def start(cron=True):
    """ Start server  """

    # ## get command line arguments

    (options, args) = console()

    if not options.nobanner:
        print ProgramName
        print ProgramAuthor
        print ProgramVersion

    from dal import drivers
    if not options.nobanner:
        print 'Database drivers available: %s' % ', '.join(drivers)

    # ## if -L load options from options.config file
    if options.config:
        try:
            options2 = __import__(options.config, {}, {}, '')
        except Exception:
            try:
                # Jython doesn't like the extra stuff
                options2 = __import__(options.config)
            except Exception:
                print 'Cannot import config file [%s]' % options.config
                sys.exit(1)
        for key in dir(options2):
            if hasattr(options, key):
                setattr(options, key, getattr(options2, key))

    # ## if -T run doctests (no cron)
    if hasattr(options, 'test') and options.test:
        test(options.test, verbose=options.verbose)
        return

    # ## if -S start interactive shell (also no cron)
    if options.shell:
        if options.args != None:
            sys.argv[:] = options.args
        run(options.shell,
            plain=options.plain,
            bpython=options.bpython,
            import_models=options.import_models,
            startfile=options.run)
        return

    # ## if -C start cron run (extcron) and exit
    # ## if -N or not cron disable cron in this *process*
    # ## if --softcron use softcron
    # ## use hardcron in all other cases
    if options.extcron:
        print 'Starting extcron...'
        global_settings.web2py_crontype = 'external'
        extcron = newcron.extcron(options.folder)
        extcron.start()
        extcron.join()
        return
    elif cron and not options.nocron and options.softcron:
        print 'Using softcron (but this is not very efficient)'
        global_settings.web2py_crontype = 'soft'
    elif cron and not options.nocron:
        print 'Starting hardcron...'
        global_settings.web2py_crontype = 'hard'
        newcron.hardcron(options.folder).start()

    # ## if -W install/start/stop web2py as service
    if options.winservice:
        if os.name == 'nt':
            web2py_windows_service_handler(['', options.winservice],
                                           options.config)
        else:
            print 'Error: Windows services not supported on this platform'
            sys.exit(1)
        return

    # ## if no password provided and havetk start Tk interface
    # ## or start interface if we want to put in taskbar (system tray)

    try:
        options.taskbar
    except:
        options.taskbar = False

    if options.taskbar and os.name != 'nt':
        print 'Error: taskbar not supported on this platform'
        sys.exit(1)

    root = None

    if not options.nogui:
        try:
            import Tkinter
            havetk = True
        except ImportError:
            logger.warn(
                'GUI not available because Tk library is not installed')
            havetk = False

        if options.password == '<ask>' and havetk or options.taskbar and havetk:
            try:
                root = Tkinter.Tk()
            except:
                pass

    if root:
        root.focus_force()
        if not options.quiet:
            presentation(root)
        master = web2pyDialog(root, options)
        signal.signal(signal.SIGTERM, lambda a, b: master.quit())

        try:
            root.mainloop()
        except:
            master.quit()

        sys.exit()

    # ## if no tk and no password, ask for a password

    if not root and options.password == '<ask>':
        options.password = raw_input('choose a password:')

    if not options.password:
        print 'no password, no admin interface'

    # ## start server

    (ip, port) = (options.ip, int(options.port))

    if not options.nobanner:
        print 'please visit:'
        print '\thttp://%s:%s' % (ip, port)
        print 'use "kill -SIGTERM %i" to shutdown the web2py server' % os.getpid(
        )

    server = main.HttpServer(ip=ip,
                             port=port,
                             password=options.password,
                             pid_filename=options.pid_filename,
                             log_filename=options.log_filename,
                             profiler_filename=options.profiler_filename,
                             ssl_certificate=options.ssl_certificate,
                             ssl_private_key=options.ssl_private_key,
                             min_threads=options.minthreads,
                             max_threads=options.maxthreads,
                             server_name=options.server_name,
                             request_queue_size=options.request_queue_size,
                             timeout=options.timeout,
                             shutdown_timeout=options.shutdown_timeout,
                             path=options.folder,
                             interfaces=options.interfaces)

    try:
        server.start()
    except KeyboardInterrupt:
        server.stop()
    logging.shutdown()
Example #55
 def end():
     """End the program."""
     logging.info('..Done')
     logging.shutdown()
     sys.exit()
Example #56
 def shutdown():
     """
     Shut down the logger
     """
     logging.shutdown()
Example #57
def main():
    ret = 0

    parser = argparse.ArgumentParser(description='project mirroring',
                                     parents=[get_baseparser(
                                         tool_version=__version__)
                                     ])

    parser.add_argument('project', nargs='*', default=None)
    parser.add_argument('-a', '--all', action='store_true',
                        help='mirror all indexed projects', default=False)
    parser.add_argument('-c', '--config',
                        help='config file in JSON/YAML format')
    parser.add_argument('-U', '--uri', default='http://localhost:8080/source',
                        help='uri of the webapp with context path')
    parser.add_argument('-b', '--batch', action='store_true',
                        help='batch mode - will log into a file')
    parser.add_argument('-B', '--backupcount', default=8,
                        help='how many log files to keep around in batch mode')
    parser.add_argument('-I', '--incoming', action='store_true',
                        help='Check for incoming changes, terminate the '
                             'processing if not found.')
    parser.add_argument('-w', '--workers', default=cpu_count(),
                        help='Number of worker processes')

    try:
        args = parser.parse_args()
    except ValueError as e:
        fatal(e)

    logger = get_console_logger(get_class_basename(), args.loglevel)

    if len(args.project) > 0 and args.all:
        fatal("Cannot use both project list and -a/--all")

    if not args.all and len(args.project) == 0:
        fatal("Need at least one project or --all")

    if args.config:
        config = read_config(logger, args.config)
        if config is None:
            fatal("Cannot read config file from {}".format(args.config))
    else:
        config = {}

    uri = args.uri
    if not is_web_uri(uri):
        fatal("Not a URI: {}".format(uri))
    logger.debug("web application URI = {}".format(uri))

    if not check_configuration(config):
        sys.exit(1)

    # Save the source root to avoid querying the web application.
    source_root = get_config_value(logger, 'sourceRoot', uri)
    if not source_root:
        sys.exit(1)

    logger.debug("Source root = {}".format(source_root))

    hookdir = config.get(HOOKDIR_PROPERTY)
    if hookdir:
        logger.debug("Hook directory = {}".format(hookdir))

    command_timeout = get_int(logger, "command timeout",
                              config.get(CMD_TIMEOUT_PROPERTY))
    if command_timeout:
        logger.debug("Global command timeout = {}".format(command_timeout))

    hook_timeout = get_int(logger, "hook timeout",
                           config.get(HOOK_TIMEOUT_PROPERTY))
    if hook_timeout:
        logger.debug("Global hook timeout = {}".format(hook_timeout))

    # Log messages to a dedicated log file if running in batch mode.
    logdir = None
    if args.batch:
        logdir = config.get(LOGDIR_PROPERTY)
        if not logdir:
            fatal("The {} property is required in batch mode".
                  format(LOGDIR_PROPERTY))

    projects = args.project
    if len(projects) == 1:
        lockfile = projects[0] + "-mirror"
    else:
        lockfile = os.path.basename(sys.argv[0])

    if args.all:
        projects = list_indexed_projects(logger, args.uri)

    lock = FileLock(os.path.join(tempfile.gettempdir(), lockfile + ".lock"))
    try:
        with lock.acquire(timeout=0):
            with Pool(processes=int(args.workers)) as pool:
                worker_args = []
                for x in projects:
                    worker_args.append([x, logdir, args.loglevel,
                                        args.backupcount, config,
                                        args.incoming,
                                        args.uri, source_root,
                                        args.batch])
                print(worker_args)
                try:
                    project_results = pool.map(worker, worker_args, 1)
                except KeyboardInterrupt:
                    sys.exit(1)
                else:
                    if any([True for x in project_results if x == 1]):
                        ret = 1
    except Timeout:
        logger.warning("Already running, exiting.")
        sys.exit(1)

    logging.shutdown()
    sys.exit(ret)
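
A small sketch of the single-instance pattern used above, separate from the project code: a FileLock acquired with timeout=0 fails immediately when another copy of the script already holds the lock, and logging.shutdown() still runs on the way out. The lock file name here is arbitrary:

import logging
import os
import sys
import tempfile

from filelock import FileLock, Timeout

lock = FileLock(os.path.join(tempfile.gettempdir(), 'my-mirror.lock'))
try:
    with lock.acquire(timeout=0):
        logging.info('lock acquired, doing the actual work')
except Timeout:
    logging.warning('Already running, exiting.')
    sys.exit(1)
finally:
    logging.shutdown()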
Example #58
 def teardown_class(cls):
     logging.shutdown()
Example #59
def metric_cleanup():
	logging.shutdown()
Example #60
 def restore_io():
     logging.shutdown()