def main():
    """Program entry point.

    Parses command-line arguments, configures logging, loads the scanner
    config, then polls the NMEAd JSON file in a loop, feeding each decoded
    position to the session until the global `stopped` flag is set.
    """
    apr = argparse.ArgumentParser()
    apr.add_argument("--logfile",
                     help=("Log file location (defaults to %s)" % LOGFILE_LOCATION))
    apr.add_argument("--config",
                     help=("Config file location (defaults to %s)" % CONFIG_LOCATION))
    app = apr.parse_args()

    logfile = app.logfile or LOGFILE_LOCATION
    configf = app.config or CONFIG_LOCATION

    try:
        logging.basicConfig(
            format=("%(asctime)s [%(levelname)s]"
                    " -\t%(message)s"),
            filename=logfile,
            level=logging.INFO
        )
    except (IOError, OSError):
        # BUG FIX: was a bare `except:` that swallowed *every* exception
        # (even KeyboardInterrupt/SystemExit) and exited silently. Narrowed
        # to file-open errors and a diagnostic is emitted before exiting.
        sys.stderr.write("SBScan: cannot open log file %s\n" % logfile)
        sys.exit()

    logging.info("SBScan started!")

    try:
        config = load_config(configf)
    except CfgErr:
        logging.critical(str(sys.exc_info()[1]))
        sys.exit()
    else:
        logging.info("Loaded config file.")

    params = dict(config.items("scanner"))

    with Session(params) as session:
        try:
            with open(params["nmea_file"]) as f:
                logging.info("Established connection to NMEAd.")
                while not stopped:
                    with file_lock(f) as lock:
                        if not lock:
                            # Lock is held by the writer; back off briefly.
                            time.sleep(LOCK_SLEEP_TIME)
                            continue
                        try:
                            f.seek(0)
                            json_file = json.load(f)
                        except ValueError:
                            # Partial/garbled JSON (writer mid-update): retry.
                            continue
                        # BUG FIX: replaced Python 2 debug leftover
                        # `print "Check!"` with a proper logging call.
                        logging.debug("Check!")
                        session.check_add_position(json_file)
                    # Sleep outside the lock so the writer isn't starved
                    # for the whole polling interval.
                    time.sleep(float(params["polling_interval"]))
        except IOError as err:  # BUG FIX: py2 `except IOError, err` syntax
            if err.errno == errno.ENOENT:
                logging.critical(
                    (
                        "Failed to establish connection to "
                        "NMEAd through %s. Is NMEAd running? "
                        "Does SBScan have the "
                        "required permissions?"
                    ),
                    params["nmea_file"],
                )
                sys.exit()
            else:
                raise
def repeat(self):
    """Run one writer iteration.

    If the shared struct was flagged as updated, take a snapshot of it
    under its lock; then, while holding an exclusive file lock, flush the
    pending snapshot to the output file. Sleeps WRITER_SLEEP_TIME at the
    end of every iteration.
    """
    updated = self.struct.updated.is_set()
    # Non-blocking acquire: if the producer holds the lock right now,
    # skip the copy this round and try again on the next iteration.
    if updated and self.struct.lock.acquire(False):
        self.structcpy = self.struct.struct.copy()
        self.struct.lock.release()
        self.struct.updated.clear()
        logging.debug('Copied struct!')
    if self.structcpy is not None:
        with file_lock(self.file, exclusive=True) as lock_established:
            if lock_established:
                self.file.truncate(0)
                self.file.seek(0)
                # BUG FIX: dump the snapshot taken under the lock, not the
                # live `self.struct.struct`, which may be mutated
                # concurrently while we write (the copy was never used).
                json.dump(self.structcpy, self.file)
                self.file.flush()
                self.structcpy = None
                logging.debug('Written struct to file.')
    time.sleep(WRITER_SLEEP_TIME)
under certain conditions; type `info gpl' for details. """) parser.print_help() sys.exit(11) if cfg.log_file: log_file = cfg.log_file else: log_file = "/tmp/{}.log".format (cfg.cluster_name) if cfg.lock_dir: lock_filename = cfg.lock_dir + '/' + os.path.basename (__file__).split('.')[0] + '.lock' else: lock_filename = '/dev/shm/' + os.path.basename (__file__).split('.')[0] + '.lock' lock = fl.file_lock(lock_filename) with lock: time_logger_handler = TimedRotatingFileHandler(filename=log_file, when='D', # 'H' Hours 'D' Days interval=1, backupCount=0, encoding=None, utc=False) logging.basicConfig(level=cfg.loglevel, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S', handlers=[time_logger_handler]) ssh_login = None if cfg.ssh_login: ssh_login = cfg.ssh_login nodes = patt.to_nodes (cfg.nodes, ssh_login, cfg.ssh_keyfile)
def lock(self):
    """Context-manager generator: hold the file lock for the duration
    of the caller's ``with`` block, releasing it on exit."""
    lock_path = self._lock_file
    with file_lock(lock_path):
        logger.debug("acquired lock")
        # Hand control back to the caller while the lock is held.
        yield