def run(self):
    """Collect queued S.M.A.R.T. messages and return them as one CRIT alert.

    :returns: list of Alert objects (empty if the state file is missing
        or the lock cannot be acquired within 5 seconds)
    """
    alerts = []
    if not os.path.exists(SMART_FILE):
        return alerts
    lock = LockFile(SMART_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            # Give up rather than block the alert system.
            return alerts
    with open(SMART_FILE, 'rb') as f:
        try:
            data = pickle.loads(f.read())
        except Exception:
            # Was a bare except: narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed. Corrupt/empty pickle -> no messages.
            data = {}
    msg = ''
    for msgs in data.itervalues():
        if not msgs:
            continue
        msg += '<br />\n'.join(msgs)
    if msg:
        alerts.append(Alert(Alert.CRIT, msg))
    lock.release()
    return alerts
def check_sync(self):
    """Translate the pickled collectd notification data into Alert objects."""
    if not os.path.exists(COLLECTD_FILE):
        return
    lock = LockFile(COLLECTD_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            # Could not get exclusive access; skip this cycle.
            return
    with open(COLLECTD_FILE, "rb") as handle:
        try:
            payload = pickle.loads(handle.read())
        except Exception:
            payload = {}
    lock.release()
    alerts = []
    for key, info in list(payload.items()):
        if key == "ctl-ha/disk_octets":
            title = "CTL HA link is actively used, check initiators connectivity"
        else:
            title = key
        level = (AlertLevel.WARNING if info["Severity"] == "WARNING"
                 else AlertLevel.CRITICAL)
        alerts.append(Alert(title, level=level))
    return alerts
def write_relay_log(relayNumber, relaySeconds, sensor, gpio):
    """Append one relay-activation record to the temporary relay log.

    Access is serialized via a lock file; a lock that cannot be acquired
    within 60 seconds is broken and re-acquired.

    :param relayNumber: relay identifier written to the record
    :param relaySeconds: how long the relay was on, in seconds
    :param sensor: id of the sensor that triggered the relay
    :param gpio: GPIO pin number of the relay
    """
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    lock = LockFile(relay_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Relay Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except Exception:
            # Was a bare except: narrowed so SystemExit/KeyboardInterrupt
            # still propagate.
            logging.warning("[Write Relay Log] Breaking Lock to Acquire: %s",
                            lock.path)
            lock.break_lock()
            lock.acquire()
    logging.debug("[Write Relay Log] Gained lock: %s", lock.path)
    try:
        with open(relay_log_file_tmp, "ab") as relaylog:
            relaylog.write('{0} {1:d} {2:d} {3:d} {4:.2f}\n'.format(
                datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                sensor, relayNumber, gpio, relaySeconds))
    except Exception:
        logging.warning("[Write Relay Log] Unable to append data to %s",
                        relay_log_file_tmp)
    logging.debug("[Write Relay Log] Removing lock: %s", lock.path)
    lock.release()
def write_ht_sensor_log(sensor_ht_read_temp_c, sensor_ht_read_hum,
                        sensor_ht_dewpt_c, sensor):
    """Append a temperature/humidity/dewpoint reading to the tmpfs sensor log.

    :param sensor_ht_read_temp_c: per-sensor temperatures (deg C), indexed by sensor
    :param sensor_ht_read_hum: per-sensor relative humidities, indexed by sensor
    :param sensor_ht_dewpt_c: per-sensor dewpoints (deg C), indexed by sensor
    :param sensor: index of the sensor being logged
    """
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    lock = LockFile(sensor_ht_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except Exception:
            # Was a bare except: narrowed to Exception.
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s",
                            lock.path)
            lock.break_lock()
            lock.acquire()
    logging.debug("[Write Sensor Log] Gained lock: %s", lock.path)
    try:
        with open(sensor_ht_log_file_tmp, "ab") as sensorlog:
            sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f} {4:d}\n'.format(
                datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                sensor_ht_read_temp_c[sensor],
                sensor_ht_read_hum[sensor],
                sensor_ht_dewpt_c[sensor],
                sensor))
        logging.debug("[Write Sensor Log] Data appended to %s",
                      sensor_ht_log_file_tmp)
    except Exception:
        logging.warning("[Write Sensor Log] Unable to append data to %s",
                        sensor_ht_log_file_tmp)
    logging.debug("[Write Sensor Log] Removing lock: %s", lock.path)
    lock.release()
def main():
    """Record the smartd notification (from environment variables) in SMART_FILE.

    Reads SMARTD_DEVICE / SMARTD_MESSAGE from the environment and appends
    the message to the device's list in the shared pickle, de-duplicating.
    """
    lock = LockFile(SMART_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            # Assume a stale lock and steal it; the loop re-checks ownership.
            lock.break_lock()
    data = {}
    if os.path.exists(SMART_FILE):
        with open(SMART_FILE, 'rb') as f:
            try:
                data = pickle.loads(f.read())
            except Exception:
                # Was a bare except; corrupt/empty pickle -> start fresh.
                pass
    device = os.environ.get('SMARTD_DEVICE')
    if device not in data:
        data[device] = []
    message = os.environ.get('SMARTD_MESSAGE')
    if message not in data[device]:
        data[device].append(message)
    with open(SMART_FILE, 'wb') as f:
        f.write(pickle.dumps(data))
    lock.release()
def read(self):
    """
    Takes a reading from the K30 and updates the self._co2 value

    :returns: None on success or 1 on error
    """
    lock = LockFile(K30_LOCK_FILE)
    try:
        # Only one reader may talk to the K30 at a time; break a lock
        # that has been held for more than 60 seconds.
        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=60)
            except Exception as e:
                logger.error("{cls} 60 second timeout, {lock} lock broken: "
                             "{err}".format(cls=type(self).__name__,
                                            lock=K30_LOCK_FILE,
                                            err=e))
                lock.break_lock()
                lock.acquire()
        self._co2 = self.get_measurement()
        lock.release()
        return 1 if self._co2 is None else None
    except Exception as e:
        logger.error("{cls} raised an exception when taking a reading: "
                     "{err}".format(cls=type(self).__name__, err=e))
        lock.release()
        return 1
def write_relay_log(relayNumber, relaySeconds):
    """Append the state of all eight relays (only the triggered one non-zero)
    to the temporary relay log.

    :param relayNumber: relay (1-8) that was activated
    :param relaySeconds: duration the relay was on, in seconds
    """
    # NOTE: removed the unused `config = ConfigParser.RawConfigParser()`
    # local that was never referenced.
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    if not Terminate:
        lock = LockFile(relay_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Write Relay Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except Exception:
                # Was a bare except: narrowed to Exception.
                logging.warning("[Write Relay Log] Breaking Lock to Acquire: %s",
                                lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Write Relay Log] Gained lock: %s", lock.path)
        relay = [0] * 9
        # Equivalent to the original scan over range(1, 9): record the
        # duration only for the relay that fired.
        if 1 <= relayNumber <= 8:
            relay[relayNumber] = relaySeconds
        try:
            with open(relay_log_file_tmp, "ab") as relaylog:
                relaylog.write('{0} {1} {2} {3} {4} {5} {6} {7} {8}\n'.format(
                    datetime.datetime.now().strftime("%Y %m %d %H %M %S"),
                    relay[1], relay[2], relay[3], relay[4],
                    relay[5], relay[6], relay[7], relay[8]))
        except Exception:
            logging.warning("[Write Relay Log] Unable to append data to %s",
                            relay_log_file_tmp)
        logging.info("[Write Relay Log] Removing lock: %s", lock.path)
        lock.release()
def write_sensor_log():
    """Append the current temperature/humidity/dewpoint globals to the
    temporary sensor log, serialized by a lock file."""
    # NOTE: removed the unused `config = ConfigParser.RawConfigParser()`
    # local that was never referenced.
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    if not Terminate:
        lock = LockFile(sensor_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Write Sensor Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except Exception:
                # Was a bare except: narrowed to Exception.
                logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s",
                                lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Write Sensor Log] Gained lock: %s", lock.path)
        try:
            with open(sensor_log_file_tmp, "ab") as sensorlog:
                sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f}\n'.format(
                    datetime.datetime.now().strftime("%Y %m %d %H %M %S"),
                    tempc, humidity, dewpointc))
            logging.info("[Write Sensor Log] Data appended to %s",
                         sensor_log_file_tmp)
        except Exception:
            logging.warning("[Write Sensor Log] Unable to append data to %s",
                            sensor_log_file_tmp)
        logging.info("[Write Sensor Log] Removing lock: %s", lock.path)
        lock.release()
def main(username, password):
    """Start spoppy: take the singleton lock, resolve credentials, run the UI."""
    # Ignore error, logging set up in logging utils
    from . import logging_utils
    from .navigation import Leifur
    from .config import get_config, set_config, get_config_from_user
    lock = LockFile('/tmp/spoppy')
    try:
        # Try for 5s to acquire the lock
        lock.acquire(5)
    except LockTimeout:
        click.echo('Could not acquire lock, is spoppy running?')
        click.echo(
            'If you\'re sure that spoppy is not running, '
            'try removing the lock file %s' % lock.lock_file
        )
    else:
        # Prefer credentials from the command line, then config, then prompt.
        if username and password:
            set_config(username, password)
        else:
            username, password = get_config()
        if not (username and password):
            username, password = get_config_from_user()
        nav = None
        try:
            nav = Leifur(username, password)
            nav.start()
        finally:
            if nav:
                nav.shutdown()
            logger.debug('Finally, bye!')
    finally:
        if lock.i_am_locking():
            lock.release()
def run(self):
    """Return one critical hardware Alert per recorded S.M.A.R.T. message.

    :returns: list of Alert objects (empty if the state file is missing
        or the lock cannot be acquired within 5 seconds)
    """
    alerts = []
    if not os.path.exists(SMART_FILE):
        return alerts
    lock = LockFile(SMART_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            return alerts
    with open(SMART_FILE, 'rb') as f:
        try:
            data = pickle.loads(f.read())
        except Exception:
            # Was a bare except: narrowed so SystemExit/KeyboardInterrupt
            # still propagate. Corrupt/empty pickle -> no messages.
            data = {}
    for msgs in data.itervalues():
        if not msgs:
            continue
        for msg in msgs:
            if msg is None:
                continue
            alerts.append(Alert(Alert.CRIT, msg, hardware=True))
    lock.release()
    return alerts
class MCP342x_read(object):
    """Read one channel of an MCP342x ADC over I2C, serialized per-device
    by a lock file under /var/lock."""

    def __init__(self, logger, address, channel, gain, resolution):
        self.logger = logger
        self.i2c_address = address
        self.channel = channel
        self.gain = gain
        self.resolution = resolution
        # Pi board revisions 2 and 3 expose I2C on bus 1; older boards use bus 0.
        if GPIO.RPI_INFO['P1_REVISION'] in [2, 3]:
            self.I2C_bus_number = 1
        else:
            self.I2C_bus_number = 0
        self.bus = smbus.SMBus(self.I2C_bus_number)
        self.lock_file = "/var/lock/mycodo_adc_0x{:02X}.pid".format(
            self.i2c_address)

    def setup_lock(self):
        """Acquire the per-device lock, breaking a stale lock after 60s.

        :returns: (1, "Success") or (0, error message)
        """
        self.execution_timer = timeit.default_timer()
        try:
            self.lock = LockFile(self.lock_file)
            while not self.lock.i_am_locking():
                try:
                    self.logger.debug(
                        "[Analog->Digital Converter 0x{:02X}] Acquiring Lock: {}"
                        .format(self.i2c_address, self.lock.path))
                    self.lock.acquire(timeout=60)  # wait up to 60 seconds
                except Exception:
                    # Was a bare except: narrowed so SystemExit and
                    # KeyboardInterrupt are no longer trapped here.
                    self.logger.warning(
                        "[Analog->Digital Converter 0x{:02X}] Waited 60 seconds. Breaking lock to acquire {}"
                        .format(self.i2c_address, self.lock.path))
                    self.lock.break_lock()
                    self.lock.acquire()
            self.logger.debug(
                "[Analog->Digital Converter 0x{:02X}] Acquired Lock: {}".
                format(self.i2c_address, self.lock.path))
            self.logger.debug(
                "[Analog->Digital Converter 0x{:02X}] Executed in {}ms".format(
                    self.i2c_address,
                    (timeit.default_timer() - self.execution_timer) * 1000))
            return 1, "Success"
        except Exception as msg:
            return 0, "Analog->Digital Converter Fail: {}".format(msg)

    def release_lock(self):
        """Release the lock acquired by setup_lock()."""
        self.lock.release()

    def read(self):
        """Perform one conversion on the configured channel.

        :returns: (1, value) on success or (0, error message)
        """
        try:
            time.sleep(0.1)
            self.setup_lock()
            adc = MCP342x(self.bus, self.i2c_address,
                          channel=self.channel - 1,
                          gain=self.gain,
                          resolution=self.resolution)
            response = adc.convert_and_read()
            self.release_lock()
            return 1, response
        except Exception as msg:
            self.release_lock()
            return 0, "Fail: {}".format(msg)
def release(self):
    """
    Release the lock file if this process is the one currently holding it.
    """
    handle = LockFile(self.lockfile)
    if handle.i_am_locking():
        handle.release()
def run_function_with_lock(self, function, lock_file, timeout=30,
                           args=None, kwargs=None):
    """Execute *function* while holding an exclusive file lock.

    A lock that cannot be acquired within *timeout* seconds is broken and
    re-acquired. The lock is always released (or its file removed) afterwards.

    :param function: callable to run once the lock is held
    :param lock_file: path of the lock file used for mutual exclusion
    :param timeout: seconds to wait for the lock before breaking it
    :param args: positional arguments for *function*
    :param kwargs: keyword arguments for *function*
    :returns: whatever *function* returns
    """
    # Fixed mutable default arguments (args=[], kwargs={}) which are
    # shared across calls in Python.
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    self.logger.debug('starting function with lock: %s' % lock_file)
    lock = LockFile(lock_file)
    try:
        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=timeout)
            except (LockTimeout, NotMyLock) as e:
                self.logger.debug('breaking lock')
                lock.break_lock()
                lock.acquire()
                self.logger.exception(e)
        self.logger.debug('lock acquired: starting function')
        return function(*args, **kwargs)
    finally:
        self.logger.debug('function done, releasing lock')
        if lock.is_locked():
            try:
                lock.release()
            except NotMyLock:
                # Another process stole the lock; best effort cleanup.
                try:
                    os.remove(lock_file)
                except Exception as e:
                    self.logger.exception(e)
        self.logger.debug('lock released')
def run(self):
    """Translate pickled collectd notifications into Alert objects.

    :returns: list of Alert objects (empty if the state file is missing
        or the lock cannot be acquired within 5 seconds)
    """
    alerts = []
    if not os.path.exists(COLLECTD_FILE):
        return alerts
    lock = LockFile(COLLECTD_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            return alerts
    with open(COLLECTD_FILE, 'rb') as f:
        try:
            data = pickle.loads(f.read())
        except Exception:
            # Was a bare except: narrowed to Exception.
            data = {}
    lock.release()
    for k, v in list(data.items()):
        if v['Severity'] == 'WARNING':
            l = Alert.WARN
        else:
            l = Alert.CRIT
        if k == 'ctl-ha/disk_octets':
            msg = "CTL HA link is actively used, check initiators connectivity"
        else:
            msg = k
        alerts.append(Alert(l, msg))
    return alerts
def run(self):
    """Parse CLI arguments, configure logging, and dispatch under a file lock."""
    self.set_arguments()
    cli_args = self.parser.parse_args()
    # Map the mutually exclusive verbosity flags onto a logging level.
    if cli_args.verbose:
        log_level = logging.DEBUG
    elif cli_args.quiet:
        log_level = logging.WARNING
    else:
        log_level = logging.INFO
    set_logging(level=log_level)
    lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE))
    try:
        lock.acquire(timeout=-1)
        cli_args.func(cli_args)
    except AttributeError:
        # No sub-command chosen means args.func is absent: show usage.
        if hasattr(cli_args, 'func'):
            raise
        self.parser.print_help()
    except KeyboardInterrupt:
        pass
    except AlreadyLocked:
        logger.error("Could not proceed - there is probably another instance of Atomic App running on this machine.")
    except Exception as ex:
        if cli_args.verbose:
            raise
        logger.error("Exception caught: %s", repr(ex))
        logger.error(
            "Run the command again with -v option to get more information.")
    finally:
        if lock.i_am_locking():
            lock.release()
def write_ht_sensor_log(sensor_ht_read_temp_c, sensor_ht_read_hum,
                        sensor_ht_dewpt_c, sensor):
    """Append a temperature/humidity/dewpoint reading to the tmpfs sensor log.

    :param sensor_ht_read_temp_c: per-sensor temperatures (deg C), indexed by sensor
    :param sensor_ht_read_hum: per-sensor relative humidities, indexed by sensor
    :param sensor_ht_dewpt_c: per-sensor dewpoints (deg C), indexed by sensor
    :param sensor: index of the sensor being logged
    """
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    lock = LockFile(sensor_ht_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except Exception:
            # Was a bare except: narrowed to Exception.
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s",
                            lock.path)
            lock.break_lock()
            lock.acquire()
    logging.debug("[Write Sensor Log] Gained lock: %s", lock.path)
    try:
        with open(sensor_ht_log_file_tmp, "ab") as sensorlog:
            sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f} {4}\n'.format(
                datetime.datetime.now().strftime("%Y %m %d %H %M %S"),
                sensor_ht_read_temp_c[sensor],
                sensor_ht_read_hum[sensor],
                sensor_ht_dewpt_c[sensor],
                sensor))
        logging.debug("[Write Sensor Log] Data appended to %s",
                      sensor_ht_log_file_tmp)
    except Exception:
        logging.warning("[Write Sensor Log] Unable to append data to %s",
                        sensor_ht_log_file_tmp)
    logging.debug("[Write Sensor Log] Removing lock: %s", lock.path)
    lock.release()
def query(self, query_str):
    """
    Send command and return reply
    """
    lock_file_amend = '{lf}.{dev}'.format(
        lf=ATLAS_PH_LOCK_FILE,
        dev=self.serial_device.replace("/", "-"))
    lock = LockFile(lock_file_amend)
    try:
        # One reader at a time per serial device; a lock held longer
        # than 10 seconds is broken and re-acquired.
        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=10)
            except Exception as e:
                logger.exception(
                    "{cls} 10 second timeout, {lock} lock broken: "
                    "{err}".format(cls=type(self).__name__,
                                   lock=lock_file_amend,
                                   err=e))
                lock.break_lock()
                lock.acquire()
        self.send_cmd(query_str)
        time.sleep(1.3)
        reply = self.read_lines()
        lock.release()
        return reply
    except Exception as err:
        logger.exception(
            "{cls} raised an exception when taking a reading: "
            "{err}".format(cls=type(self).__name__, err=err))
        lock.release()
        return None
def read(self):
    """
    Takes a reading from the MH-Z19 and updates the self._co2 value

    :returns: None on success or 1 on error
    """
    if not self.serial_device:
        # Don't measure if device isn't validated
        return None
    lock = LockFile(self.k30_lock_file)
    try:
        # Only one reader may talk to the sensor at a time; break a lock
        # held for more than 60 seconds.
        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=60)
            except Exception as e:
                self.logger.error(
                    "{cls} 60 second timeout, {lock} lock broken: "
                    "{err}".format(cls=type(self).__name__,
                                   lock=self.k30_lock_file,
                                   err=e))
                lock.break_lock()
                lock.acquire()
        self._co2 = self.get_measurement()
        lock.release()
        return 1 if self._co2 is None else None
    except Exception as e:
        self.logger.error(
            "{cls} raised an exception when taking a reading: "
            "{err}".format(cls=type(self).__name__, err=e))
        lock.release()
        return 1
def run(self):
    """Normalize the command line, parse it, and dispatch under a file lock."""
    argv = sys.argv[1:]
    # Inside an openshift pod (via `oc new-app`) there is no real command
    # line; default to "atomicapp run" against the bundled application.
    if Utils.running_on_openshift():
        argv = 'run -v --dest=none /{}'.format(APP_ENT_PATH).split()
    # argparse subparsers require the 'action' keyword before any options,
    # but we accept options anywhere: find the action (letting "mode"
    # override it) and move the keyword to the front of the line.
    known, _ = self.parser.parse_known_args(argv)
    argv.remove(known.action)
    if known.mode:
        known.action = known.mode  # Allow mode to override 'action'
    argv.insert(0, known.action)
    logger.info("Action/Mode Selected is: %s" % known.action)
    # Finally, parse args and give error if necessary
    args = self.parser.parse_args(argv)
    # Set logging level from the verbosity flags.
    if args.verbose:
        set_logging(level=logging.DEBUG)
    elif args.quiet:
        set_logging(level=logging.WARNING)
    else:
        set_logging(level=logging.INFO)
    lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE))
    try:
        lock.acquire(timeout=-1)
        args.func(args)
    except AttributeError:
        # Missing args.func means no sub-command was chosen: show usage.
        if hasattr(args, 'func'):
            raise
        self.parser.print_help()
    except KeyboardInterrupt:
        pass
    except AlreadyLocked:
        logger.error("Could not proceed - there is probably another instance of Atomic App running on this machine.")
    except Exception as ex:
        if args.verbose:
            raise
        logger.error("Exception caught: %s", repr(ex))
        logger.error(
            "Run the command again with -v option to get more information.")
    finally:
        if lock.i_am_locking():
            lock.release()
def main(username, password):
    """Start spoppy, guarding against concurrent instances with a lock file."""
    # Ignore error, logging set up in logging utils
    from . import logging_utils
    from .navigation import Leifur
    from .config import get_config, set_config, get_config_from_user
    from .connectivity import check_internet_connection
    from .update_checker import check_for_updates
    lock = LockFile('/tmp/spoppy')
    try:
        try:
            # Try for 1s to acquire the lock
            lock.acquire(1)
        except LockTimeout:
            click.echo('Could not acquire lock, is spoppy running?')
            click.echo(
                'If you\'re sure that spoppy is not running, '
                'try removing the lock file %s' % lock.lock_file
            )
            click.echo(
                'You can try removing the lock file by responding [rm]. '
                'spoppy will exit on all other inputs'
            )
            try:
                answer = raw_input('')
            except NameError:
                # Python 3 has no raw_input
                answer = input('')
            if answer == 'rm':
                lock.break_lock()
            else:
                raise TypeError('Could not get lock')
    except TypeError:
        # User declined to break the lock: exit quietly.
        pass
    else:
        check_internet_connection()
        # Check for updates
        check_for_updates(click, get_version(), lock)
        if username and password:
            set_config(username, password)
        else:
            username, password = get_config()
        if not (username and password):
            username, password = get_config_from_user()
        nav = None
        try:
            nav = Leifur(username, password)
            nav.start()
        finally:
            if nav:
                nav.shutdown()
            logger.debug('Finally, bye!')
    finally:
        if lock.i_am_locking():
            lock.release()
def main(username, password):
    """Configure logging, then start spoppy under the singleton lock file."""
    from . import logging_utils
    logging_utils.configure_logging()
    from .navigation import Leifur
    from .config import get_config, set_config, get_config_from_user
    from .connectivity import check_internet_connection
    from .update_checker import check_for_updates
    lock = LockFile('/tmp/spoppy')
    try:
        try:
            # Try for 1s to acquire the lock
            lock.acquire(1)
        except LockTimeout:
            click.echo('Could not acquire lock, is spoppy running?')
            click.echo(
                'If you\'re sure that spoppy is not running, '
                'try removing the lock file %s' % lock.lock_file
            )
            click.echo(
                'You can try removing the lock file by responding [rm]. '
                'spoppy will exit on all other inputs'
            )
            try:
                answer = raw_input('')
            except NameError:
                # Python 3 has no raw_input
                answer = input('')
            if answer == 'rm':
                lock.break_lock()
            else:
                raise TypeError('Could not get lock')
    except TypeError:
        # User declined to break the lock: exit quietly.
        pass
    else:
        check_internet_connection()
        # Check for updates
        check_for_updates(click, get_version(), lock)
        if username and password:
            set_config(username, password)
        else:
            username, password = get_config()
        if not (username and password):
            username, password = get_config_from_user()
        nav = None
        try:
            nav = Leifur(username, password)
            nav.start()
        finally:
            if nav:
                nav.shutdown()
            logger.debug('Finally, bye!')
    finally:
        if lock.i_am_locking():
            lock.release()
class Lock(): """Simple implementation of a mutex lock using the file systems. Works on *nix systems.""" path = None lock = None def __init__(self, path): try: from lockfile import LockFile except ImportError: from lockfile import FileLock # Different naming in older versions of lockfile LockFile = FileLock self.path = path self.lock = LockFile(path) def obtain(self): import os import logging from lockfile import AlreadyLocked logger = logging.getLogger() try: self.lock.acquire(0) logger.debug("Successfully obtained lock: %s" % self.path) except AlreadyLocked: return False return True def release(self): import os import logging logger = logging.getLogger() if not self.has_lock(): raise Exception( "Unable to release lock that is owned by another process") self.lock.release() logger.debug("Successfully released lock: %s" % self.path) def has_lock(self): return self.lock.i_am_locking() def clear(self): import os import logging logger = logging.getLogger() self.lock.break_lock() logger.debug("Successfully cleared lock: %s" % self.path)
class Lock(): """Simple implementation of a mutex lock using the file systems. Works on *nix systems.""" path = None lock = None def __init__(self, path): try: from lockfile import LockFile except ImportError: from lockfile import FileLock # Different naming in older versions of lockfile LockFile = FileLock self.path = path self.lock = LockFile(path) def obtain(self): import os import logging from lockfile import AlreadyLocked logger = logging.getLogger() try: self.lock.acquire(0) logger.debug("Successfully obtained lock: %s" % self.path) except AlreadyLocked: return False return True def release(self): import os import logging logger = logging.getLogger() if not self.has_lock(): raise Exception("Unable to release lock that is owned by another process") self.lock.release() logger.debug("Successfully released lock: %s" % self.path) def has_lock(self): return self.lock.i_am_locking() def clear(self): import os import logging logger = logging.getLogger() self.lock.break_lock() logger.debug("Successfully cleared lock: %s" % self.path)
def append_line_to_mycodo_log(log_file_path, log_lock_path, log_line):
    """
    Appends given line to log file.

    :param log_file_path: Path to the Log File
    :type log_file_path: str
    :param log_lock_path: Path to the Lock File
    :type log_lock_path: str
    :param log_line: String to write to the Log File
    :type log_line: str
    """
    lock = LockFile(log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: {}".format(lock.path))
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except Exception:
            # Was a bare except: narrowed to Exception.
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: {}".format(lock.path))
            lock.break_lock()
            lock.acquire()
        finally:
            logging.debug("[Write Sensor Log] Gained lock: {}".format(lock.path))
    try:
        # Removed the stray no-op `pass` so the write unambiguously happens
        # while the file is still open inside the `with` block.
        with open(log_file_path, "ab") as sensorlog:
            sensorlog.write(log_line + "\n")
        logging.debug("[Write Sensor Log] Data appended to {}".format(
            log_file_path))
    except Exception:
        # Was a bare except: narrowed to Exception.
        logging.warning("[Write Sensor Log] Unable to append data to %s",
                        log_file_path)
    logging.debug("[Write Sensor Log] Removing lock: {}".format(lock.path))
    lock.release()
def do_lock(*args, **kwargs):
    """Run the wrapped callable while holding the file lock at *path*."""
    guard = LockFile(path)
    while not guard.i_am_locking():
        try:
            guard.acquire(timeout=5)
        except LockTimeout:
            # Assume the holder died; break the lock and retry.
            guard.break_lock()
    try:
        return f(*args, **kwargs)
    finally:
        guard.release()
class MailQueue(object):
    """File-locked, pickle-backed queue of outgoing mail messages.

    Use as a context manager: entering loads the queue under the lock,
    exiting persists it and releases the lock.
    """

    QUEUE_FILE = '/tmp/mail.queue'
    MAX_ATTEMPTS = 3

    def __init__(self):
        # Populated by _get_queue() when the context is entered.
        self.queue = None

    def append(self, message):
        """Add *message* to the in-memory queue (only valid inside the context)."""
        self.queue.append(QueueItem(message))

    @classmethod
    def is_empty(cls):
        """Return True when the queue file is missing, unreadable, or zero bytes."""
        try:
            return os.stat(cls.QUEUE_FILE).st_size == 0
        except OSError:
            # Missing or inaccessible file counts as empty.
            return True

    def _get_queue(self):
        """Load the pickled queue, falling back to an empty list on bad data."""
        try:
            with open(self.QUEUE_FILE, 'rb') as fh:
                self.queue = pickle.loads(fh.read())
        except (pickle.PickleError, EOFError):
            self.queue = []

    def __enter__(self):
        self._lock = LockFile(self.QUEUE_FILE)
        while not self._lock.i_am_locking():
            try:
                self._lock.acquire(timeout=330)
            except LockTimeout:
                self._lock.break_lock()
        if not os.path.exists(self.QUEUE_FILE):
            # Touch the file so the reader below has something to open.
            with open(self.QUEUE_FILE, 'a'):
                pass
        self._get_queue()
        return self

    def __exit__(self, typ, value, traceback):
        # Rewrite (or truncate) the queue file, then drop the lock.
        with open(self.QUEUE_FILE, 'wb+') as fh:
            if self.queue:
                fh.write(pickle.dumps(self.queue))
        self._lock.release()
        if typ is not None:
            raise
def sync_sources(labels):
    """
    Attempts to run several methods Certificate discovery. This is run
    on a periodic basis and updates the Lemur datastore with the
    information it discovers.

    :param labels: comma-separated source labels, or falsy to just list sources
    """
    if not labels:
        sys.stdout.write("Active\tLabel\tDescription\n")
        for source in source_service.get_all():
            sys.stdout.write(
                "{active}\t{label}\t{description}!\n".format(
                    label=source.label,
                    description=source.description,
                    active=source.active
                )
            )
        return
    start_time = time.time()
    lock_file = "/tmp/.lemur_lock"
    sync_lock = LockFile(lock_file)
    # Acquire the lock first; previously the sync work ran inside the
    # acquire loop and release() could be called twice.
    while not sync_lock.i_am_locking():
        try:
            sync_lock.acquire(timeout=10)  # wait up to 10 seconds
        except LockTimeout:
            sys.stderr.write(
                "[!] Unable to acquire file lock on {file}, is there another sync running?\n".format(
                    file=lock_file
                )
            )
            sync_lock.break_lock()
            sync_lock.acquire()
    try:
        # Fixed "Staring" typo in the status message.
        sys.stdout.write("[+] Starting to sync sources: {labels}!\n".format(labels=labels))
        labels = labels.split(",")
        if labels[0] == 'all':
            sync()
        else:
            sync(labels=labels)
        sys.stdout.write(
            "[+] Finished syncing sources. Run Time: {time}\n".format(
                time=(time.time() - start_time)
            )
        )
    finally:
        # Release exactly once, even if the sync raises.
        sync_lock.release()
class Journal(object):
    """
    Interface for accessing the journal for the queries that couldn't run
    in the remote side, either for it being offline or failed to execute.

    This should be used in a context and provides file locking by itself.
    """

    JOURNAL_FILE = '/data/ha-journal'

    @classmethod
    def is_empty(cls):
        """Return True when the journal file is absent or has no content."""
        if not os.path.exists(cls.JOURNAL_FILE):
            return True
        try:
            return os.stat(cls.JOURNAL_FILE).st_size == 0
        except OSError:
            return True

    def _get_queries(self):
        """Load the pickled query list, defaulting to [] on bad/empty data."""
        try:
            with open(self.JOURNAL_FILE, 'rb') as f:
                self.queries = pickle.loads(f.read())
        except (pickle.PickleError, EOFError):
            self.queries = []

    def __enter__(self):
        self._lock = LockFile(self.JOURNAL_FILE)
        while not self._lock.i_am_locking():
            try:
                self._lock.acquire(timeout=5)
            except LockTimeout:
                self._lock.break_lock()
        if not os.path.exists(self.JOURNAL_FILE):
            open(self.JOURNAL_FILE, 'a').close()
        self._get_queries()
        return self

    def __exit__(self, typ, value, traceback):
        # Open in binary mode: pickle.dumps() returns bytes, so the previous
        # text-mode 'w+' failed under Python 3 and was inconsistent with the
        # binary 'rb' read in _get_queries().
        with open(self.JOURNAL_FILE, 'wb+') as f:
            if self.queries:
                f.write(pickle.dumps(self.queries))
        self._lock.release()
        if typ is not None:
            raise
def sync(labels):
    """
    Attempts to run several methods Certificate discovery. This is run
    on a periodic basis and updates the Lemur datastore with the
    information it discovers.

    :param labels: comma-separated source labels, or falsy to just list sources
    """
    if not labels:
        sys.stdout.write("Active\tLabel\tDescription\n")
        for source in source_service.get_all():
            sys.stdout.write(
                "{active}\t{label}\t{description}!\n".format(
                    label=source.label,
                    description=source.description,
                    active=source.active
                )
            )
        return
    start_time = time.time()
    lock_file = "/tmp/.lemur_lock"
    sync_lock = LockFile(lock_file)
    # Acquire the lock first; previously the sync work ran inside the
    # acquire loop and release() could be called twice.
    while not sync_lock.i_am_locking():
        try:
            sync_lock.acquire(timeout=10)  # wait up to 10 seconds
        except LockTimeout:
            sys.stderr.write(
                "[!] Unable to acquire file lock on {file}, is there another sync running?\n".format(
                    file=lock_file
                )
            )
            sync_lock.break_lock()
            sync_lock.acquire()
    try:
        # Fixed "Staring" typo in the status message.
        sys.stdout.write("[+] Starting to sync sources: {labels}!\n".format(labels=labels))
        labels = labels.split(",")
        if labels[0] == 'all':
            source_sync()
        else:
            source_sync(labels=labels)
        sys.stdout.write(
            "[+] Finished syncing sources. Run Time: {time}\n".format(
                time=(time.time() - start_time)
            )
        )
    finally:
        # Release exactly once, even if the sync raises.
        sync_lock.release()
class MCP342x_read(object):
    """Read one channel of an MCP342x ADC over I2C, serialized per-device
    by a lock file under /var/lock."""

    def __init__(self, logger, address, channel, resolution):
        self.logger = logger
        self.i2c_address = address
        self.channel = channel
        self.resolution = resolution
        # Pi board revisions 2 and 3 expose I2C on bus 1; older boards use bus 0.
        if GPIO.RPI_INFO['P1_REVISION'] in [2, 3]:
            self.I2C_bus_number = 1
        else:
            self.I2C_bus_number = 0
        self.bus = smbus.SMBus(self.I2C_bus_number)
        self.lock_file = "/var/lock/mycodo_adc_0x{:02X}.pid".format(self.i2c_address)

    def setup_lock(self):
        """Acquire the per-device lock, breaking a stale lock after 60s.

        :returns: (1, "Success") or (0, error message)
        """
        self.execution_timer = timeit.default_timer()
        try:
            self.lock = LockFile(self.lock_file)
            while not self.lock.i_am_locking():
                try:
                    self.logger.debug("[Analog->Digital Converter 0x{:02X}] Acquiring Lock: {}".format(self.i2c_address, self.lock.path))
                    self.lock.acquire(timeout=60)  # wait up to 60 seconds
                except Exception:
                    # Was a bare except: narrowed so SystemExit and
                    # KeyboardInterrupt are no longer trapped here.
                    self.logger.warning("[Analog->Digital Converter 0x{:02X}] Waited 60 seconds. Breaking lock to acquire {}".format(self.i2c_address, self.lock.path))
                    self.lock.break_lock()
                    self.lock.acquire()
            self.logger.debug("[Analog->Digital Converter 0x{:02X}] Acquired Lock: {}".format(self.i2c_address, self.lock.path))
            self.logger.debug("[Analog->Digital Converter 0x{:02X}] Executed in {}ms".format(self.i2c_address, (timeit.default_timer()-self.execution_timer)*1000))
            return 1, "Success"
        except Exception as msg:
            return 0, "Analog->Digital Converter Fail: {}".format(msg)

    def release_lock(self):
        """Release the lock acquired by setup_lock()."""
        self.lock.release()

    def read(self):
        """Perform one conversion on the configured channel.

        :returns: (1, value) on success or (0, error message)
        """
        try:
            time.sleep(0.1)
            self.setup_lock()
            adc = MCP342x(self.bus, self.i2c_address,
                          channel=self.channel-1,
                          resolution=self.resolution)
            response = adc.convert_and_read()
            self.release_lock()
            return 1, response
        except Exception as msg:
            self.release_lock()
            return 0, "Fail: {}".format(msg)
def safe_flush_file(file_path):
    """Generator context manager: open *file_path* for writing under a lock.

    Yields the open file object; the lock is always released afterwards.

    Fixes two defects in the previous version: a ``finally: lock.acquire()``
    that re-acquired an already-held lock after a successful acquire (risking
    a deadlock), and a ``lock.release()`` that was skipped when the consumer
    of the yielded file raised.
    """
    lock = LockFile(os.path.basename(file_path))
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            # Stale lock: break it, then take it ourselves.
            lock.break_lock()
            lock.acquire()
    try:
        # `with` closes the file; the old explicit cf.close() was redundant.
        with open(file_path, 'w') as cf:
            yield cf
    finally:
        lock.release()
def read(self):
    """Take a CO2 reading from the K30 and store it in ``self._co2``.

    :returns: None on success or 1 on error
    """
    lock = LockFile(K30_LOCK_FILE)
    try:
        # Acquire lock on K30 to ensure more than one read isn't
        # being attempted at once.
        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=60)  # wait up to 60 seconds before breaking lock
            except Exception:
                # Was a bare except: narrowed to Exception.
                lock.break_lock()
                lock.acquire()
        self._co2 = self.get_measurement()
        lock.release()
    except Exception:
        # Was a bare except: narrowed to Exception.
        lock.release()
        return 1
def run(self):
    """Reorder the command line so the action keyword leads, then dispatch."""
    self.set_arguments()  # Set our arguments
    argv = sys.argv[1:]  # Grab args from cmdline
    # Global options may appear anywhere, but argparse subparsers demand
    # everything follow the 'action' keyword; detect the action and move
    # it to the front of the argument list.
    known, _ = self.parser.parse_known_args(argv)
    argv.remove(known.action)
    argv.insert(0, known.action)
    # Finally, parse args and give error if necessary
    args = self.parser.parse_args(argv)
    if args.verbose:
        set_logging(level=logging.DEBUG)
    elif args.quiet:
        set_logging(level=logging.WARNING)
    else:
        set_logging(level=logging.INFO)
    lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE))
    try:
        lock.acquire(timeout=-1)
        args.func(args)
    except AttributeError:
        # Missing args.func means no sub-command was chosen: show usage.
        if hasattr(args, "func"):
            raise
        self.parser.print_help()
    except KeyboardInterrupt:
        pass
    except AlreadyLocked:
        logger.error(
            "Could not proceed - there is probably another instance of Atomic App running on this machine."
        )
    except Exception as ex:
        if args.verbose:
            raise
        logger.error("Exception caught: %s", repr(ex))
        logger.error("Run the command again with -v option to get more information.")
    finally:
        if lock.i_am_locking():
            lock.release()
def concat_log_tmp_to_perm(log_file_tmp, log_file_perm, log_lock_path):
    """
    Combines logs on the temporary file system with the logs on the SD card.

    :param log_file_tmp: Path to the Log File on the tmpfs
    :type log_file_tmp: str
    :param log_file_perm: Path to the Log File on the SD Card
    :type log_file_perm: str
    :param log_lock_path: Path to the lock file
    :type log_lock_path: str
    """
    # Daemon Logs
    if not filecmp.cmp(log_file_tmp, log_file_perm):
        logging.debug("[Log Backup] Concatenating log cache"
                      " ({}) to permanent storage ({})".format(log_file_tmp, log_file_perm))
        lock = LockFile(log_lock_path)
        while not lock.i_am_locking():
            try:
                logging.debug("[Log Backup] Acquiring Lock: {}".format(lock.path))
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except Exception:
                # Was a bare except: narrowed to Exception.
                logging.warning("[Log Backup] Breaking Lock to Acquire: {}".format(lock.path))
                lock.break_lock()
                lock.acquire()
            finally:
                logging.debug("[Log Backup] Gained lock: {}".format(lock.path))
        try:
            with open(log_file_perm, 'a') as fout, open(log_file_tmp, 'r+') as tmp_log:
                for line in tmp_log:
                    fout.write(line)
                logging.debug("[Log Backup] Appended log data to {}".format(log_file_perm))
                # Rewind before truncating: truncate() cuts at the current
                # position, which is EOF after iterating, so without seek(0)
                # the cache file was never actually cleared.
                tmp_log.seek(0)
                tmp_log.truncate()  # Clear tmp_log if we've copied the lines over
        except Exception:
            # Was a bare except: narrowed to Exception.
            logging.warning("[Log Backup] Unable to append data to {}".format(log_file_perm))
        logging.debug("[Log Backup] Removing lock: {}".format(lock.path))
        lock.release()
    else:
        logging.debug(
            "[Log Backup] Logs the same, skipping. ({}) ({})".format(log_file_tmp, log_file_perm))
def read(self):
    """Take a CO2 reading from the K30 and store it in ``self._co2``.

    Access to the sensor is serialized through a lock file so only one
    read is attempted at a time.

    :returns: None on success or 1 on error
    """
    lock = LockFile(K30_LOCK_FILE)
    try:
        # Acquire lock on K30 to ensure more than one read isn't
        # being attempted at once.
        while not lock.i_am_locking():
            try:
                # wait up to 60 seconds before breaking lock
                lock.acquire(timeout=60)
            except Exception:  # narrowed from a bare `except:`
                lock.break_lock()
                lock.acquire()
        self._co2 = self.get_measurement()
        lock.release()
    except Exception:
        # Best-effort release: if acquisition itself failed we may not be
        # holding the lock, and release() would raise NotLocked.
        try:
            lock.release()
        except Exception:
            pass
        return 1
class SmartAlert(object):
    """Context manager around the pickled S.M.A.R.T. alert store.

    Access is serialized with a lock file; ``self.data`` maps '/dev/...'
    paths to lists of alert messages and is persisted on exit.
    """

    SMART_FILE = '/tmp/.smartalert'

    def __init__(self):
        self.data = {}  # {'/dev/<name>': [message, ...]}
        self.lock = LockFile(self.SMART_FILE)

    @staticmethod
    def _normalize(dev):
        """Return dev with a '/dev/' prefix (added if missing)."""
        if dev.startswith('/dev/'):
            return dev
        return '/dev/{0}'.format(dev)

    def __enter__(self):
        # Keep trying for the lock; a 5-second stall is treated as a
        # stale lock and broken.
        while not self.lock.i_am_locking():
            try:
                self.lock.acquire(timeout=5)
            except LockTimeout:
                self.lock.break_lock()
        if os.path.exists(self.SMART_FILE):
            with open(self.SMART_FILE, 'rb') as handle:
                try:
                    self.data = pickle.loads(handle.read())
                except Exception:
                    pass  # unreadable store: keep the empty default
        return self

    def __exit__(self, typ, value, traceback):
        with open(self.SMART_FILE, 'wb') as handle:
            handle.write(pickle.dumps(self.data))
        self.lock.release()
        if typ is not None:
            raise

    def message_add(self, dev, message):
        """Record message for dev (deduplicated)."""
        messages = self.data.setdefault(self._normalize(dev), [])
        if message not in messages:
            messages.append(message)

    def device_delete(self, dev):
        """Drop all recorded messages for dev (no-op if unknown)."""
        self.data.pop(self._normalize(dev), None)
def query(self, query_str):
    """ Send command to board and read response """
    # One lock file per I2C address so boards at different addresses
    # do not serialize against each other.
    lock_file_amend = '{lf}.{dev}'.format(lf=ATLAS_PH_LOCK_FILE,
                                          dev=self.current_addr)
    lock = LockFile(lock_file_amend)
    try:
        while not lock.i_am_locking():
            try:
                lock.acquire(
                    timeout=10
                )  # wait up to 10 seconds before breaking lock
            except Exception as e:
                logger.exception(
                    "{cls} 10 second timeout, {lock} lock broken: "
                    "{err}".format(cls=type(self).__name__,
                                   lock=ATLAS_PH_LOCK_FILE,
                                   err=e))
                lock.break_lock()
                lock.acquire()
        # write a command to the board, wait the correct timeout, and read the response
        self.write(query_str)
        # the read and calibration commands require a longer timeout
        if ((query_str.upper().startswith("R")) or
                (query_str.upper().startswith("CAL"))):
            time.sleep(self.long_timeout)
        elif query_str.upper().startswith("SLEEP"):
            # Board goes to sleep; there is no response to read.
            return "sleep mode"
        else:
            time.sleep(self.short_timeout)
        response = self.read()
        lock.release()
        return response
    except Exception as err:
        logger.exception(
            "{cls} raised an exception when taking a reading: "
            "{err}".format(cls=type(self).__name__, err=err))
        # NOTE(review): if acquire() itself failed, this release() raises
        # NotLocked inside the handler — confirm intended.
        lock.release()
        return None
def combine_filecontents(self, forcebuild=False):
    """
    ..py:method:: combine_filecontents([forcebuild : boolean]) -> boolean

    Combines all content of scripts into one file.
    Returns False if the file already exists (and forcebuild is off) or if
    writing fails; otherwise returns the (still held) lock object.

    :param boolean forcebuild: should re-build scripts everytime
    :rtype boolean:
    """
    _built_fname = os.path.join(self.dest, self.get_output_filename())
    if self.exists_andnot_force(forcebuild, _built_fname):
        return False

    lock_ = LockFile(os.path.join(settings.STATIC_ROOT, _built_fname))
    # BUG FIX: previously `while lock_.i_am_locking()`, which is False
    # before acquisition — the loop never ran and the lock was never taken.
    while not lock_.i_am_locking():
        try:
            lock_.acquire(timeout=20)
        except LockTimeout:
            # Assume a stale lock after 20 seconds and break it.
            lock_.break_lock()
            lock_.acquire()
    try:
        _built_fd = self._openfile(_built_fname, 'w')
        # collect all content of scripts into files to be compressed
        for script in self.scripts:
            fd = self._openfile(self.backend.pre_open(script))
            content = self.backend.read(fd)
            _built_fd.write(content + '\n')
            fd.close()
        _built_fd.close()
    except Exception:  # narrowed from a bare `except:`
        try:
            lock_.release()
        except NotLocked:
            pass
        return False
    return lock_
class SmartAlert(object):
    """Context manager for the pickled S.M.A.R.T. alert store.

    Guards the store with a lock file; loads it on ``__enter__`` and writes
    ``self.data`` back on ``__exit__``.
    """

    SMART_FILE = "/tmp/.smartalert"

    def __init__(self):
        self.data = {}  # {device: [message, ...]}
        self.lock = LockFile(self.SMART_FILE)

    def __enter__(self):
        while not self.lock.i_am_locking():
            try:
                self.lock.acquire(timeout=5)
            except LockTimeout:
                # Assume a stale lock after 5 seconds and break it.
                self.lock.break_lock()
        if os.path.exists(self.SMART_FILE):
            with open(self.SMART_FILE, "rb") as f:
                try:
                    self.data = pickle.loads(f.read())
                except Exception:
                    # Corrupt/empty store: start fresh. (Was a bare
                    # `except:`, which also swallowed SystemExit and
                    # KeyboardInterrupt.)
                    pass
        return self

    def __exit__(self, typ, value, traceback):
        with open(self.SMART_FILE, "wb") as f:
            f.write(pickle.dumps(self.data))
        self.lock.release()
        if typ is not None:
            raise

    def message_add(self, dev, message):
        """Record message for dev, avoiding duplicates."""
        if dev not in self.data:
            self.data[dev] = []
        if message not in self.data[dev]:
            self.data[dev].append(message)

    def device_delete(self, dev):
        """Forget all messages for dev (no-op if unknown)."""
        self.data.pop(dev, None)
def check_sync(self):
    """Load the collectd notification pickle (under its lock file) and
    convert each recorded entry into an Alert."""
    if not os.path.exists(COLLECTD_FILE):
        return

    lock = LockFile(COLLECTD_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            # Give up rather than break the writer's lock.
            return

    with open(COLLECTD_FILE, "rb") as f:
        raw = f.read()
    lock.release()

    try:
        data = pickle.loads(raw)
    except Exception:
        data = {}

    alerts = []
    for key, value in list(data.items()):
        if key == "ctl-ha/disk_octets":
            text = (
                "Storage Controller HA link is in use. Please check that all iSCSI and FC initiators support ALUA "
                "and are able to connect to the active node."
            )
        else:
            text = key
        klass = (CollectdWarningAlertClass
                 if value["Severity"] == "WARNING"
                 else CollectdCriticalAlertClass)
        alerts.append(Alert(klass, text))
    return alerts
def check_sync(self):
    """Turn the pickled collectd notifications into Alert objects.

    Reads the store under its lock file; a lock timeout aborts quietly.
    """
    if not os.path.exists(COLLECTD_FILE):
        return
    lock = LockFile(COLLECTD_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            return
    with open(COLLECTD_FILE, "rb") as handle:
        try:
            data = pickle.loads(handle.read())
        except Exception:
            data = {}
    lock.release()

    def _make_alert(name, info):
        # Map one stored notification to an Alert of the right severity.
        if name == "ctl-ha/disk_octets":
            message = (
                "Storage Controller HA link is in use. Please check that all iSCSI and FC initiators support ALUA "
                "and are able to connect to the active node.")
        else:
            message = name
        severity_class = (CollectdWarningAlertClass
                          if info["Severity"] == "WARNING"
                          else CollectdCriticalAlertClass)
        return Alert(severity_class, message)

    return [_make_alert(k, v) for k, v in list(data.items())]
def main():
    """Collectd notification hook: parse one notification from stdin and
    merge it into the pickled alert store, guarded by a lock file."""
    lock = LockFile(COLLECTD_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            # Assume a stale lock after 5 seconds and break it.
            lock.break_lock()

    data = {}
    if os.path.exists(COLLECTD_FILE):
        with open(COLLECTD_FILE, 'rb') as f:
            try:
                data = pickle.loads(f.read())
            except Exception:
                # Corrupt/empty store: start fresh. (Was a bare `except:`.)
                pass

    # collectd separates headers from the body with a blank line; rewrite
    # it so the body parses like a "Message: ..." header below.
    text = sys.stdin.read().replace('\n\n', '\nMessage: ', 1)
    v = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\n", text))

    # Build the alert key: Plugin[-PluginInstance]/Type[-TypeInstance]
    k = v["Plugin"]
    if "PluginInstance" in v:
        k += "-" + v["PluginInstance"]
    k += "/" + v["Type"]
    if "TypeInstance" in v:
        k += "-" + v["TypeInstance"]

    if v["Severity"] == "OKAY":
        # An OKAY notification clears any previously stored alert.
        data.pop(k, None)
    else:
        data[k] = v

    with open(COLLECTD_FILE, 'wb') as f:
        f.write(pickle.dumps(data))
    lock.release()
def run(self):
    """Parse CLI arguments, configure the log level, then run the chosen
    sub-command while holding an exclusive lock file."""
    self.set_arguments()
    args = self.parser.parse_args()

    # Verbose wins over quiet; default is INFO.
    level = logging.INFO
    if args.verbose:
        level = logging.DEBUG
    elif args.quiet:
        level = logging.WARNING
    set_logging(level=level)

    lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE))
    try:
        lock.acquire(timeout=-1)
        args.func(args)
    except AttributeError:
        if not hasattr(args, 'func'):
            # No sub-command selected: show usage rather than a traceback.
            self.parser.print_help()
        else:
            raise
    except KeyboardInterrupt:
        pass
    except AlreadyLocked:
        logger.error(
            "Could not proceed - there is probably another instance of Atomic App running on this machine."
        )
    except Exception as ex:
        if not args.verbose:
            logger.error("Exception caught: %s", repr(ex))
            logger.error(
                "Run the command again with -v option to get more information."
            )
        else:
            raise
    finally:
        if lock.i_am_locking():
            lock.release()
def run(self):
    """Entry point: assemble the effective command line (with OpenShift and
    environment-variable overrides), parse it, configure logging, then
    dispatch the selected action under a host-wide lock file."""
    cmdline = sys.argv[1:]  # Grab args from cmdline

    # Initial setup of logging (to allow for a few early debug statements)
    Logging.setup_logging(verbose=True, quiet=False)

    # If we are running in an openshift pod (via `oc new-app`) then
    # there is no cmdline but we want to default to "atomicapp run".
    if Utils.running_on_openshift():
        cmdline = 'run -v --dest=none --provider=openshift /{}'
        cmdline = cmdline.format(APP_ENT_PATH).split()  # now a list

    # If the user has elected to provide all arguments via the
    # ATOMICAPP_ARGS environment variable then set it now
    argstr = os.environ.get('ATOMICAPP_ARGS')
    if argstr:
        logger.debug("Setting cmdline args to: {}".format(argstr))
        cmdline = argstr.split()

    # If the user has elected to provide some arguments via the
    # ATOMICAPP_APPEND_ARGS environment variable then add those now
    argstr = os.environ.get('ATOMICAPP_APPEND_ARGS')
    if argstr:
        logger.debug("Appending args to cmdline: {}".format(argstr))
        cmdline.extend(argstr.split())

    # We want to be able to place options anywhere on the command
    # line. We have added all global options to each subparser,
    # but subparsers require all options to be after the 'action'
    # keyword. In order to handle this we just need to figure out
    # what subparser will be used and move it's keyword to the front
    # of the line.
    # NOTE: Also allow "mode" to override 'action' if specified
    args, _ = self.parser.parse_known_args(cmdline)
    cmdline.remove(args.action)  # Remove 'action' from the cmdline
    if args.mode:
        args.action = args.mode  # Allow mode to override 'action'
    cmdline.insert(0, args.action)  # Place 'action' at front

    # Finally, parse args and give error if necessary
    args = self.parser.parse_args(cmdline)

    # Setup logging (now with arguments from cmdline) and log a few msgs
    Logging.setup_logging(args.verbose, args.quiet, args.logtype)
    logger.info("Action/Mode Selected is: %s" % args.action)
    logger.debug("Final parsed cmdline: {}".format(' '.join(cmdline)))

    # In the case of Atomic CLI we want to allow the user to specify
    # a directory if they want to for "run". For that reason we won't
    # default the RUN label for Atomic App to provide an app_spec argument.
    # In this case pick up app_spec from $IMAGE env var (set by RUN label).
    if args.app_spec is None:
        if os.environ.get('IMAGE') is not None:
            logger.debug("Setting app_spec based on $IMAGE env var")
            args.app_spec = os.environ['IMAGE']
        else:
            print("Error. Too few arguments. Must provide app_spec.")
            print("Run with '--help' for more info")
            sys.exit(1)

    # Take the arguments that correspond to "answers" config file data
    # and make a dictionary of it to pass along in args.
    setattr(args, 'cli_answers', {})
    for item in ['providerapi', 'providercafile', 'providerconfig',
                 'providertlsverify', 'namespace']:
        if hasattr(args, item) and getattr(args, item) is not None:
            args.cli_answers[item] = getattr(args, item)

    lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE))
    try:
        # timeout=-1 -> raise AlreadyLocked immediately instead of waiting
        lock.acquire(timeout=-1)
        args.func(args)
    except AttributeError:
        if hasattr(args, 'func'):
            raise  # the error came from inside the action handler itself
        else:
            self.parser.print_help()
    except KeyboardInterrupt:
        pass
    except AlreadyLocked:
        logger.error("Could not proceed - there is probably another instance of Atomic App running on this machine.")
    except Exception as ex:
        if args.verbose:
            raise
        else:
            logger.error("Exception caught: %s", repr(ex))
            logger.error(
                "Run the command again with -v option to get more information.")
    finally:
        # Only release if we actually hold the lock.
        if lock.i_am_locking():
            lock.release()
def _concat_log(tmp_file, perm_file, lock_path, label):
    """Append tmp_file's contents to perm_file under its lock file, then
    truncate tmp_file. `label` only decorates the log messages."""
    if filecmp.cmp(tmp_file, perm_file):
        logging.debug("[Log Backup] %s logs the same, skipping.", label)
        return
    logging.debug("[Log Backup] Concatenating %s logs to %s", label, perm_file)
    lock = LockFile(lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Log Backup] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except Exception:  # narrowed from a bare `except:`
            logging.warning("[Log Backup] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()
    logging.debug("[Log Backup] Gained lock: %s", lock.path)
    try:
        with open(perm_file, 'a') as fout:
            for line in fileinput.input(tmp_file):
                fout.write(line)
        logging.debug("[Log Backup] Appended %s data to %s", label, perm_file)
    except Exception:
        logging.warning("[Log Backup] Unable to append data to %s", perm_file)
    # Clear the tmpfs cache regardless, matching the original behavior.
    open(tmp_file, 'w').close()
    logging.debug("[Log Backup] Removing lock: %s", lock.path)
    lock.release()


def Concatenate_Logs():
    """Merge every tmpfs log cache into its permanent SD-card log.

    Refactored from five near-identical copy-paste stanzas into one shared
    helper (_concat_log); behavior per log is unchanged.
    """
    _concat_log(sensor_t_log_file_tmp, sensor_t_log_file,
                sensor_t_log_lock_path, "T Sensor")
    _concat_log(sensor_ht_log_file_tmp, sensor_ht_log_file,
                sensor_ht_log_lock_path, "HT Sensor")
    _concat_log(sensor_co2_log_file_tmp, sensor_co2_log_file,
                sensor_co2_log_lock_path, "CO2 Sensor")
    _concat_log(sensor_press_log_file_tmp, sensor_press_log_file,
                sensor_press_log_lock_path, "Press Sensor")
    _concat_log(relay_log_file_tmp, relay_log_file,
                relay_log_lock_path, "Relay")
    _concat_log(daemon_log_file_tmp, daemon_log_file,
                daemon_log_lock_path, "Daemon")
def write_config():
    """Serialize all runtime settings to the config file under a lock file.

    Refactored: the 8 relay entries and 8 timer entries are written with
    loops instead of 60+ copy-pasted config.set calls; section insertion
    order (and therefore file layout) is preserved.
    """
    config = ConfigParser.RawConfigParser()

    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)

    lock = LockFile(config_lock_path)
    while not lock.i_am_locking():
        try:
            logging.info("[Write Config] Waiting, Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except Exception:  # narrowed from a bare `except:`
            logging.warning("[Write Config] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()
    logging.info("[Write Config] Gained lock: %s", lock.path)
    logging.info("[Write Config] Writing config file %s", config_file)

    config.add_section('Sensor')
    config.set('Sensor', 'dhtsensor', DHTSensor)
    config.set('Sensor', 'dhtpin', DHTPin)
    config.set('Sensor', 'dhtseconds', DHTSeconds)

    # Relay tables: identical layout for relays 1..8.
    config.add_section('RelayNames')
    config.add_section('RelayPins')
    config.add_section('RelayTriggers')
    for i in range(1, 9):
        config.set('RelayNames', 'relay%dname' % i, relayName[i])
        config.set('RelayPins', 'relay%dpin' % i, relayPin[i])
        config.set('RelayTriggers', 'relay%dtrigger' % i, relayTrigger[i])

    config.add_section('PID')
    config.set('PID', 'relaytemp', relayTemp)
    config.set('PID', 'relayhum', relayHum)
    config.set('PID', 'tempor', TempOR)
    config.set('PID', 'humor', HumOR)
    config.set('PID', 'settemp', setTemp)
    config.set('PID', 'sethum', setHum)
    config.set('PID', 'hum_p', Hum_P)
    config.set('PID', 'hum_i', Hum_I)
    config.set('PID', 'hum_d', Hum_D)
    config.set('PID', 'temp_p', Temp_P)
    config.set('PID', 'temp_i', Temp_I)
    config.set('PID', 'temp_d', Temp_D)
    config.set('PID', 'factorhumseconds', factorHumSeconds)
    config.set('PID', 'factortempseconds', factorTempSeconds)

    config.add_section('Misc')
    config.set('Misc', 'numtimers', numTimers)
    config.set('Misc', 'cameralight', cameraLight)

    # Timer tables: four parallel tables for timers 1..8.
    config.add_section('TimerState')
    config.add_section('TimerRelay')
    config.add_section('TimerDurationOn')
    config.add_section('TimerDurationOff')
    for i in range(1, 9):
        config.set('TimerState', 'timer%dstate' % i, timerState[i])
        config.set('TimerRelay', 'timer%drelay' % i, timerRelay[i])
        config.set('TimerDurationOn', 'timer%ddurationon' % i, timerDurationOn[i])
        config.set('TimerDurationOff', 'timer%ddurationoff' % i, timerDurationOff[i])

    config.add_section('Notification')
    config.set('Notification', 'smtp_host', smtp_host)
    config.set('Notification', 'smtp_port', smtp_port)
    config.set('Notification', 'smtp_user', smtp_user)
    config.set('Notification', 'smtp_pass', smtp_pass)
    config.set('Notification', 'email_from', email_from)
    config.set('Notification', 'email_to', email_to)

    try:
        with open(config_file, 'wb') as configfile:
            config.write(configfile)
    except Exception:  # narrowed from a bare `except:`
        logging.warning("[Write Config] Unable to write config: %s", config_lock_path)
    logging.info("[Write Config] Removing lock: %s", lock.path)
    lock.release()
class MHZ16Sensor(AbstractSensor):
    """ A sensor support class that monitors the MH-Z16's CO2 concentration """

    def __init__(self, interface, device_loc=None, baud_rate=None,
                 i2c_address=None, i2c_bus=None):
        super(MHZ16Sensor, self).__init__()
        self.k30_lock_file = None  # UART lock file path (UART mode only)
        self._co2 = 0  # last CO2 reading (ppmv); 0 = not yet read
        self.interface = interface  # 'UART' or 'I2C'

        if self.interface == 'UART':
            self.logger = logging.getLogger(
                "mycodo.sensors.mhz16.{dev}".format(
                    dev=device_loc.replace('/', '')))
            # Check if device is valid
            self.serial_device = is_device(device_loc)
            if self.serial_device:
                try:
                    # Lock file keyed on the device node so concurrent reads
                    # of the same serial port are serialized.
                    self.k30_lock_file = "/var/lock/sen-mhz16-{}".format(
                        device_loc.replace('/', ''))
                    self.lock = LockFile(self.k30_lock_file)
                    self.ser = serial.Serial(self.serial_device,
                                             baudrate=baud_rate,
                                             timeout=1)
                except serial.SerialException:
                    self.logger.exception('Opening serial')
            else:
                self.logger.error(
                    'Could not open "{dev}". '
                    'Check the device location is correct.'.format(
                        dev=device_loc))
        elif self.interface == 'I2C':
            self.logger = logging.getLogger(
                "mycodo.sensors.mhz16.{dev}".format(dev=i2c_address))
            # 9-byte measurement command frame sent through the bridge.
            self.cmd_measure = [
                0xFF, 0x01, 0x9C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63
            ]
            # Register addresses of the I2C-to-UART bridge, pre-shifted by 3
            # as the chip's register addressing requires.
            self.IOCONTROL = 0X0E << 3
            self.FCR = 0X02 << 3
            self.LCR = 0X03 << 3
            self.DLL = 0x00 << 3
            self.DLH = 0X01 << 3
            self.THR = 0X00 << 3
            self.RHR = 0x00 << 3
            self.TXLVL = 0X08 << 3
            self.RXLVL = 0X09 << 3
            self.i2c_address = i2c_address
            self.i2c = smbus.SMBus(i2c_bus)
            self.begin()

    def __repr__(self):
        """ Representation of object """
        return "<{cls}(co2={co2})>".format(cls=type(self).__name__,
                                           co2="{0:.2f}".format(self._co2))

    def __str__(self):
        """ Return CO2 information """
        return "CO2: {co2}".format(co2="{0:.2f}".format(self._co2))

    def __iter__(self):  # must return an iterator
        """ MH-Z16 iterates through live CO2 readings """
        return self

    def next(self):
        """ Get next CO2 reading """
        # NOTE: Python-2 iterator protocol (`next`, not `__next__`).
        if self.read():  # raised an error
            raise StopIteration  # required
        return dict(co2=float('{0:.2f}'.format(self._co2)))

    def info(self):
        # (name, attribute, type, format, raw value, property) tuples
        # describing what this sensor measures.
        conditions_measured = [("CO2", "co2", "float", "0.00",
                                self._co2, self.co2)]
        return conditions_measured

    @property
    def co2(self):
        """ CO2 concentration in ppmv """
        if not self._co2:  # update if needed
            self.read()
        return self._co2

    def get_measurement(self):
        """ Gets the MH-Z16's CO2 concentration in ppmv via UART"""
        self._co2 = None
        if self.interface == 'UART':
            self.ser.flushInput()
            time.sleep(1)
            # 9-byte "read CO2" command (Python-2 byte string literal).
            self.ser.write("\xff\x01\x86\x00\x00\x00\x00\x00\x79")
            time.sleep(.01)
            resp = self.ser.read(9)
            if len(resp) != 0:
                # CO2 ppm = high byte * 256 + low byte
                high_level = struct.unpack('B', resp[2])[0]
                low_level = struct.unpack('B', resp[3])[0]
                co2 = high_level * 256 + low_level
                return co2
        elif self.interface == 'I2C':
            self.write_register(self.FCR, 0x07)
            self.send(self.cmd_measure)
            try:
                co2 = self.parse(self.receive())
            except Exception:
                co2 = None
            return co2
        return None

    def read(self):
        """ Takes a reading from the MH-Z16 and updates the self._co2 value

        :returns: None on success or 1 on error
        """
        try:
            if self.interface == 'UART':
                if not self.serial_device:  # Don't measure if device isn't validated
                    return None
                # Acquire lock on MHZ16 to ensure more than one read isn't
                # being attempted at once on the same interface
                while not self.lock.i_am_locking():
                    try:
                        # wait 60 seconds before breaking lock
                        self.lock.acquire(timeout=60)
                    except Exception as e:
                        self.logger.error(
                            "{cls} 60 second timeout, {lock} lock broken: "
                            "{err}".format(cls=type(self).__name__,
                                           lock=self.k30_lock_file,
                                           err=e))
                        self.lock.break_lock()
                        self.lock.acquire()
                self._co2 = self.get_measurement()
                self.lock.release()
            elif self.interface == 'I2C':
                self._co2 = self.get_measurement()
            if self._co2 is None:
                return 1
            return  # success - no errors
        except Exception as e:
            self.logger.error(
                "{cls} raised an exception when taking a reading: "
                "{err}".format(cls=type(self).__name__, err=e))
            if self.interface == 'UART':
                # NOTE(review): raises NotLocked if the lock was never
                # acquired before the exception — confirm intended.
                self.lock.release()
            return 1

    def begin(self):
        # Initialize the I2C-to-UART bridge registers.
        try:
            self.write_register(self.IOCONTROL, 0x08)
        except IOError:
            # The original code deliberately ignores an IOError here —
            # presumably the chip does not ACK this write; confirm.
            pass
        self.write_register(self.FCR, 0x07)
        self.write_register(self.LCR, 0x83)
        self.write_register(self.DLL, 0x60)
        self.write_register(self.DLH, 0x00)
        self.write_register(self.LCR, 0x03)

    @staticmethod
    def parse(response):
        """Validate a 9-byte response frame (header 0xFF 0x9C + checksum)
        and return the CO2 value, or None if the frame is invalid."""
        checksum = 0
        if len(response) < 9:
            return None
        for i in range(0, 9):
            checksum += response[i]
        if response[0] == 0xFF:
            if response[1] == 0x9C:
                if checksum % 256 == 0xFF:
                    # 32-bit big-endian value from bytes 2..5
                    return (response[2] << 24) + (response[3] << 16) + (
                        response[4] << 8) + response[5]
        return None

    def read_register(self, reg_addr):
        time.sleep(0.01)  # brief settle delay between bridge transactions
        return self.i2c.read_byte_data(self.i2c_address, reg_addr)

    def write_register(self, reg_addr, val):
        time.sleep(0.01)  # brief settle delay between bridge transactions
        self.i2c.write_byte_data(self.i2c_address, reg_addr, val)

    def send(self, command):
        # Only transmit when the bridge TX FIFO reports room for the frame.
        if self.read_register(self.TXLVL) >= len(command):
            self.i2c.write_i2c_block_data(self.i2c_address, self.THR, command)

    def receive(self):
        """Read up to 9 response bytes from the bridge RX FIFO, giving up
        after roughly 0.2 seconds."""
        n = 9
        buf = []
        # NOTE(review): time.clock() was removed in Python 3.8 — this code
        # targets Python 2 / <3.8.
        start = time.clock()
        while n > 0:
            rx_level = self.read_register(self.RXLVL)
            if rx_level > n:
                rx_level = n
            buf.extend(
                self.i2c.read_i2c_block_data(self.i2c_address, self.RHR,
                                             rx_level))
            n = n - rx_level
            if time.clock() - start > 0.2:
                break
        return buf
def get(self):
    """Upon request from a manager, picks the first task available and
    returns it in JSON format.

    BUG FIX: the `incomplete_parents` flag is now reset for every candidate
    task. Previously it was set once and never cleared, so a single task
    with an unfinished parent caused every later candidate to be skipped.
    """
    # Validate request
    args = task_generator_parser.parse_args()
    task = None
    percentage_done = 0
    manager_uuid = args['uuid']
    job_types = args['job_types']
    worker = args['worker']

    # Identify the requesting manager by UUID or, failing that, by IP.
    if manager_uuid:
        manager = Manager.query.filter_by(uuid=manager_uuid).one()
    else:
        ip_address = request.remote_addr
        manager = Manager.query.filter_by(ip_address=ip_address).first()
    if not manager:
        print('Could not find manager, returning 404')
        return '', 404

    # Get active Jobs (optionally restricted to the requested job types)
    if job_types:
        job_types_list = job_types.split(',')
        job_type_clauses = or_(*[Job.type == j for j in job_types_list])
        active_jobs = Job.query \
            .filter(job_type_clauses) \
            .filter(or_(
                Job.status == 'waiting',
                Job.status == 'active')) \
            .order_by(Job.priority.desc(), Job.id.asc()) \
            .all()
    else:
        active_jobs = Job.query \
            .filter(or_(
                Job.status == 'waiting',
                Job.status == 'active')) \
            .order_by(Job.priority.desc(), Job.id.asc()) \
            .all()

    # Serialize task assignment across concurrent requests with a lock file.
    lock = LockFile(os.path.join(app.config['TMP_FOLDER'], 'server.lock'))
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except LockTimeout:
            lock.break_lock()
            lock.acquire()

    job = None
    for job in active_jobs:
        tasks = Task.query.filter(
            Task.job_id == job.id,
            or_(Task.status == 'waiting', Task.status == 'canceled'),
            Task.manager_id == manager.id). \
            order_by(Task.priority.desc(), Task.id.desc())
        task = None
        for t in tasks:
            # Reset per candidate (see BUG FIX note in the docstring).
            incomplete_parents = False
            # All the parents are completed?
            for tt in tasks:
                if tt.child_id == t.id and tt.status != 'completed':
                    incomplete_parents = True
                    break
            # If a parent is unfinished skip this task
            if incomplete_parents:
                continue
            task = t
            break
        # Task found? Then break
        if task:
            break

    if not task:
        # Unlocking Task table on ROLLBACK
        db.session.rollback()
        lock.release()
        print('Unlocking Task table on ROLLBACK, returning 404')
        return '', 404

    # Claim the task for this manager/worker.
    task.status = 'processing'
    job.status = 'active'
    if worker:
        task.worker = worker
    task.last_activity = datetime.now()
    db.session.commit()
    lock.release()

    # A freshly assigned task always starts at 0% done.
    frame_count = 1
    current_frame = 0
    percentage_done = Decimal(current_frame) / Decimal(frame_count) * Decimal(100)
    percentage_done = round(percentage_done, 1)

    task = {
        "id": task.id,
        "job_id": task.job_id,
        "name": task.name,
        "status": task.status,
        "type": task.type,
        "settings": json.loads(task.settings),
        "log": None,
        "activity": task.activity,
        "manager_id": task.manager_id,
        "priority": task.priority,
        "child_id": task.child_id,
        "parser": task.parser,
        "time_cost": task.time_cost,
        "project_id": job.project_id,
        "current_frame": 0,
        "percentage_done": percentage_done}
    return jsonify(**task)
from lockfile import LockFile, LockTimeout

import CONSTANTS

# Demo script: hold a lock file while dumping the password file to stdout.
# Cleanups vs. previous revision: removed commented-out dead code, the file
# is now opened via a context manager so it cannot leak on an exception.
filename = CONSTANTS.FILE_DIR + "/password.txt"

lock = LockFile(filename)
print(lock.i_am_locking())
print(lock.is_locked())

lock.acquire()
# After release() below, i_am_locking() turns False and the loop exits.
while lock.i_am_locking():
    try:
        print(lock.is_locked())
        print(lock.i_am_locking())
        with open(filename, 'r') as fb:
            for d in fb.readlines()[:1000000]:
                print(d)
        lock.release()
    except LockTimeout:
        # NOTE: acquire() above is called without a timeout, so this
        # handler is effectively unreachable; kept for parity.
        print("entered into exception")
        lock.break_lock()
        lock.acquire()

print(lock.is_locked())
print(lock.i_am_locking())
print("I locked", lock.path)
if lock.is_locked():
    lock.release()