class MCP342x_read(object):
    def __init__(self, logger, address, channel, gain, resolution):
        self.logger = logger
        self.i2c_address = address
        self.channel = channel
        self.gain = gain
        self.resolution = resolution
        if GPIO.RPI_INFO['P1_REVISION'] in [2, 3]:
            self.I2C_bus_number = 1
        else:
            self.I2C_bus_number = 0
        self.bus = smbus.SMBus(self.I2C_bus_number)
        self.lock_file = "/var/lock/mycodo_adc_0x{:02X}.pid".format(
            self.i2c_address)

    def setup_lock(self):
        self.execution_timer = timeit.default_timer()
        try:
            self.lock = LockFile(self.lock_file)
            while not self.lock.i_am_locking():
                try:
                    self.logger.debug(
                        "[Analog->Digital Converter 0x{:02X}] Acquiring Lock: {}"
                        .format(self.i2c_address, self.lock.path))
                    self.lock.acquire(timeout=60)  # wait up to 60 seconds
                except:
                    self.logger.warning(
                        "[Analog->Digital Converter 0x{:02X}] Waited 60 seconds. Breaking lock to acquire {}"
                        .format(self.i2c_address, self.lock.path))
                    self.lock.break_lock()
                    self.lock.acquire()
            self.logger.debug(
                "[Analog->Digital Converter 0x{:02X}] Acquired Lock: {}".format(
                    self.i2c_address, self.lock.path))
            self.logger.debug(
                "[Analog->Digital Converter 0x{:02X}] Executed in {}ms".format(
                    self.i2c_address,
                    (timeit.default_timer() - self.execution_timer) * 1000))
            return 1, "Success"
        except Exception as msg:
            return 0, "Analog->Digital Converter Fail: {}".format(msg)

    def release_lock(self):
        self.lock.release()

    def read(self):
        try:
            time.sleep(0.1)
            self.setup_lock()
            adc = MCP342x(self.bus, self.i2c_address,
                          channel=self.channel - 1,
                          gain=self.gain,
                          resolution=self.resolution)
            response = adc.convert_and_read()
            self.release_lock()
            return 1, response
        except Exception as msg:
            self.release_lock()
            return 0, "Fail: {}".format(msg)
def read(self):
    """
    Takes a reading from the MH-Z19 and updates the self._co2 value

    :returns: None on success or 1 on error
    """
    if not self.serial_device:  # Don't measure if device isn't validated
        return None

    lock = LockFile(self.k30_lock_file)
    try:
        # Acquire lock on MHZ19 to ensure more than one read isn't
        # being attempted at once.
        while not lock.i_am_locking():
            try:
                # wait 60 seconds before breaking lock
                lock.acquire(timeout=60)
            except Exception as e:
                self.logger.error(
                    "{cls} 60 second timeout, {lock} lock broken: "
                    "{err}".format(cls=type(self).__name__,
                                   lock=self.k30_lock_file,
                                   err=e))
                lock.break_lock()
                lock.acquire()
        self._co2 = self.get_measurement()
        lock.release()
        if self._co2 is None:
            return 1
        return  # success - no errors
    except Exception as e:
        self.logger.error(
            "{cls} raised an exception when taking a reading: "
            "{err}".format(cls=type(self).__name__, err=e))
        lock.release()
        return 1
def run_function_with_lock(self, function, lock_file, timeout=30,
                           args=[], kwargs={}):
    self.logger.debug('starting function with lock: %s' % lock_file)
    lock = LockFile(lock_file)
    try:
        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=timeout)
            except (LockTimeout, NotMyLock) as e:
                self.logger.debug('breaking lock')
                lock.break_lock()
                lock.acquire()
                self.logger.exception(e)
        self.logger.debug('lock acquired: starting function')
        return function(*args, **kwargs)
    finally:
        self.logger.debug('function done, releasing lock')
        if lock.is_locked():
            try:
                lock.release()
            except NotMyLock:
                try:
                    os.remove(lock_file)
                except Exception as e:
                    self.logger.exception(e)
        self.logger.debug('lock released')
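A minimal sketch of how a caller might use run_function_with_lock from the same class; the lock path and the read_sensor helper are hypothetical, not from the source:

# Hypothetical caller inside the same class; '/var/lock/sensor.lock' and
# self.read_sensor are illustrative names only.
def read_locked(self):
    return self.run_function_with_lock(
        self.read_sensor,           # callable to run while holding the lock
        '/var/lock/sensor.lock',    # lock file shared by all readers
        timeout=30,
        kwargs={'retries': 3})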
def write_relay_log(relayNumber, relaySeconds):
    config = ConfigParser.RawConfigParser()

    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    if not Terminate:
        lock = LockFile(relay_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Write Relay Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Write Relay Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Write Relay Log] Gained lock: %s", lock.path)
        relay = [0] * 9
        for n in range(1, 9):
            if n == relayNumber:
                relay[relayNumber] = relaySeconds
        try:
            with open(relay_log_file_tmp, "ab") as relaylog:
                relaylog.write('{0} {1} {2} {3} {4} {5} {6} {7} {8}\n'.format(
                    datetime.datetime.now().strftime("%Y %m %d %H %M %S"),
                    relay[1], relay[2], relay[3], relay[4],
                    relay[5], relay[6], relay[7], relay[8]))
        except:
            logging.warning("[Write Relay Log] Unable to append data to %s",
                            relay_log_file_tmp)
        logging.info("[Write Relay Log] Removing lock: %s", lock.path)
        lock.release()
def write_sensor_log():
    config = ConfigParser.RawConfigParser()

    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    if not Terminate:
        lock = LockFile(sensor_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Write Sensor Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Write Sensor Log] Gained lock: %s", lock.path)
        try:
            with open(sensor_log_file_tmp, "ab") as sensorlog:
                sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f}\n'.format(
                    datetime.datetime.now().strftime("%Y %m %d %H %M %S"),
                    tempc, humidity, dewpointc))
                logging.info("[Write Sensor Log] Data appended to %s",
                             sensor_log_file_tmp)
        except:
            logging.warning("[Write Sensor Log] Unable to append data to %s",
                            sensor_log_file_tmp)
        logging.info("[Write Sensor Log] Removing lock: %s", lock.path)
        lock.release()
def write_ht_sensor_log(sensor_ht_read_temp_c, sensor_ht_read_hum,
                        sensor_ht_dewpt_c, sensor):
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)

    lock = LockFile(sensor_ht_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except:
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()
    logging.debug("[Write Sensor Log] Gained lock: %s", lock.path)
    try:
        with open(sensor_ht_log_file_tmp, "ab") as sensorlog:
            sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f} {4}\n'.format(
                datetime.datetime.now().strftime("%Y %m %d %H %M %S"),
                sensor_ht_read_temp_c[sensor],
                sensor_ht_read_hum[sensor],
                sensor_ht_dewpt_c[sensor],
                sensor))
            logging.debug("[Write Sensor Log] Data appended to %s",
                          sensor_ht_log_file_tmp)
    except:
        logging.warning("[Write Sensor Log] Unable to append data to %s",
                        sensor_ht_log_file_tmp)
    logging.debug("[Write Sensor Log] Removing lock: %s", lock.path)
    lock.release()
def write_relay_log(relayNumber, relaySeconds, sensor, gpio):
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)

    lock = LockFile(relay_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Relay Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except:
            logging.warning("[Write Relay Log] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()
    logging.debug("[Write Relay Log] Gained lock: %s", lock.path)
    try:
        with open(relay_log_file_tmp, "ab") as relaylog:
            relaylog.write('{0} {1:d} {2:d} {3:d} {4:.2f}\n'.format(
                datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                sensor, relayNumber, gpio, relaySeconds))
    except:
        logging.warning("[Write Relay Log] Unable to append data to %s",
                        relay_log_file_tmp)
    logging.debug("[Write Relay Log] Removing lock: %s", lock.path)
    lock.release()
def query(self, query_str):
    """ Send command and return reply """
    lock_file_amend = '{lf}.{dev}'.format(
        lf=ATLAS_PH_LOCK_FILE,
        dev=self.serial_device.replace("/", "-"))
    lock = LockFile(lock_file_amend)
    try:
        while not lock.i_am_locking():
            try:
                # wait up to 10 seconds before breaking lock
                lock.acquire(timeout=10)
            except Exception as e:
                logger.exception(
                    "{cls} 10 second timeout, {lock} lock broken: "
                    "{err}".format(cls=type(self).__name__,
                                   lock=lock_file_amend,
                                   err=e))
                lock.break_lock()
                lock.acquire()
        self.send_cmd(query_str)
        time.sleep(1.3)
        response = self.read_lines()
        lock.release()
        return response
    except Exception as err:
        logger.exception(
            "{cls} raised an exception when taking a reading: "
            "{err}".format(cls=type(self).__name__, err=err))
        lock.release()
        return None
def main():
    lock = LockFile(SMART_FILE)

    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            lock.break_lock()

    data = {}
    if os.path.exists(SMART_FILE):
        with open(SMART_FILE, 'rb') as f:
            try:
                data = pickle.loads(f.read())
            except:
                pass

    device = os.environ.get('SMARTD_DEVICE')
    if device not in data:
        data[device] = []

    message = os.environ.get('SMARTD_MESSAGE')
    if message not in data[device]:
        data[device].append(message)

    with open(SMART_FILE, 'wb') as f:
        f.write(pickle.dumps(data))

    lock.release()
def write_ht_sensor_log(sensor_ht_read_temp_c, sensor_ht_read_hum,
                        sensor_ht_dewpt_c, sensor):
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)

    lock = LockFile(sensor_ht_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except:
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()
    logging.debug("[Write Sensor Log] Gained lock: %s", lock.path)
    try:
        with open(sensor_ht_log_file_tmp, "ab") as sensorlog:
            sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f} {4:d}\n'.format(
                datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                sensor_ht_read_temp_c[sensor],
                sensor_ht_read_hum[sensor],
                sensor_ht_dewpt_c[sensor],
                sensor))
            logging.debug("[Write Sensor Log] Data appended to %s",
                          sensor_ht_log_file_tmp)
    except:
        logging.warning("[Write Sensor Log] Unable to append data to %s",
                        sensor_ht_log_file_tmp)
    logging.debug("[Write Sensor Log] Removing lock: %s", lock.path)
    lock.release()
def read(self):
    """
    Takes a reading from the K30 and updates the self._co2 value

    :returns: None on success or 1 on error
    """
    lock = LockFile(K30_LOCK_FILE)
    try:
        # Acquire lock on K30 to ensure more than one read isn't
        # being attempted at once.
        while not lock.i_am_locking():
            try:
                # wait up to 60 seconds before breaking lock
                lock.acquire(timeout=60)
            except Exception as e:
                logger.error("{cls} 60 second timeout, {lock} lock broken: "
                             "{err}".format(cls=type(self).__name__,
                                            lock=K30_LOCK_FILE,
                                            err=e))
                lock.break_lock()
                lock.acquire()
        self._co2 = self.get_measurement()
        lock.release()
        if self._co2 is None:
            return 1
        return  # success - no errors
    except Exception as e:
        logger.error("{cls} raised an exception when taking a reading: "
                     "{err}".format(cls=type(self).__name__, err=e))
        lock.release()
        return 1
class Repo:
    """ Class to deal with the metadata surrounding a Git repository """

    def __init__(self, parent, repo_url):
        self.parent = parent
        self.url = repo_url
        self.folder_name = os.path.splitext(os.path.basename(repo_url))[0]
        self.containing_folder = os.path.join(parent.repo_folder, self.folder_name)
        if not os.path.exists(self.containing_folder):
            os.makedirs(self.containing_folder)
        self.path = os.path.join(self.containing_folder, 'repo')
        self.lockfile_path = os.path.join(self.containing_folder, 'lock')
        self.lock = LockFile(self.lockfile_path)
        self.json_path = os.path.join(self.containing_folder, 'metadata.json')
        self.data = {}
        if os.path.exists(self.json_path):
            with open(self.json_path) as json_file:
                self.data = json.load(json_file)
        self.__git = None

    def __enter__(self):
        """ Update context """
        self.lock.acquire(timeout=0)
        logger.info('Git: Updating %s', self.url)
        if not os.path.exists(self.path):
            logger.debug('Cloning %s', self.url)
            git.Git(self.containing_folder).clone(self.url, self.path)
        else:
            try:
                repo = self.git(is_updater=True)
                logger.debug('Pulling %s', self.url)
                repo.git.pull()
            except Exception as e:
                logger.debug('Re-Cloning %s because %s', self.url, str(e))
                shutil.rmtree(self.path)
                git.Git(self.containing_folder).clone(self.url, self.path)
        return self

    def __exit__(self, type, value, traceback):
        # Save the updated time
        self.data['last_updated'] = str(datetime.datetime.utcnow())
        self.save()
        logger.info('Git: Update completed for %s', self.url)
        self.lock.break_lock()

    def save(self):
        with open(self.json_path, 'w') as f:
            json.dump(self.data, f)

    def git(self, is_updater=False):
        if self.lock.is_locked() and (not self.parent.is_updater and not is_updater):
            raise AlreadyLocked('This repository is being updated, if you can '
                                'see this message delete {}'.format(self.lockfile_path))
        else:
            if self.__git is None or is_updater:
                self.__git = git.Repo(self.path)
            return self.__git
def main(username, password):
    # Ignore error, logging set up in logging utils
    from . import logging_utils
    from .navigation import Leifur
    from .config import get_config, set_config, get_config_from_user
    from .connectivity import check_internet_connection
    from .update_checker import check_for_updates

    lock = LockFile('/tmp/spoppy')
    try:
        try:
            # Try for 1s to acquire the lock
            lock.acquire(1)
        except LockTimeout:
            click.echo('Could not acquire lock, is spoppy running?')
            click.echo(
                'If you\'re sure that spoppy is not running, '
                'try removing the lock file %s' % lock.lock_file
            )
            click.echo(
                'You can try removing the lock file by responding [rm]. '
                'spoppy will exit on all other inputs'
            )
            try:
                response = raw_input('')
            except NameError:
                response = input('')
            if response == 'rm':
                lock.break_lock()
            else:
                raise TypeError('Could not get lock')
    except TypeError:
        pass
    else:
        check_internet_connection()

        # Check for updates
        check_for_updates(click, get_version(), lock)

        if username and password:
            set_config(username, password)
        else:
            username, password = get_config()
        if not (username and password):
            username, password = get_config_from_user()

        navigator = None
        try:
            navigator = Leifur(username, password)
            navigator.start()
        finally:
            if navigator:
                navigator.shutdown()
            logger.debug('Finally, bye!')
    finally:
        if lock.i_am_locking():
            lock.release()
def main(username, password):
    from . import logging_utils
    logging_utils.configure_logging()
    from .navigation import Leifur
    from .config import get_config, set_config, get_config_from_user
    from .connectivity import check_internet_connection
    from .update_checker import check_for_updates

    lock = LockFile('/tmp/spoppy')
    try:
        try:
            # Try for 1s to acquire the lock
            lock.acquire(1)
        except LockTimeout:
            click.echo('Could not acquire lock, is spoppy running?')
            click.echo(
                'If you\'re sure that spoppy is not running, '
                'try removing the lock file %s' % lock.lock_file
            )
            click.echo(
                'You can try removing the lock file by responding [rm]. '
                'spoppy will exit on all other inputs'
            )
            try:
                response = raw_input('')
            except NameError:
                response = input('')
            if response == 'rm':
                lock.break_lock()
            else:
                raise TypeError('Could not get lock')
    except TypeError:
        pass
    else:
        check_internet_connection()

        # Check for updates
        check_for_updates(click, get_version(), lock)

        if username and password:
            set_config(username, password)
        else:
            username, password = get_config()
        if not (username and password):
            username, password = get_config_from_user()

        navigator = None
        try:
            navigator = Leifur(username, password)
            navigator.start()
        finally:
            if navigator:
                navigator.shutdown()
            logger.debug('Finally, bye!')
    finally:
        if lock.i_am_locking():
            lock.release()
class Lock():
    """Simple implementation of a mutex lock using the file systems. Works
    on *nix systems."""

    path = None
    lock = None

    def __init__(self, path):
        try:
            from lockfile import LockFile
        except ImportError:
            from lockfile import FileLock
            # Different naming in older versions of lockfile
            LockFile = FileLock
        self.path = path
        self.lock = LockFile(path)

    def obtain(self):
        import os
        import logging
        from lockfile import AlreadyLocked
        logger = logging.getLogger()

        try:
            self.lock.acquire(0)
            logger.debug("Successfully obtained lock: %s" % self.path)
        except AlreadyLocked:
            return False
        return True

    def release(self):
        import os
        import logging
        logger = logging.getLogger()

        if not self.has_lock():
            raise Exception(
                "Unable to release lock that is owned by another process")
        self.lock.release()
        logger.debug("Successfully released lock: %s" % self.path)

    def has_lock(self):
        return self.lock.i_am_locking()

    def clear(self):
        import os
        import logging
        logger = logging.getLogger()

        self.lock.break_lock()
        logger.debug("Successfully cleared lock: %s" % self.path)
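A minimal polling loop around the Lock wrapper above; the lock path and the one-second retry interval are illustrative, not from the source:

# Hypothetical usage of the Lock wrapper; '/tmp/backup.lock' is illustrative.
import time

lock = Lock('/tmp/backup.lock')
while not lock.obtain():    # obtain() returns False while another process holds it
    time.sleep(1)
try:
    pass                    # ... critical section ...
finally:
    if lock.has_lock():
        lock.release()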
class State(object):
    def __init__(self, path=None, lock=False):
        self.path = path
        self.lock = lock
        if not self.path:
            self.path = join(util.get_home_dir(), "appstate.json")
        self._state = {}
        self._prev_state = {}
        self._lockfile = None

    def __enter__(self):
        try:
            self._lock_state_file()
            if isfile(self.path):
                self._state = util.load_json(self.path)
        except ValueError:
            self._state = {}
        self._prev_state = deepcopy(self._state)
        return self._state

    def __exit__(self, type_, value, traceback):
        if self._prev_state != self._state:
            with open(self.path, "w") as fp:
                if "dev" in __version__:
                    json.dump(self._state, fp, indent=4)
                else:
                    json.dump(self._state, fp)
        self._unlock_state_file()

    def _lock_state_file(self):
        if not self.lock:
            return
        self._lockfile = LockFile(self.path)
        if self._lockfile.is_locked() and \
                (time() - getmtime(self._lockfile.lock_file)) > 10:
            self._lockfile.break_lock()
        try:
            self._lockfile.acquire()
        except LockFailed:
            raise exception.PlatformioException(
                "The directory `{0}` or its parent directory is not owned by "
                "the current user and PlatformIO can not store configuration "
                "data. \nPlease check the permissions and owner of that "
                "directory. Otherwise, please remove manually `{0}` "
                "directory and PlatformIO will create new from the current "
                "user.".format(dirname(self.path)))

    def _unlock_state_file(self):
        if self._lockfile:
            self._lockfile.release()
class State(object):
    def __init__(self, path=None, lock=False):
        self.path = path
        self.lock = lock
        if not self.path:
            self.path = join(util.get_home_dir(), "appstate.json")
        self._state = {}
        self._prev_state = {}
        self._lockfile = None

    def __enter__(self):
        try:
            self._lock_state_file()
            if isfile(self.path):
                self._state = util.load_json(self.path)
        except exception.PlatformioException:
            self._state = {}
        self._prev_state = deepcopy(self._state)
        return self._state

    def __exit__(self, type_, value, traceback):
        if self._prev_state != self._state:
            with open(self.path, "w") as fp:
                if "dev" in __version__:
                    json.dump(self._state, fp, indent=4)
                else:
                    json.dump(self._state, fp)
        self._unlock_state_file()

    def _lock_state_file(self):
        if not self.lock:
            return
        self._lockfile = LockFile(self.path)
        if self._lockfile.is_locked() and \
                (time() - getmtime(self._lockfile.lock_file)) > 10:
            self._lockfile.break_lock()
        try:
            self._lockfile.acquire()
        except LockFailed:
            raise exception.PlatformioException(
                "The directory `{0}` or its parent directory is not owned by "
                "the current user and PlatformIO can not store configuration "
                "data. \nPlease check the permissions and owner of that "
                "directory. Otherwise, please remove manually `{0}` "
                "directory and PlatformIO will create new from the current "
                "user.".format(dirname(self.path)))

    def _unlock_state_file(self):
        if self._lockfile:
            self._lockfile.release()
def append_line_to_mycodo_log(log_file_path, log_lock_path, log_line):
    """
    Appends given line to log file.

    :return:
    :rtype:

    :param log_file_path: Path to the Log File
    :type log_file_path: str
    :param log_lock_path: Path to the Lock File
    :type log_lock_path: str
    :param log_line: String to write to the Log File
    :type log_line: str
    """
    lock = LockFile(log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: {}".format(lock.path))
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except:  # TODO Needs better catch statement
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: {}".format(lock.path))
            lock.break_lock()
            lock.acquire()
        finally:
            logging.debug("[Write Sensor Log] Gained lock: {}".format(lock.path))
    try:
        # The write must happen while the file is still open; a stray `pass`
        # in the original placed it outside the `with` block.
        with open(log_file_path, "ab") as sensorlog:
            sensorlog.write(log_line + "\n")
            # Temperature:
            # sensorlog.write('{"%Y/%m/%d-%H:%M:%S"} {:.1f} {}'.format(now(), sensor_t_read_temp_c[sensor], sensor))
            # Temperature/Humidity:
            # sensorlog.write('{"%Y/%m/%d-%H:%M:%S"} {:.1f} {:.1f} {:.1f} {}'.format(now(), sensor_ht_read_temp_c[sensor], sensor_ht_read_hum[sensor], sensor_ht_dewpt_c[sensor], sensor))
            # CO2
            # sensorlog.write('{"%Y/%m/%d-%H:%M:%S"} {} {}'.format(now(), sensor_co2_read_co2[sensor], sensor))
            # Pressure
            # sensorlog.write('{"%Y/%m/%d-%H:%M:%S"} {:.1f} {} {:.1f} {}'.format(now(), sensor_press_read_temp_c[sensor], sensor_press_read_press[sensor], sensor_press_read_alt[sensor], sensor))
            # Relay
            # relaylog.write('{"%Y/%m/%d-%H:%M:%S"} {} {} {} {:.2f}'.format(now(), sensor, relayNumber, gpio, relaySeconds))
        logging.debug("[Write Sensor Log] Data appended to {}".format(
            log_file_path))
    except:  # TODO Needs better catch statement
        logging.warning("[Write Sensor Log] Unable to append data to %s",
                        log_file_path)

    logging.debug("[Write Sensor Log] Removing lock: {}".format(lock.path))
    lock.release()
def do_lock(*args, **kwargs):
    lock = LockFile(path)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            lock.break_lock()
    try:
        rv = f(*args, **kwargs)
    finally:
        lock.release()
    return rv
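do_lock reads like the inner wrapper of a decorator factory: f and path are free variables supplied by an enclosing scope. One plausible reconstruction of that factory, shown here as an assumption rather than the original code:

# Hypothetical enclosing decorator factory; only the inner `do_lock` body
# above appears in the source.
import functools
from lockfile import LockFile, LockTimeout

def with_file_lock(path):
    def decorator(f):
        @functools.wraps(f)
        def do_lock(*args, **kwargs):
            lock = LockFile(path)
            while not lock.i_am_locking():
                try:
                    lock.acquire(timeout=5)
                except LockTimeout:
                    lock.break_lock()   # loop retries the acquire
            try:
                rv = f(*args, **kwargs)
            finally:
                lock.release()
            return rv
        return do_lock
    return decorator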
class MailQueue(object):

    QUEUE_FILE = '/tmp/mail.queue'
    MAX_ATTEMPTS = 3

    def __init__(self):
        self.queue = None

    def append(self, message):
        self.queue.append(QueueItem(message))

    @classmethod
    def is_empty(cls):
        if not os.path.exists(cls.QUEUE_FILE):
            return True
        try:
            return os.stat(cls.QUEUE_FILE).st_size == 0
        except OSError:
            return True

    def _get_queue(self):
        try:
            with open(self.QUEUE_FILE, 'rb') as f:
                self.queue = pickle.loads(f.read())
        except (pickle.PickleError, EOFError):
            self.queue = []

    def __enter__(self):
        self._lock = LockFile(self.QUEUE_FILE)
        while not self._lock.i_am_locking():
            try:
                self._lock.acquire(timeout=330)
            except LockTimeout:
                self._lock.break_lock()

        if not os.path.exists(self.QUEUE_FILE):
            open(self.QUEUE_FILE, 'a').close()
        self._get_queue()
        return self

    def __exit__(self, typ, value, traceback):
        with open(self.QUEUE_FILE, 'wb+') as f:
            if self.queue:
                f.write(pickle.dumps(self.queue))
        self._lock.release()
        if typ is not None:
            raise
def relay_command(command, rid):
    if (rid and rid != -1 and not stateless_command(command)):
        state = relay_status(rid)
    else:
        state = -1

    if (command == 'g'):
        if (state == -1):
            print "Will return an invalid status"
        return '1' if state else '0'

    lock = LockFile("/tmp/relay", timeout=5)
    try:
        with lock:
            log = open(log_file, "a")
            log.write(">" + str(command) + " " + str(rid) + "\n")
            ser = serial.Serial('/dev/ttyUSB0', 19200, timeout=1)
            ser.flush()
            ser.write(command)
            if (rid >= 0):
                ser.write(chr((int(rid) >> 8) & 0xff))
                ser.write(chr((int(rid)) & 0xff))
                log.close()
            else:
                response = ser.readline()
                log.write("<" + response + "\n")
                log.close()
                return response
    except LockTimeout:
        lock.break_lock()

    if state != -1 and (int(rid) in http_url):
        url, consumption = http_url[int(rid)]
    else:
        url = False
    if (url != False):
        try:
            if (command == 'A' and not state):
                urllib2.urlopen(url + str(float(0)))
                urllib2.urlopen(url + str(float(consumption)))
            elif (command == 'E' and state):
                urllib2.urlopen(url + str(float(consumption)))
                urllib2.urlopen(url + str(float(0)))
        except Exception:
            pass
    if (rid != -1):
        update_status(rid, command)
    return ''
class Journal(object):
    """
    Interface for accessing the journal for the queries that couldn't run
    in the remote side, either for it being offline or failed to execute.

    This should be used in a context and provides file locking by itself.
    """

    JOURNAL_FILE = '/data/ha-journal'

    @classmethod
    def is_empty(cls):
        if not os.path.exists(cls.JOURNAL_FILE):
            return True
        try:
            return os.stat(cls.JOURNAL_FILE).st_size == 0
        except OSError:
            return True

    def _get_queries(self):
        try:
            with open(self.JOURNAL_FILE, 'rb') as f:
                self.queries = pickle.loads(f.read())
        except (pickle.PickleError, EOFError):
            self.queries = []

    def __enter__(self):
        self._lock = LockFile(self.JOURNAL_FILE)
        while not self._lock.i_am_locking():
            try:
                self._lock.acquire(timeout=5)
            except LockTimeout:
                self._lock.break_lock()

        if not os.path.exists(self.JOURNAL_FILE):
            open(self.JOURNAL_FILE, 'a').close()
        self._get_queries()
        return self

    def __exit__(self, typ, value, traceback):
        with open(self.JOURNAL_FILE, 'w+') as f:
            if self.queries:
                f.write(pickle.dumps(self.queries))
        self._lock.release()
        if typ is not None:
            raise
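A minimal usage sketch for the Journal context manager; the recorded query is illustrative only:

# Hypothetical caller: record a query that failed to run remotely so it can
# be replayed later. The SQL text is illustrative.
with Journal() as journal:
    journal.queries.append(('UPDATE config SET synced = 0', ()))
# On exit, __exit__ pickles journal.queries back to JOURNAL_FILE and
# releases the file lock.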
def sync_sources(labels):
    """
    Attempts to run several methods of Certificate discovery. This is run
    on a periodic basis and updates the Lemur datastore with the
    information it discovers.
    """
    if not labels:
        sys.stdout.write("Active\tLabel\tDescription\n")
        for source in source_service.get_all():
            sys.stdout.write(
                "{active}\t{label}\t{description}!\n".format(
                    label=source.label,
                    description=source.description,
                    active=source.active
                )
            )
    else:
        start_time = time.time()
        lock_file = "/tmp/.lemur_lock"
        sync_lock = LockFile(lock_file)

        while not sync_lock.i_am_locking():
            try:
                sync_lock.acquire(timeout=10)  # wait up to 10 seconds
                sys.stdout.write("[+] Starting to sync sources: {labels}!\n".format(labels=labels))
                labels = labels.split(",")

                if labels[0] == 'all':
                    sync()
                else:
                    sync(labels=labels)

                sys.stdout.write(
                    "[+] Finished syncing sources. Run Time: {time}\n".format(
                        time=(time.time() - start_time)
                    )
                )
            except LockTimeout:
                sys.stderr.write(
                    "[!] Unable to acquire file lock on {file}, is there another sync running?\n".format(
                        file=lock_file
                    )
                )
                sync_lock.break_lock()
                sync_lock.acquire()
                sync_lock.release()

        sync_lock.release()
class State(object):
    def __init__(self, path=None, lock=False):
        self.path = path
        self.lock = lock
        if not self.path:
            self.path = join(util.get_home_dir(), "appstate.json")
        self._state = {}
        self._prev_state = {}
        self._lockfile = None

    def __enter__(self):
        try:
            self._lock_state_file()
            if isfile(self.path):
                self._state = util.load_json(self.path)
        except exception.PlatformioException:
            self._state = {}
        self._prev_state = deepcopy(self._state)
        return self._state

    def __exit__(self, type_, value, traceback):
        if self._prev_state != self._state:
            try:
                with open(self.path, "w") as fp:
                    if "dev" in __version__:
                        json.dump(self._state, fp, indent=4)
                    else:
                        json.dump(self._state, fp)
            except IOError:
                raise exception.HomeDirPermissionsError(util.get_home_dir())
        self._unlock_state_file()

    def _lock_state_file(self):
        if not self.lock:
            return
        self._lockfile = LockFile(self.path)
        if self._lockfile.is_locked() and \
                (time() - getmtime(self._lockfile.lock_file)) > 10:
            self._lockfile.break_lock()
        try:
            self._lockfile.acquire()
        except LockFailed:
            raise exception.HomeDirPermissionsError(dirname(self.path))

    def _unlock_state_file(self):
        if self._lockfile:
            self._lockfile.release()
def sync(labels):
    """
    Attempts to run several methods of Certificate discovery. This is run
    on a periodic basis and updates the Lemur datastore with the
    information it discovers.
    """
    if not labels:
        sys.stdout.write("Active\tLabel\tDescription\n")
        for source in source_service.get_all():
            sys.stdout.write(
                "{active}\t{label}\t{description}!\n".format(
                    label=source.label,
                    description=source.description,
                    active=source.active
                )
            )
    else:
        start_time = time.time()
        lock_file = "/tmp/.lemur_lock"
        sync_lock = LockFile(lock_file)

        while not sync_lock.i_am_locking():
            try:
                sync_lock.acquire(timeout=10)  # wait up to 10 seconds
                sys.stdout.write("[+] Starting to sync sources: {labels}!\n".format(labels=labels))
                labels = labels.split(",")

                if labels[0] == 'all':
                    source_sync()
                else:
                    source_sync(labels=labels)

                sys.stdout.write(
                    "[+] Finished syncing sources. Run Time: {time}\n".format(
                        time=(time.time() - start_time)
                    )
                )
            except LockTimeout:
                sys.stderr.write(
                    "[!] Unable to acquire file lock on {file}, is there another sync running?\n".format(
                        file=lock_file
                    )
                )
                sync_lock.break_lock()
                sync_lock.acquire()
                sync_lock.release()

        sync_lock.release()
class MCP342x_read(object):
    def __init__(self, logger, address, channel, resolution):
        self.logger = logger
        self.i2c_address = address
        self.channel = channel
        self.resolution = resolution
        if GPIO.RPI_INFO['P1_REVISION'] in [2, 3]:
            self.I2C_bus_number = 1
        else:
            self.I2C_bus_number = 0
        self.bus = smbus.SMBus(self.I2C_bus_number)
        self.lock_file = "/var/lock/mycodo_adc_0x{:02X}.pid".format(self.i2c_address)

    def setup_lock(self):
        self.execution_timer = timeit.default_timer()
        try:
            self.lock = LockFile(self.lock_file)
            while not self.lock.i_am_locking():
                try:
                    self.logger.debug(
                        "[Analog->Digital Converter 0x{:02X}] Acquiring Lock: {}".format(
                            self.i2c_address, self.lock.path))
                    self.lock.acquire(timeout=60)  # wait up to 60 seconds
                except:
                    self.logger.warning(
                        "[Analog->Digital Converter 0x{:02X}] Waited 60 seconds. Breaking lock to acquire {}".format(
                            self.i2c_address, self.lock.path))
                    self.lock.break_lock()
                    self.lock.acquire()
            self.logger.debug(
                "[Analog->Digital Converter 0x{:02X}] Acquired Lock: {}".format(
                    self.i2c_address, self.lock.path))
            self.logger.debug(
                "[Analog->Digital Converter 0x{:02X}] Executed in {}ms".format(
                    self.i2c_address,
                    (timeit.default_timer() - self.execution_timer) * 1000))
            return 1, "Success"
        except Exception as msg:
            return 0, "Analog->Digital Converter Fail: {}".format(msg)

    def release_lock(self):
        self.lock.release()

    def read(self):
        try:
            time.sleep(0.1)
            self.setup_lock()
            adc = MCP342x(self.bus, self.i2c_address,
                          channel=self.channel - 1,
                          resolution=self.resolution)
            response = adc.convert_and_read()
            self.release_lock()
            return 1, response
        except Exception as msg:
            self.release_lock()
            return 0, "Fail: {}".format(msg)
def read(self):
    lock = LockFile(K30_LOCK_FILE)
    try:
        # Acquire lock on K30 to ensure more than one read isn't
        # being attempted at once.
        while not lock.i_am_locking():
            try:
                # wait up to 60 seconds before breaking lock
                lock.acquire(timeout=60)
            except:
                lock.break_lock()
                lock.acquire()
        self._co2 = self.get_measurement()
        lock.release()
    except:
        lock.release()
        return 1
# The function below yields inside a `with` block, so it is clearly meant to
# be used as a context manager; the @contextmanager decorator and its import
# were missing from the original. The original also re-acquired the lock in a
# `finally` clause on every loop iteration, which would deadlock after a
# successful acquire; the acquire is moved into the except branch instead.
from contextlib import contextmanager


@contextmanager
def safe_flush_file(file_path):
    lock_f = '%s' % os.path.basename(file_path)
    lock = LockFile(lock_f)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            lock.break_lock()
            lock.acquire()
    with open(file_path, 'w') as cf:
        try:
            yield cf
        finally:
            cf.close()
            lock.release()
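A usage sketch, assuming the @contextmanager decorator added above; the path and payload are illustrative:

# Hypothetical caller; writes go through the lock-protected context manager.
with safe_flush_file('/tmp/app-config.ini') as cf:
    cf.write('[main]\nretries = 3\n')  # illustrative payload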
def concat_log_tmp_to_perm(log_file_tmp, log_file_perm, log_lock_path):
    """
    Combines logs on the temporary file system with the logs on the SD card.

    :return:
    :rtype:

    :param log_file_tmp: Path to the Log File on the tmpfs
    :type log_file_tmp: str
    :param log_file_perm: Path to the Log File on the SD Card
    :type log_file_perm: str
    :param log_lock_path: Path to the lock file
    :type log_lock_path: str
    """
    # Daemon Logs
    if not filecmp.cmp(log_file_tmp, log_file_perm):
        logging.debug("[Log Backup] Concatenating log cache"
                      " ({}) to permanent storage ({})".format(log_file_tmp, log_file_perm))
        lock = LockFile(log_lock_path)

        while not lock.i_am_locking():
            try:
                logging.debug("[Log Backup] Acquiring Lock: {}".format(lock.path))
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:  # TODO Needs better catch statement
                logging.warning("[Log Backup] Breaking Lock to Acquire: {}".format(lock.path))
                lock.break_lock()
                lock.acquire()
            finally:
                logging.debug("[Log Backup] Gained lock: {}".format(lock.path))

        try:
            with open(log_file_perm, 'a') as fout, open(log_file_tmp, 'r+') as tmp_log:
                for line in tmp_log:
                    fout.write(line)
                logging.debug("[Log Backup] Appended log data to {}".format(log_file_perm))
                # Clear tmp_log if we've copied the lines over; seek back to
                # the start first, otherwise truncate() at EOF is a no-op.
                tmp_log.seek(0)
                tmp_log.truncate()
        except:  # TODO Needs better catch statement
            logging.warning("[Log Backup] Unable to append data to {}".format(log_file_perm))

        logging.debug("[Log Backup] Removing lock: {}".format(lock.path))
        lock.release()
    else:
        logging.debug(
            "[Log Backup] Logs the same, skipping. ({}) ({})".format(log_file_tmp, log_file_perm))
class State(object):
    def __init__(self, path=None, lock=False):
        self.path = path
        self.lock = lock
        if not self.path:
            self.path = join(get_home_dir(), "appstate.json")
        self._state = {}
        self._prev_state = {}
        self._lockfile = None

    def __enter__(self):
        try:
            self._lock_state_file()
            if isfile(self.path):
                with open(self.path, "r") as fp:
                    self._state = json.load(fp)
        except ValueError:
            self._state = {}
        self._prev_state = deepcopy(self._state)
        return self._state

    def __exit__(self, type_, value, traceback):
        if self._prev_state != self._state:
            with open(self.path, "w") as fp:
                if "dev" in __version__:
                    json.dump(self._state, fp, indent=4)
                else:
                    json.dump(self._state, fp)
        self._unlock_state_file()

    def _lock_state_file(self):
        if not self.lock:
            return
        self._lockfile = LockFile(self.path)
        if (self._lockfile.is_locked() and
                (time() - getmtime(self._lockfile.lock_file)) > 10):
            self._lockfile.break_lock()
        self._lockfile.acquire()

    def _unlock_state_file(self):
        if self._lockfile:
            self._lockfile.release()
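A minimal round trip through this State class; the key and value are illustrative. The updated dict is written back on exit only if it differs from what was loaded:

# Hypothetical usage; __enter__ returns the state dict itself.
with State(lock=True) as state:
    state['last_check'] = 1234567890  # illustrative key/value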
class SmartAlert(object):

    SMART_FILE = '/tmp/.smartalert'

    def __init__(self):
        self.data = {}
        self.lock = LockFile(self.SMART_FILE)

    def __enter__(self):
        while not self.lock.i_am_locking():
            try:
                self.lock.acquire(timeout=5)
            except LockTimeout:
                self.lock.break_lock()

        if os.path.exists(self.SMART_FILE):
            with open(self.SMART_FILE, 'rb') as f:
                try:
                    self.data = pickle.loads(f.read())
                except Exception:
                    pass
        return self

    def __exit__(self, typ, value, traceback):
        with open(self.SMART_FILE, 'wb') as f:
            f.write(pickle.dumps(self.data))

        self.lock.release()
        if typ is not None:
            raise

    def message_add(self, dev, message):
        if not dev.startswith('/dev/'):
            dev = '/dev/{0}'.format(dev)
        if dev not in self.data:
            self.data[dev] = []
        if message not in self.data[dev]:
            self.data[dev].append(message)

    def device_delete(self, dev):
        if not dev.startswith('/dev/'):
            dev = '/dev/{0}'.format(dev)
        self.data.pop(dev, None)
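A minimal usage sketch for SmartAlert; the device name and message text are illustrative:

# Hypothetical alert round trip: __enter__ loads any pickled state under the
# lock, __exit__ writes it back and releases the lock.
with SmartAlert() as alert:
    alert.message_add('ada0', 'SMART error on read')  # prefixed to /dev/ada0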
def query(self, query_str):
    """ Send command to board and read response """
    lock_file_amend = '{lf}.{dev}'.format(lf=ATLAS_PH_LOCK_FILE,
                                          dev=self.current_addr)
    lock = LockFile(lock_file_amend)
    try:
        while not lock.i_am_locking():
            try:
                # wait up to 10 seconds before breaking lock
                lock.acquire(timeout=10)
            except Exception as e:
                logger.exception(
                    "{cls} 10 second timeout, {lock} lock broken: "
                    "{err}".format(cls=type(self).__name__,
                                   lock=ATLAS_PH_LOCK_FILE,
                                   err=e))
                lock.break_lock()
                lock.acquire()

        # write a command to the board, wait the correct timeout,
        # and read the response
        self.write(query_str)

        # the read and calibration commands require a longer timeout
        if ((query_str.upper().startswith("R")) or
                (query_str.upper().startswith("CAL"))):
            time.sleep(self.long_timeout)
        elif query_str.upper().startswith("SLEEP"):
            return "sleep mode"
        else:
            time.sleep(self.short_timeout)

        response = self.read()
        lock.release()
        return response
    except Exception as err:
        logger.exception(
            "{cls} raised an exception when taking a reading: "
            "{err}".format(cls=type(self).__name__, err=err))
        lock.release()
        return None
class SmartAlert(object):

    SMART_FILE = "/tmp/.smartalert"

    def __init__(self):
        self.data = {}
        self.lock = LockFile(self.SMART_FILE)

    def __enter__(self):
        while not self.lock.i_am_locking():
            try:
                self.lock.acquire(timeout=5)
            except LockTimeout:
                self.lock.break_lock()

        if os.path.exists(self.SMART_FILE):
            with open(self.SMART_FILE, "rb") as f:
                try:
                    self.data = pickle.loads(f.read())
                except:
                    pass
        return self

    def __exit__(self, typ, value, traceback):
        with open(self.SMART_FILE, "wb") as f:
            f.write(pickle.dumps(self.data))

        self.lock.release()
        if typ is not None:
            raise

    def message_add(self, dev, message):
        if dev not in self.data:
            self.data[dev] = []
        if message not in self.data[dev]:
            self.data[dev].append(message)

    def device_delete(self, dev):
        self.data.pop(dev, None)
def combine_filecontents(self, forcebuild=False):
    """
    .. py:method:: combine_filecontents([forcebuild : boolean]) -> boolean

    Combines all content of scripts into one file. Returns True if file
    generated and False if file already exists.

    :param boolean forcebuild: should re-build scripts every time
    :rtype boolean:
    """
    _built_fname = os.path.join(self.dest, self.get_output_filename())
    if self.exists_andnot_force(forcebuild, _built_fname):
        return False

    lock_ = LockFile(os.path.join(settings.STATIC_ROOT, _built_fname))
    # Loop until we hold the lock (the original tested `while
    # lock_.i_am_locking()`, which never acquires; `not` was missing).
    while not lock_.i_am_locking():
        try:
            lock_.acquire(timeout=20)
        except LockTimeout:
            lock_.break_lock()
            lock_.acquire()
    try:
        _built_fd = self._openfile(_built_fname, 'w')
        # collect all content of scripts into files to be compressed
        for script in self.scripts:
            fd = self._openfile(self.backend.pre_open(script))
            content = self.backend.read(fd)
            _built_fd.write(content + '\n')
            fd.close()
        _built_fd.close()
    except:
        try:
            lock_.release()
        except NotLocked:
            pass
        return False
    return lock_
def main():
    lock = LockFile(COLLECTD_FILE)

    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            lock.break_lock()

    data = {}
    if os.path.exists(COLLECTD_FILE):
        with open(COLLECTD_FILE, 'rb') as f:
            try:
                data = pickle.loads(f.read())
            except:
                pass

    text = sys.stdin.read().replace('\n\n', '\nMessage: ', 1)
    v = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\n", text))

    k = v["Plugin"]
    if "PluginInstance" in v.keys():
        k += "-" + v["PluginInstance"]
    k += "/" + v["Type"]
    if "TypeInstance" in v.keys():
        k += "-" + v["TypeInstance"]

    if v["Severity"] == "OKAY":
        data.pop(k, None)
    else:
        data[k] = v

    with open(COLLECTD_FILE, 'wb') as f:
        f.write(pickle.dumps(data))

    lock.release()
class ContentCache(object):

    def __init__(self, cache_dir=None):
        self.cache_dir = None
        self._db_path = None
        self._lockfile = None

        if not get_setting("enable_cache"):
            return

        self.cache_dir = cache_dir or join(util.get_home_dir(), ".cache")
        self._db_path = join(self.cache_dir, "db.data")

    def __enter__(self):
        if not self._db_path or not isfile(self._db_path):
            return self
        self.delete()
        return self

    def __exit__(self, type_, value, traceback):
        pass

    def _lock_dbindex(self):
        if not self.cache_dir:
            os.makedirs(self.cache_dir)
        self._lockfile = LockFile(self.cache_dir)
        if self._lockfile.is_locked() and \
                (time() - getmtime(self._lockfile.lock_file)) > 10:
            self._lockfile.break_lock()

        try:
            self._lockfile.acquire()
        except LockFailed:
            return False

        return True

    def _unlock_dbindex(self):
        if self._lockfile:
            self._lockfile.release()
        return True

    def get_cache_path(self, key):
        assert len(key) > 3
        return join(self.cache_dir, key[-2:], key)

    @staticmethod
    def key_from_args(*args):
        h = hashlib.md5()
        for data in args:
            h.update(str(data))
        return h.hexdigest()

    def get(self, key):
        cache_path = self.get_cache_path(key)
        if not isfile(cache_path):
            return None
        with open(cache_path, "rb") as fp:
            data = fp.read()
            if data and data[0] in ("{", "["):
                return json.loads(data)
            return data

    def set(self, key, data, valid):
        cache_path = self.get_cache_path(key)
        if isfile(cache_path):
            self.delete(key)
        if not data:
            return
        if not isdir(self.cache_dir):
            os.makedirs(self.cache_dir)
        tdmap = {"s": 1, "m": 60, "h": 3600, "d": 86400}
        assert valid.endswith(tuple(tdmap.keys()))
        expire_time = int(time() + tdmap[valid[-1]] * int(valid[:-1]))

        if not self._lock_dbindex():
            return False

        if not isdir(dirname(cache_path)):
            os.makedirs(dirname(cache_path))

        with open(cache_path, "wb") as fp:
            if isinstance(data, (dict, list)):
                json.dump(data, fp)
            else:
                fp.write(str(data))
        with open(self._db_path, "a") as fp:
            fp.write("%s=%s\n" % (str(expire_time), cache_path))

        return self._unlock_dbindex()

    def delete(self, keys=None):
        """ Keys=None, delete expired items """
        if not keys:
            keys = []
        if not isinstance(keys, list):
            keys = [keys]
        paths_for_delete = [self.get_cache_path(k) for k in keys]
        found = False
        newlines = []
        with open(self._db_path) as fp:
            for line in fp.readlines():
                if "=" not in line:
                    continue
                line = line.strip()
                expire, path = line.split("=")
                if time() < int(expire) and isfile(path) and \
                        path not in paths_for_delete:
                    newlines.append(line)
                    continue
                found = True
                if isfile(path):
                    try:
                        remove(path)
                        if not listdir(dirname(path)):
                            util.rmtree_(dirname(path))
                    except OSError:
                        pass
        if found and self._lock_dbindex():
            with open(self._db_path, "w") as fp:
                fp.write("\n".join(newlines) + "\n")
            self._unlock_dbindex()
        return True

    def clean(self):
        if not self.cache_dir or not isdir(self.cache_dir):
            return
        util.rmtree_(self.cache_dir)
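A hypothetical cache round trip with ContentCache, assuming caching is enabled in settings; the key inputs are illustrative and the "1h" TTL string follows the class's own conventions (one hour):

# Hypothetical usage; the URL and tag used to derive the key are illustrative.
cache = ContentCache()  # assumes get_setting("enable_cache") is truthy
key = cache.key_from_args('http://example.com/api', 'v1')
if cache.get(key) is None:
    cache.set(key, {'status': 'ok'}, '1h')  # cached for one hour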
def mirror_main():
    """Entry point."""
    signal.signal(signal.SIGINT, lock_ctrl_c_handler)
    parser = argparse.ArgumentParser()

    parser.add_argument('-H', '--host', required=True)
    parser.add_argument('-P', '--port', type=int, default=22)
    parser.add_argument('-c', '--netrc-path', default=expanduser('~/.netrc'))
    parser.add_argument('-r', '--resume', action='store_true',
                        help='Resume incomplete files (experimental)')
    parser.add_argument('-T', '--move-to', required=True)
    parser.add_argument('-L', '--label', default='Seeding')
    parser.add_argument('-d', '--debug', action='store_true')
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('-s', '--syslog', action='store_true')
    parser.add_argument('--no-preserve-permissions', action='store_false')
    parser.add_argument('--no-preserve-times', action='store_false')
    parser.add_argument('--max-retries', type=int, default=10)
    parser.add_argument('remote_dir', metavar='REMOTEDIR', nargs=1)
    parser.add_argument('local_dir', metavar='LOCALDIR', nargs=1)

    args = parser.parse_args()
    log = get_logger('xirvik',
                     verbose=args.verbose,
                     debug=args.debug,
                     syslog=args.syslog)

    if args.debug:
        logs_to_follow = (
            'requests',
        )
        for name in logs_to_follow:
            _log = logging.getLogger(name)
            formatter = logging.Formatter('%(asctime)s - %(name)s - '
                                          '%(levelname)s - %(message)s')
            channel = logging.StreamHandler(sys.stderr)
            _log.setLevel(logging.DEBUG)
            channel.setLevel(logging.DEBUG)
            channel.setFormatter(formatter)
            _log.addHandler(channel)

    local_dir = realpath(args.local_dir[0])
    user, _, password = netrc(args.netrc_path).authenticators(args.host)
    sftp_host = 'sftp://{user:s}@{host:s}'.format(
        user=user,
        host=args.host,
    )
    lf_hash = hashlib.sha256(json.dumps(
        args._get_kwargs()).encode('utf-8')).hexdigest()
    lf_path = path_join(gettempdir(), 'xirvik-mirror-{}'.format(lf_hash))

    log.debug('Acquiring lock at {}.lock'.format(lf_path))
    _lock = LockFile(lf_path)
    if _lock.is_locked():
        psax = [x for x in
                sp.check_output(['ps', 'ax']).decode('utf-8').split('\n')
                if sys.argv[0] in x]
        if len(psax) == 1:
            log.info('Breaking lock')
            _lock.break_lock()
    _lock.acquire()
    log.info('Lock acquired')

    log.debug('Local directory to sync to: {}'.format(local_dir))
    log.debug('Read user and password from netrc file')
    log.debug('SFTP URI: {}'.format(sftp_host))

    client = ruTorrentClient(args.host,
                             user,
                             password,
                             max_retries=args.max_retries)
    assumed_path_prefix = '/torrents/{}'.format(user)
    look_for = '{}/{}/'.format(assumed_path_prefix, args.remote_dir[0])
    move_to = '{}/{}'.format(assumed_path_prefix, args.move_to)
    names = {}

    log.debug('Full completed directory path name: {}'.format(look_for))
    log.debug('Moving finished torrents to: {}'.format(move_to))
    log.info('Getting current torrent information (ruTorrent)')
    try:
        torrents = client.list_torrents()
    except requests.exceptions.ConnectionError as e:
        # Assume no Internet connection at this point
        log.error('Failed to connect: {}'.format(e))
        try:
            _lock.release()
        except NotLocked:
            pass
        cleanup_and_exit(1)

    for hash, v in torrents.items():
        if not v[TORRENT_PATH_INDEX].startswith(look_for):
            continue
        bn = basename(v[TORRENT_PATH_INDEX])
        names[bn] = (hash, v[TORRENT_PATH_INDEX],)
        log.info('Completed torrent "{}" found with hash {}'.format(bn, hash,))

    sftp_client_args = dict(
        hostname=args.host,
        username=user,
        password=password,
        port=args.port,
    )

    try:
        with SFTPClient(**sftp_client_args) as sftp_client:
            log.info('Verifying contents of {} with previous '
                     'response'.format(look_for))
            sftp_client.chdir(args.remote_dir[0])
            for item in sftp_client.listdir_iter(read_aheads=10):
                if item.filename not in names:
                    log.error('File or directory "{}" not found in previous '
                              'response body'.format(item.filename))
                    continue
                log.debug('Found matching torrent "{}" from ls output'.format(
                    item.filename))

            if not len(names.items()):
                log.info('Nothing found to mirror')
                _lock.release()
                cleanup_and_exit()

            mirror(sftp_client,
                   client,
                   destroot=local_dir,
                   keep_modes=not args.no_preserve_permissions,
                   keep_times=not args.no_preserve_times)
    except Exception as e:
        if args.debug:
            _lock.release()
            cleanup()
            raise e
        else:
            log.error(str(e))
            _lock.release()
            cleanup_and_exit()

    _all = names.items()
    exit_status = 0
    bad = []
    for bn, (hash, fullpath) in _all:
        # There is a warning that can get raised here by urllib3 if
        # Content-Disposition header's filename field has any
        # non-ASCII characters. It is ignorable as the content still gets
        # downloaded correctly
        log.info('Verifying "{}"'.format(bn))
        r, _ = client.get_torrent(hash)
        try:
            verify_torrent_contents(r.content, local_dir)
        except VerificationError as e:
            log.error('Could not verify "{}" contents against piece hashes '
                      'in torrent file'.format(bn))
            exit_status = 1
            bad.append(hash)

    # Move to _seeding directory and set label
    # Unfortunately, there is no method, via the API, to do this in one HTTP
    # request
    for bn, (hash, fullpath) in _all:
        if hash in bad:
            continue
        log.info('Moving "{}" to "{}" directory'.format(bn, move_to))
        try:
            client.move_torrent(hash, move_to)
        except UnexpectedruTorrentError as e:
            log.error(str(e))

    log.info('Setting label to "{}" for downloaded items'.format(args.label))
    client.set_label_to_hashes(hashes=[hash for bn, (hash, fullpath)
                                       in names.items()
                                       if hash not in bad],
                               label=args.label)

    if exit_status != 0:
        log.error('Could not verify torrent checksums')

    _lock.release()
    cleanup_and_exit(exit_status)
class EqueueServer(SocketServer.ThreadingUnixStreamServer):

    daemon_threads = True

    def __init__(self, *args, **kw):
        self.options = kw.pop('equeue_options')
        SocketServer.ThreadingUnixStreamServer.__init__(self,
                                                        RequestHandlerClass=None,
                                                        *args, **kw)
        # Equeue Specific elements
        self.setLogger(self.options.logfile[0], self.options.loglevel[0])
        self.setDB(self.options.database[0])
        if getattr(self.options, 'takeover_triggered_file_path', None):
            self.takeover_triggered_file_path = self.options.takeover_triggered_file_path[0]
        # Lock to only have one command running at the time
        self.thread_lock = threading.Lock()
        # Lockfile is used by other commands to know if an import is ongoing.
        self.lockfile = LockFile(self.options.lockfile)
        self.lockfile.break_lock()

    def setLogger(self, logfile, loglevel):
        self.logger = logging.getLogger("EQueue")
        # Natively support logrotate
        handler = logging.handlers.WatchedFileHandler(logfile, mode='a')
        level = logging._levelNames.get(loglevel, logging.INFO)
        self.logger.setLevel(level)
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)

    def setDB(self, database):
        self.db = gdbm.open(database, 'cs', 0700)

    def _hasTakeoverBeenTriggered(self):
        if hasattr(self, 'takeover_triggered_file_path') and \
                os.path.exists(self.takeover_triggered_file_path):
            return True
        return False

    def _runCommandIfNeeded(self, command, timestamp):
        with self.thread_lock as thread_lock, self.lockfile as lockfile:
            if self._hasTakeoverBeenTriggered():
                self.logger.info('Takeover has been triggered, preventing to run import script.')
                return
            cmd_list = command.split('\0')
            cmd_readable = ' '.join(cmd_list)
            cmd_executable = cmd_list[0]

            if cmd_executable in self.db and timestamp <= int(self.db[cmd_executable]):
                self.logger.info("%s already run.", cmd_readable)
                return

            self.logger.info("Running %s, %s with output:", cmd_readable, timestamp)
            try:
                sys.stdout.flush()
                p = subprocess.Popen(cmd_list,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                subprocess_capture(p, self.logger.info, '', True)
                if p.returncode == 0:
                    self.logger.info("%s finished successfully.", cmd_readable)
                    self.db[cmd_executable] = str(timestamp)
                else:
                    self.logger.warning("%s exited with status %s." % (
                        cmd_readable, p.returncode))
            except subprocess.CalledProcessError as e:
                self.logger.warning("%s exited with status %s. output is: \n %s" % (
                    cmd_readable,
                    e.returncode,
                    e.output,
                ))

    def process_request_thread(self, request, client_address):
        # Handle request
        self.logger.debug("Connection with file descriptor %d", request.fileno())
        request.settimeout(self.options.timeout)
        request_string = StringIO.StringIO()
        segment = None
        try:
            while segment != '':
                segment = request.recv(1024)
                request_string.write(segment)
        except socket.timeout:
            pass

        command = '127'
        try:
            request_parameters = json.loads(request_string.getvalue())
            timestamp = request_parameters['timestamp']
            command = str(request_parameters['command'])
            self.logger.info("New command %r at %s", command, timestamp)
        except (ValueError, IndexError):
            self.logger.warning("Error during the unserialization of json "
                                "message of %r file descriptor. The message "
                                "was %r", request.fileno(), request_string.getvalue())

        try:
            request.send(command)
        except:
            self.logger.warning("Couldn't respond to %r", request.fileno())
        self.close_request(request)

        self._runCommandIfNeeded(command, timestamp)
def Concatenate_Logs():
    # Temperature Sensor Logs
    if not filecmp.cmp(sensor_t_log_file_tmp, sensor_t_log_file):
        logging.debug("[Log Backup] Concatenating T sensor logs to %s",
                      sensor_t_log_file)
        lock = LockFile(sensor_t_log_lock_path)
        while not lock.i_am_locking():
            try:
                logging.debug("[Log Backup] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Log Backup] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.debug("[Log Backup] Gained lock: %s", lock.path)
        try:
            with open(sensor_t_log_file, 'a') as fout:
                for line in fileinput.input(sensor_t_log_file_tmp):
                    fout.write(line)
            logging.debug("[Log Backup] Appended T data to %s", sensor_t_log_file)
        except:
            logging.warning("[Log Backup] Unable to append data to %s", sensor_t_log_file)
        open(sensor_t_log_file_tmp, 'w').close()
        logging.debug("[Log Backup] Removing lock: %s", lock.path)
        lock.release()
    else:
        logging.debug("[Log Backup] T Sensor logs the same, skipping.")

    # Humidity/Temperature Sensor Logs
    if not filecmp.cmp(sensor_ht_log_file_tmp, sensor_ht_log_file):
        logging.debug("[Log Backup] Concatenating HT sensor logs to %s",
                      sensor_ht_log_file)
        lock = LockFile(sensor_ht_log_lock_path)
        while not lock.i_am_locking():
            try:
                logging.debug("[Log Backup] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Log Backup] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.debug("[Log Backup] Gained lock: %s", lock.path)
        try:
            with open(sensor_ht_log_file, 'a') as fout:
                for line in fileinput.input(sensor_ht_log_file_tmp):
                    fout.write(line)
            logging.debug("[Log Backup] Appended HT data to %s", sensor_ht_log_file)
        except:
            logging.warning("[Log Backup] Unable to append data to %s", sensor_ht_log_file)
        open(sensor_ht_log_file_tmp, 'w').close()
        logging.debug("[Log Backup] Removing lock: %s", lock.path)
        lock.release()
    else:
        logging.debug("[Log Backup] HT Sensor logs the same, skipping.")

    # CO2 Sensor Logs
    if not filecmp.cmp(sensor_co2_log_file_tmp, sensor_co2_log_file):
        logging.debug("[Log Backup] Concatenating CO2 sensor logs to %s",
                      sensor_co2_log_file)
        lock = LockFile(sensor_co2_log_lock_path)
        while not lock.i_am_locking():
            try:
                logging.debug("[Log Backup] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Log Backup] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.debug("[Log Backup] Gained lock: %s", lock.path)
        try:
            with open(sensor_co2_log_file, 'a') as fout:
                for line in fileinput.input(sensor_co2_log_file_tmp):
                    fout.write(line)
            logging.debug("[Log Backup] Appended CO2 data to %s", sensor_co2_log_file)
        except:
            logging.warning("[Log Backup] Unable to append data to %s", sensor_co2_log_file)
        open(sensor_co2_log_file_tmp, 'w').close()
        logging.debug("[Log Backup] Removing lock: %s", lock.path)
        lock.release()
    else:
        logging.debug("[Log Backup] CO2 Sensor logs the same, skipping.")

    # Pressure Sensor Logs
    if not filecmp.cmp(sensor_press_log_file_tmp, sensor_press_log_file):
        logging.debug("[Log Backup] Concatenating Press sensor logs to %s",
                      sensor_press_log_file)
        lock = LockFile(sensor_press_log_lock_path)
        while not lock.i_am_locking():
            try:
                logging.debug("[Log Backup] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Log Backup] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.debug("[Log Backup] Gained lock: %s", lock.path)
        try:
            with open(sensor_press_log_file, 'a') as fout:
                for line in fileinput.input(sensor_press_log_file_tmp):
                    fout.write(line)
            logging.debug("[Log Backup] Appended Press data to %s", sensor_press_log_file)
        except:
            logging.warning("[Log Backup] Unable to append data to %s", sensor_press_log_file)
        open(sensor_press_log_file_tmp, 'w').close()
        logging.debug("[Log Backup] Removing lock: %s", lock.path)
        lock.release()
    else:
        logging.debug("[Log Backup] Press Sensor logs the same, skipping.")

    # Relay Logs
    if not filecmp.cmp(relay_log_file_tmp, relay_log_file):
        logging.debug("[Log Backup] Concatenating relay logs to %s",
                      relay_log_file)
        lock = LockFile(relay_log_lock_path)
        while not lock.i_am_locking():
            try:
                logging.debug("[Log Backup] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Log Backup] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.debug("[Log Backup] Gained lock: %s", lock.path)
        try:
            with open(relay_log_file, 'a') as fout:
                for line in fileinput.input(relay_log_file_tmp):
                    fout.write(line)
            logging.debug("[Log Backup] Appended data to %s", relay_log_file)
        except:
            logging.warning("[Log Backup] Unable to append data to %s", relay_log_file)
        open(relay_log_file_tmp, 'w').close()
        logging.debug("[Log Backup] Removing lock: %s", lock.path)
        lock.release()
    else:
        logging.debug("[Log Backup] Relay logs the same, skipping.")

    # Daemon Logs
    if not filecmp.cmp(daemon_log_file_tmp, daemon_log_file):
        logging.debug("[Log Backup] Concatenating daemon logs to %s",
                      daemon_log_file)
        lock = LockFile(daemon_log_lock_path)
        while not lock.i_am_locking():
            try:
                logging.debug("[Log Backup] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Log Backup] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.debug("[Log Backup] Gained lock: %s", lock.path)
        try:
            with open(daemon_log_file, 'a') as fout:
                for line in fileinput.input(daemon_log_file_tmp):
                    fout.write(line)
            logging.debug("[Log Backup] Appended daemon log data to %s", daemon_log_file)
        except:
            logging.warning("[Log Backup] Unable to append data to %s", daemon_log_file)
        open(daemon_log_file_tmp, 'w').close()
        logging.debug("[Log Backup] Removing lock: %s", lock.path)
        lock.release()
    else:
        logging.debug("[Log Backup] Daemon logs the same, skipping.")
def Concatenate_Logs():
    logging.info("[Timer Expiration] Run every 6 hours: Concatenate logs")

    if not filecmp.cmp(daemon_log_file_tmp, daemon_log_file):
        logging.info("[Daemon Log] Concatenating daemon logs to %s",
                     daemon_log_file)
        lock = LockFile(daemon_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Daemon Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Daemon Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Daemon Log] Gained lock: %s", lock.path)
        try:
            with open(daemon_log_file, 'a') as fout:
                for line in fileinput.input(daemon_log_file_tmp):
                    fout.write(line)
            logging.info("[Daemon Log] Appended data to %s", daemon_log_file)
        except:
            logging.warning("[Daemon Log] Unable to append data to %s", daemon_log_file)
        open(daemon_log_file_tmp, 'w').close()
        logging.info("[Daemon Log] Removing lock: %s", lock.path)
        lock.release()
    else:
        logging.info("[Daemon Log] Daemon logs the same, skipping.")

    if not filecmp.cmp(sensor_log_file_tmp, sensor_log_file):
        logging.info("[Sensor Log] Concatenating sensor logs to %s",
                     sensor_log_file)
        lock = LockFile(sensor_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Sensor Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Sensor Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Sensor Log] Gained lock: %s", lock.path)
        try:
            with open(sensor_log_file, 'a') as fout:
                for line in fileinput.input(sensor_log_file_tmp):
                    fout.write(line)
            logging.info("[Sensor Log] Appended data to %s", sensor_log_file)
        except:
            logging.warning("[Sensor Log] Unable to append data to %s", sensor_log_file)
        open(sensor_log_file_tmp, 'w').close()
        logging.info("[Sensor Log] Removing lock: %s", lock.path)
        lock.release()
    else:
        logging.info("[Sensor Log] Sensor logs the same, skipping.")

    if not filecmp.cmp(relay_log_file_tmp, relay_log_file):
        logging.info("[Relay Log] Concatenating relay logs to %s",
                     relay_log_file)
        lock = LockFile(relay_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Relay Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except:
                logging.warning("[Relay Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Relay Log] Gained lock: %s", lock.path)
        try:
            with open(relay_log_file, 'a') as fout:
                for line in fileinput.input(relay_log_file_tmp):
                    fout.write(line)
            logging.info("[Relay Log] Appended data to %s", relay_log_file)
        except:
            logging.warning("[Relay Log] Unable to append data to %s", relay_log_file)
        open(relay_log_file_tmp, 'w').close()
        logging.info("[Relay Log] Removing lock: %s", lock.path)
        lock.release()
    else:
        logging.info("[Relay Log] Relay logs the same, skipping.")
def write_config():
    config = ConfigParser.RawConfigParser()

    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)

    lock = LockFile(config_lock_path)
    while not lock.i_am_locking():
        try:
            logging.info("[Write Config] Waiting, Acquiring Lock: %s",
                         lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except:
            logging.warning("[Write Config] Breaking Lock to Acquire: %s",
                            lock.path)
            lock.break_lock()
            lock.acquire()
    logging.info("[Write Config] Gained lock: %s", lock.path)
    logging.info("[Write Config] Writing config file %s", config_file)

    config.add_section('Sensor')
    config.set('Sensor', 'dhtsensor', DHTSensor)
    config.set('Sensor', 'dhtpin', DHTPin)
    config.set('Sensor', 'dhtseconds', DHTSeconds)

    # Eight relays: write each numbered key in a loop
    config.add_section('RelayNames')
    config.add_section('RelayPins')
    config.add_section('RelayTriggers')
    for i in range(1, 9):
        config.set('RelayNames', 'relay%dname' % i, relayName[i])
        config.set('RelayPins', 'relay%dpin' % i, relayPin[i])
        config.set('RelayTriggers', 'relay%dtrigger' % i, relayTrigger[i])

    config.add_section('PID')
    config.set('PID', 'relaytemp', relayTemp)
    config.set('PID', 'relayhum', relayHum)
    config.set('PID', 'tempor', TempOR)
    config.set('PID', 'humor', HumOR)
    config.set('PID', 'settemp', setTemp)
    config.set('PID', 'sethum', setHum)
    config.set('PID', 'hum_p', Hum_P)
    config.set('PID', 'hum_i', Hum_I)
    config.set('PID', 'hum_d', Hum_D)
    config.set('PID', 'temp_p', Temp_P)
    config.set('PID', 'temp_i', Temp_I)
    config.set('PID', 'temp_d', Temp_D)
    config.set('PID', 'factorhumseconds', factorHumSeconds)
    config.set('PID', 'factortempseconds', factorTempSeconds)

    config.add_section('Misc')
    config.set('Misc', 'numtimers', numTimers)
    config.set('Misc', 'cameralight', cameraLight)

    # Eight timers: write each numbered key in a loop
    config.add_section('TimerState')
    config.add_section('TimerRelay')
    config.add_section('TimerDurationOn')
    config.add_section('TimerDurationOff')
    for i in range(1, 9):
        config.set('TimerState', 'timer%dstate' % i, timerState[i])
        config.set('TimerRelay', 'timer%drelay' % i, timerRelay[i])
        config.set('TimerDurationOn', 'timer%ddurationon' % i,
                   timerDurationOn[i])
        config.set('TimerDurationOff', 'timer%ddurationoff' % i,
                   timerDurationOff[i])

    config.add_section('Notification')
    config.set('Notification', 'smtp_host', smtp_host)
    config.set('Notification', 'smtp_port', smtp_port)
    config.set('Notification', 'smtp_user', smtp_user)
    config.set('Notification', 'smtp_pass', smtp_pass)
    config.set('Notification', 'email_from', email_from)
    config.set('Notification', 'email_to', email_to)

    try:
        with open(config_file, 'wb') as configfile:
            config.write(configfile)
    except:
        logging.warning("[Write Config] Unable to write config: %s",
                        config_file)

    logging.info("[Write Config] Removing lock: %s", lock.path)
    lock.release()
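Worth noting for write_config: lockfile's lock objects are also context managers, so the acquire/release bookkeeping can be implicit when the break-stale-lock behavior isn't needed. A minimal self-contained sketch with illustrative paths, not the project's code:

# Minimal sketch, illustrative paths. LockFile.__enter__ calls acquire()
# with no timeout, so unlike the loop above this blocks indefinitely if a
# crashed process left the lock behind; keep the break_lock() pattern when
# that matters.
import ConfigParser  # Python 2, matching the code above
from lockfile import LockFile

config = ConfigParser.RawConfigParser()
config.add_section('Sensor')
config.set('Sensor', 'dhtseconds', 120)

with LockFile('/tmp/example_config'):
    with open('/tmp/example_config.cfg', 'wb') as configfile:
        config.write(configfile)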
def get(self):
    """Upon request from a manager, picks the first task available
    and returns it in JSON format.
    """
    # Validate request
    args = task_generator_parser.parse_args()
    task = None
    manager_uuid = args['uuid']
    job_types = args['job_types']
    worker = args['worker']

    if manager_uuid:
        manager = Manager.query.filter_by(uuid=manager_uuid).one()
    else:
        ip_address = request.remote_addr
        manager = Manager.query.filter_by(ip_address=ip_address).first()
    if not manager:
        print('Could not find manager, returning 404')
        return '', 404

    # Get active Jobs
    if job_types:
        job_types_list = job_types.split(',')
        job_type_clauses = or_(*[Job.type == j for j in job_types_list])
        active_jobs = Job.query \
            .filter(job_type_clauses) \
            .filter(or_(Job.status == 'waiting', Job.status == 'active')) \
            .order_by(Job.priority.desc(), Job.id.asc()) \
            .all()
    else:
        active_jobs = Job.query \
            .filter(or_(Job.status == 'waiting', Job.status == 'active')) \
            .order_by(Job.priority.desc(), Job.id.asc()) \
            .all()

    lock = LockFile(os.path.join(app.config['TMP_FOLDER'], 'server.lock'))
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except LockTimeout:
            lock.break_lock()
            lock.acquire()

    for job in active_jobs:
        tasks = Task.query.filter(
            Task.job_id == job.id,
            or_(Task.status == 'waiting', Task.status == 'canceled'),
            Task.manager_id == manager.id) \
            .order_by(Task.priority.desc(), Task.id.desc())
        task = None
        for t in tasks:
            # Skip this task unless all of its parents are completed
            incomplete_parents = False
            for tt in tasks:
                if tt.child_id == t.id and tt.status != 'completed':
                    incomplete_parents = True
                    break
            if incomplete_parents:
                continue
            task = t
            break
        # Task found? Then break
        if task:
            break

    if not task:
        # Unlocking Task table on ROLLBACK
        db.session.rollback()
        lock.release()
        print('Unlocking Task table on ROLLBACK, returning 404')
        return '', 404

    task.status = 'processing'
    job.status = 'active'
    if worker:
        task.worker = worker
    task.last_activity = datetime.now()
    db.session.commit()
    lock.release()

    # job = Job.query.get(task.job_id)
    frame_count = 1
    current_frame = 0
    percentage_done = Decimal(current_frame) / Decimal(frame_count) * Decimal(100)
    percentage_done = round(percentage_done, 1)

    task = {"id": task.id,
            "job_id": task.job_id,
            "name": task.name,
            "status": task.status,
            "type": task.type,
            "settings": json.loads(task.settings),
            "log": None,
            "activity": task.activity,
            "manager_id": task.manager_id,
            "priority": task.priority,
            "child_id": task.child_id,
            "parser": task.parser,
            "time_cost": task.time_cost,
            "project_id": job.project_id,
            "current_frame": 0,
            "percentage_done": percentage_done}

    return jsonify(**task)
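One fragility in the handler above: if anything between lock.acquire() and lock.release() raises (the query loop, db.session.commit()), server.lock is never released, and every later request waits out the full 60-second timeout before breaking the lock. A hedged sketch of the same critical section with a finally guard; app, db, LockFile, and LockTimeout are assumed to come from the handler's own module imports:

# Sketch only: same acquire-or-break loop as above, with the critical
# section wrapped so the lock is released even on an unexpected exception
# (a return inside the try also passes through the finally block).
lock = LockFile(os.path.join(app.config['TMP_FOLDER'], 'server.lock'))
while not lock.i_am_locking():
    try:
        lock.acquire(timeout=60)  # wait up to 60 seconds
    except LockTimeout:
        lock.break_lock()
        lock.acquire()
try:
    pass  # ... task selection, status updates, db.session.commit() ...
finally:
    lock.release()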
class MHZ16Sensor(AbstractSensor):
    """ A sensor support class that monitors the MH-Z16's CO2 concentration """

    def __init__(self, interface, device_loc=None, baud_rate=None,
                 i2c_address=None, i2c_bus=None):
        super(MHZ16Sensor, self).__init__()
        self.k30_lock_file = None
        self._co2 = 0
        self.interface = interface

        if self.interface == 'UART':
            self.logger = logging.getLogger(
                "mycodo.sensors.mhz16.{dev}".format(
                    dev=device_loc.replace('/', '')))
            # Check if device is valid
            self.serial_device = is_device(device_loc)
            if self.serial_device:
                try:
                    self.k30_lock_file = "/var/lock/sen-mhz16-{}".format(
                        device_loc.replace('/', ''))
                    self.lock = LockFile(self.k30_lock_file)
                    self.ser = serial.Serial(self.serial_device,
                                             baudrate=baud_rate,
                                             timeout=1)
                except serial.SerialException:
                    self.logger.exception('Opening serial')
            else:
                self.logger.error(
                    'Could not open "{dev}". '
                    'Check the device location is correct.'.format(
                        dev=device_loc))

        elif self.interface == 'I2C':
            self.logger = logging.getLogger(
                "mycodo.sensors.mhz16.{dev}".format(dev=i2c_address))
            self.cmd_measure = [
                0xFF, 0x01, 0x9C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63]
            self.IOCONTROL = 0X0E << 3
            self.FCR = 0X02 << 3
            self.LCR = 0X03 << 3
            self.DLL = 0x00 << 3
            self.DLH = 0X01 << 3
            self.THR = 0X00 << 3
            self.RHR = 0x00 << 3
            self.TXLVL = 0X08 << 3
            self.RXLVL = 0X09 << 3
            self.i2c_address = i2c_address
            self.i2c = smbus.SMBus(i2c_bus)
            self.begin()

    def __repr__(self):
        """ Representation of object """
        return "<{cls}(co2={co2})>".format(
            cls=type(self).__name__,
            co2="{0:.2f}".format(self._co2))

    def __str__(self):
        """ Return CO2 information """
        return "CO2: {co2}".format(co2="{0:.2f}".format(self._co2))

    def __iter__(self):  # must return an iterator
        """ MH-Z16 iterates through live CO2 readings """
        return self

    def next(self):
        """ Get next CO2 reading """
        if self.read():  # raised an error
            raise StopIteration  # required
        return dict(co2=float('{0:.2f}'.format(self._co2)))

    def info(self):
        conditions_measured = [
            ("CO2", "co2", "float", "0.00", self._co2, self.co2)]
        return conditions_measured

    @property
    def co2(self):
        """ CO2 concentration in ppmv """
        if not self._co2:  # update if needed
            self.read()
        return self._co2

    def get_measurement(self):
        """ Gets the MH-Z16's CO2 concentration in ppmv via UART or I2C """
        self._co2 = None
        if self.interface == 'UART':
            self.ser.flushInput()
            time.sleep(1)
            self.ser.write("\xff\x01\x86\x00\x00\x00\x00\x00\x79")
            time.sleep(.01)
            resp = self.ser.read(9)
            if len(resp) != 0:
                high_level = struct.unpack('B', resp[2])[0]
                low_level = struct.unpack('B', resp[3])[0]
                co2 = high_level * 256 + low_level
                return co2
        elif self.interface == 'I2C':
            self.write_register(self.FCR, 0x07)
            self.send(self.cmd_measure)
            try:
                co2 = self.parse(self.receive())
            except Exception:
                co2 = None
            return co2
        return None

    def read(self):
        """
        Takes a reading from the MH-Z16 and updates the self._co2 value

        :returns: None on success or 1 on error
        """
        try:
            if self.interface == 'UART':
                if not self.serial_device:  # Don't measure if device isn't validated
                    return None
                # Acquire lock on MHZ16 to ensure more than one read isn't
                # being attempted at once on the same interface
                while not self.lock.i_am_locking():
                    try:
                        # wait 60 seconds before breaking lock
                        self.lock.acquire(timeout=60)
                    except Exception as e:
                        self.logger.error(
                            "{cls} 60 second timeout, {lock} lock broken: "
                            "{err}".format(cls=type(self).__name__,
                                           lock=self.k30_lock_file,
                                           err=e))
                        self.lock.break_lock()
                        self.lock.acquire()
                self._co2 = self.get_measurement()
                self.lock.release()
            elif self.interface == 'I2C':
                self._co2 = self.get_measurement()
            if self._co2 is None:
                return 1
            return  # success - no errors
        except Exception as e:
            self.logger.error(
                "{cls} raised an exception when taking a reading: "
                "{err}".format(cls=type(self).__name__, err=e))
            if self.interface == 'UART':
                self.lock.release()
            return 1

    def begin(self):
        try:
            self.write_register(self.IOCONTROL, 0x08)
        except IOError:
            pass
        self.write_register(self.FCR, 0x07)
        self.write_register(self.LCR, 0x83)
        self.write_register(self.DLL, 0x60)
        self.write_register(self.DLH, 0x00)
        self.write_register(self.LCR, 0x03)

    @staticmethod
    def parse(response):
        checksum = 0
        if len(response) < 9:
            return None
        for i in range(0, 9):
            checksum += response[i]
        if response[0] == 0xFF:
            if response[1] == 0x9C:
                if checksum % 256 == 0xFF:
                    return ((response[2] << 24) + (response[3] << 16)
                            + (response[4] << 8) + response[5])
        return None

    def read_register(self, reg_addr):
        time.sleep(0.01)
        return self.i2c.read_byte_data(self.i2c_address, reg_addr)

    def write_register(self, reg_addr, val):
        time.sleep(0.01)
        self.i2c.write_byte_data(self.i2c_address, reg_addr, val)

    def send(self, command):
        if self.read_register(self.TXLVL) >= len(command):
            self.i2c.write_i2c_block_data(self.i2c_address, self.THR, command)

    def receive(self):
        n = 9
        buf = []
        start = time.clock()
        while n > 0:
            rx_level = self.read_register(self.RXLVL)
            if rx_level > n:
                rx_level = n
            buf.extend(self.i2c.read_i2c_block_data(
                self.i2c_address, self.RHR, rx_level))
            n = n - rx_level
            if time.clock() - start > 0.2:
                break
        return buf
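To make the I2C framing in parse() concrete: a reply is accepted only when it starts 0xFF 0x9C and all nine bytes sum to 0xFF modulo 256, with the reading packed big-endian into bytes 2 through 5. A worked example with a made-up frame (illustrative, not captured from a real sensor):

# Made-up frame: encodes 500 ppm in bytes 2-5, and the final byte 0x6F is
# chosen so the nine bytes sum to 0xFF mod 256
# (0xFF + 0x9C + 0x01 + 0xF4 + 0x6F = 767, and 767 % 256 = 255 = 0xFF).
frame = [0xFF, 0x9C, 0x00, 0x00, 0x01, 0xF4, 0x00, 0x00, 0x6F]

assert sum(frame) % 256 == 0xFF  # checksum rule used by parse()
co2 = ((frame[2] << 24) + (frame[3] << 16) + (frame[4] << 8) + frame[5])
print(co2)  # 500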
# lock.release()
lock.acquire()
while lock.i_am_locking():
    try:
        print(lock.is_locked())
        print(lock.i_am_locking())
        fb = open(filename, 'r')
        da = fb.readlines()
        for d in da[:1000000]:
            print(d)
        fb.close()
        lock.release()
    except LockTimeout:
        print("entered into exception")
        lock.break_lock()
        lock.acquire()
        print(lock.is_locked())
        print(lock.i_am_locking())

print("I locked", lock.path)
if lock.is_locked():
    lock.release()

# while not lock.i_am_locking():
#     try:
#         lock.acquire(timeout=5)
#         print(lock.is_locked())  # wait up to 60 seconds
#         print(lock.i_am_locking())
#         with open(filename, 'a') as fb:
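For contrast with the fragment above, here is a minimal, self-contained version of the pattern the other examples on this page use: acquire with a timeout, break a presumed-stale lock on LockTimeout, and release in a finally block. The /tmp paths are illustrative:

# Self-contained sketch of the canonical lockfile pattern; LockTimeout is
# raised when acquire(timeout=...) expires before the lock is obtained.
from lockfile import LockFile, LockTimeout

lock = LockFile('/tmp/example')
while not lock.i_am_locking():
    try:
        lock.acquire(timeout=5)  # wait up to 5 seconds
    except LockTimeout:
        lock.break_lock()        # assume the previous holder died
        lock.acquire()
try:
    with open('/tmp/example.txt', 'a') as fb:
        fb.write('guarded write\n')
finally:
    lock.release()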