class TestDigitalSource(SignalSource):
    """ Random test digital input signal source. """

    def __init__(self, identifier: str, interval: float,
                 text_0: str = 'off', text_1: str = 'on', **kwargs):
        super().__init__(identifier, **kwargs)
        self.text_0 = text_0
        self.text_1 = text_1
        self._timer = RepeatTimer(interval, self._send_value)

    def _send_value(self) -> None:
        self._send(random.choice([0, 1]), self.STATUS_OK)

    def start(self) -> None:
        super().start()
        self._timer.start()

    def stop(self) -> None:
        super().stop()
        self._timer.cancel()

    def format(self, value: float) -> str:
        return self.text_1 if value != 0 else self.text_0
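Every snippet in this collection constructs a RepeatTimer with an interval and a callable, but the timer class itself is never shown. A minimal sketch of what such a timer typically looks like, assuming it wraps threading.Timer and simply re-arms itself until cancelled (the behaviour is inferred from the call sites, not taken from the original source), is:

from threading import Timer


class RepeatTimer(Timer):
    """Hypothetical repeating timer: calls `function(*args, **kwargs)` every
    `interval` seconds until cancel() is called (Timer.cancel sets `finished`)."""

    def run(self):
        # wait() returns False on each timeout and True once cancel() set the event
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)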
def start(self):
    """ Runs once the agent core is initialized.

    @return: Nothing
    """
    # 43200 seconds == 12 hours
    self._timer = RepeatTimer(43200, self.run_refresh_apps_operation)
    self._timer.start()
def __init__(self, ipAddress, port, cert_server_name, status_time=30):
    self._server_address = ipAddress
    self._server_name = cert_server_name
    self._port = port
    self._timer = RepeatTimer(status_time, self._send_status_message)
    self._timer.start()
def start(self):
    if self._timer is None:
        logger.info('Starting to record history every ' + str(self.delta_seconds)
                    + 's for ' + str(self.max_seconds) + 's')
        self._begin_new_csv_file()
        self._timer = RepeatTimer(self.delta_seconds, self.record)
        self._timer.start()
class PowerMeterApatorEC3Repeating:
    min_averaging_secs: float
    _power_meter: PowerMeterApatorEC3
    _timer: RepeatTimer
    reading: Optional[PowerMeterReading]
    reading_ts: Optional[float]
    success: bool
    high: SingleCounter
    low: SingleCounter
    callbacks: List[Callable[[Optional[PowerMeterReading]], None]]

    def __init__(self, power_meter: PowerMeterApatorEC3, interval: float,
                 min_averaging_secs: float):
        self.min_averaging_secs = min_averaging_secs
        self._power_meter = power_meter
        self._timer = RepeatTimer(interval, self._acquire)
        self.reading = None
        self.reading_ts = None
        self.success = False
        self.high = SingleCounter()
        self.low = SingleCounter()
        self.callbacks = []

    def add_callback(self, callback: Callable[[Optional[PowerMeterReading]], None]):
        self.callbacks.append(callback)

    def start(self):
        if not self._timer.is_alive():
            self._timer.start()

    def stop(self):
        self._timer.cancel()
        self._power_meter.close()

    def _acquire(self):
        try:
            ts = time.time()
            self.reading = self._power_meter.read()
            self.reading_ts = ts
            self._update_high_power()
            self._update_low_power()
            self.success = True
        except SerialException:
            self.success = False
        self._fire()

    def _update_low_power(self):
        self.low.update(self.reading.consumption_low_sum_kwh, self.reading_ts,
                        self.min_averaging_secs, self.high)

    def _update_high_power(self):
        self.high.update(self.reading.consumption_high_sum_kwh, self.reading_ts,
                         self.min_averaging_secs, self.low)

    def _fire(self):
        for callback in self.callbacks:
            callback(self.reading)
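A hypothetical way to wire up the repeating reader above; the PowerMeterApatorEC3 constructor argument, the interval and the averaging window are illustrative assumptions, not values from the original code:

# Poll the meter periodically and log every reading via a callback (sketch only).
meter = PowerMeterApatorEC3('/dev/ttyUSB0')   # constructor signature assumed
repeating = PowerMeterApatorEC3Repeating(meter, interval=10.0, min_averaging_secs=60.0)
repeating.add_callback(lambda reading: print(reading))
repeating.start()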
def __init__(self, power_meter: PowerMeterApatorEC3, interval: float,
             min_averaging_secs: float):
    self.min_averaging_secs = min_averaging_secs
    self._power_meter = power_meter
    self._timer = RepeatTimer(interval, self._acquire)
    self.reading = None
    self.reading_ts = None
    self.success = False
    self.high = SingleCounter()
    self.low = SingleCounter()
    self.callbacks = []
def __init__(self, identifier: str, interval: float,
             text_0: str = 'off', text_1: str = 'on', **kwargs):
    super().__init__(identifier, **kwargs)
    self.text_0 = text_0
    self.text_1 = text_1
    self._timer = RepeatTimer(interval, self._send_value)
class NetworkSender():

    def __init__(self, ipAddress, port, cert_server_name, status_time=30):
        self._server_address = ipAddress
        self._server_name = cert_server_name
        self._port = port
        self._timer = RepeatTimer(status_time, self._send_status_message)
        self._timer.start()
        #self._send_status_message()

    def _send_status_message(self):
        root = {}
        root['operation'] = 'status'
        root['agent_id'] = settings.AgentId
        self.send_results(json.dumps(root))

    def send_results(self, data):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # require a certificate from the server
        # NOTE: ssl.wrap_socket and PROTOCOL_SSLv3 are obsolete and insecure;
        # modern Python would use an ssl.SSLContext instead.
        ssl_sock = ssl.wrap_socket(s,
                                   ca_certs=certificate.ServerCert,
                                   cert_reqs=ssl.CERT_OPTIONAL,
                                   ssl_version=ssl.PROTOCOL_SSLv3)
        ssl_sock.settimeout(5)
        try:
            ssl_sock.connect((self._server_address, self._port))
            ssl_sock.sendall(data)
            ssl_sock.shutdown(socket.SHUT_WR)
        except Exception as e:
            logger.error("NetworkSender.send_results: Couldn't send data.")
            logger.exception("Exception error(%s): %s" % (e.errno, e.strerror))
        finally:
            # note that closing the SSLSocket will also close the underlying socket
            ssl_sock.close()

    @staticmethod
    def send_nonssl_message(message, ip_address, port):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((ip_address, port))
        s.sendall(message)
        s.close()
class DigitalInSource(SignalSource):
    """ Digital GPIO input signal source. """

    pi: pigpio

    def __init__(self, identifier: str, pigpio_pi: pigpio, gpio_bcm: int, interval: float,
                 text_0: str = 'off', text_1: str = 'on', **kwargs):
        super().__init__(identifier, **kwargs)
        self.pi = pigpio_pi
        self.gpio_bcm = gpio_bcm
        self.interval = interval
        self.text_0 = text_0
        self.text_1 = text_1
        if self.pi.connected:
            self.pi.set_mode(self.gpio_bcm, pigpio.INPUT)
            self.pi.set_pull_up_down(self.gpio_bcm, pigpio.PUD_OFF)
        else:
            raise PigpioNotConnectedError(
                'pigpio.pi is not connected, input for gpio ' + str(gpio_bcm)
                + ' will not work')
        self._timer = RepeatTimer(interval, self._read_and_send_value)

    def start(self) -> None:
        super().start()
        self._timer.start()

    def stop(self) -> None:
        super().stop()
        self._timer.cancel()

    def _read_and_send_value(self) -> None:
        reading = self.read_once()
        if reading is not None:
            self._send(reading, self.STATUS_OK)
        else:
            self._send(0, self.STATUS_MISSING)

    def read_once(self) -> Optional[int]:
        return self.pi.read(self.gpio_bcm) if self.pi.connected else None

    def format(self, value: float) -> str:
        return self.text_1 if value != 0 else self.text_0

    def __repr__(self) -> str:
        return super().__repr__() + ' gpio_bcm=' + str(self.gpio_bcm)
def start(self):
    """Runs once the agent core is initialized.

    Returns:
        - Nothing
    """
    self._timer = RepeatTimer(
        300,  # 300 seconds == 5 minutes
        self._create_monit_operation)
    self._timer.start()
def __init__(self, pigpio_pi, gpio_bcm, interval, text_0='off', text_1='on', **kwargs):
    super().__init__(**kwargs)
    self.pi = pigpio_pi
    self.gpio_bcm = gpio_bcm
    self.interval = interval
    self.text_0 = text_0
    self.text_1 = text_1
    if self.pi.connected:
        self.pi.set_mode(self.gpio_bcm, pigpio.INPUT)
        self.pi.set_pull_up_down(self.gpio_bcm, pigpio.PUD_OFF)
    else:
        raise PigpioNotConnectedError('pigpio.pi is not connected, input for gpio '
                                      + str(gpio_bcm) + ' will not work')
    self._timer = RepeatTimer(interval, self._read_and_send_value)
def __init__(self, seconds_to_checkin=60):
    """
    Args:
        - seconds_to_checkin: Time, in seconds, between check-ins to the server;
          defaults to 1 minute.

    Return:
        - Nothing
    """
    self._server_url = 'https://{0}/'.format(settings.ServerAddress)
    self._timer = RepeatTimer(seconds_to_checkin, self._agent_checkin)
def start(self):
    if not self.__started:
        if self.broker_host == '':
            logger.info(
                "NOT starting MQTT client because of config with empty broker"
            )
        else:
            logger.info("Starting MQTT client for broker " + self.broker_host)
            if self.broker_user != '':
                self.client.username_pw_set(self.broker_user, self.broker_password)
            if self.use_ssl:
                self.client.tls_set(ca_certs=self.broker_ca_certs)
            self.client.connect_async(self.broker_host, self.broker_port)
            self.client.loop_start()
            self.__timer = RepeatTimer(self.delta_seconds, self.publish)
            self.__timer.start()
            self.__started = True
class TestSource(SignalSource):
    """ Random test measurement signal source. """

    def __init__(self, value, interval, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.interval = interval
        self._timer = RepeatTimer(interval, self._send_value)

    def _send_value(self):
        self._send(random.gauss(self.value, 2), self.STATUS_OK)

    def start(self, *args):
        super().start(*args)
        self._timer.start()

    def stop(self, *args):
        super().stop(*args)
        self._timer.cancel()
def apply(self, instance, *args, **kwargs):
    if instance in self._tasks:
        return False
    interval = getattr(instance, 'interval')
    try:
        interval = TimeString(interval).to_second()
    except ValueError as e:
        m_logger.error(e, exc_info=True)
        return False
    timer = RepeatTimer(interval, instance.run, *args, **kwargs)
    self._tasks[instance] = timer
    return True
class TestSource(SignalSource):
    """ Random test measurement signal source. """

    def __init__(self, identifier: str, value: float, interval: float, **kwargs):
        super().__init__(identifier, **kwargs)
        self.value = value
        self.interval = interval
        self._timer = RepeatTimer(interval, self._send_random_value)

    def _send_random_value(self) -> None:
        self._send(round(random.gauss(self.value, 2), 3), self.STATUS_OK)

    def start(self) -> None:
        super().start()
        self._timer.start()

    def stop(self) -> None:
        super().stop()
        self._timer.cancel()
def apply(self, instance, args=None, kwargs=None):
    if instance in self._tasks:
        return False
    logger.info('add [{task}] in Monitor task......'.format(
        task=getattr(instance, 'metric_name')))
    interval = getattr(instance, 'forecast_interval')
    try:
        interval = transform_time_string(interval, mode='to_second')
    except ValueError as e:
        logger.error(e, exc_info=True)
        return
    timer = RepeatTimer(interval=interval, function=instance.run, args=args, kwargs=kwargs)
    self._tasks[instance] = timer
    return True
class Ds1820Source(SignalSource):
    """ Temperature measurement signal source from DS18x20 connected to W1 bus GPIO. """

    def __init__(self, identifier: str, sensor_id: str, interval: float, **kwargs):
        super().__init__(identifier, **kwargs)
        self.sensor_id = sensor_id
        self._timer = RepeatTimer(interval, self._read_and_send_value)

    def start(self) -> None:
        super().start()
        self._timer.start()

    def stop(self) -> None:
        super().stop()
        self._timer.cancel()

    def _read_and_send_value(self) -> None:
        temp = self.read_once()
        if temp is not None:
            self._send(round(temp, 3), self.STATUS_OK)

    def read_once(self) -> Optional[float]:
        try:
            with open('/sys/bus/w1/devices/' + self.sensor_id + '/w1_slave', 'r') as file:
                file.readline()
                temp_line = file.readline()
                match = re.search('.*t=(-?[0-9]+)', temp_line)
                if match is not None:
                    return float(match.group(1)) / 1000.
        except OSError:
            logger.warning("Failed to read DS1820 file for " + self.sensor_id)
        return None

    def __repr__(self) -> str:
        return super().__repr__() + ' id=' + self.sensor_id
class TestDigitalSource(SignalSource):
    """ Random test digital input signal source. """

    def __init__(self, interval, text_0='off', text_1='on', **kwargs):
        super().__init__(**kwargs)
        self.text_0 = text_0
        self.text_1 = text_1
        self._timer = RepeatTimer(interval, self._send_value)

    def _send_value(self):
        self._send(random.choice([0, 1]), self.STATUS_OK)

    def start(self, *args):
        super().start(*args)
        self._timer.start()

    def stop(self, *args):
        super().stop(*args)
        self._timer.cancel()

    def format(self, value):
        return self.text_1 if value != 0 else self.text_0
def __init__(self, identifier: str, sensor_id: str, interval: float, **kwargs):
    super().__init__(identifier, **kwargs)
    self.sensor_id = sensor_id
    self._timer = RepeatTimer(interval, self._read_and_send_value)
class SignalHistory:
    MAX_SECONDS_DEFAULT = 24 * 3600  # 1 day
    DELTA_SECONDS_DEFAULT = 60  # every minute
    MAX_SECONDS_CSV_FILES = 24 * 3600 * 32  # 32 days

    def __init__(self):
        self.max_seconds = SignalHistory.MAX_SECONDS_DEFAULT
        self.delta_seconds = SignalHistory.DELTA_SECONDS_DEFAULT
        self.max_seconds_csv_files = SignalHistory.MAX_SECONDS_CSV_FILES
        self.max_csv_lines = self.max_seconds // self.delta_seconds + 1
        self.sources = []
        self._values_by_source_id = {}
        self._timer = None
        self._data_lock = RLock()
        self._csv_file_basename = None
        self._csv_file = None
        self._csv_writer = None
        self._csv_lines = 0

    def __enter__(self):
        self._data_lock.acquire()

    def __exit__(self, exc_type, exc_value, traceback):
        self._data_lock.release()

    def add_source(self, signal_source):
        with self._data_lock:
            if signal_source in self.sources:
                self.sources.remove(signal_source)
            self.sources.append(signal_source)
            self._values_by_source_id[id(signal_source)] = []

    def remove_source(self, signal_source):
        with self._data_lock:
            self.sources.remove(signal_source)
            self._values_by_source_id.pop(id(signal_source))

    def start(self):
        if self._timer is None:
            logger.info('Starting to record history every ' + str(self.delta_seconds)
                        + 's for ' + str(self.max_seconds) + 's')
            self._begin_new_csv_file()
            self._timer = RepeatTimer(self.delta_seconds, self.record)
            self._timer.start()

    def stop(self):
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None
            self._close_csv_file()
            logger.info('Stopped recording history')

    def get_values(self, signal_source):
        with self._data_lock:
            return self._values_by_source_id[id(signal_source)]

    def record(self):
        row = []
        with self._data_lock:
            now = time.time()
            row.append(round(now, 3))
            self.__clean_old_history(now)
            for source in self.sources:
                value = source.last_value
                if (value is not None
                        and value.status == SignalSource.STATUS_OK
                        and value.timestamp > now - self.delta_seconds
                        and source.running):
                    self._values_by_source_id[id(source)].append((now, value.value))
                    row.append(float(source.value_format.format(value.value)))
                else:
                    row.append(None)
            if self._csv_writer is not None:
                self._csv_writer.writerow(row)
                self._csv_lines += 1
                self._csv_file.flush()
                if self._csv_lines >= self.max_csv_lines:
                    self._begin_new_csv_file()

    def __clean_old_history(self, now):
        with self._data_lock:
            for source_id in self._values_by_source_id:
                values = self._values_by_source_id[source_id]
                while len(values) > 1 and (now - values[1][0] > self.max_seconds):
                    values.pop(0)

    def write_to_csv(self, file_basename):
        self._close_csv_file()
        self._csv_file_basename = file_basename

    def _begin_new_csv_file(self):
        self._close_csv_file()
        if self._csv_file_basename is not None:
            self._delete_old_csv_files()
            file_name = self._new_csv_file_name()
            dir_name = os.path.split(file_name)[0]
            if dir_name != '':
                os.makedirs(dir_name, 0o775, True)
            logger.info("Writing new CSV file '" + file_name + "'")
            self._csv_file = open(file_name, 'w', newline='', encoding='utf-8')
            self._csv_writer = csv.writer(self._csv_file)
            self._csv_writer.writerow(['Time'] + [source.label for source in self.sources])
            self._csv_lines = 1

    def _close_csv_file(self):
        if self._csv_file is not None:
            logger.info("Closing CSV file '" + self._csv_file.name + "'")
            self._csv_file.close()
            self._csv_file = None
            self._csv_writer = None
            self._csv_lines = 0

    def _new_csv_file_name(self):
        return '{:}-{:%Y-%m-%d-%H%M%S}.csv'.format(self._csv_file_basename,
                                                   datetime.now())

    def _delete_old_csv_files(self):
        try:
            for file_info in self._list_csv_files():
                if file_info[1] + self.max_seconds < time.time() - self.max_seconds_csv_files:
                    logger.info("Deleting old CSV file '" + file_info[0] + "'")
                    os.remove(file_info[0])
        except:
            logger.exception('Failed to delete old CSV files')

    def _list_csv_files(self):
        """ Read list of existing CSV files and their begin and modification times as
        list of tuples [(full_file_name, begin_timestamp, last_modified_timestamp), ...]. """
        dir_name, file_prefix = os.path.split(self._csv_file_basename)
        file_pattern = re.compile(
            '^' + file_prefix
            + '-(([0-9]{4})-([0-9]{2})-([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})).csv$')
        csv_files = []
        for file in os.listdir(dir_name):
            match = file_pattern.match(file)
            if match is not None:
                full_path = os.path.join(dir_name, file)
                if os.path.isfile(full_path):
                    begin = datetime.strptime(match.group(1), '%Y-%m-%d-%H%M%S').timestamp()
                    modified = os.stat(full_path).st_mtime
                    csv_files.append((full_path, begin, modified))
        return sorted(csv_files, key=lambda x: x[1])

    def load_from_csv_files(self):
        logger.info('Trying to restore history from CSV files...')
        try:
            begin_time = time.time() - self.max_seconds
            for file_info in self._list_csv_files():
                if file_info[2] > begin_time:
                    self._load_rows_from_csv_file(begin_time, file_info[0])
        except:
            logger.exception('Failed to restore history from CSV files')

    def _load_rows_from_csv_file(self, begin_time, csv_file):
        logger.info("Restoring history from CSV file '" + csv_file + "'")
        len_sources = len(self.sources)
        with open(csv_file, 'r', encoding='utf-8') as file:
            csv_reader = csv.reader(file)
            first_line = True
            for row in csv_reader:
                if first_line:
                    first_line = False
                elif len(row) == 1 + len_sources:
                    row_time = float(row[0])
                    if row_time >= begin_time:
                        for source, value_string in zip(self.sources, row[1:]):
                            if len(value_string) > 0:
                                value = float(value_string)
                                self._values_by_source_id[id(source)].append(
                                    (row_time, value))
def __init__(self, sensor_id, interval, **kwargs):
    super().__init__(**kwargs)
    self.sensor_id = sensor_id
    self._timer = RepeatTimer(interval, self._read_and_send_value)
class RvPlugin(AgentPlugin): Name = 'rv' def __init__(self): self._name = RvPlugin.Name self._update_directory = settings.UpdatesDirectory self._operation_handler = self._create_op_handler() self.uninstaller = uninstaller.Uninstaller() def start(self): """ Runs once the agent core is initialized. @return: Nothing """ # 43200 seconds == 12 hours self._timer = RepeatTimer(43200, self.run_refresh_apps_operation) self._timer.start() def stop(self): """ Runs once the agent core is shutting down. @return: Nothing """ logger.error("stop() method not implemented.") def run_operation(self, operation): """ Executes an operation given to it. """ if not isinstance(operation, RvSofOperation): operation = RvSofOperation(operation.raw_operation) try: operation_methods = { RvOperationValue.InstallUpdate: self._install_operation, RvOperationValue.InstallSupportedApps: self._install_operation, RvOperationValue.InstallCustomApps: self._install_operation, RvOperationValue.InstallAgentUpdate: self._install_operation, RvOperationValue.Uninstall: self._uninstall_operation, RvOperationValue.UninstallAgent: self._uninstall_agent_operation, RvOperationValue.UpdatesAvailable: self.available_updates_operation, RvOperationValue.ApplicationsInstalled: self.installed_applications_operation, RvOperationValue.RefreshApps: self.refresh_apps_operation, RvOperationValue.AgentLogRetrieval: self.retrieve_agent_log, RvOperationValue.ExecuteCommand: self.execute_command } # Calling method operation_methods[operation.type](operation) except KeyError as ke: logging_message = ("Received unrecognized operation: {0}".format( operation.__dict__)) logger.error(logging_message) logger.exception(ke) raise Exception(logging_message) def _get_install_method(self, operation_type): installation_methods = { RvOperationValue.InstallUpdate: self._operation_handler.install_update, RvOperationValue.InstallSupportedApps: self._operation_handler.install_supported_apps, RvOperationValue.InstallCustomApps: self._operation_handler.install_custom_apps, RvOperationValue.InstallAgentUpdate: self._operation_handler.install_agent_update } return installation_methods[operation_type] def _restart_if_needed(self, operation_restart, restart_needed): restart = False if operation_restart == RvOperationValue.ForcedRestart: restart = True elif (operation_restart == RvOperationValue.OptionalRestart and restart_needed): restart = True if restart: restart_op = SofOperation() restart_op.type = OperationValue.Reboot self._register_operation(restart_op) def _install_operation(self, operation): # TODO: if operation specifies update directory, change to that update_dir = settings.UpdatesDirectory failed_to_download = False urn_response = RvUrn.get_operation_urn(operation.type) try: self._download_updates(operation) except Exception as e: logger.error("Error occured while downloading updates.") logger.exception(e) failed_to_download = True if not operation.install_data_list or failed_to_download: error = RvError.UpdatesNotFound if failed_to_download: error = 'Failed to download packages.' 
rvsof_result = RvSofResult( operation.id, operation.type, '', # app id [], # apps_to_delete [], # apps_to_add 'false', # success 'false', # restart error, # error CreateApplication.null_application().to_dict(), # app json urn_response, RequestMethod.PUT) self._send_results(rvsof_result) else: if operation.type == RvOperationValue.InstallAgentUpdate: self._agent_update(operation, update_dir) # TODO(urgent): remove this, only for testing #elif operation.type == RvOperationValue.InstallCustomApps: # self._agent_update(operation, update_dir) else: self._regular_update(operation, update_dir) def _regular_update(self, operation, update_dir): urn_response = RvUrn.get_operation_urn(operation.type) install_method = self._get_install_method(operation.type) restart_needed = False for install_data in operation.install_data_list: install_result = install_method(install_data, update_dir) if install_result.restart == 'true': restart_needed = True rvsof_result = RvSofResult( operation.id, operation.type, install_data.id, # app id install_result.apps_to_delete, # apps_to_delete install_result.apps_to_add, # apps_to_add install_result.successful, # success install_result.restart, # restart install_result.error, # error install_result.app_json, # app json urn_response, RequestMethod.PUT) # TODO(urgent): always leave commented out, or remove #loaded = json.loads(rvsof_result.raw_result) #print json.dumps(loaded, indent=4) self._send_results(rvsof_result) # TODO(urgent): should I call a handlers cleaning method from here? if os.path.isdir(self._update_directory): shutil.rmtree(self._update_directory) logger.info('Done installing updates.') self._restart_if_needed(operation.restart, restart_needed) def _agent_update(self, operation, update_dir): urn_response = RvUrn.get_operation_urn(operation.type) install_method = self._get_install_method(operation.type) # TODO(urgent): remove this, only for testing #install_method = self._operation_handler.install_agent_update restart_needed = False for install_data in operation.install_data_list: install_result = install_method(install_data, operation.id, update_dir) if install_result.restart == 'true': restart_needed = True rvsof_result = RvSofResult( operation.id, operation.type, install_data.id, # app id install_result.apps_to_delete, # apps_to_delete install_result.apps_to_add, # apps_to_add install_result.successful, # success install_result.restart, # restart install_result.error, # error install_result.app_json, # app json urn_response, RequestMethod.PUT) if rvsof_result.success != '': self._send_results(rvsof_result) #if os.path.isdir(self._update_directory): # shutil.rmtree(self._update_directory) logger.info('Done attempting to update agent.') self._restart_if_needed(operation.restart, restart_needed) def _get_pkg_sizes(self, pkgs_path): pkg_names = os.listdir(pkgs_path) pkg_sizes = {} for pkg in pkg_names: pkg_sizes[pkg] = os.path.getsize(os.path.join(pkgs_path, pkg)) return pkg_sizes def _check_if_downloaded(self, file_path, expected_size): file_name = os.path.basename(file_path) downloaded_size = os.path.getsize(file_path) if isinstance(expected_size, basestring): logger.debug("expected_size is instance of basestring.") try: expected_size = int(expected_size) except: logger.error("Failed to convert expected_size to int.") if downloaded_size == expected_size: logger.debug("{0} IS the right size: {1} == {2}".format( file_name, downloaded_size, expected_size)) return True else: logger.critical("{0} is NOT the right size: {1} != {2}".format( file_name, 
downloaded_size, expected_size)) logger.debug("types: {0} and {1}".format(type(downloaded_size), type(expected_size))) return False def _uninstall_agent_operation(self, operation): logger.debug("Attempting to uninstall agent.") self.uninstaller.uninstall() logger.debug("Done attempting to uninstall agent.") def _uninstall_operation(self, operation): restart_needed = False urn_response = RvUrn.get_operation_urn(operation.type) if not operation.uninstall_data_list: error = "No applications specified to uninstall." rvsof_result = RvSofResult( operation.id, operation.type, '', # app id [], # apps_to_delete [], # apps_to_add 'false', # success 'false', # restart error, # error [], # data urn_response, RequestMethod.PUT) self._send_results(rvsof_result) else: for uninstall_data in operation.uninstall_data_list: uninstall_result = \ self._operation_handler.uninstall_application(uninstall_data) if uninstall_result.restart == 'true': restart_needed = True rvsof_result = RvSofResult( operation.id, operation.type, uninstall_data.id, # app id [], # apps_to_delete [], # apps_to_add uninstall_result.success, # success uninstall_result.restart, # restart uninstall_result.error, # error [], # data urn_response, RequestMethod.PUT) self._send_results(rvsof_result) logger.info('Done uninstalling applications.') self.run_refresh_apps_operation() # TODO(urgent): get restart working for uninstalls try: self._restart_if_needed(operation.restart, restart_needed) except AttributeError: logger.error("Failed to check if restart was needed due to no" "restart attribute in operation.") def _check_if_updated(self): logger.info("Checking if agent updated.") update_result = {} try: if os.path.exists(settings.update_file): with open(settings.update_file, 'r') as _file: update_result = json.load(_file) app_id = update_result['app_id'] operation_id = update_result['operation_id'] success = update_result['success'] error = update_result.get('error', '') rvsof_result = RvSofResult( operation_id, RvOperationValue.InstallAgentUpdate, app_id, # app id [], # apps_to_delete [], # apps_to_add success, # success 'false', # restart error, # error "{}", # app json RvUrn.get_install_agent_update_urn(), RequestMethod.PUT) logger.info(rvsof_result.__dict__) self._send_results(rvsof_result) os.remove(settings.update_file) except Exception as e: logger.error("Failure while sending agent update result.") logger.exception(e) def initial_data(self, operation_type): """ Retrieves current installed applications and available updates. Args: operation_type - The type of operation determines what the plugin should return. Returns: (dict) Dictionary contains all installed and available applications. """ # TODO: not sure if this should go here or in the core self._check_if_updated() if operation_type == OperationValue.Startup: self.run_refresh_apps_operation() return None data = {} data['data'] = self.refresh_apps() return data def name(self): """ Retrieves the name for this plugin. @return: Nothing """ return self._name def send_results_callback(self, callback): """ Sets the callback used to send results back to the server. @requires: Nothing """ self._send_results = callback def register_operation_callback(self, callback): """ Sets the callback used to register/save operations with the agent core. 
@requires: Nothing """ self._register_operation = callback def _create_op_handler(self): plat = systeminfo.code() if plat == 'darwin': from operationhandler.machandler import MacOpHandler return MacOpHandler() elif plat == 'linux': distro = platform.linux_distribution()[0].lower() # List to check RedHat derived distros that use yum. _redhat = 'red hat enterprise linux server' _rpm_distros = ['fedora', 'centos', 'centos linux'] _debian_distros = ['debian', 'ubuntu', 'linuxmint'] if distro == _redhat: from operationhandler.rhelhandler import RhelOpHandler logger.debug('Using RhelOpHandler.') return RhelOpHandler() if distro in _rpm_distros: from operationhandler.rpmhandler import RpmOpHandler logger.debug('Using RpmOpHandler.') return RpmOpHandler() elif distro in _debian_distros: from operationhandler.debhandler import DebianHandler logger.debug('Using DebianHandler.') return DebianHandler() else: logger.critical( "Current platform '%s' isn't supported. Ignoring operations." % plat) return None def _recreate_db_tables(self): """ Drops all tables and calls respected methods to populate them with new data. @return: Nothing """ # self._sqlite.recreate_application_table() self._operation_handler.recreate_tables() self._operation_handler.get_available_updates() self._operation_handler.get_installed_updates() self._operation_handler.get_installed_applications() def _download_file(self, download_dir, file_uris, file_size): """ Loops through all the file_uris provided and terminates when downloaded successfully or exhausts the file_uris list. Returns: (bool) - Success download. """ # Loop through each possible uri for the package for file_uri in file_uris: logger.debug("Downloading from: {0}".format(file_uri)) file_name = os.path.basename(file_uri) download_path = os.path.join(download_dir, file_name) try: urllib.urlretrieve(file_uri, download_path) if self._check_if_downloaded(download_path, file_size): logger.debug("Downloaded successfully.") return True else: logger.error( "Failed to download from: {0}".format(file_uri)) except Exception as dlerr: logger.error("Failed to download from: {0}".format(file_uri)) logger.exception(dlerr) continue logger.debug("Failed to download.") return False def _download_updates(self, operation): """ Download updates from the urls provided in the 'operation' parameter. Args: - operation: Operation to be worked with. Returns: Nothing """ if not os.path.isdir(self._update_directory): os.mkdir(self._update_directory) # Loop through every app for install_data in operation.install_data_list: app_dir = os.path.join(self._update_directory, install_data.id) if os.path.isdir(app_dir): shutil.rmtree(app_dir) try: os.mkdir(app_dir) install_data.downloaded = True # Loop through the individual packages that make up the app for uri in install_data.uris: file_uris = uri[RvOperationKey.FileUris] file_size = uri[RvOperationKey.FileSize] logger.debug("File uris: {0}".format(file_uris)) if not self._download_file(app_dir, file_uris, file_size): # On failure to download a single file, quit. install_data.downloaded = False break if install_data.downloaded: # Known file extensions to work on. self._untar_files(app_dir) self._unzip_files(app_dir) except Exception as e: logger.error("Failed while downloading update {0}.".format( install_data.name)) logger.exception(e) logger.debug("Setting downloaded to false for: " + install_data.name) install_data.downloaded = False def _untar_files(self, directory): """ Scans a directory for any tar files and 'untars' them. 
Scans recursively just in case there's tars within tars. Deletes tar files when done. @param directory: Directory to be scanned. @return: Nothing """ tars = glob.glob(os.path.join(directory, '*.tar*')) if not tars: return import tarfile try: for tar_file in tars: tar = tarfile.open(tar_file) tar.extractall(path=directory) tar.close() os.remove(tar_file) self._untar_files(directory) except OSError as e: logger.info("Could not extract tarball.") logger.exception(e) def _unzip_files(self, directory): zips = glob.glob(os.path.join(directory, '*.zip')) if not zips: return import zipfile try: for zip_file in zips: zip = zipfile.ZipFile(zip_file) zip.extractall(directory) zip.close() os.remove(zip_file) self._unzip_files(directory) except OSError as e: logger.info("Could not extract zipfile.") logger.exception(e) def available_updates_operation(self, operation): operation.applications = self.get_available_updates() operation.raw_result = rvformatter.applications(operation) return operation def get_available_updates(self): """ Wrapper around the operation handler's call to get available updates. """ return self._operation_handler.get_available_updates() def installed_applications_operation(self, operation): operation.applications = self.get_applications_installed() operation.raw_result = rvformatter.applications(operation) return operation def get_applications_installed(self): """ Wrapper around the operation handler's call to get installed applications. """ apps = [] apps.extend(self._operation_handler.get_installed_applications()) apps.extend(self._operation_handler.get_installed_updates()) return apps def refresh_apps(self): applications = self.get_installed_and_available_applications() data = [] for app in applications: data.append(app.to_dict()) agent_app = self.get_agent_app() if agent_app: data.append(agent_app.to_dict()) ### TODO(remove): remove or leave commented #try: # with open('all_data_rpm6', 'w') as _file: # json.dump({'data': data}, _file, indent=4) #except Exception: # pass ############################################ ## TODO(remove): remove or leave commented #all_data = {} #with open('all_data', 'r') as _file: # all_data = json.load(_file) #return all_data ############################################# return data def refresh_apps_operation(self, operation): raw = {} # TODO: don't hardcode if not operation.id.endswith('-agent'): raw[OperationKey.OperationId] = operation.id raw[OperationKey.Data] = self.refresh_apps() operation.raw_result = json.dumps(raw) operation.urn_response = RvUrn.get_refresh_apps_urn() operation.request_method = RequestMethod.PUT self._send_results(operation) def get_agent_app(self): try: agent_app = CreateApplication.create( settings.AgentName, settings.AgentVersion, settings.AgentDescription, # description [], # file_data [], # dependencies '', # support_url '', # vendor_severity '', # file_size '', # vendor_id, '', # vendor_name settings.AgentInstallDate, # install_date None, # release_date True, # installed "", # repo "no", # reboot_required "no" # uninstallable ) return agent_app except Exception as e: logger.error("Failed to create agent application instance.") logger.exception(e) return {} def get_installed_and_available_applications(self): """ Wrapper around the operation handler's call to get available updates and installed applications. 
""" apps = [] apps.extend(self._operation_handler.get_installed_updates()) apps.extend(self._operation_handler.get_installed_applications()) apps.extend(self._operation_handler.get_available_updates()) return apps def run_refresh_apps_operation(self): """Creates and runs a refresh apps operation. Returns: Nothing """ operation = RvSofOperation() operation.type = RvOperationValue.RefreshApps self._register_operation(operation) def retrieve_agent_log(self, operation): """ Adds the content from the log file, specified by date in operation, to the operation's raw_result. Date must be of format 'yyyy-mm-dd'. """ # TODO: get date or date intervals date = None log_content = [] try: logs = logger.retrieve_log_path(date) for log_path in logs: with open(log_path, 'r') as log_file: log_content.append(log_file.read()) except Exception as e: logger.error("Failed to retrieve log file.") logger.exception(e) operation.raw_result = ''.join(log_content) return operation def execute_command(self, operation): """ Execute command line command from operation. """ pass
class NetManager():

    def __init__(self, seconds_to_checkin=60):
        """
        Args:
            - seconds_to_checkin: Time, in seconds, between check-ins to the server;
              defaults to 1 minute.

        Return:
            - Nothing
        """
        self._server_url = 'https://{0}/'.format(settings.ServerAddress)
        self._timer = RepeatTimer(seconds_to_checkin, self._agent_checkin)

    def incoming_callback(self, callback):
        """ Sets the callback to be used when operations were received during
        agent check-in.

        @param callback: The operation callback.
        @return: Nothing
        """
        self._incoming_callback = callback

    def _agent_checkin(self):
        """ Checks in to the server to retrieve all pending operations.

        @return: Nothing
        """
        if allow_checkin:
            root = {}
            root[OperationKey.Operation] = OperationValue.CheckIn
            root[OperationKey.OperationId] = ''
            root[OperationKey.AgentId] = settings.AgentId

            success = self.send_message(json.dumps(root),
                                        CoreUrn.get_checkin_urn(),
                                        RequestMethod.GET)
            if not success:
                logger.error("Could not check-in to server. See logs for details.")
        else:
            logger.info("Checkin set to false.")

    def start(self):
        """ Starts the repeating timer that checks-in to the server at set
        intervals.

        @return: Nothing
        """
        self._timer.start()

    def login(self):
        try:
            logger.debug('Logging into server')
            self.http_session = requests.session()
            url = os.path.join(self._server_url, CoreUrn.get_login_urn())
            headers = {'content-type': 'application/json'}
            payload = {
                'name': settings.Username,
                'password': settings.Password
            }
            response = self.http_session.post(url,
                                              data=json.dumps(payload),
                                              headers=headers,
                                              verify=False,
                                              timeout=30)
            logger.debug("Login status code: %s " % response.status_code)
            logger.debug("Login server text: %s " % response.text)
            if response.status_code == 200:
                return True
        except Exception as e:
            logger.error("Agent was unable to login.")
            logger.exception(e)
        return False

    def _get_request_method(self, req_method):
        if req_method == RequestMethod.POST:
            return self.http_session.post
        if req_method == RequestMethod.PUT:
            return self.http_session.put
        if req_method == RequestMethod.GET:
            return self.http_session.get

    def send_message(self, data, urn, req_method):
        """Sends a message to the server and waits for data in return.

        Args:
            - data: JSON formatted str to send the server.
            - urn: RESTful urn to send the data.
            - req_method: HTTP Request Method

        Returns:
            - True if message was sent successfully. False otherwise.
        """
        logger.debug('Sending message to server')
        url = os.path.join(self._server_url, urn)
        headers = {'content-type': 'application/json'}
        payload = data
        sent = False
        logger.debug(url)
        try:
            if not self.login():
                logger.error("Agent was unable to login.")
                return False
            request_method = self._get_request_method(req_method)
            response = request_method(url,
                                      data=payload,
                                      headers=headers,
                                      verify=False,
                                      timeout=30)
            logger.debug("Url: %s " % url)
            logger.debug("Status code: %s " % response.status_code)
            logger.debug("Server text: %s " % response.text)
            if response.status_code == 200:
                sent = True
                received_data = []
                try:
                    received_data = response.json()
                except Exception as e:
                    logger.error("Unable to read data from server. Invalid JSON?")
                    logger.exception(e)
                self._incoming_callback(received_data)
        except Exception as e:
            logger.error("Unable to send data to server.")
            logger.exception(e)
        return sent
class MonitorPlugin(AgentPlugin): Name = 'monitor' def __init__(self): self._name = MonitorPlugin.Name self._previous_user_cpu = 0 self._previous_sys_cpu = 0 self._previous_idle_cpu = 0 self._previous_total_cpu = 0 if systeminfo.code() == systeminfo.OSCode.Mac: self._mac_monitor = MacMonitor() else: self._mac_monitor = None def start(self): """Runs once the agent core is initialized. Returns: - Nothing """ self._timer = RepeatTimer( 300, # 300 seconds == 5 minutes self._create_monit_operation) self._timer.start() def stop(self): """Runs once the agent core is shutting down. Returns: - Nothing """ logger.error("stop() method not implemented.") def run_operation(self, operation): """Executes an operation given to it by the agent core. Returns: - Nothing """ logger.debug("agent-id: {0}, agent-version: {1}".format( settings.AgentId, settings.AgentVersion)) if not isinstance(operation, MonitOperation): operation = MonitOperation(operation.raw_operation) if operation.type == MonitOperationValue.MonitorData: monit_data = self.get_monit_data() operation.raw_result = json.dumps(monit_data) operation.urn_response = MonitUrn.get_monit_data_urn() operation.request_method = RequestMethod.POST else: logger.warning("Unknown operation %s. Ignoring." % operation.type) self._send_results(operation, retry=False) def initial_data(self, operation_type): """Any initial data the server should have on first run. Args: operation_type - The type of operation determines what the plugin should return. Currently ignored for MonitPlugin. Returns: (dict) Dictionary with monitoring data. """ logger.debug("Getting initial monitoring data.") data = self.get_monit_data() logger.debug("Done with initial monitoring data.") return data def get_monit_data(self): """Gathers all monitoring data (memory, cpu, file system). Returns: - Dictionary containing monitoring data. """ logger.debug("Gathering monitoring data.") #operation.memory = self.current_memory_data() #operation.cpu = self.current_cpu_data() #operation.file_system = self.current_filesystem_data() monit_data = {} monit_data['memory'] = self._get_memory_data() monit_data['cpu'] = self._get_cpu_data() monit_data['file_system'] = self._get_file_system_data() logger.debug("Done gathering monitoring data.") return {'data': monit_data} def _get_memory_data(self): memory = self.current_memory_data() memory['used_percent'] = float(memory['used_percent']) memory['free_percent'] = float(memory['free_percent']) memory['used'] = float(memory['used']) memory['free'] = float(memory['free']) return memory def _get_cpu_data(self): cpu = self.current_cpu_data() cpu['idle'] = float(cpu['idle']) cpu['user'] = float(cpu['user']) cpu['system'] = float(cpu['system']) return cpu def _get_file_system_data(self): file_system = self.current_filesystem_data() for fs in file_system: fs['used_percent'] = float(fs['used_percent']) fs['free_percent'] = float(fs['free_percent']) fs['used'] = float(fs['used']) fs['free'] = float(fs['free']) return file_system def _create_monit_operation(self): """A wrapper method to create a monit operation and register it with the core. Returns: - Nothing """ if settings.AgentId: operation = MonitOperation() operation.type = MonitOperationValue.MonitorData self._register_operation(operation.to_json()) def current_memory_data(self): """Gets the current memory stats. Returns: - Memory data dict using applicable MonitKey keys. 
""" if self._mac_monitor: return self._mac_monitor.current_memory_data() cmd = ['cat', '/proc/meminfo'] try: process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, errors = process.communicate() mem_info = output.splitlines()[:2] total_memory = 0 total_free = 0 for info in mem_info: if 'MemTotal:' in info: total_info = info.partition(':')[2].rsplit(' ')[-2:] total = total_info[0] units = total_info[1] if 'kB' in units: total_memory = long(total) continue elif 'MemFree:' in info: free_info = info.partition(':')[2].rsplit(' ')[-2:] free = free_info[0] units = free_info[1] if 'kB' in units: total_free = long(free) continue total_used = total_memory - total_free used_percent = self.calculate_percentage(total_memory, total_used) free_percent = self.calculate_percentage(total_memory, total_free) stats = { MonitKey.Used: total_used, MonitKey.PercentUsed: used_percent, MonitKey.Free: total_free, MonitKey.PercentFree: free_percent } except Exception as e: logger.error("Could not get memory data.") logger.exception(e) stats = { MonitKey.Used: 0, MonitKey.PercentUsed: 0, MonitKey.Free: 0, MonitKey.PercentFree: 0 } return stats def current_cpu_data(self): """Gets the current cpu stats. Returns: - Cpu data dict using applicable MonitKey keys. """ if self._mac_monitor: return self._mac_monitor.current_cpu_data() cmd = ['cat', '/proc/stat'] try: process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, errors = process.communicate() cpu = output.splitlines()[0] cpu_numbers = cpu.split(' ')[2:] current_total = 0 for usage in cpu_numbers: current_total += long(usage) total = current_total - self._previous_total_cpu current_user = long(cpu_numbers[0]) + long(cpu_numbers[1]) current_sys = long(cpu_numbers[2]) current_idle = long(cpu_numbers[3]) user = current_user - self._previous_user_cpu sys = current_sys - self._previous_sys_cpu idle = current_idle - self._previous_idle_cpu self._previous_total_cpu = current_total self._previous_user_cpu = current_user self._previous_sys_cpu = current_sys self._previous_idle_cpu = current_idle stats = { MonitKey.User: self.calculate_percentage(total, user), MonitKey.System: self.calculate_percentage(total, sys), MonitKey.Idle: self.calculate_percentage(total, idle) } except Exception as e: logger.error("Could not get cpu data.") logger.exception(e) stats = {MonitKey.User: 0, MonitKey.System: 0, MonitKey.Idle: 0} return stats def current_filesystem_data(self): """Gets the current file system stats. Returns: - File system data dict using applicable MonitKey keys. 
""" if self._mac_monitor: return self._mac_monitor.current_filesystem_data() stats = [] try: fs_data = self._get_fs_data() for fs in fs_data: fs_dict = {} fs_dict[MonitKey.Name] = fs[0] fs_dict[MonitKey.Used] = fs[1] fs_dict[MonitKey.PercentUsed] = fs[2] fs_dict[MonitKey.Free] = fs[3] fs_dict[MonitKey.PercentFree] = fs[4] fs_dict[MonitKey.Mount] = fs[5] stats.append(fs_dict.copy()) except Exception as e: logger.error("Could not retrieve file system data.") logger.exception(e) return stats def _get_fs_data(self): cmd = ['df', '-hklT'] fs_data = [] process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) raw_output, errors = process.communicate() lines = [] output = [] for line in raw_output.splitlines(): lines.append([x for x in line.split(' ') if x != '']) _partition_blacklist = [ 'tmpfs', 'rootfs', 'none', 'devtmpfs', 'Filesystem' ] for i in range(len(lines)): if len(lines[i]) == 7: output.append(lines[i]) if len(lines[i]) == 1: if len(lines[i + 1]) == 6: lines[i].extend(lines[i + 1]) output.append(lines[i]) for entry in output: if entry[0] in _partition_blacklist: continue try: # An ideal entry would consist of 7 items. It would look like: # ['/dev/sda1', 'ext4', '495844', '38218', '432026', '9%', '/boot'] if len(entry) == 7: name = entry[0] total = int(entry[3]) + int(entry[4]) used = entry[3] percent_used = self.calculate_percentage(total, used) available = entry[4] percent_available = self.calculate_percentage( total, available) mount = entry[6] fs_data.append((name, str(used), percent_used, str(available), percent_available, mount)) except Exception as e: logger.error( "Could not read file system data '%s'. Skipping." % entry) logger.exception(e) return fs_data def calculate_percentage(self, total, diff): if total == 0: return 0 try: return str(round(100 * float(diff) / float(total), 2)) except Exception as e: logger.error("Could not calculate percentage.") logger.exception(e) return 0 def name(self): """Retrieves the name for this plugin. Return: - The plugin's name. """ return self._name def send_results_callback(self, callback): """Sets the callback used to send results back to the server. Returns: - Nothing """ self._send_results = callback def register_operation_callback(self, callback): """Sets the callback used to register/save operations with the agent core. Returns: - Nothing """ self._register_operation = callback
class MqttClient: """ Sends all signal changes to a MQTT broker. """ DELTA_SECONDS_DEFAULT = 10 # seconds def __init__(self): self.broker_host = 'localhost' self.broker_user = '' self.broker_port = 1883 self.broker_password = '' self.use_ssl = False self.broker_ca_certs = None self.broker_base_topic = 'datalogger' self.client = mqtt.Client() # self.client.enable_logger(logger) self.client.on_connect = self._on_connect self.client.on_disconnect = self._on_disconnect self.client.on_message = self._on_message self.__started = False self.__timer = None self.delta_seconds = MqttClient.DELTA_SECONDS_DEFAULT self.sources = [] def use_signals_config(self, signal_sources_config): self.broker_host = signal_sources_config['mqtt_broker_host'] self.broker_port = signal_sources_config['mqtt_broker_port'] self.broker_user = signal_sources_config['mqtt_broker_user'] self.broker_password = signal_sources_config['mqtt_broker_password'] self.use_ssl = signal_sources_config['mqtt_use_ssl'] self.broker_ca_certs = signal_sources_config['mqtt_broker_ca_certs'] self.broker_base_topic = signal_sources_config[ 'mqtt_broker_base_topic'] for group in signal_sources_config['groups']: for source in group['sources']: self.sources.append(source) def start(self): if not self.__started: if self.broker_host == '': logger.info( "NOT starting MQTT client because of config with empty broker" ) else: logger.info("Starting MQTT client for broker " + self.broker_host) if self.broker_user != '': self.client.username_pw_set(self.broker_user, self.broker_password) if self.use_ssl: self.client.tls_set(ca_certs=self.broker_ca_certs) self.client.connect_async(self.broker_host, self.broker_port) self.client.loop_start() self.__timer = RepeatTimer(self.delta_seconds, self.publish) self.__timer.start() self.__started = True def stop(self): if self.__started: logger.info("Stopping MQTT client for broker " + self.broker_host) self.__started = False self.__timer.cancel() self.__timer = None self.client.disconnect() self.client.loop_stop(True) def publish(self): for source in self.sources: signal_value = source.last_value if signal_value is not None: topic = self.broker_base_topic + '/' + source.identifier json_value = json.dumps({ 'value': signal_value.value, 'status': signal_value.status, 'formatted': '---' if signal_value.status != SignalSource.STATUS_OK else source.format(signal_value.value), 'timestamp': datetime.fromtimestamp(signal_value.timestamp).astimezone( timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%fZ'), 'unit': source.unit }) self.client.publish(topic, json_value, 0, True) def _on_connect(self, client, userdata, flags, rc): if rc == 0: logger.info("Connected to MQTT broker " + self.broker_host) else: logger.error("Failed to connect to MQTT broker " + self.broker_host + " rc=" + str(rc)) # this would be the place to client.subscribe("#") def _on_disconnect(self, client, userdata, rc): if rc == 0: logger.info("Disconnected from MQTT broker " + self.broker_host) else: logger.error("Connection lost to MQTT broker " + self.broker_host + " rc=" + str(rc)) def _on_message(self, client, userdata, message): # this would be the place to receive subscription messages pass
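The retained messages published above are plain JSON, so they can be read back with any MQTT client. A minimal consumer sketch, assuming the default settings shown in __init__ (broker on localhost:1883, base topic 'datalogger') and no authentication or TLS:

import json
import paho.mqtt.client as mqtt

def on_message(client, userdata, message):
    # Each payload carries the value, status, formatted, timestamp and unit keys
    # built in MqttClient.publish() above.
    payload = json.loads(message.payload)
    print(message.topic, payload['formatted'], payload['timestamp'])

client = mqtt.Client()
client.on_message = on_message
client.connect('localhost', 1883)
client.subscribe('datalogger/#')
client.loop_forever()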
def __init__(self, interval, text_0='off', text_1='on', **kwargs): super().__init__(**kwargs) self.text_0 = text_0 self.text_1 = text_1 self._timer = RepeatTimer(interval, self._send_value)
def __init__(self, value, interval, **kwargs): super().__init__(**kwargs) self.value = value self.interval = interval self._timer = RepeatTimer(interval, self._send_value)
def __init__(self, identifier: str, value: float, interval: float, **kwargs): super().__init__(identifier, **kwargs) self.value = value self.interval = interval self._timer = RepeatTimer(interval, self._send_random_value)
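The three constructor fragments above follow the same pattern as the larger classes in this collection: the RepeatTimer is created in __init__ but only armed later. A sketch of the companion methods such a class would typically provide; the bodies below are assumptions, not taken from the source:

def start(self):
    # Arm the timer created in __init__; it then invokes its callback periodically.
    self._timer.start()

def stop(self):
    # Stop the periodic calls. A cancelled timer generally cannot be restarted,
    # which is why reset()-style code (see the next snippet) creates a new
    # RepeatTimer after cancelling the old one.
    self._timer.cancel()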
def reset(self): self.timer.cancel() self.timer = RepeatTimer(UPDATE_FREQUENCY, self._task)
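Every snippet in this collection depends on a RepeatTimer helper whose definition is not included here. Judging from how it is used (RepeatTimer(interval, callback), start(), cancel()), it behaves like a threading.Timer that repeats until cancelled; a minimal sketch under that assumption, not the actual implementation:

import threading

class RepeatTimer(threading.Timer):
    """Assumed helper: calls `function` every `interval` seconds until cancel()."""
    def run(self):
        # threading.Timer.run() fires once; this version keeps firing every
        # `interval` seconds until cancel() sets the `finished` event.
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)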