def run(self):
    """ the main loop """
    try:
        master_process = BackgroundProcess.objects.filter(pk=self.process_id).first()
        if master_process:
            master_process.last_update = datetime_now()
            master_process.message = 'init child processes'
            master_process.save()
        else:
            self.delete_pid(force_del=True)
            self.stderr.write("no such process in BackgroundProcesses")
            sys.exit(0)
        self.manage_processes()
        while True:
            # handle signals
            sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None
            # check the DB connection
            check_db_connection()
            # update the master process entry
            BackgroundProcess.objects.filter(pk=self.process_id).update(
                last_update=datetime_now(), message='running..')
            if sig is None:
                self.manage_processes()
            elif sig not in self.SIGNALS:
                logger.error('%s, unhandled signal %d' % (self.label, sig))
                continue
            elif sig == signal.SIGTERM:
                logger.debug('%s, termination signal' % self.label)
                raise StopIteration
            elif sig == signal.SIGHUP:
                # todo handle sighup
                pass
            elif sig == signal.SIGUSR1:
                # restart all child processes
                logger.debug('PID %d, processed SIGUSR1 (%d) signal' % (self.pid, sig))
                self.restart()
            elif sig == signal.SIGUSR2:
                # write the process status to stdout
                self.status()
            sleep(5)
    except StopIteration:
        self.stop()
        self.delete_pid()
        sys.exit(0)
    except SystemExit:
        raise
    except Exception:
        logger.error('%s(%d), unhandled exception\n%s'
                     % (self.label, getpid(), traceback.format_exc()))
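
# Illustrative sketch (not part of the original module): run() above defers
# signal handling by draining SIG_QUEUE inside the main loop instead of doing
# work in the handler itself, which keeps the handler async-signal-safe.
# The names below are hypothetical; assumes the module-level `signal` import.
def _example_deferred_signal_handling():
    sig_queue = []

    def _handler(signum, frame):
        sig_queue.append(signum)  # record only; no ORM or I/O work here

    signal.signal(signal.SIGTERM, _handler)
    # main-loop style drain, mirroring run():
    sig = sig_queue.pop(0) if len(sig_queue) else None
    return sig
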
def stop(self, signum=None, frame=None):
    """ handles a termination signal """
    BackgroundProcess.objects.filter(pk=self.process_id).update(
        pid=0, last_update=datetime_now(), message='stopping..')
    # run the cleanup
    self.cleanup()
    BackgroundProcess.objects.filter(pk=self.process_id).update(
        pid=0, last_update=datetime_now(), message='stopped')
def pre_init_process(self):
    """ will be executed after the process fork """
    db.connections.close_all()
    # update the process info
    BackgroundProcess.objects.filter(pk=self.process_id).update(
        pid=self.pid,
        last_update=datetime_now(),
        running_since=datetime_now(),
        done=False,
        failed=False,
        message='init process..',
    )
    # reset the signal handlers to their defaults, then install our own
    for s in self.SIGNALS:
        signal.signal(s, signal.SIG_DFL)  # reset
    for s in self.SIGNALS:
        signal.signal(s, self.signal)  # set
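
# Illustrative sketch: after a fork the child shares the parent's database
# socket, which is why pre_init_process() calls db.connections.close_all() to
# force Django to open a fresh connection in the child. Hypothetical helper;
# assumes `fork` is available from the module's os imports.
def _example_fork_with_fresh_db_connection(child_main):
    pid = fork()
    if pid == 0:
        # child: drop the inherited connections before touching the ORM
        db.connections.close_all()
        child_main()
        sys.exit(0)
    return pid  # the parent keeps its own connection
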
def stop(self, sig=signal.SIGTERM):
    """ stop the scheduler and all child processes """
    if self.pid is None:
        self.pid = self.read_pid()
    if self.pid is None:
        sp = BackgroundProcess.objects.filter(pk=1).first()
        if sp:
            self.pid = sp.pid
    if self.pid is None or self.pid == 0:
        logger.error("can't determine process id, exiting.")
        return False
    if self.pid != getpid():
        # called from outside the daemon instance
        logger.debug('send sigterm to daemon')
        try:
            kill(self.pid, sig)
            return True
        except OSError as e:
            # ESRCH: no such process; treat any failure to signal as not stopped
            return False
    logger.debug('start termination of the daemon')
    BackgroundProcess.objects.filter(pk=self.process_id).update(
        last_update=datetime_now(), message='stopping..')
    timeout = time() + 60  # wait at most 60 seconds
    self.kill_processes(signal.SIGTERM)
    while self.PROCESSES and time() < timeout:
        self.kill_processes(signal.SIGTERM)
        sleep(1)
    self.kill_processes(signal.SIGKILL)
    BackgroundProcess.objects.filter(pk=self.process_id).update(
        last_update=datetime_now(), message='stopped')
    logger.debug('termination of the daemon done')
    return True
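
# Illustrative sketch of the escalation used above: ask politely with SIGTERM,
# give the process a deadline, then force with SIGKILL. Hypothetical helper;
# assumes `kill` from os plus the module's time()/sleep() imports.
def _example_terminate_with_deadline(pid, deadline_s=60):
    kill(pid, signal.SIGTERM)
    timeout = time() + deadline_s
    while time() < timeout:
        try:
            kill(pid, 0)  # probe: still alive?
        except OSError:
            return True   # process is gone
        sleep(1)
    try:
        kill(pid, signal.SIGKILL)  # deadline passed, escalate
    except OSError:
        pass  # died between the probe and the kill
    return False
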
def loop(self):
    # todo: use try/except or filter().last()
    job = ExportTask.objects.get(pk=self.job_id)
    if job.file_format.upper() == 'HDF5':
        file_ext = '.h5'
    elif job.file_format.upper() == 'MAT':
        file_ext = '.mat'
    elif job.file_format.upper() == 'CSV_EXCEL':
        file_ext = '.csv'
    else:
        return -1, None
    bp = BackgroundProcess.objects.filter(
        enabled=True,
        done=False,
        pid=self.pid,
        parent_process__pk=self.parent_process_id).first()
    if bp is None:
        logger.debug('export job %d no BP found' % self.job_id)
        return -1, None
    job.busy = True
    job.backgroundprocess = bp
    job.save()
    export_recordeddata_to_file(
        job.time_min(),
        job.time_max(),
        filename=None,
        active_vars=job.variables.values_list('pk', flat=True),
        file_extension=file_ext,
        filename_suffix=job.filename_suffix,
        backgroundprocess_id=bp.pk,
        export_task_id=job.pk,
        mean_value_period=job.mean_value_period)
    job = ExportTask.objects.get(pk=job.pk)
    job.done = True
    job.busy = False
    job.datetime_finished = datetime.now(UTC)
    job.save()
    bp = BackgroundProcess.objects.filter(
        enabled=True,
        done=False,
        pid=self.pid,
        parent_process__pk=self.parent_process_id).first()
    if bp:
        bp.done = True
        bp.last_update = datetime_now()
        bp.message = 'stopped'
        bp.save()
    return 0, None
def init_db(self, sig=0):
    """ clear and reinitialize the BackgroundProcess table """
    for process in BackgroundProcess.objects.filter(done=False, pid__gt=0):
        try:
            kill(process.pid, sig)
            self.stderr.write("init db aborted, at least one process is alive\n")
            return False
        except OSError as e:
            if e.errno == errno.ESRCH:
                # no such process
                process.message = 'stopped'
                process.pid = 0
                process.last_update = datetime_now()
                process.save()
            elif e.errno == errno.EPERM:
                # operation not permitted
                self.stderr.write(
                    "can't stop process %d: %s with pid %d, 'Operation not permitted'\n"
                    % (process.pk, process.label, process.pid))
    BackgroundProcess.objects.all().delete()
    # add the Scheduler process
    parent_process = BackgroundProcess(
        pk=1,
        label='pyscada.utils.scheduler.Scheduler',
        enabled=True,
        process_class='pyscada.utils.scheduler.Process')
    parent_process.save()
    # check for processes to add in the init block of each app
    for app in settings.INSTALLED_APPS:
        if app == 'pyscada':
            self.stderr.write(colorize(
                "Warning: please change 'pyscada' to 'pyscada.core' in the "
                "INSTALLED_APPS section of the settings.py!\n",
                fg='red', opts=('bold',)))
            app = 'pyscada.core'
        m = __import__(app, fromlist=[str('a')])
        self.stderr.write("app %s\n" % app)
        if hasattr(m, 'parent_process_list'):
            for process in m.parent_process_list:
                self.stderr.write("--> add %s\n" % process['label'])
                if 'enabled' not in process:
                    process['enabled'] = True
                if 'parent_process' not in process:
                    process['parent_process'] = parent_process
                bp = BackgroundProcess(**process)
                bp.save()
    self.stderr.write("init db completed\n")
    return True
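
# Illustrative sketch: init_db() above collects `parent_process_list` from each
# installed app. An app that wants the scheduler to supervise a worker would
# declare a list of dicts in its package __init__; the keys mirror the
# BackgroundProcess fields used above ('enabled' and 'parent_process' are
# filled in by init_db when missing). All values here are hypothetical.
parent_process_list_example = [
    {
        'pk': 90,  # hypothetical fixed primary key for the app's master process
        'label': 'pyscada.myapp.MyAppProcess',
        'process_class': 'pyscada.myapp.worker.Process',
        'enabled': True,
    },
]
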
def restart(self):
    """ restart all child processes """
    BackgroundProcess.objects.filter(pk=self.process_id).update(
        last_update=datetime_now(), message='restarting..')
    timeout = time() + 60  # wait at most 60 seconds
    self.kill_processes(signal.SIGTERM)
    while self.PROCESSES and time() < timeout:
        sleep(0.1)
    self.kill_processes(signal.SIGKILL)
    self.manage_processes()
    logger.debug('BP %d: restarted' % self.process_id)
def start(self):
    """ start the scheduler """
    # daemonize
    if self.run_as_daemon:
        if not self.demonize():
            self.delete_pid()
            sys.exit(0)
    # recreate the DB connection
    if connection.connection is not None:
        connection.connection.close()
        connection.connection = None
    master_process = BackgroundProcess.objects.filter(
        parent_process__isnull=True, label=self.label, enabled=True).first()
    self.pid = getpid()
    if not master_process:
        self.delete_pid(force_del=True)
        logger.debug('no such process in BackgroundProcesses\n')
        sys.exit(0)
    self.process_id = master_process.pk
    master_process.pid = self.pid
    master_process.last_update = datetime_now()
    master_process.running_since = datetime_now()
    master_process.done = False
    master_process.failed = False
    master_process.message = 'init master process'
    master_process.save()
    BackgroundProcess.objects.filter(
        parent_process__pk=self.process_id, done=False).update(message='stopped')
    # register the signal handlers
    for s in self.SIGNALS:
        signal.signal(s, self.signal)
    # signal.signal(signal.SIGCHLD, self.handle_chld)
    # start the main loop
    self.run()
    self.delete_pid()
    sys.exit(0)
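
# Illustrative sketch (the module's demonize() is not shown here): a classic
# double-fork daemonization, which is presumably what demonize() performs.
# Hypothetical helper; assumes `import os` in addition to the module's imports.
def _example_daemonize():
    if os.fork() > 0:
        sys.exit(0)   # the original parent exits
    os.setsid()       # become session leader, detach from the controlling tty
    if os.fork() > 0:
        sys.exit(0)   # the first child exits; the grandchild cannot re-acquire a tty
    os.chdir('/')
    os.umask(0)
    return True
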
def export_recordeddata_to_file(time_min=None,
                                time_max=None,
                                filename=None,
                                active_vars=None,
                                file_extension=None,
                                append_to_file=False,
                                no_mean_value=False,
                                mean_value_period=5.0,
                                backgroundprocess_id=None,
                                export_task_id=None,
                                **kwargs):
    """ read all data """
    if backgroundprocess_id is not None:
        tp = BackgroundProcess.objects.get(id=backgroundprocess_id)
        tp.message = 'init'
        tp.last_update = datetime_now()
        tp.save()
    else:
        tp = None

    if isinstance(time_max, string_types):
        # convert date strings
        time_max = mktime(datetime.strptime(time_max, "%d-%b-%Y %H:%M:%S").timetuple())
    if isinstance(time_min, string_types):
        # convert date strings
        time_min = mktime(datetime.strptime(time_min, "%d-%b-%Y %H:%M:%S").timetuple())
    # add the default time range
    if time_max is None:
        time_max = time()  # now
    if time_min is None:
        time_min = time() - 24 * 60 * 60  # last 24 hours

    # add a default extension if no extension is given
    if file_extension is None and filename is None:
        file_extension = '.h5'
    elif filename is not None:
        file_extension = '.' + filename.split('.')[-1]
    # validate the file type
    if file_extension not in ['.h5', '.mat', '.csv']:
        if tp is not None:
            tp.last_update = datetime_now()
            tp.message = 'failed wrong file type'
            tp.failed = 1
            tp.save()
        return

    if filename is None:
        if hasattr(settings, 'PYSCADA_EXPORT'):
            if 'output_folder' in settings.PYSCADA_EXPORT:
                backup_file_path = os.path.expanduser(
                    settings.PYSCADA_EXPORT['output_folder'])
            else:
                backup_file_path = os.path.expanduser('~/measurement_data_dumps')
        else:
            backup_file_path = os.path.expanduser('~/measurement_data_dumps')

        # add the filename prefix
        backup_file_name = 'measurement_data'
        if hasattr(settings, 'PYSCADA_EXPORT'):
            if 'file_prefix' in settings.PYSCADA_EXPORT:
                backup_file_name = settings.PYSCADA_EXPORT['file_prefix'] + backup_file_name
        # create the output dir if it does not exist
        if not os.path.exists(backup_file_path):
            os.mkdir(backup_file_path)

        # validate the time values against the recorded data
        db_time_min = RecordedData.objects.first()
        if not db_time_min:
            if tp is not None:
                tp.last_update = datetime_now()
                tp.message = 'no data to export'
                tp.failed = 1
                tp.save()
            return
        time_min = max(db_time_min.time_value(), time_min)
        db_time_max = RecordedData.objects.last()
        if not db_time_max:
            if tp is not None:
                tp.last_update = datetime_now()
                tp.message = 'no data to export'
                tp.failed = 1
                tp.save()
            return
        time_max = min(db_time_max.time_value(), time_max)

        # build the filename and suffix
        cdstr_from = datetime.fromtimestamp(time_min).strftime("%Y_%m_%d_%H%M")
        cdstr_to = datetime.fromtimestamp(time_max).strftime("%Y_%m_%d_%H%M")
        if 'filename_suffix' in kwargs:
            filename = os.path.join(
                backup_file_path,
                backup_file_name + '_' + cdstr_from + '_' + cdstr_to + '_'
                + kwargs['filename_suffix'])
        else:
            filename = os.path.join(
                backup_file_path,
                backup_file_name + '_' + cdstr_from + '_' + cdstr_to)
        # check if the file already exists
        if os.path.exists(filename + file_extension) and not append_to_file:
            count = 0
            filename_old = filename
            while os.path.exists(filename + file_extension):
                filename = filename_old + '_%03.0f' % count
                count += 1
        # append the extension
        filename = filename + file_extension

    # add the filename to the ExportTask
    if export_task_id is not None:
        job = ExportTask.objects.filter(pk=export_task_id).first()
        if job:
            job.filename = filename
            job.save()

    # select the variables to export
    if active_vars is None:
        active_vars = Variable.objects.filter(active=1, device__active=1)
    else:
        if type(active_vars) is str:
            if active_vars == 'all':
                active_vars = Variable.objects.all()
            else:
                active_vars = Variable.objects.filter(active=1, device__active=1)
        else:
            active_vars = Variable.objects.filter(pk__in=active_vars,
                                                  active=1,
                                                  device__active=1)

    if mean_value_period == 0:
        no_mean_value = True
        mean_value_period = 5.0  # todo get from DB, default is 5 seconds

    # calculate the time vector
    timevalues = arange(
        math.ceil(time_min / mean_value_period) * mean_value_period,
        math.floor(time_max / mean_value_period) * mean_value_period,
        mean_value_period)

    # get the meta information from the settings
    if hasattr(settings, 'PYSCADA_META'):
        if 'description' in settings.PYSCADA_META:
            description = settings.PYSCADA_META['description']
        else:
            description = 'None'
        if 'name' in settings.PYSCADA_META:
            name = settings.PYSCADA_META['name']
        else:
            name = 'None'
    else:
        description = 'None'
        name = 'None'

    if file_extension in ['.h5', '.mat']:
        bf = MatCompatibleH5(filename,
                             version='1.1',
                             description=description,
                             name=name,
                             creation_date=strftime('%d-%b-%Y %H:%M:%S'))
        out_timevalues = [unix_time_stamp_to_matlab_datenum(element)
                          for element in timevalues]
    elif file_extension in ['.csv']:
        bf = ExcelCompatibleCSV(filename,
                                version='1.1',
                                description=description,
                                name=name,
                                creation_date=strftime('%d-%b-%Y %H:%M:%S'))
        out_timevalues = [unix_time_stamp_to_excel_datenum(element)
                          for element in timevalues]
    else:
        return

    # write the global time vector
    bf.write_data('time', float64(out_timevalues),
                  id=0,
                  description="global time vector",
                  value_class=validate_value_class('FLOAT64'),
                  unit="Days since 0000-1-1 00:00:00",
                  color='#000000',
                  short_name='time',
                  chart_line_thickness=3)

    # read and write the variables in slices of 10
    for var_idx in range(0, active_vars.count(), 10):
        if tp is not None:
            tp.last_update = datetime_now()
            tp.message = 'reading values from database (%d)' % var_idx
            tp.save()
        # query the data
        var_slice = active_vars[var_idx:var_idx + 10]
        data = RecordedData.objects.get_values_in_time_range(
            variable_id__in=list(var_slice.values_list('pk', flat=True)),
            time_min=time_min,
            time_max=time_max,
            query_first_value=True)
        for var in var_slice:
            # write the background task info
            if tp is not None:
                tp.last_update = datetime_now()
                tp.message = 'writing values for %s (%d) to file' % (var.name, var.pk)
                tp.save()

            # check if the variable is scaled
            if var.scaling is None or var.value_class.upper() in ['BOOL', 'BOOLEAN']:
                value_class = var.value_class
            else:
                value_class = 'FLOAT64'

            # read the unit
            if hasattr(var.unit, 'udunit'):
                udunit = var.unit.udunit
            else:
                udunit = 'None'

            if var.pk not in data:
                # write dummy data
                bf.write_data(var.name,
                              _cast_value([0] * len(timevalues),
                                          validate_value_class(value_class)),
                              id=var.pk,
                              description=var.description,
                              value_class=validate_value_class(value_class),
                              unit=udunit,
                              color=var.chart_line_color_code(),
                              short_name=var.short_name,
                              chart_line_thickness=var.chart_line_thickness)
                continue

            out_data = np.zeros(len(timevalues))
            # i: time vector index
            ii = 0  # source data index
            # calculate the mean values
            last_value = None
            max_ii = len(data[var.pk]) - 1
            for i in range(len(timevalues)):  # iterate over the time vector
                if ii >= max_ii + 1:
                    # no more data in the data source; keep the last value
                    if last_value is not None:
                        out_data[i] = last_value
                    continue
                # init the mean value vars
                tmp = 0.0    # sum
                tmp_i = 0.0  # count
                if data[var.pk][ii][0] < timevalues[i]:
                    # skip elements that are before the current time step
                    while data[var.pk][ii][0] < timevalues[i] and ii < max_ii:
                        last_value = data[var.pk][ii][1]
                        ii += 1
                if ii >= max_ii:
                    if last_value is not None:
                        out_data[i] = last_value
                    continue
                # calculate the mean value
                if timevalues[i] <= data[var.pk][ii][0] < timevalues[i] + mean_value_period:
                    # there is data in the time range
                    while timevalues[i] <= data[var.pk][ii][0] < timevalues[i] + mean_value_period \
                            and ii < max_ii:
                        if no_mean_value:
                            tmp = data[var.pk][ii][1]
                            tmp_i = 1
                        else:
                            tmp += data[var.pk][ii][1]
                            tmp_i += 1
                        last_value = data[var.pk][ii][1]
                        ii += 1
                    # calculate and store the mean value
                    if tmp_i > 0:
                        out_data[i] = tmp / tmp_i
                    else:
                        out_data[i] = data[var.pk][ii][1]
                        last_value = data[var.pk][ii][1]
                else:
                    # there is no data in the time range: keep the last value, not the mean value
                    if last_value is not None:
                        out_data[i] = last_value

            # write the data
            bf.write_data(var.name,
                          _cast_value(out_data, validate_value_class(value_class)),
                          id=var.pk,
                          description=var.description,
                          value_class=validate_value_class(value_class),
                          unit=udunit,
                          color=var.chart_line_color_code(),
                          short_name=var.short_name,
                          chart_line_thickness=var.chart_line_thickness)

    bf.close_file()
    if tp is not None:
        tp.last_update = datetime_now()
        tp.message = 'done'
        tp.done = True
        tp.save()
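
# Illustrative usage sketch for export_recordeddata_to_file(); the argument
# values are hypothetical. Time bounds may be unix timestamps or strings in
# the "%d-%b-%Y %H:%M:%S" format parsed above; with filename=None the output
# lands in PYSCADA_EXPORT['output_folder'] (or ~/measurement_data_dumps).
def _example_export_last_day():
    export_recordeddata_to_file(
        time_min=None,           # defaults to now - 24 h
        time_max=None,           # defaults to now
        filename=None,           # auto-generated name including the time range
        active_vars=[1, 2, 3],   # hypothetical Variable primary keys
        file_extension='.h5',
        mean_value_period=5.0)   # resample to 5 s mean values
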
def run(self):
    BackgroundProcess.objects.filter(pk=self.process_id).update(
        last_update=datetime_now(), message='running..')
    exec_loop = True
    try:
        while True:
            t_start = time()
            # handle signals
            sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None
            # check the DB connection
            check_db_connection()
            # update the progress
            BackgroundProcess.objects.filter(pk=self.process_id).update(
                last_update=datetime_now())
            if sig is None and exec_loop:
                # run the loop action
                status, data = self.loop()
                if data is not None:
                    # write the data to the database
                    for item in data:
                        RecordedData.objects.bulk_create(item)
                if status == 1:
                    # process OK
                    pass
                elif status == -1:
                    # something went wrong
                    # todo handle
                    # raise StopIteration
                    BackgroundProcess.objects.filter(pk=self.process_id).update(
                        last_update=datetime_now(), failed=True)
                    exec_loop = False
                elif status == 0:
                    # the loop is done, exit
                    BackgroundProcess.objects.filter(pk=self.process_id).update(
                        last_update=datetime_now(), done=True)
                    # raise StopIteration
                    exec_loop = False
            elif sig is None:
                continue
            elif sig not in self.SIGNALS:
                logger.debug('%s, unhandled signal %d' % (self.label, sig))
                continue
            elif sig == signal.SIGTERM:
                raise StopIteration
            elif sig == signal.SIGHUP:
                raise StopIteration
            elif sig == signal.SIGUSR1:
                logger.debug('PID %d, processed SIGUSR1 (%d) signal' % (self.pid, sig))
                self.restart()
            elif sig == signal.SIGUSR2:
                # todo handle restart
                pass
            dt = self.dt_set - (time() - t_start)
            if dt > 0:
                sleep(dt)
    except StopIteration:
        self.stop()
        sys.exit(0)
    except Exception:
        logger.debug('%s, unhandled exception\n%s' % (self.label, traceback.format_exc()))
        self.stop()
        sys.exit(0)
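
# Illustrative sketch: run() above treats loop() as a (status, data) contract;
# status 1 keeps the loop running, 0 marks the BackgroundProcess as done, and
# -1 marks it as failed. `data`, when not None, is an iterable whose items are
# passed to RecordedData.objects.bulk_create(). A hypothetical subclass,
# assuming the module's Process base class:
class _ExampleProcess(Process):
    def loop(self):
        try:
            items = []  # would hold RecordedData instances in a real worker
            return 1, [items] if items else None  # keep running
        except Exception:
            return -1, None  # mark the BackgroundProcess as failed
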
def manage_processes(self):
    """ spawn new child processes and reap dead ones """
    # check for new processes to spawn
    process_list = []
    for process in BackgroundProcess.objects.filter(parent_process__pk=self.process_id):
        process_list.append(process)
        process_list += list(process.backgroundprocess_set.filter())
    for process in process_list:
        if not process.enabled or not process.parent_process.enabled or process.done:
            # the process should not run: kill it if it is still registered
            if process.pk in self.PROCESSES:
                timeout = time() + 60  # wait at most 60 seconds
                while True:
                    if process.pk not in self.PROCESSES or time() > timeout:
                        break
                    self.kill_process(process.pk)
                    sleep(1)
            continue
        if process.pk in self.PROCESSES:
            continue
        if process.parent_process.pk not in self.PROCESSES \
                and process.parent_process.pk != self.process_id:
            continue
        # spawn the new process
        process_inst = process.get_process_instance()
        if process_inst is not None:
            self.spawn_process(process_inst)
            logger.debug('process %s started' % process.label)
        else:
            logger.debug('process %s returned None' % process.label)
    # check all running processes
    process_list = list(self.PROCESSES.values())
    for process in process_list:
        try:
            kill(process.pid, 0)
        except OSError as e:
            if e.errno == errno.ESRCH:
                logger.debug('process %d is dead' % process.process_id)
                try:
                    self.PROCESSES.pop(process.process_id)
                except KeyError:
                    pass
                # the process is dead: update or delete its DB entry
                if process.parent_process_id == self.process_id:
                    p = BackgroundProcess.objects.filter(pk=process.process_id).first()
                    if p:
                        p.pid = 0
                        p.last_update = datetime_now()
                        p.failed = True
                        p.save()
                else:
                    # delete the grandchild process entry
                    BackgroundProcess.objects.filter(pk=process.process_id).delete()
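
# Illustrative sketch of the liveness probe used above: kill(pid, 0) delivers
# no signal but still performs the existence and permission checks, so ESRCH
# reliably identifies a dead child. Hypothetical helper; assumes the module's
# `kill` and `errno` imports.
def _example_pid_is_alive(pid):
    try:
        kill(pid, 0)  # signal 0: check only, nothing is delivered
    except OSError as e:
        if e.errno == errno.ESRCH:
            return False  # no such process
        if e.errno == errno.EPERM:
            return True   # the process exists but belongs to another user
        raise
    return True
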
def loop(self):
    """
    called every self.dt_set seconds; requests data and schedules export jobs
    (time.struct_time fields: tm_wday, 0 = Monday; tm_yday, day of the year)
    """
    today = date.today()
    # only start new jobs after the day changed
    if self._current_day != gmtime().tm_yday:
        self._current_day = gmtime().tm_yday
        for job in ScheduledExportTask.objects.filter(active=1):  # get all active jobs
            add_task = False
            if job.export_period == 1:  # daily
                start_time = '%s %02d:00:00' % (
                    (today - timedelta(1)).strftime('%d-%b-%Y'), job.day_time)
                start_time = mktime(
                    datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
                filename_suffix = 'daily_export_%d_%s' % (job.pk, job.label)
                add_task = True
            elif job.export_period == 2 and gmtime().tm_yday % 2 == 0:
                # on even days (2, 4, ...)
                start_time = '%s %02d:00:00' % (
                    (today - timedelta(2)).strftime('%d-%b-%Y'), job.day_time)
                start_time = mktime(
                    datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
                filename_suffix = 'two_day_export_%d_%s' % (job.pk, job.label)
                add_task = True
            elif job.export_period == 7 and gmtime().tm_wday == 0:  # every Monday
                start_time = '%s %02d:00:00' % (
                    (today - timedelta(7)).strftime('%d-%b-%Y'), job.day_time)
                start_time = mktime(
                    datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
                filename_suffix = 'weekly_export_%d_%s' % (job.pk, job.label)
                add_task = True
            elif job.export_period == 14 and gmtime().tm_yday % 14 == 0:  # every 14 days
                start_time = '%s %02d:00:00' % (
                    (today - timedelta(14)).strftime('%d-%b-%Y'), job.day_time)
                start_time = mktime(
                    datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
                filename_suffix = 'two_week_export_%d_%s' % (job.pk, job.label)
                add_task = True
            elif job.export_period == 30 and gmtime().tm_yday % 30 == 0:  # every 30 days
                start_time = '%s %02d:00:00' % (
                    (today - timedelta(30)).strftime('%d-%b-%Y'), job.day_time)
                start_time = mktime(
                    datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
                filename_suffix = '30_day_export_%d_%s' % (job.pk, job.label)
                add_task = True
            if job.day_time == 0:
                end_time = '%s %02d:59:59' % (
                    (today - timedelta(1)).strftime('%d-%b-%Y'), 23)
            else:
                end_time = '%s %02d:59:59' % (
                    today.strftime('%d-%b-%Y'), job.day_time - 1)
            end_time = mktime(
                datetime.strptime(end_time, "%d-%b-%Y %H:%M:%S").timetuple())
            # create the ExportTask
            if add_task:
                et = ExportTask(
                    label=filename_suffix,
                    datetime_max=datetime.fromtimestamp(end_time, UTC),
                    datetime_min=datetime.fromtimestamp(start_time, UTC),
                    filename_suffix=filename_suffix,
                    mean_value_period=job.mean_value_period,
                    file_format=job.file_format,
                    datetime_start=datetime.fromtimestamp(end_time + 60, UTC))
                et.save()
                et.variables.add(*job.variables.all())

    # check the running tasks and start the next ExportTask
    running_jobs = ExportTask.objects.filter(busy=True, failed=False)
    if running_jobs:
        for job in running_jobs:
            if time() - job.start() < 30:
                # only check tasks that have been running for longer than 30 s
                continue
            if job.backgroundprocess is None:
                # the job has no BackgroundProcess associated with it, mark it as failed
                job.failed = True
                job.save()
                continue
            if datetime_now() - timedelta(hours=1) > job.backgroundprocess.last_update:
                # the BackgroundProcess has not reported for over an hour, wait
                continue
            if job.backgroundprocess.pid == 0:
                # the job has no valid pid, mark it as failed
                job.failed = True
                job.save()
                continue
    else:
        # start the next ExportTask
        job = ExportTask.objects.filter(
            done=False,
            busy=False,
            failed=False,
            datetime_start__lte=datetime.now(UTC)).first()
        if job:
            bp = BackgroundProcess(
                label='pyscada.export.ExportProcess-%d' % job.pk,
                message='waiting..',
                enabled=True,
                parent_process_id=self.parent_process_id,
                process_class='pyscada.export.worker.ExportProcess',
                process_class_kwargs=json.dumps({"job_id": job.pk}))
            bp.save()
            if job.datetime_start is None:
                job.datetime_start = datetime.now(UTC)
            job.busy = True
            job.save()

    # delete all done jobs older than 60 days
    for job in ExportTask.objects.filter(
            done=True,
            busy=False,
            datetime_start__lte=datetime.fromtimestamp(time() - 60 * 24 * 60 * 60, UTC)):
        job.delete()
    # delete all failed jobs older than 60 days
    for job in ExportTask.objects.filter(
            failed=True,
            datetime_start__lte=datetime.fromtimestamp(time() - 60 * 24 * 60 * 60, UTC)):
        job.delete()
    return 1, None  # no data to store
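
# Illustrative sketch: the scheduler above builds its export windows by
# formatting "%d-%b-%Y %H:%M:%S" strings and parsing them back through
# mktime(strptime(...)). A condensed, hypothetical version for the daily case
# (day_time is the full hour at which the export day starts):
def _example_daily_window(day_time):
    today = date.today()
    start_time = '%s %02d:00:00' % ((today - timedelta(1)).strftime('%d-%b-%Y'), day_time)
    start_time = mktime(datetime.strptime(start_time, "%d-%b-%Y %H:%M:%S").timetuple())
    if day_time == 0:
        end_time = '%s 23:59:59' % (today - timedelta(1)).strftime('%d-%b-%Y')
    else:
        end_time = '%s %02d:59:59' % (today.strftime('%d-%b-%Y'), day_time - 1)
    end_time = mktime(datetime.strptime(end_time, "%d-%b-%Y %H:%M:%S").timetuple())
    return start_time, end_time  # unix timestamps spanning one export day
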