def init_process(self):
    self.processes = []
    for process in BackgroundProcess.objects.filter(
            parent_process__pk=self.process_id, done=False):
        try:
            kill(process.pid, 0)
        except OSError as e:
            if e.errno == errno.ESRCH:
                process.delete()
                continue
        logger.debug('process %d is alive' % process.pk)
        process.stop(cleanup=True)
    # clean up
    BackgroundProcess.objects.filter(parent_process__pk=self.process_id,
                                     done=False).delete()
    for item in Device.objects.filter(active=True, **self.device_filter):
        bp = BackgroundProcess(label=self.bp_label % item.pk,
                               message='waiting..',
                               enabled=True,
                               parent_process_id=self.process_id,
                               process_class=self.process_class,
                               process_class_kwargs=json.dumps(
                                   {'device_id': item.pk}))
        bp.save()
        self.processes.append({
            'id': bp.id,
            'key': item.pk,
            'device_id': item.pk,
            'failed': 0
        })
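# The generic init_process() above is driven entirely by attributes that a
# concrete master process is expected to provide: bp_label, device_filter and
# process_class. A minimal sketch of such a subclass follows; the class name,
# base-class name and protocol id are hypothetical, only the attribute names
# come from the code above.
class ExampleMasterProcess(DAQProcessTemplate):  # base-class name is assumed
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # label pattern for the spawned BackgroundProcess rows
        self.bp_label = 'pyscada.example-%s'
        # extra filter applied on top of Device.objects.filter(active=True, ...)
        self.device_filter = {'protocol_id': 99}  # hypothetical protocol id
        # worker class instantiated once per matching device
        self.process_class = 'pyscada.utils.scheduler.SingleDeviceDAQProcess'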
def loop(self):
    """
    """
    # check if all smbus processes are running
    for smbus_process in self.SMbus_PROCESSES:
        try:
            BackgroundProcess.objects.get(pk=smbus_process['id'])
        except (BackgroundProcess.DoesNotExist,
                BackgroundProcess.MultipleObjectsReturned):
            # process is dead, spawn a new instance
            if smbus_process['failed'] < 3:
                bp = BackgroundProcess(
                    label='pyscada.smbus-%s' % smbus_process['key'],
                    message='waiting..',
                    enabled=True,
                    parent_process_id=self.process_id,
                    process_class='pyscada.utils.scheduler.SingleDeviceDAQProcess',
                    process_class_kwargs=json.dumps(
                        {'device_id': smbus_process['device_id']}))
                bp.save()
                smbus_process['id'] = bp.id
                smbus_process['failed'] += 1
            else:
                logger.error('process pyscada.smbus-%s failed more than 3 times'
                             % smbus_process['key'])
    return 1, None
def loop(self):
    """
    """
    # check if all scripting processes are running
    for script_process in self.SCRIPT_PROCESSES:
        try:
            BackgroundProcess.objects.get(pk=script_process['id'])
        except (BackgroundProcess.DoesNotExist,
                BackgroundProcess.MultipleObjectsReturned):
            # process is dead, spawn a new instance
            if script_process['failed'] < 3:
                script = Script.objects.get(pk=script_process['script_id'])
                bp = BackgroundProcess(
                    label='pyscada.scripting.ScriptingProcess-%d' % script.pk,
                    message='waiting..',
                    enabled=True,
                    parent_process_id=self.process_id,
                    process_class='pyscada.scripting.worker.ScriptingProcess',
                    process_class_kwargs=json.dumps(
                        {'script_id': script.pk,
                         'script_file': script.script_file,
                         'dt_set': script.interval}))
                bp.save()
                script_process['id'] = bp.id
                script_process['failed'] += 1
            else:
                logger.error(
                    'process pyscada.scripting.user_script-%d failed more than 3 times'
                    % script_process['script_id'])
        except Exception:
            logger.debug('%s, unhandled exception\n%s'
                         % (self.label, traceback.format_exc()))
    return 1, None
def loop(self):
    """
    """
    # check if all modbus processes are running
    for modbus_process in self.MODBUS_PROCESSES:
        try:
            BackgroundProcess.objects.get(pk=modbus_process['id'])
        except (BackgroundProcess.DoesNotExist,
                BackgroundProcess.MultipleObjectsReturned):
            # process is dead, spawn a new instance
            if modbus_process['failed'] < 3:
                bp = BackgroundProcess(
                    label='pyscada.modbus-%s' % modbus_process['key'],
                    message='waiting..',
                    enabled=True,
                    parent_process_id=self.process_id,
                    process_class='pyscada.utils.scheduler.MultiDeviceDAQProcess',
                    process_class_kwargs=json.dumps(
                        {'device_ids': modbus_process['device_ids']}))
                bp.save()
                modbus_process['id'] = bp.id
                modbus_process['failed'] += 1
            else:
                logger.error('process pyscada.modbus-%s failed more than 3 times'
                             % modbus_process['key'])
        except Exception:
            logger.debug('%s, unhandled exception\n%s'
                         % (self.label, traceback.format_exc()))
    return 1, None
def init_process(self):
    for process in BackgroundProcess.objects.filter(
            parent_process__pk=self.process_id, done=False):
        try:
            kill(process.pid, 0)
        except OSError as e:
            if e.errno == errno.ESRCH:
                process.delete()
                continue
        logger.debug('process %d is alive' % process.pk)
        process.stop()
    # clean up
    BackgroundProcess.objects.filter(parent_process__pk=self.process_id,
                                     done=False).delete()
    for script_process in Script.objects.filter(active=True):
        bp = BackgroundProcess(
            label='pyscada.scripting.ScriptingProcess-%d' % script_process.pk,
            message='waiting..',
            enabled=True,
            parent_process_id=self.process_id,
            process_class='pyscada.scripting.worker.ScriptingProcess',
            process_class_kwargs=json.dumps(
                {'script_id': script_process.pk,
                 'script_file': script_process.script_file,
                 'dt_set': script_process.interval}))
        bp.save()
        self.SCRIPT_PROCESSES.append({'id': bp.id,
                                      'script_id': script_process.pk,
                                      'failed': 0})
def init_process(self):
    # clean up
    BackgroundProcess.objects.filter(parent_process__pk=self.process_id,
                                     done=False).delete()
    grouped_ids = {}
    for item in GPIODevice.objects.filter(gpio_device__active=True):
        if item.protocol == PROTOCOL_ID:  # GPIO IP
            # every device gets its own process
            grouped_ids['%d-%s:%s-%d' % (item.gpio_device.pk, item.ip_address,
                                         item.port, item.unit_id)] = [item]
            continue
        # every port gets its own process
        if item.port not in grouped_ids:
            grouped_ids[item.port] = []
        grouped_ids[item.port].append(item)
    for key, values in grouped_ids.items():
        bp = BackgroundProcess(
            label='pyscada.gpio-%s' % key,
            message='waiting..',
            enabled=True,
            parent_process_id=self.process_id,
            process_class='pyscada.utils.scheduler.SingleDeviceDAQProcess',
            process_class_kwargs=json.dumps(
                {'device_ids': [i.gpio_device.pk for i in values]}))
        bp.save()
        self.GPIO_PROCESSES.append({
            'id': bp.id,
            'key': key,
            'device_ids': [i.gpio_device.pk for i in values],
            'failed': 0
        })
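# To make the grouping above concrete: IP devices get a process of their own,
# while serial devices sharing a port end up in one group. A toy illustration
# with stubbed devices follows; the Dev namedtuple and the concrete values are
# made up, only the key-building logic mirrors the code above.
from collections import namedtuple

Dev = namedtuple('Dev', 'pk protocol ip_address port unit_id')
PROTOCOL_ID_IP = 0  # hypothetical "IP" protocol marker

devices = [
    Dev(1, PROTOCOL_ID_IP, '10.0.0.5', 502, 1),   # own process
    Dev(2, 1, None, '/dev/ttyUSB0', 0),           # grouped by port
    Dev(3, 1, None, '/dev/ttyUSB0', 0),           # same port, same group
]

grouped_ids = {}
for item in devices:
    if item.protocol == PROTOCOL_ID_IP:
        grouped_ids['%d-%s:%s-%d' % (item.pk, item.ip_address,
                                     item.port, item.unit_id)] = [item]
        continue
    grouped_ids.setdefault(item.port, []).append(item)

print(sorted(grouped_ids))  # ['/dev/ttyUSB0', '1-10.0.0.5:502-1']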
def loop(self):
    """
    """
    # check if all processes are running
    for process in self.processes:
        try:
            BackgroundProcess.objects.get(pk=process['id'])
        except (BackgroundProcess.DoesNotExist,
                BackgroundProcess.MultipleObjectsReturned):
            # process is dead, spawn a new instance
            if process['failed'] < 3:
                bp = BackgroundProcess(
                    label=self.bp_label % process['key'],
                    message='waiting..',
                    enabled=True,
                    parent_process_id=self.process_id,
                    process_class=self.process_class,
                    process_class_kwargs=json.dumps(
                        {'device_ids': process['device_ids']}))
                bp.save()
                process['id'] = bp.id
                process['failed'] += 1
            else:
                logger.error('process %s failed more than 3 times'
                             % (self.bp_label % process['key']))
        except Exception:
            logger.debug('%s, unhandled exception\n%s'
                         % (self.label, traceback.format_exc()))
    return 1, None
def init_db(self, sig=0):
    """
    """
    for process in BackgroundProcess.objects.filter(done=False, pid__gt=0):
        try:
            kill(process.pid, sig)
            self.stderr.write("init db aborted, at least one process is alive\n")
            return False
        except OSError as e:
            if e.errno == errno.ESRCH:
                # no such process
                process.message = 'stopped'
                process.pid = 0
                process.last_update = now()
                process.save()
            elif e.errno == errno.EPERM:
                # operation not permitted
                self.stderr.write(
                    "can't stop process %d: %s with pid %d, 'Operation not permitted'\n"
                    % (process.pk, process.label, process.pid))
    BackgroundProcess.objects.all().delete()
    # add the Scheduler process
    parent_process = BackgroundProcess(
        pk=1,
        label='pyscada.utils.scheduler.Scheduler',
        enabled=True,
        process_class='pyscada.utils.scheduler.Process')
    parent_process.save()
    # check for processes to add in the init block of each app
    for app in settings.INSTALLED_APPS:
        if app == 'pyscada':
            self.stderr.write(colorize(
                "Warning: please change 'pyscada' to 'pyscada.core' in the "
                "INSTALLED_APPS section of the settings.py!\n",
                fg='red', opts=('bold',)))
            app = 'pyscada.core'
        m = __import__(app, fromlist=[str('a')])
        self.stderr.write("app %s\n" % app)
        if hasattr(m, 'parent_process_list'):
            for process in m.parent_process_list:
                self.stderr.write("--> add %s\n" % process['label'])
                if 'enabled' not in process:
                    process['enabled'] = True
                if 'parent_process' not in process:
                    process['parent_process'] = parent_process
                bp = BackgroundProcess(**process)
                bp.save()
    self.stderr.write("init db completed\n")
    return True
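# init_db() above discovers per-app master processes through a module-level
# parent_process_list. A sketch of what an app package could expose; the pk,
# label and process_class values here are illustrative, but the key names
# match the BackgroundProcess(**process) call above.
parent_process_list = [
    {
        'pk': 90,  # hypothetical fixed pk for this master process
        'label': 'pyscada.example-master',
        'process_class': 'pyscada.utils.scheduler.SingleDeviceDAQProcess',
        # 'enabled' and 'parent_process' are filled in by init_db when omitted
    },
]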
def init_process(self):
    for process in BackgroundProcess.objects.filter(
            parent_process__pk=self.process_id, done=False):
        try:
            kill(process.pid, 0)
        except OSError as e:
            if e.errno == errno.ESRCH:
                process.delete()
                continue
        logger.debug('process %d is alive' % process.pk)
        process.stop()
    # clean up
    BackgroundProcess.objects.filter(parent_process__pk=self.process_id,
                                     done=False).delete()
    grouped_ids = {}
    for item in ModbusDevice.objects.filter(modbus_device__active=True):
        if item.protocol == 0:  # Modbus IP
            # every device gets its own process
            grouped_ids['%d-%s:%s-%d' % (item.modbus_device.pk, item.ip_address,
                                         item.port, item.unit_id)] = [item]
            continue
        # every port gets its own process
        if item.port not in grouped_ids:
            grouped_ids[item.port] = []
        grouped_ids[item.port].append(item)
    for key, values in grouped_ids.items():
        bp = BackgroundProcess(
            label='pyscada.modbus-%s' % key,
            message='waiting..',
            enabled=True,
            parent_process_id=self.process_id,
            process_class='pyscada.utils.scheduler.MultiDeviceDAQProcess',
            process_class_kwargs=json.dumps(
                {'device_ids': [i.modbus_device.pk for i in values]}))
        bp.save()
        self.MODBUS_PROCESSES.append({
            'id': bp.id,
            'key': key,
            'device_ids': [i.modbus_device.pk for i in values],
            'failed': 0
        })
def _add_process_to_list(self, script_process):
    bp = BackgroundProcess(
        label='pyscada.scripting-%d' % script_process.pk,
        message='waiting..',
        enabled=True,
        parent_process_id=self.process_id,
        process_class='pyscada.scripting.worker.ScriptingProcess',
        process_class_kwargs=json.dumps(
            {'script_id': script_process.pk,
             'script_file': script_process.script_file,
             'dt_set': script_process.interval}))
    bp.save()
    self.SCRIPT_PROCESSES.append({
        'id': bp.id,
        'script_id': script_process.pk,
        'failed': 0
    })
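# With this helper, the scripting master's init_process() can be reduced to
# registering every active script. A sketch under that assumption; it reuses
# the stale-row cleanup shown in the other init_process() variants.
def init_process(self):
    # clean up leftover BackgroundProcess rows from a previous run
    BackgroundProcess.objects.filter(parent_process__pk=self.process_id,
                                     done=False).delete()
    self.SCRIPT_PROCESSES = []
    for script in Script.objects.filter(active=True):
        self._add_process_to_list(script)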
def init_process(self):
    # clean up
    BackgroundProcess.objects.filter(parent_process__pk=self.process_id,
                                     done=False).delete()
    for item in Device.objects.filter(active=True, protocol_id=PROTOCOL_ID):
        # every device gets its own process
        bp = BackgroundProcess(
            label='pyscada.systemstat-%s' % item.id,
            message='waiting..',
            enabled=True,
            parent_process_id=self.process_id,
            process_class='pyscada.utils.scheduler.SingleDeviceDAQProcess',
            process_class_kwargs=json.dumps({'device_id': item.pk}))
        bp.save()
        self.SYSTEMSTAT_PROCESSES.append({
            'id': bp.id,
            'key': item.id,
            'device_id': item.pk,
            'failed': 0
        })
def init_process(self):
    # clean up
    BackgroundProcess.objects.filter(parent_process__pk=self.process_id,
                                     done=False).delete()
    for item in VISADevice.objects.filter(visa_device__active=True):
        # every device gets its own process
        bp = BackgroundProcess(
            label='pyscada.visa-%s' % item.id,
            message='waiting..',
            enabled=True,
            parent_process_id=self.process_id,
            process_class='pyscada.utils.scheduler.SingleDeviceDAQProcess',
            process_class_kwargs=json.dumps({'device_id': item.visa_device.pk}))
        bp.save()
        self.VISA_PROCESSES.append({
            'id': bp.id,
            'key': item.id,
            'device_id': item.visa_device.pk,
            'failed': 0
        })
def init_process(self):
    # clean up
    BackgroundProcess.objects.filter(parent_process__pk=self.process_id,
                                     done=False).delete()
    for item in OneWireDevice.objects.filter(onewire_device__active=True):
        # every device gets its own process
        bp = BackgroundProcess(
            label='pyscada.onewire-%s' % item.id,
            message='waiting..',
            enabled=True,
            parent_process_id=self.process_id,
            process_class='pyscada.utils.scheduler.SingleDeviceDAQProcess',
            process_class_kwargs=json.dumps(
                {'device_id': item.onewire_device.pk}))
        bp.save()
        self.ONEWIRE_PROCESSES.append({
            'id': bp.id,
            'key': item.id,
            'device_id': item.onewire_device.pk,
            'failed': 0
        })
def loop(self):
    """
    this function will be called every self.dt_set seconds

    request data:
        tm_wday 0=Monday
        tm_yday
    """
    today = date.today()
    # only start new jobs after the day has changed
    if self._current_day != gmtime().tm_yday:
        self._current_day = gmtime().tm_yday
        for job in ScheduledExportTask.objects.filter(active=1):  # get all active jobs
            add_task = False
            if job.export_period == 1:  # daily
                start_time = '%s %02d:00:00' % (
                    (today - timedelta(1)).strftime('%d-%b-%Y'), job.day_time)
                start_time = mktime(datetime.strptime(
                    start_time, "%d-%b-%Y %H:%M:%S").timetuple())
                filename_suffix = 'daily_export_%d_%s' % (job.pk, job.label)
                add_task = True
            elif job.export_period == 2 and gmtime().tm_yday % 2 == 0:
                # on even days (2, 4, ...)
                start_time = '%s %02d:00:00' % (
                    (today - timedelta(2)).strftime('%d-%b-%Y'), job.day_time)
                start_time = mktime(datetime.strptime(
                    start_time, "%d-%b-%Y %H:%M:%S").timetuple())
                filename_suffix = 'two_day_export_%d_%s' % (job.pk, job.label)
                add_task = True
            elif job.export_period == 7 and gmtime().tm_wday == 0:
                # every Monday
                start_time = '%s %02d:00:00' % (
                    (today - timedelta(7)).strftime('%d-%b-%Y'), job.day_time)
                start_time = mktime(datetime.strptime(
                    start_time, "%d-%b-%Y %H:%M:%S").timetuple())
                filename_suffix = 'weekly_export_%d_%s' % (job.pk, job.label)
                add_task = True
            elif job.export_period == 14 and gmtime().tm_yday % 14 == 0:
                # every 14 days
                start_time = '%s %02d:00:00' % (
                    (today - timedelta(14)).strftime('%d-%b-%Y'), job.day_time)
                start_time = mktime(datetime.strptime(
                    start_time, "%d-%b-%Y %H:%M:%S").timetuple())
                filename_suffix = 'two_week_export_%d_%s' % (job.pk, job.label)
                add_task = True
            elif job.export_period == 30 and gmtime().tm_yday % 30 == 0:
                # every 30 days
                start_time = '%s %02d:00:00' % (
                    (today - timedelta(30)).strftime('%d-%b-%Y'), job.day_time)
                start_time = mktime(datetime.strptime(
                    start_time, "%d-%b-%Y %H:%M:%S").timetuple())
                filename_suffix = '30_day_export_%d_%s' % (job.pk, job.label)
                add_task = True
            if job.day_time == 0:
                end_time = '%s %02d:59:59' % (
                    (today - timedelta(1)).strftime('%d-%b-%Y'), 23)
            else:
                end_time = '%s %02d:59:59' % (
                    today.strftime('%d-%b-%Y'), job.day_time - 1)
            end_time = mktime(datetime.strptime(
                end_time, "%d-%b-%Y %H:%M:%S").timetuple())
            # create the ExportTask
            if add_task:
                et = ExportTask(
                    label=filename_suffix,
                    datetime_max=datetime.fromtimestamp(end_time, UTC),
                    datetime_min=datetime.fromtimestamp(start_time, UTC),
                    filename_suffix=filename_suffix,
                    mean_value_period=job.mean_value_period,
                    file_format=job.file_format,
                    datetime_start=datetime.fromtimestamp(end_time + 60, UTC))
                et.save()
                et.variables.add(*job.variables.all())

    # check running tasks and start the next export task
    running_jobs = ExportTask.objects.filter(busy=True, failed=False)
    if running_jobs:
        for job in running_jobs:
            if time() - job.start() < 30:
                # only check a task once it has been running longer than 30 s
                continue
            if job.backgroundprocess is None:
                # the job has no background process associated, mark it as failed
                job.failed = True
                job.save()
                continue
            if now() - timedelta(hours=1) > job.backgroundprocess.last_update:
                # the background process has not reported in for over an
                # hour, keep waiting
                continue
            if job.backgroundprocess.pid == 0:
                # the job has no valid pid, mark it as failed
                job.failed = True
                job.save()
                continue
    else:
        # start the next export task
        job = ExportTask.objects.filter(
            done=False, busy=False, failed=False,
            datetime_start__lte=datetime.now(UTC)).first()
        if job:
            bp = BackgroundProcess(
                label='pyscada.export.ExportProcess-%d' % job.pk,
                message='waiting..',
                enabled=True,
                parent_process_id=self.parent_process_id,
                process_class='pyscada.export.worker.ExportProcess',
                process_class_kwargs=json.dumps({'job_id': job.pk}))
            bp.save()
            if job.datetime_start is None:
                job.datetime_start = datetime.now(UTC)
            job.busy = True
            job.save()

    # delete all done jobs older than 60 days
    for job in ExportTask.objects.filter(
            done=True, busy=False,
            datetime_start__lte=datetime.fromtimestamp(
                time() - 60 * 24 * 60 * 60, UTC)):
        job.delete()
    # delete all failed jobs older than 60 days
    for job in ExportTask.objects.filter(
            failed=True,
            datetime_start__lte=datetime.fromtimestamp(
                time() - 60 * 24 * 60 * 60, UTC)):
        job.delete()
    return 1, None  # because we have no data to store
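# For a concrete reading of the window arithmetic above: a daily job
# (export_period == 1) with day_time = 6, evaluated on 2 Mar, exports
# 1 Mar 06:00:00 through 2 Mar 05:59:59 and becomes startable one minute
# after the window closes. A standalone recomputation follows; the date and
# day_time are made-up example values, and the printed strings assume an
# English locale for %b.
from datetime import date, datetime, timedelta
from time import mktime

today = date(2024, 3, 2)   # pretend "today"
day_time = 6               # job.day_time

start_s = '%s %02d:00:00' % ((today - timedelta(1)).strftime('%d-%b-%Y'), day_time)
end_s = '%s %02d:59:59' % (today.strftime('%d-%b-%Y'), day_time - 1)

start = mktime(datetime.strptime(start_s, "%d-%b-%Y %H:%M:%S").timetuple())
end = mktime(datetime.strptime(end_s, "%d-%b-%Y %H:%M:%S").timetuple())

print(start_s)  # 01-Mar-2024 06:00:00
print(end_s)    # 02-Mar-2024 05:59:59
print(end - start == 24 * 3600 - 1)  # True: one full day minus a second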
def loop(self):
    """
    """
    # add new active scripts
    for s in Script.objects.filter(active=True):
        script_found = False
        for script_process in self.SCRIPT_PROCESSES:
            if s.pk == script_process['script_id']:
                script_found = True
        if not script_found:
            logger.info("%s not found - add script to %s" % (s.label, self.label))
            self._add_process_to_list(s)

    # check if all scripting processes are running
    # iterate over a copy, because entries may be removed from the list
    for script_process in list(self.SCRIPT_PROCESSES):
        try:
            bp = BackgroundProcess.objects.get(pk=script_process['id'])
            # stop deactivated scripts and remove them from the list
            try:
                if not Script.objects.get(pk=script_process['script_id']).active:
                    bp.stop(cleanup=True)
                    logger.debug("stop %s" % bp)
                    logger.debug("remove %s" % script_process)
                    self.SCRIPT_PROCESSES.remove(script_process)
            except Script.DoesNotExist:
                bp.stop(cleanup=True)
                logger.debug("stop %s" % bp)
                logger.debug("remove %s" % script_process)
                self.SCRIPT_PROCESSES.remove(script_process)
        except (BackgroundProcess.DoesNotExist,
                BackgroundProcess.MultipleObjectsReturned):
            # process is dead, spawn a new instance
            if script_process['failed'] < 3:
                try:
                    script = Script.objects.get(pk=script_process['script_id'],
                                                active=True)
                    bp = BackgroundProcess(
                        label='pyscada.scripting-%d' % script.pk,
                        message='waiting..',
                        enabled=True,
                        parent_process_id=self.process_id,
                        process_class='pyscada.scripting.worker.ScriptingProcess',
                        process_class_kwargs=json.dumps(
                            {'script_id': script.pk,
                             'script_file': script.script_file,
                             'dt_set': script.interval}))
                    bp.save()
                    script_process['id'] = bp.id
                    script_process['failed'] += 1
                except Script.DoesNotExist:
                    logger.debug("removing %s from list" % script_process)
                    self.SCRIPT_PROCESSES.remove(script_process)
            else:
                logger.error(
                    'process pyscada.scripting.user_script-%d failed more than 3 times'
                    % script_process['script_id'])
        except Exception:
            logger.debug('%s, unhandled exception\n%s'
                         % (self.label, traceback.format_exc()))
    return 1, None
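# Because loop() reconciles SCRIPT_PROCESSES against Script.active on every
# pass, deactivating a script takes effect within one iteration. A
# hypothetical Django-shell session; the Script import path is assumed.
from pyscada.scripting.models import Script  # assumed import path

s = Script.objects.get(pk=1)  # made-up pk
s.active = False
s.save()
# on the next loop() pass the matching worker is stopped via
# bp.stop(cleanup=True) and removed from SCRIPT_PROCESSES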