def check_history_tables():
    """Add below all history tables you want to be initialised"""
    table_collection_list = [
        models.NodeHistory, models.SensorHistory, models.SystemDiskHistory,
        models.SystemMonitorHistory, models.UpsHistory, models.PresenceHistory,
        models.SensorErrorHistory, models.ZoneHeatRelayHistory,
        models.ZoneCustomRelayHistory
    ]
    for table in table_collection_list:
        table_str = utils.get_table_name(table)
        # do not drop and recreate historic ones
        # if table is models.PresenceHistory:
        #     check_table_schema(table, model_auto_update=True)
        # else:
        check_table_schema(table, model_auto_update=False)
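# A minimal sketch (not part of the original flow) generalising the commented-out
# PresenceHistory hint above: model_auto_update chosen per table via an override
# map instead of being hardcoded. The function and argument names are hypothetical.
def check_history_tables_with_overrides(auto_update_overrides=None):
    """Like check_history_tables, but model_auto_update can be set per table."""
    overrides = auto_update_overrides or {}
    tables = [
        models.NodeHistory, models.SensorHistory, models.SystemDiskHistory,
        models.SystemMonitorHistory, models.UpsHistory, models.PresenceHistory,
        models.SensorErrorHistory, models.ZoneHeatRelayHistory,
        models.ZoneCustomRelayHistory
    ]
    for table in tables:
        # default is False so historic tables are never dropped and recreated
        check_table_schema(table, model_auto_update=overrides.get(table, False))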
def readBase(self):
    sql = (
        "SELECT country_code, exchange_market_code, security_code, company_code, "
        "display_label, information, gmt_create, user_create "
        "FROM `{}` ORDER BY id ASC;"
    )
    executable_sql = sql.format(str(get_table_name()))
    try:
        self.cursor.execute(executable_sql)
        field_values = self.cursor.fetchall()
        return field_values
    except Exception as e:
        return "ERROR Reason Is : %s" % e
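# SQL identifiers (table names) cannot be bound as query parameters, which is why
# readBase() interpolates the table name into the statement. A minimal sketch of a
# guard, assuming get_table_name() might return externally influenced values; the
# whitelist contents below are illustrative (drawn from tables used elsewhere in
# this file) and the names are hypothetical.
ALLOWED_TABLES = {'company_base_info', 'company_raw_info_20191120'}  # hypothetical

def safe_table_name(name):
    """Return name only if it is a known table; otherwise raise."""
    if name not in ALLOWED_TABLES:
        raise ValueError("unexpected table name: %r" % name)
    return name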
def do_insert(self, cursor, item, spider):
    # if spider.name == 'SZ_list_insert':
    #     if isinstance(item, ChnSzseItem):
    #         cursor.execute(toInsert('company_data_source_complete20191022_copy', item))
    #     elif isinstance(item, ChnSzseListItem):
    #         for vals in item:
    #             sql = "INSERT INTO `company_origin_info_complete20191022` (" \
    #                   "country_code, company_code, display_label, information, gmt_create, user_create" \
    #                   ") VALUES (%s, %s, %s, %s, %s, %s);"
    #             print('$' * 20, sql)
    #             cursor.execute(
    #                 sql,
    #                 ['chn',
    #                  str(item['company_code'][1]),
    #                  str(item[vals][0]),
    #                  str(item[vals][1]),
    #                  str(datetime.now()),
    #                  'xfc'])
    #
    # if spider.name == 'secretary_seniority':
    #     if isinstance(item, SecretaryItem):
    #         # cursor.execute(toInsert('company_original_information_detail_c20191104', item))
    #         pass
    #     elif isinstance(item, SecretaryTwoItem):
    #         cursor.execute(toInsert('company_original_information_detail_c20191104', item))
    #     elif isinstance(item, SecretaryThreeItem):
    #         cursor.execute(toInsert('company_original_information_detail_c20191104', item))
    #
    # elif spider.name == 'Regulatory_discipline':
    #     if isinstance(item, RegItem):
    #         cursor.execute(toInsert('company_original_information_detail_c20191104', item))
    #     elif isinstance(item, DisItem):
    #         cursor.execute(toInsert('company_original_information_detail_c20191104', item))
    #
    # elif spider.name == 'stock_change':
    #     if isinstance(item, StockItem):
    #         cursor.execute(toInsert('company_original_information_detail_c20191104', item))

    if spider.name == 'SZSE_company_list':
        if isinstance(item, SzseCollectionItem1):
            cursor.execute(toInsert('company_base_info', item))
        elif isinstance(item, SzseCollectionItem2):
            cursor.execute(toInsert('company_base_info', item))
        elif isinstance(item, SzseCollectionItem3):
            cursor.execute(toInsert('company_base_info', item))
    elif spider.name == 'company_info':
        tablename = get_table_name()
        # tablename = 'company_raw_info_20191120'
        if isinstance(item, CompanyInfoItem):
            for vals in item:
                # the table name is a SQL identifier and cannot be bound as a
                # query parameter, so it is formatted into the statement; only
                # the eight column values are bound
                sql = ("INSERT INTO `{}` ("
                       "country_code, exchange_market_code, security_code, company_code, "
                       "display_label, information, gmt_create, user_create"
                       ") VALUES (%s, %s, %s, %s, %s, %s, %s, %s)").format(tablename)
                cursor.execute(sql, [
                    'chn',
                    'SZSE',
                    str(item['security_code'][1]),
                    str(item['company_code'][1]),
                    str(item[vals][0]),
                    str(item[vals][1]),
                    str(datetime.now()),
                    'xfc'
                ])
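# toInsert() is called above but not defined in this file. A minimal sketch of
# what such a helper's contract could look like, assuming the item behaves like a
# dict; this is an assumption for illustration, not the project's implementation,
# and the function name is deliberately different to avoid confusion.
def to_insert_sketch(table, item):
    """Build an INSERT statement plus its value list from an item (hypothetical)."""
    columns = list(item.keys())
    placeholders = ", ".join(["%s"] * len(columns))
    sql = "INSERT INTO `{}` ({}) VALUES ({})".format(
        table, ", ".join(columns), placeholders)
    return sql, [item[c] for c in columns]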
def populate_tables(model_auto_update=False):
    """Add below all tables you want to be initialised"""
    global table_collection
    table_collection = [
        models.Node, models.Parameter, models.Module, models.Area, models.Zone,
        models.ZoneArea, models.ZoneCustomRelay, models.TemperatureTarget,
        models.SchedulePattern, models.HeatSchedule, models.ZoneHeatRelay,
        models.ZoneSensor, models.ZoneAlarm, models.ZoneThermostat,
        models.SystemMonitor, models.SystemDisk, models.Sensor, models.DustSensor,
        models.Ups, models.Rule, models.CommandOverrideRelay, models.PlotlyCache,
        models.Utility, models.Presence, models.SensorError, models.State,
        models.People, models.Device, models.PeopleDevice, models.Position,
        models.PowerMonitor, models.Music, models.MusicLoved, models.Pwm
    ]
    # tables that will be cleaned on every app start
    table_force_clean = [
        models.Parameter, models.Zone, models.Presence, models.Module,
        models.Node, models.Rule, models.ZoneHeatRelay
    ]  # , models.Sensor]
    for table in table_collection:
        table_str = utils.get_table_name(table)
        if table in table_force_clean:
            read_drop_table(table, "Forcing table clean", drop_without_user_ask=True)
        else:
            check_table_schema(table, model_auto_update)
        if table_str in Constant.db_values_json:
            default_values = Constant.db_values_json[table_str]
            if len(table().query_all()) != len(default_values):
                L.l.info('Populating {} with default values as config record count != db count'.format(table_str))
                table().delete()
                commit()
                for config_record in default_values:
                    new_record = table()
                    # setattr(new_record, config_record, default_values[config_record])
                    for field in config_record:
                        # if type(getattr(new_record, field)) is bool:
                        #     val = config_record[field]
                        setattr(new_record, field, config_record[field])
                    # Log.logger.info("Adding conf record: {}".format(new_record))
                    db.session.add(new_record)
                commit()
    check_table_schema(models.Node, model_auto_update)
    # resetting the execute_command field to avoid re-running the last command issued before shutdown
    node_obj = models.Node.query.filter_by(name=Constant.HOST_NAME).first()
    if node_obj:
        node_obj.execute_command = ''
    else:
        node_obj = models.Node()
        node_obj.add_record_to_session()
    # keep this commented for test purposes (makes a netbook running Windows look like a PI)
    if Constant.HOST_NAME != 'netbook':
        # Log.logger.info("Setting current machine type to {}".format(Constant.HOST_MACHINE_TYPE))
        node_obj.machine_type = Constant.HOST_MACHINE_TYPE
    Constant.HOST_PRIORITY = node_obj.priority
    commit()
    node_list = models.Node().query_all()
    check_table_schema(models.GpioPin, model_auto_update)
    # todo: worth implementing beaglebone white?
    # populate all beaglebone black pins in db for all nodes;
    # only the used ones are mapped below, extend if more are used
    # mapping found here: https://insigntech.files.wordpress.com/2013/09/bbb_pinouts.jpg
    bbb_bcm_map = {
        'P9_11': 30, 'P9_12': 60, 'P9_13': 31, 'P9_14': 40, 'P9_15': 48,
        'P9_16': 51, 'P9_24': 15, 'P9_23': 49, 'P9_22': 2, 'P9_21': 3,
        'P8_07': 66, 'P8_08': 67, 'P8_09': 69, 'P8_11': 45, 'P8_12': 44,
        'P8_15': 47, 'P8_16': 46
    }
    for node in node_list:
        if node.machine_type == Constant.MACHINE_TYPE_BEAGLEBONE:
            # P8_ & P9_ rails have 46 pins each
            if len(models.GpioPin.query.filter_by(
                    pin_type=Constant.GPIO_PIN_TYPE_BBB,
                    host_name=node.name).all()) != 46 * 2:
                models.GpioPin.query.filter_by(
                    pin_type=Constant.GPIO_PIN_TYPE_BBB,
                    host_name=node.name).delete()
                commit()
                L.l.info('Populating default {} GpioPins on {} '.format(
                    node.machine_type, node.name))
                for rail in range(8, 10):  # the range end is not part of the loop
                    for pin in range(1, 47):
                        gpio = models.GpioPin()
                        gpio.pin_type = Constant.GPIO_PIN_TYPE_BBB
                        gpio.host_name = node.name
                        gpio.pin_code = 'P{}_{:02d}'.format(rail, pin)
                        if gpio.pin_code in bbb_bcm_map:  # dict.has_key() is Python 2 only
                            gpio.pin_index_bcm = bbb_bcm_map[gpio.pin_code]
                        else:
                            gpio.pin_index_bcm = ''
                        db.session.add(gpio)
                commit()
        # fixme: check for other PI revisions
        elif node.machine_type in [Constant.MACHINE_TYPE_RASPBERRY,
                                   Constant.MACHINE_TYPE_ODROID]:
            if len(models.GpioPin.query.filter_by(
                    pin_type=Constant.GPIO_PIN_TYPE_PI_STDGPIO,
                    host_name=node.name).all()) != 40:
                models.GpioPin.query.filter_by(
                    pin_type=Constant.GPIO_PIN_TYPE_PI_STDGPIO,
                    host_name=node.name).delete()
                commit()
                L.l.info('Populating standard {} GpioPins on {} '.format(
                    node.machine_type, node.name))
                for pin in range(0, 40):
                    gpio = models.GpioPin()
                    gpio.pin_type = Constant.GPIO_PIN_TYPE_PI_STDGPIO
                    gpio.host_name = node.name
                    gpio.pin_code = str(pin)
                    gpio.pin_index_bcm = pin
                    db.session.add(gpio)
            # input/output * 8 pins * max 4 boards
            if len(models.GpioPin.query.filter_by(
                    pin_type=Constant.GPIO_PIN_TYPE_PI_FACE_SPI,
                    host_name=node.name).all()) != 2 * 8 * 4:
                models.GpioPin.query.filter_by(
                    pin_type=Constant.GPIO_PIN_TYPE_PI_FACE_SPI,
                    host_name=node.name).delete()
                commit()
                L.l.info('Populating piface {} pins on {} '.format(
                    node.machine_type, node.name))
                for board in range(0, 4):
                    for pin_dir in (Constant.GPIO_PIN_DIRECTION_IN,
                                    Constant.GPIO_PIN_DIRECTION_OUT):
                        for pin in range(0, 8):
                            gpio = models.GpioPin()
                            gpio.pin_type = Constant.GPIO_PIN_TYPE_PI_FACE_SPI
                            gpio.host_name = node.name
                            # same format as piface.format_pin_code()
                            gpio.pin_code = str(board) + ":" + pin_dir + ":" + str(pin)
                            gpio.pin_index_bcm = pin
                            gpio.board_index = board
                            db.session.add(gpio)
                commit()
        else:
            L.l.warning("Unknown machine type {} for node {}".format(
                node.machine_type, node))
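# A minimal sketch, outside the populate flow, of the two pin-code formats built
# above; the helper names are hypothetical and exist only to document the shapes.
def bbb_pin_code(rail, pin):
    """BBB header pin code, e.g. bbb_pin_code(8, 7) -> 'P8_07'."""
    return 'P{}_{:02d}'.format(rail, pin)

def piface_pin_code(board, pin_dir, pin):
    """PiFace pin code '<board>:<direction>:<pin>', the same shape as piface.format_pin_code()."""
    return '{}:{}:{}'.format(board, pin_dir, pin)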
def _process_obj(obj):
    try:
        prctl.set_name("event_thread_run")
        threading.current_thread().name = "event_thread_run"
        # events received via mqtt transport
        # fixme: make it generic to work with any transport
        source_host = obj[Constant.JSON_PUBLISH_SOURCE_HOST]
        if Constant.JSON_PUBLISH_TABLE in obj:
            table = str(obj[Constant.JSON_PUBLISH_TABLE])
            if table == utils.get_table_name(models.Node):
                node.node_run.node_update(obj)
                # fixme: remove hardcoded strings
                if 'execute_command' in obj:
                    execute_command = obj['execute_command']
                    host_name = obj['name']
                    # execute the command on the target host or on the current host
                    # (useful when the target is down, e.g. a wake command)
                    if (host_name == Constant.HOST_NAME or source_host == Constant.HOST_NAME) \
                            and execute_command != '':
                        server_node = models.Node.query.filter_by(name=host_name).first()
                        main.execute_command(execute_command, node=server_node)
            elif table == utils.get_table_name(models.ZoneHeatRelay):
                # if heat.initialised:
                heat.record_update(obj)
            elif table == utils.get_table_name(models.Sensor):
                # sensor.not_used_record_update(obj)
                pass
            elif table == utils.get_table_name(models.DustSensor):
                pass
            elif table == utils.get_table_name(models.ZoneCustomRelay):
                gpio.zone_custom_relay_record_update(obj)
            elif table == utils.get_table_name(models.GpioPin):
                gpio.not_used_gpio_record_update(obj)
            # elif table == utils.get_table_name(models.Rule):
            #     rule.record_update(obj)
            elif table == utils.get_table_name(models.Presence):
                presence.not_used_record_update(obj)
            # elif table == utils.get_table_name(models.PlotlyCache):
            #     graph_plotly.record_update(obj)
            elif table == utils.get_table_name(models.ZoneAlarm):
                # no processing (no local save)
                pass
            elif table == utils.get_table_name(models.Utility):
                utility.not_used_record_update(obj, source_host)
            elif table == utils.get_table_name(models.Ups):
                # no additional processing
                pass
            elif table == utils.get_table_name(models.SystemDisk):
                # no additional processing
                pass
            elif table == utils.get_table_name(models.Music):
                # no additional processing
                pass
            elif table == utils.get_table_name(models.MusicLoved):
                # no additional processing
                pass
            elif table == utils.get_table_name(models.PowerMonitor):
                health_monitor.not_used_powermonitor_record_update(obj)
            elif table == utils.get_table_name(models.ZoneThermostat):
                heat.zone_thermo_record_update(obj)
            elif table == utils.get_table_name(models.Pwm):
                gpio.pigpio_gpio.not_used_pwm_record_update(obj)
            else:
                L.l.warning('Table %s content from %s is not mqtt processed' % (table, source_host))
        if Constant.JSON_MESSAGE_TYPE in obj:
            if variable.NODE_THIS_IS_MASTER_LOGGING:
                if source_host != Constant.HOST_NAME:
                    levelname = obj['level']
                    msg = obj['message']
                    msgdatetime = obj['datetime']
                    message = '{}, {}, {}'.format(source_host, msgdatetime, msg)
                    if levelname == 'INFO':
                        L.remote_logger.info(message)
                    elif levelname == 'WARNING':
                        L.remote_logger.warning(message)
                    elif levelname == 'CRITICAL':
                        L.remote_logger.critical(message)
                    elif levelname == 'ERROR':
                        L.remote_logger.error(message)
                    elif levelname == 'DEBUG':
                        L.remote_logger.debug(message)
                # else:
                #     Log.logger.warning('This node is master logging but emits remote logs, is a circular reference')
        # if the record has fields that enable persistence (in cloud or local)
        # if variable.NODE_THIS_IS_MASTER_OVERALL:
        if source_host == Constant.HOST_NAME:
            if Constant.JSON_PUBLISH_SAVE_TO_HISTORY in obj:
                # if record must be saved to local db
                if obj[Constant.JSON_PUBLISH_SAVE_TO_HISTORY] and Constant.HAS_LOCAL_DB_REPORTING_CAPABILITY:
                    persistence.save_to_history_db(obj)
                else:
                    # L.l.info("Not saving to db, json publish={} has db={}, obj={}".format(
                    #     obj[Constant.JSON_PUBLISH_SAVE_TO_HISTORY], Constant.HAS_LOCAL_DB_REPORTING_CAPABILITY,
                    #     obj))
                    pass
            else:
                L.l.info("Received mqtt object without history save tag: {}".format(obj))
        # else:
        #     L.l.info("Dropping message from {}, not matching {}".format(source_host, Constant.HOST_NAME))
    except Exception as ex:
        L.l.error("Error processing event err={}, mqtt={}".format(ex, obj), exc_info=True)
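# A minimal sketch of an alternative to the long if/elif chain in _process_obj: a
# dispatch table keyed by table name. The handlers listed are single-argument ones
# taken from the chain above; the _TABLE_HANDLERS and _dispatch_table_update names
# are hypothetical, and handlers with other signatures would still need branches.
_TABLE_HANDLERS = {
    utils.get_table_name(models.ZoneHeatRelay): heat.record_update,
    utils.get_table_name(models.ZoneCustomRelay): gpio.zone_custom_relay_record_update,
    utils.get_table_name(models.ZoneThermostat): heat.zone_thermo_record_update,
}

def _dispatch_table_update(table, obj, source_host):
    """Route a record update to its handler, or warn if none is registered."""
    handler = _TABLE_HANDLERS.get(table)
    if handler is not None:
        handler(obj)
    else:
        L.l.warning('Table %s content from %s is not mqtt processed' % (table, source_host))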
def mqtt_thread_run():
    prctl.set_name("event_thread_run")
    threading.current_thread().name = "event_thread_run"
    global __mqtt_lock
    __mqtt_lock.acquire()
    # from cloud import graph_plotly
    try:
        last_count = len(__mqtt_event_list)
        for obj in list(__mqtt_event_list):
            try:
                __mqtt_event_list.remove(obj)
                # events received via mqtt transport
                # fixme: make it generic to work with any transport
                source_host = obj[Constant.JSON_PUBLISH_SOURCE_HOST]
                if Constant.JSON_PUBLISH_TABLE in obj:
                    table = str(obj[Constant.JSON_PUBLISH_TABLE])
                    if table == utils.get_table_name(models.Node):
                        node.node_run.node_update(obj)
                        # fixme: remove hardcoded strings
                        if 'execute_command' in obj:
                            execute_command = obj['execute_command']
                            host_name = obj['name']
                            # execute the command on the target host or on the current host
                            # (useful when the target is down, e.g. a wake command)
                            if (host_name == Constant.HOST_NAME or source_host == Constant.HOST_NAME) \
                                    and execute_command != '':
                                server_node = models.Node.query.filter_by(name=host_name).first()
                                main.execute_command(execute_command, node=server_node)
                    elif table == utils.get_table_name(models.ZoneHeatRelay):
                        # if heat.initialised:
                        heat.record_update(obj)
                    elif table == utils.get_table_name(models.Sensor):
                        sensor.record_update(obj)
                    elif table == utils.get_table_name(models.ZoneCustomRelay):
                        gpio.zone_custom_relay_record_update(obj)
                    elif table == utils.get_table_name(models.GpioPin):
                        gpio.gpio_record_update(obj)
                    # elif table == utils.get_table_name(models.Rule):
                    #     rule.record_update(obj)
                    elif table == utils.get_table_name(models.Presence):
                        presence.record_update(obj)
                    # elif table == utils.get_table_name(models.PlotlyCache):
                    #     graph_plotly.record_update(obj)
                    elif table == utils.get_table_name(models.ZoneAlarm):
                        # no processing (no local save)
                        pass
                    elif table == utils.get_table_name(models.Utility):
                        # no additional processing
                        pass
                    elif table == utils.get_table_name(models.Ups):
                        # no additional processing
                        pass
                    elif table == utils.get_table_name(models.SystemDisk):
                        # no additional processing
                        pass
                    elif table == utils.get_table_name(models.Music):
                        # no additional processing
                        pass
                    elif table == utils.get_table_name(models.MusicLoved):
                        # no additional processing
                        pass
                    else:
                        L.l.warning('Table %s content from %s is not mqtt processed' % (table, source_host))
                if Constant.JSON_MESSAGE_TYPE in obj:
                    if variable.NODE_THIS_IS_MASTER_LOGGING:
                        if source_host != Constant.HOST_NAME:
                            levelname = obj['level']
                            msg = obj['message']
                            msgdatetime = obj['datetime']
                            message = '{}, {}, {}'.format(source_host, msgdatetime, msg)
                            if levelname == 'INFO':
                                L.remote_logger.info(message)
                            elif levelname == 'WARNING':
                                L.remote_logger.warning(message)
                            elif levelname == 'CRITICAL':
                                L.remote_logger.critical(message)
                            elif levelname == 'ERROR':
                                L.remote_logger.error(message)
                            elif levelname == 'DEBUG':
                                L.remote_logger.debug(message)
                        # else:
                        #     Log.logger.warning('This node is master logging but emits remote logs, is a circular reference')
                # if the record has fields that enable persistence (in cloud or local)
                # if variable.NODE_THIS_IS_MASTER_OVERALL:
                if source_host == Constant.HOST_NAME:
                    if Constant.JSON_PUBLISH_SAVE_TO_HISTORY in obj:
                        # if record must be saved to local db
                        if obj[Constant.JSON_PUBLISH_SAVE_TO_HISTORY] and Constant.HAS_LOCAL_DB_REPORTING_CAPABILITY:
                            persistence.save_to_history_db(obj)
                        else:
                            # L.l.info("Not saving to db, json publish={} has db={}, obj={}".format(
                            #     obj[Constant.JSON_PUBLISH_SAVE_TO_HISTORY], Constant.HAS_LOCAL_DB_REPORTING_CAPABILITY,
                            #     obj))
                            pass
                        # if record is marked to be uploaded to a graph
                        # if Constant.JSON_PUBLISH_SAVE_TO_GRAPH in obj and obj[Constant.JSON_PUBLISH_SAVE_TO_GRAPH]:
                        #     persistence.save_to_history(obj, upload_to_cloud=True)
                        #     # lazy init as plotly is an optional module
                        #     from cloud import graph_plotly
                        #     if graph_plotly.initialised:
                        #         start = utils.get_base_location_now_date()
                        #         # initial implementation
                        #         graph_plotly.upload_data(obj)
                        #         persistence.save_to_history_cloud(obj)
                        #         elapsed = (utils.get_base_location_now_date() - start).total_seconds()
                        #         Log.logger.debug('Plotly upload took {}s'.format(elapsed))
                        #     else:
                        #         Log.logger.debug('Graph not initialised on obj upload to graph')
                    else:
                        L.l.info("Received mqtt object without history save tag: {}".format(obj))
                # else:
                #     L.l.info("Dropping message from {}, not matching {}".format(source_host, Constant.HOST_NAME))
                if len(__mqtt_event_list) > last_count:
                    L.l.debug('Not keeping up with {} mqtt events'.format(len(__mqtt_event_list)))
            except Exception as ex:
                L.l.critical("Error processing event err={}, mqtt={}".format(ex, obj))
    except Exception as ex:
        L.l.critical("General error processing mqtt: {}".format(ex))
    finally:
        __mqtt_lock.release()
        prctl.set_name("idle")
        threading.current_thread().name = "idle"
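# A minimal sketch, not the project's code, of how the shared __mqtt_event_list +
# __mqtt_lock pattern above could be replaced with the standard library's
# thread-safe queue.Queue (Queue on Python 2), removing the explicit lock and the
# remove-while-iterating step; _mqtt_events and drain_mqtt_events are hypothetical.
import queue

_mqtt_events = queue.Queue()

def drain_mqtt_events(process):
    """Process every currently queued mqtt event with the given callable."""
    while True:
        try:
            obj = _mqtt_events.get_nowait()
        except queue.Empty:
            break
        try:
            process(obj)  # e.g. _process_obj
        finally:
            _mqtt_events.task_done()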