Example 1
def _check_presence():
    devs = models.Device().query_filter_all()
    for dev in devs:
        if dev.bt_address is not None:
            result = None
            btrssi = None
            try:
                result = bluetooth.lookup_name(dev.bt_address.upper(),
                                               timeout=2)
            except Exception, ex:
                print "BT scan error: {}".format(ex)
            if result is not None:
                try:
                    #if rssi_initialised:
                    #    btrssi = BluetoothRSSI(addr=dev.bt_address.upper())
                    dev.last_bt_active = utils.get_base_location_now_date()
                    dev.last_active = utils.get_base_location_now_date()
                    models.commit()
                    pd = models.PeopleDevice
                    peopledev = pd().query_filter_first(pd.device_id == dev.id)
                    if peopledev is not None and peopledev.give_presence:
                        p = models.People
                        people = p().query_filter_first(
                            p.id == peopledev.people_id)
                        if people is not None:
                            dispatcher.send(Constant.SIGNAL_PRESENCE,
                                            device=dev.name,
                                            people=people.name)
                            #if btrssi is not None:
                            #    print "Rssi for {}={}".format(people.name, btrssi.get_rssi())
                except Exception, ex:
                    print "Error on bt presence".format(ex)
Example 2
def _check_wifi(test=False):
    if test:
        wlist = "bssid / frequency / signal level / flags / ssid\n"
        wlist = wlist + "a4:2b:b0:fe:8c:2e       2462    -52     [WPA2-PSK-CCMP][ESS]    home2"
        wlist = wlist.split('\n')
        wl = 'wlan0'
    else:
        # guard against an unbound wlist when no wlan interface is found
        wlist = []
        wl = _get_wlan()
        if wl is not None:
            wlist = subprocess.check_output(['wpa_cli', '-i', wl, 'scan_results']).split('\n')
    if len(wlist) >= 1:
        for line in wlist:
            line = " ".join(line.split())
            atoms = line.split(' ')
            if len(atoms) > 4 and atoms[0] != 'bssid':
                ssid = atoms[0].upper()  # first column is actually the bssid (MAC address)
                freq = atoms[1]
                signal = atoms[2]
                flags = atoms[3]
                name = atoms[4]
                d = models.Device
                dev = d().query_filter_first(d.wifi_address == ssid)
                if dev is not None:
                    dev.last_wifi_active = utils.get_base_location_now_date()
                    dev.last_active = utils.get_base_location_now_date()
                    dev.wifi_signal = signal
                    models.commit()
                    pd = models.PeopleDevice
                    peopledev = pd().query_filter_first(pd.device_id == dev.id)
                    if peopledev is not None and peopledev.give_presence:
                        dispatcher.send(Constant.SIGNAL_PRESENCE, device=dev.name)
Example 3
def send_message(title,
                 message=None,
                 url=None,
                 priority=None,
                 deviceid=None,
                 image_url=None):
    """
    https://newtifry.appspot.com/page/api

    format	json
    Determines the format of the response.
    Only JSON is supported at the moment, so you should always send the string json.
    Required

    source	32 character hash string
    The source key, given to you by the user. They generate it when they create a source.
    You can supply up to 10 sources at a time separated by a comma, or a single source.	Required

    title	string
    The string title of this notification. Keep this short and relevant to the message.	Required

    message	string
    The body of the message. This can contain a lot more detail of the message.
    This is optional, and will be sent as an empty string if not provided.	Optional

    url	string
    An optional URL to pass along, that would give more information about the message.	Optional

    priority	integer
    An optional message priority (0-3). 0 : no priority - 1 : info - 2 : warning - 3 : alert	Optional

    image	string
    An optional bitmap URL to pass along, that would be displayed in the message detail screen (new in version 2.4.0).
    Optional
    """
    global _last_send_date, _message_queue, __queue_lock
    obj = type(
        'obj', (object, ), {
            'title': title,
            'message': message,
            'url': url,
            'priority': priority,
            'deviceid': deviceid,
            'image_url': image_url,
            'date': utils.get_base_location_now_date()
        })
    try:
        __queue_lock.acquire()
        _message_queue.append(obj)
    finally:
        __queue_lock.release()
    # avoid sending notifications too often
    if (utils.get_base_location_now_date() - _last_send_date).total_seconds() < 30:
        L.l.info('Queuing newtifry message [%s] count %d' %
                 (title, len(_message_queue)))
        return
    else:
        _send_queue()
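A minimal usage sketch of the queue-and-throttle behaviour above; the call site below is hypothetical (only the send_message signature, the priority values from the API notes, and the 30-second throttle come from the code itself):

# Hypothetical caller: queue a warning-level notification.
# send_message() only appends to _message_queue; the actual HTTP request is made
# by _send_queue() (Example 16), and repeated calls within 30 seconds are
# coalesced into a single Newtifry message.
send_message(title='Sensor offline',
             message='Hall sensor not updated for 2 hours',
             priority=2)  # 2 = warning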
Example 4
def loop_zones():
    try:
        heat_is_on = False
        zone_list = models.Zone().query_all()
        global progress_status
        for zone in zone_list:
            progress_status = 'do zone {}'.format(zone.name)
            heat_schedule = models.HeatSchedule.query.filter_by(
                zone_id=zone.id).first()
            zonesensor_list = models.ZoneSensor.query.filter_by(
                zone_id=zone.id).all()
            for zonesensor in zonesensor_list:
                if heat_schedule is not None and zonesensor is not None:
                    sensor = models.Sensor.query.filter_by(
                        address=zonesensor.sensor_address).first()
                    if heat_schedule.active and sensor is not None:
                        # sensor_last_update_seconds = (utils.get_base_location_now_date() - sensor.updated_on).total_seconds()
                        # if sensor_last_update_seconds > 120 * 60:
                        #    Log.logger.warning('Sensor {} not updated in last 120 minutes, unusual'.format(
                        # sensor.sensor_name))
                        heat_state, main_source_needed = __update_zone_heat(
                            zone, heat_schedule, sensor)
                        if not heat_is_on:
                            heat_is_on = main_source_needed and heat_state
        # turn on/off the main heating system based on zone heat needs
        # check first to find alternate valid heat sources
        heatrelay_main_source = models.ZoneHeatRelay.query.filter_by(
            is_alternate_heat_source=1).first()
        if heatrelay_main_source is None:
            heatrelay_main_source = models.ZoneHeatRelay.query.filter_by(
                is_main_heat_source=1).first()
        if heatrelay_main_source is not None:
            L.l.info("Main heat relay={}".format(heatrelay_main_source))
            main_source_zone = models.Zone.query.filter_by(
                id=heatrelay_main_source.zone_id).first()
            if main_source_zone is not None:
                update_age_mins = (
                    utils.get_base_location_now_date() -
                    P.last_main_heat_update).total_seconds() / 60
                # # avoid setting relay state too often but do periodic refreshes every x minutes
                if main_source_zone.heat_is_on != heat_is_on or update_age_mins >= int(
                        get_param(Constant.P_HEAT_STATE_REFRESH_PERIOD)):
                    L.l.info("Setting main heat on={}, zone={}".format(
                        heat_is_on, main_source_zone))
                    __save_heat_state_db(zone=main_source_zone,
                                         heat_is_on=heat_is_on)
                    P.last_main_heat_update = utils.get_base_location_now_date()
            else:
                L.l.critical('No heat main_src found using zone id {}'.format(
                    heatrelay_main_source.zone_id))
        else:
            L.l.critical('No heat main source is defined in db')
    except Exception as ex:
        L.l.error('Error loop_zones, err={}'.format(ex), exc_info=True)
Example 5
def thread_run():
    prctl.set_name("rfxcom")
    threading.current_thread().name = "rfxcom"
    try:
        if not P.initialised:
            _init_board()
        if P.initialised:
            L.l.debug('Waiting for RFX event')
            time_elapsed_minutes = (utils.get_base_location_now_date() -
                                    P.last_packet_received).total_seconds() / 60
            if time_elapsed_minutes > P.MAX_MINUTES_SILENCE:
                L.l.warning(
                    'RFX event not received for {} minutes, device error? Resetting!'
                    .format(time_elapsed_minutes))
                P.transport.reset()
            event = P.transport.receive_blocking()
            __rfx_reading(event)
        else:
            if P.init_failed_count > P.MAX_FAILED_RETRY:
                unload()
    except IndexError as iex:
        P.initialised = False
        P.init_failed_count += 1
        utils.sleep(10)
    except Exception as ex:
        L.l.error('Error read RFX tty port, err={}'.format(ex), exc_info=True)
        P.initialised = False
        P.init_failed_count += 1
        utils.sleep(10)
    prctl.set_name("idle")
    threading.current_thread().name = "idle"
Example 6
def __save_sensor_db(p_id='', p_type='', value_list=None):
    if not value_list:
        # value_list is indexed as a dict below, so default to an empty dict
        value_list = {}

    record = m.Sensor.find_one({m.Sensor.address: p_id})
    if record is None:
        record = m.Sensor()
        record.address = p_id
    zone_sensor = m.ZoneSensor.find_one({m.ZoneSensor.sensor_address: p_id})
    if zone_sensor:
        record.sensor_name = zone_sensor.sensor_name
    else:
        record.sensor_name = '(not defined) ' + p_id
    record.updated_on = utils.get_base_location_now_date()
    record.type = p_type
    if 'Humidity' in value_list:
        record.humidity = utils.round_sensor_value(value_list['Humidity'])
    if 'Temperature' in value_list:
        record.temperature = utils.round_sensor_value(
            value_list['Temperature'])
    if 'Battery numeric' in value_list:
        record.battery_level = value_list['Battery numeric']
    if 'Rssi numeric' in value_list:
        record.rssi = value_list['Rssi numeric']
    # L.l.info('Saving RFX object {}'.format(record))
    record.save_changed_fields(broadcast=True, persist=True)
Example 7
def __save_sensor_db(p_id='', p_type='', value_list=None):
    if not value_list:
        # value_list is indexed as a dict below, so default to an empty dict
        value_list = {}
    record = models.Sensor(address=p_id)
    assert isinstance(record, models.Sensor)
    zone_sensor = models.ZoneSensor.query.filter_by(
        sensor_address=p_id).first()
    if zone_sensor:
        record.sensor_name = zone_sensor.sensor_name
    else:
        record.sensor_name = '(not defined) ' + p_id
    record.updated_on = utils.get_base_location_now_date()
    record.type = p_type
    if 'Humidity' in value_list:
        record.humidity = utils.round_sensor_value(value_list['Humidity'])
    if 'Temperature' in value_list:
        record.temperature = utils.round_sensor_value(
            value_list['Temperature'])
    if 'Battery numeric' in value_list:
        record.battery_level = value_list['Battery numeric']
    if 'Rssi numeric' in value_list:
        record.rssi = value_list['Rssi numeric']
    current_record = models.Sensor.query.filter_by(address=p_id).first()
    record.save_changed_fields(current_record=current_record,
                               new_record=record,
                               notify_transport_enabled=True,
                               save_to_graph=True,
                               ignore_only_updated_on_change=True)
Example 8
def node_update(obj=None):
    if not obj:
        obj = {}
    try:
        node_host_name = utils.get_object_field_value(obj, 'name')
        L.l.debug('Received node state update from {}'.format(node_host_name))
        # avoid the node updating itself in an infinite recursion
        if node_host_name != Constant.HOST_NAME:
            models.Node().save_changed_fields_from_json_object(
                json_object=obj,
                unique_key_name='name',
                notify_transport_enabled=False,
                save_to_graph=False)
        else:
            L.l.debug('Skipping node DB save, this node is master = {}'.format(
                variable.NODE_THIS_IS_MASTER_OVERALL))
            sent_date = utils.get_object_field_value(obj,
                                                     'event_sent_datetime')
            if sent_date is not None:
                event_sent_date_time = utils.parse_to_date(sent_date)
                seconds_elapsed = (utils.get_base_location_now_date() -
                                   event_sent_date_time).total_seconds()
                if seconds_elapsed > 15:
                    L.l.warning(
                        'Very slow mqtt, delay is {} seconds rate msg {}/min'.
                        format(seconds_elapsed,
                               mqtt_io.mqtt_msg_count_per_minute))
    except Exception as ex:
        L.l.warning('Error on node update, err {}'.format(ex))
Example 9
def check_inactive():
    """check for inactive sensors not read recently but in database"""
    sensor_list = models.Sensor().query_all()
    defined_sensor_list = models.ZoneSensor().query_all()
    ref_list = []
    delta = (datetime.datetime.now() - P.last_warning).total_seconds()
    log_warn = (delta > 60 * 15)
    for zone_sensor in defined_sensor_list:
        ref_list.append(zone_sensor.sensor_address)
    for sensor in sensor_list:
        elapsed = round((utils.get_base_location_now_date() -
                         sensor.updated_on).total_seconds() / 60, 0)
        if sensor.address not in ref_list:
            L.l.warning('Sensor {} {} not defined'.format(
                sensor.address, sensor.sensor_name))
            #current_record = models.SensorError.query.filter_by(sensor_address=sensor.address).first()
            #record = models.SensorError()
            #record.sensor_name = sensor.sensor_name
            #if current_record is not None:
            #    record.error_count = current_record.error_count
            #else:
            #    record.error_count = 0
            #record.error_count += 1
            #record.error_type = 0
            #record.save_changed_fields(current_record=None, new_record=record, save_to_graph=True, save_all_fields=True)
        if log_warn and elapsed > 2 * P.sampling_period_seconds:
            L.l.warning('Sensor {} type {} not updated since {} min'.format(
                sensor.sensor_name, sensor.type, elapsed))
            P.last_warning = datetime.datetime.now()
Example 10
def thread_run_send():
    prctl.set_name("mqtt_send")
    threading.current_thread().name = "mqtt_send"
    P.thread_send = threading.current_thread()
    if mqtt_io.P.client_connected:
        start_len = len(P.send_json_queue)
        if start_len > 50:
            L.l.info('Mqtt SEND len={}'.format(start_len))
        # FIXME: complete this, will potentially accumulate too many requests
        P.mqtt_send_lock.acquire()
        for [json, topic] in list(P.send_json_queue):
            res = transport.mqtt_io._send_message(json, topic)
            if res:
                P.send_json_queue.remove([json, topic])
            else:
                L.l.info('Failed to send mqtt message, res={}'.format(res))
        P.mqtt_send_lock.release()
        end_len = len(P.send_json_queue)
        if end_len > 10:
            L.l.warning(
                "{} messages are pending for transport, start was {}".format(
                    end_len, start_len))
    else:
        elapsed = (utils.get_base_location_now_date() -
                   mqtt_io.P.last_connect_attempt).total_seconds()
        if elapsed > 10:
            L.l.info(
                "Initialising mqtt as message needs to be sent, elapsed={}".
                format(elapsed))
            mqtt_io.init()
    prctl.set_name("idle_mqtt_send")
    threading.current_thread().name = "idle_mqtt_send"
Example 11
    def upload_data(self):
        try:
            self.uploading_data = True
            L.l.info("Uploading plotly grid {}".format(self.grid_unique_name))
            if self.grid_url:
                self._update_grid()
            else:
                self._create_or_get_grid()
            # delete from cache all rows that have been uploaded
            for column_name in self.columns_cache.keys():
                self.columns_cache[column_name] = []
            self.last_save = utils.get_base_location_now_date()
            PlotlyGrid.save_interval_seconds = max(
                300, PlotlyGrid.save_interval_seconds - 60)
        except HTTPError, er:
            L.l.warning(
                "Error uploading plotly grid={}, er={} cause={}".format(
                    self.grid_unique_name, er, er.response.text))
            if "file already exists" in er.response.text:
                L.l.critical(
                    "Fatal error, unable to resume saving data to plotly grid")

            if "throttled" in er.response.text:
                PlotlyGrid.save_interval_seconds = min(
                    1200, PlotlyGrid.save_interval_seconds + 60)
                L.l.info("Plotly upload interval is {}s".format(
                    PlotlyGrid.save_interval_seconds))
Example 12
def upload_file(file):
    global initialised, __args, __uploaded_file_list_date, __youtube
    if initialised:
        if not os.path.exists(file):
            L.l.warning('Non-existent file={} to be uploaded to youtube'.format(file))
            del __file_list_last_change[file]
        else:
            __args.file = file
            __args.title = os.path.basename(file)
            time.sleep(1)

            if not os.access(file, os.R_OK):
                L.l.warning('Cannot access for upload file {}'.format(file))
            else:
                try:
                    test_open = open(file, 'r')
                    test_open.close()
                    try:
                        initialize_upload(__youtube, __args)
                        __uploaded_file_list_date[file] = utils.get_base_location_now_date()
                        del __file_list_last_change[file]
                    except errors.HttpError, ex:
                        L.l.warning('Error while uploading file={}, err={}'.format(file, ex))
                    except Exception, ex:
                        L.l.info('Unexpected error on upload, file {}, err={}'.format(file, ex))
                except Exception, ex:
                    L.l.info('Locked file {}, err={}'.format(file, ex))
Example 13
def thread_run():
    prctl.set_name("youtube")
    threading.current_thread().name = "youtube"
    global __file_list_last_change, __uploaded_file_list_date
    try:
        found_for_upload = True
        while found_for_upload:
            found_for_upload = False
            for file in __file_list_last_change.keys():
                lapsed = (utils.get_base_location_now_date() -
                          __file_list_last_change[file]).total_seconds()
                if lapsed > 30:
                    if file in __uploaded_file_list_date.keys():
                        L.l.debug(
                            'Skip duplicate video upload for file {}'.format(
                                file))
                        # pass
                    else:
                        upload_file(file)
                        found_for_upload = True
                        if len(__uploaded_file_list_date) > 100:
                            __uploaded_file_list_date.clear()
                        # upload in FIFO order so the uploaded movies keep their time order
                        break
    except Exception as ex:
        L.l.warning('Exception on youtube thread run, err={}'.format(ex))
    prctl.set_name("idle_youtube")
    threading.current_thread().name = "idle_youtube"
Example 14
def __get_uptime_win_days():
    """Returns a datetime.timedelta instance representing the uptime in a Windows 2000/NT/XP machine"""
    try:
        cmd = "net statistics server"
        p = subprocess.Popen(cmd,
                             shell=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        (child_stdin, child_stdout) = (p.stdin, p.stdout)
        lines = child_stdout.readlines()
        child_stdin.close()
        child_stdout.close()
        lines = [line.strip() for line in lines if line.strip()]
        date, time, ampm = lines[1].split()[2:5]
        # print date, time, ampm
        if str(date[2]).isdigit():
            separator = date[1]
        else:
            separator = date[2]
        d, m, y = [v for v in date.split(separator)]
        m = datetime.datetime.strptime(m, '%b').month
        y = datetime.datetime.strptime(y, '%y').year
        H, M, S = [int(v) for v in time.split(':')]
        if ampm.lower() == 'pm':
            H += 12
        now = utils.get_base_location_now_date()
        then = datetime.datetime(int(y), int(m), int(d), H, M)
        diff = now - then
        return diff.days
    except Exception as ex:
        L.l.warning("Unable to get uptime windows, err={}".format(ex))
        return 0
Example 15
def _get_heat_on_keep_warm(schedule_pattern, temp_code, temp_target,
                           temp_actual):
    force_on = False
    if schedule_pattern.keep_warm and temp_actual is not None:
        minute = utils.get_base_location_now_date().minute
        if len(schedule_pattern.keep_warm_pattern) == 12:
            interval = int(minute / 5)
            delta_warm = temp_actual - temp_target
            if delta_warm <= P.MAX_DELTA_TEMP_KEEP_WARM:
                force_on = ((schedule_pattern.keep_warm_pattern[interval]
                             == "1") and temp_code != P.TEMP_NO_HEAT)
                # if force_on:
                if P.verbose:
                    L.l.info(
                        "Forcing heat {} keep warm, zone {} interval {} pattern {}"
                        .format(force_on, schedule_pattern.name, interval,
                                schedule_pattern.keep_warm_pattern[interval]))
            else:
                if P.verbose:
                    L.l.info("Temp too high in {} with {}, ignoring keep warm".
                             format(schedule_pattern.name, delta_warm))
        else:
            L.l.critical(
                "Missing or incorrect keep warm pattern for zone {}={}".format(
                    schedule_pattern.name, schedule_pattern.keep_warm_pattern))
    else:
        # L.l.info('Keep warm off {} pattern.keep {} temp_act {}'.format(
        #    schedule_pattern.name, schedule_pattern.keep_warm, temp_actual))
        pass
    return force_on
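A small sketch of how the 12-character keep_warm_pattern above is interpreted; the pattern value and minute below are hypothetical, the slot arithmetic mirrors the function:

# Hypothetical pattern: keep warm for the first 5 minutes of every 15-minute block.
keep_warm_pattern = "100100100100"   # 12 slots of 5 minutes each = 1 hour
minute = 37                          # e.g. utils.get_base_location_now_date().minute
interval = int(minute / 5)           # -> 7
slot = keep_warm_pattern[interval]   # -> "0", so keep-warm would not force heat on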
Example 16
def _send_queue():
    prctl.set_name("newtifry")
    threading.current_thread().name = 'newtifry'
    global _source_key, _last_send_date, _message_queue

    if len(_message_queue) == 0:
        return

    params = {}
    params['format'] = 'json'
    params['source'] = _source_key
    if len(_message_queue) > 1:
        params['title'] = 'Multiple notifs: %s [%s]' % (
            _message_queue[0].title, Constant.HOST_NAME)
    else:
        params['title'] = '%s [%s]' % (_message_queue[0].title,
                                       Constant.HOST_NAME)
    global __queue_lock
    try:
        __queue_lock.acquire()
        params['message'] = ''
        params['priority'] = 0
        for item in _message_queue:
            if item.title is not None:
                params['message'] += 'Title: {}\n'.format(item.title)
            if item.message is not None:
                params['message'] += '{}\n'.format(item.message)
            params['message'] += '\n'
            if item.url is not None:
                params['url'] = item.url
            if item.priority is not None:
                params['priority'] = max(item.priority, params['priority'])
            if item.deviceid is not None:
                params['deviceid'] = item.deviceid
            if item.image_url is not None:
                params['image'] = item.image_url
        # Prepare our request
        try:
            response = urllib.request.urlopen(
                BACKEND,
                urllib.parse.urlencode(params).encode("utf8"),
                timeout=Constant.URL_OPEN_TIMEOUT)
            # Read the body
            body = response.read()
            # It's JSON - parse it
            contents = json.loads(body)
            if 'error' in contents.keys():
                L.l.warning("Newtifry server did not accept our message: %s" %
                            contents['error'])
            else:
                L.l.info("Newtifry message sent OK. Size: %d." %
                         contents['size'])
                del _message_queue[:]
                _last_send_date = utils.get_base_location_now_date()
        except Exception as ex:
            L.l.warning("Newtifry failed to make request to the server: " +
                        str(ex))
    finally:
        __queue_lock.release()
Example 17
def __upload_cached_plotly_data():
    #for graph in graph_list.values():
    #    if (utils.get_base_location_now_date() - graph.last_save).total_seconds() > 300:
    #        graph.upload_data()

    for grid in __grid_list.values():
        if (utils.get_base_location_now_date() - grid.last_save).total_seconds() > PlotlyGrid.save_interval_seconds:
            grid.upload_data()
Example 18
class LogMessage:
    def __init__(self):
        pass
    message_type = 'logging'
    message = ''
    level = ''
    source_host_ = Constant.HOST_NAME  # field name must be identical to constant.JSON_PUBLISH_SOURCE_HOST
    # note: class attribute, so this timestamp is evaluated once, at class definition time
    datetime = utils.date_serialised(utils.get_base_location_now_date())
Example 19
def test3():
    column_1 = Column([1, 2, 3], 'column 1')
    column_2 = Column(['a', 'b', utils.get_base_location_now_date()],
                      'column 2')
    grid = Grid([column_1, column_2])

    unique_url = py.grid_ops.upload(grid, 'grid1', world_readable=True)
    print unique_url
Example 20
class P:
    topic = 'no_topic_defined'
    topic_main = 'no_main_topic_defined'
    mqtt_client = None
    client_connected = False
    is_client_connecting = False
    mqtt_msg_count_per_minute = 0
    last_connect_attempt = utils.get_base_location_now_date()
    mqtt_mosquitto_exists = False
    mqtt_paho_exists = False
    last_rec = utils.get_base_location_now_date()
    last_minute = 0
    received_mqtt_list = []
    message_callbacks = {}

    def __init__(self):
        pass
Example 21
def _read_disk_stats():
    #fixme
    return
    if Constant.is_os_linux():
        with open('/proc/diskstats') as f:
            for line in f:
                words = line.split()
                if len(words) > 8:
                    device_major = words[0]
                    device_name = words[2]

                    # skip for non hdds and partitions (ending with digit)
                    if device_major != '8' or device_name[-1:].isdigit():
                        continue  # just to avoid another tab
                    current_record = m.SystemDisk.find_one({m.SystemDisk.hdd_disk_dev: '/dev/' + device_name,
                                                            m.SystemDisk.system_name: Constant.HOST_NAME})
                    # always build a fresh record; current_record (the stored row) is compared against it below
                    record = models.SystemDisk()
                    reads_completed = utils.round_sensor_value(words[3])
                    writes_completed = utils.round_sensor_value(words[7])
                    record.hdd_disk_dev = '/dev/' + device_name
                    record.last_reads_completed_count = reads_completed
                    record.last_writes_completed_count = writes_completed
                    record.system_name = Constant.HOST_NAME
                    record.updated_on = utils.get_base_location_now_date()
                    # save read/write date time only if count changes
                    if current_record is not None:
                        if current_record.serial is None or current_record.serial == '':
                            record.serial = 'serial not available {} {}'.format(Constant.HOST_NAME, record.hdd_disk_dev)
                        if current_record.hdd_name is None or current_record.hdd_name == '':
                            record.hdd_name = '{} {}'.format(Constant.HOST_NAME, record.hdd_disk_dev)
                        read_elapsed = -1
                        write_elapsed = -1
                        if record.last_reads_completed_count != current_record.last_reads_completed_count:
                            record.last_reads_datetime = utils.get_base_location_now_date()
                        else:
                            record.last_reads_datetime = current_record.last_reads_datetime
                        if record.last_writes_completed_count != current_record.last_writes_completed_count:
                            record.last_writes_datetime = utils.get_base_location_now_date()
                        else:
                            record.last_writes_datetime = current_record.last_writes_datetime
                        if current_record.last_reads_datetime:
                            read_elapsed = (utils.get_base_location_now_date()
                                            - record.last_reads_datetime).total_seconds()
                            record.last_reads_elapsed = utils.round_sensor_value(read_elapsed)
                        if current_record.last_writes_datetime:
                            write_elapsed = (utils.get_base_location_now_date()
                                             - record.last_writes_datetime).total_seconds()
                            record.last_writes_elapsed = utils.round_sensor_value(write_elapsed)
                        L.l.debug('Disk {} elapsed read {}s write {}s'.format(
                            device_name, int(read_elapsed), int(write_elapsed)))
                    else:
                        record.last_reads_datetime = utils.get_base_location_now_date()
                        record.last_writes_datetime = utils.get_base_location_now_date()
                        record.serial = 'serial not available {} {}'.format(Constant.HOST_NAME, record.hdd_disk_dev)
                    record.save_changed_fields(current_record=current_record, new_record=record,
                                               notify_transport_enabled=True, save_to_graph=True, debug=False)
                else:
                    L.l.warning(
                        'Unexpectedly low number of split atoms={} in diskstat line={}'.format(len(words), line))
Example 22
def get_reference_trace_for_append(graph_unique_name='', shape_type=''):
    global g_reference_trace_id
    return graph_objs.Scatter(x=[utils.get_base_location_now_date()],
                              y=[0],
                              name=graph_unique_name,
                              text=g_reference_trace_id,
                              mode='none',
                              showlegend=False,
                              line=graph_objs.Line(shape=shape_type))
Example 23
def __save_heat_state_db(zone, heat_is_on):
    assert isinstance(zone, models.Zone)
    zone_heat_relay = models.ZoneHeatRelay.query.filter_by(
        zone_id=zone.id).first()
    if zone_heat_relay is not None:
        zone_heat_relay.heat_is_on = heat_is_on
        zone_heat_relay.updated_on = utils.get_base_location_now_date()
        L.l.debug(
            'Heat state changed to is-on={} via pin={} in zone={}'.format(
                heat_is_on, zone_heat_relay.heat_pin_name, zone.name))
        zone_heat_relay.notify_transport_enabled = True
        zone_heat_relay.save_to_graph = True
        zone_heat_relay.save_to_history = True
        # save latest heat state for caching purposes
        zone.heat_is_on = heat_is_on
        zone.last_heat_status_update = utils.get_base_location_now_date()
        commit()
    else:
        L.l.warning('No heat relay found in zone {}'.format(zone.name))
Example 24
def on_message(client, userdata, msg):
    json = msg
    try:
        if utils.get_base_location_now_date().minute != P.last_minute:
            P.last_minute = utils.get_base_location_now_date().minute
            transport.mqtt_io.mqtt_msg_count_per_minute = 0
        transport.mqtt_io.mqtt_msg_count_per_minute += 1
        P.last_rec = utils.get_base_location_now_date()
        #L.l.debug('Received from client [{}] userdata [{}] msg [{}] at {} '.format(client._client_id,
        #                                                                           userdata, msg.topic,
        #                                                                           utils.get_base_location_now_date()))
        # locate json string
        start = msg.payload.find('{')
        end = msg.payload.find('}')
        json = msg.payload[start:end + 1]
        if '"source_host_": "{}"'.format(Constant.HOST_NAME) not in json:
            # ignore messages sent by this host
            x = json2obj(json)
            #if x[Constant.JSON_PUBLISH_SOURCE_HOST] != str(Constant.HOST_NAME):
            start = utils.get_base_location_now_date()
            dispatcher.send(signal=Constant.SIGNAL_MQTT_RECEIVED,
                            client=client,
                            userdata=userdata,
                            topic=msg.topic,
                            obj=x)
            elapsed = (utils.get_base_location_now_date() -
                       start).total_seconds()
            if elapsed > 5:
                L.l.warning('Command received took {} seconds'.format(elapsed))
            if False:
                if hasattr(x, 'command') and hasattr(
                        x, 'command_id') and hasattr(x, 'host_target'):
                    if x.host_target == Constant.HOST_NAME:
                        L.l.info('Executing command {}'.format(x.command))
                    else:
                        L.l.info(
                            "Received command {} for other host {}".format(
                                x, x.host_target))
    except AttributeError as ex:
        L.l.warning('Unknown attribute error in msg {} err {}'.format(
            json, ex))
    except ValueError as e:
        L.l.warning('Invalid JSON {} {}'.format(json, e))
Example 25
def record_update(obj):
    # save sensor state to db, except for current node
    try:
        sensor_host_name = utils.get_object_field_value(obj, 'name')
        L.l.debug('Received sensor state update from {}'.format(sensor_host_name))
        # avoid the node updating itself in an infinite recursion
        if sensor_host_name != Constant.HOST_NAME:
            address = utils.get_object_field_value(obj, 'address')
            n_address = utils.get_object_field_value(obj, 'n_address')
            sensor_type = utils.get_object_field_value(obj, 'type')
            record = models.Sensor(address=address)
            assert isinstance(record, models.Sensor)
            zone_sensor = models.ZoneSensor.query.filter_by(sensor_address=address).first()
            if zone_sensor is not None:
                record.sensor_name = zone_sensor.sensor_name
            else:
                record.sensor_name = '(n/a) {} {} {}'.format(address, n_address, sensor_type)
            record.type = utils.get_object_field_value(obj, 'type')
            record.updated_on = utils.get_base_location_now_date()
            if obj.has_key('counters_a'): record.counters_a = utils.get_object_field_value(obj, 'counters_a')
            if obj.has_key('counters_b'): record.counters_b = utils.get_object_field_value(obj, 'counters_b')
            if obj.has_key('delta_counters_a'):
                record.delta_counters_a = utils.get_object_field_value(obj, 'delta_counters_a')
            if obj.has_key('delta_counters_b'):
                record.delta_counters_b = utils.get_object_field_value(obj, 'delta_counters_b')
            if obj.has_key('temperature'): record.temperature = utils.get_object_field_value(obj, 'temperature')
            if obj.has_key('humidity'): record.humidity = utils.get_object_field_value(obj, 'humidity')
            if obj.has_key('iad'): record.iad = utils.get_object_field_value(obj, 'iad')
            if obj.has_key('vad'): record.vad = utils.get_object_field_value(obj, 'vad')
            if obj.has_key('vdd'): record.vdd = utils.get_object_field_value(obj, 'vdd')
            if obj.has_key('pio_a'): record.pio_a = utils.get_object_field_value(obj, 'pio_a')
            if obj.has_key('pio_b'): record.pio_b = utils.get_object_field_value(obj, 'pio_b')
            if obj.has_key('sensed_a'): record.sensed_a = utils.get_object_field_value(obj, 'sensed_a')
            if obj.has_key('sensed_b'): record.sensed_b = utils.get_object_field_value(obj, 'sensed_b')

            current_record = models.Sensor.query.filter_by(address=address).first()
            # force field changed detection for delta_counters
            if current_record:
                current_record.delta_counters_a = 0
                current_record.delta_counters_b = 0
            record.save_changed_fields(current_record=current_record, new_record=record, notify_transport_enabled=False,
                                       save_to_graph=False)
            # commit() # not needed?

            # enable below only for testing on netbook
            # if Constant.HOST_NAME == 'xxxnetbook' and (record.delta_counters_a or record.delta_counters_b):
            #    dispatcher.send(Constant.SIGNAL_UTILITY, sensor_name=record.sensor_name,
            #                    units_delta_a=record.delta_counters_a,
            #                    units_delta_b=record.delta_counters_b, total_units_a=record.counters_a,
            #                    total_units_b=record.counters_b,
            #                    sampling_period_seconds=owsensor_loop.sampling_period_seconds)
    except Exception as ex:
        L.l.error('Error on sensor update, err {}'.format(ex), exc_info=True)
        db.session.rollback()
Example 26
def _broadcast(record, update, class_name):
    try:
        record[Constant.JSON_PUBLISH_SOURCE_HOST] = str(Constant.HOST_NAME)
        record[Constant.JSON_PUBLISH_TABLE] = class_name
        record[Constant.JSON_PUBLISH_FIELDS_CHANGED] = list(update.keys())
        record['_sent_on'] = utils.get_base_location_now_date()
        js = utils.safeobj2json(record)
        transport.send_message_json(json=js)
    except Exception as ex:
        L.l.error('Unable to broadcast {} rec={} err={}'.format(
            class_name, record, ex))
Example 27
def _decide_action(zone,
                   current_temperature,
                   target_temperature,
                   force_on=False,
                   force_off=False,
                   zone_thermo=None,
                   direction=1):
    heat_is_on = None
    if force_on:
        heat_is_on = True
    if force_off:
        if heat_is_on:
            L.l.warning(
                'Conflicting heat states on {} force_on={}, force_off={}'.
                format(zone, force_on, force_off))
        heat_is_on = False
    if heat_is_on is None:
        heat_is_on = zone_thermo.heat_is_on
        if heat_is_on is None:
            heat_is_on = False
        if direction >= 0:
            # for heating
            if current_temperature < target_temperature:
                heat_is_on = True
                P.heat_status += 'temp low {}<{} {} '.format(
                    current_temperature, target_temperature, heat_is_on)
            if current_temperature > (target_temperature + P.threshold):
                heat_is_on = False
                P.heat_status += 'temp high {}>{} {} '.format(
                    current_temperature, target_temperature + P.threshold,
                    heat_is_on)
        else:
            # for cooling
            if current_temperature > target_temperature:
                heat_is_on = True
                P.heat_status += 'temp too high {}>{} {} - cooling '.format(
                    current_temperature, target_temperature, heat_is_on)
            if current_temperature < (target_temperature + P.threshold):
                heat_is_on = False
                P.heat_status += 'temp too low {}<{} {} - no cool'.format(
                    current_temperature, target_temperature + P.threshold,
                    heat_is_on)

    # trigger if state is different and every 5 minutes (in case other hosts with relays have restarted)
    if zone_thermo.last_heat_status_update is not None:
        last_heat_update_age_sec = (
            utils.get_base_location_now_date() -
            zone_thermo.last_heat_status_update).total_seconds()
    else:
        last_heat_update_age_sec = P.check_period
    if zone_thermo.heat_is_on != heat_is_on or last_heat_update_age_sec >= P.check_period \
            or zone_thermo.last_heat_status_update is None:
        _save_heat_state_db(zone=zone, heat_is_on=heat_is_on)
    return heat_is_on
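A worked sketch of the hysteresis band implemented above for the heating direction; the temperatures and threshold are hypothetical, the on/off rules are the ones in the code:

# target 21.0, threshold 0.5: heat turns on below 21.0, off above 21.5,
# and keeps its previous state inside the 21.0-21.5 band.
target, threshold = 21.0, 0.5
samples = [(20.4, False), (21.2, True), (21.7, True)]  # (current_temperature, previous heat_is_on)
for current, heat_is_on in samples:
    if current < target:
        heat_is_on = True        # below target: heating turns on
    if current > target + threshold:
        heat_is_on = False       # above target + threshold: heating turns off
    # results: 20.4 -> True, 21.2 -> stays True (inside the band), 21.7 -> False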
Example 28
def __announce_grid_cache():
    grids_i_created = models.PlotlyCache().query_filter_all(models.PlotlyCache.created_by_node_name.in_(
        [Constant.HOST_NAME]))
    for grid in grids_i_created:
        new_record = models.PlotlyCache()
        new_record.created_by_node_name = grid.created_by_node_name
        new_record.column_name_list = grid.column_name_list
        new_record.grid_url = grid.grid_url
        new_record.grid_name = grid.grid_name
        new_record.announced_on = utils.get_base_location_now_date()
        grid.save_changed_fields(current_record=grid, new_record=new_record, notify_transport_enabled=True,
                                 save_to_graph=False)
Example 29
def announce_node_state():
    global progress_status
    try:
        L.l.debug('I tell everyone my node state')
        #current_record = models.Node.query.filter_by(name=constant.HOST_NAME).first()
        node = models.Node()
        current_record = models.Node().query_filter_first(
            models.Node.name.in_([Constant.HOST_NAME, ""]))

        node.name = Constant.HOST_NAME
        if not current_record:
            node.priority = random.randint(1, 100)  # todo: clarify why 1 -100?
            node.run_overall_cycles = 0
            node.master_overall_cycles = 0
        else:
            node.run_overall_cycles = current_record.run_overall_cycles
            node.master_overall_cycles = current_record.master_overall_cycles
            node.is_master_logging = current_record.is_master_logging
            node.is_master_overall = current_record.is_master_overall
            if not node.run_overall_cycles:
                node.run_overall_cycles = 0
            if not node.master_overall_cycles:
                node.master_overall_cycles = 0
        node.event_sent_datetime = utils.get_base_location_now_date()
        node.updated_on = utils.get_base_location_now_date()
        node.ip = Constant.HOST_MAIN_IP
        if variable.NODE_THIS_IS_MASTER_OVERALL:
            node.master_overall_cycles += 1
        node.run_overall_cycles += 1
        node.os_type = Constant.OS
        node.machine_type = Constant.HOST_MACHINE_TYPE
        node.notify_transport_enabled = True
        progress_status = 'Announce node status before save fields'
        node.save_changed_fields(current_record=current_record,
                                 new_record=node,
                                 notify_transport_enabled=True,
                                 save_to_graph=True,
                                 graph_save_frequency=120)
    except Exception as ex:
        L.l.error('Unable to announce my state, err={}'.format(ex))
Example 30
def download_trace_id_list(graph_unique_name='', shape_type=''):
    L.l.info('Downloading online traces in memory, graph {} shape {}'.format(graph_unique_name, shape_type))
    start_date = utils.get_base_location_now_date()
    result = -1
    try:
        result = py.file_ops.mkdirs(get_folder_name())
        L.l.debug('Created archiving folder {} result {}'.format(get_folder_name(), result))
    except PlotlyRequestError, ex:
        if hasattr(ex, 'HTTPError'):
            msg = str(ex.HTTPError) + ex.HTTPError.response.content
        else:
            msg = str(ex)
        L.l.info('Ignoring error on create archive folder {} err={}'.format(get_folder_name(), msg))