def get(self):
		args = self.parser.parse_args()
		sensor, sensor_name, sensor_attribute = None, None, None


		if 'sensor' in args and args['sensor'] is not None:
			sensor = args['sensor']
			if sensor != '':
				if sensor == 'all':
					sensors = Sensor.get_all()
					return [a.json() for a in sensors], 200
				else:
					return (Sensor.get_by_id(sensor)).json(), 200

		if 'sensorname' in args and args['sensorname'] is not None:
			sensor_name = args['sensorname']
			if sensor_name != '':
				_sensors = sensor_name.split(',')
				_by_name = Sensor.get_by_name_in(_sensors)
				return [a.json() for a in _by_name], 200

		if 'sensorattribute' in args and args['sensorattribute'] is not None:
			sensor_attribute = args['sensorattribute']
			if sensor_attribute != '':
				_sen_attrs_ids = sensor_attribute.split(',')
				_sen_attrs = SensorAttribute.get_by_id_in(_sen_attrs_ids)
				attrs_ids = [_id.a_id for _id in _sen_attrs]
				_attributes = Attributes.get_by_id_in(attrs_ids)
				return [a.json() for a in _attributes], 200

		return {
			"error": "error occurred while processing request"
		}, 400
    def get_sensor(self, sensor_id: str) -> Dict[str, Any]:
        """
        Get Sensor entry from database
        :param sensor_id: The sensor's identification number
        :return: A dictionary of the Sensor entry with column names as keys
                 and column values as values
        """
        sensor_entry = {}

        sensor = Sensor.get_by_id(sensor_id)
        if sensor:
            sensor_entry['id'] = sensor.id
            sensor_entry['name'] = sensor.name
            sensor_entry['a_id'] = sensor.a_id
            location = self.get_location(sensor.l_id)
            sensor_entry['latitude'] = location.lat if location else np.nan
            sensor_entry['longitude'] = location.lon if location else np.nan
            return sensor_entry
        return {'id': sensor_id, 'name': "", 'a_id': None, 'latitude': np.nan,
                'longitude': np.nan}
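
# A hedged sketch of the two shapes get_sensor returns: a populated entry when
# the sensor exists and a NaN-filled stub when it does not. All ids and names
# below are hypothetical.
import numpy as np

found_entry = {'id': '42', 'name': 'roadside-7', 'a_id': '13',
               'latitude': 51.501, 'longitude': -0.142}
missing_entry = {'id': '99', 'name': "", 'a_id': None,
                 'latitude': np.nan, 'longitude': np.nan}
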
	def get(self):
		args = self.parser.parse_args()
		theme, subtheme, attribute_data = None, None, None
		sensor, sensor_name, sensor_attribute = None, None, None
		attributes = []
		sensorid, predictions = None, None
		n_predictions = 100
		grouped, harmonising_method, per_sensor = None, None, None
		freq = '1H'

		if 'theme' in args:
			theme = args['theme']

		if 'subtheme' in args:
			subtheme = args['subtheme']

		if 'attributedata' in args:
			attribute_data = args['attributedata']

		if 'attribute' in args and args['attribute'] is not None:
			_attributes = args['attribute']
			if _attributes != '':
				attributes = _attributes.split(',')

		if 'sensor' in args and args['sensor'] is not None:
			sensor = args['sensor']
			if sensor != '':
				if sensor == 'all':
					sensors = Sensor.get_all()
					return [a.json() for a in sensors], 200
				else:
					return (Sensor.get_by_id(sensor)).json(), 200

		if 'sensorname' in args and args['sensorname'] is not None:
			sensor_name = args['sensorname']
			if sensor_name != '':
				_sensors = sensor_name.split(',')
				_by_name = Sensor.get_by_name_in(_sensors)
				return [a.json() for a in _by_name], 200

		if 'sensorattribute' in args and args['sensorattribute'] is not None:
			sensor_attribute = args['sensorattribute']
			if sensor_attribute != '':
				_sen_attrs_ids = sensor_attribute.split(',')
				_sen_attrs = SensorAttribute.get_by_id_in(_sen_attrs_ids)
				attrs_ids = [_id.a_id for _id in _sen_attrs]
				_attributes = Attributes.get_by_id_in(attrs_ids)
				return [a.json() for a in _attributes], 200

		if 'grouped' in args:
			grouped = args['grouped']

		if 'harmonising_method' in args:
			harmonising_method = args['harmonising_method']

		if 'per_sensor' in args:
			per_sensor = args['per_sensor']

		if 'freq' in args:
			freq = args['freq']

		if 'predictions' in args:
			predictions = args['predictions']
			if predictions >= 100:
				predictions = 100

		if 'n_predictions' in args:
			n_predictions = args['n_predictions']

		if 'sensorid' in args:
			sensorid = args['sensorid']

		if theme is None and subtheme is None \
			and len(attributes) == 0 and attribute_data is None \
			and sensor is None and sensor_name is None and sensor_attribute is None:
			themes = Theme.get_all()
			return [a.json() for a in themes], 200

		if attribute_data is not None:
			global LIMIT, OFFSET
			data = None
			operation = None
			if 'limit' in args and args['limit'] is not None:
				LIMIT = args['limit']

			if 'offset' in args and args['offset'] is not None:
				OFFSET = args['offset']

			if 'operation' in args and args['operation'] is not None:
				operation = args['operation']

			if ('fromdate' in args and args['fromdate'] is not None 
				and 'todate' in args and args['todate'] is not None):
				data = self.get_attribute_data(attribute_data, LIMIT, OFFSET, 
												args['fromdate'], args['todate'], operation)
				if predictions:
					data.append(self.get_predictions(attribute_table = data[0]["Attribute_Table"],
														sensor_id = sensorid,
														n_pred = n_predictions))
			else:
				if grouped:
					if harmonising_method:
						data = self.get_attribute_data(attribute_data, LIMIT, OFFSET, operation=operation)
						data = request_harmonised_data(data, harmonising_method=harmonising_method)
					else:
						data = self.get_attribute_data(attribute_data, LIMIT, OFFSET, operation=operation)
						data = request_grouped_data(data, per_sensor=per_sensor, freq=freq)
				else:
					data = self.get_attribute_data(attribute_data, LIMIT, OFFSET, operation=operation)

				if predictions:
					#### Check that data exists
					if data[0]["Total_Records"] != 0:
						#### Check for non-numeric data
						if is_number(data[0]["Attribute_Values"][0]["Value"]):
							data.append(self.get_predictions(attribute_table = data[0]["Attribute_Table"],
																sensor_id = sensorid,
																n_pred = n_predictions))
						else:
							print("Cannot predict non-numeric data")
			return data, 200

		if attributes:
			_attrs = []
			attr = Attributes.get_by_name_in(attributes)
			for a in attr:
				_attrs.append(a.json())
			return _attrs, 200

		if subtheme is not None and subtheme != '':
			attributes = Attributes.get_by_sub_theme_id(subtheme)
			return [a.json() for a in attributes], 200

		if theme is not None and theme != '':
			subthemes = SubTheme.get_by_theme_id(theme)
			return [a.json() for a in subthemes], 200

		return {
			"error": "error occurred while processing request"
		}, 400
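
# A hedged usage sketch for the GET handler above. The host and route are
# assumptions (the resource is registered with Flask-RESTful elsewhere); the
# query parameter names match the ones parsed by self.parser.
import requests

resp = requests.get(
    "http://localhost:5000/data",                  # hypothetical route
    params={
        "attributedata": "attr_air_temperature",   # illustrative table name
        "limit": 50,
        "grouped": "true",
        "per_sensor": "true",
        "freq": "1H",
    },
)
print(resp.status_code, resp.json())
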
def request_grouped_data(data, per_sensor, freq):
    df = pd.read_json(json.dumps(data), orient='records')
    harm_df = pd.DataFrame(columns=[
        'Attribute_Name', 'Attribute_Table', 'Sensor_id', 'Timestamp', 'Value'
    ])
    ## Get the attributes
    for attribute in range(len(df)):
        attr = df.iloc[attribute].Attribute_Name
        attr_table = df.iloc[attribute].Attribute_Table
        attr_values = df.iloc[attribute].Attribute_Values

        harm_df_temp = pd.DataFrame(columns=[
            'Attribute_Name', 'Attribute_Table', 'Sensor_id', 'Timestamp',
            'Value'
        ])

        harm_df_temp['Attribute_Name'] = [
            attr for i in range(len(attr_values))
        ]
        harm_df_temp['Attribute_Table'] = [
            attr_table for i in range(len(attr_values))
        ]

        harm_df_temp['Sensor_id'] = [i['Sensor_id'] for i in attr_values]
        harm_df_temp['Timestamp'] = [i['Timestamp'] for i in attr_values]
        harm_df_temp['Value'] = [i['Value'] for i in attr_values]

        ### append to dataframe
        harm_df = harm_df.append(harm_df_temp, ignore_index=True)

        ### set datetime index
        harm_df['Timestamp'] = pd.to_datetime(harm_df['Timestamp'])
        harm_df = harm_df.set_index('Timestamp')

        ### TODO add an informative message when the user provides an attribute with non-numeric values.
        ### Related to issue #109
        harm_df.Value = harm_df.Value.astype(float)

        if not per_sensor:
            ### Using the median to preserve the data type during groupby aggregation
            harm_df = harm_df.groupby(
                [pd.Grouper(freq=freq), 'Attribute_Name']).median()
            harm_df.reset_index(inplace=True)
        else:
            harm_df = harm_df.groupby(
                [pd.Grouper(freq=freq), 'Attribute_Name',
                 'Sensor_id']).median()
            harm_df.reset_index(inplace=True)

            ### get coordinates and sensor names in readable format
            _sensor_names = []
            _l_id = []
            _lat = []
            _lon = []

            for sensor in harm_df.Sensor_id.unique():
                _temp_s = (Sensor.get_by_id(sensor)).json()
                _sensor_names.append(_temp_s["Name"])
                _l_id.append(_temp_s["Location id"])

            for l in _l_id:
                _temp_l = (Location.get_by_id(l)).json()
                _lat.append(_temp_l["Latitude"])
                _lon.append(_temp_l["Longitude"])

            temp = pd.DataFrame(data=harm_df.Sensor_id.unique(),
                                columns=['Sensor_id'])

            temp['Name'] = _sensor_names
            temp['Latitude'] = _lat
            temp['Longitude'] = _lon

    if per_sensor:
        harm_df = pd.merge(harm_df.reset_index(drop=True),
                           temp,
                           on='Sensor_id',
                           how='left')

    data = json.loads(harm_df.to_json(orient='records'))

    return data
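
# A minimal, illustrative payload in the shape request_grouped_data expects
# (one attribute, one sensor, two readings); every id and value below is
# hypothetical. Note the helper relies on DataFrame.append, so pandas < 2.0
# is assumed.
sample_data = [{
    "Attribute_Name": "air_temperature",
    "Attribute_Table": "attr_air_temperature",
    "Attribute_Values": [
        {"Sensor_id": "s-001", "Timestamp": "2020-01-01T00:05:00", "Value": "11.2"},
        {"Sensor_id": "s-001", "Timestamp": "2020-01-01T00:35:00", "Value": "12.8"},
    ],
}]

# Group per attribute into hourly bins; per_sensor=True would additionally
# look up Sensor/Location records, which needs a live database.
grouped = request_grouped_data(sample_data, per_sensor=False, freq='1H')
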
def request_harmonised_data(data, harmonising_method):
    per_sensor = True

    df = pd.read_json(json.dumps(data), orient='records')
    harm_df = pd.DataFrame(columns=[
        'Attribute_Name', 'Attribute_Table', 'Sensor_id', 'Timestamp', 'Value'
    ])
    ### Get the attributes
    for attribute in range(len(df)):

        attr = df.iloc[attribute].Attribute_Name
        attr_table = df.iloc[attribute].Attribute_Table
        attr_values = df.iloc[attribute].Attribute_Values

        harm_df_temp = pd.DataFrame(columns=[
            'Attribute_Name', 'Attribute_Table', 'Sensor_id', 'Timestamp',
            'Value'
        ])

        harm_df_temp['Attribute_Name'] = [
            attr for i in range(len(attr_values))
        ]
        harm_df_temp['Attribute_Table'] = [
            attr_table for i in range(len(attr_values))
        ]

        harm_df_temp['Sensor_id'] = [i['Sensor_id'] for i in attr_values]
        harm_df_temp['Timestamp'] = [i['Timestamp'] for i in attr_values]
        harm_df_temp['Value'] = [i['Value'] for i in attr_values]

        ### append to dataframe
        harm_df = harm_df.append(harm_df_temp, ignore_index=True)

        ### Commenting out the dtype conversion: related to issue #109
        # harm_df.Value = harm_df.Value.astype(float)

        ### get coordinates and sensor names in readable format
        _sensor_names = []
        _l_id = []
        _lat = []
        _lon = []

        for sensor in harm_df.Sensor_id.unique():
            _temp_s = (Sensor.get_by_id(sensor)).json()
            _sensor_names.append(_temp_s["Name"])
            _l_id.append(_temp_s["Location id"])

        for l in _l_id:
            _temp_l = (Location.get_by_id(l)).json()
            _lat.append(_temp_l["Latitude"])
            _lon.append(_temp_l["Longitude"])

        temp = pd.DataFrame(data=harm_df.Sensor_id.unique(),
                            columns=['Sensor_id'])

        temp['Name'] = _sensor_names
        temp['Latitude'] = _lat
        temp['Longitude'] = _lon

    if per_sensor:
        harm_df = pd.merge(harm_df.reset_index(drop=True),
                           temp,
                           on='Sensor_id',
                           how='left')

    ## set datetime index
    harm_df['Timestamp'] = pd.to_datetime(harm_df['Timestamp'])
    harm_df = harm_df.set_index('Timestamp')

    ### get the attribute with the highest number of records (a proxy for higher sampling frequency)
    _value_len = 0
    _benchmark_attr = harm_df['Attribute_Name'].iloc[0]

    for name, group in harm_df.groupby('Attribute_Name'):
        _temp = len(np.unique(group.index))

        if _temp > _value_len:
            _value_len = _temp
            _benchmark_attr = name

    ### use it as the benchmark attribute against which all other attributes are aligned
    _df = harm_df[harm_df.Attribute_Name == _benchmark_attr]
    _benchmark_attr_index = _df.index.unique()
    _df.reset_index(inplace=True)

    ### loop through all other attributes and sensors
    for i in np.delete(
            harm_df.Attribute_Name.unique(),
            np.argwhere(harm_df.Attribute_Name.unique() == _benchmark_attr)):
        for sensor in harm_df.Sensor_id.unique():
            _temp_df = harm_df[(harm_df.Attribute_Name == i)
                               & (harm_df.Sensor_id == sensor)]

            ### relate _temp_df to the benchmarking index (it gets the closest value by default)
            _temp_df = _temp_df.asof(_benchmark_attr_index)
            _temp_df.reset_index(inplace=True)
            _df = _df.append(_temp_df)

    ### drop NaNs (originating from records that don't match the benchmark date range)
    _df.dropna(inplace=True)

    ### check for missing attributes resulting from non overlapping temporal windows
    miss_attr = set(_df.Attribute_Name.unique().tolist()).symmetric_difference(
        harm_df.Attribute_Name.unique().tolist())
    if miss_attr:
        ### if found, append them to the dataframe as is
        for i in miss_attr:
            _df = _df.append(
                harm_df[harm_df.Attribute_Name == i].reset_index())

    ### return the data in long, wide, or GeoJSON format
    if harmonising_method == 'wide':
        _df.reset_index(inplace=True)
        _df['Value'] = _df['Value'].astype(float)
        _df['timestamp'] = _df['Timestamp'].astype(
            int) / 10**6  ### milliseconds
        _df = _df.pivot_table(
            index='Timestamp',
            columns='Attribute_Name',
            values=['Value', 'timestamp', 'Latitude', 'Longitude'])

        data = json.loads(
            _df.to_json(orient='records').replace('["', '').replace(
                '"]', '').replace('","', ','))
    elif harmonising_method == 'long':
        data = json.loads(_df.to_json(orient='records'))

    else:
        ### use GeoAlchemy2's to_shape to grab the sensor geometry (instead of lat/lon).
        _df['wkt_geom'] = _df['Sensor_id'].apply(lambda x: str(
            to_shape(Location.get_by_id_in([Sensor.get_by_id(x).l_id])[0].geo))
                                                 )
        data = data_geojson(_df)

    return data
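
# Hedged call sketch for the harmoniser above, reusing the sample_data payload
# sketched earlier. Every branch looks up Sensor/Location records for
# coordinates, so a seeded database is assumed. 'wide' pivots attributes into
# columns, 'long' keeps one row per reading, and any other value falls through
# to the GeoJSON branch built with GeoAlchemy2's to_shape.
wide_data = request_harmonised_data(sample_data, harmonising_method='wide')
long_data = request_harmonised_data(sample_data, harmonising_method='long')
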
def request_grouped_data(data: Dict[str, Any], per_sensor: bool, freq: str,
                         method: str) -> Dict[str, Any]:
    """
    Get grouped sensor data at different temporal frequencies by aggregating
    all sensor values that fall within each interval. The median is the
    recommended statistic for the following reasons:

    a) The purpose of this function is to provide the frontend with a
       convenient form of visualising the data. As such, it is not used in
       any subsequent analytics.
    b) The median is more robust to outliers and preserves the original
       data type.

    :param data: Attribute data in json format
    :param per_sensor: A boolean flag of whether the reformatted request
                       returns data per individual sensor or per attribute
    :param freq: temporal frequency of aggregation, e.g. 'W', '1D', '1H', '1Min'
    :param method: Aggregation statistic applied within each interval
                   ('median', 'min' or 'max'; any other value falls back
                   to the mean)
    :return: Requested data grouped by frequency and method
    """
    try:
        df = pd.read_json(json.dumps(data), orient='records')
        harm_df = pd.DataFrame(columns=[
            'Attribute_Name', 'Attribute_Table', 'Sensor_id', 'Timestamp',
            'Value'
        ])
        # Get the attributes
        for attribute in range(len(df)):
            attr = df.iloc[attribute].Attribute_Name
            attr_table = df.iloc[attribute].Attribute_Table
            attr_values = df.iloc[attribute].Attribute_Values

            harm_df_temp = pd.DataFrame(columns=[
                'Attribute_Name', 'Attribute_Table', 'Sensor_id', 'Timestamp',
                'Value'
            ])

            harm_df_temp['Attribute_Name'] = [
                attr for i in range(len(attr_values))
            ]
            harm_df_temp['Attribute_Table'] = [
                attr_table for i in range(len(attr_values))
            ]

            harm_df_temp['Sensor_id'] = [i['Sensor_id'] for i in attr_values]
            harm_df_temp['Timestamp'] = [i['Timestamp'] for i in attr_values]
            harm_df_temp['Value'] = [i['Value'] for i in attr_values]

            # append to dataframe
            harm_df = harm_df.append(harm_df_temp, ignore_index=True)

            # set datetime index
            harm_df['Timestamp'] = pd.to_datetime(harm_df['Timestamp'])
            harm_df = harm_df.set_index('Timestamp')

            try:
                # If Attributes with non-numeric values are provided a TypeError
                # will be raised
                # Related to issue #109
                harm_df.Value = harm_df.Value.astype(float)
            except TypeError as te:
                logger.log(logging.CRITICAL,
                           "non-numeric attributes provided: {}".format(te))

            if not per_sensor:
                if method == 'median':
                    harm_df = harm_df.groupby(
                        [pd.Grouper(freq=freq), 'Attribute_Name']).median()
                elif method == 'min':
                    harm_df = harm_df.groupby(
                        [pd.Grouper(freq=freq), 'Attribute_Name']).min()
                elif method == 'max':
                    harm_df = harm_df.groupby(
                        [pd.Grouper(freq=freq), 'Attribute_Name']).max()
                else:
                    harm_df = harm_df.groupby(
                        [pd.Grouper(freq=freq), 'Attribute_Name']).mean()
                harm_df.reset_index(inplace=True)
                _harm_df = pd.DataFrame(columns=harm_df.columns)
                for i in harm_df.Attribute_Name.unique():
                    temp = harm_df[harm_df.Attribute_Name == i]
                    temp['Timestamp'] = pd.to_datetime(temp['Timestamp'])
                    temp = temp.set_index('Timestamp')
                    temp = temp.resample(freq).ffill()
                    _harm_df = _harm_df.append(temp)
            else:
                if method == 'median':
                    harm_df = harm_df.groupby(
                        [pd.Grouper(freq=freq), 'Attribute_Name',
                         'Sensor_id']).median()
                    harm_df.reset_index(inplace=True)
                elif method == 'min':
                    harm_df = harm_df.groupby(
                        [pd.Grouper(freq=freq), 'Attribute_Name',
                         'Sensor_id']).min()
                    harm_df.reset_index(inplace=True)
                elif method == 'max':
                    harm_df = harm_df.groupby(
                        [pd.Grouper(freq=freq), 'Attribute_Name',
                         'Sensor_id']).max()
                    harm_df.reset_index(inplace=True)
                else:
                    harm_df = harm_df.groupby(
                        [pd.Grouper(freq=freq), 'Attribute_Name',
                         'Sensor_id']).mean()
                    harm_df.reset_index(inplace=True)

        if per_sensor:
            _harm_df = pd.DataFrame(columns=harm_df.columns)
            for i in harm_df.Sensor_id.unique():
                for j in harm_df.Attribute_Name.unique():
                    temp = harm_df[(harm_df.Sensor_id == i)
                                   & (harm_df.Attribute_Name == j)]
                    temp['Timestamp'] = pd.to_datetime(temp['Timestamp'])
                    temp = temp.set_index('Timestamp')
                    temp = temp.resample(freq).ffill()
                    _harm_df = _harm_df.append(temp)

        _harm_df.Timestamp = _harm_df.index
        _harm_df.reset_index(inplace=True, drop=True)

        # get coordinates and sensor names in readable format
        if per_sensor:
            _sensor_names = []
            _l_id = []
            _lat = []
            _lon = []

            for sensor in harm_df.Sensor_id.unique():
                _temp_s = Sensor.get_by_id(sensor)
                _sensor_names.append(_temp_s.name)
                _l_id.append(_temp_s.l_id)

            for l in _l_id:
                _temp_l = Location.get_by_id(l)
                _lat.append(_temp_l.lat)
                _lon.append(_temp_l.lon)

            temp = pd.DataFrame(data=_harm_df.Sensor_id.unique(),
                                columns=['Sensor_id'])

            temp['Name'] = _sensor_names
            temp['Latitude'] = _lat
            temp['Longitude'] = _lon

            _harm_df = pd.merge(_harm_df.reset_index(drop=True),
                                temp,
                                on='Sensor_id',
                                how='left')

        data = json.loads(_harm_df.to_json(orient='records'))

        return data, 200
    except Exception as e:
        # Broad exception clause used to catch unexpected exceptions. This is
        # the end of the line: it stops the frontend from hanging when loading
        # widget data and logs unexpected exceptions raised while formatting
        # the data due to incorrect / missing database entries
        logger.log(
            logging.WARNING, "Unexpected exception raised during grouping "
            "of attribute data: {}".format(e))
        return [], 422
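
# Hedged dispatch sketch for the typed variant above: 'median', 'min' and
# 'max' select those statistics; anything else falls back to the mean, and
# the helper returns a (payload, status) pair. The payload reuses the
# sample_data sketched after the untyped request_grouped_data above;
# per_sensor=True would additionally need Sensor/Location records.
payload, status = request_grouped_data(sample_data, per_sensor=False,
                                       freq='1H', method='median')
assert status == 200
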
def send_alert_email(email: str, username: str, attribute: str,
                     value: Union[int, float], recorded_date: datetime,
                     sensor_id: str, threshold: Union[int, float],
                     alert_description: str) -> bool:
    """
    Send attribute alert email to a user
    :param email: User's email address
    :param username: User's name in the Users table
    :param attribute: Attribute name
    :param value: maximum value for the respective attribute
    :param recorded_date: Date which the maximum value was recorded
    :param sensor_id: ID of the sensor which recorded the value
    :param threshold: Value set by user to be checked
    :param alert_description: Describes whether an attribute has exceeded
                              or fallen short of the threshold value
    :return: Whether the alert email was sent
    """

    sensor = Sensor.get_by_id(sensor_id)
    sensor_lat, sensor_lon = None, None
    if sensor:
        loc = Location.get_by_id(sensor.l_id)
        if loc:
            sensor_lat = loc.lat
            sensor_lon = loc.lon
    else:
        logger.info("Could not include sensor information in alert "
                    "email as sensor with ID {} does not "
                    "exist".format(sensor_id))

    if alert_description == "exceeded":
        diff = value - threshold
    else:
        diff = threshold - value

    text_version = GetConfig.configure('alert', 'text_template').format(
        username=username,
        attribute=attribute,
        threshold=threshold,
        sensor_id=sensor_id,
        lat=sensor_lat,
        lon=sensor_lon,
        value=value,
        recorded_date=recorded_date,
        verb=alert_description,
        diff=diff)

    html_version = flask.render_template(GetConfig.configure(
        'alert', 'html_template'),
                                         username=username,
                                         attribute=attribute,
                                         threshold=threshold,
                                         sensor_id=sensor_id,
                                         lat=sensor_lat,
                                         lon=sensor_lon,
                                         value=value,
                                         recorded_date=recorded_date,
                                         verb=alert_description,
                                         diff=diff)

    sg = sendgrid.SendGridAPIClient(
        apikey=GetConfig.configure('sendgrid', 'api_key'))

    from_email = Email(GetConfig.configure('alert', 'sender_email'))
    to_email = Email(email)
    subject = GetConfig.configure('alert', 'email_subject')
    content_text = Content("text/plain", text_version)
    send_alert = Mail(from_email, subject, to_email, content_text)
    content_html = Content("text/html", html_version)
    send_alert.add_content(content_html)

    try:
        email_response = sg.client.mail.send.post(
            request_body=send_alert.get())
        logger.info("Sent alert email to {} with "
                    "response code : {}".format(email,
                                                email_response.status_code))
        return True
    except http.client.IncompleteRead as e:
        logger.error(
            "Sendgrid API Key may not be set correctly or may be "
            "invalid: %s", e)
        return False
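
# A hedged invocation sketch; every value below is illustrative and the call
# assumes valid SendGrid configuration plus seeded Sensor/Location tables.
from datetime import datetime

sent = send_alert_email(
    email="user@example.com",
    username="jdoe",
    attribute="air_temperature",
    value=31.4,
    recorded_date=datetime(2020, 7, 1, 14, 0),
    sensor_id="s-001",
    threshold=30.0,
    alert_description="exceeded",
)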