Example 1
def generic_testing(directory):
    end = int(
        DateTimeUtil.local_time_str_to_utc('2019/04/29 18:00:00').timestamp())

    # David
    start = int(
        DateTimeUtil.local_time_str_to_utc('2019/04/03 18:00:00').timestamp())
    testing_set('measured_david', start, end,
                '{0}/gt_david.csv'.format(directory))

    # Martin
    start = int(
        DateTimeUtil.local_time_str_to_utc('2019/04/01 18:00:00').timestamp())
    testing_set('measured_martin', start, end,
                '{0}/gt_martin.csv'.format(directory))

    # Peto: February, March, April
    start = int(
        DateTimeUtil.local_time_str_to_utc('2019/02/04 18:00:00').timestamp())
    testing_set('measured_filtered_peto', start, end,
                '{0}/gt_peto.csv'.format(directory))

    # Klarka
    start = int(
        DateTimeUtil.local_time_str_to_utc('2018/12/18 18:00:00').timestamp())
    testing_set('measured_klarka', start, end,
                '{0}/gt_klarka.csv'.format(directory))
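
Every example on this page relies on the DateTimeUtil helper, which is not itself shown. A minimal sketch of the two methods used here, assuming the '%Y/%m/%d %H:%M:%S' format seen throughout and the interpreter's local time zone (an assumption, not the project's actual code):

import datetime


class DateTimeUtil:
    @staticmethod
    def local_time_str_to_utc(time_str):
        # parse a local 'YYYY/MM/DD HH:MM:SS' string and attach the local
        # time zone; timestamp() on the result yields a UTC-based epoch
        local = datetime.datetime.strptime(time_str, '%Y/%m/%d %H:%M:%S')
        return local.astimezone()

    @staticmethod
    def utc_timestamp_to_str(timestamp, fmt):
        # render an epoch timestamp back into a local time string
        return datetime.datetime.fromtimestamp(timestamp).strftime(fmt)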
Example 2
    def additional_training_set(con,
                                table_name,
                                no_event_records,
                                func,
                                row_selector,
                                interval_selector,
                                print_each=10):
        """Supplementary generation of training data based on given time points.

        :param con:
        :param table_name: table name
        :param no_event_records: list of pairs for attribute generation
        :param func:
        :param row_selector:
        :param interval_selector:
        :return:
        """

        attrs = []
        for k, row in enumerate(no_event_records):

            if k % print_each == 0:
                logging.debug('{0}/{1} events'.format(k,
                                                      len(no_event_records)))

            if row[0] == '':
                logging.warning('empty row in additional sets')
                continue

            start = int(DateTimeUtil.local_time_str_to_utc(row[0]).timestamp())

            try:
                data1 = func(con, table_name, start, row_selector,
                             interval_selector)

                time = DateTimeUtil.utc_timestamp_to_str(
                    start, '%Y/%m/%d %H:%M:%S')
                data1.insert(0, ('datetime', time))
                data1.insert(1, ('event', row[1]))
                attrs.append(OrderedDict(data1))
            except Exception as e:
                logging.error(str(e))
                continue

        return attrs
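
The func callback is whatever attribute generator the caller supplies; additional_training_set only assumes it returns a list of (name, value) pairs to which 'datetime' and 'event' are prepended. A hypothetical stand-in illustrating the expected contract (not the project's real generator):

def dummy_attributes(con, table_name, timestamp, row_selector,
                     interval_selector):
    # hypothetical `func`: produce (name, value) attribute pairs for the
    # record measured at `timestamp`; the selectors are accepted but unused
    return [('co2_in_ppm', 455.0), ('temperature_in_celsius', 21.5)]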
Example 3
    def __read(self):
        event_types = {}

        with open(self.__filename, mode='r') as csv_file:
            csv_reader = csv.DictReader(csv_file, delimiter=',')
            for row in csv_reader:
                record = {
                    'datetime': int(DateTimeUtil.local_time_str_to_utc(
                        row['datetime']).timestamp()),
                    'readable': row['datetime'],
                    'event': row['event'],
                    'prediction': row['prediction(event)'],
                    'valid': row['valid']
                }

                if record['prediction'] == '':
                    record['valid'] = 'no'

                if row['valid'] == 'no':
                    self.count -= 1

                if record['prediction'] == '' and record['event'] == 'nothing':
                    continue

                self.__data.append(record)
                event_types[row['event']] = None

        self.count += len(self.__data)

        if len(event_types) == 2:
            if 'open' in event_types and 'nothing' in event_types:
                self.__event_type = 'open'
            elif 'close' in event_types and 'nothing' in event_types:
                self.__event_type = 'close'
            else:
                raise ValueError(
                    '%s must contain only 2 event types' % self.__filename)
        elif len(event_types) == 1 and 'nothing' in event_types:
            self.__event_type = 'open'
        else:
            raise ValueError(
                '%s must contain only 2 event types' % self.__filename)
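
__read expects a CSV with at least the four columns referenced above; the prediction(event) header matches RapidMiner-style prediction output. A hypothetical two-row input illustrating the shape:

datetime,event,prediction(event),valid
2018/12/18 18:05:00,open,open,yes
2018/12/18 18:07:00,nothing,,no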
Example 4
        interval = intervals[k]
        interval['avg'] = round(
            (interval['to'] - interval['from']) / 2 + interval['from'], 3)

    out = []
    with open(output_example_set, mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=',')
        for row in csv_reader:
            predicted_param = extract_interval(
                row['prediction(Regression_co2_in_ppm_before_0)'], default_min,
                default_max)

            record = {
                'datetime': int(DateTimeUtil.local_time_str_to_utc(
                    row['datetime']).timestamp()),
                'readable': row['datetime'],
                'event': extract_interval(row['Regression_co2_in_ppm_before_0'],
                                          default_min, default_max),
                'prediction': predicted_param,
                'measured': float(row['actual_value']),
                'co2_start': float(row['co2_start']),
            }

            out.append(record)
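
extract_interval is not shown in this example. Judging from its call sites and from the interval dictionaries used above, which carry 'from' and 'to' bounds, it parses a range label into numeric bounds, with the supplied defaults covering open ends. A heavily hedged sketch, assuming RapidMiner-style labels such as '[425.5 - 517.25]':

import re


def extract_interval(label, default_min, default_max):
    # hypothetical sketch: pull the numeric bounds out of a range label;
    # missing or unparsable bounds fall back to the supplied defaults
    numbers = [float(n) for n in re.findall(r'-?\d+(?:\.\d+)?', label)]
    low = numbers[0] if len(numbers) > 0 else default_min
    high = numbers[1] if len(numbers) > 1 else default_max
    return {'from': low, 'to': high}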
Example 5
    testing_set('measured_filtered_peto', start, end,
                '{0}/gt_peto.csv'.format(directory))

    # Klarka
    start = int(
        DateTimeUtil.local_time_str_to_utc('2018/12/18 18:00:00').timestamp())
    testing_set('measured_klarka', start, end,
                '{0}/gt_klarka.csv'.format(directory))


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')

    # table with CO2 values that never exceed the 2000 ppm limit
    table_name = 'measured_filtered_peto'

    directory = 'co2_t_h'
    if not os.path.isdir(directory):
        os.mkdir(directory)

    training_set('examples/events_peto.json', -500, table_name, directory)

    start = int(
        DateTimeUtil.local_time_str_to_utc('2018/10/07 06:00:00').timestamp())
    testing_set(table_name, start, start + 100,
                '{0}/testing.csv'.format(directory))

    # testing_month(table_name, start)
    # generic_testing(directory)
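
testing_set itself is defined elsewhere; its call sites suggest it exports the measurements between start and end into a CSV. A minimal hypothetical version built from the helpers visible in Example 7 (a sketch, not the project's implementation):

def testing_set(table_name, start, end, filename):
    # hypothetical sketch: dump the selected time window into a CSV file
    con = ConnectionUtil.create_con()
    rows = Storage.dw_columns_ordered(con, start, end,
                                      'measured_time,co2_in_ppm', table_name)
    CSVUtil.create_csv_file(rows, filename)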
Example 6
def main(events_file: str, start_shift: int, end_shift: int,
         output_filename: str, output_records: int):
    logging.info('start')
    graphs = Graph("./../../src/graph")

    # download data
    con = ConnectionUtil.create_con()
    storage = Storage(events_file, 0, 'measured_klarka')
    d = storage.load_data(con, start_shift, end_shift,
                          'temperature_in_celsius')
    logging.info('downloaded events: %d' % len(d))

    # apply filters to events
    filtered = FilterUtil.only_valid_events(d)
    filtered = FilterUtil.temperature_diff(filtered, 5, 100)
    filtered = FilterUtil.temperature_out_max(filtered, 15)
    filtered = FilterUtil.humidity(filtered, 6, 1.6, 100)

    min_timestamp = int(
        DateTimeUtil.local_time_str_to_utc('2018/11/01 00:01:00').timestamp())
    filtered = FilterUtil.min_timestamp(filtered, min_timestamp)

    filtered = FilterUtil.min_max_time_interval(filtered, 1440, 1620)

    # for Travis CI
    if ConnectionUtil.is_testable_system():
        filtered = filtered[:ConnectionUtil.MAX_TESTABLE_EVENTS]

    logging.info('events after applying the filter: %d' % len(filtered))

    # data for graph generation measured using sensor 1
    sensor1_events = filtered
    logging.info('event count: %d for sensor 1' % len(sensor1_events))
    linear_reg(sensor1_events, 'rh_in_specific_g_kg', 'linear1_sh')
    linear_reg(sensor1_events, 'rh_in_absolute_g_m3', 'linear1_ah')
    linear_reg(sensor1_events, 'temperature_in_celsius', 'linear1_temp')

    # graph generation - sensor 1
    logging.info('start generating graphs of events from sensor 1')
    graphs_sensor_1 = []
    for event in sensor1_events:
        graphs_sensor_1 += gen_graphs(event, output_records, [
            'rh_in_specific_g_kg', 'rh_in_absolute_g_m3',
            'temperature_in_celsius'
        ], ['linear1_sh', 'linear1_ah', 'linear1_temp'])

    graphs.gen(graphs_sensor_1,
               'sensor1_' + output_filename,
               0,
               0,
               global_range=True)
    logging.info('end generating graphs of events from sensor 1')

    # data for graph generation measured using sensor 2
    sensor2_events = filtered
    logging.info('event count: %d for sensor 2' % len(sensor2_events))

    sensor2_events = FilterUtil.measured_values_not_empty(
        sensor2_events, 'rh_in2_specific_g_kg')
    sensor2_events = FilterUtil.measured_values_not_empty(
        sensor2_events, 'rh_in2_absolute_g_m3')
    sensor2_events = FilterUtil.measured_values_not_empty(
        sensor2_events, 'temperature_in2_celsius')
    logging.info('events after applying the filter: %d' % len(sensor2_events))

    linear_reg(sensor2_events, 'rh_in2_specific_g_kg', 'linear2_sh')
    linear_reg(sensor2_events, 'rh_in2_absolute_g_m3', 'linear2_ah')
    linear_reg(sensor2_events, 'temperature_in2_celsius', 'linear2_temp')

    humidity_info_csv(sensor2_events, start_shift, end_shift)

    # graph generation - sensor 2
    logging.info('start generating graphs of events from sensor 2')
    graphs_sensor_2 = []
    for event in sensor2_events:
        graphs_sensor_2 += gen_graphs(event, output_records, [
            'rh_in2_specific_g_kg', 'rh_in2_absolute_g_m3',
            'temperature_in2_celsius'
        ], ['linear2_sh', 'linear2_ah', 'linear2_temp'])

    graphs.gen(graphs_sensor_2,
               'sensor2_' + output_filename,
               0,
               0,
               global_range=True)
    logging.info('end generating graphs of events from sensor 2')

    logging.info('end')
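
None of the FilterUtil helpers are shown in this example. A minimal sketch of one of them, assuming events are dictionaries shaped like the output of read_meta() in Example 9:

class FilterUtil:
    @staticmethod
    def min_timestamp(events, min_timestamp):
        # keep only events that start at or after the given epoch timestamp
        return [e for e in events
                if e['e_start']['timestamp'] >= min_timestamp]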
Example 7
    # minimum
    ax.set_ylim(min(y1) - 50, max(y1) + 50)
    ax.set_xlim(dt.datetime.fromtimestamp(raw_t[0] - 5),
                dt.datetime.fromtimestamp(raw_t[-1] + 5))

    filename = simple_graph.__name__ + '.eps'
    fig.canvas.set_window_title(filename)

    # make sure the X-axis label is shown even with a small figsize
    plt.tight_layout()

    fig.savefig(filename, bbox_inches='tight', pad_inches=0)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')

    con = ConnectionUtil.create_con()

    start = int(DateTimeUtil.local_time_str_to_utc('2018/10/07 06:00:00').timestamp())
    end = int(DateTimeUtil.local_time_str_to_utc('2018/10/07 09:00:00').timestamp())
    table_name = 'measured_filtered_peto'

    data = Storage.dw_columns_ordered(con, start, end,
                                      'measured_time,co2_in_ppm', table_name)
    CSVUtil.create_csv_file(data, 'test.csv')

    simple_graph('test.csv')

    # plt.show()
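
Only the tail of simple_graph appears above. A sketch of how its head might load the CSV produced by Storage.dw_columns_ordered, with the column names taken from the query string and everything else an assumption:

import csv
import datetime as dt

import matplotlib.pyplot as plt


def simple_graph(filename):
    # hypothetical head of the function: load the two exported columns
    # and plot CO2 against wall-clock time
    raw_t, y1 = [], []
    with open(filename) as f:
        for row in csv.DictReader(f):
            raw_t.append(int(row['measured_time']))
            y1.append(float(row['co2_in_ppm']))

    fig, ax = plt.subplots(figsize=(8, 3))
    ax.plot([dt.datetime.fromtimestamp(t) for t in raw_t], y1)
    ax.set_xlabel('time')
    ax.set_ylabel('CO2 [ppm]')
    # ... continues with the axis limits and savefig calls shown above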
Example 8
    logging.info('end')


def testing_month(table_name, start):
    month = 30 * 24 * 3600  # approximately one month, in seconds

    file_names = [
        '1_listopad.csv',
        '2_prosinec.csv',
        '3_leden.csv',
        '4_unor.csv',
    ]

    for file_name in file_names:
        testing_set(table_name, start, start + month, file_name)
        start += month


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')

    table_name = 'measured_klarka_shower'

    training_set('examples/events_klarka_shower.json', -500, table_name)

    start = int(DateTimeUtil.local_time_str_to_utc('2018/11/01 05:30:00').timestamp())
    testing_set(table_name, start, start + 100, 'testing.csv')
    # testing_month(table_name, start)
Example 9
    def read_meta(self):
        with open(self.__filename) as f:
            events = json.load(f)

        out = []
        for event in events['events']:
            # attributes that are common to all events
            attributes = {
                'e_start': {
                    'readable': event['times']['event_start'],
                    'timestamp': int(DateTimeUtil.local_time_str_to_utc(
                        event['times']['event_start']).timestamp())
                },
                'e_end': {
                    'readable': event['times']['event_end'],
                    'timestamp': int(DateTimeUtil.local_time_str_to_utc(
                        event['times']['event_end']).timestamp())
                },
                'measured': {
                    'pressure_in_hpa': [],
                    'temperature_in_celsius': [],
                    'temperature_in2_celsius': [],
                    'temperature_out_celsius': [],
                    'rh_in_percentage': [],
                    'rh_in_absolute_g_m3': [],
                    'rh_in_specific_g_kg': [],
                    'rh_in2_percentage': [],
                    'rh_in2_absolute_g_m3': [],
                    'rh_in2_specific_g_kg': [],
                    'rh_out_percentage': [],
                    'rh_out_absolute_g_m3': [],
                    'rh_out_specific_g_kg': [],
                    'co2_in_ppm': []
                },
                'derivation': {
                    'after': [],
                    'before': [],
                    'no_event_after': [],
                    'no_event_before': []
                },
                'no_event_time_shift': self.__no_event_time_shift,
                'no_event_values': None,
                'valid_event': True
            }

            start = attributes['e_start']['timestamp']
            end = attributes['e_end']['timestamp']
            attributes['event_duration'] = end - start

            # add the attributes that are specific to the given JSON file;
            # the individual JSON items are simply copied over
            for key, value in event.items():
                if key in ['times', 'devices']:
                    continue
                attributes[key] = value

            out.append(attributes)

        return out
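
read_meta only dereferences a handful of keys; everything outside 'times' and 'devices' is copied into the output verbatim. A hypothetical minimal events file it would accept (the 'window' key and the devices content are illustrative only):

{
    "events": [
        {
            "times": {
                "event_start": "2018/11/01 05:30:00",
                "event_end": "2018/11/01 05:45:00"
            },
            "devices": {},
            "window": "open"
        }
    ]
}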