Example #1
def main():
    """The main function"""
    # Setup logger and live socket
    codenames = [
        'work_estimate', 'work_left', 'work_left_minus_started',
        'work_completed', 'work_total'
    ]
    logger = ContinuousDataSaver(
        'dateplots_hall',
        credentials_hall.username,
        credentials_hall.password,
        codenames
    )
    logger.start()
    livesocket = LiveSocket('workday', codenames)
    livesocket.start()

    try:
        while True:
            # Get board status
            card_counts, times, high_scores = get_board_status()
            print('Card counts')
            pprint(card_counts)
            print('\nTimes')
            pprint(times)
            print('\nHigh Scores')
            pprint(high_scores)

            # format highscore
            highscore_str = 'High Score\n----------'
            if high_scores:
                largest_name = max(len(name) for name, value in high_scores)
            for name, value in high_scores:
                highscore_str += '\n{{: <{}}}: {{:.2f}}'.format(largest_name).format(name, value)

            # Total work 80 and 10 people
            total_work = 66.33
            estimate = max(total_work - since_9am() * 10, 0)
            batch = {
                'work_estimate': estimate,
                'work_left': times['ToDo'] + times['In Progress'],
                'work_left_minus_started': times['ToDo'],
                'work_completed': times['Done'],
                'work_total': sum(times.values()),
                'work_highscore': highscore_str,
            }
            # Send to live socket and database
            print('\n##########\nbatch')
            pprint(batch)
            livesocket.set_batch_now(batch)

            batch.pop('work_highscore')
            for codename, value in batch.items():
                logger.save_point_now(codename, value)

            print('Sent', datetime.datetime.now())
            sleep(600)
    except KeyboardInterrupt:
        livesocket.stop()
        logger.stop()
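
The examples above and below all use the same ContinuousDataSaver lifecycle: construct it with a dateplots table, database credentials and a list of codenames, call start(), push values with save_point_now() (which stamps the value with the current time) or save_point() (which takes an explicit (unix_time, value) point), and call stop() on shutdown. The following is a minimal sketch of that lifecycle, not code taken from any of the examples; the import path assumes the usual PyExpLabSys layout, and the table name, credentials and codename are placeholders.

import math
import time

from PyExpLabSys.common.database_saver import ContinuousDataSaver

# Placeholder table, credentials and codename -- substitute your own setup
db_saver = ContinuousDataSaver('dateplots_dummy', 'user', 'password',
                               ['dummy_sine_one'])
db_saver.start()

try:
    while True:
        # save_point_now attaches the current time; save_point('dummy_sine_one',
        # (time.time(), value)) would attach an explicit timestamp instead
        db_saver.save_point_now('dummy_sine_one', math.sin(time.time()))
        time.sleep(60)
except KeyboardInterrupt:
    db_saver.stop()
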
Example #2
def main():
    """Main function"""
    pvci = PVCi('/dev/serial/by-id/'
                'usb-FTDI_USB-RS485_Cable_FTY3M2GN-if00-port0')

    # Start live socket
    live_socket = LiveSocket(
        'thetaprobe_pvci',
        list(CODENAME_TRANSLATION.keys()) + list(EXTRA_LIVE.keys()),
    )
    live_socket.start()

    # Start database saver
    database_saver = ContinuousDataSaver('dateplots_thetaprobe',
                                         credentials.USERNAME,
                                         credentials.PASSWORD,
                                         list(CODENAME_TRANSLATION.keys()))
    database_saver.start()

    # Set up criterium checker
    criterium_checker = LoggingCriteriumChecker(
        codenames=list(CODENAME_TRANSLATION.keys()),
        types=['log', 'log', 'lin'],
        criteria=[0.1, 0.1, 1.0],
    )

    try:
        run(pvci, live_socket, database_saver, criterium_checker)
    except KeyboardInterrupt:
        pvci.close()
        live_socket.stop()
        database_saver.stop()
Example #3
def main():
    """Main function"""
    pvci = PVCi('/dev/serial/by-id/'
                'usb-FTDI_USB-RS485_Cable_FTY3M2GN-if00-port0')

    # Start live socket
    live_socket = LiveSocket('thetaprobe_pvci', list(CODENAME_TRANSLATION.keys()),
                             sane_interval=0.5)
    live_socket.start()

    # Start database saver
    database_saver = ContinuousDataSaver(
        'dateplots_thetaprobe', credentials.USERNAME,
        credentials.PASSWORD, list(CODENAME_TRANSLATION.keys())
    )
    database_saver.start()

    # Set up criterium checker
    criterium_checker = LoggingCriteriumChecker(
        codenames=list(CODENAME_TRANSLATION.keys()),
        types=['log', 'log', 'lin'],
        criteria=[0.1, 0.1, 1.0],
    )


    try:
        run(pvci, live_socket, database_saver, criterium_checker)
    except KeyboardInterrupt:
        pvci.close()
        live_socket.stop()
        database_saver.stop()
Example #4
def main():
    """Main function """
    pvci = epimax.PVCi(
        '/dev/serial/by-id/'
        'usb-FTDI_USB-RS485_Cable_FT0N0UFX-if00-port0',
        slave_address=1)

    # Start live socket
    live_socket = LiveSocket(
        'omicron_pvci',
        list(CODENAME_TRANSLATION.keys()),
    )
    live_socket.start()

    # Start pull socket
    pullsocket = DateDataPullSocket(
        'omicron_pvci_pull',
        list(CODENAME_TRANSLATION.keys()),
        timeouts=2.5,
    )
    pullsocket.start()

    # Start database saver
    database_saver = ContinuousDataSaver(
        'dateplots_omicron',
        credentials.USERNAME,
        credentials.PASSWORD,
        list(CODENAME_TRANSLATION.keys()),
    )
    database_saver.start()

    # Criterium checker
    criterium_checker = LoggingCriteriumChecker(
        codenames=list(CODENAME_TRANSLATION.keys()),
        types=['log'] * len(CODENAME_TRANSLATION.keys()),
        criteria=[0.05] * len(CODENAME_TRANSLATION.keys()),
        time_outs=[300] * len(CODENAME_TRANSLATION.keys()),
    )

    # Main loop
    try:
        run(pvci, live_socket, pullsocket, database_saver, criterium_checker)
    except KeyboardInterrupt:
        sleep(WAIT)
        pvci.close()
        live_socket.stop()
        pullsocket.stop()
        database_saver.stop()
    except:
        LOG.exception("Omicron pvci data logger stopped")
Example #5
    def test_enqueue_point(self):
        """Test the continous logger by sending data with timestamps"""
        # Make timestamps to use
        number_of_points = 10
        start = time.time()
        times = [start + increment for increment in range(number_of_points)]

        # Form the lists for local storage of data, for tests
        data1 = []
        data2 = []

        # Init the db_saver
        db_saver = ContinuousDataSaver('dateplots_dummy', 'dummy', 'dummy',
                                       ['dummy_sine_one', 'dummy_sine_two'])
        db_saver.start()

        # Save the points
        for now in times:
            value = math.sin(now)
            data1.append([now, value])
            db_saver.save_point('dummy_sine_one', (now, value))
            value = math.sin(now + math.pi)
            data2.append([now, value])
            db_saver.save_point('dummy_sine_two', (now, value))

        # Make sure the queue has been cleared
        while db_saver.sql_saver.queue.qsize() > 0:
            time.sleep(0.01)

        # Get the measurement code numbers from the logger
        codes = (db_saver.codename_translation['dummy_sine_one'],
                 db_saver.codename_translation['dummy_sine_two'])

        # Check if the data has been properly written to the db
        for data, code in zip((data1, data2), codes):
            query = 'SELECT UNIX_TIMESTAMP(time), value FROM dateplots_dummy '\
                'WHERE type={} ORDER BY id DESC LIMIT {}'\
                    .format(code, number_of_points)
            CURSOR.execute(query)
            fetched = reversed(CURSOR.fetchall())
            for point_original, point_control in zip(data, fetched):
                # Time is rounded, so it is only correct to within ~0.51 s
                assert np.isclose(point_original[0],
                                  point_control[0],
                                  atol=0.51)
                assert np.isclose(point_original[1], point_control[1])

        db_saver.stop()
Example #6
def main():
    """The main function"""
    # Setup logger and live socket
    codenames = [
        'work_estimate', 'work_left', 'work_left_minus_started',
        'work_completed', 'work_total'
    ]
    logger = ContinuousDataSaver(
        'dateplots_hall',
        credentials_hall.username,
        credentials_hall.password,
        codenames
    )
    logger.start()

    try:
        while True:
            # Get board status
            #card_counts, times, high_scores = get_board_status()
            card_counts, times = get_board_status()
            print('Card counts')
            pprint(card_counts)
            print('\nTimes')
            pprint(times)

            # Total work 80 and 14 people
            total_work = 66.337
            estimate = max(total_work - since_9am() * 14, 0)
            batch = {
                'work_estimate': estimate,
                'work_left': times['ToDo'] + times['In Progress'],
                'work_left_minus_started': times['ToDo'],
                'work_completed': times['Done'],
                'work_total': sum(times.values()),
            }
            # Send to database
            print('\n##########\nbatch')
            #pprint(batch)

            for codename, value in batch.items():
                #print(codename, value)
                logger.save_point_now(codename, value)

            print('Sent', datetime.datetime.now())
            sleep(120)
    except (KeyboardInterrupt, ZeroDivisionError):
        print("Done")
        logger.stop()
Example #7
    def test_enqueue_point(self):
        """Test the continous logger by sending data with timestamps"""
        # Make timestamps to use
        number_of_points = 10
        start = time.time()
        times = [start + increment for increment in range(number_of_points)]

        # Form the lists for local storage of data, for tests
        data1 = []
        data2 = []

        # Init the db_saver
        db_saver = ContinuousDataSaver("dateplots_dummy", "dummy", "dummy", ["dummy_sine_one", "dummy_sine_two"])
        db_saver.start()

        # Save the points
        for now in times:
            value = math.sin(now)
            data1.append([now, value])
            db_saver.save_point("dummy_sine_one", (now, value))
            value = math.sin(now + math.pi)
            data2.append([now, value])
            db_saver.save_point("dummy_sine_two", (now, value))

        # Make sure the queue has been cleared
        while db_saver.sql_saver.queue.qsize() > 0:
            time.sleep(0.01)

        # Get the measurement code numbers from the logger
        codes = (db_saver.codename_translation["dummy_sine_one"], db_saver.codename_translation["dummy_sine_two"])

        # Check if the data has been properly written to the db
        for data, code in zip((data1, data2), codes):
            query = (
                "SELECT UNIX_TIMESTAMP(time), value FROM dateplots_dummy "
                "WHERE type={} ORDER BY id DESC LIMIT {}".format(code, number_of_points)
            )
            CURSOR.execute(query)
            fetched = reversed(CURSOR.fetchall())
            for point_original, point_control in zip(data, fetched):
                # Time is rounded, so it is only correct to within ~0.51 s
                assert np.isclose(point_original[0], point_control[0], atol=0.51)
                assert np.isclose(point_original[1], point_control[1])

        db_saver.stop()
Example #8
def main():
    """The main function"""
    # Setup logger and live socket
    codenames = [
        'work_estimate', 'work_left', 'work_left_minus_started',
        'work_completed', 'work_total'
    ]
    logger = ContinuousDataSaver('dateplots_hall', credentials_hall.username,
                                 credentials_hall.password, codenames)
    logger.start()

    try:
        while True:
            # Get board status
            #card_counts, times, high_scores = get_board_status()
            card_counts, times = get_board_status()
            print('Card counts')
            pprint(card_counts)
            print('\nTimes')
            pprint(times)

            # Total work 80 and 14 people
            total_work = 66.337
            estimate = max(total_work - since_9am() * 14, 0)
            batch = {
                'work_estimate': estimate,
                'work_left': times['ToDo'] + times['In Progress'],
                'work_left_minus_started': times['ToDo'],
                'work_completed': times['Done'],
                'work_total': sum(times.values()),
            }
            # Send to database
            print('\n##########\nbatch')
            #pprint(batch)

            for codename, value in batch.items():
                #print(codename, value)
                logger.save_point_now(codename, value)

            print('Sent', datetime.datetime.now())
            sleep(120)
    except (KeyboardInterrupt, ZeroDivisionError):
        print("Done")
        logger.stop()
Example #9
def main():
    LOGGER.info('main started')
    cni = CNi3244_C24(5)
    socket = DateDataPullSocket(FULL_NAME, [SHORT_NAME], timeouts=1.0)
    socket.start()
    db_logger = ContinuousDataSaver(
        continuous_data_table='dateplots_tower',
        username='******',
        password='******',
        measurement_codenames=[NAME],
    )
    db_logger.start()
    time.sleep(0.1)

    # Main part
    try:
        main_measure_loop(cni, socket, db_logger)
    except KeyboardInterrupt:
        LOGGER.info('Keyboard Interrupt. Shutting down!')
        db_logger.stop()
        cni.close()
        socket.stop()
Example #10
    db_logger = ContinuousDataSaver(
        'dateplots_omicron',
        credentials.user,
        credentials.passwd,
        codenames)
    db_logger.daemon = True
    db_logger.start()
    print('Starting database logger')
    time.sleep(1)

    run = True
    t0 = time.time()
    while run:
        try:
            time.sleep(1)
            for codename in codenames:
                value = reader.values[codename]
                if value is None:
                    print('NONE encountered - check equipment!')
                else:
                    if criterium_checker.check(codename, value):
                        print(codename, value, (time.time() - t0)/60.)
                        db_logger.save_point_now(codename, value)
        except:
            print('Stopping everything:')
            run = False
            reader.stop()
            db_logger.stop()
            print('Everything is stopped!')
            raise
Example #11
def main():
    """The main function"""
    # Setup logger and live socket
    codenames = [
        'work_estimate', 'work_left', 'work_left_minus_started',
        'work_completed', 'work_total'
    ]
    logger = ContinuousDataSaver(
        'dateplots_hall',
        credentials_hall.username,
        credentials_hall.password,
        codenames
    )
    logger.start()
    livesocket = LiveSocket('workday', codenames)
    livesocket.start()

    try:
        while True:
            # Get board status
            card_counts, times, high_scores = get_board_status()
            print('Card counts')
            pprint(card_counts)
            print('\nTimes')
            pprint(times)
            print('\nHigh Scores')
            pprint(high_scores)

            # format highscore
            highscore_str = 'High Score\n----------'
            if high_scores:
                largest_name = max(len(name) for name, value in high_scores)
            for name, value in high_scores:
                highscore_str += '\n{{: <{}}}: {{:.2f}}'.\
                                 format(largest_name).format(name, value)

            # Total work 80 and 10 people
            total_work = 66.33
            estimate = max(total_work - since_9am() * 10, 0)
            batch = {
                'work_estimate': estimate,
                'work_left': times['ToDo'] + times['In Progress'],
                'work_left_minus_started': times['ToDo'],
                'work_completed': times['Done'],
                'work_total': sum(times.values()),
                'work_highscore': highscore_str,
            }
            # Send to live socket and database
            print('\n##########\nbatch')
            pprint(batch)
            livesocket.set_batch_now(batch)

            batch.pop('work_highscore')
            for codename, value in batch.items():
                logger.save_point_now(codename, value)

            print('Sent', datetime.datetime.now())
            sleep(600)
    except KeyboardInterrupt:
        livesocket.stop()
        logger.stop()
Example #12
        time_outs=[600, 600],
    )

    db_logger = ContinuousDataSaver('dateplots_omicron', credentials.user,
                                    credentials.passwd, codenames)
    db_logger.daemon = True
    db_logger.start()
    print('Starting database logger')
    time.sleep(1)

    run = True
    t0 = time.time()
    while run:
        try:
            time.sleep(1)
            for codename in codenames:
                value = reader.values[codename]
                if value is None:
                    print('NONE encountered - check equipment!')
                else:
                    if criterium_checker.check(codename, value):
                        print(codename, value, (time.time() - t0) / 60.)
                        db_logger.save_point_now(codename, value)
        except:
            print('Stopping everything:')
            run = False
            reader.stop()
            db_logger.stop()
            print('Everything is stopped!')
            raise
Example #13
    def test_save_point_now(self):
        """Test the continous logger by sending data without timestamps"""
        # The save_point_now method uses time.time to get a unix timestamp to attach. However,
        # since points are only saved with a seconds precision, testing multiple points would
        # take multiple seconds. To avoid this, we mock all the calls to time.
        number_of_points = 10
        start = time.time() - number_of_points
        # Form times 1 second apart, starting number_of_points seconds ago
        times = [start + increment for increment in range(number_of_points)]
        # For return values of time, we need each of them twice, because we store two datasets
        double_times = []
        for increment in range(number_of_points):
            double_times.append(start + increment)
            double_times.append(start + increment)

        # Init lists for local storage of the data
        data1 = []
        data2 = []

        # Init continuous database saver
        db_saver = ContinuousDataSaver('dateplots_dummy', 'dummy', 'dummy',
                                       ['dummy_sine_one', 'dummy_sine_two'])
        db_saver.start()

        def mytime():
            """Replacement function for time"""
            return double_times.pop(0)

        with mock.patch('time.time') as mock_time:
            mock_time.side_effect = mytime
            for now in times:
                # The data points are just sines of the unix timestamp
                value = math.sin(now)
                data1.append([now, value])
                db_saver.save_point_now('dummy_sine_one', value)

                value = math.sin(now + math.pi)
                data2.append([now, value])
                db_saver.save_point_now('dummy_sine_two', value)
            assert mock_time.call_count == number_of_points * 2

        # Make sure all points have been saved
        while db_saver.sql_saver.queue.qsize() > 0:
            time.sleep(0.01)

        # Get the measurement code numbers from the saver
        codes = (db_saver.codename_translation['dummy_sine_one'],
                 db_saver.codename_translation['dummy_sine_two'])

        # Check if the data has been properly written to the db
        for data, code in zip((data1, data2), codes):
            # Get the last number_of_points points for this code
            query = 'SELECT UNIX_TIMESTAMP(time), value FROM dateplots_dummy '\
                    'WHERE type={} ORDER BY id DESC LIMIT {}'\
                    .format(code, number_of_points)
            CURSOR.execute(query)
            # Reverse the points to get oldest first
            fetched = reversed(CURSOR.fetchall())
            for point_original, point_control in zip(data, fetched):
                # Times are rounded to integers, so it should just be a difference
                # of less than ~0.51 seconds
                assert np.isclose(point_original[0],
                                  point_control[0],
                                  atol=0.51)
                assert np.isclose(point_original[1], point_control[1])

        db_saver.stop()
Example #14
    def test_save_point_now(self):
        """Test the continous logger by sending data without timestamps"""
        # The save_point_now method uses time.time to get a unix timestamp to attach. However,
        # since points are only saved with a seconds precision, testing multiple points would
        # take multiple seconds. To avoid this, we mock all the calls to time.
        number_of_points = 10
        start = time.time() - number_of_points
        # Form times 1 second apart, starting number_of_points seconds ago
        times = [start + increment for increment in range(number_of_points)]
        # For return values of time, we need each of them twice, because we store two datasets
        double_times = []
        for increment in range(number_of_points):
            double_times.append(start + increment)
            double_times.append(start + increment)

        # Init lists for local storage of the data
        data1 = []
        data2 = []

        # Init continuous database saver
        db_saver = ContinuousDataSaver("dateplots_dummy", "dummy", "dummy", ["dummy_sine_one", "dummy_sine_two"])
        db_saver.start()

        def mytime():
            """Replacement function for time"""
            return double_times.pop(0)

        with mock.patch("time.time") as mock_time:
            mock_time.side_effect = mytime
            for now in times:
                # The data points are just sines of the unix timestamp
                value = math.sin(now)
                data1.append([now, value])
                db_saver.save_point_now("dummy_sine_one", value)

                value = math.sin(now + math.pi)
                data2.append([now, value])
                db_saver.save_point_now("dummy_sine_two", value)
            assert mock_time.call_count == number_of_points * 2

        # Make sure all points have been saved
        while db_saver.sql_saver.queue.qsize() > 0:
            time.sleep(0.01)

        # Get the measurement code numbers from the saver
        codes = (db_saver.codename_translation["dummy_sine_one"], db_saver.codename_translation["dummy_sine_two"])

        # Check if the data has been properly written to the db
        for data, code in zip((data1, data2), codes):
            # Get the last number_of_points points for this code
            query = (
                "SELECT UNIX_TIMESTAMP(time), value FROM dateplots_dummy "
                "WHERE type={} ORDER BY id DESC LIMIT {}".format(code, number_of_points)
            )
            CURSOR.execute(query)
            # Reverse the points to get oldest first
            fetched = reversed(CURSOR.fetchall())
            for point_original, point_control in zip(data, fetched):
                # Times are rounded to integers, so it should just be a difference
                # of less than ~0.51 seconds
                assert np.isclose(point_original[0], point_control[0], atol=0.51)
                assert np.isclose(point_original[1], point_control[1])

        db_saver.stop()
Example #15
def main():
    """ Main function """
    log = get_logger('pressure readout', level='debug')
    #logging.basicConfig(filename="logger.txt", level=logging.ERROR)
    #logging.basicConfig(level=logging.ERROR)

    port = '/dev/serial/by-id/usb-Prolific_Technology_Inc._USB-Serial_Controller_D-if00-port'
    port = '/dev/ttyUSB0'
    codenames = [
        'xrd_pressure_turbo_gas_system', 'xrd_pressure_gas_system_wrg',
        'xrd_pressure_mass_spec_wrg', 'xrd_pressure_gas_system_baratron'
    ]
    reader = PressureReader(port)
    reader.daemon = True
    reader.start()

    loggers = {}
    loggers[codenames[0]] = ValueLogger(reader,
                                        comp_val=0.02,
                                        comp_type='log',
                                        channel=1)
    loggers[codenames[1]] = ValueLogger(reader,
                                        comp_val=0.02,
                                        comp_type='log',
                                        channel=2)
    loggers[codenames[2]] = ValueLogger(reader,
                                        comp_val=0.02,
                                        comp_type='log',
                                        channel=3)
    loggers[codenames[3]] = ValueLogger(reader,
                                        comp_val=2,
                                        comp_type='lin',
                                        channel=4)

    for i in range(0, 4):
        loggers[codenames[i]].start()

    socket = DateDataPullSocket('XRD Pressure',
                                codenames,
                                timeouts=[2.0] * len(codenames))
    socket.start()
    log.info('DateDataPullSocket started')
    live_socket = LiveSocket('XRD pressure', codenames)
    live_socket.start()
    log.info('LiveSocket started')

    db_logger = ContinuousDataSaver(continuous_data_table='dateplots_xrd',
                                    username=credentials.user,
                                    password=credentials.passwd,
                                    measurement_codenames=codenames)
    db_logger.start()
    log.info('ContinuousDataSaver started')

    time.sleep(5)

    try:
        while True:
            time.sleep(0.25)
            for name in codenames:
                value = loggers[name].read_value()
                log.debug('Read codename %s value %s', name, value)
                socket.set_point_now(name, value)
                live_socket.set_point_now(name, value)
                if loggers[name].read_trigged():
                    log.debug('Saved codename %s value %s', name, value)
                    db_logger.save_point_now(name, value)
                    loggers[name].clear_trigged()
    except KeyboardInterrupt:
        log.info('Stopping everything and waiting 5 s')
        socket.stop()
        live_socket.stop()
        db_logger.stop()
        time.sleep(5)
        log.info('Everything stopped, bye!')
    except Exception:
        # Unexpected exception, log it
        log.exception('Unexpected exception during main loop')
        raise
Example #16
class GasAlarmMonitor(object):
    """Class that monitors the gas alarm the building 312"""
    def __init__(self):
        # Start logger
        codenames = list(CONF_TO_NAME.values())
        self.db_saver = ContinuousDataSaver(
            continuous_data_table='dateplots_b312gasalarm',
            username=credentials.USERNAME,
            password=credentials.PASSWORD,
            measurement_codenames=codenames,
        )
        self.db_saver.start()
        LOGGER.info('Logger started')

        # Init live socket
        self.live_socket = LiveSocket(name='gas_alarm_312_live',
                                      codenames=codenames)
        self.live_socket.start()
        LOGGER.info('Live socket started')

        # Start driver
        self.vortex = Vortex(
            '/dev/serial/by-id/usb-FTDI_USB-RS485_Cable_FTWGRKRA-if00-port0',
            1)
        LOGGER.info('Vortex driver opened')

        # Init database connection
        self.db_connection = MySQLdb.connect(host='servcinf-sql',
                                             user=credentials.USERNAME,
                                             passwd=credentials.PASSWORD,
                                             db='cinfdata')
        self.db_cursor = self.db_connection.cursor()

        # Initiate static information. All information about the detectors,
        # except for the list of their numbers, is placed in dicts because the
        # numbering starts at 1.
        # Detector numbers: [1, 2, 3, ..., 12]
        self.detector_numbers = range(
            1,
            self.vortex.get_number_installed_detectors() + 1)
        self.detector_info = {
            detector_num: self.vortex.detector_configuration(detector_num)
            for detector_num in self.detector_numbers
        }
        # trip_levels are the differences that are required to force a log
        # The levels are set to 2 * the communication resolution
        # (1000 values / full range)
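        # E.g. a detector with a full range of 100 gets a trip level of
        # 100 * 2.0 / 1000.0 = 0.2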
        self.trip_levels = {
            detector_num: info.range * 2.0 / 1000.0
            for detector_num, info in self.detector_info.items()
        }

        # Initiate last measured values and their corresponding times
        self.detector_levels_last_values = \
            {detector_num: - (10 ** 9)
             for detector_num in self.detector_numbers}
        self.detector_levels_last_times = \
            {detector_num: 0 for detector_num in self.detector_numbers}
        self.detector_status_last_values = \
            {detector_num: {'inhibit': False, 'status': ['OK'],
                            'codename': self.detector_info[detector_num].identity}
             for detector_num in self.detector_numbers}
        self.detector_status_last_times = \
            {detector_num: 0 for detector_num in self.detector_numbers}

        # Initiate variables for system power status
        self.central_power_status_last_value = 'OK'
        self.central_power_status_last_time = -(10**9)

        # Initiate variables for system status
        self.central_status_last_value = ['All OK']
        self.central_status_last_time = 0

    def close(self):
        """Close the logger and the connection to the Vortex"""
        self.db_saver.stop()
        LOGGER.info('Logger stopped')
        self.live_socket.stop()
        LOGGER.info('Live socket stopped')
        self.vortex.close()
        LOGGER.info('Vortex driver closed')

    @staticmethod
    def conf_to_codename(conf):
        """Convert the identity the sensor returns to the codename used in the
        database
        """
        conf = '{conf.number} {conf.identity} {conf.unit}'.format(conf=conf)
        return CONF_TO_NAME[conf]

    def main(self):
        """Main monitoring and logging loop"""
        # Each iteration takes about 5 sec
        while True:
            # Log detectors
            for detector_num in self.detector_numbers:
                self.log_detector(detector_num)

            # Log Vortex unit status (force log every 24 hours)
            self.log_central_unit()

    def log_detector(self, detector_num):
        """Get the levels from one detector and log if required"""
        # Get detector info and levels for this detector
        conf = self.detector_info[detector_num]
        codename = self.conf_to_codename(conf)
        LOGGER.debug('Use detector {} \'{}\''.format(detector_num, codename))
        levels = self.vortex.get_detector_levels(detector_num)
        LOGGER.debug('Levels read: {}'.format(levels))

        # Detector level
        now = time.time()
        # Always send to live socket
        self.live_socket.set_point_now(codename, levels.level)
        # Force log every 10 minutes (600 s)
        time_condition = now - self.detector_levels_last_times[
            detector_num] > 600
        value_condition = abs(self.detector_levels_last_values[detector_num] - levels.level)\
                          >= self.trip_levels[detector_num]
        if time_condition or value_condition:
            LOGGER.debug('Send level to db trigged in time: {} or value: '
                         '{}'.format(time_condition, value_condition))
            self.db_saver.save_point(codename, (now, levels.level))
            # Update last values
            self.detector_levels_last_values[detector_num] = levels.level
            self.detector_levels_last_times[detector_num] = now
        else:
            LOGGER.debug('Level logging condition false')

        self.log_detector_status(detector_num, levels, conf)

    def log_detector_status(self, detector_num, levels, conf):
        """Sub function to log single detector status"""
        now = time.time()
        # Force log every 24 hours
        time_condition = now - self.detector_status_last_times[
            detector_num] > 86400
        status = {
            'inhibit': levels.inhibit,
            'status': levels.status,
            'codename': conf.identity
        }
        value_condition = (status !=
                           self.detector_status_last_values[detector_num])

        # Check if we should log
        if time_condition or value_condition:
            check_in = time_condition and not value_condition
            LOGGER.info('Send detector status to db trigged on time: {} or '
                        'value: {}'.format(time_condition, value_condition))
            query = 'INSERT INTO status_b312gasalarm '\
                '(time, device, status, check_in) '\
                'VALUES (FROM_UNIXTIME(%s), %s, %s, %s);'
            values = (now, detector_num, json.dumps(status), check_in)
            self._wake_mysql()
            self.db_cursor.execute(query, values)
            # Update last values
            self.detector_status_last_times[detector_num] = now
            self.detector_status_last_values[detector_num] = status
        else:
            LOGGER.debug('Detector status logging condition false')

    def log_central_unit(self):
        """Log the status of the central unit"""
        power_status = self.vortex.get_system_power_status().value
        now = time.time()
        # Force a log once per 24 hours
        time_condition = now - self.central_power_status_last_time > 86400
        value_condition = self.central_power_status_last_value != power_status
        LOGGER.debug('Read central power status: \'{}\''.format(power_status))
        if time_condition or value_condition:
            check_in = time_condition and not value_condition
            LOGGER.info('Send power status to db trigged in time: {} or '
                        'value: {}'.format(time_condition, value_condition))
            query = 'INSERT INTO status_b312gasalarm '\
                '(time, device, status, check_in) '\
                'VALUES (FROM_UNIXTIME(%s), %s, %s, %s);'
            values = (now, 255, json.dumps(power_status), check_in)
            self._wake_mysql()
            self.db_cursor.execute(query, values)
            # Update last values
            self.central_power_status_last_time = now
            self.central_power_status_last_value = power_status
        else:
            LOGGER.debug('Power status logging condition false')

        self.log_central_unit_generel()

    def log_central_unit_generel(self):
        """Log the generel status from the central"""
        generel_status = self.vortex.get_system_status()
        now = time.time()
        # Force a log once per 24 hours
        time_condition = now - self.central_status_last_time > 86400
        value_condition = generel_status != self.central_status_last_value
        LOGGER.debug(
            'Read central generel status: \'{}\''.format(generel_status))
        if time_condition or value_condition:
            check_in = time_condition and not value_condition
            LOGGER.info('Send central generel status to db trigged in time: {}'
                        ' or value: {}'.format(time_condition,
                                               value_condition))
            query = 'INSERT INTO status_b312gasalarm '\
                '(time, device, status, check_in) '\
                'VALUES (FROM_UNIXTIME(%s), %s, %s, %s);'
            values = (now, 254, json.dumps(generel_status), check_in)
            self._wake_mysql()
            self.db_cursor.execute(query, values)
            # Update last values
            self.central_status_last_time = now
            self.central_status_last_value = generel_status
        else:
            LOGGER.debug('Central generel status logging condition false')

    def _wake_mysql(self):
        """Send a ping via the connection and re-initialize the cursor"""
        self.db_connection.ping(True)
        self.db_cursor = self.db_connection.cursor()
Example #17
class GasAlarmMonitor(object):
    """Class that monitors the gas alarm the building 312"""

    def __init__(self):
        # Start logger
        codenames = list(CONF_TO_NAME.values())
        self.db_saver = ContinuousDataSaver(
            continuous_data_table='dateplots_b312gasalarm',
            username=credentials.USERNAME,
            password=credentials.PASSWORD,
            measurement_codenames=codenames,
        )
        self.db_saver.start()
        LOGGER.info('Logger started')

        # Init live socket
        self.live_socket = LiveSocket(name='gas_alarm_312_live',
                                      codenames=codenames)
        self.live_socket.start()
        LOGGER.info('Live socket started')

        # Start driver
        self.vortex = Vortex('/dev/serial/by-id/usb-FTDI_USB-RS485_Cable_FTWGRKRA-if00-port0', 1)
        LOGGER.info('Vortex driver opened')

        # Init database connection
        self.db_connection = MySQLdb.connect(
            host='servcinf-sql', user=credentials.USERNAME,
            passwd=credentials.PASSWORD, db='cinfdata')
        self.db_cursor = self.db_connection.cursor()

        # Initiate static information. All information about the detectors,
        # except for the list of their numbers, is placed in dicts because the
        # numbering starts at 1.
        # Detector numbers: [1, 2, 3, ..., 12]
        self.detector_numbers = range(1, self.vortex.get_number_installed_detectors() + 1)
        self.detector_info = {detector_num: self.vortex.detector_configuration(detector_num)
                              for detector_num in self.detector_numbers}
        # trip_levels are the differences that are required to force a log
        # The levels are set to 2 * the communication resolution
        # (1000 values / full range)
        self.trip_levels = {detector_num: info.range * 2.0 / 1000.0 for
                            detector_num, info in self.detector_info.items()}

        # Initiate last measured values and their corresponding times
        self.detector_levels_last_values = \
            {detector_num: - (10 ** 9)
             for detector_num in self.detector_numbers}
        self.detector_levels_last_times = \
            {detector_num: 0 for detector_num in self.detector_numbers}
        self.detector_status_last_values = \
            {detector_num: {'inhibit': False, 'status': ['OK'],
                            'codename': self.detector_info[detector_num].identity}
             for detector_num in self.detector_numbers}
        self.detector_status_last_times = \
            {detector_num: 0 for detector_num in self.detector_numbers}

        # Initiate variables for system power status
        self.central_power_status_last_value = 'OK'
        self.central_power_status_last_time = - (10 ** 9)

        # Initiate variables for system status
        self.central_status_last_value = ['All OK']
        self.central_status_last_time = 0

    def close(self):
        """Close the logger and the connection to the Vortex"""
        self.db_saver.stop()
        LOGGER.info('Logger stopped')
        self.live_socket.stop()
        LOGGER.info('Live socket stopped')
        self.vortex.close()
        LOGGER.info('Vortex driver closed')

    @staticmethod
    def conf_to_codename(conf):
        """Convert the identity the sensor returns to the codename used in the
        database
        """
        conf = '{conf.number} {conf.identity} {conf.unit}'.format(conf=conf)
        return CONF_TO_NAME[conf]

    def main(self):
        """Main monitoring and logging loop"""
        # Each iteration takes about 5 sec
        while True:
            # Log detectors
            for detector_num in self.detector_numbers:
                self.log_detector(detector_num)

            # Log Vortex unit status (force log every 24 hours)
            self.log_central_unit()

    def log_detector(self, detector_num):
        """Get the levels from one detector and log if required"""
        # Get detector info and levels for this detector
        conf = self.detector_info[detector_num]
        codename = self.conf_to_codename(conf)
        LOGGER.debug('Use detector {} \'{}\''.format(detector_num, codename))
        levels = self.vortex.get_detector_levels(detector_num)
        LOGGER.debug('Levels read: {}'.format(levels))

        # Detector level
        now = time.time()
        # Always send to live socket
        self.live_socket.set_point_now(codename, levels.level)
        # Force log every 10 minutes (600 s)
        time_condition = now - self.detector_levels_last_times[detector_num] > 600
        value_condition = abs(self.detector_levels_last_values[detector_num] - levels.level)\
                          >= self.trip_levels[detector_num]
        if time_condition or value_condition:
            LOGGER.debug('Send level to db trigged in time: {} or value: '
                         '{}'.format(time_condition, value_condition))
            self.db_saver.save_point(codename, (now, levels.level))
            # Update last values
            self.detector_levels_last_values[detector_num] = levels.level
            self.detector_levels_last_times[detector_num] = now
        else:
            LOGGER.debug('Level logging condition false')

        self.log_detector_status(detector_num, levels, conf)

    def log_detector_status(self, detector_num, levels, conf):
        """Sub function to log single detector status"""
        now = time.time()
        # Force log every 24 hours
        time_condition = now - self.detector_status_last_times[detector_num] > 86400
        status = {'inhibit': levels.inhibit, 'status': levels.status, 'codename': conf.identity}
        value_condition = (status != self.detector_status_last_values[detector_num])

        # Check if we should log
        if time_condition or value_condition:
            check_in = time_condition and not value_condition
            LOGGER.info('Send detector status to db trigged on time: {} or '
                        'value: {}'.format(time_condition, value_condition))
            query = 'INSERT INTO status_b312gasalarm '\
                '(time, device, status, check_in) '\
                'VALUES (FROM_UNIXTIME(%s), %s, %s, %s);'
            values = (now, detector_num, json.dumps(status), check_in)
            self._wake_mysql()
            self.db_cursor.execute(query, values)
            # Update last values
            self.detector_status_last_times[detector_num] = now
            self.detector_status_last_values[detector_num] = status
        else:
            LOGGER.debug('Detector status logging condition false')

    def log_central_unit(self):
        """Log the status of the central unit"""
        power_status = self.vortex.get_system_power_status().value
        now = time.time()
        # Force a log once per 24 hours
        time_condition = now - self.central_power_status_last_time > 86400
        value_condition = self.central_power_status_last_value != power_status
        LOGGER.debug('Read central power status: \'{}\''.format(power_status))
        if time_condition or value_condition:
            check_in = time_condition and not value_condition
            LOGGER.info('Send power status to db trigged in time: {} or '
                        'value: {}'.format(time_condition, value_condition))
            query = 'INSERT INTO status_b312gasalarm '\
                '(time, device, status, check_in) '\
                'VALUES (FROM_UNIXTIME(%s), %s, %s, %s);'
            values = (now, 255, json.dumps(power_status), check_in)
            self._wake_mysql()
            self.db_cursor.execute(query, values)
            # Update last values
            self.central_power_status_last_time = now
            self.central_power_status_last_value = power_status
        else:
            LOGGER.debug('Power status logging condition false')

        self.log_central_unit_generel()

    def log_central_unit_generel(self):
        """Log the generel status from the central"""
        generel_status = self.vortex.get_system_status()
        now = time.time()
        # Force a log once per 24 hours
        time_condition = now - self.central_status_last_time > 86400
        value_condition = generel_status != self.central_status_last_value
        LOGGER.debug(
            'Read central generel status: \'{}\''.format(generel_status))
        if time_condition or value_condition:
            check_in = time_condition and not value_condition
            LOGGER.info('Send central generel status to db trigged in time: {}'
                        ' or value: {}'.format(time_condition,
                                               value_condition))
            query = 'INSERT INTO status_b312gasalarm '\
                '(time, device, status, check_in) '\
                'VALUES (FROM_UNIXTIME(%s), %s, %s, %s);'
            values = (now, 254, json.dumps(generel_status), check_in)
            self._wake_mysql()
            self.db_cursor.execute(query, values)
            # Update last values
            self.central_status_last_time = now
            self.central_status_last_value = generel_status
        else:
            LOGGER.debug('Central generel status logging condition false')

    def _wake_mysql(self):
        """Send a ping via the connection and re-initialize the cursor"""
        self.db_connection.ping(True)
        self.db_cursor = self.db_connection.cursor()
Example #18
def main():
    """ Main function """
    log = get_logger('pressure readout', level='debug')
    #logging.basicConfig(filename="logger.txt", level=logging.ERROR)
    #logging.basicConfig(level=logging.ERROR)

    port = '/dev/serial/by-id/usb-Prolific_Technology_Inc._USB-Serial_Controller_D-if00-port'
    port = '/dev/ttyUSB0'
    codenames = ['xrd_pressure_turbo_gas_system', 'xrd_pressure_gas_system_wrg',
                 'xrd_pressure_mass_spec_wrg', 'xrd_pressure_gas_system_baratron']
    reader = PressureReader(port)
    reader.daemon = True
    reader.start()

    loggers = {}
    loggers[codenames[0]] = ValueLogger(reader, comp_val=0.02, comp_type='log', channel=1)
    loggers[codenames[1]] = ValueLogger(reader, comp_val=0.02, comp_type='log', channel=2)
    loggers[codenames[2]] = ValueLogger(reader, comp_val=0.02, comp_type='log', channel=3)
    loggers[codenames[3]] = ValueLogger(reader, comp_val=2, comp_type='lin', channel=4)

    for i in range(0, 4):
        loggers[codenames[i]].start()

    socket = DateDataPullSocket('XRD Pressure', codenames, timeouts=[2.0] * len(codenames))
    socket.start()
    log.info('DateDataPullSocket started')
    live_socket = LiveSocket('XRD pressure', codenames)
    live_socket.start()
    log.info('LiveSocket started')

    db_logger = ContinuousDataSaver(continuous_data_table='dateplots_xrd',
                                    username=credentials.user,
                                    password=credentials.passwd,
                                    measurement_codenames=codenames)
    db_logger.start()
    log.info('ContinuousDataSaver started')

    time.sleep(5)

    try:
        while True:
            time.sleep(0.25)
            for name in codenames:
                value = loggers[name].read_value()
                log.debug('Read codename %s value %s', name, value)
                socket.set_point_now(name, value)
                live_socket.set_point_now(name, value)
                if loggers[name].read_trigged():
                    log.debug('Saved codename %s value %s', name, value)
                    db_logger.save_point_now(name, value)
                    loggers[name].clear_trigged()
    except KeyboardInterrupt:
        log.info('Stopping everything and waiting 5 s')
        socket.stop()
        live_socket.stop()
        db_logger.stop()
        time.sleep(5)
        log.info('Everything stopped, bye!')
    except Exception:
        # Unexpected exception, log it
        log.exception('Unexpected exception during main loop')
        raise