Example #1
    def stop(self):
        """
        Stop the daemon
        """
        # Get the pid from the pidfile
        pid = self.loadpid()

        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            logging.error(message % self.pidfile)
            return  # not an error in a restart

        # Try killing the daemon process; os.kill raises OSError with
        # errno ESRCH once the process is gone (assumes `import errno`
        # and `from signal import SIGTERM` at module level)
        try:
            while True:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError as err:
            if err.errno == errno.ESRCH:
                # The process is dead; remove the stale pidfile
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print(err)
                sys.exit(1)
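
`loadpid` is not shown in this excerpt; a minimal sketch of what it presumably does, given that `daemonize` (example 6) writes the pid to the pidfile as a single line:

    def loadpid(self):
        """Read the daemon's pid from the pidfile; None if missing or invalid."""
        try:
            with open(self.pidfile, "r") as pf:
                return int(pf.read().strip())
        except (IOError, ValueError):
            return None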
Example #2
def open_files(settings, iterations, mode=READ_MODES.CONTINUOUS):
    path_list = settings["path_list"]
    index_mnemonic = settings["index_mnemonic"]

    if mode == READ_MODES.CONTINUOUS:
        # Wrap around so the replay loops over the path list indefinitely
        path_index = iterations % len(path_list)
    else:
        # Single pass: indexing past the end raises IndexError, which the
        # caller interprets as "no more files"
        path_index = iterations

    las_path = chat_path = None

    try:
        las_path, chat_path = path_list[path_index]
        with open(las_path, "r") as las_file:
            data = lasio.read(las_file)

        if chat_path:
            with open(chat_path, "r") as chat_file:
                chat_data = list(csv.DictReader(chat_file))

            logging.debug("Success opening files {} and {}".format(
                las_path, chat_path))
        else:
            chat_data = []
            logging.debug("Success opening file {}".format(las_path))

        success = True
    except Exception as e:
        data = e
        chat_data = None
        success = False
        logging.error("Error opening file {}, {}<{}>".format(
            las_path, e, type(e)))

    return success, data, chat_data, index_mnemonic
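
`READ_MODES` is referenced but never defined in this excerpt. A minimal sketch of a compatible definition (the original project's version may differ):

from collections import namedtuple

# Any object exposing CONTINUOUS and SINGLE_PASS attributes would do here
ReadModes = namedtuple("ReadModes", ["CONTINUOUS", "SINGLE_PASS"])
READ_MODES = ReadModes(CONTINUOUS="continuous", SINGLE_PASS="single_pass")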
Example #3
def refresh_accumulator(latest_events, accumulator, index_mnemonic,
                        window_duration):
    # Purge old events and add the new ones
    latest_event = latest_events[-1]
    window_end = latest_event.get(index_mnemonic, 0)
    window_start = window_end - window_duration
    last_index = window_start

    if index_mnemonic not in latest_event:
        mnemonics_list = list(latest_event.keys())
        logging.error(
            f"Mnemonic '{index_mnemonic}' not found. Available mnemonics are: '{mnemonics_list}'"
        )

    accumulator.extend(latest_events)
    purged_accumulator = []
    for item in accumulator:
        # Events missing the index mnemonic fall back to index 0
        index = item.get(index_mnemonic, 0)
        if (window_start <= index <= window_end) and (index >= last_index):
            purged_accumulator.append(item)
            last_index = index
        elif index < last_index:
            # The index went backwards (e.g. a replay restarted): reset
            # the accumulator
            purged_accumulator = [item]
            last_index = index
        elif index == 0:
            logging.error(f"{index_mnemonic} not found, ignoring event")

    logging.debug("{} of {} events between {} and {}".format(
        len(purged_accumulator), len(accumulator), window_start, window_end))

    return purged_accumulator, window_start, window_end
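
A quick illustration of the windowing, using made-up events keyed by a hypothetical `TIME` mnemonic:

# Three events whose indexes all fall inside the two-unit window [3, 5]
events = [{"TIME": t} for t in (3, 4, 5)]
acc, w_start, w_end = refresh_accumulator(events, [], "TIME", window_duration=2)

assert (w_start, w_end) == (3, 5)
assert [e["TIME"] for e in acc] == [3, 4, 5]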
Example #4
def start(settings, **kwargs):
    event_type = settings["output"]["event_type"]
    setproctitle('DDA: LAS replayer for "{}"'.format(event_type))

    handling_func = export_curves_data

    iterations = 0
    while True:
        try:
            success, las_data, chat_data, index_mnemonic = open_files(
                settings, iterations)

            if success:
                handling_func(event_type, las_data, chat_data, index_mnemonic,
                              settings)
                logging.info("{}: Iteration {} successful".format(
                    event_type, iterations))

            else:
                # open_files signals failure once the path list is exhausted
                logging.info(
                    "{}: Single pass mode, exiting".format(event_type))
                break

        except Exception as e:
            logging.error("{}: Error processing events, {}<{}>".format(
                event_type, e, type(e)))

        iterations += 1

    return
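
`export_curves_data` is defined elsewhere; its signature can be read off the call above. A hypothetical stub, for orientation only:

def export_curves_data(event_type, las_data, chat_data, index_mnemonic, settings):
    """Hypothetical stub mirroring the call above; the real implementation
    turns LAS rows into events for Intelie Live."""
    # las_data is a lasio.LASFile; df() exposes the curves as a DataFrame
    for index_value, row in las_data.df().iterrows():
        logging.debug("{}: row at {}={}".format(
            event_type, index_mnemonic, index_value))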
Example #5
def get_processes(global_settings: Mapping,
                  process_handlers: Mapping) -> Mapping:
    processes = filter_dict(global_settings.get("processes", {}),
                            lambda _k, v: v.get("enabled") is True)

    invalid_processes = filter_dict(
        processes, lambda _k, v: (v.get("type") not in process_handlers))

    for name, info in invalid_processes.items():
        logging.error("Invalid process configured: {}, {}".format(name, info))

    valid_processes = filter_dict(
        processes, lambda name, _v: name not in invalid_processes)

    return valid_processes
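
`filter_dict` is an external helper; judging by the call sites above, a compatible sketch is a one-liner:

def filter_dict(source, predicate):
    """Keep the entries of `source` for which predicate(key, value) is True."""
    return {key: value for key, value in source.items() if predicate(key, value)}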
Example #6
    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError as e:
            logging.error("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError as e:
            logging.error("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()

        # os.dup2 duplicates the descriptor, so the redirection survives the
        # `with` blocks closing the original files
        with open(self.stdin, "r") as si:
            os.dup2(si.fileno(), sys.stdin.fileno())

        with open(self.stdout, "a+") as so:
            os.dup2(so.fileno(), sys.stdout.fileno())

        with open(self.stderr, "ab+", 0) as se:
            os.dup2(se.fileno(), sys.stderr.fileno())

        # write pidfile
        atexit.register(self.delpid)
        pid = str(os.getpid())
        with open(self.pidfile, "w+") as pf:
            pf.write("%s\n" % pid)
Example #7
    def start(self):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        pid = self.loadpid()

        if pid:
            message = "pidfile %s already exist. Daemon already running?\n"
            sys.stderr.write(message % self.pidfile)
            logging.error(message % self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()

        self.run()
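
Putting examples 1, 6 and 7 together: a hedged sketch of a subclass, assuming the base `Daemon` class takes the pidfile path in its constructor and provides defaults for `stdin`/`stdout`/`stderr`:

class WorkerDaemon(Daemon):
    """Hypothetical subclass; run() is the daemonized main loop."""

    def run(self):
        while True:
            logging.info("worker heartbeat")
            time.sleep(60)


if __name__ == "__main__":
    daemon = WorkerDaemon("/tmp/worker.pid")
    if sys.argv[1:] == ["stop"]:
        daemon.stop()
    else:
        daemon.start()  # daemonizes, writes the pidfile, then calls run()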
Example #8
@contextmanager  # assumes `from contextlib import contextmanager`
def retry_on_failure(timeout=3.05, max_retries=0):
    """Apply a timeout to a network operation, logging when it expires.

    Note: a generator-based context manager runs its body exactly once,
    so retries cannot be performed here; callers that want `max_retries`
    attempts must loop around the `with` block themselves.
    """
    try:
        with ensure_timeout(timeout):
            yield
    except (socket.timeout, Timeout):
        logging.error("Operation timed out")
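
A hedged usage sketch; `fetch_remote_data` is a made-up stand-in for any blocking network call. A timeout is logged and suppressed rather than re-raised, so the caller must check whether the body actually completed:

result = None
with retry_on_failure(timeout=3.05):
    result = fetch_remote_data()  # hypothetical blocking call

if result is None:
    # The call timed out; retry_on_failure already logged the error
    ...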
Example #9
    def format_response(self, response_content, target_curve=None):
        if not response_content:
            result = "No information about {target_curve}".format(
                target_curve=target_curve)
        else:
            results = []
            for item in response_content:
                query_result = json.loads(item.get(target_curve, "{}"))
                timestamp = int(item.get(TIMESTAMP_KEY, 0)) or None

                try:
                    value = query_result.get(VALUE_KEY)
                    uom = query_result.get(UOM_KEY)

                    if uom:
                        query_result = "{0:.2f} {1}".format(value, uom)
                    else:
                        query_result = "{0:.2f}".format(value)

                except Exception as e:
                    logging.error("{}: {} ({})".format(self.__class__.__name__,
                                                       e, type(e)))

                if timestamp:
                    # Timestamps are expressed in milliseconds
                    time_diff = time.time() - (timestamp / 1000)
                    unit = "second" if time_diff < 2 else "seconds"
                    response_age = f"{time_diff:.1f} {unit} ago"
                else:
                    response_age = "at an unknown time"

                results.append(
                    f"{target_curve} was *{query_result}* {response_age}.")

            result = ITEM_PREFIX.join(results)

        return result
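
For orientation, a hedged sketch of the input this method expects; "timestamp", "value" and "uom" are made-up stand-ins for the module's TIMESTAMP_KEY, VALUE_KEY and UOM_KEY constants:

# Hypothetical payload: each item maps curve names to JSON-encoded readings
response_content = [
    {
        "timestamp": "1672531200000",  # epoch milliseconds
        "PRESSURE": '{"value": 1234.5678, "uom": "psi"}',
    }
]
# format_response(response_content, target_curve="PRESSURE") would yield
# something like: "PRESSURE was *1234.57 psi* 3600.0 seconds ago."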
Example #10
def start(settings, **kwargs):
    """
    Starts the LAS replayer, based on the process settings.

    Replays sensors and chat data.

    :param settings: Parameters for this `las_replayer` instance
    :type settings: dict

    :param state_manager: StateManager injected by `live-agent`
    :type state_manager: live_agent.services.StateManager

    :rtype: NoneType

    The settings for this process have the following format::

      {
        "type": "las_replay",
        "enabled": true,  # Self explanatory
        "index_mnemonic": "TIME",  # Curve used as index for the LAS data
        "path_list": [
          # A list of filename pairs containing the data to be replayed
          [<path for a LAS file>, <path for a CSV file containing the chat logs>],
          ...
        ],
        "output": {
          "event_type": "raw_wellX",  # The name of the event type which should be sent to Intelie Live
          "author": {
            "id": <user id>,  # User id of the author for the messages
            "name": "Linguistics monitor"  # User name of the author for the messages
          },
          "room": {
            "id": <room id>  # Id of the room where the messages should be sent
          },
          "message_event": {
            # Information for generating markers on charts
            "event_type": "raw_wellX",  # Usually the raw event type of the asset being monitored
            "mnemonic": "MSG"  # Mnemonic used for messages normalization, usually named `MSG`
          }
        }
      }

    The LAS file *must* be SPACE delimited.

    The CSV file must contain at least 3 columns:

    - `MESSAGE`: The text of the message
    - `SOURCE`: The name of the message sender
    - A column with the same name as the `index_mnemonic` defined in the process settings,
      used for correlating messages with the events from the LAS file.

    """
    event_type = settings["output"]["event_type"]
    cooldown_time = settings.get("cooldown_time", 300)
    setproctitle('DDA: LAS replayer for "{}"'.format(event_type))

    state_manager = kwargs.get("state_manager")
    state = state_manager.load()
    iterations = state.get("iterations", 0)

    while True:
        try:
            success, las_data, chat_data, index_mnemonic = open_files(
                settings, iterations, mode=READ_MODES.CONTINUOUS)

            if success:
                generate_events(event_type, las_data, chat_data,
                                index_mnemonic, settings, state_manager)
                logging.info("Iteration {} successful".format(iterations))
            else:
                logging.warning("Could not open files")

            state_manager.save(
                {"last_timestamp": 0, "iterations": iterations},
                force=True,
            )
            loop.await_next_cycle(
                cooldown_time,
                message="Sleeping for {:.1f} minutes between runs".format(
                    cooldown_time / 60.0),
                log_func=logging.info,
            )

        except KeyboardInterrupt:
            logging.info("Stopping after {} iterations".format(iterations))
            raise

        except Exception as e:
            logging.error(
                "Error processing events during iteration {}, {}<{}>".format(
                    iterations, e, type(e)))

        iterations += 1
        # Release the parsed data between iterations; plain assignment is
        # safe even if an exception left these names unbound
        las_data = chat_data = None

    return
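
Tying the docstring together, a hedged example of a concrete settings entry; the file paths and ids are made up:

# Hypothetical configuration for one las_replay process
settings = {
    "type": "las_replay",
    "enabled": True,
    "index_mnemonic": "TIME",
    "cooldown_time": 300,  # seconds to sleep between replay runs
    "path_list": [
        ["data/well_x.las", "data/well_x_chat.csv"],
    ],
    "output": {
        "event_type": "raw_wellX",
        "author": {"id": 42, "name": "Linguistics monitor"},
        "room": {"id": 101},
        "message_event": {"event_type": "raw_wellX", "mnemonic": "MSG"},
    },
}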