Example #1
    def post(self):
        """
        Add a new data point, creating the metric if it does not already exist.

        Expected JSON payload has the following key/value pairs::

          metric: string
          value: numeric
          time: integer or missing
        """
        data = request.get_json()
        logger.debug("Received POST /api/v1/data: {}".format(data))

        try:
            metric = data['metric']
            value = data['value']
        except KeyError:
            logger.warning("Missing JSON keys 'metric' or 'value'.")
            return "Missing required key. Required keys are:", 400

        time = data.get('time', None)

        db.add_metric(metric)
        new = db.insert_datapoint(metric, value, time)

        msg = "Added DataPoint to Metric {}\n".format(new.metric)
        logger.info("Added value %s to metric '%s'" % (value, metric))
        return msg, 201
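
A quick client-side sketch of exercising this endpoint with requests. The route matches the logged "POST /api/v1/data"; the host and port are assumptions, not taken from the snippet:

import requests

# Hypothetical base URL; adjust to wherever the Flask app is served.
url = "http://localhost:5000/api/v1/data"
payload = {
    "metric": "tphweb.master.coverage",  # string
    "value": 97.2,                       # numeric
    # "time" is optional; the handler defaults it to None.
}
resp = requests.post(url, json=payload)
print(resp.status_code, resp.text)  # expect 201 and "Added DataPoint to Metric ..."
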
Example #2
def add_metric(name, units=None, lower_limit=None, upper_limit=None):
    """
    Add a new metric to the database.

    If both ``lower_limit`` and ``upper_limit`` are given, then
    ``upper_limit`` must be greater than ``lower_limit``.

    Parameters
    ----------
    name : str
        The full metric name, e.g. ``tphweb.master.coverage``.
    units : str, optional
        The units for this metric.
    lower_limit : float, optional
        The lower limit for data. Data values below this limit will trigger
        email alerts.
    upper_limit : float, optional
        The upper limit for data. Data values above this limit will trigger
        email alerts.

    Returns
    -------
    metric : :class:`orm.Metric` object
        The metric that was added.

    Raises
    ------
    ValueError
        The provided limits do not satisfy ``upper_limit > lower_limit``.
    TypeError
        The provided limits are not numeric or ``None``.
    """
    logger.debug("Querying metric '%s'" % name)

    _t = (int, float, type(None))
    if not isinstance(lower_limit, _t) or not isinstance(upper_limit, _t):
        msg = "Invalid type for limits. lower_limit: {}. upper_limit: {}"
        logger.error(msg.format(type(lower_limit), type(upper_limit)))
        raise TypeError(
            "upper_limit and lower_limit must be numerics or None.")

    if lower_limit is not None and upper_limit is not None:
        if upper_limit <= lower_limit:
            logger.error("upper_limit not greater than lower_limit.")
            raise ValueError("upper_limit must be greater than lower_limit")

    metric, created = Metric.get_or_create(
        name=name,
        units=units,
        lower_limit=lower_limit,
        upper_limit=upper_limit,
    )
    if created:
        logger.info("Metric '%s' created." % name)
    else:
        logger.debug("Found existing metric '%s'." % name)
    return metric
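
A short usage sketch of add_metric; the metric name, units, and limits below are illustrative:

# Returns the existing row if the metric is already in the database.
metric = add_metric("tphweb.master.coverage", units="%",
                    lower_limit=0.0, upper_limit=100.0)

# These illustrative calls would raise:
#   add_metric("foo", lower_limit=10, upper_limit=5)   # ValueError
#   add_metric("foo", lower_limit="ten")               # TypeError
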
Example #3
def create_celery():
    celery = Celery(__name__, autofinalize=False)

    # Pull config from file. This is basically the same as what is
    # done in app_factory.create_app()
    celery.config_from_object('trendlines.default_config')
    try:
        config_from_envvar(celery, CFG_VAR)
        logger.info("Loaded config file '%s'" % os.environ[CFG_VAR])
    except FileNotFoundError:
        msg = "Failed to load config file. The file %s='%s' was not found."
        logger.warning(msg % (CFG_VAR, os.environ[CFG_VAR]))
    except (RuntimeError, ImproperlyConfigured) as err:
        # Celery's error for missing env var is sufficient.
        logger.warning(str(err))
    except Exception as err:
        logger.warning("An unknown error occured while reading from the"
                       " config file. See debug stack trace for details.")
        logger.debug(format_exc())

    UDP_PORT = celery.conf['UDP_PORT']
    TCP_PORT = celery.conf['TCP_PORT']
    HOST = celery.conf['TARGET_HOST']
    URL = celery.conf['TRENDLINES_API_URL']
    celery.finalize()
    logger.debug("Celery has been finalized.")

    class TCPHandler(socketserver.BaseRequestHandler):
        def handle(self):
            data = self.request.recv(1024).strip()
            try:
                parsed = utils.parse_socket_data(data)
                logger.debug("TCP: {}".format(parsed))
            except ValueError:
                logger.warning("TCP: Failed to parse `%s`." % data)
                return

            try:
                r = requests.post(URL, json=parsed)
                logger.info(r.status_code)
            except Exception:
                raise
            self.request.sendall(b"accepted")

    @celery.task
    def listen_to_tcp():
        hp = (HOST, TCP_PORT)
        logger.info("listening for TCP on %s:%s" % hp)
        with socketserver.TCPServer(hp, TCPHandler) as server:
            server.serve_forever()

    # Start our tasks
    logger.debug("Starting tasks")
    #  listen_to_udp.delay()
    listen_to_tcp.delay()

    return celery
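
A client only needs a plain TCP socket to feed the listener. A minimal sketch follows; the host/port are assumptions (match them to TARGET_HOST / TCP_PORT), and the payload is a placeholder because the wire format expected by utils.parse_socket_data is not shown in this snippet:

import socket

# Hypothetical host/port; use the values from your deployed config.
with socket.create_connection(("localhost", 9999)) as sock:
    # Placeholder payload: send whatever utils.parse_socket_data expects.
    sock.sendall(b"<payload understood by utils.parse_socket_data>")
    print(sock.recv(1024))  # the handler replies b"accepted" on success
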
Example #4
        def handle(self):
            data = self.request.recv(1024).strip()
            try:
                parsed = utils.parse_socket_data(data)
                logger.debug("TCP: {}".format(parsed))
            except ValueError:
                logger.warning("TCP: Failed to parse `%s`." % data)
                return

            try:
                r = requests.post(URL, json=parsed)
                logger.info(r.status_code)
            except Exception:
                raise
            self.request.sendall(b"accepted")
Example #5
def create_db(name):
    """
    Create the database and the tables.

    Applies any missing migrations. Does nothing if all migrations
    have been applied.

    Parameters
    ----------
    name : str
        The name/path of the database, as given by ``app.config['DATABASE']``.
    """
    #  import pdb; pdb.set_trace()
    # Convert to a Path object because I like working with those better.
    full_path = Path(name).resolve()

    file_exists = full_path.exists()
    if file_exists:
        logger.debug("Connecting to existing database: '%s'." % full_path)
    else:
        logger.debug("Creating new database: '%s'" % full_path)

    db.init(str(full_path), pragmas=DB_OPTS)

    try:
        # This will create the file if it doesn't exist.
        db.connect()
    except OperationalError:
        # Try to figure out why OperationalError happened.
        if file_exists:
            msg = ("Database file %s exists, but we're unable to connect."
                   " Perhaps the permissions are incorrect?")
            logger.error(msg % full_path)
        else:
            msg = ("Unable to create %s. Perhaps the parent folder is missing"
                   " or permissions are incorrect?")
            logger.error(msg % full_path)
        logger.error("Unable to create/open database file '%s'" % full_path)
        raise

    # Either way, we want to run migrations. However, we only need to make a
    # backup if the file already exists.
    if file_exists:
        # Create a backup before doing anything.
        backup_file = utils.backup_file(full_path)
        logger.debug("Created database backup file: {}".format(backup_file))

    # This will edit the database file, creating the `migration_history`
    # table if needed. Hence why we do it *after* the backup.
    try:
        manager = DatabaseManager(db)
    except PermissionError:
        # When running in the docker container, this will attempt to create
        # a `/migrations` directory. It should be `/trendlines/migrations`.
        # If this still fails, let the error propagate but make sure to
        # close the db connection.
        try:
            msg = "Failed to open default migration directory, trying '%s'"
            alt_dir = "/trendlines/migrations"
            logger.debug(msg % alt_dir)
            manager = DatabaseManager(db, directory=alt_dir)
            logger.debug("Success")
        except PermissionError:
            raise
        finally:
            db.close()

    # Check the status. Creating a new file means we'll need migrations.
    # However, we don't need to check for that because it's guaranteed
    # that a new file will have len(manager.diff) > 0
    needs_migrations = len(manager.diff) > 0

    if needs_migrations:
        logger.info("Missing migrations: {}".format(manager.diff))

        # Apply the migrations
        success = manager.upgrade()
        if success:
            logger.info("Successfully applied database migrations.")
        elif file_exists:
            # revert our changes by restoring the backup
            msg = ("Failed to apply database migrations. Reverting to backup"
                   " file. Please submit an issue at {} with details.")
            logger.critical(msg.format(__project_url__))
            shutil.copy(str(backup_file), str(full_path))
        else:
            # It's a new file, so no backup was made.
            msg = ("Failed to apply database migrations to the new file."
                   " Please see the logs for more info.")
            logger.critical(msg)
    else:
        logger.info("Database is up to date. No migrations to apply.")
        # Since we didn't make any changes, we can remove the backup file.
        # Is it possible to ever have backup_file not exist if we didn't
        # apply migrations?
        # No, because not applying migrations implies that the file already
        # existed, and if the file already existed then a backup was made.
        # Thus we don't need to check for FileNotFoundError.
        backup_file.unlink()
        logger.debug("Removed superfluous backup file: %s" % backup_file)

    # Make sure to close the database if things went well.
    db.close()
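
Typical invocation; the path below is illustrative, and in the application it comes from app.config['DATABASE']:

# Creates the file if needed, backs up an existing database, applies any
# pending migrations, and closes the connection before returning.
create_db("data/trendlines.db")
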
Example #6
def create_app():
    """
    Primary application factory.
    """
    _logging.setup_logging(logger)

    logger.debug("Creating app.")
    app = Flask(__name__)
    app.config.from_object('trendlines.default_config')

    try:
        app.config.from_envvar(CFG_VAR)
        logger.info("Loaded config file '%s'" % os.environ[CFG_VAR])
    except FileNotFoundError:
        msg = "Failed to load config file. The file %s='%s' was not found."
        logger.warning(msg % (CFG_VAR, os.environ[CFG_VAR]))
    except RuntimeError as err:
        # Flask's error for missing env var is sufficient.
        logger.warning(str(err))
    except Exception as err:
        logger.warning("An unknown error occured while reading from the"
                       " config file. See debug stack trace for details.")
        logger.debug(format_exc())

    logger.debug("Registering blueprints.")
    app.register_blueprint(routes.pages)

    # Initialize flask-rest-api for OpenAPI (Swagger) documentation
    routes.api_class.init_app(app)
    routes.api_class.register_blueprint(routes.api)
    routes.api_class.register_blueprint(routes.api_datapoint)
    routes.api_class.register_blueprint(routes.api_metric)

    # Create the database file and populate initial tables if needed.
    orm.create_db(app.config['DATABASE'])

    # If I redesign the architecture a bit, then these could be moved so
    # that they only act on the `api` blueprint instead of the entire app.
    #
    # Also, note that this is safe for multiple simultaneous requests, since
    # the database object is thread local:
    #   "Peewee uses thread local storage to manage connection state, so
    #    this pattern can be used with multi-threaded WSGI servers."
    # http://docs.peewee-orm.com/en/latest/peewee/example.html#establishing-a-database-connection
    @app.before_request
    def before_request():
        """
        Attach the ORM to the flask ``g`` object before every request.

        Why not just use ``from orm import db`` most of the time? Well, it's
        because:
        (a) that's what I'm used to from SQLAlchemy
        (b) I may need ``db`` somewhere where I *can't* import ``orm``
        (c) because that's how the example PeeWee project is set up. /shrug.
            https://github.com/coleifer/peewee/blob/master/examples/twitter/app.py#L152
        """
        g.db = orm.db
        try:
            g.db.connect()
        except OperationalError:
            pass

    @app.after_request
    def after_request(response):
        g.db.close()
        return response

    return app
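
A minimal way to run the factory locally. This is a sketch: the import path and port are assumptions, and a production deployment would point a WSGI server such as gunicorn at create_app() instead:

# Assumed import path for the factory shown above.
from trendlines.app_factory import create_app

app = create_app()

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)  # port is an assumption
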
Example #7
def listen_to_tcp():
    hp = (HOST, TCP_PORT)
    logger.info("listening for TCP on %s:%s" % hp)
    with socketserver.TCPServer(hp, TCPHandler) as server:
        server.serve_forever()