Example #1
def clean_tables():
    tables = Base.metadata.tables.keys()
    transaction.begin()
    for t in tables:
        # leave alembic's migration bookkeeping table untouched
        if not t.startswith('alembic_'):
            DBSession.execute('truncate %s cascade' % t)
    session = DBSession()
    # raw execute() bypasses the ORM's change tracking, so tell
    # zope.sqlalchemy explicitly that this session has work to commit
    mark_changed(session)
    transaction.commit()
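A minimal sketch of how this helper might be wired into a pytest fixture, assuming clean_tables is importable from the test suite's shared helpers (the fixture name is illustrative, not part of the original):

import pytest


@pytest.fixture
def clean_db_state():
    # truncate all application tables (except alembic_*) before the test runs
    clean_tables()
    yield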
Example #2
def transaction_test(request):
    """
    Test transactions: trigger a database error, abort via the
    request's transaction manager, then reuse the session
    """
    try:
        result = DBSession.execute("SELECT 1/0")
    except Exception:
        request.tm.abort()
    result = DBSession.execute("SELECT 1")
    return "OK"
Example #3
def default_application(default_user):
    from appenlight.models import DBSession
    from appenlight.models.application import Application

    transaction.begin()
    session = DBSession()
    application = Application(resource_id=1,
                              resource_name="testapp",
                              api_key="xxxx")
    session.add(application)
    default_user.resources.append(application)
    session.execute("SELECT nextval('resources_resource_id_seq')")
    transaction.commit()
    return application
Example #4
def default_user():
    from appenlight.models import DBSession
    from appenlight.models.user import User
    from appenlight.models.auth_token import AuthToken

    transaction.begin()
    session = DBSession()
    user = User(id=1, user_name="testuser", status=1, email="*****@*****.**")
    session.add(user)
    token = AuthToken(token="1234")
    user.auth_tokens.append(token)
    session.execute("SELECT nextval('users_id_seq')")
    transaction.commit()
    return user
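Both default_user and default_application follow the pytest fixture pattern (the latter takes the former as an argument). A hedged example of a test consuming them, assuming they are registered as fixtures in conftest.py; the test body is illustrative:

def test_application_belongs_to_default_user(default_application, default_user):
    # default_application appends itself to the user's resources,
    # so the relationship should be visible from the user side
    assert default_application in default_user.resources
    assert default_application.api_key == "xxxx"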
Example #5
def detect_tables(table_prefix):
    found_tables = []
    # list user tables only; pg_* and sql_* are PostgreSQL system tables
    db_tables_query = '''
    SELECT tablename FROM pg_tables WHERE tablename NOT LIKE 'pg_%' AND
    tablename NOT LIKE 'sql_%' ORDER BY tablename ASC;'''

    for table in DBSession.execute(db_tables_query).fetchall():
        tablename = table.tablename
        if tablename.startswith(table_prefix):
            # reflect the matching table definition from the live database
            t = sa.Table(tablename,
                         metadata,
                         autoload=True,
                         autoload_with=DBSession.bind.engine)
            found_tables.append(t)
    return found_tables
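A sketch of how the reflected tables might be consumed, for example to drop partition tables matching a prefix; the dry-run flag and printing are illustrative assumptions:

def drop_detected_tables(table_prefix, dry_run=True):
    # detect_tables() returns reflected sqlalchemy Table objects
    for table in detect_tables(table_prefix):
        if dry_run:
            print('would drop', table.name)
        else:
            table.drop(DBSession.bind.engine)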
Example #6
def system(request):
    current_time = datetime.utcnow().replace(
        second=0, microsecond=0) - timedelta(minutes=1)
    # global app counter
    processed_reports = request.registry.redis_conn.get(
        REDIS_KEYS["counters"]["reports_per_minute"].format(current_time))
    processed_reports = int(processed_reports) if processed_reports else 0
    processed_logs = request.registry.redis_conn.get(
        REDIS_KEYS["counters"]["logs_per_minute"].format(current_time))
    processed_logs = int(processed_logs) if processed_logs else 0
    processed_metrics = request.registry.redis_conn.get(
        REDIS_KEYS["counters"]["metrics_per_minute"].format(current_time))
    processed_metrics = int(processed_metrics) if processed_metrics else 0

    waiting_reports = 0
    waiting_logs = 0
    waiting_metrics = 0
    waiting_other = 0

    if "redis" in request.registry.settings["celery.broker_type"]:
        redis_client = redis.StrictRedis.from_url(
            request.registry.settings["celery.broker_url"])
        waiting_reports = redis_client.llen("reports")
        waiting_logs = redis_client.llen("logs")
        waiting_metrics = redis_client.llen("metrics")
        waiting_other = redis_client.llen("default")

    # current process resource limits
    def replace_inf(val):
        return val if val != psutil.RLIM_INFINITY else "unlimited"

    p = psutil.Process()
    fd = p.rlimit(psutil.RLIMIT_NOFILE)
    memlock = p.rlimit(psutil.RLIMIT_MEMLOCK)
    self_info = {
        "fds": {
            "soft": replace_inf(fd[0]),
            "hard": replace_inf(fd[1])
        },
        "memlock": {
            "soft": replace_inf(memlock[0]),
            "hard": replace_inf(memlock[1])
        },
    }

    # disks
    disks = []
    for part in psutil.disk_partitions(all=False):
        if os.name == "nt":
            if "cdrom" in part.opts or part.fstype == "":
                continue
        usage = psutil.disk_usage(part.mountpoint)
        disks.append({
            "device": part.device,
            "total": bytes2human(usage.total),
            "used": bytes2human(usage.used),
            "free": bytes2human(usage.free),
            "percentage": int(usage.percent),
            "mountpoint": part.mountpoint,
            "fstype": part.fstype,
        })

    # memory
    memory_v = psutil.virtual_memory()
    memory_s = psutil.swap_memory()

    memory = {
        "total": bytes2human(memory_v.total),
        "available": bytes2human(memory_v.available),
        "percentage": memory_v.percent,
        "used": bytes2human(memory_v.used),
        "free": bytes2human(memory_v.free),
        "active": bytes2human(memory_v.active),
        "inactive": bytes2human(memory_v.inactive),
        "buffers": bytes2human(memory_v.buffers),
        "cached": bytes2human(memory_v.cached),
        "swap_total": bytes2human(memory_s.total),
        "swap_used": bytes2human(memory_s.used),
    }

    # load
    system_load = os.getloadavg()

    # processes
    min_mem = 1024 * 1024 * 40  # 40MB
    process_info = []
    for p in psutil.process_iter():
        mem_used = p.memory_info().rss
        if mem_used < min_mem:
            continue
        process_info.append({
            "owner": p.username(),
            "pid": p.pid,
            "cpu": round(p.cpu_percent(interval=0), 1),
            "mem_percentage": round(p.memory_percent(), 1),
            "mem_usage": bytes2human(mem_used),
            "name": p.name(),
            "command": " ".join(p.cmdline()),
        })
    process_info = sorted(process_info,
                          key=lambda x: x["mem_percentage"],
                          reverse=True)

    # pg tables

    db_size_query = """
    SELECT tablename, pg_total_relation_size(tablename::text) size
    FROM pg_tables WHERE tablename NOT LIKE 'pg_%' AND 
    tablename NOT LIKE 'sql_%' ORDER BY size DESC;"""

    db_tables = []
    for row in DBSession.execute(db_size_query):
        db_tables.append({
            "size_human": bytes2human(row.size),
            "table_name": row.tablename
        })

    # es indices
    es_indices = []
    result = Datastores.es.indices.stats(metric=["store, docs"])
    for ix, stats in result["indices"].items():
        size = stats["primaries"]["store"]["size_in_bytes"]
        es_indices.append({
            "name": ix,
            "size": size,
            "size_human": bytes2human(size)
        })

    # packages

    packages = ({
        "name": p.project_name,
        "version": p.version
    } for p in pkg_resources.working_set)

    return {
        "db_tables": db_tables,
        "es_indices": sorted(es_indices, key=lambda x: x["size"],
                             reverse=True),
        "process_info": process_info,
        "system_load": system_load,
        "disks": disks,
        "memory": memory,
        "packages": sorted(packages, key=lambda x: x["name"].lower()),
        "current_time": current_time,
        "queue_stats": {
            "processed_reports": processed_reports,
            "processed_logs": processed_logs,
            "processed_metrics": processed_metrics,
            "waiting_reports": waiting_reports,
            "waiting_logs": waiting_logs,
            "waiting_metrics": waiting_metrics,
            "waiting_other": waiting_other,
        },
        "self_info": self_info,
    }
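bytes2human is not defined in these snippets; appenlight's actual helper may differ, but a minimal sketch along the lines of the well-known psutil recipe would be:

def bytes2human(n):
    # convert a byte count into a human readable string, e.g. 10240 -> '10.0K'
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {s: 1 << (i + 1) * 10 for i, s in enumerate(symbols)}
    for s in reversed(symbols):
        if n >= prefix[s]:
            value = float(n) / prefix[s]
            return '%.1f%s' % (value, s)
    return '%sB' % n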
Example #7
def system(request):
    current_time = datetime.utcnow(). \
                       replace(second=0, microsecond=0) - timedelta(minutes=1)
    # global app counter
    processed_reports = request.registry.redis_conn.get(
        REDIS_KEYS['counters']['reports_per_minute'].format(current_time))
    processed_reports = int(processed_reports) if processed_reports else 0
    processed_logs = request.registry.redis_conn.get(
        REDIS_KEYS['counters']['logs_per_minute'].format(current_time))
    processed_logs = int(processed_logs) if processed_logs else 0
    processed_metrics = request.registry.redis_conn.get(
        REDIS_KEYS['counters']['metrics_per_minute'].format(current_time))
    processed_metrics = int(processed_metrics) if processed_metrics else 0

    waiting_reports = 0
    waiting_logs = 0
    waiting_metrics = 0
    waiting_other = 0

    if 'redis' in request.registry.settings['celery.broker_type']:
        redis_client = redis.StrictRedis.from_url(
            request.registry.settings['celery.broker_url'])
        waiting_reports = redis_client.llen('reports')
        waiting_logs = redis_client.llen('logs')
        waiting_metrics = redis_client.llen('metrics')
        waiting_other = redis_client.llen('default')

    # current process resource limits
    def replace_inf(val):
        return val if val != psutil.RLIM_INFINITY else 'unlimited'

    p = psutil.Process()
    fd = p.rlimit(psutil.RLIMIT_NOFILE)
    memlock = p.rlimit(psutil.RLIMIT_MEMLOCK)
    self_info = {
        'fds': {
            'soft': replace_inf(fd[0]),
            'hard': replace_inf(fd[1])
        },
        'memlock': {
            'soft': replace_inf(memlock[0]),
            'hard': replace_inf(memlock[1])
        },
    }

    # disks
    disks = []
    for part in psutil.disk_partitions(all=False):
        if os.name == 'nt':
            if 'cdrom' in part.opts or part.fstype == '':
                continue
        usage = psutil.disk_usage(part.mountpoint)
        disks.append({
            'device': part.device,
            'total': bytes2human(usage.total),
            'used': bytes2human(usage.used),
            'free': bytes2human(usage.free),
            'percentage': int(usage.percent),
            'mountpoint': part.mountpoint,
            'fstype': part.fstype
        })

    # memory
    memory_v = psutil.virtual_memory()
    memory_s = psutil.swap_memory()

    memory = {
        'total': bytes2human(memory_v.total),
        'available': bytes2human(memory_v.available),
        'percentage': memory_v.percent,
        'used': bytes2human(memory_v.used),
        'free': bytes2human(memory_v.free),
        'active': bytes2human(memory_v.active),
        'inactive': bytes2human(memory_v.inactive),
        'buffers': bytes2human(memory_v.buffers),
        'cached': bytes2human(memory_v.cached),
        'swap_total': bytes2human(memory_s.total),
        'swap_used': bytes2human(memory_s.used)
    }

    # load
    system_load = os.getloadavg()

    # processes
    min_mem = 1024 * 1024 * 40  # 40MB
    process_info = []
    for p in psutil.process_iter():
        mem_used = p.memory_info().rss
        if mem_used < min_mem:
            continue
        process_info.append({
            'owner': p.username(),
            'pid': p.pid,
            'cpu': round(p.cpu_percent(interval=0), 1),
            'mem_percentage': round(p.memory_percent(), 1),
            'mem_usage': bytes2human(mem_used),
            'name': p.name(),
            'command': ' '.join(p.cmdline())
        })
    process_info = sorted(process_info,
                          key=lambda x: x['mem_percentage'],
                          reverse=True)

    # pg tables

    db_size_query = '''
    SELECT tablename, pg_total_relation_size(tablename::text) size
    FROM pg_tables WHERE tablename NOT LIKE 'pg_%' AND 
    tablename NOT LIKE 'sql_%' ORDER BY size DESC;'''

    db_tables = []
    for row in DBSession.execute(db_size_query):
        db_tables.append({
            "size_human": bytes2human(row.size),
            "table_name": row.tablename
        })

    # es indices
    es_indices = []
    result = Datastores.es.send_request('GET', ['_stats', 'store,docs'],
                                        query_params={})
    for ix, stats in result['indices'].items():
        size = stats['primaries']['store']['size_in_bytes']
        es_indices.append({
            'name': ix,
            'size': size,
            'size_human': bytes2human(size)
        })

    # packages

    packages = ({
        'name': p.project_name,
        'version': p.version
    } for p in pkg_resources.working_set)

    return {
        'db_tables': db_tables,
        'es_indices': sorted(es_indices, key=lambda x: x['size'],
                             reverse=True),
        'process_info': process_info,
        'system_load': system_load,
        'disks': disks,
        'memory': memory,
        'packages': sorted(packages, key=lambda x: x['name'].lower()),
        'current_time': current_time,
        'queue_stats': {
            'processed_reports': processed_reports,
            'processed_logs': processed_logs,
            'processed_metrics': processed_metrics,
            'waiting_reports': waiting_reports,
            'waiting_logs': waiting_logs,
            'waiting_metrics': waiting_metrics,
            'waiting_other': waiting_other
        },
        'self_info': self_info
    }
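Both variants format a minute-truncated timestamp into counter keys taken from appenlight's REDIS_KEYS mapping. The real key templates are not shown here; a purely hypothetical sketch of the shape of that mapping:

REDIS_KEYS = {
    'counters': {
        # '{}' receives the minute-truncated datetime (current_time above)
        'reports_per_minute': 'counters:reports_per_minute:{}',
        'logs_per_minute': 'counters:logs_per_minute:{}',
        'metrics_per_minute': 'counters:metrics_per_minute:{}',
    }
}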
Example #8
def get_partition_stats():
    table_query = """
        SELECT table_name
        FROM information_schema.tables
        GROUP BY table_name
        ORDER BY table_name
    """

    permanent_partitions = {}
    daily_partitions = {}

    def is_int(data):
        try:
            int(data)
            return True
        except Exception:
            pass
        return False

    def add_key(key, holder):
        if key not in holder:
            holder[key] = {"pg": [], "elasticsearch": []}

    for partition in list(Datastores.es.indices.get_alias("rcae*")):
        if not partition.startswith("rcae"):
            continue
        split_data = partition.split("_")
        permanent = False
        # if we don't have a day component then treat it as a permanent partition
        if False in list(map(is_int, split_data[-3:])):
            ix_time = datetime(year=int(split_data[-2]),
                               month=int(split_data[-1]),
                               day=1).date()
            permanent = True
        else:
            ix_time = datetime(
                year=int(split_data[-3]),
                month=int(split_data[-2]),
                day=int(split_data[-1]),
            ).date()

        ix_time = str(ix_time)
        if permanent:
            add_key(ix_time, permanent_partitions)
            permanent_partitions[ix_time]["elasticsearch"].append(partition)
        else:
            add_key(ix_time, daily_partitions)
            daily_partitions[ix_time]["elasticsearch"].append(partition)

    for row in DBSession.execute(table_query):
        splitted = row["table_name"].split("_")
        if "p" in splitted:
            # dealing with partition
            split_data = [int(x) for x in splitted[splitted.index("p") + 1:]]
            if len(split_data) == 3:
                ix_time = datetime(split_data[0], split_data[1],
                                   split_data[2]).date()
                ix_time = str(ix_time)
                add_key(ix_time, daily_partitions)
                daily_partitions[ix_time]["pg"].append(row["table_name"])
            else:
                ix_time = datetime(split_data[0], split_data[1], 1).date()
                ix_time = str(ix_time)
                add_key(ix_time, permanent_partitions)
                permanent_partitions[ix_time]["pg"].append(row["table_name"])

    return permanent_partitions, daily_partitions
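A hedged sketch of consuming the returned mappings, e.g. to collect daily partitions older than a retention cutoff; the cutoff handling below is an illustrative assumption:

from datetime import datetime, timedelta


def daily_partitions_older_than(days=30):
    cutoff = (datetime.utcnow() - timedelta(days=days)).date()
    _permanent, daily = get_partition_stats()
    old = {}
    for ix_time, targets in daily.items():
        # keys are stringified dates, e.g. '2023-01-31'
        if datetime.strptime(ix_time, '%Y-%m-%d').date() < cutoff:
            old[ix_time] = targets
    return old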