Example #1
    def set_intvl(self, intvl):
        # Persist the new polling interval in the conf file and restart
        # the daemon so it takes effect.
        paras = util.load_conf('conf')
        if intvl >= 1:
            MonitorManager._intvl = intvl
            paras['intvl'] = intvl
            util.update_conf('conf', paras)
            self.restart()
Example #2
def main():
    cfg = load_conf()

    if cfg["useSSL"]:
        create_ssl_cert([cfg["backendIP"]])

    if cfg["dbMode"] and cfg["sqlDatabase"] and cfg["sqlPassword"] and cfg[
            "sqlServerIP"] and cfg["sqlUsername"]:
        print("Using " + cfg["dbMode"] + " DB")

        try:
            conn = load_db_conn()[0]
            if cfg["dbMode"] == "mssql":
                init_mssql_db(conn)
            elif cfg["dbMode"] == "mysql":
                init_mysql_db(conn)
            else:
                print(
                    "Error! No valid db mode found. Please use mssql or mysql")
        except Exception as e:
            print(e)
    else:
        print("No db mode set.")

    check_existing_token()
    create_web_config()
    server()
Example #3
def server():
    cfg = load_conf()
    api_token = check_existing_token()

    if cfg["useSSL"]:
        http_server = WSGIServer(
            (cfg["backendIP"], int(cfg["backendPort"])),
            app,
            certfile="ssl/cert.crt",
            keyfile="ssl/key.pem",
        )
        print(
            "Server started. Running on https://"
            + str(cfg["backendIP"])
            + ":"
            + str(cfg["backendPort"])
        )
    else:
        http_server = WSGIServer((cfg["backendIP"], int(cfg["backendPort"])), app)
        print(
            "Server started. Running on http://"
            + str(cfg["backendIP"])
            + ":"
            + str(cfg["backendPort"])
        )

    print("API Token: " + api_token)
    if cfg['parserIP']:
        print("Parser IP set to: " + str(cfg["parserIP"]) + ":" + str(cfg["parserPort"]))
    else:
        print("No parser IP set.")

    http_server.serve_forever()
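main() and server() above appear to assume a Flask app object served by gevent's WSGIServer; a minimal sketch of that scaffolding (module names are assumptions, not confirmed by the source):

from flask import Flask
from gevent.pywsgi import WSGIServer  # accepts the certfile/keyfile kwargs used above

app = Flask(__name__)

if __name__ == "__main__":
    main()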
Example #4
    def __init__(self,
                 pidfile='/tmp/test-monitor.pid',
                 stdin='/dev/stdin',
                 stdout='/dev/stdout',
                 stderr='/dev/stderr',
                 intvl=10,
                 logfile='/opt/monitor.log'):
        super(MonitorManager, self).__init__(pidfile=pidfile, stdin=stdin,
                                             stdout=stdout, stderr=stderr)

        paras = util.load_conf('conf')

        MonitorManager._logfile = logfile

        # Seed the poll interval from the conf file, falling back to the default.
        if 'intvl' not in paras:
            MonitorManager._intvl = intvl
            paras['intvl'] = intvl
        else:
            MonitorManager._intvl = int(paras['intvl'])

        if 'pollsters' in paras:
            # NOTE: eval() on conf input is risky; ast.literal_eval is the safer choice.
            tmp_list = eval(paras['pollsters'])
            for poll in tmp_list:
                p_name, cls = util.load_class(poll)
                if p_name and cls:
                    MonitorManager._pollsters[p_name] = cls()
        else:
            MonitorManager._pollsters = OrderedDict()

        util.update_conf('conf', paras)
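util.load_conf and util.update_conf are not shown anywhere in these examples. A plausible minimal stand-in, assuming the 'conf' file is flat key=value text (purely illustrative; the real helpers may differ):

# Hypothetical stand-ins for util.load_conf / util.update_conf.
def load_conf(path):
    paras = {}
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if line and not line.startswith('#'):
                key, _, value = line.partition('=')
                paras[key.strip()] = value.strip()
    return paras

def update_conf(path, paras):
    with open(path, 'w') as fh:
        for key, value in paras.items():
            fh.write('%s=%s\n' % (key, value))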
Example #7
    def set_pollsters(self, poll_list):
        paras = util.load_conf('conf')
        MonitorManager._pollsters = OrderedDict()
        for poll in poll_list:
            p_name, cls = util.load_class(poll)
            if p_name and cls:
                MonitorManager._pollsters[p_name] = cls()
        if poll_list:
            paras['pollsters'] = '%s' % poll_list
            util.update_conf('conf', paras)
        self.restart()
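A usage sketch for set_pollsters, assuming pollsters are addressed by dotted class path (the exact string format util.load_class expects is not shown):

manager = MonitorManager()
manager.set_pollsters(['pollsters.cpu.CpuPollster',   # hypothetical paths
                       'pollsters.mem.MemPollster'])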
Example #9
def before_request():
    if request.endpoint in ('static', 'index'):
        return

    if not request.args:
        return "No token provided! Add &token= to URL", 401

    # .get() avoids a KeyError (HTTP 400) when the token parameter is missing.
    if api_token != request.args.get("token"):
        return "Unauthorized", 401

    global cfg
    cfg = load_conf()
    if ((not cfg["dbMode"] or not cfg["sqlDatabase"] or not cfg["sqlPassword"]
         or not cfg["sqlServerIP"] or not cfg["sqlUsername"])
            and request.endpoint != 'updateConfig'
            and request.endpoint != 'getBackendConfig'):
        return "Settings incomplete!", 512
Example #10
def optimise():

    conf = load_conf('geojson/counties.geojson', 'Name')
    #conf = load_conf('geojson/us.geojson', 'NAME')
    #conf = load_conf('geojson/constituencies.geojson', 'pcon16nm')

    regions = conf['regions']
    neighbours = conf['neighbours']

    rows = 13
    cols = 16 
    #rows = 27 
    #cols = 45

    grid = random_grid(regions, rows, cols)
    adj_matrix = adjacency_matrix(neighbours, regions)


    old_score = best_score = eval_candidate_mod(grid, adj_matrix)

    # highest possible score.
    max_score = len(neighbours) 

    #t = 0.05
    #a = 0.9999999

    # ['{:.9f}'.format(100*math.exp(-x/1)) for x in range(1, 9)]
    # ['36.787944117', '13.533528324', '4.978706837', '1.831563889', '0.673794700', '0.247875218', '0.091188197', '0.033546263']
    t = 0.6

    # cooling schedule
    a = 0.999999999

    # ['{:.9f}'.format(100*math.exp(-x/0.3)) for x in range(1, 9)]
    # ['3.567399335', '0.127263380', '0.004539993', '0.000161960', '0.000005778', '0.000000206', '0.000000007', '0.000000000']
    min_temp = 0.3

    # stash best result for final output + restarts.
    best_grid = None
    restarts = 0
    restart_limit = 20 

    print_every = 100
    best_s = ''
    i = 0

    last_cell = rows*cols - 1 



    #while 1:
    for _ in range(0, 10000000): 
        # check if we should restart
        # https://en.wikipedia.org/wiki/Simulated_annealing#Restarts
        if best_score - old_score > restart_limit:
            # we have deviated too far from a good solution..
            grid = copy.deepcopy(best_grid)
            old_score = best_score
            restarts += 1
            # and also restart the cooling schedule?

        r1 = random_int(last_cell)
        r2 = random_int(last_cell)
        i1, j1 = r1 // cols, r1 % cols
        i2, j2 = r2 // cols, r2 % cols


        new_score = old_score

        v1 = grid[i1][j1]
        v2 = grid[i2][j2]
        new_score -= eval_position(grid, adj_matrix, i1, j1)
        new_score -= eval_position(grid, adj_matrix, i2, j2)

        # don't swap two empty positions...
        if v1 is None and v2 is None:
            continue

        i = i + 1
        if t > min_temp:
            t = t * a

        grid[i1][j1] = v2
        grid[i2][j2] = v1
        new_score += eval_position(grid, adj_matrix, i1, j1)
        new_score += eval_position(grid, adj_matrix, i2, j2)

        if acceptance_probability(old_score, new_score, t) >= random.random():
            # accept the candidate move
            old_score = new_score
            if new_score > best_score:
                # new maxima found
                best_score = new_score
                best_s = res_to_string(grid, regions, 'white', 'on_blue', 'on_grey')
                best_grid = copy.deepcopy(grid)
            else:
                # exploring
                if i % print_every == 0:
                    subprocess.call('clear', shell=True)
                    print(best_s)
                    print(res_to_string(grid, regions, 'cyan', 'on_grey', 'on_grey'))
                    print('temperature = {:.6f} state restarts = {}'.format(t, restarts))
                    graph_data = [('best result', best_score), ('current result', new_score)]
                    for g in ascii_graph.Pyasciigraph(force_max_value=max_score).graph(data=graph_data):
                        print(g)
                
        else:
            # reject the candidate move
            grid[i1][j1] = v1
            grid[i2][j2] = v2
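acceptance_probability is not defined in the snippet. Since the loop maximises the score, the standard Metropolis rule would look like this (a sketch under that assumption, not the author's confirmed implementation):

import math

def acceptance_probability(old_score, new_score, t):
    # Always accept improvements; accept worse moves with a probability
    # that decays exponentially as the temperature t cools.
    if new_score >= old_score:
        return 1.0
    return math.exp((new_score - old_score) / t)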
Example #11
import pickle
import os
import json
import math
import util

conf = util.load_conf()
data_folder_path = os.path.join(conf["input_folder"], '*.json')
print(data_folder_path)

filteredPlane = {}

# filter parameter
minDataCnt = 1
minTotalTime = util.min2ms(75)  # 75 min
minSequenceDuration = util.min2ms(75)  # 75 min
minSequenceCnt = 1
maxSequenceCnt = 1
maxSpeed = util.km2m(700) / util.hour2ms(1)
minSpeed = util.km2m(600) / util.hour2ms(1)


def filterPlanes(stat, filteredPlane):
    longFlightCnt = 0

    for key in stat:
        # if stat[key]
        #     print(stat[key])
        #     break
        if stat[key]['Count'] < minDataCnt:
            continue
        if stat[key]['TotalTime'] < minTotalTime:
            continue
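The util conversion helpers are not shown. Given how they are combined above (util.km2m(700) / util.hour2ms(1) yields metres per millisecond), they plausibly reduce to (an assumption):

def min2ms(minutes):
    return minutes * 60 * 1000

def hour2ms(hours):
    return hours * 60 * 60 * 1000

def km2m(km):
    return km * 1000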
Example #12
def first():
    global cfg
    global api_token
    cfg = load_conf()
    api_token = check_existing_token()
Example #13
#!/usr/bin/env python

import logging, urllib
import datetime
from datetime import date, timedelta
import math

import mysql as Mysql
import domino_twilio as Twilio
import user as User
import team as Team
import email as Email
import notification as Notification
import util as Util

conf = Util.load_conf()

def frequent_alerts(since=7):
    '''
    Get the most frequent alerts
    '''
    # NOTE: %-interpolation into SQL text; safe here only because `since` is numeric.
    alerts = Mysql.rawquery('''SELECT DISTINCT environment,colo,host,service, COUNT(*) AS count FROM alerts_history WHERE createDate BETWEEN DATE_SUB(CURDATE(),INTERVAL %s DAY) AND DATE_SUB(CURDATE(),INTERVAL -1 DAY) GROUP BY environment,colo,host,service ORDER BY count DESC;''' % (since))
    for alert in alerts:
        for key in alert:
            if isinstance(alert[key], str):
                if (alert[key].startswith("'") and alert[key].endswith("'")) or (alert[key].startswith('"') and alert[key].endswith('"')): 
                    alert[key] = alert[key][1:-1]
    return alerts

def round_date(t,unit):
    '''
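frequent_alerts builds its SQL with %-interpolation; parameter binding is the safer habit. A sketch assuming a DB-API driver such as pymysql (the repo's own Mysql.rawquery may not accept bound parameters):

import pymysql  # assumed driver, not confirmed by the source

def frequent_alerts_param(conn, since=7):
    sql = ("SELECT DISTINCT environment, colo, host, service, COUNT(*) AS count "
           "FROM alerts_history "
           "WHERE createDate BETWEEN DATE_SUB(CURDATE(), INTERVAL %s DAY) "
           "AND DATE_SUB(CURDATE(), INTERVAL -1 DAY) "
           "GROUP BY environment, colo, host, service ORDER BY count DESC")
    with conn.cursor() as cur:
        cur.execute(sql, (since,))  # the driver escapes `since`
        return cur.fetchall()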
Example #14
# use pulp - a python abstraction layer for linear programming modeling.
import itertools
import multiprocessing
import math
from pulp import *
from util import load_conf

#conf = load_conf('geojson/countries.geojson', 'ctry16nm')
conf = load_conf('../geojson/regions.geojson', 'rgn16nm')
#conf = load_conf('geojson/counties.geojson', 'Name')

regions = len(conf['regions'])
# (decision variables). N_0_2 = region 0 and 2 are neighbours.
neighbours = {
    k: LpVariable(cat=LpBinary, name='N_{}_{}'.format(*k))
    for k in conf['neighbours']
}

# maximise... (maybe later we want to minimise or penalise non-assignments)
model = LpProblem('tiles', LpMaximize)
model += lpSum(neighbours.values())

# subject to...

# all possible assignments of regions to cells in the matrix.
rows = cols = 10
keys = itertools.product(range(0, rows), range(0, cols), range(0, regions))
cell_assignments = {
    k: LpVariable(cat=LpBinary, name='ASS_{}_{}_{}'.format(*k))
    for k in keys
}
#for _, ass in cell_assignments.items():
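The snippet cuts off before any constraints are added. A typical next step for this kind of tiling model is to pin each region to exactly one cell (an illustrative constraint, not the author's confirmed model):

for rg in range(regions):
    model += lpSum(cell_assignments[i, j, rg]
                   for i in range(rows)
                   for j in range(cols)) == 1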
Example #15
def train():
    conf = load_conf()

    no_replay = conf['noreplay']

    solver = conf['solver']
    n = conf['n']
    eps = conf['eps']

    savedir = conf['dirname']
    makedir(savedir)
    tmpdir = os.path.join(savedir, 'tmp')
    makedir(tmpdir)

    np.random.seed(conf['seed'])

    logfile = os.path.join(savedir, 'log')

    ave = 0
    aves = []
    ma = 0
    global_ma = 0

    channels = [10, 100, 500, n * (n - 1) // 2]
    if 'channels' in conf:
        channels = conf['channels']
        channels.append(n * (n - 1) // 2)

    bias = -np.log(1.0 / conf['p'] - 1)
    net = MLP(channels, bias)

    if conf['gpu'] != -1:
        chainer.cuda.get_device_from_id(conf['gpu']).use()
        net.to_gpu()

    if conf['opt'] == 'SGD':
        opt = chainer.optimizers.SGD(lr=conf['lr'])
    elif conf['opt'] == 'Adam':
        opt = chainer.optimizers.Adam(alpha=conf['lr'])
    opt.setup(net)

    stop = 0

    pool_size = 10
    start_training = 20
    r_bests = []
    edges_bests = []
    z_bests = []

    if no_replay:
        pool_size = 1
        start_training = 1e9

    iteration = 0
    from_restart = 0

    start_time = time.time()

    while True:
        iteration += 1
        from_restart += 1

        z = net.z(1)
        x = net(z)[0]
        edges_li, edges, lp = gen_edges(n, x, net.xp)
        r = calc_reward(n, edges, solver, tmpdir)

        entropy = F.mean(x * F.log(x + 1e-6) + (1 - x) * F.log(1 - x + 1e-6))

        if no_replay:
            loss = -r * lp

            net.cleargrads()
            loss.backward()
            opt.update()

        if r > ma:
            ma = r
            stop = 0
        else:
            stop += 1
        if r > global_ma:
            global_ma = r
            output_graph(os.path.join(savedir, 'output_{}.txt'.format(r)), n,
                         edges)
            output_distribution(
                os.path.join(savedir, 'distribution_{}.txt'.format(r)), n,
                x.data)
            chainer.serializers.save_npz(
                os.path.join(savedir, 'snapshot_at_reward_{}'.format(r)), net)

        elapsed = time.time() - start_time

        ave = ave * (1 - conf['eps']) + r * conf['eps']
        aves.append(ave)
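        # Log the iteration twice: to stdout (prefixed with savedir) and to the logfile.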
        with open(logfile, 'a') as f:
            print(savedir,
                  iteration,
                  elapsed,
                  r,
                  len(edges),
                  entropy.data,
                  global_ma,
                  ma,
                  ave,
                  flush=True)
            print(iteration,
                  elapsed,
                  r,
                  len(edges),
                  entropy.data,
                  global_ma,
                  ma,
                  ave,
                  flush=True,
                  file=f)

        found = False
        for es in edges_bests:
            if (es == edges_li).all():
                found = True
        if not found:
            r_bests.append(r)
            edges_bests.append(edges_li)
            z_bests.append(z)

        while len(r_bests) > pool_size:
            mi = 0
            for j in range(len(r_bests)):
                if r_bests[j] < r_bests[mi]:
                    mi = j
            r_bests.pop(mi)
            edges_bests.pop(mi)
            z_bests.pop(mi)

        if from_restart >= start_training:
            ind = np.random.randint(len(r_bests))
            x = net(z_bests[ind])[0]
            lp = calc_lp(n, x, edges_bests[ind], net.xp)

            loss = -r_bests[ind] * lp

            net.cleargrads()
            loss.backward()
            opt.update()

        if stop >= conf['restart']:
            stop = 0
            ma = 0
            r_bests = []
            edges_bests = []
            z_bests = []
            from_restart = 0
            net = MLP(channels, bias)
            if conf['gpu'] != -1:
                chainer.cuda.get_device_from_id(conf['gpu']).use()
                net.to_gpu()
            if conf['opt'] == 'SGD':
                opt = chainer.optimizers.SGD(lr=conf['lr'])
            elif conf['opt'] == 'Adam':
                opt = chainer.optimizers.Adam(alpha=conf['lr'])
            opt.setup(net)
            continue

        if iteration % 100 == 0:
            plt.clf()
            plt.plot(range(len(aves)), aves)
            plt.savefig(os.path.join(savedir, 'graph.png'))
        if iteration % 1000 == 0:
            plt.savefig(os.path.join(savedir,
                                     'graph_{}.png'.format(iteration)))
            plt.savefig(os.path.join(savedir,
                                     'graph_{}.eps'.format(iteration)))
            chainer.serializers.save_npz(
                os.path.join(savedir, 'snapshot_{}'.format(iteration)), net)
            chainer.serializers.save_npz(
                os.path.join(savedir, 'opt_{}'.format(iteration)), opt)
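train() reads every hyperparameter from load_conf(). A minimal conf dict covering the keys referenced above might look like this (values are illustrative; 'channels' is optional):

conf = {
    'noreplay': False,
    'solver': 'solver-name',   # hypothetical id; real solver names are not shown
    'n': 19,                   # graph size
    'eps': 0.01,               # moving-average weight
    'dirname': 'results/run0',
    'seed': 0,
    'p': 0.5,                  # initial edge probability (sets the output bias)
    'gpu': -1,                 # -1 = CPU
    'opt': 'Adam',             # or 'SGD'
    'lr': 0.001,
    'restart': 1000,           # iterations without improvement before a reset
}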