# Example #1 (scraped snippet header; original marker: "Exemple #1", score 0)
def run(args):
    """Launch a sink, N clients, and N servers inside network namespaces
    named ``{if_prefix}vhostK``, wait until any of them exits (or Ctrl-C),
    then terminate every subprocess.

    args must provide: network, broadcast_port, sink_port, if_prefix,
    implementation, num_clients, num_servers, period, logging.
    """
    def ip_netns_exec(namespace, command, **kwargs):
        # Wrap a command so it executes inside the given network namespace.
        ip_netns_cmd = f"ip netns exec {namespace} {command}"
        return utils.start_process(ip_netns_cmd, echo=True, **kwargs)

    # Map implementation name -> executable path for each role.
    server_dict = {
        "python": "./python/server.py",
        "c++":    "./cpp/bin/server",
        "ada":    "./ada/bin/server_ada",
    }
    client_dict = {
        "python": "./python/client.py",
        "c++":    "./cpp/bin/client",
        "ada":    "./ada/bin/client_ada",
    }
    sink_dict = {
        "python": "./python/sink.py",
        "c++":    "./cpp/bin/sink",
        "ada":    "./ada/bin/sink_ada",
    }

    # Broadcast address uses host byte 255; the sink lives at host byte 2.
    bc_ip = IPv4Addr(args.network)
    bc_ip[-1] = 255
    bc_addr = f"{bc_ip}:{args.broadcast_port}"
    sink_ip = IPv4Addr(args.network)
    sink_ip[-1] = 2
    sink_addr = f"{sink_ip}:{args.sink_port}"
    if_prefix = args.if_prefix

    sink_exe = sink_dict[args.implementation]
    client_exe = client_dict[args.implementation]
    server_exe = server_dict[args.implementation]

    sink = ip_netns_exec(f"{if_prefix}vhost2",
                         f"{sink_exe} {args.sink_port}",
                         stdout=None, stderr=None)
    clients = [ip_netns_exec(f"{if_prefix}vhost{3 + i}",
                             f"{client_exe} {args.broadcast_port} {sink_addr} {i}",
                             logfilename=f"client.{i}.out" if args.logging else None)
               for i in range(args.num_clients)]
    servers = [ip_netns_exec(f"{if_prefix}vhost{3 + i}",
                             f"{server_exe} {bc_addr} {i} -p {args.period}",
                             logfilename=f"server.{i}.out" if args.logging else None)
               for i in range(args.num_servers)]

    try:
        # Poll until any server or client process exits.
        # NOTE(review): this loop busy-spins at full CPU; a short sleep per
        # iteration would be kinder — confirm timing requirements first.
        while all(utils.check_process(server) for server in servers):
            if not all(utils.check_process(client) for client in clients):
                break
    except KeyboardInterrupt:
        print("\nKeyboardInterrupt Received")
    except Exception as e:
        print(f"! Error: Caught unexpected exception {e}")
    finally:
        # Always clean up every subprocess, even on error or interrupt.
        print("Terminating subprocesses")
        for server in servers:
            utils.terminate_process(server)
        for client in clients:
            utils.terminate_process(client)
        utils.terminate_process(sink)
    def find_wow(self):
        """Look for a running Wow process and window; enable the fishing
        controls when both are found, disable them otherwise."""
        proc_name = self.game_process_name.edit.text()
        proc_found = utils.check_process(proc_name)
        wow_window = utils.get_window(self.window_name.edit.text())

        # Guard clause: bail out when either the process or window is missing.
        if not (proc_found and wow_window):
            self.log_viewer.emitter.emit("Wow not found running")
            self.fish_button.setEnabled(False)
            self.find_wow_button.setStyleSheet("")
            return

        self.bot.set_wow_frame(wow_window)
        self.log_viewer.emitter.emit("Wow window at" + str(self.bot.frame))
        self.fish_button.setEnabled(True)
        self.find_wow_button.setStyleSheet("background: green;")
from django.db.models import Q
from prodsys.models import Task, Job
from schedconfig.models import Jobsactive4, Jobsarchived4

from utils import check_process, getRotatingFileHandler

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.check_merging_dump_job_status.log')

today = datetime.datetime.today()
logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
if check_process(__file__, pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

def main():
    logger.info('Getting tasks with status send, running and paused')
    tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running') | Q(status='paused'))
    #tasks_list = Task.objects.all().filter(name='dvcs2017P01t1_mu-_part2')
    logger.info('Got list of %s tasks' % len(tasks_list))
    
    for t in tasks_list:
        logger.info('Getting jobs with merging dumps status sent and running for task %s' % t.name)
        jobs_list = Job.objects.filter(task=t).filter(Q(status_merging_evntdmp='sent') | Q(status_merging_evntdmp='running')).values('panda_id_merging_evntdmp', 'status_merging_evntdmp').distinct()
        logger.info('Got list of %s jobs' % len(jobs_list))
        
        for j in jobs_list:
from django.db.models import Q
from prodsys.models import Task, Job

from utils import check_process, getRotatingFileHandler

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.define_jobs_from_runs.log')

today = datetime.datetime.today()
logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
logger.info('__file__: %s' % __file__)

if check_process("define_jobs_from_runs.py", pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

# Fabric remote-execution target and credentials (from Django settings).
env.hosts = []
env.hosts.append(settings.COMPASS_HOST)
env.user = settings.COMPASS_USER
env.password = settings.COMPASS_PASS


def exec_remote_cmd(cmd):
    """Run *cmd* on the remote host via Fabric, hiding output/running/warning
    chatter and tolerating non-zero exit codes (warn_only)."""
    with hide('output', 'running', 'warnings'):
        with sett(warn_only=True):
            return run(cmd)


def define_jobs_from_runs():
application = get_wsgi_application()

from django.db.models import Q
from prodsys.models import Task, Job

from utils import check_process, getRotatingFileHandler

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.check_castor_mdst_status.log')

today = datetime.datetime.today()
logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
if check_process('check_castor_status_mdst.py', pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

# Fabric remote-execution target and credentials (from Django settings).
env.hosts = []
env.hosts.append(settings.COMPASS_HOST)
env.user = settings.COMPASS_USER
env.password = settings.COMPASS_PASS


def exec_remote_cmd(cmd):
    """Run *cmd* on the remote host via Fabric, hiding output/running/warning
    chatter and tolerating non-zero exit codes (warn_only)."""
    with hide('output', 'running', 'warnings'):
        with sett(warn_only=True):
            return run(cmd)


def restart_transfer(logger, task, run_number, chunk_number):
# Example #6 (scraped snippet header; original marker: "Exemple #6", score 0)
from django.db.models import Q
from prodsys.models import Task, Job
from schedconfig.models import Jobsactive4

from utils import check_process, getRotatingFileHandler

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.delete_panda_log_files.log')

logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
logger.info('__file__: %s' % __file__)

if check_process("delete_panda_log_files.py", pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

# Fabric remote-execution target and credentials (from Django settings).
env.hosts = []
env.hosts.append(settings.COMPASS_HOST)
env.user = settings.COMPASS_USER
env.password = settings.COMPASS_PASS


def exec_remote_cmd(cmd):
    """Run *cmd* on the remote host via Fabric, hiding output/running/warning
    chatter and tolerating non-zero exit codes (warn_only)."""
    with hide('output', 'running', 'warnings'):
        with sett(warn_only=True):
            return run(cmd)


def delete_panda_log_files():
# Example #7 (scraped snippet header; original marker: "Exemple #7", score 0)
from django.db.models import Q
from prodsys.models import Task, Job

from utils import check_process, getRotatingFileHandler

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.send_castor_jobs_dump.log')

today = datetime.datetime.today()
logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
logger.info('__file__: %s' % __file__)

if check_process("send_castor_jobs_dump.py", pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

# Fabric remote-execution target and credentials (from Django settings).
env.hosts = []
env.hosts.append(settings.COMPASS_HOST)
env.user = settings.COMPASS_USER
env.password = settings.COMPASS_PASS


def exec_remote_cmd(cmd):
    """Run *cmd* on the remote host via Fabric, hiding output/running/warning
    chatter and tolerating non-zero exit codes (warn_only)."""
    with hide('output', 'running', 'warnings'):
        with sett(warn_only=True):
            return run(cmd)


def copy_to_castor():
from django.db.models import Q
from prodsys.models import Task, Job

from utils import check_process, getRotatingFileHandler

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.send_castor_jobs_mdst.log')

today = datetime.datetime.today()
logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
logger.info('__file__: %s' % __file__)

if check_process("send_castor_jobs_mdst.py", pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

# Fabric remote-execution target and credentials (from Django settings).
env.hosts = []
env.hosts.append(settings.COMPASS_HOST)
env.user = settings.COMPASS_USER
env.password = settings.COMPASS_PASS

# Batch size: number of runs handled per invocation — presumably; confirm
# against the caller that consumes this constant.
runs_to_send = 50


def exec_remote_cmd(cmd):
    """Run *cmd* on the remote host via Fabric, hiding output/running/warning
    chatter and tolerating non-zero exit codes (warn_only)."""
    with hide('output', 'running', 'warnings'):
        with sett(warn_only=True):
            return run(cmd)
# Example #9 (scraped snippet header; original marker: "Exemple #9", score 0)
from django.db.models import Q
from prodsys.models import Task, Job
from schedconfig.models import Jobsactive4

from utils import check_process, getRotatingFileHandler

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.archive_logs.log')

logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
logger.info('__file__: %s' % __file__)

if check_process("archive_logs.py", pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

# Fabric remote-execution target and credentials (from Django settings).
env.hosts = []
env.hosts.append(settings.COMPASS_HOST)
env.user = settings.COMPASS_USER
env.password = settings.COMPASS_PASS


def exec_remote_cmd(cmd):
    """Run *cmd* on the remote host via Fabric, hiding output/running/warning
    chatter and tolerating non-zero exit codes (warn_only)."""
    with hide('output', 'running', 'warnings'):
        with sett(warn_only=True):
            return run(cmd)


def archive_logs():
# Example #10 (scraped snippet header; original marker: "Exemple #10", score 0)
def server_furnace(sock: socket.socket, number: int, datas: Datas,
                   order_queue: list, dbconn, specific_lock: threading.Lock,
                   end_queue: list, end_queue_lock: threading.Lock):
    """
    Server-side thread paired with one heat-treatment furnace connection.

    sock (socket.socket): socket connected to the furnace.
    number (int): furnace number (1-based; index = number - 1).
    datas (Datas): instance holding each furnace's state and information
        about the process that furnace is currently running.
    order_queue (list): queue (implemented with a list) of commands this
        furnace must execute (process start / edit / stop); this thread
        pops and executes them one at a time.
    dbconn (database connector): used to store sensor values in the database.
    specific_lock (threading.Lock): prevents races when accessing
        datas[number - 1] and order_queue[number - 1].
    end_queue (list): queue (implemented with a list) receiving info on
        normally finished processes; each element is
        [process_id, mete, manu, inp, count, temp_list, heattime_list,
        staytime_list, gas], where temp_list, heattime_list and
        staytime_list are themselves lists.
    end_queue_lock (threading.Lock): prevents races when accessing end_queue.

    Overall structure:
        while True:
            inner loop until a process-start signal arrives
            inner loop until the process finishes (normally or via stop)
    """
    dbcur = dbconn.cursor()
    process_id = ''
    is_running = False  # NOTE(review): never read after this assignment
    end_flag = False
    index = number - 1
    process_start_time = None

    # Flag leftover rows for this furnace that never received an output
    # value. NOTE(review): output = 3 presumably means "stale/aborted" —
    # confirm the output-code meanings.
    str_number = '{:02d}'.format(int(number))
    sql = f"""UPDATE process SET output = 3 where output is Null and id like '{str_number}%'"""
    dbcur.execute(sql)
    dbconn.commit()

    while True:
        while datas.datas[index][
                'state'] == 'on':  # while connected to the server ('on'), wait until a process-start signal arrives
            # NOTE(review): elements are removed from order_queue while it is
            # being iterated — relies on the break/single-removal pattern.
            for elem in order_queue:
                if elem[1] == 'start':  # a process-start signal was received
                    process_id, mete, manu, inp, count, temp_list, heattime_list, staytime_list, gas = utils.extract_detail_option(
                        elem)

                    if not utils.check_process(
                            dbcur, process_id
                    ):  # if, by any chance, a process with the same id as the current one is detected, delete that process's info and overwrite it with the current one
                        sql = "UPDATE process SET material = %s, amount = %s, manufacture = %s, count = %s, temper1 = %s, temper2 = %s, temper3 = %s, temper4 = %s, temper5 = %s, temper6 = %s, temper7 = %s, temper8 = %s, temper9 = %s, temper10 = %s, heattime1 = %s, heattime2 = %s, heattime3 = %s, heattime4 = %s, heattime5 = %s, heattime6 = %s, heattime7 = %s, heattime8 = %s, heattime9 = %s, heattime10 = %s, staytime1 = %s, staytime2 = %s, staytime3 = %s, staytime4 = %s, staytime5 = %s, staytime6 = %s, staytime7 = %s, staytime8 = %s, staytime9 = %s, staytime10 = %s, gas = %s, starttime = %s, output = %s WHERE id = %s"
                        val = (mete, int(inp), manu, count, temp_list[0],
                               temp_list[1], temp_list[2], temp_list[3],
                               temp_list[4], temp_list[5], temp_list[6],
                               temp_list[7], temp_list[8], temp_list[9],
                               heattime_list[0], heattime_list[1],
                               heattime_list[2], heattime_list[3],
                               heattime_list[4], heattime_list[5],
                               heattime_list[6], heattime_list[7],
                               heattime_list[8], heattime_list[9],
                               staytime_list[0], staytime_list[1],
                               staytime_list[2], staytime_list[3],
                               staytime_list[4], staytime_list[5],
                               staytime_list[6], staytime_list[7],
                               staytime_list[8], staytime_list[9], gas, None,
                               None, process_id)
                        dbcur.execute(sql, val)
                        dbconn.commit()

                        sql = f"""DELETE from furnace{number} WHERE id = '{process_id}'"""
                        dbcur.execute(sql)
                        dbconn.commit()

                    else:
                        sql = "INSERT INTO process(id, material, amount, manufacture, count, temper1, temper2, temper3, temper4, temper5, temper6, temper7, temper8, temper9, temper10, heattime1, heattime2, heattime3, heattime4, heattime5, heattime6, heattime7, heattime8, heattime9, heattime10, staytime1, staytime2, staytime3, staytime4, staytime5, staytime6, staytime7, staytime8, staytime9, staytime10, gas) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
                        val = (process_id, mete, int(inp), manu, count,
                               temp_list[0], temp_list[1], temp_list[2],
                               temp_list[3], temp_list[4], temp_list[5],
                               temp_list[6], temp_list[7], temp_list[8],
                               temp_list[9], heattime_list[0],
                               heattime_list[1], heattime_list[2],
                               heattime_list[3], heattime_list[4],
                               heattime_list[5], heattime_list[6],
                               heattime_list[7], heattime_list[8],
                               heattime_list[9], staytime_list[0],
                               staytime_list[1], staytime_list[2],
                               staytime_list[3], staytime_list[4],
                               staytime_list[5], staytime_list[6],
                               staytime_list[7], staytime_list[8],
                               staytime_list[9], gas)
                        dbcur.execute(sql, val)
                        dbconn.commit()

                    # Send the detailed process settings to the furnace.
                    send_pkt = packet_detail_setting(
                        count, elem[7:7 + count],
                        elem[7 + count:7 + (2 * count)],
                        elem[7 + (2 * count):7 + (3 * count)], gas)
                    sock.sendall(send_pkt)
                    process_start_time = sock.recv(
                        1024).decode()  # start time in "%m/%d/%y %H:%M:%S" format

                    sql = "UPDATE process SET starttime = %s WHERE id = %s"
                    val = (process_start_time, process_id)
                    dbcur.execute(sql, val)
                    dbconn.commit()

                    specific_lock.acquire()
                    datas.working_furnace_data(number, process_id,
                                               process_start_time)
                    order_queue.remove(elem)
                    specific_lock.release()
                    break
                else:  # an edit/stop signal arrived before the process started — exists to ignore it
                    print('[server] ignore msg in q : ' + str(elem[0]) +
                          " : " + elem[1])
                    specific_lock.acquire()
                    order_queue.remove(elem)
                    specific_lock.release()

        # After the process has started: forward edit/stop commands to the
        # furnace and receive sensor values.
        while True:
            no_signal = True
            end_flag = False
            # NOTE(review): elements are removed from order_queue inside this
            # loop while it is being iterated — may skip the following item.
            for elem in order_queue:
                if elem[1] == 'end':
                    end_flag = True
                    sock.sendall(b'end signal')
                    no_signal = False
                    specific_lock.acquire()
                    order_queue.clear()
                    specific_lock.release()
                    break
                elif elem[1] == 'fix':
                    sock.sendall(b'fix signal')
                    _ = sock.recv(
                        1024
                    )  # keeps the fix signal from coalescing with the message the next sendall sends before the furnace recv()s
                    process_id, mete, manu, inp, count, temp_list, heattime_list, staytime_list, gas = utils.extract_detail_option(
                        elem)

                    send_pkt = packet_detail_setting(
                        count, elem[7:7 + count],
                        elem[7 + count:7 + (2 * count)],
                        elem[7 + (2 * count):7 + (3 * count)], gas)
                    sock.sendall(send_pkt)

                    # Update the process row in the database with the edited values.
                    sql = "UPDATE process SET material = %s, amount = %s, manufacture = %s, count = %s, temper1 = %s, temper2 = %s, temper3 = %s, temper4 = %s, temper5 = %s, temper6 = %s, temper7 = %s, temper8 = %s, temper9 = %s, temper10 = %s, heattime1 = %s, heattime2 = %s, heattime3 = %s, heattime4 = %s, heattime5 = %s, heattime6 = %s, heattime7 = %s, heattime8 = %s, heattime9 = %s, heattime10 = %s, staytime1 = %s, staytime2 = %s, staytime3 = %s, staytime4 = %s, staytime5 = %s, staytime6 = %s, staytime7 = %s, staytime8 = %s, staytime9 = %s, staytime10 = %s, gas = %s WHERE id = %s"
                    val = (mete, int(inp), manu, count, temp_list[0],
                           temp_list[1], temp_list[2], temp_list[3],
                           temp_list[4], temp_list[5], temp_list[6],
                           temp_list[7], temp_list[8], temp_list[9],
                           heattime_list[0], heattime_list[1],
                           heattime_list[2], heattime_list[3],
                           heattime_list[4], heattime_list[5],
                           heattime_list[6], heattime_list[7],
                           heattime_list[8], heattime_list[9],
                           staytime_list[0], staytime_list[1],
                           staytime_list[2], staytime_list[3],
                           staytime_list[4], staytime_list[5],
                           staytime_list[6], staytime_list[7],
                           staytime_list[8], staytime_list[9], gas, process_id)

                    dbcur.execute(sql, val)
                    dbconn.commit()

                    no_signal = False
                specific_lock.acquire()
                order_queue.remove(elem)
                specific_lock.release()

            if no_signal:
                sock.sendall(b'no_signal')  # tell the furnace that no command was entered

            if end_flag:  # stopped via the stop button (abnormal termination)
                sql = "UPDATE process SET output = %s WHERE id = %s"
                val = (int(1), process_id)
                dbcur.execute(sql, val)
                dbconn.commit()

                specific_lock.acquire()
                datas.on_furnace_data(number)
                specific_lock.release()
                break

            pkt = sock.recv(1024)  # receive sensor values

            current_time, touch, temp1, temp2, temp3, temp4, temp5, temp6, flow, press, last = read_packet(
                pkt)

            # Store the sensor values in the database.
            # NOTE(review): the first %s is the table name — most DB-API
            # drivers cannot parameterize identifiers; verify this executes
            # as intended.
            sql = """INSERT INTO furnace%s(current, id, touch, temp1, temp2, temp3, temp4, temp5, temp6, flow, press) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
            val = (number, current_time, process_id, touch, temp1, temp2,
                   temp3, temp4, temp5, temp6, flow, press)
            dbcur.execute(sql, val)
            dbconn.commit()

            # The whole process finished (normal termination).
            # Unlike the abnormal stop path, which sends a stop signal to the
            # furnace, the furnace handles shutdown itself here, so no stop
            # signal needs to be sent.
            if last == 'True':
                end_queue_lock.acquire()
                end_queue.append([
                    process_id, mete, manu, inp, count, temp_list,
                    heattime_list, staytime_list, gas
                ])
                end_queue_lock.release()
                sql = "UPDATE process SET output = %s WHERE id = %s"
                val = (int(0), process_id)
                dbcur.execute(sql, val)
                dbconn.commit()

                specific_lock.acquire()
                datas.on_furnace_data(number)
                specific_lock.release()
                break

    dbconn.close()
    sock.close()
from django.db.models import Q
from prodsys.models import Task, Job
from schedconfig.models import Jobsactive4

from utils import check_process, getRotatingFileHandler

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.prepare_files_on_castor.log')

logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
logger.info('__file__: %s' % __file__)

if check_process("prepare_files_on_castor.py", pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

# Fabric remote-execution target and credentials (from Django settings).
env.hosts = []
env.hosts.append(settings.COMPASS_HOST)
env.user = settings.COMPASS_USER
env.password = settings.COMPASS_PASS


def exec_remote_cmd(cmd):
    """Run *cmd* on the remote host via Fabric, hiding output/running/warning
    chatter and tolerating non-zero exit codes (warn_only)."""
    with hide('output', 'running', 'warnings'):
        with sett(warn_only=True):
            return run(cmd)


def prepare_on_castor():
application = get_wsgi_application()

from django.db.models import Q
from prodsys.models import Task, Job

from utils import check_process, getRotatingFileHandler

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.check_castor_dump_status.log')

today = datetime.datetime.today()
logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
if check_process('check_castor_status_dump.py', pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

# Fabric remote-execution target and credentials (from Django settings).
env.hosts = []
env.hosts.append(settings.COMPASS_HOST)
env.user = settings.COMPASS_USER
env.password = settings.COMPASS_PASS


def exec_remote_cmd(cmd):
    """Run *cmd* on the remote host via Fabric, hiding output/running/warning
    chatter and tolerating non-zero exit codes (warn_only)."""
    with hide('output', 'running', 'warnings'):
        with sett(warn_only=True):
            return run(cmd)


def check_files_on_castor():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "compass.settings")
application = get_wsgi_application()

from django.db.models import Q
from prodsys.models import Task, Job

from utils import check_process, getRotatingFileHandler

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.check_castor_logs_status.log')

logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
if check_process('check_castor_logs_status.py', pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

# Fabric remote-execution target and credentials (from Django settings).
env.hosts = []
env.hosts.append(settings.COMPASS_HOST)
env.user = settings.COMPASS_USER
env.password = settings.COMPASS_PASS


def exec_remote_cmd(cmd):
    """Run *cmd* on the remote host via Fabric, hiding output/running/warning
    chatter and tolerating non-zero exit codes (warn_only)."""
    with hide('output', 'running', 'warnings'):
        with sett(warn_only=True):
            return run(cmd)


def check_files_on_castor():
from utils import check_process, getRotatingFileHandler

# Upper bound on items checked per run — presumably; confirm against the
# code that consumes this constant.
max_check_amount = 3000

# Periodic-task logger writing to a rotating log file.
logger = logging.getLogger('periodic_tasks_logger')
getRotatingFileHandler(logger, 'periodic_tasks.get_number_of_events.log')

today = datetime.datetime.today()
logger.info('Starting %s' % __file__)

# Single-instance guard: exit if another copy of this script is running.
pid = str(os.getpid())
logger.info('pid: %s' % pid)
logger.info('__file__: %s' % __file__)

if check_process("get_number_of_events.py", pid):
    logger.info('Another %s process is running, exiting' % __file__)
    sys.exit(0)

# Fabric remote-execution target and credentials (from Django settings).
env.hosts = []
env.hosts.append(settings.COMPASS_HOST)
env.user = settings.COMPASS_USER
env.password = settings.COMPASS_PASS


def exec_remote_cmd(cmd):
    """Run *cmd* on the remote host via Fabric, hiding output/running/warning
    chatter and tolerating non-zero exit codes (warn_only)."""
    with hide('output', 'running', 'warnings'):
        with sett(warn_only=True):
            return run(cmd)


def get_number_of_events():