__author__ = 'AbdullahS'

import time
import logging
import os
import psutil
import pika
import json
from pprint import pprint, pformat  # NOQA

from hydra.lib import util
from hydra.lib.hdaemon import HDaemonRepSrv

l = util.createlogger('HPub', logging.INFO)


class HDRmqpRepSrv(HDaemonRepSrv):
    """RabbitMQ publisher REP server: exposes test-control and stats RPCs
    (teststart/getstats/teststatus/updateconfig) to the test driver."""
    def __init__(self, port, run_data, pub_metrics):
        self.run_data = run_data          # shared dict driving/reporting the test run
        self.pub_metrics = pub_metrics
        self.init_pub_metrics()
        HDaemonRepSrv.__init__(self, port)
        self.register_fn('teststart', self.test_start)
        self.register_fn('getstats', self.get_stats)
        self.register_fn('teststatus', self.test_status)
        self.register_fn('updateconfig', self.update_config)

    def test_start(self):
        """Mark the run as started and snapshot baseline net/CPU counters."""
        process = psutil.Process()
        self.run_data['start'] = True
        self.run_data['test_status'] = 'running'
        # Baselines are JSON-serialized so deltas can be computed at test end.
        # (Fragment is truncated here mid-dict.)
        self.run_data['stats'] = {'net:start': json.dumps(psutil.net_io_counters()),
                                  'cpu:start': json.dumps(process.cpu_times()),
from docopt import docopt
from pprint import pprint, pformat  # NOQA
from hydra.lib import util, mmapi
import os
import sys
import logging

try:
    # Python 2.x
    from ConfigParser import ConfigParser
except ImportError:
    # Python 3.x
    from configparser import ConfigParser

l = util.createlogger('cli', logging.INFO)
# l.setLevel(logging.DEBUG)


def cli(argv):
    """CLI entry point: load the hydra .ini config (optionally passed as
    argv[1]) and derive the mesos endpoint from it.
    Exits with status 1 if the config file does not exist.
    (Fragment is truncated below, mid-expression.)"""
    config = ConfigParser()
    config_file_name = 'hydra.ini'
    # An explicit *.ini argument overrides the default and is consumed from argv
    # so downstream argument parsing does not see it.
    if len(argv) >= 2 and argv[1].find('.ini') != -1:
        config_file_name = argv[1]
        del argv[1]
    if not os.path.isfile(config_file_name):
        l.error("Unable to open config file %s" % config_file_name)
        sys.exit(1)
    config.read(config_file_name)
    mesos_addr = 'http://' + config.get('mesos', 'ip') + ':' + \
__author__ = 'AbdullahS, sushil'

from pprint import pprint, pformat  # NOQA
import zmq
import logging
import json
import traceback

from hydra.lib import util
from hydra.lib.utility.h_threading import HThreading
from hydra.lib import hdaemon_pb2

l = util.createlogger('HDaemon', logging.INFO)
# l.setLevel(logging.DEBUG)


class HDaemonRepSrv(object):
    """Base REP-socket daemon server. Subclasses register named callback
    functions that remote clients can invoke by name."""

    def __init__(self, port):
        l.info("initiated..., REP port[%s]", port)
        self.port = port
        self.data = {}  # Dict calling class can use to store data, can be fetched later
        self.t_exceptions = []       # exceptions collected from worker threads
        self.h_threading = HThreading()
        self.cbfn = {}               # function-name -> callback registry

    def thread_cb(self, t_exceptions):
        """Collect and log exceptions raised in helper threads."""
        for exception in t_exceptions:
            self.t_exceptions.append(exception)
            l.error(exception)

    def run(self):
        # (Fragment is truncated here; only the first statement of run() is visible.)
        l.info("spawning run thread...")
import sys
from pprint import pprint, pformat  # NOQA
from optparse import OptionParser
import logging
from sets import Set  # NOTE(review): Python 2-only module; unused in this fragment -- confirm needed
from hydra.lib import util
from hydra.lib.h_analyser import HAnalyser
from hydra.lib.hydrabase import HydraBase

try:
    # Python 2.x
    from ConfigParser import ConfigParser
except ImportError:
    # Python 3.x
    from configparser import ConfigParser

l = util.createlogger('runTest', logging.INFO)
# l.setLevel(logging.DEBUG)

# Common request timeouts (milliseconds).
tout_60s = 60000
tout_30s = 30000
tout_10s = 10000


class ZMQPubAnalyser(HAnalyser):
    """Analyser client for a ZMQ publisher task's daemon REP server."""
    def __init__(self, server_ip, server_port, task_id):
        HAnalyser.__init__(self, server_ip, server_port, task_id)


class ZMQSubAnalyser(HAnalyser):
    """Analyser client for a ZMQ subscriber task's daemon REP server."""
    def __init__(self, server_ip, server_port, task_id):
        HAnalyser.__init__(self, server_ip, server_port, task_id)
__author__ = 'AbdullahS'

from pprint import pprint, pformat  # NOQA
from hydra.lib import util
import time
import sys
import logging
import os
import random
from hydra.lib.childmgr import ChildManager

l = util.createlogger('MOCKAPI', logging.INFO)
# l.setLevel(logging.DEBUG)


class TaskInfo(object):
    """
    TaskInfo class that mimics marathon TaskInfo object.
    Note: This should not be considered a copy of marathon TaskInfo.
    Any required attribute will need to be added on need basis.
    """
    def __init__(self):
        self.id = None   # task identifier, set by the caller
        self.ports = []  # ports assigned to the task


class AppInfo(object):
#!/usr/bin/env python import zmq import sys import logging import os from sys import path path.append("hydra/src/main/python") from hydra.lib import util from hydra.lib.hdaemon import HDaemonRepSrv l = util.createlogger('HWSrv', logging.INFO) class HDHelloWorldSub(HDaemonRepSrv): def __init__(self, port, stats): HDaemonRepSrv.__init__(self, port) self.stats = stats self.register_fn('getstats', self.get_stats) # Handler for 'getstas' signal. def get_stats(self): l.info("stats counter %s" % self.stats.counter) return 'ok', self.stats.counter class Stats(object): def __init__(self): self.counter = 0
import sys
from pprint import pprint, pformat  # NOQA
from optparse import OptionParser
import logging
from hydra.lib import util
from hydra.lib.h_analyser import HAnalyser
from hydra.lib.hydrabase import HydraBase

try:
    # Python 2.x
    from ConfigParser import ConfigParser
except ImportError:
    # Python 3.x
    from configparser import ConfigParser

l = util.createlogger('runTest', logging.INFO)
# l.setLevel(logging.DEBUG)

# Common request timeouts (milliseconds).
tout_60s = 60000
tout_30s = 30000
tout_10s = 10000


class RMQPubAnalyser(HAnalyser):
    """Analyser client for a RabbitMQ publisher task's daemon REP server."""
    def __init__(self, server_ip, server_port, task_id):
        HAnalyser.__init__(self, server_ip, server_port, task_id)


class RMQSubAnalyser(HAnalyser):
    """Analyser client for a RabbitMQ subscriber task's daemon REP server."""
    # FIX: accept task_id like RMQPubAnalyser above (and the ZMQ analysers in
    # the sibling module) and pass it through to HAnalyser. Defaulted to None
    # so any existing two-argument callers keep working.
    def __init__(self, server_ip, server_port, task_id=None):
        HAnalyser.__init__(self, server_ip, server_port, task_id)
__author__ = 'annyz'

from pprint import pprint, pformat  # NOQA
import logging
from hydra.lib import util
from hydra.kafkatest.runtest import RunTestKAFKA
import os

l = util.createlogger('batchTest', logging.INFO)


class RunBatchTest(object):
    """Drives a matrix of Kafka batch-size test runs across client counts
    and message-batch sizes."""
    def __init__(self, argv):
        l.info(" Starting Kafka Batch Size Test")
        pwd = os.getcwd()

        # Bare function used as a mutable attribute namespace, mimicking an
        # optparse options object.
        def options():
            None
        setattr(options, 'test_duration', 10)
        setattr(options, 'msg_batch', 1000)
        setattr(options, 'msg_rate', 30000)
        setattr(options, 'config_file', pwd + '/hydra.ini')
        setattr(options, 'keep_running', False)
        setattr(options, 'acks', 1)
        setattr(options, 'linger_ms', 0)
        setattr(options, 'consumer_max_buffer_size', 0)
        first_test = True
        # Parameters: sweep dimensions for the batch test.
        # (Fragment is truncated after these.)
        client_set = [30, 60, 120, 240, 480, 960, 1920]
        msg_batch_set = [100, 200, 500, 1000, 2000, 5000]
from datetime import datetime, timedelta
from optparse import OptionParser
from pprint import pformat  # NOQA
from hydra.lib import util
from hydra.lib.h_analyser import HAnalyser
from hydra.lib.hydrabase import HydraBase
from cassandra.cluster import Cluster

try:
    # Python 2.x
    from ConfigParser import ConfigParser
except ImportError:
    # Python 3.x
    from configparser import ConfigParser

# NOTE(review): `logging` is referenced below but no `import logging` is
# visible in this fragment -- confirm the full file imports it.
l = util.createlogger('hCassandra', logging.DEBUG)


class RunTestCassandra(HydraBase):
    """Cassandra stress-test driver built on HydraBase.
    (Fragment is truncated inside __init__.)"""
    def __init__(self, options, runtest=True, mock=False):
        self.options = options
        self.config = ConfigParser()
        HydraBase.__init__(self, 'CassandraStressTest', self.options, self.config,
                           startappserver=runtest, mock=mock,
                           app_dirs=['src', 'hydra'])
        self.stress_client = '/stress-client'
        self.add_appid(self.stress_client)
__author__ = 'sushil'

from pprint import pprint, pformat  # NOQA
import logging
import os
import sys
from datetime import datetime
from hydra.lib import util
from hydra.zmqtest.runtest import RunTestZMQ
from hydra.lib.boundary import Scanner
from optparse import OptionParser

l = util.createlogger('runSuitMaxRate', logging.INFO)
# l.setLevel(logging.DEBUG)


class RunSuitMaxRate(object):
    """Runs the ZMQ max-rate test suite, appending results to
    zmqsuit.test.log in the current working directory."""
    def __init__(self, options):
        l.info(" Starting Max Rate ....")
        pwd = os.getcwd()
        fname = 'zmqsuit.test.log'
        # NOTE(review): file handle is opened here and not closed within this
        # fragment -- confirm it is closed later in the full file.
        ofile = open(pwd + '/' + fname, 'w')
        ofile.truncate()
        ofile.write('Starting at :' + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\n')
        # def options = lambda: None  # NOQA
        # setattr(options, 'test_duration', 15)
        # setattr(options, 'msg_batch', 100)
        setattr(options, 'msg_rate', 10000)
        setattr(options, 'keep_running', False)
__author__ = 'sushil'

import subprocess
import signal
import sys
import time
import atexit
import psutil
import logging
from hydra.lib import util

l = util.createlogger('cmgr', logging.INFO)


class ChildManager(object):
    """Launches and tracks child processes along with their log-file handles."""
    def __init__(self, sighandler=True):
        # Optionally install a SIGCHLD handler so that a dying child tears the
        # parent down as well.
        if sighandler:
            signal.signal(signal.SIGCHLD, self.sigchild)
        self.taskdone = False
        self.jobs = {}  # job name -> bookkeeping dict ('fout'/'ferr' handles, ...)

    def sigchild(self, signum, frame):
        """SIGCHLD handler: close all per-job log handles and exit the parent."""
        print("Received SIGCHILD parent will exit as well")
        for name in self.jobs:
            if 'fout' in self.jobs[name]:
                self.jobs[name]['fout'].close()
                self.jobs[name]['ferr'].close()
        self.taskdone = True
        sys.exit(0)

    def add_child(self, name, cmd, wdir=None, env=None):
__author__ = 'AbdullahS'

from sys import path
path.append("src/main/python")
import unittest
import logging
import time
import socket
from pprint import pprint, pformat  # NOQA
from hydra.lib import util
from hydra.lib.hydrabase import HydraBase
from hydra.lib.utility.h_threading import HThreading

l = util.createlogger('RuntestLocalTest', logging.INFO)


class RuntestLocalTest(unittest.TestCase):
    """
    Test class that attempts to unit test kraken functionality.
    Will have more things being added on as it matures.
    """
    def setUp(self):
        l.info("LocalTest initialized")

    def test_ip_block(self):
        # Loopback endpoint parameters used by the test's socket setup.
        # (Fragment is truncated after these.)
        l.info("test ip block launched")
        self.TCP_IP = '127.0.0.1'
        self.TCP_PORT = 5005
        self.BUFFER_SIZE = 5
__author__ = 'annyz'

from pprint import pprint, pformat  # NOQA
import logging
import os
import sys
from datetime import datetime
from hydra.lib import util
from hydra.kafkatest.runtest import RunTestKAFKA
from hydra.lib.boundary import Scanner
from optparse import OptionParser

l = util.createlogger('runSuitMaxRate', logging.INFO)


class RunSuitMaxRate(object):
    """Runs the Kafka max-rate test suite, appending results to
    kafkasuit.test.log in the current working directory."""
    def __init__(self, options):
        l.info(" Starting Max Rate ....")
        pwd = os.getcwd()
        fname = 'kafkasuit.test.log'
        # NOTE(review): file handle is opened here and not closed within this
        # fragment -- confirm it is closed later in the full file.
        ofile = open(pwd + '/' + fname, 'w')
        ofile.truncate()
        ofile.write('Starting at :' + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\n')
        # setattr(options, 'test_duration', 15)
        setattr(options, 'msg_batch', 100)
        setattr(options, 'msg_rate', 10000)
        setattr(options, 'keep_running', False)
        setattr(options, 'acks', 0)
        setattr(options, 'linger_ms', 0)
        setattr(options, 'consumer_max_buffer_size', 0)
import logging
import os
import psutil
import json
import time
import subprocess
import re
import sys
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from hydra.lib import util
from hydra.lib.hdaemon import HDaemonRepSrv
from pprint import pformat  # NOQA

l = util.createlogger('StressClient', logging.DEBUG)


class HDCStressRepSrv(HDaemonRepSrv):
    """
    HDaemon Cassandra Stress REP Server
    (to control and collect stats from stress client)
    """
    def __init__(self, port, run_data, stress_metrics):
        self.run_data = run_data            # shared dict driving/reporting the run
        self.stress_metrics = stress_metrics
        self.init_stress_metrics()
        HDaemonRepSrv.__init__(self, port)
        # Register Functions
        self.register_fn('teststart', self.test_start)
        self.register_fn('getstats', self.get_stats)
        self.register_fn('teststatus', self.test_status)
__author__ = 'AbdullahS'

from sys import path
path.append("src/main/python")
import unittest
import logging
import os
from pprint import pprint, pformat  # NOQA
from hydra.lib import util
from hydra.rmqtest.runtest import RunTestRMQ

l = util.createlogger('RMQLocalTest', logging.INFO)


class RMQLocalTest(unittest.TestCase):
    """
    Test class that runs a full RabbitMQ PUB/SUB test locally.
    The pub and sub processes are governed and launched by a mock backend
    hydra/src/main/python/hydra/lib/mock_backend.py
    The test is given an illusion that it is running on hydra mesosphere infra.
    """
    def setUp(self):
        l.info("RMQLocalTest initated")

    def test1(self):
        # (Fragment is truncated after these lines.)
        l.info("test1 launched")
        pwd = os.getcwd()
        l.info("CWD = " + pformat(pwd))
from datetime import datetime, timedelta
from optparse import OptionParser
from pprint import pformat  # NOQA
from hydra.lib import util
from hydra.lib.h_analyser import HAnalyser
from hydra.lib.hydrabase import HydraBase
from cassandra.cluster import Cluster

try:
    # Python 2.x
    from ConfigParser import ConfigParser
except ImportError:
    # Python 3.x
    from configparser import ConfigParser

# NOTE(review): `logging` is referenced below but no `import logging` is
# visible in this fragment -- confirm the full file imports it.
l = util.createlogger('hCassandra', logging.DEBUG)


class RunTestCassandra(HydraBase):
    """Cassandra stress-test driver built on HydraBase."""
    def __init__(self, options, runtest=True, mock=False):
        self.options = options
        self.config = ConfigParser()
        HydraBase.__init__(self, 'CassandraStressTest', self.options, self.config,
                           startappserver=runtest, mock=mock,
                           app_dirs=['src', 'hydra'])
        self.stress_client = '/stress-client'
        self.add_appid(self.stress_client)
        # Run immediately unless the caller only wants the driver object.
        if runtest:
            self.run_test()
            self.stop_appserver()

    def rerun_test(self, options):
import os
import time
import sys
import code
import traceback
import signal
import random
from random import randint
from pprint import pprint, pformat  # NOQA
from hydra.lib import appserver, mmapi, util, mock_backend
from hydra.lib.boundary import BoundaryRunnerBase
from hydra.lib.h_analyser import HAnalyser
from hydra.lib import common
from ConfigParser import ConfigParser

# NOTE(review): `logging` is referenced below but no `import logging` is
# visible in this fragment -- confirm the full file imports it.
l = util.createlogger('HydraBase', logging.INFO)
from marathon.models import MarathonApp, MarathonConstraint
# l.setLevel(logging.DEBUG)


def debug(sig, frame):
    """Interrupt running process, and provide a python prompt for
    interactive debugging.

    Intended to be installed as a signal handler; drops into an interactive
    console whose namespace combines the interrupted frame's globals and
    locals (plus `_frame` for the frame object itself).
    """
    d = {'_frame': frame}         # Allow access to frame object.
    d.update(frame.f_globals)     # Unless shadowed by global
    d.update(frame.f_locals)

    i = code.InteractiveConsole(d)
    message = "Signal received : entering python shell.\nTraceback:\n"
    message += ''.join(traceback.format_stack(frame))
    i.interact(message)
__author__ = 'sushil, abdullahS'

import zmq
import logging
import os
import time
import psutil
import json
from hydra.lib import util
from hydra.lib.hdaemon import HDaemonRepSrv
from hydra.lib.childmgr import ChildManager
from pprint import pformat

l = util.createlogger('HSub', logging.INFO)


class HDZmqsRepSrv(HDaemonRepSrv):
    """REP server for the ZMQ subscriber: tracks message receive and
    reconnect counters/rates and serves them over getstats."""
    def __init__(self, port):
        self.msg_cnt = 0  # message count, other option is global, making progress
        self.recv_rate = 0
        self.reconnect_cnt = 0
        self.reconnect_rate = 0
        HDaemonRepSrv.__init__(self, port)
        self.register_fn('getstats', self.get_stats)
        self.register_fn('resetstats', self.reset_stats)
        self.register_fn('updateconfig', self.update_config)
        self.reset_stats()

    def get_stats(self):
        # NOTE(review): self.run_data is not initialized in the visible
        # __init__ -- confirm it is set elsewhere before get_stats is called.
        # (Fragment is truncated after the first statement below.)
        process = psutil.Process()
        self.run_data['stats']['msg_cnt'] = self.msg_cnt
__author__ = 'AbdullahS, sushil'

from pprint import pprint, pformat  # NOQA
import zmq
import logging
import json
import traceback

from hydra.lib import util
from hydra.lib.utility.h_threading import HThreading
from hydra.lib import hdaemon_pb2

l = util.createlogger('HDaemon', logging.INFO)
# l.setLevel(logging.DEBUG)


class HDaemonRepSrv(object):
    """Base REP-socket daemon server. Subclasses register named callback
    functions that remote clients can invoke by name.
    NOTE(review): near-duplicate of the HDaemonRepSrv fragment earlier in
    this concatenation -- consider deduplicating at the repository level."""

    def __init__(self, port):
        l.info("initiated... REP port[%s]", port)
        self.port = port
        self.data = {}  # Dict calling class can use to store data, can be fetched later
        self.t_exceptions = []       # exceptions collected from worker threads
        self.h_threading = HThreading()
        self.cbfn = {}               # function-name -> callback registry

    def thread_cb(self, t_exceptions):
        """Collect and log exceptions raised in helper threads."""
        for exception in t_exceptions:
            self.t_exceptions.append(exception)
            l.error(exception)

    def run(self):