Example #1
class SystemMonitor:
	handler = None
	pmon = None
	nhmon = None
	ui = None
	continue_loop = True
	
	def __init__(self):
		self.handler = AppHandler(self)
		self.pmon = ProcessMonitor()
		self.nhmon = NethogsMonitor(self.handler)
		self.choose_ui()
	
	def choose_ui(self):
		if config.type_ui == "text":
			self.ui = TextUI(self)
		elif config.type_ui == "curses":
			self.ui = CursesUI(self)
			
	def main_loop(self):
		while self.continue_loop:
			self.ui.start()
	
	def stop(self):
		self.ui.stop()
		self.nhmon.stop()
		self.continue_loop = False
	
	def update(self):
		self.pmon.update()
Example #2
    def __init__(self, bridge_ip, group_name, username='', password=''):
        self.bridge = None
        self.light_controller = None
        self._bridge_ip = bridge_ip
        self._group_name = group_name
        self.process_monitor = ProcessMonitor("vlc")
        self.vlc_tracker = VLCTracker(username, password)

        self._vlc_state_previous = None
        self._vlc_state_now = None
Example #3
class TestAeolusHA(unittest.TestCase):
    def setUp(self):
        self.qpidd = ProcessMonitor(['qpidd', '-p', '49000', '--auth', 'no'])
        time.sleep(1)
        self.cped = ProcessMonitor(['../src/cped', '-v', '-v', '-v'])
        self.manufacturer = manufacturer.Manufacturer('rhel6')
        time.sleep(2)

    def test_one_assembly(self):
        '''
        Start a deployable with one assembly.
        Assertion: it is started and we can run a command on it.
        '''
        self.assertTrue(self.qpidd.is_running())
        self.assertTrue(self.cped.is_running())

        d = deployable.Deployable('test')
        ai1 = self.manufacturer.assemble('rhel6-cpe-test', 2)
        d.assembly_add(ai1)
        d.start()
        (rc, out) = d.assemblies['rhel6-cpe-test-2'].rsh('hostname')
        self.assertEqual(rc, 0)
        self.assertEqual(out.strip(), 'rhel6-cpe-test-2')
        d.stop()

        self.assertTrue(self.qpidd.is_running())
        self.assertTrue(self.cped.is_running())

    def tearDown(self):
        #self.manufacturer.stop()
        self.cped.stop()
        self.qpidd.stop()
        pass
Example #4
class TestAeolusHA(unittest.TestCase):

    def setUp(self):
        self.qpidd = ProcessMonitor(['qpidd', '-p', '49000', '--auth', 'no'])
        time.sleep(1)
        self.cped = ProcessMonitor(['../src/cped', '-v', '-v', '-v'])
        self.manufacturer = manufacturer.Manufacturer('rhel6')
        time.sleep(2)

    def test_one_assembly(self):
        '''
        Start a deployable with one assembly.
        Assertion: it is started and we can run a command on it.
        '''
        self.assertTrue(self.qpidd.is_running())
        self.assertTrue(self.cped.is_running())

        d = deployable.Deployable('test')
        ai1 = self.manufacturer.assemble('rhel6-cpe-test', 2)
        d.assembly_add(ai1)
        d.start()
        (rc, out) = d.assemblies['rhel6-cpe-test-2'].rsh('hostname')
        self.assertEqual(rc, 0)
        self.assertEqual(out.strip(), 'rhel6-cpe-test-2')
        d.stop()

        self.assertTrue(self.qpidd.is_running())
        self.assertTrue(self.cped.is_running())

    def tearDown(self):
        #self.manufacturer.stop()
        self.cped.stop()
        self.qpidd.stop()
        pass
Example #5
def hack():
    only_startup = False
    dist = "rhel6"

    qpidd = ProcessMonitor(['qpidd', '-p', '49000', '--auth', 'no'])
    time.sleep(1)
    cped = ProcessMonitor(['../src/cped', '-v', '-v', '-v'])
    manu = manufacturer.Manufacturer(dist)
    time.sleep(2)

    print 'qpidd and cped running, moving on ...'
    d = deployable.Deployable('test')
    print 'assembling guest'
    ai1 = manu.assemble('%s-cpe-test' % dist, 2)
    print 'adding to dep'
    d.assembly_add(ai1)

    httpd = deployable.Service('httpd')
    d.service_add(httpd)

    print 'starting dep'
    d.start(only_startup)
    while (only_startup):
        time.sleep(1)

    ai1 = d.assemblies['%s-cpe-test-2' % dist]
    print "rsh'ing to assembly"
    ai1.rsh('hostname')

    time.sleep(90)

    d.stop()
    time.sleep(10)
    cped.stop()
    qpidd.stop()
Example #6
    def run_sync(self):
        with ProcessMonitor("monitor_{}".format(self.args.log_filename),
                            "producer_stress_sync",
                            print_console=self.args.print_console):

            class EventHubProducerClientTest(EventHubProducerClient):
                def get_partition_ids(self_inner):
                    if self.args.partitions != 0:
                        return [str(i) for i in range(self.args.partitions)]
                    else:
                        return super(EventHubProducerClientTest,
                                     self_inner).get_partition_ids()

            method_name = self.args.method
            logger = get_logger(self.args.log_filename,
                                method_name,
                                level=logging.INFO,
                                print_console=self.args.print_console)
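            # Resolve the test function named on the command line from this module's globals.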
            test_method = globals()[method_name]
            self.running = True

            if self.args.parallel_send_cnt and self.args.parallel_send_cnt > 1:
                if self.args.parallel_create_new_client:
                    clients = [
                        self.create_client(EventHubProducerClientTest)
                        for _ in range(self.args.parallel_send_cnt)
                    ]
                else:
                    clients = [self.create_client(EventHubProducerClientTest)]
                self.run_test_method_parallel(test_method, clients, logger)
            else:
                client = self.create_client(EventHubProducerClientTest)
                self.run_test_method(test_method, client, logger)
Example #7
async def run(args):

    with ProcessMonitor("monitor_{}".format(args.log_filename), "consumer_stress_async", print_console=args.print_console):
        kwargs_dict = {
            "prefetch": args.link_credit,
            "partition_id": str(args.recv_partition_id) if args.recv_partition_id else None,
            "track_last_enqueued_event_properties": args.track_last_enqueued_event_properties,
            "starting_position": starting_position
        }
        if args.parallel_recv_cnt and args.parallel_recv_cnt > 1:
            clients = [create_client(args) for _ in range(args.parallel_recv_cnt)]
            tasks = [
                asyncio.ensure_future(
                    clients[i].receive(
                        on_event_received,
                        **kwargs_dict
                    )
                ) for i in range(args.parallel_recv_cnt)
            ]
        else:
            clients = [create_client(args)]
            tasks = [asyncio.ensure_future(
                clients[0].receive(
                    on_event_received,
                    prefetch=args.link_credit,
                )
            )]

        await asyncio.sleep(args.duration)
        await asyncio.gather(*[client.close() for client in clients])
        await asyncio.gather(*tasks)
Example #8
 def __init__(self, settings):
     # Initialise the parent class
     self.settings = settings
     self.conf = None
     self.tmp_folder = os.path.join(tempfile.gettempdir(), 'shadowsocks')
     self.ss_cmd = "/system/bin/ss-server -c /etc/shadowsocks/config.json"
     self.settings['APP'] = 'ss_server'
     self.settings['CMD'] = self.ss_cmd.split()
     self.settings['PIDFILE'] = self.tmp_folder + '/ss_server.pid'
     self.settings['LOG'] = self.tmp_folder + '/ss_server.log'
     self.settings['TMP'] = self.tmp_folder + '/ss_server.tmp'
     self.ssMgr = ProcessMonitor(settings)
     self.__do_init()
     logger = logging.getLogger("ss-server")
     logger.setLevel(logging.DEBUG)
     formatter = logging.Formatter(
         '%(asctime)s %(levelname)-8s %(message)s', '%Y-%m-%d %H:%M:%S')
     file_handler = logging.FileHandler(self.settings['LOG'])
     file_handler.setFormatter(formatter)
     logger.addHandler(file_handler)
     self.portInfo = {}
Example #9
def hack():
    only_startup = False
    dist = "rhel6"

    qpidd = ProcessMonitor(['qpidd', '-p', '49000', '--auth', 'no'])
    time.sleep(1)
    cped = ProcessMonitor(['../src/cped', '-v', '-v', '-v'])
    manu = manufacturer.Manufacturer(dist)
    time.sleep(2)

    print 'qpidd and cped running, moving on ...'
    d = deployable.Deployable('test')
    print 'assembling guest'
    ai1 = manu.assemble('%s-cpe-test' % dist, 2)
    print 'adding to dep'
    d.assembly_add(ai1)

    httpd = deployable.Service('httpd')
    d.service_add(httpd)

    print 'starting dep'
    d.start(only_startup)
    while (only_startup):
        time.sleep(1)

    ai1 = d.assemblies['%s-cpe-test-2' % dist]
    print "rsh'ing to assembly"
    ai1.rsh('hostname')

    time.sleep(90)

    d.stop()
    time.sleep(10)
    cped.stop()
    qpidd.stop()
Example #10
def run(args):
    with ProcessMonitor("monitor_{}".format(args.log_filename),
                        "consumer_stress_sync",
                        print_console=args.print_console):
        kwargs_dict = {
            "prefetch": args.link_credit,
            "partition_id": str(args.recv_partition_id) if args.recv_partition_id else None,
            "track_last_enqueued_event_properties": args.track_last_enqueued_event_properties,
            "starting_position": starting_position
        }
        if args.max_batch_size:
            kwargs_dict["max_batch_size"] = args.max_batch_size
        if args.max_wait_time:
            kwargs_dict["max_wait_time"] = args.max_wait_time
        if args.parallel_recv_cnt and args.parallel_recv_cnt > 1:
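            # One client (and one daemon receive thread) per requested parallel receiver.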
            clients = [
                create_client(args) for _ in range(args.parallel_recv_cnt)
            ]
            threads = [
                threading.Thread(
                    target=clients[i].receive_batch
                    if args.max_batch_size else clients[i].receive,
                    args=(on_event_batch_received
                          if args.max_batch_size else on_event_received, ),
                    kwargs=kwargs_dict,
                    daemon=True) for i in range(args.parallel_recv_cnt)
            ]
        else:
            clients = [create_client(args)]
            threads = [
                threading.Thread(
                    target=clients[0].receive_batch
                    if args.max_batch_size else clients[0].receive,
                    args=(on_event_batch_received
                          if args.max_batch_size else on_event_received, ),
                    kwargs=kwargs_dict,
                    daemon=True)
            ]

        for thread in threads:
            thread.start()
        time.sleep(args.duration)
        for client in clients:
            client.close()
        for thread in threads:
            thread.join()
Example #11
async def async_send(client, args):
    azure_monitor_metric = AzureMonitorMetric("Async ServiceBus Sender")
    process_monitor = ProcessMonitor("monitor_sender_stress_async.log",
                                     "sender_stress_async")
    stress_test = StressTestRunnerAsync(
        senders=[client.get_queue_sender(QUEUE_NAME)],
        receivers=[],
        message_size=args.message_size,
        send_batch_size=args.send_batch_size,
        duration=timedelta(seconds=args.duration),
        azure_monitor_metric=azure_monitor_metric,
        process_monitor=process_monitor,
        fail_on_exception=False)
    await stress_test.run_async()
Example #12
    def __init__(
        self,
        senders,
        receivers,
        duration=timedelta(minutes=15),
        receive_type=ReceiveType.push,
        send_batch_size=None,
        message_size=10,
        max_wait_time=10,
        send_delay=0.01,
        receive_delay=0,
        should_complete_messages=True,
        max_message_count=1,
        send_session_id=None,
        fail_on_exception=True,
        azure_monitor_metric=None,
        process_monitor=None,
    ):
        self.senders = senders
        self.receivers = receivers
        self.duration = duration
        self.receive_type = receive_type
        self.message_size = message_size
        self.send_batch_size = send_batch_size
        self.max_wait_time = max_wait_time
        self.send_delay = send_delay
        self.receive_delay = receive_delay
        self.should_complete_messages = should_complete_messages
        self.max_message_count = max_message_count
        self.fail_on_exception = fail_on_exception
        self.send_session_id = send_session_id
        self.azure_monitor_metric = azure_monitor_metric or AbstractMonitorMetric(
            "fake_test_name")
        self.process_monitor = process_monitor or ProcessMonitor(
            "monitor_{}".format(LOGFILE_NAME),
            "test_stress_queues",
            print_console=PRINT_CONSOLE,
        )

        # Because of pickle we need to create a state object and not just pass around ourselves.
        # If we ever require multiple runs of this one after another, just make Run() reset this.
        self._state = StressTestRunnerState()

        self._duration_override = None
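        # A --stress_test_duration_seconds=<n> command-line argument overrides the configured duration.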
        for arg in sys.argv:
            if arg.startswith("--stress_test_duration_seconds="):
                self._duration_override = timedelta(
                    seconds=int(arg.split("=")[1]))

        self._should_stop = False
Example #13
async def async_receive(client, args):
    azure_monitor_metric = AzureMonitorMetric("Async ServiceBus Receiver")
    process_monitor = ProcessMonitor("monitor_receiver_stress_async.log",
                                     "receiver_stress_async")
    stress_test = StressTestRunnerAsync(
        senders=[],
        receivers=[client.get_queue_receiver(QUEUE_NAME)],
        max_message_count=args.max_message_count,
        receive_type=args.receive_type,
        max_wait_time=args.max_wait_time,
        duration=timedelta(seconds=args.duration),
        azure_monitor_metric=azure_monitor_metric,
        process_monitor=process_monitor,
        fail_on_exception=False)
    await stress_test.run_async()
Example #14
 def __init__(self, settings):
     # Initialise the parent class
     self.settings = settings
     self.conf = None
     self.tmp_folder = os.path.join(tempfile.gettempdir(), 'openvpn')
     self.openvpn_cmd = "/system/bin/openvpn --dev-node /dev/tun --config /etc/openvpn/openvpn_server.conf --tmp-dir %s" % self.tmp_folder
     self.settings['APP'] = 'openvpn_server'
     self.settings['CMD'] = self.openvpn_cmd.split()
     self.settings['PIDFILE'] = self.tmp_folder + '/openvpn_server.pid'
     self.settings['LOG'] = self.tmp_folder + '/openvpn_server.log'
     self.settings['TMP'] = self.tmp_folder + '/openvpn_server.tmp'
     self.openvpnMgr = ProcessMonitor(settings)
     self.__do_init()
     logger = logging.getLogger("openvpn")
     logger.setLevel(logging.DEBUG)
     formatter = logging.Formatter(
         '%(asctime)s %(levelname)-8s %(message)s', '%Y-%m-%d %H:%M:%S')
     file_handler = logging.FileHandler(self.settings['LOG'])
     file_handler.setFormatter(formatter)
     logger.addHandler(file_handler)
     #logger.basicConfig(level=logger.DEBUG,
     #                format='%(asctime)s %(levelname)-8s %(message)s',
     #                filename=self.settings['LOG'],datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
     self.portInfo = {}
Example #15
 def setUp(self):
     self.qpidd = ProcessMonitor(['qpidd', '-p', '49000', '--auth', 'no'])
     time.sleep(1)
     self.cped = ProcessMonitor(['../src/cped', '-v', '-v', '-v'])
     self.manufacturer = manufacturer.Manufacturer('rhel6')
     time.sleep(2)
Example #16
    def _handle_process_on_error(self, pm: ProcessMonitor):
        config = self._configuration[pm.name]

        if 'on_error' in config:
            if config['on_error'] == 'restart':
                pm.restart()
Example #17
class SsManager():
    def __init__(self, settings):
        # Initialise the parent class
        self.settings = settings
        self.conf = None
        self.tmp_folder = os.path.join(tempfile.gettempdir(), 'shadowsocks')
        self.ss_cmd = "/system/bin/ss-server -c /etc/shadowsocks/config.json"
        self.settings['APP'] = 'ss_server'
        self.settings['CMD'] = self.ss_cmd.split()
        self.settings['PIDFILE'] = self.tmp_folder + '/ss_server.pid'
        self.settings['LOG'] = self.tmp_folder + '/ss_server.log'
        self.settings['TMP'] = self.tmp_folder + '/ss_server.tmp'
        self.ssMgr = ProcessMonitor(settings)
        self.__do_init()
        logger = logging.getLogger("ss-server")
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s %(levelname)-8s %(message)s', '%Y-%m-%d %H:%M:%S')
        file_handler = logging.FileHandler(self.settings['LOG'])
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        self.portInfo = {}

    def start(self):
        logger.info('...shadowsocks start')
        self.ret, self.portInfo = add_socks_port()
        if self.ret is not True:
            logger.info('Added upnp port failed')
            self.portInfo = setup_relay_info()

        self.ret, self.conf = generate_client_conf(self.portInfo)
        if self.ret is not True:
            return self.ret[0]
        logger.info('client conf path: %s' % self.get_client_conf())

        if self.ssMgr.start() is True:
            logger.info('...shadowsocks start success')

        return 0

    def stop(self):
        logger.info('...shadowsocks stop')
        self.ret = self.ssMgr.stop()
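        # Release the UPnP port mapping if one was added when the server started.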
        if self.portInfo.has_key('exPort'):
            del_socks_port(self.portInfo)
        if self.ret is not True:
            return self.ret[0]
        logger.info('...shadowsocks stop success')

    def check_state(self):
        if self.ssMgr.status() is None:
            return False
        else:
            return True

    def restart(self):
        self.ssMgr.restart()

    def __init_folder(self):
        if not os.path.exists(self.tmp_folder):
            os.makedirs(self.tmp_folder)

    def __is_reboot(self):
        if (os.path.exists(self.tmp_folder)):
            return False
        else:
            os.mkdir(self.tmp_folder)
            return True

    def __do_init(self):
        if self.__is_reboot() is True:
            print '...shadowsocks do init'
            self.__init_folder()

    def get_client_conf(self):
        cert_list = check_ss_conf()
        if self.conf is None:
            return None
        if self.conf in cert_list:
            cert_path = get_conf_dir() + '/' + self.conf
            return cert_path
        else:
            return None
Example #18
File: command.py  Project: mportiz08/psio
 def __init__(self, args):
     self.monitor = ProcessMonitor()
Example #19
def run_process_monitor(process_monitor: ProcessMonitor, options):
    logger = logging.getLogger(__name__)
    logger.info('Start the process runner..')
    process_monitor.run(time_to_stop=options.collect_time_in_second)
    pass
Example #20
        time.sleep(total_monitor_time / 10)
        complete_part += 10
        logger.info(f'{complete_part}% Done')


if __name__ == '__main__':
    setup_logging('INFO')
    data_time_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    args = config_argument_parser()
    logger = logging.getLogger(__name__)
    locust_runner = LocustRunner(args.target_server_address, AccessLogUser)
    can_draw_process = False
    try:
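        # Attach to the target process; if it cannot be found, process monitoring is skipped.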
        process_monitor = ProcessMonitor(
            pid=args.process_pid,
            name=args.process_name,
            csv_file=get_default_process_monitor_csv_prefix(
                args.process_pid, args.process_name, args.total_user))
        process_monitor_task = threading.Thread(target=run_process_monitor,
                                                args=(process_monitor, args))
        process_monitor_task.start()
        can_draw_process = True
    except Exception:
        logger.warning('Cannot find target process')
        logger.warning('Will not run process monitor')
        process_monitor_task = None
        pass
    locust_task = threading.Thread(target=run_locust,
                                   args=(locust_runner, args))
    logger.info(
        f"Bring yourself a cup of coffee, this should done in {args.collect_time_in_second / 60} mins"
Example #21
	def __init__(self):
		self.handler = AppHandler(self)
		self.pmon = ProcessMonitor()
		self.nhmon = NethogsMonitor(self.handler)
		self.choose_ui()
Example #22
 def _start_process_from_config(self, process_name: str, parameters):
     return ProcessMonitor(self._queue, process_name, parameters['command'],
                           parameters['cwd'])
Example #23
class OpenvpnManager():
    def __init__(self, settings):
        # Initialise the parent class
        self.settings = settings
        self.conf = None
        self.tmp_folder = os.path.join(tempfile.gettempdir(), 'openvpn')
        self.openvpn_cmd = "/system/bin/openvpn --dev-node /dev/tun --config /etc/openvpn/openvpn_server.conf --tmp-dir %s" % self.tmp_folder
        self.settings['APP'] = 'openvpn_server'
        self.settings['CMD'] = self.openvpn_cmd.split()
        self.settings['PIDFILE'] = self.tmp_folder + '/openvpn_server.pid'
        self.settings['LOG'] = self.tmp_folder + '/openvpn_server.log'
        self.settings['TMP'] = self.tmp_folder + '/openvpn_server.tmp'
        self.openvpnMgr = ProcessMonitor(settings)
        self.__do_init()
        logger = logging.getLogger("openvpn")
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s %(levelname)-8s %(message)s', '%Y-%m-%d %H:%M:%S')
        file_handler = logging.FileHandler(self.settings['LOG'])
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        #logger.basicConfig(level=logger.DEBUG,
        #                format='%(asctime)s %(levelname)-8s %(message)s',
        #                filename=self.settings['LOG'],datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
        self.portInfo = {}
        #self.stop()

    def start(self):
        logger.info('...openvpn start')
        self.ret = cert_init()
        if self.ret is not True:
            return self.ret[0]
        self.ret, self.portInfo = add_openvpn_port()
        if self.ret is not True:
            logger.info('Added upnp port failed')
            self.portInfo = setup_relay_info()

        self.ret, self.conf = generate_client_conf(self.portInfo)
        if self.ret is not True:
            return self.ret[0]
        logger.info('client conf path: %s' % self.get_client_conf())

        self.ret = self.openvpnMgr.start()

        if self.ret is not True:
            return self.ret[0]

        #if self.portInfo['ip']=='relay_ip':
        #    self.ret=lunch_ecs()

        logger.info('...openvpn start success')
        return 0

    def stop(self):
        logger.info('...openvpn stop')
        self.ret = self.openvpnMgr.stop()
        if self.portInfo.has_key('exPort'):
            del_openvpn_port(self.portInfo)
        if self.ret is not True:
            return self.ret[0]
        logger.info('...openvpn stop success')
        return 0

    def restart(self):
        self.openvpnMgr.restart()

    def check_state(self):
        if self.openvpnMgr.status() is None:
            return False
        else:
            return True

    def __insert_tunko(self):
        cmd = 'insmod /system/lib/tun.ko'
        ret = os.system(cmd)

    def __init_folder(self):
        if not os.path.exists(self.tmp_folder):
            os.makedirs(self.tmp_folder)
        os.system('chmod -R 755 /etc/openvpn')  #generate crt in the folder

    def __iptable_setup(self):
        ret = 0
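        # Enable IPv4 forwarding so traffic from the VPN subnet can be routed out through eth0.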
        cmd = 'echo "1" > /proc/sys/net/ipv4/ip_forward'
        ret |= os.system(cmd)
        cmd = 'iptables -A INPUT -i eth0 -p tcp --dport 1194 -j ACCEPT'
        ret |= os.system(cmd)
        # Allow TUN interface connections to OpenVPN server
        cmd = 'iptables -A INPUT -i tun+ -j ACCEPT'
        ret |= os.system(cmd)
        # Allow TUN interface connections to be forwarded through other interfaces
        cmd = 'iptables -A FORWARD -i tun+ -j ACCEPT'
        ret |= os.system(cmd)
        cmd = 'iptables -A FORWARD -i tun+ -o eth0 -j ACCEPT'
        ret |= os.system(cmd)
        cmd = 'iptables -A FORWARD -i eth0 -o tun+ -j ACCEPT'
        ret |= os.system(cmd)
        # NAT the VPN client traffic to the internet
        cmd = 'iptables -t nat -A POSTROUTING -s 10.8.0.0/24 -o eth0 -j MASQUERADE'
        ret |= os.system(cmd)
        return ret

    def __is_reboot(self):
        if (os.path.exists(self.tmp_folder)):
            return False
        else:
            os.mkdir(self.tmp_folder)
            return True

    def __do_init(self):
        if self.__is_reboot() is True:
            print '...openvpn do init'
            if os.path.exists('/dev/tun') is not True:
                self.__insert_tunko()
            self.__iptable_setup()
            self.__init_folder()

    def get_client_conf(self):
        cert_list = check_ovpn_file()
        if self.conf is None:
            return None
        if self.conf in cert_list:
            cert_path = get_key_dir() + '/' + self.conf
            return cert_path
        else:
            return None

    def gen_client_conf(self):
        logger.info('...generate client conf')
        self.ret = cert_init()
        if self.ret is not True:
            return self.ret[0]
        self.ret, self.portInfo = add_openvpn_port()
        if self.ret is not True:
            return self.ret[0]
        self.ret, self.conf = generate_client_conf(self.portInfo)
        if self.ret is not True:
            return self.ret[0]
        logger.info('client conf path: %s' % self.get_client_conf())
        return 0

    def revoke_client(self):
        return revoke_client_crt()
Example #24
class VLCLightControl(object):
    def __init__(self, bridge_ip, group_name, username='', password=''):
        self.bridge = None
        self.light_controller = None
        self._bridge_ip = bridge_ip
        self._group_name = group_name
        self.process_monitor = ProcessMonitor("vlc")
        self.vlc_tracker = VLCTracker(username, password)

        self._vlc_state_previous = None
        self._vlc_state_now = None

    def setup(self):
        self.bridge = Bridge(self._bridge_ip)
        self.bridge.connect()
        time.sleep(0.250)
        self.light_controller = LightController(self.bridge, self._group_name)
        
    def start(self):
        if self.bridge is None or self.light_controller is None:
            print("Setting up a connection to the Hue Bridge")
            self.setup()
        self.process_monitor.block_until_process_detected()
        print("Backing up the lights"); self.light_controller.save_state()
        print("Getting initial state of VLC"); self._update_vlc_state()
        while True:
            self._block_until_state_change()
            self._logic_controller(self._vlc_state_previous, self._vlc_state_now)

    def _update_vlc_state(self):
        self._vlc_state_previous = self._vlc_state_now
        self._vlc_state_now = self.vlc_tracker.get_state()

    def _block_until_state_change(self, polling_rate=5):
        """
        Block until VLC state has changed.
        :param float polling_rate: number of state checks per second
        :return: True once the state has changed
        """
        sleep_time = 1 / polling_rate
        comparison = self._vlc_state_now
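        # Poll until the tracker reports a state different from the snapshot taken above.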
        print("Waiting for VLC state to change")
        while self.vlc_tracker.get_state() == comparison:
            time.sleep(sleep_time)
        self._update_vlc_state()
        print("VLC has changed from '{0}' to '{1}'".format(comparison, self._vlc_state_now))
        return True
    
    def _logic_controller(self, before, after):
        if before == after:
            return
        # Make a backup each time we go from a stopped state to playing state
        if before == self.vlc_tracker.STATE_STOPPED and \
            after == self.vlc_tracker.STATE_PLAYING:
            self.light_controller.save_state()


        if after == self.vlc_tracker.STATE_PAUSED:
            # Low light ambience
            self.light_controller.ambient_lighting()
            print("Setting ambient lighting")
        elif after == self.vlc_tracker.STATE_PLAYING:
            # Turn off the lights
            self.light_controller.turn_off()
            print("Turning lights off")
        elif after == self.vlc_tracker.STATE_STOPPED:
            # Restore the lights
            self.light_controller.restore()
            print("Restoring lights")
        else:
            raise ValueError("Unhandled state value: " + after)
Example #25
 def setUp(self):
     self.qpidd = ProcessMonitor(['qpidd', '-p', '49000', '--auth', 'no'])
     time.sleep(1)
     self.cped = ProcessMonitor(['../src/cped', '-v', '-v', '-v'])
     self.manufacturer = manufacturer.Manufacturer('rhel6')
     time.sleep(2)