Example #1
    def __init__(self, directory, port):
        self.comm = communication.Communicator(port, self.on_data_received)
        self.directory = directory
        self.vcs = vcs.VCS(directory)
        self.monitor = monitor.Monitor(self.directory, self)

        logging.info("Started")
Example #2
    def test_open_logfile_do_not_exist(self):

        m = monitor.Monitor()
        with patch('builtins.open') as mock_open:
            mock_open.side_effect = FileNotFoundError
            with self.assertRaises(FileNotFoundError):
                m.open_logfile('./mylogfile.log')
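The test above only checks that Monitor.open_logfile lets a missing file surface as FileNotFoundError. A minimal sketch of a method with that behaviour, assuming Monitor simply opens the file and keeps the handle (the attribute names mirror the later scan_log examples and are not the project's confirmed implementation):

# Hypothetical sketch only; the real monitor.Monitor.open_logfile may differ.
def open_logfile(self, logname):
    self.logname = logname
    self.log = open(logname)  # a missing file raises FileNotFoundError to the caller
    return self.log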
Example #3
File: __init__.py Project: m4tlch/aaPanel
def panel_monitor(pdata=None):
    comReturn = comm.local()
    if comReturn: return comReturn
    import monitor
    dataObject = monitor.Monitor()
    defs = ('get_access_ip', 'get_exception', 'get_exception_logs', 'get_attack_nums', 'php_count', 'return_php', 'mysql_client_count')
    return publicObject(dataObject, defs, None, pdata)
Example #4
    def run(self):
        # set up logging
        loglevel = rc.config.getint('logging', 'loglevel')
        logFilemode = rc.config.get('logging', 'logFilemode')
        logging.basicConfig(
            filename=self.logfile,
            filemode=logFilemode,
            level=loglevel,
            format="%(threadName)s: %(asctime)s  %(name)s [%(levelname)-8s] %(message)s"
        )
        logging.info("seheiahd started")

        mythreads = []
        mon = monitor.Monitor()
        alarm = alarmcascade.AlarmCascade()
        check = checkBehavior.Check(mon)
        #pocketsphinx = gstSphinxCli.GstSphinxCli()
        mythreads.append(mon)
        mythreads.append(alarm)
        mythreads.append(check)
        #mythreads.append(pocketsphinx)

        for thread in mythreads:
            thread.start()
        #pocketsphinx.run()

        while True:
            try:
                time.sleep(0.01)
            except:  # typically a KeyboardInterrupt; stop all worker threads
                for thread in mythreads:
                    thread.stop()
                print("\b\bexit")
Example #5
def panel_monitor(pdata=None):
    comReturn = comm.local()
    if comReturn: return comReturn
    import monitor
    dataObject = monitor.Monitor()
    defs = ('get_spider', 'get_exception', 'get_request_count_qps', 'load_and_up_flow', 'get_request_count_by_hour')
    return publicObject(dataObject, defs, None, pdata)
Example #6
File: util.py Project: stefankoegl/bwmon
def read_monitor_config(configfile):
    """Reads the monitor configuration file for the Aggregator

    @param configfile: path of the config file
    @return: a list of Monitor or PipeMonitor objects
    """
    config = ConfigParser.ConfigParser()
    config.read(configfile)
    for section in config.sections():
        c = dict(config.items(section))

        if c['type'] == 'monitor':
            ignorelocal = parse_bool(c.get('ignorelocal', False))
            import monitor
            mon = monitor.Monitor(ignorelocal=ignorelocal)
            inc = [c.get('include', '')]
            exc = [c.get('exclude', '')]
            mon.set_filter(inc, exc)

        elif c['type'] == 'pipe':
            import pipe
            port = int(c['port'])
            newhost = c['newhost']
            newport = int(c['newport'])

            mon = pipe.PipeMonitor(pipe.Pipe(port, newhost, newport))
            #mon.set_shape(c.get('shape_threshold', 0))

        else:
            mon = None
            print('unknown monitor type %s' % c['type'])

        if mon:
            yield mon
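Since read_monitor_config is a generator, callers typically iterate or materialise it. A minimal usage sketch, assuming the function is importable from util and that 'monitors.conf' is a config file with sections shaped as parsed above:

# Illustrative usage; the import path and 'monitors.conf' are assumptions.
from util import read_monitor_config

monitors = list(read_monitor_config('monitors.conf'))
print('loaded %d monitors' % len(monitors))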
Example #7
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '-mt', '--maxthreads', default=multiprocessing.cpu_count(),
        type=int, help='Maximum number of threads for the measurement')
    parser.add_argument('-l', '--loop', default=True, type=bool,
                        help='if more than one processor is specified, ' +
                        'choose whether to loop until the number of ' +
                        'maxthreads has been reached or use that number' +
                        ' of threads from the start')
    parser.add_argument('-pa', '--pipeargs', default='', type=str,
                        help="Arguments that should be passed to " +
                        "run_multi_ctobssim. For more details, see the " +
                        "help of run_multi_ctobssim")
    parser.add_argument('-log', default=False, type=bool,
                        help='if gammaspeed logging statements have been ' +
                        'added to ctools and gammalib, this option should ' +
                        'be set to True')

    args = parser.parse_args()

    if args.loop:
        for nthrd in xrange(int(args.maxthreads)):
            pipe_monitor = mt.Monitor("./run_pipeline.py " + args.pipeargs, nthrd + 1)
            pipe_monitor.monitor("monitor_CPUs_" + str(nthrd + 1) + ".csv", 0.1)
            if args.log:
                try:
                    pipe_monitor.parse_extension(
                        logext='*.log',
                        outname='pipeline_CPUs_' + str(nthrd + 1) + '.csv',
                        time_shift=TIME_ZONE_SHIFT)
                    shutil.copy2('ctlike.log','original_ctlike_CPUs_'+str(nthrd+1)+'.log')
                    shutil.copy2('ctobssim.log','original_ctobssim_CPUs_'+str(nthrd+1)+'.log')
                    shutil.copy2('ctbin.log','original_ctbin_CPUs_'+str(nthrd+1)+'.log')
                except ValueError:
                    print('no log file(s) found')
    else:
        pipe_monitor = mt.Monitor("./run_pipeline.py " + args.pipeargs, args.maxthreads)
        pipe_monitor.monitor("monitor_CPUs_" + str(args.maxthreads) + ".csv", 0.1)
        if args.log:
            try:
                pipe_monitor.parse_extension(
                    logext='*.log',
                    outname='pipeline_CPUs_' + str(args.maxthreads) + '.csv',
                    time_shift=TIME_ZONE_SHIFT)
            except ValueError:
                print('no log file(s) found')
Example #8
    def service(self):
        ''' Keeps the service monitor going.
            Exit on Kodi shutdown or profile switch.

            If the profile switch happens more than once,
            threads depending on abortRequest will not trigger.
        '''
        self.monitor = monitor.Monitor()
        player = self.monitor.player
        self.connect = connect.Connect()
        self.start_default()

        self.settings['mode'] = settings('useDirectPaths')

        while self.running:
            if window('jellyfin_online.bool'):

                if self.settings['profile'] != window('jellyfin_kodiProfile'):
                    LOG.info("[ profile switch ] %s", self.settings['profile'])

                    break

                if player.isPlaying() and player.is_playing_file(
                        player.get_playing_file()):
                    difference = datetime.today() - self.settings['last_progress']

                    if difference.seconds > 10:
                        self.settings['last_progress'] = datetime.today()

                        update = (datetime.today() -
                                  self.settings['last_progress_report']
                                  ).seconds > 250
                        event('ReportProgressRequested', {'Report': update})

                        if update:
                            self.settings['last_progress_report'] = datetime.today()

            if window('jellyfin.restart.bool'):

                window('jellyfin.restart', clear=True)
                dialog("notification",
                       heading="{jellyfin}",
                       message=translate(33193),
                       icon="{jellyfin}",
                       time=1000,
                       sound=False)

                raise Exception('RestartService')

            if self.waitForAbort(1):
                break

        self.shutdown()

        raise Exception("ExitService")
Example #9
    def test_malformed_sample(self):
        record_queue = Queue.Queue()
        m = monitor.Monitor(record_queue)
        m.start()

        # Try to insert incorrect sample.
        record_queue.put([{'foobar': 'baz'}])

        m.stop()
        m.join()
Example #10
 def test_check_alert_off(self):
     t = datetime(1970, 10, 1, 12, 0, 0, 0)
     with patch('monitor.datetime') as mock_datetime:
         mock_datetime.now.return_value = t
         m = monitor.Monitor()
         m.one_min_ave = 10
         m.alert_on = True
         m.check_alert(self.Alert)
         self.assertEquals(m.alert_history[0],
                           ('High traffic alert terminated '
                            'at 01/Oct/1970:12:00:00'))
Example #11
 def test_check_alert_on(self):
     t = datetime(1970, 10, 1, 12, 0, 0, 0)
     with patch('monitor.datetime') as mock_datetime:
         mock_datetime.now.return_value = t
         m = monitor.Monitor()
         m.one_min_ave = 30
         m.check_alert(self.Alert)
         alert_suffix = m.alert_history[0].partition(
             'High traffic generated an alert - hits = ')[-1]
         self.assertEquals(
             alert_suffix, '30 hits/min, triggered at 01/Oct/1970:12:00:00')
Example #12
    def on_monitor(self, e):
        dlg = monitor.Monitor(self,
                              None,
                              title=_("Monitor"),
                              style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)

        self.monitor_menu = False
        dlg.ShowModal()
        dlg.Destroy()

        self.monitor_menu = True
Example #13
    def test_scan_log_non_cumulative(self):

        m = monitor.Monitor()
        m.log = MagicMock()
        m.log.readlines.return_value = [
            ('121.17.198.11 - - [09/Jun/2017:15:35:07 +0200] '
             '"DELETE /posts/posts/explore HTTP/1.0" 200 5038'
             '"http://cook.com/login/"')
        ]
        m.scan_log(False)
        m.scan_log(False)
        self.assertEquals(m.top_section_hits['posts'], 1)
Example #14
 def __init__(self, logger, datadir):
     self.logger = logger
     self.weather = weather.Weather(self.logger, os.environ['OWM_API_KEY'],
                                    'default', os.environ['LAT'],
                                    os.environ['LON'],
                                    os.environ['KISHODAI'],
                                    os.environ['CITY'], datadir)
     self.monitor = monitor.Monitor(self.logger, self.weather,
                                    os.environ.get('DARK') is not None,
                                    datadir)
     self.weather.update()
     self.monitor.clear()
Example #15
    def __init__(self,exec_path, config_path, duration, host_list):

        self.exec_path  = exec_path 
        self.config_path = config_path
        self.duration = duration
        self.host_list = host_list

        self.started = False
        self.thread = threading.Thread(target=self.loop)
        self.thread.setDaemon(True)
        self.monitor = monitor.Monitor(host_list)
        self.last_fast = None
        self.popen = None
Example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--range", help="ipv4 range of assets")

    args = parser.parse_args(sys.argv[1:])

    if not args.range:
        return 

    ip_range = args.range

    mon = monitor.Monitor(ip_range) 
    mon.run()
Example #17
    def test_scan_log_with_average(self):

        m = monitor.Monitor()
        m.log = MagicMock()
        m.log.readlines.return_value = [
            ('121.17.198.11 - - [09/Jun/2017:15:35:07 +0200] '
             '"DELETE /posts/posts/explore HTTP/1.0" 200 5038'
             '"http://cook.com/login/"')
        ]
        for _ in range(13):
            m.scan_log(False)
        self.assertEquals(len(m.two_min_hits), 12)
        self.assertEquals(m.one_min_ave, 6)
Example #18
 def get_root_file(self, filename):
     if filename in self._open_root_files:
         file_handle = io_ref_pool._open_root_files[filename]
     else:
         if len(self._open_root_files) > 998:
             monitor.Monitor().message(
                 "diskio", "WARNING to many open root files. Closing all. "
                 "Please check for lost histograms. (Use hist.SetDirectory(0) to keep them)"
             )
             self.close_files()
         file_handle = TFile.Open(filename, "READ")
         self._open_root_files[filename] = file_handle
     return file_handle
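A short usage sketch of the caching behaviour above, assuming 'pool' is an instance of the enclosing class and 'histos.root' is an existing ROOT file (both placeholders):

# Illustrative only; 'pool' stands for an instance of the class defining get_root_file.
f1 = pool.get_root_file('histos.root')
f2 = pool.get_root_file('histos.root')
assert f1 is f2  # the second call returns the cached TFile instead of reopening it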
Example #19
    def __init__(self):
        super(Controller, self).__init__()
        self.waiting_pros = []
        self.running_pros = []
        self.finished_pros = []
        self.failed_pros = []

        mon = monitor.Monitor()
        mon.connect_controller(self)
        mon.message(
            self, "INFO: Using " + str(settings.max_num_processes) +
            " cpu cores at max.")

        settings.controller = self
Example #20
    def test_multiple_tasks(self):
        record_queue = Queue.Queue()
        m = monitor.Monitor(record_queue, 6, 10)
        m.start()

        # Get base for timestamps
        base = time.time()

        cpu_allocation_slack = 4
        cpu_usage_slack = 0.5
        cpu_usage = 0.5
        mem_allocation_slack = 45
        mem_usage_slack = 32
        mem_usage = 512

        framework_id = "foobarbaz"
        executor_id = "foobarbaz"

        for j in range(1200):
            for i in range(10):
                record_queue.put([{
                    'timestamp': base + j,
                    'framework_id': framework_id + str(i),
                    'executor_id': executor_id,
                    'cpu_allocation_slack': cpu_allocation_slack,
                    'cpu_usage_slack': cpu_usage_slack,
                    'cpu_usage': cpu_usage,
                    'mem_allocation_slack': mem_allocation_slack,
                    'mem_usage_slack': mem_usage_slack,
                    'mem_usage': mem_usage
                }])

        sample_min1 = m.cluster()

        self.assertEqual(len(sample_min1), 1)

        # All samples are identical, so each aggregated field should equal the input value times the 10 frameworks.
        self.assertEqual(sample_min1[0]['cpu_allocation_slack'],
                         cpu_allocation_slack * 10)
        self.assertEqual(sample_min1[0]['cpu_usage_slack'],
                         cpu_usage_slack * 10)
        self.assertEqual(sample_min1[0]['cpu_usage'], cpu_usage * 10)
        self.assertEqual(sample_min1[0]['mem_allocation_slack'],
                         mem_allocation_slack * 10)
        self.assertEqual(sample_min1[0]['mem_usage_slack'],
                         mem_usage_slack * 10)
        self.assertEqual(sample_min1[0]['mem_usage'], mem_usage * 10)

        m.stop()
        m.join()
Example #21
    def __init__(self, orch_filename):

        # Initialise the thread
        Thread.__init__(self)
        # Set up the log file for logging
        logging.basicConfig(format='%(asctime)s %(message)s',
                            filename='logs/log1.log',
                            filemode='w',
                            level=logging.DEBUG)
        # load orch file info; these methods are static for easy testing
        self.orch_dict = Member._get_orch_parameters(orch_filename)
        logging.info('MEMBER: Orch Dict %s', self.orch_dict)
        # Make a dummy file if the file does not exist
        Member._write_dummy_composition(self.orch_dict.get('composition_name'),
                                        self.orch_dict.get('total_bytes'))
        #
        self.parts_dict = Member._get_parts_dict(
            self.orch_dict.get('composition_name'),
            self.orch_dict.get('bytes_per_part'),
            self.orch_dict.get('parts_checksum_dict'))
        # Initialise dicts that will be for quick lookup of ips, parts lists, transfers etc...
        self.connections_ip_dict = {}
        self.connections_parts_dict = {}
        self.active_transfers = {}
        self.connections_queue_dict = {}
        self.list_of_orch_ips = {}

        # The "director" queue for recieving messages, we add a message to it to give the conductor
        self.director_queue = queue.Queue()
        message = {'msg': 'CONDUCTOR'}
        self.director_queue.put(message)

        # Initialise the connection handler
        self.connect_queue = queue.Queue()
        self.con_handler = connection_handler.ConnectionHandler(
            self.connect_queue, self.director_queue, self.orch_dict)
        # Starts a thread that handles reading & writing parts to disk
        self.file_queue = queue.Queue()
        self.f_handler = file_handler.FileHandler(self.orch_dict,
                                                  self.file_queue,
                                                  self.director_queue)
        # Starts a thread that handles monitoring progress for output in terminal
        # First count to see how many parts we have already
        has_parts = len([i for i in self.parts_dict.values() if i is True])

        # A monitor thread that shows a progress bar in the terminal
        self.monitor_queue = queue.Queue()
        self.mon = monitor.Monitor(has_parts, self.orch_dict['num_parts'],
                                   self.monitor_queue, self.director_queue)
Example #22
    def test_redraw_screen_top_section(self):

        m = monitor.Monitor()
        m.logname = './mylogfile.log'
        m.alert_history
        m.redraw_screen()
        output = sys.stdout.getvalue().strip()
        self.assertEquals(
            output, '''\x1bcStatistics for ./mylogfile.log
Unique IP addresses : 0	0 new entries of 0 total entries
Last two minutes hits/min average : 0 (alert level 0)

### Top sections hit ###

### Alerts ###''')
Example #23
    def __init__(self):
        # Set to False to kill
        self.running = True

        # Accept all notes by default
        self._enables = 0x07
        # Say LRC is present for now
        self._lrc_ok = True
        # Acceptor has its own lock
        self._mutex = Lock()
        # data byte 0
        self._state = 0x01
        # data byte 1
        self._event = 0x10
        # byte 2 - lower 3 bits
        self._ext = 0x01
        # byte 2 Upper 5 bits
        self._value = 0x00
        # byte 3 is reserved
        self._resd = 0x00
        # byte 4 is model (00-7FH)
        self._model = 0x01
        # byte 5 is software revision (00-7FH)
        self._rev = 0x01

        self._note_count = 0
        self._cheat_flag = False

        self._ack = -1

        # Some states are only sent once, handle them in a queue
        self._b0_ephemeral = Queue()
        self._b1_ephemeral = Queue()
        self._b2_ephemeral = Queue()

        # Background worker thread
        self._serial_thread = None

        # Used to recall in case of NAK
        self._last_msg = None

        #
        self._mon = monitor.Monitor(5, self._timedout)
        self._mon.start()

        # Simulate power up
        power_up = Thread(target=self._power_up)
        power_up.start()
Example #24
def main():
  logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
  # TODO: the 2 services don't really maintain stages, make them static
  monitor_s3 = monitor.Monitor()
  report_s3 = report.Report()
  p1 = Process(
      target=run_repeatly, args=(monitor_s3.run, config.S3_MONITOR_TIME))
  p2 = Process(
      target=run_repeatly, args=(report_s3.run, config.S3_REPORT_TIME))
  # Terminate them if the main program is terminated
  p1.daemon = True
  p2.daemon = True
  p1.start()
  p2.start()
  p1.join()
  p2.join()
Example #25
 def OnSubmit(self, e):
     #send j for Authorization
     try:
         Authresp = self.Auth(self.emailBox.GetValue(), self.passwordBox.GetValue(), self.apikeyBox.GetValue())
         if Authresp['ERROR'] == 0:
             self.sizer2.Hide(self.emailLabel)
             self.sizer2.Hide(self.emailBox)
             self.sizer2.Hide(self.passwordLabel)
             self.sizer2.Hide(self.passwordBox)
             self.sizer2.Hide(self.apikeyLabel)
             self.sizer2.Hide(self.apikeyBox)
             self.sizer2.Hide(self.locationLabel)
             self.sizer2.Hide(self.pathBox)
             self.sizer2.Hide(self.emailBox)
             self.sizer2.Hide(self.browseButton)
             self.sizer2.Hide(self.browseButton)
             self.sizer2.Hide(self.submitButton)
             
             self.gauge.Show()
             self.syncstatusLabel.Show()
             self.bigBar.Show()
             self.gauge.Show()
             self.square1.Show()
             self.st4.Show()
             self.square2.Show()
             self.st5.Show()
             self.square3.Show()
             self.st6.Show()
             self.st2.Show()
             
             self.gauge.SetValue(0)
             
             self.monitor = monitor.Monitor(self.sessionkey, self.pathBox.GetValue())
             self.monitor.start()
             self.gauge.SetValue(100)
             dialog.browser.LoadURL("http://www.ADDPREDICTIONAPIOAUTH.com")
             dialog.Show()
             print "awesome"
             self.isLogged = 1
         else:
             wx.MessageBox('Bad Login', 'Error', wx.OK | wx.ICON_INFORMATION)
             self.isLogged = 0
         return
     except:
         wx.MessageBox('Bad Login', 'Error', wx.OK | wx.ICON_INFORMATION)
         self.isLogged = 0
         return
Example #26
    def OnCode(self, e):
        self.gauge.Pulse()
        self.oauthBox.Hide()
        self.gauge.Pulse()
        self.oauthButton.Hide()
        self.gauge.Pulse()
        self.fp.auth(self.oauthBox.GetValue())

        self.monitor = monitor.Monitor(self.sessionkey,
                                       self.pathBox.GetValue())
        self.monitor.start()
        self.monitor.setProphet(self.fp)
        self.monitor.sync.setProgressCallback(self.OnSyncUpdate)

        self.gauge.Pulse()
        self.gauge.SetValue(100)

        self.monitor.synchronize(True)
        return
Example #27
    def __init__(self):

        self.monitor = monitor.Monitor()
        self.monitor.start()
        frame_size = (100, 100)  # doesn't really matter since it will never be shown
        wx.Frame.__init__(self, None, -1, "familysysadmin",
                          size=frame_size)  # todo: reach into app to get name
        icon = uiicon.uiicon.getIcon()
        self.SetIcon(icon)
        # setup a taskbar icon, and catch some events from it
        self.tbicon = wx.TaskBarIcon()
        self.tbicon.SetIcon(
            icon, "familysysadmin")  # todo: reach into app to get name
        wx.EVT_TASKBAR_LEFT_DOWN(self.tbicon, self.OnTaskBarMenu)
        wx.EVT_TASKBAR_RIGHT_DOWN(self.tbicon, self.OnTaskBarMenu)
        wx.EVT_MENU(self.tbicon, self.TBMENU_REFRESH, self.OnTaskBarRefresh)
        wx.EVT_MENU(self.tbicon, self.TBMENU_STATUS, self.OnTaskBarStatus)
        wx.EVT_MENU(self.tbicon, self.TBMENU_SETTINGS, self.OnTaskBarSettings)
        wx.EVT_MENU(self.tbicon, self.TBMENU_CLOSE, self.OnTaskBarClose)
Example #28
    def add_instance(self, addr):
        """Adds a new instance to the cluster.

		New asynchronous monitor is created.

		Args:
			addr: address in the form of ip:port or host:port
		"""
        # Check that we are not trying to add ourselves
        if addr == self.me:
            return
        # Check that the address is not in our list already
        if any([addr == x.address for x in self.instance_monitors]):
            return

        # Create a new state for the instance
        # Instantiate and start a monitor
        mon = monitor.Monitor(addr)
        mon.start()
        self.instance_monitors.append(mon)
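A hedged usage sketch of add_instance, where 'cluster' stands for an instance of the enclosing class and the addresses are placeholders:

# Illustrative only; 'cluster' is an instance of the class defining add_instance.
cluster.add_instance('10.0.0.12:7000')  # creates and starts a Monitor for the peer
cluster.add_instance('10.0.0.12:7000')  # duplicate address, returns without adding
cluster.add_instance(cluster.me)        # own address, returns immediately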
Example #29
 def exec_with_cpu(self, cmd):
     '''If cpu monitoring is enabled (--monitor), collect CPU load in the background
     while the test is running.
     :param cmd: the command to run
     :return: a tuple (cmd_output, cpu_load)
     '''
     # ssh timeout should be at least set to the command duration
     # we add 20 seconds to it as a safety
     timeout = self.get_cmd_duration() + 20
     if self.gmond_svr:
         gmon = monitor.Monitor(self.gmond_svr, self.gmond_port)
         # Adjust this frequency based on the collectors update frequency
         # Here we assume 10 second and a max of 20 samples
         gmon.start_monitoring_thread(freq=10, count=20)
         cmd_output = self.exec_command(cmd, timeout)
         gmon.stop_monitoring_thread()
         # insert the cpu results into the results
         cpu_load = gmon.build_cpu_metrics()
     else:
         cmd_output = self.exec_command(cmd, timeout)
         cpu_load = None
     return (cmd_output, cpu_load)
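A hedged usage sketch of exec_with_cpu, where 'runner' and the command string are placeholders rather than names from the source:

# Illustrative only; 'runner' is an instance of the class defining exec_with_cpu.
cmd_output, cpu_load = runner.exec_with_cpu('my_benchmark --duration 60')
if cpu_load is not None:  # only populated when a gmond server was configured
    print('collected CPU metrics while the command ran')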
Example #30
File: main.py Project: red031000/pbrEngine
def main():
    global checker, display, pbr
    #logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    # init the PBR engine and hook everything up
    pbr = PBREngine(actionCallback, onCrash)

    # command line monitor for displaying states, events etc.
    display = monitor.Monitor(pbr)

    # start the crash detection thingy
    #checker = crashchecker.Checker(pbr, onCrash)

    pbr.on_state += onState
    pbr.on_win += onWin
    pbr.on_attack += onAttack
    pbr.on_faint += onFaint
    pbr.on_switch += onSwitch
    pbr.start()
    pbr.on_gui += lambda gui: display.reprint()
    pbr.setVolume(20)
    pbr.matchFov = 0.7

    # don't terminate please
    gevent.sleep(100000000000)