Example #1
def clientLoop(clientType, settings):
	"""Start and monitor the client.

	Arguments:
	  settings (json) : global settings

	"""
	logger = logging.getLogger('ClientStartup')
	errorCount = 0
	firstErrorTime = None
	restartThresholdMaxErrors = int(settings.get('clientRestartThresholdMaxCountInTimeframe', 5))
	restartThresholdTimeframe = int(settings.get('clientRestartThresholdTimeframeInSeconds', 600))
	watcherWaitCycle = int(settings.get('statusReportingInterval'))
	exitWaitCycle = int(settings.get('waitSecondsBeforeExiting'))
	shutdownEvent = multiprocessing.Event()
	canceledEvent = multiprocessing.Event()
	watcherPID = os.getpid()
	logger = logging.getLogger('ClientStatus')

	## Construct and start the client
	thisClient = spinUpTheClient(logger, clientType, settings, shutdownEvent, canceledEvent, watcherPID)

	## Wait loop
	logger.info('Starting client watcher loop - {}'.format(time.strftime('%X %x')))
	while True:
		try:
			## Evaluate the running client
			if thisClient.is_alive() and canceledEvent is not None and canceledEvent.is_set():
				## If the child process is requesting a restart
				logger.error('  {}: still alive (PID: {}), but requested a restart'.format(thisClient.name, thisClient.pid))
				thisClient.terminate()
				canceledEvent.clear()
				del thisClient
				thisClient = spinUpTheClient(logger, clientType, settings, shutdownEvent, canceledEvent, watcherPID)
				logger.info('    Started {} with PID {}'.format(thisClient.name, thisClient.pid))

			elif not thisClient.is_alive():
				logger.error('  {}: stopped with exit code {}.'.format(thisClient.name, thisClient.exitcode))

				## If the child process is requesting a restart
				if canceledEvent is not None and canceledEvent.is_set():
					logger.info('    Client {} requested a restart'.format(thisClient.name))
					canceledEvent.clear()
					del thisClient
					thisClient = spinUpTheClient(logger, clientType, settings, shutdownEvent, canceledEvent, watcherPID)
					logger.info('    Started {} with PID {}'.format(thisClient.name, thisClient.pid))

				## If something went wrong, conditionally restart the client
				else:
					thisErrorTime = int(time.time())
					if firstErrorTime is None or ((thisErrorTime - firstErrorTime) > restartThresholdTimeframe):
						## Re-initialize if timeframe has passed w/o passing max errors
						firstErrorTime = thisErrorTime
						errorCount = 0
					errorCount += 1
					if errorCount <= restartThresholdMaxErrors:
						del thisClient
						## Restart the client
						thisClient = spinUpTheClient(logger, clientType, settings, shutdownEvent, canceledEvent, watcherPID)
						logger.info('Restarted the stopped client. Restart count {}.'.format(errorCount))
					else:
						logger.error('Too many restarts within the client restart threshold timeframe. Exiting...')
						raise EnvironmentError('Client stopped more than it was allowed to auto-restart in the provided timeframe. Watcher loop shutting down.')
			else:
				#logger.debug('Status of {}: running with PID {} and PPID {}'.format(thisClient.name, thisClient.pid, watcherPID))
				logger.debug('  {}: running'.format(thisClient.name))

			## Avoiding join() with the processes (from the multiprocessing
			## internals), since we're not waiting for them to finish. They
			## will always be running, so this loop is just for monitoring
			## and messaging. Any interrupt signals will be sent to the sub-
			## processes, and intentional shutdown requests are handled here.
			time.sleep(watcherWaitCycle)

		except (KeyboardInterrupt, SystemExit):
			logger.info('Status of {}: interrupt received... shutting down PID [{}]'.format(thisClient.name, thisClient.pid))
			print('Interrupt received; notifying client process [{}] to stop...'.format(thisClient.pid))
			logger = logging.getLogger('ClientStartup')
			logger.error('Interrupt received; notifying services to stop...')
			shutdownEvent.set()
			## Wait for thread to finish graceful shutdown
			time.sleep(exitWaitCycle)
			try:
				print('Checking if client process is still running')
				if thisClient.is_alive():
					print('Stopping client process in clientLoop...')
					with suppress(Exception):
						logger.debug('  process still running; stopping {} with PID {}'.format(thisClient.name, thisClient.pid))
					thisClient.terminate()
			except:
				stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
				print('Exception in killing process in clientLoop: {}'.format(stacktrace))
				with suppress(Exception):
					logger.debug('Exception in killing process in clientLoop: {}'.format(stacktrace))
			break
		except:
			stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
			logger.info('Status of {}: exception hit... shutting down PID [{}]'.format(thisClient.name, thisClient.pid))
			print('Exception in watcher loop: {}'.format(stacktrace))
			print('Notifying client process [{}] to stop...'.format(thisClient.pid))
			with suppress(Exception):
				logger = logging.getLogger('ClientStartup')
				logger.error('Exception in watcher loop: {}'.format(stacktrace))
				logger.debug('Notifying services to stop...')
			shutdownEvent.set()
			time.sleep(exitWaitCycle)
			break

	## end clientLoop
	return
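Distilled, the watcher's contract above is: a child that wants a restart sets canceledEvent and exits; the watcher clears the event and spins up a replacement; setting shutdownEvent asks the child to exit for good. A minimal, self-contained sketch of that handshake (the worker body and names are illustrative, not the original spinUpTheClient):

import multiprocessing
import time

def worker(shutdownEvent, canceledEvent, requestRestart):
    # toy client: either ask the watcher for a restart, or run until shutdown
    if requestRestart:
        canceledEvent.set()   # signal the watcher that we want a restart
        return
    while not shutdownEvent.is_set():
        time.sleep(0.2)

if __name__ == '__main__':
    shutdownEvent = multiprocessing.Event()
    canceledEvent = multiprocessing.Event()
    client = multiprocessing.Process(target=worker, args=(shutdownEvent, canceledEvent, True))
    client.start()
    client.join()                  # first incarnation exits, requesting a restart
    if canceledEvent.is_set():
        canceledEvent.clear()      # acknowledge the request, then restart
        client = multiprocessing.Process(target=worker, args=(shutdownEvent, canceledEvent, False))
        client.start()
        print('restarted client with PID', client.pid)
    shutdownEvent.set()            # graceful shutdown request
    client.join()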
Example #2
import multiprocessing
import time


def wait_for_event(e):
    '''Wait for the event to be set before doing anything.'''
    print('wait_for_event: starting')
    e.wait()
    print('wait_for_event: e.is_set() ->', e.is_set())


def wait_for_event_timeout(e, t):
    '''Wait t seconds and then time out.'''
    print('wait_for_event_timeout: starting')
    e.wait(t)
    print('wait_for_event_timeout: e.is_set() ->', e.is_set())


if __name__ == '__main__':
    e = multiprocessing.Event()
    w1 = multiprocessing.Process(name='block',
                                 target=wait_for_event,
                                 args=(e, ))

    w1.start()

    w2 = multiprocessing.Process(name='non-block',
                                 target=wait_for_event_timeout,
                                 args=(e, 2))

    w2.start()

    print('main: waiting before calling Event.set()')
    time.sleep(4)
    e.set()
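As a side note, Event.wait(timeout) returns the flag's state (True if it was set, False if the timeout elapsed), so the timeout case can be detected directly instead of calling is_set() afterwards:

import multiprocessing

e = multiprocessing.Event()
if not e.wait(timeout=2):   # returns False after 2 s if nobody called e.set()
    print('timed out waiting for the event')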
Example #3
    def __init__(self,
                 source,
                 bufferlen=5,
                 name=None,
                 send_data_to_sink_manager=False,
                 **kwargs):
        '''
        Parameters
        ----------
        source: class
            lower-level class for interacting directly with the incoming data (e.g., plexnet)
        bufferlen: int
            Constrains the maximum amount of data history stored by the source
        name: string, optional, default=None
            Name of the sink, i.e., HDF table. If one is not provided, it will be inferred
            based on the name of the source module
        send_data_to_sink_manager: boolean, optional, default=False
            Flag to indicate whether data should be saved to a sink (e.g., HDF file)
        kwargs: dict, optional, default={}
            For the multi-channel data source, you MUST specify a 'channels' keyword argument.
            Note that kwargs['channels'] does not need to be a list of integers;
            it can also be a list of strings.
        '''

        super(MultiChanDataSource, self).__init__()
        if name is not None:
            self.name = name
        else:
            self.name = source.__module__.split('.')[-1]
        self.filter = None
        self.source = source
        self.source_kwargs = kwargs
        self.bufferlen = bufferlen
        self.max_len = int(bufferlen * self.source.update_freq)
        self.channels = kwargs['channels']
        self.chan_to_row = dict()
        for row, chan in enumerate(self.channels):
            self.chan_to_row[chan] = row

        self.n_chan = len(self.channels)
        dtype = self.source.dtype  # e.g., np.dtype('float') for LFP
        self.slice_size = dtype.itemsize
        self.idxs = shm.RawArray('l', self.n_chan)
        self.last_read_idxs = np.zeros(self.n_chan)
        rawarray = shm.RawArray('c',
                                self.n_chan * self.max_len * self.slice_size)

        self.data = np.frombuffer(rawarray, dtype).reshape(
            (self.n_chan, self.max_len))

        #self.fo2 = open('/storage/rawdata/test_rda_get.txt','w')
        #self.fo3 = open('/storage/rawdata/test_rda_run.txt','w')

        self.lock = mp.Lock()
        self.pipe, self._pipe = mp.Pipe()
        self.cmd_event = mp.Event()
        self.status = mp.Value('b', 1)
        self.stream = mp.Event()
        self.data_has_arrived = mp.Value('b', 0)

        self.methods = set(n for n in dir(source)
                           if inspect.ismethod(getattr(source, n)))

        self.send_data_to_sink_manager = send_data_to_sink_manager
        if self.send_data_to_sink_manager:
            self.send_to_sinks_dtype = np.dtype([
                ('chan' + str(chan), dtype) for chan in kwargs['channels']
            ])
            self.next_send_idx = mp.Value('l', 0)
            self.wrap_flags = shm.RawArray(
                'b', self.n_chan)  # zeros/Falses by default
            self.supp_hdf_file = kwargs['supp_file']
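The central trick above is backing a numpy array with a shared-memory RawArray, so every process that inherits the buffer sees the same ring buffer without copying. A stripped-down sketch of that allocation (channel count and dtype are arbitrary here):

from multiprocessing import sharedctypes as shm
import numpy as np

n_chan, max_len = 4, 100
dtype = np.dtype('float64')

# one flat byte buffer in shared memory, then numpy views onto it
raw = shm.RawArray('c', n_chan * max_len * dtype.itemsize)
view1 = np.frombuffer(raw, dtype).reshape((n_chan, max_len))
view2 = np.frombuffer(raw, dtype).reshape((n_chan, max_len))

view1[2, 0] = 1.5
print(view2[2, 0])   # 1.5 -- both arrays share the same underlying memory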
Example #4
    def __init__(self):
        self.start: multiprocessing.Event = multiprocessing.Event()
        self.stop: multiprocessing.Event = multiprocessing.Event()
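Note that multiprocessing.Event is a factory function, not a class, so annotating attributes with it (as above) is rejected by static type checkers such as mypy. The actual class lives in multiprocessing.synchronize; a checker-friendly sketch:

import multiprocessing
from multiprocessing.synchronize import Event

class Flags:
    def __init__(self) -> None:
        self.start: Event = multiprocessing.Event()
        self.stop: Event = multiprocessing.Event()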
Example #5
def menu(session, checkUpdate=True):
    """
	Parameters
	----------
	session : ikabot.web.session.Session
	checkUpdate : bool
	"""
    if checkUpdate:
        checkForUpdate()

    show_proxy(session)

    banner()

    process_list = updateProcessList(session)
    if len(process_list) > 0:
        print(_('Running tasks:'))
        for process in process_list:
            print(_('- pid: {} task: {}').format(process['pid'], process['action']))
        print('')

    menu_actions = [
        constructionList, sendResources, distributeResources, getStatus,
        donate, searchForIslandSpaces, loginDaily, alertAttacks, donationBot,
        alertLowWine, buyResources, sellResources, vacationMode,
        activateMiracle, trainArmy, shipMovements, constructBuilding, update,
        importExportCookie, autoPirate, investigate, proxyConf,
        updateTelegramData
    ]

    print(_('(0)  Exit'))
    print(_('(1)  Construction list'))
    print(_('(2)  Send resources'))
    print(_('(3)  Distribute resources'))
    print(_('(4)  Account status'))
    print(_('(5)  Donate'))
    print(_('(6)  Search for new spaces'))
    print(_('(7)  Login daily'))
    print(_('(8)  Alert attacks'))
    print(_('(9)  Donate automatically'))
    print(_('(10) Alert wine running out'))
    print(_('(11) Buy resources'))
    print(_('(12) Sell resources'))
    print(_('(13) Activate vacation mode'))
    print(_('(14) Activate miracle'))
    print(_('(15) Train army'))
    print(_('(16) See movements'))
    print(_('(17) Construct building'))
    print(_('(18) Update Ikabot'))
    print(_('(19) Import / Export cookie'))
    print(_('(20) Auto-Pirate'))
    print(_('(21) Investigate'))
    print(_('(22) Configure Proxy'))
    if telegramDataIsValid(session):
        print(_('(23) Change the Telegram data'))
    else:
        print(_('(23) Enter the Telegram data'))

    total_options = len(menu_actions)
    selected = read(min=0, max=total_options)
    if selected != 0:
        try:
            selected -= 1
            event = multiprocessing.Event()  # creates a new event
            process = multiprocessing.Process(
                target=menu_actions[selected],
                args=(session, event, sys.stdin.fileno()),
                name=menu_actions[selected].__name__)
            process.start()
            process_list.append({
                'pid': process.pid,
                'action': menu_actions[selected].__name__
            })
            updateProcessList(session, programprocesslist=process_list)
            # wait for the task process to fire the event it was given; when it
            # does, this process regains control of the command line and asks
            # the user for more input
            event.wait()
        except KeyboardInterrupt:
            pass
        menu(session, checkUpdate=False)
    else:
        if isWindows:
            # on Unix you can exit ikabot, close the terminal, and the processes
            # will continue to execute; on Windows, closing the terminal kills them
            print(_('Closing this console will kill the processes.'))
            enter()
        clear()
        os._exit(0)  # kills the process executing this statement, but not its child processes
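The menu's key pattern is that each action process receives an Event and sets it once it no longer needs the terminal, at which point the menu prompts the user again while the action keeps running in the background. A minimal sketch of that handshake (the task body is illustrative):

import multiprocessing
import time

def task(session, event):
    print('task: working on', session)
    time.sleep(1)
    event.set()          # hand control of the command line back to the menu
    time.sleep(5)        # keep running in the background

if __name__ == '__main__':
    event = multiprocessing.Event()
    p = multiprocessing.Process(target=task, args=('my-session', event), name='task')
    p.start()
    event.wait()         # the menu blocks here until the task releases the terminal
    print('menu: prompting the user again')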
Example #6
def test_linear_processing_time_scaling(
    slow_process_executor_image,
    logger,
    k8s_cluster,
    load_images_in_kind,
    set_test_pip_version,
):
    flow = Flow(
        name='test-flow-slow-process-executor',
        infrastructure='K8S',
        timeout_ready=120000,
        k8s_namespace='test-flow-slow-process-executor-ns',
    ).add(
        name='slow_process_executor',
        uses=slow_process_executor_image,
        timeout_ready=360000,
        replicas=3,
        grpc_data_requests=True,
    )

    with flow:
        with kubernetes_tools.get_port_forward_contextmanager(
                'test-flow-slow-process-executor-ns', flow.port_expose):
            # sleep as the port forward setup can take some time
            time.sleep(0.1)
            client_kwargs = dict(
                host='localhost',
                port=flow.port_expose,
            )
            client_kwargs.update(flow._common_kwargs)

            stop_event = multiprocessing.Event()
            scale_event = multiprocessing.Event()
            received_resposes = multiprocessing.Queue()
            response_arrival_times = multiprocessing.Queue()
            process = Process(
                target=send_requests,
                kwargs={
                    'client_kwargs': client_kwargs,
                    'stop_event': stop_event,
                    'scale_event': scale_event,
                    'received_resposes': received_resposes,
                    'response_arrival_times': response_arrival_times,
                    'logger': logger,
                },
            )

            process.start()
            process.join()

            import numpy as np

            response_times = []
            while response_arrival_times.qsize():
                response_times.append(response_arrival_times.get())
            mean_response_time = np.mean(response_times)
            logger.debug(
                f'Mean time between responses is {mean_response_time}, expected is 1/3 second'
            )
            assert mean_response_time < 0.4

            responses_list = []
            while received_resposes.qsize():
                responses_list.append(int(received_resposes.get()))

            logger.debug(
                f'Got the following responses {sorted(responses_list)}')
            assert sorted(responses_list) == list(
                range(min(responses_list),
                      max(responses_list) + 1))
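One caveat about the draining loops above: Queue.qsize() is approximate and raises NotImplementedError on platforms without sem_getvalue (notably macOS), so a more portable way to drain a multiprocessing queue is to loop on get_nowait():

import multiprocessing
import time
from queue import Empty

def drain(q):
    # collect everything currently in a multiprocessing.Queue
    items = []
    while True:
        try:
            items.append(q.get_nowait())
        except Empty:
            return items

if __name__ == '__main__':
    q = multiprocessing.Queue()
    for i in range(3):
        q.put(i)
    time.sleep(0.1)   # give the queue's feeder thread time to flush
    print(drain(q))   # [0, 1, 2]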
Example #7
    def __init__(self, app, args=None):
        """
        """
        super(MainWindow, self).__init__()

        self.setWindowTitle('Experimental module')
        self.setGeometry(400, 50, 550, 400)

        ##########################################################
        ######## Multiprocessing quantities
        ##########################################################
        # to turn recordings (run through multiprocessing.Process) on and off
        self.run_event = multiprocessing.Event()
        self.run_event.clear()
        self.closeFaceCamera_event = multiprocessing.Event()
        self.closeFaceCamera_event.clear()
        self.quit_event = multiprocessing.Event()
        self.quit_event.clear()
        # useful to share a string across processes:
        self.manager = multiprocessing.Manager()
        self.datafolder = self.manager.Value(
            c_char_p,
            str(os.path.join(os.path.expanduser('~'), 'DATA', 'trash')))

        ##########################################################
        ######## class values
        ##########################################################
        self.stim, self.acq, self.init, self.screen, self.stop_flag = None, None, False, None, False
        self.FaceCamera_process = None
        self.RigView_process = None
        self.params_window = None

        ##########################################################
        ####### GUI settings
        ##########################################################
        rml = QtWidgets.QLabel(
            '   ' + '-' * 40 + " Recording modalities " + '-' * 40, self)
        rml.move(30, 5)
        rml.setMinimumWidth(500)
        self.VisualStimButton = QtWidgets.QPushButton("Visual-Stim", self)
        self.VisualStimButton.move(30, 40)
        self.LocomotionButton = QtWidgets.QPushButton("Locomotion", self)
        self.LocomotionButton.move(130, 40)
        self.ElectrophyButton = QtWidgets.QPushButton("Electrophy", self)
        self.ElectrophyButton.move(230, 40)
        self.FaceCameraButton = QtWidgets.QPushButton("FaceCamera", self)
        self.FaceCameraButton.clicked.connect(self.toggle_FaceCamera_process)
        self.FaceCameraButton.move(330, 40)
        self.CaImagingButton = QtWidgets.QPushButton("CaImaging", self)
        self.CaImagingButton.move(430, 40)
        for button in [
                self.VisualStimButton, self.LocomotionButton,
                self.ElectrophyButton, self.FaceCameraButton,
                self.CaImagingButton
        ]:
            button.setCheckable(True)
        for button in [
                self.VisualStimButton, self.LocomotionButton,
                self.ElectrophyButton
        ]:
            button.setChecked(True)

        # screen choice
        QtWidgets.QLabel(" Screen :", self).move(250, 90)
        self.cbsc = QtWidgets.QComboBox(self)
        self.cbsc.setMinimumWidth(200)
        self.cbsc.move(320, 90)
        self.cbsc.activated.connect(self.update_screen)
        self.cbsc.addItems(SCREENS.keys())

        # config choice
        QtWidgets.QLabel("  => Config :", self).move(160, 125)
        self.cbc = QtWidgets.QComboBox(self)
        self.cbc.setMinimumWidth(270)
        self.cbc.move(250, 125)
        self.cbc.activated.connect(self.update_config)

        # subject choice
        QtWidgets.QLabel("-> Subject :", self).move(100, 160)
        self.cbs = QtWidgets.QComboBox(self)
        self.cbs.setMinimumWidth(340)
        self.cbs.move(180, 160)
        self.cbs.activated.connect(self.update_subject)

        # protocol choice
        QtWidgets.QLabel(" Visual Protocol :", self).move(20, 195)
        self.cbp = QtWidgets.QComboBox(self)
        self.cbp.setMinimumWidth(390)
        self.cbp.move(130, 195)
        self.cbp.activated.connect(self.update_protocol)

        # buttons and functions
        LABELS = ["i) Initialize", "r) Run", "s) Stop", "q) Quit"]
        FUNCTIONS = [self.initialize, self.run, self.stop, self.quit]

        mainMenu = self.menuBar()
        self.fileMenu = mainMenu.addMenu('')

        self.statusBar = QtWidgets.QStatusBar()
        self.setStatusBar(self.statusBar)
        self.statusBar.showMessage('ready to select a protocol/config')
        for func, label, shift in zip(FUNCTIONS, LABELS,\
                                      110*np.arange(len(LABELS))):
            btn = QtWidgets.QPushButton(label, self)
            btn.clicked.connect(func)
            btn.setMinimumWidth(110)
            btn.move(50 + shift, 250)
            action = QtWidgets.QAction(label, self)
            action.setShortcut(label.split(')')[0])
            action.triggered.connect(func)
            self.fileMenu.addAction(action)

        QtWidgets.QLabel("Notes: ", self).move(40, 300)
        self.qmNotes = QtWidgets.QTextEdit('', self)
        self.qmNotes.move(90, 300)
        self.qmNotes.setMinimumWidth(250)
        self.qmNotes.setMinimumHeight(60)

        btn = QtWidgets.QPushButton('Save\nSettings', self)
        btn.clicked.connect(self.save_settings)
        btn.setMinimumWidth(70)
        btn.setMinimumHeight(50)
        btn.move(380, 310)

        ##########################################################
        ##########################################################
        ##########################################################
        self.config, self.protocol, self.subject = None, None, None

        self.get_config_list()
        self.load_settings()
        # self.toggle_FaceCamera_process() # initialize if pre-set

        self.experiment = {}  # storing the specifics of an experiment
        self.show()
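Sharing a mutable string across processes, as self.datafolder does above, goes through a Manager proxy rather than raw shared memory. A minimal sketch:

import multiprocessing
from ctypes import c_char_p

def worker(shared):
    shared.value = 'set by the child'

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    shared = manager.Value(c_char_p, 'initial')
    p = multiprocessing.Process(target=worker, args=(shared,))
    p.start()
    p.join()
    print(shared.value)   # 'set by the child'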
Example #8
    def __init__(self,
                 suite_name,
                 tests,
                 test_source_cls,
                 browser_cls,
                 browser_kwargs,
                 executor_cls,
                 executor_kwargs,
                 stop_flag,
                 pause_after_test=False,
                 pause_on_unexpected=False,
                 restart_on_unexpected=True,
                 debug_info=None):
        """Thread that owns a single TestRunner process and any processes required
        by the TestRunner (e.g. the Firefox binary).

        TestRunnerManagers are responsible for launching the browser process and the
        runner process, and for logging the test progress. The actual test running
        is done by the TestRunner. In particular they:

        * Start the binary of the program under test
        * Start the TestRunner
        * Tell the TestRunner to start a test, if any
        * Log that the test started
        * Log the test results
        * Take any remedial action required e.g. restart crashed or hung
          processes
        """
        self.suite_name = suite_name

        self.tests = tests
        self.test_source_cls = test_source_cls
        self.test_queue = None

        self.browser_cls = browser_cls
        self.browser_kwargs = browser_kwargs

        self.executor_cls = executor_cls
        self.executor_kwargs = executor_kwargs

        self.test_source = None

        # Flags used to shut down this thread if we get a sigint
        self.parent_stop_flag = stop_flag
        self.child_stop_flag = multiprocessing.Event()

        self.pause_after_test = pause_after_test
        self.pause_on_unexpected = pause_on_unexpected
        self.restart_on_unexpected = restart_on_unexpected
        self.debug_info = debug_info

        self.manager_number = next_manager_number()

        self.command_queue = Queue()
        self.remote_queue = Queue()

        self.test_runner_proc = None

        threading.Thread.__init__(self,
                                  name="Thread-TestrunnerManager-%i" %
                                  self.manager_number)
        # This is started in the actual new thread
        self.logger = None

        self.unexpected_count = 0

        # This may not really be what we want
        self.daemon = True

        self.max_restarts = 5

        self.browser = None
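The two stop flags above give a two-level shutdown: the parent-owned flag stops every TestRunnerManager at once (e.g., on SIGINT), while child_stop_flag stops only this manager's own runner. A sketch of a loop honoring both (names illustrative):

import multiprocessing
import threading
import time

def manager_loop(parent_stop_flag, child_stop_flag):
    while not (parent_stop_flag.is_set() or child_stop_flag.is_set()):
        time.sleep(0.1)   # ... drive the runner process here ...
    print('manager shutting down')

if __name__ == '__main__':
    parent_flag = multiprocessing.Event()   # shared by all managers
    child_flag = multiprocessing.Event()    # private to one manager
    t = threading.Thread(target=manager_loop, args=(parent_flag, child_flag))
    t.start()
    child_flag.set()                        # stop just this manager
    t.join()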
Example #9
    def run(self, _id: int, verbose: bool=False, timeout: int=60000, n_games: int=100):
        # hostname = f"{socket.gethostname()}_{time.ctime().replace(' ', '-')}"
        hostname = f"{socket.gethostname()}_{_id}_{time.time()}"
        address = f"tcp://{self.args.attach}:{self.args.frontend_port}"
        context = zmq.Context()
        error_sock = context.socket(zmq.REQ)
        error_sock.connect(address)

        while True:
            try: 
                # alive_event:
                # set each time the game restarts without restarting the instance,
                # meaning the game is restarting normally
                alive_event = mp.Event()
                # req_kill_event:
                # set when the game process asks the outside to restart it; also set
                # after n_games have been played, so that even if an internal problem
                # keeps the game process from terminating on its own, it can be
                # forcibly terminated from outside
                req_kill_event = mp.Event()
                exc_queue = mp.Queue()

                def play_game(hostname, address, n_games, alive_event, req_kill_event, exc_queue):
                    # n_games:
                    # how many games to play back-to-back from one game instance:
                    # - too small: the instance restarts too often, slowing things down
                    # - too large: exceptions become more likely

                    # sync_event:
                    # keeps the host (game server) and join (client) in sync; the host
                    # sets it after configuring the game as instructed by the learner,
                    # allowing join to proceed to the next step
                    sync_event = asyncio.Event()
                    portconfig = Portconfig()

                    context = zmq.Context()
                    sock = context.socket(zmq.REQ)
                    sock.RCVTIMEO = timeout  # zmq receive timeout
                    sock.connect(address)

                    # task_dict & players:
                    # 게임 세팅 관련변수, # host와 join이 동일한 reference를 가지고 있어야 함
                    task_dict = dict(step_interval=self.args.step_interval)
                    players = [None, None]

                    asyncio.get_event_loop().run_until_complete(asyncio.gather(
                        Actor._host_game(
                            hostname,
                            sock,
                            task_dict,
                            players,
                            sync_event,
                            alive_event,
                            req_kill_event,
                            exc_queue,
                            n_games=n_games,
                            realtime=False, 
                            portconfig=portconfig
                        ),
                        Actor._join_game(
                            task_dict,
                            players,
                            sync_event,
                            alive_event,
                            req_kill_event,
                            exc_queue,
                            n_games=n_games,
                            realtime=False, 
                            portconfig=portconfig
                        )
                    ))

                if self.args.game_timeout < 0:
                    # for testing: when the maximum game time is set to a negative
                    # value, run play_game directly instead of in a child process
                    play_game(hostname, address, n_games, alive_event, req_kill_event, exc_queue)

                else:
                    # normally play_game runs as a child process; the child can request
                    # termination (req_kill_event), and this process checks at every
                    # timeout whether a new game has started (alive_event), restarting
                    # the child process if not
                    game_play_proc = mp.Process(
                        target=play_game, 
                        args=(hostname, address, n_games, alive_event, req_kill_event, exc_queue),
                        daemon=False,
                    )
                    game_play_proc.start()
                    
                    running = True
                    checkpoint = time.monotonic()
                    while running:
                        # check that the game (child) process is still working normally
                        if req_kill_event.is_set():
                            # the game process requested termination
                            running = False

                        if time.monotonic() - checkpoint > self.args.game_timeout:
                            if alive_event.is_set():
                                # the game restarted inside the game process before the deadline
                                checkpoint = time.monotonic()
                                alive_event.clear()
                            else:
                                running = False

                        while exc_queue.qsize() > 0:
                            # forward error messages raised in the child process to the Learner
                            self.log_exception(hostname, error_sock, exc_queue.get())

                        time.sleep(1)

                    # try to stop the game process - SIGTERM
                    if game_play_proc.is_alive():
                        for _ in range(3):
                            game_play_proc.terminate()
                            time.sleep(0.5)
                            if not game_play_proc.is_alive():
                                break
                    
                    # try to stop the game process - SIGKILL
                    if game_play_proc.is_alive():
                        game_play_proc.kill()
                        game_play_proc.close()    

                    # try to stop the game process - psutil
                    if game_play_proc.is_alive():  
                        kill_children_processes(game_play_proc.pid, including_parent=True)                

            except Exception as exc:
                import traceback
                traceback.print_exc()
                self.log_exception(hostname, error_sock, traceback.format_exc())
                if self.args.halt_at_exception:
                    # for testing: stay paused when an exception occurs
                    embed()

                try:
                    kill_children_processes(including_parent=False)
                except OSError:
                    # for testing: pause when an exception occurs; very rarely an
                    # OSError can happen here, cause unclear
                    traceback.print_exc()
                    embed()
                    pass
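The watchdog loop above boils down to a heartbeat: the child sets alive_event once per game, and the parent clears it each timeout window, killing the child if a full window passes without a beat. A compact, runnable sketch of the pattern (intervals are illustrative):

import multiprocessing as mp
import time

def game(alive_event, req_kill_event):
    for _ in range(5):
        alive_event.set()    # heartbeat: one beat per simulated game
        time.sleep(0.2)
    req_kill_event.set()     # played enough games; ask to be shut down

if __name__ == '__main__':
    alive, req_kill = mp.Event(), mp.Event()
    proc = mp.Process(target=game, args=(alive, req_kill))
    proc.start()
    checkpoint, game_timeout = time.monotonic(), 1.0
    while not req_kill.is_set():
        if time.monotonic() - checkpoint > game_timeout:
            if alive.is_set():
                alive.clear()                 # beat seen; reset the window
                checkpoint = time.monotonic()
            else:
                proc.terminate()              # hung: no beat in a full window
                break
        time.sleep(0.1)
    proc.join()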
Example #10
def run(cfg, max_ncalls):
    try:
        cfg['Logging']['system_logger'].info(
            "Repeat After Me dialogue system\n" + "=" * 120)

        sample_sentences = load_sentences(
            cfg['RepeatAfterMe']['sentences_file'])

        # used to send commands to VoipIO
        vio_commands, vio_child_commands = multiprocessing.Pipe()
        # I read recorded audio from this connection
        vio_record, vio_child_record = multiprocessing.Pipe()
        # I write audio to be played into this connection
        vio_play, vio_child_play = multiprocessing.Pipe()

        # used to send commands to VAD
        vad_commands, vad_child_commands = multiprocessing.Pipe()
        # used to read output audio from VAD
        vad_audio_out, vad_child_audio_out = multiprocessing.Pipe()

        # used to send commands to TTS
        tts_commands, tts_child_commands = multiprocessing.Pipe()
        # used to send TTS text
        tts_text_in, tts_child_text_in = multiprocessing.Pipe()

        command_connections = [vio_commands, vad_commands, tts_commands]

        non_command_connections = [
            vio_record, vio_child_record, vio_play, vio_child_play,
            vad_audio_out, vad_child_audio_out, tts_text_in, tts_child_text_in
        ]

        close_event = multiprocessing.Event()

        vio = VoipIO(cfg, vio_child_commands, vio_child_record, vio_child_play,
                     close_event)
        vad = VAD(cfg, vad_child_commands, vio_record, vad_child_audio_out,
                  close_event)
        tts = TTS(cfg, tts_child_commands, tts_child_text_in, vio_play,
                  close_event)

        vio.start()
        vad.start()
        tts.start()

        cfg['Logging']['session_logger'].set_close_event(close_event)
        cfg['Logging']['session_logger'].set_cfg(cfg)
        cfg['Logging']['session_logger'].start()

        # init the system
        call_connected = False
        call_start = 0
        count_intro = 0
        intro_played = False
        reject_played = False
        intro_id = 0
        last_intro_id = -1
        end_played = False
        s_voice_activity = False
        s_last_voice_activity_time = 0
        u_voice_activity = False
        u_last_voice_activity_time = 0
        ncalls = 0

        db = load_database(cfg['RepeatAfterMe']['call_db'])

        for remote_uri in db['calls_from_start_end_length']:
            num_all_calls, total_time, last24_num_calls, last24_total_time = get_stats(
                db, remote_uri)

            m = []
            m.append('')
            m.append('=' * 120)
            m.append('Remote SIP URI: %s' % remote_uri)
            m.append('-' * 120)
            m.append('Total calls:             %d' % num_all_calls)
            m.append('Total time (s):          %f' % total_time)
            m.append('Last 24h total calls:    %d' % last24_num_calls)
            m.append('Last 24h total time (s): %f' % last24_total_time)
            m.append('-' * 120)

            current_time = time.time()
            if last24_num_calls > cfg['RepeatAfterMe']['last24_max_num_calls'] or \
                    last24_total_time > cfg['RepeatAfterMe']['last24_max_total_time']:

                # add the remote uri to the black list
                vio_commands.send(
                    Command(
                        'black_list(remote_uri="%s",expire="%d")' %
                        (remote_uri,
                         current_time + cfg['RepeatAfterMe']['blacklist_for']),
                        'HUB', 'VoipIO'))
                m.append('BLACKLISTED')
            else:
                m.append('OK')

            m.append('-' * 120)
            m.append('')
            cfg['Logging']['system_logger'].info('\n'.join(m))

        call_back_time = -1
        call_back_uri = None

        while 1:
            # Check the close event.
            if close_event.is_set():
                print 'Received close event in: %s' % multiprocessing.current_process().name
                return

            time.sleep(cfg['Hub']['main_loop_sleep_time'])

            if vad_audio_out.poll():
                data_vad = vad_audio_out.recv()

            if call_back_time != -1 and call_back_time < time.time():
                vio_commands.send(
                    Command('make_call(destination="%s")' % call_back_uri,
                            'HUB', 'VoipIO'))
                call_back_time = -1
                call_back_uri = None

            # read all messages
            if vio_commands.poll():
                command = vio_commands.recv()

                if isinstance(command, Command):
                    if command.parsed['__name__'] in ("incoming_call", "make_call"):
                        cfg['Logging']['system_logger'].session_start(
                            command.parsed['remote_uri'])
                        cfg['Logging']['session_logger'].session_start(
                            cfg['Logging']
                            ['system_logger'].get_session_dir_name())

                        cfg['Logging']['system_logger'].session_system_log(
                            'config = ' + unicode(cfg))
                        cfg['Logging']['system_logger'].info(command)

                        cfg['Logging']['session_logger'].config('config = ' +
                                                                unicode(cfg))
                        cfg['Logging']['session_logger'].header(
                            cfg['Logging']["system_name"],
                            cfg['Logging']["version"])
                        cfg['Logging']['session_logger'].input_source("voip")

                    if command.parsed['__name__'] == "rejected_call":
                        cfg['Logging']['system_logger'].info(command)

                        call_back_time = time.time() + \
                            cfg['RepeatAfterMe']['wait_time_before_calling_back']
                        # call back a default uri; if not defined, call back the caller
                        if 'call_back_uri_subs' in cfg['RepeatAfterMe'] and \
                                cfg['RepeatAfterMe']['call_back_uri_subs']:
                            ru = command.parsed['remote_uri']
                            for pat, repl in cfg['RepeatAfterMe']['call_back_uri_subs']:
                                ru = re.sub(pat, repl, ru)
                            call_back_uri = ru
                        elif 'call_back_uri' in cfg['RepeatAfterMe'] and \
                                cfg['RepeatAfterMe']['call_back_uri']:
                            call_back_uri = cfg['RepeatAfterMe']['call_back_uri']
                        else:
                            call_back_uri = command.parsed['remote_uri']

                    if command.parsed['__name__'] == "rejected_call_from_blacklisted_uri":
                        cfg['Logging']['system_logger'].info(command)

                        remote_uri = command.parsed['remote_uri']

                        num_all_calls, total_time, last24_num_calls, last24_total_time = get_stats(
                            db, remote_uri)

                        m = []
                        m.append('')
                        m.append('=' * 120)
                        m.append(
                            'Rejected incoming call from blacklisted URI: %s' %
                            remote_uri)
                        m.append('-' * 120)
                        m.append('Total calls:             %d' % num_all_calls)
                        m.append('Total time (s):          %f' % total_time)
                        m.append('Last 24h total calls:    %d' %
                                 last24_num_calls)
                        m.append('Last 24h total time (s): %f' %
                                 last24_total_time)
                        m.append('=' * 120)
                        m.append('')
                        cfg['Logging']['system_logger'].info('\n'.join(m))

                    if command.parsed['__name__'] == "call_connecting":
                        cfg['Logging']['system_logger'].info(command)

                    if command.parsed['__name__'] == "call_confirmed":
                        cfg['Logging']['system_logger'].info(command)

                        remote_uri = command.parsed['remote_uri']
                        num_all_calls, total_time, last24_num_calls, last24_total_time = get_stats(
                            db, remote_uri)

                        m = []
                        m.append('')
                        m.append('=' * 120)
                        m.append('Incoming call from :     %s' % remote_uri)
                        m.append('-' * 120)
                        m.append('Total calls:             %d' % num_all_calls)
                        m.append('Total time (s):          %f' % total_time)
                        m.append('Last 24h total calls:    %d' %
                                 last24_num_calls)
                        m.append('Last 24h total time (s): %f' %
                                 last24_total_time)
                        m.append('-' * 120)

                        if last24_num_calls > cfg['RepeatAfterMe']['last24_max_num_calls'] or \
                                last24_total_time > cfg['RepeatAfterMe']['last24_max_total_time']:

                            cfg['Logging']['session_logger'].turn("system")
                            tts_commands.send(
                                Command(
                                    'synthesize(text="%s",log="true")' %
                                    cfg['RepeatAfterMe']['rejected'], 'HUB',
                                    'TTS'))
                            call_connected = True
                            reject_played = True
                            s_voice_activity = True
                            vio_commands.send(
                                Command(
                                    'black_list(remote_uri="%s",expire="%d")' %
                                    (remote_uri, time.time() +
                                     cfg['RepeatAfterMe']['blacklist_for']),
                                    'HUB', 'VoipIO'))
                            m.append('CALL REJECTED')
                        else:
                            # init the system
                            call_connected = True
                            call_start = time.time()
                            count_intro = 0
                            intro_played = False
                            reject_played = False
                            end_played = False
                            s_voice_activity = False
                            s_last_voice_activity_time = 0
                            u_voice_activity = False
                            u_last_voice_activity_time = 0

                            intro_id, last_intro_id = play_intro(
                                cfg, tts_commands, intro_id, last_intro_id)

                            m.append('CALL ACCEPTED')

                        m.append('=' * 120)
                        m.append('')
                        cfg['Logging']['system_logger'].info('\n'.join(m))

                        try:
                            db['calls_from_start_end_length'][
                                remote_uri].append([time.time(), 0, 0])
                        except:
                            db['calls_from_start_end_length'][remote_uri] = [
                                [time.time(), 0, 0],
                            ]
                        save_database(cfg['RepeatAfterMe']['call_db'], db)

                    if command.parsed['__name__'] == "call_disconnected":
                        cfg['Logging']['system_logger'].info(command)

                        remote_uri = command.parsed['remote_uri']

                        vio_commands.send(Command('flush()', 'HUB', 'VoipIO'))
                        vad_commands.send(Command('flush()', 'HUB', 'VAD'))
                        tts_commands.send(Command('flush()', 'HUB', 'TTS'))

                        cfg['Logging']['system_logger'].session_end()
                        cfg['Logging']['session_logger'].session_end()

                        try:
                            s, e, l = db['calls_from_start_end_length'][
                                remote_uri][-1]

                            if e == 0 and l == 0:
                                # there is a record about last confirmed but not disconnected call
                                db['calls_from_start_end_length'][remote_uri][
                                    -1] = [s, time.time(),
                                           time.time() - s]
                                save_database('call_db.pckl', db)
                        except KeyError:
                            # disconnecting call which was not confirmed for URI calling for the first time
                            pass

                        intro_played = False
                        call_connected = False
                        ncalls += 1

                    if command.parsed['__name__'] == "play_utterance_start":
                        cfg['Logging']['system_logger'].info(command)
                        s_voice_activity = True

                    if command.parsed['__name__'] == "play_utterance_end":
                        cfg['Logging']['system_logger'].info(command)

                        s_voice_activity = False
                        s_last_voice_activity_time = time.time()

                        if command.parsed['user_id'] == last_intro_id:
                            intro_played = True
                            s_last_voice_activity_time = 0

            if vad_commands.poll():
                command = vad_commands.recv()
                cfg['Logging']['system_logger'].info(command)

                if isinstance(command, Command):
                    if command.parsed['__name__'] == "speech_start":
                        u_voice_activity = True
                    if command.parsed['__name__'] == "speech_end":
                        u_voice_activity = False
                        u_last_voice_activity_time = time.time()

            if tts_commands.poll():
                command = tts_commands.recv()
                cfg['Logging']['system_logger'].info(command)

            current_time = time.time()

            #  print
            #  print intro_played, end_played
            #  print s_voice_activity, u_voice_activity,
            #  print call_start,  current_time, u_last_voice_activity_time, s_last_voice_activity_time
            #  print current_time - s_last_voice_activity_time > 5, u_last_voice_activity_time - s_last_voice_activity_time > 0

            if reject_played and not s_voice_activity:
                # be careful it does not hangup immediately
                reject_played = False
                vio_commands.send(Command('hangup()', 'HUB', 'VoipIO'))
                vio_commands.send(Command('flush()', 'HUB', 'VoipIO'))
                vad_commands.send(Command('flush()', 'HUB', 'VAD'))
                tts_commands.send(Command('flush()', 'HUB', 'TTS'))

            if intro_played and not s_voice_activity and \
                    current_time - call_start > cfg['RepeatAfterMe']['max_call_length']:
                # too long call
                if not end_played:
                    s_voice_activity = True
                    last_intro_id = str(intro_id)
                    intro_id += 1
                    cfg['Logging']['session_logger'].turn("system")
                    tts_commands.send(
                        Command(
                            'synthesize(text="%s",log="true")' %
                            cfg['RepeatAfterMe']['closing'], 'HUB', 'TTS'))
                    end_played = True
                else:
                    intro_played = False
                    # be careful it does not hangup immediately
                    vio_commands.send(Command('hangup()', 'HUB', 'VoipIO'))
                    vio_commands.send(Command('flush()', 'HUB', 'VoipIO'))
                    vad_commands.send(Command('flush()', 'HUB', 'VAD'))
                    tts_commands.send(Command('flush()', 'HUB', 'TTS'))

            if intro_played and \
                    not s_voice_activity and not u_voice_activity and \
                    current_time - s_last_voice_activity_time > 5 and \
                    current_time - u_last_voice_activity_time > 0.6:

                if 'silence' not in cfg['RepeatAfterMe'] or \
                        not cfg['RepeatAfterMe']['silence']:
                    s_voice_activity = True

                    s1 = ram()
                    cfg['Logging']['session_logger'].turn("system")
                    tts_commands.send(
                        Command('synthesize(text="%s",log="true")' % s1, 'HUB',
                                'TTS'))
                    s2 = sample_sentence(sample_sentences)
                    tts_commands.send(
                        Command('synthesize(text="%s",log="true")' % s2, 'HUB',
                                'TTS'))

                    s = s1 + ' ' + s2

                    m = []
                    m.append('=' * 120)
                    m.append('Say: ' + s)
                    m.append('=' * 120)

                    cfg['Logging']['system_logger'].info('\n'.join(m))

            if ncalls != 0 and not call_connected and s_last_voice_activity_time + 5.0 < current_time and ncalls >= max_ncalls:
                break

        # stop processes
        vio_commands.send(Command('stop()', 'HUB', 'VoipIO'))
        vad_commands.send(Command('stop()', 'HUB', 'VAD'))
        tts_commands.send(Command('stop()', 'HUB', 'TTS'))

        # clean connections
        for c in command_connections:
            while c.poll():
                c.recv()

        for c in non_command_connections:
            while c.poll():
                c.recv()

        # wait for processes to stop
        # do not join, because in case of exception the join will not be successful
        #vio.join()
        #system_logger.debug('VIO stopped.')
        #vad.join()
        #system_logger.debug('VAD stopped.')
        #tts.join()
        #system_logger.debug('TTS stopped.')

    except KeyboardInterrupt:
        print 'KeyboardInterrupt exception in: %s' % multiprocessing.current_process().name
        close_event.set()
        return
    except:
        cfg['Logging']['system_logger'].exception(
            'Uncaught exception in SW_HUB process.')
        close_event.set()
        raise

    print 'RAM HUB: Exiting: %s. Setting close event' % multiprocessing.current_process().name
    close_event.set()
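A single close_event is shared here by VoipIO, VAD, TTS, the session logger, and the hub itself, so any component (or an exception handler) can trigger a global shutdown with one set(). A minimal sketch of one event fanning out to several workers:

import multiprocessing
import time

def component(name, close_event):
    while not close_event.is_set():
        time.sleep(0.05)   # ... do real work here ...
    print('%s: close event received, shutting down' % name)

if __name__ == '__main__':
    close_event = multiprocessing.Event()
    workers = [multiprocessing.Process(target=component, args=(n, close_event))
               for n in ('vio', 'vad', 'tts')]
    for w in workers:
        w.start()
    time.sleep(0.3)
    close_event.set()      # one set() stops every component
    for w in workers:
        w.join()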
Example #11
    def __init__(self, listen_on: Endpoint, experts: Dict[str, ExpertBackend]):
        super().__init__()
        self.listen_on, self.experts = listen_on, experts
        self.ready = mp.Event()
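The ready event follows the usual server-startup pattern: the child process sets it once initialization is complete, so callers can block on it instead of sleeping for an arbitrary time. A sketch (the server body is illustrative):

import multiprocessing as mp
import time

class Server(mp.Process):
    def __init__(self):
        super().__init__()
        self.ready = mp.Event()

    def run(self):
        time.sleep(0.5)    # stand-in for real initialization (sockets, models, ...)
        self.ready.set()   # announce that the server can accept requests
        time.sleep(0.5)    # stand-in for serving

if __name__ == '__main__':
    server = Server()
    server.start()
    server.ready.wait()    # block until initialization is done
    print('server is ready')
    server.join()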
Example #12
            y1 = int(y1)
            coral_boxes.append((y0, x1, y1, x0))
            face_mini = bgr_img[y0:y1, x0:x1]
            cv2.imwrite(path + "face_" + str(face_cnt) + "_" + str(m) + ".jpg",
                        face_mini)
            print(path + "face_" + str(face_cnt) + "_" + str(m) + ".jpg")
            m += 1
            face_cnt += 1
        sleep(1)
        enc = []
        objsBuffer.put({"boxes": coral_boxes, "encodings": enc})


if __name__ == '__main__':
    multiprocessing.set_start_method('forkserver')
    prog_stop = multiprocessing.Event()
    prog_stop.clear()
    recImage = multiprocessing.Queue(2)
    resultRecogn = multiprocessing.Queue(2)
    camProc = Process(target=camThread,
                      args=(recImage, resultRecogn, prog_stop, 'test.avi'),
                      daemon=True)
    camProc.start()
    frecogn = Process(
        target=recognition,
        args=(recImage, resultRecogn, prog_stop,
              "/home/nano/visi/Jetson-Nano-FaceRecognition/Faces/Stepan/"),
        daemon=True)
    frecogn.start()

    while True:
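The fragment above wires a camera process and a recognition process together through small bounded queues (maxsize=2) with a shared stop event. A self-contained sketch of that pipeline shape:

import multiprocessing
import queue

def producer(frames, stop):
    i = 0
    while not stop.is_set():
        try:
            frames.put(i, timeout=0.1)   # bounded queue applies backpressure
            i += 1
        except queue.Full:
            pass

def consumer(frames, results, stop):
    while not stop.is_set():
        try:
            item = frames.get(timeout=0.1)
        except queue.Empty:
            continue
        try:
            results.put(item * 2, timeout=0.1)   # stand-in for recognition work
        except queue.Full:
            pass

if __name__ == '__main__':
    stop = multiprocessing.Event()
    frames = multiprocessing.Queue(2)    # small maxsize keeps latency low
    results = multiprocessing.Queue(2)
    procs = [multiprocessing.Process(target=producer, args=(frames, stop), daemon=True),
             multiprocessing.Process(target=consumer, args=(frames, results, stop), daemon=True)]
    for p in procs:
        p.start()
    print(results.get())                 # read one processed frame
    stop.set()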
Example #13
def ginzi_main(cfg_file, cfg, dirs, subdirs, guiport, mp_loggerqueue):

    setproctitle("gzbx." + os.path.basename(__file__))

    logger = mplogging.setup_logger(mp_loggerqueue, __file__)
    logger.debug(whoami() + "starting ...")

    pwdb = PWDBSender()

    # multiprocessing events
    mp_events = {}
    mp_events["unrarer"] = mp.Event()
    mp_events["verifier"] = mp.Event()

    # threading events
    event_stopped = threading.Event()

    articlequeue = None
    resultqueue = None
    mp_work_queue = mp.Queue()
    renamer_result_queue = mp.Queue()

    # filewrite_lock = mp.Lock()
    mpconnector_lock = threading.Lock()
    filewrite_lock = mp.Lock()

    renamer_parent_pipe, renamer_child_pipe = mp.Pipe()
    unrarer_parent_pipe, unrarer_child_pipe = mp.Pipe()
    verifier_parent_pipe, verifier_child_pipe = mp.Pipe()
    mpconnector_parent_pipe, mpconnector_child_pipe = mp.Pipe()
    pipes = {
        "renamer": [renamer_parent_pipe, renamer_child_pipe],
        "unrarer": [unrarer_parent_pipe, unrarer_child_pipe],
        "verifier": [verifier_parent_pipe, verifier_child_pipe],
        "mpconnector":
        [mpconnector_parent_pipe, mpconnector_child_pipe, mpconnector_lock]
    }

    # load server ts from file
    try:
        server_ts0 = pickle.load(open(dirs["main"] + "ginzibix.ts", "rb"))
    except Exception:
        server_ts0 = {}
    config_servers = get_configured_servers(cfg)
    config_servers.append("-ALL SERVERS-")
    server_ts = {
        key: server_ts0[key]
        for key in server_ts0 if key in config_servers
    }
    del server_ts0

    ct = None

    # update delay
    try:
        update_delay = float(cfg["OPTIONS"]["UPDATE_DELAY"])
    except Exception as e:
        logger.warning(whoami() + str(e) +
                       ", setting update_delay to default 0.5")
        update_delay = 0.5

    # init tcp with gtkgui.py
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://*:" + str(guiport))
    socket.setsockopt(zmq.RCVTIMEO, int(update_delay * 1000))

    # init sighandler
    logger.debug(whoami() + "initializing sighandler")
    mpp = {
        "nzbparser": None,
        "decoder": None,
        "unrarer": None,
        "renamer": None,
        "verifier": None,
        "post": None,
        "mpconnector": None
    }
    sh = SigHandler_Main(event_stopped, logger)
    signal.signal(signal.SIGINT, sh.sighandler)
    signal.signal(signal.SIGTERM, sh.sighandler)

    # start nzb parser mpp
    logger.info(whoami() + "starting nzbparser process ...")
    mpp_nzbparser = mp.Process(target=nzb_parser.ParseNZB,
                               args=(
                                   cfg,
                                   dirs,
                                   filewrite_lock,
                                   mp_loggerqueue,
                               ))
    mpp_nzbparser.start()
    mpp["nzbparser"] = mpp_nzbparser

    # start renamer
    logger.info(whoami() + "starting renamer process ...")
    mpp_renamer = mp.Process(target=renamer.renamer,
                             args=(
                                 renamer_child_pipe,
                                 renamer_result_queue,
                                 mp_loggerqueue,
                                 filewrite_lock,
                             ))
    mpp_renamer.start()
    mpp["renamer"] = mpp_renamer

    # start mpconnector
    logger.info(whoami() + "starting mpconnector process ...")
    mpp_connector = mp.Process(target=mpconnections.mpconnector,
                               args=(
                                   mpconnector_child_pipe,
                                   cfg,
                                   server_ts,
                                   mp_loggerqueue,
                               ))
    mpp_connector.start()
    mpp["mpconnector"] = mpp_connector

    dl = None
    nzbname = None
    pwdb.exc("db_nzb_set_current_nzbobj", [nzbname], {})
    paused = False
    article_health = 0

    dl_running = True
    applied_datarec = None

    # reload nzb lists for gui
    pwdb.exc("store_sorted_nzbs", [], {})

    DEBUGPRINT = False

    # main looooooooooooooooooooooooooooooooooooooooooooooooooooop
    try:
        while not event_stopped.is_set():
            # set connection health
            if dl:
                stat0 = pwdb.exc("db_nzb_getstatus", [nzbname], {})
                if stat0 == 2:
                    statusmsg = "downloading"
                elif stat0 == 3:
                    statusmsg = "postprocessing"
                elif stat0 == 4:
                    statusmsg = "success"
                elif stat0 == -4:
                    statusmsg = "failed"
                # send data to gui
            else:
                article_health = 0
                statusmsg = ""

            msg = None
            datarec = None
            try:
                msg, datarec = socket.recv_pyobj()
            except zmq.ZMQError as e:
                if e.errno == zmq.EAGAIN:
                    msg = None
            except Exception as e:
                logger.error(whoami() + str(e))
                try:
                    socket.send_pyobj(("NOOK", None))
                except Exception as e:
                    logger.error(whoami() + str(e))
            if msg:
                if DEBUGPRINT:
                    print("-" * 10, "received", msg)
            if msg == "REQ":
                try:
                    if DEBUGPRINT:
                        print(">>>> #0 main:", time.time(), msg)
                    serverconfig = do_mpconnections(pipes, "get_server_config",
                                                    None)
                    try:
                        update_server_ts(server_ts, ct, pipes)
                    except Exception as e:
                        logger.warning(whoami() + str(e))
                    if dl:
                        dl_results = dl.results
                    else:
                        dl_results = None
                    getdata = None
                    downloaddata_gc = None
                    if dl_results:
                        nzbname, downloaddata, _, _ = dl_results
                        if DEBUGPRINT:
                            print(">>>> #0a main:", time.time(), msg)
                        bytescount0, allbytesdownloaded0, availmem0, avgmiblist, filetypecounter, _, article_health,\
                            overall_size, already_downloaded_size, p2, overall_size_wparvol, allfileslist = downloaddata
                        gb_downloaded = dl.allbytesdownloaded0 / GB_DIVISOR
                        if DEBUGPRINT:
                            print(">>>> #0b main:", time.time(), msg)
                        downloaddata_gc = bytescount0, availmem0, avgmiblist, filetypecounter, nzbname, article_health,\
                            overall_size, already_downloaded_size
                        if DEBUGPRINT:
                            print(">>>> #4 main:", time.time(), msg)
                        getdata = downloaddata_gc, serverconfig, dl_running, statusmsg,\
                            article_health, dl.serverhealth(), gb_downloaded, server_ts
                    else:
                        downloaddata_gc = None, None, None, None, None, None, None, None
                        getdata = downloaddata_gc, serverconfig, dl_running, statusmsg, \
                            0, 0, 0, server_ts
                    # send only if at least one element is not None:
                    if (getdata.count(None) != len(getdata) or
                            downloaddata_gc.count(None) != len(downloaddata_gc)):
                        sendtuple = ("DL_DATA", getdata)
                    else:
                        sendtuple = ("NOOK", None)
                except Exception as e:
                    logger.error(whoami() + str(e))
                    sendtuple = ("NOOK", None)
                try:
                    socket.send_pyobj(sendtuple)
                except Exception as e:
                    logger.error(whoami() + str(e))
                    if DEBUGPRINT:
                        print(str(e))
            elif msg == "NZB_ADDED":
                for nzb0 in datarec:
                    try:
                        shutil.copy(nzb0, dirs["nzb"])
                        socket.send_pyobj(("NZB_ADDED_OK", None))
                    except Exception as e:
                        logger.error(whoami() + str(e))
                logger.info(whoami() + "copied new nzb files into nzb_dir")
            elif msg == "SET_CLOSEALL":
                try:
                    socket.send_pyobj(("SET_CLOSE_OK", None))
                    applied_datarec = datarec
                    event_stopped.set()
                    continue
                except Exception as e:
                    logger.error(whoami() + str(e))
            elif msg == "SET_PAUSE":  # pause downloads
                try:
                    if not paused:
                        paused = True
                        if nzbname:
                            logger.info(whoami() + "download paused for NZB " +
                                        nzbname)
                        else:
                            logger.info(whoami() + "download paused!")
                        do_mpconnections(pipes, "pause", None)
                        if dl:
                            dl.pause()
                        postprocessor.postproc_pause()
                    socket.send_pyobj(("SET_PAUSE_OK", None))
                    dl_running = False
                except Exception as e:
                    logger.error(whoami() + str(e))
            elif msg == "SET_RESUME":  # resume downloads
                try:
                    if paused:
                        if nzbname:
                            logger.info(whoami() + "download resumed for NZB " +
                                        nzbname)
                        else:
                            logger.info(whoami() + "download resumed!")
                        paused = False
                        do_mpconnections(pipes, "resume", None)
                        if dl:
                            dl.resume()
                        postprocessor.postproc_resume()
                    socket.send_pyobj(("SET_RESUME_OK", None))
                    dl_running = True
                except Exception as e:
                    logger.error(whoami() + str(e))
                continue
            elif msg == "REPROCESS_FROM_LAST":
                try:
                    for reprocessed_nzb in datarec:
                        reproc_stat0 = pwdb.exc("db_nzb_getstatus",
                                                [reprocessed_nzb], {})
                        if reproc_stat0:
                            nzbdirname = re.sub(r"[.]nzb$",
                                                "",
                                                reprocessed_nzb,
                                                flags=re.IGNORECASE) + "/"
                            incompletedir = dirs["incomplete"] + nzbdirname
                            # status -1, -2, 4: restart from 0
                            if reproc_stat0 in [-1, -2, 4]:
                                pwdb.exc("db_nzb_delete", [reprocessed_nzb],
                                         {})
                                remove_nzbdirs([reprocessed_nzb],
                                               dirs,
                                               pwdb,
                                               logger,
                                               removenzbfile=False)
                                update_fmodtime_nzbfiles(
                                    [reprocessed_nzb], dirs,
                                    logger)  # trigger nzbparser.py
                                logger.debug(whoami() + reprocessed_nzb +
                                             ": status " + str(reproc_stat0) +
                                             ", restart from 0")
                            # status -4/-3 (postproc. failed/interrupted): re-postprocess
                            elif reproc_stat0 in [-4, -3]:
                                if reproc_stat0 == -4:
                                    pwdb.exc("db_nzb_undo_postprocess",
                                             [reprocessed_nzb], {})
                                    clear_postproc_dirs(reprocessed_nzb, dirs)
                                #  if incompletedir: -> postprocess again
                                if os.path.isdir(incompletedir):
                                    pwdb.exc("nzb_prio_insert_second",
                                             [reprocessed_nzb, 3], {})
                                    logger.debug(
                                        whoami() + reprocessed_nzb +
                                        ": status -4/-3 w/ dir, restart from 3"
                                    )
                                # else restart overall
                                else:
                                    pwdb.exc("db_nzb_delete",
                                             [reprocessed_nzb], {})
                                    remove_nzbdirs([reprocessed_nzb],
                                                   dirs,
                                                   pwdb,
                                                   logger,
                                                   removenzbfile=False)
                                    update_fmodtime_nzbfiles([reprocessed_nzb],
                                                             dirs, logger)
                                    logger.debug(
                                        whoami() + reprocessed_nzb +
                                        ": status -4/-3 w/o dir, restart from 0"
                                    )
                            # else: undefined
                            else:
                                logger.debug(whoami() + reprocessed_nzb +
                                             ": status " + str(reproc_stat0) +
                                             ", no action!")
                    pwdb.exc("store_sorted_nzbs", [], {})
                    socket.send_pyobj(("REPROCESS_FROM_START_OK", None))
                except Exception as e:
                    logger.error(whoami() + str(e))
            elif msg in ["DELETED_FROM_HISTORY", "REPROCESS_FROM_START"]:
                try:
                    for removed_nzb in datarec:
                        pwdb.exc("db_nzb_delete", [removed_nzb], {})
                    pwdb.exc("store_sorted_nzbs", [], {})
                    if msg == "DELETED_FROM_HISTORY":
                        remove_nzbdirs(datarec, dirs, pwdb, logger)
                        socket.send_pyobj(("DELETED_FROM_HISTORY_OK", None))
                        logger.info(whoami() +
                                    "NZBs have been deleted from history")
                    else:
                        remove_nzbdirs(datarec,
                                       dirs,
                                       pwdb,
                                       logger,
                                       removenzbfile=False)
                        update_fmodtime_nzbfiles(
                            datarec, dirs, logger)  # trigger nzbparser.py
                        socket.send_pyobj(("REPROCESS_FROM_START_OK", None))
                        logger.info(whoami() +
                                    "NZBs will be reprocessed from start")
                except Exception as e:
                    logger.error(whoami() + str(e))
            elif msg == "SET_NZB_INTERRUPT":
                logger.info(whoami() +
                            "NZBs have been stopped/moved to history")
                try:
                    first_has_changed, moved_nzbs = pwdb.exc(
                        "move_nzb_list", [datarec],
                        {"move_and_resetprios": False})
                    if moved_nzbs:
                        pwdb.exc(
                            "db_msg_insert",
                            [nzbname, "NZB(s) moved to history", "warning"],
                            {})
                    if first_has_changed:
                        logger.info(whoami() + "first NZB has changed")
                        if dl:
                            clear_download(nzbname,
                                           pwdb,
                                           articlequeue,
                                           resultqueue,
                                           mp_work_queue,
                                           dl,
                                           dirs,
                                           pipes,
                                           mpp,
                                           ct,
                                           logger,
                                           stopall=False)
                            dl.stop()
                            dl.join()
                        first_has_changed, moved_nzbs = pwdb.exc(
                            "move_nzb_list", [datarec],
                            {"move_and_resetprios": True})
                        nzbname = None
                        pwdb.exc("db_nzb_set_current_nzbobj", [nzbname], {})
                        if dl:
                            del dl
                            dl = None
                    else:  # if current nzb didn't change, just update, but do not restart
                        first_has_changed, moved_nzbs = pwdb.exc(
                            "move_nzb_list", [datarec],
                            {"move_and_resetprios": True})
                    pwdb.exc("store_sorted_nzbs", [], {})
                    socket.send_pyobj(("SET_INTERRUPT_OK", None))
                except Exception as e:
                    logger.error(whoami() + str(e))
            elif msg == "SET_NZB_ORDER":
                logger.info(whoami() + "NZBs have been reordered/deleted")
                try:
                    # just get info if first has changed etc.
                    first_has_changed, deleted_nzbs = pwdb.exc(
                        "reorder_nzb_list", [datarec],
                        {"delete_and_resetprios": False})
                    if deleted_nzbs:
                        pwdb.exc("db_msg_insert",
                                 [nzbname, "NZB(s) deleted", "warning"], {})
                    if first_has_changed:
                        logger.info(whoami() + "first NZB has changed")
                        if dl:
                            clear_download(nzbname,
                                           pwdb,
                                           articlequeue,
                                           resultqueue,
                                           mp_work_queue,
                                           dl,
                                           dirs,
                                           pipes,
                                           mpp,
                                           ct,
                                           logger,
                                           stopall=False)
                            dl.stop()
                            dl.join()
                        first_has_changed, deleted_nzbs = pwdb.exc(
                            "reorder_nzb_list", [datarec],
                            {"delete_and_resetprios": True})
                        remove_nzbdirs(deleted_nzbs, dirs, pwdb, logger)
                        nzbname = None
                        pwdb.exc("db_nzb_set_current_nzbobj", [nzbname], {})
                        if dl:
                            del dl
                            dl = None
                    else:  # if current nzb didn't change, just update, but do not restart
                        first_has_changed, deleted_nzbs = pwdb.exc(
                            "reorder_nzb_list", [datarec],
                            {"delete_and_resetprios": True})
                        remove_nzbdirs(deleted_nzbs, dirs, pwdb, logger)
                    pwdb.exc("store_sorted_nzbs", [], {})
                    # release gtkgui from block
                    socket.send_pyobj(("SET_DELETE_REORDER_OK", None))
                except Exception as e:
                    logger.error(whoami() + str(e))
            elif msg:
                try:
                    socket.send_pyobj(("NOOK", None))
                except Exception as e:
                    if DEBUGPRINT:
                        print(str(e))
                    logger.debug(whoami() + str(e) + ", received msg: " +
                                 str(msg))
                continue

            # if not downloading
            if not dl:
                nzbname = make_allfilelist_wait(pwdb, dirs, logger, -1)
                if nzbname:
                    pwdb.exc("db_nzb_set_current_nzbobj", [nzbname], {})
                    do_mpconnections(pipes, "reset_timestamps_bdl", None)
                    logger.info(whoami() + "got next NZB: " + str(nzbname))
                    dl = downloader.Downloader(cfg, dirs, ct, mp_work_queue,
                                               articlequeue, resultqueue, mpp,
                                               pipes, renamer_result_queue,
                                               mp_events, nzbname,
                                               mp_loggerqueue, filewrite_lock,
                                               logger)
                    # if status postprocessing, don't start threads!
                    if pwdb.exc("db_nzb_getstatus", [nzbname],
                                {}) in [0, 1, 2]:
                        if not paused:
                            do_mpconnections(pipes, "resume", None)
                        else:
                            dl.pause()
                        dl.start()
            else:
                stat0 = pwdb.exc("db_nzb_getstatus", [nzbname], {})
                # if postproc ok
                if stat0 == 4:
                    logger.info(whoami() + "postprocessor success for NZB " +
                                nzbname)
                    do_mpconnections(pipes, "pause", None)
                    clear_download(nzbname,
                                   pwdb,
                                   articlequeue,
                                   resultqueue,
                                   mp_work_queue,
                                   dl,
                                   dirs,
                                   pipes,
                                   mpp,
                                   ct,
                                   logger,
                                   stopall=False)

                    dl.stop()
                    dl.join()
                    if mpp_is_alive(mpp, "post"):
                        mpp["post"].join()
                        mpp["post"] = None
                    pwdb.exc("db_msg_insert", [
                        nzbname, "downloaded and postprocessed successfully!",
                        "success"
                    ], {})
                    # set 'flags' for getting next nzb
                    del dl
                    dl = None
                    nzbname = None
                    pwdb.exc("db_nzb_set_current_nzbobj", [nzbname], {})
                    pwdb.exc("store_sorted_nzbs", [], {})
                    pwdb.exc("db_save_to_file", [], {})
                # if download ok -> postprocess
                elif stat0 == 3 and not mpp_is_alive(mpp, "post"):
                    article_health = 0
                    logger.info(whoami() +
                                "download success, postprocessing NZB " +
                                nzbname)
                    pwdb.exc("db_msg_insert", [
                        nzbname, "downloaded ok, starting postprocess",
                        "success"
                    ], {})
                    mpp_post = mp.Process(target=postprocessor.postprocess_nzb,
                                          args=(
                                              nzbname,
                                              articlequeue,
                                              resultqueue,
                                              mp_work_queue,
                                              pipes,
                                              mpp,
                                              mp_events,
                                              cfg,
                                              dl.verifiedrar_dir,
                                              dl.unpack_dir,
                                              dl.nzbdir,
                                              dl.rename_dir,
                                              dl.main_dir,
                                              dl.download_dir,
                                              dl.dirs,
                                              dl.pw_file,
                                              mp_loggerqueue,
                                          ))
                    mpp_post.start()
                    mpp["post"] = mpp_post
                # if download failed
                elif stat0 == -2:
                    logger.info(whoami() + "download failed for NZB " +
                                nzbname)
                    do_mpconnections(pipes, "pause", None)
                    clear_download(nzbname,
                                   pwdb,
                                   articlequeue,
                                   resultqueue,
                                   mp_work_queue,
                                   dl,
                                   dirs,
                                   pipes,
                                   mpp,
                                   ct,
                                   logger,
                                   stopall=False,
                                   onlyarticlequeue=False)
                    dl.stop()
                    dl.join()
                    pwdb.exc("db_msg_insert",
                             [nzbname, "downloaded failed!", "error"], {})
                    # set 'flags' for getting next nzb
                    del dl
                    dl = None
                    nzbname = None
                    pwdb.exc("db_nzb_set_current_nzbobj", [nzbname], {})
                    pwdb.exc("store_sorted_nzbs", [], {})
                    pwdb.exc("db_save_to_file", [], {})
                # if postproc failed
                elif stat0 == -4:
                    logger.error(whoami() + "postprocessor failed for NZB " +
                                 nzbname)
                    do_mpconnections(pipes, "pause", None)
                    clear_download(nzbname,
                                   pwdb,
                                   articlequeue,
                                   resultqueue,
                                   mp_work_queue,
                                   dl,
                                   dirs,
                                   pipes,
                                   mpp,
                                   ct,
                                   logger,
                                   stopall=False,
                                   onlyarticlequeue=False)
                    dl.stop()
                    dl.join()
                    if mpp_is_alive(mpp, "post"):
                        mpp["post"].join()
                    pwdb.exc("db_msg_insert", [
                        nzbname, "downloaded and/or postprocessing failed!",
                        "error"
                    ], {})
                    mpp["post"] = None
                    # set 'flags' for getting next nzb
                    del dl
                    dl = None
                    nzbname = None
                    pwdb.exc("db_nzb_set_current_nzbobj", [nzbname], {})
                    pwdb.exc("store_sorted_nzbs", [], {})
                    pwdb.exc("db_save_to_file", [], {})
    except Exception as e:
        logger.error(whoami() + str(e) + ": unexpected error in main loop")
        if DEBUGPRINT:
            print(str(e))
    # shutdown
    logger.info(whoami() + "closeall: starting shutdown sequence")
    do_mpconnections(pipes, "pause", None)
    logger.debug(whoami() + "closeall: connection threads paused")
    if dl:
        dl.stop()
        dl.join()
    logger.debug(whoami() + "closeall: downloader joined")
    try:
        clear_download(nzbname,
                       pwdb,
                       articlequeue,
                       resultqueue,
                       mp_work_queue,
                       dl,
                       dirs,
                       pipes,
                       mpp,
                       ct,
                       logger,
                       stopall=True,
                       onlyarticlequeue=False)
    except Exception as e:
        logger.error(whoami() + str(e) + ": closeall error!")
    dl = None
    nzbname = None
    pwdb.exc("db_nzb_set_current_nzbobj", [nzbname], {})
    logger.debug(whoami() + "closeall: closing gtkgui-socket")
    try:
        socket.close()
        context.term()
    except Exception as e:
        logger.warning(whoami() + str(e))
    logger.debug(whoami() + "closeall: all cleared")
    # save pandas time series
    try:
        pickle.dump(server_ts, open(dirs["main"] + "ginzibix.ts", "wb"))
        logger.info(whoami() + "closeall: saved downloaded-timeseries to file")
    except Exception as e:
        logger.warning(whoami() + str(e) +
                       ": closeall: error in saving download-timeseries")
    # if restart because of settings applied in gui -> write cfg to file
    if applied_datarec:
        try:
            with open(cfg_file, 'w') as configfile:
                applied_datarec.write(configfile)
            logger.debug(whoami() + "changed config file written!")
        except Exception as e:
            logger.error(whoami() + str(e) +
                         ": cannot write changed config file!")
    logger.info(whoami() + "exited!")
    sys.exit(0)
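The loop above combines a non-blocking ZMQ REP socket with a multiprocessing.Event acting as the stop flag. Below is a minimal, self-contained sketch of that pattern, assuming pyzmq is installed; the port number and the request handling are illustrative, not ginzibix's actual protocol.

import multiprocessing
import zmq

def serve(stop_event, port=5556):
    # REP socket with a receive timeout, so the stop_event can
    # interrupt the loop instead of blocking forever in recv.
    context = zmq.Context()
    sock = context.socket(zmq.REP)
    sock.bind("tcp://127.0.0.1:{}".format(port))
    sock.setsockopt(zmq.RCVTIMEO, 500)  # recv gives up after 500 ms
    while not stop_event.is_set():
        try:
            msg = sock.recv_pyobj()
        except zmq.Again:  # timeout, no request pending: poll the event again
            continue
        # exactly one reply per request on a REP socket
        sock.send_pyobj(("OK", None) if msg == "REQ" else ("NOOK", None))
    sock.close()
    context.term()

if __name__ == "__main__":
    stop = multiprocessing.Event()
    server = multiprocessing.Process(target=serve, args=(stop,))
    server.start()
    # ... client traffic would go here ...
    stop.set()
    server.join()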
Example #14
0
    def test_garbage_file_collection(self):
        event = multiprocessing.Event()
        event.clear()

        self.codechecker_cfg['viewer_port'] = env.get_free_port()
        env.export_test_cfg(self.test_workspace,
                            {'codechecker_cfg': self.codechecker_cfg})

        env.enable_auth(self.test_workspace)

        server_access = codechecker.start_server(self.codechecker_cfg, event)
        server_access['viewer_port'] \
            = self.codechecker_cfg['viewer_port']
        server_access['viewer_product'] \
            = self.codechecker_cfg['viewer_product']

        codechecker.add_test_package_product(server_access,
                                             self.test_workspace)

        self._cc_client = env.setup_viewer_client(self.test_workspace)
        self.assertIsNotNone(self._cc_client)

        self.__create_test_dir()
        files_in_report_before = self.__get_files_in_report()

        # Checker severity levels.
        self.__check_serverity_of_reports()

        self.__rename_project_dir()

        # Delete previous analysis report directory.
        rmtree(self.codechecker_cfg['reportdir'])

        files_in_report_after = self.__get_files_in_report()

        event.set()

        event.clear()

        # Change severity level of core.DivideZero to LOW.
        with io.open(self.workspace_severity_cfg, 'r+') as severity_cfg_file:
            severity_map = json.load(severity_cfg_file)
            severity_map['core.DivideZero'] = 'LOW'

            severity_cfg_file.seek(0)
            severity_cfg_file.truncate()
            severity_cfg_file.write(unicode(json.dumps(severity_map)))

        self.codechecker_cfg['viewer_port'] = env.get_free_port()
        env.export_test_cfg(self.test_workspace,
                            {'codechecker_cfg': self.codechecker_cfg})

        codechecker.start_server(self.codechecker_cfg, event)
        codechecker.login(self.codechecker_cfg, self.test_workspace, 'cc',
                          'test')

        self._cc_client = env.setup_viewer_client(self.test_workspace)
        self.assertIsNotNone(self._cc_client)

        self.assertEqual(len(files_in_report_before & files_in_report_after),
                         0)

        for file_id in files_in_report_before:
            f = self._cc_client.getSourceFileData(file_id, False, None)
            self.assertIsNone(f.fileId)

        # Checker severity levels.
        self.__check_serverity_of_reports()

        event.set()
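The test above starts and stops the server process purely through a shared multiprocessing.Event, then clears and reuses the same Event for a second server run. A minimal sketch of that contract follows; server_loop is a stand-in, not the CodeChecker server API.

import multiprocessing
import time

def server_loop(stop_event):
    # Stand-in server: run until the parent sets the event.
    while not stop_event.is_set():
        time.sleep(0.1)  # serve requests here

if __name__ == "__main__":
    event = multiprocessing.Event()
    event.clear()
    proc = multiprocessing.Process(target=server_loop, args=(event,))
    proc.start()
    # ... exercise the server ...
    event.set()    # ask the server to shut down
    proc.join()
    event.clear()  # the same Event can now drive a second run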
Example #15
0
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = multiprocessing.queues.SimpleQueue()
    error_queue = multiprocessing.queues.SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(
        signal.SIGINT,
        lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue,
                                  client_procs, interrupt_event))

    try:
        progress_info = []

        for i in range(options["clients"]):
            client_procs.append(
                multiprocessing.Process(target=client_process,
                                        args=(options["host"], options["port"],
                                              options["auth_key"], task_queue,
                                              error_queue, options["force"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1),
                                  multiprocessing.Value(ctypes.c_longlong, 0)))
            reader_procs.append(
                multiprocessing.Process(target=table_reader,
                                        args=(options, file_info, task_queue,
                                              error_queue, progress_info[-1],
                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            if not error_queue.empty():
                exit_event.set()
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        for client in client_procs:
            if client.is_alive():
                task_queue.put("exit")

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [
                client for client in client_procs if client.is_alive()
            ]

        # If we were successful, make sure 100% progress is reported
        if error_queue.empty() and not interrupt_event.is_set():
            print_progress(1.0)

        # Continue past the progress output line
        print ""
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if not task_queue.empty():
        error_queue.put(
            (RuntimeError,
             RuntimeError("Error: Items remaining in the task queue"), None))

    if not error_queue.empty():
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        while not error_queue.empty():
            error = error_queue.get()
            print >> sys.stderr, "Traceback: %s" % (error[2])
            print >> sys.stderr, "%s: %s" % (error[0].__name__, error[1])
            if len(error) == 4:
                print >> sys.stderr, "In file: %s" % (error[3])
        raise RuntimeError("Errors occurred during import")
Example #16
0
from applib.coupon_lib import CouponManager
from applib.wx_lib import ItchatManager
#-#from applib.db_lib import HistoryDB
#-#from applib.db_lib import Item
from applib.orm_lib import HistoryDB
from applib.tools_lib import htmlentitydecode
from applib.tools_lib import pcformat
from applib.log_lib import app_log

import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('connectionpool')
logger.setLevel(logging.INFO)

info, debug, warn, error = app_log.info, app_log.debug, app_log.warning, app_log.error
event_notify = multiprocessing.Event()
event_exit = asyncio.Event()

premovetag = re.compile('(<.*?>)', re.M | re.S)
#-#exclude_first_div_tag = re.compile(r'\A<div.*?>(.*?)</div>\Z', re.M | re.S)
#-#exclude_first_td_tag = re.compile(r'\A<td.*?>\s*(.*?)\s*</td>\Z', re.M | re.S)
#-#exclude_first_comment_tag = re.compile(r'\A<!-- .*?-->(.*?)<!-- .*?-->\Z', re.M | re.S)
#-#exclude_first_a_tag = re.compile(r'\A<a.*?>\s*(.*?)\s*</a>\Z', re.M | re.S)


async def signal_handler(sig):
    if sig == signal.SIGINT:
        warn('got Ctrl+C')
        if not event_exit.is_set():
            event_exit.set()
Example #17
0
async def test_no_message_lost_during_kill(
    slow_process_executor_image,
    logger,
    k8s_cluster,
    load_images_in_kind,
    set_test_pip_version,
):
    flow = Flow(
        name='test-flow-slow-process-executor',
        infrastructure='K8S',
        timeout_ready=120000,
        k8s_namespace='test-flow-slow-process-executor-ns',
    ).add(
        name='slow_process_executor',
        uses=slow_process_executor_image,
        timeout_ready=360000,
        replicas=3,
        grpc_data_requests=True,
    )

    with flow:
        with kubernetes_tools.get_port_forward_contextmanager(
                'test-flow-slow-process-executor', flow.port_expose):
            client_kwargs = dict(
                host='localhost',
                port=flow.port_expose,
            )
            client_kwargs.update(flow._common_kwargs)

            stop_event = multiprocessing.Event()
            scale_event = multiprocessing.Event()
            received_resposes = multiprocessing.Queue()
            process = Process(
                target=send_requests,
                kwargs={
                    'client_kwargs': client_kwargs,
                    'stop_event': stop_event,
                    'scale_event': scale_event,
                    'received_resposes': received_resposes,
                    'logger': logger,
                },
                daemon=True,
            )
            process.start()

            time.sleep(1.0)

            # scale slow init executor up
            k8s_clients = K8sClients()
            logger.debug('Kill 2 replicas')

            pods = k8s_clients.core_v1.list_namespaced_pod(
                namespace='test-flow-slow-process-executor',
                label_selector='app=slow-process-executor',
            )

            names = [item.metadata.name for item in pods.items]
            k8s_clients.core_v1.delete_namespaced_pod(
                names[0], namespace='test-flow-slow-process-executor')
            k8s_clients.core_v1.delete_namespaced_pod(
                names[1], namespace='test-flow-slow-process-executor')

            scale_event.set()

            # wait for replicas to be dead
            while True:
                pods = k8s_clients.core_v1.list_namespaced_pod(
                    namespace='test-flow-slow-process-executor',
                    label_selector='app=slow-process-executor',
                )
                current_pod_names = [item.metadata.name for item in pods.items]
                if (names[0] not in current_pod_names
                        and names[1] not in current_pod_names):
                    logger.debug('Killing pods complete')
                    time.sleep(1.0)
                    stop_event.set()
                    break
                else:
                    logger.debug(
                        f'not dead yet {current_pod_names} waiting for {names[0]} and {names[1]}'
                    )
                time.sleep(1.0)

            process.join()

            responses_list = []
            while received_resposes.qsize():
                responses_list.append(int(received_resposes.get()))

            logger.debug(
                f'Got the following responses {sorted(responses_list)}')
            assert sorted(responses_list) == list(
                range(min(responses_list),
                      max(responses_list) + 1))
Example #18
0
        # activate logging system
        fmtstr = '{:<10} {:<13.5f} {:<20} {:<28} {:<16} {:<8} {:<22} {:<#18x} {:<44} {:<6d} {:<#12x} {:<#12x}'
        fields = ['layer', 'ts', 'implementation', 'func', 'comm', 'pid', 'topic_name', 'publisher', 'guid', 'seqnum', 'daddr', 'dport']
        self.log = sofa_ros2_utilities.Log(fields=fields, fmtstr=fmtstr,
                                           cvsfilename=os.path.join(self.cfg.logdir, self.cfg.ros2logdir, 'send_log.csv'),
                                           print_raw=self.is_alive())
        # loop with callback to print_event
        b = self.b
        b["send_rcl"].open_perf_buffer(self.print_rcl)
        b["send_fastrtps"].open_perf_buffer(self.print_fastrtps)
        b["send_cyclonedds"].open_perf_buffer(self.print_cyclonedds)

        while not self.set.is_set():
            try:
                b.perf_buffer_poll(timeout=1000)
            except KeyboardInterrupt:
                break
        self.log.close()
        print("[trace_send] Exit")


if __name__ == "__main__":
    cfg = sofa_config.SOFA_Config()

    cflags = []
    if cfg.ros2_topic_whitelist:
        cflags.append('-DWHITELIST=1')
    b = BPF(src_file='./ebpf_ros2.c', cflags=cflags)

    trace = trace_send(args={'set': multiprocessing.Event(), 'config': cfg, 'b': b})
    trace.run()
Example #19
0
async def test_no_message_lost_during_scaling(
    slow_process_executor_image,
    logger,
    k8s_cluster,
    load_images_in_kind,
    set_test_pip_version,
):
    flow = Flow(
        name='test-flow-slow-process-executor',
        infrastructure='K8S',
        timeout_ready=120000,
        k8s_namespace='test-flow-slow-process-executor-ns',
    ).add(
        name='slow_process_executor',
        uses=slow_process_executor_image,
        timeout_ready=360000,
        replicas=3,
        grpc_data_requests=True,
    )

    with flow:
        with kubernetes_tools.get_port_forward_contextmanager(
                'test-flow-slow-process-executor', flow.port_expose):
            # sleep as the port forward setup can take some time
            time.sleep(0.1)
            client_kwargs = dict(
                host='localhost',
                port=flow.port_expose,
            )
            client_kwargs.update(flow._common_kwargs)

            stop_event = multiprocessing.Event()
            scale_event = multiprocessing.Event()
            received_resposes = multiprocessing.Queue()
            process = Process(
                target=send_requests,
                kwargs={
                    'client_kwargs': client_kwargs,
                    'stop_event': stop_event,
                    'scale_event': scale_event,
                    'received_resposes': received_resposes,
                    'logger': logger,
                },
                daemon=True,
            )
            process.start()

            time.sleep(1.0)

            # scale slow init executor up
            k8s_clients = K8sClients()
            logger.debug('Scale down executor to 1 replica')
            k8s_clients.apps_v1.patch_namespaced_deployment_scale(
                'slow-process-executor',
                namespace='test-flow-slow-process-executor',
                body={"spec": {
                    "replicas": 1
                }},
            )
            scale_event.set()

            # wait for replicas to be dead
            while True:
                pods = k8s_clients.core_v1.list_namespaced_pod(
                    namespace='test-flow-slow-process-executor',
                    label_selector='app=slow-process-executor',
                )
                if len(pods.items) == 1:
                    # still continue for a bit to hit the new replica only
                    logger.debug('Scale down complete')
                    time.sleep(1.0)
                    stop_event.set()
                    break
                await asyncio.sleep(1.0)

            # allow some time for responses to complete
            await asyncio.sleep(10.0)
            # kill the process as the client can hang due to lost responses
            if process.is_alive():
                process.kill()
            process.join()

            responses_list = []
            while received_resposes.qsize():
                responses_list.append(int(received_resposes.get()))

            logger.debug(
                f'Got the following responses {sorted(responses_list)}')
            assert sorted(responses_list) == list(
                range(min(responses_list),
                      max(responses_list) + 1))
Example #20
0
    SCREEN = pygame.display.set_mode(SCREENRECT.size, pygame.RESIZABLE, 32)
    BACKGROUND = pygame.image.load('Assets\\A2.png').convert(32, pygame.RLEACCEL)
    BACKGROUND = pygame.transform.smoothscale(BACKGROUND, SIZE)
    BACKGROUND.set_alpha(None)

    sprite_number = 1000
    sprites = numpy.array([pygame.sprite.Sprite() for r in range(sprite_number)])
    for s in sprites:
        image = pygame.image.load('Assets\\SpaceShip.png').convert(32, pygame.RLEACCEL)
        s.image = pygame.image.tostring(image, 'RGB', False)
        s.rect = image.get_rect()
        s.rect.center = (randint(0, SIZE[0]), randint(0, SIZE[1]))

    PROCESS = 10
    QUEUE = multiprocessing.Queue()
    EVENT = multiprocessing.Event()
    MANAGER = multiprocessing.Manager()
    DATA = MANAGER.dict()

    # SPLIT SPRITES INTO CHUNKS
    chunks = numpy.split(sprites, PROCESS)

    i = 0
    for c in chunks:
        DATA[i] = list(c)
        i += 1
    i = 0
    # for i in range(len(chunks)):
    #     print(i, DATA[i])

    BCK = pygame.image.tostring(BACKGROUND, 'RGB', False)
Example #21
0
 def __init__(self, *args, **kwargs):
     super(SignalHandlingProcess, self).__init__(*args, **kwargs)
     self._signal_handled = multiprocessing.Event()
     self._after_fork_methods.append(
         (SignalHandlingProcess._setup_signals, [self], {}))
Example #22
0
import subprocess
import multiprocessing
# from scp import SCPClient  # TODO: (Aaron) Need to add this in when switch over to new scp.
from functools import wraps

global_vars = {
    'geppetto_install_dir': None,
    'test_status': '',
    'test_notes': multiprocessing.Queue(),
    'email': None,
    'log_file': None,
    'last_email_time': None,
    'save_dir': None,
    'traceback_count': 0,
    'error_count': 0,
    'pause_reporting': multiprocessing.Event(),
    'pause_level': 'debug',
    'commit_id': '',
    'datos_ip': '',
    'datos_install_dir': '',
    'checkpoints': [],
}


class DummyLock():
    def acquire(self):
        pass

    def release(self):
        pass
Example #23
0
            df = pd.read_csv('dane_do_symulacji/data.csv')
            for sample in df['signal']:
                if quit_program.is_set():
                    break
                detect_blinks(sample)
                clock.tick(200)
            print('END OF SIGNAL')
            quit_program.set()
        else:
            board = OpenBCIGanglion(mac=mac_adress)
            board.start_stream(detect_blinks)

blink_det = mp.Queue()
blink = mp.Value('i', 0)
blinks_num = mp.Value('i', 0)
connected = mp.Event()
quit_program = mp.Event()

proc_blink_det = mp.Process(
    name='proc_',
    target=blinks_detector,
    args=(quit_program, blink_det, blinks_num, blink,)
    )

# start the subprocess
proc_blink_det.start()
print('subprocess started')

########################
###       GAME       ###
########################
Example #24
0
def main():
    config = get_config()

    logging.basicConfig(
        format=
        '%(asctime)s %(levelname)s [%(processName)s %(process)d] [%(name)s] %(message)s',
        datefmt="%Y-%m-%dT%H:%M:%S%z",
        level=logging.DEBUG)
    multiprocessing_logging.install_mp_handler()
    logging.getLogger("urllib3.connectionpool").setLevel(logging.ERROR)

    health_file = None
    if config.zac.health_file is not None:
        health_file = os.path.abspath(config.zac.health_file)

    logging.info("Main start (%d) version %s", os.getpid(), __version__)

    stop_event = multiprocessing.Event()
    state_manager = multiprocessing.Manager()
    processes = []

    source_hosts_queues = []
    source_collectors = get_source_collectors(config)
    for source_collector in source_collectors:
        source_hosts_queue = multiprocessing.Queue()
        process = processing.SourceCollectorProcess(source_collector["name"],
                                                    state_manager.dict(),
                                                    source_collector["module"],
                                                    source_collector["config"],
                                                    source_hosts_queue)
        source_hosts_queues.append(source_hosts_queue)
        processes.append(process)

    try:
        process = processing.SourceHandlerProcess("source-handler",
                                                  state_manager.dict(),
                                                  config.zac.db_uri,
                                                  source_hosts_queues)
        processes.append(process)

        process = processing.SourceMergerProcess("source-merger",
                                                 state_manager.dict(),
                                                 config.zac.db_uri,
                                                 config.zac.host_modifier_dir)
        processes.append(process)

        process = processing.ZabbixHostUpdater("zabbix-host-updater",
                                               state_manager.dict(),
                                               config.zac.db_uri,
                                               config.zabbix)
        processes.append(process)

        process = processing.ZabbixHostgroupUpdater("zabbix-hostgroup-updater",
                                                    state_manager.dict(),
                                                    config.zac.db_uri,
                                                    config.zabbix)
        processes.append(process)

        process = processing.ZabbixTemplateUpdater("zabbix-template-updater",
                                                   state_manager.dict(),
                                                   config.zac.db_uri,
                                                   config.zabbix)
        processes.append(process)
    except exceptions.ZACException as e:
        logging.error("Failed to initialize child processes. Exiting: %s",
                      str(e))
        sys.exit(1)

    for process in processes:
        process.start()

    with processing.SignalHandler(stop_event):
        status_interval = 60
        next_status = datetime.datetime.now()

        while not stop_event.is_set():
            if next_status < datetime.datetime.now():
                if health_file is not None:
                    write_health(health_file, processes, source_hosts_queues,
                                 config.zabbix.failsafe)
                log_process_status(processes)
                next_status = datetime.datetime.now() + datetime.timedelta(
                    seconds=status_interval)

            dead_process_names = [
                process.name for process in processes
                if not process.is_alive()
            ]
            if dead_process_names:
                logging.error("A child has died: %s. Exiting",
                              ', '.join(dead_process_names))
                stop_event.set()

            time.sleep(1)

        logging.debug(
            "Queues: %s",
            ", ".join([str(queue.qsize()) for queue in source_hosts_queues]))

        for process in processes:
            logging.info("Terminating: %s(%d)", process.name, process.pid)
            process.terminate()

        alive_processes = [
            process for process in processes if process.is_alive()
        ]
        while alive_processes:
            process = alive_processes[0]
            logging.info("Waiting for: %s(%d)", process.name, process.pid)
            log_process_status(processes)  # TODO: Too verbose?
            process.join(10)
            if process.exitcode is None:
                logging.warning(
                    "Process hanging. Signaling new terminate: %s(%d)",
                    process.name, process.pid)
                process.terminate()
            time.sleep(1)
            alive_processes = [
                process for process in processes if process.is_alive()
            ]

    logging.info("Main exit")
Example #25
0
    def run(
        self,
        start,
        spinup_time,
        timestep_length,
        analysis_length,
        detection_parameters,
        regions,
        real_time,
        max_n_docs_in_memory=None,
        check_previous_docs=True,
        geoparsing_start=False,
        update_locations=True,
        end=False,
        load_detectors=False,
        detection=True,
    ):
        """This program uses 2 processes. The main process (this one) that
        analyzes groups of docs and detects based on this. In addition a
        child process is spawned that reads the docs from the database or
        receives them from a stream. This process is the doc_loader.
        Two events, event_1 and event_2, regulate the execution of both
        processes. First the doc_loader loads the docs used for the spinup
        from the database, then the docs for the first timestep, which are
        all put in a queue (docs_queue). Then this one of the events is
        released, while the doc_loader is paused. The execution of the main
        process is restarted. First it unloads the docs from the docs_queue
        and releases the doc_loader again. This process then iterates."""
        if not update_locations:
            print("WARNING: Not updating locations")

        # Check if timestep not bigger than analysis length
        if timestep_length > analysis_length:
            print("Timestep too big")
            sys.exit(0)

        # Set parameters for sharing between processes
        n_docs_to_unload = Counter(0)
        timestep_end_str = mp.Array('c', 26)
        docs_queue = mp.Queue()
        event_1 = mp.Event()
        event_2 = mp.Event()
        is_real_time = mp.Value(c_bool, False)

        end_date_spinup = start + spinup_time
        if geoparsing_start:
            if geoparsing_start < start:
                print("ERROR: Geoparsing start is smaller than start date")
                sys.exit()
            geoparsing_start = int((geoparsing_start - start) /
                                   timestep_length) * timestep_length + start
            print("Geoparsing start:", geoparsing_start)
            doc_loader_start = geoparsing_start
        else:
            doc_loader_start = start

        doc_loader_mp = Process(target=self.doc_loader.load_docs,
                                args=(docs_queue, n_docs_to_unload,
                                      doc_loader_start, analysis_length,
                                      timestep_length, event_1, event_2,
                                      timestep_end_str, is_real_time))
        doc_loader_mp.daemon = True
        doc_loader_mp.start()

        if detection and geoparsing_start and geoparsing_start > end_date_spinup:
            self.event_detector = EventDetector(
                self.pg,
                self.es,
                start,
                spinup_time,
                detection_parameters=detection_parameters,
                regions=regions,
                load_detectors=load_detectors,
            )
            self.initial_detection(start, geoparsing_start)
            end_date_spinup = None

        while real_time or not is_real_time.value:
            event_1.wait()
            if doc_loader_mp.exception is not None:
                _, traceback = doc_loader_mp.exception
                print(traceback)
                sys.exit()

            unloaded_docs = []
            for i in range(n_docs_to_unload.value()):
                unloaded_docs.append(docs_queue.get())
                n_docs_to_unload.decrease()

            about_ongoing_event_docs = []
            about_ongoing_event_doc_ids = set()
            classified_docs = set()

            IDs = [ID for ID, _ in unloaded_docs]
            if IDs:
                documents = self.es.mget(index=DOCUMENT_INDEX,
                                         body={'ids': IDs})['docs']
            else:
                documents = []
            for doc in documents:
                doc = doc['_source']
                if 'event_related' in doc:
                    classified_docs.add(doc['id'])
                    if doc['event_related'] is True:
                        about_ongoing_event_doc_ids.add(doc['id'])

            for doc in unloaded_docs:
                if doc[0] in about_ongoing_event_doc_ids:
                    about_ongoing_event_docs.append(doc)

            docs_to_classify = []
            examples_to_classify = []
            for doc in unloaded_docs:
                ID, doc_info = doc
                if ID not in classified_docs:
                    example = {
                        'id': ID,
                        'sentence1': doc_info.clean_text,
                        'label': 0
                    }
                    examples_to_classify.append(example)
                    docs_to_classify.append(doc)

            classes = self.text_classifier(examples_to_classify)

            assert len(classes) == len(docs_to_classify)
            es_update = []
            for doc_class, doc in zip(classes, docs_to_classify):
                doc_class = True if doc_class == 'yes' else False
                if doc_class is True:
                    about_ongoing_event_docs.append(doc)
                es_update.append({
                    'doc': {
                        'event_related': doc_class
                    },
                    '_index': DOCUMENT_INDEX,
                    '_id': doc[0],
                    '_op_type': 'update',
                    '_type': '_doc'
                })

            self.es.bulk_operation(es_update)

            about_ongoing_event_docs = sorted(about_ongoing_event_docs,
                                              key=lambda x: x[1].date,
                                              reverse=False)

            self.docs.update(dict(about_ongoing_event_docs))
            if max_n_docs_in_memory is not None and len(
                    self.docs) > max_n_docs_in_memory:
                n_docs_to_delete = len(self.docs) - max_n_docs_in_memory
                IDs_to_remove = list(self.docs.keys())[:n_docs_to_delete]
                for ID in IDs_to_remove:
                    del self.docs[ID]

            event_1.clear()
            event_2.set()
            near_end_date_spinup = False
            if self.docs:
                timestep_end = str(timestep_end_str.value, 'utf-8')
                timestep_end = isoformat_2_date(timestep_end)
                l_docs = []

                if detection and end_date_spinup and timestep_end >= end_date_spinup:
                    self.event_detector = EventDetector(
                        self.pg,
                        self.es,
                        start,
                        spinup_time,
                        detection_parameters=detection_parameters,
                        load_detectors=load_detectors,
                        regions=regions)
                    self.initial_detection(start,
                                           timestep_end - analysis_length)
                    near_end_date_spinup = True

                for ID, doc in self.docs.items():
                    if doc.date > timestep_end - analysis_length:
                        break
                    else:
                        l_docs.append(ID)

                for i, ID in enumerate(l_docs):
                    l_docs[i] = self.docs[ID]
                    del self.docs[ID]

                self.geoparse_timestep(timestep_end,
                                       update_locations=update_locations)
                if detection and not end_date_spinup and (
                        not geoparsing_start
                        or timestep_end > geoparsing_start + analysis_length):
                    self.event_detector.detect_events_l(
                        l_docs, is_real_time=is_real_time)
                    self.event_detector.detect_events_s(
                        self.docs.values(), is_real_time=is_real_time)
                if near_end_date_spinup:
                    end_date_spinup = None

                if end and timestep_end > end:
                    return None
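The docstring of run() above describes a two-event handshake: the doc_loader fills docs_queue and sets event_1, then blocks on event_2 until the main process has drained the batch. A self-contained sketch of that protocol; the shared counter stands in for n_docs_to_unload, and the batch contents are illustrative.

import multiprocessing as mp

def doc_loader(docs_queue, n_docs, event_1, event_2):
    # Producer: hand over one batch per timestep, then wait to be released.
    for timestep in range(3):
        batch = ["doc-{}-{}".format(timestep, i) for i in range(5)]
        for doc in batch:
            docs_queue.put(doc)
        n_docs.value = len(batch)
        event_1.set()    # batch ready: wake the main process
        event_2.wait()   # pause until the main process releases us
        event_2.clear()

if __name__ == "__main__":
    docs_queue = mp.Queue()
    n_docs = mp.Value('i', 0)
    event_1, event_2 = mp.Event(), mp.Event()
    loader = mp.Process(target=doc_loader,
                        args=(docs_queue, n_docs, event_1, event_2))
    loader.start()
    for _ in range(3):
        event_1.wait()                 # block until a batch is ready
        for _ in range(n_docs.value):  # unload exactly the announced count
            print(docs_queue.get())
        event_1.clear()
        event_2.set()                  # release the doc_loader again
    loader.join()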
Example #26
0
# convert wav to opus
opus_frames = []
CHANNELS = 2
RATE = 48000
enc = encoder.Encoder(RATE, CHANNELS, 'voip')
dec = decoder.Decoder(RATE, CHANNELS)

for data in audio_frames:
    opus_frames.append(enc.encode(data, CHUNK))
print("DATA LENGTH :", len(b''.join(audio_frames)))
print("ENCDATA LENGTH :", len(b''.join(opus_frames)))

frame_size_time = CHUNK / RATE
audio_packet_queue = multiprocessing.Queue()
period_sync_event = multiprocessing.Event()
pyaudio_multiprocessing.start(audio_packet_queue, period_sync_event)
opus_decoded_data = b''
drift_time = 0
for opus_data in opus_frames:
    if drift_time > frame_size_time:
        drift_time -= frame_size_time
        logging.info("Offset drift: %f", drift_time)
        continue

    tic = time.perf_counter()
    decoded_data = dec.decode(opus_data, CHUNK)
    #opus_decoded_data += decoded_data

    if (time.perf_counter() - tic) < frame_size_time:
        logging.info("Append audio")
Example #27
0
    def __init__(self, *args, **kwargs):
        super(MultiProcApplyEvents, self).__init__(*args, **kwargs)

        self.workers = []
        self.kill_queue = multiprocessing.JoinableQueue()
        self.kill_event = multiprocessing.Event()
Example #28
0
# Twitter : @TwizzyIndy
#
#
#############

import os
import sys
import time
import multiprocessing
import hashlib
import binascii
import itertools

MATRIX_SIZE = [3, 3]
MAX_LEN = MATRIX_SIZE[0] * MATRIX_SIZE[1]
FOUND = multiprocessing.Event()


def lookup(param):
    global FOUND
    lenhash = param[0]
    target = param[1]
    positions = param[2]

    if FOUND.is_set():
        return None

    permutations = itertools.permutations(positions, lenhash)

    for item in permutations:
Example #29
0
 def __init__(self):
     multiprocessing.Process.__init__(self)
     self.stop_event = multiprocessing.Event()
Example #30
0
 def __init__(self, args, front_sink_addr):
     super().__init__()
     self.port = args.port_out
     self.exit_flag = multiprocessing.Event()
     self.logger = set_logger(colored('SINK', 'green'))
     self.front_sink_addr = front_sink_addr