Example #1
def main():
        p = Process(target=kinect_sender)
        p.start()
        host, port = '0.0.0.0', 12345
        print "Running server at {host}:{port}".format(host=host, port=port)

        http_server = WSGIServer((host, port), app)
        http_server.serve_forever()
        p.kill()
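
Examples #1 through #3 below share the same pattern: a background sender Process plus a gevent-style WSGI server in the foreground, with kill() used for cleanup once serve_forever() returns. The snippets omit their imports; a hypothetical preamble they might rely on (the gevent and Flask choices here are assumptions, not shown in the original code) is:

# Hypothetical preamble for the snippet above -- not part of the original example.
from multiprocessing import Process
from gevent.pywsgi import WSGIServer   # WSGIServer((host, port), app).serve_forever() matches gevent's pywsgi API
from flask import Flask                # `app` is assumed to be a Flask/WSGI application

app = Flask(__name__)

def kinect_sender():
    pass                               # background sender loop, defined elsewhere in the original project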
Example #2
def main():
        p = Process(target=opengazer_sender)
        p.start()
        #host, port = '192.168.43.9', 55555
        host, port = socket.gethostbyname(socket.gethostname()), 55555
        print "Running server at {host}:{port}".format(host=host, port=port)

        http_server = WSGIServer((host, port), app)
        http_server.serve_forever()
        p.kill()
Example #3
def main():
    '''Main function of the program'''
    p = Process(target=kinect_simulator)
    p.start()
    host, port = '0.0.0.0', 8888
    print "Running server at {host}:{port}".format(host=host, port=port)

    #gevent.spawn(retransmisor)
    #gevent.spawn(sender)
    http_server = WSGIServer((host, port), app)
    http_server.serve_forever()
    p.kill()
Example #4
def main():
    # Parse command line options
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage)
    parser.add_option('-n', '--name', dest='name', default='A') # This is our DHT key - temporary!!!
    parser.add_option('-p', '--httpport', dest='http_port', default=None)
    parser.add_option('-u', '--udpport', dest='udp_port', default=None)
    parser.add_option('-f', '--pipe', dest='pipe_test', default=None)
    (options, args) = parser.parse_args()
    
    name = options.name
    http_port = None
    if options.http_port:
        http_port = int(options.http_port)
    udp_port = None
    if options.udp_port:
        udp_port = int(options.udp_port)
    pipe_test = options.pipe_test # Pipe for talking to test harness
    http_ui = True
    
    log.info("server listening on udp_port="+str(udp_port)+", http_port="+str(http_port))
    
    # Named full duplex pipe for communicating between main process and UI/UDP
    # server subprocess
    pipe_server, pipe_ui = Pipe()
    
    # Start UDP server subprocess
    p_server = Process(target=start_server, args=(name, pipe_server, udp_port, pipe_test))
    p_server.start()
    
    # Choose which UI to use (http is better for testing multiple instances on same machine)
    if http_ui and http_port:
        p_ui = Process(target=start_ui, args=(pipe_ui, http_port))
        p_ui.start()
        p_ui.join()
        p_ui.kill()
    # else:
    #     # OSX status bar version not working with python 2.7 on OSX 10.6
    #     from userinterface import OSXstatusbaritem
    #     OSXstatusbaritem.start(pipeUI)
    #     return
    
    p_server.join()
    p_server.kill()
Example #5
async def page(setup_settings, msend):
    logging.getLogger().addHandler(logging.StreamHandler())

    # backend_app and test_app are basically the same thing
    # but we must create two so they don't reuse each others'
    # database connections, which causes OperationalError
    test_app = create_app()
    backend_app = create_app()
    celery.conf.task_always_eager = True

    # we rely on test_app to set up the database
    with test_app.app_context():
        DB.create_all()

    # we pass backend_app to the sub process that will respond to our frontend
    host, port = settings.SERVICE_URL.split("://")[1].split(":")
    p = Process(target=run_simple, args=(host, int(port), backend_app))
    p.start()

    # launch the browser
    opts = {"headless": True}
    if os.getenv("GOOGLE_CHROME_BIN"):
        opts["executablePath"] = os.getenv("GOOGLE_CHROME_SHIM")

    browser = await launch(opts)
    page = await browser.newPage()

    # at the same time we use the test_app to execute queries inside the tests
    with test_app.app_context():
        yield page

    # terminate browser and backend app
    p.terminate()
    await browser.close()

    time.sleep(0.1)
    if p.is_alive():
        p.kill()

    # we also rely on test_app to purge the database
    with test_app.app_context():
        DB.session.remove()
        DB.drop_all()
    redis_store.flushdb()
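
The async fixture above yields a pyppeteer page with the backend already serving requests. A hypothetical test consuming it (assuming pytest-asyncio and the same settings module used by the fixture) might look like:

# Sketch only -- assumes pytest-asyncio and the settings module imported by the fixture above.
import pytest

@pytest.mark.asyncio
async def test_frontpage_renders(page):
    await page.goto(settings.SERVICE_URL)   # navigate to the backend started by the fixture
    content = await page.content()          # full HTML of the rendered page
    assert "<html" in content.lower()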
Example #6
class Listener:

    def __init__(self):
        self.name = ''
        self.author = ''
        self.description = ''
        self.running = False
        self.options = {}
        self.__conn = None
        self.__thread = None

    def run(self):
        return

    def __run(self):
        self.__conn = Client(('localhost', 60000), authkey=ipc_pass)
        self.run()

    def start(self):
        self.__thread = Process(target=self.__run, daemon=True)
        self.__thread.start()
        self.running = True

    def dispatch_event(self, event, msg):
        self.__conn.send((event, msg))
        data = ()

        try:
            data = self.__conn.recv()
        except EOFError:
            pass

        return data

    def stop(self):
        self.__thread.kill()
        self.running = False

    def __getitem__(self, key):
        return self.options[key]['Value']

    def __setitem__(self, key, value):
        self.options[key]['Value'] = value
Example #7
class PlayerProcess():
    """
    Handles the process of a player

    Attributes:
        process (Process) : The process in which player polling will be run
        target (function) : The function that will be executed in the Process
        args (tuple) : Arguments sent to the process at execution

        data (any) : The data to be sent to the Process
        
        dataQueue (Queue) : The queue used to send data to the process
        resultQueue (Queue) : The queue used to get the result from the process

        model (Model) : The game model, containing teamsData in which we place the response
        teamId (string) : The team identifier, for placing the result in the correct teamsData

        stopwatch (TimeManager) : Used to monitor process response time
    """
    def __init__(self, model, teamId, player):
        """
        Creates a new process to run a player's tick.

        Arguments:
            model (Model) : Access to the model of the game
            teamId (string) : The team to operate
            player (Player) : The player to call
        """

        self._target = runPlayerProcess

        self._data = None
        self._dataQueue = Queue()
        self._resultQueue = Queue()

        self._model = model
        self._teamId = teamId

        self._stopwatch = TimeManager()

        self._args = (self._dataQueue, self._resultQueue, player)

        self._process = Process(target=self._target, args=self._args)

        self.lastResponseTime = None

    def setData(self, pollingData):
        """
        Changes what data will be put in the queue for the next call to execute.

        Arguments:
            pollingData (any) : Correctly formatted data that can be read by a Player
        """
        self._data = pollingData

    def execute(self):
        """
        Places the data in the queue, which will usually trigger the process's next loop.

        Use setData to change what will be read by the player.
        """
        self._stopwatch.StartTimer()
        self._dataQueue.put(self._data)

    def check(self):
        """
        Checks if the player has sent a response, and places it in teamsData[team]

        If no response is given, None is placed.
        """
        try:
            result = self._resultQueue.get(False)

            # In case of player pass turn
            while not self._resultQueue.empty() and result == {}:
                result = self._resultQueue.get(False)

            self.lastResponseTime = self._stopwatch.DeltaTimeMs()

        except:
            if self._model.teamsData[self._teamId] == None:
                result = None
            else:
                result = self._model.teamsData[self._teamId]

        self._model.teamsData[self._teamId] = result

    def start(self):
        """
        Starts the process hosting the player. Should only be called once in a game.
        """
        self._process.start()

    def join(self, timeout=None):
        """
        Waits until process termination.
        """
        self._process.join(timeout)

    def kill(self):
        """
        Kills the process hosting the player. Should only be called once the game is over.
        """
        self._process.kill()
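
PlayerProcess hands its queues to a runPlayerProcess target that is not included in this snippet. Going by the docstrings above (data in through dataQueue, result out through resultQueue), a hypothetical version of that loop could be:

# Hypothetical worker loop, not the original implementation: block until
# execute() puts polling data, let the player compute, and push the result
# back for check() to pick up.
def runPlayerProcess(dataQueue, resultQueue, player):
    while True:
        pollingData = dataQueue.get()
        result = player.poll(pollingData)   # `poll` is an assumed Player method
        resultQueue.put(result)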
Example #8
def run_test_server():
    p = Process(target=run_server)
    p.start()
    yield p
    p.kill()
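
The generator above has the shape of a pytest fixture: start the server process, yield it to the test, and kill it on teardown. A minimal sketch of how such a function is typically wired up (pytest and the run_server callable are assumptions here):

# Sketch: registering the generator above as a pytest fixture.
import pytest
from multiprocessing import Process

@pytest.fixture
def test_server():
    p = Process(target=run_server)   # run_server: server entry point, defined elsewhere
    p.start()
    yield p                          # the test body runs while the server process is alive
    p.kill()                         # teardown

def test_server_process_is_alive(test_server):
    assert test_server.is_alive()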

Example #9
def receptor(conn):
    print("Capturing entrance, use Ctrl + D for exit")
    print("Put text lines :\n")
    sys.stdin = open(0)
    while True:
        try:
            line = input()
            conn.send(line)
        except EOFError:
            print("Exit")
            break


def lector(conn):
    son = current_process().pid
    while True:
        line = conn.recv()
        print(f"Reading (PID={son}): {line}")


if __name__ == "__main__":
    a, b = Pipe()
    process1 = Process(target=receptor, args=(a, ))
    process2 = Process(target=lector, args=(b, ))
    process1.start()
    process2.start()
    process1.join()
    process2.kill()
Example #10
class FTPSniff(object):
    x = random.randint(1000, 9999)

    def __init__(self,
                 pcap_path=f"files/{x}-ftp.pcap",
                 ftp_file=f"files/{x}-ftp.log",
                 verbose=False,
                 targets=""):
        self.pcap_path = pcap_path
        self.ftp_file = ftp_file
        self.main_thread = None
        self.usernames = []
        self.passwords = []
        self.addrs = []
        self.targets = list(targets.split("/"))

        init()
        logging.addLevelName(logging.CRITICAL, f"[{red}!!{reset}]")
        logging.addLevelName(logging.WARNING, f"[{red}!{reset}]")
        logging.addLevelName(logging.INFO, f"[{cyan}*{reset}]")
        logging.addLevelName(logging.DEBUG, f"[{magenta}*{reset}]")
        logging.basicConfig(format=f"%(levelname)s %(message)s",
                            level=logging.DEBUG if verbose else logging.INFO)

    def check_login(self, pkt, username, passwd):
        if b'230' in pkt[Raw].load:
            return True
        else:
            return False

    def check_for_ftp(self, pkt):
        if pkt.haslayer(TCP) and pkt.haslayer(Raw):
            return True
        else:
            return False

    def handle_pkt(self, pkt):
        if not self.check_for_ftp(pkt):
            return

        wrpcap(self.pcap_path, pkt)
        data = pkt[Raw].load.decode(errors="ignore")  # decode the payload instead of stripping characters from its str() repr

        if 'USER ' in data:
            user = data.split("USER ")[1].strip()
            self.usernames.append(user)
        elif 'PASS ' in data:
            passwd = data.split("PASS ")[1].strip()
            self.passwords.append(passwd)
            self.addrs.append((pkt[IP].src, pkt[IP].dst))
        else:
            try:
                if self.check_login(pkt, self.usernames[-1],
                                    self.passwords[-1]):
                    with open(self.ftp_file, "a+") as fd:
                        user = self.usernames[-1].replace("\r\n", "").replace(
                            "\\r\\n", "")
                        passwd = self.passwords[-1].replace("\r\n",
                                                            "").replace(
                                                                "\\r\\n", "")
                        fd.write(
                            f"{self.addrs[-1][0]} -> {self.addrs[-1][1]}:\n   Username: {user}\n   Password: {passwd}\n\n"
                        )
                        logging.info(
                            f"{self.addrs[-1][0]} -> {self.addrs[-1][1]}:\n\tUsername: {user}\n\tPassword: {passwd}"
                        )
            except:
                pass

    def sniff_thread(self):
        filter_ = "tcp port 21"
        if len(self.targets) > 0:
            for x in self.targets:
                filter_ += " and host " + x
        sniff(filter=filter_, prn=self.handle_pkt)

    def start(self):
        logging.debug("FTP: Starting sniff thread")
        self.main_thread = Process(target=self.sniff_thread)
        self.main_thread.start()

    def stop(self):
        logging.debug("FTP: Sniff thread stopped")
        self.main_thread.kill()
        return (self.pcap_path, self.ftp_file)
Example #11
    p3 = Process(target=square, args=(8, ), name='8 squared')
    p4 = Process(target=square, args=(12, ), name='12 squared')

    # Process properties
    print(f'p1 pid = {p1.pid}')
    print(f'p1 daemon {p1.daemon}')
    print(
        f'p1 authkey = {p1.authkey.decode(encoding="UTF-8", errors="ignore")}')
    print(f'p1 exitcode before start = {p1.exitcode}')

    p1.start()
    p2.start()
    p3.start()
    p3.terminate()
    p4.start()
    p4.kill()

    p1.join(timeout=3)
    p2.join()
    p3.join()
    p4.join()

    print(f'Is p1 still alive? {p1.is_alive()}')
    print(f'p1 sentinel = {p1.sentinel}')
    print(f'p1 exitcode after join = {p1.exitcode}')

    p1.close()
    p2.close()
    p3.close()
    p4.close()
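
This fragment references a square worker and processes p1 and p2 that were defined earlier in its source file. A hypothetical preamble consistent with the calls above could be:

# Hypothetical preamble for the fragment above -- not part of the original snippet.
from multiprocessing import Process

def square(n):
    print(f'{n} squared = {n * n}')

p1 = Process(target=square, args=(2, ), name='2 squared')
p2 = Process(target=square, args=(4, ), name='4 squared')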
Example #12
class Frame:
    __m3u8_process = None
    __server_process = None

    def __init__(self, root, local_frame: layout_local.Frame,
                 video_frame: layout_video.Frame, url_frame: layout_url.Frame,
                 my_cache, logger) -> None:
        super().__init__()
        self.my_cache = my_cache
        self.root = root
        self.__logger = logger

        frame = ttk.Frame(root)
        frame.pack(fill=tkinter.X, padx=5, pady=5, side=tkinter.BOTTOM)

        self.local_frame = local_frame
        self.video_frame = video_frame
        self.url_frame = url_frame

        # Start the service
        self.start_btn = start_btn = ttk.Button(frame,
                                                text='启动转播',
                                                command=self.start)
        start_btn.pack(side=tkinter.LEFT)

        # Stop the service
        self.stop_btn = stop_btn = ttk.Button(frame,
                                              text='停止转播',
                                              command=self.stop,
                                              state=tkinter.DISABLED)
        stop_btn.pack(side=tkinter.LEFT, padx=5, pady=5)

        # Merge video files
        self.create_mp4_btn = create_mp4_btn = ttk.Button(
            frame, text='合并视频', command=self.create_mp4)
        create_mp4_btn.pack(side=tkinter.LEFT)

        # Clear the video cache
        self.clear_cache_btn = clear_cache_btn = ttk.Button(
            frame, text='清空缓存', command=self.clear_cache)
        clear_cache_btn.pack(side=tkinter.RIGHT)

        self.is_start = False

        self.stop()

    # Check the cache directory
    def check_video_cache_dir(self):
        video_cache_dir = self.local_frame.video_cache_dir()
        if len(video_cache_dir) == 0:
            messagebox.showerror('错误', '请选择缓存目录')
            return False
        video_cache_dir = os.path.abspath(video_cache_dir)
        if not os.path.exists(video_cache_dir):
            os.mkdir(video_cache_dir)
        return video_cache_dir

    def start_process(self):

        video_cache_dir = self.check_video_cache_dir()
        if not video_cache_dir:
            return

        # Check the port
        port = self.local_frame.port()
        if not utils.is_int(port):
            return messagebox.showerror('错误', '端口只能是数字')
        port = int(port)
        if port < 2000 or port > 60000:
            return messagebox.showerror('错误', '端口只能从2000到60000')

        create_danmaku: bool = self.local_frame.create_danmaku()

        # print(video_cache_dir, port)

        # Check the three URLs
        video_url = self.video_frame.video_url()
        danmaku_url = self.video_frame.danmaku_url()
        proxy_url = self.video_frame.proxy_url()

        if create_danmaku:
            print('自建弹幕')
            danmaku_url = '1'

        # print(video_url, danmaku_url, proxy_url)

        if len(video_url) == 0:
            return messagebox.showerror('错误', '请填写视频源网址')
        else:
            if video_url != '1' and not utils.is_url(video_url):
                return messagebox.showerror(
                    '错误', '视频源的格式错误,只接受:\nhttp:\\\\xxx\n的格式')

        if danmaku_url != '1':
            if len(danmaku_url) > 0 and not utils.is_url(danmaku_url):
                return messagebox.showerror(
                    '错误', '弹幕源的格式错误,只接受:\nhttp:\\\\xxx\n的格式')

        if len(proxy_url) > 0:
            if not utils.is_url(proxy_url):
                return messagebox.showerror('错误',
                                            '代理的格式错误,只接受:\nhttp:\\\\xxx\n的格式')

        check = test_connect(video_url, proxy_url)
        if check != 'ok':
            has_proxy = len(proxy_url) > 0
            title = '连接错误'
            if has_proxy:
                title = '代理服务器出现错误'
            message = title
            if check == 'NeedTWIP':
                message = '四季TV网络视频源 需要台湾IP'
            elif check == 'ProxyError':
                message = '连接不到代理服务器'
            elif check == 'NotM3u8':
                message = '网络视频源 返回的不是M3u8文件格式'
            elif check == 'TimeOut':
                message = '连接 网络视频源 超时(5秒)'
            return messagebox.showerror(title, message)

        self.__m3u8_process = Process(target=m3u8.run,
                                      args=(video_cache_dir, video_url,
                                            proxy_url, self.my_cache.cache))
        self.__m3u8_process.start()

        only_video = self.local_frame.only_video()
        self.__server_process = Process(target=server.run,
                                        args=(port, video_cache_dir,
                                              danmaku_url, only_video,
                                              self.my_cache.cache))
        self.__server_process.start()

        return '123ok'

    def start(self):
        self.my_cache.cache.set('m3u8_stop', False)
        if self.start_process() != '123ok':
            return
        self.is_start = True

        self.local_frame.disable(True)
        self.video_frame.disable(True)
        self.url_frame.disable(False)
        self.start_btn.config(state=tkinter.DISABLED)
        self.stop_btn.config(state=tkinter.NORMAL)
        self.clear_cache_btn.config(state=tkinter.DISABLED)
        self.create_mp4_btn.config(state=tkinter.DISABLED)

        self.url_frame.set_ip(port=self.local_frame.port())

        self.__logger.open()

    def stop(self):
        print('停止转播')
        self.my_cache.cache.set('m3u8_stop', True)

        if self.__server_process is not None:
            self.__m3u8_process.kill()
            self.__server_process.kill()
            self.__m3u8_process = None
            self.__server_process = None

        self.is_start = False

        self.local_frame.disable(False)
        self.video_frame.disable(False)
        self.url_frame.disable(True)
        self.start_btn.config(state=tkinter.NORMAL)
        self.stop_btn.config(state=tkinter.DISABLED)
        self.clear_cache_btn.config(state=tkinter.NORMAL)
        self.create_mp4_btn.config(state=tkinter.NORMAL)

        self.url_frame.clear_ip()

    def clear_cache(self):
        dir = self.check_video_cache_dir()
        if not dir:
            return
        i = 0
        true = True
        title = '高危操作,确认3次,当前第 {} 次'
        while true and i < 3:
            true = true and messagebox.askokcancel(
                title.format(i + 1), dir + '\n将会清空视频缓存文件夹内所有文件,确认清空?')
            i += 1
        if not true:
            return
        if os.path.exists(dir):
            try:
                rmtree(dir)
                sleep(0.2)
                os.mkdir(dir)
                messagebox.showinfo('清理完成', '成功清空视频缓存文件夹')
            except Exception as e:
                messagebox.showerror('出现错误',
                                     '清空文件夹失败\n' + dir + '\n' + e.__str__())

    def create_mp4(self):
        if not utils.has_ffmpeg():
            return messagebox.showerror('错误', '没有安装 ffmpeg')

        video_cache_dir = self.check_video_cache_dir()
        if not video_cache_dir:
            return
        if not create_list.has_file(video_cache_dir):
            return messagebox.showerror('错误', '缓存文件夹内没有.ts文件')

        create_list.save(video_cache_dir)

        list_path = os.path.normpath(os.path.join(video_cache_dir, 'list.txt'))
        final_mp4_path = os.path.normpath(
            os.path.join(video_cache_dir, 'final.mp4'))

        command_line = 'ffmpeg -f concat -safe 0 -i {} -c copy {} -y'.format(
            list_path, final_mp4_path)

        if platform == 'win32':
            si = subprocess.STARTUPINFO()
            si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            process = subprocess.Popen(command_line,
                                       shell=True,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       startupinfo=si)
        else:
            process = subprocess.Popen(command_line,
                                       shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       env=utils.popen_env())

        win = tkinter.Toplevel()
        tkinter.Label(win, text='正在合并视频文件中,请稍候',
                      font=('times', 20, 'bold')).pack(padx=10, pady=10)
        win.resizable(0, 0)
        win.after(100, utils.move_to_screen_center, win)

        def check():
            return_code = process.poll()
            # print('return_code', return_code)
            if return_code is None:
                win.after(1000, check)
                return

            win.grab_release()
            win.destroy()
            if return_code == 0:
                if messagebox.askyesno('合并文件成功', '是否打开文件夹?'):
                    if platform == 'win32':
                        os.startfile(final_mp4_path)
                        # subprocess.Popen('explorer /select,"{}"'.format(final_mp4_path))
                    else:
                        subprocess.Popen(['open', '-R', final_mp4_path])
            else:
                messagebox.showinfo('合并视频文件错误', process.stdout.read())

        win.after(100, check)

        def on_closing():
            if messagebox.askokcancel('警告', '正在合并视频,关闭这个窗口将会中断合并视频'):
                process.kill()
                win.destroy()

        win.protocol("WM_DELETE_WINDOW", on_closing)

        win.grab_set()
Example #13
async def test_no_message_lost_during_scaling(
    slow_process_executor_image,
    logger,
    k8s_cluster,
    load_images_in_kind,
    set_test_pip_version,
):
    flow = Flow(
        name='test-flow-slow-process-executor',
        infrastructure='K8S',
        timeout_ready=120000,
        k8s_namespace='test-flow-slow-process-executor-ns',
    ).add(
        name='slow_process_executor',
        uses=slow_process_executor_image,
        timeout_ready=360000,
        replicas=3,
        grpc_data_requests=True,
    )

    with flow:
        with kubernetes_tools.get_port_forward_contextmanager(
                'test-flow-slow-process-executor', flow.port_expose):
            # sleep as the port forward setup can take some time
            time.sleep(0.1)
            client_kwargs = dict(
                host='localhost',
                port=flow.port_expose,
            )
            client_kwargs.update(flow._common_kwargs)

            stop_event = multiprocessing.Event()
            scale_event = multiprocessing.Event()
            received_resposes = multiprocessing.Queue()
            process = Process(
                target=send_requests,
                kwargs={
                    'client_kwargs': client_kwargs,
                    'stop_event': stop_event,
                    'scale_event': scale_event,
                    'received_resposes': received_resposes,
                    'logger': logger,
                },
                daemon=True,
            )
            process.start()

            time.sleep(1.0)

            # scale the slow-init executor down to a single replica
            k8s_clients = K8sClients()
            logger.debug('Scale down executor to 1 replica')
            k8s_clients.apps_v1.patch_namespaced_deployment_scale(
                'slow-process-executor',
                namespace='test-flow-slow-process-executor',
                body={"spec": {
                    "replicas": 1
                }},
            )
            scale_event.set()

            # wait for replicas to be dead
            while True:
                pods = k8s_clients.core_v1.list_namespaced_pod(
                    namespace='test-flow-slow-process-executor',
                    label_selector=f'app=slow-process-executor',
                )
                if len(pods.items) == 1:
                    # still continue for a bit to hit the new replica only
                    logger.debug('Scale down complete')
                    time.sleep(1.0)
                    stop_event.set()
                    break
                await asyncio.sleep(1.0)

            # allow some time for responses to complete
            await asyncio.sleep(10.0)
            # kill the process as the client can hang due to lost responses
            if process.is_alive():
                process.kill()
            process.join()

            responses_list = []
            while received_resposes.qsize():
                responses_list.append(int(received_resposes.get()))

            logger.debug(
                f'Got the following responses {sorted(responses_list)}')
            assert sorted(responses_list) == list(
                range(min(responses_list),
                      max(responses_list) + 1))
Example #14
class StreamService:
    def __init__(self, music_library=None):
        self.current_frame = Array(ctypes.c_char, 8192, lock=False)
        self.current_frame_id = Array(ctypes.c_char,
                                      len(str(uuid4())),
                                      lock=False)
        self.title = Array('c', 256, lock=False)
        self.artist = Array('c', 256, lock=False)
        self.song_queue = Queue()
        self.next_frame_ready = Event()
        self._p = None

        self.song_pool = []

        # Parse initial playlist
        if music_library:
            self.song_pool = glob.glob(music_library + '/*.mp3')

        print(self.song_pool)

    @staticmethod
    def _stream(song_queue, frame_buffer, next_frame_event, title, artist):
        while 1:
            next_song = Song(song_queue.get(), frame_buffer, next_frame_event)
            title.value = ''.join(next_song.title[:255]).encode()
            artist.value = ''.join(next_song.artist[:255]).encode()
            next_song.stream_mp3()

    @staticmethod
    def _monitor_queue(song_queue, song_pool):
        while 1:
            # Add a random song if the queue is empty
            if song_queue.empty():
                song_queue.put(choice(song_pool))

    def skip(self):
        self.stop_stream()
        self.start_stream()

    def start_stream(self):
        """Start streaming
        """
        self._p_2 = Process(target=self._monitor_queue,
                            args=(self.song_queue, self.song_pool))

        self._p_2.start()

        self._p = Process(target=self._stream,
                          args=(self.song_queue, self.current_frame,
                                self.current_frame_id, self.title,
                                self.artist))
        self._p.start()

    def stop_stream(self):
        """Stop streaming
        """
        if self._p:
            self._p.kill()

        if self._p_2:
            self._p_2.kill()

    def add_song_to_queue(self, song_path):
        self.song_queue.put(song_path)

    def listen(self):
        """Send a stream of live music packets
        """
        last_frame_sent = None
        while True:
            if last_frame_sent != self.current_frame_id.raw:
                last_frame_sent = self.current_frame_id.raw
                yield self.current_frame.raw
Example #15
if args.help is True:
    tool_exit_message()
    exit()

statics.logging = args.log
basicConfig(filename='logs/logfile.log',
            level=INFO,
            format='%(asctime)s %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S %p')
logstart()

engine = Process(target=starts, )
engine.start()
try:
    while True:
        sleep(2)
        if statics.senderQ.qsize() == 0:
            engine.join(timeout=2)
            break
except KeyboardInterrupt:
    logwarn('The process was killed by the user')
    print('[WARNING] The process was killed by the user!')
finally:
    try:
        engine.kill()
    except:
        engine.terminate()
    sleep(0.05)
    logstop()
Example #16
class ProcessManager:
    _processes: Dict[str, "ProcessManager"] = {}

    def __init__(self, config_name: str = "alas") -> None:
        self.config_name = config_name
        self._renderable_queue: queue.Queue[
            ConsoleRenderable] = Setting.manager.Queue()
        self.renderables: List[ConsoleRenderable] = []
        self.renderables_max_length = 400
        self.renderables_reduce_length = 80
        self._process: Process = None
        self.thd_log_queue_handler: threading.Thread = None

    def start(self, func: str, ev: threading.Event = None) -> None:
        if not self.alive:
            self._process = Process(
                target=ProcessManager.run_process,
                args=(
                    self.config_name,
                    func,
                    self._renderable_queue,
                    ev,
                ),
            )
            self._process.start()
            self.start_log_queue_handler()

    def start_log_queue_handler(self):
        if (self.thd_log_queue_handler is not None
                and self.thd_log_queue_handler.is_alive()):
            return
        self.thd_log_queue_handler = threading.Thread(
            target=self._thread_log_queue_handler)
        self.thd_log_queue_handler.start()

    def stop(self) -> None:
        lock = FileLock(f"{filepath_config(self.config_name)}.lock")
        with lock:
            if self.alive:
                self._process.kill()
                self.renderables.append(
                    f"[{self.config_name}] exited. Reason: Manual stop\n")
            if self.thd_log_queue_handler is not None:
                self.thd_log_queue_handler.join(timeout=1)
                if self.thd_log_queue_handler.is_alive():
                    logger.warning(
                        "Log queue handler thread does not stop within 1 seconds"
                    )
        logger.info(f"[{self.config_name}] exited")

    def _thread_log_queue_handler(self) -> None:
        while self.alive:
            try:
                log = self._renderable_queue.get(timeout=1)
            except queue.Empty:
                continue
            self.renderables.append(log)
            if len(self.renderables) > self.renderables_max_length:
                self.renderables = self.renderables[self.
                                                    renderables_reduce_length:]
        logger.info("End of log queue handler loop")

    @property
    def alive(self) -> bool:
        if self._process is not None:
            return self._process.is_alive()
        else:
            return False

    @property
    def state(self) -> int:
        if self.alive:
            return 1
        elif len(self.renderables) == 0:
            return 2
        elif isinstance(self.renderables[-1], str):
            if self.renderables[-1].endswith(
                    "Reason: Manual stop\n") or self.renderables[-1].endswith(
                        "Reason: Finish\n"):
                return 2
            elif self.renderables[-1].endswith("Reason: Update\n"):
                return 4
            else:
                return 3
        else:
            return 3

    @classmethod
    def get_manager(cls, config_name: str) -> "ProcessManager":
        """
        Create a new alas if not exists.
        """
        if config_name not in cls._processes:
            cls._processes[config_name] = ProcessManager(config_name)
        return cls._processes[config_name]

    @staticmethod
    def run_process(config_name,
                    func: str,
                    q: queue.Queue,
                    e: threading.Event = None) -> None:
        # Setup logger
        set_file_logger(name=config_name)
        set_func_logger(func=q.put)

        # Set server before loading any buttons.
        import module.config.server as server
        from module.config.config import AzurLaneConfig

        AzurLaneConfig.stop_event = e
        config = AzurLaneConfig(config_name=config_name)
        server.server = deep_get(config.data,
                                 keys="Alas.Emulator.Server",
                                 default="cn")
        try:
            # Run alas
            if func == "Alas":
                from alas import AzurLaneAutoScript

                if e is not None:
                    AzurLaneAutoScript.stop_event = e
                AzurLaneAutoScript(config_name=config_name).loop()
            elif func == "Daemon":
                from module.daemon.daemon import AzurLaneDaemon

                AzurLaneDaemon(config=config_name, task="Daemon").run()
            elif func == "OpsiDaemon":
                from module.daemon.os_daemon import AzurLaneDaemon

                AzurLaneDaemon(config=config_name, task="OpsiDaemon").run()
            elif func == "AzurLaneUncensored":
                from module.daemon.uncensored import AzurLaneUncensored

                AzurLaneUncensored(config=config_name,
                                   task="AzurLaneUncensored").run()
            elif func == "Benchmark":
                from module.daemon.benchmark import Benchmark

                Benchmark(config=config_name, task="Benchmark").run()
            elif func == "GameManager":
                from module.daemon.game_manager import GameManager

                GameManager(config=config_name, task="GameManager").run()
            else:
                logger.critical("No function matched")
            logger.info(f"[{config_name}] exited. Reason: Finish\n")
        except Exception as e:
            logger.exception(e)

    @classmethod
    def running_instances(cls) -> List["ProcessManager"]:
        l = []
        for process in cls._processes.values():
            if process.alive:
                l.append(process)
        return l

    @staticmethod
    def restart_processes(instances: List["ProcessManager"] = None,
                          ev: threading.Event = None):
        """
        After update and reload, or failed to perform an update,
        restart all alas that running before update
        """
        logger.hr("Restart alas")
        if not instances:
            instances = []
            try:
                with open("./config/reloadalas", mode="r") as f:
                    for line in f.readlines():
                        line = line.strip()
                        instances.append(ProcessManager.get_manager(line))
            except:
                pass

        for process in instances:
            logger.info(f"Starting [{process.config_name}]")
            process.start(func="Alas", ev=ev)

        try:
            os.remove("./config/reloadalas")
        except:
            pass
        logger.info("Start alas complete")
Example #17
def run(func,
        data,
        n_workers: int = 2,
        n_tasks: int = 10,
        max_queue_size: int = 1,
        grace_period: int = 2,
        kill_period: int = 30,
        interrupt: Optional[int] = None) -> None:
    """
    Run a process pool of workers.

    Args:
        n_workers: Start this many processes
        n_tasks: Launch this many tasks
        max_queue_size: If queue exceeds this size, block when putting items on the queue
        grace_period: Send SIGINT to processes if they don't exit within this time after SIGINT/SIGTERM
        kill_period: Send SIGKILL to processes if they don't exit after this many seconds
        interrupt: If given, send signal SIGTERM to itself after queueing this many tasks
        :param func:
    """

    q = JoinableQueue(maxsize=max_queue_size)
    stop_event = Event()

    def handler(signalname):
        def f(signal_received, frame):
            raise KeyboardInterrupt(f"{signalname} received")

        return f

    # This will be inherited by the child process if it is forked (not spawned)
    signal.signal(signal.SIGINT, handler("SIGINT"))
    signal.signal(signal.SIGTERM, handler("SIGTERM"))

    procs = []
    for i in range(n_workers):
        p = Process(name=f"Worker-{i:02d}",
                    daemon=True,
                    target=worker,
                    args=(func, q, stop_event))
        procs.append(p)
        p.start()
    try:
        # Put tasks on queue
        for i_task in range(n_tasks):
            if interrupt and i_task == interrupt:
                os.kill(os.getpid(), signal.SIGTERM)

            logger.info("Put [{}] on queue".format(data[i_task]))
            q.put([data[i_task]])

        # Wait until all tasks are processed
        q.join()
    except KeyboardInterrupt:
        logger.warning("Caught KeyboardInterrupt! Setting stop event...")
    finally:
        stop_event.set()
        t = time()
        # Send SIGINT if process doesn't exit quickly enough, and kill it as last resort
        # .is_alive() also implicitly joins the process (good practice in linux)
        alive_procs = [p for p in procs if p.is_alive()]
        while alive_procs:
            alive_procs = [p for p in procs if p.is_alive()]
            # Escalate: SIGKILL once kill_period has passed, otherwise SIGINT after grace_period.
            if time() > t + kill_period:
                for p in alive_procs:
                    logger.warning("Sending SIGKILL to %s", p)
                    # Queues and other inter-process communication primitives can break when
                    # a process is killed, but we don't care here
                    p.kill()
            elif time() > t + grace_period:
                for p in alive_procs:
                    logger.warning("Sending SIGINT to %s", p)
                    os.kill(p.pid, signal.SIGINT)
            sleep(.01)
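
run() delegates the actual work to a worker(func, q, stop_event) callable that is not shown here. A hypothetical sketch of that loop, consistent with the JoinableQueue and stop event used above:

# Sketch of the assumed worker loop; not the original implementation.
import queue

def worker(func, q, stop_event):
    while not stop_event.is_set():
        try:
            args = q.get(timeout=0.5)   # run() puts each task on the queue as a list of arguments
        except queue.Empty:
            continue
        try:
            func(*args)
        finally:
            q.task_done()               # lets q.join() in run() return once every task is processed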
Example #18
         else:
             head_y += 1
             last_direction = direction
         head_y %= term()[1] * 2 - 10
     elif direction == 'd':
         if last_direction == 'a':
             head_x -= 1
         else:
             head_x += 1
             last_direction = direction
         head_x %= term()[0]
     elif direction == 'p':
         print('\x1b[0m', )
         print('\x1b[0;0H' + ' ' * term()[0] * (term()[1] - 2), )
         print('\x1b[0;0H', end='')
         rsp.kill()
         exit()
 if [head_x, head_y] in snake_dots:
     snake_dots = snake_dots[snake_dots.index([head_x, head_y]) + 1:]
     snake_len_max = max(
         snake_len_max,
         len(snake_dots) + big_food_to_grow_from_eaten_left)
 if small_food_x == head_x and small_food_y == head_y:
     big_food_counter += 1
     small_food_y = random.randint(0, term()[1] * 2 - 11)
     small_food_x = random.randint(0, term()[0] - 1)
     snake_dots = snake_dots[:] + [[head_x, head_y]]
     snake_len_max = max(
         snake_len_max,
         len(snake_dots) + big_food_to_grow_from_eaten_left)
 elif [head_x, head_y] in big_food_dots:
Example #19
class ZeroServer(object):

    event_listeners = set()
    event_queue = Queue()

    def __init__(self, *args, **kwargs):
        self.SaltClient = APIClient()
        self.event_listener_proc = Process(target=subprocess_read_events, args=(self.event_queue,))
        self.event_listener_proc.start()
        self.event_processor = Greenlet.spawn(process_events, self)
        super(ZeroServer, self).__init__(*args, **kwargs)

    def __del__(self, *args, **kwargs):
        self.event_listener_proc.kill()
        self.event_processor.kill()
        super(ZeroServer, self).__del__(*args, **kwargs)

    def validate_token(self, token):
        r = self.SaltClient.verify_token(token)
        if not r:
            r = {"start": '', "token": token, "expire": '', "name": '', "eauth": '', "valid": False}
        else:
            r['valid'] = True
        return r

    @zerorpc.stream
    def event_stream(self, token):
        v = self.validate_token(token)
        if v.get('valid', False):
            try:
                q = GQueue()
                self.event_listeners.add(q)
                for msg in q:
                    yield msg
            finally:
                self.event_listeners.remove(q)

    def auth(self, username, password, eauth='pam'):
        '''Authenticates a user against external auth and returns a token.'''
        def subprocess_auth(msg, q):
            from salt.client.api import APIClient
            import json
            SaltClient = APIClient()
            try:
                token = SaltClient.create_token(msg)
            except:
                token = {
                    'error': 'Invalid credentials',
                    'details': 'Authentication failed with provided credentials.'
                }
            q.put(json.dumps(token))

        q = GQueue()
        msg = {
            'username': username,
            'password': password,
            'eauth': eauth
        }
        subprocess_auth(msg, q)
        token = q.get()
        return json.loads(token)

    def cmd(self, cmdmesg):
        def subprocess_cmd(msg, q):
            from salt.client.api import APIClient
            import json
            SaltClient = APIClient()
            u = SaltClient.verify_token(msg['token'])
            if not u:
                q.put({"error": "Invalid token"})
                return
            retval = SaltClient.run(msg)
            echodict = deepcopy(msg)
            echodict.pop('token')
            if msg.get('mode', 'async') == 'async':
                echodict['minions'] = retval['minions']
                echodict['jid'] = retval['jid']
            else:
                echodict['result'] = retval
            echodict['username'] = u['name']
            q.put(json.dumps(echodict))
        q = GQueue()
        subprocess_cmd(cmdmesg, q)
        retval = q.get()
        return json.loads(retval)

    def runner_sync(self, cmdmesg):
        def subprocess_runner(msg, q):
            from salt.client.api import APIClient
            import json
            SaltClient = APIClient()
            u = SaltClient.verify_token(msg['token'])
            if not u:
                q.put({"error": "Invalid token"})
                return
            resp = SaltClient.runnerClient.cmd(cmdmesg['fun'], cmdmesg['arg'])
            q.put(json.dumps(resp))
        q = GQueue()
        subprocess_runner(cmdmesg, q)
        retval = q.get()
        return json.loads(retval)

    def signature(self, tgt, module, token):
        cdict = {}
        cdict['tgt'] = tgt
        cdict['module'] = module
        cdict['token'] = token
        j = self.SaltClient.signature(cdict)
        resp = self.get_job(j['jid'])
        while len(resp) == 0:
            sleep(1)
            resp = self.get_job(j['jid'])
        return resp

    def get_minions(self, mid='*'):
        def subprocess_minon(mid, q):
            from salt.client.api import APIClient
            import json
            SaltClient = APIClient()
            resp = SaltClient.runnerClient.cmd('cache.grains', mid)
            q.put(json.dumps(resp))
        q = GQueue()
        subprocess_minon(mid, q)
        retval = q.get()
        return json.loads(retval)

    def get_job(self, jid):
        def subprocess_job(jid, q):
            from salt.client.api import APIClient
            import json
            SaltClient = APIClient()
            resp = SaltClient.runnerClient.cmd('jobs.lookup_jid', jid)
            q.put(json.dumps(resp))
        q = GQueue()
        subprocess_job(jid, q)
        retval = q.get()
        return json.loads(retval)

    def get_active(self):
        def subprocess_job(q):
            from salt.client.api import APIClient
            import json
            SaltClient = APIClient()
            resp = SaltClient.runnerClient.cmd('jobs.active')
            q.put(json.dumps(resp))
        q = GQueue()
        subprocess_job(q)
        retval = q.get()
        return json.loads(retval)

    def broadcast_event(self, e):
        for q in self.event_listeners:
            q.put_nowait(json.loads(e))
Example #20
    def download_files(self, files):
        '''Download a list of GCF and/or GCA files from NCBI

        Parameters
            list - list of GCF/GCA ids

        Returns
            list - list of files not downloaded
        '''
        try:
            logger.info("Downloading {files} files".format(files=len(files)))
            if len(files) < self.processes:
                self.processes = len(files)
            self.download_map = self._split(files, self.processes)
            logger.info(
                "Using {np} parallel processes to download files".format(
                    np=self.processes))
            '''function to run download of genomes in parallel'''
            jobs = []
            manager = Manager()
            added = manager.Queue()
            fpath = manager.Queue()
            missing = manager.Queue()
            for i in range(self.processes):
                p = Process(target=download_genomes,
                            args=(self.download_map[i], added, fpath, missing,
                                  self.force))
                p.daemon = True
                p.start()
                jobs.append(p)
            for job in jobs:
                job.join()
            self.added = added.qsize()
            if not any(proc.is_alive() for proc in jobs):
                logger.info('All download processes completed')
            elif added.qsize() % 100 == 0:
                print("{n} downloaded".format(n=added.qsize()), end="\r")

            count = 0
            while True:
                if self.added == 0:  ## Check if no genome was successfully downloaded; if none, break
                    logger.info(
                        "None of listed genomes could be downloaded! Files not downloaded will be printed to {outdir}/FlexTaxD.missing"
                        .format(outdir=self.outdir.rstrip("/")))
                    self.write_missing(files)
                    break
                l = len(self.genome_names)
                gen = added.get()
                if gen:
                    self.genome_names += gen
                    self.genome_path[gen] = fpath.get()
                    if l == len(self.genome_names):
                        break
                else:
                    self.not_downloaded += missing.get()
                    count += 1
                if added.qsize():
                    break
            if len(self.not_downloaded) > 0:
                self.write_missing(self.not_downloaded)
        except KeyboardInterrupt:
            logger.info(
                "Program was interrupted by user: cleaning up subprocesses!")
        finally:  ## Make sure all sub-processes are ended even if program is forced to quit
            if any(proc.is_alive() for proc in jobs):
                for p in jobs:
                    print(p)
                    p.kill()
            time.sleep(1)
            if any(proc.is_alive() for proc in jobs):
                logger.error(
                    "Could not stop all subprocesses check your process manager and end them manually!"
                )
        return self.not_downloaded
Example #21
class Scraper(Bubble):

    process = None
    
    def __init__(self, repo, data=None):
        if data is None:  # Create a new scraper when a new repo is created
            data = {
                'state' :   'Ready',
                'scraped_file_count' : 0,
                'total_file_count'   : 0,
                'start_urls'         : []
            }
        super().__init__(repo, data)
        self.name  = 'Scraper'

    def get_bubble(self):
        return {
            'state' :   self.get_state(),
            'scraped_file_count'    :   self.get_scraped_file_count(),
            'total_file_count'      :   self.get_total_file_count(),
            'start_urls'            :   self.get_start_urls()
        }
    
    def get_scraped_file_count(self):
        path_scraped = os.path.join(PATH_REPO, self.repo.get_id(), PATH_SCRAPED)
        scraped_file = os.listdir(path_scraped)
        return len(scraped_file)
    
    def get_total_file_count(self):
        return self.total_file_count

    def get_start_urls(self):
        return self.start_urls

    def set_url(self, url, start_key, end_key):
        '''
        split = parse.urlsplit(url)
        query  = [re.sub('[{}]','',q) for q in split.query.split(',')]
        self.start_urls = [
            {
                'url':re.sub('[{][a-z]*[}]','{}={}'.format(query[0],key),url),
                'key': key
            } for key in range(int(start_key), int(end_key)+1)
        ]
        '''
        
        self.start_urls = [
            {
                'url': url.format(key),
                'key': key
            } for key in range(int(start_key), int(end_key)+1)
        ]
        self.total_file_count = len(self.start_urls)
        self.start_scrape()

    def start_scrape(self):
        def clear():       
            self.scraped_file_count = 0           
            path = os.path.join(PATH_REPO, self.repo.get_id(), PATH_SCRAPED)
            files = os.listdir(path)
            for f in files:
                path_file = os.path.join(path, f)
                #print(path_file)
                os.remove(path_file)

        def scrape(repo, start_urls):
            with requests.Session() as s:
                for url in start_urls:
                    res = s.get(url['url'])
                    print('{} {}'.format(res.url, res.status_code))
                    save_file(repo, res, url['key'])

        def save_file(repo, res, key):
            path_scraped = os.path.join(PATH_REPO, repo, PATH_SCRAPED)
            path_file = os.path.join(path_scraped, str(key))
            with open(path_file, 'w') as f:
                f.write(res.text)

        if self.state != 'Start':
            clear()  # Clear every scraped file to start again
            self.set_state('Start')
            self.process = Process(target=scrape, args=(self.repo.get_id(), self.start_urls))
            self.process.start()
            self.process.join()
            self.set_state('Done')    

    def stop_scrape(self):
        self.process.terminate()
    
    def restart_scrape(self):
        self.process.kill()
        self.start_scrape()
Example #22
print('\nProcess: \nParent process (%s).' % os.getpid())
p = Process(target=run_proc, args=('test',))  # arguments are passed through args
p.start()  # start() launches the child process; simpler than calling fork() directly
p.join()  # join() waits for the child to finish before continuing; commonly used for inter-process synchronization
##################################################################
# Pool time
from multiprocessing import Pool  # built on Process, used to launch a large number of child processes
# def long_time_task(name):
#     print('Run task %s (%s)...' % (name, os.getpid()))
#     start = time.time(); time.sleep(random.random() * 3)
#     end = time.time(); print('Task %s runs %0.2f seconds.' % (name, (end - start)))
# print('\nPool: \nParent process %s.' % os.getpid())
# p = Pool(4)  # a pool of 4 worker processes, so the first 4 tasks below start almost immediately and the 5th waits for one of them to finish
# for i in range(5): p.apply_async(long_time_task, args=(i,))
# p.close()  # after calling close(), no new tasks can be submitted to the Pool
# p.join()  # Pool.join() waits for all child processes to finish; close() must be called before join()
##################################################################
# subprocess
import subprocess  # start a child process and control its input and output
print('\nsubprocess without controlled input/output: \n$ nslookup www.python.org')  # no control needed here
r = subprocess.call(['nslookup', 'www.python.org']); print('Exit code:', r)
print('\nsubprocess with controlled input/output: $ nslookup')
p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate(b'set q=mx\npython.org\nexit\n')  # equivalent to typing these three commands: set q=mx; python.org; exit
print(output.decode('utf-8'))
print('Exit code:', p.returncode)
p = subprocess.Popen(['nslookup'], stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
p.wait()  # needed so the script waits for interactive input in the terminal...
p.kill()
print(p.returncode)  # if the session is ended manually this line is never reached; once kill() has run, a KeyboardInterrupt is raised
Example #23
class WebServer(object):
    """
    WebServer used to receive hook
    """

    def __init__(self, queue_name):
        self.process = None
        self.http_port = int(os.environ.get("PORT", 9000))
        self.queue_name = queue_name
        logger.info("HTTP webhook server will listen", port=self.http_port)

        # Configure the web application with code review routes
        self.app = web.Application()
        self.app.add_routes(
            [
                web.get("/ping", self.ping),
                web.post("/codereview/new", self.create_code_review),
            ]
        )

    def register(self, bus):
        self.bus = bus
        self.bus.add_queue(self.queue_name, mp=True, redis=True)

    def start(self):
        """
        Run the web server used by hooks in its own process
        """

        def _run():
            web.run_app(self.app, port=self.http_port, print=logger.info)

        # Run webserver in its own process
        self.process = Process(target=_run)
        self.process.start()
        logger.info("Web server started", pid=self.process.pid)

        return self.process

    def stop(self):
        assert self.process is not None, "Web server not started"
        self.process.kill()
        logger.info("Web server stopped")

    async def ping(self, request):
        """
        Dummy test endpoint
        """
        return web.Response(text="pong")

    async def create_code_review(self, request):
        """
        HTTP POST webhook used by HarborMaster on new builds
        It only stores build ids and reply ASAP
        Mandatory query parameters:
        * diff as ID
        * repo as PHID
        * revision as ID
        * target as PHID
        """
        try:
            build = PhabricatorBuild(request)
            await self.bus.send(self.queue_name, build)
        except Exception as e:
            logger.error(str(e), path=request.path_qs)
            raise web.HTTPBadRequest(text=str(e))

        logger.info("Queued new build", build=str(build))
        return web.Response(text="Build queued")
Example #24
    try:
        processes = []
        for a in range(options.count):

            try:
                w = DesWorker(address, port, options.secret)
            except AssertionError as e:
                import traceback
                traceback.print_exc()
                print "Error probably due to key mismatch"
                sys.exit()
            w.log("Connected to manager at %s:%s" % (address, port))

            process = Process(target=w.run)
            processes.append(process)

        # Start processes and wait
        for process in processes:
            process.start()
        for process in processes:
            process.join()

    # Always kill all processes when finished
    finally:
        for process in processes:
            try:
                process.kill()
            except Exception:
                pass
Example #25
    d = manager.dict()
    q = Queue()
    ps = []
    for i in range(1):
        ps.append(Process(target=gen_proc,
                          args=(q, 10000, i, 4096 * 2)))  ##workers
    hs = []
    for i in range(1 * 1):
        hs.append(Process(target=hasher,
                          args=(q, d, valid, total, i)))  ## hasher

    r = Process(target=reporter, args=(q, d, valid, total))

    for p in ps:
        p.start()
    for h in hs:
        h.start()
    r.start()
    try:
        for p in ps:
            p.join()
        for h in hs:
            h.join()
        r.join()
    except:
        for p in ps:
            p.kill()
        for h in hs:
            h.kill()
        r.kill()
Example #26
class Deezer(CommonPlaySkill):
    def __init__(self):
        super(Deezer, self).__init__()
        self.regexes = {}
        self.playlist_data = None
        self.playing_wait_thread = None
        self.playing_thread = None
        self.playlist_playing_index = Value('i', -1)
        self.playing_seconds = Value('i', -1)

    def initialize(self):
        super().initialize()
        self.audio_service = AudioService(self.bus)
        self.add_event('mycroft.audio.service.next', self.next_track)
        self.add_event('mycroft.audio.service.prev', self.prev_track)
        self.add_event('mycroft.audio.service.pause', self.pause)
        self.add_event('mycroft.audio.service.resume', self.resume)
        self.arl = self.settings.get('arl')
        # TODO directory should probably default to self.file_system.path
        # This is a unique directory for each Skill.
        # There's also mycroft.util.get_cache_directory if you intend it to be temporary
        self.music_dir = self.settings.get('music_dir', self.file_system.path)
        self.track_directory = os.path.join(self.music_dir, "track")

    def CPS_match_query_phrase(self, phrase):
        self.log.info(f"Check Query Phrase: {phrase}")

        phrase, cps_match_level, data = self.specific_query(phrase=phrase)
        if cps_match_level is None:
            track = deezer_utils.search_first_track(track_name=phrase,
                                                    arl=self.arl)
            if track is None:
                return None
            else:
                track_id = track.get('id')
                self.speak_dialog(key="track_found",
                                  data={
                                      'title_short': track["title_short"],
                                      'artist': track['artist']['name']
                                  })
                download_path = deezer_utils.download_track(
                    track_id=track_id,
                    track_directory=self.track_directory,
                    arl=self.arl)
                data = {
                    "type": 0,
                    "track": download_path,
                    "track_id": track_id
                }
                if 'deezer' in phrase:
                    cps_match_level = CPSMatchLevel.EXACT
                else:
                    cps_match_level = CPSMatchLevel.TITLE

        return phrase, cps_match_level, data

    """ This method responds wether the skill can play the input phrase.

        The method is invoked by the PlayBackControlSkill.

        Returns: tuple (matched phrase(str),
                        match level(CPSMatchLevel),
                        optional data(dict))
                 or None if no match was found.
    """

    def CPS_start(self, phrase, data):
        if self.playing_thread is not None:
            self.playing_thread.kill()
            self.playing_thread = None
        if self.playlist_data is not None:
            self.playlist_data = None
        if self.playlist_playing_index.value is not None:
            self.playlist_playing_index.value = -1

        if data['type'] == 0:
            self.log.info("TrackType is Track")
            self.CPS_play(data['track'])
        elif data['type'] == 1:
            self.log.info("TrackType is Playlist")
            playlist = data['playlist']
            self.playlist_data = data
            playlist_search_results = data['playlist_search_results']
            track_directory = os.path.join(self.music_dir,
                                           str(playlist_search_results['id']))
            self.playing_thread = Process(target=self.playing_playlist,
                                          args=(playlist, track_directory, 0,
                                                -1))
            self.playing_thread.start()
            self.playing_thread.join()
            shutil.rmtree(track_directory, ignore_errors=True)

    """ Starts playback.
    
        Called by the playback control skill to start playback if the
        skill is selected (has the best match level)
    """

    def specific_query(self, phrase):
        # Check if saved
        # match = re.match(self.translate_regex('saved_songs'), phrase)
        # if match and self.saved_tracks:
        #     return (1.0, {'data': None,
        #                   'type': 'saved_tracks'})

        # Check if playlist
        phrase = phrase.lower()
        match = re.match(self.translate_regex('playlist'), phrase)
        if match:
            playlist_search_results = deezer_utils.search_first_playlist(
                match.groupdict()['playlist'], self.arl)
            if playlist_search_results:
                tracklist = requests.get(
                    playlist_search_results['tracklist']).json()
                try:
                    data = tracklist["data"]
                    next_tracklist_url = tracklist['next']
                    try:
                        while True:
                            next_tracklist = requests.get(
                                next_tracklist_url).json()
                            data += next_tracklist['data']
                            next_tracklist_url = next_tracklist['next']
                            self.log.info(next_tracklist_url)
                    except KeyError:
                        # no more tracklist pages to fetch
                        pass
                except KeyError:
                    # tracklist payload missing the expected 'data'/'next' fields
                    pass
                return_data = {
                    'type': 1,
                    'playlist': data,
                    'playlist_search_results': playlist_search_results
                }
                return phrase, CPSMatchLevel.TITLE, return_data
            else:
                return phrase, CPSMatchLevel.GENERIC, None
        # Check album
        # match = re.match(self.translate_regex('album'), phrase)
        # if match:
        #     album = match.groupdict()['album']
        #     return self.query_album(album)
        #
        # # Check artist
        # match = re.match(self.translate_regex('artist'), phrase)
        # if match:
        #     artist = match.groupdict()['artist']
        #     return self.query_artist(artist)
        # match = re.match(self.translate_regex('song'), phrase)
        # if match:
        #     song = match.groupdict()['track']
        #     return self.query_song(song)

        return phrase, None, None

    def playing_playlist(self, playlist, track_directory, start_index, seek):
        for i in range(start_index, len(playlist)):
            try:
                self.playlist_playing_index.value = i
                track_id = playlist[i]['id']
                downloaded_track = deezer_utils.download_track(
                    track_id=track_id,
                    track_directory=track_directory,
                    arl=self.arl)

                self.log.info(str(downloaded_track))
                if seek > -1:
                    self.audio_service.seek(seconds=seek)
                    self.audio_service.resume()
                else:
                    self.CPS_play(downloaded_track)
                self.log.info("Playing now ...")
                duration = playlist[i]['duration']
                for d in range(0, duration):
                    self.playing_seconds.value = d
                    time.sleep(1)

                shutil.rmtree(downloaded_track, ignore_errors=True)
            except Exception as e:
                print(e)
                self.log.error(e)

    def next_track(self):
        if self.playlist_data is not None:
            if self.playing_thread is not None:
                self.playing_thread.kill()
                self.playing_thread = None
            playlist_search_results = self.playlist_data[
                'playlist_search_results']
            track_directory = os.path.join(self.music_dir,
                                           str(playlist_search_results['id']))
            if self.playlist_playing_index.value + 1 >= len(
                    self.playlist_data['playlist']):
                self.speak_dialog(
                    key='playlist.end',
                    data={
                        'title':
                        self.playlist_data['playlist_search_results']['title']
                    })
                self.playlist_data = None
                self.playlist_playing_index.value = -1
                shutil.rmtree(track_directory)
                return
            self.playing_thread = Process(
                target=self.playing_playlist,
                args=(self.playlist_data['playlist'], track_directory,
                      self.playlist_playing_index.value + 1, -1))
            self.playing_thread.start()
            self.playing_thread.join()

    def prev_track(self):
        if self.playlist_data is not None:
            if self.playing_thread is not None:
                self.playing_thread.kill()
                self.playing_thread = None
            playlist_search_results = self.playlist_data[
                'playlist_search_results']
            track_directory = os.path.join(self.music_dir,
                                           str(playlist_search_results['id']))
            if self.playlist_playing_index.value + 1 >= len(
                    self.playlist_data['playlist']):
                self.speak_dialog(
                    key='playlist.end',
                    data={
                        'title':
                        self.playlist_data['playlist_search_results']['title']
                    })
                self.playlist_data = None
                self.playlist_playing_index.value = -1
                shutil.rmtree(track_directory)
                return
            index = self.playlist_playing_index.value - 1
            if index < 0:
                index = index + 1

            self.playing_thread = Process(target=self.playing_playlist,
                                          args=(self.playlist_data['playlist'],
                                                track_directory, index, -1))
            self.playing_thread.start()
            self.playing_thread.join()

    def pause(self):
        if self.playlist_data is not None:
            if self.playing_thread is not None:
                self.playing_thread.kill()
                self.playing_thread = None

            self.audio_service.pause()

    def resume(self):
        if self.playlist_data is not None:
            if self.playing_thread is not None:
                self.playing_thread.kill()
                self.playing_thread = None
            playlist_search_results = self.playlist_data[
                'playlist_search_results']
            track_directory = os.path.join(self.music_dir,
                                           str(playlist_search_results['id']))

            self.playing_thread = Process(
                target=self.playing_playlist,
                args=(self.playlist_data['playlist'], track_directory,
                      self.playlist_playing_index.value,
                      self.playing_seconds.value))
            self.playing_thread.start()
            self.playing_thread.join()
        pass

    def translate_regex(self, regex):
        if regex not in self.regexes:
            path = self.find_resource(regex + '.regex')
            if path:
                with open(path) as f:
                    string = f.read().strip()
                self.regexes[regex] = string
        return self.regexes[regex]

    @intent_handler('user.intent')
    def speak_user_name(self, message):
        self.log.info("Username Intent")
        self.speak_dialog(
            key='user',
            data={'user_name': deezer_utils.get_user_info(arl=self.arl)})
Exemple #27
0
def client(host, port):
    # Reconstructed enclosing function (the snippet starts mid-loop); `sendQueue`
    # is assumed to be a module-level queue filled elsewhere.
    while True:
        try:
            data = sendQueue.get_nowait()
        except queue.Empty:
            continue
        else:
            doClientConnection(data, host, port)


def doClientConnection(data, host, port):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    time.sleep(0.1)  # wait for server to start listening for clients
    client.connect((host, port))
    time.sleep(0.1)  # wait for thread to display connection
    # send all data
    client.sendall(data.encode() + b"<")
    # close connection
    client.shutdown(socket.SHUT_WR)
    client.close()


def startDriverStationNetworking():
    _thread.start_new_thread(server, ("", RECVPORT))
    client(CLIENT, SENDPORT)


DriverStationNetworking = Process(target=startDriverStationNetworking)

if __name__ == "__main__":
    DriverStationNetworking.start()
    input()
    DriverStationNetworking.kill()
Exemple #28
0
def redis_stub():
    # Run the Redis stub (run_redis_stub, assumed to be defined elsewhere) in a
    # separate process for the duration of the test, then kill it on teardown.
    p = Process(target=run_redis_stub)
    p.start()
    yield p
    p.kill()
Exemple #29
0
        if (".in") in fname:
            spl = fname.split("/")
            teste = spl[1]
            caso = spl[2].split(".")[0]
            print("Teste {}, caso {}:".format(teste, caso))
            child_conn, parent_conn = Pipe()
            test = Process(target=run_test,
                           args=(fname, child_conn),
                           daemon=True)
            test.start()
            start_time = time()
            res = None
            total += 1
            while (time() - start_time) < timeout:
                if parent_conn.poll():
                    res = parent_conn.recv()
                    break
            if res is None:
                print("-> Falha (timeout)\n")
                fail_timeout += 1
            elif not res:
                print("-> Falha (resultado incorreto)\n")
                fail_wrong += 1
            else:
                success += 1
                print("-> Sucesso!\n")
            test.kill()

print("Total: {}, acertos: {}, timeout: {}, errado: {}".format(
    total, success, fail_timeout, fail_wrong))
Exemple #30
0
def server():
    # Run the server as a separate process
    proc = Process(target=setup_server, args=(), daemon=True)
    proc.start()
    yield proc
    proc.kill()  # Cleanup after test
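A minimal usage sketch for the fixture above, assuming it is registered with @pytest.fixture (the decorator is not shown in the snippet):

def test_server_process_is_running(server):
    # the fixture yields the Process handle, so the test can inspect it directly
    assert server.is_alive()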
Exemple #31
0
class SSHLoggerBase(NodeLoggerBase):
    _retrieve_message = "Reading Scylla logs from {since}"

    def __init__(self, node, target_log_file: str):
        super().__init__(node, target_log_file)
        self._termination_event = Event()
        self.node = node
        self._remoter = None
        self._remoter_params = node.remoter.get_init_arguments()
        self._child_process = Process(target=self._journal_thread, daemon=True)

    @property
    @abstractmethod
    def _logger_cmd(self) -> str:
        pass

    def _file_exists(self, file_path):
        try:
            result = self._remoter.run('sudo test -e %s' % file_path,
                                       ignore_status=True)
            return result.exit_status == 0
        except Exception as details:  # pylint: disable=broad-except
            self._log.error('Error checking if file %s exists: %s', file_path,
                            details)
        return False

    def _log_retrieve(self, since):
        if not since:
            since = 'the beginning'
        self._log.debug(self._retrieve_message.format(since=since))

    def _retrieve(self, since):
        since = '--since "{}" '.format(since) if since else ""
        self._remoter.run(self._logger_cmd.format(since=since),
                          verbose=True,
                          ignore_status=True,
                          log_file=self._target_log_file)

    def _retrieve_journal(self, since):
        try:
            self._log_retrieve(since)
            self._retrieve(since)
        except Exception as details:  # pylint: disable=broad-except
            self._log.error('Error retrieving remote node DB service log: %s',
                            details)

    @raise_event_on_failure
    def _journal_thread(self):
        self._remoter = RemoteCmdRunnerBase.create_remoter(
            **self._remoter_params)
        read_from_timestamp = None
        while not self._termination_event.is_set():
            self._wait_ssh_up(verbose=False)
            self._retrieve_journal(since=read_from_timestamp)
            read_from_timestamp = datetime.utcnow().strftime(
                "%Y-%m-%d %H:%M:%S")

    def _wait_ssh_up(self, verbose=True, timeout=500):
        text = None
        if verbose:
            text = '%s: Waiting for SSH to be up' % self
        wait.wait_for(func=self._remoter.is_up,
                      step=10,
                      text=text,
                      timeout=timeout,
                      throw_exc=True)

    def start(self):
        self._child_process.start()

    def stop(self, timeout=None):
        self._child_process.terminate()
        self._child_process.join(timeout)
        if self._child_process.is_alive():
            self._child_process.kill()  # pylint: disable=no-member
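The stop() method above follows a common graceful-shutdown pattern: terminate first, wait, then kill only if the process is still alive. A self-contained sketch of that pattern (all names hypothetical):

import time
from multiprocessing import Process

def stop_gracefully(proc: Process, timeout: float = 5.0) -> None:
    proc.terminate()      # polite request first (SIGTERM on POSIX)
    proc.join(timeout)    # give the process a chance to exit cleanly
    if proc.is_alive():
        proc.kill()       # force it (SIGKILL) if it did not comply
        proc.join()

if __name__ == "__main__":
    p = Process(target=time.sleep, args=(60,))
    p.start()
    stop_gracefully(p, timeout=1.0)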
Exemple #32
0
class StreamService:
    def __init__(self, **kwargs):
        self.current_frame = Array(ctypes.c_char, 8192, lock=False)
        self.current_frame_id = Array(ctypes.c_char,
                                      len(str(uuid4())),
                                      lock=False)
        self.title = Array('c', 256, lock=False)
        self.artist = Array('c', 256, lock=False)
        self.song_queue = Queue()
        self.next_frame_ready = Event()
        self._p = None

        self._mongo_args = kwargs.get('mongo_args', {})

        self._clients = 0

        self.running = False

    @staticmethod
    def _stream(song_queue, frame_buffer, next_frame_event, title, artist,
                mongo_connect_args):
        connect(**mongo_connect_args)
        while 1:
            next_song_db = song_queue.get()
            next_song_bytes = BytesIO(next_song_db.song.read())

            next_song = Song(next_song_bytes, frame_buffer, next_frame_event,
                             next_song_db.bitrate)

            title.value = ''.join(next_song_db.title[:255]).encode()
            artist.value = ''.join(next_song_db.artist[:255]).encode()

            next_song.stream_mp3()

    @staticmethod
    def _monitor_queue(song_queue, mongo_connect_args):
        connect(**mongo_connect_args)
        while 1:
            # Add a random song if the queue is empty
            if song_queue.empty():
                songs_count = len(SongDB.objects.all())
                song_queue.put(SongDB.objects[randint(0, songs_count - 1)])

    def skip(self):
        if self.running:
            self.stop_stream()
            self.start_stream()

    def start_stream(self):
        """Start streaming
        """
        self._p_2 = Process(target=self._monitor_queue,
                            args=(self.song_queue, self._mongo_args))

        self._p_2.start()

        self._p = Process(target=self._stream,
                          args=(self.song_queue, self.current_frame,
                                self.current_frame_id, self.title, self.artist,
                                self._mongo_args))
        self._p.start()

        self.running = True

    def stop_stream(self):
        """Stop streaming
        """
        if self._p:
            self._p.kill()

        if self._p_2:
            self._p_2.kill()

        self.running = False

    def add_song_to_queue(self, song_path):
        self.song_queue.put(song_path)

    def listen(self):
        """Send a stream of live music packets
        """
        if self._clients < 1:
            self.start_stream()

        self._clients += 1
        last_frame_sent = None
        while True:
            try:
                if last_frame_sent != self.current_frame_id.raw:
                    last_frame_sent = self.current_frame_id.raw
                    yield self.current_frame.raw

            except GeneratorExit:
                print('disconnect!')
                self._clients -= 1

                print(self._clients)

                if self._clients < 1:
                    self._clients = 0
                    self.stop_stream()

                raise GeneratorExit
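StreamService shares the current frame and metadata between processes through lock-free ctypes Arrays. A tiny sketch of that mechanism, independent of the streaming logic (names hypothetical):

import ctypes
from multiprocessing import Array, Process

def writer(buf):
    buf.value = b"hello from the child"  # writes into shared memory

if __name__ == "__main__":
    shared = Array(ctypes.c_char, 64, lock=False)
    p = Process(target=writer, args=(shared,))
    p.start()
    p.join()
    print(shared.value)  # -> b'hello from the child'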
Exemple #33
0
        op.error("Invalid port number")

    try:
        processes = []
        for a in xrange(options.count):

            try:
                w = DesWorker(address, port, options.secret)
            except AssertionError as e:
                import traceback
                traceback.print_exc()
                print "Error probably due to key mismatch"
                sys.exit()
            w.log("Connected to manager at %s:%s" % (address, port))

            process = Process(target=w.run)
            processes.append(process)

        # Start processes and wait
        for process in processes:
            process.start()
        for process in processes:
            process.join()

    # Always kill all processes when finished
    finally:
        for process in processes:
            try:
                process.kill()
            except Exception: pass
Exemple #34
0
print('fork: \nProcess (%s) start...' % os.getpid())
pid = os.fork()  # fork() creates a child process with a single call
if pid == 0:  # the child gets 0; the parent gets the child's pid, and getppid() returns the parent's pid
    print('I am child process (%s) and my parent is (%s).' % (os.getpid(), os.getppid()))
    exit(0)  # the child exits here and never runs the code below
else: print('I (%s) just created a child process (%s).' % (os.getpid(), pid))

##################################################################
## multiprocessing Process
from multiprocessing import Process  # fork() does not work on Windows; Process is cross-platform
def run_proc(name): print('Run child process %s (%s)...' % (name, os.getpid()))  # code executed by the child process
print('\nProcess: \nParent process (%s).' % os.getpid())
p = Process(target=run_proc, args=('test',))  # arguments are passed through args
p.start()  # start() launches the child process; even simpler than fork()
p.join()  # join() waits for the child to finish before continuing; typically used for synchronization

##################################################################
## subprocess
import subprocess  # start a child process and control its input and output
print('\nsubprocess without I/O control: \n$ nslookup www.python.org')  # no control needed
r = subprocess.call(['nslookup', 'www.python.org']); print('Exit code:', r)
print('\nsubprocess with I/O control: $ nslookup')
p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate(b'set q=mx\npython.org\nexit\n')  # equivalent to the three commands: set q=mx; python.org; exit
print(output.decode('utf-8'))
print('Exit code:', p.returncode)
p = subprocess.Popen(['nslookup'], stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
p.wait()  # needed so the program waits for interactive input in the terminal...
p.kill()
print(p.returncode)  # not reached if ended manually; after kill() a KeyboardInterrupt is raised
Exemple #35
0
class DataSource:
    def __init__(self, source_code, runtime_id, name, module_path):
        self.runtime_id = runtime_id
        self.source_code = source_code
        self.name = name
        self.status = "created"
        self.started_at = datetime.now(timezone.utc)
        self.module_path = module_path
        (child_pipe, parent_pipe) = Pipe(duplex=True)
        self._child_pipe = child_pipe
        self._start_process(
            name=name,
            runtime_id=runtime_id,
            module_path=module_path,
            parent_pipe=parent_pipe,
        )
        self.next_trigger_time = None

    def _start_process(self, name, runtime_id, module_path, parent_pipe):
        self._process = Process(
            target=DataSource.do_run,
            name=f"{name}_{runtime_id}",
            daemon=True,
            args=(name, runtime_id, module_path, parent_pipe),
        )
        self._process.start()

    def restart(self, purge_data=False):
        self._process.kill()
        if purge_data:
            dbc = DatabaseConnection(self.runtime_id)
            dbc.connect_to_database()
            dbc.purge_datasource_schema()
            dbc.disconnect_from_database()
        (child_pipe, parent_pipe) = Pipe(duplex=True)
        self._child_pipe = child_pipe
        self._start_process(
            name=self.name,
            runtime_id=self.runtime_id,
            module_path=self.module_path,
            parent_pipe=parent_pipe,
        )

    def pause(self):
        self.send_message("pause", "")

    def unpause(self):
        self.send_message("unpause", "")

    def ingest(self, msg_body):
        self.send_message("msg_ingest", msg_body)

    def send_message(self, msg_type, msg_body):
        self._child_pipe.send((msg_type, msg_body))

    def update(self):
        while self._child_pipe.poll() is True:
            (msg_type, msg_body) = self._child_pipe.recv()

            if msg_type == "update_trigger_time":
                self.next_trigger_time = msg_body
            elif msg_type == "update_status":
                self.status = msg_body

    def do_run(name, runtime_id, module_path, parent_pipe):
        spec = importlib.util.spec_from_file_location(f"plugin_{name}",
                                                      module_path)
        plugin_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(plugin_module)
        init = plugin_module.init
        schedule = plugin_module.schedule
        if schedule() is None:
            ingest_data = plugin_module.ingest_data

        data_source_fields = plugin_module.get_fields()
        fetch_data = plugin_module.fetch_data
        clean_data = plugin_module.clean_data

        init(runtime_id, name)

        is_paused = False
        next_trigger_time = None
        last_trigger_time = None
        schedule_trigger = schedule()

        def message_parent(topic, msg):
            parent_pipe.send((topic, msg))

        def tell_parent_status(status):
            parent_pipe.send(("update_status", status))

        db_connection = DatabaseConnection(runtime_id)
        db_connection.connect_to_database()

        if not db_connection.check_if_schema_exists():
            db_connection.schema_setup(data_source_fields)

        while 1:
            # if we have messages from the main process, handle them
            ingests = []
            while parent_pipe.poll() is True:
                msg = (msg_type, msg_body) = parent_pipe.recv()
                if msg_type == "msg_ingest":
                    log.error(f"INGEST: {pformat(msg)}")
                    ingests.append(msg_body)
                elif msg_type == "pause":
                    log.error(f"PAUSED")
                    is_paused = True
                elif msg_type == "unpause":
                    log.error(f"UNPAUSED")
                    is_paused = False
                else:
                    log.error(f"MSG RECV: {pformat(msg)}")

            # after we handle pending events, if schedule says so we do a run.
            now = datetime.now(timezone.utc)
            if (schedule_trigger is None and len(ingests) > 0) or (
                    schedule_trigger is not None and
                (next_trigger_time is None or now >= next_trigger_time)):
                if schedule_trigger is not None:
                    next_trigger_time = schedule_trigger.get_next_fire_time(
                        last_trigger_time, now)
                    message_parent("update_trigger_time", next_trigger_time)

                run_id = uuid4()
                run_start_time = datetime.now(timezone.utc)
                run_succeeded = False
                if is_paused:
                    tell_parent_status("paused")
                    continue
                else:
                    tell_parent_status("running")
                db_connection.begin_run(run_id)
                db_connection.empty_current_raw_table()
                db_connection.empty_current_clean_table()
                try:
                    db_connection.log(
                        time=datetime.now(timezone.utc),
                        severity="info",
                        message="started run",
                        run_id=run_id,
                    )
                    db_connection.update_run(run_id, "fetching")
                    if schedule_trigger is None:
                        raw_data = ingest_data(db_connection, run_id, ingests)
                    else:
                        raw_data = fetch_data(db_connection, run_id)
                    db_connection.insert_data_current_raw(run_id, raw_data)

                    db_connection.update_run(run_id, "cleaning")
                    cleaned_data = clean_data(db_connection, run_id, raw_data)
                    db_connection.insert_data_current_clean(
                        run_id, cleaned_data)

                    db_connection.archive_raw()
                    db_connection.archive_clean()
                    db_connection.log(
                        time=datetime.now(timezone.utc),
                        severity="info",
                        message="finished run",
                        run_id=run_id,
                    )
                    run_succeeded = True
                except Exception as err:
                    db_connection.log(
                        time=datetime.now(timezone.utc),
                        severity="error",
                        message=traceback.format_exc(),
                        run_id=run_id,
                    )
                    log.error(traceback.format_exc())
                    pass
                finally:
                    run_end_time = datetime.now(timezone.utc)
                    run_duration = run_end_time - run_start_time

                    if run_succeeded:
                        log.error(
                            f"Run #{run_id} succeeded in #{run_duration}")
                        db_connection.end_run(run_id, "succeeded")
                    else:
                        log.error(f"Run #{run_id} failed in #{run_duration}")
                        db_connection.end_run(run_id, "failed")
                tell_parent_status("sleeping")
            sleep(1)
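DataSource drives its child process through a duplex Pipe: the parent sends (msg_type, msg_body) tuples and the child reports status back the same way. A stand-alone sketch of that exchange (all names hypothetical):

from multiprocessing import Pipe, Process

def child_loop(conn):
    while True:
        msg_type, msg_body = conn.recv()   # blocks until the parent sends something
        if msg_type == "stop":
            conn.send(("update_status", "stopped"))
            return
        conn.send(("update_status", f"handled {msg_type}"))

if __name__ == "__main__":
    parent_conn, child_conn = Pipe(duplex=True)
    p = Process(target=child_loop, args=(child_conn,), daemon=True)
    p.start()
    parent_conn.send(("pause", ""))
    print(parent_conn.recv())  # -> ('update_status', 'handled pause')
    parent_conn.send(("stop", ""))
    print(parent_conn.recv())  # -> ('update_status', 'stopped')
    p.join()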
Exemple #36
0
class Core:
    """
    The core module.

    Loads and keeps the config and endpoints. Endpoints are called through this module.
    """

    def __init__(self, conf, reset=False, check_config=False):
        """
        Coco Core.

        Parameters
        ----------
        conf : os.PathLike
            Path to the config file.
        reset : bool
            Whether to reset internal state on start. Default `False`.
        check_config : bool
            Don't really start, check config only. Default `False`.
        """

        # Tell the destructor that there's no worker to be killed
        self.check_config = check_config

        # In case constructor crashes before this gets assigned, so that destructor
        # doesn't fail.
        self.qworker = None
        self.state = None

        # Load the config
        self._load_config(Path(conf))
        logger.setLevel(self.config["log_level"])

        if reset is True:
            # Reset the internal state
            asyncio.run(self.state.reset_state())

        # Configure the forwarder
        try:
            timeout = str2total_seconds(self.config["timeout"])
        except Exception as e:
            raise ConfigError(
                f"Failed parsing value 'timeout' ({self.config['timeout']})."
            ) from e
        self.forwarder = RequestForwarder(
            self.blocklist_path,
            timeout,
            debug_connections=self.config["debug_connections"],
        )
        self.forwarder.set_session_limit(self.config["session_limit"])
        for group, hosts in self.groups.items():
            self.forwarder.add_group(group, hosts)

        self._config_slack_loggers()

        self._load_endpoints()
        self._local_endpoints()
        self._check_endpoint_links()
        self._register_config()

        try:
            self.frontend_timeout = str2total_seconds(self.config["frontend_timeout"])
        except Exception as e:
            raise ConfigError(
                "Failed parsing value 'frontend_timeout' "
                f"({self.config['frontend_timeout']})."
            ) from e

        if self.check_config:
            logger.info("Superficial config check successful. Stopping...")
            return

        # Remove any leftover shutdown commands from the queue
        self.redis_sync = redis.Redis()
        self.redis_sync.lrem("queue", 0, "coco_shutdown")

        # Load queue update script into redis cache
        self.queue_sha = self.redis_sync.script_load(
            """ if redis.call('llen', KEYS[1]) >= tonumber(ARGV[1]) then
                        return true
                    else
                        redis.call('hmset', KEYS[2], ARGV[2], ARGV[3], ARGV[4], ARGV[5], ARGV[6], ARGV[7], ARGV[8], ARGV[9], ARGV[10], ARGV[11])
                        redis.call('rpush', KEYS[1], KEYS[2])
                        return false
                    end
            """
        )

        # Start the worker process
        self.qworker = Process(
            target=worker.main_loop,
            args=(
                self.endpoints,
                self.forwarder,
                self.config["port"],
                self.config["metrics_port"],
                self.config["log_level"],
                self.frontend_timeout,
            ),
        )
        self.qworker.daemon = True
        try:
            self.qworker.start()
        except Exception:
            self.qworker.join()

        self._call_endpoints_on_start()
        self._start_server()

        self.redis_async = None

    def __del__(self):
        """
        Destruct :class:`Core`.

        Join the worker process.
        """
        if not self.check_config:
            logger.info("Joining worker process...")
            try:
                self.redis_sync.rpush("queue", "coco_shutdown")
            except Exception as e:
                logger.error(
                    f"Failed sending shutdown command to worker (have to kill it): {type(e)}: {e}"
                )
                self._kill_worker()
            self._kill_worker()

    def _kill_worker(self):
        if hasattr(self, "qworker"):
            if self.qworker:
                self.qworker.kill()

    def _call_endpoints_on_start(self):
        for endpoint in self.endpoints.values():
            # Initialise request counter
            self.redis_sync.incr(f"dropped_counter_{endpoint.name}", amount=0)
            if endpoint.call_on_start:
                logger.debug(f"Calling endpoint on start: /{endpoint.name}")
                name = f"{os.getpid()}-{time.time()}"

                self.redis_sync.hmset(
                    name,
                    {
                        "method": endpoint.type,
                        "endpoint": endpoint.name,
                        "request": json.dumps({}),
                    },
                )

                # Add task name to queue
                self.redis_sync.rpush("queue", name)

                # Wait for the result
                result = self.redis_sync.blpop(f"{name}:res")[1]
                self.redis_sync.delete(f"{name}:res")
                # TODO: raise log level in failure case?
                logger.debug(f"Called /{endpoint.name} on start, result: {result}")

    def _start_server(self):
        """Start a sanic server."""

        self.sanic_app = Sanic(__name__)
        self.sanic_app.config.REQUEST_TIMEOUT = self.frontend_timeout
        self.sanic_app.config.RESPONSE_TIMEOUT = self.frontend_timeout

        # Create the Redis connection pool, use sanic to start it so that it
        # ends up in the same event loop
        async def init_redis_async(*_):
            self.redis_async = await aioredis.create_redis_pool(
                ("127.0.0.1", 6379), minsize=3, maxsize=10
            )

        async def close_redis_async(*_):
            self.redis_async.close()
            await self.redis_async.wait_closed()

        self.sanic_app.register_listener(init_redis_async, "before_server_start")
        self.sanic_app.register_listener(close_redis_async, "after_server_stop")

        # Set up slack logging, needs to be done here so it gets setup in the right event loop
        def start_slack_log(_, loop):
            slack.start(loop)

        async def stop_slack_log(*_):
            await slack.stop()

        self.sanic_app.register_listener(start_slack_log, "before_server_start")
        self.sanic_app.register_listener(stop_slack_log, "after_server_stop")

        debug = self.log_level == "DEBUG"

        self.sanic_app.add_route(
            self.external_endpoint, "/<endpoint>", methods=["GET", "POST"]
        )

        self.sanic_app.run(
            host="0.0.0.0",
            port=self.config["port"],
            workers=self.config["n_workers"],
            debug=False,
            access_log=debug,
        )

    def _config_slack_loggers(self):
        # Configure the log handlers for posting to slack

        # Don't set up extra loggers if they're not enabled
        if self.config["slack_token"] is None:
            logger.warning(
                "Config variable 'slack_token' not found. Slack messaging DISABLED."
            )
            return

        # Set the authorization token
        slack.set_token(self.config["slack_token"])

        for rule in self.config["slack_rules"]:

            logger_name = rule["logger"]
            channel = rule["channel"]
            level = rule.get("level", "INFO").upper()

            log = logging.getLogger(logger_name)

            handler = slack.SlackLogHandler(channel)
            handler.setLevel(level)
            log.addHandler(handler)

    def _register_config(self):
        # Register config with comet broker
        try:
            enable_comet = self.config["comet_broker"]["enabled"]
        except KeyError as e:
            raise ConfigError("Missing config value 'comet_broker/enabled'.") from e
        if enable_comet:
            try:
                comet_host = self.config["comet_broker"]["host"]
                comet_port = self.config["comet_broker"]["port"]
            except KeyError as exc:
                raise InternalError(
                    "Failure registering initial config with comet broker: 'comet_broker/{}' "
                    "not defined in config.".format(exc)
                ) from exc
            comet = Manager(comet_host, comet_port)
            try:
                comet.register_start(datetime.datetime.utcnow(), __version__)
                comet.register_config(self.config)
            except CometError as exc:
                raise InternalError(
                    "Comet failed registering CoCo startup and initial config: {}".format(
                        exc
                    )
                ) from exc
        else:
            logger.warning("Config registration DISABLED. This is only OK for testing.")

    def _load_config(self, config_path: os.PathLike):

        self.config = config.load_config(config_path)

        self.log_level = self.config["log_level"]
        logger.setLevel(self.config["log_level"])
        # Also set log level for root logger, inherited by all
        logging.getLogger().setLevel(self.config["log_level"])

        # Get the state storage and blocklist path, if it's not absolute then it is resolved
        # relative to the config directory
        self.blocklist_path = Path(self.config["blocklist_path"])
        if not self.blocklist_path.is_absolute():
            raise ConfigError(
                f"Blocklist path \"{self.config['blocklist_path']}\" must be absolute."
            )
        storage_path = Path(self.config["storage_path"])
        if not storage_path.is_absolute():
            raise ConfigError(
                f"Storage path \"{self.config['storage_path']}\" must be absolute."
            )
        if not storage_path.is_dir():
            raise ConfigError(
                f"Storage path \"{self.config['storage_path']}\" doesn't exist."
            )

        # Read groups
        self.groups = self.config["groups"].copy()
        for group, hosts in self.groups.items():
            self.groups[group] = [Host(h) for h in hosts]

        # Init state, tries loading from persistent storage
        self.state = State(
            self.config["log_level"],
            storage_path,
            self.config["load_state"],
            self.config["exclude_from_reset"],
        )

        # Validate slack posting rules
        # TODO: move into config.py
        for rdict in self.config["slack_rules"]:
            if "logger" not in rdict or "channel" not in rdict:
                logger.error(f"Invalid slack rule {rdict}.")

    def _load_endpoints(self):

        self.endpoints = {}

        for conf in self.config["endpoints"]:

            name = conf["name"]

            # Create the endpoint object
            self.endpoints[name] = Endpoint(name, conf, self.forwarder, self.state)

            if self.endpoints[name].group not in self.groups:
                if not self.endpoints[name].has_external_forwards:
                    logger.debug(
                        f"Endpoint {name} has `call` set to 'null'. This means it "
                        f"doesn't call external endpoints. It might check other coco "
                        f"endpoints or return some part of coco's state."
                    )
                else:
                    raise RuntimeError(
                        f"Host group '{self.endpoints[name].group}' used by endpoint "
                        f"{name} unknown."
                    )
            self.forwarder.add_endpoint(name, self.endpoints[name])

    def _local_endpoints(self):
        # Register any local endpoints

        endpoints = {
            "blocklist": ("GET", self.forwarder.blocklist.process_get),
            "update-blocklist": ("POST", self.forwarder.blocklist.process_post),
            "saved-states": ("GET", self.state.get_saved_states),
            "reset-state": ("POST", self.state.reset_state),
            "save-state": ("POST", self.state.save_state),
            "load-state": ("POST", self.state.load_state),
            "wait": ("POST", wait.process_post),
        }

        for name, (type_, callable_) in endpoints.items():
            self.endpoints[name] = LocalEndpoint(name, type_, callable_)
            self.forwarder.add_endpoint(name, self.endpoints[name])

    def _check_endpoint_links(self):
        def check(e):
            if e:
                for a in e:
                    if isinstance(a, dict):
                        if len(a.keys()) != 1:
                            raise ConfigError(
                                f"coco.endpoint: bad config format for endpoint "
                                f"`{e.name}`: `{a}`. Should be either a string or "
                                f"have the format:\n```\nbefore:\n  - endpoint_name:\n   "
                                f"   identical: True\n```"
                            )
                        a = list(a.keys())[0]
                    if isinstance(a, CocoForward):
                        a = a.name
                    if a not in self.endpoints.keys():
                        raise ConfigError(
                            f"coco.endpoint: endpoint `{a}` found in config for "
                            f"`{e.name}` does not exist."
                        )

        for endpoint in self.endpoints.values():
            if hasattr(endpoint, "before"):
                check(endpoint.before)
            if hasattr(endpoint, "after"):
                check(endpoint.after)
            if hasattr(endpoint, "forward_to_coco"):
                check(endpoint.forward_to_coco)

    async def external_endpoint(self, request, endpoint):
        """
        Receive all HTTP calls.

        Core endpoint. Passes all endpoint calls on to redis and blocks until completion.
        """
        # create a unique name for this task: <process ID>-<POSIX timestamp>
        now = time.time()
        name = f"{os.getpid()}-{now}"

        with await self.redis_async as r:
            # Check if queue is full. If not, add this task.
            if self.config["queue_length"] > 0:
                full = await r.evalsha(
                    self.queue_sha,
                    keys=["queue", name],
                    args=[
                        self.config["queue_length"],
                        "method",
                        request.method,
                        "endpoint",
                        endpoint,
                        "request",
                        request.body,
                        "params",
                        request.query_string,
                        "received",
                        now,
                    ],
                )

                if full:
                    # Increment dropped request counter
                    await r.incr(f"dropped_counter_{endpoint}")
                    return response.json(
                        {"reply": "Coco queue is full.", "status": 503}, status=503
                    )
            else:
                # No limit on queue, just give the task to redis
                await r.hmset(
                    name,
                    "method",
                    request.method,
                    "endpoint",
                    endpoint,
                    "request",
                    request.body,
                    "params",
                    request.query_string,
                    "received",
                    now,
                )

                # Add task name to queue
                await r.rpush("queue", name)

            # Wait for the result (operations must be in this order to ensure
            # the result is available)
            code = int((await r.blpop(f"{name}:code"))[1])
            result = (await r.blpop(f"{name}:res"))[1]
            await r.delete(f"{name}:res")
            await r.delete(f"{name}:code")

        return response.raw(
            result, status=code, headers={"Content-Type": "application/json"}
        )
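Based only on the __init__ docstring above, constructing Core looks roughly like this (the config path is a placeholder):

if __name__ == "__main__":
    # check_config=True validates the config and returns without starting workers
    core = Core("/path/to/config.yaml", reset=False, check_config=True)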
Exemple #37
0
        time.sleep(0.5)

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = int(textport)
server_address = (host, port)
s.bind(server_address)
t = None  # handle for the most recent turn process (if any)
while True:
    buf, address = s.recvfrom(port)
    if not len(buf):
        break
    l = buf == "turnL:1"
    r = buf == "turnR:1"
    if l:
        t = Process(target = turnLeft)       
        t.start()
    if r:
        t = Process(target = turnRight)
        t.start()
    buf, address = s.recvfrom(port)
    if not len(buf):
        break
    if l or r:
        t.kill()
        
#   print ("Received %s bytes from %s %s: " % (len(buf), address, buf ))

s.shutdown(1)


Exemple #38
0
class Camera:
    def __init__(self, log_level):
        self.log_level = log_level
        self.manager = Manager()
        self.queue = self.manager.Queue()
        self.started = False
        self.last_color_image = None
        self.last_depth_image = None
        self.last_intrinsics = None
        self.camera_process = None

    def pcd(self):
        was_updated = False
        while True:
            try:
                (
                    self.last_color_image,
                    self.last_depth_image,
                    self.last_intrinsics,
                ) = self.queue.get(block=False)
                was_updated = True
            except queue.Empty:
                # We are up to date
                break

        if not was_updated:
            logging.warning("Capture was not updated for this pcd() call!")

        rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
            o3d.geometry.Image(self.last_color_image),
            o3d.geometry.Image(self.last_depth_image),
            convert_rgb_to_intensity=False,
        )
        pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
            *self.last_intrinsics
        )
        pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
            rgbd, pinhole_camera_intrinsic
        )
        pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        return pcd

    def image_depth(self):
        if not self.started:
            self.start()
        while True:
            try:
                (
                    self.last_color_image,
                    self.last_depth_image,
                    self.last_intrinsics,
                ) = self.queue.get(block=(self.last_color_image is None))
                was_updated = True
            except queue.Empty:
                # We are up to date
                break
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(self.last_depth_image, alpha=0.03), cv2.COLORMAP_JET
        )
        return self.last_color_image, depth_colormap

    def start(self):
        # Reset all devices
        self.started = True
        self.camera_process = Process(
            target=Camera.loop, args=(self.queue, self.log_level)
        )
        self.camera_process.start()

    def stop(self):
        self.started = False
        self.camera_process.kill()

    def loop(q, log_level):
        logging.basicConfig(level=log_level)
        logging.debug("Camera process started")
        ctx = rs.context()
        devices = ctx.query_devices()
        if len(devices) == 0:
            raise NoDeviceDetectedException()
        for dev in devices:
            dev.hardware_reset()
        time.sleep(5)

        # Configure depth and color streams
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

        # Start streaming
        pipeline.start(config)

        # Get stream profile and camera intrinsics
        profile = pipeline.get_active_profile()
        depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
        intrinsics = depth_profile.get_intrinsics()

        # Point cloud acquisition
        pc = rs.pointcloud()
        colorizer = rs.colorizer()
        aligner = rs.align(rs.stream.color)

        while True:
            frames = aligner.process(pipeline.wait_for_frames())
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()

            if not depth_frame or not color_frame:
                continue

            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            q.put(
                (
                    color_image,
                    depth_image,
                    (
                        intrinsics.width,
                        intrinsics.height,
                        intrinsics.fx,
                        intrinsics.fy,
                        intrinsics.ppx,
                        intrinsics.ppy,
                    ),
                )
            )
            time.sleep(0.1)
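pcd() and image_depth() above drain the frame queue with non-blocking get() calls so they always keep only the newest frame. The same pattern in isolation (names hypothetical):

import queue
import time
from multiprocessing import Manager, Process

def producer(q):
    for i in range(5):
        q.put(i)
        time.sleep(0.05)

if __name__ == "__main__":
    manager = Manager()
    q = manager.Queue()
    p = Process(target=producer, args=(q,))
    p.start()
    p.join()
    latest = None
    while True:
        try:
            latest = q.get(block=False)  # keep overwriting until the queue is empty
        except queue.Empty:
            break
    print("latest item:", latest)  # -> 4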
Exemple #39
0
    def kill(self) -> None:
        Process.kill(self)
        self.check()