Example #1
    def thread_run(self):
        self._thread_stop = False
        th = threading.Thread(target=self.run, name='Connection run')
        th.start()
        gvar.thread.append(th)

        logger.info('thread start -> Connection.run()')
Example #2
 def launch_process(self):
     """
     Launch process for spark-submit.
     """
     # spark-submit command to launch
     command = self.cmd()
     logger.info("%s - Launch command %s", self.name, command)
     # only create stdout and stderr when working directory is provided
     if self.__working_directory:
         stdout_path = util.concat(self.__working_directory, "stdout")
         stderr_path = util.concat(self.__working_directory, "stderr")
         stdout = util.open(stdout_path, "wb")
         stderr = util.open(stderr_path, "wb")
         self.__ps = subprocess.Popen(command,
                                      bufsize=4096,
                                      stdout=stdout,
                                      stderr=stderr,
                                      close_fds=True)
     else:
         self.__ps = subprocess.Popen(command,
                                      bufsize=4096,
                                      stdout=None,
                                      stderr=None,
                                      close_fds=True)
     logger.info("%s - Process pid=%s", self.name, self.__ps.pid)
Example #3
    def init_tcp(self):
        self._tp = paramiko.Transport(self._socket, gss_kex=True)
        self._tp.set_gss_host(socket.getfqdn(""))

        self._tp.load_server_moduli()

        host_key = paramiko.RSAKey(filename=conf.SSH_SERVER_RSA_EKY)
        self._tp.add_server_key(host_key)
        server = Server()
        logger.debug('Create Server Class')

        try:
            self._tp.start_server(server=server)
        except paramiko.SSHException:
            logger.error("SSH negotiation failed.")
            gvar.manager.close_connection(self)
            exit()

        self._channel = self._tp.accept(200)

        if self._channel is None:
            logger.error('No channel')
            gvar.manager.close_connection(self)
            exit()

        logger.info('Authenticated!')
Example #4
    def iteration(self):
        """
        Run a single iteration. The entire logic of the executor should be specified in this
        method, unless there is additional logic between iterations. The iteration is cancelled
        if the executor is terminated.

        :return: boolean flag, True - run next iteration, False - terminate
        """
        # handle the special case of a terminated executor, in case someone launches it again
        if self._terminated:
            logger.warning("Executor %s has been terminated", self.name)
            return False
        logger.debug("%s - Run iteration, timeout=%s", self.name, self.timeout)
        try:
            # send response to the scheduler that this executor is up and processing tasks
            self._respond_is_alive()
            # check if there are any messages in connection, process one message per iteration
            if self.conn.poll():
                self._process_message(self.conn.recv())
            # check if there is any outstanding task to run, otherwise poll data for current task
            self._process_task()
        except ExecutorInterruptedException:
            logger.info("%s - Requested termination of executor", self.name)
            self._terminated = True
            # cancel task that is currently running and clean up state
            self._cancel_active_task()
            return False
        # pylint: disable=W0703,broad-except
        except Exception as e:
            logger.exception("%s - Unrecoverable error %s, terminating", self.name, e)
            self._terminated = True
            return False
        # pylint: enable=W0703,broad-except
        else:
            return True
Example #5
def _check_hdd(mount_path: str):
    try:
        # Get disk space
        hdd = psutil.disk_usage(mount_path)
        free_space = hdd.free / (1024 * 1024 * 1024)
        total_space = hdd.total / (1024 * 1024 * 1024)
        free_space_pct = free_space / total_space * 100

        # Get disk inodes
        inode_hdd_mount = os.statvfs(mount_path)
        total_inode = inode_hdd_mount.f_files  # inodes
        free_inode = inode_hdd_mount.f_ffree  # free inodes
        free_inode_pct = free_inode / total_inode * 100
    except FileNotFoundError:
        logger.warning(f"failed to find '{mount_path}'")
    else:
        # Log and alert
        logger.info(
            f"space left on '{mount_path}': {free_space:.2f}GB ({free_space_pct:.1f}%) | inodes {free_inode:,} ({free_inode_pct:.1f}%)"
        )

        if (free_space_pct <= config.FREE_SPACE_PCT_WARNING) and (
                free_space_pct > config.FREE_SPACE_PCT_ERROR):
            send_alarm_warning(free_space, free_space_pct, mount_path)
        elif free_space_pct <= config.FREE_SPACE_PCT_ERROR:
            send_alarm_error(free_space, free_space_pct, mount_path)

        if (free_inode_pct <= config.FREE_INODE_PCT_WARNING) and (
                free_inode_pct > config.FREE_INODE_PCT_ERROR):
            send_alarm_warning_inode(free_inode, free_inode_pct, mount_path)
        elif free_inode_pct <= config.FREE_INODE_PCT_ERROR:
            send_alarm_error_inode(free_inode, free_inode_pct, mount_path)
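The threshold constants above come from a config module that is not shown on this page. A minimal sketch of what it might contain, purely as an illustration (the names are taken from the calls in _check_hdd; the numeric values are assumptions):

# Hypothetical config values; the names appear in _check_hdd above, the numbers are made up.
FREE_SPACE_PCT_WARNING = 20   # warn when free disk space drops to 20% or below
FREE_SPACE_PCT_ERROR = 10     # error when free disk space drops to 10% or below
FREE_INODE_PCT_WARNING = 20   # warn when free inodes drop to 20% or below
FREE_INODE_PCT_ERROR = 10     # error when free inodes drop to 10% or below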
Example #6
 def load_rom(self, filename: str) -> None:
     with open(filename, 'rb') as f:  # context manager ensures the ROM file is closed
         rom = f.read()
     for i, val in enumerate(rom):
         self.memory[PROGRAM_START + i] = val
     self.program_counter = PROGRAM_START
     self.rom_loaded = True
     logger.info(f"Loaded {filename} into memory")
Example #7
 def close(self):
     if self.channel:
         self.thread_stop()
         self.channel.close()
         self.control.remove(self)
         self.channel = None
         logger.info("room connection was closed")
Example #8
 def send(self, msg):
     try:
         self._channel.send(msg)
     except OSError:
         logger.info("socket is closed")
         gvar.manager.close_connection(self)
         return
Example #9
def create_extrapolation_dataset(src: PathLike, dest: PathLike, seed=None):
    if seed is not None:
        np.random.seed(seed=seed)
    else:
        np.random.seed(datetime.now().microsecond)
    logger.info("Start building the extrapolation dataset...")
    # running file numbers for the stored examples
    train_set_file_no = 1
    test_set_file_no = 1

    # each folder holds one day's sequence
    for folder in os.listdir(src):
        step = 40  # tunable: assuming all 240 records of a day exist, groups of 40 give 6 blocks

        r: List[str] = os.listdir(f"{src}\\{folder}")  # list all data files under the date folder
        r.sort()  # time-series data must be taken in its actual order, so sort just in case

        r = list(map(lambda p: f"{src}\\{folder}\\{p}",
                     r))  # turn every element into a directly usable path string

        split_list = [r[i:i + step]
                      for i in range(0, len(r), step)]  # split one day into several time segments
        np.random.shuffle(split_list)

        # assign one block to the test set
        test_queue = deque(maxlen=30)

        test_block = split_list[0]
        for i in range(0, 30):
            test_queue.append(test_block[i])
        __copy(list(test_queue),
               dest=f"{dest}\\test\\example{test_set_file_no:05d}")
        test_set_file_no += 1

        for i in range(30, len(test_block)):
            test_queue.append(test_block[i])
            __copy(list(test_queue),
                   dest=f"{dest}\\test\\example{test_set_file_no:05d}")
            test_set_file_no += 1

        split_list = split_list[1:]

        # the remaining blocks go to the training set
        train_queue = deque(maxlen=20)

        for train_block in split_list:
            train_queue.clear()
            for i in range(0, 20):
                train_queue.append(train_block[i])
            __copy(list(train_queue),
                   dest=f"{dest}\\train\\example{train_set_file_no:05d}")
            train_set_file_no += 1

            for i in range(20, len(train_block)):
                train_queue.append(train_block[i])
                __copy(list(train_queue),
                       dest=f"{dest}\\train\\example{train_set_file_no:05d}")
                train_set_file_no += 1
    logger.info("Extrapolation dataset finished...")
Example #10
 def shutdown(self):
     import sys
     logger.info('shutdown the program.')
     self.close_ssh_server()
     self.close_mansion()
     gvar.thread.kill_all_thread()
     logger.info('bye!')
     sys.exit(0)
Example #11
 def __init__(self) -> None:
     self.volume = 0.75
     pygame.mixer.init()
     square_wave = bytearray([128] * 400 + [0] * 400)
     self.sound = pygame.mixer.Sound(buffer=square_wave)
     self.sound.play(loops=-1)
     self.sound.set_volume(0.0)
     logger.info("Sound initialized")
Example #12
    def close_room(self):
        if not self._room:
            logger.info('not in room')
            return

        self.ssh_client.close()
        self._room.close()
        logger.info("close the room")
Example #13
 def start(self):
     """
     Start scheduler, launches executors asynchronously.
     """
     logger.info("Starting %s '%s' executors", self.num_executors, self.executor_class())
     # Launch executors and save pipes per each
     for i in range(self.num_executors):
         exc = self._prepare_executor("#%s" % i)
         exc.start()
Example #14
 def start(self):
     """
     Start scheduler, launches executors asynchronously.
     """
     logger.info("Starting %s '%s' executors", self.num_executors,
                 self.executor_class())
     # Launch executors and save pipes per each
     for i in range(self.num_executors):
         exc = self._prepare_executor("#%s" % i)
         exc.start()
Example #15
def create_restore_file(path_queue_file, path_crawled_file, path_restore_file):
    queue_set = set(_open(path_queue_file))
    crawled_set = set(_open(path_crawled_file))

    restore_queue = list(queue_set - crawled_set)

    append_list_to_file(path_restore_file, restore_queue, to_new_file=True)

    logger.info('restore file created')
    return True
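The _open and append_list_to_file helpers are not shown here. A minimal sketch of _open, assuming it just returns the stripped lines of a text file (the name comes from the call above; the body is an assumption):

def _open(path: str) -> list:
    # Hypothetical helper: read a text file and return its non-empty, stripped lines.
    with open(path, "r", encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip()]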
Example #16
    def __init__(self, socket):
        super().__init__(socket)
        self._tp = None
        self._control = None  # src.control or src.channel
        self._channel: paramiko.channel.Channel = None

        self._thread_stop = False
        logger.info('establish a new ssh session.')

        self.init_tcp()
Example #17
    def create_room(self, conn):
        logger.info('creating a room...')
        while True:
            i = self.random_id()
            if self.check_id(i):
                break

        logger.info('picked a random room id -> %s', i)

        room = Room(i)
        room.channel = conn
        self._rlist[i] = room
Example #18
    def start_listening(self, port):
        self._port = port
        self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.listener.bind((conf.SSH_SERVER_LISTENING_IP, self._port))

        try:
            self.listener.listen()
            logger.info('SSH.start listening port:%s  OK ...' % self._port)
        except OSError:
            logger.error(
                'SSH.start listening port:%s Error, Exit...' % self._port)
Example #19
 def _cancel_active_task(self):
     """
     Cancel current running task, if available.
     """
     if self._active_task:
         task_id = self._active_task.uid
         info = self._active_task.task_info
         self._active_task.cancel()
         self.conn.send(Message(EXECUTOR_TASK_CANCELLED, task_id=task_id, info=info))
         logger.info("%s - Cancelled task %s", self.name, task_id)
         self._active_task = None
     else:
         logger.info("%s - No active task to cancel", self.name)
Example #20
 def run(self):
     """
     Run tasks on the executor. This runs in iterations, one per timeout interval. Each
     iteration polls new messages from the connection and checks the running task. If an
     iteration fails, we immediately return status False.
     """
     logger.info("Start executor %s, time=%s", self.name, time.time())
     proceed = True
     while proceed: # pragma: no branch
         proceed = self.iteration()
         if not proceed:
             return False
         time.sleep(self.timeout)
Example #21
    def connection(self):
        try:
            # params: timeout = None means forever
            self.port = serial.Serial(conf.SERIAL_DEVICE, baudrate=conf.SERIAL_BAUDRATE, timeout=None, write_timeout=2,
                                      parity=serial.PARITY_NONE, bytesize=serial.EIGHTBITS, stopbits=serial.STOPBITS_ONE, xonxoff=False)
        except serial.serialutil.SerialException:
            logger.error(
                "can't open serial %s, Please check the COM is open and retry..." % conf.SERIAL_DEVICE)
            raise  # re-raise the original SerialException with its traceback

        self.port.flushInput()
        logger.info('serial port \'%s\' connection complete' %
                    conf.SERIAL_DEVICE)
Example #22
 def run(self):
     """
     Run tasks on the executor. This runs in iterations, one per timeout interval. Each
     iteration polls new messages from the connection and checks the running task. If an
     iteration fails, we immediately return status False.
     """
     logger.info("Start executor %s, time=%s", self.name, time.time())
     proceed = True
     while proceed:  # pragma: no branch
         proceed = self.iteration()
         if not proceed:
             return False
         time.sleep(self.timeout)
Example #23
    def select_room(self, conn):
        """
        Enter a room, need select a room ID, and give a username
        """
        logger.debug("into select room process...")
        se = Select(self._rlist.keys(), conn)
        room_id, username = se.process()

        logger.info('get room_id: %s, username: %s' % (room_id, username))
        room = self._rlist[room_id]
        conn.username = username
        room.add_connection(conn)
        del se
Example #24
    def wait_keyboard_interrupt(self):
        import time

        logger.debug("Into waiting for keyboard interrupt")
        try:
            while gvar.thread.has_alive_thread():
                gvar.thread.clean_stoped_thread()
                time.sleep(50)
        except KeyboardInterrupt:
            logger.info("shutdown the program...")
            self.shutdown()
            gvar.thread.kill_all_thread()

            logger.info("Bye!")
Example #25
 def _process_task(self):
     """
     Process an individual task, returning an exit code for each task following the available
     API. One of the checks tests the current task_id against the cancelled list and discards
     the task if it has been marked as cancelled, or terminates the running task.
     """
     if not self._active_task:
         self._active_task = self._get_new_task()
         logger.info("%s - New task registered", self.name)
     # before checking statuses and proceed execution, we check if current task was
     # requested to be cancelled, if yes, we remove it from set of ids.
     if self._active_task and self._active_task.uid in self._cancel_task_ids:
         self._cancel_task_ids.discard(self._active_task.uid)
         self._cancel_active_task()
     # check general task processing
     if self._active_task:
         task_id = self._active_task.uid
         info = self._active_task.task_info
         task_status = self._active_task.status
         # perform action based on active task status
         if task_status is TASK_PENDING:
             # check if external system is available to run task (Developer API)
             if self.external_system_available():
                 self._active_task.start()
                 self.conn.send(Message(EXECUTOR_TASK_STARTED, task_id=task_id, info=info))
                 logger.info("%s - Started task %s", self.name, task_id)
             else:
                 logger.info("%s - External system is not available", self.name)
         elif task_status is TASK_STARTED:
             # task has started and running
             if self._active_task.is_alive(): # pragma: no branch
                 logger.debug("%s - Ping task %s is alive", self.name, task_id)
         elif task_status is TASK_SUCCEEDED:
             # task finished successfully
             self.conn.send(Message(EXECUTOR_TASK_SUCCEEDED, task_id=task_id, info=info))
             logger.info("%s - Finished task %s, status %s", self.name, task_id, task_status)
             self._active_task = None
         elif task_status is TASK_FAILED:
             # task failed
             self.conn.send(Message(EXECUTOR_TASK_FAILED, task_id=task_id, info=info))
             logger.info("%s - Finished task %s, status %s", self.name, task_id, task_status)
             self._active_task = None
         elif task_status is TASK_CANCELLED:
             # task has been cancelled
             if self._active_task: # pragma: no branch
                 self._active_task = None
         else:
             logger.warning("%s - Unknown status %s for %s", self.name, task_status, task_id)
     else:
         logger.debug("%s - No active task registered", self.name)
Example #26
def download_zip_file(url: str) -> str:
    """
    Streams a remote ZIP file to disk without using excessive memory, extracts its contents,
    then deletes it.

    :param url: remote file URL

    :return: extracted file name
    """

    local_filename = f"tmp/{url.split('/')[-1]}"

    # Downloading ZIP file
    logger.info(
        f"Downloading ZIP file from {url} into local file '{local_filename}' ..."
    )
    with requests.get(url, stream=True) as r, open(local_filename, "wb") as f:
        shutil.copyfileobj(r.raw, f)
    logger.info(
        f"Download of ZIP data from {url} into local file '{local_filename}' successfull !"
    )

    # Extracting ZIP file data
    logger.info(f"Extracting ZIP data from {local_filename} ...")
    with ZipFile(local_filename) as zf:
        zf.extractall()
    logger.info(f"Extraction of ZIP data from {local_filename} successfull !")

    # Removing ZIP file after extraction and returning the extracted filename
    remove(local_filename)
    return local_filename.split(".zip")[0]
Example #27
 def check_server_port_is_open(self):
     """
     Check that the server connection works and the port is open.
     Known problem: the server side will print an SSH error in its log buffer.
     """
     try:
         tel = telnetlib.Telnet(host=conf.SSH_SERVER_IP_ADDRESS,
                                port=conf.SSH_SERVER_PORT, timeout=1)
         tel.close()
         return True
     except OSError:
         logger.info(
             "can't connect ssh server, please check the internet setting.")
         return False
Example #28
 def _cancel_active_task(self):
     """
     Cancel current running task, if available.
     """
     if self._active_task:
         task_id = self._active_task.uid
         info = self._active_task.task_info
         self._active_task.cancel()
         self.conn.send(
             Message(EXECUTOR_TASK_CANCELLED, task_id=task_id, info=info))
         logger.info("%s - Cancelled task %s", self.name, task_id)
         self._active_task = None
     else:
         logger.info("%s - No active task to cancel", self.name)
Example #29
 def create_ssh_server(self, port):
     listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
     listener.bind((conf.SSH_SERVER_LISTENING_IP, port))
     try:
         listener.listen()
         logger.info('SSH.start listening port:%s  OK ...' % port)
     except OSError:
         logger.error('SSH.start listening port:%s Error, Exit...' % port)
     var = SSHServer()
     var.manager = self  # ! maybe not needed
     var.listener = listener
     var._port = port
     var.thread_run()
     self.ssh_servers.append(var)
Example #30
    def __init__(self) -> None:
        pygame.init()
        self.PIXEL_SIZE = 20
        self.MENU_HEIGHT = 40
        self.WIDTH = 64 * self.PIXEL_SIZE
        self.HEIGHT = 32 * self.PIXEL_SIZE
        self.TOTAL_HEIGHT = self.HEIGHT + self.MENU_HEIGHT
        self.DISPLAY = pygame.display.set_mode((self.WIDTH, self.TOTAL_HEIGHT),
                                               0, 1)

        self.WHITE: tuple[int, int, int] = (230, 230, 230)
        self.BLACK: tuple[int, int, int] = (20, 20, 20)

        self.pixel_matrix: list[list[int]] = [[0] * 32 for _ in range(64)]

        self.paused = False
        logger.info("Screen initialized")
Example #31
 def stop(self):
     """
     Stop scheduler, terminates executors, and all tasks that were running at the time.
     """
     for conn in self.pipe.values():
         conn.send(Message(EXECUTOR_SHUTDOWN))
     # timeout to terminate processes and process remaining messages in Pipe by polling thread
     logger.info("Waiting for termination...")
     time.sleep(5)
     for exc in self.executors:
         if exc.is_alive():
             exc.terminate()
         exc.join()
     logger.info("Terminated executors, cleaning up internal data")
     self.pipe = None
     self.executors = None
     self.task_queue_map = None
Example #33
    def get_room_id(self):
        bs = self.recv()
        if not bs:
            return

        code = bs.get_int8()
        if code != 0x1:
            logger.error('Register: got an invalid room id code: %s' % code)
            return self.send_error()

        self.room_id = bs.get_str()
        logger.info('Register: Get a room id: %s' % self.room_id)

        conf._SSH_SERVER_TERMINAL_PORT = bs.get_str()
        logger.debug('Register: Get Server Terminal Port: %s' %
                     conf._SSH_SERVER_TERMINAL_PORT)
        self.send_room_id()
Example #34
    def check_version(self):
        new_ver = self.get_new_version()
        if not new_ver:
            return

        logger.debug("remote repository recent version is: %s" % new_ver)
        logger.debug("the running version is %s" % banner.VERSION)

        new_ver_int = calc_version(new_ver)
        run_ver_int = calc_version(banner.VERSION)
        if not (new_ver_int and run_ver_int):  # check if format is bad
            return False

        if (new_ver_int > run_ver_int):  # check if recent version is new
            return True
        else:
            logger.info("it's the latest version")
            return False
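calc_version is referenced but not defined on this page. One possible sketch, assuming it turns a dotted version string such as '1.2.3' into a single comparable integer and returns None when the format is bad (only the name is taken from the code above; the body is an assumption):

def calc_version(version: str):
    # Hypothetical helper: convert 'major.minor.patch' into one comparable integer,
    # e.g. '1.2.3' -> 1 * 10000 + 2 * 100 + 3. Returns None if the format is bad.
    parts = version.strip().lstrip("v").split(".")
    if len(parts) != 3 or not all(p.isdigit() for p in parts):
        return None
    major, minor, patch = (int(p) for p in parts)
    return major * 10000 + minor * 100 + patch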
Example #35
 def launch_process(self):
     """
     Launch process for spark-submit.
     """
     # spark-submit command to launch
     command = self.cmd()
     logger.info("%s - Launch command %s", self.name, command)
     # only create stdout and stderr when working directory is provided
     if self.__working_directory:
         stdout_path = util.concat(self.__working_directory, "stdout")
         stderr_path = util.concat(self.__working_directory, "stderr")
         stdout = util.open(stdout_path, "wb")
         stderr = util.open(stderr_path, "wb")
         self.__ps = subprocess.Popen(command, bufsize=4096, stdout=stdout, stderr=stderr,
                                      close_fds=True)
     else:
         self.__ps = subprocess.Popen(command, bufsize=4096, stdout=None, stderr=None,
                                      close_fds=True)
     logger.info("%s - Process pid=%s", self.name, self.__ps.pid)
Example #36
def parse_txt_files(paths: List[str]) -> Iterator[str]:
    """
    Parses NCDC data files stored locally and yields database records from them.

    :param paths: list of .txt files paths

    :return: database records generator
    """

    header: dict = {}  # guards against a data line appearing before any "#" header line
    for p in paths:
        with open(p, "r") as f:
            for line in f:
                if line.startswith("#"):
                    header = parse_header(line)
                else:
                    data = parse_data_record(line)
                    yield transform_record({**header, **data})
        remove(p)
        logger.info(f"Successfully parsed and deleted text file {p} !")
Example #37
    def get_new_version(self):
        try:
            resp = requests.get(url=banner.REPOSITORY_API_ADDRESS)
        except Exception as e:
            logger.error("network anomaly: %s", e)
            return False

        if resp.status_code != 200:
            logger.info("unable to get a valid page")
            return False

        body = resp.content.decode()

        match = re.search(r'"name":"(.*?)",', body)
        if not match:
            logger.info("unable to get valid version information")
            return False

        return match.group(1)
Example #38
 def _process_message(self, msg):
     """
     Process a message and take action, e.g. terminate the process, execute a callback, etc.
     Message types are defined above in the package. Note that this can take actions on tasks,
     e.g. when a task is cancelled, so the subsequent processing of the task works with the
     updated state.
     :param msg: message to process
     """
     logger.debug("%s - Received message %s", self.name, msg)
     if isinstance(msg, Message):
         if msg.status == EXECUTOR_SHUTDOWN: # pragma: no branch
             raise ExecutorInterruptedException("Executor shutdown")
         elif msg.status == EXECUTOR_CANCEL_TASK: # pragma: no branch
             # update set of tasks to cancel
             if "task_id" in msg.arguments: # pragma: no branch
                 task_id = msg.arguments["task_id"]
                 self._cancel_task_ids.add(task_id)
                 logger.debug("%s - Registered cancelled task %s", self.name, task_id)
         else:
             # valid but unrecognized message, no-op
             pass
     else:
         logger.info("%s - Invalid message %s is ignored", self.name, msg)
Example #39
 def start(self):
     """
     Start all session services.
     """
     # getting server info validates connection
     logger.info("Trying to connect to Mongo instance")
     server_info = self.client.server_info()
     logger.info(server_info)
     # database and tables' names are fixed for now
     logger.debug("Setting up database and indexes")
     db = self.client.queue
     db.submissions.create_index([
         ("uid", pymongo.ASCENDING),
         ("status", pymongo.ASCENDING),
         ("createtime", pymongo.DESCENDING)
     ])
     db.tasks.create_index([
         ("uid", pymongo.ASCENDING)
     ])
     # subscribe to events
     cherrypy.engine.subscribe(EVENT_CREATE, self.event_create)
     # start scheduler
     self.session.scheduler.start_maintenance()
     self.session.scheduler.start()