Code Example #1
   def trace_memory_usage(self, frame, event, arg):
      """Callback for sys.settrace

      Args:
         frame: frame is the current stack frame
         event: event is a string: 'call', 'line', 'return', 'exception', 'c_call', 'c_return', or 'c_exception'
         arg: arg depends on the event type.

      Returns:
         The trace callback itself, so tracing continues in nested frames.
      """
      if event in ('call', 'line', 'return') and frame.f_code in self.code_map:
         if event != 'call':
            # "call" event just saves the lineno but not the memory
            process = Process(getpid())
            mem = process.memory_info()[0] / float(2 ** 20)
            # if there is already a measurement for that line get the max
            old_mem = self.code_map[frame.f_code].get(self.prevline, 0)
            self.code_map[frame.f_code][self.prevline] = max(mem, old_mem)
         self.prevline = frame.f_lineno

      if self._original_trace_function is not None:
         self._original_trace_function(frame, event, arg)

      return self.trace_memory_usage
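
A minimal sketch (not taken from the example's project) of how such a callback is typically installed: remember any trace function that is already active, point sys.settrace at the callback for the duration of the profiled call, then restore the original. The profiler attribute names mirror the snippet above and are assumptions.

import sys

def run_traced(profiler, func, *args, **kwargs):
    # Chain to whatever trace function is already installed so other
    # tools (debuggers, coverage) keep working.
    profiler._original_trace_function = sys.gettrace()
    sys.settrace(profiler.trace_memory_usage)
    try:
        return func(*args, **kwargs)
    finally:
        sys.settrace(profiler._original_trace_function)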
Code Example #2
File: judger.py Project: CompLand/Judger
 def __memory_watcher(self, data):
     max_memory = 0
     try:
         p = Process(data[0])
         memory = 1
         while 0 < memory and max_memory <= self.__mem_lim:
             if os_name.lower() == 'linux':
                 memory = p.memory_info()[0] - p.memory_info_ex().shared
             elif os_name.lower() == 'darwin':
                 memory = p.memory_info()[0]
             elif os_name.lower() == 'windows':
                 memory = p.memory_info_ex().private
             if max_memory < memory:
                 max_memory = memory
         data[1]()
     except NoSuchProcess:
         pass
     finally:
         data.append(max_memory)
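
Note that memory_info_ex() is deprecated since psutil 4.0.0; memory_info() now returns the platform-specific extended fields directly, so the per-OS branching above can be written against a single call. A rough modern equivalent, following the same per-platform field choice (a sketch, not the project's code):

import psutil

def sample_memory(p: psutil.Process) -> int:
    info = p.memory_info()
    if psutil.LINUX:
        return info.rss - info.shared   # exclude shared pages, as above
    if psutil.WINDOWS:
        return info.private             # private (non-shared) working set
    return info.rss                     # macOS and others: plain RSS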
Code Example #3
def process_with_stats(name, stdin, times=1):
    if times > 1:
        results = []
        for _ in xrange(0, times):
            results.append(process_with_stats(name, stdin, 1))

        result = {'execution': reduce(lambda a, c:
                                      a+c['execution']/float(times),
                                      results, 0),
                  'memory': reduce(lambda a, c:
                                   max(a, c['memory']), results, 0),
                  'output': results[0]['output'],
                  'status': results[0]['status']}

        return result

    process = Popen(name, stdin=PIPE, stdout=PIPE, stderr=DEVNULL,
                    close_fds=True)
    process.stdin.write(stdin.getvalue())
    process.stdin.close()

    stats = Process(process.pid)

    memory_usage = stats.memory_info().rss
    while process.poll() is None:
        try:
            memory_usage = max(memory_usage, stats.memory_info().rss)
        except:
            memory_usage = 0
        sleep(1/1000.0)

    execution_time = time() - stats.create_time()
    output = process.stdout.read()

    if memory_usage == 0 and process.returncode == 0:
        return process_with_stats(name, stdin)

    return {'execution': execution_time,
            'memory': memory_usage,
            'output': output,
            'status': process.returncode}
Code Example #4
File: workers.py Project: 28sui/uliweb
def get_memory(pid):
    # return the memory usage in MB, psutil should be 4.0 version
    from psutil import Process, __version__

    # if __version__ < '4.0.0':
    #     raise Exception('psutil module should be 4.0.0 version at least.')

    if pid_exists(pid):
        process = Process(pid)
        # mem = process.memory_full_info().uss / float(1024*1024)
        mem = process.memory_info().rss / float(1024*1024)
        return mem
    return 0
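
The commented-out lines point at the stricter metric: USS ("unique set size", the memory that would be freed if the process exited) requires memory_full_info(), which only exists in psutil >= 4.0.0 and may need elevated privileges. A variant that falls back to RSS when USS is unavailable (the fallback policy is an assumption, not part of the original project):

from psutil import AccessDenied, Process, pid_exists

def get_memory_mb(pid):
    if not pid_exists(pid):
        return 0
    process = Process(pid)
    try:
        mem = process.memory_full_info().uss   # psutil >= 4.0, may need privileges
    except (AttributeError, AccessDenied):
        mem = process.memory_info().rss        # portable fallback
    return mem / float(1024 * 1024)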
Code Example #5
def get_memory(pid):
    # return the memory usage in MB, psutil should be 4.0 version
    from psutil import Process, __version__

    # if __version__ < '4.0.0':
    #     raise Exception('psutil module should be 4.0.0 version at least.')

    if pid_exists(pid):
        process = Process(pid)
        # mem = process.memory_full_info().uss / float(1024*1024)
        mem = process.memory_info().rss / float(1024 * 1024)
        return mem
    return 0
Code Example #6
    def test_memory_free(self):
        process = Process(getpid())
        initial_memory = process.memory_info().rss

        start = time.perf_counter()
        with TiffVolume(os.path.join(IMAGE_PATH, "inputs.tif"),
                        BoundingBox(Vector(0, 0, 0), Vector(1024, 512,
                                                            50))) as v:
            volume_memory = process.memory_info().rss
        end = time.perf_counter()
        print("Load time: {} secs".format(end - start))

        final_memory = process.memory_info().rss

        self.assertAlmostEqual(initial_memory,
                               final_memory,
                               delta=initial_memory * 0.2,
                               msg=("memory leakage: final memory usage is " +
                                    "larger than the initial memory usage"))
        self.assertLess(initial_memory,
                        volume_memory,
                        msg=("volume loading error: volume memory usage is " +
                             "not greater than the initial memory usage"))
Code Example #7
def reportProcessMemoryUsage():
    """
    rss: aka "Resident Set Size", non-swapped physical memory process has used.
    The portion of memory occupied by a process that is held in main memory
    (RAM). The rest of the occupied memory exists in the swap space or file
    system, either because some parts of the occupied memory were paged out, or
    because some parts of the executable were never loaded

    vms: aka "Virtual Memory Size", total amount of virtual memory used by the
    process.
    """
    p = Process(os.getpid())
    mi = p.memory_info()
    _rssGb = mi.rss / _BYTES_IN_GB
    _vmsGb = mi.vms / _BYTES_IN_GB
    logger.info("ram %.2f GB vm: %.2f GB", _rssGb, _vmsGb)
Code Example #8
    def memory_usage(self):
        self._ensure_initialized()

        usage = 0
        agents = []

        for name, container in self._map_container_by_name().iteritems():
            info = self._docker.inspect_container(container)
            pid = info['State']['Pid']
            process = Process(pid)
            mem = process.memory_info()
            usage = usage + mem.rss
            agents.append({'name': name, 'memory_usage': mem.rss})

        avg = usage / len(agents) if len(agents) > 0 else 0

        return {'total_usage': usage, 'average_usage': avg, 'agents': agents}
Code Example #9
    def add_new_information_from_process_object(self, process: psutil.Process,
                                                data_retrieval_timestamp: datetime.datetime) -> None:
        """
        Adds the new information about the process to the application profile.
        This should be mainly used for applications with only one process.
        :raises TypeError if process is not of type psutil.Process or data_retrieval_timestamp is not of type
            datetime.datetime.
        :raises ValueError if data_retrieval_timestamp is newer than current time.
        :param process: Information about the specific process.
        :type process: psutil.Process
        :param data_retrieval_timestamp: The time the data was retrieved.
        :type data_retrieval_timestamp: datetime.datetime
        """
        if not (isinstance(process, psutil.Process)):
            raise TypeError(expected_type_but_received_message.format("process", "psutil.Process", process))
        if not (isinstance(data_retrieval_timestamp, datetime.datetime)):
            raise TypeError(expected_type_but_received_message.format("data_retrieval_timestamp", "datetime.datetime",
                                                                      data_retrieval_timestamp))
        if data_retrieval_timestamp.replace(tzinfo=None) > datetime.datetime.now():
            raise ValueError("Argument data_retrieval_timestamp cannot be newer than current time. Value receive: {}"
                             .format(data_retrieval_timestamp))

        # Get info from the process object. One of the following calls may raise an Error (OS, AccessDenied, etc).
        open_files = process.open_files()
        memory_info = process.memory_info()
        child_process_count = len(process.children())
        username = process.username()
        threads_number = process.num_threads()
        process.cpu_percent()
        try:
            connections_num = len(process.connections())
        except psutil.AccessDenied:
            connections_num = 0

        time.sleep(0.1)  # wait for cpu_percent to return a meaningful value.
        cpu_percentage = process.cpu_percent()

        self.add_open_files(open_files=open_files, data_retrieval_timestamp=data_retrieval_timestamp)
        self.__memory_usages.append(memory_info.rss)
        self.__data_retrieval_timestamp.append(data_retrieval_timestamp)
        self.__child_processes_count.append(child_process_count)
        self.__users.extend(username)
        self.__cpu_percent_usages.append(cpu_percentage)
        self.__threads_numbers.append(threads_number)
        self.__connections_numbers.append(connections_num)
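
The paired cpu_percent() calls around the sleep are the standard psutil priming pattern: the first call with no interval only records a baseline and returns 0.0, and the second call reports CPU usage over the elapsed window. In isolation:

import time
import psutil

p = psutil.Process()
p.cpu_percent()          # prime: records a baseline, returns 0.0
time.sleep(0.1)
print(p.cpu_percent())   # % CPU over the ~0.1 s window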
Code Example #10
File: recording.py Project: jefferyUstc/CodeSnippets
 def log_memory_info(p: psutil.Process):
     nonlocal log_file, inter_val
     if log_file is None:
         log_file = './monitor_memory.log'
     elif os.path.isdir(log_file):
         log_file = os.path.join(log_file, 'monitor_memory.log')
     with open(log_file, 'a') as f:
         f.write('rss\tvms\n')
         while True:
             try:
                 info = p.memory_info()
             except:
                 os._exit(0)
             f.write('{0}\t{1}\n'.format(str(info.rss / (1024 * 1024.)),
                                         str(info.vms /
                                             (1024 * 1024.))))  # vms
             f.flush()
             time.sleep(inter_val)
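
log_memory_info closes over log_file and inter_val from an enclosing function that is not shown. A minimal sketch of such a wrapper, assuming the logger runs in a daemon thread next to the watched process (the original's os._exit(0) hints it may actually live in a child process):

import threading
import time
import psutil

def monitor_memory(pid, log_file='./monitor_memory.log', inter_val=1.0):
    def _log(p: psutil.Process):
        with open(log_file, 'a') as f:
            f.write('rss\tvms\n')
            while p.is_running():
                # a production version would also catch NoSuchProcess here
                info = p.memory_info()
                f.write('{}\t{}\n'.format(info.rss / (1024 * 1024.),
                                          info.vms / (1024 * 1024.)))
                f.flush()
                time.sleep(inter_val)

    t = threading.Thread(target=_log, args=(psutil.Process(pid),), daemon=True)
    t.start()
    return t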
Code Example #11
File: utils.py Project: AlissonSilveira/OpenVT
def memory_usage_psutil():
    # return the memory usage in percentage like top
    process = Process(getpid())
    mem = process.memory_info()[0]
    if mem > float(2**30):
        mem = color("{}GB".format(str(mem / float(2**30))[:6]),
                    fontcolor='red',
                    bold=True)
    elif mem > float(2**20):
        mem = color("{}MB".format(str(mem / float(2**20))[:6]),
                    fontcolor='yellow',
                    bold=True)
    elif mem > float(2**10):
        mem = color("{}KB".format(str(mem / float(2**10))[:6]),
                    fontcolor='green',
                    bold=True)
    else:
        mem = color("{}B".format(mem), fontcolor='green', bold=True)
    return "  | {}% | {} |".format(str(process.memory_percent())[:6], mem)
Code Example #12
    def __init__(self, proc: psutil.Process, proctable):
        """
            Class constructor
        """

        _dead = False
        self._children = list()
        self._parent = 0
        parent = None

        self._proc, self._pt = proc, proctable
        try:
            self._pgid = os.getpgid(proc.pid)
        except:
            self._pgid = 0
            _dead = True

        if not _dead:
            parent = proc.parent()

        if parent:
            self._parent = parent.pid

        if not _dead:
            self._children = [ p.pid for p in proc.children() ]

        with proc.oneshot():
            if proc.is_running():
                self.rss = proc.memory_info().rss
                self.vms = proc.memory_info().vms
                self.ctx_vol = proc.num_ctx_switches().voluntary
                self.ctx_invol = proc.num_ctx_switches().involuntary
                self._cmdline = proc.cmdline()
                self.pcpu = None
                #self.pcpu += proc.cpu_percent(interval=DEFAULT_INTERVAL)
            else:
                self.rss = 0
                self.vms = 0
                self.ctx_vol = 0
                self.ctx_invol = 0
                self._cmdline = []  # keep the attribute name consistent with the running branch
                self.pcpu = 0.0
Code Example #13
    def memory_usage(self):
        self._ensure_initialized()

        usage = 0
        agents = []

        for name, container in self._map_container_by_name().iteritems():
            info = self._docker.inspect_container(container)
            pid = info['State']['Pid']
            process = Process(pid)
            try:
                mem = process.memory_info()
            except AttributeError:
                mem = process.get_memory_info()
            usage = usage + mem.rss
            agents.append({'name': name, 'memory_usage': mem.rss})

        avg = usage / len(agents) if len(agents) > 0 else 0

        return {'total_usage': usage, 'average_usage': avg, 'agents': agents}
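
The AttributeError fallback covers psutil releases older than 2.0.0, where the method was named get_memory_info(). The same compatibility shim as a standalone helper (a sketch, not this project's API):

def get_rss(process):
    """RSS in bytes, tolerating both old and new psutil method names."""
    try:
        return process.memory_info().rss       # psutil >= 2.0
    except AttributeError:
        return process.get_memory_info().rss   # psutil < 2.0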
Code Example #14
File: os_tools.py Project: NextWork123/echidna
def stats():
    proc = Process()
    with proc.oneshot():
        uptime = naturaldelta(time()-proc.create_time())
        uptime = uptime[:-6]
        cpu_usage = int(cpu_percent())
        mem_total = virtual_memory().total >> 20
        pid = getpid()
        py = Process(pid)
        memoryUse = py.memory_info()[0] / 10**6  # bytes -> MB

    fields = [
        ('Uso della CPU', str(cpu_usage) + '%', True),
        ('Utilizzo della memoria', str(round(memoryUse)) + ' MB', True),
        ('Totale memoria', str(mem_total) + ' MB', True),
        ('Discord.py', discord_version, True),
        ('Python', python_version(), True),
        ('Uptime', uptime, True)
    ]

    return fields
Code Example #15
    if do_print:
        with open(filename, "r") as f:
            for line in f:
                if "arenas" in line or "unused pools" in line:
                    print(line, end="")


if __name__ == "__main__":
    print(f"Running in pid {PID}")

    # make a bunch of lists that call CPython's arena allocator
    print(
        "Building a bunch of dummy lists to create a lot of arenas and consume memory\n"
    )
    lsts = [_dummy_list() for _ in range(NUM_DUMMIES)]
    peak_rss = PROC.memory_info().rss / 1e6
    print(f"Resident set size: {peak_rss:.1f} MB\n")

    _dump_alloc_info("before.txt")

    num_del = len(lsts) // 4

    print("\n---\n")
    print(
        f"Removing {num_del}/{len(lsts)} lists (~25%) from the list-of-lists\n"
    )
    indices = sorted(random.choices(range(len(lsts)), k=num_del))
    for num, idx in enumerate(indices):
        # indices are sorted ascending, so each prior deletion shifts the
        # remaining targets one position to the left
        del lsts[idx - num]
Code Example #16
File: sysutil.py Project: yinruiqing/arsenal
def memory_usage():
    "Return the memory usage of this process in MB."
    p = Process(getpid())
    return p.memory_info()[0] / 2**20
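
memory_info()[0] picks the rss field by position; accessing it by name reads more clearly and gives the same value:

from os import getpid
from psutil import Process

print(Process(getpid()).memory_info().rss / 2**20)  # MB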
Code Example #17
File: text_ui.py Project: lunareve/mpf
class TextUi(MpfController):
    """Handles the text-based UI."""
    def __init__(self, machine: "MachineController") -> None:
        """Initialize TextUi."""
        super().__init__(machine)

        self.screen = None

        if not machine.options['text_ui']:
            return

        self.start_time = datetime.now()
        self.machine = machine
        self._tick_task = self.machine.clock.schedule_interval(self._tick, 1)
        self.screen = Screen.open()
        self.mpf_process = Process()
        self.ball_devices = list()  # type: List[BallDevice]

        self.switches = OrderedDict(
        )  # type: Dict[Switch, Tuple[str, int, int]]
        self.player_start_row = 0
        self.column_positions = [0, .25, .5, .75]
        self.columns = [0] * len(self.column_positions)

        self.machine.events.add_handler('init_phase_2', self._init)
        self.machine.events.add_handler('init_phase_3', self._update_switches)
        # self.machine.events.add_handler('init_phase_3', self._init2)
        self.machine.events.add_handler('loading_assets',
                                        self._asset_load_change)
        self.machine.events.add_handler('bcp_connection_attempt',
                                        self._bcp_connection_attempt)
        self.machine.events.add_handler('asset_loading_complete',
                                        self._asset_load_complete)
        self.machine.events.add_handler('bcp_clients_connected',
                                        self._bcp_connected)
        self.machine.events.add_handler('shutdown', self.stop)
        self.machine.events.add_handler('player_number', self._update_player)
        self.machine.events.add_handler('player_ball', self._update_player)
        self.machine.events.add_handler('player_score', self._update_player)
        self.machine.events.add_handler('ball_ended',
                                        self._update_player_no_game)

        self._pending_bcp_connection = False
        self._asset_percent = 0
        self._bcp_status = (0, 0, 0)  # type: Tuple[float, int, int]

        self._draw_screen()
        self.screen.refresh()

    def _init(self, **kwargs):
        del kwargs
        self.machine.mode_controller.register_start_method(self._mode_change)
        self.machine.mode_controller.register_stop_method(self._mode_change)
        self.machine.switch_controller.add_monitor(self._update_switches)
        self.machine.bcp.interface.register_command_callback(
            "status_report", self._bcp_status_report)

        for bd in [
                x for x in self.machine.ball_devices if not x.is_playfield()
        ]:
            self.ball_devices.append(bd)

        self.ball_devices.sort()
        self._draw_player_header()

        self._update_switch_layout()

    def _bcp_status_report(self, client, cpu, rss, vms):
        del client
        self._bcp_status = cpu, rss, vms

    def _draw_screen(self):

        for i, percent in enumerate(self.column_positions):
            if not i:
                self.columns[i] = 1
            self.columns[i] = int(self.screen.width * percent)

        height, width = self.screen.dimensions
        title = 'Mission Pinball Framework v{}'.format(
            mpf._version.__version__)  # noqa
        padding = int((self.screen.width - len(title)) / 2)

        self.screen.print_at((' ' * padding) + title + (' ' * (padding + 1)),
                             0,
                             0,
                             colour=7,
                             bg=1)

        self.screen.print_at('<CTRL+C> TO EXIT', width - 16, 0, colour=0, bg=1)

        self.screen.print_at('ACTIVE MODES', self.columns[0], 2)
        self.screen.print_at('SWITCHES', int((width * .5) - 8), 2)
        self.screen.print_at('BALL COUNTS', self.columns[3], 2)
        self.screen.print_at('-' * width, 0, 3)

        self.screen.print_at(self.machine.machine_path,
                             0,
                             height - 2,
                             colour=3)

        if 0 < self._asset_percent < 100:
            self.screen.print_at(' ' * width, 0, int(height / 2) + 1, bg=3)
            self.screen.print_at('LOADING ASSETS: {}%'.format(
                self._asset_percent),
                                 int(width / 2) - 10,
                                 int(height / 2) + 1,
                                 colour=0,
                                 bg=3)

        if self._pending_bcp_connection:
            bcp_string = 'WAITING FOR MEDIA CONTROLLER {}...'.format(
                self._pending_bcp_connection)

            self.screen.print_at(' ' * width, 0, int(height / 2) - 1, bg=3)
            self.screen.print_at(bcp_string,
                                 int((width - len(bcp_string)) / 2),
                                 int(height / 2) - 1,
                                 colour=0,
                                 bg=3)

        self._update_stats()

    def _draw_player_header(self):
        self.player_start_row = (len(self.ball_devices) +
                                 len(self.machine.playfields)) + 7

        self.screen.print_at('CURRENT PLAYER', self.columns[3],
                             self.player_start_row - 2)
        self.screen.print_at('-' * (int(self.screen.width * .75) + 1),
                             self.columns[3], self.player_start_row - 1)
        self._update_player()

    def _update_stats(self):
        height, width = self.screen.dimensions

        # Runtime
        rt = (datetime.now() - self.start_time)
        mins, sec = divmod(rt.seconds + rt.days * 86400, 60)
        hours, mins = divmod(mins, 60)
        time_string = 'RUNNING {:d}:{:02d}:{:02d}'.format(hours, mins, sec)
        self.screen.print_at(time_string,
                             width - len(time_string),
                             height - 2,
                             colour=2)

        # System Stats
        system_str = 'Free Memory (MB): {} CPU:{:3d}%'.format(
            round(virtual_memory().available / 1048576),
            round(cpu_percent(interval=None, percpu=False)))
        self.screen.print_at(system_str,
                             width - len(system_str),
                             height - 1,
                             colour=2)

        # MPF process stats
        stats_str = 'MPF (CPU RSS/VMS): {}% {}/{} MB    '.format(
            round(self.mpf_process.cpu_percent()),
            round(self.mpf_process.memory_info().rss / 1048576),
            round(self.mpf_process.memory_info().vms / 1048576))

        self.screen.print_at(stats_str, 0, height - 1, colour=6)

        # MC process stats
        if self._bcp_status != (0, 0, 0):
            bcp_string = 'MC (CPU RSS/VMS) {}% {}/{} MB '.format(
                round(self._bcp_status[0]),
                round(self._bcp_status[1] / 1048576),
                round(self._bcp_status[2] / 1048576))

            self.screen.print_at(bcp_string,
                                 len(stats_str) - 2,
                                 height - 1,
                                 colour=5)

    def _update_switch_layout(self):
        start_row = 4
        cutoff = int(len(self.machine.switches) / 2) + start_row - 1
        row = start_row
        col = 1

        for sw in sorted(self.machine.switches):
            if sw.invert:
                name = sw.name + '*'
            else:
                name = sw.name

            self.switches[sw] = (name, self.columns[col], row)

            if row == cutoff:
                row = start_row
                col += 1
            else:
                row += 1

        self._update_switches()

    def _update_switches(self, *args, **kwargs):
        del args, kwargs
        for sw, info in self.switches.items():
            if sw.state:
                self.screen.print_at(*info, colour=0, bg=2)
            else:
                self.screen.print_at(*info)

        self.screen.refresh()

    def _mode_change(self, *args, **kwargs):
        # Have to call this on the next frame since the mode controller's
        # active list isn't updated yet
        del args
        del kwargs
        self.machine.clock.schedule_once(self._update_modes)

    def _update_modes(self, *args, **kwargs):
        del args
        del kwargs
        modes = self.machine.mode_controller.active_modes

        for i, mode in enumerate(modes):
            self.screen.print_at(' ' * (self.columns[0] - 1), self.columns[0],
                                 i + 4)
            self.screen.print_at('{} ({})'.format(mode.name, mode.priority),
                                 self.columns[0], i + 4)

        self.screen.print_at(' ' * (int(self.screen.width * .25) - 1),
                             self.columns[0],
                             len(modes) + 4)

    def _update_ball_devices(self, **kwargs):
        del kwargs

        row = 4

        try:
            for pf in self.machine.playfields:
                self.screen.print_at('{}: {} '.format(pf.name, pf.balls),
                                     self.columns[3],
                                     row,
                                     colour=2 if pf.balls else 7)
                row += 1
        except AttributeError:
            pass

        for bd in self.ball_devices:
            # extra spaces to overwrite previous chars if the str shrinks
            self.screen.print_at('{}: {} ({})                   '.format(
                bd.name, bd.balls, bd.state),
                                 self.columns[3],
                                 row,
                                 colour=2 if bd.balls else 7)
            row += 1

    def _update_player(self, **kwargs):
        del kwargs
        for i in range(3):
            self.screen.print_at(
                ' ' * (int(self.screen.width * (1 / len(self.columns))) + 1),
                self.columns[3], self.player_start_row + i)
        try:
            self.screen.print_at(
                'PLAYER: {}'.format(self.machine.game.player.number),
                self.columns[3], self.player_start_row)
            self.screen.print_at(
                'BALL: {}'.format(self.machine.game.player.ball),
                self.columns[3], self.player_start_row + 1)
            self.screen.print_at(
                'SCORE: {:,}'.format(self.machine.game.player.score),
                self.columns[3], self.player_start_row + 2)
        except AttributeError:
            self._update_player_no_game()

    def _update_player_no_game(self, **kwargs):
        del kwargs
        for i in range(3):
            self.screen.print_at(
                ' ' * (int(self.screen.width * (1 / len(self.columns))) + 1),
                self.columns[3], self.player_start_row + i)

        self.screen.print_at('NO GAME IN PROGRESS', self.columns[3],
                             self.player_start_row)

    def _tick(self):
        if self.screen.has_resized():
            self.screen = Screen.open()
            self._update_switch_layout()
            self._update_modes()
            self._draw_screen()
            self._draw_player_header()

        self.machine.bcp.transport.send_to_all_clients("status_request")
        self._update_stats()
        self._update_ball_devices()
        self.screen.refresh()

    def _bcp_connection_attempt(self, name, host, port, **kwargs):
        del name
        del kwargs
        self._pending_bcp_connection = '{}:{}'.format(host, port)
        self._draw_screen()

    def _bcp_connected(self, **kwargs):
        del kwargs
        self._pending_bcp_connection = None
        self.screen.print_at(' ' * self.screen.width, 0,
                             int(self.screen.height / 2) - 1)

        self._update_modes()
        self._update_switches()
        self._update_ball_devices()

    def _asset_load_change(self, percent, **kwargs):
        del kwargs
        self._asset_percent = percent
        self._draw_screen()

    def _asset_load_complete(self, **kwargs):
        del kwargs
        self._asset_percent = 100
        self.screen.print_at(' ' * self.screen.width, 0,
                             int(self.screen.height / 2) + 1)

        self._update_modes()
        self._update_switches()
        self._update_ball_devices()

    def stop(self, **kwargs):
        """Stop the Text UI and restore the original console screen."""
        del kwargs

        if self.screen:
            self.machine.clock.unschedule(self._tick_task)
            logger = logging.getLogger()
            logger.addHandler(logging.StreamHandler())
            self.screen.close(True)
Code Example #18
File: stats.py Project: n343NC001/Bloxlink
    async def __main__(self, CommandArgs):
        response = CommandArgs.response
        clusters = 0

        if IS_DOCKER:
            total_guilds = guilds = 0
            mem = 0
            errored = 0

            stats = await broadcast(None, type="STATS")
            clusters = len(stats)

            for cluster_id, cluster_data in stats.items():
                if cluster_data in ("cluster offline", "cluster timeout"):
                    errored += 1
                else:
                    total_guilds += cluster_data[0]
                    mem += cluster_data[1]

            if errored:
                guilds = f"{total_guilds} ({len(self.client.guilds)}) ({errored} errored)"
            else:
                guilds = f"{total_guilds} ({len(self.client.guilds)})"

        else:
            total_guilds = guilds = str(len(self.client.guilds))
            clusters = 1

            process = Process(getpid())
            mem = math.floor(process.memory_info()[0] / float(2**20))

        seconds = math.floor(time() - STARTED)

        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)

        days, hours, minutes, seconds = None, None, None, None

        if d:
            days = f"{d}d"
        if h:
            hours = f"{h}h"
        if m:
            minutes = f"{m}m"
        if s:
            seconds = f"{s}s"

        uptime = f"{days or ''} {hours or ''} {minutes or ''} {seconds or ''}".strip(
        )

        embed = Embed(
            description=f"Showing collective stats from **{clusters}** clusters"
        )
        embed.set_author(name=Bloxlink.user.name,
                         icon_url=Bloxlink.user.avatar_url)

        embed.add_field(name="Version", value=VERSION)
        embed.add_field(name="Cluster", value=CLUSTER_ID)
        embed.add_field(name="Shards", value=self.shard_range)
        embed.add_field(name="Servers", value=guilds)
        embed.add_field(name="Uptime", value=uptime)
        embed.add_field(name="Memory Usage", value=f"{mem} MB")

        embed.add_field(name="Invite **Bloxlink**",
                        value="https://blox.link/invite")
        embed.add_field(name="Website", value="https://blox.link")
        embed.add_field(name="Repository",
                        value="https://github.com/bloxlink/Bloxlink")

        await response.send(embed=embed)

        if IS_DOCKER and RELEASE == "MAIN":
            await self.r.table("miscellaneous").insert(
                {
                    "id": "stats",
                    "stats": {
                        "guilds": total_guilds,
                        "version": VERSION,
                        "memory": mem,
                        "uptime": uptime,
                        "clusters": clusters
                    }
                },
                conflict="update").run()
Code Example #19
    def run(self):
        start_message = Start(self)

        errors = self._check_job_files()

        errors.extend(self._assert_arg_list())

        self._dump_exec_env()

        if errors:
            yield start_message.with_error("\n".join(errors))
            return

        yield start_message

        executable = self.job_data.get("executable")
        assert_file_executable(executable)

        arg_list = [executable]
        if self.job_data.get("argList"):
            arg_list += self.job_data["argList"]

        if self.job_data.get("stdin"):
            stdin = open(self.job_data.get("stdin"))
        else:
            stdin = None

        if self.std_err:
            stderr = open(self.std_err, "w")
        else:
            stderr = None

        if self.std_out:
            stdout = open(self.std_out, "w")
        else:
            stdout = None

        if self.job_data.get("target_file"):
            target_file_mtime = 0
            if os.path.exists(self.job_data["target_file"]):
                stat = os.stat(self.job_data["target_file"])
                target_file_mtime = stat.st_mtime

        exec_env = self.job_data.get("exec_env")
        if exec_env:
            exec_name, _ = os.path.splitext(
                os.path.basename(self.job_data.get("executable"))
            )
            with open("%s_exec_env.json" % exec_name, "w") as f:
                f.write(json.dumps(exec_env))

        max_running_minutes = self.job_data.get("max_running_minutes")
        run_start_time = dt.now()

        proc = Popen(
            arg_list,
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            env=self.job_data.get("environment"),
        )

        exit_code = None

        process = Process(proc.pid)
        max_memory_usage = 0
        while exit_code is None:
            try:
                memory = process.memory_info().rss
            except (NoSuchProcess, AccessDenied, ZombieProcess):
                """In case of a process that has died and is in some
                transitional state, we ignore any failures. Only seen on OSX
                thus far.
                See https://github.com/giampaolo/psutil/issues/1044#issuecomment-298745532
                """
                memory = 0
            if memory > max_memory_usage:
                max_memory_usage = memory

            yield Running(self, max_memory_usage, memory)

            try:
                exit_code = process.wait(timeout=self.MEMORY_POLL_PERIOD)
            except TimeoutExpired:
                run_time = dt.now() - run_start_time
                if (
                    max_running_minutes is not None
                    and run_time.seconds > max_running_minutes * 60
                ):
                    """
                    If the spawned process is not in the same process group
                    as the callee (job_dispatch), we will kill the process
                    group explicitly.

                    Propagating the unsuccessful Exited message will kill the
                    callee group. See job_dispatch.py.
                    """
                    process_group_id = os.getpgid(proc.pid)
                    this_group_id = os.getpgid(os.getpid())
                    if process_group_id != this_group_id:
                        os.killpg(process_group_id, signal.SIGKILL)

                    yield Exited(self, exit_code).with_error(
                        "Job:{} has been running for more than {} minutes - explicitly killed.".format(
                            self.name(), max_running_minutes
                        )
                    )
                    return

        exited_message = Exited(self, exit_code)

        if exit_code != 0:
            yield exited_message.with_error(
                "Process exited with status code {}".format(exit_code)
            )
            return

        # exit_code is 0

        if self.job_data.get("error_file"):
            if os.path.exists(self.job_data["error_file"]):
                yield exited_message.with_error(
                    "Found the error file:{} - job failed.".format(
                        self.job_data["error_file"]
                    )
                )
                return

        if self.job_data.get("target_file"):
            target_file_error = self._check_target_file_is_written(target_file_mtime)
            if target_file_error:
                yield exited_message.with_error(target_file_error)
                return

        yield exited_message
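
The core of the monitoring loop above, stripped of the job bookkeeping: poll the child's RSS, keep the maximum, and let psutil's wait(timeout=...) serve as both the sleep and the exit-code check. A condensed sketch (the command line is a placeholder):

from subprocess import Popen
from psutil import (AccessDenied, NoSuchProcess, Process, TimeoutExpired,
                    ZombieProcess)

proc = Popen(['sleep', '2'])
watcher = Process(proc.pid)
max_memory_usage = 0
exit_code = None
while exit_code is None:
    try:
        max_memory_usage = max(max_memory_usage, watcher.memory_info().rss)
    except (NoSuchProcess, AccessDenied, ZombieProcess):
        pass  # the process is going away; keep waiting for the exit code
    try:
        exit_code = watcher.wait(timeout=1.0)
    except TimeoutExpired:
        pass
print(max_memory_usage, exit_code)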
Code Example #20
File: BotInfo.py Project: Roxedus/MornBot
    async def botinfo(self, ctx):
        """Viser info om meg"""

        dev = await self.bot.fetch_user(170506717140877312)

        start = perf_counter()
        status_msg = await ctx.send('Beregner ping...')
        end = perf_counter()
        ping = int((end - start) * 1000)

        now = time()
        diff = int(now - self.bot.uptime)
        days, remainder = divmod(diff, 24 * 60 * 60)
        hours, remainder = divmod(remainder, 60 * 60)
        minutes, seconds = divmod(remainder, 60)

        process = Process(getpid())
        memory_usage = round(process.memory_info().rss / 1000000, 1)
        cpu_percent = process.cpu_percent()

        total_members = []
        online_members = []
        idle_members = []
        dnd_members = []
        offline_members = []
        for guild in self.bot.guilds:
            for member in guild.members:
                if member.id in total_members:
                    continue
                total_members.append(member.id)
                if str(member.status) == 'online':
                    online_members.append(member.id)
                elif str(member.status) == 'idle':
                    idle_members.append(member.id)
                elif str(member.status) == 'dnd':
                    dnd_members.append(member.id)
                elif str(member.status) == 'offline':
                    offline_members.append(member.id)

        embed = discord.Embed(color=ctx.me.color, url=self.bot.misc['website'])
        embed.set_author(name=dev.name, icon_url=dev.avatar_url)
        embed.set_thumbnail(url=self.bot.user.avatar_url)
        embed.add_field(name='Dev',
                        value=f'{dev.mention}\n{dev.name}#{dev.discriminator}')
        embed.add_field(name='Oppetid',
                        value=f'{days}d {hours}t {minutes}m {seconds}s')
        embed.add_field(
            name='Ping',
            value=
            f'Ekte ping: {ping} ms\nWebsocket ping: {int(self.bot.latency * 1000)} ms'
        )
        embed.add_field(name='Servere', value=len(self.bot.guilds))
        embed.add_field(name='Discord.py Versjon', value=discord.__version__)
        embed.add_field(name='Python Versjon', value=platform.python_version())
        embed.add_field(name='Ressursbruk',
                        value=f'RAM: {memory_usage} MB\nCPU: {cpu_percent}%')
        embed.add_field(name='Maskin',
                        value=f'{platform.system()} {platform.release()}')
        embed.add_field(
            name=f'Brukere ({len(total_members)})',
            value=f'{self.bot.emoji["online"]}{len(online_members)} ' +
            f'{self.bot.emoji["idle"]}{len(idle_members)} ' +
            f'{self.bot.emoji["dnd"]}{len(dnd_members)} ' +
            f'{self.bot.emoji["offline"]}{len(offline_members)}')
        embed.add_field(
            name='Lenker',
            value='[Inviter](https://discordapp.com/oauth2/authorize?client_' +
            f'id={self.bot.user.id}&permissions=388174&scope=bot) ' +
            f'| [Nettside]({self.bot.misc["website"]}) ' +
            f'| [Kildekode]({self.bot.misc["source_code"]})')
        await Defaults.set_footer(ctx, embed)
        await status_msg.edit(embed=embed, content=None)
Code Example #21
File: simulator_class.py Project: adfaure/accasim
class Simulator(SimulatorBase):
    """

    Default implementation of the SimulatorBase class.

    """
    def __init__(self,
                 workload,
                 sys_config,
                 dispatcher,
                 resource_manager=None,
                 reader=None,
                 job_factory=None,
                 additional_data=[],
                 simulator_config=None,
                 overwrite_previous=True,
                 scheduling_output=True,
                 pprint_output=False,
                 benchmark_output=False,
                 statistics_output=True,
                 save_parameters=None,
                 show_statistics=True,
                 **kwargs):
        """

        Constructor of the HPC Simulator class.

        :param workload: Filepath to the workload, it is used by the reader. If a reader is not given, the default one is used.
        :param sys_config: Filepath to the synthetic system configuration. Used by the resource manager to create the system.
        :param dispatcher: Dispatching method
        :param resource_manager: Optional. Instantiation of the resource_manager class.
        :param reader: Optional. Instantiation of the reader class.
        :param job_factory: Optional. Instantiation of the job_factory class.
        :param additional_data: Optional. Array of Objects or Classes of AdditionalData class.
        :param simulator_config: Optional. Filepath to the simulator config. For replacing the misc.DEFAULT_SIMULATION parameters.
        :param overwrite_previous: Default True. Overwrite previous results.
        :param scheduling_output: Default True. Dispatching plan output. Format modifiable in DEFAULT_SIMULATION
        :param pprint_output: Default False. Dispatching plan output in pretty print version. Format modifiable in DEFAULT_SIMULATION
        :param benchmark_output: Default False. Measurement of the simulator and dispatcher performance.
        :param statistics_output: Default True. Statistics of the simulation.
        :param save_parameters: List of simulation name parameters to be saved in the target results folder. None or empty for not saving the parameters.
        :param show_statistics: Default True. Show statistics after finishing the simulation.
        :param \*\*kwargs: Optional parameters to be included in the Constants.

        """
        assert (version_info >= (
            3,
            5,
        )), 'Unsupported python version. Try with 3.5 or newer.'

        kwargs['OVERWRITE_PREVIOUS'] = overwrite_previous
        kwargs['SYS_CONFIG_FILEPATH'] = sys_config
        kwargs['WORKLOAD_FILEPATH'] = workload
        kwargs['SCHEDULING_OUTPUT'] = scheduling_output
        kwargs['PPRINT_OUTPUT'] = pprint_output
        kwargs['BENCHMARK_OUTPUT'] = benchmark_output
        kwargs['STATISTICS_OUTPUT'] = statistics_output
        kwargs['SHOW_STATISTICS'] = show_statistics

        _uargs = []

        if not resource_manager:
            resource_manager, equiv, start_time = self.generate_enviroment(
                sys_config)
            kwargs['equivalence'] = equiv
            kwargs['start_time'] = start_time
        if not job_factory:
            kwargs['job_mapper'] = DEFAULT_SWF_MAPPER
            if not kwargs.get('EXTENDED_JOB_DESCRIPTION', False):
                kwargs['job_attrs'] = self.default_job_description()
            else:
                kwargs['job_attrs'] = self.extended_job_description()
            _jf_arguments = ['job_class', 'job_attrs', 'job_mapper']
            args = self.prepare_arguments(_jf_arguments, kwargs)
            _uargs += _jf_arguments
            job_factory = JobFactory(resource_manager, **args)
        if workload and not reader:
            _reader_arguments = [
                'max_lines', 'tweak_function', 'equivalence', 'start_time'
            ]
            args = self.prepare_arguments(_reader_arguments, kwargs)
            reader = self.set_workload_input(workload,
                                             job_factory=job_factory,
                                             **args)
            _uargs += _reader_arguments

        for _u in _uargs:
            kwargs.pop(_u, None)
        SimulatorBase.__init__(self, config_file=simulator_config, **kwargs)

        if not isinstance(additional_data, list):
            assert (
                isinstance(additional_data, AdditionalData)
                or issubclass(additional_data, AdditionalData)
            ), 'Only subclasses of the AdditionalData class are accepted as the additional_data argument'
            additional_data = [additional_data]

        assert (isinstance(resource_manager, ResourceManager))
        self.resource_manager = resource_manager

        assert (isinstance(dispatcher, SchedulerBase))
        dispatcher.set_resource_manager(resource_manager)
        # self.dispatcher = dispatcher

        assert (isinstance(reader, Reader))
        self.reader = reader

        assert (isinstance(job_factory, JobFactory))
        self.job_factory = job_factory

        additional_data = self.additional_data_init(additional_data)
        self.mapper = EventManager(self.resource_manager, dispatcher,
                                   additional_data)

        if save_parameters:
            self._save_parameters(save_parameters)

        if benchmark_output:
            self._usage_writer = AsyncWriter(
                path=path.join(
                    self.constants.RESULTS_FOLDER_PATH,
                    self.constants.BENCHMARK_PREFIX +
                    self.constants.WORKLOAD_FILENAME),
                pre_process_fun=Simulator.usage_metrics_preprocessor)
            self._process_obj = Process(getpid())
        else:
            self._usage_writer = None
            self._process_obj = None

        self.start_simulation_time = None
        self.end_simulation_time = None
        self.max_sample = 2
        self.daemons = {}
        self.loaded_jobs = 0
        self.dispatched_jobs = 0
        self.rejected_jobs = 0

    def monitor_datasource(self, _stop):
        """

        Runs continuously and updates the global data.
        Useful for daemons.

        :param _stop: Signal for stop

        """
        while (not _stop.is_set()):
            self.constants.running_at[
                'current_time'] = self.mapper.current_time
            self.constants.running_at['running_jobs'] = {
                x: self.mapper.events[x]
                for x in self.mapper.running
            }
            sleep(self.constants.running_at['interval'])

    def start_simulation(self, system_status=False, **kwargs):
        """

        Initializes the simulation

        :param init_unix_time: Adjustment for job timings. If the first job corresponds to 0, init_unix_time must correspond to the real submit time of the workload. Otherwise, if the job contains the real submit time, init_unix_time is 0.
        :param system_status: Initializes the system status daemon.
        :param system_utilization: Initializes the running jobs visualization using matplotlib.
        :param \*\*kwargs: a 'tweak_function' to deal with the workloads.

        """
        #=======================================================================
        # System status is the main entry point to get access to the current
        # simulation data. It is used also for the visualization component.
        #=======================================================================
        if system_status:
            functions = {
                'usage_function': self.mapper.usage,
                'availability_function': self.mapper.availability,
                'simulated_status_function': self.mapper.simulated_status,
                'current_time_function': self.mapper.simulated_current_time
            }
            self.daemons['system_status'] = {
                'class': SystemStatus,
                'args': [self.constants.WATCH_PORT, functions],
                'object': None
            }

        # @TODO
        # Add the usage_writer to the daemons array to auto-on/off process
        self._logger_listener.start()
        if self._usage_writer:
            self._usage_writer.start()

        # Starting the daemons
        self.daemon_init()

        self.show_config()
        sim_error = None
        try:
            self.start_hpc_simulation(**kwargs)
        except Exception as e:
            sleep(1)
            print('The simulation will be stopped. Reason: {}'.format(e))
            sim_error = e

        [d['object'].stop() for d in self.daemons.values() if d['object']]

        if self._usage_writer:
            self._usage_writer.stop()
            self._usage_writer = None

        # @TODO
        self.mapper.stop_writers()

        filepaths = self._generated_filepaths()
        self._clean_simulator_constants()
        self._logger_listener.stop()
        if sim_error:
            raise sim_error
        return filepaths

    def start_hpc_simulation(self, **kwargs):
        """

        Initializes the simulation in a new thread. It is called by start_simulation using its arguments.

        """
        if self.timeout:
            init_sim_time = time()
            ontime = True
        # =======================================================================
        # Load events corresponding at the "current time" and the next one
        # =======================================================================
        event_dict = self.mapper.events
        self.start_simulation_time = clock()
        self.constants.load_constant('start_simulation_time',
                                     self.start_simulation_time)

        self._logger.info('Starting the simulation process.')
        self.load_events(self.mapper.current_time, event_dict, self.mapper,
                         self.max_sample)
        events = self.mapper.next_events()

        # =======================================================================
        # Loop until there are not loaded, queued and running jobs
        # =======================================================================
        while events or self.mapper.has_events():
            current_time = self.mapper.current_time
            benchStartTime = clock() * 1000
            self.mapper.release_ended_events(event_dict)
            queued_len = len(events)

            # ===================================================================
            # External behavior
            # ===================================================================
            # self.execute_additional_data()

            schedEndTime = schedStartTime = clock() * 1000

            if events:
                # to_dispatch, rejected = self.dispatcher.schedule(self.mapper.current_time, event_dict, events)
                to_dispatch, rejected = self.mapper.call_dispatcher(
                    event_dict, events)
                for r in rejected:
                    del event_dict[r]
                dispatched_len = len(to_dispatch)
                rejected_len = len(rejected)
                assert(queued_len == dispatched_len + rejected_len), 'Some queued jobs ({}/{}) were not included in the dispatching decision.'\
                    .format(dispatched_len + rejected_len, queued_len)
                self.rejected_jobs += rejected_len

                schedEndTime = clock() * 1000
                time_diff = int((schedEndTime - schedStartTime) / 1000)

                (n_disp, n_disp_finish,
                 n_post) = self.mapper.dispatch_events(event_dict, to_dispatch,
                                                       time_diff)
                self.dispatched_jobs += n_disp + n_disp_finish

            # ===================================================================
            # Loading next jobs based on Time points
            # ===================================================================
            if len(self.mapper.loaded) < 10:
                sample = self.max_sample if (
                    len(self.mapper.loaded) < self.max_sample) else 2
                self.load_events(current_time, event_dict, self.mapper, sample)
            # ===================================================================
            # Continue with next events
            # ===================================================================
            events = self.mapper.next_events()

            if self.constants.BENCHMARK_OUTPUT:
                benchEndTime = clock() * 1000
                benchMemUsage = self._process_obj.memory_info().rss / float(2**
                                                                            20)
                scheduleTime = schedEndTime - schedStartTime
                dispatchTime = benchEndTime - benchStartTime - scheduleTime

                self._usage_writer.push(
                    (current_time, queued_len, benchEndTime - benchStartTime,
                     scheduleTime, dispatchTime, benchMemUsage))

            if self.timeout and self.timeout <= int(time() - init_sim_time):
                ontime = False
                break

        self.end_simulation_time = clock()
        if not self.timeout or self.timeout and ontime:
            assert (self.loaded_jobs == self.dispatched_jobs + self.rejected_jobs), \
                'Loaded {} != dispatched + rejected {} jobs'.format(self.loaded_jobs, self.dispatched_jobs + self.rejected_jobs)

        self.statics_write_out(self.constants.SHOW_STATISTICS,
                               self.constants.STATISTICS_OUTPUT)
        self._logger.info('Simulation process completed.')
        self.mapper.current_time = None

    @staticmethod
    def usage_metrics_preprocessor(entry):
        """
        To be used as a pre-processor for AsyncWriter objects applied to usage metrics.
        Pre-processes a tuple of usage metrics containing 6 fields. The fields are the following:

        - time: the timestamp relative to the simulation step
        - queueSize: the size of the queue at the simulation step (before scheduling)
        - stepTime: the total time required to perform the simulation step
        - schedTime: the time related to the scheduling procedure
        - simTime: the remaining time used in the step, related to the simulation process
        - memUsage: memory usage (expressed in MB) at the simulation step

        :param entry: Tuple of data to be written to output
        """
        sep_token = ';'
        bline = sep_token.join([str(v) for v in entry]) + '\n'
        return bline

    def statics_write_out(self, show, save):
        """

        Write the statistic output file

        :param show: True for showing the statistics, False otherwise.
        :param save: True for saving the statistics, False otherwise.

        """
        if not (show or save):
            return
        wtimes = self.mapper.wtimes
        slds = self.mapper.slowdowns
        sim_time_ = 'Simulation time: {0:.2f} secs\n'.format(
            self.end_simulation_time - self.start_simulation_time)
        disp_method_ = 'Dispatching method: {}\n'.format(self.mapper.dispatcher)
        total_jobs_ = 'Total jobs: {}\n'.format(self.loaded_jobs)
        makespan_ = 'Makespan: {}\n'.format(
            self.mapper.last_run_time -
            self.mapper.first_time_dispatch if self.mapper.last_run_time
            and self.mapper.first_time_dispatch else 'NA')
        if wtimes:
            avg_wtimes_ = 'Avg. waiting times: {:.2f}\n'.format(
                reduce(lambda x, y: x + y, wtimes) / float(len(wtimes)))
        else:
            avg_wtimes_ = 'Avg. waiting times: NA\n'

        if slds:
            avg_slowdown_ = 'Avg. slowdown: {:.2f}\n'.format(
                reduce(lambda x, y: x + y, slds) / float(len(slds)))
        else:
            avg_slowdown_ = 'Avg. slowdown: NA\n'

        if show:
            self._logger.info('\t ' + sim_time_[:-1])
            self._logger.info('\t ' + disp_method_[:-1])
            self._logger.info('\t ' + total_jobs_[:-1])
            self._logger.info('\t ' + makespan_[:-1])
            self._logger.info('\t ' + avg_wtimes_[:-1])
            self._logger.info('\t ' + avg_slowdown_[:-1])

        if save:
            _filepath = path.join(
                self.constants.RESULTS_FOLDER_PATH,
                self.constants.STATISTICS_PREFIX +
                self.constants.WORKLOAD_FILENAME)
            with open(_filepath, 'a') as f:
                f.write(sim_time_)
                f.write(disp_method_)
                f.write(total_jobs_)
                f.write(makespan_)
                f.write(avg_wtimes_)
                f.write(avg_slowdown_)

    def load_events(self, current_time, jobs_dict, mapper, time_samples=2):
        """

        Incremental loading. Loads the next batch of jobs into the job dictionary and the event mapper.

        :param current_time: Current simulation time.
        :param jobs_dict: Dictionary of the current load, queued and running jobs
        :param mapper: Job event mapper object
        :param time_samples: Default 2. It loads the next two time steps.

        """
        next_tpoints, parsed_jobs = self.reader.next(current_time,
                                                     time_points=time_samples)
        tmp_dict = {}
        job_list = []

        for next_tpoint in next_tpoints:
            for job in parsed_jobs[next_tpoint]:
                self.loaded_jobs += 1
                tmp_dict[job.id] = job
                job_list.append(job)
        mapper.load_events(job_list)
        jobs_dict.update(tmp_dict)

    def _loaded_jobs(self):
        return sum([len(jobs) for _, jobs in self.mapper.loaded.items()])

    def daemon_init(self):
        """

        Initialization of the simulation daemons. I.e. system_utilization or system_status

        """
        _iter_func = lambda act, next: act.get(next) if isinstance(
            act, dict) else (getattr(act, next)() if callable(
                getattr(act, next)) else getattr(act, next))
        for _name, d in self.daemons.items():
            _class = d['class']
            if not _class:
                continue
            _args = []
            for _arg in d['args']:
                if isinstance(_arg, tuple):
                    res = reduce(_iter_func, _arg[1].split('.'),
                                 self if not _arg[0] else _arg[0])
                    _args.append(res)
                else:
                    _args.append(_arg)
            self.daemons[_name]['object'] = _class(*_args)
            self.daemons[_name]['object'].start()

    def default_job_description(self):
        """

        Method that returns the minimal set of job attributes: user_id, expected_duration, queue, core and mem.

        :return: List of AttributeType objects

        """
        # Attribute to identify the user
        user_id = AttributeType('user_id', int)

        # New attributes required by the Dispatching methods.
        expected_duration = AttributeType('expected_duration', int)
        queue = AttributeType('queue', int)

        # Default system resources: core and mem.
        total_cores = AttributeType('core', int)
        total_mem = AttributeType('mem', int)

        return [total_cores, total_mem, expected_duration, queue, user_id]

    def extended_job_description(self):
        """

        Method that returns an extended set of job attributes: the default ones plus the optional executable, group_id, status and partition attributes.

        :return: List of AttributeType objects

        """
        description = self.default_job_description()

        executable = AttributeType('executable', str, optional=True)
        group_id = AttributeType('group_id', int, optional=True)
        status = AttributeType('status', int, optional=True)
        partition = AttributeType('partition', int, optional=True)

        return description + [executable, group_id, status, partition]
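The two methods above compose a job description as a plain list of AttributeType objects, so a customized description is just the default list plus whatever extra fields a workload needs. The sketch below is illustrative only: the base class name (Simulator) is an assumption, and only the AttributeType usage mirrors the code above.

# Illustrative sketch, not project code: extending the job description with one
# extra optional attribute. "Simulator" stands in for the class defined above.
class MyWorkloadSimulator(Simulator):

    def extended_job_description(self):
        description = super().extended_job_description()
        # e.g. an optional per-job energy estimate, following the same
        # AttributeType('name', type, optional=True) pattern used above
        energy = AttributeType('energy_estimate', int, optional=True)
        return description + [energy]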
Code example #22
    def track_process(self, p: psutil.Process):
        with p.oneshot():
            key = None
            if p.pid in self.pids:
                key = self.pids[p.pid]
                if self.processes[key].name != p.name():
                    key = None
            if key is None:
                key = len(self.processes)
                self.processes.append(ProcessInfo(key, p.pid, p.name()))
                self.pids[p.pid] = key
                self.data.update({
                    f'process.{key}.name': p.name(),
                    f'process.{key}.pid': p.pid,
                    f'process.{key}.ppid': p.ppid(),
                    f'process.{key}.create_time': p.create_time(),
                })

                try:
                    self.data.update({
                        f'process.{key}.exe': p.exe(),
                    })
                except (psutil.AccessDenied, psutil.ZombieProcess):
                    pass

                try:
                    self.data.update({
                        f'process.{key}.cmdline': '\n'.join(p.cmdline()),
                    })
                except (psutil.AccessDenied, psutil.ZombieProcess):
                    pass

            self.processes[key].active = True

            try:
                res = p.memory_info()
                self.data.update({
                    f'process.{key}.rss': res.rss,
                    f'process.{key}.vms': res.vms,
                })
            except (psutil.AccessDenied, psutil.ZombieProcess):
                pass

            try:
                res = p.cpu_times()
                self.data.update({
                    f'process.{key}.user': res.user,
                    f'process.{key}.system': res.system,
                })
                if hasattr(res, 'iowait'):
                    self.data.update({
                        f'process.{key}.iowait': res.iowait,
                    })
            except (psutil.AccessDenied, psutil.ZombieProcess):
                pass

            try:
                res = p.cpu_percent()
                self.data.update({
                    f'process.{key}.cpu': res,
                })
            except (psutil.AccessDenied, psutil.ZombieProcess):
                pass

            try:
                res = p.num_threads()
                self.data.update({
                    f'process.{key}.threads': res,
                })
            except (psutil.AccessDenied, psutil.ZombieProcess):
                pass
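A minimal, self-contained sketch of how a tracker like the one above might be driven: the ProcessInfo container, the flat metric dict and the polling loop are assumptions made for illustration, and only the psutil calls (oneshot, memory_info, cpu_percent and the AccessDenied/ZombieProcess handling) mirror the example.

# Sketch only: a stripped-down tracker plus a driver loop over psutil.process_iter().
import time
from dataclasses import dataclass

import psutil


@dataclass
class ProcessInfo:
    key: int
    pid: int
    name: str
    active: bool = False


class MiniTracker:
    def __init__(self):
        self.processes = []   # list of ProcessInfo, indexed by key
        self.pids = {}        # pid -> key
        self.data = {}        # flat metric dict, e.g. 'process.0.rss'

    def track_process(self, p: psutil.Process):
        with p.oneshot():                     # cache syscall results for this process
            key = self.pids.get(p.pid)
            if key is None:
                key = len(self.processes)
                self.processes.append(ProcessInfo(key, p.pid, p.name()))
                self.pids[p.pid] = key
            try:
                mem = p.memory_info()
                self.data[f'process.{key}.rss'] = mem.rss
                self.data[f'process.{key}.cpu'] = p.cpu_percent()
            except (psutil.AccessDenied, psutil.ZombieProcess, psutil.NoSuchProcess):
                pass


if __name__ == '__main__':
    tracker = MiniTracker()
    for _ in range(3):                        # a few sampling rounds
        for proc in psutil.process_iter():
            try:
                tracker.track_process(proc)
            except psutil.NoSuchProcess:
                pass
        time.sleep(1)
    print(f'{len(tracker.data)} metrics collected')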
Code example #23
    async def __main__(self, CommandArgs):
        response = CommandArgs.response

        clusters  = 0
        total_mem = 0

        process = Process(getpid())
        process_mem = math.floor(process.memory_info()[0] / float(2 ** 20))

        if IS_DOCKER:
            total_guilds = guilds = 0
            total_mem = 0
            errored = 0

            stats = await broadcast(None, type="STATS")
            clusters = len(stats)

            for cluster_id, cluster_data in stats.items():
                if cluster_data in ("cluster offline", "cluster timeout"):
                    errored += 1
                else:
                    total_guilds += cluster_data[0]
                    total_mem += cluster_data[1]

            if errored:
                guilds = f"{total_guilds} ({len(self.client.guilds)}) ({errored} errored)"
            else:
                guilds = f"{total_guilds} ({len(self.client.guilds)})"

        else:
            total_guilds = guilds = str(len(self.client.guilds))
            clusters = 1
            total_mem = process_mem

        seconds = math.floor(time() - STARTED)

        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)

        days, hours, minutes, seconds = None, None, None, None

        if d:
            days = f"{d}d"
        if h:
            hours = f"{h}h"
        if m:
            minutes = f"{m}m"
        if s:
            seconds = f"{s}s"

        uptime = f"{days or ''} {hours or ''} {minutes or ''} {seconds or ''}".strip()
        mem = f"{total_mem} ({process_mem})" if IS_DOCKER else process_mem

        embed = Embed(description=f"Roblox Verification made easy! Features everything you need to integrate your Discord server with Roblox.")
        embed.set_author(name=Bloxlink.user.name, icon_url=Bloxlink.user.avatar_url)

        embed.add_field(name="Servers", value=guilds)
        embed.add_field(name="Node Uptime", value=uptime)
        embed.add_field(name="Memory Usage", value=f"{mem} MB")

        embed.add_field(name="Resources", value="**[Website](https://blox.link)** | **[Discord](https://blox.link/support)** | **[Invite Bot]"
                             "(https://blox.link/invite)** | **[Premium](https://blox.link/premium)**\n\n**[Repository](https://github.com/bloxlink/Bloxlink)**",
                             inline=False)

        embed.set_footer(text=f"Shards: {self.shard_range} | Node: {CLUSTER_ID}/{clusters-1}")

        await response.send(embed=embed)

        if IS_DOCKER and RELEASE == "MAIN":
            await self.r.table("miscellaneous").insert({
                "id": "stats",
                "stats": {
                    "guilds": total_guilds,
                    "memory": total_mem,
                    "uptime": uptime,
                    "clusters": clusters
                }

            }, conflict="update").run()
Code example #24
    def memory_usage(self):
        p = Process(self._process.pid)
        mem = p.memory_info()
        return mem.rss
Code example #25
class FactorioManager(object):
    def __init__(self, name: str, port: int, root_path, is_steam=False):
        self.name = name
        self.port = port
        self.process = None
        self.root_path = Path(root_path)
        self.log = make_log(f'{name}_factorio')
        self.update_available = False
        self._ps_proc = None
        self._virtual_mem = VIRTUAL_MEMORY
        self._is_steam = is_steam
        self._player_data = None
        self._config = None
        self._server_config = None
        self._version_info = None
        self._available_versions = None
        self._temp_update = None
        self._log_queue = deque(maxlen=20)
        self._status_history = deque(maxlen=50)

    @property
    def version(self):
        if self._version_info is None:
            self.set_version_info()
        return self._version_info['version']

    @property
    def build(self):
        if self._version_info is None:
            self.set_version_info()
        return self._version_info['build']

    @property
    def build_num(self):
        if self._version_info is None:
            self.set_version_info()
        return self._version_info['build']['number']

    @property
    def build_platform(self):
        if self._version_info is None:
            self.set_version_info()
        return self._version_info['build']['platform']

    @property
    def build_mode(self):
        if self._version_info is None:
            self.set_version_info()
        return self._version_info['build']['mode']

    @property
    def bin_version(self):
        if self._version_info is None:
            self.set_version_info()
        return self._version_info['binary version']

    @property
    def map_in_version(self):
        if self._version_info is None:
            self.set_version_info()
        return self._version_info['map input version']

    @property
    def map_out_version(self):
        if self._version_info is None:
            self.set_version_info()
        return self._version_info['map output version']

    @property
    def is_experimental(self):
        if not self._available_versions:
            t = self.fetch_factorio_versions()
            t.join()
        if self.version_list.index(self.stable) < self.version_list.index(
                self.version):
            return True
        return False

    @property
    def stable(self):
        if not self._available_versions:
            t = self.fetch_factorio_versions()
            t.join()
        return self._available_versions['stable']

    @property
    def version_list(self):
        if not self._available_versions:
            t = self.fetch_factorio_versions()
            t.join()
        return self._available_versions['version_list']

    @property
    def experimental_version_list(self):
        if not self._available_versions:
            t = self.fetch_factorio_versions()
            t.join()
        return self.version_list[self.version_list.index(self.stable) + 1:]

    @property
    def stable_version_list(self):
        if not self._available_versions:
            t = self.fetch_factorio_versions()
            t.join()
        return self.version_list[:self.version_list.index(self.stable) + 1]

    @property
    def executable(self):
        if OS_WIN:
            return (self.root_path / 'bin' / 'x64' / 'factorio.exe').resolve()
        else:
            return (self.root_path / 'bin' / 'x64' / 'factorio').resolve()

    @property
    def save_file(self):
        return (self.root_path / 'saves' / self.name /
                f'{self.name}.zip').resolve()

    @property
    def player_data(self):
        if not self._player_data:
            self._player_data = json.load(
                (self.root_path / 'player-data.json').resolve().open())
        return self._player_data

    @property
    def service_username(self):
        if not self._player_data:
            self._player_data = json.load(
                (self.root_path / 'player-data.json').resolve().open())
        return self._player_data['service-username']

    @property
    def service_token(self):
        if not self._player_data:
            self._player_data = json.load(
                (self.root_path / 'player-data.json').resolve().open())
        return self._player_data['service-token']

    @property
    def config(self):
        if not self._config:
            conf_parser = ConfigParser()
            # ConfigParser.read() returns the list of parsed filenames,
            # so keep the parser itself rather than the return value.
            conf_parser.read(
                (self.root_path / 'config' /
                 'config.ini').resolve().as_posix())
            self._config = conf_parser
        return self._config

    @property
    def server_config(self):
        if not self._server_config:
            self._server_config = json.load(
                (self.root_path / 'config' /
                 'server-settings.json').resolve().open())
        return self._server_config

    @server_config.setter
    def server_config(self, config):
        self._server_config = merge_two_dicts(config, self._server_config)
        save_settings(self._server_config, (self.root_path / 'config' /
                                            'server-settings.json').resolve())

    @property
    def bits(self):
        if self.build_platform[-2:] == '64':
            return '64'
        else:
            return '32'  # I don't have any 32 bit systems so I wasn't sure what factorio would respond with

    @property
    def core_str(self):
        core = f'core-{self.build_platform[:-2]}'
        if self.build_mode == 'headless':
            core = core + '_headless'
        core = core + self.bits
        return core

    def set_version_info(self):
        log.info(f'Getting the version info for {self.name}')
        commands = [self.executable.as_posix(), '--version']
        p = Popen(commands, stdout=PIPE, stderr=PIPE)
        std_out, std_err = p.communicate()
        self._version_info = std_out.decode().splitlines()
        self._version_info = {
            l.split(':')[0].lower(): l.split(':')[1]
            for l in self._version_info
        }
        self._version_info['build'] = self._version_info['version'].split(
            '(')[1]
        self._version_info['build'] = self._version_info['build'].replace(
            ')', '').split(', ')
        self._version_info['build'] = {
            'number': self._version_info['build'][0].replace('build', ''),
            'platform': self._version_info['build'][1],
            'mode': self._version_info['build'][2]
        }
        self._version_info['version'] = self._version_info['version'].split(
            '(')[0].strip()

    def status(self):
        if self.process:
            if not self._ps_proc:
                self._ps_proc = Process(self.process.pid)
            try:
                data = {
                    'status': self._ps_proc.status(),
                    'cpu': self._ps_proc.cpu_percent(interval=2),
                    'mem': naturalsize(self._ps_proc.memory_info().rss),
                    'mem_raw': self._ps_proc.memory_info().rss,
                    'available_mem': naturalsize(self._virtual_mem.available),
                    'available_mem_raw': self._virtual_mem.available,
                    'total_mem': naturalsize(TOTAL_MEMORY),
                    'total_mem_raw': TOTAL_MEMORY,
                }
            except (NoSuchProcess, AttributeError):
                log.warn(
                    f'Factorio Process {self.name} does not exist anymore')
                return
            self._status_history.appendleft(data)
            return list(self._status_history)

    @run_in_thread
    def start(self):
        log.info(f'Starting Factorio instance {self.name}')
        if self.name in app_settings.factorio_instances:
            if isinstance(self.process, Popen):
                # TODO: need to do more here to actually check if it is running
                log.warn(f'{self.name} factorio instance is already running')
                return
        if self.name not in app_settings.factorio_instances:
            log.warn(f'{self.name} factorio instance does not exist')
            return
        commands = [
            self.executable.as_posix(), '--start-server', self.save_file,
            '--port',
            str(self.port)
        ]
        log.debug(f'Starting {self.name}')
        self.process = Popen(commands, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        self.output_log()

    @run_in_thread
    def output_log(self):
        while True:
            std_out = self.process.stdout.readline()
            if std_out:
                std_out = std_out.decode()
                self._log_queue.append('{} {}: {}'.format(
                    datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3],
                    self.name.upper(), std_out.replace('\n', '')))
                self.log.info(std_out)
            else:
                sleep(.05)
            if self.process is None:
                break
            if self.process.poll() is not None:
                break

    def get_log_line(self):
        if len(self._log_queue):
            return self._log_queue.pop()
        else:
            return

    @run_in_thread
    def stop(self):
        log.debug(f'Stopping {self.name}')
        if self.process:
            self.process.terminate()
            self.process = None
            self._ps_proc = None

    @run_in_thread
    def kill(self):
        log.debug(f'Killing {self.name}')
        if self.process:
            self.process.kill()
            self.process = None
            self._ps_proc = None

    @run_in_thread
    def send_command(self, command):
        # TODO: This does not work. No idea how it should work
        if self.process:
            self.process.communicate(f'{command}\n'.encode())

    def create_save_file(self,
                         map_gen_file_path=None,
                         map_settings_path=None,
                         preset=None,
                         map_preview_path=None):
        if not self.save_file.is_file():
            if not (self.save_file / '..').resolve().is_dir():
                (self.save_file / '..').resolve().mkdir()
            commands = [
                self.executable.as_posix(), '--create',
                self.save_file.as_posix()
            ]
            # TODO: Add the optional arguments to commands
            p = Popen(commands, stdout=PIPE, stderr=PIPE)
            log.info(p.communicate())

    def get_version_info(self):
        if self._version_info is None:
            self.set_version_info()
        return self._version_info

    def check_for_update(self):
        self.get_version_info()
        t = self.fetch_factorio_versions()
        t.join()
        if self.is_experimental:
            version_list = self.experimental_version_list
        else:
            version_list = self.stable_version_list
        if self.version != version_list[-1]:
            self.update_available = version_list[-1]
            return version_list[-1]
        else:
            self.update_available = False

    def get_download_link(self, version):
        get_link_url = 'https://www.factorio.com/get-download-link'
        update_version_info = list(
            filter(lambda x: x['to'] == version,
                   self._available_versions['available_versions']))[0]
        data = {
            'username': self.service_username,
            'token': self.service_token,
            'package': self.core_str,
            'from': update_version_info['from'],
            'to': update_version_info['to'],
            'apiVersion': 2
        }
        req = request.Request(get_link_url + '?' + parse.urlencode(data))
        resp = request.urlopen(req)
        download_link = json.loads(resp.read())
        return download_link[0]

    def download_update(self, version):
        link = self.get_download_link(version)
        log.info(link)
        with TqdmUpTo(unit='B',
                      unit_scale=True,
                      miniters=1,
                      desc=link.split('/')[-1]) as t:
            self._temp_update = request.urlretrieve(
                link, reporthook=t.download_progress)[0]

    @run_in_thread
    def apply_update(self):
        if self._temp_update:
            commands = [
                self.executable.as_posix(), '--apply-update', self._temp_update
            ]
            p = Popen(commands, stdout=PIPE, stderr=PIPE)
            log.info(p.communicate())
            p.terminate()
        self.set_version_info()
        self.update_available = False

    @run_in_thread
    def fetch_factorio_versions(self):
        available_versions_url = 'https://www.factorio.com/get-available-versions'
        data = parse.urlencode({
            'username': self.service_username,
            'token': self.service_token,
            'apiVersion': 2
        })
        req = request.Request(available_versions_url + '?' + data)
        resp = request.urlopen(req)
        json_resp = json.loads(resp.read())
        available_versions = json_resp[self.core_str]
        stable_version = next(x for x in available_versions
                              if 'stable' in x)['stable']
        available_versions = [x for x in available_versions
                              if 'stable' not in x]
        version_list = sorted(
            available_versions,
            key=lambda s: [int(u) for u in s['to'].split('.')])
        version_list = [u['to'] for u in version_list]
        self._available_versions = {
            'stable': stable_version,
            'version_list': version_list,
            'available_versions': available_versions
        }
        return json_resp
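Every version/build property above re-checks "self._version_info is None" before delegating to set_version_info(). On Python 3.8+ the same lazy, compute-once behaviour can be written with functools.cached_property; the snippet below is a generic sketch of that pattern (the class name, attribute layout and sample output line are assumptions, not code from the project above).

# Generic sketch of the lazy-initialisation pattern, using functools.cached_property.
from functools import cached_property
from subprocess import PIPE, Popen


class VersionProbe:
    def __init__(self, executable: str):
        self.executable = executable

    @cached_property
    def version_output(self) -> str:
        # Runs "<executable> --version" once; later accesses reuse the cached string.
        out, _ = Popen([self.executable, '--version'], stdout=PIPE, stderr=PIPE).communicate()
        return out.decode()

    @cached_property
    def version(self) -> str:
        # Assumes the first line looks like "Version: 1.1.87 (build 59435, linux64, headless)".
        return self.version_output.splitlines()[0].split(':', 1)[1].split('(')[0].strip()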
Code example #26
            path_query_where[metadata_list[randint(0, index) % metadata_len]] = {}
            path_query_return[metadata_list[randint(0, index) % metadata_len]] = {}
        path_query_list.append(PathQuery(randint(0, port_range), randint(0, k), randint(0, port_range), randint(0, k),
                                         path_query_where, randint(1, index + 1), path_query_return,
                                         category[randint(0, 1)]))
    query_list = node_query_list + path_query_list
    return query_list


if __name__ == '__main__':
    k_fwd = 100
    k_tele = 10
    p = Process()
    metadata_len = len(metadata_list)
    category = ['performance', 'failure']
    f = open('testsdata/fig_router_memory_500.txt', 'w')
    M_bytes = 2 ** 20.0

    for k in xrange(4, 100, 2):
        fat_tree = FatTreeTopology(k=k)
        router_cnt = 5 * k * k / 4
        query_list = get_query_list(k / 2 - 1, router_cnt - 1)

        optimize_probe_pkt_list = optimize_generate_probe_set(query_list, fat_tree, k_fwd, k_tele)
        memory = p.memory_info()[0] / M_bytes

        print '{} {} {}'.format(k, router_cnt, memory)

        f.write('{} {} {} \n'.format(k, router_cnt, memory))
    f.close()
Code example #27
File: ipc.py Project: WaltDisneyWorld/Rolink
    async def handle_message(self, message):
        message = json.loads(str(message["data"], "utf-8"))

        data = message["data"]
        type = message["type"]
        nonce = message["nonce"]
        original_cluster = message.get("original_cluster")
        waiting_for = message.get("waiting_for")
        cluster_id = message.get("cluster_id")
        extras = message.get("extras", {})

        if type == "IDENTIFY":
            # we're syncing this cluster with ourselves, and send back our clusters
            if original_cluster == CLUSTER_ID:
                if isinstance(data, int):
                    self.clusters.add(data)
                else:
                    for x in data:
                        self.clusters.add(x)
            else:
                self.clusters.add(original_cluster)

                data = json.dumps({
                    "nonce": None,
                    "cluster_id": CLUSTER_ID,
                    "data": list(self.clusters),
                    "type": "IDENTIFY",
                    "original_cluster": original_cluster,
                    "waiting_for": waiting_for
                })

                await self.redis.publish(
                    f"{RELEASE}:CLUSTER_{original_cluster}", data)

        elif type == "VERIFICATION":
            discord_id = int(data["discordID"])
            guild_id = int(data["guildID"])
            roblox_id = data["robloxID"]
            #roblox_accounts = data["robloxAccounts"]

            guild = Bloxlink.get_guild(guild_id)

            if guild:
                member = guild.get_member(discord_id)

                if not member:
                    try:
                        member = await guild.fetch_member(discord_id)
                    except NotFound:
                        return

                roblox_user, _ = await get_user(roblox_id=roblox_id)

                try:
                    added, removed, nickname, errors, roblox_user = await guild_obligations(
                        member,
                        guild=guild,
                        roles=True,
                        nickname=True,
                        roblox_user=roblox_user,
                        cache=False,
                        dm=False,
                        exceptions=("Blacklisted", "BloxlinkBypass",
                                    "RobloxAPIError", "RobloxDown",
                                    "PermissionError"))

                except Blacklisted as b:
                    blacklist_text = ""

                    if str(b):
                        blacklist_text = f"You have an active restriction for: ``{b}``"
                    else:
                        blacklist_text = f"You have an active restriction from Bloxlink."

                    try:
                        await member.send(
                            f"Failed to update you in the server: ``{blacklist_text}``"
                        )
                    except Forbidden:
                        pass

                except BloxlinkBypass:
                    try:
                        await member.send(
                            f"You have the ``Bloxlink Bypass`` role, so I am unable to update you in the server."
                        )
                    except Forbidden:
                        pass

                except RobloxAPIError:
                    try:
                        await member.send(
                            "An unknown Roblox API error occured, so I was unable to update you in the server. Please try again later."
                        )
                    except Forbidden:
                        pass

                except RobloxDown:
                    try:
                        await member.send(
                            "Roblox appears to be down, so I was unable to retrieve your Roblox information. Please try again later."
                        )
                    except Forbidden:
                        pass

                except PermissionError as e:
                    try:
                        await member.send(
                            f"A permission error occured, so I was unable to update you in the server: ``{e}``"
                        )
                    except Forbidden:
                        pass

                except CancelCommand:
                    pass

                else:
                    try:
                        await member.send(
                            f"Your account was successfully updated to **{roblox_user.username}** in the server **{guild.name}.**"
                        )
                    except Forbidden:
                        pass

                    guild_data = await self.r.table("guilds").get(str(
                        guild.id)).run() or {}  # FIXME: use cache

                    await post_event(
                        guild, guild_data, "verification",
                        f"{member.mention} has **verified** as ``{roblox_user.username}``.",
                        GREEN_COLOR)

        elif type == "EVAL":
            res = (await eval(data, codeblock=False)).description

            data = json.dumps({
                "nonce": nonce,
                "cluster_id": CLUSTER_ID,
                "data": res,
                "type": "CLIENT_RESULT",
                "original_cluster": original_cluster,
                "waiting_for": waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}",
                                     data)

        elif type == "CLIENT_RESULT":
            task = self.pending_tasks.get(nonce)

            if task:
                task[1][cluster_id] = data
                task[2] += 1
                waiting_for = message["waiting_for"] or len(self.clusters)

                if task[2] == waiting_for:
                    if not task[0].done():
                        task[0].set_result(True)

        elif type == "DM":
            if 0 in SHARD_RANGE:
                try:
                    message_ = await Bloxlink.wait_for(
                        "message",
                        check=lambda m: m.author.id == data and not m.guild,
                        timeout=PROMPT["PROMPT_TIMEOUT"])
                except asyncio.TimeoutError:
                    message_ = "cancel (timeout)"

                data = json.dumps({
                    "nonce": nonce,
                    "cluster_id": CLUSTER_ID,
                    "data": getattr(message_, "content", message_),
                    "type": "CLIENT_RESULT",
                    "original_cluster": original_cluster,
                    "waiting_for": waiting_for
                })

                await self.redis.publish(
                    f"{RELEASE}:CLUSTER_{original_cluster}", data)

        elif type == "STATS":
            seconds = floor(time() - STARTED)

            m, s = divmod(seconds, 60)
            h, m = divmod(m, 60)
            d, h = divmod(h, 24)

            days, hours, minutes, seconds = None, None, None, None

            if d:
                days = f"{d}d"
            if h:
                hours = f"{h}h"
            if m:
                minutes = f"{m}m"
            if s:
                seconds = f"{s}s"

            uptime = f"{days or ''} {hours or ''} {minutes or ''} {seconds or ''}".strip(
            )

            process = Process(getpid())
            mem = floor(process.memory_info()[0] / float(2**20))

            data = json.dumps({
                "nonce": nonce,
                "cluster_id": CLUSTER_ID,
                "data": (len(self.client.guilds), mem, uptime),
                "type": "CLIENT_RESULT",
                "original_cluster": original_cluster,
                "waiting_for": waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}",
                                     data)

        elif type == "PLAYING_STATUS":
            presence_type = extras.get("presence_type", "normal")
            playing_status = extras.get("status",
                                        PLAYING_STATUS).format(prefix=PREFIX)

            if presence_type == "normal":
                await Bloxlink.change_presence(status=Status.online,
                                               activity=Game(playing_status))
            elif presence_type == "streaming":
                stream_url = extras.get("stream_url",
                                        "https://twitch.tv/blox_link")

                await Bloxlink.change_presence(
                    activity=Streaming(name=playing_status, url=stream_url))

            data = json.dumps({
                "nonce": nonce,
                "cluster_id": CLUSTER_ID,
                "data": True,
                "type": "CLIENT_RESULT",
                "original_cluster": original_cluster,
                "waiting_for": waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}",
                                     data)
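The CLIENT_RESULT branch above fills a pending_tasks entry shaped like [future, results, counter], which some broadcast() helper (seen in code example #23 as "await broadcast(None, type='STATS')") is presumably awaiting. The sketch below shows one way such a helper could look; the pending_tasks layout and the publish round trip follow the handler above, while the nonce generation, the channel name and the timeout are assumptions.

# Hedged sketch of a broadcast() counterpart to the CLIENT_RESULT handler above.
import asyncio
import json
from uuid import uuid4


class IPCBroadcaster:
    def __init__(self, redis, release: str, cluster_id: str):
        self.redis = redis              # an async Redis client exposing .publish()
        self.release = release
        self.cluster_id = cluster_id
        self.pending_tasks = {}

    async def broadcast(self, data, type, timeout=10, waiting_for=None):
        nonce = str(uuid4())
        future = asyncio.get_running_loop().create_future()
        results = {}
        # same [future, results, counter] layout the CLIENT_RESULT branch expects
        self.pending_tasks[nonce] = [future, results, 0]

        payload = json.dumps({
            "nonce": nonce,
            "cluster_id": self.cluster_id,
            "data": data,
            "type": type,
            "original_cluster": self.cluster_id,
            "waiting_for": waiting_for
        })
        # channel name is a guess; the handler above only shows the reply channel
        await self.redis.publish(f"{self.release}:GLOBAL", payload)

        try:
            await asyncio.wait_for(future, timeout=timeout)
        except asyncio.TimeoutError:
            pass
        finally:
            self.pending_tasks.pop(nonce, None)

        return results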
Code example #28
class _ProcessMonitor:
    WARNING_THRESHOLD = 100 * 1024 * 1024

    busy = False

    def __init__(self):
        self.process = Process()
        self.peak_mem_res = 0
        self.low_mem_warning = False

    def monitor_task(self):
        if sys.stdout.isatty():

            while self.busy:
                try:
                    # only print the data out every 10 seconds
                    if datetime.now().second % 10 == 0:
                        info = self._get_info()

                        output.debug(info)
                    else:
                        # call get_mem so that we record peak more accurately
                        self._get_mem()

                    time.sleep(1)
                except Exception:
                    output.debug_exception()

                    self.busy = False

                    pass
        else:
            # if this isn't a TTY, no point in doing any of this
            self.busy = False

    def _get_info(self) -> str:
        from yawast.external.memory_size import Size

        # prime the call to cpu_percent, as the first call doesn't return useful data
        self.process.cpu_percent(interval=1)

        # use oneshot() to cache the data, so we minimize hits
        with self.process.oneshot():
            pct = self.process.cpu_percent()

            times = self.process.cpu_times()
            mem = self._get_mem()
            mem_res = "{0:cM}".format(Size(mem.rss))
            mem_virt = "{0:cM}".format(Size(mem.vms))

            thr = self.process.num_threads()

            vm = psutil.virtual_memory()
            mem_total = "{0:cM}".format(Size(vm.total))
            mem_avail_bytes = vm.available
            mem_avail = "{0:cM}".format(Size(vm.available))

            if mem_avail_bytes < self.WARNING_THRESHOLD and not self.low_mem_warning:
                self.low_mem_warning = True

                output.error(f"Low RAM Available: {mem_avail}")

            cons = -1
            try:
                cons = len(self.process.connections(kind="inet"))
            except Exception:
                # we don't care if this fails
                output.debug_exception()

            cpu_freq = psutil.cpu_freq()

        info = (f"Process Stats: CPU: {pct}% - Sys: {times.system} - "
                f"User: {times.user} - Res: {mem_res} - Virt: {mem_virt} - "
                f"Available: {mem_avail}/{mem_total} - Threads: {thr} - "
                f"Connections: {cons} - CPU Freq: "
                f"{int(cpu_freq.current)}MHz/{int(cpu_freq.max)}MHz")

        return info

    def _get_mem(self):
        mem = self.process.memory_info()

        if mem.rss > self.peak_mem_res:
            self.peak_mem_res = mem.rss
            output.debug(f"New high-memory threshold: {self.peak_mem_res}")

        return mem

    def __enter__(self):
        self.busy = True
        threading.Thread(target=self.monitor_task).start()

        return self

    def __exit__(self, exception, value, tb):
        self.busy = False

        if exception is not None:
            return False
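_ProcessMonitor is written as a context manager: __enter__ flips busy and starts the monitor thread, __exit__ flips it back so monitor_task() exits. A usage sketch (run_scan() is a placeholder for whatever long-running work is being profiled):

# Usage sketch for the context manager above; run_scan() is a placeholder.
with _ProcessMonitor() as monitor:
    run_scan()

# once the block exits, the background thread stops and the peak resident set
# size it observed is available:
print(monitor.peak_mem_res)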
Code example #29
File: ipc.py Project: F1sxher/Bloxlink-1
    async def handle_message(self, message):
        message = json.loads(str(message["data"], "utf-8"))

        data = message["data"]
        type = message["type"]
        nonce = message["nonce"]
        original_cluster = message.get("original_cluster")
        waiting_for = message.get("waiting_for")
        cluster_id = message.get("cluster_id")
        extras = message.get("extras", {})

        if type == "IDENTIFY":
            # we're syncing this cluster with ourselves, and send back our clusters
            if original_cluster == CLUSTER_ID:
                if isinstance(data, int):
                    self.clusters.add(data)
                else:
                    for x in data:
                        self.clusters.add(x)
            else:
                self.clusters.add(original_cluster)

                data = json.dumps({
                    "nonce": None,
                    "cluster_id": CLUSTER_ID,
                    "data": list(self.clusters),
                    "type": "IDENTIFY",
                    "original_cluster": original_cluster,
                    "waiting_for": waiting_for
                })

                await self.redis.publish(
                    f"{RELEASE}:CLUSTER_{original_cluster}", data)

        elif type == "VERIFICATION":
            discord_id = int(data["discordID"])
            guild_id = int(data["guildID"])
            roblox_id = data["robloxID"]
            #roblox_accounts = data["robloxAccounts"]

            guild = Bloxlink.get_guild(guild_id)

            if guild:
                member = guild.get_member(discord_id)

                if not member:
                    try:
                        member = await guild.fetch_member(discord_id)
                    except NotFound:
                        return

                try:
                    roblox_user, _ = await get_user(roblox_id=roblox_id)
                except RobloxDown:
                    try:
                        await member.send(
                            "Roblox appears to be down, so I was unable to retrieve your Roblox information. Please try again later."
                        )
                    except Forbidden:
                        pass

                    return

                except RobloxAPIError as e:
                    print(e, flush=True)

                    try:
                        await member.send(
                            "An unknown Roblox API error occured. Please try again later."
                        )
                    except Forbidden:
                        pass

                    return

                try:
                    added, removed, nickname, errors, warnings, roblox_user = await guild_obligations(
                        member,
                        guild=guild,
                        join=True,
                        roles=True,
                        nickname=True,
                        roblox_user=roblox_user,
                        cache=False,
                        dm=False,
                        exceptions=("Blacklisted", "BloxlinkBypass",
                                    "RobloxAPIError", "RobloxDown",
                                    "PermissionError"))

                except Blacklisted as b:
                    blacklist_text = ""

                    if isinstance(b.message, str):
                        blacklist_text = f"You have an active restriction for: `{b}`"
                    else:
                        blacklist_text = f"You have an active restriction from Bloxlink."

                    try:
                        await member.send(
                            f"Failed to update you in the server: `{blacklist_text}`"
                        )
                    except Forbidden:
                        pass

                except BloxlinkBypass:
                    try:
                        await member.send(
                            f"You have the `Bloxlink Bypass` role, so I am unable to update you in the server."
                        )
                    except Forbidden:
                        pass

                except RobloxAPIError:
                    try:
                        await member.send(
                            "An unknown Roblox API error occured, so I was unable to update you in the server. Please try again later."
                        )
                    except Forbidden:
                        pass

                except RobloxDown:
                    try:
                        await member.send(
                            "Roblox appears to be down, so I was unable to retrieve your Roblox information. Please try again later."
                        )
                    except Forbidden:
                        pass

                except PermissionError as e:
                    try:
                        await member.send(
                            f"A permission error occured, so I was unable to update you in the server: `{e}`"
                        )
                    except Forbidden:
                        pass

                except CancelCommand:
                    pass

                else:
                    verified_dm, guild_data = await get_guild_value(
                        guild, ["joinDM", ""], return_guild_data=True)
                    server_message = ""

                    if verified_dm and verified_dm != DEFAULTS.get(
                            "welcomeMessage"):
                        server_message = await get_nickname(
                            member,
                            verified_dm,
                            guild_data=guild_data,
                            roblox_user=roblox_user,
                            dm=True,
                            is_nickname=False)
                        server_message = f"\n\nThis message was set by the Server Admins:\n{server_message}"[:
                                                                                                             1500]

                    try:
                        await member.send(
                            f"Your account was successfully updated to **{roblox_user.username}** in the server **{guild.name}.**"
                            f"{server_message}")
                    except Forbidden:
                        pass

                    await post_event(
                        guild, guild_data, "verification",
                        f"{member.mention} has **verified** as `{roblox_user.username}`.",
                        GREEN_COLOR)

        elif type == "EVAL":
            """
            res = (await eval(data, codeblock=False)).description

            data = json.dumps({
                "nonce": nonce,
                "cluster_id": CLUSTER_ID,
                "data": res,
                "type": "CLIENT_RESULT",
                "original_cluster": original_cluster,
                "waiting_for": waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}", data)
            """
            pass

        elif type == "CLIENT_RESULT":
            task = self.pending_tasks.get(nonce)

            if task:
                task[1][cluster_id] = data
                task[2] += 1
                waiting_for = message["waiting_for"] or len(self.clusters)

                if task[2] == waiting_for:
                    if not task[0].done():
                        task[0].set_result(True)

        elif type == "DM":
            if 0 in SHARD_RANGE:
                try:
                    message_ = await Bloxlink.wait_for(
                        "message",
                        check=lambda m: m.author.id == data and not m.guild,
                        timeout=PROMPT["PROMPT_TIMEOUT"])
                except asyncio.TimeoutError:
                    message_ = "cancel (timeout)"

                data = json.dumps({
                    "nonce": nonce,
                    "cluster_id": CLUSTER_ID,
                    "data": getattr(message_, "content", message_),
                    "type": "CLIENT_RESULT",
                    "original_cluster": original_cluster,
                    "waiting_for": waiting_for
                })

                await self.redis.publish(
                    f"{RELEASE}:CLUSTER_{original_cluster}", data)

        elif type == "DM_AND_INTERACTION":
            if 0 in SHARD_RANGE:
                try:
                    task_1 = asyncio.create_task(
                        suppress_timeout_errors(
                            Bloxlink.wait_for(
                                "message",
                                check=lambda m: m.author.id == data and not m.guild,
                                timeout=PROMPT["PROMPT_TIMEOUT"])))
                    task_2 = asyncio.create_task(
                        suppress_timeout_errors(
                            Bloxlink.wait_for(
                                "interaction",
                                check=lambda i: i.user.id == data and not i.guild_id and i.data.get("custom_id"),
                                timeout=PROMPT["PROMPT_TIMEOUT"])))

                    result_set, pending = await asyncio.wait(
                        {task_1, task_2},
                        return_when=asyncio.FIRST_COMPLETED,
                        timeout=PROMPT["PROMPT_TIMEOUT"])

                    if result_set:
                        item = next(iter(result_set)).result()

                        if hasattr(item, "content"):
                            message_content = {
                                "type": "message",
                                "content": item.content
                            }
                        else:
                            if item.data["component_type"] == 3:
                                message_content = {
                                    "type": "select",
                                    "values": item.data["values"]
                                }
                            else:
                                message_content = {
                                    "type": "button",
                                    "content": item.data["custom_id"]
                                }
                    else:
                        message_content = {
                            "type": "message",
                            "content": "cancel (timeout)"
                        }

                except asyncio.TimeoutError:
                    message_content = {
                        "type": "message",
                        "content": "cancel (timeout)"
                    }

                data = json.dumps({
                    "nonce": nonce,
                    "cluster_id": CLUSTER_ID,
                    "data": message_content,
                    "type": "CLIENT_RESULT",
                    "original_cluster": original_cluster,
                    "waiting_for": waiting_for
                })

                await self.redis.publish(
                    f"{RELEASE}:CLUSTER_{original_cluster}", data)

        elif type == "STATS":
            seconds = floor(time() - STARTED)

            m, s = divmod(seconds, 60)
            h, m = divmod(m, 60)
            d, h = divmod(h, 24)

            days, hours, minutes, seconds = None, None, None, None

            if d:
                days = f"{d}d"
            if h:
                hours = f"{h}h"
            if m:
                minutes = f"{m}m"
            if s:
                seconds = f"{s}s"

            uptime = f"{days or ''} {hours or ''} {minutes or ''} {seconds or ''}".strip(
            )

            process = Process(getpid())
            mem = floor(process.memory_info()[0] / float(2**20))

            data = json.dumps({
                "nonce": nonce,
                "cluster_id": CLUSTER_ID,
                "data": (len(self.client.guilds), mem, uptime),
                "type": "CLIENT_RESULT",
                "original_cluster": original_cluster,
                "waiting_for": waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}",
                                     data)

        elif type == "USERS":
            data = json.dumps({
                "nonce":
                nonce,
                "cluster_id":
                CLUSTER_ID,
                "data": (sum([g.member_count for g in self.client.guilds]),
                         len(self.client.guilds)),
                "type":
                "CLIENT_RESULT",
                "original_cluster":
                original_cluster,
                "waiting_for":
                waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}",
                                     data)

        elif type == "PLAYING_STATUS":
            presence_type = extras.get("presence_type", "normal")
            playing_status = extras.get("status",
                                        PLAYING_STATUS).format(prefix=PREFIX)

            if presence_type == "normal":
                await Bloxlink.change_presence(status=Status.online,
                                               activity=Game(playing_status))
            elif presence_type == "streaming":
                stream_url = extras.get("stream_url",
                                        "https://twitch.tv/blox_link")

                await Bloxlink.change_presence(
                    activity=Streaming(name=playing_status, url=stream_url))

            data = json.dumps({
                "nonce": nonce,
                "cluster_id": CLUSTER_ID,
                "data": True,
                "type": "CLIENT_RESULT",
                "original_cluster": original_cluster,
                "waiting_for": waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}",
                                     data)
Code example #30
def memory_used():
    process = Process(getpid())
    return process.memory_info().rss  # https://pythonhosted.org/psutil/#psutil.Process.memory_info
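A helper like memory_used() is typically used to measure the resident-set-size growth around some allocation, as in the illustration below (the ten-million-element list is an arbitrary workload, and RSS deltas are only approximate since the allocator may keep freed memory).

# Illustration: rough RSS delta around an allocation, reusing the same
# Process(getpid()).memory_info().rss pattern as memory_used() above.
from os import getpid

from psutil import Process


def memory_used():
    return Process(getpid()).memory_info().rss


before = memory_used()
data = list(range(10_000_000))      # arbitrary workload
after = memory_used()
print(f"approx. growth: {(after - before) / 2 ** 20:.1f} MiB")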
Code example #31
    async def botinfo(self, ctx):
        """Viser info om meg"""

        dev = await self.bot.fetch_user(170506717140877312)

        now = time()
        diff = int(now - self.bot.uptime)
        days, remainder = divmod(diff, 24 * 60 * 60)
        hours, remainder = divmod(remainder, 60 * 60)
        minutes, seconds = divmod(remainder, 60)

        process = Process(getpid())
        memory_usage = round(process.memory_info().rss / 1000000, 1)
        cpu_percent = process.cpu_percent()

        total_members = []
        online_members = []
        idle_members = []
        dnd_members = []
        offline_members = []
        for guild in self.bot.guilds:
            for member in guild.members:
                if member.id in total_members:
                    continue
                total_members.append(member.id)
                if str(member.status) == 'online':
                    online_members.append(member.id)
                elif str(member.status) == 'idle':
                    idle_members.append(member.id)
                elif str(member.status) == 'dnd':
                    dnd_members.append(member.id)
                elif str(member.status) == 'offline':
                    offline_members.append(member.id)

        embed = discord.Embed(color=ctx.me.color, url=website)
        embed.set_author(name=dev.name, icon_url=dev.avatar_url)
        embed.set_thumbnail(url=self.bot.user.avatar_url)
        embed.add_field(name='Dev',
                        value=f'{dev.mention}\n{dev.name}#{dev.discriminator}')
        embed.add_field(name='Oppetid',
                        value=f'{days}d {hours}t {minutes}m {seconds}s')
        embed.add_field(name='Ping',
                        value=f'{int(self.bot.latency * 1000)} ms')
        embed.add_field(name='Servere', value=len(self.bot.guilds))
        embed.add_field(name='Discord.py Versjon', value=discord.__version__)
        embed.add_field(name='Python Versjon', value=platform.python_version())
        embed.add_field(name='Ressursbruk',
                        value=f'RAM: {memory_usage} MB\nCPU: {cpu_percent}%')
        embed.add_field(name='Maskin',
                        value=f'{platform.system()} {platform.release()}')
        embed.add_field(
            name=f'Brukere ({len(total_members)})',
            value=f'<:online:516328785910431754>{len(online_members)} ' +
            f'<:idle:516328783347843082>{len(idle_members)} ' +
            f'<:dnd:516328782844395579>{len(dnd_members)} ' +
            f'<:offline:516328785407246356>{len(offline_members)}')
        embed.add_field(
            name='Lenker',
            value='[Inviter](https://discordapp.com/oauth2/authorize?client_' +
            f'id={self.bot.user.id}&permissions=388174&scope=bot) ' +
            f'| [Nettside]({website}) | [Kildekode]({github})')
        await Defaults.set_footer(ctx, embed)
        await ctx.send(embed=embed)
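The member tally above keeps plain lists and tests "member.id in total_members", a linear scan per member; with many guilds a set gives O(1) de-duplication and a collections.Counter collects the per-status totals in one pass. A sketch, assuming the same discord.py guild/member objects (the helper name is ours):

# Sketch: same tally with a set and a Counter; guild.members / member.status are
# the discord.py attributes already used above.
from collections import Counter
from typing import Iterable, Tuple


def tally_member_statuses(guilds: Iterable) -> Tuple[int, Counter]:
    seen = set()
    counts = Counter()
    for guild in guilds:
        for member in guild.members:
            if member.id in seen:      # O(1) membership test instead of a list scan
                continue
            seen.add(member.id)
            counts[str(member.status)] += 1
    return len(seen), counts


# usage inside the command above would be something like:
#   total, counts = tally_member_statuses(self.bot.guilds)
#   counts['online'], counts['idle'], counts['dnd'], counts['offline']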
Code example #32
def get_memory_usage():
    """Return the memory usage in Mo."""
    process = Process(getpid())
    mem = process.memory_info()[0] / float(2**20)
    return mem
Code example #33
File: ops.py Project: Gonewithmyself/gitbitex-spot
    def pinfo(self, p: psutil.Process):
        s = '%-30s\t%d\t%.3f M' % (p.name(), p.pid,
                                   p.memory_info().rss / 1024 / 1024)
        print(s)
Code example #34
File: ipc.py Project: bloxlink/Bloxlink
    async def handle_message(self, message):
        message = json.loads(str(message["data"], "utf-8"))

        data = message["data"]
        type = message["type"]
        nonce = message["nonce"]
        original_cluster = message.get("original_cluster")
        waiting_for = message.get("waiting_for")
        cluster_id = message.get("cluster_id")
        extras = message.get("extras", {})

        if type == "IDENTIFY":
            # we're syncing this cluster with ourselves, and send back our clusters
            if original_cluster == CLUSTER_ID:
                if isinstance(data, int):
                    self.clusters.add(data)
                else:
                    for x in data:
                        self.clusters.add(x)
            else:
                self.clusters.add(original_cluster)

                response_data = json.dumps({
                    "nonce": None,
                    "cluster_id": CLUSTER_ID,
                    "data": list(self.clusters),
                    "type": "IDENTIFY",
                    "original_cluster": original_cluster,
                    "waiting_for": waiting_for
                })

                await self.redis.publish(
                    f"{RELEASE}:CLUSTER_{original_cluster}", response_data)

        elif type == "ACTION_REQUEST":
            action = data.get("action")
            action_type = data.get("type")
            guild_id = int(data.get("guildID"))

            guild = Bloxlink.get_guild(guild_id)

            if guild:
                response_data = {
                    "nonce": nonce,
                }

                if action == "request":
                    if action_type == "channels":
                        response_data["type"] = "channels"
                        response_data["result"] = [{
                            "id": str(c.id),
                            "name": c.name,
                            "position": c.position,
                            "type": c.type,
                        } for c in guild.channels]

                        response_data["success"] = True
                    elif action_type == "roles":
                        response_data["type"] = "roles"
                        response_data["result"] = [{
                            "id":
                            str(r.id),
                            "name":
                            r.name,
                            "position":
                            r.position,
                            "hoist":
                            r.hoist,
                            "managed":
                            r.managed,
                            "permissions":
                            r.permissions.value
                        } for r in guild.roles]

                        response_data["success"] = True
                    else:
                        response_data["success"] = False
                        response_data["error"] = "Invalid action type"

                elif action == "create":
                    error = None

                    if action_type == "roles":
                        role_name = data["name"]
                        role = discord.utils.find(
                            lambda r: r.name == role_name, guild.roles)

                        if not role:
                            try:
                                role = await guild.create_role(name=role_name)
                            except discord.errors.Forbidden:
                                error = "Insufficient permissions"
                            except discord.errors.HTTPException as e:
                                error = f"HTTP Exception -- {e}"

                        if not error:
                            response_data["result"] = {
                                "id": str(role.id),
                                "name": role.name,
                            }
                            response_data["success"] = True
                        else:
                            response_data["success"] = False
                            response_data["error"] = error

                    elif action_type == "webhooks":
                        channel_id = data["channelID"]
                        webhook_name = data["name"]
                        webhook_avatar = data["avatar"]

                        channel = guild.get_channel(int(channel_id))

                        if channel:
                            try:
                                webhook = await channel.create_webhook(
                                    name=webhook_name)
                            except discord.errors.Forbidden:
                                response_data["error"] = "Insufficient permissions"
                                response_data["success"] = False
                            except discord.errors.HTTPException as e:
                                response_data["error"] = f"HTTP Exception -- {e}"
                                response_data["success"] = False
                            else:
                                response_data["result"] = {
                                    "id": str(webhook.id),
                                    "token": webhook.token,
                                    "channelID": str(webhook.channel_id),
                                }
                                response_data["success"] = True
                        else:
                            response_data["error"] = "Channel not found"
                            response_data["success"] = False

                    else:
                        response_data["success"] = False
                        response_data["error"] = "Invald action type"

                else:
                    response_data["success"] = False
                    response_data["error"] = "Invald action type"

                await self.redis.publish(nonce, json.dumps(response_data))

        elif type == "VERIFICATION":
            if data.get("guildID"):  # ignore verifications by v2
                return

            discord_id = int(data["discordID"])
            guilds = data["guilds"]
            roblox_id = data["robloxID"]

            for guild_id in guilds:
                guild = Bloxlink.get_guild(int(guild_id))

                if guild:
                    member = guild.get_member(discord_id)

                    if not member:
                        try:
                            member = await guild.fetch_member(discord_id)
                        except discord.errors.NotFound:
                            return

                    if member.pending or guild.verification_level == discord.VerificationLevel.highest:
                        return

                    try:
                        roblox_user = (await get_user(roblox_id=roblox_id))[0]
                    except RobloxDown:
                        return

                    except RobloxAPIError as e:
                        print(e, flush=True)

                        return

                    try:
                        added, removed, nickname, errors, warnings, roblox_user = await guild_obligations(
                            member,
                            guild=guild,
                            join=True,
                            roles=True,
                            nickname=True,
                            roblox_user=roblox_user,
                            cache=False,
                            dm=False,
                            exceptions=("CancelCommand", "UserNotVerified",
                                        "Blacklisted", "BloxlinkBypass",
                                        "RobloxAPIError", "RobloxDown",
                                        "PermissionError"))

                    except (CancelCommand, UserNotVerified, Blacklisted,
                            BloxlinkBypass, RobloxAPIError, RobloxDown,
                            PermissionError):
                        pass

                    else:
                        try:
                            await post_event(
                                guild, "verification",
                                f"{member.mention} has **verified** as `{roblox_user.username}`.",
                                GREEN_COLOR)
                        except Error:
                            pass

        elif type == "EVAL":
            """
            res = (await eval(data, codeblock=False)).description

            data = json.dumps({
                "nonce": nonce,
                "cluster_id": CLUSTER_ID,
                "data": res,
                "type": "CLIENT_RESULT",
                "original_cluster": original_cluster,
                "waiting_for": waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}", data)
            """
            pass

        elif type == "CLIENT_RESULT":
            task = self.pending_tasks.get(nonce)

            if task:
                task[1][cluster_id] = data
                task[2] += 1
                waiting_for = message["waiting_for"] or len(self.clusters)

                if task[2] == waiting_for:
                    if not task[0].done():
                        task[0].set_result(True)

        elif type == "DM":
            if 0 in SHARD_RANGE:
                try:
                    message_ = await Bloxlink.wait_for(
                        "message",
                        check=lambda m: m.author.id == data and not m.guild,
                        timeout=PROMPT["PROMPT_TIMEOUT"])
                except asyncio.TimeoutError:
                    message_ = "cancel (timeout)"

                response_data = json.dumps({
                    "nonce": nonce,
                    "cluster_id": CLUSTER_ID,
                    "data": getattr(message_, "content", message_),
                    "type": "CLIENT_RESULT",
                    "original_cluster": original_cluster,
                    "waiting_for": waiting_for
                })

                await self.redis.publish(
                    f"{RELEASE}:CLUSTER_{original_cluster}", response_data)

        elif type == "DM_AND_INTERACTION":
            if 0 in SHARD_RANGE:
                try:
                    task_1 = asyncio.create_task(
                        suppress_timeout_errors(
                            Bloxlink.wait_for(
                                "message",
                                check=lambda m: m.author.id == data and not m.guild,
                                timeout=PROMPT["PROMPT_TIMEOUT"])))
                    task_2 = asyncio.create_task(
                        suppress_timeout_errors(
                            Bloxlink.wait_for(
                                "interaction",
                                check=lambda i: (i.user.id == data
                                                 and not i.guild_id
                                                 and i.data.get("custom_id")),
                                timeout=PROMPT["PROMPT_TIMEOUT"])))

                    result_set, pending = await asyncio.wait(
                        {task_1, task_2},
                        return_when=asyncio.FIRST_COMPLETED,
                        timeout=PROMPT["PROMPT_TIMEOUT"])

                    if result_set:
                        item = next(iter(result_set)).result()

                        if hasattr(item, "content"):
                            message_content = {
                                "type": "message",
                                "content": item.content
                            }
                        else:
                            if item.data["component_type"] == 3:
                                message_content = {
                                    "type": "select",
                                    "values": item.data["values"]
                                }
                            else:
                                message_content = {
                                    "type": "button",
                                    "content": item.data["custom_id"]
                                }
                    else:
                        message_content = {
                            "type": "message",
                            "content": "cancel (timeout)"
                        }

                except asyncio.TimeoutError:
                    message_content = {
                        "type": "message",
                        "content": "cancel (timeout)"
                    }

                response_data = json.dumps({
                    "nonce": nonce,
                    "cluster_id": CLUSTER_ID,
                    "data": message_content,
                    "type": "CLIENT_RESULT",
                    "original_cluster": original_cluster,
                    "waiting_for": waiting_for
                })

                await self.redis.publish(
                    f"{RELEASE}:CLUSTER_{original_cluster}", response_data)

        elif type == "STATS":
            seconds = floor(time() - STARTED)

            m, s = divmod(seconds, 60)
            h, m = divmod(m, 60)
            d, h = divmod(h, 24)

            days, hours, minutes, seconds = None, None, None, None

            if d:
                days = f"{d}d"
            if h:
                hours = f"{h}h"
            if m:
                minutes = f"{m}m"
            if s:
                seconds = f"{s}s"

            uptime = f"{days or ''} {hours or ''} {minutes or ''} {seconds or ''}".strip(
            )

            process = Process(getpid())
            mem = floor(process.memory_info()[0] / float(2**20))

            response_data = json.dumps({
                "nonce": nonce,
                "cluster_id": CLUSTER_ID,
                "data": (len(self.client.guilds), mem, uptime),
                "type": "CLIENT_RESULT",
                "original_cluster": original_cluster,
                "waiting_for": waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}",
                                     response_data)

        elif type == "USERS":
            response_data = json.dumps({
                "nonce": nonce,
                "cluster_id": CLUSTER_ID,
                "data": (sum(g.member_count for g in self.client.guilds),
                         len(self.client.guilds)),
                "type": "CLIENT_RESULT",
                "original_cluster": original_cluster,
                "waiting_for": waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}",
                                     response_data)

        elif type == "PLAYING_STATUS":
            presence_type = extras.get("presence_type", "normal")
            playing_status = extras.get("status", PLAYING_STATUS)

            if presence_type == "normal":
                await Bloxlink.change_presence(
                    status=discord.Status.online,
                    activity=discord.Game(playing_status))

            elif presence_type == "streaming":
                stream_url = extras.get("stream_url",
                                        "https://twitch.tv/blox_link")

                await Bloxlink.change_presence(activity=discord.Streaming(
                    name=playing_status, url=stream_url))

            response_data = json.dumps({
                "nonce": nonce,
                "cluster_id": CLUSTER_ID,
                "data": True,
                "type": "CLIENT_RESULT",
                "original_cluster": original_cluster,
                "waiting_for": waiting_for
            })

            await self.redis.publish(f"{RELEASE}:CLUSTER_{original_cluster}",
                                     response_data)
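Every branch above answers by publishing a CLIENT_RESULT payload keyed by nonce. The sending side is not part of this snippet; the sketch below is one plausible shape for it, inferred from how the CLIENT_RESULT branch reads self.pending_tasks (one [future, replies_by_cluster, reply_count] entry per nonce). The helper name broadcast_and_collect, the pending_tasks argument and the redis client parameter are assumptions for illustration, not the bot's actual API.

import asyncio
import json
import uuid


async def broadcast_and_collect(redis, channel, payload, pending_tasks,
                                expected_replies, timeout=10):
    """Publish a request under a fresh nonce and wait for CLIENT_RESULT replies."""
    nonce = str(uuid.uuid4())
    future = asyncio.get_running_loop().create_future()
    # Same layout the CLIENT_RESULT branch indexes: [future, {cluster_id: data}, count]
    pending_tasks[nonce] = [future, {}, 0]

    await redis.publish(channel, json.dumps({"nonce": nonce,
                                             "waiting_for": expected_replies,
                                             **payload}))
    try:
        await asyncio.wait_for(future, timeout)
    except asyncio.TimeoutError:
        pass  # return whatever replies arrived before the deadline

    return pending_tasks.pop(nonce)[1]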
コード例 #35
0
ファイル: text_ui.py プロジェクト: Clohman11/mpf
class TextUi(MpfController):
    """Handles the text-based UI."""

    config_name = "text_ui"

    __slots__ = [
        "start_time", "machine", "_tick_task", "screen", "mpf_process",
        "ball_devices", "switches", "config", "_pending_bcp_connection",
        "_asset_percent", "_player_widgets", "_machine_widgets", "_bcp_status",
        "frame", "layout", "scene", "footer_memory", "switch_widgets",
        "mode_widgets", "ball_device_widgets", "footer_cpu", "footer_mc_cpu",
        "footer_uptime", "delay", "_layout_change"
    ]

    def __init__(self, machine: "MachineController") -> None:
        """Initialize TextUi."""
        super().__init__(machine)
        self.delay = DelayManager(machine)
        self.config = machine.config.get('text_ui', {})

        self.screen = None

        if not machine.options['text_ui'] or not Scene:
            return

        # hack to add themes until https://github.com/peterbrittain/asciimatics/issues/207 is implemented
        THEMES["mpf_theme"] = defaultdict(
            lambda:
            (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLACK), {
                "active_switch":
                (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_GREEN),
                "pf_active":
                (Screen.COLOUR_GREEN, Screen.A_NORMAL, Screen.COLOUR_BLACK),
                "pf_inactive":
                (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLACK),
                "label":
                (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLACK),
                "title":
                (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_RED),
                "title_exit":
                (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_RED),
                "footer_cpu":
                (Screen.COLOUR_CYAN, Screen.A_NORMAL, Screen.COLOUR_BLACK),
                "footer_path":
                (Screen.COLOUR_YELLOW, Screen.A_NORMAL, Screen.COLOUR_BLACK),
                "footer_memory":
                (Screen.COLOUR_GREEN, Screen.A_NORMAL, Screen.COLOUR_BLACK),
                "footer_mc_cpu":
                (Screen.COLOUR_MAGENTA, Screen.A_NORMAL, Screen.COLOUR_BLACK),
            })

        self.start_time = datetime.now()
        self.machine = machine

        self.mpf_process = Process()
        self.ball_devices = list()  # type: List[BallDevice]

        self.switches = {}  # type: Dict[str, Switch]

        self.machine.events.add_handler('init_phase_2', self._init)
        # self.machine.events.add_handler('init_phase_3', self._init2)
        self.machine.events.add_handler('loading_assets',
                                        self._asset_load_change)
        self.machine.events.add_handler('bcp_connection_attempt',
                                        self._bcp_connection_attempt)
        self.machine.events.add_handler('asset_loading_complete',
                                        self._asset_load_complete)
        self.machine.events.add_handler('bcp_clients_connected',
                                        self._bcp_connected)
        self.machine.events.add_handler('shutdown', self.stop)
        self.machine.add_crash_handler(self.stop)
        self.machine.events.add_handler('player_number', self._update_player)
        self.machine.events.add_handler('player_ball', self._update_player)
        self.machine.events.add_handler('player_score', self._update_player)
        self.machine.events.add_handler('ball_ended', self._update_player)

        self._pending_bcp_connection = False
        self._asset_percent = 0
        self._bcp_status = (0, 0, 0)  # type: Tuple[float, int, int]
        self.switch_widgets = []  # type: List[Widget]
        self.mode_widgets = []  # type: List[Widget]
        self.ball_device_widgets = []  # type: List[Widget]
        self._machine_widgets = []  # type: List[Widget]
        self._player_widgets = []  # type: List[Widget]
        self.footer_memory = None
        self.footer_cpu = None
        self.footer_mc_cpu = None
        self.footer_uptime = None
        self._layout_change = True

        self._tick_task = self.machine.clock.schedule_interval(self._tick, 1)
        self._create_window()
        self._draw_screen()

    def _init(self, **kwargs):
        del kwargs
        for mode in self.machine.modes.values():
            self.machine.events.add_handler(
                "mode_{}_started".format(mode.name), self._mode_change)
            self.machine.events.add_handler(
                "mode_{}_stopped".format(mode.name), self._mode_change)

        self.machine.switch_controller.add_monitor(self._update_switches)
        self.machine.register_monitor("machine_vars",
                                      self._update_machine_vars)
        self.machine.variables.machine_var_monitor = True
        self.machine.bcp.interface.register_command_callback(
            "status_report", self._bcp_status_report)

        for bd in [
                x for x in self.machine.ball_devices.values()
                if not x.is_playfield()
        ]:
            self.ball_devices.append(bd)

        self.ball_devices.sort()

        self._update_switch_layout()
        self._schedule_draw_screen()

    async def _bcp_status_report(self, client, cpu, rss, vms):
        del client
        self._bcp_status = cpu, rss, vms

    def _update_stats(self):
        # Runtime
        rt = (datetime.now() - self.start_time)
        mins, sec = divmod(rt.seconds + rt.days * 86400, 60)
        hours, mins = divmod(mins, 60)
        self.footer_uptime.text = 'RUNNING {:d}:{:02d}:{:02d}'.format(
            hours, mins, sec)

        # System Stats
        self.footer_memory.text = 'Free Memory (MB): {} CPU:{:3d}%'.format(
            round(virtual_memory().available / 1048576),
            round(cpu_percent(interval=None, percpu=False)))

        # MPF process stats
        self.footer_cpu.text = 'MPF (CPU RSS/VMS): {}% {}/{} MB    '.format(
            round(self.mpf_process.cpu_percent()),
            round(self.mpf_process.memory_info().rss / 1048576),
            round(self.mpf_process.memory_info().vms / 1048576))

        # MC process stats
        if self._bcp_status != (0, 0, 0):
            self.footer_mc_cpu.text = 'MC (CPU RSS/VMS) {}% {}/{} MB '.format(
                round(self._bcp_status[0]),
                round(self._bcp_status[1] / 1048576),
                round(self._bcp_status[2] / 1048576))
        else:
            self.footer_mc_cpu.text = ""

    def _update_switch_layout(self):
        num = 0
        self.switch_widgets = []
        self.switches = {}
        self.switch_widgets.append((Label("SWITCHES"), 1))
        self.switch_widgets.append((Divider(), 1))
        self.switch_widgets.append((Label(""), 2))
        self.switch_widgets.append((Divider(), 2))

        for sw in sorted(self.machine.switches.values()):
            if sw.invert:
                name = sw.name + '*'
            else:
                name = sw.name

            col = 1 if num <= int(len(self.machine.switches) / 2) else 2

            switch_widget = Label(name)
            if sw.state:
                switch_widget.custom_colour = "active_switch"

            self.switch_widgets.append((switch_widget, col))
            self.switches[sw.name] = (sw, switch_widget)

            num += 1

        self._schedule_draw_screen()

    def _update_switches(self, change, *args, **kwargs):
        del args
        del kwargs
        try:
            sw, switch_widget = self.switches[change.name]
        except KeyError:
            return
        if sw.state:
            switch_widget.custom_colour = "active_switch"
        else:
            switch_widget.custom_colour = "label"

        self._schedule_draw_screen()

    def _draw_switches(self):
        """Draw all switches."""
        for widget, column in self.switch_widgets:
            self.layout.add_widget(widget, column)

    def _mode_change(self, *args, **kwargs):
        # Have to call this on the next frame since the mode controller's
        # active list isn't updated yet
        del args
        del kwargs
        self.mode_widgets = []
        self.mode_widgets.append(Label("ACTIVE MODES"))
        self.mode_widgets.append(Divider())
        try:
            modes = self.machine.mode_controller.active_modes
        except AttributeError:
            modes = None

        if modes:
            for mode in modes:
                self.mode_widgets.append(
                    Label('{} ({})'.format(mode.name, mode.priority)))
        else:
            self.mode_widgets.append(Label("No active modes"))

        # empty line at the end
        self.mode_widgets.append(Label(""))

        self._layout_change = True
        self._schedule_draw_screen()

    def _draw_modes(self):
        for widget in self.mode_widgets:
            self.layout.add_widget(widget, 0)

    def _draw_ball_devices(self):
        for widget in self.ball_device_widgets:
            self.layout.add_widget(widget, 3)

    def _update_ball_devices(self, **kwargs):
        del kwargs
        # TODO: do not create widgets. just update contents
        self.ball_device_widgets = []
        self.ball_device_widgets.append(Label("BALL COUNTS"))
        self.ball_device_widgets.append(Divider())

        try:
            for pf in self.machine.playfields.values():
                widget = Label('{}: {} '.format(pf.name, pf.balls))
                if pf.balls:
                    widget.custom_colour = "pf_active"
                else:
                    widget.custom_colour = "pf_inactive"
                self.ball_device_widgets.append(widget)

        except AttributeError:
            pass

        for bd in self.ball_devices:
            widget = Label('{}: {} ({})'.format(bd.name, bd.balls, bd.state))
            if bd.balls:
                widget.custom_colour = "pf_active"
            else:
                widget.custom_colour = "pf_inactive"

            self.ball_device_widgets.append(widget)

        self.ball_device_widgets.append(Label(""))

        self._layout_change = True
        self._schedule_draw_screen()

    def _update_player(self, **kwargs):
        del kwargs
        self._player_widgets = []
        self._player_widgets.append(Label("CURRENT PLAYER"))
        self._player_widgets.append(Divider())

        try:
            player = self.machine.game.player
            self._player_widgets.append(
                Label('PLAYER: {}'.format(player.number)))
            self._player_widgets.append(Label('BALL: {}'.format(player.ball)))
            self._player_widgets.append(
                Label('SCORE: {:,}'.format(player.score)))
        except AttributeError:
            self._player_widgets.append(Label("NO GAME IN PROGRESS"))
            return

        player_vars = player.vars.copy()
        player_vars.pop('score', None)
        player_vars.pop('number', None)
        player_vars.pop('ball', None)

        names = self.config.get('player_vars', player_vars.keys())
        for name in names:
            self._player_widgets.append(
                Label("{}: {}".format(name, player_vars[name])))

        self._layout_change = True
        self._schedule_draw_screen()

    def _draw_player(self, **kwargs):
        del kwargs
        for widget in self._player_widgets:
            self.layout.add_widget(widget, 3)

    def _update_machine_vars(self, **kwargs):
        """Update machine vars."""
        del kwargs
        self._machine_widgets = []
        self._machine_widgets.append(Label("MACHINE VARIABLES"))
        self._machine_widgets.append(Divider())
        machine_vars = self.machine.variables.machine_vars
        # If the config defines explicit vars to show, only show those; otherwise show all.
        names = self.config.get('machine_vars', machine_vars.keys())
        for name in names:
            self._machine_widgets.append(
                Label("{}: {}".format(name, machine_vars[name]['value'])))
        self._layout_change = True
        self._schedule_draw_screen()

    def _draw_machine_variables(self):
        """Draw machine vars."""
        for widget in self._machine_widgets:
            self.layout.add_widget(widget, 0)

    def _create_window(self):
        self.screen = Screen.open()
        self.frame = Frame(self.screen,
                           self.screen.height,
                           self.screen.width,
                           has_border=False,
                           title="Test")
        self.frame.set_theme("mpf_theme")

        title_layout = Layout([1, 5, 1])
        self.frame.add_layout(title_layout)

        title_left = Label("")
        title_left.custom_colour = "title"
        title_layout.add_widget(title_left, 0)

        title = 'Mission Pinball Framework v{}'.format(
            mpf._version.__version__)  # noqa
        title_text = Label(title, align="^")
        title_text.custom_colour = "title"
        title_layout.add_widget(title_text, 1)

        exit_label = Label("< CTRL + C > TO EXIT", align=">")
        exit_label.custom_colour = "title_exit"

        title_layout.add_widget(exit_label, 2)

        self.layout = MpfLayout([1, 1, 1, 1], fill_frame=True)
        self.frame.add_layout(self.layout)

        footer_layout = Layout([1, 1, 1])
        self.frame.add_layout(footer_layout)
        self.footer_memory = Label("", align=">")
        self.footer_memory.custom_colour = "footer_memory"
        self.footer_uptime = Label("", align=">")
        self.footer_uptime.custom_colour = "footer_memory"
        self.footer_mc_cpu = Label("")
        self.footer_mc_cpu.custom_colour = "footer_mc_cpu"
        self.footer_cpu = Label("")
        self.footer_cpu.custom_colour = "footer_cpu"
        footer_path = Label(self.machine.machine_path)
        footer_path.custom_colour = "footer_path"
        footer_empty = Label("")
        footer_empty.custom_colour = "footer_memory"

        footer_layout.add_widget(footer_path, 0)
        footer_layout.add_widget(self.footer_cpu, 0)
        footer_layout.add_widget(footer_empty, 1)
        footer_layout.add_widget(self.footer_mc_cpu, 1)
        footer_layout.add_widget(self.footer_uptime, 2)
        footer_layout.add_widget(self.footer_memory, 2)

        self.scene = Scene([self.frame], -1)
        self.screen.set_scenes([self.scene], start_scene=self.scene)

        # prevent main from scrolling out the footer
        self.layout.set_max_height(self.screen.height - 2)

    def _schedule_draw_screen(self):
        # schedule the draw in 10ms if it is not scheduled
        self.delay.add_if_doesnt_exist(10, self._draw_screen, "draw_screen")

    def _draw_screen(self):
        if not self.screen:
            # probably drawing during game end
            return

        if self._layout_change:
            self.layout.clear_columns()
            self._draw_modes()
            self._draw_machine_variables()
            self._draw_switches()
            self._draw_ball_devices()
            self._draw_player()
            self.frame.fix()
            self._layout_change = False

        self.screen.force_update()
        self.screen.draw_next_frame()

    def _tick(self):
        if self.screen.has_resized():
            self._create_window()

        self._update_ball_devices()
        self._update_stats()

        self._schedule_draw_screen()

        self.machine.bcp.transport.send_to_clients_with_handler(
            handler="_status_request", bcp_command="status_request")

    def _bcp_connection_attempt(self, name, host, port, **kwargs):
        del name
        del kwargs
        self._pending_bcp_connection = PopUpDialog(
            self.screen,
            'WAITING FOR MEDIA CONTROLLER {}:{}'.format(host, port), [])
        self.scene.add_effect(self._pending_bcp_connection)
        self._schedule_draw_screen()

    def _bcp_connected(self, **kwargs):
        del kwargs
        self.scene.remove_effect(self._pending_bcp_connection)
        self._schedule_draw_screen()

    def _asset_load_change(self, percent, **kwargs):
        del kwargs
        if self._asset_percent:
            self.scene.remove_effect(self._asset_percent)
        self._asset_percent = PopUpDialog(
            self.screen, 'LOADING ASSETS: {}%'.format(percent), [])
        self.scene.add_effect(self._asset_percent)
        self._schedule_draw_screen()

    def _asset_load_complete(self, **kwargs):
        del kwargs
        self.scene.remove_effect(self._asset_percent)
        self._schedule_draw_screen()

    def stop(self, **kwargs):
        """Stop the Text UI and restore the original console screen."""
        del kwargs
        if self.screen:
            self.machine.clock.unschedule(self._tick_task)
            self.screen.close(True)
            self.screen = None
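The footer in _update_stats() above mixes system-wide psutil calls (virtual_memory, cpu_percent) with per-process ones on self.mpf_process. A minimal standalone sketch of the same pattern, runnable outside MPF (the variable names here are illustrative only):

from psutil import Process, cpu_percent, virtual_memory

proc = Process()  # the current process, like self.mpf_process above

# System-wide numbers, as in the "Free Memory / CPU" footer
free_mb = round(virtual_memory().available / 1048576)
sys_cpu = round(cpu_percent(interval=None))  # non-blocking; returns 0.0 on the very first call

# Per-process numbers, as in the "MPF (CPU RSS/VMS)" footer
minfo = proc.memory_info()
print('Free Memory (MB): {} CPU:{:3d}%'.format(free_mb, sys_cpu))
print('MPF (CPU RSS/VMS): {}% {}/{} MB'.format(
    round(proc.cpu_percent()),
    round(minfo.rss / 1048576),
    round(minfo.vms / 1048576)))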
コード例 #36
0
def memory_info_attrs(proc: psutil.Process):
    """Return the fields of proc.memory_info() (rss, vms, ...) as a plain dict."""
    minfo = proc.memory_info()
    return {k: getattr(minfo, k) for k in dir(minfo)
            if not k.startswith('_') and not callable(getattr(minfo, k))}
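
A short usage sketch for the helper above, assuming psutil is installed; the exact fields returned (rss, vms, shared, ...) depend on the platform.

import os

import psutil

proc = psutil.Process(os.getpid())  # or any PID the current user may inspect
attrs = memory_info_attrs(proc)
print(f"rss = {attrs['rss'] / (1024 * 1024):.1f} MB")  # rss/vms are byte counts on every platform
print(attrs)  # full platform-dependent field set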