def test_infinite_loop(self):
        # Given
        user_answer = ("#!/bin/bash\nwhile [ 1 ] ;"
            " do echo "" > /dev/null ; done")
        kwargs = {
                  'metadata': {
                    'user_answer': user_answer,
                    'file_paths': self.file_paths,
                    'partial_grading': False,
                    'language': 'bash'
                    },
                    'test_case_data': self.test_case_data,
                  }

        # When
        grader = Grader(self.in_dir)
        result = grader.evaluate(kwargs)

        # Then
        self.assertFalse(result.get("success"))
        self.assert_correct_output(self.timeout_msg,
                                   result.get("error")[0]["message"]
                                   )
        parent_proc = Process(os.getpid()).children()
        if parent_proc:
            children_procs = Process(parent_proc[0].pid)
            self.assertFalse(any(children_procs.children(recursive=True)))
Example #2
def ppid_cascade(process=None):
    if process is None:
        process = Process()
    process = process.parent()
    while process:
        yield process.pid
        process = process.parent()
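
Usage sketch (my own, not part of the original snippet): walk the ancestry of the current process and print each ancestor PID.

from psutil import Process

# Hypothetical usage: list the ancestor PIDs of the current process,
# from immediate parent up to the root of the process tree.
for ancestor_pid in ppid_cascade(Process()):
    print(ancestor_pid)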
Example #3
def get_procinfo_by_address(ip_src, ip_dst, port_src=None, port_dst=None):
	"""
	Gets Infos about the Prozess associated with the given address information
	Both port_src and port_dst must be not None or None at the same time.

	return -- [pid, "/path/to/command", "user", "hash"] or []
	"""
	result = []

	if port_src is not None:
		pids = [c.pid for c in net_connections()
			if len(c.raddr) != 0 and c.pid is not None and
				(c.laddr[0], c.raddr[0], c.laddr[1], c.raddr[1]) == (ip_src, ip_dst, port_src, port_dst)]
	else:
		pids = [c.pid for c in net_connections()
			if len(c.raddr) != 0 and c.pid is not None and (c.laddr[0], c.raddr[0]) == (ip_src, ip_dst)]

	try:
		if len(pids) > 1:
			logger.warning("more than 1 matching process: %r", pids)
		proc = Process(pids[0])
		cmd = [pids[0], proc.cmdline(), proc.username()]
		hash_input = "%d%s%s" % (cmd[0], cmd[1], cmd[2])
		procinfo_hash = hashlib.sha256(hash_input.encode("UTF-8"))
		cmd.append(procinfo_hash)
		logger.debug("process info: %r", cmd)
		return cmd
	except IndexError:
		pass
	return []
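
A hedged usage sketch; the addresses and ports below are made up, and `net_connections` is assumed to be psutil's module-level function.

# Hypothetical lookup: which local process owns the connection to 93.184.216.34:443?
info = get_procinfo_by_address("192.168.0.10", "93.184.216.34",
                               port_src=51512, port_dst=443)
if info:
    pid, cmdline, user, procinfo_hash = info
    print(pid, cmdline, user, procinfo_hash.hexdigest())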
Example #4
   def trace_memory_usage(self, frame, event, arg):
      """Callback for sys.settrace

      Args:
         frame: frame is the current stack frame
         event: event is a string: 'call', 'line', 'return', 'exception', 'c_call', 'c_return', or 'c_exception'
         arg: arg depends on the event type.

      Returns:
         function: this trace callback itself, so that tracing continues
      """
      if event in ('call', 'line', 'return') and frame.f_code in self.code_map:
         if event != 'call':
            # "call" event just saves the lineno but not the memory
            process = Process(getpid())
            mem = process.memory_info()[0] / float(2 ** 20)
            # if there is already a measurement for that line get the max
            old_mem = self.code_map[frame.f_code].get(self.prevline, 0)
            self.code_map[frame.f_code][self.prevline] = max(mem, old_mem)
         self.prevline = frame.f_lineno

      if self._original_trace_function is not None:
         self._original_trace_function(frame, event, arg)

      return self.trace_memory_usage
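
For context, a callback like this only does anything once it is installed with sys.settrace. Below is a small self-contained sketch of the same idea, with simplified bookkeeping of my own rather than the class above: sample RSS on each 'line' event and keep the per-line maximum.

import sys
from os import getpid
from psutil import Process

_samples = {}  # lineno -> peak RSS (MiB) observed while that line was executing

def _trace(frame, event, arg):
    # Sample RSS on every 'line' event and keep the maximum seen per line.
    if event == "line":
        rss_mib = Process(getpid()).memory_info().rss / float(2 ** 20)
        _samples[frame.f_lineno] = max(rss_mib, _samples.get(frame.f_lineno, 0))
    return _trace  # returning the callback keeps tracing enabled

def workload():
    data = [bytes(1024) for _ in range(1000)]
    return len(data)

sys.settrace(_trace)
try:
    workload()
finally:
    sys.settrace(None)
print(sorted(_samples.items()))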
Example #5
 def __enter__(self):
     cput = P.cpu_times()
     memi = P.memory_info_ex()
     self.start_sys  = cput.system
     self.start_user = cput.user
     self.start_rss  = memi.rss
     self.start_disk = get_wdb() + get_ldb()
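
The snippet above only shows __enter__; the matching __exit__ is not included. As a hedged illustration of the same pattern (a minimal class of my own, not the original's), here is a context manager that records CPU time and RSS at entry and exposes the deltas on exit:

from psutil import Process

class ResourceDelta:
    """Minimal sketch: measure CPU seconds and RSS growth across a `with` block."""

    def __enter__(self):
        self._proc = Process()
        cpu = self._proc.cpu_times()
        self.start_cpu = cpu.user + cpu.system
        self.start_rss = self._proc.memory_info().rss
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        cpu = self._proc.cpu_times()
        self.cpu_delta = (cpu.user + cpu.system) - self.start_cpu
        self.rss_delta = self._proc.memory_info().rss - self.start_rss
        return False  # never swallow exceptions

# Usage:
#   with ResourceDelta() as rd:
#       do_work()
#   print(rd.cpu_delta, rd.rss_delta)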
Example #6
def click(x, notify=False, pid=None, pids=None, webdriver=None, window_name=None, debug=False):

    if debug:
        print("[beryl] starting click")
        print("\tpid: " + str(pid))
        print("\twebdriver: " + str(webdriver))
        print("\twindow_name: " + str(window_name))
        print("\tstr(type(webdriver)): " + str(type(webdriver)))

    type_as_string = str(type(x))

    webdriver_type_as_string = str(type(webdriver))
    if webdriver_type_as_string == "<class 'selenium.webdriver.firefox.webdriver.WebDriver'>":
        pids = [webdriver.binary.process.pid]
    elif webdriver_type_as_string == "<class 'selenium.webdriver.chrome.webdriver.WebDriver'>":
        process = Process(webdriver.service.process.pid)
        if hasattr(process, "children"):
            pids = [p.pid for p in process.children()]
        elif hasattr(process, "get_children"):
            pids = [p.pid for p in process.get_children()]


    if isinstance(x, str) or isinstance(x, unicode):
        if x.endswith(".png") or x.endswith(".jpg"):
            click_image(x, notify=notify)
        else:
            click_text(x, notify=notify, pids=pids, window_name=window_name, debug=debug)
    elif isinstance(x, PngImageFile):
        click_image(x,notify=notify)
    elif isinstance(x, tuple):
        click_location(x,notify=notify)
Example #7
    def end_broker_process(self):
        try:
            broker_process = Process(self.broker.pid)
        except NoSuchProcess:
            return # was killed
        # get stdout and stderr
        select_config = [self.broker.stdout, self.broker.stderr], [], [], 0.1
        stdout, stderr = [], []
        result = select.select(*select_config)
        while any(result):
            if result[0]:
                stdout.append(result[0][0].readline())
            if result[1]:
                stderr.append(result[1][0].readline())
            result = select.select(*select_config)
        if stdout and DEBUG_STDOUT:
            _print_debug('STDOUT', ''.join(stdout))
        if stderr and DEBUG_STDERR:
            _print_debug('STDERR', ''.join(stderr))

        # kill main process and its children
        children = [process.pid for process in broker_process.get_children()]
        _kill(self.broker.pid, timeout=TIMEOUT / 1000.0)
        for child_pid in children:
            _kill(child_pid, timeout=TIMEOUT / 1000.0)
Example #8
    def test_infinite_loop(self):
        # Given
        user_answer = dedent("""
        #include<stdio.h>
        int main(void){
        while(0==0){
        printf("abc");}
        }""")
        kwargs = {
                  'metadata': {
                    'user_answer': user_answer,
                    'file_paths': self.file_paths,
                    'partial_grading': False,
                    'language': 'cpp'
                    }, 'test_case_data': self.test_case_data,
                  }

        # When
        grader = Grader(self.in_dir)
        result = grader.evaluate(kwargs)

        # Then
        self.assertFalse(result.get("success"))
        self.assert_correct_output(self.timeout_msg,
                                   result.get("error")[0]["message"]
                                   )
        parent_proc = Process(os.getpid()).children()
        if parent_proc:
            children_procs = Process(parent_proc[0].pid)
            self.assertFalse(any(children_procs.children(recursive=True)))
Example #9
def main(argv):
    p = Popen(argv[1:])
    
    try:
        client = ExitCodeClient()
        try:
            proc = Process(pid=p.pid)
            while p.poll() is None:
                total_cpu_percent = proc.get_cpu_percent(interval=0)
                for child_proc in proc.get_children(recursive=True):
                    total_cpu_percent += child_proc.get_cpu_percent(interval=0)
                client.send_status(total_cpu_percent)
                time.sleep(0.1) # recommended waiting period from psutil docs
        except NoSuchProcess:
            pass
    except KeyboardInterrupt:
        try:
            p.terminate()
        except OSError:
            pass
    
    client.send_status(LEDMode.Success if p.returncode == 0 else LEDMode.Error)
    time.sleep(0.5) # Give server time to read value before connection closes
    client.shutdown()
    
    return p.returncode
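
The loop above uses the pre-2.0 psutil accessors (get_cpu_percent, get_children). A hedged equivalent with the current psutil API might look like the sketch below; the command passed to Popen is a placeholder.

import time
from subprocess import Popen
from psutil import Process, NoSuchProcess

def watch_total_cpu(cmd):
    """Print the combined CPU percent of a child process and its descendants."""
    child = Popen(cmd)
    try:
        proc = Process(child.pid)
        while child.poll() is None:
            total = proc.cpu_percent(interval=0)
            for descendant in proc.children(recursive=True):
                try:
                    total += descendant.cpu_percent(interval=0)
                except NoSuchProcess:
                    pass
            print("total CPU%%: %.1f" % total)
            time.sleep(0.1)
    except NoSuchProcess:
        pass
    return child.wait()

# watch_total_cpu(["sleep", "2"])  # placeholder command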
Example #10
    def test_cleanup_children_on_terminate(self):
        """
        Subprocesses spawned by tasks should be terminated on terminate
        """
        class HangingSubprocessTask(luigi.Task):
            def run(self):
                python = sys.executable
                check_call([python, '-c', 'while True: pass'])

        task = HangingSubprocessTask()
        queue = mock.Mock()
        worker_id = 1

        task_process = TaskProcess(task, worker_id, queue, lambda: None, lambda: None)
        task_process.start()

        parent = Process(task_process.pid)
        while not parent.children():
            # wait for child process to startup
            sleep(0.01)

        [child] = parent.children()
        task_process.terminate()
        child.wait(timeout=1.0)  # wait for terminate to complete

        self.assertFalse(parent.is_running())
        self.assertFalse(child.is_running())
Example #11
    def test_infinite_loop(self):
        # Given
        user_answer = ("class Test {\n\tint square_num(int a)"
                       " {\n\t\twhile(0==0){\n\t\t}\n\t}\n}")
        kwargs = {
                  'metadata': {
                    'user_answer': user_answer,
                    'file_paths': self.file_paths,
                    'partial_grading': False,
                    'language': 'java'
                    }, 'test_case_data': self.test_case_data,
                  }

        # When
        grader = Grader(self.in_dir)
        result = grader.evaluate(kwargs)

        # Then
        self.assertFalse(result.get("success"))
        self.assert_correct_output(self.timeout_msg,
                                   result.get("error")[0]["message"]
                                   )
        parent_proc = Process(os.getpid()).children()
        if parent_proc:
            children_procs = Process(parent_proc[0].pid)
            self.assertFalse(any(children_procs.children(recursive=True)))
Example #12
def get_soledad_server_pid():
    output = check_output(['pidof', 'python'])
    for pid in output.split():
        proc = Process(int(pid))
        cmdline = proc.cmdline()
        if args.issubset(set(cmdline)):
            return int(pid)
Example #13
 def done(self):
     try:
         myself = Process(os.getpid())
         for child in myself.get_children():
             child.kill()
     except Exception as e:
         sys.stderr.write(str(e) + '\n')
Example #14
 def worker(self, obj):
     if self.active_workers:
         for node, active_workers in self.active_workers.iteritems():
             for worker in active_workers:
                 if worker['id'] == obj.task_id:
                     p = Process(worker['worker_pid'])
                     return 'CPU:%.1f%% RAM:%.2f%%' % (p.cpu_percent(0.05), p.memory_percent())
     return 'N/a'
Example #15
def _get_shell_pid():
    """Returns parent process pid."""
    proc = Process(os.getpid())

    try:
        return proc.parent().pid
    except TypeError:
        return proc.parent.pid
Example #16
    def test_infinite_loop(self):
        # Given
        user_answer = dedent("""\
                             #include<stdio.h>
                             int main(void){
                             while(0==0){
                             printf("abc");}
                             }""")

        hook_code = dedent("""\
                            def check_answer(user_answer):
                                with open("Test.c", "w+") as f:
                                    f.write(user_answer)
                                import subprocess
                                success = False
                                err = "Incorrect Answer"
                                mark_fraction = 0.0
                                def _run_command(cmd):
                                    proc = subprocess.Popen(
                                        "{}".format(cmd), shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE
                                    )
                                    stdout,stderr = proc.communicate()
                                    return stdout,stderr
                                cmds = ["gcc Test.c", "./a.out"]
                                for cmd in cmds:
                                    stdout, stderr = _run_command(cmd)
                                if stdout.decode("utf-8") == "Hello, world!":
                                    success, err, mark_fraction = True, "", 1.0
                                return success, err, mark_fraction
                            """)

        test_case_data = [{"test_case_type": "hooktestcase",
                           "hook_code": hook_code, "weight": 1.0}]

        kwargs = {
                  'metadata': {
                    'user_answer': user_answer,
                    'file_paths': self.file_paths,
                    'partial_grading': False,
                    'language': 'cpp'
                    }, 'test_case_data': test_case_data,
                  }

        # When
        grader = Grader(self.in_dir)
        result = grader.evaluate(kwargs)

        # Then
        self.assertFalse(result.get('success'))
        self.assert_correct_output(self.timeout_msg,
                                   result.get("error")[0]["message"]
                                   )
        parent_proc = Process(os.getpid()).children()
        if parent_proc:
            children_procs = Process(parent_proc[0].pid)
            self.assertFalse(any(children_procs.children(recursive=True)))
Example #17
 def test_valid_is_running(self):
     p = Process()
     _, f = mkstemp()
     try:
         with open(f, 'w') as pid_file:
             pid_file.write('{0} {1:6f}'.format(p.pid, p.create_time()))
         self.assertTrue(_is_running(f))
     finally:
         unlink(f)
Example #18
 def run(self):
     self._isRunning = True
     process = Process(os.getpid())
     while self._isRunning:
         self.cpuUsage = process.get_cpu_percent(self._pollInterval)
         self.memUsage = process.get_memory_info()[0]
         if self.delegate is not None:
             self.delegate.processCpuUsage(self.cpuUsage)
             self.delegate.processMemUsage(self.memUsage)
Example #19
 def test_valid_is_running(self):
     p = Process()
     d = mkdtemp()
     f = _get_pid_filename(d)
     try:
         with open(f, 'w') as pid_file:
             pid_file.write('{0} {1:6f}'.format(p.pid, p.create_time()))
         self.assertTrue(_is_running(d))
     finally:
         rmtree(d)
Example #20
def _set_running(filename):
    """Write the current process information to disk.
    :param filename: The name of the file where the process information will be written.
    """
    if isfile(str(filename)):
        raise PidFileExistsError()

    p = Process()
    with open(filename, 'w') as f:
        f.write('{0} {1:.6f}'.format(p.pid, p.create_time()))
Example #21
    def _periodically_log_statistics(self):
        statistics = self._dispersy.statistics
        process = Process(getpid()) if Process else None

        while True:
            statistics.update()

            # CPU
            if cpu_percent:
                self.log("scenario-cpu", percentage=cpu_percent(interval=0, percpu=True))

            # memory
            if process:
                rss, vms = process.get_memory_info()
                self.log("scenario-memory", rss=rss, vms=vms)

            # bandwidth
            self.log(
                "scenario-bandwidth",
                up=self._dispersy.endpoint.total_up,
                down=self._dispersy.endpoint.total_down,
                drop_count=self._dispersy.statistics.drop_count,
                delay_count=statistics.delay_count,
                delay_send=statistics.delay_send,
                delay_success=statistics.delay_success,
                delay_timeout=statistics.delay_timeout,
                success_count=statistics.success_count,
                received_count=statistics.received_count,
            )

            # dispersy statistics
            self.log(
                "scenario-connection",
                connection_type=statistics.connection_type,
                lan_address=statistics.lan_address,
                wan_address=statistics.wan_address,
            )

            # communities
            for community in statistics.communities:
                self.log(
                    "scenario-community",
                    hex_cid=community.hex_cid,
                    classification=community.classification,
                    global_time=community.global_time,
                    sync_bloom_new=community.sync_bloom_new,
                    sync_bloom_reuse=community.sync_bloom_reuse,
                    candidates=[
                        dict(zip(["lan_address", "wan_address", "global_time"], tup)) for tup in community.candidates
                    ],
                )

            # wait
            yield self.enable_statistics
Example #22
def testFDTDServiceOpenFiles():
    """
    #41 - Too many open files (fdtd side)

    """
    hostName = os.uname()[1]
    f = getTempFile(functionalFDTDConfiguration)
    inputOption = "--config=%s" % f.name
    conf = ConfigFDTD(inputOption.split())
    conf.sanitize()
    testName = inspect.stack()[0][3]
    logger = Logger(name=testName, logFile="/tmp/fdtdtest-%s.log" % testName, level=logging.DEBUG)
    apMon = None
    fdtd = FDTD(conf, apMon, logger)

    proc = Process(os.getpid())
    initStateNumOpenFiles = len(proc.get_open_files())

    for testAction in [TestAction("fakeSrc", "fakeDst") for i in range(3)]:
        r = fdtd.service.service(testAction)
        logger.debug("Result: %s" % r)
        assert r.status == 0

    # after TestAction, there should not be left behind any open files
    numOpenFilesNow = len(proc.get_open_files())
    assert initStateNumOpenFiles == numOpenFilesNow

    # test ReceivingServerAction - it's an action after which the separate
    # logger is not closed; check that the number of open files went up by 1,
    # then send CleanupProcessesAction and the count shall again equal
    # initStateNumOpenFiles; send an appropriate TestAction first (as in real use)
    serverId = "server-id"
    testAction = TestAction(hostName, hostName)
    testAction.id = serverId
    r = fdtd.service.service(testAction)
    assert r.status == 0
    options = dict(gridUserDest="someuserDest", clientIP=os.uname()[1], destFiles=[])
    recvServerAction = ReceivingServerAction(testAction.id, options)
    r = fdtd.service.service(recvServerAction)
    print(r.msg)
    assert r.status == 0
    numOpenFilesNow = len(proc.get_open_files())
    # there should be only 1 extra opened file now
    assert initStateNumOpenFiles == numOpenFilesNow - 1
    cleanupAction = CleanupProcessesAction(serverId, timeout=2)
    r = fdtd.service.service(cleanupAction)
    print(r.msg)
    assert r.status == 0
    numOpenFilesNow = len(proc.get_open_files())
    assert initStateNumOpenFiles == numOpenFilesNow

    fdtd.shutdown()
    fdtd.pyroDaemon.closedown()
    logger.close()
Example #23
def _set_running(directory):
    """Write the current process information to disk.
    :param directory: The name of the directory where the process information will be written.
    """
    filename = _get_pid_filename(directory)
    if isfile(str(filename)):
        raise PidFileExistsError

    p = Process()
    with open(filename, 'w') as f:
        f.write('{0} {1:.6f}'.format(p.pid, p.create_time()))
Example #24
def _psutil_kill_pid(pid):
    """
    http://stackoverflow.com/questions/1230669/subprocess-deleting-child-processes-in-windows
    """
    try:
        parent = Process(pid)
        for child in parent.children(recursive=True):
            child.kill()
        parent.kill()
    except NoSuchProcess:
        return
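
A brief usage sketch (the command is a placeholder): spawn a child with subprocess.Popen and tear down the whole process tree when done.

from subprocess import Popen

proc = Popen(["sleep", "60"])  # placeholder command
_psutil_kill_pid(proc.pid)     # kills the child and anything it forked
proc.wait()                    # reap the killed child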
Example #25
def common_option(pid):
    ''' common options to be used in both restore and dump '''

    options = list()
    proc = Process(pid)
    if proc.terminal():
        options.append("--shell-job")
    if proc.connections(kind="inet"):
        options.append("--tcp-established")

    return options
Example #26
def update_process_list():
    global PS
    plist = []
    for pid in psutil.get_pid_list():
        try:
            p = Process(pid)
            plist.append((p,p.get_cpu_percent(),p.get_memory_percent())) 
        except psutil.NoSuchProcess:
            continue
    # copy plist to PS
    PS = sorted(plist[:], key=lambda e: e[1] if e[1] > e[2] else e[2], reverse=True)[:25]
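
psutil.get_pid_list() and the get_* accessors were removed in later psutil releases; a hedged modern equivalent of the same refresh, using process_iter with pre-fetched attributes, could look like this:

import psutil

def update_process_list_modern(limit=25):
    """Return the top processes sorted by max(cpu_percent, memory_percent)."""
    plist = []
    for p in psutil.process_iter(attrs=["cpu_percent", "memory_percent"]):
        cpu = p.info["cpu_percent"] or 0.0    # None if access was denied
        mem = p.info["memory_percent"] or 0.0
        plist.append((p, cpu, mem))
    return sorted(plist, key=lambda e: max(e[1], e[2]), reverse=True)[:limit]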
Example #27
 def test_running_valid_file(self):
     d = mkdtemp()
     f = realpath(join(d, '.pid'))
     try:
         _set_running(f)
         with open(f, 'r') as pid_file:
             process_info = pid_file.read().split()
         p = Process()
         self.assertEquals(p.pid, int(process_info[0]))
         self.assertEquals(p.create_time(), float(process_info[1]))
     finally:
         shutil.rmtree(d)
Example #28
def getOpenFilesList(offset=4):
    """
    Returns all currently open files.
    Problem: #41 - Too many open files (fdtd side)
    """
    myPid = os.getpid()
    proc = Process(myPid)
    files = proc.get_open_files()
    filesStr = "\n".join(["%s%s (fd=%s)" % (offset * ' ', f.path, f.fd)
                          for f in files])
    numFiles = len(files)
    return numFiles, filesStr
Example #29
def instant_process_statistics(pid):
    """ Return the instant jiffies and memory values for the process identified by pid. """
    work = memory = 0
    try:
        proc = Process(pid)
        for p in [proc] + proc.children(recursive=True):
            work += sum(p.cpu_times())
            memory += p.memory_percent()
    except (NoSuchProcess, ValueError):
        # process may have disappeared in the interval
        pass
    # take into account the number of processes for the process work 
    return work, memory
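
Usage sketch (the PID is the current process, purely for illustration): take two samples a second apart; the difference in the 'work' value approximates the CPU seconds spent in that interval.

import os
import time

work1, _ = instant_process_statistics(os.getpid())
time.sleep(1.0)
work2, memory = instant_process_statistics(os.getpid())
print("CPU seconds in the last second: %.3f, memory%%: %.2f" % (work2 - work1, memory))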
Example #30
def get_memory(pid):
    # return the memory usage in MB; psutil should be at least version 4.0
    from psutil import Process, __version__

    # if __version__ < '4.0.0':
    #     raise Exception('psutil module should be 4.0.0 version at least.')

    if pid_exists(pid):
        process = Process(pid)
        # mem = process.memory_full_info().uss / float(1024*1024)
        mem = process.memory_info().rss / float(1024*1024)
        return mem
    return 0
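
A quick usage sketch, assuming pid_exists is imported from psutil at module level as the snippet implies; the PID shown is the current process.

import os

print("RSS MB:", get_memory(os.getpid()))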
Example #31
def get_stats():
    #lazy check stats
    stats = {}
    try:
        for coll in (defaultdb["reportscoll"], defaultdb["filescoll"],
                     "fs.chunks", "fs.files"):
            if coll in client[defaultdb["dbname"]].list_collection_names():
                stats.update({"[{}] Collection".format(coll): "Exists"})
            else:
                stats.update(
                    {"[{}] Collection".format(coll): "Does not exists"})
    except:
        pass
    try:
        stats.update({
            "[Reports] Total reports":
            client[defaultdb["dbname"]][defaultdb["reportscoll"]].find(
                {}).count(),
            "[Reports] Total used space":
            "{}".format(
                convert_size(client[defaultdb["dbname"]].command(
                    "collstats", defaultdb["reportscoll"])["storageSize"] +
                             client[defaultdb["dbname"]].command(
                                 "collstats",
                                 defaultdb["reportscoll"])["totalIndexSize"]))
        })
    except:
        pass
    try:
        stats.update({
            "[Files] Total files uploaded":
            client[defaultdb["dbname"]][defaultdb["filescoll"]].find(
                {}).count()
        })
    except:
        pass
    try:
        stats.update({
            "[Files] Total uploaded files size":
            "{}".format(
                convert_size(
                    client[defaultdb["dbname"]]["fs.chunks"].find().count() *
                    255 * 1000))
        })
    except:
        pass
    try:
        stats.update({
            "[Users] Total users":
            client[defaultdb["dbname"]][defaultdb["userscoll"]].find(
                {}).count()
        })
    except:
        pass
    try:
        total, used, free = disk_usage("/")
        stats.update({
            "CPU memory":
            cpu_percent(),
            "Memory used":
            virtual_memory()[2],
            "Current process used memory":
            "{}".format(convert_size(Process(getpid()).memory_info().rss)),
            "Total disk size":
            "{}".format(convert_size(total)),
            "Used disk size":
            "{}".format(convert_size(used)),
            "Free disk size":
            "{}".format(convert_size(free)),
            "Host platform":
            pplatform()
        })
    except:
        pass

    client.close()
    return stats
Example #32
def _get_shell():
    try:
        shell = Process(os.getpid()).parent().cmdline()[0]
    except TypeError:
        shell = Process(os.getpid()).parent.cmdline[0]
    return shells[shell]
Example #33
class Monitor:
    def __init__(self, pid: int | None = None):
        """
        Parameters
        ----------
        pid : int | None = None
            The process id to monitor, defaults to current process
        """
        self.process = Process(pid)

    def memory(self, units: str = "B", *, kind: str = "vms") -> float:
        """Get the memory consumption

        Parameters
        ----------
        units : "B" | "KB" | "MB" | "GB" = "B"
            Units to measure in

        kind : "vms" | "rss" = "vms"
            The kind of memory to measure.
            https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info

        Returns
        -------
        float
            The memory used
        """
        mem = self.process.memory_info()
        if not hasattr(mem, kind):
            raise ValueError(f"No memory kind {kind}, use one from {mem}")

        usage = getattr(mem, kind)
        return memconvert(usage, frm="B", to=units)

    def memlimit(self, units: str = "B") -> tuple[float, float] | None:
        """

        We can't limit using resource.setrlimit as it seems that none of the
        RLIMIT_X constants are available. We debugged this by using
        `import psutil; print(dir(psutil))`, in which a Mac system did not have
        any `RLIMIT_X` attributes while a Linux system did.

        Parameters
        ----------
        units : "B" | "KB" | "MB" | "GB" = "B"
            Units to measure in

        Returns
        -------
        float | None
            The memory limit.
            Returns None if it can't be gotten
        """
        if hasattr(psutil, "RLIMIT_AS"):
            limits = self.process.rlimit(psutil.RLIMIT_AS)
            if units != "B":
                limits = tuple(
                    memconvert(x, frm="B", to=units) for x in limits)
            return limits
        else:
            return None
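
A short usage sketch of the Monitor class above (memconvert is assumed to be the helper referenced in the class; values vary by system):

mon = Monitor()                                    # monitor the current process
print("RSS (MB):", mon.memory("MB", kind="rss"))
print("VMS (MB):", mon.memory("MB"))               # default kind is "vms"
print("Address-space limit:", mon.memlimit("GB"))  # None where RLIMIT_AS is unavailable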
Example #34
    def __init__(
        self,
        proc: psutil.Process,
        parsed_command: ParsedChiaPlotsCreateCommand,
        logroot: str,
    ) -> None:
        '''Initialize from an existing psutil.Process object. Must know logroot in order to understand open files.'''
        self.proc = proc
        # These are dynamic, cached, and need to be updated periodically
        self.phase = Phase(known=False)

        self.help = parsed_command.help
        self.args = parsed_command.parameters

        # an example as of 1.0.5
        # {
        #     'size': 32,
        #     'num_threads': 4,
        #     'buckets': 128,
        #     'buffer': 6000,
        #     'tmp_dir': '/farm/yards/901',
        #     'final_dir': '/farm/wagons/801',
        #     'override_k': False,
        #     'num': 1,
        #     'alt_fingerprint': None,
        #     'pool_contract_address': None,
        #     'farmer_public_key': None,
        #     'pool_public_key': None,
        #     'tmp2_dir': None,
        #     'plotid': None,
        #     'memo': None,
        #     'nobitfield': False,
        #     'exclude_final_dir': False,
        # }
        # TODO: use the configured executable
        if proc.name().startswith("chia_plot"):  # MADMAX
            self.k = 32
            self.r = self.args['threads']  # type: ignore[assignment]
            self.u = self.args['buckets']  # type: ignore[assignment]
            self.v = self.args['buckets3']  # type: ignore[assignment]
            self.rmulti2 = self.args['rmulti2']  # type: ignore[assignment]
            self.b = 0
            self.n = self.args['count']  # type: ignore[assignment]
            self.tmpdir = self.args['tmpdir']  # type: ignore[assignment]
            self.tmp2dir = self.args['tmpdir2']  # type: ignore[assignment]
            self.dstdir = self.args['finaldir']  # type: ignore[assignment]
        else:  # CHIA
            self.k = self.args['size']  # type: ignore[assignment]
            self.r = self.args['num_threads']  # type: ignore[assignment]
            self.u = self.args['buckets']  # type: ignore[assignment]
            self.b = self.args['buffer']  # type: ignore[assignment]
            self.n = self.args['num']  # type: ignore[assignment]
            self.tmpdir = self.args['tmp_dir']  # type: ignore[assignment]
            self.tmp2dir = self.args['tmp2_dir']  # type: ignore[assignment]
            self.dstdir = self.args['final_dir']  # type: ignore[assignment]

        plot_cwd: str = self.proc.cwd()
        self.tmpdir = os.path.join(plot_cwd, self.tmpdir)
        if self.tmp2dir is not None:
            self.tmp2dir = os.path.join(plot_cwd, self.tmp2dir)
        self.dstdir = os.path.join(plot_cwd, self.dstdir)

        # Find logfile (whatever file is open under the log root).  The
        # file may be open more than once, e.g. for STDOUT and STDERR.
        for f in self.proc.open_files():
            if logroot in f.path:
                if self.logfile:
                    assert self.logfile == f.path
                else:
                    self.logfile = f.path
                break

        if self.logfile:
            # Initialize data that needs to be loaded from the logfile
            self.init_from_logfile()
Example #35
def get_stats():
    #lazy check stats
    stats = {}
    try:
        coll = "jobs"
        if coll in conn["jobsqueue"].list_collection_names():
            stats.update({"[{}] Collection".format(coll):"Exists"})
        else:
            stats.update({"[{}] Collection".format(coll):"Does not exists"})
    except:
        pass
    try:
        for coll in ("reports","files","fs.chunks","fs.files"):
            if coll in conn["webinterface"].list_collection_names():
                stats.update({"[{}] Collection".format(coll):"Exists"})
            else:
                stats.update({"[{}] Collection".format(coll):"Does not exists"})
    except:
        pass
    try:
        db = "jobsqueue" 
        col = "jobs"
        stats.update({  "[Queue] status":True if conn[db][col].find_one({'status': 'ON__'},{'_id': False}) else False,
                        "[Queue] Total jobs ": conn[db][col].find({"status" : {"$nin" : ["ON__","OFF_"]}}).count(),
                        "[Queue] Total finished jobs":conn[db][col].find({'status': 'done'}).count(),
                        "[Queue] Total waiting jobs":conn[db][col].find({'status': 'wait'}).count()})
                        
    #get total disk usage
    #"[Queue] Used space":"{} of {}".format(convert_size(conn[db].command("dbstats")["fsUsedSize"]),convert_size(conn[db].command("dbstats")["fsTotalSize"]))

    except:
        pass
    try:
        db = "webinterface" 
        col = "reports"
        stats.update({"[Reports] Total reports":conn[db][col].find({}).count(),
                      "[Reports] Total used space":"{}".format(convert_size(conn[db].command("collstats",col)["storageSize"] + conn[db].command("collstats",col)["totalIndexSize"]))})
    except:
        pass
    try:
        db = "webinterface" 
        col = "files"
        stats.update({"[Files] Total files uploaded":conn[db][col].find({}).count()})
    except:
        pass
    try:
        db = "webinterface" 
        col = "fs.chunks"
        stats.update({"[Files] Total uploaded files size":"{}".format(convert_size(conn[db][col].find().count() * 255 * 1000))})
    except:
        pass
    try:
        db = "webinterface" 
        col = "users"
        stats.update({"[Users] Total users":conn[db][col].find({}).count()})
    except:
        pass
    try:
        total, used, free = disk_usage("/")
        stats.update({"CPU memory":cpu_percent(),
                      "Memory used":virtual_memory()[2],
                      "Current process used memory": "{}".format(convert_size(Process(getpid()).memory_info().rss)),
                      "Total disk size": "{}".format(convert_size(total)),
                      "Used disk size": "{}".format(convert_size(used)),
                      "Free disk size": "{}".format(convert_size(free)),
                      "Host platform":pplatform()})
    except:
        pass

    conn.close()
    return stats
Example #36
def get_info(process=None, interval=0, with_childs=False):
    """Return information about a process. (can be an pid or a Process object)

    If process is None, will return the information about the current process.
    """
    # XXX moce get_info to circus.process ?
    from circus.process import (get_children, get_memory_info, get_cpu_percent,
                                get_memory_percent, get_cpu_times, get_nice,
                                get_cmdline, get_create_time, get_username)

    if process is None or isinstance(process, int):
        if process is None:
            pid = os.getpid()
        else:
            pid = process

        if pid in _PROCS:
            process = _PROCS[pid]
        else:
            _PROCS[pid] = process = Process(pid)

    info = {}
    try:
        mem_info = get_memory_info(process)
        info['mem_info1'] = bytes2human(mem_info[0])
        info['mem_info2'] = bytes2human(mem_info[1])
    except AccessDenied:
        info['mem_info1'] = info['mem_info2'] = "N/A"

    try:
        info['cpu'] = get_cpu_percent(process, interval=interval)
    except AccessDenied:
        info['cpu'] = "N/A"

    try:
        info['mem'] = round(get_memory_percent(process), 3)
    except AccessDenied:
        info['mem'] = "N/A"

    try:
        cpu_times = get_cpu_times(process)
        ctime = timedelta(seconds=sum(cpu_times))
        ctime = "%s:%s.%s" % (
            ctime.seconds // 60 % 60, str(
                (ctime.seconds % 60)).zfill(2), str(ctime.microseconds)[:2])
    except AccessDenied:
        ctime = "N/A"

    info['ctime'] = ctime

    try:
        info['pid'] = process.pid
    except AccessDenied:
        info['pid'] = 'N/A'

    try:
        info['username'] = get_username(process)
    except AccessDenied:
        info['username'] = '******'

    try:
        info['nice'] = get_nice(process)
    except AccessDenied:
        info['nice'] = 'N/A'
    except NoSuchProcess:
        info['nice'] = 'Zombie'

    try:
        raw_cmdline = get_cmdline(process)

        cmdline = os.path.basename(
            shlex.split(raw_cmdline[0], posix=not IS_WINDOWS)[0])
    except (AccessDenied, IndexError):
        cmdline = "N/A"

    try:
        info['create_time'] = get_create_time(process)
    except AccessDenied:
        info['create_time'] = 'N/A'

    try:
        info['age'] = time.time() - get_create_time(process)
    except TypeError:
        info['create_time'] = get_create_time(process)
    except AccessDenied:
        info['age'] = 'N/A'

    info['cmdline'] = cmdline

    info['children'] = []
    if with_childs:
        for child in get_children(process):
            info['children'].append(get_info(child, interval=interval))

    return info
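
Usage sketch: called with no arguments it reports on the current process; with_childs=True recurses into children.

info = get_info(with_childs=True)
print(info["pid"], info["cmdline"], info["cpu"], info["mem"], info["children"])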
Example #37
    def ensure_no_pidfile(self, log_file_marker):
        pidfile = abspath(join(self.component_dir, 'pidfile'))

        # Pidfile exists ..
        if exists(pidfile):

            # .. but raise an error only if the PID it points to belongs
            # to an already running component. Otherwise, it must be a stale pidfile
            # that we can safely delete.
            pid = open(pidfile).read().strip()
            try:
                pid = int(pid)
            except ValueError:
                raise Exception(
                    'Could not parse pid value `{}` as an integer ({})'.format(
                        pid, pidfile))
            else:
                try:
                    get_info(self.component_dir, INFO_FORMAT.DICT)
                except AccessDenied:
                    # This could be another process /or/ it can be our own component started by another user,
                    # so to be on the safe side, indicate an error instead of deleting the pidfile
                    raise Exception(
                        'Access denied to PID `{}` found in `{}`'.format(
                            pid, pidfile))
                except NoSuchProcess:
                    # This is fine, there is no process of that PID,
                    # which means that this PID does not belong to our component
                    # (because it doesn't belong to any process), so we may just delete this pidfile safely ..
                    os.remove(pidfile)

                    # .. but, if the component is load-balancer, we also need to delete its agent's pidfile.
                    # The assumption is that if the load-balancer is not running then so isn't its agent.
                    if log_file_marker == 'lb-agent':
                        lb_agent_pidfile = abspath(
                            join(self.component_dir, 'zato-lb-agent.pid'))
                        os.remove(lb_agent_pidfile)

                else:
                    #
                    # This PID exists, but it is still possible that it belongs to another process
                    # that took over a PID previously assigned to a Zato component,
                    # in which case we can still delete the pidfile.
                    #
                    # We decide that a process is actually an already running Zato component if it has
                    # opened log files that should belong to that kind of component, as indicated by log_file_marker,
                    # otherwise we assume this PID belongs to a completely different process and we can delete pidfile.
                    #
                    has_log = False
                    has_lock = False

                    log_path = abspath(
                        join(self.component_dir, 'logs',
                             '{}.log'.format(log_file_marker)))
                    lock_path = abspath(
                        join(self.component_dir, 'logs',
                             '{}.lock'.format(log_file_marker)))

                    for name in Process(pid).open_files():
                        if name.path == log_path:
                            has_log = True
                        elif name.path == lock_path:
                            has_lock = True

                    # Both files exist - this is our component and it's running so we cannot continue
                    if has_log and has_lock:
                        raise Exception(
                            'Cannot proceed, found pidfile `{}`'.format(
                                pidfile))

                    # This must be an unrelated process, so we can delete pidfile ..
                    os.remove(pidfile)

                    # .. again, if the component is load-balancer, we also need to delete its agent's pidfile.
                    # The assumption is that if the load-balancer is not running then so isn't its agent.
                    if log_file_marker == 'lb-agent':
                        lb_agent_pidfile = abspath(
                            join(self.component_dir, 'zato-lb-agent.pid'))
                        os.remove(lb_agent_pidfile)

        if self.show_output:
            self.logger.info('No such pidfile `%s`, OK', pidfile)
Example #38
def run_subprocess(command,
                   shell=False,
                   doexec=True,
                   monitor=False,
                   tile_id=None):
    """Subprocess runner
    
    If subrocess returns non-zero exit code, STDERR is sent to the logger.
    
    Parameters
    ----------
    command : list of str
        Command to pass to subprocess.run(). Eg ['wget', '-q', '-r', dl_url]
    shell : bool
        Passed to subprocess.run()
    doexec : bool
        Execute the subprocess or just print out the concatenated command
    
    Returns
    -------
    nothing
        nothing
    """
    if doexec:
        cmd = " ".join(command)
        if shell:
            command = cmd
        logger.debug(command)
        popen = Popen(command, shell=shell, stderr=PIPE, stdout=PIPE)
        pid = popen.pid
        if monitor:
            proc = Process(pid)
            with proc.oneshot():
                try:
                    logger_perf.debug(
                        "%s;%s;%s" %
                        (tile_id, proc.memory_full_info(), swap_memory()))
                except (NoSuchProcess, ZombieProcess):
                    logger.debug("%s is Zombie or NoSuchProcess" % tile_id)
                except AccessDenied as e:
                    logger_perf.exception(e)
        # if monitor:
        #     running = True
        #     proc = Process(pid)
        #     with proc.oneshot():
        #         while running:
        #             try:
        #                 logger_perf.debug("%s - %s - %s - %s - %s" % (
        #                 tile_id, proc.cpu_percent(), proc.cpu_times(), proc.memory_full_info(), swap_memory()))
        #             except NoSuchProcess or ZombieProcess:
        #                 logger.debug("%s is Zombie or NoSuchProcess" % tile_id)
        #                 break
        #             except AccessDenied as e:
        #                 logger_perf.exception(e)
        #                 break
        #             running = proc.is_running()
        #             logger.debug("%s is running: %s" % (tile_id, running))
        #             sleep(1)
        stdout, stderr = popen.communicate()
        err = stderr.decode(locale.getpreferredencoding(do_setlocale=True))
        popen.wait()
        if popen.returncode != 0:
            logger.debug("Process returned with non-zero exit code: %s",
                         popen.returncode)
            logger.error(err)
            return False
        else:
            return True
    else:
        logger.debug("Not executing %s", command)
        return True
Example #39
 def dump_coverage(self, timeout=15):
     assert SIGUSR1 is not None
     pid = self._puppet.get_pid()
     if pid is None or not self._puppet.is_healthy():
         LOG.debug("Skipping coverage dump (target is not in a good state)")
         return
      # At this point the browser is in a good state, i.e. no crashes
      # or hangs, so signal the browser to dump coverage.
     try:
         for child in Process(pid).children(recursive=True):
             LOG.debug("Sending SIGUSR1 to %d (child)", child.pid)
             try:
                 kill(child.pid, SIGUSR1)
             except OSError:
                 LOG.warning("Failed to send SIGUSR1 to pid %d", child.pid)
     except (AccessDenied, NoSuchProcess):  # pragma: no cover
         pass
     LOG.debug("Sending SIGUSR1 to %d (parent)", pid)
     try:
         kill(pid, SIGUSR1)
     except OSError:
         LOG.warning("Failed to send SIGUSR1 to pid %d", pid)
     start_time = time()
     gcda_found = False
     delay = 0.1
      # wait for processes to finish writing .gcda files
     # this should typically take less than 1 second
     while True:
         for proc in process_iter(attrs=["pid", "ppid", "open_files"]):
             # check if proc is the target or child process
             if pid in (proc.info["pid"], proc.info["ppid"]):
                 if proc.info["open_files"] is None:
                     continue
                 if any(
                         x.path.endswith(".gcda")
                         for x in proc.info["open_files"]):
                     gcda_found = True
                     # get the pid of the process that has the file open
                     gcda_open = proc.info["pid"]
                     break
         else:
             gcda_open = None
         elapsed = time() - start_time
         if gcda_found:
             if gcda_open is None:
                 # success
                 LOG.debug("gcda dump took %0.2fs", elapsed)
                 break
             if elapsed >= timeout:
                 # timeout failure
                 LOG.warning("gcda file open by pid %d after %0.2fs",
                             gcda_open, elapsed)
                 try:
                     kill(gcda_open, SIGABRT)
                 except OSError:
                     pass
                 sleep(1)
                 self.close()
                 break
             if delay < 1.0:
                 # increase delay to a maximum of 1 second
                 delay = min(1.0, delay + 0.1)
         elif elapsed >= 3:
              # assume we missed the process writing .gcda files
             LOG.warning("No gcda files seen after %0.2fs", elapsed)
             break
         if not self._puppet.is_healthy():
             LOG.warning("Browser failure during dump_coverage()")
             break
         sleep(delay)
Example #40
def cancell_ORCA():
    if (list_jobs != []):
        for itera in process_iter():
            if (itera.name() == "orca.exe"):
                p = Process(itera.pid)
                p.kill()
Example #41
 def Terminate(self):
     p = Process(self.pid)
     return p.terminate()
Example #42
 def Executable(self):
     p = Process(self.pid)
     return p.exe()
Example #43
    def _update(self, connections):
        listeners_tcp = set()
        listeners_udp = set()

        ingress_tcp = set()
        ingress_udp = set()

        egress_tcp = set()
        egress_udp = set()

        # Register listeners first
        for connection in connections:
            if connection.raddr:
                continue

            program = ''

            if connection.pid:
                try:
                    process = Process(connection.pid)
                    program = process.exe()
                except Error:
                    program = 'pid={}'.format(connection.pid)

            listeners = None

            if connection.type == SOCK_DGRAM:
                listeners = listeners_udp
            elif connection.type == SOCK_STREAM and connection.status == 'LISTEN':
                listeners = listeners_tcp
            else:
                continue

            listeners.add(
                (program, connection.laddr.ip, connection.laddr.port))

        new_listeners_tcp = listeners_tcp - self.known_listeners_tcp
        new_listeners_udp = listeners_udp - self.known_listeners_udp

        for new_listener_udp in new_listeners_udp:
            if new_listener_udp not in self.pending_udp_listeners:
                self.pending_udp_listeners[new_listener_udp] = 1
            else:
                self.pending_udp_listeners[new_listener_udp] += 1

        for old_listener_udp in self.pending_udp_listeners.keys():
            if old_listener_udp not in new_listeners_udp:
                del self.pending_udp_listeners[old_listener_udp]

        new_listeners_udp = set(new_listener_udp for new_listener_udp, cnt in
                                self.pending_udp_listeners.iteritems()
                                if cnt > 16)

        for new_listener_udp in new_listeners_udp:
            del self.pending_udp_listeners[new_listener_udp]

        self.known_listeners_tcp.update(listeners_tcp)
        self.known_listeners_udp.update(new_listeners_udp)

        known_listeners_udp = set(
            (ip, port) for _, ip, port in self.known_listeners_udp)

        known_listeners_tcp = set(
            (ip, port) for _, ip, port in self.known_listeners_tcp)

        # Now update ingress/egress connections
        for connection in connections:
            if not connection.raddr:
                continue

            program = ''

            if connection.pid:
                try:
                    process = Process(connection.pid)
                    program = process.exe()
                except Error:
                    program = 'pid={}'.format(connection.pid)

            remote_ip = connection.raddr.ip
            remote_tuple = connection.raddr.ip, connection.raddr.port
            local = connection.laddr.ip, connection.laddr.port

            connlist = None
            connitem = None

            if connection.type == SOCK_DGRAM:
                if any(candidate in known_listeners_udp
                       for candidate in (local, ('::', local[1]),
                                         ('0.0.0.0', local[1]),
                                         ('127.0.0.1', local[1]),
                                         ('::ffff:127.0.0.1', local[1]))):
                    connlist = ingress_udp
                    connitem = program, local, remote_ip
                else:
                    connlist = egress_udp
                    connitem = program, remote_tuple

            elif connection.type == SOCK_STREAM:
                if any(candidate in known_listeners_tcp
                       for candidate in (local, ('::', local[1]),
                                         ('0.0.0.0', local[1]),
                                         ('127.0.0.1', local[1]),
                                         ('::ffff:127.0.0.1', local[1]))):
                    connlist = ingress_tcp
                    connitem = program, local, remote_ip
                else:
                    connlist = egress_tcp
                    connitem = program, remote_tuple

            else:
                continue

            connlist.add(connitem)

        new_ingress_udp = ingress_udp - self.known_ingress_udp
        new_ingress_tcp = ingress_tcp - self.known_ingress_tcp

        new_egress_udp = egress_udp - self.known_egress_udp
        new_egress_tcp = egress_tcp - self.known_egress_tcp

        self.known_ingress_udp.update(ingress_udp)
        self.known_ingress_tcp.update(ingress_tcp)

        self.known_egress_udp.update(egress_udp)
        self.known_egress_tcp.update(egress_tcp)

        new_objects = tuple(
            tuple(x)
            for x in (new_listeners_tcp, new_listeners_udp, new_ingress_tcp,
                      new_ingress_udp, new_egress_tcp, new_egress_udp))

        if not any(x for x in new_objects):
            return

        self.append(new_objects)
Example #44
def _process_cpu():
    """Return process CPU usage."""
    proc = Process(os.getpid())
    return sum([p.cpu_percent(interval=0.01) for p in [proc]])
Example #45
def bot(id):
	global args,locks,urls,user_agents,proxies,drivers
	while True:
		try:
			url=choice(urls)
			with locks[0]:
				if len(proxies)==0:
					proxies.extend(get_proxies())
				proxy=choice(proxies)
				proxies.remove(proxy)
			log('[INFO][%d] Connecting to %s'%(id,proxy))
			user_agent=choice(user_agents) if args.user_agent else user_agents(os=('win','android'))
			log('[INFO][%d] Setting user agent to %s'%(id,user_agent))
			if args.slow_start:
				locks[1].acquire()
			if system()=='Windows':
				executable_dir=path_join(environ['APPDATA'],'DeBos','drivers')
			else:
				executable_dir=path_join(environ['HOME'],'.DeBos','drivers')
			if args.driver=='chrome':
				chrome_options=webdriver.ChromeOptions()
				chrome_options.add_argument('--proxy-server={}'.format(proxy))
				chrome_options.add_argument('--user-agent={}'.format(user_agent))
				chrome_options.add_argument('--mute-audio')
				chrome_options.add_experimental_option('excludeSwitches',['enable-logging'])
				if args.headless:
					chrome_options.add_argument('--headless')
				if is_root():
					chrome_options.add_argument('--no-sandbox')
				if system()=='Windows':
					executable_path=path_join(executable_dir,'chromedriver.exe')
				else:
					executable_path=path_join(executable_dir,'chromedriver')
				driver=webdriver.Chrome(options=chrome_options,executable_path=executable_path)
			else:
				firefox_options=webdriver.FirefoxOptions()
				firefox_options.preferences.update({
					'media.volume_scale':'0.0',
					'general.useragent.override':user_agent,
					'network.proxy.type':1,
					'network.proxy.http':proxy.split(':')[0],
					'network.proxy.http_port':int(proxy.split(':')[1]),
					'network.proxy.ssl':proxy.split(':')[0],
					'network.proxy.ssl_port':int(proxy.split(':')[1])
				})
				if args.headless:
					firefox_options.add_argument('--headless')
				if system()=='Windows':
					executable_path=path_join(executable_dir,'geckodriver.exe')
				else:
					executable_path=path_join(executable_dir,'geckodriver')
				driver=webdriver.Firefox(options=firefox_options,service_log_path=devnull,executable_path=executable_path)
			process=driver.service.process
			pid=process.pid
			cpids=[x.pid for x in Process(pid).children()]
			pids=[pid]+cpids
			drivers.extend(pids)
			if args.slow_start:
				locks[1].release()
			log('[INFO][%d] Successfully started webdriver!'%id)
			driver.set_page_load_timeout(45)
			log('[INFO][%d] Opening %s'%(id,url))
			driver.get(url)
			if driver.title.endswith('YouTube'):
				logv('[INFO][%d] Video successfully loaded!'%id)
				play_button=driver.find_element_by_class_name('ytp-play-button')
				if play_button.get_attribute('title')=='Play (k)':
					play_button.click()
				if play_button.get_attribute('title')=='Play (k)':
					raise ElementClickInterceptedException
				if args.duration:
					sleep(args.duration)
				else:
					video_duration=driver.find_element_by_class_name('ytp-time-duration').get_attribute('innerHTML')
					sleep(float(sum([int(x)*60**i for i,x in enumerate(video_duration.split(':')[::-1])]))*uniform(0.35,0.85))
				logv('[INFO][%d] Video successfully viewed!'%id)
			else:
				log('[INFO][%d] Dead proxy eliminated!'%id)
		except WebDriverException as e:
			log('[WARNING][%d] %s'%(id,e.__class__.__name__))
		except NoSuchProcess:
			log('[WARNING][%d] NoSuchProcess'%id)
		except KeyboardInterrupt:exit(0)
		except:exit(1)
		finally:
			log('[INFO][%d] Quitting webdriver!'%id)
			try:driver
			except NameError:pass
			else:driver.quit()
			with locks[2]:
				try:pids
				except NameError:pass
				else:
					for pid in pids:
						try:drivers.remove(pid)
						except:pass
Example #46
    def _is_correct_process(self, job_status: JobStatusSpec,
                            psutil_process: psutil.Process) -> bool:
        if psutil_process.name() != BackgroundJobDefines.process_name:
            return False

        return True
Example #47
def get_info(process=None, interval=0, with_childs=False):
    """Return information about a process. (can be an pid or a Process object)

    If process is None, will return the information about the current process.
    """
    if process is None or isinstance(process, int):
        if process is None:
            pid = os.getpid()
        else:
            pid = process

        if pid in _PROCS:
            process = _PROCS[pid]
        else:
            _PROCS[pid] = process = Process(pid)

    info = {}
    try:
        mem_info = process.get_memory_info()
        info['mem_info1'] = bytes2human(mem_info[0])
        info['mem_info2'] = bytes2human(mem_info[1])
    except AccessDenied:
        info['mem_info1'] = info['mem_info2'] = "N/A"

    try:
        info['cpu'] = process.get_cpu_percent(interval=interval)
    except AccessDenied:
        info['cpu'] = "N/A"

    try:
        info['mem'] = round(process.get_memory_percent(), 1)
    except AccessDenied:
        info['mem'] = "N/A"

    try:
        cpu_times = process.get_cpu_times()
        ctime = timedelta(seconds=sum(cpu_times))
        ctime = "%s:%s.%s" % (
            ctime.seconds // 60 % 60, str(
                (ctime.seconds % 60)).zfill(2), str(ctime.microseconds)[:2])
    except AccessDenied:
        ctime = "N/A"

    info['ctime'] = ctime

    try:
        info['pid'] = process.pid
    except AccessDenied:
        info['pid'] = 'N/A'

    try:
        info['username'] = process.username()
    except AccessDenied:
        info['username'] = "N/A"

    try:
        info['nice'] = process.nice()
    except AccessDenied:
        info['nice'] = 'N/A'
    except NoSuchProcess:
        info['nice'] = 'Zombie'

    try:
        cmdline = os.path.basename(shlex.split(process.cmdline()[0])[0])
    except (AccessDenied, IndexError):
        cmdline = "N/A"

    try:
        info['create_time'] = process.create_time()
    except AccessDenied:
        info['create_time'] = 'N/A'

    try:
        info['age'] = time.time() - process.create_time()
    except AccessDenied:
        info['age'] = 'N/A'

    info['cmdline'] = cmdline

    info['children'] = []
    if with_childs:
        for child in process.children():
            info['children'].append(get_info(child, interval=interval))

    return info
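A quick usage sketch for the helper above (a minimal example, assuming the module-level pieces the snippet relies on, such as bytes2human, _PROCS and the psutil imports, are present):

if __name__ == '__main__':
    # Inspect the current process, sample CPU briefly, include children.
    info = get_info(interval=0.1, with_childs=True)
    print(info['pid'], info['cmdline'], info['mem_info1'], info['cpu'])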
Ejemplo n.º 48
0
      raise SystemExit(0)

try:
  from bottombar import BottomBar
except ImportError:
  BottomBar = None

try:
  from resource import getrusage, RUSAGE_SELF
except ImportError:
  try:
    from psutil import Process
  except ImportError:
    _rss_memory = None
  else:
    _rss_memory = lambda: Process().memory_info().rss
else:
  _rss_memory = lambda: getrusage(RUSAGE_SELF).ru_maxrss << 10

def _format(uri, t0, width):
  if len(uri) + 8 <= width: # space for full uri, maybe stats
    minutes, seconds = divmod(int(time.perf_counter() - t0), 60)
    hours, minutes = divmod(minutes, 60)
    runtime = ' runtime: {}:{:02d}:{:02d}'.format(hours, minutes, seconds)
    memory = ' memory: {:,}M |'.format(_rss_memory() >> 20) if _rss_memory else ''
    prefix = 'writing log to '
    if len(prefix) + len(uri) + len(memory) + len(runtime) > width:
      memory = memory[8:]
      runtime = runtime[9:]
    if len(prefix) + len(uri) + len(memory) + len(runtime) <= width:
      uri = prefix + uri
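The fallback chain above prefers the standard-library resource module (the << 10 converts ru_maxrss from KiB to bytes on Linux; macOS reports bytes instead) and only uses psutil when resource is unavailable, for example on Windows. A small usage sketch built on the _rss_memory callable defined above:

def report_memory() -> str:
    # _rss_memory is None when neither resource nor psutil could be imported.
    if _rss_memory is None:
        return 'memory: n/a'
    return 'memory: {:,}M'.format(_rss_memory() >> 20)

print(report_memory())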
Ejemplo n.º 49
0
    def _get_memory_usage(pid, force_gc=False):
        if force_gc:
            gc.collect()

        return Process(pid).memory_info().rss
Ejemplo n.º 50
0
 async def _get_mem_usage():
     mem_usage = float(Process(getpid()).memory_info().rss) / 1000000
     return str(round(mem_usage, 2)) + " MB"
Ejemplo n.º 51
0
    async def show_bot_info(self, ctx, patreon_status):
        embed = Embed(title="Doob Info  <:doob:754762131085459498>",
                      colour=ctx.author.colour,
                      timestamp=datetime.utcnow())

        bot_version = self.bot.VERSION

        proc = Process()
        with proc.oneshot():
            uptime = timedelta(seconds=time() - proc.create_time())
            cpu_time = timedelta(seconds=(cpu := proc.cpu_times()).system +
                                 cpu.user)
            mem_total = virtual_memory().total / (1024**2)
            mem_of_total = proc.memory_percent()
            mem_usg = mem_total * (mem_of_total / 100)

        fields = [
            ("Name", "Doob <:doob:754762131085459498>", False),
            ("Description",
             "The multipurpose Discord Bot with global leveling and powerful logging tools for your server.",
             False),
            ("Developers", "<@308000668181069824>, <@476188720521805825>",
             False),
            ("Doob's Server Count", f"{str(len(self.bot.guilds))}", True),
            ("Doob's Member Count", f"{str(len(self.bot.users))}", True),
            ("The ping for Doob is...",
             f" :ping_pong: {round(self.bot.latency * 1000)} ms", False),
            ("Python Version", python_version(), True),
            ("Uptime", uptime, True), ("CPU Time", cpu_time, True),
            ("Memory Usage",
             f"{mem_usg:,.3f} MiB / {mem_total:,.0f} MiB ({mem_of_total:.0f}%)",
             True), ("Library", f"discord.py {discord_version}", True),
            ("Bot Version",
             f"{self.bot.VERSION} - [Changelog](https://github.com/doobdev/doob/blob/master/CHANGELOG.md#v{bot_version.replace('.', '')})",
             True),
            ("Top.gg Link", "https://top.gg/bot/680606346952966177", False),
            ("Invite Link",
             "[Invite Link Here](https://discordapp.com/oauth2/authorize?client_id=680606346952966177&scope=bot&permissions=271674430)",
             True),
            ("GitHub Repository",
             "[Click Here](https://github.com/doobdev/doob)", True)
        ]

        for name, value, inline in fields:
            embed.add_field(name=name, value=value, inline=inline)

        embed.set_thumbnail(url=ctx.guild.me.avatar_url)
        embed.set_footer(
            text=f"{ctx.author.name} requested Doob's information",
            icon_url=ctx.author.avatar_url)

        if patreon_status == True:
            embed.add_field(
                name="Patreon",
                value=
                f"Thanks for [Donating](https://patreon.com/doobdev) {ctx.author.display_name}! :white_check_mark:",
                inline=False)
            await ctx.send(embed=embed)

        if patreon_status == False:
            embed.add_field(
                name="Patreon",
                value="[Click Here for Patreon](https://patreon.com/doobdev)",
                inline=False)
            await ctx.send(embed=embed)
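The Process.oneshot() block above batches several process reads into one pass; a sketch of the same accounting without the Discord embed, mirroring the psutil calls used there:

from datetime import timedelta
from time import time

from psutil import Process, virtual_memory

proc = Process()
with proc.oneshot():  # batch the underlying system calls
    uptime = timedelta(seconds=time() - proc.create_time())
    cpu = proc.cpu_times()
    cpu_time = timedelta(seconds=cpu.system + cpu.user)
    mem_total = virtual_memory().total / (1024 ** 2)   # MiB
    mem_of_total = proc.memory_percent()
    mem_usg = mem_total * (mem_of_total / 100)

print(f"up {uptime}, cpu {cpu_time}, "
      f"{mem_usg:,.1f} MiB of {mem_total:,.0f} MiB ({mem_of_total:.0f}%)")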
Ejemplo n.º 52
0
class Misc(Cog):
    def __init__(self, bot: CustomBot):
        super().__init__(self.__class__.__name__)
        self.bot = bot
        pid = getpid()
        self.process = Process(pid)
        self.process.cpu_percent()

    async def cog_command_error(self, ctx: Context, error):
        '''Local error handler for the cog'''

        # Throw errors properly for me
        if ctx.author.id in self.bot.config['owners'] and not isinstance(
                error, CommandOnCooldown):
            text = f'```py\n{error}```'
            await ctx.send(text)
            raise error

        # Cooldown
        if isinstance(error, CommandOnCooldown):
            if ctx.author.id in self.bot.config['owners']:
                await ctx.reinvoke()
            else:
                await ctx.send(
                    f"You can only use this command once every `{error.cooldown.per:.0f} seconds` per server. You may use this again in `{error.retry_after:.2f} seconds`."
                )
            return

        # Disabled command
        elif isinstance(error, DisabledCommand):
            if ctx.author.id in self.bot.config['owners']:
                await ctx.reinvoke()
            else:
                await ctx.send("This command has been disabled.")
            return

    @command(enabled=False, aliases=['upvote'])
    @cooldown(1, 5, BucketType.user)
    async def vote(self, ctx: Context):
        '''Gives you a link to upvote the bot'''

        if self.bot.config['dbl_vainity']:
            await ctx.send(
                f"<https://discordbots.org/bot/{self.bot.config['dbl_vainity']}/vote>\nSee {ctx.prefix}perks for more information."
            )
        else:
            await ctx.send(
                f"<https://discordbots.org/bot/{self.bot.user.id}/vote>\nSee {ctx.prefix}perks for more information."
            )

    @command(aliases=['git', 'code'])
    @cooldown(1, 5, BucketType.user)
    async def github(self, ctx: Context):
        '''Gives you a link to the bot's code repository'''

        await ctx.send(f"<{self.bot.config['github']}>")

    @command(aliases=['patreon', 'paypal'])
    @cooldown(1, 5, BucketType.user)
    async def donate(self, ctx: Context):
        '''Gives you the creator's donation links'''

        links = []
        if self.bot.config['patreon']:
            links.append(
                f"Patreon: <{self.bot.config['patreon']}> (see {ctx.prefix}perks to see what you get)"
            )
        if self.bot.config['paypal']:
            links.append(
                f"PayPal: <{self.bot.config['paypal']}> (doesn't get you the perks, but is very appreciated)"
            )
        if not links:
            ctx.command.enabled = False
            ctx.command.hidden = True
        await ctx.send('\n'.join(links))

    @command()
    @cooldown(1, 5, BucketType.user)
    async def invite(self, ctx: Context):
        '''Gives you an invite link for the bot'''

        await ctx.send(
            f"<https://discordapp.com/oauth2/authorize?client_id={self.bot.user.id}&scope=bot&permissions=314432>"
        )

    @command(aliases=['guild', 'support'])
    @cooldown(1, 5, BucketType.user)
    async def server(self, ctx: Context):
        '''Gives you a server invite link'''

        await ctx.send(self.bot.config['guild_invite'])

    @command(hidden=True)
    @cooldown(1, 5, BucketType.user)
    async def echo(self, ctx: Context, *, content: str):
        '''Echos a saying'''

        await ctx.send(content)

    @command(enabled=False)
    @cooldown(1, 5, BucketType.user)
    async def perks(self, ctx: Context):
        '''Shows you the perks associated with different support tiers'''

        # DISABLED UNTIL I KNOW WHAT TO DO WITH IT

        # Normies
        normal_users = [
            "60s tree cooldown",
            "5 children",
        ]

        # Perks for voting
        voting_perks = [
            "30s tree cooldown",
        ]

        # Perks for $1 Patrons
        t1_donate_perks = [
            "15s tree cooldown",
            "Up to 10 children",
            "`disownall` command (disowns all of your children at once)",
        ]

        # $3 Patrons
        t2_donate_perks = [
            "Up to 15 children",
            "`stupidtree` command (shows all relations, not just blood relatives)",
        ]

        # Perks for $5 Patrons
        t3_donate_perks = [
            "5s tree cooldown",
            "Up to 20 children",
        ]
        e = Embed()
        e.add_field(name=f'Normal Users',
                    value=f"Gives you access to:\n* " +
                    '\n* '.join(normal_users))
        e.add_field(name=f'Voting ({ctx.clean_prefix}vote)',
                    value=f"Gives you access to:\n* " +
                    '\n* '.join(voting_perks))
        e.add_field(name=f'T1 Patreon Donation ({ctx.clean_prefix}donate)',
                    value=f"Gives you access to:\n* " +
                    '\n* '.join(t1_donate_perks))
        e.add_field(name=f'T2 Patreon Donation ({ctx.clean_prefix}donate)',
                    value=f"Gives you access to:\n* " +
                    '\n* '.join(t2_donate_perks))
        e.add_field(name=f'T3 Patreon Donation ({ctx.clean_prefix}donate)',
                    value=f"Gives you access to:\n* " +
                    '\n* '.join(t3_donate_perks))
        await ctx.send(embed=e)

    @command(aliases=['status'])
    @cooldown(1, 5, BucketType.user)
    async def stats(self, ctx: Context):
        '''Gives you the stats for the bot'''

        # await ctx.channel.trigger_typing()
        embed = Embed(colour=0x1e90ff)
        embed.set_footer(text=str(self.bot.user),
                         icon_url=self.bot.user.avatar_url)
        embed.add_field(
            name="ProfileBot",
            value="A bot to make the process of filling out forms fun.")
        creator_id = self.bot.config["owners"][0]
        creator = await self.bot.fetch_user(creator_id)
        embed.add_field(name="Creator", value=f"{creator!s}\n{creator_id}")
        embed.add_field(name="Library", value=f"Discord.py {dpy_version}")
        try:
            embed.add_field(
                name="Average Guild Count",
                value=int((len(self.bot.guilds) / len(self.bot.shard_ids)) *
                          self.bot.shard_count))
        except TypeError:
            embed.add_field(name="Guild Count", value=len(self.bot.guilds))
        embed.add_field(name="Shard Count", value=self.bot.shard_count)
        embed.add_field(name="Average WS Latency",
                        value=f"{(self.bot.latency * 1000):.2f}ms")
        embed.add_field(
            name="Coroutines",
            value=
            f"{len([i for i in Task.all_tasks() if not i.done()])} running, {len(Task.all_tasks())} total."
        )
        embed.add_field(name="Process ID", value=self.process.pid)
        embed.add_field(name="CPU Usage",
                        value=f"{self.process.cpu_percent():.2f}")
        embed.add_field(
            name="Memory Usage",
            value=
            f"{self.process.memory_info()[0]/2**20:.2f}MB/{virtual_memory()[0]/2**20:.2f}MB"
        )
        # ut = self.bot.get_uptime()  # Uptime
        # uptime = [
        #     int(ut // (60*60*24)),
        #     int((ut % (60*60*24)) // (60*60)),
        #     int(((ut % (60*60*24)) % (60*60)) // 60),
        #     ((ut % (60*60*24)) % (60*60)) % 60,
        # ]
        # embed.add_field(name="Uptime", value=f"{uptime[0]} days, {uptime[1]} hours, {uptime[2]} minutes, and {uptime[3]:.2f} seconds.")
        try:
            await ctx.send(embed=embed)
        except Exception:
            await ctx.send("I tried to send an embed, but I couldn't.")

    @command(aliases=['clean'])
    async def clear(self, ctx: Context):
        '''Clears the bot's commands from chat'''

        if ctx.channel.permissions_for(ctx.guild.me).manage_messages:
            _ = await ctx.channel.purge(
                limit=100, check=lambda m: m.author.id == self.bot.user.id)
        else:
            _ = await ctx.channel.purge(
                limit=100,
                check=lambda m: m.author.id == self.bot.user.id,
                bulk=False)
        await ctx.send(f"Cleared `{len(_)}` messages from chat.",
                       delete_after=3.0)

    @command()
    async def shard(self, ctx: Context):
        '''Gives you the shard that your server is running on'''

        await ctx.send(
            f"The shard that your server is on is shard `{ctx.guild.shard_id}`."
        )
Ejemplo n.º 53
0
    async def __main__(self, CommandArgs):
        response = CommandArgs.response

        clusters = 0
        total_mem = 0

        process = Process(getpid())
        process_mem = math.floor(process.memory_info()[0] / float(2**20))

        if IS_DOCKER:
            total_guilds = guilds = 0
            total_mem = 0
            errored = 0

            stats = await broadcast(None, type="STATS")
            clusters = len(stats)

            for cluster_id, cluster_data in stats.items():
                if cluster_data in ("cluster offline", "cluster timeout"):
                    errored += 1
                else:
                    total_guilds += cluster_data[0]
                    total_mem += cluster_data[1]

            if errored:
                guilds = f"{total_guilds} ({len(self.client.guilds)}) ({errored} errored)"
            else:
                guilds = f"{total_guilds} ({len(self.client.guilds)})"

        else:
            total_guilds = guilds = str(len(self.client.guilds))
            clusters = 1
            total_mem = process_mem

        seconds = math.floor(time() - STARTED)

        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)

        days, hours, minutes, seconds = None, None, None, None

        if d:
            days = f"{d}d"
        if h:
            hours = f"{h}h"
        if m:
            minutes = f"{m}m"
        if s:
            seconds = f"{s}s"

        uptime = f"{days or ''} {hours or ''} {minutes or ''} {seconds or ''}".strip(
        )
        mem = IS_DOCKER and f"{total_mem} ({process_mem})" or process_mem

        embed = Embed(
            description=
            f"Roblox Verification made easy! Features everything you need to integrate your Discord server with Roblox."
        )
        embed.set_author(name=Bloxlink.user.name,
                         icon_url=Bloxlink.user.avatar_url)

        embed.add_field(name="Servers", value=guilds)
        embed.add_field(name="Node Uptime", value=uptime)
        embed.add_field(name="Memory Usage", value=f"{mem} MB")

        embed.add_field(
            name="Resources",
            value=
            "**[Website](https://blox.link)** | **[Discord](https://blox.link/support)** | **[Invite Bot]"
            "(https://blox.link/invite)** | **[Premium](https://blox.link/premium)**\n\n**[Repository](https://github.com/bloxlink/Bloxlink)**",
            inline=False)

        embed.set_footer(
            text=f"Shards: {self.shard_range} | Node: {CLUSTER_ID}/{clusters-1}"
        )

        await response.send(embed=embed)

        if IS_DOCKER and RELEASE == "MAIN":
            await self.r.table("miscellaneous").insert(
                {
                    "id": "stats",
                    "stats": {
                        "guilds": total_guilds,
                        "version": VERSION,
                        "memory": total_mem,
                        "uptime": uptime,
                        "clusters": clusters
                    }
                },
                conflict="update").run()
Ejemplo n.º 54
0
def _process_memory():
    """Return process memory usage, in MB."""
    proc = Process(os.getpid())
    return proc.memory_info().rss / (1024 * 1024)
Ejemplo n.º 55
0
def _get_memory_usage():
    """ Return the memory resident set size (top->RES) usage in bytes. """
    process = Process(getpid())
    return process.memory_info().rss
Ejemplo n.º 56
0
def eprint(*args, **kwargs):
    print(*args, file=stderr, **kwargs)


# slice msg
msg = input()
msg1 = msg[0:int(len(msg) / 2)]
msg2 = msg[int(len(msg) / 2):len(msg)]

# args
queue = MessageQueue(int(argv[1]))
master_name = argv[2]

# master process must be the nearest parent with the exact given name
master_proc = Process(getpid())
while master_proc.name() != master_name:
    master_proc = master_proc.parent()

with Semaphore(int(argv[1])):
    try:
        # send-receive echo
        # type=1 : from me to other
        # type=2 : to me from other
        queue.send(msg1, type=1)
        # will block until a process claims the rest of the message
        (echo1, echo_type) = queue.receive(type=2)
        echo_proc = Process(queue.last_send_pid)

        if echo1.decode("ascii") == msg1:
            parent = echo_proc
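The loop under "master process must be the nearest parent with the exact given name" can be isolated into a helper. A sketch (the name find_ancestor is illustrative); unlike the original loop, it raises instead of failing on None.name() when no ancestor matches:

from typing import Optional

from psutil import Process

def find_ancestor(name: str, start: Optional[Process] = None) -> Process:
    """Return the nearest ancestor process whose name() equals `name`."""
    proc = (start or Process()).parent()
    while proc is not None:
        if proc.name() == name:
            return proc
        proc = proc.parent()
    raise LookupError('no ancestor named %r' % name)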
Ejemplo n.º 57
0
class systemCollectionAgent():
    def __init__(self):
        self.data={}
        self.collectorFunctions=[ 
            self.agentMemoryInfo,
            self.virtualMemory,
            self.cpu,
            self.swap
        ]
        self.process=Process(os.getpid())
        self.val=1024*1024
    @threaded
    def agentMemoryInfo(self):
        """
        agent memory info   
        controller.agent.mem.rss
        controller.agent.mem.vms
        """
        mem_info = self.process.memory_info()
        self.data['rss']=mem_info.rss/self.val
        self.data['vms']=mem_info.vms/self.val
    
    @threaded
    def virtualMemory(self):
        """ virtual memory """
        virtual_memory = psutil.virtual_memory()
        self.data['virtualMemoryTotal']=virtual_memory.total/self.val
        self.data['virtualMemoryUsed']=(virtual_memory.total - virtual_memory.available)/self.val
        self.data['virtualMemoryAll']=virtual_memory.used/self.val
        self.data['virtualMemoryCached']=virtual_memory.cached/self.val
        self.data['virtualMemoryBuffers']=virtual_memory.buffers/self.val
        self.data['virtualMemoryFree']=virtual_memory.free/self.val
        self.data['virtualMemoryPercent']=virtual_memory.percent
        self.data['virtualMemoryAvailable']=virtual_memory.available/self.val


    @threaded
    def cpu(self):
        """ cpu """
        try:
            cpuTimes = psutil.cpu_times_percent(1)
            self.data['userCPU']=(cpuTimes.user + cpuTimes.nice)
            self.data['systemCPU']=cpuTimes.system + cpuTimes.irq + cpuTimes.softirq
            self.data['idleCPU']=cpuTimes.idle
        except:
            logger.log("error accessing cpu percent")
        return  

        
    @threaded
    def swap(self):
        """ swap memory details """
        try:
            swapMemory = psutil.swap_memory()
            self.data['totalSwapMemory']=swapMemory.total/self.val
            self.data['usedSwapMemory']=swapMemory.used/self.val
            self.data['freeSwapMemory']=swapMemory.free/self.val
            self.data['percentFreeSwapMemory']=swapMemory.percent
        except:
            logger.log("error getting swap memory details")
            
        return
        
    def setData(self):
        handles=[]
        for i in self.collectorFunctions:
            handles.append(i())
        for thread in handles:
            thread.join()
        return self.data
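A usage sketch for the collector class above, assuming the @threaded decorator (not shown in this snippet) returns started thread objects whose join() blocks until the collector finishes:

if __name__ == '__main__':
    agent = systemCollectionAgent()
    metrics = agent.setData()          # runs all collectors and waits for them
    for key, value in sorted(metrics.items()):
        print(f"{key}: {value}")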

        
Ejemplo n.º 58
0
 def __init__(self):
     self.root = self.build_node(Process(1))
     self.render_tree = RenderTree(self.root)
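The constructor above leans on anytree's RenderTree plus an unseen build_node helper; a self-contained sketch of the same idea using only psutil, printing an indented process tree:

from psutil import Process, NoSuchProcess

def print_tree(proc: Process, depth: int = 0) -> None:
    """Print a process and its children as an indented tree."""
    try:
        print('  ' * depth + f"{proc.pid} {proc.name()}")
        for child in proc.children():
            print_tree(child, depth + 1)
    except NoSuchProcess:
        pass

print_tree(Process())  # the original starts from PID 1 instead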
Ejemplo n.º 59
0
def run_neo4j(inputs, publish=False):
    """
    Starts and carries out operations on the Neo4j database.

    :param inputs: Dictionary of inputs.
    :param publish: If True, publishes messages to be received by GUI.
    :return:
    """
    _create_logger(inputs['fp'])
    # overwritten settings should be retained
    old_inputs = read_settings(inputs['fp'] + '/settings.json')
    # handler to file
    # check if password etc is already there
    if 'username' in old_inputs:
        logins = dict((k, old_inputs[k]) for k in ('username', 'password', 'address', 'neo4j'))
    old_inputs.update(inputs)
    inputs = old_inputs
    if 'pid' in inputs:
        existing_pid = pid_exists(inputs['pid'])
    else:
        existing_pid = False
    if not inputs['neo4j']:
        inputs.update(logins)
    checks = str()
    if inputs['job'] == 'start':
        if not existing_pid:
            start_database(inputs, publish)
            existing_pid = True
        else:
            logger.info("Database is already running.  ")
    elif inputs['job'] == 'quit':
        if not existing_pid:
            logger.info("No database open.  ")
        else:
            try:
                if publish:
                    pub.sendMessage('update', msg='Getting PID...')
                # there is a lingering Java process that places a lock on the database.
                # terminating the subprocess does NOT terminate the Java process,
                # so the store lock has to be deleted manually.
                # This is different for Linux & Windows machines and may not be trivial
                # however, PID solution may be platform-independent
                # CURRENT SOLUTION:
                # get parent PID of subprocess
                # use psutil to get child PIDs
                # kill child PIDs too
                parent_pid = inputs['pid']
                parent = Process(parent_pid)
                children = parent.children(recursive=True)
                for child in children:
                    child.kill()
                # apparently killing the children also kills the parent
            except Exception:
                logger.warning("Failed to close database.  ", exc_info=True)
    elif inputs['job'] == 'clear':
        if not existing_pid:
            start_database(inputs, publish)
            existing_pid = True
        try:
            if publish:
                pub.sendMessage('update', msg='Clearing database...')
            importdriver = ImportDriver(user=inputs['username'],
                                        password=inputs['password'],
                                        uri=inputs['address'], filepath=inputs['fp'])
            importdriver.clear_database()
            importdriver.close()
        except Exception:
            logger.warning("Failed to clear database.  ", exc_info=True)
    elif inputs['job'] == 'write':
        if not existing_pid:
            start_database(inputs, publish)
            existing_pid = True
        try:
            if publish:
                pub.sendMessage('update', msg='Accessing database...')
            importdriver = ImportDriver(user=inputs['username'],
                                        password=inputs['password'],
                                        uri=inputs['address'], filepath=inputs['fp'])
            importdriver.export_network(path=inputs['fp'])
            importdriver.close()
        except Exception:
            logger.warning("Failed to write database to graphml file.  ", exc_info=True)
    elif inputs['job'] == 'cyto':
        if not existing_pid:
            start_database(inputs, publish)
            existing_pid = True
        try:
            if publish:
                pub.sendMessage('update', msg='Accessing database...')
            importdriver = ImportDriver(user=inputs['username'],
                                        password=inputs['password'],
                                        uri=inputs['address'], filepath=inputs['fp'])
            importdriver.export_cyto()
            importdriver.close()
        except Exception:
            logger.warning("Failed to export networks to Cytoscape.  ", exc_info=True)
    else:
        if not existing_pid:
            start_database(inputs, publish)
            existing_pid = True
        if publish:
            pub.sendMessage('update', msg='Uploading files to database...')
        filestore = None
        if inputs['procbioms']:
            filestore = read_bioms(inputs['procbioms'])
        # ask users for additional input
        bioms = Batch(filestore, inputs)
        bioms = Nets(bioms)
        for file in inputs['network']:
            network = _read_network(file)
            bioms.add_networks(network, file)
        importdriver = None
        sleep(12)
        importdriver = ImportDriver(user=inputs['username'],
                                    password=inputs['password'],
                                    uri=inputs['address'], filepath=inputs['fp'])
        # importdriver.clear_database()
        try:
            # pub.sendMessage('update', msg='Uploading BIOM files...')
            logger.info("Uploading BIOM files...")
            itemlist = list()
            for level in inputs['procbioms']:
                for item in inputs['procbioms'][level]:
                    name = inputs['procbioms'][level][item]
                    biomfile = load_table(name)
                    importdriver.convert_biom(biomfile=biomfile, exp_id=name)
                    itemlist.append(name)
            checks += 'Successfully uploaded the following items and networks to the database: \n'
            for item in itemlist:
                checks += (item + '\n')
            checks += '\n'
            logger.info(checks)
        except Exception:
            logger.warning("Failed to upload BIOM files to Neo4j database.  ", exc_info=True)
        try:
            # pub.sendMessage('update', msg='Uploading network files...')
            logger.info('Uploading network files...  ')
            for item in bioms.networks:
                network = bioms.networks[item]
                # try to split filename to make a nicer network id
                subnames = item.split('/')
                if len(subnames) == 1:
                    subnames = item.split('\\')
                name = subnames[-1].split('.')[0]
                importdriver.convert_networkx(network=network, network_id=name, mode='weight')
                itemlist.append(item)
        except Exception:
            logger.warning('Unable to upload network files to Neo4j database. ', exc_info=True)
            checks += 'Unable to upload network files to Neo4j database.\n'
        if publish:
            pub.sendMessage('database_log', msg=checks)
        importdriver.close()
    logger.info('Completed database operations!  ')
    write_settings(inputs)
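The child-kill loop in the 'quit' branch above works, but a slightly gentler teardown (terminate first, then kill whatever survives) is a common psutil pattern; a sketch, not part of the original module:

from psutil import Process, NoSuchProcess, wait_procs

def stop_process_tree(pid: int, timeout: float = 5.0) -> None:
    """Terminate a process and all of its children, escalating to kill()."""
    try:
        parent = Process(pid)
    except NoSuchProcess:
        return
    procs = parent.children(recursive=True) + [parent]
    for proc in procs:
        try:
            proc.terminate()
        except NoSuchProcess:
            pass
    _, alive = wait_procs(procs, timeout=timeout)
    for proc in alive:
        proc.kill()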
Ejemplo n.º 60
0
def _get_shell():
    try:
        shell = Process(os.getpid()).parent().name()
    except TypeError:
        shell = Process(os.getpid()).parent.name
    return shells[shell]