Example #1
def raise_nofile(nofile_atleast: int = 4096) -> Tuple[int, int]:
    """
    Raise the nofile soft limit to at least 4096. Useful when running matplotlib/seaborn
    plot generators in parallel, where the Ubuntu 16.04 default of ulimit -n 1024 (or the
    OS X El Capitan default of 256) is too low. The new limit is temporary and expires
    with the Python session.
    """
# %% (0) what is current ulimit -n setting?
    soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
    hard = ohard
# %% (1) increase limit (soft and even hard) if needed
    if soft < nofile_atleast:
        soft = nofile_atleast

        if hard < soft:
            hard = soft

        print('setting soft & hard ulimit -n {} {}'.format(soft, hard))
        try:
            res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
        except (ValueError, res.error):
            try:
                hard = soft
                print(
                    'trouble with max limit, retrying with soft,hard {},{}'.format(soft, hard))
                res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
            except Exception:
                print('failed to set ulimit, giving up')
                soft, hard = res.getrlimit(res.RLIMIT_NOFILE)

    return soft, hard
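A minimal driver for the helper above, assuming (as the snippet implies) that `res` is an alias for the standard-library `resource` module and `Tuple` comes from `typing`:

# assumed module-level imports for the snippet above:
#   import resource as res
#   from typing import Tuple

if __name__ == '__main__':
    soft, hard = raise_nofile(4096)
    print('ulimit -n is now soft={} hard={}'.format(soft, hard))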
Example #2
    def testClushConfigSetRlimit(self):
        """test CLI.Config.ClushConfig (setrlimit)"""
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        hard2 = min(32768, hard)
        f = tempfile.NamedTemporaryFile(prefix='testclushconfig')
        f.write("""
[Main]
fanout: 42
connect_timeout: 14
command_timeout: 0
history_size: 100
color: auto
fd_max: %d
verbosity: 1
""" % hard2)

        f.flush()
        parser = OptionParser("dummy")
        parser.install_display_options(verbose_options=True)
        parser.install_connector_options()
        options, _ = parser.parse_args([])
        config = ClushConfig(options, filename=f.name)
        self.assertTrue(config is not None)
        display = Display(options, config)
        self.assertTrue(display is not None)

        # force a lower soft limit
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard2/2, hard))
        # max_fdlimit should increase soft limit again
        set_fdlimit(config.fd_max, display)
        # verify
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        self.assertEqual(soft, hard2)
        f.close()
Example #3
 def __init__(self, max_threads=None):
     '''
     If max_threads is not supplied, calculates a reasonable value based
     on system resource limits.
     '''
     self.active_requests = set()
     if not max_threads:
         # man getrlimit: "RLIMIT_NPROC The maximum number of processes (or,
         # more precisely on Linux, threads) that can be created for the
         # real user ID of the calling process."
         try:
             import resource
             rlimit_nproc = resource.getrlimit(resource.RLIMIT_NPROC)[0]
             rlimit_nofile = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
             max_threads = min(rlimit_nofile // 10, rlimit_nproc // 2)
             # resource.RLIM_INFINITY == -1 which can result in max_threads == 0
             if max_threads <= 0 or max_threads > 5000:
                 max_threads = 5000
             self.logger.info(
                     "max_threads=%s (rlimit_nproc=%s, rlimit_nofile=%s)",
                     max_threads, rlimit_nproc, rlimit_nofile)
         except Exception as e:
             self.logger.warn(
                     "unable to calculate optimal number of threads based "
                     "on resource limits due to %s", e)
             max_threads = 100
             self.logger.info("max_threads=%s", max_threads)
     self.max_threads = max_threads
     self.pool = concurrent.futures.ThreadPoolExecutor(max_threads)
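The sizing rule used above can be lifted into a small standalone helper. A sketch, keeping the snippet's divisors and 5000 cap but with names of my own; note that RLIMIT_NPROC is Unix-specific and absent on some platforms:

import concurrent.futures
import resource

def reasonable_max_threads(cap=5000):
    # RLIMIT_NPROC bounds threads per user, RLIMIT_NOFILE bounds open descriptors
    nproc = resource.getrlimit(resource.RLIMIT_NPROC)[0]
    nofile = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
    n = min(nofile // 10, nproc // 2)
    # RLIM_INFINITY is -1 on Linux, which would drive n to zero or below
    if n <= 0 or n > cap:
        n = cap
    return n

pool = concurrent.futures.ThreadPoolExecutor(reasonable_max_threads())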
Example #4
    def testClushConfigSetRlimitValueError(self):
        """test CLI.Config.ClushConfig (setrlimit ValueError)"""
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        f = tempfile.NamedTemporaryFile(prefix='testclushconfig')
        f.write(dedent("""
            [Main]
            fanout: 42
            connect_timeout: 14
            command_timeout: 0
            history_size: 100
            color: auto
            # Use wrong fd_max value to generate ValueError
            fd_max: -1
            verbosity: 1"""))
        f.flush()
        parser = OptionParser("dummy")
        parser.install_config_options()
        parser.install_display_options(verbose_options=True)
        parser.install_connector_options()
        options, _ = parser.parse_args([])
        config = ClushConfig(options, filename=f.name)
        f.close()
        display = Display(options, config)

        class TestException(Exception): pass

        def mock_vprint_err(level, message):
            if message.startswith('Warning: Failed to set max open files'):
                raise TestException()

        display.vprint_err = mock_vprint_err
        self.assertRaises(TestException, set_fdlimit, config.fd_max, display)

        soft2, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        self.assertEqual(soft, soft2)
Example #5
File: master.py Project: abh/salt
    def __set_max_open_files(self):
        # Let's check what our max open files (ulimit -n) setting currently is
        mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
        log.info(
            'Current values for max open files soft/hard setting: '
            '{0}/{1}'.format(
                mof_s, mof_h
            )
        )
        # Let's grab, from the configuration file, the value to raise max open
        # files to
        mof_c = self.opts['max_open_files']
        if mof_c > mof_h:
            # The configured value is higher than what's allowed
            log.warning(
                'The value for the \'max_open_files\' setting, {0}, is higher '
                'than what the user running salt is allowed to raise to, {1}. '
                'Defaulting to {1}.'.format(mof_c, mof_h)
            )
            mof_c = mof_h

        if mof_s < mof_c:
            # There's room to raise the value. Raise it!
            log.warning('Raising max open files value to {0}'.format(mof_c))
            resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
            mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
            log.warning(
                'New values for max open files soft/hard values: '
                '{0}/{1}'.format(mof_s, mof_h)
            )
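Stripped of Salt's configuration and logging, the pattern above reduces to a few lines; a sketch (the function name is mine, not Salt's):

import resource

def raise_soft_nofile(wanted):
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard != resource.RLIM_INFINITY:
        wanted = min(wanted, hard)   # never ask for more than the hard limit allows
    if soft < wanted:
        resource.setrlimit(resource.RLIMIT_NOFILE, (wanted, hard))
    return resource.getrlimit(resource.RLIMIT_NOFILE)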
Example #6
    def __init__(
        self,
        application,
        environ=None,
        bindAddress=None,
        umask=None,
        multiplexed=False,
        debug=False,
        roles=(FCGI_RESPONDER,),
        forceCGI=False,
        timeout=None,
        **kw
    ):
        """
        environ, if present, must be a dictionary-like object. Its
        contents will be copied into application's environ. Useful
        for passing application-specific variables.

        bindAddress, if present, must either be a string or a 2-tuple. If
        present, run() will open its own listening socket. You would use
        this if you wanted to run your application as an 'external' FastCGI
        app. (i.e. the webserver would no longer be responsible for starting
        your app) If a string, it will be interpreted as a filename and a UNIX
        socket will be opened. If a tuple, the first element, a string,
        is the interface name/IP to bind to, and the second element (an int)
        is the port number.
        """
        BaseFCGIServer.__init__(
            self,
            application,
            environ=environ,
            multithreaded=False,
            multiprocess=True,
            bindAddress=bindAddress,
            umask=umask,
            multiplexed=multiplexed,
            debug=debug,
            roles=roles,
            forceCGI=forceCGI,
        )
        for key in ("multithreaded", "multiprocess", "jobClass", "jobArgs"):
            if kw.has_key(key):
                del kw[key]
        PreforkServer.__init__(self, jobClass=self._connectionClass, jobArgs=(self, timeout), **kw)

        try:
            import resource

            # Attempt to glean the maximum number of connections
            # from the OS.
            try:
                maxProcs = resource.getrlimit(resource.RLIMIT_NPROC)[0]
                maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
                maxConns = min(maxConns, maxProcs)
            except AttributeError:
                maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        except ImportError:
            maxConns = 100  # Just some made up number.
        maxReqs = maxConns
        self.capability = {FCGI_MAX_CONNS: maxConns, FCGI_MAX_REQS: maxReqs, FCGI_MPXS_CONNS: 0}
Example #7
File: server.py Project: befks/odoo
    def process_limit(self):
        # If our parent changed, commit suicide
        if self.ppid != os.getppid():
            _logger.info("Worker (%s) Parent changed", self.pid)
            self.alive = False
        # check for lifetime
        if self.request_count >= self.request_max:
            _logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
            self.alive = False
        # Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
        rss, vms = memory_info(psutil.Process(os.getpid()))
        if vms > config['limit_memory_soft']:
            _logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
            self.alive = False      # Commit suicide after the request.

        # VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
        soft, hard = resource.getrlimit(resource.RLIMIT_AS)
        resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))

        # SIGXCPU (exceeded CPU time) signal handler will raise an exception.
        r = resource.getrusage(resource.RUSAGE_SELF)
        cpu_time = r.ru_utime + r.ru_stime
        def time_expired(n, stack):
            _logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
            # We don't commit suicide in this case
            raise Exception('CPU time limit exceeded.')
        signal.signal(signal.SIGXCPU, time_expired)
        soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
        resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
Example #8
def test_rlimits():
    for k, v in sorted(resource.__dict__.items()):
        if k.startswith('RLIMIT_'):
            val = resource.getrlimit(v)
            logging.info('%s: %s', k, val)

    limits = resource.getrlimit(resource.RLIMIT_NOFILE)
    maxfiles = min(limits[1], 8192)
    logging.info('Trying to open %d files..', maxfiles)
    if real_run():
        i = 0
        try:
            # list is needed to keep files open (prevent GC)
            handles = []
            for i in range(maxfiles):
                fd = open('/tmp/file-{}'.format(i), 'w')
                fd.write('1')
                handles.append(fd)
                if i > 0 and i % 1000 == 0:
                    logging.debug('Opened %d files', i)
        except IOError:
            logging.exception('Could open %s files', i)
        for i in range(maxfiles):
            try:
                os.unlink('/tmp/file-{}'.format(i))
            except:
                pass
Example #9
def limit_resource():
   megs=1000000
   rsrc = resource.RLIMIT_AS
   soft, hard = resource.getrlimit(rsrc)
   print 'Soft limit starts as  :', soft
   resource.setrlimit(rsrc, (8*megs*1024, 8*megs*1024)) #limit to 8  Gigabytes
   soft, hard = resource.getrlimit(rsrc)
   print 'Soft limit changed to :', soft
Example #10
def run(configs, optimal=True, final_config=None, final_config_builder=None,
        timeout=None):
    options, extra_args = parse_args()
    plan_file = options.plan_file

    # Time limits are either positive values in seconds or -1 (unlimited).
    soft_time_limit, hard_time_limit = resource.getrlimit(resource.RLIMIT_CPU)
    print 'External time limit:', hard_time_limit
    if (hard_time_limit >= 0 and timeout is not None and
        timeout != hard_time_limit):
        sys.stderr.write("The externally set timeout (%d) differs from the one "
                         "in the portfolio file (%d). Is this expected?\n" %
                         (hard_time_limit, timeout))
    # Prefer limits in the order: externally set, from portfolio file, default.
    if hard_time_limit >= 0:
        timeout = hard_time_limit
    elif timeout is None:
        sys.stderr.write("No timeout has been set for the portfolio so we take "
                         "the default of %ds.\n" % DEFAULT_TIMEOUT)
        timeout = DEFAULT_TIMEOUT
    print 'Internal time limit:', timeout

    # Memory limits are either positive values in Bytes or -1 (unlimited).
    soft_mem_limit, hard_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
    print 'External memory limit:', hard_mem_limit
    memory = hard_mem_limit - BYTES_FOR_PYTHON
    # Do not limit memory if the previous limit was very low or unlimited.
    if memory < 0:
        memory = None
    print 'Internal memory limit:', memory

    assert len(extra_args) == 3, extra_args
    sas_file = extra_args.pop(0)
    assert extra_args[0] in ["unit", "nonunit"], extra_args
    unitcost = extra_args.pop(0)
    assert extra_args[0][-1] in ["1", "2", "4"], extra_args
    planner = extra_args.pop(0)

    safe_unlink("plan_numbers_and_cost")

    remaining_time_at_start = float(timeout)
    try:
        for line in open("elapsed.time"):
            if line.strip():
                remaining_time_at_start -= float(line)
    except EnvironmentError:
        print "WARNING! elapsed_time not found -- assuming full time available."

    print "remaining time at start: %s" % remaining_time_at_start

    if optimal:
        exitcodes = run_opt(configs, planner, sas_file, plan_file,
                            remaining_time_at_start, memory)
    else:
        exitcodes = run_sat(configs, unitcost, planner, sas_file, plan_file,
                            final_config, final_config_builder,
                            remaining_time_at_start, memory)
    sys.exit(_generate_exitcode(exitcodes))
Example #11
    def set_resource_utilization_limits(self):
        if not self.memory_limit or self.memory_limit == 0:
            return

        # get the current soft and hard limits for the address-space (RLIMIT_AS) limit
        soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_AS)
        resource.setrlimit(resource.RLIMIT_AS, (self.memory_limit, hard_limit))

        soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_DATA)
        resource.setrlimit(resource.RLIMIT_DATA, (self.memory_limit, hard_limit))
Example #12
    def _run(self):
        try:
            lim = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (lim[1], lim[1]))

            lim = resource.getrlimit(resource.RLIMIT_NOFILE)
            print " maximum number of open files set to", lim[0]

        except Exception, e:
            print " failed to raise the maximum number of open files:", str(e)
Example #13
	def prepare_subprocess():
		# create a new session for the spawned subprocess using os.setsid,
		# so we can later kill it and all children on timeout, taken from http://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true
		os.setsid()
		# Limit the number of file descriptors the subprocess may keep open
		resource.setrlimit(resource.RLIMIT_NOFILE,(128,128))
		if fileseeklimit is not None:
			resource.setrlimit(resource.RLIMIT_FSIZE,(fileseeklimitbytes, fileseeklimitbytes))
			if resource.getrlimit(resource.RLIMIT_FSIZE) != (fileseeklimitbytes, fileseeklimitbytes):
				raise ValueError(resource.getrlimit(resource.RLIMIT_FSIZE))
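`prepare_subprocess` is written to be passed as a `preexec_fn`; a sketch of how it might be wired up, assuming `fileseeklimit` and `fileseeklimitbytes` are defined in the enclosing scope as the snippet implies (the command line is a placeholder):

import subprocess

proc = subprocess.Popen(
    ["python", "untrusted_script.py"],   # placeholder command
    preexec_fn=prepare_subprocess,       # runs in the child before exec()
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
out, err = proc.communicate()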
Example #14
 def _prevent_core_dump(cls):
     """Prevent the process from generating a core dump."""
     try:
         # Try to get the current limit
         resource.getrlimit(resource.RLIMIT_CORE)
     except ValueError:
         # System doesn't support the RLIMIT_CORE resource limit
         return
     else:
         # Set the soft and hard limits for core dump size to zero
         resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
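A quick check that the guard above took effect; the call itself is left as a comment because the method's owning class is not part of the snippet:

import resource

# SomeDaemonClass._prevent_core_dump()   # hypothetical owning class
soft, hard = resource.getrlimit(resource.RLIMIT_CORE)
print('core dump size limits: soft=%d hard=%d' % (soft, hard))   # expect 0/0 after the call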
Example #15
def run(portfolio, executable, sas_file, plan_manager):
    attributes = get_portfolio_attributes(portfolio)
    configs = attributes["CONFIGS"]
    optimal = attributes["OPTIMAL"]
    final_config = attributes.get("FINAL_CONFIG")
    final_config_builder = attributes.get("FINAL_CONFIG_BUILDER")
    timeout = attributes.get("TIMEOUT")

    # Time limits are either positive values in seconds or -1 (unlimited).
    soft_time_limit, hard_time_limit = resource.getrlimit(resource.RLIMIT_CPU)
    print("External time limits: %d, %d" % (soft_time_limit, hard_time_limit))
    external_time_limit = None
    if soft_time_limit != resource.RLIM_INFINITY:
        external_time_limit = soft_time_limit
    elif hard_time_limit != resource.RLIM_INFINITY:
        external_time_limit = hard_time_limit
    if (external_time_limit is not None and
            timeout is not None and
            timeout != external_time_limit):
        print("The externally set timeout (%d) differs from the one "
              "in the portfolio file (%d). Is this expected?" %
              (external_time_limit, timeout), file=sys.stderr)
    # Prefer limits in the order: external soft limit, external hard limit,
    # from portfolio file, default.
    if external_time_limit is not None:
        timeout = external_time_limit
    elif timeout is None:
        print("No timeout has been set for the portfolio so we take "
              "the default of %ds." % DEFAULT_TIMEOUT, file=sys.stderr)
        timeout = DEFAULT_TIMEOUT
    print("Internal time limit: %d" % timeout)

    # Memory limits are either positive values in Bytes or -1 (unlimited).
    soft_mem_limit, hard_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
    print("External memory limits: %d, %d" % (soft_mem_limit, hard_mem_limit))
    if hard_mem_limit == resource.RLIM_INFINITY:
        memory = None
    else:
        memory = hard_mem_limit
    print("Internal memory limit: %s" % memory)

    remaining_time_at_start = float(timeout) - get_elapsed_time()
    print("remaining time at start: %.2f" % remaining_time_at_start)

    if optimal:
        exitcodes = run_opt(configs, executable, sas_file, plan_manager,
                            remaining_time_at_start, memory)
    else:
        exitcodes = run_sat(configs, executable, sas_file, plan_manager,
                            final_config, final_config_builder,
                            remaining_time_at_start, memory)
    exitcode = generate_exitcode(exitcodes)
    if exitcode != 0:
        raise subprocess.CalledProcessError(exitcode, ["run-portfolio", portfolio])
Example #16
    def setUp(self):
        limits = resource.getrlimit(resource.RLIMIT_NOFILE)
        print 'NOFILE rlimit:', limits
        resource.setrlimit(resource.RLIMIT_NOFILE, (10, limits[1]))
        print 'NOFILE rlimit:', resource.getrlimit(resource.RLIMIT_NOFILE)

        self.mypath = os.path.dirname(__file__)
        self.srcCat = afwTable.SourceCatalog.readFits(
            os.path.join(self.mypath, "v695833-e0-c000.xy.fits"))
        # The .xy.fits file has sources in the range ~ [0,2000],[0,4500]
        self.bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(2048, 4612)) # approximate
Example #17
def setProcessLimits():
    # Read the config file
    config = ConfigParser.ConfigParser()
    config.read('config.cfg')
    sandboxingConfig = ConfigSectionMap(config, "SandboxingParams")

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (int(sandboxingConfig['nfile']), hard))
    soft, hard = resource.getrlimit(resource.RLIMIT_NPROC)
    resource.setrlimit(resource.RLIMIT_NPROC, (int(sandboxingConfig['nproc']), hard))
    soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
    resource.setrlimit(resource.RLIMIT_STACK, (int(sandboxingConfig['stacksize']), hard))
Example #18
def main():
    global __file__
    __file__ = os.path.abspath(__file__)
    print __file__
    if os.path.islink(__file__):
        print True
        __file__ = getattr(os, 'readlink', lambda x: x)(__file__)
    print os.getpid()
    print os.getppid()
    print resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (8192, -1))
    print resource.getrlimit(resource.RLIMIT_NOFILE)
Example #19
def _setup_resources():
    """Attempt to increase resource limits up to hard limits.

    This allows us to avoid out of file handle limits where we can
    move beyond the soft limit up to the hard limit.
    """
    target_procs = 10240
    cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC)
    target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs
    resource.setrlimit(resource.RLIMIT_NPROC, (max(cur_proc, target_proc), max_proc))
    cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE)
    target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs
    resource.setrlimit(resource.RLIMIT_NOFILE, (max(cur_hdls, target_hdls), max_hdls))
Example #20
def set_nixMem(args) : 
    if args.nixMem is not None  :
        import resource # this only exists on Unix/Linux based systems
        rsrc = resource.RLIMIT_AS
        soft, hard = resource.getrlimit(rsrc)
        print('Soft limit starts as  :', soft)
        print('Hard limit starts as  :', hard)
        
        resource.setrlimit(rsrc, (args.nixMem * 1048576, hard)) #limit
        
        soft, hard = resource.getrlimit(rsrc)
        print('Soft limit changed to :', soft)
        print('Hard limit changed to  :', hard)
Example #21
def Main(Bytes = None):
    #Kilobytes = Bytes/1000.0

    print 'Changing Memory Limit To : ' + str(Bytes) + " Bytes"

    MemoryLimitBytes = resource.RLIMIT_AS
    soft, hard = resource.getrlimit(MemoryLimitBytes)
    print 'Soft limit starts as     :', soft

    resource.setrlimit(MemoryLimitBytes, (Bytes, hard)) #limit address space to Bytes bytes

    soft, hard = resource.getrlimit(MemoryLimitBytes)
    print 'Soft limit changed to    :', soft
Example #22
File: lib.py Project: virajs/edgedb
def prevent_core_dump():
    """Prevent this process from generating a core dump."""
    core_resource = resource.RLIMIT_CORE

    try:
        resource.getrlimit(core_resource)
    except ValueError as ex:
        raise DaemonError(
            'Unable to limit core dump size: '
            'system does not support RLIMIT_CORE resource limit') from ex

    # Set hard & soft limits to 0, i.e. no core dump at all
    resource.setrlimit(core_resource, (0, 0))
Example #23
def limit_info(line):
    rsrc = resource.RLIMIT_AS
    soft, hard = resource.getrlimit(rsrc)
    print 'Soft limit starts as:', soft
    print 'Hard limit starts as:', hard
    if line:
        limit_mb = int(line)
        limit_bytes = int(line)*1024*1024
        print 'Setting limit to %s Mb' % limit_mb
        resource.setrlimit(rsrc, (limit_bytes, hard))  # RLIMIT_AS is expressed in bytes
        soft, hard = resource.getrlimit(rsrc)
        print 'Soft limit is now:', soft
        print 'Hard limit is now:', hard
Example #24
def allowCoreDumps():
    """ Allow core dump generation by setting an unlimited core dump size.
        
        :Return:
            previous settings used by operating system
    """
    settings = CoreDumpSettings()
    settings.oldRlimitCore = getrlimit(RLIMIT_CORE)
    settings.oldRlimitFsize = getrlimit(RLIMIT_FSIZE)
    #infinite limits
    setrlimit(RLIMIT_CORE, (-1, -1))
    setrlimit(RLIMIT_FSIZE, (-1, -1))
    
    return settings
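`CoreDumpSettings` is not shown in the snippet; a minimal stand-in, plus the matching restore step, might look like this (both are assumptions, not the project's actual API):

from resource import setrlimit, RLIMIT_CORE, RLIMIT_FSIZE

class CoreDumpSettings(object):
    """Plain holder for the limits saved by allowCoreDumps()."""
    oldRlimitCore = None
    oldRlimitFsize = None

def restoreCoreDumpSettings(settings):
    # put back whatever limits were in force before allowCoreDumps()
    setrlimit(RLIMIT_CORE, settings.oldRlimitCore)
    setrlimit(RLIMIT_FSIZE, settings.oldRlimitFsize)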
Example #25
    def execute(self, fname, needToLimitTime=False):
        if os.path.isfile(options.solver) is not True:
            print "Error: Cannot find STP executable. Searched in: '%s'" % \
                options.solver
            print "Error code 300"
            exit(300)

        #construct command
        command = ""
        if not options.novalgrind and random.randint(0, 10) == 0:
            command += "valgrind -q --leak-check=full  --error-exitcode=173 "

        command += options.solver
        command += self.random_options()
        #if options.verbose == False:
        #    command += "--verb 0 "
        command += "-p " #yes, print counterexample

        command += options.extra_options + " "
        command += fname
        print "Executing: %s " % command

        #print time limit
        if options.verbose:
            print "CPU limit of parent (pid %d)" % os.getpid(), resource.getrlimit(resource.RLIMIT_CPU)

        #if need time limit, then limit
        if needToLimitTime:
            p = subprocess.Popen(command.rsplit(), stdout=subprocess.PIPE, preexec_fn=setlimits)
        else:
            p = subprocess.Popen(command.rsplit(), stdout=subprocess.PIPE)

        #print time limit after child startup
        if options.verbose:
            print "CPU limit of parent (pid %d) after startup of child" % \
                os.getpid(), resource.getrlimit(resource.RLIMIT_CPU)

        #Get solver output
        consoleOutput, err = p.communicate()
        if options.verbose:
            print "CPU limit of parent (pid %d) after child finished executing" % \
                os.getpid(), resource.getrlimit(resource.RLIMIT_CPU)

        if p.returncode == 173:
            print "Valgrind is indicating an error!"
            print err
            print consoleOutput
            exit(-1)

        return consoleOutput
Example #26
def set_max_fds(maxfds=8180):
    nfd,tot = resource.getrlimit(resource.RLIMIT_NOFILE)

    # if we want fewer FDs than the current soft limit already allows, just return
    if maxfds < nfd:
        return maxfds

    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (maxfds, tot))
    except:
        pass
    nfd,tot = resource.getrlimit(resource.RLIMIT_NOFILE)

    return nfd
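Typical use is to call the helper once at startup and size connection pools from the result; a short sketch (it assumes `import resource` at module level, as the snippet does):

usable_fds = set_max_fds(8180)
# keep some headroom for stdio, log files, sockets opened by libraries, etc.
max_connections = max(usable_fds - 64, 1)
print("accepting at most %d concurrent connections" % max_connections)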
Example #27
def bump_nofile_limit():
    from twisted.python import log
    log.msg("Open files limit: %d" % resource.getrlimit(resource.RLIMIT_NOFILE)[0])
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    values_to_try = [v for v in [hard, 100000, 10000] if v > soft]
    for new_soft in values_to_try:
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
        except ValueError:
            continue
        else:
            log.msg("Open files limit increased from %d to %d" % (soft, new_soft))
            break
    else:
        log.msg("Can't bump open files limit")
Example #28
def _set_a_process_limit(resource_name, desired_limit, description):
    which_resource = getattr(resource, resource_name)
    (soft, hard) = resource.getrlimit(which_resource)
    def set_resource(attempted_value):
        log_verbose("Trying setrlimit(resource.%s, (%d, %d))" %
                    (resource_name, attempted_value, hard))
        resource.setrlimit(which_resource, (attempted_value, hard))

    log_info("Setting OS limit on %s for process (desire up to %d)..."
             "\n\t Current limit values: soft = %d, hard = %d" %
             (description, desired_limit, soft, hard))

    _negotiate_process_limit(set_resource, desired_limit, soft, hard)
    log_info("Resulting OS limit on %s for process: " % description +
             "soft = %d, hard = %d" % resource.getrlimit(which_resource))
Example #29
def pre_request(worker, req):
    import os
    import psutil
    import resource
    import signal
    # VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
    rss, vms = psutil.Process(os.getpid()).get_memory_info()
    soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    resource.setrlimit(resource.RLIMIT_AS, (config['virtual_memory_limit'], hard))

    r = resource.getrusage(resource.RUSAGE_SELF)
    cpu_time = r.ru_utime + r.ru_stime
    signal.signal(signal.SIGXCPU, time_expired)
    soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
    resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['cpu_time_limit'], hard))
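The gunicorn hook above references `config` and `time_expired` without defining them; a hedged sketch of what they might look like (both names and values are assumptions local to this example):

import resource
import signal

config = {
    'virtual_memory_limit': 2 * 1024 ** 3,   # example: cap address space at 2 GiB
    'cpu_time_limit': 60,                    # example: 60 extra CPU seconds per request
}

def time_expired(signum, frame):
    # invoked via the SIGXCPU handler installed in pre_request()
    raise SystemExit('CPU time limit exceeded')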
Example #30
def limit_resources():
    _, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    # XXX warn if too few compared to max_workers?
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))

    _, hard = resource.getrlimit(resource.RLIMIT_AS)
    rlimit_as = int(config.read('System', 'RLIMIT_AS_gigabytes'))
    if rlimit_as == 0:
        return
    LOGGER.info('Setting RLIMIT_AS per configuration to %d gigabytes', rlimit_as)
    rlimit_as *= 1024 * 1024 * 1024
    if hard > 0 and rlimit_as > hard:
        LOGGER.error('RLIMIT_AS limited to %d bytes by system limit', hard)
        rlimit_as = hard
    resource.setrlimit(resource.RLIMIT_AS, (rlimit_as, hard))
Example #31
def set_max_memory(maxsize):
    soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    resource.setrlimit(resource.RLIMIT_AS, (maxsize, hard))
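Callers should expect `setrlimit` to reject the request, for example when `maxsize` exceeds the hard limit of an unprivileged process; a sketch:

import resource   # assumed at module level by the snippet above

try:
    set_max_memory(2 * 1024 ** 3)   # cap address space at 2 GiB
except ValueError:
    # asking for a soft limit above the hard limit (or an invalid value)
    print("could not lower RLIMIT_AS; keeping current limits")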
Example #32
 def get_file_limits():
     return resource.getrlimit(resource.RLIMIT_NOFILE)
Example #33
def main():
    shell.check_python()

    config = shell.get_config(False)

    shell.log_shadowsocks_version()

    daemon.daemon_exec(config)

    try:
        import resource
        logging.info(
            'current process RLIMIT_NOFILE resource: soft %d hard %d' %
            resource.getrlimit(resource.RLIMIT_NOFILE))
    except ImportError:
        pass

    if config['port_password']:
        pass
    else:
        config['port_password'] = {}
        server_port = config['server_port']
        if type(server_port) == list:
            for a_server_port in server_port:
                config['port_password'][a_server_port] = config['password']
        else:
            config['port_password'][str(server_port)] = config['password']

    if not config.get('dns_ipv6', False):
        asyncdns.IPV6_CONNECTION_SUPPORT = False

    if config.get('manager_address', 0):
        logging.info('entering manager mode')
        manager.run(config)
        return

    tcp_servers = []
    udp_servers = []
    dns_resolver = asyncdns.DNSResolver()
    if int(config['workers']) > 1:
        stat_counter_dict = None
    else:
        stat_counter_dict = {}
    port_password = config['port_password']
    config_password = config.get('password', 'm')
    del config['port_password']
    for port, password_obfs in port_password.items():
        method = config["method"]
        protocol = config.get("protocol", 'origin')
        protocol_param = config.get("protocol_param", '')
        obfs = config.get("obfs", 'plain')
        obfs_param = config.get("obfs_param", '')
        bind = config.get("out_bind", '')
        bindv6 = config.get("out_bindv6", '')
        if type(password_obfs) == list:
            password = password_obfs[0]
            obfs = common.to_str(password_obfs[1])
            if len(password_obfs) > 2:
                protocol = common.to_str(password_obfs[2])
        elif type(password_obfs) == dict:
            password = password_obfs.get('password', config_password)
            method = common.to_str(password_obfs.get('method', method))
            protocol = common.to_str(password_obfs.get('protocol', protocol))
            protocol_param = common.to_str(
                password_obfs.get('protocol_param', protocol_param))
            obfs = common.to_str(password_obfs.get('obfs', obfs))
            obfs_param = common.to_str(
                password_obfs.get('obfs_param', obfs_param))
            bind = password_obfs.get('out_bind', bind)
            bindv6 = password_obfs.get('out_bindv6', bindv6)
        else:
            password = password_obfs
        a_config = config.copy()
        ipv6_ok = False
        logging.info(
            "server start with protocol[%s] password [%s] method [%s] obfs [%s] obfs_param [%s]"
            % (protocol, password, method, obfs, obfs_param))
        if 'server_ipv6' in a_config:
            try:
                if len(a_config['server_ipv6']
                       ) > 2 and a_config['server_ipv6'][
                           0] == "[" and a_config['server_ipv6'][-1] == "]":
                    a_config['server_ipv6'] = a_config['server_ipv6'][1:-1]
                a_config['server_port'] = int(port)
                a_config['password'] = password
                a_config['method'] = method
                a_config['protocol'] = protocol
                a_config['protocol_param'] = protocol_param
                a_config['obfs'] = obfs
                a_config['obfs_param'] = obfs_param
                a_config['out_bind'] = bind
                a_config['out_bindv6'] = bindv6
                a_config['server'] = a_config['server_ipv6']
                logging.info("starting server at [%s]:%d" %
                             (a_config['server'], int(port)))
                tcp_servers.append(
                    tcprelay.TCPRelay(a_config,
                                      dns_resolver,
                                      False,
                                      stat_counter=stat_counter_dict))
                udp_servers.append(
                    udprelay.UDPRelay(a_config,
                                      dns_resolver,
                                      False,
                                      stat_counter=stat_counter_dict))
                if a_config['server_ipv6'] == b"::":
                    ipv6_ok = True
            except Exception as e:
                shell.print_exception(e)

        try:
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            a_config['method'] = method
            a_config['protocol'] = protocol
            a_config['protocol_param'] = protocol_param
            a_config['obfs'] = obfs
            a_config['obfs_param'] = obfs_param
            a_config['out_bind'] = bind
            a_config['out_bindv6'] = bindv6
            logging.info("starting server at %s:%d" %
                         (a_config['server'], int(port)))
            tcp_servers.append(
                tcprelay.TCPRelay(a_config,
                                  dns_resolver,
                                  False,
                                  stat_counter=stat_counter_dict))
            udp_servers.append(
                udprelay.UDPRelay(a_config,
                                  dns_resolver,
                                  False,
                                  stat_counter=stat_counter_dict))
        except Exception as e:
            if not ipv6_ok:
                shell.print_exception(e)

    def run_server():
        def child_handler(signum, _):
            logging.warn('received SIGQUIT, shutting down gracefully..')
            list(
                map(lambda s: s.close(next_tick=True),
                    tcp_servers + udp_servers))

        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM),
                      child_handler)

        def int_handler(signum, _):
            sys.exit(1)

        signal.signal(signal.SIGINT, int_handler)

        try:
            loop = eventloop.EventLoop()
            dns_resolver.add_to_loop(loop)
            list(map(lambda s: s.add_to_loop(loop), tcp_servers + udp_servers))

            daemon.set_user(config.get('user', None))
            loop.run()
        except Exception as e:
            shell.print_exception(e)
            sys.exit(1)

    if int(config['workers']) > 1:
        if os.name == 'posix':
            children = []
            is_child = False
            for i in range(0, int(config['workers'])):
                r = os.fork()
                if r == 0:
                    logging.info('worker started')
                    is_child = True
                    run_server()
                    break
                else:
                    children.append(r)
            if not is_child:

                def handler(signum, _):
                    for pid in children:
                        try:
                            os.kill(pid, signum)
                            os.waitpid(pid, 0)
                        except OSError:  # child may have already exited
                            pass
                    sys.exit()

                signal.signal(signal.SIGTERM, handler)
                signal.signal(signal.SIGQUIT, handler)
                signal.signal(signal.SIGINT, handler)

                # master
                for a_tcp_server in tcp_servers:
                    a_tcp_server.close()
                for a_udp_server in udp_servers:
                    a_udp_server.close()
                dns_resolver.close()

                for child in children:
                    os.waitpid(child, 0)
        else:
            logging.warn('worker is only available on Unix/Linux')
            run_server()
    else:
        run_server()
Example #34
from functools import partial

from mmcv.runner import get_dist_info
from mmcv.parallel import collate
from torch.utils.data import DataLoader

from .sampler import GroupSampler, DistributedGroupSampler, DistributedSampler

# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))


def build_dataloader(dataset,
                     imgs_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     **kwargs):
    shuffle = kwargs.get('shuffle', True)
    if dist:
        rank, world_size = get_dist_info()
        if shuffle:
            sampler = DistributedGroupSampler(dataset, imgs_per_gpu,
                                              world_size, rank)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=False)
        batch_size = imgs_per_gpu
        num_workers = workers_per_gpu
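The module-level bump at the top of this example assumes the hard limit is at least 4096; a slightly more defensive variant of the same workaround (a sketch, not what the project ships):

import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
target = 4096 if hard == resource.RLIM_INFINITY else min(4096, hard)
if soft < target:
    resource.setrlimit(resource.RLIMIT_NOFILE, (target, hard))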
Example #35
"""Early initialization for ansible-test before most other imports have been performed."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import resource

from .constants import (
    SOFT_RLIMIT_NOFILE,
)

CURRENT_RLIMIT_NOFILE = resource.getrlimit(resource.RLIMIT_NOFILE)
DESIRED_RLIMIT_NOFILE = (SOFT_RLIMIT_NOFILE, CURRENT_RLIMIT_NOFILE[1])

if DESIRED_RLIMIT_NOFILE < CURRENT_RLIMIT_NOFILE:
    resource.setrlimit(resource.RLIMIT_NOFILE, DESIRED_RLIMIT_NOFILE)
    CURRENT_RLIMIT_NOFILE = DESIRED_RLIMIT_NOFILE
Example #36
def main():
    """Do the real work"""
    # Parse remaining variables
    network_map = options["input"]
    # network_mapset = network_map.split('@')[0]
    network = network_map.split("@")[1] if len(network_map.split("@")) > 1 else None
    suffix = options["suffix"]
    layer = options["layer"]
    corridor_tolerance = options["corridor_tolerance"]
    cores = options["cores"]
    where = None if options["where"] == "" else options["where"]
    weights = options["weights"].split(",")
    s_flag = flags["s"]
    d_flag = flags["d"]
    r_flag = flags["r"]

    ulimit = resource.getrlimit(resource.RLIMIT_NOFILE)

    net_hist_str = (
        grass.read_command("v.info", map=network_map, flags="h")
        .split("\n")[0]
        .split(": ")[1]
    )

    dist_cmd_dict = task.cmdstring_to_tuple(net_hist_str)

    dist_prefix = dist_cmd_dict[1]["prefix"]
    # network_prefix = dist_cmd_dict[1]['prefix']

    # print(where)

    # in_vertices = dist_cmd_dict[1]['input']

    # Check if db-connection for edge map exists
    con = vect.vector_db(network_map)[int(layer)]
    if not con:
        grass.fatal(
            "Database connection for map {} \
                    is not defined for layer {}.".format(
                network, layer
            )
        )

    # Check if required columns exist and are of required type
    required_columns = ["con_id_u", "from_p", "to_p", "cd_u"]
    if weights:
        required_columns += weights

    in_columns = vect.vector_columns(network_map, layer=layer)

    missing_columns = np.setdiff1d(required_columns, in_columns.keys())

    if missing_columns:
        grass.fatal(
            "Cannot find the following required/requested \
                    column(s) {} in vector map \
                    {}.".format(
                ", ".join(missing_columns), network
            )
        )

    #
    weight_types = []
    # Check properly if column is numeric
    for col in required_columns:
        if in_columns[col]["type"] not in ["INTEGER", "DOUBLE PRECISION", "REAL"]:
            grass.fatal(
                "Column {} is of type {}. \
                         Only numeric types (integer, \
                         real or double precision) \
                         allowed!".format(
                    col, in_columns[col]["type"]
                )
            )

        if col in weights:
            weight_types.append(in_columns[col]["type"])

    # Extract necessary information on edges from attribute table of
    # edge map
    table_io = StringIO(
        unicode(
            grass.read_command(
                "v.db.select",
                flags="c",
                map=network_map,
                columns=required_columns,
                separator=",",
                where=where,
            )
        )
    )

    try:
        table_extract = np.genfromtxt(
            table_io, delimiter=",", dtype=None, names=required_columns
        )
    except:
        grass.fatal("No edges selected to compute corridors for...")

    # Output result of where-clause and exit (if requested)
    if s_flag:
        print(table_extract)
        # grass.message("con_id_u|from_p|to_p")
        # for fid in $selected_edges_ud:
        #    message_text = $(echo $table_extract | tr ' ' '\n' |
        # tr ',' ' ' | awk -v FID=$fid '{if($1==FID) print $1 "|" $2 "|"
        #  $3}' | head -n 1)
        #    grass.message(message_text)
        sys.exit(0)

    # Get unique identifiers for the selected undirected edges
    selected_patches = np.unique(
        np.append(table_extract["from_p"], table_extract["to_p"])
    )

    selected_edges = np.unique(table_extract["con_id_u"])

    # activate z-flag if more maps have to be aggregated than ulimit
    z_flag = None if len(selected_edges) < ulimit else "z"

    # Check if cost distance raster maps exist
    pattern = "{}_patch_*_cost_dist".format(dist_prefix)
    patchmaps = (
        grass.read_command("g.list", pattern=pattern, type="raster")
        .rstrip("\n")
        .split("\n")
    )

    for patch in selected_patches:
        # Check if cost distance raster maps exist
        patchmap = "{}_patch_{}_cost_dist".format(dist_prefix, patch)
        if patchmap not in patchmaps:
            grass.fatal("Cannot find raster map {}.".format(patchmap))

    # Create mapcalculator expressions for cost distance corridors,
    # assigning distance values
    corridormaps = {}
    if d_flag:
        pattern = "{}_corridor_*_cost_dist".format(dist_prefix)
        corridor_base = "dist"
    else:
        pattern = "{}_corridor_[0-9]+$".format(dist_prefix)
        corridor_base = "id"

    corridormaps[corridor_base] = (
        grass.read_command("g.list", flags="e", pattern=pattern, type="raster")
        .rstrip("\n")
        .split("\n")
    )
    for weight in weights:
        pattern = "{}_corridor_[0-9]+_{}".format(dist_prefix, weight)
        corridormaps[weight] = (
            grass.read_command("g.list", flags="e", pattern=pattern, type="raster")
            .rstrip("\n")
            .split("\n")
        )

    # Setup GRASS modules for raster processing
    mapcalc = Module("r.mapcalc", quiet=True, run_=False)
    reclass = Module("r.reclass", rules="-", quiet=True, run_=False)
    recode = Module("r.recode", rules="-", quiet=True, run_=False)

    # Set up parallel module queue if parallel processing is requested
    # print(weight_types)
    if cores > 1:
        mapcalc_queue = ParallelModuleQueue(nprocs=cores)

        if "INTEGER" in weight_types:
            reclass_queue = ParallelModuleQueue(nprocs=cores)

        if "REAL" in weight_types or "DOUBLE PRECISION" in weight_types:
            recode_queue = ParallelModuleQueue(nprocs=cores)

    corridor_list = []
    for edge_id in selected_edges:
        edge = table_extract[table_extract["con_id_u"] == edge_id][0]
        # print(e.dtype.names)
        if d_flag:
            corridor = "{}_corridor_{}_cost_dist".format(dist_prefix, edge_id)
            # corridor_list.append(corridor)
            mc_expression = "{prefix}_corridor_{CON_ID}_cost_dist=if( \
            ({prefix}_patch_{FROM_P}_cost_dist+ \
            {prefix}_patch_{TO_P}_cost_dist) - \
            (({prefix}_patch_{FROM_P}_cost_dist+ \
            {prefix}_patch_{TO_P}_cost_dist) * \
            {cor_tolerance}/100.0)<= \
            ({prefix}_patch_{FROM_P}_cost_dist + \
            {prefix}_patch_{TO_P}_cost_dist), \
            ({prefix}_patch_{FROM_P}_cost_dist+ \
            {prefix}_patch_{TO_P}_cost_dist), \
            null())".format(
                prefix=dist_prefix,
                CON_ID=edge["con_id_u"],
                FROM_P=edge["from_p"],
                TO_P=edge["to_p"],
                cor_tolerance=corridor_tolerance,
            )
        else:
            corridor = "{}_corridor_{}".format(dist_prefix, edge["con_id_u"])
            # corridor_list.append(corridor)
            # Create mapcalculator expressions for cost distance
            # corridors, assigning connection IDs for reclassification
            mc_expression = "{prefix}_corridor_{CON_ID}=if( \
            ({prefix}_patch_{FROM_P}_cost_dist+ \
            {prefix}_patch_{TO_P}_cost_dist)- \
            (({prefix}_patch_{FROM_P}_cost_dist+ \
            {prefix}_patch_{TO_P}_cost_dist)* \
            {cor_tolerance}/100.0)<={CD}, \
            {CON_ID}, null())".format(
                prefix=dist_prefix,
                CON_ID=edge["con_id_u"],
                FROM_P=edge["from_p"],
                TO_P=edge["to_p"],
                CD=edge["cd_u"],
                cor_tolerance=corridor_tolerance,
            )

        corridor_list.append(corridor)
        # print(corridor)
        # print(corridormaps)

        if r_flag or corridor not in corridormaps[corridor_base]:
            new_mapcalc = copy.deepcopy(mapcalc)

            if cores > 1:
                calc = new_mapcalc(expression=mc_expression)
                mapcalc_queue.put(calc)
            else:
                calc = new_mapcalc(expression=mc_expression, region="intersect")
                calc.run()

        for weight in weights:
            if r_flag or corridor not in corridormaps[weight]:
                in_map = corridor
                out_map = "{}_{}".format(in_map, weight)
                if in_columns[weight]["type"] == "INTEGER":
                    new_reclass = copy.deepcopy(reclass)
                    reclass_rule = "{} = {}".format(edge["con_id_u"], edge[weight])
                    rcl = new_reclass(input=in_map, output=out_map, stdin_=reclass_rule)

                    if cores > 1:
                        reclass_queue.put(rcl)
                    else:
                        rcl.run()

                if in_columns[weight]["type"] in ["REAL", "DOUBLE PRECISION"]:
                    new_recode = copy.deepcopy(recode)
                    recode_rule = "{0}:{0}:{1}:{1}".format(
                        edge["con_id_u"], edge[weight]
                    )
                    rco = new_recode(input=in_map, output=out_map, stdin_=recode_rule)
                    if cores > 1:
                        recode_queue.put(rco)
                    else:
                        rco.run()

    if cores > 1:
        mapcalc_queue.wait()
        if "INTEGER" in weight_types:
            reclass_queue.wait()
        if "REAL" in weight_types or "DOUBLE PRECISION" in weight_types:
            recode_queue.wait()

    grass.verbose("Aggregating corridor maps...")

    if d_flag:
        grass.run_command(
            "r.series",
            flags=z_flag,
            quiet=True,
            input=",".join(corridor_list),
            output="{}_corridors_min_cost_dist_{}".format(dist_prefix, suffix),
            method="minimum",
        )
    else:
        # Summarize corridors
        if not weights:
            print(",".join(corridor_list))
            output_map = "{}_corridors_count_{}".format(dist_prefix, suffix)
            grass.run_command(
                "r.series",
                flags=z_flag,
                quiet=True,
                input=",".join(corridor_list),
                output=output_map,
                method="count",
            )
            write_raster_history(output_map)

        else:
            # Weight corridors according to user requested weights
            for weight in weights:
                # Generate corridor map list
                corridor_map_list = (cm + "_{}".format(weight) for cm in corridor_list)
                output_map = "{}_corridors_{}_sum_{}".format(
                    dist_prefix, weight, suffix
                )
                # Summarize corridors using r.series
                grass.run_command(
                    "r.series",
                    flags=z_flag,
                    quiet=True,
                    input=corridor_map_list,
                    output=output_map,
                    method="sum",
                )
                write_raster_history(output_map)
Example #37
                platform.cores_logical
            ])
        else:
            cases = filter_ordered(
                [platform.cores_physical, platform.cores_logical])
        ret.extend([((name, nthread), ) for nthread in cases])

        if basic not in ret:
            warning("skipping `%s`; perhaps you've set OMP_NUM_THREADS to a "
                    "non-standard value while attempting autotuning in "
                    "`max` mode?" % dict(basic))

    return ret


options = {
    'squeezer': 4,
    'blocksize-l0': (8, 16, 24, 32, 64, 96, 128),
    'blocksize-l1': (8, 16, 32),
    'stack_limit': resource.getrlimit(resource.RLIMIT_STACK)[0] / 4
}
"""Autotuning options."""


def log(msg):
    perf("AutoTuner: %s" % msg)


def warning(msg):
    _warning("AutoTuner: %s" % msg)
Example #38
def cl_main():
    # parse args
    arg_parser = argparse.ArgumentParser(
        description="SACAD (recursive tool) v%s.%s" %
        (sacad.__version__, __doc__),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "lib_dir", help="Music library directory to recursively analyze")
    arg_parser.add_argument("size", type=int, help="Target image size")
    arg_parser.add_argument("cover_pattern",
                            help="""Cover image path pattern.
                                  {artist} and {album} are replaced by their tag value.
                                  You can set an absolute path, otherwise destination directory is relative to the audio files.
                                  Use single character '%s' to embed JPEG into audio files."""
                            % (EMBEDDED_ALBUM_ART_SYMBOL))
    arg_parser.add_argument(
        "-i",
        "--ignore-existing",
        action="store_true",
        default=False,
        help=
        "Ignore existing covers and force search and download for all files")
    arg_parser.add_argument(
        "-f",
        "--full-scan",
        action="store_true",
        default=False,
        help="""Enable scanning of all audio files in each directory.
                                  By default the scanner will assume all audio files in a single directory are part of
                                  the same album, and only read metadata for the first file.
                                  Enable this if your files are organized in a way that allows files for different
                                  albums to be in the same directory level.
                                  WARNING: This will make the initial scan much slower."""
    )
    sacad.setup_common_args(arg_parser)
    arg_parser.add_argument("-v",
                            "--verbose",
                            action="store_true",
                            default=False,
                            dest="verbose",
                            help="Enable verbose output")
    args = arg_parser.parse_args()
    if args.cover_pattern == EMBEDDED_ALBUM_ART_SYMBOL:
        args.format = "jpg"
    else:
        args.format = os.path.splitext(args.cover_pattern)[1][1:].lower()
    try:
        args.format = sacad.SUPPORTED_IMG_FORMATS[args.format]
    except KeyError:
        print(
            "Unable to guess image format from extension, or unknown format: %s"
            % (args.format))
        exit(1)

    # setup logger
    if not args.verbose:
        logging.getLogger("sacad_r").setLevel(logging.WARNING)
        logging.getLogger().setLevel(logging.ERROR)
        logging.getLogger("asyncio").setLevel(logging.CRITICAL + 1)
        fmt = "%(name)s: %(message)s"
    else:
        logging.getLogger("sacad_r").setLevel(logging.DEBUG)
        logging.getLogger().setLevel(logging.DEBUG)
        logging.getLogger("asyncio").setLevel(logging.WARNING)
        fmt = "%(asctime)s %(levelname)s [%(name)s] %(message)s"
    logging_formatter = colored_logging.ColoredFormatter(fmt=fmt)
    logging_handler = logging.StreamHandler()
    logging_handler.setFormatter(logging_formatter)
    logging.getLogger().addHandler(logging_handler)

    # bump nofile ulimit
    try:
        soft_lim, hard_lim = resource.getrlimit(resource.RLIMIT_NOFILE)
        if ((soft_lim != resource.RLIM_INFINITY) and
            ((soft_lim < hard_lim) or (hard_lim == resource.RLIM_INFINITY))):
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard_lim, hard_lim))
            logging.getLogger().debug(
                "Max open files count set from %u to %u" %
                (soft_lim, hard_lim))
    except (AttributeError, OSError):
        # not supported on system
        pass

    # do the job
    work = analyze_lib(args.lib_dir,
                       args.cover_pattern,
                       ignore_existing=args.ignore_existing,
                       full_scan=args.full_scan,
                       all_formats=args.preserve_format)
    get_covers(work, args)
Example #39
    def start(self):
        """
        Start daemonization process.
        """
        # If the pidfile already exists, read the old pid from it so we can write it back
        # if locking fails, because the locking attempt purges the file contents.
        if os.path.isfile(self.pid):
            with open(self.pid, "r") as old_pidfile:
                old_pid = old_pidfile.read()
        # Create a lockfile so that only one instance of this daemon is running at any time.
        try:
            lockfile = open(self.pid, "w")
        except IOError:
            print("Unable to create the pidfile.")
            sys.exit(1)
        try:
            # Try to get an exclusive lock on the file. This will fail if another process has the file
            # locked.
            fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            print("Unable to lock on the pidfile.")
            # We need to overwrite the pidfile if we got here.
            with open(self.pid, "w") as pidfile:
                pidfile.write(old_pid)
            sys.exit(1)

        # skip fork if foreground is specified
        if not self.foreground:
            # Fork, creating a new process for the child.
            process_id = os.fork()
            if process_id < 0:
                # Fork error. Exit badly.
                sys.exit(1)
            elif process_id != 0:
                # This is the parent process. Exit.
                sys.exit(0)
            # This is the child process. Continue.

            # Stop listening for signals that the parent process receives.
            # This is done by getting a new process id.
            # setpgrp() is an alternative to setsid().
            # setsid puts the process in a new parent group and detaches its controlling terminal.
            process_id = os.setsid()
            if process_id == -1:
                # Uh oh, there was a problem.
                sys.exit(1)

            # Add lockfile to self.keep_fds.
            self.keep_fds.append(lockfile.fileno())

            # Close all file descriptors, except the ones mentioned in self.keep_fds.
            devnull = "/dev/null"
            if hasattr(os, "devnull"):
                # Python has set os.devnull on this system, use it instead as it might be different
                # than /dev/null.
                devnull = os.devnull

            if self.auto_close_fds:
                for fd in range(3,
                                resource.getrlimit(resource.RLIMIT_NOFILE)[0]):
                    if fd not in self.keep_fds:
                        try:
                            os.close(fd)
                        except OSError:
                            pass

            devnull_fd = os.open(devnull, os.O_RDWR)
            os.dup2(devnull_fd, 0)
            os.dup2(devnull_fd, 1)
            os.dup2(devnull_fd, 2)

        if self.logger is None:
            # Initialize logging.
            self.logger = logging.getLogger(self.app)
            self.logger.setLevel(logging.DEBUG)
            # Display log messages only on defined handlers.
            self.logger.propagate = False

            # Initialize syslog.
            # It will work correctly on OS X, Linux and FreeBSD.
            if sys.platform == "darwin":
                syslog_address = "/var/run/syslog"
            else:
                syslog_address = "/dev/log"

            # Continue with syslog initialization only if the machine we are
            # running on actually provides a syslog socket.
            if os.path.exists(syslog_address):
                syslog = handlers.SysLogHandler(syslog_address)
                if self.verbose:
                    syslog.setLevel(logging.DEBUG)
                else:
                    syslog.setLevel(logging.INFO)
                # Try to mimic normal syslog messages.
                formatter = logging.Formatter(
                    "%(asctime)s %(name)s: %(message)s", "%b %e %H:%M:%S")
                syslog.setFormatter(formatter)

                self.logger.addHandler(syslog)

        # Set umask to default to safe file permissions when running as a root daemon. 027 is an
        # octal number which we are typing as 0o27 for Python3 compatibility.
        os.umask(0o27)

        # Change to a known directory. If this isn't done, starting a daemon in a subdirectory that
        # needs to be deleted results in "directory busy" errors.
        os.chdir(self.chdir)

        # Execute privileged action
        privileged_action_result = self.privileged_action()
        if not privileged_action_result:
            privileged_action_result = []

        # Change the owner of the pid file; this is required because the pid file will be removed at exit.
        uid, gid = -1, -1

        if self.group:
            try:
                gid = grp.getgrnam(self.group).gr_gid
            except KeyError:
                self.logger.error("Group {0} not found".format(self.group))
                sys.exit(1)

        if self.user:
            try:
                uid = pwd.getpwnam(self.user).pw_uid
            except KeyError:
                self.logger.error("User {0} not found.".format(self.user))
                sys.exit(1)

        if uid != -1 or gid != -1:
            os.chown(self.pid, uid, gid)

        # Change gid
        if self.group:
            try:
                os.setgid(gid)
            except OSError:
                self.logger.error("Unable to change gid.")
                sys.exit(1)

        # Change uid
        if self.user:
            try:
                uid = pwd.getpwnam(self.user).pw_uid
            except KeyError:
                self.logger.error("User {0} not found.".format(self.user))
                sys.exit(1)
            try:
                os.setuid(uid)
            except OSError:
                self.logger.error("Unable to change uid.")
                sys.exit(1)

        try:
            lockfile.write("%s" % (os.getpid()))
            lockfile.flush()
        except IOError:
            self.logger.error("Unable to write pid to the pidfile.")
            print("Unable to write pid to the pidfile.")
            sys.exit(1)

        # Set custom action on SIGTERM.
        signal.signal(signal.SIGTERM, self.sigterm)
        atexit.register(self.exit)

        self.logger.warn("Starting daemon.")

        self.action(*privileged_action_result)
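
A note on the two callbacks wired up near the end of start(): self.sigterm and self.exit are not part of this snippet. A minimal sketch of what such handlers usually look like in a daemonize-style class (hypothetical, not taken from the source) is:

import os
import sys


class DaemonSketch:
    """Hypothetical skeleton showing only the handlers that start() registers."""

    def __init__(self, app, pid, action, logger):
        self.app = app
        self.pid = pid          # path to the pidfile
        self.action = action    # callable executed after daemonization
        self.logger = logger

    def sigterm(self, signum, frame):
        # Installed via signal.signal(signal.SIGTERM, self.sigterm):
        # log the signal and exit so the atexit hook below gets a chance to run.
        self.logger.warning("Caught signal %s. Stopping daemon.", signum)
        sys.exit(0)

    def exit(self):
        # Registered via atexit.register(self.exit): remove the pidfile on shutdown.
        self.logger.warning("Stopping daemon.")
        try:
            os.remove(self.pid)
        except OSError:
            pass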
Example #40
0
 def _rlimit_nofile():
     return resource.getrlimit(resource.RLIMIT_NOFILE)
Example #41
0
        print HOME_PAGE_EXAMPLE_ROW % {
            "link": urllib.quote(query),
            "query": cgi.escape(query).replace(" ", "&nbsp;"),
            "text": cgi.escape(text),
        }
    print HOME_PAGE_TABLE_END
    print HOME_PAGE_END
    sys.exit(0)

query = fs.getvalue("q", "").strip()
start = int(fs.getvalue("start", 0))
num = int(fs.getvalue("num", PER_PAGE))
max_computation = int(fs.getvalue("comp", MAX_COMPUTATION))

# Shell out to the find-exec binary to get results
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
if soft == -1 or soft > 30: soft = 30
resource.setrlimit(resource.RLIMIT_CPU, (soft, hard))

soft, hard = resource.getrlimit(resource.RLIMIT_AS)
if hard == -1 or hard > 2048 * 1024 * 1024: hard = 2048 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (hard, hard))

proc = subprocess.Popen(
    [binary, index, query],
    preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL),
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE)

print RESULT_PAGE_BEGIN % {"query": cgi.escape(query, quote=True)}
Example #42
0
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import resource
import sys
from k2.version import __version__
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# ZMT-5229: We are sometimes hitting the recursion limit when generating the documentation.
# The default recursion limit is 1000. Let's double it and hope for the best.
sys.setrecursionlimit(2000)

# Let's also double the stack size limit to account for the new recursion limit.
# (Skip this if the soft limit is unlimited: RLIM_INFINITY cannot be doubled.)
soft_stack, hard_stack = resource.getrlimit(resource.RLIMIT_STACK)
if soft_stack != resource.RLIM_INFINITY:
    resource.setrlimit(resource.RLIMIT_STACK, (soft_stack * 2, hard_stack))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode', 'sphinxcontrib.plantuml',
    'sphinxprettysearchresults'
Example #43
0
class TestHoleModule(object):
    try:
        # on Unix we can manipulate our limits: http://docs.python.org/2/library/resource.html
        import resource
        soft_max_open_files, hard_max_open_files = resource.getrlimit(
            resource.RLIMIT_NOFILE)
    except ImportError:
        pass

    @staticmethod
    @pytest.fixture()
    def universe():
        return mda.Universe(MULTIPDB_HOLE)

    @pytest.mark.skipif(
        rlimits_missing,
        reason="Test skipped because platform does not allow setting rlimits")
    def test_hole_module_fd_closure(self, universe, tmpdir):
        """test open file descriptors are closed (MDAnalysisTests.analysis.test_hole.TestHoleModule): Issue 129"""
        # If Issue 129 isn't resolved, this function will produce an OSError on
        # the system, and cause many other tests to fail as well.
        #
        # Successful test takes ~10 s, failure ~2 s.

        # Hasten failure by setting "ulimit -n 64" (can't go too low because of open modules etc...)
        import resource

        # ----- temporary hack -----
        # on Mac OS X (on Travis) we run out of open file descriptors
        # before even starting this test (see
        # https://github.com/MDAnalysis/mdanalysis/pull/901#issuecomment-231938093);
        # if this issue is solved by #363 then revert the following
        # hack:
        #
        import platform
        if platform.platform() == "Darwin":
            max_open_files = 512
        else:
            max_open_files = 64
        #
        # --------------------------

        resource.setrlimit(resource.RLIMIT_NOFILE,
                           (max_open_files, self.hard_max_open_files))

        with tmpdir.as_cwd():
            try:
                H = hole2.HoleAnalysis(universe, cvect=[0, 1, 0], sample=20.0)
            finally:
                self._restore_rlimits()

            # pretty unlikely that the code will get through 2 rounds if the MDA
            # issue 129 isn't fixed, although this depends on the file descriptor
            # open limit for the machine in question
            try:
                for i in range(2):
                    # will typically get an OSError for too many files being open after
                    # about 2 seconds if issue 129 isn't resolved
                    H.run()
            except OSError as err:
                if err.errno == errno.EMFILE:
                    raise pytest.fail(
                        "hole2.HoleAnalysis does not close file descriptors (Issue 129)"
                    )
                raise
            finally:
                # make sure to restore open file limit !!
                self._restore_rlimits()

    def _restore_rlimits(self):
        try:
            import resource
            resource.setrlimit(
                resource.RLIMIT_NOFILE,
                (self.soft_max_open_files, self.hard_max_open_files))
        except ImportError:
            pass
Example #44
0
def limit_memory(maxsize):
    '''
    Limit memory usage: set the total amount of memory the process may use.
    '''
    soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    resource.setrlimit(resource.RLIMIT_AS, (maxsize, hard))
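
A short usage sketch for the helper above (the sizes are illustrative): once the address-space limit is capped, an oversized allocation fails with MemoryError instead of exhausting the machine.

limit_memory(1024 * 1024 * 1024)  # cap RLIMIT_AS at 1 GiB for this process

try:
    blob = bytearray(2 * 1024 * 1024 * 1024)  # a 2 GiB allocation should now fail
except MemoryError:
    print("allocation rejected by the address-space limit")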
Example #45
0
def set_max_runtime(seconds: int) -> None:
    soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
    resource.setrlimit(resource.RLIMIT_CPU, (seconds, hard))
    signal.signal(signal.SIGXCPU, time_exceeded)
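
The time_exceeded handler referenced above is not shown in the snippet; a minimal sketch of what it typically does (an assumption, not from the source) is:

import sys

def time_exceeded(signo, frame):
    # The kernel delivers SIGXCPU once the soft RLIMIT_CPU value is reached;
    # clean up here and exit before the hard limit terminates the process.
    print("CPU time limit exceeded", file=sys.stderr)
    sys.exit(1)

If the process keeps running past the soft limit, it eventually reaches the hard limit and is killed with SIGKILL.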
Example #46
0
    def __init__(
        self,
        bento_bundle_path: str = Provide[BentoMLContainer.bundle_path],
        outbound_host: str = Provide[BentoMLContainer.forward_host],
        outbound_port: int = Provide[BentoMLContainer.forward_port],
        outbound_workers: int = Provide[BentoMLContainer.api_server_workers],
        mb_max_batch_size: int = Provide[
            BentoMLContainer.config.bento_server.microbatch.max_batch_size
        ],
        mb_max_latency: int = Provide[
            BentoMLContainer.config.bento_server.microbatch.max_latency
        ],
        max_request_size: int = Provide[
            BentoMLContainer.config.bento_server.max_request_size
        ],
        outbound_unix_socket: str = None,
        enable_access_control: bool = Provide[
            BentoMLContainer.config.bento_server.cors.enabled
        ],
        access_control_allow_origin: Optional[str] = Provide[
            BentoMLContainer.config.bento_server.cors.access_control_allow_origin
        ],
        access_control_options: Optional["ResourceOptions"] = Provide[
            BentoMLContainer.access_control_options
        ],
        timeout: int = Provide[BentoMLContainer.config.bento_server.timeout],
        tracer=Provide[BentoMLContainer.tracer],
    ):

        self._conn: Optional["BaseConnector"] = None
        self._client: Optional["ClientSession"] = None
        self.outbound_unix_socket = outbound_unix_socket
        self.outbound_host = outbound_host
        self.outbound_port = outbound_port
        self.outbound_workers = outbound_workers
        self.mb_max_batch_size = mb_max_batch_size
        self.mb_max_latency = mb_max_latency
        self.batch_handlers = dict()
        self._outbound_sema = None  # the semaphore to limit outbound connections
        self._cleanup_tasks = None
        self.max_request_size = max_request_size
        self.tracer = tracer

        self.enable_access_control = enable_access_control
        self.access_control_allow_origin = access_control_allow_origin
        self.access_control_options = access_control_options

        self.bento_service_metadata_pb = load_bento_service_metadata(bento_bundle_path)

        self.setup_routes_from_pb(self.bento_service_metadata_pb)
        self.timeout = timeout

        if psutil.POSIX:
            import resource

            self.CONNECTION_LIMIT = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        else:
            self.CONNECTION_LIMIT = 1024
        logger.info(
            "Your system nofile limit is %d, which means each instance of the microbatch "
            "service can hold at most this number of connections at the same time. "
            "You can increase the number of file descriptors for the server process, "
            "or launch more microbatch instances to accept more concurrent connections.",
            self.CONNECTION_LIMIT,
        )
Example #47
0
def start_daemon(proc, pid_fd=None):
    """
    start_daemon(proc, pid_fd=None) -> exit code
    Start a daemon process. The caller must pass a function, proc(), with a
    prototype that looks like this:
        def proc():
            return <integer>
    Please make sure the return code of proc() follows the Win32 system
    error code standard.

    If pid_fd is not None, it should be a valid file object (or a raw file
    descriptor) pointing to a lock file, so the real daemon PID can be
    written there.
    """
    import resource
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = MAXFD
    # Make sure stdin, stdout and stderr are closed.
    os.close(STDIN_FD)
    os.open(NULL_TO, os.O_RDWR)
    os.dup2(STDIN_FD, STDOUT_FD)
    os.dup2(STDIN_FD, STDERR_FD)

    try:
        pid = os.fork()
    except OSError:
        msg = "start_daemon(): Failed on fork()"
        write_log(STDERR_FD, None, ERROR_PROC_NOT_FOUND, msg)
        raise ICAException(ERROR_PROC_NOT_FOUND, msg)
    if pid == 0:
        os.setsid()
        # TODO Shall we ignore SIGHUP?
        # import signal
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)
        # TODO Not sure if it should be added. Ignoring child exit
        # signal can take load off icadaemon. However it looks like it's
        # supported only on Linux.
        # signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        try:
            pid = os.fork()
        except OSError:
            msg = "start_daemon(): Failed on fork(), second time"
            write_log(STDERR_FD, None, ERROR_PROC_NOT_FOUND, msg)
            raise ICAException(ERROR_PROC_NOT_FOUND, msg)

        if pid == 0:
            os.chdir(WORKDIR)
            os.umask(UMASK)
            proc_params = "Daemon is running: pid:%d,uid:%d,euid:%d,gid:%d,egid:%d" % (
                os.getpid(), os.getuid(), os.geteuid(), os.getgid(),
                os.getegid())
            # Use ERR level to make sure the pid information is always
            # shown. In FreeBSD 8.2, the INFO level message does not go
            # to /var/log/message by default.
            syslog.syslog(syslog.LOG_ERR, proc_params)

            if pid_fd is not None:
                if isinstance(pid_fd, int):
                    os.write(pid_fd, "%d\n" % os.getpid())
                    os.fsync(pid_fd)
                else:
                    pid_fd.write("%d\n" % os.getpid())
                    pid_fd.flush()
                    os.fsync(pid_fd.fileno())

            # Start specific function.
            try:
                ret = proc()
            except Exception:
                import StringIO
                import traceback
                ret = ERROR_BAD_ENVIRONMENT
                exception_strfd = StringIO.StringIO()
                traceback.print_exc(file=exception_strfd)
                msg = "FATAL: Daemon got unhandled exception."
                write_log(STDERR_FD, None, ret, msg)
                for each_line in exception_strfd.getvalue().split("\n"):
                    write_log(STDERR_FD, None, ret, each_line)
                msg = "FATAL: Traceback printed. Exit gracefully."
                write_log(STDERR_FD, None, ret, msg)

            if ret != ERROR_SUCCESS:
                msg = "FATAL: proc() exit with code: %d" % ret
                write_log(STDERR_FD, None, ret, msg)
            os._exit(ret)  # We should do cleanup here.
        else:
            os._exit(ERROR_SUCCESS)
    else:
        os._exit(ERROR_SUCCESS)
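
A hedged usage sketch for start_daemon() following its docstring; the proc body, the pidfile path and run_service_forever are illustrative assumptions, while ERROR_SUCCESS comes from the snippet above.

def proc():
    # Daemon main loop; returns a Win32-style status code as the docstring requires.
    run_service_forever()          # hypothetical long-running work
    return ERROR_SUCCESS

pid_fd = open("/var/run/icadaemon.pid", "w")   # lock file; the path is illustrative
start_daemon(proc, pid_fd)                     # the real daemon PID is written there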
Example #48
0
def set_max_runtime(seconds):
    # Install the signal handler and set a resource limit
    soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
    resource.setrlimit(resource.RLIMIT_CPU, (seconds, hard))
    signal.signal(signal.SIGXCPU, time_exceeded)  # When the limit expires at run time, SIGXCPU is delivered; the handler then performs cleanup and exits.
Example #49
0
    def fit(self, X, Y):
        import sklearn.svm

        # Calculate the size of the kernel cache (in MB) for sklearn's LibSVM. The cache size is
        # calculated as 2/3 of the available memory (which is calculated as the memory limit minus
        # the used memory)
        try:
            # Retrieve memory limits imposed on the process
            soft, hard = resource.getrlimit(resource.RLIMIT_AS)

            if soft > 0:
                # Convert limit to units of megabytes
                soft /= 1024 * 1024

                # Retrieve memory used by this process
                maxrss = resource.getrusage(resource.RUSAGE_SELF)[2] / 1024

                # On macOS, resource.getrusage reports MaxRSS in bytes; on
                # other platforms it is in kilobytes.
                if sys.platform == 'darwin':
                    maxrss = maxrss / 1024

                cache_size = (soft - maxrss) / 1.5

                if cache_size < 0:
                    cache_size = 200
            else:
                cache_size = 200
        except Exception:
            cache_size = 200

        self.C = float(self.C)
        if self.degree is None:
            self.degree = 3
        else:
            self.degree = int(self.degree)
        if self.gamma is None:
            self.gamma = 0.0
        else:
            self.gamma = float(self.gamma)
        if self.coef0 is None:
            self.coef0 = 0.0
        else:
            self.coef0 = float(self.coef0)
        self.tol = float(self.tol)
        self.max_iter = float(self.max_iter)

        self.shrinking = check_for_bool(self.shrinking)

        if check_none(self.class_weight):
            self.class_weight = None

        self.estimator = sklearn.svm.SVC(C=self.C,
                                         kernel=self.kernel,
                                         degree=self.degree,
                                         gamma=self.gamma,
                                         coef0=self.coef0,
                                         shrinking=self.shrinking,
                                         tol=self.tol,
                                         class_weight=self.class_weight,
                                         max_iter=self.max_iter,
                                         random_state=self.random_state,
                                         cache_size=cache_size,
                                         decision_function_shape='ovr')
        self.estimator.fit(X, Y)
        return self
Example #50
0
 def _setLimits(self):
     self.logger.debug('Setting rlimits')
     for limit in (resource.RLIMIT_NPROC, resource.RLIMIT_NOFILE):
         soft, hard = resource.getrlimit(limit)
         resource.setrlimit(limit, (hard, hard))
Example #51
0
    def test_max_open_files(self):
        with TestsLoggingHandler() as handler:
            logmsg_dbg = (
                'DEBUG:This salt-master instance has accepted {0} minion keys.'
            )
            logmsg_chk = (
                '{0}:The number of accepted minion keys({1}) should be lower '
                'than 1/4 of the max open files soft setting({2}). According '
                'to the system\'s hard limit, there\'s still a margin of {3} '
                'to raise the salt\'s max_open_files setting. Please consider '
                'raising this value.')
            logmsg_crash = (
                '{0}:The number of accepted minion keys({1}) should be lower '
                'than 1/4 of the max open files soft setting({2}). '
                'salt-master will crash pretty soon! According to the '
                'system\'s hard limit, there\'s still a margin of {3} to '
                'raise the salt\'s max_open_files setting. Please consider '
                'raising this value.')

            mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
            tempdir = tempfile.mkdtemp(prefix='fake-keys')
            keys_dir = os.path.join(tempdir, 'minions')
            os.makedirs(keys_dir)

            mof_test = 256

            resource.setrlimit(resource.RLIMIT_NOFILE, (mof_test, mof_h))

            try:
                prev = 0
                for newmax, level in ((24, None), (66, 'INFO'),
                                      (127, 'WARNING'), (196, 'CRITICAL')):

                    for n in range(prev, newmax):
                        kpath = os.path.join(keys_dir, str(n))
                        with salt.utils.fopen(kpath, 'w') as fp_:
                            fp_.write(str(n))

                    opts = {'max_open_files': newmax, 'pki_dir': tempdir}

                    check_max_open_files(opts)

                    if level is None:
                        # No log message is triggered, only the DEBUG one which
                        # tells us how many minion keys were accepted.
                        self.assertEqual([logmsg_dbg.format(newmax)],
                                         handler.messages)
                    else:
                        self.assertIn(logmsg_dbg.format(newmax),
                                      handler.messages)
                        self.assertIn(
                            logmsg_chk.format(
                                level,
                                newmax,
                                mof_test,
                                mof_h - newmax,
                            ), handler.messages)
                    handler.clear()
                    prev = newmax

                newmax = mof_test
                for n in range(prev, newmax):
                    kpath = os.path.join(keys_dir, str(n))
                    with salt.utils.fopen(kpath, 'w') as fp_:
                        fp_.write(str(n))

                opts = {'max_open_files': newmax, 'pki_dir': tempdir}

                check_max_open_files(opts)
                self.assertIn(logmsg_dbg.format(newmax), handler.messages)
                self.assertIn(
                    logmsg_crash.format(
                        'CRITICAL',
                        newmax,
                        mof_test,
                        mof_h - newmax,
                    ), handler.messages)
                handler.clear()
            except IOError as err:
                if err.errno == 24:
                    # Too many open files
                    self.skipTest('We\'ve hit the max open files setting')
                raise
            finally:
                shutil.rmtree(tempdir)
                resource.setrlimit(resource.RLIMIT_NOFILE, (mof_s, mof_h))
Example #52
0
    enc = preferred_encoding
    if isosx:
        enc = 'utf-8'
    for i in range(1, len(sys.argv)):
        if not isinstance(sys.argv[i], unicode):
            sys.argv[i] = sys.argv[i].decode(enc, 'replace')

    #
    # Ensure that the max number of open files is at least 1024
    if iswindows:
        # See https://msdn.microsoft.com/en-us/library/6e3b887c.aspx
        if hasattr(winutil, 'setmaxstdio'):
            winutil.setmaxstdio(max(1024, winutil.getmaxstdio()))
    else:
        import resource
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 1024:
            try:
                resource.setrlimit(resource.RLIMIT_NOFILE,
                                   (min(1024, hard), hard))
            except Exception:
                if DEBUG:
                    import traceback
                    traceback.print_exc()

    #
    # Setup resources
    import calibre.utils.resources as resources
    resources

    #
Example #53
0
def open_file_limit():
    return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
Example #54
0
        except OSError:
            pass
        else:
            if curMTime > lastMTime:
                netlist = loadList(filename)
                lastMTime = curMTime           
               
        line = sys.stdin.readline()

if __name__ == '__main__':
    # We appear to end up with superfluous FDs, including pipes from other
    # instances, forked from PowerDNS. This can keep us and others from
    # exiting as the fd never gets closed. Close all fds we don't need.
    try:
        import resource
        maxfds = resource.getrlimit(resource.RLIMIT_NOFILE)[1] + 1
        # OS-X reports 9223372036854775808. That's a lot of fds to close
        if maxfds > 1024:
            maxfds = 1024
    except:
        maxfds = 256

    import os
    for fd in range(3, maxfds):
        try:
            os.close(fd)
        except:
            pass
    
    main()
Example #55
0
def main(infile, outfile):
    try:
        boot_time = time.time()
        split_index = read_int(infile)
        if split_index == -1:  # for unit tests
            sys.exit(-1)

        version = utf8_deserializer.loads(infile)
        if version != "%d.%d" % sys.version_info[:2]:
            raise Exception((
                "Python in worker has different version %s than that in " +
                "driver %s, PySpark cannot run with different minor versions. "
                + "Please check environment variables PYSPARK_PYTHON and " +
                "PYSPARK_DRIVER_PYTHON are correctly set.") %
                            ("%d.%d" % sys.version_info[:2], version))

        # read inputs only for a barrier task
        isBarrier = read_bool(infile)
        boundPort = read_int(infile)
        secret = UTF8Deserializer().loads(infile)

        # set up memory limits
        memory_limit_mb = int(
            os.environ.get('PYSPARK_EXECUTOR_MEMORY_MB', "-1"))
        if memory_limit_mb > 0 and has_resource_module:
            total_memory = resource.RLIMIT_AS
            try:
                (soft_limit, hard_limit) = resource.getrlimit(total_memory)
                msg = "Current mem limits: {0} of max {1}\n".format(
                    soft_limit, hard_limit)
                print(msg, file=sys.stderr)

                # convert to bytes
                new_limit = memory_limit_mb * 1024 * 1024

                if soft_limit == resource.RLIM_INFINITY or new_limit < soft_limit:
                    msg = "Setting mem limits to {0} of max {1}\n".format(
                        new_limit, new_limit)
                    print(msg, file=sys.stderr)
                    resource.setrlimit(total_memory, (new_limit, new_limit))

            except (resource.error, OSError, ValueError) as e:
                # not all systems support resource limits, so warn instead of failing
                print("WARN: Failed to set memory limit: {0}\n".format(e),
                      file=sys.stderr)

        # initialize global state
        taskContext = None
        if isBarrier:
            taskContext = BarrierTaskContext._getOrCreate()
            BarrierTaskContext._initialize(boundPort, secret)
            # Set the task context instance here, so we can get it by TaskContext.get for
            # both TaskContext and BarrierTaskContext
            TaskContext._setTaskContext(taskContext)
        else:
            taskContext = TaskContext._getOrCreate()
        # read inputs for TaskContext info
        taskContext._stageId = read_int(infile)
        taskContext._partitionId = read_int(infile)
        taskContext._attemptNumber = read_int(infile)
        taskContext._taskAttemptId = read_long(infile)
        taskContext._resources = {}
        for r in range(read_int(infile)):
            key = utf8_deserializer.loads(infile)
            name = utf8_deserializer.loads(infile)
            addresses = []
            for a in range(read_int(infile)):
                addresses.append(utf8_deserializer.loads(infile))
            taskContext._resources[key] = ResourceInformation(name, addresses)

        taskContext._localProperties = dict()
        for i in range(read_int(infile)):
            k = utf8_deserializer.loads(infile)
            v = utf8_deserializer.loads(infile)
            taskContext._localProperties[k] = v

        shuffle.MemoryBytesSpilled = 0
        shuffle.DiskBytesSpilled = 0
        _accumulatorRegistry.clear()

        # fetch name of workdir
        spark_files_dir = utf8_deserializer.loads(infile)
        SparkFiles._root_directory = spark_files_dir
        SparkFiles._is_running_on_worker = True

        # fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
        add_path(
            spark_files_dir)  # *.py files that were added will be copied here
        num_python_includes = read_int(infile)
        for _ in range(num_python_includes):
            filename = utf8_deserializer.loads(infile)
            add_path(os.path.join(spark_files_dir, filename))
        if sys.version > '3':
            import importlib
            importlib.invalidate_caches()

        # fetch names and values of broadcast variables
        needs_broadcast_decryption_server = read_bool(infile)
        num_broadcast_variables = read_int(infile)
        if needs_broadcast_decryption_server:
            # read the decrypted data from a server in the jvm
            port = read_int(infile)
            auth_secret = utf8_deserializer.loads(infile)
            (broadcast_sock_file,
             _) = local_connect_and_auth(port, auth_secret)

        for _ in range(num_broadcast_variables):
            bid = read_long(infile)
            if bid >= 0:
                if needs_broadcast_decryption_server:
                    read_bid = read_long(broadcast_sock_file)
                    assert (read_bid == bid)
                    _broadcastRegistry[bid] = \
                        Broadcast(sock_file=broadcast_sock_file)
                else:
                    path = utf8_deserializer.loads(infile)
                    _broadcastRegistry[bid] = Broadcast(path=path)

            else:
                bid = -bid - 1
                _broadcastRegistry.pop(bid)

        if needs_broadcast_decryption_server:
            broadcast_sock_file.write(b'1')
            broadcast_sock_file.close()

        _accumulatorRegistry.clear()
        eval_type = read_int(infile)
        if eval_type == PythonEvalType.NON_UDF:
            func, profiler, deserializer, serializer = read_command(
                pickleSer, infile)
        else:
            func, profiler, deserializer, serializer = read_udfs(
                pickleSer, infile, eval_type)

        init_time = time.time()

        def process():
            iterator = deserializer.load_stream(infile)
            out_iter = func(split_index, iterator)
            try:
                serializer.dump_stream(out_iter, outfile)
            finally:
                if hasattr(out_iter, 'close'):
                    out_iter.close()

        if profiler:
            profiler.profile(process)
        else:
            process()

        # Reset task context to None. This is a guard code to avoid residual context when worker
        # reuse.
        TaskContext._setTaskContext(None)
        BarrierTaskContext._setTaskContext(None)
    except BaseException:
        try:
            exc_info = traceback.format_exc()
            if isinstance(exc_info, bytes):
                # exc_info may contain bytes in another encoding; replace the
                # invalid bytes and convert it back to utf-8 again
                exc_info = exc_info.decode("utf-8", "replace").encode("utf-8")
            else:
                exc_info = exc_info.encode("utf-8")
            write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
            write_with_length(exc_info, outfile)
        except IOError:
            # JVM close the socket
            pass
        except BaseException:
            # Write the error to stderr if it happened while serializing
            print("PySpark worker failed with exception:", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
        sys.exit(-1)
    finish_time = time.time()
    report_times(outfile, boot_time, init_time, finish_time)
    write_long(shuffle.MemoryBytesSpilled, outfile)
    write_long(shuffle.DiskBytesSpilled, outfile)

    # Mark the beginning of the accumulators section of the output
    write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
    write_int(len(_accumulatorRegistry), outfile)
    for (aid, accum) in _accumulatorRegistry.items():
        pickleSer._write_with_length((aid, accum._value), outfile)

    # check end of stream
    if read_int(infile) == SpecialLengths.END_OF_STREAM:
        write_int(SpecialLengths.END_OF_STREAM, outfile)
    else:
        # write a different value to tell JVM to not reuse this worker
        write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
        sys.exit(-1)
Example #56
0
def main(host, port, http_port, bokeh_port, show, _bokeh, bokeh_whitelist,
         prefix, use_xheaders, pid_file):

    if pid_file:
        with open(pid_file, 'w') as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    if sys.platform.startswith('linux'):
        import resource  # module fails importing on Windows
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        limit = max(soft, hard // 2)
        resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))

    given_host = host
    host = host or get_ip()
    if ':' in host and port == 8786:
        host, port = host.rsplit(':', 1)
        port = int(port)
    ip = socket.gethostbyname(host)
    loop = IOLoop.current()
    logger.info('-' * 47)

    services = {('http', http_port): HTTPScheduler}
    if _bokeh:
        from distributed.bokeh.scheduler import BokehScheduler
        services[('bokeh', 8788)] = BokehScheduler
    scheduler = Scheduler(ip=ip, loop=loop, services=services)
    scheduler.start(port)

    bokeh_proc = None
    if _bokeh:
        try:
            from distributed.bokeh.application import BokehWebInterface
            bokeh_proc = BokehWebInterface(host=host,
                                           http_port=http_port,
                                           tcp_port=port,
                                           bokeh_port=bokeh_port,
                                           bokeh_whitelist=bokeh_whitelist,
                                           show=show,
                                           prefix=prefix,
                                           use_xheaders=use_xheaders,
                                           quiet=False)
        except ImportError:
            logger.info("Please install Bokeh to get Web UI")
        except Exception as e:
            logger.warn("Could not start Bokeh web UI", exc_info=True)

    logger.info('-' * 47)
    try:
        loop.start()
        loop.close()
    finally:
        scheduler.stop()
        if bokeh_proc:
            bokeh_proc.close()

        logger.info("End scheduler at %s:%d", ip, port)
Example #57
0
    def __init__(self,
                 application,
                 environ=None,
                 bindAddress=None,
                 umask=None,
                 multiplexed=False,
                 debug=False,
                 roles=(FCGI_RESPONDER, ),
                 forceCGI=False,
                 timeout=None,
                 **kw):
        """
        environ, if present, must be a dictionary-like object. Its
        contents will be copied into application's environ. Useful
        for passing application-specific variables.

        bindAddress, if present, must either be a string or a 2-tuple. If
        present, run() will open its own listening socket. You would use
        this if you wanted to run your application as an 'external' FastCGI
        app. (i.e. the webserver would no longer be responsible for starting
        your app) If a string, it will be interpreted as a filename and a UNIX
        socket will be opened. If a tuple, the first element, a string,
        is the interface name/IP to bind to, and the second element (an int)
        is the port number.
        """
        BaseFCGIServer.__init__(self,
                                application,
                                environ=environ,
                                multithreaded=False,
                                multiprocess=True,
                                bindAddress=bindAddress,
                                umask=umask,
                                multiplexed=multiplexed,
                                debug=debug,
                                roles=roles,
                                forceCGI=forceCGI)
        for key in ('multithreaded', 'multiprocess', 'jobClass', 'jobArgs'):
            if key in kw:
                del kw[key]
        PreforkServer.__init__(self,
                               jobClass=self._connectionClass,
                               jobArgs=(self, timeout),
                               **kw)

        try:
            import resource
            # Attempt to glean the maximum number of connections
            # from the OS.
            try:
                maxProcs = resource.getrlimit(resource.RLIMIT_NPROC)[0]
                maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
                maxConns = min(maxConns, maxProcs)
            except AttributeError:
                maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        except ImportError:
            maxConns = 100  # Just some made up number.
        maxReqs = maxConns
        self.capability = {
            FCGI_MAX_CONNS: maxConns,
            FCGI_MAX_REQS: maxReqs,
            FCGI_MPXS_CONNS: 0
        }
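
The bindAddress semantics described in the docstring can be illustrated with a short sketch; the WSGIServer class name and the run() call are assumptions here, since the snippet only shows the __init__:

def app(environ, start_response):
    # Minimal WSGI application used only for illustration.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello\n"]

# As an 'external' FastCGI app on a UNIX socket (string bindAddress):
#     WSGIServer(app, bindAddress="/tmp/myapp.sock", umask=0o111).run()
# ...or bound to an interface/port pair (2-tuple bindAddress):
#     WSGIServer(app, bindAddress=("127.0.0.1", 9000)).run()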
Example #58
0
import importlib
import pytest
import syncopy
from syncopy import __acme__
import syncopy.tests.test_packagesetup as setupTestModule

# If dask is available, either launch a SLURM cluster on a cluster node or
# create a `LocalCluster` object if tests are run on a single machine. If dask
# is not installed, return a dummy None-valued cluster object (tests will be
# skipped anyway)
if __acme__:
    import dask.distributed as dd
    import resource
    from acme.dask_helpers import esi_cluster_setup
    from syncopy.tests.misc import is_slurm_node
    if max(resource.getrlimit(resource.RLIMIT_NOFILE)) < 1024:
        msg = "Not enough open file descriptors allowed. Consider increasing " +\
            "the limit using, e.g., `ulimit -Sn 1024`"
        raise ValueError(msg)
    if is_slurm_node():
        cluster = esi_cluster_setup(partition="8GB", n_jobs=10,
                                    timeout=360, interactive=False,
                                    start_client=False)
    else:
        cluster = dd.LocalCluster(n_workers=2)
else:
    cluster = None

# Set up a pytest fixture `testcluster` that uses the constructed cluster object
@pytest.fixture
def testcluster():
Example #59
0
def rlimitTestAndSet(name, limit):
    "Helper function to set rlimits"
    soft, hard = getrlimit(name)
    if soft < limit:
        hardLimit = hard if limit < hard else limit
        setrlimit(name, (limit, hardLimit))
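
A usage sketch, assuming the bare getrlimit/setrlimit names were imported from resource as the helper's body implies:

from resource import RLIMIT_NOFILE  # getrlimit/setrlimit imported the same way

# Make sure this process may open at least 16384 file descriptors.
rlimitTestAndSet(RLIMIT_NOFILE, 16384)

Note that the branch which raises the hard limit only succeeds for privileged processes; an unprivileged caller can raise its soft limit only up to the current hard limit.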
Example #60
0
# print out usage if incorrect arguments are passed
if not vars(args):
  parser.print_help()
  parser.exit(1)
percent_limit = args.percent_limit

# Pull the max open fd's and currently open fd's
file_nr = open("/proc/sys/fs/file-nr").read().strip()
open_fd = file_nr.split()[0]
max_fd = file_nr.split()[-1]
percentage_max_fd = round(float(open_fd) / float(max_fd) * 100, 2)
if percentage_max_fd > percent_limit:
  print "Total open file descriptors have reached", percentage_max_fd, "% of the system max (", open_fd, " of ", max_fd, ")"

# get the soft limit and store it, to use as backup
default_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]

pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]

for pid in pids:
  try:
    pid = pid.strip()
    soft_limit = default_limit
    limits = open("/proc/" + pid + "/limits").readlines()  # get the limits for the proc
    for line in limits:
      if line.startswith("Max open files"):
        soft_limit = int(line.split()[3])  # splitting the line on spaces we want the 4th value
        break
    # used os.walk() to get the names in the /proc/[pid]/fd dir
    fds = []
    for (dirpath, dirname, filenames) in os.walk("/proc/" + pid + "/fd/"):