Example #1
0
def _get_cache_settings(spec):
    """
    Build the keyword arguments for constructing a disk cache.

    Caching can be switched off per-task via the spec's "use_cache" flag
    (default True) or globally via the girder_io diskcache_enabled option;
    in either case None is returned instead of a settings dict.
    """
    if not spec.get("use_cache", True) or not config.getboolean(
            "girder_io", "diskcache_enabled"):
        return None
    # Each setting maps to a girder_io option named "diskcache_<setting>",
    # read with the appropriate typed getter.
    options = (
        ("directory", config.get),
        ("eviction_policy", config.get),
        ("size_limit", config.getint),
        ("cull_limit", config.getint),
        ("large_value_threshold", config.getint),
    )
    return {name: getter("girder_io", "diskcache_%s" % name)
            for name, getter in options}
Example #2
0
def _get_cache_settings(spec):
    """
    Assemble disk-cache settings from the worker configuration.

    :param spec: the task spec; its 'use_cache' key (default True) may
        disable caching for this task.
    :returns: a dict of diskcache constructor kwargs, or None when caching
        is disabled either by the spec or by the girder_io config section.
    """
    if not spec.get('use_cache', True):
        return None
    if not config.getboolean('girder_io', 'diskcache_enabled'):
        return None
    return {
        'directory': config.get('girder_io', 'diskcache_directory'),
        'eviction_policy': config.get('girder_io', 'diskcache_eviction_policy'),
        'size_limit': config.getint('girder_io', 'diskcache_size_limit'),
        'cull_limit': config.getint('girder_io', 'diskcache_cull_limit'),
        'large_value_threshold': config.getint(
            'girder_io', 'diskcache_large_value_threshold'),
    }
Example #3
0
def _get_cache_settings(spec):
    """
    Return the settings dict used to construct a disk cache.

    Yields None when the task spec's 'use_cache' flag (default True) is
    falsy, or when the girder_io diskcache_enabled option is off.
    """
    if not spec.get('use_cache', True):
        return None
    if not config.getboolean('girder_io', 'diskcache_enabled'):
        return None
    # String-valued options are read with config.get, integer ones with
    # config.getint; the option name is 'diskcache_' + setting name.
    str_opts = ('directory', 'eviction_policy')
    int_opts = ('size_limit', 'cull_limit', 'large_value_threshold')
    settings = {key: config.get('girder_io', 'diskcache_' + key)
                for key in str_opts}
    settings.update({key: config.getint('girder_io', 'diskcache_' + key)
                     for key in int_opts})
    return settings
Example #4
0
def _read_from_config(key, default):
    """
    Helper to read Docker specific config values from the worker config files.

    :param key: option name within the [docker] config section.
    :param default: value returned when the option is not present.
    """
    return config.get('docker', key) if config.has_option('docker', key) else default
Example #5
0
def _read_from_config(key, default):
    """
    Helper to read Docker specific config values from the worker config files.

    :param key: option name looked up in the [docker] section.
    :param default: fallback value when the option is absent.
    """
    if not config.has_option('docker', key):
        return default
    return config.get('docker', key)
Example #6
0
def docker_gc(e):
    """
    Garbage collect containers that have not been run in the last hour using the
    https://github.com/spotify/docker-gc project's script, which is copied in
    the same directory as this file. After that, deletes all images that are
    no longer used by any containers.

    :param e: the triggering event (unused here; required by the event
        handler signature).
    """
    if not _read_bool_from_config('gc', False):
        return
    stampfile = os.path.join(config.get('girder_worker', 'tmp_root'),
                             '.dockergcstamp')
    # Rate-limit GC: skip entirely if the stamp file was touched within
    # MIN_GC_INTERVAL seconds; otherwise touch it and proceed.
    if os.path.exists(stampfile) and time.time() - os.path.getmtime(
            stampfile) < MIN_GC_INTERVAL:
        return
    else:  # touch the file
        with open(stampfile, 'w') as f:
            f.write('')

    logger.info('Garbage collecting docker containers and images.')
    gc_dir = tempfile.mkdtemp()

    try:
        script = os.path.join(os.path.dirname(__file__), 'docker-gc')
        if not os.path.isfile(script):
            raise Exception('Docker GC script %s not found.' % script)
        if not os.access(script, os.X_OK):
            raise Exception('Docker GC script %s is not executable.' % script)

        env = os.environ.copy()
        env['FORCE_CONTAINER_REMOVAL'] = '1'
        env['STATE_DIR'] = gc_dir
        env['PID_DIR'] = gc_dir
        env['GRACE_PERIOD_SECONDS'] = str(
            _read_from_config('cache_timeout', 3600))

        # Handle excluded images. Strip whitespace from each entry so
        # config values like "img1, img2" produce exact image names in the
        # exclude file (matching how comma-separated plugin lists are parsed
        # elsewhere in this package); previously untrimmed entries such as
        # " img2" were written verbatim and would never match.
        excluded = _read_from_config('exclude_images', '').split(',')
        excluded = [img.strip() for img in excluded if img.strip()]
        if excluded:
            exclude_file = os.path.join(gc_dir, '.docker-gc-exclude')
            with open(exclude_file, 'w') as fd:
                fd.write('\n'.join(excluded) + '\n')
            env['EXCLUDE_FROM_GC'] = exclude_file

        p = subprocess.Popen(args=(script, ), env=env)
        p.wait()  # Wait for garbage collection subprocess to finish

        if p.returncode != 0:
            raise Exception('Docker GC returned code %d.' % p.returncode)
    finally:
        # Always remove the scratch state/PID directory, even on failure.
        shutil.rmtree(gc_dir)
Example #7
0
def docker_gc(e):
    """
    Garbage collect containers that have not been run in the last hour using the
    https://github.com/spotify/docker-gc project's script, which is copied in
    the same directory as this file. After that, deletes all images that are
    no longer used by any containers.

    :param e: the triggering event (unused here; required by the event
        handler signature).
    """
    if not _read_bool_from_config('gc', False):
        return
    stampfile = os.path.join(config.get('girder_worker', 'tmp_root'), '.dockergcstamp')
    # Rate-limit GC: skip if the stamp file was touched within
    # MIN_GC_INTERVAL seconds; otherwise touch it and proceed.
    if os.path.exists(stampfile) and time.time() - os.path.getmtime(stampfile) < MIN_GC_INTERVAL:
        return
    else:  # touch the file
        with open(stampfile, 'w') as f:
            f.write('')

    logger.info('Garbage collecting docker containers and images.')
    gc_dir = tempfile.mkdtemp()

    try:
        script = os.path.join(os.path.dirname(__file__), 'docker-gc')
        if not os.path.isfile(script):
            raise Exception('Docker GC script %s not found.' % script)
        if not os.access(script, os.X_OK):
            raise Exception('Docker GC script %s is not executable.' % script)

        env = os.environ.copy()
        env['FORCE_CONTAINER_REMOVAL'] = '1'
        env['STATE_DIR'] = gc_dir
        env['PID_DIR'] = gc_dir
        env['GRACE_PERIOD_SECONDS'] = str(_read_from_config('cache_timeout', 3600))

        # Handle excluded images. Strip whitespace from each entry so
        # config values like "img1, img2" produce exact image names in the
        # exclude file (matching how comma-separated plugin lists are parsed
        # elsewhere in this package); previously untrimmed entries such as
        # " img2" were written verbatim and would never match.
        excluded = _read_from_config('exclude_images', '').split(',')
        excluded = [img.strip() for img in excluded if img.strip()]
        if excluded:
            exclude_file = os.path.join(gc_dir, '.docker-gc-exclude')
            with open(exclude_file, 'w') as fd:
                fd.write('\n'.join(excluded) + '\n')
            env['EXCLUDE_FROM_GC'] = exclude_file

        p = subprocess.Popen(args=(script,), env=env)
        p.wait()  # Wait for garbage collection subprocess to finish

        if p.returncode != 0:
            raise Exception('Docker GC returned code %d.' % p.returncode)
    finally:
        # Always remove the scratch state/PID directory, even on failure.
        shutil.rmtree(gc_dir)
Example #8
0
        events.trigger('run.after', info)

        return outputs
    except StateTransitionException:
        if job_mgr:
            status = job_mgr.refreshStatus()
            # If we are canceling we want to stay in that state, otherwise raise
            # the exception
            if status != JobStatus.CANCELING:
                raise
        else:
            raise
    finally:
        events.trigger('run.finally', info)


# Register the built-in task executors under their mode names.
register_executor('python', python_run)
register_executor('workflow', workflow_run)

# Load plugins that are enabled in the config file or env var.
# WORKER_PLUGINS_ENABLED (comma-separated) takes precedence over the
# girder_worker/plugins_enabled config option; blank entries are dropped
# and names are whitespace-stripped.
_plugins = os.environ.get('WORKER_PLUGINS_ENABLED',
                          config.get('girder_worker', 'plugins_enabled'))
_plugins = [p.strip() for p in _plugins.split(',') if p.strip()]
# Plugin search paths: WORKER_PLUGIN_LOAD_PATH (colon-separated) overrides
# the config value; blank entries are dropped, and the package's bundled
# ``plugins`` directory is always appended last.
_paths = os.environ.get(
    'WORKER_PLUGIN_LOAD_PATH', config.get(
        'girder_worker', 'plugin_load_path')).split(':')
_paths = [p for p in _paths if p.strip()]
_paths.append(os.path.join(PACKAGE_DIR, 'plugins'))
utils.load_plugins(_plugins, _paths, quiet=True)
Example #9
0
def unregister_executor(name):
    """
    Remove a previously registered executor from the executor map.

    :param name: The name the executor was registered under.
    :type name: str
    :raises KeyError: if no executor is registered under ``name``.
    """
    _task_map.pop(name)


# Register the built-in task executors under their mode names.
register_executor('python', python_run)
register_executor('workflow', workflow_run)

# Load plugins that are enabled in the config file or env var.
# WORKER_PLUGINS_ENABLED (comma-separated) takes precedence over the
# girder_worker/plugins_enabled config option; blank entries are dropped
# and names are whitespace-stripped.
_plugins = os.environ.get('WORKER_PLUGINS_ENABLED',
                          config.get('girder_worker', 'plugins_enabled'))
_plugins = [p.strip() for p in _plugins.split(',') if p.strip()]
# Plugin search paths: WORKER_PLUGIN_LOAD_PATH (colon-separated) overrides
# the config value; blank entries are dropped, and the package's bundled
# ``plugins`` directory is always appended last.
_paths = os.environ.get(
    'WORKER_PLUGIN_LOAD_PATH', config.get(
        'girder_worker', 'plugin_load_path')).split(':')
_paths = [p for p in _paths if p.strip()]
_paths.append(os.path.join(PACKAGE_DIR, 'plugins'))
utils.load_plugins(_plugins, _paths, quiet=True)


def _resolve_scripts(task):
    if task.get('mode') != 'workflow':
        if 'script_uri' in task and 'script' not in task:
            task['script'] = io.fetch({
                'url': task['script_uri']
            })