Example no. 1
def clean(mode, config, debug):
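    """Remove all temporary data created by Lithops for the selected
    execution mode: runtime and job objects in the storage bucket, the
    local temp directory and the local cache."""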
    log_level = 'INFO' if not debug else 'DEBUG'
    setup_logger(log_level)
    logger.info('Cleaning all Lithops information')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    mode = config['lithops']['mode'] if not mode else mode
    if mode == LOCALHOST:
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
    elif mode == SERVERLESS:
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
    elif mode == STANDALONE:
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)

    compute_handler.clean()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean localhost executor temp dirs
    shutil.rmtree(LITHOPS_TEMP_DIR, ignore_errors=True)
    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
Example no. 2
def clean_all(config=None):
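    """Remove all temporary data created by Lithops for the configured
    default executor: storage bucket prefixes, the local storage
    directory and the local cache."""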
    logger.info('Cleaning all Lithops information')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    default_executor = config['lithops']['executor']
    if default_executor == 'localhost':
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
    elif default_executor == 'serverless':
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
    elif default_executor == 'standalone':
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)

    compute_handler.clean()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean localhost executor temp dirs
    shutil.rmtree(STORAGE_DIR, ignore_errors=True)
    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
Example no. 3
def clean_executor_jobs(executor_id, executor_data):
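    """Delete from object storage the job data (and, if requested, the
    temporary cloudobjects) referenced by each entry in executor_data,
    removing each entry's backing file once it has been processed."""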

    storage = None
    prefix = '/'.join([JOBS_PREFIX, executor_id])

    for file_data in executor_data:
        file_location = file_data['file_location']
        data = file_data['data']

        storage_config = data['storage_config']
        clean_cloudobjects = data['clean_cloudobjects']
        if not storage:
            storage = Storage(storage_config=storage_config)

        logger.info(f'Cleaning jobs {", ".join(data["jobs_to_clean"])}')

        objects = storage.list_keys(storage.bucket, prefix)

        objects_to_delete = [
            key for key in objects if '-'.join(
                key.split('/')[1].split('-')[0:3]) in data['jobs_to_clean']
        ]

        # Retry until no objects belonging to the jobs being cleaned remain
        while objects_to_delete:
            storage.delete_objects(storage.bucket, objects_to_delete)
            time.sleep(5)
            objects = storage.list_keys(storage.bucket, prefix)
            objects_to_delete = [
                key for key in objects if '-'.join(
                    key.split('/')[1].split('-')[0:3]) in data['jobs_to_clean']
            ]

        if clean_cloudobjects:
            # Also remove the temporary cloudobjects created by these jobs
            for job_key in data['jobs_to_clean']:
                # Use a dedicated variable so the executor jobs prefix
                # computed above is not overwritten for later iterations
                co_prefix = '/'.join([TEMP_PREFIX, job_key])
                clean_bucket(storage, storage.bucket, co_prefix)

        if os.path.exists(file_location):
            os.remove(file_location)
        logger.info('Finished')
Example no. 4
def clean_all(config=None):
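    """Remove all temporary data created by Lithops: deployed runtimes,
    storage bucket prefixes, local executor folders and the local cache."""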
    logger.info('Cleaning all Lithops information')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_compute_config(config)
    compute_handler = Compute(compute_config)

    # Clean localhost executor temp dirs
    shutil.rmtree(STORAGE_FOLDER, ignore_errors=True)
    shutil.rmtree(DOCKER_FOLDER, ignore_errors=True)

    # Delete all deployed runtimes
    compute_handler.delete_all_runtimes()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
Example no. 5
def clean(config, mode, backend, storage, debug):
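    """Remove all temporary data created by Lithops for the selected
    mode and backend: storage bucket prefixes, the local temp directory
    and the local cache."""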
    if config:
        config = load_yaml_config(config)

    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)
    logger.info('Cleaning all Lithops information')

    mode = mode or get_mode(backend, config)
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    mode = config['lithops']['mode'] if not mode else mode
    if mode == LOCALHOST:
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
    elif mode == SERVERLESS:
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
    elif mode == STANDALONE:
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)

    compute_handler.clean()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean localhost executor temp dirs
    shutil.rmtree(LITHOPS_TEMP_DIR, ignore_errors=True)
    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
Example no. 6
    def clean_file(file_name):
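        """Process a single cleaner metadata file: delete the job data
        and/or cloudobjects it references from object storage, then
        remove the file itself."""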
        file_location = os.path.join(CLEANER_DIR, file_name)

        if file_location in [CLEANER_LOG_FILE, CLEANER_PID_FILE]:
            return

        # Each cleaner metadata file is a pickled dict describing what to delete
        with open(file_location, 'rb') as pk:
            data = pickle.load(pk)

        if 'jobs_to_clean' in data:
            jobs_to_clean = data['jobs_to_clean']
            storage_config = data['storage_config']
            clean_cloudobjects = data['clean_cloudobjects']
            storage = Storage(storage_config=storage_config)

            for job_key in jobs_to_clean:
                logger.info('Going to clean: {}'.format(job_key))

                prefix = '/'.join([JOBS_PREFIX, job_key])
                clean_bucket(storage, storage.bucket, prefix)

                if clean_cloudobjects:
                    prefix = '/'.join([TEMP_PREFIX, job_key])
                    clean_bucket(storage, storage.bucket, prefix)

        if 'cos_to_clean' in data:
            logger.info('Going to clean cloudobjects')
            cos_to_clean = data['cos_to_clean']
            storage_config = data['storage_config']
            storage = Storage(storage_config=storage_config)

            for co in cos_to_clean:
                if co.backend == storage.backend:
                    logger.info('Cleaning {}://{}/{}'.format(
                        co.backend, co.bucket, co.key))
                    storage.delete_object(co.bucket, co.key)

        if os.path.exists(file_location):
            os.remove(file_location)