예제 #1
0
def test_concurrent_executor():
    """A two-worker request should produce a multiprocessing executor,
    regardless of whether cloudpickle is used for serialization."""
    runner = get_executor(None, 2)
    assert 'Multi' in str(runner)
    run_tests_for_runner(runner, 0.3)

    # Same checks with the stdlib pickler instead of cloudpickle.
    plain_pickle_runner = get_executor(None, 2, use_cloud_pickle=False)
    assert 'Multi' in str(plain_pickle_runner)
    run_tests_for_runner(plain_pickle_runner, 0.3)
def test_concurrent_executor():
    """Verify both pickling modes of the two-worker executor behave as the
    multiprocessing variant."""
    for extra_kwargs in ({}, {'use_cloud_pickle': False}):
        executor = get_executor(None, 2, **extra_kwargs)
        assert str(executor).find('Multiproc') != -1
        run_executor_tests(executor)
예제 #3
0
def ingestion_work(output_type, source_type, ingestion_definition):
    """Run the ingestion process for a user defined configuration.

    Args:
        output_type, source_type: types produced by ingest.make_output_type
        ingestion_definition: dict representing a Data Cube ingestion def
            produced using the utils func.

    Returns:
        int: 0 on completion.
    """
    conf_path = '/home/' + settings.LOCAL_USER + '/Datacube/data_cube_ui/config/.datacube.conf'
    index = index_connect(local_config=LocalConfig.find([conf_path]))

    try:
        tasks = ingest.create_task_list(index, output_type, None, source_type,
                                        ingestion_definition)

        # this is a dry run
        # paths = [ingest.get_filename(ingestion_definition, task['tile_index'], task['tile'].sources) for task in tasks]
        # ingest.check_existing_files(paths)

        # this actually ingests stuff
        successful, failed = ingest.process_tasks(index, ingestion_definition,
                                                  source_type, output_type, tasks,
                                                  3200, get_executor(None, None))
    finally:
        # Always release the index connection, even when task creation or
        # processing raises (previously a failure leaked the connection).
        index.close()

    return 0
예제 #4
0
def ingest_subset(ingestion_request_id=None):
    """Run the ingestion process on the new database.

    Open a connection to the new database and run ingestion based on the
    ingestion configuration found on the IngestionRequest model.

    Args:
        ingestion_request_id: primary key of the IngestionRequest to process.
    """

    ingestion_request = IngestionRequest.objects.get(pk=ingestion_request_id)

    config = get_config(ingestion_request.user)
    index = index_connect(local_config=config, validate_connection=False)

    # This is done because of something that the agdc guys do in ingest:
    # https://github.com/opendatacube/datacube-core/blob/develop/datacube/scripts/ingest.py#L168
    ingestion_request.ingestion_definition[
        'filename'] = "ceos_data_cube_sample.yaml"

    try:
        source_type, output_type = ingest.make_output_type(
            index, ingestion_request.ingestion_definition)
        tasks = list(
            ingest.create_task_list(index, output_type, None, source_type,
                                    ingestion_request.ingestion_definition))

        ingestion_request.total_storage_units = len(tasks)
        ingestion_request.update_status("WAIT",
                                        "Starting the ingestion process...")

        successful, failed = ingest.process_tasks(
            index, ingestion_request.ingestion_definition, source_type,
            output_type, tasks, 3200, get_executor(None, None))
    finally:
        # try/finally replaces the earlier bare `except: close(); raise`
        # pattern: the index is closed exactly once on every exit path and
        # no exception type is swallowed or re-dispatched by hand.
        index.close()
예제 #5
0
        @pass_index(app_name=app_name, expect_initialised=expect_initialised)
        def with_datacube(index, *args, **kwargs):
            return f(Datacube(index=index), *args, **kwargs)

        return functools.update_wrapper(with_datacube, f)

    return decorate


def parse_endpoint(value):
    """Parse an 'ip:port' string into an (ip, int(port)) tuple.

    Raises ValueError when the string is not exactly two colon-separated
    parts or the port is not an integer.
    """
    ip_addr, port_str = value.split(':')
    return ip_addr, int(port_str)


# Executor factories keyed by CLI name; each callable receives the raw
# argument string supplied on the command line.
EXECUTOR_TYPES = {
    'serial': lambda _arg: get_executor(None, None),
    'multiproc': lambda num_workers: get_executor(None, int(num_workers)),
    'distributed': lambda endpoint: get_executor(parse_endpoint(endpoint), True),
    'celery': lambda endpoint: mk_celery_executor(*parse_endpoint(endpoint)),
}

# "dask" is accepted as an alias for the distributed executor.
EXECUTOR_TYPES['dask'] = EXECUTOR_TYPES['distributed']


def _setup_executor(ctx, param, value):
    """click option callback: build an executor from a (type, argument) pair.

    Aborts the CLI via ctx.fail when the argument cannot be converted
    (e.g. a non-integer worker count or a malformed endpoint).
    """
    factory = EXECUTOR_TYPES[value[0]]
    try:
        return factory(value[1])
    except ValueError:
        ctx.fail("Failed to create '%s' executor with '%s'" % value)
예제 #6
0
def test_fallback_executor():
    """Requesting no endpoint and no worker count falls back to serial."""
    serial_runner = get_executor(None, None)
    assert 'Serial' in str(serial_runner)
    run_tests_for_runner(serial_runner, 0)
예제 #7
0
                return f(index, *args, **kwargs)
            except (OperationalError, ProgrammingError) as e:
                handle_exception('Error Connecting to database: %s', e)

        return functools.update_wrapper(with_index, f)

    return decorate


def parse_endpoint(value):
    """Return (host, port) parsed from a colon-separated endpoint string."""
    host, port = value.split(':')
    port = int(port)
    return host, port


# Map of executor name -> factory; each factory takes the raw CLI argument.
EXECUTOR_TYPES = {
    'serial': lambda _unused: get_executor(None, None),
    'multiproc': lambda worker_count: get_executor(None, int(worker_count)),
    'distributed': lambda address: get_executor(parse_endpoint(address), True),
}


def _setup_executor(ctx, param, value):
    """Instantiate the requested executor, failing the CLI on a bad argument."""
    try:
        make_executor = EXECUTOR_TYPES[value[0]]
        return make_executor(value[1])
    except ValueError:
        # int()/parse_endpoint failures end up here; report both pieces.
        ctx.fail("Failed to create '%s' executor with '%s'" % value)


executor_cli_options = click.option('--executor',
                                    type=(click.Choice(EXECUTOR_TYPES.keys()), str),
                                    default=('serial', None),
def test_fallback_executor():
    """A (None, None) request should yield the serial executor."""
    executor = get_executor(None, None)
    assert str(executor).find('Serial') != -1

    run_executor_tests(executor, sleep_time=0)