Example No. 1
def create_fields(monkeypatch):
    monkeypatch.chdir(Path('e2e') / 'scenarios' / 'pano-field-cleanup')
    (Paths.fields_dir(Path('test_dataset')) /
     'orphan_test_field.field.yaml').write_text(TEST_ORPHANED_FIELD)
    (Paths.fields_dir(Path('test_dataset')) /
     'calculated_test_field.field.yaml').write_text(TEST_CALCULATED_FIELD)
    yield
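The monkeypatch argument and the trailing yield suggest this is a pytest fixture whose @pytest.fixture decorator was trimmed from the excerpt. A minimal sketch of how it would presumably be registered and consumed (the test name is illustrative):

import pytest

@pytest.fixture
def create_fields(monkeypatch):
    ...  # setup as above, then yield

def test_field_cleanup(create_fields):
    # requesting the fixture runs the chdir and writes both field files first
    ...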
Example No. 2
def test_validate_config_valid(tmp_path, monkeypatch):
    monkeypatch.setattr(Path, 'home', lambda: tmp_path)

    Paths.config_dir().mkdir()
    with Paths.config_file().open('w') as f:
        f.write(yaml.dump(VALID_CONFIG))

    validate_config()
Example No. 3
def test_validate_config_invalid_yaml(tmp_path, monkeypatch):
    monkeypatch.setattr(Path, 'home', lambda: tmp_path)

    Paths.config_dir().mkdir()
    with Paths.config_file().open('w') as f:
        f.write('not:\nyaml')

    with pytest.raises(InvalidYamlFile):
        validate_config()
Example No. 4
def test_push_pull_e2e(monkeypatch):
    monkeypatch.chdir(Path('e2e') / 'scenarios' / 'pano-push-pull')
    dataset_dir = Path('test_dataset')
    dataset_file: Path = dataset_dir / PresetFileName.DATASET_YAML.value
    model_file: Path = dataset_dir / f'test_model{FileExtension.MODEL_YAML.value}'

    # Create company scoped field
    company_fields_dir = Paths.fields_dir(Path.cwd())
    company_fields_dir.mkdir(exist_ok=True)
    company_field_file: Path = company_fields_dir / f'company_test_field{FileExtension.FIELD_YAML.value}'
    company_field_file.write_text(TEST_COMPANY_FIELD)

    # Create dataset and model to push
    dataset_dir.mkdir(exist_ok=True)
    dataset_file.write_text(TEST_DATASET)
    model_file.write_text(TEST_MODEL)
    # Create dataset scoped field
    dataset_fields_dir = Paths.fields_dir(dataset_dir)
    dataset_fields_dir.mkdir(exist_ok=True)

    dataset_field_file: Path = dataset_fields_dir / f'dataset_test_field{FileExtension.FIELD_YAML.value}'
    dataset_field_file.write_text(TEST_DATASET_FIELD)

    # Push dataset and model
    runner = CliRunner()
    result = runner.invoke(cli, ['push', '-y'])

    # Check push was successful
    assert result.exit_code == 0

    # Delete local files so they can be re-created with pull
    dataset_file.unlink()
    model_file.unlink()
    company_field_file.unlink()
    dataset_field_file.unlink()

    # Pull dataset and model
    result = runner.invoke(cli, ['pull', '-y'])

    # Check pull was successful
    assert result.exit_code == 0
    assert dataset_file.exists()
    assert model_file.exists()
    assert dataset_field_file.exists()
    assert company_field_file.exists()

    # Delete local dataset and model files
    dataset_file.unlink()
    model_file.unlink()
    company_field_file.unlink()
    dataset_field_file.unlink()

    # Push deleted dataset and model
    result = runner.invoke(cli, ['push', '-y'])

    # Check push was successful
    assert result.exit_code == 0
Example No. 5
    def __init__(self, *, path: Path):
        if path == Paths.context_file():
            msg = f'Context file ({path.name}) not found in current working directory. Run pano init to create it.'
        elif path == Paths.config_file():
            msg = f'Config file ({path.absolute()}) not found. Run pano configure to create it.'
        else:
            # Should not happen: only the two files above are checked explicitly
            msg = f'File Missing - {path}'

        super().__init__(msg)
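A hedged sketch of a call site for this exception. The class name is not visible in the excerpt, so FileMissingError is an assumed name used purely for illustration:

def require_context_file() -> None:
    # hypothetical helper; raises when `pano init` has not been run yet
    path = Paths.context_file()
    if not path.exists():
        raise FileMissingError(path=path)  # class name assumed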
Example No. 6
def test_field_cleanup_e2e(_, create_fields):
    runner = CliRunner()
    result = runner.invoke(cli, ['field', 'cleanup', '-y'])

    fields_dir = Paths.fields_dir(Path('test_dataset'))

    assert result.exit_code == 0
    assert {f.name for f in fields_dir.iterdir()} == {
        'dataset_test_field.field.yaml',
        'calculated_test_field.field.yaml',
    }
    assert {f.name for f in Paths.company_fields_dir().iterdir()} == {'company_test_field.field.yaml'}
Example No. 7
    def delete_field(self, field: PanoField):
        """Delete field from local filesystem."""
        assert field.file_name is not None

        if field.package is not None:
            # dataset-scope field
            path = Paths.fields_dir(self.cwd / field.package) / field.file_name
        else:
            # company-scope field
            path = Paths.fields_dir(self.cwd) / field.file_name

        logger.debug(f'About to delete field {field.id}')
        delete_file(path)
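Every example derives field locations by handing a base directory to Paths.fields_dir. A minimal sketch of that helper, assuming field files live in a fixed subdirectory (the actual directory name in the library may differ):

from pathlib import Path

class Paths:  # illustrative fragment only
    @staticmethod
    def fields_dir(base_dir: Path) -> Path:
        # assumption: each package keeps its *.field.yaml files under 'fields'
        return base_dir / 'fields'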
Example No. 8
def test_validate_local_state_missing_field_file(tmp_path, monkeypatch):
    monkeypatch.chdir(tmp_path)

    dataset_dir = tmp_path / 'test_dataset'
    dataset_dir.mkdir()

    with (dataset_dir / PresetFileName.DATASET_YAML.value).open('w') as f:
        f.write(yaml.dump(VALID_DATASET))

    with (dataset_dir / 'model1.model.yaml').open('w') as f:
        f.write(
            yaml.dump({
                **VALID_MODEL_MINIMAL,
                'fields': [{
                    'data_reference': '"COLUMN1"',
                    'field_map': ['field_slug', 'field_slug_2']
                }],
            }))

    field_dir = Paths.fields_dir(dataset_dir)
    field_dir.mkdir()

    with (field_dir / 'field_slug.field.yaml').open('w') as f:
        f.write(yaml.dump(VALID_FIELD_MINIMAL))

    errors = validate_local_state()
    assert errors == [
        MissingFieldFileError(
            field_slug='field_slug_2',
            dataset_slug='test_dataset',
            data_reference='"COLUMN1"',
            identifier=False,
            model_name='model1',
        )
    ]
Example No. 9
def test_validate_local_state_duplicate_dataset_scoped_field(
        tmp_path, monkeypatch, invalid_field):
    monkeypatch.chdir(tmp_path)

    dataset_dir = tmp_path / 'test_dataset'
    dataset_dir.mkdir()

    with (dataset_dir / PresetFileName.DATASET_YAML.value).open('w') as f:
        f.write(yaml.dump(VALID_DATASET))

    with (dataset_dir / 'test_model.model.yaml').open('w') as f:
        f.write(
            yaml.dump({
                **VALID_MODEL_MINIMAL,
                'fields': [{
                    'field_map': ['field_slug'],
                    'data_reference': '"FIELD_SLUG"'
                }]
            }))

    field_dir = Paths.fields_dir(dataset_dir)
    field_dir.mkdir()

    with (field_dir / 'first_field.field.yaml').open('w') as f:
        f.write(yaml.dump(VALID_FIELD_MINIMAL))

    with (field_dir / 'duplicate.field.yaml').open('w') as f:
        f.write(yaml.dump(VALID_FIELD_MINIMAL))

    errors = validate_local_state()
    assert len(errors) == 1
Example No. 10
def test_validate_context_invalid_yaml(tmp_path, monkeypatch):
    monkeypatch.chdir(tmp_path)
    with Paths.context_file().open('w') as f:
        f.write('not:\nyaml')

    with pytest.raises(InvalidYamlFile):
        validate_context()
Example No. 11
def test_validate_context_invalid(tmp_path, monkeypatch, context):
    monkeypatch.chdir(tmp_path)
    with Paths.context_file().open('w') as f:
        f.write(yaml.dump(context))

    with pytest.raises(JsonSchemaError):
        validate_context()
Example No. 12
def validate_config():
    """Check config file against schema."""
    path, schema = Paths.config_file(), JsonSchemas.config()
    _validate_file(path, schema)
    errors = _check_properties_deprecations(path, schema)
    for err in errors:
        echo_warning(str(err))
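One plausible shape for the _validate_file helper used above, assuming it parses the YAML and checks it with jsonschema; this is a sketch under those assumptions, not the library's actual implementation:

import jsonschema
import yaml

def _validate_file(path, schema):
    # hypothetical body; exception constructors are assumed
    try:
        with path.open() as f:
            data = yaml.safe_load(f)
    except yaml.YAMLError:
        raise InvalidYamlFile(path)
    # jsonschema raises ValidationError on schema violations
    jsonschema.validate(instance=data, schema=schema)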
Example No. 13
def test_reader_get_packages(tmp_path: Path):
    # scanned directory
    scanned_dir = tmp_path / SystemDirectory.SCANNED.value
    scanned_dir.mkdir()
    (scanned_dir / PresetFileName.DATASET_YAML.value).touch()

    # dataset with one model
    ds1_dir = tmp_path / 'dataset1'
    ds1_dir.mkdir()

    ds1_file = ds1_dir / PresetFileName.DATASET_YAML.value
    ds1_file.touch()

    model_file = ds1_dir / f'test_model{FileExtension.MODEL_YAML.value}'
    model_file.touch()

    ds1_fields_dir = Paths.fields_dir(ds1_dir)
    ds1_fields_dir.mkdir()
    field_file = ds1_fields_dir / f'test_field{FileExtension.FIELD_YAML.value}'
    field_file.touch()

    # empty dataset
    ds2_dir = tmp_path / 'dataset2'
    ds2_dir.mkdir()

    packages = list(FileReader(cwd=tmp_path).get_packages())
    expected = [
        FilePackage(name='dataset1',
                    data_source_file=ds1_file,
                    model_files=[model_file],
                    field_files=[field_file])
    ]
    assert packages == expected
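The equality assertion above implies FilePackage compares by value. A minimal sketch of a compatible shape, assuming a dataclass; methods such as read_fields (see Example No. 26) are omitted:

from dataclasses import dataclass
from pathlib import Path
from typing import List

@dataclass
class FilePackage:  # illustrative shape, not the actual definition
    name: str
    data_source_file: Path
    model_files: List[Path]
    field_files: List[Path]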
Example No. 14
def create_command():
    echo_info('Scaffolding a new transform...')
    name = click.prompt('name')

    connections = Connections.load()
    connection_names = connections.keys() if connections else []
    connection_base_text = 'connection'

    if len(connection_names) == 0:
        connection_prompt_text = connection_base_text
    elif len(connection_names) > 3:
        connection_prompt_text = f'{connection_base_text} (Available - {{{",".join(list(connection_names)[:3])}}},...)'
    else:
        connection_prompt_text = f'{connection_base_text} (Available - {{{",".join(connection_names)}}})'

    # Assemble target based on input
    connection = click.prompt(connection_prompt_text)

    target_view_path = click.prompt(f'target: {connection}.', prompt_suffix="")
    target = f'{connection}.{target_view_path}'

    transform = PanoTransform(name=name, fields=[], target=target)
    writer = FileWriter()
    transform_path = Paths.transforms_dir() / f'{transform.name}{FileExtension.TRANSFORM_YAML.value}'

    if transform_path.exists():
        echo_error(f'Transform {transform_path} already exists')
    else:
        writer.write_transform(transform)
Example No. 15
def read_config(section: str = '') -> Dict[str, Any]:
    config_file = Paths.config_file()
    if not config_file.is_file():
        return {}

    config = read_yaml(config_file)
    if section != '':
        return config.get(section, {})
    return config
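Usage sketch: the default empty section name returns the whole mapping, a named section returns only that sub-mapping, and both fall back to an empty dict when the file or section is absent:

config = read_config()        # entire config as a dict, or {}
auth = read_config('auth')    # only the 'auth' section, or {}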
Example No. 16
def update_config(section: str, data: Dict[str, Any]) -> None:
    data = {section: data}
    config_file = Paths.config_file()
    if config_file.is_file():
        config_yaml = read_yaml(config_file)
        config_yaml.update(data)
        write_yaml(config_file, config_yaml)
    else:
        write_yaml(config_file, data)
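Usage sketch with illustrative values: update_config overwrites only the named top-level section, leaving the rest of the file intact:

update_config('auth', {'client_id': 'abc'})
assert read_config('auth') == {'client_id': 'abc'}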
Example No. 17
def test_validate_local_state_valid(tmp_path, monkeypatch):
    monkeypatch.chdir(tmp_path)

    global_fields_dir = Paths.fields_dir(tmp_path)
    global_fields_dir.mkdir()

    dataset_dir = tmp_path / 'test_dataset'
    dataset_dir.mkdir()
    dataset_fields_dir = Paths.fields_dir(dataset_dir)
    dataset_fields_dir.mkdir()

    with (dataset_dir / PresetFileName.DATASET_YAML.value).open('w') as f:
        f.write(yaml.dump(VALID_DATASET))

    model1 = {**VALID_MODEL_MINIMAL, 'model_name': 'sf.db.schema.table1'}
    model2 = {
        **VALID_MODEL_MINIMAL,
        'model_name': 'sf.db.schema.table2',
        'fields': [{
            'field_map': ['field_slug'],
            'data_reference': '"FIELD_SLUG"'
        }],
    }

    with (dataset_dir / 'test_model-1.model.yaml').open('w') as f:
        f.write(yaml.dump(model1))

    with (dataset_dir / 'test_model-2.model.yaml').open('w') as f:
        f.write(yaml.dump(model2))

    with (global_fields_dir / 'company_field.field.yaml').open('w') as f:
        f.write(yaml.dump(VALID_FIELD_FULL))

    with (dataset_fields_dir / 'first_field.field.yaml').open('w') as f:
        f.write(yaml.dump(VALID_FIELD_MINIMAL))

    errors = validate_local_state()
    assert len(errors) == 0

    state = get_state()
    assert len(state.models) == 2
    assert len(state.data_sources) == 1
    assert len(state.fields) == 2
Example No. 18
def cli(debug):
    """Run checks at the beginning of every command."""
    if debug:
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)

    # hide unclosed socket errors
    warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*<socket.socket.*>")

    load_dotenv(dotenv_path=Paths.dotenv_file())
Example No. 19
    def write_field(self,
                    field: PanoField,
                    *,
                    package: Optional[str] = None,
                    file_name: Optional[str] = None):
        """Write model to local filesystem."""
        if file_name is None:
            file_name = f'{field.slug}{FileExtension.FIELD_YAML.value}'

        package = package if package is not None else field.data_source
        if package is not None:
            # dataset-scope field
            path = Paths.fields_dir(self.cwd / package) / file_name
        else:
            # company-scope field
            path = Paths.fields_dir(self.cwd) / file_name

        logger.debug(f'About to write field {field.id}')
        write_yaml(path, field.to_dict())
Example No. 20
def test_validate_local_state_orphan_field_files(tmp_path, monkeypatch):
    monkeypatch.chdir(tmp_path)

    dataset_dir = tmp_path / 'test_dataset'
    dataset_dir.mkdir()

    with (dataset_dir / PresetFileName.DATASET_YAML.value).open('w') as f:
        f.write(yaml.dump(VALID_DATASET))

    with (dataset_dir / 'test_model.model.yaml').open('w') as f:
        f.write(
            yaml.dump({
                **VALID_MODEL_FULL,
                'fields': [{
                    'field_map': ['field_slug'],
                    'data_reference': '"FIELD_SLUG"'
                }],
            }))

    fields_dir = Paths.fields_dir(dataset_dir)
    fields_dir.mkdir()
    with (fields_dir / 'test_field.field.yaml').open('w') as f:
        f.write(yaml.dump(VALID_FIELD_MINIMAL))

    with (fields_dir / 'calculated_field.field.yaml').open('w') as f:
        f.write(
            yaml.dump({
                **VALID_FIELD_MINIMAL,
                'slug': 'calculated_slug',
                'calculation': '2+2'
            }))

    with (fields_dir / 'orphan_field.field.yaml').open('w') as f:
        f.write(yaml.dump({**VALID_FIELD_MINIMAL, 'slug': 'orphan_slug'}))

    errors = validate_local_state()

    assert errors == [
        OrphanFieldFileError(field_slug='orphan_slug',
                             dataset_slug='test_dataset')
    ]
Example No. 21
def test_connections_e2e(mock_create_engine, monkeypatch, tmpdir):
    monkeypatch.setattr(Path, 'home', lambda: Path(tmpdir))
    runner = CliRunner()

    # Create config
    runner.invoke(cli, ['configure'])

    # Create connection
    result = runner.invoke(
        cli,
        [
            'connection',
            'create',
            'my-connection',
            'sqlite://',
            '--no-test',
        ],
    )

    assert result.exit_code == 0, result.output
    connections_json = {
        'auth': {},
        'connections': {
            'my-connection': {
                'connection_string': 'sqlite://',
            },
        },
    }
    with Paths.config_file().open() as f:
        assert yaml.safe_load(f.read()) == connections_json

    # List
    result = runner.invoke(cli, ['connection', 'list'])
    assert result.exit_code == 0, result.output
    assert result.output == yaml.dump(connections_json['connections']) + "\n"

    # Update
    result = runner.invoke(cli, ['connection', 'update', 'my-connection', 'sqlite://'])
    assert result.exit_code == 0, result.output

    # List
    result = runner.invoke(cli, ['connection', 'list'])
    assert result.exit_code == 0, result.output
    connections_json['connections']['my-connection']['connection_string'] = 'sqlite://'
    assert result.output == yaml.dump(connections_json['connections']) + "\n"

    # Remove
    result = runner.invoke(cli, ['connection', 'remove', 'my-connection'])
    assert result.exit_code == 0, result.output

    # List
    result = runner.invoke(cli, ['connection', 'list'])
    assert result.exit_code == 0, result.output
    assert result.stdout.startswith('No connections found.\nUse "pano connection create" to create')
Example No. 22
def list_connections_command() -> None:
    """CLI command. List all connections."""
    connections = Connections.load()
    if not connections:
        config_file = Paths.config_file()
        echo_info(
            f'No connections found.\n'
            f'Use "pano connection create" to create connection or edit "{config_file}" file.'
        )
        exit(0)

    echo_info(yaml.dump(connections))
Example No. 23
def test_field_scaffold_e2e(_, clear_fields):
    runner = CliRunner()
    result = runner.invoke(cli, ['field', 'scaffold', '-y'])

    fields_dir = Paths.fields_dir(Path('test_dataset'))

    assert result.exit_code == 0

    assert {f.name for f in fields_dir.iterdir()} == {'dataset_test_field.field.yaml'}
    assert (
        (fields_dir / 'dataset_test_field.field.yaml').read_text()
        == """aggregation:
  type: group_by
api_version: v1
data_type: text
display_name: dataset_test_field
field_type: dimension
group: CLI
slug: dataset_test_field
"""
    )
Example No. 24
def test_configure_e2e(monkeypatch, tmpdir):
    monkeypatch.setattr(Path, 'home', lambda: Path(tmpdir))
    runner = CliRunner()

    result = runner.invoke(cli, ['configure'], input='test-client-id\ntest-client-secret')

    assert result.exit_code == 0, result.output
    with Paths.config_file().open() as f:
        assert yaml.safe_load(f.read()) == {
            'auth': {
                'client_id': 'test-client-id',
                'client_secret': 'test-client-secret',
            },
        }
Example No. 25
def create_command():
    echo_info('Scaffolding a new transform...')
    name = click.prompt('name')

    target = click.prompt('target:', prompt_suffix="")

    transform = PanoTransform(name=name, fields=[], target=target)
    writer = FileWriter()
    transform_path = Paths.transforms_dir() / f'{transform.name}{FileExtension.TRANSFORM_YAML.value}'

    if transform_path.exists():
        echo_error(f'Transform {transform_path} already exists')
    else:
        writer.write_transform(transform)
Example No. 26
def test_file_package_read_fields(tmp_path):
    field_file = Paths.fields_dir(tmp_path) / f'test_field{FileExtension.FIELD_YAML.value}'
    field_file.parent.mkdir(parents=True, exist_ok=True)

    with field_file.open('w') as f:
        f.write('slug: field_slug')

    package = FilePackage(name='dataset1',
                          data_source_file=Mock(),
                          model_files=[],
                          field_files=[field_file])

    assert list(package.read_fields()) == [({
        'slug': 'field_slug'
    }, field_file)]
Example No. 27
    def write_compiled_transform(
            self, compiled_transform: CompiledTransform) -> Path:
        file_name = f'{compiled_transform.transform.name}{FileExtension.COMPILED_TRANSFORM_SQL.value}'
        path = Paths.transforms_compiled_dir() / file_name

        ensure_dir(path)

        with open(path, 'w') as f:
            # comment header, then the compiled SQL itself
            f.write('-- Compiled with parameters:\n'
                    f'-- \tcompany_id: {compiled_transform.company_id}\n'
                    '\n')
            f.write(compiled_transform.compiled_query)

        return path
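A quick look at what the method emits, assuming writer is the surrounding FileWriter-style instance and using illustrative values for company_id and the query:

path = writer.write_compiled_transform(compiled_transform)
print(path.read_text())
# -- Compiled with parameters:
# --     company_id: 123
#
# SELECT 1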
Example No. 28
def test_validate_local_state_invalid_dataset_scoped_field(
        tmp_path, monkeypatch, invalid_field):
    monkeypatch.chdir(tmp_path)

    dataset_dir = tmp_path / 'test_dataset'
    dataset_dir.mkdir()

    with (dataset_dir / PresetFileName.DATASET_YAML.value).open('w') as f:
        f.write(yaml.dump(VALID_DATASET))

    field_dir = Paths.fields_dir(dataset_dir)
    field_dir.mkdir()

    with (field_dir / 'first_field.field.yaml').open('w') as f:
        f.write(yaml.dump(invalid_field))

    errors = validate_local_state()
    assert len(errors) == 1
Example No. 29
def test_connections_e2e(mock_create_engine, monkeypatch, tmpdir):
    monkeypatch.setattr(Path, 'home', lambda: Path(tmpdir))
    runner = CliRunner()

    # Create connection
    result = runner.invoke(
        cli,
        [
            'connection',
            'setup',
            '--url',
            'sqlite://',
            '--no-test',
        ],
    )

    assert result.exit_code == 0, result.output
    connections_json = {
        'connection': {
            'url': 'sqlite://',
        },
    }
    with Paths.context_file().open() as f:
        assert yaml.safe_load(f.read()) == connections_json

    # Show
    result = runner.invoke(cli, ['connection', 'show'])
    assert result.exit_code == 0, result.output
    assert result.output == yaml.dump(connections_json['connection']) + "\n"

    # Update
    result = runner.invoke(cli, ['connection', 'setup', '--url', 'sqlite://'])
    assert result.exit_code == 0, result.output

    # Show
    result = runner.invoke(cli, ['connection', 'show'])
    assert result.exit_code == 0, result.output
    connections_json['connection']['url'] = 'sqlite://'
    assert result.output == yaml.dump(connections_json['connection']) + "\n"

    # Ensure no traces of the connections are left
    remove_context()
Example No. 30
def clear_fields(monkeypatch):
    monkeypatch.chdir(Path('e2e') / 'scenarios' / 'pano-field-scaffold')
    # delete field files
    for f in Paths.fields_dir(Path('test_dataset')).iterdir():
        f.unlink()
    yield