def cpu_image(node_id: NodeID) -> ImageParams:
    image = Image(
        name="simcore/services/comp/pytest/cpu_image",
        tag="1.5.5",
        node_requirements=NodeRequirements(
            CPU=1, RAM=parse_obj_as(ByteSize, "128 MiB"), GPU=None, MPI=None
        ),
    )  # type: ignore
    return ImageParams(
        image=image,
        expected_annotations={
            "resources": {
                "CPU": 1.0,
                "RAM": 128 * 1024 * 1024,
            }
        },
        fake_tasks={node_id: image},
    )
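
For context, parse_obj_as(ByteSize, "128 MiB") is what turns the human-readable RAM string into the integer byte count asserted in expected_annotations. A minimal standalone sketch of that conversion (pydantic v1 API):

from pydantic import ByteSize, parse_obj_as

# "128 MiB" is parsed into an int subclass holding the number of bytes
ram = parse_obj_as(ByteSize, "128 MiB")
assert ram == 128 * 1024 * 1024  # 134217728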
Example No. 2
 async def get_volume_inventory(self) -> List[VolumeDeviceModel]:
     cmd = "ceph-volume inventory --format=json"
     stdout, stderr, rc = await self.call(cmd)
     if rc != 0:
         raise CephadmError(stderr)
     try:
         devs = json.loads(stdout)
         logger.debug(json.dumps(devs, indent=2))
     except json.decoder.JSONDecodeError as e:
         raise CephadmError("format error while obtaining inventory") from e
     inventory = parse_obj_as(List[VolumeDeviceModel], devs)
     for d in inventory:
         if not d.human_readable_type:
             if d.sys_api.rotational:
                 d.human_readable_type = "hdd"
             else:
                 d.human_readable_type = "ssd"
     return inventory
Example No. 3
def fake_task_output_data(
    fake_io_schema: Dict[str, Dict[str, str]],
    fake_io_data: Dict[str, Any],
    faker: Faker,
) -> TaskOutputData:
    converted_data = {
        key: {
            "url": faker.url(),
            "file_mapping": next(iter(fake_io_schema[key]["fileToKeyMap"]))
            if "fileToKeyMap" in fake_io_schema[key]
            else None,
        }
        if fake_io_schema[key]["type"] == "data:*/*"
        else value
        for key, value in fake_io_data.items()
    }
    data = parse_obj_as(TaskOutputData, converted_data)
    assert data
    return data
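
The nested conditional comprehension above packs a lot into one expression; this equivalent sketch spells out the same transformation as an explicit loop (fake_io_schema, fake_io_data and faker are the fixture values from the snippet):

converted_data = {}
for key, value in fake_io_data.items():
    if fake_io_schema[key]["type"] == "data:*/*":
        # file-like ports become a fake URL plus an optional file mapping
        if "fileToKeyMap" in fake_io_schema[key]:
            file_mapping = next(iter(fake_io_schema[key]["fileToKeyMap"]))
        else:
            file_mapping = None
        converted_data[key] = {"url": faker.url(), "file_mapping": file_mapping}
    else:
        converted_data[key] = value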
Example No. 4
def test_parsing_model_naming():
    with pytest.raises(ValidationError) as exc_info:
        parse_obj_as(int, 'a')
    assert str(exc_info.value).split(
        '\n')[0] == '1 validation error for ParsingModel[int]'

    with pytest.raises(ValidationError) as exc_info:
        parse_obj_as(int, 'a', type_name='ParsingModel')
    assert str(
        exc_info.value).split('\n')[0] == '1 validation error for ParsingModel'

    with pytest.raises(ValidationError) as exc_info:
        parse_obj_as(int, 'a', type_name=lambda type_: type_.__name__)
    assert str(exc_info.value).split('\n')[0] == '1 validation error for int'
Example No. 5
    def get_authorization(self, fsname: str,
                          clientid: Optional[str]) -> CephFSAuthorizationModel:

        if not clientid:
            clientid = "default"

        cmd = {
            "prefix": "auth get",
            "entity": f"client.{fsname}-{clientid}",
            "format": "json"
        }
        try:
            res = self.mon.call(cmd)
        except CephCommandError as e:
            if e.rc == errno.ENOENT:
                raise CephFSNoAuthorizationError(e.message)
            raise CephFSError(str(e)) from e
        lst = parse_obj_as(List[CephFSAuthorizationModel], res)
        if len(lst) == 0:
            raise CephFSNoAuthorizationError()
        return lst[0]
Example No. 6
    async def factory() -> DaskClient:
        client = await DaskClient.create(
            app=minimal_app,
            settings=minimal_app.state.settings.DASK_SCHEDULER,
            endpoint=parse_obj_as(AnyUrl, dask_spec_local_cluster.scheduler_address),
            authentication=NoAuthentication(),
        )
        assert client
        assert client.app == minimal_app
        assert client.settings == minimal_app.state.settings.DASK_SCHEDULER
        assert not client._subscribed_tasks

        assert client.dask_subsystem.client
        assert not client.dask_subsystem.gateway
        assert not client.dask_subsystem.gateway_cluster
        scheduler_infos = client.dask_subsystem.client.scheduler_info()  # type: ignore
        print(
            f"--> Connected to scheduler via client {client=} to scheduler {scheduler_infos=}"
        )
        created_clients.append(client)
        return client
Example No. 7
class Settings(BaseSettings):
    API_V1_STR: str = "/api/v1"
    SERVER_NAME: str
    SERVER_HOST: AnyHttpUrl
    # BACKEND_CORS_ORIGINS is a JSON-formatted list of origins,
    # e.g. '["http://localhost", "http://localhost:8080"]'
    BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []

    @validator("BACKEND_CORS_ORIGINS", pre=True)
    def assemble_cors_origins(
            cls, v: Union[str, List[str]]) -> Union[List[str], str]:
        if isinstance(v, str) and not v.startswith("["):
            return [i.strip() for i in v.split(",")]
        elif isinstance(v, (list, str)):
            return v
        raise ValueError(v)

    PROJECT_NAME: str
    SENTRY_DSN: Optional[HttpUrl] = None

    WIKIFACTORY_API_BASE_URL: AnyHttpUrl = parse_obj_as(
        AnyHttpUrl, "https://wikifactory.com")

    @validator("WIKIFACTORY_API_BASE_URL", pre=True)
    def wikifactory_base_url(cls, v: Union[str, AnyHttpUrl]) -> AnyHttpUrl:
        if isinstance(v, str):
            return parse_obj_as(AnyHttpUrl, v)
        elif isinstance(v, AnyHttpUrl):
            return v
        raise ValueError(v)

    WIKIFACTORY_API_HOST: Optional[str] = None

    @validator("WIKIFACTORY_API_HOST")
    def wikifactory_host_from_url(cls, v: Optional[str],
                                  values: Dict[str, Any]) -> str:
        if isinstance(v, str):
            return v

        api_base_url = values.get("WIKIFACTORY_API_BASE_URL")
        if api_base_url:
            if api_base_url.port:
                return f"{api_base_url.host}:{api_base_url.port}"
            return api_base_url.host

        raise ValueError(api_base_url)

    EXPORTER_GIT_MAIL: Optional[str] = None
    EXPORTER_GIT_USER: Optional[str] = None

    @validator("SENTRY_DSN", pre=True)
    def sentry_dsn_can_be_blank(cls, v: str) -> Optional[str]:
        if not v:
            return None
        return v

    POSTGRES_SERVER: str
    POSTGRES_USER: str
    POSTGRES_PASSWORD: str
    POSTGRES_DB: str

    PYTEST_POSTGRES_SERVER: Optional[str]
    PYTEST_POSTGRES_USER: Optional[str]
    PYTEST_POSTGRES_PASSWORD: Optional[str]
    PYTEST_POSTGRES_DB: Optional[str]

    SQLALCHEMY_DATABASE_URI: Optional[PostgresDsn] = None

    @validator("SQLALCHEMY_DATABASE_URI", pre=True)
    def assemble_db_connection(cls, v: Optional[str],
                               values: Dict[str, Any]) -> Any:
        if isinstance(v, str):
            return v
        if "pytest" in sys.modules:
            return PostgresDsn.build(
                scheme="postgresql",
                user=values.get("PYTEST_POSTGRES_USER"),
                password=values.get("PYTEST_POSTGRES_PASSWORD"),
                host=values.get("PYTEST_POSTGRES_SERVER"),
                path=f"/{values.get('PYTEST_POSTGRES_DB') or ''}",
            )
        return PostgresDsn.build(
            scheme="postgresql",
            user=values.get("POSTGRES_USER"),
            password=values.get("POSTGRES_PASSWORD"),
            host=values.get("POSTGRES_SERVER"),
            path=f"/{values.get('POSTGRES_DB') or ''}",
        )

    # TODO - define validations
    JOBS_BASE_PATH: str

    @validator("JOBS_BASE_PATH", pre=True)
    def ensure_download_path(cls, v: str) -> str:
        if "pytest" in sys.modules:
            current_dir = os.path.dirname(os.path.realpath(__file__))
            return os.path.normpath(
                os.path.join(current_dir, "../tests/test_files"))

        # if directory can't be written, raise ValueError(v)
        return v

    BROKER_URL: Optional[AnyUrl]

    class Config:
        case_sensitive = True
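
The WIKIFACTORY_API_HOST validator above relies on pydantic v1 URL types exposing their parsed components as attributes. A small standalone sketch of that behavior (the example URL is illustrative only):

from pydantic import AnyHttpUrl, parse_obj_as

url = parse_obj_as(AnyHttpUrl, "https://wikifactory.com:8443/api")
# The validated value carries the parsed parts as attributes
assert url.scheme == "https"
assert url.host == "wikifactory.com"
assert url.path == "/api"
print(f"{url.host}:{url.port}")  # wikifactory.com:8443 -- the pattern used by wikifactory_host_from_url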
Example No. 8
def main() -> None:
    """Analyze mixed initiative schemas."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--input-dir",
                        type=Path,
                        required=True,
                        help="Input schema directory path.")
    args = parser.parse_args()

    if not args.input_dir.is_dir():
        raise IOError(
            f"Schema directory {args.input_dir} does not exist or is not a directory"
        )

    yaml_files = sorted(args.input_dir.glob("*.yaml"))

    yaml_schemas = []
    for yaml_file in yaml_files:
        with yaml_file.open() as file:
            yaml_data = yaml.safe_load(file)
        yaml_schemas.extend(yaml_data)
    schemas: List[Schema] = parse_obj_as(List[Schema], yaml_schemas)

    for schema in schemas:
        author = get_author(schema)
        schema.private_data["author"] = author
        schema.private_data["tracking"] = parse_obj_as(
            List[LogItem], schema.private_data["tracking"])
        for log_item in schema.private_data["tracking"]:
            log_item.author = author

    wrapper = textwrap.TextWrapper(width=80, subsequent_indent=" " * 4)

    print("+++ General +++")

    log_items = list(
        itertools.chain.from_iterable(schema.private_data["tracking"]
                                      for schema in schemas))
    log_types = sorted(set(log_item.type for log_item in log_items))
    print(wrapper.fill(f"Log types: {log_types}"))
    authors = sorted(set(schema.private_data["author"] for schema in schemas))
    print(f"Authors: {authors}")

    log_type_counts = dict(
        Counter(log_item.type for log_item in log_items).most_common())
    print("Log type counts:")
    for log_type, counts in log_type_counts.items():
        print(f"\t{log_type}: {counts}")
    log_type_counts_per_user = {
        log_type: dict(
            Counter(i.author for i in log_items
                    if i.type == log_type).most_common())
        for log_type in log_types
    }
    print("Log type counts per user:"******"\t{log_type}: {counts_per_user}")
    bad_reorders = sum(
        log_item.type == "reorder_event"
        and log_item.data["old_index"] != log_item.data["new_index"]
        for log_item in log_items)
    print(
        f"Percent actual re-orderings: {bad_reorders / log_type_counts['reorder_event']:.0%} "
        f"({bad_reorders}/{log_type_counts['reorder_event']})")

    print("\n+++ Total step entering time +++")
    times_taken = []
    for schema in schemas:
        start_time = schema.private_data["tracking"][0].date
        end_time = schema.private_data["tracking"][-1].date
        times_taken.append(end_time - start_time)
    seconds_taken = [t.total_seconds() for t in times_taken]
    print(
        f"Minimum total step entering time: {timedelta(seconds=round(min(seconds_taken)))}"
    )
    print(
        f"Maximum total step entering time: {timedelta(seconds=round(max(seconds_taken)))}"
    )
    print(
        f"Mean total step entering time: {timedelta(seconds=round(statistics.mean(seconds_taken)))}"
    )
    print(f"Median total step entering time: "
          f"{timedelta(seconds=round(statistics.median(seconds_taken)))}")

    print("\n+++ Schema lengths +++")
    script_lengths = sorted(len(schema.steps) for schema in schemas)
    print(f"Minimum schema length: {min(script_lengths)}")
    print(f"Maximum schema length: {max(script_lengths)}")
    print(f"Mean schema length: {statistics.mean(script_lengths):.1f}")
    print(f"Median schema length: {statistics.median(script_lengths):.1f}")

    print("\n+++ Suggestion usage +++")
    total_suggestions_given = 0
    total_events_added = 0
    total_suggestions_used_exact = 0
    total_suggestions_used_ignore_case = 0
    total_suggestions_used_rough = 0
    for schema in schemas:
        last_suggestion_selected: Optional[str] = None
        for log_item in schema.private_data["tracking"]:
            if log_item.type == "add_event":
                total_events_added += 1
                if last_suggestion_selected is not None:
                    suggested = last_suggestion_selected
                    added = log_item.data
                    if suggested == added:
                        total_suggestions_used_exact += 1
                    if suggested.lower() == added.lower():
                        total_suggestions_used_ignore_case += 1
                    if SequenceMatcher(None, suggested.lower(),
                                       added.lower()).ratio() > 0.8:
                        total_suggestions_used_rough += 1
            elif log_item.type == "gpt2_suggestion_output":
                total_suggestions_given += 1
                last_suggestion_selected = None
            elif log_item.type == "gpt2_suggestion_select":
                last_suggestion_selected = log_item.data
    print(f"Total suggestion sets given: {total_suggestions_given}")
    print(f"Total events added: {total_events_added}")
    print(
        f"Total suggestions used (exact match): {total_suggestions_used_exact} "
        f"({total_suggestions_used_exact/total_events_added:.1%})")
    print(f"Total suggestions used (exact match, ignoring case): "
          f"{total_suggestions_used_ignore_case} "
          f"({total_suggestions_used_ignore_case/total_events_added:.1%})")
    print(
        f"Total suggestions used (rough match): {total_suggestions_used_rough} "
        f"({total_suggestions_used_rough/total_events_added:.1%})")

    # Although timing is recorded in the logs, this is to get benchmarks with the improved server
    print("\n+++ Suggestion generation time +++")
    times = []
    for log_item in log_items:
        if log_item.type != "gpt2_suggestion_input":
            continue
        gpt2_input = log_item.data["input"]
        request_url = "https://dev.example.org/api/get_gpt2_suggestions"
        start = time.time()
        request_response = requests.get(request_url,
                                        params=gpt2_input,
                                        timeout=30)
        end = time.time()
        times.append(end - start)
        # Abort script if GPT-2 server does not return suggestions properly
        if request_response.status_code != HTTPStatus.OK:
            raise RuntimeError(
                f"GPT-2 server returned status code {request_response.status_code}"
            )
        if "suggestions" not in request_response.json():
            raise RuntimeError("GPT-2 server returned malformed output")
    print(f"Total suggestion generation time: {sum(times):.1f} s")
    print(f"Minimum suggestion generation time: {min(times):.1f} s")
    print(f"Maximum suggestion generation time: {max(times):.1f} s")
    print(f"Mean suggestion generation time: {statistics.mean(times):.1f} s")
    print(
        f"Median suggestion generation time: {statistics.median(times):.1f} s")
Example No. 9
 def host_ls(self) -> List[OrchHostListModel]:
     cmd = {"prefix": "orch host ls"}
     res = self.call(cmd)
     return parse_obj_as(List[OrchHostListModel], res)
Example No. 10
def get_animes() -> List[Anime]:
    with client:
        r = client.get(url=f"{TWIST_URL}{ANIME_ENDPOINT}")
        r.raise_for_status()
        return parse_obj_as(List[Anime], r.json())
Example No. 11
def calculate_indicators(
    market_data: List[InputMarketDataSchema],
) -> List[IndicatorSchema]:
    """calculate_indicator:
    Transform streams of market data into an list of indicators calculated from that data.
    Returns two indicators, one for buy and one for sell/close position.
    """

    no_op = IndicatorSchema(label="None", indicator=[])
    if not all(map(lambda x: check_is_good_for_signal(x.ticks), market_data)):
        return [no_op, no_op]

    min_ask_bid_ratio = 0.0035
    disregard_bars_count: int = 10
    rolling_indicator_window: str = "180s"

    primary_data = parse_obj_as(List[PricepointSchema], market_data[0].ticks)
    secondary_data = parse_obj_as(List[PricepointSchema], market_data[1].ticks)
    # Convert Pydantic lists to dataframes
    primary_dataframe = pd.DataFrame([x.dict() for x in primary_data])
    primary_dataframe["Time"] = pd.to_datetime(primary_dataframe.time, unit="s")
    primary_dataframe.set_index("Time", inplace=True)
    primary_dataframe.sort_index(inplace=True)

    secondary_dataframe = pd.DataFrame([x.dict() for x in secondary_data])
    secondary_dataframe["Time"] = pd.to_datetime(secondary_dataframe.time, unit="s")
    secondary_dataframe.set_index("Time", inplace=True)
    secondary_dataframe.sort_index(inplace=True)

    primary_without_latest = primary_dataframe["price"].shift(disregard_bars_count)
    secondary_without_latest = secondary_dataframe["price"].shift(disregard_bars_count)
    coefficients = secondary_without_latest / primary_without_latest

    pseudo_spread_secondary = secondary_dataframe["price"] * min_ask_bid_ratio

    # Calculate inter-market slip difference and get indicating list
    arbitrage_difference = (
        primary_dataframe["price"] * coefficients
    ) - secondary_dataframe["price"]
    arbitrage_indicator = (arbitrage_difference / pseudo_spread_secondary).dropna()

    # Disregard noise values (x < 1.5)
    arbitrage_indicator.loc[arbitrage_indicator < 1.5] = 0

    rolling_buy_indicator: pd.Series = arbitrage_indicator.rolling(
        rolling_indicator_window
    ).sum()
    sell_difference = (primary_dataframe["price"] * coefficients) - secondary_dataframe[
        "price"
    ]
    sell_indicator = (sell_difference / pseudo_spread_secondary).dropna()
    sell_indicator.loc[sell_indicator > -0.8] = 0
    rolling_sell_indicator: pd.Series = sell_indicator.rolling(
        rolling_indicator_window
    ).sum()
    buy_indicator = IndicatorSchema(
        label=IndicatorsEnum.buy_probability_line,
        indicator=[
            dict(timestamp=x, value=y) for x, y in rolling_buy_indicator.items()
        ],
    )
    sell_indicator = IndicatorSchema(
        label=IndicatorsEnum.sell_probability_line,
        indicator=[
            dict(timestamp=x, value=y) for x, y in rolling_sell_indicator.items()
        ],
    )
    return [buy_indicator, sell_indicator]
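
The rolling("180s") calls above only work because both frames are indexed by a DatetimeIndex; a tiny standalone illustration of a time-based rolling sum:

import pandas as pd

s = pd.Series(
    [1.0, 2.0, 3.0],
    index=pd.to_datetime([0, 60, 300], unit="s"),
)
# Trailing 180-second window keyed on the index timestamps, not on row count
print(s.rolling("180s").sum())  # 1.0, 3.0, 3.0 -- the last point no longer "sees" the first two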
Example No. 12
 def get_pools_stats(self) -> List[CephOSDPoolStatsModel]:
     cmd: Dict[str, str] = {"prefix": "osd pool stats", "format": "json"}
     results: List[Dict[str, Any]] = self.call(cmd)
     return parse_obj_as(List[CephOSDPoolStatsModel], results)
Example No. 13
from os import path
import sys 
BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(BASE_DIR)


from textwrap import dedent

from pydantic.tools import parse_obj_as

from pepperbot.command import as_command, pattern, with_command
from pepperbot.main import *
from pepperbot.models.sender import Sender



class TestModel(BaseModel):
    order: Text
    face: Face

    class Config:
        arbitrary_types_allowed = True


# items = parse_obj_as(List[Face], [Text("1234")])
items = parse_obj_as(TestModel, [Text("1234")])
debug(items)
Example No. 14
import os

from pydantic import EmailStr, HttpUrl
from pydantic.tools import parse_obj_as

from settings.adapters import RESTAdapterSettings
from settings.grabbers import FeedlyGrabberSettings
from settings.notification_channels import \
    SendPulseNotificationChannelSettings

zzr_settings = RESTAdapterSettings(
    client_id=os.environ["ZZR_CLIENT_ID"],
    client_secret=os.environ["ZZR_CLIENT_SECRET"],
    token_url=parse_obj_as(HttpUrl, os.environ["ZZR_TOKEN_URL"]),
    base_url=parse_obj_as(HttpUrl, os.environ["ZZR_BASE_URL"]),
    authorization_url=parse_obj_as(HttpUrl,
                                   os.environ["ZZR_AUTHORIZATION_URL"]),
)

feedly_settings = FeedlyGrabberSettings(
    refresh_token=str(os.environ.get("FEEDLY_REFRESH_TOKEN")),
    returned_posts_count=int(os.environ["FEEDLY_RETURNED_POSTS_COUNT"]),
)

emails = [
    EmailStr(email)
    for email in os.environ["SENDPULSE_NOTIFICATION_EMAILS"].split(",")
]

sendpulse_settings = SendPulseNotificationChannelSettings(
    emails=emails,
)
Example No. 15
    #     '11id': '123',
    #     'signup_ts': '2019-06-01 12:22',
    #     'friends': [1, 2, '3'],
    #     '表头': 'head',
    # }
    # try:
    #     user = User(**external_data)
    #     print(user.dict())
    # except ValidationError as e:
    #     print(e.errors())
    #     # print(e.json())

    external_data_list = [{
        'id': '123',
        'name': 'a',
        'signup_ts': '2019-06-01 12:22',
        'friends': [1, 2, '3'],
        '表头': 'head',
    }, {
        'id': '123',
        'name': 'b',
        'signup_ts': '2019-06-01 12:22',
        'friends': [1, 2, '3'],
        '表头': 'head',
    }]
    try:
        user_list = parse_obj_as(List[User], external_data_list)
        print([dict(user) for user in user_list])
    except ValidationError as e:
        print(e.errors())
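
The User model is not shown in this snippet; a minimal definition consistent with the data above (an assumption, mirroring the classic pydantic docs example) would be:

from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel

class User(BaseModel):
    id: int
    name: str
    signup_ts: Optional[datetime] = None
    friends: List[int] = []

# With such a model, parse_obj_as(List[User], external_data_list) coerces
# '123' -> 123 and '3' -> 3 and, by default, ignores the extra '表头' key.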
Example No. 16
    async def execute(self, redis_list, writer, state, auth, packer):
        try:
            args = []
            if len(redis_list) != self.num_args:
                writer.write(b"-NUM_ARG_MISMATCH\r\n")
                await writer.drain()
                return True

            pos = 0
            for (arg_name, arg_type) in self.signature:
                if arg_type == Auth:
                    args.append(auth)
                    continue
                elif arg_type == AuthRequired:
                    if not auth.value:
                        await write_permission_denied(writer)
                        return True
                    args.append(AuthRequired(auth.value))
                    continue
                elif arg_type == ConnState:
                    args.append(state)
                    continue

                redis_value = redis_list[pos]
                pos += 1

                try:
                    raw_value = msgpack.unpackb(redis_value)
                except msgpack.UnpackException as e:
                    loc = json.dumps(["command", arg_name])
                    msg = json.dumps("invalid msgpack")
                    writer.write(
                        f"-INVALID_MSGPACK {loc} {msg}\r\n".encode("utf8"))
                    await writer.drain()
                    return True

                try:
                    obj = parse_obj_as(arg_type, raw_value)
                except ValidationError as e:
                    err = e.errors()[0]
                    loc = json.dumps(("command", arg_name) + err["loc"][1:])
                    msg = json.dumps(err["msg"])
                    t = json.dumps(err["type"])
                    writer.write(
                        f"-VALIDATION_ERROR {loc} {msg} {t}\r\n".encode(
                            "utf8"))
                    await writer.drain()
                    return True
                args.append(obj)

            result = await self.handler(*args)
            to_send = pack_msgpack(packer, result)
            writer.write(b"$%d\r\n%b\r\n" % (len(to_send), to_send))
            await writer.drain()
            return False
        except Exception as e:
            msg = json.dumps(str(e))
            writer.write(f"-UNEXPECTED_ERROR {msg}\r\n".encode("utf8"))
            await writer.drain()

            raise e
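
The VALIDATION_ERROR branch above assumes the shape of pydantic v1 errors: e.errors() returns a list of dicts with "loc", "msg" and "type" keys. A minimal sketch of that structure:

from typing import List
from pydantic import ValidationError, parse_obj_as

try:
    parse_obj_as(List[int], ["1", "not-a-number"])
except ValidationError as e:
    err = e.errors()[0]
    # err["loc"] locates the failing item, e.g. ('__root__', 1);
    # err["msg"] and err["type"] describe the failure
    print(err["loc"], err["msg"], err["type"])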
Example No. 17
 def test_object_list(self, test_object_dict: Dict[str, Any]) -> None:
     model = parse_obj_as(list[ObjectModel], [test_object_dict])
     assert len(model) == 1
     assert model[0].dict(by_alias=True) == test_object_dict
Example No. 18
import json
import sys
from typing import Any, List, Mapping, Tuple
from pydantic.tools import parse_obj_as
from common import MethodAbs, get_metrics

with open(sys.argv[1], "r") as f:
    dump1: Mapping[str, List[Tuple[str, Any]]] = json.load(f)

with open(sys.argv[2], "r") as f:
    dump2: Mapping[str, List[Tuple[str, Any]]] = json.load(f)

for m in set(dump1.keys()).intersection(set(dump2.keys())):
    m_printed = False
    abs1 = dump1[m]
    abs2 = dump2[m]
    parsed1 = parse_obj_as(MethodAbs, abs1)
    parsed2 = parse_obj_as(MethodAbs, abs2)
    metrics1 = get_metrics(parsed1)
    metrics2 = get_metrics(parsed2)
    # assert len(metrics1) == len(metrics2)
    if len(metrics1) != len(metrics2):
        continue
    if metrics1 != metrics2:
        for s1, s2 in zip(metrics1, metrics2):
            unit = s1["unit"]
            # assert s1["unit"] == s2["unit"]
            if s1["unit"] != s2["unit"]:
                continue
            for k in s1["metrics"].keys():
                m1 = s1["metrics"][k]
                m2 = s2["metrics"][k]
Example No. 19
def test_parse_obj(obj, type_, parsed):
    assert parse_obj_as(type_, obj) == parsed
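
The parametrization driving this test is not shown; a hypothetical decorator that would exercise it could look like this (the parameter sets are assumptions, not the originals):

import pytest
from typing import Dict, List
from pydantic import parse_obj_as

@pytest.mark.parametrize(
    "obj,type_,parsed",
    [
        ("1", int, 1),
        (["1", "2"], List[int], [1, 2]),
        ({"1": "2"}, Dict[int, int], {1: 2}),
    ],
)
def test_parse_obj(obj, type_, parsed):
    assert parse_obj_as(type_, obj) == parsed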
Example No. 20
import json
import sys
from typing import Any, List, Mapping, Tuple
from pydantic.tools import parse_obj_as
from common import MethodAbs, get_metrics

with open(sys.argv[1], "r") as f:
    dump: Mapping[str, List[Tuple[str, Any]]] = json.load(f)

for method, abs_json in dump.items():
    abstractions = parse_obj_as(MethodAbs, abs_json)
    metrics = get_metrics(abstractions)

    print("=====" * 20)
    print("===== Method: " + method)
    print(abstractions.body)
    for stmt, details in abstractions.abstractions:
        print(" > " + stmt)
        print("    - Local:")
        for k, v in details.localNodeMap.items():
            print(f"      - {k}: {v}")
        print("    - Global:")
        for k, v in details.globalNodeMap.items():
            print(f"      - {k}: {v}")
        print("    - Node handlers:")
        for nodeID, event, methods in details.nodeHandlerMap:
            print(f"      - {nodeID}, {event}: {methods}")
        print("    - Dialog handlers:")
        for nodeID, event, methods in details.dialogHandlerMap:
            print(f"      - {nodeID}, {event}: {methods}")
        print()
Example No. 21
def test_parse_mapping_as():
    inputs = {'1': '2'}
    assert parse_obj_as(Dict[int, int], inputs) == {1: 2}
Example No. 22
def ubuntu_task(request: FixtureRequest,
                ftp_server: List[URL]) -> ServiceExampleParam:
    """Creates a console task in an ubuntu distro that checks for the expected files and error in case they are missing"""
    integration_version = version.Version(request.param)
    print("Using service integration:", integration_version)
    # defines the inputs of the task
    input_data = TaskInputData.parse_obj({
        "input_1": 23,
        "input_23": "a string input",
        "the_input_43": 15.0,
        "the_bool_input_54": False,
        **{
            f"some_file_input_{index+1}": FileUrl(url=f"{file}")
            for index, file in enumerate(ftp_server)
        },
        **{
            f"some_file_input_with_mapping{index+1}": FileUrl(url=f"{file}",
                                                              file_mapping=f"{index+1}/some_file_input")
            for index, file in enumerate(ftp_server)
        },
    })
    # check in the console that the expected files are present in the expected INPUT folder (set as ${INPUT_FOLDER} in the service)
    file_names = [file.path for file in ftp_server]
    list_of_commands = [
        "echo User: $(id $(whoami))",
        "echo Inputs:",
        "ls -tlah -R ${INPUT_FOLDER}",
        "echo Outputs:",
        "ls -tlah -R ${OUTPUT_FOLDER}",
        "echo Logs:",
        "ls -tlah -R ${LOG_FOLDER}",
    ]
    list_of_commands += [
        f"(test -f ${{INPUT_FOLDER}}/{file} || (echo ${{INPUT_FOLDER}}/{file} does not exists && exit 1))"
        for file in file_names
    ] + [f"echo $(cat ${{INPUT_FOLDER}}/{file})" for file in file_names]

    input_json_file_name = ("inputs.json"
                            if integration_version > LEGACY_INTEGRATION_VERSION
                            else "input.json")

    list_of_commands += [
        f"(test -f ${{INPUT_FOLDER}}/{input_json_file_name} || (echo ${{INPUT_FOLDER}}/{input_json_file_name} file does not exists && exit 1))",
        f"echo $(cat ${{INPUT_FOLDER}}/{input_json_file_name})",
        f"sleep {randint(1,4)}",
    ]

    # defines the expected outputs
    jsonable_outputs = {
        "pytest_string": "is quite an amazing feat",
        "pytest_integer": 432,
        "pytest_float": 3.2,
        "pytest_bool": False,
    }
    output_file_url = next(iter(ftp_server)).with_path("output_file")
    expected_output_keys = TaskOutputDataSchema.parse_obj({
        **{k: {
            "required": True
        }
           for k in jsonable_outputs.keys()},
        **{
            "pytest_file": {
                "required": True,
                "mapping": "a_outputfile",
                "url": f"{output_file_url}",
            },
            "pytest_file_with_mapping": {
                "required": True,
                "mapping": "subfolder/a_outputfile",
                "url": f"{output_file_url}",
            },
        },
    })
    expected_output_data = TaskOutputData.parse_obj({
        **jsonable_outputs,
        **{
            "pytest_file": {
                "url": f"{output_file_url}",
                "file_mapping": "a_outputfile",
            },
            "pytest_file_with_mapping": {
                "url": f"{output_file_url}",
                "file_mapping": "subfolder/a_outputfile",
            },
        },
    })
    jsonized_outputs = json.dumps(jsonable_outputs).replace('"', '\\"')
    output_json_file_name = ("outputs.json" if
                             integration_version > LEGACY_INTEGRATION_VERSION
                             else "output.json")

    # check for the log file if legacy version
    list_of_commands += [
        "echo $(ls -tlah ${LOG_FOLDER})",
        f"(test {'!' if integration_version > LEGACY_INTEGRATION_VERSION else ''} -f ${{LOG_FOLDER}}/{LEGACY_SERVICE_LOG_FILE_NAME} || (echo ${{LOG_FOLDER}}/{LEGACY_SERVICE_LOG_FILE_NAME} file does {'' if integration_version > LEGACY_INTEGRATION_VERSION else 'not'} exists && exit 1))",
    ]
    if integration_version == LEGACY_INTEGRATION_VERSION:
        list_of_commands = [
            f"{c} >> ${{LOG_FOLDER}}/{LEGACY_SERVICE_LOG_FILE_NAME}"
            for c in list_of_commands
        ]
    # set the final command to generate the output file(s) (files and json output)
    list_of_commands += [
        f"echo {jsonized_outputs} > ${{OUTPUT_FOLDER}}/{output_json_file_name}",
        "echo 'some data for the output file' > ${OUTPUT_FOLDER}/a_outputfile",
        "mkdir -p ${OUTPUT_FOLDER}/subfolder",
        "echo 'some data for the output file' > ${OUTPUT_FOLDER}/subfolder/a_outputfile",
    ]

    log_file_url = parse_obj_as(
        AnyUrl, f"{next(iter(ftp_server)).with_path('log.dat')}")

    return ServiceExampleParam(
        docker_basic_auth=DockerBasicAuth(server_address="docker.io",
                                          username="******",
                                          password=""),
        #
        # NOTE: we use sleeper because it defines a user
        # that can write in outputs and the
        # sidecar can remove the outputs dirs
        #
        service_key="itisfoundation/sleeper",
        service_version="2.1.2",
        command=[
            "/bin/bash",
            "-c",
            " && ".join(list_of_commands),
        ],
        input_data=input_data,
        output_data_keys=expected_output_keys,
        log_file_url=log_file_url,
        expected_output_data=expected_output_data,
        expected_logs=[
            '{"input_1": 23, "input_23": "a string input", "the_input_43": 15.0, "the_bool_input_54": false}',
            "This is the file contents of 'file_1'",
            "This is the file contents of 'file_2'",
            "This is the file contents of 'file_3'",
        ],
        integration_version=integration_version,
    )
Example No. 23
 def devices_ls(self) -> List[OrchDevicesPerHostModel]:
     cmd = {"prefix": "orch device ls"}
     res = self.call(cmd)
     return parse_obj_as(List[OrchDevicesPerHostModel], res)
Example No. 24
 def _get_buckets(self) -> List[BucketModel]:
     response = self._ensured_client.list_buckets()
     return parse_obj_as(List[BucketModel], response.get("Buckets"))
Example No. 25
 def wikifactory_base_url(cls, v: Union[str, AnyHttpUrl]) -> AnyHttpUrl:
     if isinstance(v, str):
         return parse_obj_as(AnyHttpUrl, v)
     elif isinstance(v, AnyHttpUrl):
         return v
     raise ValueError(v)
Example No. 26
 def get_bucket_objects(self, bucket: str) -> List[ObjectModel]:
     response = self._ensured_client.list_objects(Bucket=bucket)
     logger.debug("List objects response:\n%s", response)
     return parse_obj_as(List[ObjectModel], response.get("Contents"))
Example No. 27
    def __validate_cls_namespace__(name: str,
                                   namespace: Dict) -> None:  # noqa C901
        """Validate the class name space in place"""
        annotations = resolve_annotations(namespace.get("__annotations__", {}),
                                          namespace.get("__module__"))
        config = validate_config(namespace.get("Config", BaseODMConfig), name)
        odm_fields: Dict[str, ODMBaseField] = {}
        references: List[str] = []
        bson_serialized_fields: Set[str] = set()
        mutable_fields: Set[str] = set()

        # Make sure all fields are defined with type annotation
        for field_name, value in namespace.items():
            if (should_touch_field(value=value) and not is_dunder(field_name)
                    and field_name not in annotations):
                raise TypeError(
                    f"field {field_name} is defined without type annotation")

        # Validate fields types and substitute bson fields
        for (field_name, field_type) in annotations.items():
            if not is_dunder(field_name) and should_touch_field(
                    type_=field_type):
                substituted_type = validate_type(field_type)
                # Handle BSON serialized fields after substitution to allow some
                # builtin substitution
                bson_serialization_method = getattr(substituted_type,
                                                    "__bson__", None)
                if bson_serialization_method is not None:
                    bson_serialized_fields.add(field_name)
                annotations[field_name] = substituted_type

        # Validate fields
        for (field_name, field_type) in annotations.items():
            value = namespace.get(field_name, Undefined)

            if is_dunder(field_name) or not should_touch_field(
                    value, field_type):
                continue  # pragma: no cover
                # https://github.com/nedbat/coveragepy/issues/198

            if isinstance(value, PDFieldInfo):
                raise TypeError(
                    "please use odmantic.Field instead of pydantic.Field")

            if is_type_mutable(field_type):
                mutable_fields.add(field_name)

            if lenient_issubclass(field_type, EmbeddedModel):
                if isinstance(value, ODMFieldInfo):
                    namespace[field_name] = value.pydantic_field_info
                    key_name = (value.key_name
                                if value.key_name is not None else field_name)
                    primary_field = value.primary_field
                else:
                    key_name = field_name
                    primary_field = False

                odm_fields[field_name] = ODMEmbedded(
                    primary_field=primary_field,
                    model=field_type,
                    key_name=key_name,
                    model_config=config,
                )
            elif lenient_issubclass(field_type, Model):
                if not isinstance(value, ODMReferenceInfo):
                    raise TypeError(
                        f"cannot define a reference {field_name} (in {name}) without"
                        " a Reference assigned to it")
                key_name = value.key_name if value.key_name is not None else field_name
                raise_on_invalid_key_name(key_name)
                odm_fields[field_name] = ODMReference(model=field_type,
                                                      key_name=key_name,
                                                      model_config=config)
                references.append(field_name)
                del namespace[
                    field_name]  # Remove default ODMReferenceInfo value
            else:
                if isinstance(value, ODMFieldInfo):
                    key_name = (value.key_name
                                if value.key_name is not None else field_name)
                    raise_on_invalid_key_name(key_name)
                    odm_fields[field_name] = ODMField(
                        primary_field=value.primary_field,
                        key_name=key_name,
                        model_config=config,
                    )
                    namespace[field_name] = value.pydantic_field_info

                elif value is Undefined:
                    odm_fields[field_name] = ODMField(primary_field=False,
                                                      key_name=field_name,
                                                      model_config=config)

                else:
                    try:
                        parse_obj_as(field_type, value)
                    except ValidationError:
                        raise TypeError(
                            f"Unhandled field definition {name}: {repr(field_type)}"
                            f" = {repr(value)}")
                    odm_fields[field_name] = ODMField(primary_field=False,
                                                      key_name=field_name,
                                                      model_config=config)

        duplicate_key = find_duplicate_key(odm_fields.values())
        if duplicate_key is not None:
            raise TypeError(f"Duplicated key_name: {duplicate_key} in {name}")
        # NOTE: Duplicate key detection makes sure that at most one primary key is
        # defined
        namespace["__annotations__"] = annotations
        namespace["__odm_fields__"] = odm_fields
        namespace["__references__"] = tuple(references)
        namespace["__bson_serialized_fields__"] = frozenset(
            bson_serialized_fields)
        namespace["__mutable_fields__"] = frozenset(mutable_fields)
        namespace["Config"] = config
Example No. 28
def get_settings_override() -> Settings:
    return Settings(redis_dsn=parse_obj_as(RedisDsn, 'redis://redis:6379/2'))
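
A sketch of the surrounding pieces this override typically assumes (the Settings model and the FastAPI dependency wiring below are assumptions, not shown in the snippet):

from pydantic import BaseSettings, RedisDsn, parse_obj_as

class Settings(BaseSettings):
    # default points at a local Redis; the override above swaps in a test database
    redis_dsn: RedisDsn = parse_obj_as(RedisDsn, "redis://localhost:6379/1")

# In tests the override is usually registered on the app, e.g.:
# app.dependency_overrides[get_settings] = get_settings_override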
Example No. 29
 def return_fake_input_value(*args, **kwargs):
     for value, value_type in zip(fake_inputs.values(), fake_io_schema.values()):
         if value_type["type"] == "data:*/*":
             yield parse_obj_as(AnyUrl, faker.url())
         else:
             yield value
Example No. 30
def fmt_url(url_fmt: str, **kwargs: Union[int, str]) -> HttpUrl:
    url: HttpUrl = parse_obj_as(HttpUrl, url_fmt.format(**kwargs))
    return url
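
A small usage sketch (the template URL is just an illustration):

url = fmt_url("https://api.example.com/users/{user_id}?page={page}", user_id=42, page=1)
assert str(url) == "https://api.example.com/users/42?page=1"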