class OfferExtraData(BaseModel):
    author: Optional[str]
    durationMinutes: Optional[int]
    isbn: Optional[str]
    musicSubType: Optional[str]
    musicType: Optional[str]
    performer: Optional[str]
    showSubType: Optional[str]
    showType: Optional[str]
    stageDirector: Optional[str]
    speaker: Optional[str]
    visa: Optional[str]

    _convert_music_sub_type = validator("musicSubType", pre=True, allow_reuse=True)(
        get_id_converter(MUSIC_SUB_TYPES_DICT, "musicSubType")
    )
    _convert_music_type = validator("musicType", pre=True, allow_reuse=True)(
        get_id_converter(MUSIC_TYPES_DICT, "musicType")
    )
    _convert_show_sub_type = validator("showSubType", pre=True, allow_reuse=True)(
        get_id_converter(SHOW_SUB_TYPES_DICT, "showSubType")
    )
    _convert_show_type = validator("showType", pre=True, allow_reuse=True)(
        get_id_converter(SHOW_TYPES_DICT, "showType")
    )
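# `get_id_converter` is not defined in this section. A minimal sketch, assuming
# each *_TYPES_DICT maps integer codes to human-readable labels and that values
# arrive as stringified ids (both are assumptions, not the confirmed
# implementation):
def get_id_converter(types_dict: dict, field_name: str):
    def convert_id_to_label(value: Optional[str]) -> Optional[str]:
        # Pass missing values through so Optional fields stay None.
        if value is None:
            return None
        try:
            return types_dict[int(value)]
        except (KeyError, ValueError):
            raise ValueError(f"Invalid {field_name}: {value}")

    return convert_id_to_label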
class FavoriteOfferResponse(BaseModel):
    id: int
    name: str
    category: FavoriteCategoryResponse
    subcategoryId: SubcategoryIdEnum
    externalTicketOfficeUrl: Optional[str]
    image: Optional[FavoriteMediationResponse]
    coordinates: Coordinates
    price: Optional[int] = None
    startPrice: Optional[int] = None
    date: Optional[datetime] = None
    startDate: Optional[datetime] = None
    isExpired: bool = False
    expenseDomains: list[ExpenseDomain]
    isReleased: bool
    isSoldOut: bool = False

    _convert_price = validator("price", pre=True, allow_reuse=True)(convert_to_cent)
    _convert_start_price = validator("startPrice", pre=True, allow_reuse=True)(convert_to_cent)

    class Config:
        orm_mode = True

    @classmethod
    def from_orm(cls, offer):  # type: ignore
        offer.category = get_serialized_offer_category(offer)
        offer.coordinates = {"latitude": offer.venue.latitude, "longitude": offer.venue.longitude}
        offer.expenseDomains = get_expense_domains(offer)
        return super().from_orm(offer)
class AEMOClosureRecord(BaseConfig):
    station_name: str
    duid: Optional[str]
    expected_closure_year: Optional[int]
    expected_closure_date: Optional[datetime]

    _validate_closure_year = validator("expected_closure_year", pre=True)(
        _clean_expected_closure_year
    )
    _clean_duid = validator("duid", pre=True)(normalize_duid)
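# Neither helper above is defined in this section. Minimal sketches, assuming
# `normalize_duid` cleans up unit identifiers and `_clean_expected_closure_year`
# coerces messy year values to an int (both assumptions, not the confirmed
# implementations):
def normalize_duid(duid: Optional[str]) -> Optional[str]:
    duid = (duid or "").strip()
    # Treat empty strings as missing so the Optional field stays None.
    return duid.upper() if duid else None


def _clean_expected_closure_year(year: Any) -> Optional[int]:
    if year in (None, "", "-"):
        return None
    try:
        # int(float(...)) also handles values like "2035.0".
        return int(float(year))
    except (TypeError, ValueError):
        return None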
class Credit(BaseModel):
    initial: int
    remaining: int

    _convert_initial = validator("initial", pre=True, allow_reuse=True)(convert_to_cent)
    _convert_remaining = validator("remaining", pre=True, allow_reuse=True)(convert_to_cent)

    class Config:
        orm_mode = True
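# `convert_to_cent` is used by several models in this section but not defined
# here. A minimal sketch, assuming it turns a decimal currency amount into an
# integer number of cents (suggested by the `int` field types; an assumption):
from decimal import Decimal


def convert_to_cent(amount: Optional[Decimal]) -> Optional[int]:
    # Keep None untouched so Optional price fields validate cleanly.
    if amount is None:
        return None
    return int(amount * 100)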
class Expense(BaseModel):
    domain: ExpenseDomain
    current: int
    limit: int

    _convert_current = validator("current", pre=True, allow_reuse=True)(convert_to_cent)
    _convert_limit = validator("limit", pre=True, allow_reuse=True)(convert_to_cent)

    class Config:
        orm_mode = True
class BookingResponse(BaseModel):
    id: int
    cancellationDate: Optional[datetime]
    cancellationReason: Optional[BookingCancellationReasons]
    confirmationDate: Optional[datetime]
    completedUrl: Optional[str]
    dateUsed: Optional[datetime]
    expirationDate: Optional[datetime]
    qrCodeData: Optional[str]
    quantity: int
    stock: BookingStockResponse
    total_amount: int
    token: str
    activationCode: Optional[BookingActivationCodeResponse]

    _convert_total_amount = validator("total_amount", pre=True, allow_reuse=True)(convert_to_cent)

    @classmethod
    def from_orm(cls: Any, booking: Booking):  # type: ignore
        # The native application should use `booking.completedUrl`, but up to
        # version 135 it uses `booking.stock.offer.url` instead. Therefore the
        # API overrides `booking.stock.offer.url` with `booking.completedUrl`.
        # Unfortunate side effect: the offer object has its url modified and
        # needs to be rolled back.
        booking.stock.offer.url = booking.completedUrl
        booking.confirmationDate = booking.cancellationLimitDate
        return super().from_orm(booking)

    class Config:
        orm_mode = True
        alias_generator = to_camel
        allow_population_by_field_name = True
class SettingsResponse(BaseModel):
    deposit_amount: int
    is_recaptcha_enabled: bool
    auto_activate_digital_bookings: bool
    allow_id_check_registration: bool
    enable_native_id_check_version: bool
    enable_native_id_check_verbose_debugging: bool
    enable_id_check_retention: bool
    enable_phone_validation: bool
    object_storage_url: str
    whole_france_opening: bool
    display_dms_redirection: bool
    use_app_search: bool
    id_check_address_autocompletion: bool
    is_webapp_v2_enabled: bool
    enable_native_eac_individual: bool
    account_creation_minimum_age: int

    _convert_deposit_amount = validator("deposit_amount", pre=True, allow_reuse=True)(convert_to_cent)

    class Config:
        alias_generator = to_camel
        allow_population_by_field_name = True
class OfferStockResponse(BaseModel):
    id: int
    beginningDatetime: Optional[datetime]
    bookingLimitDatetime: Optional[datetime]
    cancellation_limit_datetime: Optional[datetime]
    isBookable: bool
    price: int

    _convert_price = validator("price", pre=True, allow_reuse=True)(convert_to_cent)

    class Config:
        orm_mode = True
        alias_generator = to_camel
        allow_population_by_field_name = True

    @staticmethod
    def _get_cancellation_limit_datetime(stock: Stock) -> Optional[datetime]:
        # compute date as if it were booked now
        return compute_confirmation_date(stock.beginningDatetime, datetime.now())

    @classmethod
    def from_orm(cls, stock):  # type: ignore
        stock.cancellation_limit_datetime = cls._get_cancellation_limit_datetime(stock)
        return super().from_orm(stock)
def make_list_validator(*field_name: str):
    """Get a validator that wraps the value in a list if a single object is passed."""

    def split(v: Any):
        if not isinstance(v, list):
            v = [v]
        return v

    return validator(*field_name, allow_reuse=True, pre=True)(split)
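# A hypothetical usage sketch of `make_list_validator`; the `Playlist` model
# and its fields are illustrative, not from the original code.
class Playlist(BaseModel):
    tags: list[str]
    owners: list[str]

    # A bare value passed to either field is wrapped in a one-element list
    # before the normal list validation runs.
    _wrap_in_list = make_list_validator("tags", "owners")


playlist = Playlist(tags="rock", owners=["alice", "bob"])
assert playlist.tags == ["rock"]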
class JouveContent(pydantic.BaseModel):
    # TODO: analyze jouve results to see where we can remove "optional"
    activity: typing.Optional[str]
    address: typing.Optional[str]
    birthDateTxt: typing.Optional[datetime.datetime]
    birthLocationCtrl: typing.Optional[str]
    bodyBirthDateCtrl: typing.Optional[str]
    bodyBirthDateLevel: typing.Optional[int]
    bodyFirstnameCtrl: typing.Optional[str]
    bodyFirstnameLevel: typing.Optional[int]
    bodyNameLevel: typing.Optional[int]
    bodyNameCtrl: typing.Optional[str]
    bodyPieceNumber: typing.Optional[str]
    bodyPieceNumberCtrl: typing.Optional[str]
    bodyPieceNumberLevel: typing.Optional[int]
    city: typing.Optional[str]
    creatorCtrl: typing.Optional[str]
    id: int
    email: typing.Optional[str]
    firstName: typing.Optional[str]
    gender: typing.Optional[str]
    initialNumberCtrl: typing.Optional[str]
    initialSizeCtrl: typing.Optional[str]
    lastName: typing.Optional[str]
    phoneNumber: typing.Optional[str]
    postalCode: typing.Optional[str]
    posteCodeCtrl: typing.Optional[str]
    serviceCodeCtrl: typing.Optional[str]

    _parse_body_birth_date_level = validator("bodyBirthDateLevel", pre=True, allow_reuse=True)(_parse_level)
    _parse_body_first_name_level = validator("bodyFirstnameLevel", pre=True, allow_reuse=True)(_parse_level)
    _parse_body_name_level = validator("bodyNameLevel", pre=True, allow_reuse=True)(_parse_level)
    _parse_body_piece_number_level = validator("bodyPieceNumberLevel", pre=True, allow_reuse=True)(_parse_level)
    _parse_birth_date = validator("birthDateTxt", pre=True, allow_reuse=True)(_parse_date)
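# `_parse_level` and `_parse_date` are not shown in this section. Minimal
# sketches, assuming levels arrive as possibly-empty strings and the birth date
# arrives as text in a fixed format (the "%m/%d/%Y" format string below is a
# placeholder assumption; the real feed may differ):
def _parse_level(level: typing.Optional[str]) -> typing.Optional[int]:
    if not level:
        return None
    try:
        return int(level)
    except ValueError:
        return None


def _parse_date(date: typing.Optional[str]) -> typing.Optional[datetime.datetime]:
    if not date:
        return None
    return datetime.datetime.strptime(date, "%m/%d/%Y")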
class OfferStockResponse(BaseModel):
    id: int
    beginningDatetime: Optional[datetime]
    isBookable: bool
    price: int

    _convert_price = validator("price", pre=True, allow_reuse=True)(convert_to_cent)

    class Config:
        orm_mode = True
        alias_generator = to_camel
        allow_population_by_field_name = True
def get_enum_validator(*field_name: str, enum: Type[Enum]):
    """
    Get a case-insensitive enum validator that returns the corresponding enum member.

    If the input is a list, each list item is checked individually.

    Args:
        enum (Type[Enum]): The enum type to validate against.
    """

    def get_enum(v):
        # Assumes a str-based Enum, so members support .lower().
        for entry in enum:
            if entry.lower() == v.lower():
                return entry
        return v

    return validator(*field_name, allow_reuse=True, pre=True, each_item=True)(get_enum)
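# A hypothetical usage sketch of `get_enum_validator`; the `Color` enum and the
# `Palette` model are illustrative, not from the original code.
class Color(str, Enum):  # str mixin so members support .lower()
    RED = "red"
    GREEN = "green"


class Palette(BaseModel):
    primary: Color
    accents: list[Color]

    # each_item=True means list items are matched one by one.
    _normalize_colors = get_enum_validator("primary", "accents", enum=Color)


palette = Palette(primary="RED", accents=["Green", "red"])
assert palette.primary is Color.RED
assert palette.accents == [Color.GREEN, Color.RED]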
def get_split_string_on_delimiter_validator(*field_name: str):
    """Get a validator to split strings passed to the specified field_name.

    Strings are split on an automatically selected delimiter: the field's own
    delimiter if one was defined using Field(.., delimiter=".."), otherwise the
    delimiter of the field's parent class (which should be a (subclass of)
    INIBasedModel).

    The validator splits a string value into a list of substrings before any
    other validation takes place.

    Returns:
        the validator which splits strings on the selected delimiter.
    """

    def split(cls, v: Any, field: ModelField):
        if isinstance(v, str):
            v = v.split(cls.get_list_field_delimiter(field.name))
            v = [item.strip() for item in v if item != ""]
        return v

    return validator(*field_name, allow_reuse=True, pre=True)(split)
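# A hypothetical sketch of how this validator plugs into a model; the `Section`
# class and its fixed ";" delimiter stand in for INIBasedModel's delimiter
# lookup and are not part of the original code.
class Section(BaseModel):
    keywords: list[str]

    @classmethod
    def get_list_field_delimiter(cls, field_name: str) -> str:
        return ";"

    _split_keywords = get_split_string_on_delimiter_validator("keywords")


section = Section(keywords="alpha; beta; gamma")
assert section.keywords == ["alpha", "beta", "gamma"]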
class OfferStockResponse(BaseModel):
    id: int
    beginningDatetime: Optional[datetime]
    bookingLimitDatetime: Optional[datetime]
    cancellation_limit_datetime: Optional[datetime]
    isBookable: bool
    isSoldOut: bool
    isExpired: bool
    price: int
    activationCode: Optional[OfferStockActivationCodeResponse]

    _convert_price = validator("price", pre=True, allow_reuse=True)(convert_to_cent)

    class Config:
        orm_mode = True
        alias_generator = to_camel
        allow_population_by_field_name = True

    @staticmethod
    def _get_cancellation_limit_datetime(stock: Stock) -> Optional[datetime]:
        # compute date as if it were booked now
        return compute_cancellation_limit_date(stock.beginningDatetime, datetime.now())

    @staticmethod
    def _get_non_scrappable_activation_code(stock: Stock) -> Optional[dict]:
        if not stock.canHaveActivationCodes:
            return None
        # N+1 queries here (one activation-code query per stock), but this
        # should still be more efficient than loading all activationCodes of
        # all stocks.
        activation_code = offers_repository.get_available_activation_code(stock)
        if not activation_code:
            return None
        return {"expirationDate": activation_code.expirationDate}

    @classmethod
    def from_orm(cls, stock):  # type: ignore
        stock.cancellation_limit_datetime = cls._get_cancellation_limit_datetime(stock)
        stock.activationCode = cls._get_non_scrappable_activation_code(stock)
        return super().from_orm(stock)
class DensePoseConfig(ZambaBaseModel):
    """Configuration for running DensePose on videos.

    Args:
        video_loader_config (VideoLoaderConfig): Configuration for loading videos.
        output_type (str): One of DensePoseOutputEnum (currently "segmentation"
            or "chimp_anatomy").
        render_output (bool): Whether to save a version of the video with the
            output overlaid on top. Defaults to False.
        embeddings_in_json (bool): Whether to save the embedding matrices in the
            json of the DensePose result. Setting to True can result in large
            json files. Defaults to False.
        data_dir (Path): Where to find the files listed in filepaths (or where
            to look if filepaths is not provided).
        filepaths (Path, optional): Path to a CSV file with a list of filepaths
            to process.
        save_dir (Path, optional): Directory where the output files are saved;
            defaults to os.getcwd().
        cache_dir (Path, optional): Path for downloading and saving model
            weights. Defaults to env var `MODEL_CACHE_DIR` or the OS app cache dir.
        weight_download_region (RegionEnum, optional): Region from which to
            download weights; should be one of RegionEnum (currently 'us',
            'asia', and 'eu'). Defaults to 'us'.
    """

    video_loader_config: VideoLoaderConfig
    output_type: DensePoseOutputEnum
    render_output: bool = False
    embeddings_in_json: bool = False
    data_dir: Path
    filepaths: Optional[Path] = None
    save_dir: Optional[Path] = None
    cache_dir: Optional[Path] = None
    weight_download_region: RegionEnum = RegionEnum("us")

    _validate_cache_dir = validator("cache_dir", allow_reuse=True, always=True)(
        validate_model_cache_dir
    )

    def run_model(self):
        """Use this configuration to execute DensePose via the DensePoseManager."""
        if not isinstance(self.output_type, DensePoseOutputEnum):
            self.output_type = DensePoseOutputEnum(self.output_type)

        if self.output_type == DensePoseOutputEnum.segmentation.value:
            model = MODELS["animals"]
        elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:
            model = MODELS["chimps"]
        else:
            raise Exception(f"invalid {self.output_type}")

        output_dir = Path(os.getcwd()) if self.save_dir is None else self.save_dir

        dpm = DensePoseManager(
            model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region
        )

        for fp in tqdm(self.filepaths.filepath, desc="Videos"):
            fp = Path(fp)

            vid_arr, labels = dpm.predict_video(fp, video_loader_config=self.video_loader_config)

            # serialize the labels generated by densepose to json
            output_path = output_dir / f"{fp.stem}_denspose_labels.json"
            dpm.serialize_video_output(
                labels, filename=output_path, write_embeddings=self.embeddings_in_json
            )

            # re-render the video with the densepose labels visualized on top of the video
            if self.render_output:
                output_path = output_dir / f"{fp.stem}_denspose_video{''.join(fp.suffixes)}"
                visualized_video = dpm.visualize_video(
                    vid_arr, labels, output_path=output_path, fps=self.video_loader_config.fps
                )

                # write out the anatomy present in each frame to a csv for later analysis
                if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:
                    output_path = output_dir / f"{fp.stem}_denspose_anatomy.csv"
                    dpm.anatomize_video(
                        visualized_video,
                        labels,
                        output_path=output_path,
                        fps=self.video_loader_config.fps,
                    )

    @root_validator(pre=False, skip_on_failure=True)
    def get_filepaths(cls, values):
        """If no file list is passed, get all files in the data directory.

        Warn if there are unsupported suffixes. Filepaths is set to a dataframe,
        where column `filepath` contains files with valid suffixes.
        """
        if values["filepaths"] is None:
            logger.info(f"Getting files in {values['data_dir']}.")
            files = []
            new_suffixes = []

            # iterate over all files in data directory
            for f in values["data_dir"].rglob("*"):
                if f.is_file():
                    # keep just files with supported suffixes
                    if f.suffix.lower() in VIDEO_SUFFIXES:
                        files.append(f.resolve())
                    else:
                        new_suffixes.append(f.suffix.lower())

            if len(new_suffixes) > 0:
                logger.warning(
                    f"Ignoring {len(new_suffixes)} file(s) with suffixes {set(new_suffixes)}. To include, specify all video suffixes with a VIDEO_SUFFIXES environment variable."
                )

            if len(files) == 0:
                raise ValueError(f"No video files found in {values['data_dir']}.")

            logger.info(f"Found {len(files)} videos in {values['data_dir']}.")
            values["filepaths"] = pd.DataFrame(files, columns=["filepath"])
        return values

    @root_validator(skip_on_failure=True)
    def validate_files(cls, values):
        # if globbing from the data directory, we already have a valid dataframe
        if isinstance(values["filepaths"], pd.DataFrame):
            files_df = values["filepaths"]
        else:
            # make into a dataframe even if only one column for clearer indexing
            files_df = pd.DataFrame(pd.read_csv(values["filepaths"]))

        if "filepath" not in files_df.columns:
            raise ValueError(f"{values['filepaths']} must contain a `filepath` column.")

        # can only contain one row per filepath
        num_duplicates = len(files_df) - files_df.filepath.nunique()
        if num_duplicates > 0:
            logger.warning(
                f"Found {num_duplicates} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video."
            )
            files_df = files_df[["filepath"]].drop_duplicates()

        values["filepaths"] = check_files_exist_and_load(
            df=files_df,
            data_dir=values["data_dir"],
            skip_load_validation=True,
        )
        return values