def transformer(model: str = 'base', quantized: bool = False, **kwargs):
    """
    Load transformer encoder-decoder model to translate EN-to-MS.

    Parameters
    ----------
    model : str, optional (default='base')
        Model architecture supported. Allowed values:

        * ``'small'`` - Transformer SMALL parameters.
        * ``'base'`` - Transformer BASE parameters.
        * ``'large'`` - Transformer LARGE parameters.
        * ``'bigbird'`` - BigBird BASE parameters.
        * ``'small-bigbird'`` - BigBird SMALL parameters.

    quantized : bool, optional (default=False)
        if True, will load 8-bit quantized model.
        Quantized model not necessary faster, totally depends on the machine.

    Returns
    -------
    result: model
        List of model classes:

        * if `bigbird` in model, return `malaya.model.bigbird.Translation`.
        * else, return `malaya.model.tf.Translation`.
    """
    model = model.lower()
    if model not in _transformer_availability:
        raise ValueError(
            'model not supported, please check supported models from `malaya.translation.en_ms.available_transformer()`.'
        )

    if 'bigbird' in model:
        # BigBird models need an explicit maximum sequence length, taken from
        # the availability table's 'Suggested length' column.
        return load_bigbird.load(
            module='translation-en-ms',
            model=model,
            model_class=BigBird_Translation,
            maxlen=_transformer_availability[model]['Suggested length'],
            quantized=quantized,
            **kwargs,
        )
    else:
        # NOTE(review): the original had a second, unreachable
        # `return load_transformer.load(...)` after this if/else — removed as dead code.
        return load_transformer.load(
            module='translation-en-ms',
            model=model,
            encoder='subword',
            model_class=Translation,
            quantized=quantized,
            **kwargs,
        )
def transformer(model: str = 'base', **kwargs):
    """
    Load transformer encoder-decoder model to True Case.

    Parameters
    ----------
    model : str, optional (default='base')
        Model architecture supported. Allowed values:

        * ``'small'`` - Transformer SMALL parameters.
        * ``'base'`` - Transformer BASE parameters.

    Returns
    -------
    result: malaya.model.tf.TRUE_CASE class
    """
    model = model.lower()
    if model not in _transformer_availability:
        raise Exception(
            'model not supported, please check supported models from malaya.true_case.available_transformer()'
        )
    # Fix: forward **kwargs to the loader — the signature accepts them but the
    # original call silently dropped them (the same positional `load` API
    # accepts **kwargs elsewhere in this package).
    return load_transformer.load(
        PATH_TRUE_CASE, S3_PATH_TRUE_CASE, model, 'yttm', TRUE_CASE, **kwargs
    )
def transformer(model: str = 'small', quantized: bool = False, **kwargs):
    """
    Load transformer encoder-decoder model to Segmentize.

    Parameters
    ----------
    model : str, optional (default='small')
        Model architecture supported. Allowed values:

        * ``'small'`` - Transformer SMALL parameters.
        * ``'base'`` - Transformer BASE parameters.

    quantized : bool, optional (default=False)
        if True, will load 8-bit quantized model.
        Quantized model not necessary faster, totally depends on the machine.

    Returns
    -------
    result: malaya.model.tf.Segmentation class
    """
    model = model.lower()
    if model not in _transformer_availability:
        raise ValueError(
            'model not supported, please check supported models from `malaya.segmentation.available_transformer()`.'
        )
    return load_transformer.load(
        module='segmentation',
        model=model,
        encoder='yttm',
        model_class=Segmentation,
        quantized=quantized,
        **kwargs,
    )
def transformer(model: str = 'base', **kwargs):
    """
    Load Transformer encoder-decoder model to translate MS-to-EN.

    Parameters
    ----------
    model : str, optional (default='base')
        Model architecture supported. Allowed values:

        * ``'small'`` - Transformer SMALL parameters.
        * ``'base'`` - Transformer BASE parameters.
        * ``'large'`` - Transformer LARGE parameters.

    Returns
    -------
    result: malaya.model.tf.TRANSLATION class
    """
    model = model.lower()
    if model not in _transformer_availability:
        raise Exception(
            'model not supported, please check supported models from malaya.translation.ms_en.available_transformer()'
        )
    path = PATH_TRANSLATION['ms-en']
    s3_path = S3_PATH_TRANSLATION['ms-en']
    # Fix: forward **kwargs to the loader — they were accepted by the signature
    # but dropped by the original call (the sibling quantized variant forwards
    # them through this same positional API).
    return load_transformer.load(path, s3_path, model, 'subword', TRANSLATION, **kwargs)
def transformer(model: str = 'base', quantized: bool = False, **kwargs):
    """
    Load transformer encoder-decoder model to True Case.

    Parameters
    ----------
    model : str, optional (default='base')
        Model architecture supported. Allowed values:

        * ``'small'`` - Transformer SMALL parameters.
        * ``'base'`` - Transformer BASE parameters.

    quantized : bool, optional (default=False)
        if True, will load 8-bit quantized model.
        Quantized model not necessary faster, totally depends on the machine.

    Returns
    -------
    result: malaya.model.tf.TRUE_CASE class
    """
    model = model.lower()
    if model not in _transformer_availability:
        raise Exception(
            'model not supported, please check supported models from `malaya.true_case.available_transformer()`.'
        )
    # Fix: forward **kwargs to the loader — the signature accepts them but the
    # original call only passed quantized=, silently dropping everything else.
    return load_transformer.load(
        PATH_TRUE_CASE,
        S3_PATH_TRUE_CASE,
        model,
        'yttm',
        TRUE_CASE,
        quantized=quantized,
        **kwargs,
    )
def transformer(model: str = 'base', quantized: bool = False, **kwargs):
    """
    Load transformer encoder-decoder model to True Case.

    Parameters
    ----------
    model : str, optional (default='base')
        Model architecture supported. Allowed values:

        * ``'small'`` - Transformer SMALL parameters.
        * ``'base'`` - Transformer BASE parameters.
        * ``'super-tiny-t5'`` - T5 SUPER TINY parameters.
        * ``'super-super-tiny-t5'`` - T5 SUPER SUPER TINY parameters.
        * ``'3x-super-tiny-t5'`` - T5 3X SUPER TINY parameters.
        * ``'3x-super-tiny-t5-4k'`` - T5 3X SUPER TINY 4k vocab size parameters.

    quantized : bool, optional (default=False)
        if True, will load 8-bit quantized model.
        Quantized model not necessary faster, totally depends on the machine.

    Returns
    -------
    result: malaya.model.tf.TrueCase class
    """
    model = model.lower()
    if model not in _transformer_availability:
        raise ValueError(
            'model not supported, please check supported models from `malaya.true_case.available_transformer()`.'
        )

    # Arguments shared by both loader back-ends.
    shared = dict(module='true-case', model=model, quantized=quantized, **kwargs)

    # T5 checkpoints go through the dedicated T5 loader; everything else uses
    # the generic transformer loader with a YTTM subword encoder.
    if 't5' in model:
        return t5_load.load(model_class=T5_TrueCase, **shared)
    return load_transformer.load(encoder='yttm', model_class=TrueCase, **shared)
def transformer(model: str = 'base', quantized: bool = False, **kwargs):
    """
    Load Transformer encoder-decoder model to translate MS-to-EN.

    Parameters
    ----------
    model : str, optional (default='base')
        Model architecture supported. Allowed values:

        * ``'small'`` - Transformer SMALL parameters.
        * ``'base'`` - Transformer BASE parameters.
        * ``'large'`` - Transformer LARGE parameters.

    quantized : bool, optional (default=False)
        if True, will load 8-bit quantized model.
        Quantized model not necessary faster, totally depends on the machine.

    Returns
    -------
    result: malaya.model.tf.TRANSLATION class
    """
    model = model.lower()
    if model in _transformer_availability:
        # Bucket paths for the MS-to-EN direction; encoder is subword-based.
        return load_transformer.load(
            PATH_TRANSLATION['ms-en'],
            S3_PATH_TRANSLATION['ms-en'],
            model,
            'subword',
            TRANSLATION,
            quantized=quantized,
            **kwargs,
        )
    raise Exception(
        'model not supported, please check supported models from `malaya.translation.ms_en.available_transformer()`.'
    )