def transformer(model: str = 'xlnet', **kwargs):
    """
    Load Transformer relevancy model.

    Parameters
    ----------
    model : str, optional (default='xlnet')
        Model architecture supported. Allowed values:

        * ``'bert'`` - BERT architecture from google.
        * ``'tiny-bert'`` - BERT architecture from google with smaller parameters.
        * ``'albert'`` - ALBERT architecture from google.
        * ``'tiny-albert'`` - ALBERT architecture from google with smaller parameters.
        * ``'xlnet'`` - XLNET architecture from google.
        * ``'alxlnet'`` - XLNET architecture from google + Malaya.

    Returns
    -------
    MODEL : Transformer class

    Raises
    ------
    ValueError
        If ``model`` is not one of the supported architectures.
    """
    # Normalize so callers may pass any casing, e.g. 'BERT'.
    model = model.lower()
    if model not in _availability:
        # ValueError is the precise exception for a bad argument value and is
        # backward-compatible: callers catching Exception still catch it.
        raise ValueError(
            'model not supported, please check supported models from malaya.relevancy.available_transformer_model()'
        )
    return softmax.transformer(PATH_RELEVANCY, S3_PATH_RELEVANCY, 'relevancy', label, model=model, **kwargs)
def transformer(model: str = 'xlnet', **kwargs):
    """
    Load Transformer emotion model.

    Parameters
    ----------
    model : str, optional (default='xlnet')
        Model architecture supported. Allowed values:

        * ``'bert'`` - Google BERT BASE parameters.
        * ``'tiny-bert'`` - Google BERT TINY parameters.
        * ``'albert'`` - Google ALBERT BASE parameters.
        * ``'tiny-albert'`` - Google ALBERT TINY parameters.
        * ``'xlnet'`` - Google XLNET BASE parameters.
        * ``'alxlnet'`` - Malaya ALXLNET BASE parameters.

    Returns
    -------
    result : malaya.supervised.softmax.transformer function

    Raises
    ------
    ValueError
        If ``model`` is not one of the supported architectures.
    """
    # Normalize so callers may pass any casing, e.g. 'BERT'.
    model = model.lower()
    if model not in _transformer_availability:
        raise ValueError(
            'model not supported, please check supported models from malaya.emotion.available_transformer()'
        )
    return softmax.transformer(PATH_EMOTION, S3_PATH_EMOTION, 'emotion', label, model=model, **kwargs)
def transformer(model: str = 'xlnet', **kwargs):
    """
    Load Transformer subjectivity model.

    Parameters
    ----------
    model : str, optional (default='xlnet')
        Model architecture supported. Allowed values:

        * ``'bert'`` - BERT architecture from google.
        * ``'tiny-bert'`` - BERT architecture from google with smaller parameters.
        * ``'albert'`` - ALBERT architecture from google.
        * ``'tiny-albert'`` - ALBERT architecture from google with smaller parameters.
        * ``'xlnet'`` - XLNET architecture from google.
        * ``'alxlnet'`` - XLNET architecture from google + Malaya.

    Returns
    -------
    result : Transformer class

    Raises
    ------
    ValueError
        If ``model`` is not one of the supported architectures.
    """
    # Normalize so callers may pass any casing, e.g. 'BERT'.
    model = model.lower()
    if model not in _availability:
        # ValueError is the precise exception for a bad argument value and is
        # backward-compatible: callers catching Exception still catch it.
        raise ValueError(
            'model not supported, please check supported models from malaya.subjective.available_transformer()'
        )
    return softmax.transformer(PATH_SUBJECTIVE, S3_PATH_SUBJECTIVE, 'subjective', label, model=model, **kwargs)
def transformer(model: str = 'xlnet', quantized: bool = False, **kwargs):
    """
    Load Transformer relevancy model.

    Parameters
    ----------
    model : str, optional (default='xlnet')
        Model architecture supported. Allowed values:

        * ``'bert'`` - Google BERT BASE parameters.
        * ``'tiny-bert'`` - Google BERT TINY parameters.
        * ``'albert'`` - Google ALBERT BASE parameters.
        * ``'tiny-albert'`` - Google ALBERT TINY parameters.
        * ``'xlnet'`` - Google XLNET BASE parameters.
        * ``'alxlnet'`` - Malaya ALXLNET BASE parameters.
        * ``'bigbird'`` - Google BigBird BASE parameters.
        * ``'tiny-bigbird'`` - Malaya BigBird BASE parameters.

    quantized : bool, optional (default=False)
        if True, will load 8-bit quantized model.
        Quantized model not necessary faster, totally depends on the machine.

    Returns
    -------
    result: model
        List of model classes:

        * if `bert` in model, will return `malaya.model.bert.MulticlassBERT`.
        * if `xlnet` in model, will return `malaya.model.xlnet.MulticlassXLNET`.
        * if `bigbird` in model, will return `malaya.model.bigbird.MulticlassBigBird`.

    Raises
    ------
    ValueError
        If ``model`` is not one of the supported architectures.
    """
    # Normalize so callers may pass any casing, e.g. 'BERT'.
    model = model.lower()
    if model not in _transformer_availability:
        raise ValueError(
            'model not supported, please check supported models from `malaya.relevancy.available_transformer()`.'
        )
    return softmax.transformer(
        class_name = 'relevancy',
        label = label,
        model = model,
        quantized = quantized,
        **kwargs
    )
def transformer(model: str = 'bert', quantized: bool = False, **kwargs):
    """
    Load Transformer subjectivity model.

    Parameters
    ----------
    model : str, optional (default='bert')
        Model architecture supported. Allowed values:

        * ``'bert'`` - Google BERT BASE parameters.
        * ``'tiny-bert'`` - Google BERT TINY parameters.
        * ``'albert'`` - Google ALBERT BASE parameters.
        * ``'tiny-albert'`` - Google ALBERT TINY parameters.
        * ``'xlnet'`` - Google XLNET BASE parameters.
        * ``'alxlnet'`` - Malaya ALXLNET BASE parameters.

    quantized : bool, optional (default=False)
        if True, will load 8-bit quantized model.
        Quantized model not necessary faster, totally depends on the machine.

    Returns
    -------
    result : malaya.supervised.softmax.transformer function

    Raises
    ------
    ValueError
        If ``model`` is not one of the supported architectures.
    """
    # Normalize so callers may pass any casing, e.g. 'BERT'.
    model = model.lower()
    if model not in _transformer_availability:
        # ValueError is the precise exception for a bad argument value and is
        # backward-compatible: callers catching Exception still catch it.
        raise ValueError(
            'model not supported, please check supported models from `malaya.subjective.available_transformer()`.'
        )
    return softmax.transformer(PATH_SUBJECTIVE, S3_PATH_SUBJECTIVE, 'subjective', label, model=model, quantized=quantized, **kwargs)