def load_bart_classifier_model(load_path):
    """Instantiate a BartClassifierAI from the model files under *load_path*.

    Checks the model directory, builds the model on the current compute
    device, then loads the shared extension data before returning it.
    """
    from aitg_host.models.bart_classifier import BartClassifierAI

    ensure_model_dir(load_path)
    # first element of get_compute_device() is the device identifier
    device = get_compute_device()[0]
    classifier = BartClassifierAI(model_folder=load_path, to_device=device)
    load_common_ext(classifier, load_path)
    return classifier
def load_led_summarizer_model(load_path):
    """Instantiate a LedSummarizerAI from the model files under *load_path*.

    Checks the model directory, builds the model on the current compute
    device, then loads the shared extension data before returning it.
    """
    from aitg_host.models.led__summarizer import LedSummarizerAI

    ensure_model_dir(load_path)
    # first element of get_compute_device() is the device identifier
    device = get_compute_device()[0]
    summarizer = LedSummarizerAI(model_folder=load_path, to_device=device)
    load_common_ext(summarizer, load_path)
    return summarizer
def load_question_answer_model(load_path):
    """Instantiate a QuestionAnswerAI from the model files under *load_path*.

    Checks the model directory, builds the model on the current compute
    device, then loads the shared extension data before returning it.
    """
    from aitg_host.models.question_answer import QuestionAnswerAI

    ensure_model_dir(load_path)
    # first element of get_compute_device() is the device identifier
    device = get_compute_device()[0]
    qa_model = QuestionAnswerAI(model_folder=load_path, to_device=device)
    load_common_ext(qa_model, load_path)
    return qa_model
def load_sentence_embed_model(load_path):
    """Instantiate a SentenceEmbedAI from the model files under *load_path*.

    Checks the model directory, builds the model on the current compute
    device, then loads the shared extension data before returning it.
    """
    from aitg_host.models.sentence_embed import SentenceEmbedAI

    ensure_model_dir(load_path)
    # first element of get_compute_device() is the device identifier
    device = get_compute_device()[0]
    embedder = SentenceEmbedAI(model_folder=load_path, to_device=device)
    load_common_ext(embedder, load_path)
    return embedder
def load_gpt_model(load_path):
    """Instantiate an aitextgen GPT model from the files under *load_path*.

    Unlike the other loaders, aitextgen takes a boolean ``to_gpu`` flag
    rather than a device string, so the flag is derived from the second
    element of get_compute_device().
    """
    from aitextgen import aitextgen

    # second element of get_compute_device() is the device kind ("gpu"/...)
    on_gpu = get_compute_device()[1] == "gpu"
    ensure_model_dir(load_path)
    model = aitextgen(model_folder=load_path, to_gpu=on_gpu)
    load_common_ext(model, load_path)
    return model
def info_callback(value: bool):
    """Eager CLI flag callback: print a host-info banner and exit.

    Does nothing when *value* is falsy; otherwise prints version, platform,
    device, and memory information, then raises ``typer.Exit`` so the CLI
    stops after reporting.
    """
    if not value:
        return
    from aitg_host.util import get_compute_device
    import torch  # NOTE(review): not referenced below — presumably kept for import side effects; confirm

    info = get_compute_device()
    pad = ' '
    banner = f'AITG HOST v{__version__}'
    print(ICON_ART, f'\n{pad}{banner}')
    # underline the banner with a same-length rule
    print(f"{pad}{'‾' * len(banner)}")
    print(f'{pad} ⊦ PLATFORM: {sys.platform}')
    print(f'{pad} ⊦ DEVICE: {info[0]}')
    print(f'{pad} ⊦ MEMORY: {info[2]:.2f} GB')
    raise typer.Exit()