def __init__(self, lang_code="en", weightsPath=None, params_path=None):
        if lang_code not in trainedModels and lang_code in lang_code_mapping:
            lang_code = lang_code_mapping[lang_code]
        
        home = os.path.expanduser("~")
        langPath = os.path.join(home, '.fastPunct_' + lang_code)
        weightsPath = os.path.join(langPath, 'checkpoint.h5')
        params_path = os.path.join(langPath, 'params.pkl')

        if not os.path.exists(langPath):
            os.mkdir(langPath)

        if not os.path.exists(weightsPath):
            pydload.dload(url=trainedModels[lang_code]['checkpoint'], save_to_path=weightsPath, max_time=None)

        if not os.path.exists(params_path):
            pydload.dload(url=trainedModels[lang_code]['params'], save_to_path=params_path, max_time=None)


        with open(params_path, "rb") as file:
            self.parameters = pickle.load(file)
        self.parameters["reverse_enc_dict"] = {i:c for c, i in self.parameters["enc_token"].word_index.items()}
        self.model = getModelInstance(self.parameters)
        self.model.load_weights(weightsPath)
        self.allowedExtra = getExtraChars(self.parameters)
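All of the examples on this page share the same download-if-missing pattern: build a per-model cache directory under the user's home, create it if absent, and call pydload.dload only when the target file does not exist yet. A minimal standalone sketch of that pattern (the URL and directory name are placeholders, not taken from any of the projects shown here):

import os

import pydload

MODEL_URL = "https://example.com/checkpoint.h5"  # placeholder URL

home = os.path.expanduser("~")
model_dir = os.path.join(home, ".my_model")  # placeholder cache directory
os.makedirs(model_dir, exist_ok=True)

weights_path = os.path.join(model_dir, "checkpoint.h5")
if not os.path.exists(weights_path):
    # max_time=None disables pydload's download time limit (large model files)
    pydload.dload(url=MODEL_URL, save_to_path=weights_path, max_time=None)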
Example #2
    def __init__(self, model_name):
        model_name = model_name.lower()
        for x, y in LANGUAGE_ALISASES.items():
            model_name = model_name.replace(x, y)

        if model_name not in MODEL_URLS:
            print(f"model_name should be one of {list(MODEL_URLS.keys())}")
            return None

        self.source_lang, self.target_lang = model_name.split("-")
        self.source_lang = _LANGUAGE_ALISASES[self.source_lang]
        self.target_lang = _LANGUAGE_ALISASES[self.target_lang]

        home = os.path.expanduser("~")
        lang_path = os.path.join(home, ".Anuvaad_" + model_name)
        if not os.path.exists(lang_path):
            os.mkdir(lang_path)

        for file_name, url in MODEL_URLS[model_name].items():
            file_path = os.path.join(lang_path, file_name)
            if os.path.exists(file_path):
                continue
            print(f"Downloading {file_name}")
            pydload.dload(url=url, save_to_path=file_path, max_time=None)

        self.tokenizer = T5Tokenizer.from_pretrained(lang_path)
        self.model = T5ForConditionalGeneration.from_pretrained(
            lang_path, return_dict=True)

        if torch.cuda.is_available():
            print(f"Using GPU")
            self.model = self.model.cuda()
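The constructor above only loads the model; for context, here is a hedged sketch of how a translation call could look with the standard transformers generate API (the method name and generation settings are assumptions, not taken from the project):

    def translate(self, text, max_length=256):
        # Hedged sketch; 'translate' and its settings are assumptions.
        inputs = self.tokenizer(text, return_tensors="pt")
        if torch.cuda.is_available():
            inputs = {k: v.cuda() for k, v in inputs.items()}
        with torch.no_grad():
            output_ids = self.model.generate(**inputs, max_length=max_length)
        return self.tokenizer.decode(output_ids[0], skip_special_tokens=True)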
Example #3
    def __init__(self, model_name='te'):
        model_name = model_name.lower()
        for x, y in LANGUAGE_ALISASES.items():
            model_name = model_name.replace(x, y)

        if model_name not in MODEL_URLS:
            print(f"model_name should be one of {list(MODEL_URLS.keys())}")
            return None

        home = os.path.expanduser("~")
        lang_path = os.path.join(home, ".IndicASR_" + model_name)
        if not os.path.exists(lang_path):
            os.mkdir(lang_path)

        for file_name, url in MODEL_URLS[model_name].items():
            file_path = os.path.join(lang_path, file_name)
            if os.path.exists(file_path):
                continue
            print(f"Downloading {file_name}")
            pydload.dload(url=url, save_to_path=file_path, max_time=None)

        self.processor = Wav2Vec2Processor.from_pretrained(lang_path)
        self.model = Wav2Vec2ForCTC.from_pretrained(lang_path)

        if torch.cuda.is_available():
            print(f"Using GPU")
            self.model = self.model.cuda()
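For context, a hedged sketch of transcription with the Wav2Vec2 pieces loaded above, following the standard transformers CTC flow (the method name and the 16 kHz mono float-audio assumption are mine, not the project's):

    def transcribe(self, speech, sampling_rate=16000):
        # Hedged sketch; assumes `speech` is a 1-D float array at 16 kHz.
        inputs = self.processor(speech, sampling_rate=sampling_rate,
                                return_tensors="pt", padding=True)
        input_values = inputs.input_values
        if torch.cuda.is_available():
            input_values = input_values.cuda()
        with torch.no_grad():
            logits = self.model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        return self.processor.batch_decode(predicted_ids)[0]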
Example #4
    def download_models(self, models_path):
        '''
        Download models from GitHub Releases if not exists
        '''
        for lang_code in self.lang_config:
            lang_name = self.lang_config[lang_code]["eng_name"]
            lang_model_path = os.path.join(models_path, lang_name)
            if not os.path.isdir(lang_model_path):
                print('Downloading model for language: %s' % lang_name)
                remote_url = MODEL_DOWNLOAD_URL_PREFIX + lang_name + '.zip'
                downloaded_zip_path = os.path.join(models_path,
                                                   lang_name + '.zip')
                dload(url=remote_url,
                      save_to_path=downloaded_zip_path,
                      max_time=None)

                if not os.path.isfile(downloaded_zip_path):
                    exit(
                        f'ERROR: Unable to download model from {remote_url} into {models_path}'
                    )

                with zipfile.ZipFile(downloaded_zip_path, 'r') as zip_ref:
                    zip_ref.extractall(models_path)

                if os.path.isdir(lang_model_path):
                    os.remove(downloaded_zip_path)
                else:
                    exit(
                        f'ERROR: Unable to find models in {lang_model_path} after download'
                    )
        return
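The download-extract-cleanup steps above generalize into a small helper; a standalone sketch with hypothetical arguments (not part of the project):

import os
import zipfile

from pydload import dload

def fetch_and_extract(url, models_path, name):
    # Download a zip with pydload, unpack it next to itself, then remove it.
    zip_path = os.path.join(models_path, name + '.zip')
    dload(url=url, save_to_path=zip_path, max_time=None)
    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        zip_ref.extractall(models_path)
    os.remove(zip_path)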
Example #5
    def __init__(self, model_name="default"):
        """
            model = Detector()
        """
        checkpoint_url = FILE_URLS[model_name]["checkpoint"]
        classes_url = FILE_URLS[model_name]["classes"]

        home = os.path.expanduser("~")
        model_folder = os.path.join(home, f".NudeNet/{model_name}")
        if not os.path.exists(model_folder):
            os.makedirs(model_folder)

        checkpoint_path = os.path.join(model_folder, "checkpoint")
        classes_path = os.path.join(model_folder, "classes")

        if not os.path.exists(checkpoint_path):
            print("Downloading the checkpoint to", checkpoint_path)
            pydload.dload(checkpoint_url,
                          save_to_path=checkpoint_path,
                          max_time=None)

        if not os.path.exists(classes_path):
            print("Downloading the classes list to", classes_path)
            pydload.dload(classes_url,
                          save_to_path=classes_path,
                          max_time=None)

        self.detection_model = models.load_model(checkpoint_path,
                                                 backbone_name="resnet50")
        with open(classes_path) as f:
            self.classes = [c.strip() for c in f if c.strip()]
Example #6
    def __init__(self):
        url = "https://github.com/notAI-tech/NudeNet/releases/download/v0/classifier_lite.onnx"
        home = os.path.expanduser("~")
        model_folder = os.path.join(home, ".NudeNet/")
        if not os.path.exists(model_folder):
            os.mkdir(model_folder)

        model_path = os.path.join(model_folder, os.path.basename(url))

        if not os.path.exists(model_path):
            print("Downloading the checkpoint to", model_path)
            pydload.dload(url, save_to_path=model_path, max_time=None)

        self.lite_model = cv2.dnn.readNet(model_path)
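A hedged inference sketch for the OpenCV DNN model loaded above (the 256x256 input size and 1/255 scaling are assumptions, not from the source):

    def classify_lite(self, image_path):
        # Hedged sketch; input geometry and preprocessing are assumptions.
        img = cv2.imread(image_path)
        blob = cv2.dnn.blobFromImage(img, scalefactor=1 / 255.0, size=(256, 256))
        self.lite_model.setInput(blob)
        return self.lite_model.forward()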
Example #7
    def __init__(self, lang_code):
        """
        Initialize deeptranslit

        Parameters:

        lang_code (str): Name or code of the language. (Currently supported: hindi/hi)

        """

        if lang_code in lang_code_mapping:
            lang_code = lang_code_mapping[lang_code]

        if lang_code not in model_links:
            print("DeepTranslit doesn't support '" + lang_code + "' yet.")
            print(
                "Please raise an issue at https://github.com/bedapudi6788/deeptranslit to add this language to the future checklist."
            )
            return None

        # loading the model
        home = os.path.expanduser("~")
        lang_path = os.path.join(home, '.DeepTranslit_' + lang_code)
        checkpoint_path = os.path.join(lang_path, 'checkpoint')
        params_path = os.path.join(lang_path, 'params')

        if not os.path.exists(lang_path):
            os.mkdir(lang_path)

        if not os.path.exists(checkpoint_path):
            print('Downloading checkpoint',
                  model_links[lang_code]['checkpoint'], 'to', checkpoint_path)
            pydload.dload(url=model_links[lang_code]['checkpoint'],
                          save_to_path=checkpoint_path,
                          max_time=None)

        if not os.path.exists(params_path):
            print('Downloading model params', model_links[lang_code]['params'],
                  'to', params_path)
            pydload.dload(url=model_links[lang_code]['params'],
                          save_to_path=params_path,
                          max_time=None)

        self.model, self.params = build_model(params_path=params_path,
                                              enc_lstm_units=64,
                                              use_gru=True,
                                              display_summary=False)
        self.model.load_weights(checkpoint_path)
Example #8
    def __init__(self):
        '''
            model = Detector()
        '''
        url = 'https://github.com/bedapudi6788/NudeNet/releases/download/v0/detector_model'
        home = os.path.expanduser("~")
        model_folder = os.path.join(home, '.NudeNet/')
        if not os.path.exists(model_folder):
            os.mkdir(model_folder)
        
        model_path = os.path.join(model_folder, 'detector')

        if not os.path.exists(model_path):
            print('Downloading the checkpoint to', model_path)
            pydload.dload(url, save_to_path=model_path, max_time=None)

        Detector.detection_model = models.load_model(model_path, backbone_name='resnet101')
Example #9
    def __init__(self):
        """
        model = Classifier()
        """
        url = "https://github.com/notAI-tech/NudeNet/releases/download/v0/classifier_model.onnx"
        home = os.path.expanduser("~")
        model_folder = os.path.join(home, ".NudeNet/")
        if not os.path.exists(model_folder):
            os.mkdir(model_folder)

        model_path = os.path.join(model_folder, os.path.basename(url))

        if not os.path.exists(model_path):
            print("Downloading the checkpoint to", model_path)
            pydload.dload(url, save_to_path=model_path, max_time=None)

        self.nsfw_model = onnxruntime.InferenceSession(model_path)
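A hedged sketch of running the onnxruntime session above (preprocessing is omitted and the input is assumed to already be a float32 batch matching the model's input shape; the method name is an assumption):

    def classify(self, batch):
        # Hedged sketch; `batch` must be a preprocessed float32 NumPy array.
        input_name = self.nsfw_model.get_inputs()[0].name
        return self.nsfw_model.run(None, {input_name: batch})[0]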
Example #10
    def __init__(self):
        '''
            model = Classifier()
        '''
        url = 'https://github.com/bedapudi6788/NudeNet/releases/download/v0/classifier_model'
        home = os.path.expanduser("~")
        model_folder = os.path.join(home, '.NudeNet/')
        if not os.path.exists(model_folder):
            os.mkdir(model_folder)

        model_path = os.path.join(model_folder, 'classifier')

        if not os.path.exists(model_path):
            print('Downloading the checkpoint to', model_path)
            pydload.dload(url, save_to_path=model_path, max_time=None)

        Classifier.nsfw_model = keras.models.load_model(model_path)
Example #11
    def __init__(self):
        """
            model = Classifier()
        """
        url = "https://github.com/bedapudi6788/NudeNet/releases/download/v0/classifier_model"
        home = os.path.expanduser("~")
        model_folder = os.path.join(home, ".NudeNet/")
        if not os.path.exists(model_folder):
            os.mkdir(model_folder)

        model_path = os.path.join(model_folder, "classifier")

        if not os.path.exists(model_path):
            print("Downloading the checkpoint to", model_path)
            pydload.dload(url, save_to_path=model_path, max_time=None)

        self.nsfw_model = keras.models.load_model(model_path)
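For context, a hedged classification sketch for the Keras model above (the target size and 1/255 scaling are assumptions, not taken from the project):

    def classify(self, image_path, size=(256, 256)):
        # Hedged sketch; target size and scaling are assumptions.
        img = keras.preprocessing.image.load_img(image_path, target_size=size)
        arr = keras.preprocessing.image.img_to_array(img) / 255.0
        return self.nsfw_model.predict(arr[None, ...])[0]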
Example #12
    def __init__(self, language='english', checkpoint_local_path=None):

        model_name = language.lower()

        if model_name not in MODEL_URLS:
            logging.warning(
                f"model_name should be one of {list(MODEL_URLS.keys())}")
            return None

        home = os.path.expanduser("~")
        lang_path = os.path.join(home, ".FastPunct_" + model_name)

        if checkpoint_local_path:
            lang_path = checkpoint_local_path

        if not os.path.exists(lang_path):
            os.mkdir(lang_path)

        for file_name, url in MODEL_URLS[model_name].items():
            file_path = os.path.join(lang_path, file_name)
            if os.path.exists(file_path):
                continue
            logging.info(f"Downloading {file_name}")
            pydload.dload(url=url, save_to_path=file_path, max_time=None)

        self.tokenizer = T5Tokenizer.from_pretrained(lang_path)

        self.model = T5ForConditionalGeneration.from_pretrained(
            lang_path, return_dict=True, torchscript=True)

        # self.model.eval()
        # example_input = torch.rand(1, 3, 224, 224)  # after test, will get 'size mismatch' error message with size 256x256
        # traced_model = torch.jit.trace(self.model, example_input)
        #
        # coremlModel = ct.convert(
        #     traced_model,
        #     inputs=[ct.ImageType(name="input_1", shape=example_input.shape)]# name "input_1" is used in 'quickstart'
        # )

        if torch.cuda.is_available():
            logging.info(f"Using GPU")
            self.model = self.model.cuda()

        logging.info("Warming up")
        self.punct(["i am batman"])
    def __init__(self, lang_code="en", weights_path=None, params_path=None):
        if lang_code not in model_links and lang_code in lang_code_mapping:
            lang_code = lang_code_mapping[lang_code]

        if lang_code not in model_links:
            print("fastPunct doesn't support '" + lang_code + "' yet.")
            print(
                "Please raise an issue at https://github.com/notai-tech/fastPunct/ to add this language to the future checklist."
            )
            return None

        home = os.path.expanduser("~")
        lang_path = os.path.join(home, '.fastPunct_' + lang_code)
        weights_path = os.path.join(lang_path, 'checkpoint.h5')
        params_path = os.path.join(lang_path, 'params.pkl')

        if not os.path.exists(lang_path):
            os.mkdir(lang_path)

        if not os.path.exists(weights_path):
            print('Downloading checkpoint',
                  model_links[lang_code]['checkpoint'], 'to', weights_path)
            pydload.dload(url=model_links[lang_code]['checkpoint'],
                          save_to_path=weights_path,
                          max_time=None)

        if not os.path.exists(params_path):
            print('Downloading model params', model_links[lang_code]['params'],
                  'to', params_path)
            pydload.dload(url=model_links[lang_code]['params'],
                          save_to_path=params_path,
                          max_time=None)

        with open(params_path, "rb") as file:
            self.parameters = pickle.load(file)
        self.parameters["reverse_enc_dict"] = {
            i: c
            for c, i in self.parameters["enc_token"].word_index.items()
        }
        self.model = get_model_instance(self.parameters)
        self.model.load_weights(weights_path)
        self.allowed_extras = get_extra_chars(self.parameters)
Example #14
File: loit.py  Project: bedapudi6788/LOIT
def download(lang_name, to_download):
    if lang_name not in urls:
        logging.error(lang_name + ' is not supported yet.')
        exit()

    if to_download not in urls[lang_name]:
        logging.error(lang_name + ' : ' + to_download +
                      ' is not supported yet.')
        exit()

    home = os.path.expanduser("~")
    lang_path = os.path.join(home, '.LOIT_' + lang_name)
    to_download_path = os.path.join(lang_path, to_download)
    url = urls[lang_name][to_download]

    if not os.path.exists(lang_path):
        os.mkdir(lang_path)

    logging.info('Downloading ' + lang_name + ' : ' + to_download)
    pydload.dload(url=url, save_to_path=to_download_path, max_time=None)

    return True
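A hypothetical invocation; the real keys come from the urls dict in loit.py:

download('telugu', 'checkpoint')  # hypothetical language/resource names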
Example #15
def detect(url):
    path = os.path.join(os.getcwd(), str(uuid.uuid4()))
    dload_status = pydload.dload(url, path, timeout=2, max_time=3)

    if not dload_status:
        return json.dumps({"error": "File too large to download"})
    res = detector.detect(path)

    try:
        os.remove(path)
    except Exception as e:
        logging.error(f"Unable to remove stored image: {path} ({e})")

    return res
Example #16
def nudenet_classifier_from_url():
    if request.method == 'GET':
        url = request.args.get('url')
    elif request.method == 'POST':
        url = request.json['url']

    try:
        path = str(uuid.uuid4())
        dload_status = pydload.dload(url, path)

        if not dload_status:
            return json.dumps({'error': 'File too large to download'})
        res = classifier.classify(path)[path]
        for key, value in res.items():
            res[key] = str(value)
        return json.dumps(res)
    except Exception as ex:
        return json.dumps({'error': str(ex)})
Example #17
def classifier_from_url():
    if request.method == 'GET':
        url = request.args.get('url')
    elif request.method == 'POST':
        url = request.json.get('url')

    if url is None:
        raise InvalidUsage('The url field is missing')

    try:
        path = str(uuid.uuid4())
        dload_status = pydload.dload(url, path, timeout=2, max_time=3)

        if not dload_status:
            os.remove(path)
            raise InvalidUsage('File is too large', status_code=413)

        img = cv2.imread(str(path))
        os.remove(path)
        res = make_prediction(img)
        return jsonify(res)
    except Exception as ex:
        print(ex)
        raise InvalidUsage(str(ex), status_code=500)
Example #18
import cv2
import numpy as np

WEIGHTS_URL = 'https://github.com/notAi-tech/LogoDet/releases/download/292_classes_v1/weights'
CLASSES_URL = 'https://github.com/notAi-tech/LogoDet/releases/download/292_classes_v1/classes'

home = os.path.expanduser("~")
model_folder = os.path.join(home, '.LogoDet/')
if not os.path.exists(model_folder):
    os.mkdir(model_folder)

model_path = os.path.join(model_folder, 'weights')

if not os.path.exists(model_path):
    print('Downloading the checkpoint to', model_path)
    pydload.dload(WEIGHTS_URL, save_to_path=model_path, max_time=None)

classes_path = os.path.join(model_folder, 'classes')

if not os.path.exists(classes_path):
    print('Downloading the class list to', classes_path)
    pydload.dload(CLASSES_URL, save_to_path=classes_path, max_time=None)

detection_model = models.load_model(model_path, backbone_name='resnet50')
with open(classes_path) as f:
    classes = [i.strip() for i in f if i.strip()]

def detect_single(img_path, min_prob=0.4):
    image = read_image_bgr(img_path)
    image = preprocess_image(image)
    image, scale = resize_image(image)
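    # Hedged continuation sketch (not the project's verbatim code): the usual
    # keras-retinanet flow after resize_image is a forward pass, rescaling the
    # boxes back to original-image coordinates, and filtering by min_prob.
    boxes, scores, labels = detection_model.predict_on_batch(
        np.expand_dims(image, axis=0))
    boxes /= scale

    results = []
    for box, score, label in zip(boxes[0], scores[0], labels[0]):
        if score < min_prob:
            continue
        results.append({'box': box.astype(int).tolist(),
                        'score': float(score),
                        'label': classes[int(label)]})
    return results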
Example #19
from vosk import Model, KaldiRecognizer, SetLogLevel

SetLogLevel(0)

MAX_WAV_LEN = int(os.getenv("MAX_WAV_LEN", "0"))

SAMPLE_RATE = int(os.getenv("SAMPLE_RATE", "0"))

model_zip_url = os.getenv(
    "MODEL_ZIP_URL",
    "http://alphacephei.com/kaldi/models/vosk-model-en-us-aspire-0.2.zip",
)

if not os.path.exists("./model"):
    pydload.dload(model_zip_url, save_to_path="./model.zip", max_time=None)

    with zipfile.ZipFile("./model.zip", "r") as zip_ref:
        zip_ref.extractall("./model")

    os.remove("./model.zip")

files_in_model_dir = glob.glob("./model/*")

if len(files_in_model_dir) == 1:
    model = Model(files_in_model_dir[0])

else:
    model = Model("./model")

if not SAMPLE_RATE:
Example #20
    def __init__(self,
                 lang_code=None,
                 checkpoint_path=None,
                 params_path=None,
                 utils_path=None,
                 tf_serving=False,
                 checkpoint_name=None):
        """
        Initialize deepsegment

        Parameters:

        lang_code (str): Name or code of the language. (default is english)

        checkpoint_path (str): If using with custom models, pass the custom model checkpoint path and set lang_code=None

        params_path (str): See checkpoint_path.

        utils_path (str): See checkpoint_path.

        tf_serving (bool): If using with tf_serving docker image, set to True.

        checkpoint_name (str): Name of a finetuned checkpoint to load instead of the default one.

        """
        if lang_code:
            if lang_code not in model_links and lang_code in lang_code_mapping:
                lang_code = lang_code_mapping[lang_code]

            if lang_code not in model_links:
                print("DeepSegment doesn't support '" + lang_code + "' yet.")
                print(
                    "Please raise an issue at https://github.com/bedapudi6788/deepsegment to add this language to the future checklist."
                )
                # return None

            # loading the model
            home = os.path.expanduser("~")
            lang_path = os.path.join(home, '.DeepSegment_' + lang_code)

            checkpoint_path = os.path.join(lang_path, 'checkpoint')

            if checkpoint_name:
                if not checkpoint_name.startswith('checkpoint_'):
                    checkpoint_name = 'checkpoint_' + checkpoint_name

                finetuned_checkpoint_path = os.path.join(
                    lang_path, checkpoint_name)
                if not os.path.exists(finetuned_checkpoint_path):
                    print('There is no file present at',
                          finetuned_checkpoint_path)
                    print('All the files present at that path are:',
                          glob.glob(lang_path + '/*'))
                    print('Loading the default checkpoint')
                else:
                    checkpoint_path = finetuned_checkpoint_path
            else:
                other_checkpoints = glob.glob(checkpoint_path + '_*')
                if other_checkpoints:
                    other_checkpoints = [
                        i.split('/')[-1] for i in other_checkpoints
                    ]
                    print(
                        '\n=============================================================================================================='
                    )
                    print(
                        "NOTE: There are multiple checkpoints present for this language."
                    )
                    print(other_checkpoints)
                    print('Default checkpoint is currently being used.')
                    print(
                        'To use a different checkpoint, use DeepSegment("en", checkpoint_name=name_of_the_checkpoint)'
                    )
                    print(
                        '==============================================================================================================\n'
                    )

            utils_path = os.path.join(lang_path, 'utils')
            params_path = os.path.join(lang_path, 'params')

            if not os.path.exists(lang_path):
                os.mkdir(lang_path)

            if not os.path.exists(checkpoint_path) and not tf_serving:
                print('Downloading checkpoint',
                      model_links[lang_code]['checkpoint'], 'to',
                      checkpoint_path)
                pydload.dload(url=model_links[lang_code]['checkpoint'],
                              save_to_path=checkpoint_path,
                              max_time=None)

            if not os.path.exists(utils_path):
                print('Downloading preprocessing utils',
                      model_links[lang_code]['utils'], 'to', utils_path)
                pydload.dload(url=model_links[lang_code]['utils'],
                              save_to_path=utils_path,
                              max_time=None)

            if not os.path.exists(params_path):
                print('Downloading model params',
                      model_links[lang_code]['params'], 'to', params_path)
                pydload.dload(url=model_links[lang_code]['params'],
                              save_to_path=params_path,
                              max_time=None)

        if not tf_serving:
            DeepSegment.seqtag_model = model_from_json(
                open(params_path).read(), custom_objects={'CRF': CRF})
            DeepSegment.seqtag_model.load_weights(checkpoint_path)

        elif tf_serving:
            if not is_tfserving_installed:
                logging.exception(
                    "Tensorflow serving is not installed. Cannot be used with tensorflow serving docker images."
                )
                logging.exception(
                    "Run pip install tensorflow-serving-api==1.12.0 if you want to use it with tf serving."
                )
                exit()
            DeepSegment.seqtag_model = 'deepsegment_' + lang_code

        with open(utils_path, 'rb') as f:
            DeepSegment.data_converter = pickle.load(f)
Example #21
    def __init__(self, lang_code, rank='auto'):
        """
        Initialize deeptranslit

        Parameters:

        lang_code (str): Name or code of the language. (Currently supported: hindi/hi)

        rank (str): Mode of ranking. In default mode ('auto') kenlm will be used if available. (simple|kenlm|auto are the supported options)

        """

        if lang_code in lang_code_mapping:
            lang_code = lang_code_mapping[lang_code]

        if lang_code not in model_links:
            print("DeepTranslit doesn't support '" + lang_code + "' yet.")
            print(
                "Please raise an issue at https://github.com/bedapudi6788/deeptranslit to add this language to the future checklist."
            )
            return None

        # loading the model
        home = os.path.expanduser("~")
        lang_path = os.path.join(home, '.DeepTranslit_' + lang_code)
        checkpoint_path = os.path.join(lang_path, 'checkpoint')
        params_path = os.path.join(lang_path, 'params')
        words_path = os.path.join(lang_path, 'words')
        lm_path = os.path.join(lang_path, 'lm')

        if not os.path.exists(lang_path):
            os.mkdir(lang_path)

        if not os.path.exists(checkpoint_path):
            print('Downloading checkpoint',
                  model_links[lang_code]['checkpoint'], 'to', checkpoint_path)
            pydload.dload(url=model_links[lang_code]['checkpoint'],
                          save_to_path=checkpoint_path,
                          max_time=None)

        if not os.path.exists(params_path):
            print('Downloading model params', model_links[lang_code]['params'],
                  'to', params_path)
            pydload.dload(url=model_links[lang_code]['params'],
                          save_to_path=params_path,
                          max_time=None)

        if not os.path.exists(words_path):
            print('Downloading words', model_links[lang_code]['words'], 'to',
                  words_path)
            pydload.dload(url=model_links[lang_code]['words'],
                          save_to_path=words_path,
                          max_time=None)

        if not os.path.exists(lm_path):
            print('Downloading lm', model_links[lang_code]['lm'], 'to',
                  lm_path)
            pydload.dload(url=model_links[lang_code]['lm'],
                          save_to_path=lm_path,
                          max_time=None)

        DeepTranslit.model, DeepTranslit.params = build_model(
            params_path=params_path,
            enc_lstm_units=64,
            use_gru=True,
            display_summary=False)
        DeepTranslit.model.load_weights(checkpoint_path)

        with open(words_path, 'rb') as f:
            DeepTranslit.words = pickle.load(f)

        if kenlm_available and rank in {'auto', 'kenlm'}:
            logging.warning('Loading KenLM.')
            DeepTranslit.lm = kenlm.Model(lm_path)
            DeepTranslit.rank = rank
Example #22
import os
import pydload

if not os.path.exists("checkpoint"):
    pydload.dload(
        "https://storage.googleapis.com/audioset/yamnet.h5",
        save_to_path="checkpoint",
        max_time=None,
    )

import numpy as np
import resampy
import soundfile as sf
import tensorflow as tf

import params
import yamnet as yamnet_model

graph = tf.Graph()
with graph.as_default():
    yamnet = yamnet_model.yamnet_frames_model(params)
    yamnet.load_weights("./checkpoint")

yamnet_classes = yamnet_model.class_names("yamnet_class_map.csv")


def read_wav(w, max_audio_time=30):
    wav_data, sr = sf.read(w, dtype=np.int16)
    waveform = wav_data / 32768.0  # scale int16 PCM samples to [-1.0, 1.0]

    if len(waveform.shape) > 1: