Example #1
import os

from jina.logging.base import get_logger


def test_logging_message():
    # 'success' verbosity is expected to filter out the debug and info records below
    os.environ['JINA_LOG_VERBOSITY'] = 'success'
    logger = get_logger('test_logger')
    logger.debug('this is test debug message')
    logger.info('this is test info message')
    logger.success('this is test success message')
    logger.warning('this is test warning message')
    logger.error('this is test error message')
    logger.critical('this is test critical message')
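
The test above relies on JINA_LOG_VERBOSITY acting as a severity threshold. As a rough illustration of that kind of filtering, here is a minimal stdlib-only sketch; the _VERBOSITY_TO_LEVEL mapping and the make_logger helper are illustrative assumptions, not Jina's actual implementation:

import logging
import os

# hypothetical mapping from a verbosity name to a stdlib logging level
_VERBOSITY_TO_LEVEL = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'success': logging.INFO + 5,  # custom level sitting between INFO and WARNING
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}


def make_logger(context: str) -> logging.Logger:
    """Illustrative stand-in for get_logger: honors JINA_LOG_VERBOSITY as a threshold."""
    logging.addLevelName(logging.INFO + 5, 'SUCCESS')
    logger = logging.getLogger(context)
    verbosity = os.environ.get('JINA_LOG_VERBOSITY', 'info').lower()
    logger.setLevel(_VERBOSITY_TO_LEVEL.get(verbosity, logging.INFO))
    if not logger.handlers:
        logger.addHandler(logging.StreamHandler())
    return logger

With verbosity set to 'success', a logger built this way would drop the debug and info calls and emit everything from the custom SUCCESS level upward.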
Example #2
    def __init__(self, host: str, port: int, collection_name: str):
        """
        Initialize a MilvusDBHandler.

        :param host: Host of the Milvus server
        :param port: Port to connect to the Milvus server
        :param collection_name: Name of the collection where the Handler will insert and query vectors.
        """
        self.logger = get_logger(self.__class__.__name__)
        self.host = host
        self.port = str(port)
        self.collection_name = collection_name
        self.milvus_client = None  # no client is created at construction time
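
The constructor above only stores the connection parameters and leaves self.milvus_client as None, which points to a lazy-connection pattern. Below is a self-contained sketch of that pattern; LazyMilvusHandler and its client_factory hook are hypothetical stand-ins, not the Milvus SDK or the handler's real connection logic:

from typing import Any, Callable, Optional


class LazyMilvusHandler:
    """Illustrative only: keep connection parameters, create the client on first use."""

    def __init__(self, host: str, port: int, collection_name: str,
                 client_factory: Optional[Callable[[str, str], Any]] = None):
        self.host = host
        self.port = str(port)
        self.collection_name = collection_name
        self.milvus_client = None
        # client_factory is a hypothetical hook standing in for the real Milvus client constructor
        self._client_factory = client_factory

    def connect(self) -> Any:
        if self.milvus_client is None and self._client_factory is not None:
            self.milvus_client = self._client_factory(self.host, self.port)
        return self.milvus_client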
Example #3
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

import os
from typing import Optional
import numpy as np

from .. import BaseEncoder
from ..frameworks import BaseTextTFEncoder, BaseTextTorchEncoder
from ..helper import reduce_mean, reduce_max, reduce_min, reduce_cls
from ...decorators import batching, as_ndarray
from jina.logging.base import get_logger

logger = get_logger("transformer_encoder")


def auto_reduce(model_outputs, mask_2d, model_name):
    """
    Automatically creates a sentence embedding from its token embeddings.
        * For BERT-like models (BERT, RoBERTa, DistillBERT, Electra ...) uses embedding of first token
        * For XLM and XLNet models uses embedding of last token
        * Assumes that other models are language-model like and uses embedding of last token
    """
    if "bert" in model_name or "electra" in model_name:
        return reduce_cls(model_outputs, mask_2d)
    if "xlnet" in model_name:
        return reduce_cls(model_outputs, mask_2d, cls_pos="tail")
    logger.warning("Using embedding of a last token as a sequence embedding. "
                   "If that's not desirable, change `pooling_strategy`")
    return reduce_cls(model_outputs, mask_2d, cls_pos="tail")
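
For intuition about the pooling above, here is a small numpy sketch of what a reduce_cls-style helper could do, returning the first or last non-padded token embedding per sequence; reduce_cls_sketch is an assumption made for illustration, not the actual reduce_cls from the helper module:

import numpy as np


def reduce_cls_sketch(token_embeddings: np.ndarray, mask_2d: np.ndarray, cls_pos: str = 'head') -> np.ndarray:
    """Pick one token embedding per sequence as the sentence embedding.

    :param token_embeddings: float array of shape (batch, seq_len, dim)
    :param mask_2d: 0/1 array of shape (batch, seq_len), 1 marking real tokens
    :param cls_pos: 'head' takes the first token, 'tail' takes the last non-padded token
    """
    if cls_pos == 'head':
        return token_embeddings[:, 0, :]
    last_idx = mask_2d.sum(axis=1).astype(int) - 1  # index of the last real token per sequence
    return token_embeddings[np.arange(token_embeddings.shape[0]), last_idx, :]

For example, reduce_cls_sketch(np.zeros((2, 5, 8)), np.ones((2, 5)), cls_pos='tail') returns an array of shape (2, 8).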
Example #4
    def __init__(self, client, collection_name: str):
        self.logger = get_logger(self.__class__.__name__)
        self.client = client
        self.collection_name = collection_name