Example #1
    def __init__(self,
                 loglevel,
                 configfile=None,
                 normalize_method=None,
                 git_model_id=None,
                 model_filename=None,
                 colorspace="BGR",
                 input_size=256):
        logger.debug(
            "Initializing %s: (loglevel: %s, configfile: %s, normalize_method: %s, "
            "git_model_id: %s, model_filename: '%s', colorspace: '%s'. input_size: %s)",
            self.__class__.__name__, loglevel, configfile, normalize_method,
            git_model_id, model_filename, colorspace, input_size)
        self.loglevel = loglevel
        self.normalize_method = normalize_method
        self.colorspace = colorspace.upper()
        self.input_size = input_size
        self.extract = Extract()
        self.init = None
        self.error = None

        # The input and output queues for the plugin.
        # See lib.queue_manager.QueueManager for getting queues
        self.queues = {"in": None, "out": None}

        #  Get model if required
        self.model_path = self.get_model(git_model_id, model_filename)

        # Approximate VRAM required for aligner. Used to calculate
        # how many parallel processes / batches can be run.
        # Be conservative to avoid OOM.
        self.vram = None
        logger.debug("Initialized %s", self.__class__.__name__)
Example #2
    def __init__(self, verbose=False):
        self.verbose = verbose
        self.cachepath = os.path.join(os.path.dirname(__file__), ".cache")
        self.extract = Extract()
        self.init = None

        # The input and output queues for the plugin.
        # See lib.multithreading.QueueManager for getting queues
        self.queues = {"in": None, "out": None}

        #  Path to model if required
        self.model_path = self.set_model_path()

        # Approximate VRAM required for aligner. Used to calculate
        # how many parallel processes / batches can be run.
        # Be conservative to avoid OOM.
        self.vram = None
Example #3
    def __init__(self, loglevel):
        logger.debug("Initializing %s", self.__class__.__name__)
        self.loglevel = loglevel
        self.cachepath = os.path.join(os.path.dirname(__file__), ".cache")
        self.extract = Extract()
        self.init = None

        # The input and output queues for the plugin.
        # See lib.queue_manager.QueueManager for getting queues
        self.queues = {"in": None, "out": None}

        #  Path to model if required
        self.model_path = self.set_model_path()

        # Approximate VRAM required for aligner. Used to calculate
        # how many parallel processes / batches can be run.
        # Be conservative to avoid OOM.
        self.vram = None
        logger.debug("Initialized %s", self.__class__.__name__)