Example no. 1
0
    def _check_constraints(self):
        """Verify that the host has enough free RAM and disk space to build the engine.

        Raises:
            EngineBuilder.HWConstraintViolated: if free RAM or free disk space
                is below the recommended threshold for the corpus size on disk.
        """
        free_space_on_disk = fileutils.df(self._engine.path)[2]
        corpus_size_on_disk = sum(fileutils.du(root) for root in self._roots)
        free_memory = fileutils.free()

        # Rule of thumb: 1G of RAM for every 350M of corpus on disk.
        recommended_mem = self._GB * corpus_size_on_disk / (350 * self._MB)
        # Rule of thumb: 10x the corpus size in free storage.
        recommended_disk = 10 * corpus_size_on_disk

        # NOTE(review): the original wrapped these two checks in a redundant
        # outer "if A or B" that re-tested each condition before raising;
        # checking each constraint directly is equivalent and simpler.
        if free_memory < recommended_mem:
            raise EngineBuilder.HWConstraintViolated(
                'more than %.fG of RAM recommended, only %.fG available' %
                (recommended_mem / self._GB, free_memory / self._GB))
        if free_space_on_disk < recommended_disk:
            raise EngineBuilder.HWConstraintViolated(
                'more than %.fG of storage recommended, only %.fG available'
                % (recommended_disk / self._GB,
                   free_space_on_disk / self._GB))
Example no. 2
0
    def _check_constraints(self):
        """Verify that at least one GPU with enough RAM is available for training.

        Raises:
            EngineBuilder.HWConstraintViolated: if no GPU is available, or if
                the selected GPU has less than the recommended amount of RAM.
        """
        recommended_gpu_ram = 2 * self._GB

        # Get the list of GPUs to employ using torch utils
        # (this takes into account the user's choice).
        gpus = torch_utils.torch_get_gpus()

        # Idiomatic emptiness check: covers both None and an empty list.
        if not gpus:
            raise EngineBuilder.HWConstraintViolated(
                'No GPU for Neural engine training, the process will take very long time to complete.')

        # At the moment training is mono-GPU and we only use the first
        # available GPU for training, so just check constraints for it.
        # This may change in the future.
        gpus = [gpus[0]]

        gpus_ram = self._get_gpus_ram(gpus)

        # Pair each GPU id with its RAM instead of indexing via range(len(...)).
        for gpu, ram in zip(gpus, gpus_ram):
            if ram < recommended_gpu_ram:
                raise EngineBuilder.HWConstraintViolated(
                    'The RAM of GPU %d is only %.fG. More than %.fG of RAM recommended for each GPU.' %
                    (gpu, ram / self._GB, recommended_gpu_ram / self._GB)
                )