Example #1
0
def get_gpu_useage():
    """Return the average GPU memory usage across all GPUs, in percent.

    Returns:
        float: mean of per-GPU ``memoryUsed / memoryTotal * 100`` rounded to
        two decimals; ``0.0`` when no GPU is present.
    """
    gpus = getGPUs()
    # Bug fix: the original divided by len(gpus) unconditionally and raised
    # ZeroDivisionError on machines without a GPU.
    if not gpus:
        return 0.0
    used = sum(float(gpu.memoryUsed / gpu.memoryTotal * 100) for gpu in gpus)
    return round(used / len(gpus), 2)
Example #2
0
def benim_hwid():
    """Return a hardware identifier (HWID) for this machine.

    On Windows (``sistem == 'nt'``) the WMI CSProduct UUID is used; on other
    systems the UUID of the first GPU reported by GPUtil is used.

    Returns:
        str: the hardware identifier.

    Raises:
        RuntimeError: if no GPU is available to derive an identifier from.
    """
    # Capture HWID (original Turkish comment: "HWID Yakala")
    if sistem == 'nt':
        benim_hwid = check_output('wmic csproduct get uuid').decode().split(
            '\n')[1].strip()
    else:
        try:
            from GPUtil import getGPUs
        except ModuleNotFoundError:
            from os import system
            system("pip3 install gputil")
            system("clear")
            # Bug fix: after installing the package the name still has to be
            # imported, otherwise getGPUs below is undefined.
            from GPUtil import getGPUs

        if ekran_kartlari := getGPUs():
            benim_hwid = ekran_kartlari[0].uuid[4:].upper()
        else:
            # Bug fix: the original 'else:' branch had no body (SyntaxError).
            raise RuntimeError("No GPU available to derive a HWID from")
    # Bug fix: the computed identifier was never returned.
    return benim_hwid
def set_gpu(id=-1):
    """
    Set GPU device or select the one with the lowest memory usage (None for
    CPU-only)
    """
    if id is None:
        # CPU-only mode: report it and leave CUDA_VISIBLE_DEVICES untouched.
        print(colored('GPU not selected', 'yellow'))
        return

    # id == -1 requests automatic selection of the least-loaded GPU.
    if id == -1:
        device = getFirstAvailable(order='memory')[0]
    else:
        device = id

    try:
        name = getGPUs()[device].name
    except IndexError:
        # Requested index does not exist; fall back to the best available GPU.
        print('The selected GPU does not exist. Switching to the most '
              'available one.')
        device = getFirstAvailable(order='memory')[0]
        name = getGPUs()[device].name

    print(colored('GPU selected: %d - %s' % (device, name), 'yellow'))
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device)
Example #4
def get_gpu_environment() -> Dict[str, str]:
    """Get environment variables for using CUDA enabled GPUs."""
    env = os.environ.copy()

    wanted_uuid = env.get("WORKER_GPU_UUID")
    matches = (gpu.id for gpu in getGPUs() if gpu.uuid == wanted_uuid)
    first_match = next(matches, None)

    # Only set this env var if WORKER_GPU_UUID was supplied,
    # and it matches an installed GPU
    if first_match is not None:
        env["CUDA_VISIBLE_DEVICES"] = str(first_match)

    return env
Example #5
def getStatistics():
    """Collect CPU/RAM/network (and optionally GPU) stats as a JSON response."""
    iface = data["system"]["interface"]
    counters = psutil.net_io_counters(pernic=True)[iface]

    stats = {
        "cpu": psutil.cpu_percent(interval=1, percpu=True),
        "ram": psutil.virtual_memory()[2],
        "network": {
            # counters[1] is bytes received, counters[0] is bytes sent;
            # both reported as whole gigabytes.
            "in": round(counters[1] / 1000000000),
            "out": round(counters[0] / 1000000000),
        },
    }

    if data['system']['enableGPU']:
        first_gpu = getGPUs()[0]
        stats['gpu'] = {
            "name": first_gpu.name,
            "temperature": first_gpu.temperature,
            "load": round(first_gpu.load * 100, 1),
            "memory": round(first_gpu.memoryUtil * 100, 1),
        }

    return jsonify(stats)
    def run(self) -> None:
        """Worker loop: sample system statistics and emit them periodically.

        Polls GPU load/temperature (first GPU only), CPU load, RAM usage and
        per-second network throughput on a fixed NIC, emits the sample via
        self.system_info, then sleeps self.system_refresh_rate seconds.
        Runs until self.thread_running is set to False elsewhere.
        """

        self.thread_running = True
        # NOTE(review): NIC name is hard-coded; the lookups below raise
        # KeyError if "Ethernet 2" is absent on this machine — confirm.
        network_card = "Ethernet 2"

        _ul = 0.0  # upload rate, bytes/s
        _dl = 0.0  # download rate, bytes/s
        _t0 = time()  # timestamp of the previous sample

        # Cumulative counters: index 0 is bytes sent, index 1 bytes received.
        _upload = net_io_counters(pernic=True)[network_card][0]
        _download = net_io_counters(pernic=True)[network_card][1]
        _up_down = (_upload, _download)

        while self.thread_running:
            _gpu = getGPUs()

            # First GPU only; IndexError on GPU-less machines (unhandled).
            _gpu_load = round(_gpu[0].load * 100, 1)
            _gpu_temp = _gpu[0].temperature
            _cpu_load = cpu_percent()
            _memory_load = virtual_memory().percent

            # Re-read the cumulative counters, keeping the previous pair so
            # throughput can be derived from the delta.
            _last_up_down = _up_down
            _upload = net_io_counters(pernic=True)[network_card][0]
            _download = net_io_counters(pernic=True)[network_card][1]
            _up_down = (_upload, _download)

            _t1 = time()
            # Bytes per second since the previous sample.
            _ul, _dl = [
                (_now - _last) / (_t1 - _t0)
                for _now, _last in zip(_up_down, _last_up_down)
            ]
            _t0 = time()

            # Convert bytes/s to human-readable bits/s (hence the * 8).
            _ul_l = self.humanbits(_ul * 8)
            _dl_l = self.humanbits(_dl * 8)

            self.system_info.emit(
                [_gpu_load, _gpu_temp, _cpu_load, _memory_load, _dl_l, _ul_l]
            )
            sleep(self.system_refresh_rate)
def _get_encoders_list() -> dict:
    """Build the available encoder names per codec family.

    Hardware encoders (NVENC/AMF/QSV) are added when a GPU whose name
    contains the matching vendor string is detected; software encoders are
    always appended last as the universal fallback.

    Returns:
        dict: codec family -> list of ffmpeg encoder names.
    """
    x264_encoders = []
    x265_encoders = []
    for gpu in getGPUs():
        gpu_name = gpu.name
        # Bug fix: re.match() only matched at the very start of the device
        # name; a substring test finds the vendor token anywhere in the name
        # (a strict superset of the old behaviour) and needs no regex.
        if "NVIDIA" in gpu_name:
            x264_encoders.append("h264_nvenc")
            x265_encoders.append("hevc_nvenc")
        if "AMD" in gpu_name:
            x264_encoders.append("h264_amf")
            x265_encoders.append("hevc_amf")
        if "Intel" in gpu_name:
            x264_encoders.append("h264_qsv")
            x265_encoders.append("hevc_qsv")
    # Software encoders come last so they act as the fallback choice.
    x264_encoders.append("libx264")
    x265_encoders.append("libx265")
    return {
        "x264": x264_encoders,
        "x265": x265_encoders,
        "ProRes": ["prores_ks"],
        "AV1": ["libaom-av1"],
    }
Example #8
 def printDevice(self):
     """Print which device this object is bound to: 'CPU' or 'GPU <i> - <name>'."""
     # Bug fix: 'is' compared object identity, and torch.device('cpu')
     # constructs a new object on each call, so the CPU branch was
     # effectively unreachable. Devices must be compared with '=='.
     if self.device == torch.device('cpu'):
         print('CPU')
     else:
         name = getGPUs()[self.device.index].name
         print('GPU %d - %s' % (self.device.index, name))
Example #9
 def printDevice(self):
     """Print the compute device in use: 'CPU' or 'GPU <id> - <name>'."""
     device = self.device
     if device is None:
         print('CPU')
         return
     gpu_name = getGPUs()[device.id].name
     print('GPU %d - %s' % (device.id, gpu_name))
def get_gpu_name(id: int) -> str:
    """Return a "<name> (<id>)" label for the GPU at index *id*."""
    gpu = getGPUs()[id]
    return '%s (%d)' % (gpu.name, id)
Example #11
def get_gpu_total():
    """Return the combined memory of all installed GPUs, in gigabytes."""
    # Start from 0.0 so an empty GPU list yields the same float result
    # as the original accumulator loop.
    total_mb = sum((float(card.memoryTotal) for card in getGPUs()), 0.0)
    return megabyte2gigabyte(total_mb)
Example #12
 def deviceName(self):
     """Return a label for the active device: 'CPU' or 'GPU <id> - <name>'."""
     device = self.device
     if device is None:
         return "CPU"
     gpu_name = getGPUs()[device.id].name
     return "GPU %d - %s" % (device.id, gpu_name)