Code Example #1
import os
import sys

import numpy as np
from gpuinfo import GPUInfo


def set_GPU(num_of_GPUs):
    # treat any GPU using less than 1500 MiB as available
    current_memory_gpu = GPUInfo.gpu_usage()[1]
    list_available_gpu = np.where(
        np.array(current_memory_gpu) < 1500)[0].astype('str').tolist()
    current_available_gpu = ",".join(list_available_gpu)
    if len(list_available_gpu) < num_of_GPUs:
        print("==============Warning==============")
        print("Your process has been terminated")
        print("Please decrease the number of GPUs you are using")
        print(
            f"Number of devices available:\t{len(list_available_gpu)} gpu(s)")
        print(f"Number of devices requested:\t{num_of_GPUs} gpu(s)")
        sys.exit()
    elif len(list_available_gpu) > num_of_GPUs:
        # keep only the first num_of_GPUs available devices (the original
        # sliced by the count of surplus GPUs, claiming too many devices)
        list_available_gpu = list_available_gpu[:num_of_GPUs]
        current_available_gpu = ",".join(list_available_gpu)
        print("[DEBUG]***********************************************")
        print(f"[DEBUG]You are using GPU(s): {current_available_gpu}")
        print("[DEBUG]***********************************************")
        os.environ["CUDA_VISIBLE_DEVICES"] = current_available_gpu
    else:
        print("[DEBUG]***********************************************")
        print(f"[DEBUG]You are using GPU(s): {current_available_gpu}")
        print("[DEBUG]***********************************************")
        os.environ["CUDA_VISIBLE_DEVICES"] = current_available_gpu
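A minimal call-site sketch (the torch calls are illustrative, not from the original project): CUDA_VISIBLE_DEVICES is only read when a framework first initializes CUDA, so set_GPU must run before the framework touches the GPU.

set_GPU(2)  # claim two GPUs that each use less than 1500 MiB

import torch  # import the framework only after the mask is set
print(torch.cuda.device_count())  # prints 2: only the masked devices are visible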
Code Example #2
import getpass
import os
import shutil

import psutil
from gpuinfo import GPUInfo


def __main__():
    op_system_name = os.name
    op_user = getpass.getuser()
    print(op_user)
    if op_system_name == 'posix':
        # note: os.name is 'posix' on Linux and macOS alike
        print("This operating system is POSIX (macOS/Linux)")
    if op_system_name == 'nt':
        print("This operating system is Windows")
    if op_system_name == "java":
        print("This operating system is Java (Jython)")
    total, used, free = shutil.disk_usage("/")
    print("Total Storage: %d GB" % (total // (2**30)))
    print("Used Storage: %d GB" % (used // (2**30)))
    print("Free Storage: %d GB" % (free // (2**30)))
    print("CPU percentage = {}\nVirtual memory = {}".format(
        psutil.cpu_percent(), psutil.virtual_memory()))
    for each in psutil.virtual_memory():
        print(each)
    print('Memory % used:', psutil.virtual_memory()[2])
    print(GPUInfo.check_empty())
    print(GPUInfo.get_info())
    print(GPUInfo.gpu_usage())
    mem = psutil.virtual_memory()
    print(mem.total)
    print("RAM = {} GiB".format(mem.total / 1024.**3))
    op_check = OpSysChecker()  # OpSysChecker is defined elsewhere in this project
    print("-----------------\n{}".format(op_check.return_info()))
Code Example #3
import os
import sys

import numpy as np
import torch


def set_GPU(num_of_GPUs, log, memory_restraint=0):
    try:
        from gpuinfo import GPUInfo
        current_memory_gpu = GPUInfo.gpu_usage()[1]
        if not memory_restraint:
            # no limit given: np.where keeps indices with a nonzero memory reading
            list_available_gpu = np.where(
                np.array(current_memory_gpu))[0].astype('str').tolist()
        else:
            # only GPUs using less than memory_restraint MiB count as free
            list_available_gpu = np.where(
                np.array(current_memory_gpu) < memory_restraint)[0].astype(
                    'str').tolist()
    except Exception:
        log.write_log(
            "Cannot find nvidia-smi, please add it to the PATH environment variable",
            message_type=0)
        print(
            "[INFO] Cannot find nvidia-smi, please add it to the PATH environment variable"
        )
        if torch.cuda.is_available():
            # fall back to the first num_of_GPUs devices torch can see
            list_available_gpu = [str(i) for i in range(num_of_GPUs)]
        else:
            list_available_gpu = []

    list_gpu_using = list_available_gpu[:num_of_GPUs]

    if len(list_available_gpu) < num_of_GPUs and len(list_available_gpu) > 0:
        print("==============Warning==============")
        print("Your process has been terminated")
        print("Please decrease the number of GPUs you are using")
        print(
            f"Number of devices available:\t{len(list_available_gpu)} gpu(s)")
        print(f"Number of devices requested:\t{num_of_GPUs} gpu(s)")
        log.write_log(
            f"Number of devices available:\t{len(list_available_gpu)} gpu(s) < number of devices requested:\t{num_of_GPUs} gpu(s)",
            message_type=2)
        sys.exit()
    elif num_of_GPUs <= len(list_available_gpu) and num_of_GPUs != 0:
        current_available_gpu = ",".join(list_gpu_using)
    elif num_of_GPUs == 0 or len(list_available_gpu) == 0:
        # -1 hides every CUDA device, forcing CPU execution
        current_available_gpu = "-1"

    print("[INFO] ***********************************************")
    if len(list_gpu_using) > 0:
        tmp_message = f"[INFO] You are using GPU(s): {current_available_gpu}"
    else:
        tmp_message = "[INFO] You are using CPU !"
    print(tmp_message)
    if log is not None:
        log.write_log(tmp_message, message_type=0)
    print("[INFO] ***********************************************")
    os.environ["CUDA_VISIBLE_DEVICES"] = current_available_gpu
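A hypothetical call site; the write_log interface below is a stub inferred from the calls above, not part of any library:

class ConsoleLog:
    def write_log(self, message, message_type=0):
        print(f"[LOG:{message_type}] {message}")


set_GPU(1, ConsoleLog(), memory_restraint=2000)  # one GPU using < 2000 MiB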
Code Example #4
File: 10_fold_bert.py Project: zyang37/bert
from gpuinfo import GPUInfo


def gpu_usage():
    total_memory = 32000  # total memory per GPU in MiB (hardcoded)
    available_device = GPUInfo.check_empty()
    percent, memory = GPUInfo.gpu_usage()
    for i in range(len(memory)):
        # convert absolute MiB usage into a fraction of total memory
        memory[i] = float(memory[i]) / total_memory
    print(memory)
    return memory
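The 32000 MiB total above is hardcoded (a 32 GB card, presumably). A sketch that reads each GPU's real total from nvidia-smi instead, assuming the binary is on PATH:

import subprocess


def total_memory_mib():
    out = subprocess.check_output(
        ["nvidia-smi", "--query-gpu=memory.total",
         "--format=csv,noheader,nounits"]).decode()
    return [int(x) for x in out.split()]  # one entry per GPU, in MiB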
Code Example #5
File: output.py Project: gongwan33/posedetection
    def get_gpu_info(self):
        print("GPUINFO: Start loop")
        # poll utilization and memory every two seconds until another
        # thread clears self.gpu_thread
        while self.gpu_thread:
            self.thread_lock.acquire()
            self.gpu_percent, self.gpu_memory = GPUInfo.gpu_usage()
            self.thread_lock.release()
            time.sleep(2)

        print("GPUINFO: End loop")
        return
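How such a poller is typically driven (a sketch; GpuMonitor and its attributes mirror the example above rather than any documented API):

import threading
import time

from gpuinfo import GPUInfo


class GpuMonitor:
    def __init__(self):
        self.gpu_thread = True
        self.thread_lock = threading.Lock()
        self.gpu_percent, self.gpu_memory = [], []

    def get_gpu_info(self):
        while self.gpu_thread:
            with self.thread_lock:  # `with` releases the lock even on error
                self.gpu_percent, self.gpu_memory = GPUInfo.gpu_usage()
            time.sleep(2)


monitor = GpuMonitor()
worker = threading.Thread(target=monitor.get_gpu_info, daemon=True)
worker.start()
time.sleep(10)  # main work happens here
monitor.gpu_thread = False  # signal the loop to exit
worker.join()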
Code Example #6
    def __getitem__(self, index):
        vid_dir, label, frame_count, class_name = self.data_list[index]
        # placeholders, only filled in when the method requires them
        buffer = 0  # np.empty((self.clip_len, self.resize_height, self.resize_width, 3), np.dtype('float32'))
        text = 0
        try:
            if 'text_only' != self.method:
                buffer = self.load_frames(vid_dir, frame_count)
                if self.do_crop:
                    buffer = self.spatial_crop(buffer, self.crop_size)
                buffer = self.normalize(buffer)
                buffer = self.to_tensor(buffer)
            if 'joint' == self.method or 'text_only' == self.method:
                # the dictionary key is the video file name without its extension
                key = (vid_dir.split('/')[-1]).split('.')[0]
                text = get_text_description(self.dictionary_pickle, key)
                text = text.astype('float32')
        except Exception as e:
            print("Error ", e)
            from gpuinfo import GPUInfo
            print(GPUInfo.gpu_usage())  # dump GPU state to help diagnose OOM errors
            # sys.exit()

        return buffer, label, text
Code Example #7
import time
from random import random

import psutil
from gpuinfo import GPUInfo

# vram: total GPU memory in MiB, defined elsewhere in the original script

file = open("RendimientoAgrupados.txt", "w")
tiempo = int(input("Enter the sampling time (seconds) [e.g. 120s]: "))
intervalo = float(
    input("Enter the interval between samples (seconds) [e.g. 0.5s]: "))
muestras = int(tiempo / intervalo)

while True:
    selecOption = input("Run experiment [y / n]: ")
    if selecOption == "n":
        break
    else:
        label = input("Experiment name [e.g. 320x240]: ")
        for x in range(muestras):
            cpu_per = psutil.cpu_percent(interval=intervalo)  # CPU usage %
            aux = psutil.virtual_memory()
            ram = aux[2]  # RAM usage %
            gpu_percent, gpu_memory = GPUInfo.gpu_usage()
            gpu = gpu_percent[0] + round(random(), 1)  # GPU usage % (plus jitter)
            gpu_vram = round((gpu_memory[0] * 100) / vram, 1)  # GPU RAM usage %
            file.write(str(cpu_per) + " ")
            file.write(str(ram) + " ")
            file.write(str(gpu) + " ")
            file.write(str(gpu_vram) + " ")
            file.write(label + "\n")

file.close()
print("ElapsedTime(s)=" + str(tiempo) + " SampleCount=" +
      str(muestras))
time.sleep(8)
Code Example #8
import subprocess
from shutil import copyfile

from gpuinfo import GPUInfo

# mem_str: memory-gauge image index computed earlier in the original script

dest = '/home/storms/Progress/Images/gauges/temp_gaugej_br.png'

src = '/home/storms/Progress/Images/gauges/bottom_right/temp_gaugej_br_' + mem_str + '.png'

copyfile(src, dest)

# read the GPU core temperature via nvidia-settings
gpu_temp = int(
    subprocess.check_output(["nvidia-settings -query [gpu:0]/GPUCoreTemp -t"],
                            shell=True).decode("utf-8"))
gpu_temp = str(14 + 2 * round(gpu_temp / 2))  # map the temperature onto the gauge image index
src = '/home/storms/Progress/Images/gauges/top_right/temp_gaugej_tr_' + gpu_temp + '.png'
dest = '/home/storms/Progress/Images/gauges/temp_gaugej_tr.png'

copyfile(src, dest)

use_stats = GPUInfo.gpu_usage()
gpu_usage = use_stats[0][0]  # utilization % of the first GPU
vram_usage = round(use_stats[1][0] / 1000)  # VRAM use, rounded to whole GB

dest = '/home/storms/Progress/Images/gauges/zotac_symbol.png'
src = '/home/storms/Progress/Images/gauges/zotac_body/zotac_gaming_logo' + str(
    gpu_usage) + '-01.png'
copyfile(src, dest)

dest = '/home/storms/Progress/Images/gauges/Zotac_Wings.png'
src = '/home/storms/Progress/Images/gauges/Zotac_Wings/zotac_wings' + str(
    vram_usage) + '.png'
copyfile(src, dest)
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """
Section 2: Update Progress Bars 
""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """"""
Code Example #9
File: GPU.py Project: Michael07220823/Tools
from gpuinfo import GPUInfo

available_device = GPUInfo.check_empty()
# available_device is a list of the IDs of all GPUs with no running tasks
print(available_device)

percent, memory = GPUInfo.gpu_usage()
# get every GPU's utilization percentage and memory usage
print(percent, memory)

min_percent = percent.index(min([percent[i] for i in available_device]))
# among the unused GPUs, the one with the lowest utilization
print(min_percent)

min_memory = memory.index(min([memory[i] for i in available_device]))
# among the unused GPUs, the one with the least memory in use

print(min_memory)
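Caveat: the .index() lookups above scan the full percent/memory lists, so a busy GPU that happens to share the minimum value can be returned instead of an idle one. A sketch that restricts the search to available_device:

min_percent = min(available_device, key=lambda i: percent[i])
min_memory = min(available_device, key=lambda i: memory[i])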
Code Example #10
def compute_features(dataloader, model, N):
    if args.verbose:
        print('Compute features')
    batch_time = AverageMeter()
    end = time.time()
    model.eval()
    print("Before", GPUInfo.gpu_usage())

    # discard the label information in the dataloader
    try:
        for i, (input_tensor, _, text) in enumerate(dataloader):
            try:
                # disable autograd during extraction; the original called
                # torch.no_grad() bare, which constructs and discards the
                # context manager without disabling anything
                with torch.no_grad():
                    if text_only:
                        aux = model.extract_features(
                            text.cuda()).data.cpu().numpy()
                        aux = aux.astype('float32')
                    elif is_joint:
                        input_var = torch.autograd.Variable(
                            input_tensor.cuda())
                        text = text.cuda()
                        aux = model.module.extract_features(
                            input_var, text).data.cpu().numpy()
                        aux = aux.astype('float32')
                    else:
                        input_var = torch.autograd.Variable(
                            input_tensor.cuda())
                        aux = model.module.extract_features(
                            input_var).data.cpu().numpy()
                        aux = aux.astype('float32')

                if i == 0:
                    features = np.zeros((N, aux.shape[1]), dtype='float32')

                if i < len(dataloader) - 1:
                    features[i * args.batch:(i + 1) * args.batch] = aux
                else:
                    # special treatment for the final, possibly smaller batch
                    # (the original tested i < len(dataloader), which is
                    # always true inside enumerate, so this branch never ran)
                    features[i * args.batch:] = aux

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if args.verbose and (i % 50) == 0:
                    print('{0} / {1}\t'
                          'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})'.
                          format(i, len(dataloader), batch_time=batch_time))
            except Exception as e:
                print("RAM Usage: ", str(psutil.virtual_memory().percent))
                print(GPUInfo.gpu_usage())
                print("failed: ", e)
    except RuntimeError:
        print("RAM Usage: ", str(psutil.virtual_memory().percent))
        print(GPUInfo.gpu_usage())
        return features
    except Exception as e:
        print("Error {}".format(e))
    finally:
        # note: returning from finally overrides any in-flight exception
        return features
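Since PyTorch 0.4, torch.autograd.Variable is a no-op wrapper and tensors are passed directly; the extraction above reduces to the following sketch (keeping the example's own extract_features call):

with torch.no_grad():
    aux = model.module.extract_features(
        input_tensor.cuda()).cpu().numpy().astype('float32')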
Code Example #11
            cpu
        )  # same as above, but with only 1 space, because 85.6 has only 4 characters
    else:
        cpuStr = str(
            cpu
        )  # 100.0 is 5 characters, so no leading spaces are needed

    # CPU Speed
    cpufreq = round(
        psutil.cpu_freq().current / 1000, 1
    )  # get the CPU frequency, divide by 1000 because 1 GHz = 1000 MHz, round to one decimal place
    cpufreqStr = "  " + str(
        cpufreq)  # convert to string and pad to 5 characters in total

    # GPU Info
    percent, memory = GPUInfo.gpu_usage(
    )  # get GPU utilization and used-memory info

    # GPU Utilization
    percentNum = percent[0]  # take the first GPU's value from the percent list

    if percentNum < 10:  # same as for CPU (pad to 5 characters)
        percentStr = "    " + str(percentNum)
    elif percentNum < 100:
        percentStr = "   " + str(percentNum)
    else:
        percentStr = "  " + str(percentNum)

    # GPU Used Memory
    memoryNum = round(
        memory[0] / 1024,
        1)  # take the first GPU's value from the memory list and round to one decimal place
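The manual space-padding above can be done with format-spec alignment instead, which right-justifies to a fixed width in one step (a sketch reusing the same variables):

    percentStr = f"{percentNum:>5}"  # e.g. '    7', '   85', '  100'
    memoryStr = f"{memoryNum:>5}"    # memoryNum is already rounded to one decimal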
Code Example #12
File: utils.py Project: blueardour/pytorch-utils
from gpuinfo import GPUInfo


def gpu_info():
    try:
        percent, memory = GPUInfo.gpu_usage()
    except ValueError:
        return "Error when reading GPU utilization"
    return "percent: %r, memory: %r" % (percent, memory)