Example #1
 def getFreeMem(self, emptyCache=False):
     """Return free memory in bytes: GPU memory when CUDA is active, otherwise system RAM."""
     if self.cuda:
         if emptyCache:
             # Drop PyTorch's cached allocations so the reading reflects truly free GPU memory
             torch.cuda.empty_cache()
         free_ram = readgpu.getGPU()[self.deviceId]
     else:
         mem = psutil.virtual_memory()
         free_ram = mem.free
     self.freeRam = free_ram
     return free_ram
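
The same check can be reproduced outside the class. Below is a minimal standalone sketch; since the project's readgpu helper is not shown here, it assumes torch.cuda.mem_get_info as a stand-in, and get_free_mem is a hypothetical name:

 import psutil
 import torch

 def get_free_mem(device_id=0, empty_cache=False):
     # Hypothetical standalone equivalent: free GPU memory via torch, else system RAM via psutil.
     if torch.cuda.is_available():
         if empty_cache:
             torch.cuda.empty_cache()  # release cached allocations before measuring
         free_bytes, _total = torch.cuda.mem_get_info(device_id)
         return free_bytes
     return psutil.virtual_memory().free

 print(get_free_mem() // 2**20, 'MiB free')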
Example #2
 def system(self):
     """Return the free memory of each visible GPU in MiB, or an empty list without CUDA."""
     if not Config.cudaAvailable():
         return []
     try:
         freeMems = readgpu.getGPU()
         # Remember the free memory (in bytes) of the selected device
         self.freeRam = freeMems[self.deviceId]
         # Convert bytes to MiB for reporting
         gram = [freeMem // 2**20 for freeMem in freeMems]
     except Exception as e:
         print(e)
         gram = []
     return gram
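
For comparison, a standalone sketch of the same per-device listing, again assuming torch.cuda.mem_get_info rather than readgpu (free_gpu_mib is a hypothetical name):

 import torch

 def free_gpu_mib():
     # Hypothetical standalone equivalent: free memory per visible GPU in MiB, [] without CUDA.
     if not torch.cuda.is_available():
         return []
     return [torch.cuda.mem_get_info(i)[0] // 2**20 for i in range(torch.cuda.device_count())]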
Example #3
 def system(self):
     """Collect system RAM, CPU core counts, and GPU details for the selected device."""
     mem = psutil.virtual_memory()
     mem_total = int(mem.total / 1024**2)  # total RAM in MiB
     mem_free = int(mem.free / 1024**2)  # free RAM in MiB
     cpu_count_phy = psutil.cpu_count(logical=False)
     cpu_count_log = psutil.cpu_count(logical=True)
     deviceId = self.deviceId
     try:
         gname = readgpu.getName()[deviceId]
         gram = int(readgpu.getGPU()[deviceId] / 2**20)  # free GPU memory in MiB
         major, minor = torch.cuda.get_device_capability(deviceId)
         # Encode the compute capability as one number, e.g. major 7, minor 5 -> 7.5
         ginfo = [gname, gram, major + minor / 10]
         self.ginfo = ginfo
     except Exception as e:
         # Message reads: "No NVIDIA GPU detected; the system will run in CPU mode"
         gerror = '没有检测到NVIDIA的显卡,系统将采用CPU模式'
         ginfo = [gerror, 'N/A', 'N/A']
         print(e)
     return mem_total, mem_free, cpu_count_log, cpu_count_phy, ginfo
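
A standalone sketch assembling the same report with only psutil and torch (system_info is a hypothetical name; torch.cuda.mem_get_info stands in for readgpu):

 import psutil
 import torch

 def system_info(device_id=0):
     # Hypothetical standalone equivalent of the report above.
     mem = psutil.virtual_memory()
     mem_total, mem_free = int(mem.total / 1024**2), int(mem.free / 1024**2)
     cpu_log, cpu_phy = psutil.cpu_count(logical=True), psutil.cpu_count(logical=False)
     if torch.cuda.is_available():
         gname = torch.cuda.get_device_name(device_id)
         gram = torch.cuda.mem_get_info(device_id)[0] // 2**20  # free GPU memory in MiB
         major, minor = torch.cuda.get_device_capability(device_id)
         ginfo = [gname, gram, major + minor / 10]
     else:
         ginfo = ['No NVIDIA GPU detected; the system will run in CPU mode', 'N/A', 'N/A']
     return mem_total, mem_free, cpu_log, cpu_phy, ginfo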