def device_usage_str(self):
    '''Returns a formatted string displaying the memory usage.'''
    s = 'device usage:\n'
    s += '-'*10 + '\n'
    #s += format_array('vertices', self.vertices) + '\n'
    #s += format_array('triangles', self.triangles) + '\n'
    s += format_array('nodes', self.nodes) + '\n'
    s += '%-15s %6s %6s' % ('total', '', format_size(self.nodes.nbytes)) + '\n'
    s += '-'*10 + '\n'
    free, total = cuda.mem_get_info()
    s += '%-15s %6s %6s' % ('device total', '', format_size(total)) + '\n'
    s += '%-15s %6s %6s' % ('device used', '', format_size(total-free)) + '\n'
    s += '%-15s %6s %6s' % ('device free', '', format_size(free)) + '\n'
    return s
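# The row-formatting helpers used above, format_array and format_size, are
# assumed to be defined elsewhere in the module.  A minimal sketch of what
# they might look like, so the columns in the usage table line up; the exact
# thresholds and suffixes here are an assumption, not the project's code.
def format_size(size):
    '''Return a byte count as a short human-readable string.'''
    for threshold, suffix in ((1e9, 'G'), (1e6, 'M'), (1e3, 'K')):
        if size >= threshold:
            return '%.1f%s' % (size / threshold, suffix)
    return '%.1f ' % size

def format_array(name, array):
    '''Return one table row: array name, element count, size in bytes.'''
    return '%-15s %6s %6s' % (name, format_size(len(array)),
                              format_size(array.nbytes))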
def device_usage_str(self, cl_context=None):
    '''Returns a formatted string displaying the memory usage.'''
    s = 'device usage:\n'
    s += '-' * 10 + '\n'
    #s += format_array('vertices', self.vertices) + '\n'
    #s += format_array('triangles', self.triangles) + '\n'
    s += format_array('nodes', self.nodes) + '\n'
    s += '%-15s %6s %6s' % ('total', '', format_size(self.nodes.nbytes)) + '\n'
    s += '-' * 10 + '\n'
    if api.is_gpu_api_cuda():
        free, total = cuda.mem_get_info()
    elif api.is_gpu_api_opencl():
        # OpenCL only reports the device's total global memory; free memory
        # is inferred from the usage counter tracked in self.metadata.
        total = cl_context.get_info(cl.context_info.DEVICES)[0].get_info(
            cl.device_info.GLOBAL_MEM_SIZE)
        free = total - self.metadata['d_gpu_used']
    s += '%-15s %6s %6s' % ('device total', '', format_size(total)) + '\n'
    s += '%-15s %6s %6s' % ('device used', '', format_size(total - free)) + '\n'
    s += '%-15s %6s %6s' % ('device free', '', format_size(free)) + '\n'
    return s
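# Unlike CUDA's mem_get_info(), the OpenCL API exposes no free-memory
# counter, only the device's total global memory, which is why the method
# above derives "free" from self.metadata['d_gpu_used'].  A minimal
# standalone sketch of the same GLOBAL_MEM_SIZE query, assuming pyopencl is
# installed and at least one OpenCL device is available:
import pyopencl as cl

ctx = cl.create_some_context(interactive=False)
device = ctx.get_info(cl.context_info.DEVICES)[0]
total = device.get_info(cl.device_info.GLOBAL_MEM_SIZE)
print('device total: %s bytes' % total)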