Example #1
    def _init_device(self):
        if not torch.cuda.is_available():
            self.logger.info('No GPU device available')
            sys.exit(1)

        # Seed all RNGs (NumPy, CPU, CUDA) for reproducibility; default seed is 1337.
        np.random.seed(self.cfg.get('seed', 1337))
        torch.manual_seed(self.cfg.get('seed', 1337))
        torch.cuda.manual_seed(self.cfg.get('seed', 1337))
        cudnn.enabled = True
        cudnn.benchmark = True
        # Select the GPU reported by get_gpus_memory_info(); with multi-GPU
        # training, cuda:0 is used as the primary device.
        self.device_id, self.gpus_info = get_gpus_memory_info()
        self.device = torch.device('cuda:{}'.format(0 if self.cfg['training']['multi_gpus'] else self.device_id))
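Both examples rely on a project-specific helper get_gpus_memory_info() that is not shown on this page. The sketch below only illustrates the assumed contract (return the index of the GPU with the most free memory plus per-GPU memory info) and assumes a recent PyTorch that provides torch.cuda.mem_get_info; the real helper may be implemented differently, for instance by parsing nvidia-smi output.

import torch

def get_gpus_memory_info():
    """Hypothetical sketch: return (best_device_id, gpus_info) ranked by free CUDA memory."""
    gpus_info = {}
    for i in range(torch.cuda.device_count()):
        free_bytes, total_bytes = torch.cuda.mem_get_info(i)  # per-GPU memory in bytes
        gpus_info[i] = {'free': free_bytes, 'total': total_bytes}
    # The GPU with the most free memory becomes the default single-GPU device.
    best_device_id = max(gpus_info, key=lambda i: gpus_info[i]['free'])
    return best_device_id, gpus_info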
Example #2
    def _init_device(self):
        self.device = torch.device("cuda" if self.cfg['searching']['gpu'] else "cpu")
        # Seed NumPy and CPU RNGs for reproducibility; default seed is 1337.
        np.random.seed(self.cfg.get('seed', 1337))
        torch.manual_seed(self.cfg.get('seed', 1337))
        if self.cfg['searching']['gpu'] and torch.cuda.is_available():
            # Select the GPU reported by get_gpus_memory_info(); with multi-GPU
            # search, cuda:0 is used as the primary device.
            self.device_id, _ = get_gpus_memory_info()
            self.device = torch.device('cuda:{}'.format(0 if self.cfg['searching']['multi_gpus'] else self.device_id))
            torch.cuda.manual_seed(self.cfg.get('seed', 1337))
            torch.cuda.set_device(self.device)
            cudnn.enabled = True
            cudnn.benchmark = True
        else:
            self.logger.info('No GPU device available; falling back to CPU')
            self.device = torch.device('cpu')
            self.device_id = 0
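Both variants read the same handful of configuration keys and assume the usual imports (sys, numpy as np, torch, torch.backends.cudnn as cudnn) plus the helper sketched above. The layout below is inferred from the code on this page, not taken from any original project; it only illustrates the structure self.cfg is expected to have.

cfg = {
    'seed': 1337,                        # optional; both examples fall back to 1337
    'training':  {'multi_gpus': False},  # read by Example #1
    'searching': {'gpu': True,           # read by Example #2: whether to use CUDA at all
                  'multi_gpus': False},
}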