def __init__(
    self, num_visible, num_hidden=None, unitary_dict=None, gpu=False, module=None
):
    if gpu and torch.cuda.is_available():
        warnings.warn(
            "Using ComplexWaveFunction on GPU is not recommended due to poor "
            "performance compared to CPU.",
            ResourceWarning,
            2,
        )
        self.device = torch.device("cuda")
    else:
        self.device = torch.device("cpu")

    if module is None:
        self.rbm_am = BinaryRBM(num_visible, num_hidden, gpu=gpu)
        self.rbm_ph = BinaryRBM(num_visible, num_hidden, gpu=gpu)
    else:
        _warn_on_missing_gpu(gpu)
        self.rbm_am = module.to(self.device)
        self.rbm_am.device = self.device
        self.rbm_ph = module.to(self.device).clone()
        self.rbm_ph.device = self.device

    self.num_visible = self.rbm_am.num_visible
    self.num_hidden = self.rbm_am.num_hidden
    self.device = self.rbm_am.device

    self.unitary_dict = unitary_dict if unitary_dict else unitaries.create_dict()
    self.unitary_dict = {
        k: v.to(device=self.device) for k, v in self.unitary_dict.items()
    }
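# Usage sketch (illustrative, not from the source): the class name
# ComplexWaveFunction is taken from the warning message above; sizes are
# arbitrary. With module=None, two independent BinaryRBMs are built for the
# amplitude and phase parts of the wavefunction.
nn_state = ComplexWaveFunction(num_visible=2, num_hidden=2, gpu=False)
assert nn_state.rbm_am is not nn_state.rbm_ph  # amplitude and phase RBMs are distinct
assert nn_state.device == torch.device("cpu")  # gpu=False forces CPU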
def __init__(
    self, num_visible, num_hidden=None, num_aux=None, zero_weights=False, gpu=False
):
    super().__init__()
    self.num_visible = int(num_visible)
    self.num_hidden = int(num_hidden) if num_hidden is not None else self.num_visible
    self.num_aux = int(num_aux) if num_aux is not None else self.num_visible

    # Parameters are:
    #   W: The weights of the visible-hidden edges
    #   U: The weights of the visible-auxiliary edges
    #   b: The biases of the visible nodes
    #   c: The biases of the hidden nodes
    #   d: The biases of the auxiliary nodes
    # The auxiliary bias of the phase RBM is always zero.
    self.num_pars = (
        (self.num_visible * self.num_hidden)
        + (self.num_visible * self.num_aux)
        + self.num_visible
        + self.num_hidden
        + self.num_aux
    )

    _warn_on_missing_gpu(gpu)
    self.gpu = gpu and torch.cuda.is_available()
    self.device = torch.device("cuda") if self.gpu else torch.device("cpu")

    self.initialize_parameters(zero_weights=zero_weights)
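# Usage sketch (illustrative): the enclosing class is not named in the snippet
# above; `PurificationRBM` below is a hypothetical stand-in. num_hidden and
# num_aux both default to num_visible when omitted.
rbm = PurificationRBM(num_visible=4, gpu=False)
assert rbm.num_pars == (4 * 4) + (4 * 4) + 4 + 4 + 4  # W + U + b + c + d = 44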
def __init__(self, n_vis, n_hin, gpu=True):
    super(RBM, self).__init__()
    self.n_vis = n_vis
    self.n_hin = n_hin

    _warn_on_missing_gpu(gpu)
    self.gpu = gpu and torch.cuda.is_available()
    self.device = torch.device("cuda") if self.gpu else torch.device("cpu")

    self.initialize_parameters()
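# Usage sketch (illustrative): unlike BinaryRBM below, n_vis/n_hin are stored
# without int() coercion. gpu=True falls back to CPU when CUDA is unavailable,
# after _warn_on_missing_gpu has had a chance to warn.
rbm = RBM(n_vis=8, n_hin=4, gpu=False)
assert rbm.device == torch.device("cpu")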
def __init__(self, num_visible, num_hidden, zero_weights=False, gpu=True):
    super(BinaryRBM, self).__init__()
    self.num_visible = int(num_visible)
    self.num_hidden = int(num_hidden)
    self.num_pars = (
        (self.num_visible * self.num_hidden) + self.num_visible + self.num_hidden
    )

    _warn_on_missing_gpu(gpu)
    self.gpu = gpu and torch.cuda.is_available()
    self.device = torch.device("cuda") if self.gpu else torch.device("cpu")

    self.initialize_parameters(zero_weights=zero_weights)
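# Usage sketch (illustrative sizes): num_pars counts the full weight matrix
# plus both bias vectors.
rbm = BinaryRBM(num_visible=6, num_hidden=3, gpu=False)
assert rbm.num_pars == (6 * 3) + 6 + 3  # weights + visible biases + hidden biases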
def __init__(self, num_visible, num_hidden=None, gpu=True, module=None):
    if module is None:
        self.rbm_am = BinaryRBM(num_visible, num_hidden, gpu=gpu)
    else:
        _warn_on_missing_gpu(gpu)
        gpu = gpu and torch.cuda.is_available()
        device = torch.device("cuda") if gpu else torch.device("cpu")
        self.rbm_am = module.to(device)
        self.rbm_am.device = device

    self.num_visible = self.rbm_am.num_visible
    self.num_hidden = self.rbm_am.num_hidden
    self.device = self.rbm_am.device
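# Usage sketch (illustrative): the enclosing class is not named above;
# `PositiveWaveFunction` is a hypothetical stand-in. When a module is passed,
# the sizes and device are read off the wrapped RBM rather than the arguments.
base = BinaryRBM(num_visible=4, num_hidden=4, gpu=False)
psi = PositiveWaveFunction(num_visible=4, module=base)
assert psi.num_visible == base.num_visible and psi.device == base.device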
def __init__(
    self, num_visible, num_hidden=None, unitary_dict=None, gpu=True, module=None
):
    if gpu and torch.cuda.is_available():
        warnings.warn(
            (
                "Using ComplexWaveFunction on GPU is not recommended due to poor "
                "performance compared to CPU. In the future, ComplexWaveFunction "
                "will default to using CPU, even if a GPU is available."
            ),
            ResourceWarning,
            2,
        )
        self.device = torch.device("cuda")
    else:
        self.device = torch.device("cpu")

    if module is None:
        self.rbm_am = BinaryRBM(
            int(num_visible),
            int(num_hidden) if num_hidden else int(num_visible),
            gpu=gpu,
        )
        self.rbm_ph = BinaryRBM(
            int(num_visible),
            int(num_hidden) if num_hidden else int(num_visible),
            gpu=gpu,
        )
    else:
        _warn_on_missing_gpu(gpu)
        self.rbm_am = module.to(self.device)
        self.rbm_am.device = self.device
        self.rbm_ph = module.to(self.device).clone()
        self.rbm_ph.device = self.device

    self.num_visible = int(num_visible)
    self.num_hidden = int(num_hidden) if num_hidden else self.num_visible

    self.unitary_dict = unitary_dict if unitary_dict else unitaries.create_dict()
    self.unitary_dict = {
        k: v.to(device=self.device) for k, v in self.unitary_dict.items()
    }
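# Usage sketch (illustrative): a unitary dictionary may be supplied
# explicitly; otherwise unitaries.create_dict() is used. Either way, every
# entry is moved onto the wavefunction's device.
U = unitaries.create_dict()
nn_state = ComplexWaveFunction(num_visible=3, unitary_dict=U, gpu=False)
assert all(t.device == nn_state.device for t in nn_state.unitary_dict.values())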
def __init__(self, init_params, masks, gpu=True):
    self.init_params = dict(init_params)

    _warn_on_missing_gpu(gpu)
    self.gpu = gpu and torch.cuda.is_available()
    self.device = torch.device("cuda") if self.gpu else torch.device("cpu")

    # Given masks use the convention: 1 = pruned, 0 = kept.
    # In order to use these as multiplicative masks, we need to flip them.
    self.masks = {k: (1 - v.to(self.device)) for k, v in masks.items()}
    self.init_params = {
        k: v.to(self.device) for k, v in self.init_params.items()
    }

    super(MaskedBinaryRBM, self).__init__(
        num_visible=self.init_params["weights"].shape[1],
        num_hidden=self.init_params["weights"].shape[0],
        gpu=gpu,
    )
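# Usage sketch (illustrative): masks arrive in the 1 = pruned, 0 = kept
# convention and are flipped internally. Only a "weights" entry is shown here;
# whether other keys (e.g. biases) are also required is left open.
w = torch.randn(3, 5, dtype=torch.double)  # shape (num_hidden, num_visible)
prune = torch.zeros_like(w)
prune[0, 0] = 1  # prune a single edge
rbm = MaskedBinaryRBM(init_params={"weights": w}, masks={"weights": prune}, gpu=False)
assert rbm.num_visible == 5 and rbm.num_hidden == 3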
def __init__(
    self, num_visible, num_hidden, zero_weights=False, gpu=True, num_chains=100
):
    super(BinaryRBM, self).__init__()
    self.num_visible = int(num_visible)
    self.num_hidden = int(num_hidden)
    self.num_chains = int(num_chains)
    self.num_pars = (
        (self.num_visible * self.num_hidden) + self.num_visible + self.num_hidden
    )

    _warn_on_missing_gpu(gpu)
    self.gpu = gpu and torch.cuda.is_available()

    # Maximum number of visible units for exact enumeration
    self.size_cut = 16

    self.device = torch.device("cuda") if self.gpu else torch.device("cpu")

    if zero_weights:
        self.weights = nn.Parameter(
            torch.zeros(
                self.num_hidden,
                self.num_visible,
                device=self.device,
                dtype=torch.double,
            ),
            requires_grad=True,
        )
        self.visible_bias = nn.Parameter(
            torch.zeros(self.num_visible, device=self.device, dtype=torch.double),
            requires_grad=True,
        )
        self.hidden_bias = nn.Parameter(
            torch.zeros(self.num_hidden, device=self.device, dtype=torch.double),
            requires_grad=True,
        )
    else:
        self.initialize_parameters()
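# Usage sketch (illustrative): with zero_weights=True all parameters start at
# zero; otherwise initialize_parameters() draws them. Note the weight matrix
# is laid out as (num_hidden, num_visible).
rbm = BinaryRBM(num_visible=4, num_hidden=2, zero_weights=True, gpu=False, num_chains=50)
assert rbm.weights.shape == (2, 4)
assert torch.all(rbm.weights == 0) and torch.all(rbm.visible_bias == 0)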