def V_setup(self):
    """Set up a one-hidden-layer network with 2 output units.

    Defines data dimensions, creates the prior objects for the input->hidden
    and hidden->output weights, converts the data to torch tensors, and
    registers the trainable parameter objects in ``self.dict_parameters``.

    Returns:
        An empty tuple (setup happens via side effects on ``self``).
    """
    self.dim = self.input_data["input"].shape[1]
    self.num_ob = self.input_data["target"].shape[0]
    self.explicit_gradient = True
    self.need_higherorderderiv = True
    # NOTE(review): hard-coded width; sibling setups read model_dict["num_units"].
    self.num_units = 10
    prior_hidden_fn = prior_generator("gaussian_inv_gamma_2")
    # Output-layer prior variance shrinks with layer width.
    prior_out_fn = prior_generator("normal", var=1 / self.num_units)
    self.hidden_in = prior_hidden_fn(obj=self, name="hidden_in",
                                     shape=(self.num_units, self.dim))
    self.hidden_out = prior_out_fn(obj=self, name="hidden_out",
                                   shape=(2, self.num_units),
                                   global_scale=1 / self.num_units)
    # FIX: read tensor values from input_data like every sibling V_setup,
    # instead of mixing input_data (shapes) with self.y_np / self.X_np (values).
    self.y = Variable(torch.from_numpy(self.input_data["target"]),
                      requires_grad=False).type("torch.LongTensor")
    self.X = Variable(torch.from_numpy(self.input_data["input"]),
                      requires_grad=False).type(self.precision_type)
    self.dict_parameters = {
        "hidden_in": self.hidden_in,
        "hidden_out": self.hidden_out,
    }
    return ()
def V_setup(self):
    """Set up a one-hidden-layer multi-class network.

    Reads the network width from ``self.model_dict`` and the hidden-weight
    prior name from ``self.prior_dict``, builds the prior objects, converts
    the data to torch tensors, and records the parameters in
    ``self.dict_parameters``.

    Returns:
        An empty tuple (all state is stored on ``self``).
    """
    target_np = self.input_data["target"]
    input_np = self.input_data["input"]
    self.dim = input_np.shape[1]
    self.num_ob = target_np.shape[0]
    self.num_classes = len(numpy.unique(target_np))
    self.explicit_gradient = True
    self.need_higherorderderiv = True
    self.num_units = self.model_dict["num_units"]
    make_hidden_prior = prior_generator(self.prior_dict["name"])
    make_out_prior = prior_generator("normal")
    # Prior scales shrink as 1/sqrt(fan-in) of each layer.
    self.hidden_in = make_hidden_prior(
        obj=self,
        name="hidden_in",
        shape=(self.num_units, self.dim),
        global_scale=math.sqrt(1 / self.dim),
    )
    self.hidden_out = make_out_prior(
        obj=self,
        name="hidden_out",
        shape=(self.num_classes, self.num_units),
        global_scale=math.sqrt(1 / self.num_units),
    )
    # Class labels must be long integers; features use the model precision.
    self.y = Variable(torch.from_numpy(target_np),
                      requires_grad=False).type("torch.LongTensor")
    self.X = Variable(torch.from_numpy(input_np),
                      requires_grad=False).type(self.precision_type)
    self.dict_parameters = {
        "hidden_in": self.hidden_in,
        "hidden_out": self.hidden_out,
    }
    return ()
def V_setup(self):
    """Set up a deep (multi-hidden-layer) multi-class network.

    Builds the input and output layers with a normal prior, then one extra
    hidden->hidden weight matrix per additional layer using the prior named
    in ``self.prior_dict``. Every weight object is stored both as an
    attribute (``hidden_1``, ``hidden_2``, ...) and in
    ``self.dict_parameters``.

    Returns:
        An empty tuple (setup happens via side effects on ``self``).
    """
    self.dim = self.input_data["input"].shape[1]
    self.num_ob = self.input_data["target"].shape[0]
    self.num_classes = len(numpy.unique(self.input_data["target"]))
    self.explicit_gradient = True
    self.need_higherorderderiv = True
    # NOTE(review): hard-coded width; other setups read model_dict["num_units"].
    self.num_units = 35
    self.num_layers = self.model_dict["num_layers"]
    prior_fn = prior_generator("normal")
    self.hidden_in = prior_fn(obj=self, name="hidden_in",
                              shape=(self.num_units, self.dim),
                              global_scale=1)
    self.hidden_out = prior_fn(obj=self, name="hidden_out",
                               shape=(self.num_classes, self.num_units),
                               global_scale=1)
    self.dict_parameters = {
        "hidden_in": self.hidden_in,
        "hidden_out": self.hidden_out,
    }
    # FIX: prior_generator(...) is loop-invariant — create the generator once
    # (the block already reuses a single generator fn for two layers above).
    prior_hidden_fn = prior_generator(self.prior_dict["name"])
    for i in range(self.num_layers - 1):
        weights_name = "hidden_{}".format(i + 1)
        obj = prior_hidden_fn(obj=self, name=weights_name,
                              shape=(self.num_units, self.num_units),
                              global_scale=1)
        setattr(self, weights_name, obj)
        self.dict_parameters.update({weights_name: obj})
    self.y = Variable(torch.from_numpy(self.input_data["target"]),
                      requires_grad=False).type("torch.LongTensor")
    self.X = Variable(torch.from_numpy(self.input_data["input"]),
                      requires_grad=False).type(self.precision_type)
    return ()
def V_setup(self):
    """Set up a linear model with a configurable prior on ``beta``.

    The prior name comes from ``self.prior_dict``; data are converted to
    torch tensors in the model precision and the single parameter object is
    registered in ``self.dict_parameters``.

    Returns:
        An empty tuple (all state is stored on ``self``).
    """
    X_np = self.input_data["input"]
    self.dim = X_np.shape[1]
    self.num_ob = X_np.shape[0]
    self.explicit_gradient = False
    self.need_higherorderderiv = True
    make_prior = prior_generator(self.prior_dict["name"])
    self.beta_obj = make_prior(obj=self, name="beta", shape=[self.dim])
    self.y = Variable(torch.from_numpy(self.input_data["target"]),
                      requires_grad=False).type(self.precision_type)
    self.X = Variable(torch.from_numpy(X_np),
                      requires_grad=False).type(self.precision_type)
    self.dict_parameters = {"beta": self.beta_obj}
    return ()
def V_setup(self):
    """Set up a model with a regularized-horseshoe ("rhorseshoe_3") prior
    on ``beta``, one coefficient per target entry.

    No design matrix is used here — only the target vector is converted to
    a torch tensor.

    Returns:
        An empty tuple (all state is stored on ``self``).
    """
    target_np = self.input_data["target"]
    self.dim = len(target_np)
    self.explicit_gradient = False
    self.need_higherorderderiv = False
    make_hs_prior = prior_generator("rhorseshoe_3")
    self.beta_obj = make_hs_prior(obj=self, name="beta", shape=[self.dim])
    self.y = Variable(torch.from_numpy(target_np),
                      requires_grad=False).type(self.precision_type)
    self.dict_parameters = {"beta": self.beta_obj}
    return ()
def V_setup(self):
    """Set up a linear model with a "horseshoe_1" prior on ``beta``.

    Defines data dimensions, creates the prior object, converts the data to
    torch tensors, and registers the parameter in ``self.dict_parameters``.

    Returns:
        An empty tuple (setup happens via side effects on ``self``).
    """
    # FIX: take the shapes from input_data like the values below, instead of
    # mixing self.X_np (shapes) with input_data (tensor values).
    self.dim = self.input_data["input"].shape[1]
    self.num_ob = self.input_data["input"].shape[0]
    self.explicit_gradient = False
    self.need_higherorderderiv = True
    prior_generator_fn = prior_generator("horseshoe_1")
    self.beta_obj = prior_generator_fn(obj=self, name="beta", shape=[self.dim])
    self.y = Variable(torch.from_numpy(self.input_data["target"]),
                      requires_grad=False).type(self.precision_type)
    self.X = Variable(torch.from_numpy(self.input_data["input"]),
                      requires_grad=False).type(self.precision_type)
    # Register the parameter like the sibling V_setup implementations do.
    self.dict_parameters = {"beta": self.beta_obj}
    return ()