Example no. 1
 def __init__(self,
              a,
              b,
              sigma=0.01,
              log_transform=False,
              validate_args=False):
     TModule.__init__(self)
     _a = torch.tensor(float(a)) if isinstance(a, Number) else a
     _a = _a.view(-1) if _a.dim() < 1 else _a
     _a, _b, _sigma = broadcast_all(_a, b, sigma)
     if not torch.all(constraints.less_than(_b).check(_a)):
         raise ValueError("must have that a < b (element-wise)")
     # TODO: Proper argument validation including broadcasting
     batch_shape, event_shape = _a.shape[:-1], _a.shape[-1:]
     # need to assign values before registering as buffers to make argument validation work
     self.a, self.b, self.sigma = _a, _b, _sigma
     super(SmoothedBoxPrior, self).__init__(batch_shape,
                                            event_shape,
                                            validate_args=validate_args)
     # now need to delete to be able to register buffer
     del self.a, self.b, self.sigma
     self.register_buffer("a", _a)
     self.register_buffer("b", _b)
     self.register_buffer("sigma", _sigma)
     self.tails = NormalPrior(torch.zeros_like(_a),
                              _sigma,
                              validate_args=validate_args)
     self._log_transform = log_transform
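
Examples 1, 7, 9 and 15 below all repeat the same dance: assign the tensors as plain attributes so the Distribution base class can validate them, call the base __init__, then delete the attributes and re-register them as buffers so they follow .to()/.cuda() and appear in state_dict(). A minimal standalone sketch of the pattern (class and argument names are illustrative, not taken from the source):

import torch
from torch.distributions import Distribution
from torch.nn import Module

class BufferedPrior(Module, Distribution):
    def __init__(self, rate, validate_args=False):
        Module.__init__(self)
        rate = torch.as_tensor(rate, dtype=torch.float)
        # Plain attribute first, so argument validation in the base
        # Distribution.__init__ can see it.
        self.rate = rate
        Distribution.__init__(self, batch_shape=rate.shape,
                              validate_args=validate_args)
        # Swap the plain attribute for a registered buffer.
        del self.rate
        self.register_buffer("rate", rate)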
Example no. 2
    def __init__(self,
                 components=None,
                 *args,
                 input=None,
                 skip_module_init=False,
                 **kwargs):

        self._flags = {
            'recursive_get': 1,
            'components_initialized': 0
        }  # Used for controlling recursion. Don't mess with this.

        if not skip_module_init:  # This is so the ModelFromModule Class can work.
            Module.__init__(self)

        self.components = PyTorch_Component_Map({}, model=self)
        self.init_default_components()

        # `components` defaults to None to avoid a shared mutable default dict.
        for key, value in (components or {}).items():
            self.components[key] = value

        HookedPassThroughPipe.__init__(self, input=input)
        self.enable_updates()
        self.enable_inference()
        self._flags['components_initialized'] = 1
Example no. 3
 def __init__(self, forward_rate, time_len=MAX_YR_LEN):
     Module.__init__(self)
     self.time_len = time_len
     if isinstance(forward_rate, float):
         forward_rate = torch.tensor([forward_rate])
     self._forward_rate = Parameter(forward_rate)
     # NOTE: .expand() builds a view tied to the init-time autograd graph;
     # expanding inside forward() is the safer pattern (see the sketch below).
     self.forward_rate = self._forward_rate.expand(time_len)
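
The expand-at-init line above is fragile: the expanded view is a non-leaf tensor created once, so repeated backward passes and optimizer steps do not interact with it cleanly. A minimal sketch of the lazier variant (the class name FlatForwardRate is hypothetical):

import torch
from torch.nn import Module, Parameter

class FlatForwardRate(Module):  # hypothetical name, same idea as Example 3
    def __init__(self, forward_rate, time_len):
        super().__init__()
        if isinstance(forward_rate, float):
            forward_rate = torch.tensor([forward_rate])
        self._forward_rate = Parameter(forward_rate)
        self.time_len = time_len

    def forward(self):
        # Expand lazily so every call sees the parameter's current value
        # and builds a fresh autograd graph.
        return self._forward_rate.expand(self.time_len)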
Example no. 4
 def __init__(self, input, hidden, output):
     Module.__init__(self)
     self.layer1 = torch.nn.Linear(input, hidden)
     self.layer1_5 = torch.nn.Linear(hidden, 23)
     self.layer2 = torch.nn.Linear(23, output)
     self.relu = torch.nn.ReLU()
     self.sigmoid = torch.nn.Sigmoid()
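
Example 4's forward() is not shown; assuming the conventional wiring (layer1 → relu → layer1_5 → relu → layer2 → sigmoid), an equivalent stack can be sketched with nn.Sequential:

import torch

def make_mlp(input_dim, hidden_dim, output_dim):
    # Hypothetical equivalent of Example 4, including the hard-coded
    # 23-unit middle layer.
    return torch.nn.Sequential(
        torch.nn.Linear(input_dim, hidden_dim),
        torch.nn.ReLU(),
        torch.nn.Linear(hidden_dim, 23),
        torch.nn.ReLU(),
        torch.nn.Linear(23, output_dim),
        torch.nn.Sigmoid(),
    )

probs = make_mlp(10, 32, 1)(torch.randn(4, 10))  # shape (4, 1), values in (0, 1)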
Example no. 5
    def __init__(self, name, class_type, config):
        """
        Initializes a Model object.

        :param name: Model name.
        :type name: str

        :param class_type: Class type of the component.

        :param config: Parameters read from configuration file.
        :type config: ``ptp.configuration.ConfigInterface``

        This constructor:

        - calls base class constructors (save config, name, logger, app_state etc.)

        - initializes the best model loss (used to select which model to save) to ``np.inf``:

            >>> self.best_loss = np.inf

        """
        # Call constructors of parent classes.
        Component.__init__(self, name, class_type, config)
        Module.__init__(self)

        # Best model loss (used to select which model to save), as per the docstring.
        self.best_loss = np.inf

        # Flag indicating whether the model is frozen or not.
        self.frozen = False
Example no. 6
    def __init__(self, output_dim=None, log_std=None):
        Module.__init__(self)

        self.log_std = log_std

        if log_std is None:
            self.log_std = Parameter(torch.zeros(output_dim), requires_grad=True)
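
The log-std parameterization in Example 6 keeps the learned standard deviation positive while leaving the underlying parameter unconstrained. A hypothetical usage (all names illustrative):

import torch

log_std = torch.zeros(4, requires_grad=True)   # stands in for self.log_std
mean = torch.zeros(4)                          # e.g. a policy network's mean head
dist = torch.distributions.Normal(mean, log_std.exp())  # exp() guarantees std > 0
action = dist.rsample()                        # differentiable sample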
Example no. 7
 def __init__(self, nu, K, validate_args=False):
     TModule.__init__(self)
     if K.dim() < 2:
         raise ValueError("K must be at least 2-dimensional")
     n = K.shape[-1]
     if K.shape[-2] != K.shape[-1]:
         raise ValueError("K must be square")
     if isinstance(nu, Number):
         nu = torch.tensor(float(nu))
     # The Wishart density requires nu > n - 1 degrees of freedom.
     if torch.any(nu <= n - 1):
         raise ValueError("Must have nu > n - 1")
     self.n = torch.tensor(n, dtype=torch.long, device=nu.device)
     batch_shape = nu.shape
     event_shape = torch.Size([n, n])
     # normalization constant
     logdetK = torch.logdet(K) if K.dim() == 2 else torch.stack(
         [torch.logdet(k) for k in K])
     C = -(nu / 2) * (logdetK + n * math.log(2)) - torch.mvlgamma(nu / 2, n)
     K_inv = torch.inverse(K) if K.dim() == 2 else torch.stack(
         [torch.inverse(k) for k in K])
     # need to assign values before registering as buffers to make argument validation work
     self.nu = nu
     self.K_inv = K_inv
     self.C = C
     super(WishartPrior, self).__init__(batch_shape,
                                        event_shape,
                                        validate_args=validate_args)
     # now need to delete to be able to register buffer
     del self.nu, self.K_inv, self.C
     self.register_buffer("nu", nu)
     self.register_buffer("K_inv", K_inv)
     self.register_buffer("C", C)
Example no. 8
 def __init__(self, optimize=True):
     # must be before Module.__init__ since the field is used in __getattr__
     Module.__init__(self)
     self._set_optimized(optimize)
     self._parameters = OrderedParameterDict(self)
     self._buffers = OrderedBufferDict(self)
     self._modules = OrderedModuleDict(self)
Example no. 9
 def __init__(self, n, eta, validate_args=False):
     TModule.__init__(self)
     if not isinstance(n, int) or n < 1:
         raise ValueError("n must be a positive integer")
     if isinstance(eta, Number):
         eta = torch.tensor(float(eta))
     self.n = torch.tensor(n, dtype=torch.long, device=eta.device)
     batch_shape = eta.shape
     event_shape = torch.Size([n, n])
     # Normalization constant(s)
     i = torch.arange(n, dtype=eta.dtype, device=eta.device)
     C = (((2 * eta.view(-1, 1) - 2 + i) * i).sum(1) *
          math.log(2)).view_as(eta)
     C += n * torch.sum(2 * torch.lgamma(i / 2 + 1) - torch.lgamma(i + 2))
     # need to assign values before registering as buffers to make argument validation work
     self.eta = eta
     self.C = C
     super(LKJPrior, self).__init__(batch_shape,
                                    event_shape,
                                    validate_args=validate_args)
     # now need to delete to be able to register buffer
     del self.eta, self.C
     self.register_buffer("eta", eta)
     self.register_buffer("C", C)
     self._log_transform = False
Example no. 10
 def __init__(self, x_dim, vec_dim):
     """
     n: mu dimension
     ndiag: number of learnable
     """
     Module.__init__(self)
     self.scale_vec = nn.Parameter(torch.zeros(vec_dim - x_dim))
Example no. 11
    def __init__(self, edge_model=None, node_model=None, global_model=None):
        Module.__init__(self)
        self.edge_model = edge_model
        self.node_model = node_model
        self.global_model = global_model

        self.reset_parameters()
Example no. 12
    def __init__(self,
                 state_dim,
                 action_dim,
                 phi_body=None,
                 actor_body=None,
                 critic_body=None):
        Module.__init__(self)
        # if phi_body is None: phi_body = DummyBody(state_dim)
        # if actor_body is None: actor_body = DummyBody(phi_body.feature_dim)
        # if critic_body is None: critic_body = DummyBody(phi_body.feature_dim)
        self.phi_body = phi_body
        self.actor_body = actor_body
        self.critic_body = critic_body
        self.fc_action_x = layer_init(nn.Linear(actor_body.feature_dim, action_dim), 1e-3)
        self.fc_action_y = layer_init(nn.Linear(actor_body.feature_dim, action_dim), 1e-3)
        self.fc_critic = layer_init(nn.Linear(critic_body.feature_dim, 1), 1e-3)

        # Both action heads contribute actor parameters (there is no single fc_action).
        self.actor_params = list(self.actor_body.parameters()) + list(self.fc_action_x.parameters()) + list(self.fc_action_y.parameters())
        self.critic_params = list(self.critic_body.parameters()) + list(self.fc_critic.parameters())
        self.phi_params = list(self.phi_body.parameters())

        # NOTE: ch, cw and lstm_out_size are assumed to be defined in the
        # enclosing scope (conv feature-map shape and LSTM hidden size).
        self.controller = nn.LSTMCell(ch * cw, lstm_out_size)

        # In/out features inferred to mirror the action heads above.
        self.fc_sigma_x = nn.Linear(actor_body.feature_dim, action_dim)
        self.fc_sigma_y = nn.Linear(actor_body.feature_dim, action_dim)

        self.fc_mu_x = nn.Linear(actor_body.feature_dim, action_dim)
        self.fc_mu_y = nn.Linear(actor_body.feature_dim, action_dim)

        self.std_y = nn.Parameter(torch.zeros(action_dim))
        self.std_x = nn.Parameter(torch.zeros(action_dim))
Example no. 13
    def __init__(self,
                 state_dim,
                 action_dim,
                 phi_body=None,
                 actor_body=None,
                 critic_body=None,
                 granular=False):
        Module.__init__(self)
        self.state_dim = state_dim
        self.action_dim = action_dim

        self.phi_body = phi_body
        feature_dim = self.phi_body.feature_dim

        self.actor_body = actor_body
        self.fc_action_cat = nn.Linear(feature_dim, state_dim[0])
        self.fc_action_loc = nn.Linear(feature_dim, action_dim)

        self.critic_body = critic_body
        self.fc_critic = layer_init(nn.Linear(feature_dim, 1), 1e-3)

        # auxiliary value for predicting Himts
        self.fc_auxilary = layer_init(nn.Linear(feature_dim, 1), 1e-3)

        self.std = nn.Parameter(torch.zeros(action_dim))
        self.predictor_module = None
        self.granular = granular
        self.apply(weights_init)
Example no. 14
    def __init__(self,
                 state_dim,
                 action_dim,
                 phi_body=None,
                 actor_body=None,
                 critic_body=None,
                 granular=False):
        Module.__init__(self)
        self.state_dim = state_dim
        self.action_dim = action_dim

        self.phi_body = phi_body
        feature_dim = self.phi_body.feature_dim

        self.actor_body = actor_body
        self.fc_action_cat = nn.Linear(feature_dim, state_dim[0])
        self.fc_action_loc = nn.Linear(feature_dim, action_dim)

        self.critic_body = critic_body
        self.fc_critic = layer_init(nn.Linear(feature_dim, 1), 1e-3)

        # auxiliary value for predicting Himts
        self.fc_auxilary = layer_init(nn.Linear(feature_dim, 1), 1e-3)

        self.std = nn.Parameter(torch.zeros(action_dim))
        # todo https://openai.com/blog/reinforcement-learning-with-prediction-based-rewards/
        self.predictor_module = None
        self.granular = granular
        self.__init_weights()
Example no. 15
 def __init__(self, nu, K, validate_args=False):
     TModule.__init__(self)
     if K.dim() < 2:
         raise ValueError("K must be at least 2-dimensional")
     n = K.shape[-1]
     if isinstance(nu, Number):
         nu = torch.tensor(float(nu))
     if torch.any(nu <= 0):
         raise ValueError("Must have nu > 0")
     self.n = torch.tensor(n, dtype=torch.long, device=nu.device)
     batch_shape = nu.shape
     event_shape = torch.Size([n, n])
     # normalization constant
     c = (nu + n - 1) / 2
     logdetK = torch.logdet(K) if K.dim() == 2 else torch.stack(
         [torch.logdet(k) for k in K])
     C = c * (logdetK - n * math.log(2)) - torch.mvlgamma(c, n)
     # need to assign values before registering as buffers to make argument validation work
     self.nu = nu
     self.K = K
     self.C = C
     super(InverseWishartPrior, self).__init__(batch_shape,
                                               event_shape,
                                               validate_args=validate_args)
     # now need to delete to be able to register buffer
     del self.nu, self.K, self.C
     self.register_buffer("nu", nu)
     self.register_buffer("K", K)
     self.register_buffer("C", C)
     self._log_transform = False
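
The shift c = (nu + n - 1)/2 means C matches the inverse-Wishart log-normalizer under the reparameterized degrees of freedom \nu' = \nu + n - 1:

    C = \frac{\nu'}{2}\log\det K - \frac{\nu' n}{2}\log 2 - \log\Gamma_n\!\left(\frac{\nu'}{2}\right)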
Example no. 16
 def __init__(self, in_channels, zdim, residual=False):
     Module.__init__(self)
     _ly = [dict(k=6, s=1), dict(k=6, s=1), dict(k=4, s=2), dict(k=4, s=1)]
     self.dec1 = DeConvNormRelu(zdim, 32, **_ly[3])
     self.dec2 = DeConvNormRelu(32, 16, **_ly[2])
     self.dec3 = DeConvNormRelu(16, 8, **_ly[1])
     self.dec4 = DeConvNormRelu(8, in_channels, **_ly[0])
Example no. 17
 def __init__(self, optimize=True):
     # must be before Module.__init__ since the field is used in __getattr__
     Module.__init__(self)
     self._set_optimized(optimize)
     self._parameters = OrderedParameterDict(self)
     self._buffers = OrderedBufferDict(self)
     self._modules = OrderedModuleDict(self)
Example no. 18
    def __init__(self,
                 enc,
                 dec,
                 action_size,
                 z_size,
                 target_size,
                 shared_size=None,
                 subgoal_size=None,
                 policy=None):
        Module.__init__(self)
        shared_size = shared_size if shared_size else 2 * action_size

        self.encode_state = enc
        self.decode_state = dec

        self.encode_target = MLP2(target_size, shared_size)
        self.encode_action = MLP2(action_size, shared_size)

        #self.generate_subgoal = MLP2(z_size, subgoal_size)
        # not needed
        # self.decode_action = MLP2(shared_size, action_size)

        #
        self.merge_sfz = MLP2(shared_size + z_size, z_size)
        self.merge_saz = MLP2(shared_size + z_size, z_size)
        self.pred_reward = MLP2(z_size, 1)

        self.policy = PolicySimple(
            z_size, shape=[action_size]) if policy is None else policy
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=-1)  # explicit dim avoids the deprecation warning
Example no. 19
 def __init__(self, loc, scale, validate_args=None, transform=None):
     TModule.__init__(self)
     LogNormal.__init__(self,
                        loc=loc,
                        scale=scale,
                        validate_args=validate_args)
     self._transform = transform
Example no. 20
    def __init__(self, embedding, full_name):
        Module.__init__(self)
        self.num_embeddings = getattr(embedding, "num_embeddings", None)
        self.embedding_dim = getattr(embedding, "embedding_dim", None)

        self.embedding = embedding
        self.full_name = full_name
Example no. 21
    def __init__(self, elements):
        Module.__init__(self)
        
        self.elements = elements

        for ele in self.elements:
            self.add_module(ele.name, ele)
Example no. 22
 def __init__(self, nIn, nOut, bias=False):
     Module.__init__(self)
     self.nIn = nIn
     self.nOut = nOut
     std = (2.0 / nIn)**0.5
     self.weight = Parameter(torch.Tensor(nIn, nOut).normal_(0, std))
     if bias:
         self.bias = Parameter(torch.Tensor(nOut).zero_())
     else:
         # Register a None bias so attribute access stays well-defined.
         self.register_parameter("bias", None)
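
The hand-rolled std = (2.0 / nIn)**0.5 in Example 22 is He/Kaiming-normal initialization with fan-in nIn. Because the weight is stored as (nIn, nOut), and PyTorch reads fan-in from the second dimension of a 2-D tensor, the equivalent built-in call needs mode="fan_out":

import torch
from torch import nn

nIn, nOut = 128, 64
w = torch.empty(nIn, nOut)
# For a (nIn, nOut) tensor, fan_out = nIn, so this reproduces
# std = (2.0 / nIn) ** 0.5 from the example above.
nn.init.kaiming_normal_(w, mode="fan_out", nonlinearity="relu")
print(w.std())  # ~= 0.125 for nIn = 128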
Example no. 23
    def __init__(self, *args: Union[dict, Callable, None], **kwargs):
        """
			Compute output of each module stored when forward() is called.
			Subclass of Dict[str, Module] and Module.
		"""
        args = [arg for arg in args if arg is not None]
        dict.__init__(self, *args, **kwargs)
        Module.__init__(self)
Example no. 24
 def __init__(self, input_quant: Union[ActQuantProxyProtocol,
                                       Type[Injector]],
              output_quant: Union[ActQuantProxyProtocol, Type[Injector]],
              return_quant_tensor: bool, **kwargs):
     Module.__init__(self)
     QuantInputOutputLayer.__init__(self, input_quant, output_quant,
                                    return_quant_tensor, default_update_aqi,
                                    default_update_aqi, **kwargs)
Example no. 25
    def __init__(self, config):
        Module.__init__(self)

        self.dim_out = config['e_outc']
        self.dim_in = config['e_inc'] + 2 * config['n_inc'] + config['u_inc']
        self.dim_w1, self.dim_w2 = config['w_inc'], self.dim_in // 2
        self.dim_h1 = config['edge_model_mlp1_hidden_sizes'][0]
        self.dim_h2 = config['edge_model_mlp1_hidden_sizes'][1]
Example no. 26
    def __init__(self, config):
        Module.__init__(self)

        inc = config['u_inc'] + config['n_outc'] + config['e_outc']
        hs1 = config['global_model_mlp1_hidden_sizes'][0]

        self.global_mlp = Seq(Linear(inc, hs1), LayerNorm(hs1), ReLU(),
                              Linear(hs1, config['u_outc']))
Example no. 27
 def __init__(self, loc, scale, validate_args=False, transform=None):
     TModule.__init__(self)
     Normal.__init__(self,
                     loc=loc,
                     scale=scale,
                     validate_args=validate_args)
     _bufferize_attributes(self, ("loc", "scale"))
     self._transform = transform
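
Examples 19 and 27 use the same double-init idiom: the prior is simultaneously a torch Module (so loc and scale can be buffered and moved across devices) and a torch Distribution. A minimal standalone sketch of the idiom, not the library's actual class:

import torch
from torch.distributions import Normal
from torch.nn import Module

class NormalPriorSketch(Module, Normal):  # illustrative name
    def __init__(self, loc, scale, validate_args=False):
        # Module.__init__ runs first so the parameter/buffer machinery
        # exists before Normal.__init__ assigns loc and scale.
        Module.__init__(self)
        Normal.__init__(self, loc=loc, scale=scale,
                        validate_args=validate_args)

prior = NormalPriorSketch(torch.zeros(2), torch.ones(2))
print(prior.log_prob(torch.zeros(2)))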
Example no. 28
 def __init__(self, state_dim, rewards=None, coefs=None):
     Module.__init__(self)
     self.state_dim = state_dim
     # Default to a fresh list rather than a shared mutable default argument.
     self.base_rewards = rewards if rewards is not None else []
     if coefs is not None:
         self.coefs = coefs
     else:
         self.coefs = np.ones(len(self.base_rewards))  # was len(list): the builtin, a bug
Example no. 29
 def __init__(self, name, L, K1, alpha=torch.tensor(0.0)):
     Module.__init__(self)
     #self.register_parameter('L',torch.nn.parameter.Parameter(length))
     #self.register_parameter('K1',torch.nn.parameter.Parameter(K1))
     self.L = L
     self.K1 = K1
     
     self.name = name
Example no. 30
 def __init__(self, in_channels, zdim, residual=False):
     Module.__init__(self)
     _ly = [dict(k=6, s=1), dict(k=6, s=1), dict(k=4, s=2), dict(k=4, s=1)]
     self.residual = residual
     self.conv1 = ConvNormRelu(in_channels, 8, **_ly[0])
     self.conv2 = ConvNormRelu(8, 16, **_ly[1])
     self.conv3 = ConvNormRelu(16, 32, **_ly[2])
     self.conv4 = ConvNormRelu(32, zdim, **_ly[3], batch_norm=False)
Example no. 31
 def __init__(self, in_size, out_size, activation=nn.ReLU):
     Module.__init__(self)
     self.l = nn.Sequential(
         nn.Linear(in_size, (in_size + out_size) // 2),
         activation(),
         nn.Linear((in_size + out_size) // 2, out_size),
         # activation()
     )
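
Assuming forward() in Example 31 simply applies self.l, the block maps in_size → (in_size + out_size) // 2 → out_size; a quick shape check of the same stack:

import torch
from torch import nn

in_size, out_size = 16, 4
l = nn.Sequential(
    nn.Linear(in_size, (in_size + out_size) // 2),
    nn.ReLU(),
    nn.Linear((in_size + out_size) // 2, out_size),
)
print(l(torch.randn(8, in_size)).shape)  # torch.Size([8, 4])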