def __init__(self, args, dim_in, dim_out, cond_dim):
        """Set up this layer's parameters, either directly or via a hypernet.

        When ``args.layer_cond == 'hyper'`` every parameter is emitted by a
        HyperNet conditioned on a ``cond_dim``-sized input; otherwise the
        parameters are created directly from the precomputed shape list.
        """
        super(OdeLayer, self).__init__()
        self.layer_cond = args.layer_cond
        self.set_param_shapes(args, dim_in, dim_out, cond_dim)

        if self.layer_cond == 'hyper':
            # Total number of scalars the hypernetwork must output: the
            # hypernet's final layer width is appended to its dim string.
            n_params = sum(np.prod(shape) for shape in self.list_param_shapes)
            self.hyper_net = HyperNet(cond_dim,
                                      args.hyper_dims + '-' + str(n_params),
                                      args.nonlinearity)
        else:
            self.create_param_from_shapes(self.list_param_shapes)
        weights_init(self)
Ejemplo n.º 2
0
 def __init__(self, x_dim, y_dim, hidden_dims, nonlinearity, trainable=False):
     """Embedding net for inputs of (possibly) unequal dims x_dim and y_dim.

     ``self.mm`` is a linear map from the smaller of the two dimensions to
     the larger one; the parent network is sized for 2 * max(x_dim, y_dim).
     """
     super(SizeBalancedEmbedNet, self).__init__(max(x_dim, y_dim) * 2,
                                                hidden_dims, nonlinearity,
                                                trainable)
     self.x_dim = x_dim
     self.y_dim = y_dim
     # Equivalent to the original if/else: both branches map the smaller
     # dimension to the larger (a square map when the dims are equal).
     self.mm = nn.Linear(min(x_dim, y_dim), max(x_dim, y_dim))
     weights_init(self)
Ejemplo n.º 3
0
 def __init__(self, prior_embed_dim, ob_dim, hidden_dims, nonlinearity, time_length=1.0):
     """Two-headed MLP predictor over a (prior embed, observation embed) pair.

     Observations are first projected to ``prior_embed_dim``; both heads
     consume the 2 * prior_embed_dim concatenation (presumably of the prior
     embedding and the projected observation — confirm against the caller).
     """
     super(TimePredNet, self).__init__()
     self.time_length = time_length
     self.ob2embed = nn.Linear(ob_dim, prior_embed_dim)

     # Both heads share every hyperparameter except the final activation:
     # softplus gives a non-negative output, sigmoid an output in (0, 1).
     head_kwargs = dict(input_dim=prior_embed_dim * 2,
                        hidden_dims=hidden_dims,
                        nonlinearity=nonlinearity)
     self.st_pred = MLP(act_last='softplus', **head_kwargs)
     self.dur_pred = MLP(act_last='sigmoid', **head_kwargs)
     weights_init(self)
    def __init__(self, input_dim, hidden_dims, nonlinearity, act_last=None):
        """Build a stack of Linear layers with a per-layer activation.

        ``hidden_dims`` is a '-'-separated string of layer widths, e.g.
        "64-64". Activations are looked up in the project-level
        NONLINEARITIES table; when ``act_last`` is given it replaces the
        activation that follows the final layer.
        """
        super(MLP, self).__init__()
        self.act_last = act_last

        widths = tuple(int(w) for w in hidden_dims.split("-"))
        dims = (input_dim,) + widths

        # One Linear per consecutive (in, out) pair of widths, plus the
        # default activation after each layer.
        linears = [nn.Linear(d_in, d_out)
                   for d_in, d_out in zip(dims[:-1], dims[1:])]
        acts = [NONLINEARITIES[nonlinearity] for _ in widths]
        if act_last is not None:
            acts[-1] = NONLINEARITIES[act_last]

        self.output_size = dims[-1]
        self.layers = nn.ModuleList(linears)
        self.activation_fns = nn.ModuleList(acts)
        weights_init(self)
 def __init__(self, input_dim, hidden_dims, nonlinearity):
     """Thin wrapper around MLP that also activates after the final layer."""
     super(HyperNet, self).__init__()
     # Reuse the hidden nonlinearity as the output activation as well.
     self.mlp = MLP(input_dim=input_dim,
                    hidden_dims=hidden_dims,
                    nonlinearity=nonlinearity,
                    act_last=nonlinearity)
     weights_init(self)
 def __init__(self, dim, num):
     """Holds ``num`` learnable weight vectors of size ``dim``."""
     super(LogisticReg, self).__init__()
     # (num, dim) parameter matrix; torch.Tensor leaves it uninitialized,
     # so weights_init below is responsible for filling in the values.
     self.w = Parameter(torch.Tensor(num, dim))
     self.dim = dim
     weights_init(self)