def __init__(self, frame_size, dim, q_levels, weight_norm):
    """Build a 1x1-conv MLP that maps frame features to (mu, log_sigma).

    Args:
        frame_size: number of input channels (samples per frame).
        dim: hidden channel width of the two inner conv layers.
        q_levels: number of quantization levels (stored for callers;
            not used directly by the layers here).
        weight_norm: if True, wrap each layer in weight normalization.
    """
    super().__init__()
    self.q_levels = q_levels

    # Input projection: frame_size -> dim, no bias (bias handled downstream).
    self.input = torch.nn.Conv1d(
        in_channels=frame_size,
        out_channels=dim,
        kernel_size=1,
        bias=False,
    )
    # Fixed: `init.kaiming_uniform` / `init.constant` (no trailing underscore)
    # are the deprecated aliases removed in modern PyTorch; the sibling
    # constructors in this file already use the in-place `_` variants.
    init.kaiming_uniform_(self.input.weight)
    if weight_norm:
        self.input = torch.nn.utils.weight_norm(self.input)

    self.hidden = torch.nn.Conv1d(
        in_channels=dim,
        out_channels=dim,
        kernel_size=1,
    )
    init.kaiming_uniform_(self.hidden.weight)
    init.constant_(self.hidden.bias, 0)
    if weight_norm:
        self.hidden = torch.nn.utils.weight_norm(self.hidden)

    self.output = torch.nn.Conv1d(
        in_channels=dim,
        out_channels=2,  # mu, log_sigma
        kernel_size=1,
    )
    # `lecun_uniform` is a module-level helper defined elsewhere in this file.
    lecun_uniform(self.output.weight)
    init.constant_(self.output.bias, 0)
    if weight_norm:
        self.output = torch.nn.utils.weight_norm(self.output)
def __init__(self, frame_size, dim, q_levels, weight_norm):
    """Build an embedding followed by a conv MLP producing q_levels logits.

    Args:
        frame_size: kernel size of the input conv (samples per frame).
        dim: hidden channel width of the inner layers.
        q_levels: number of quantization levels; also the embedding size
            and the number of output channels.
        weight_norm: if True, wrap each layer in weight normalization.
    """
    super().__init__()
    self.q_levels = q_levels

    # One-hot-like learned embedding: q_levels tokens -> q_levels-dim vectors.
    self.embedding = torch.nn.Embedding(self.q_levels, self.q_levels)

    # Input projection collapses a whole frame (kernel_size=frame_size).
    self.input = torch.nn.Conv1d(
        in_channels=q_levels,
        out_channels=dim,
        kernel_size=frame_size,
        bias=False,
    )
    init.kaiming_uniform_(self.input.weight)
    if weight_norm:
        self.input = torch.nn.utils.weight_norm(self.input)

    self.hidden = torch.nn.Conv1d(
        in_channels=dim,
        out_channels=dim,
        kernel_size=1,
    )
    init.kaiming_uniform_(self.hidden.weight)
    init.constant_(self.hidden.bias, 0)
    if weight_norm:
        self.hidden = torch.nn.utils.weight_norm(self.hidden)

    self.output = torch.nn.Conv1d(
        in_channels=dim,
        out_channels=q_levels,
        kernel_size=1,
    )
    # Fixed: `nn.lecun_uniform` does not exist in torch.nn and raised
    # AttributeError; call the module-level `lecun_uniform` helper defined
    # elsewhere in this file (as the other constructor already does).
    lecun_uniform(self.output.weight)
    init.constant_(self.output.bias, 0)
    if weight_norm:
        self.output = torch.nn.utils.weight_norm(self.output)
def __init__(self, frame_size, dim, q_levels, weight_norm):
    """Build an embedding followed by a Linear MLP producing q_levels logits.

    Linear-layer variant of the conv constructor: the input layer consumes a
    flattened frame of embedded samples (frame_size * q_levels features).

    Args:
        frame_size: number of samples per input frame.
        dim: hidden width of the inner linear layers.
        q_levels: number of quantization levels; also the embedding size
            and the output feature count.
        weight_norm: if True, wrap input/hidden/output in weight norm.
    """
    super().__init__()
    self.q_levels = q_levels
    self.frame_size = frame_size
    self.dim = dim

    # One-hot-like learned embedding: q_levels tokens -> q_levels-dim vectors.
    self.embedding = torch.nn.Embedding(self.q_levels, self.q_levels)
    self.last_out_shape = q_levels

    # Input projection over a flattened frame of embedded samples.
    self.input = torch.nn.Linear(
        self.frame_size * self.last_out_shape,
        self.dim,
        bias=False,
    )
    init.kaiming_uniform_(self.input.weight)
    if weight_norm:
        self.input = torch.nn.utils.weight_norm(self.input)

    self.hidden = torch.nn.Linear(self.dim, self.dim)
    init.kaiming_uniform_(self.hidden.weight)
    init.constant_(self.hidden.bias, 0)
    if weight_norm:
        self.hidden = torch.nn.utils.weight_norm(self.hidden)

    # NOTE(review): unlike the other layers, hidden_2 is never wrapped in
    # weight_norm even when the flag is set — confirm this is intentional.
    self.hidden_2 = torch.nn.Linear(self.dim, self.dim)
    init.kaiming_uniform_(self.hidden_2.weight)
    init.constant_(self.hidden_2.bias, 0)

    self.output = torch.nn.Linear(self.dim, self.q_levels)
    # Fixed: `nn.lecun_uniform` does not exist in torch.nn and raised
    # AttributeError; call the module-level `lecun_uniform` helper defined
    # elsewhere in this file (as the conv constructor already does).
    lecun_uniform(self.output.weight)
    init.constant_(self.output.bias, 0)
    if weight_norm:
        self.output = torch.nn.utils.weight_norm(self.output)