def __init__(self, features, latent_dim, encoder, decoder, deterministic_path_drop_rate=0.5):
    """
    features - number of features in the model
    latent_dim - the latent dimension of the model
    encoder - dictionary containing instantiation parameters for the ResidualAttentionEncoder module
    decoder - dictionary containing instantiation parameters for the CondtionalResidualAttentionEncoder module
    deterministic_path_drop_rate - dropout rate applied to the deterministic path
    """
    super().__init__()
    self.embedder = nn.Embedding(MAX_VALUE, features)
    self.encoder = ResidualAttentionEncoder(**encoder)
    # Latent path: a second encoder followed by a Normal latent node.
    self._latent_encoder = nn.ModuleList([
        ResidualAttentionEncoder(**encoder),
        NormalNode(features, latent_dim)
    ])
    self.z_to_c = nn.Linear(latent_dim, latent_dim * 155)
    self.decoder = CondtionalResidualAttentionEncoder(**decoder)
    self.logits = FeedForwardGELU(features, MAX_VALUE)
    self.drop = nn.Dropout(deterministic_path_drop_rate)
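# A hedged instantiation sketch for the module above. Only the conditional
# encoder's signature appears in this file, so the AttentionLayer keys and the
# plain ResidualAttentionEncoder arguments below are assumptions, and
# `VAEModel` stands in for whatever the enclosing class is actually named.
attention_layer = {'features': 64, 'n_heads': 8}             # hypothetical keys
encoder = {'features': 64, 'attention_layer': attention_layer,
           'max_len': 200, 'n_layers': 3}                    # assumed signature
decoder = {'features': 64, 'c_features': 32,
           'attention_layer': attention_layer,
           'max_len': 200, 'n_layers': 3}
model = VAEModel(features=64, latent_dim=32, encoder=encoder, decoder=decoder)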
def __init__(self, features, c_features, attention_layer, max_len=200, n_layers=3):
    """
    features - the number of features per parameter
    c_features - the number of side conditioning features per batch item
    attention_layer - a dictionary containing instantiation parameters for the AttentionLayer module
    max_len - the maximum needed size of the positional encodings
    n_layers - number of layers for the module to use
    """
    super().__init__()
    self.layers = nn.ModuleList(
        [AttentionLayer(**attention_layer) for _ in range(n_layers)]
    )
    positional_encoding = position_encoding_init(max_len, features)
    self.c_layers = nn.ModuleList(
        [FeedForwardGELU(c_features, features * 2) for _ in range(n_layers)]
    )
    self.p2x = nn.Linear(features, features * 2)
    self.register_buffer('positional_encoding', positional_encoding)
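# A hedged sketch of how the `features * 2` conditioning outputs produced by
# `c_layers` (and by `p2x`) are often consumed: split into a scale and a shift
# (FiLM-style modulation). Whether this module applies them exactly this way
# depends on its forward pass, which is not shown here.
import torch

def film_modulate(x, cond):
    # x:    (batch, seq_len, features)
    # cond: (batch, features * 2), e.g. the output of one c_layers block
    scale, shift = cond.unsqueeze(1).chunk(2, dim=-1)   # each (batch, 1, features)
    return x * (1 + scale) + shift                      # broadcast over seq_len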
def __init__(self, features, latent_dim, encoder, decoder, num_flows=3):
    """
    features - number of features in the model
    latent_dim - the latent dimension of the model
    encoder - dictionary containing instantiation parameters for the ResidualAttentionEncoder module
    decoder - dictionary containing instantiation parameters for the CondtionalResidualAttentionEncoder module
    num_flows - the number of flows for the TriangularSylvesterFlow module
    """
    super().__init__()
    self.embedder = nn.Embedding(MAX_VALUE, features)
    self.encoder = ResidualAttentionEncoder(**encoder)
    self._latent_encoder = nn.ModuleList([
        ResidualAttentionEncoder(**encoder),
        TriangularSylvesterFlow(features, latent_dim, num_flows)
    ])
    self.z_to_c = nn.Linear(latent_dim, latent_dim * 155)
    self.decoder = CondtionalResidualAttentionEncoder(**decoder)
    self.logits = FeedForwardGELU(features, MAX_VALUE)
    self.n_features = features
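# Hedged sketch: Linear(latent_dim, latent_dim * 155) suggests the sampled
# latent z is expanded into 155 per-position conditioning vectors for the
# decoder. The actual reshape lives in the (unshown) forward pass, so treat
# this as an assumption; the dimensions below are placeholders.
import torch
import torch.nn as nn

batch_size, latent_dim = 4, 32
z_to_c = nn.Linear(latent_dim, latent_dim * 155)
z = torch.randn(batch_size, latent_dim)                  # stand-in for a sample
c = z_to_c(z).view(batch_size, 155, latent_dim)          # one vector per position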
def __init__(self, features, latent_dim, encoder, decoder, deterministic_path_drop_rate=0.5, num_flows=3):
    """
    features - number of features in the model
    latent_dim - the latent dimension of the model
    encoder - dictionary containing instantiation parameters for the ResidualAttentionEncoder module
    decoder - dictionary containing instantiation parameters for the CondtionalResidualAttentionEncoder module
    deterministic_path_drop_rate - dropout rate applied to the deterministic path
    num_flows - the number of flows for the TriangularSylvesterFlow module
    """
    super().__init__()
    self.embedder = nn.Embedding(MAX_VALUE, features)
    self.encoder = ResidualAttentionEncoder(**encoder)
    self._latent_encoder = nn.ModuleList([
        ResidualAttentionEncoder(**encoder),
        TriangularSylvesterFlow(features, latent_dim, num_flows)
    ])
    self.z_to_c = nn.Linear(latent_dim, latent_dim * 155)
    self.decoder = CondtionalResidualAttentionEncoder(**decoder)
    self.logits = FeedForwardGELU(features, MAX_VALUE)
    self.drop = nn.Dropout(deterministic_path_drop_rate)
    self.n_features = features
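# A hedged reading of `deterministic_path_drop_rate`: nn.Dropout applied to the
# deterministic encoder's output randomly zeroes features during training, so
# the decoder cannot rely solely on the deterministic path and must also use
# the latent code. The exact placement is decided by the forward pass, which
# is not shown; shapes below are placeholders.
import torch
import torch.nn as nn

drop = nn.Dropout(0.5)
h_det = torch.randn(4, 155, 64)    # hypothetical deterministic encoder output
h_det = drop(h_det)                # zeroes entries only while in train() mode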
def __init__(self, features, c_features, attention_layer, max_len=200, n_layers=3):
    """
    features - the number of features per parameter
    c_features - the number of side conditioning features per batch item
    attention_layer - a dictionary containing instantiation parameters for the AttentionLayer module
    max_len - the maximum needed size of the positional encodings
    n_layers - number of layers for the module to use
    """
    super().__init__()
    self.layers = nn.ModuleList(
        [AttentionLayer(**attention_layer) for _ in range(n_layers)]
    )
    positional_encoding = position_encoding_init(max_len, features)
    self.c_layers = nn.ModuleList(
        [FeedForwardGELU(c_features, features * 2) for _ in range(n_layers)]
    )
    self.p2x = nn.Linear(features, features * 2)
    self.register_buffer('positional_encoding', positional_encoding)
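# `position_encoding_init` is not defined in this file; below is a minimal
# sketch of the standard sinusoidal encoding (Vaswani et al., 2017) matching
# the (max_len, features) buffer registered above, assuming an even feature
# count. The repo's actual implementation may differ.
import math
import torch

def position_encoding_init(max_len, features):
    position = torch.arange(max_len, dtype=torch.float).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, features, 2, dtype=torch.float)
                         * (-math.log(10000.0) / features))
    pe = torch.zeros(max_len, features)
    pe[:, 0::2] = torch.sin(position * div_term)   # even indices: sine
    pe[:, 1::2] = torch.cos(position * div_term)   # odd indices: cosine
    return pe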