def __init__(self, args):
    super(RfillAutoreg, self).__init__()
    glorot_uniform(self)
    self.DECISION_MASK = torch.tensor(DECISION_MASK).to(args.device)
    self.STATE_TRANS = torch.LongTensor(STATE_TRANS).to(args.device)
    self.cell_type = args.cell_type
    self.vocab = deepcopy(RFILL_VOCAB)
    self.tok_start = self.vocab['|']
    self.tok_stop = self.vocab['eos']
    self.tok_pad = self.vocab['pad']
    assert self.tok_pad == 0
    # inverse vocab: token id -> token string
    self.inv_map = {v: k for k, v in self.vocab.items()}
    self.rnn_state_proj = args.rnn_state_proj
    self.rnn_layers = args.rnn_layers
    if self.rnn_state_proj:
        # project the context embedding into the decoder's initial RNN state
        self.ctx2h = MLP(args.embed_dim, [args.embed_dim * self.rnn_layers],
                         nonlinearity=args.act_func, act_last=args.act_func)
        if self.cell_type == 'lstm':
            self.ctx2c = MLP(args.embed_dim, [args.embed_dim * self.rnn_layers],
                             nonlinearity=args.act_func, act_last=args.act_func)
    if args.tok_type == 'embed':
        self.tok_embed = nn.Embedding(len(self.vocab), args.embed_dim)
        input_size = args.embed_dim
    elif args.tok_type == 'onehot':
        input_size = len(self.vocab)
        self.tok_embed = partial(self._get_onehot, vsize=input_size)
    if self.cell_type == 'lstm':
        self.rnn = nn.LSTM(input_size, args.embed_dim, self.rnn_layers, bidirectional=False)
    elif self.cell_type == 'gru':
        self.rnn = nn.GRU(input_size, args.embed_dim, self.rnn_layers, bidirectional=False)
    else:
        raise NotImplementedError
    self.out_pred = nn.Linear(args.embed_dim, len(self.vocab))
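# --- Hedged sketch (not from the original source) -----------------------------
# The ctx2h / ctx2c projections above emit a flat (batch, rnn_layers * embed_dim)
# tensor, while nn.LSTM / nn.GRU expect initial states shaped
# (rnn_layers, batch, embed_dim). A minimal reshape could look like the helper
# below; the name demo_flat_to_rnn_state is hypothetical and only illustrates
# the shape contract.
def demo_flat_to_rnn_state(flat_proj, rnn_layers, embed_dim):
    # flat_proj: (batch, rnn_layers * embed_dim) -> (rnn_layers, batch, embed_dim)
    batch_size = flat_proj.shape[0]
    return flat_proj.view(batch_size, rnn_layers, embed_dim).permute(1, 0, 2).contiguous()
# e.g. h0 = demo_flat_to_rnn_state(self.ctx2h(ctx_embed), self.rnn_layers, args.embed_dim)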
def __init__(self, args):
    super(RFillOneStepEditor, self).__init__()
    self.rnn_layers = args.rnn_layers
    self.rnn_state_proj = args.rnn_state_proj
    if self.rnn_state_proj:
        self.ctx2h = MLP(args.embed_dim, [args.embed_dim * self.rnn_layers], act_last='tanh')
        self.ctx2c = MLP(args.embed_dim, [args.embed_dim * self.rnn_layers], act_last='tanh')
    self.editor_loc = EditLocationPredictor(args)
    self.subexpr_sampler = RfillSubexprRnnSampler(args)
    self.update_cell = nn.LSTM(args.embed_dim * 2, args.embed_dim, self.rnn_layers)
def __init__(self, base_sampler, discrete_dim, n_choices, embed_dim):
    super(VarlenMultinomialSampler, self).__init__()
    self.discrete_dim = discrete_dim
    self.n_choices = n_choices
    self.base_sampler = base_sampler
    ctx_dim = self.get_context_dim()
    # heads for choosing an edit position, the new value at that position, and whether to stop
    self.pos_pred = MLP(ctx_dim, [embed_dim * 2, embed_dim * 2, discrete_dim])
    self.val_pred = MLP(ctx_dim + embed_dim, [embed_dim * 2] * 2 + [n_choices])
    self.stop_pred = MLP(ctx_dim, [embed_dim * 2, embed_dim * 2, 1])
    self.mod_pos_embed = nn.Embedding(discrete_dim, embed_dim)
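# --- Hedged sketch (not from the original source) -----------------------------
# One plausible way the heads above combine for a single edit step: pick a
# position from pos_pred, condition val_pred on that position's embedding, and
# read a stop probability from stop_pred. Assumes `sampler` is a constructed
# VarlenMultinomialSampler, `ctx` has shape (batch, ctx_dim), and torch is
# imported at module level; demo_one_edit_step is hypothetical.
def demo_one_edit_step(sampler, ctx):
    pos_logits = sampler.pos_pred(ctx)                                  # (batch, discrete_dim)
    pos = torch.distributions.Categorical(logits=pos_logits).sample()   # (batch,)
    pos_feat = sampler.mod_pos_embed(pos)                               # (batch, embed_dim)
    val_logits = sampler.val_pred(torch.cat([ctx, pos_feat], dim=-1))   # (batch, n_choices)
    val = torch.distributions.Categorical(logits=val_logits).sample()   # (batch,)
    stop_prob = torch.sigmoid(sampler.stop_pred(ctx))                   # (batch, 1)
    return pos, val, stop_prob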
def __init__(self, base_sampler, discrete_dim, embed_dim, learn_stop, mu0, device):
    super(MLPVarLenSampler, self).__init__(base_sampler, discrete_dim, embed_dim)
    self.device = device
    self.learn_stop = learn_stop
    self.mu0 = mu0
    if self.learn_stop:
        self.stop_pred = MLP(discrete_dim, [embed_dim * 2, embed_dim * 2, 1])
def __init__(self, base_sampler, discrete_dim, n_choices, embed_dim):
    super(MLPVarLenMultinomialSampler, self).__init__(base_sampler, discrete_dim, n_choices, embed_dim)
    self.input_tok_embed = nn.Embedding(n_choices, 4)
    self.pos_encode = PosEncoding(4)
    self.input_encode = MLP(self.discrete_dim * 4, [embed_dim * 2] + [embed_dim])
def __init__(self, n_choices, discrete_dim, embed_dim, act_last, f_scale):
    super(CondMLPScore, self).__init__()
    self.discrete_dim = discrete_dim
    tok_dim = 8
    self.input_tok_embed = nn.Embedding(n_choices, tok_dim)
    self.pos_encode = PosEncoding(tok_dim)
    self.f_scale = f_scale
    self.mlp = MLP(self.discrete_dim * tok_dim, [embed_dim * 2] * 3 + [1], act_last=act_last)
def __init__(self, args):
    super(BidirIOEmbed, self).__init__(args)
    self.vocab = {'unk': 0, 'eos': 1}
    for i, c in enumerate(STR_VOCAB):
        self.vocab[c] = i + 2
    self.tok_embed = nn.Embedding(len(self.vocab), args.embed_dim)
    self.lstm = nn.LSTM(args.embed_dim, args.embed_dim, 3, bidirectional=False)
    self.embed_merge = MLP(args.embed_dim * 2, [args.embed_dim], nonlinearity=args.act_func)
    self.device = args.device
def __init__(self, n_choices, discrete_dim, embed_dim):
    super(MLPSampler, self).__init__(n_choices, discrete_dim, embed_dim)
    self.init_h = nn.Parameter(torch.Tensor(1, embed_dim))
    glorot_uniform(self)
    # one MLP per prefix length i, mapping the first i coordinates to an embedding
    self.list_mods = nn.ModuleList([
        MLP(i, [embed_dim * 2, embed_dim * 2, embed_dim])
        for i in range(1, self.discrete_dim)
    ])
def __init__(self, args, n_choices, act_last, f_scale):
    super(CondRnnScore, self).__init__()
    self.pos_encode = PosEncoding(args.embed_dim)
    self.input_tok_embed = nn.Embedding(n_choices, args.embed_dim)
    self.lstm = nn.LSTM(args.embed_dim, args.embed_dim, args.rnn_layers,
                        bidirectional=True, batch_first=True)
    self.f_scale = f_scale
    self.mlp = MLP(2 * args.embed_dim, [args.embed_dim * 2] * 2 + [1], act_last=act_last)
def __init__(self, args):
    super(EditLocationPredictor, self).__init__()
    self.vocab = deepcopy(RFILL_VOCAB)
    self.tok_start = self.vocab['|']
    self.tok_constexpr = self.vocab['ConstStr']
    self.tok_subexpr = self.vocab['SubStr']
    self.tok_stop = self.vocab['eos']
    self.tok_pad = self.vocab['pad']
    self.tok_embed = nn.Embedding(len(self.vocab), args.embed_dim)
    self.rnn_layers = args.rnn_layers
    self.lstm = nn.LSTM(args.embed_dim, args.embed_dim, num_layers=self.rnn_layers,
                        batch_first=False, bidirectional=True)
    # project the context embedding into the bidirectional LSTM's initial states
    self.ctx2h = MLP(args.embed_dim, [args.embed_dim * 2 * self.rnn_layers], act_last='tanh')
    self.ctx2c = MLP(args.embed_dim, [args.embed_dim * 2 * self.rnn_layers], act_last='tanh')
    # per-position scores for delete / modify / insert, plus a stop score
    self.del_score = MLP(args.embed_dim * 2, [args.embed_dim * 2, 1])
    self.modify_score = MLP(args.embed_dim * 2, [args.embed_dim * 2, 1])
    self.insert_score = MLP(args.embed_dim * 2, [args.embed_dim * 2, 1])
    self.stop_score = MLP(args.embed_dim * 2, [args.embed_dim * 2, 1])
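# --- Hedged sketch (not from the original source) -----------------------------
# How the modules above could fit together: seed the bidirectional LSTM with the
# projected context, then score every token position with the per-location
# heads. Assumes `pred` is a constructed EditLocationPredictor, `tokens` is a
# (seq_len, batch) LongTensor of vocab indices, `ctx` is (batch, embed_dim), and
# embed_dim matches args.embed_dim; demo_score_locations is hypothetical.
def demo_score_locations(pred, tokens, ctx, embed_dim):
    batch_size = ctx.shape[0]
    # flat (batch, 2 * rnn_layers * embed_dim) -> (2 * rnn_layers, batch, embed_dim)
    h0 = pred.ctx2h(ctx).view(batch_size, 2 * pred.rnn_layers, embed_dim).permute(1, 0, 2).contiguous()
    c0 = pred.ctx2c(ctx).view(batch_size, 2 * pred.rnn_layers, embed_dim).permute(1, 0, 2).contiguous()
    feats, _ = pred.lstm(pred.tok_embed(tokens), (h0, c0))  # (seq_len, batch, 2 * embed_dim)
    return pred.del_score(feats), pred.modify_score(feats), pred.insert_score(feats)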
def __init__(self, input_dim, hidden_dims, scale=1.0, nonlinearity='elu',
             act_last=None, bn=False, dropout=-1, bound=-1):
    super(MLPScore, self).__init__()
    self.scale = scale
    self.bound = bound
    self.mlp = MLP(input_dim, hidden_dims, nonlinearity, act_last, bn, dropout)
def __init__(self, args):
    super(MLPIOEmbed, self).__init__(args)
    self.max_output_len = args.maxOutputLength
    self.vocab = {'unk': 0}
    for i, c in enumerate(STR_VOCAB):
        self.vocab[c] = i + 1
    if args.io_embed_type == 'normal':
        self.tok_embed = nn.Embedding(len(self.vocab), 4)
    else:
        self.tok_embed = MaskedEmbedding(len(self.vocab), 4, masked_token=self.vocab['unk'])
    # input: flattened 4-dim token embeddings over 3 * max_output_len positions
    self.embed_merge = MLP(4 * 3 * self.max_output_len, [args.embed_dim] * 5,
                           nonlinearity=args.act_func, act_last=args.act_func)
    self.device = args.device
def __init__(self, base_sampler, discrete_dim, embed_dim):
    super(MLPGibbsSampler, self).__init__()
    self.discrete_dim = discrete_dim
    self.base_sampler = base_sampler
    self.pos_pred = MLP(discrete_dim, [embed_dim * 2, embed_dim * 2, discrete_dim])
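# --- Hedged sketch (not from the original source) -----------------------------
# pos_pred maps the current discrete state to logits over coordinates, which
# suggests a Gibbs-style position proposal like the one below. Assumes `sampler`
# is a constructed MLPGibbsSampler, `x` is a (batch, discrete_dim) float tensor,
# and torch is imported at module level; demo_propose_position is hypothetical.
def demo_propose_position(sampler, x):
    pos_logits = sampler.pos_pred(x)                                    # (batch, discrete_dim)
    return torch.distributions.Categorical(logits=pos_logits).sample()  # (batch,)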
def __init__(self, n_choices, discrete_dim, embed_dim):
    super(AutoregSampler, self).__init__()
    self.discrete_dim = discrete_dim
    self.embed_dim = embed_dim
    self.out_pred = MLP(embed_dim, [embed_dim * 2, n_choices])
    self.baseline_pred = MLP(embed_dim, [embed_dim * 2, 1])
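# --- Hedged sketch (not from the original source) -----------------------------
# Both heads above read the same per-step hidden state: out_pred produces token
# logits, while baseline_pred yields a scalar that presumably acts as a learned
# baseline (e.g. for variance reduction in a REINFORCE-style update). Assumes
# `sampler` is a constructed AutoregSampler and `h` is (batch, embed_dim);
# demo_step_outputs is hypothetical.
def demo_step_outputs(sampler, h):
    logits = sampler.out_pred(h)         # (batch, n_choices)
    baseline = sampler.baseline_pred(h)  # (batch, 1)
    return logits, baseline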
def __init__(self, n_choices, discrete_dim, embed_dim):
    super(CondAutoregSampler, self).__init__()
    self.discrete_dim = discrete_dim
    self.embed_dim = embed_dim
    self.out_pred = MLP(embed_dim, [embed_dim * 2, n_choices])
    self.pos_encode = PosEncoding(embed_dim)