# ClientModel.__init__ for a word-embedding text classifier.
def __init__(self, seed, lr, seq_len, num_classes, n_hidden, emb_arr=None, cfg=None):
    self.seq_len = seq_len
    self.num_classes = num_classes
    self.n_hidden = n_hidden
    # load the pretrained word-embedding vocabulary (index map and token list)
    _, self.indd, vocab = get_word_emb_arr(VOCAB_DIR)
    self.vocab_size = len(vocab)
    print('vocab_size:', self.vocab_size)
    self.model_name = os.path.abspath(__file__)
    if emb_arr is not None:  # explicit None check: truth-testing a numpy array raises
        self.emb_arr = emb_arr
    # with a FedProx config, train with the proximal optimizer; guard the cfg=None default
    if cfg is not None and cfg.fedprox:
        super(ClientModel, self).__init__(
            seed, lr, optimizer=PerturbedGradientDescent(lr, cfg.fedprox_mu))
    else:
        super(ClientModel, self).__init__(seed, lr)
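# A minimal usage sketch for the constructor above. `cfg` only needs `fedprox`
# and `fedprox_mu` attributes, so a SimpleNamespace stands in for the repo's
# real config object here; the hyperparameter values are illustrative, not the
# repo's defaults.
from types import SimpleNamespace

cfg = SimpleNamespace(fedprox=True, fedprox_mu=0.01)  # hypothetical config values
model = ClientModel(seed=0, lr=0.0003, seq_len=25, num_classes=2,
                    n_hidden=100, cfg=cfg)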
# ClientModel.__init__ for a multi-layer recurrent language model.
def __init__(self, seed, lr, seq_len, n_hidden, num_layers,
             keep_prob=1.0, max_grad_norm=5, init_scale=0.1, cfg=None):
    self.seq_len = seq_len
    self.n_hidden = n_hidden
    self.num_layers = num_layers
    self.keep_prob = keep_prob          # dropout keep probability
    self.max_grad_norm = max_grad_norm  # gradient-clipping threshold
    self.model_name = os.path.abspath(__file__)
    # initialize vocabulary
    self.vocab, self.vocab_size, self.unk_symbol, self.pad_symbol = self.load_vocab()
    print('vocab_size: {}'.format(self.vocab_size))
    self.initializer = tf.random_uniform_initializer(-init_scale, init_scale)
    # with a FedProx config, train with the proximal optimizer; guard the cfg=None default
    if cfg is not None and cfg.fedprox:
        super(ClientModel, self).__init__(
            seed, lr, optimizer=PerturbedGradientDescent(lr, cfg.fedprox_mu))
    else:
        super(ClientModel, self).__init__(seed, lr)
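# The constructor above expects self.load_vocab() to return
# (vocab, vocab_size, unk_symbol, pad_symbol). A hypothetical sketch of such a
# method, assuming a one-token-per-line vocabulary file; the file format, the
# VOCAB_PATH constant, and the '<unk>'/'<pad>' token names are all assumptions,
# not the repo's actual implementation.
def load_vocab(self):
    with open(VOCAB_PATH) as f:  # VOCAB_PATH: assumed vocabulary file location
        tokens = [line.strip() for line in f if line.strip()]
    vocab = {tok: i for i, tok in enumerate(tokens)}
    unk_symbol = vocab.get('<unk>', len(vocab))      # assumed special tokens
    pad_symbol = vocab.get('<pad>', len(vocab) + 1)
    return vocab, len(vocab), unk_symbol, pad_symbol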
# ClientModel.__init__ for a plain classifier (no text-specific state).
def __init__(self, seed, lr, num_classes, cfg=None):
    self.num_classes = num_classes
    self.model_name = os.path.abspath(__file__)
    # with a FedProx config, train with the proximal optimizer; guard the cfg=None default
    if cfg is not None and cfg.fedprox:
        super(ClientModel, self).__init__(
            seed, lr, optimizer=PerturbedGradientDescent(lr, cfg.fedprox_mu))
    else:
        super(ClientModel, self).__init__(seed, lr)
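# All three constructors swap in PerturbedGradientDescent when cfg.fedprox is
# set. Per the FedProx formulation, that optimizer descends the proximal
# objective F_k(w) + (mu/2) * ||w - w_global||^2, i.e. each step adds a pull
# toward the global weights. A minimal numpy sketch of one such step follows;
# it illustrates the update rule only, not the repo's TensorFlow optimizer,
# and the function and argument names are hypothetical.
import numpy as np

def perturbed_gradient_step(weights, grads, global_weights, lr, mu):
    """w <- w - lr * (grad + mu * (w - w_global)), applied per weight tensor."""
    return [w - lr * (g + mu * (w - wg))
            for w, g, wg in zip(weights, grads, global_weights)]

# Tiny demo: with mu > 0 the step is biased toward the global weights.
w, g, wg = [np.zeros(3)], [np.ones(3)], [np.full(3, 0.5)]
print(perturbed_gradient_step(w, g, wg, lr=0.1, mu=0.01))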