def __init__(self, params):
    # transfer parameters to self
    for key, val in params.items():
        setattr(self, key, val)
    self.agent = Agent(params)
    self.save_path = None
    self.train_environment = env(params, 'train')     # loaded train data here
    self.dev_test_environment = env(params, 'dev')    # loaded dev data here
    self.test_test_environment = env(params, 'test')  # loaded test data here
    self.test_environment = self.dev_test_environment
    self.rev_relation_vocab = self.train_environment.grapher.rev_relation_vocab
    self.rev_entity_vocab = self.train_environment.grapher.rev_entity_vocab
    self.max_hits_at_10 = 0
    self.disc_size = 5
    self.ePAD = self.entity_vocab['PAD']
    self.rPAD = self.relation_vocab['PAD']
    # optimize
    self.baseline = ReactiveBaseline(l=self.Lambda)
    self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
    self.input_dir = params['data_input_dir']
    self.disc_embedding_size = 2 * params['embedding_size']
    self.discriminator = Discriminator(self.disc_size, self.disc_embedding_size)
    self.num_rollouts = params['num_rollouts']
    self.num_iter = params['total_iterations']
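A minimal, self-contained sketch of the setattr-based "transfer parameters to self" idiom used in the constructor above; the class and parameter names here are hypothetical and only illustrate the pattern.

    class TrainerConfigDemo:
        """Copies every entry of a params dict onto the instance as an attribute."""

        def __init__(self, params):
            # transfer parameters to self, exactly as in the constructor above
            for key, val in params.items():
                setattr(self, key, val)


    if __name__ == '__main__':
        # hypothetical parameter values, for illustration only
        demo = TrainerConfigDemo({'embedding_size': 50, 'num_rollouts': 20, 'Lambda': 0.02})
        print(demo.embedding_size, demo.num_rollouts, demo.Lambda)  # -> 50 20 0.02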
def __init__(self, params):
    # hyperparameters and vocabulary sizes
    self.batch_size = params['batch_size']
    self.num_rollouts = params['num_rollouts']
    self.action_vocab_size = len(params['relation_vocab'])
    self.entity_vocab_size = len(params['entity_vocab'])
    self.embedding_size = params['embedding_size']
    # environments for training and evaluation
    self.train_env = env(params, 'train')
    self.test_env = env(params, 'test')
    self.eval_every = params['eval_every']
    # optimization settings
    self.learning_rate = params['learning_rate_judge']
    self.total_iteration = params['total_iterations']
    self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
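For reference, a hypothetical params dict covering only the keys this constructor reads; the keys come from the code above, while the values are placeholders rather than the project's defaults.

    # Hypothetical example values; only the keys are taken from the constructor above.
    params = {
        'batch_size': 128,
        'num_rollouts': 20,
        'relation_vocab': {'PAD': 0, 'NO_OP': 1},   # mapping relation -> id
        'entity_vocab': {'PAD': 0, 'UNK': 1},       # mapping entity -> id
        'embedding_size': 50,
        'eval_every': 100,
        'learning_rate_judge': 1e-3,
        'total_iterations': 2000,
    }
    print(sorted(params.keys()))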
def __init__(self, params):
    # transfer parameters to self
    for key, val in params.items():
        setattr(self, key, val)
    # create the agent once, outside the parameter-transfer loop
    self.agent = Agent(params)
    self.save_path = None
    self.train_environment = env(params, 'train')
    self.dev_test_environment = env(params, 'dev')
    self.test_test_environment = env(params, 'test')
    self.test_environment = self.dev_test_environment
    self.rev_relation_vocab = self.train_environment.grapher.rev_relation_vocab
    self.rev_entity_vocab = self.train_environment.grapher.rev_entity_vocab
    self.max_hits_at_10 = 0
    self.ePAD = self.entity_vocab['PAD']
    self.rPAD = self.relation_vocab['PAD']
    # optimize
    self.baseline = ReactiveBaseline(l=self.Lambda)
    self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
def __init__(self, params):
    # transfer parameters to self
    for key, val in params.items():
        setattr(self, key, val)
    self.agent = Agent(params)
    self.save_path = None
    self.train_environment = env(params, 'train')
    self.dev_test_environment = env(params, 'dev')
    self.test_test_environment = env(params, 'test')
    self.test_environment = self.dev_test_environment
    self.rev_relation_vocab = self.train_environment.grapher.rev_relation_vocab
    self.rev_entity_vocab = self.train_environment.grapher.rev_entity_vocab
    self.max_hits_at_10 = 0
    self.ePAD = self.entity_vocab['PAD']
    self.rPAD = self.relation_vocab['PAD']
    self.global_step = 0
    self.decaying_beta = tf.keras.optimizers.schedules.ExponentialDecay(
        self.beta, decay_steps=200, decay_rate=0.90, staircase=True)
    # optimize
    self.baseline = ReactiveBaseline(l=self.Lambda)
    # self.optimizer = tf.compat.v1.train.AdamOptimizer(self.learning_rate)
    self.optimizer = tf.keras.optimizers.Adam(self.learning_rate)
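A small, self-contained sketch of how the ExponentialDecay schedule configured above behaves; the initial value 0.02 is a placeholder, not the project's actual beta.

    import tensorflow as tf

    # Same schedule shape as above: multiply by 0.90 every 200 steps (staircase).
    decaying_beta = tf.keras.optimizers.schedules.ExponentialDecay(
        0.02, decay_steps=200, decay_rate=0.90, staircase=True)  # 0.02 is a placeholder initial beta

    for step in (0, 199, 200, 400, 1000):
        # Keras schedules are callables: passing the global step returns the decayed value.
        print(step, float(decaying_beta(step)))

With staircase=True the value stays constant within each 200-step window and drops by a factor of 0.90 at every window boundary, which is how the decayed beta would be looked up from self.global_step during training.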