def __init__(self, modelfilename, conf, model_name='dcgan', gen_input='z:0', gen_output='gen_/tanh:0', gen_loss='reduced_mean:0', z_dim=100, batch_size=64):
    """Wrap a pre-built GAN, exposing its generator tensors for completion/inpainting.

    Parameters
    ----------
    modelfilename : checkpoint path (unused in this constructor; presumably
        loaded elsewhere -- TODO confirm).
    conf : configuration object; must expose ``lambda_p`` (prior-loss weight).
    model_name, gen_input, gen_output, gen_loss : tensor-name knobs for the
        old name-lookup path (now commented out); unused by the current
        ``gan.GAN``-based path but kept for backward compatibility.
    z_dim : dimensionality of the latent vector ``z``.
    batch_size : number of latent samples drawn per batch.
    """
    self.conf = conf
    # BUG FIX: was `print(config)` -- `config` is undefined in this scope;
    # the parameter is named `conf`. (Leftover "lala"/"aha" debug prints removed.)
    print(conf)
    self.args = train.configure()
    self.batch_size = batch_size
    self.z_dim = z_dim
    # Original tensor-name lookup path, kept for reference:
    # self.gi = self.graph.get_tensor_by_name(model_name+'/'+gen_input)
    # self.go = self.graph.get_tensor_by_name(model_name+'/'+gen_output)
    # self.gl = self.graph.get_tensor_by_name(model_name+'/'+gen_loss)
    # BUG FIX: was `gan.GAN(args)` -- bare `args` is undefined here; the
    # configure() result was just stored as self.args.
    self.gcgan = gan.GAN(self.args)
    # BUG FIX: were `gcgan.z` / `gcgan.gen_output` / `gcgan.G_loss` -- the
    # bare name `gcgan` is undefined; the instance lives on self.gcgan.
    self.gi = self.gcgan.z          # generator latent input
    self.go = self.gcgan.gen_output  # generated image tensor
    self.gl = self.gcgan.G_loss      # generator loss tensor
    # Per-sample image shape, dropping the batch dimension.
    self.image_shape = self.go.shape[1:].as_list()
    # BUG FIX: was `config.lambda_p` -- `config` is undefined; assume the
    # passed-in conf was meant. TODO confirm it isn't self.args.lambda_p.
    self.lamb = self.conf.lambda_p
    # BUG FIX: `self.graph` was never assigned (only referenced by the
    # commented-out lookup above), so Session(graph=self.graph) would raise
    # AttributeError. Bind the default graph, which gan.GAN presumably built
    # into -- TODO confirm GAN does not create its own tf.Graph.
    self.graph = tf.get_default_graph()
    self.sess = tf.Session(graph=self.graph)
    # Initial latent batch, standard normal.
    self.z = np.random.randn(self.batch_size, self.z_dim)
# NOTE(review): this fragment appears to be the interior of a larger
# evaluation function/loop whose header is outside this chunk -- the
# reconstruction below keeps every token identical, only restoring layout.
for i in range(batch_size):
    example_id = batch_map['example_ids'][i]
    tokens = sentences[i].tolist()
    # Map token ids back to word strings.
    words = [idx2word[idx] for idx in tokens]
    if length == 2:
        # Two-word sentence: the only binary tree is the pair itself.
        o = dict(example_id=example_id, tree=(words[0], words[1]))
    elif length == 1:
        # Single-word sentence: the tree is just the word.
        o = dict(example_id=example_id, tree=words[0])
    # NOTE(review): if `length` is neither 1 nor 2, `o` is undefined (or
    # stale) here -- presumably this path is only reached for length <= 2;
    # confirm against the (unseen) enclosing guard.
    print(json.dumps(o))
    # NOTE(review): as the last statement of this loop, `continue` is a
    # no-op; in the original it likely skipped the trainer.step/parse path
    # below from inside an outer batch loop -- confirm intended structure.
    continue
# Run the model without updating weights or computing loss, then parse.
trainer.step(batch_map, train=False, compute_loss=False)
trees = parse_predictor.parse_batch(batch_map)
for ii, tr in enumerate(trees):
    example_id = batch_map['example_ids'][ii]
    # Recover the word strings for this sentence and graft them onto the
    # predicted tree's leaves before emitting one JSON object per example.
    s = [idx2word[idx] for idx in sentences[ii].tolist()]
    tr = replace_leaves(tr, s)
    o = dict(example_id=example_id, tree=tr)
    print(json.dumps(o))

if __name__ == '__main__':
    # Standard CLI entry point: build parser, parse args, configure, run.
    parser = argument_parser()
    options = parse_args(parser)
    configure(options)
    run(options)