Example #1
    def __init__(self, save, sess):
        self.sess = sess

        # load the saved config
        with open(os.path.join(save, 'config.json')) as f:
            config = read_config(json.load(f), chars_vocab=True)
        self.model = Model(config, True)

        # restore the saved model
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(save)
        saver.restore(self.sess, ckpt.model_checkpoint_path)
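A minimal usage sketch for this constructor, matching how Example #2 below instantiates it; the save directory path is a placeholder:

import tensorflow as tf

sess = tf.InteractiveSession()
predictor = BayesianPredictor('save_dir', sess)  # 'save_dir' must hold config.json and a checkpoint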
Example #2
def plot(clargs):
    sess = tf.InteractiveSession()
    predictor = BayesianPredictor(clargs.save, sess)

    with open(os.path.join(clargs.save, 'config.json')) as f:
        config = read_config(json.load(f), chars_vocab=True)

    # Plot for individual evidences
    for ev in config.evidence:
        print(ev.name)
        with open(clargs.input_file[0], 'rb') as f:
            deriveAndScatter(f, predictor, [ev])

    # Plot with all Evidences
    with open(clargs.input_file[0], 'rb') as f:
        deriveAndScatter(f, predictor, list(config.evidence))

    with open(clargs.input_file[0], 'rb') as f:
        useAttributeAndScatter(f, 'b2')
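A hedged sketch of the CLI wiring that could produce clargs here; the flag names are assumptions inferred from the attributes plot() reads (clargs.save, clargs.input_file):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--save', type=str, required=True,
                        help='directory containing config.json and the checkpoint')
    parser.add_argument('input_file', type=str, nargs=1,
                        help='data file handed to the scatter helpers')
    plot(parser.parse_args())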
Example #3
    def __init__(self, save, sess):

        with open(os.path.join(save, 'config.json')) as f:
            config = read_config(json.load(f), chars_vocab=True)
        assert config.model == 'lle', 'Trying to load different model implementation: ' + config.model

        config.batch_size = 5
        self.config = config
        self.sess = sess

        infer = True

        self.inputs = [ev.placeholder(config) for ev in self.config.evidence]
        self.nodes = tf.placeholder(
            tf.int32, [config.batch_size, config.decoder.max_ast_depth])
        self.edges = tf.placeholder(
            tf.bool, [config.batch_size, config.decoder.max_ast_depth])

        # targets: node sequence shifted left by one
        targets = tf.concat(
            [self.nodes[:, 1:],
             tf.zeros([config.batch_size, 1], dtype=tf.int32)],
            axis=1)

        ev_data = self.inputs
        nodes = tf.transpose(self.nodes)
        edges = tf.transpose(self.edges)

        ###############################
        with tf.variable_scope('Embedding'):
            emb = tf.get_variable(
                'emb', [config.decoder.vocab_size, config.decoder.units])

        with tf.variable_scope("Encoder"):

            self.encoder = BayesianEncoder(config, ev_data, infer)
            samples_1 = tf.random_normal(
                [config.batch_size, config.latent_size],
                mean=0.,
                stddev=1.,
                dtype=tf.float32)

            self.psi_encoder = self.encoder.psi_mean + tf.sqrt(
                self.encoder.psi_covariance) * samples_1

        # setup the reverse encoder.
        with tf.variable_scope("Reverse_Encoder"):
            embAPI = tf.get_variable('embAPI', [
                config.reverse_encoder.vocab_size, config.reverse_encoder.units
            ])
            embRT = tf.get_variable(
                'embRT',
                [config.evidence[4].vocab_size, config.reverse_encoder.units])
            embFS = tf.get_variable(
                'embFS',
                [config.evidence[5].vocab_size, config.reverse_encoder.units])
            self.reverse_encoder = BayesianReverseEncoder(
                config, embAPI, nodes, edges, ev_data[4], embRT, ev_data[5],
                embFS)
            samples_2 = tf.random_normal(
                [config.batch_size, config.latent_size],
                mean=0.,
                stddev=1.,
                dtype=tf.float32)

            self.psi_reverse_encoder = self.reverse_encoder.psi_mean + tf.sqrt(
                self.reverse_encoder.psi_covariance) * samples_2

        # setup the decoder with psi as the initial state
        with tf.variable_scope("Decoder"):
            lift_w = tf.get_variable(
                'lift_w', [config.latent_size, config.decoder.units])
            lift_b = tf.get_variable('lift_b', [config.decoder.units])
            initial_state = tf.nn.xw_plus_b(self.psi_reverse_encoder,
                                            lift_w,
                                            lift_b,
                                            name="Initial_State")
            self.decoder = BayesianDecoder(config, emb, initial_state, nodes,
                                           edges)

        with tf.variable_scope("RE_Decoder"):
            # RE
            emb_RE = config.evidence[4].emb * 0.0
            # emb_RE = tf.get_variable('emb_RE', [config.evidence[4].vocab_size, config.evidence[4].units])

            lift_w_RE = tf.get_variable(
                'lift_w_RE', [config.latent_size, config.evidence[4].units])
            lift_b_RE = tf.get_variable('lift_b_RE',
                                        [config.evidence[4].units])

            initial_state_RE = tf.nn.xw_plus_b(self.psi_reverse_encoder,
                                               lift_w_RE,
                                               lift_b_RE,
                                               name="Initial_State_RE")

            input_RE = tf.transpose(
                tf.reverse_v2(tf.zeros_like(ev_data[4]), axis=[1]))
            output = SimpleDecoder(config, emb_RE, initial_state_RE, input_RE,
                                   config.evidence[4])

            projection_w_RE = tf.get_variable(
                'projection_w_RE',
                [config.evidence[4].units, config.evidence[4].vocab_size])
            projection_b_RE = tf.get_variable('projection_b_RE',
                                              [config.evidence[4].vocab_size])
            logits_RE = tf.nn.xw_plus_b(output.outputs[-1], projection_w_RE,
                                        projection_b_RE)

            labels_RE = tf.one_hot(tf.squeeze(ev_data[4]),
                                   config.evidence[4].vocab_size,
                                   dtype=tf.float32)
            loss_RE = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels_RE, logits=logits_RE)

            cond = tf.not_equal(tf.reduce_sum(self.encoder.psi_mean, axis=1),
                                0)
            # cond = tf.reshape( tf.tile(tf.expand_dims(cond, axis=1) , [1,config.evidence[5].max_depth]) , [-1] )
            self.loss_RE = tf.reduce_mean(
                tf.where(cond, loss_RE, tf.zeros(cond.shape)))

        with tf.variable_scope("FS_Decoder"):
            # FS
            emb_FS = config.evidence[5].emb
            # emb_FS = tf.get_variable('emb_FS', [config.evidence[5].vocab_size, config.evidence[5].units])
            lift_w_FS = tf.get_variable(
                'lift_w_FS', [config.latent_size, config.evidence[5].units])
            lift_b_FS = tf.get_variable('lift_b_FS',
                                        [config.evidence[5].units])

            initial_state_FS = tf.nn.xw_plus_b(self.psi_reverse_encoder,
                                               lift_w_FS,
                                               lift_b_FS,
                                               name="Initial_State_FS")

            input_FS = tf.transpose(tf.reverse_v2(ev_data[5], axis=[1]))
            self.decoder_FS = SimpleDecoder(config, emb_FS, initial_state_FS,
                                            input_FS, config.evidence[5])

            output = tf.reshape(tf.concat(self.decoder_FS.outputs, 1),
                                [-1, self.decoder_FS.cell1.output_size])
            logits_FS = tf.matmul(output, self.decoder_FS.projection_w_FS
                                  ) + self.decoder_FS.projection_b_FS

            # logits_FS = output
            targets_FS = tf.reverse_v2(tf.concat(
                [tf.zeros_like(ev_data[5][:, -1:]), ev_data[5][:, :-1]],
                axis=1),
                                       axis=[1])

            # self.gen_loss_FS = tf.contrib.seq2seq.sequence_loss(logits_FS, target_FS,
            #                                       tf.ones_like(target_FS, dtype=tf.float32))
            cond = tf.not_equal(tf.reduce_sum(self.encoder.psi_mean, axis=1),
                                0)
            cond = tf.reshape(
                tf.tile(tf.expand_dims(cond, axis=1),
                        [1, config.evidence[5].max_depth]), [-1])
            cond = tf.where(cond, tf.ones(cond.shape), tf.zeros(cond.shape))

            self.gen_loss_FS = seq2seq.sequence_loss(
                [logits_FS], [tf.reshape(targets_FS, [-1])], [cond])

        # get the decoder outputs and compute the loss
        with tf.name_scope("Loss"):
            output = tf.reshape(tf.concat(self.decoder.outputs, 1),
                                [-1, self.decoder.cell1.output_size])
            logits = tf.matmul(
                output, self.decoder.projection_w) + self.decoder.projection_b
            ln_probs = tf.nn.log_softmax(logits)

            # 1. generation loss: log P(Y | Z)
            cond = tf.not_equal(tf.reduce_sum(self.encoder.psi_mean, axis=1),
                                0)
            cond = tf.reshape(
                tf.tile(tf.expand_dims(cond, axis=1),
                        [1, config.decoder.max_ast_depth]), [-1])
            cond = tf.where(cond, tf.ones(cond.shape), tf.zeros(cond.shape))

            self.gen_loss = seq2seq.sequence_loss([logits],
                                                  [tf.reshape(targets, [-1])],
                                                  [cond])

            #KL_cond = tf.not_equal(tf.reduce_sum(self.encoder.psi_mean, axis=1) , 0)

            self.loss = self.gen_loss + 1 / 32 * self.loss_RE + 8 / 32 * self.gen_loss_FS

        probY = -1 * self.loss + self.get_multinormal_lnprob(
            self.psi_reverse_encoder) - self.get_multinormal_lnprob(
                self.psi_reverse_encoder, self.reverse_encoder.psi_mean,
                self.reverse_encoder.psi_covariance)
        EncA, EncB = self.calculate_ab(self.encoder.psi_mean,
                                       self.encoder.psi_covariance)
        RevEncA, RevEncB = self.calculate_ab(
            self.reverse_encoder.psi_mean, self.reverse_encoder.psi_covariance)

        ###############################

        countValid = tf.cast(
            tf.count_nonzero(tf.not_equal(tf.reduce_sum(self.nodes, axis=1),
                                          0)), tf.float32)
        cond = tf.not_equal(tf.reduce_sum(self.nodes, axis=1), 0)
        self.RevEncA = tf.reduce_sum(tf.where(cond, RevEncA,
                                              tf.zeros_like(RevEncA)),
                                     axis=0,
                                     keepdims=True) / countValid
        self.RevEncB = tf.reduce_sum(tf.where(cond, RevEncB,
                                              tf.zeros_like(RevEncB)),
                                     axis=0,
                                     keepdims=True) / countValid

        self.EncA = tf.reduce_mean(EncA, axis=0, keepdims=True)
        self.EncB = tf.reduce_mean(EncB, axis=0, keepdims=True)

        self.probY = tf.reduce_mean(probY, axis=0, keepdims=True)

        # restore the saved model
        tf.global_variables_initializer().run()
        all_vars = tf.global_variables()
        saver = tf.train.Saver(all_vars)

        ckpt = tf.train.get_checkpoint_state(save)
        saver.restore(self.sess, ckpt.model_checkpoint_path)

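The helpers self.get_multinormal_lnprob and self.calculate_ab are not part of this snippet. A plausible sketch of the first, assuming it is the log-density of a diagonal Gaussian summed over the latent dimensions (the defaults then give a standard normal prior), which is consistent with both call sites above:

import numpy as np
import tensorflow as tf

def get_multinormal_lnprob(self, x, mu=0., cov=1.):
    # assumed helper: diagonal-Gaussian log-density with mean mu and
    # per-dimension variance cov, summed over the latent dimensions
    return -0.5 * tf.reduce_sum(
        tf.log(2. * np.pi * cov) + tf.square(x - mu) / cov, axis=1)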
Example #4
def train(clargs):
    config_file = clargs.config if clargs.continue_from is None \
                                else os.path.join(clargs.continue_from, 'config.json')
    with open(config_file) as f:
        config = read_config(json.load(f), chars_vocab=clargs.continue_from)
    reader = Reader(clargs, config)

    jsconfig = dump_config(config)
    print(clargs)
    print(json.dumps(jsconfig, indent=2))
    with open(os.path.join(clargs.save, 'config.json'), 'w') as f:
        json.dump(jsconfig, fp=f, indent=2)

    model = Model(config)

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)
        tf.train.write_graph(sess.graph_def, clargs.save, 'model.pbtxt')
        tf.train.write_graph(sess.graph_def,
                             clargs.save,
                             'model.pb',
                             as_text=False)

        # restore model
        if clargs.continue_from is not None:
            ckpt = tf.train.get_checkpoint_state(clargs.continue_from)
            saver.restore(sess, ckpt.model_checkpoint_path)

        # training
        for i in range(config.num_epochs):
            reader.reset_batches()
            avg_loss = avg_evidence = avg_latent = avg_generation = 0
            for b in range(config.num_batches):
                start = time.time()

                # setup the feed dict
                ev_data, n, e, y = reader.next_batch()
                feed = {model.targets: y}
                for j, ev in enumerate(config.evidence):
                    feed[model.encoder.inputs[j].name] = ev_data[j]
                for j in range(config.decoder.max_ast_depth):
                    feed[model.decoder.nodes[j].name] = n[j]
                    feed[model.decoder.edges[j].name] = e[j]

                # run the optimizer
                loss, evidence, latent, generation, mean, covariance, _ \
                    = sess.run([model.loss,
                                model.evidence_loss,
                                model.latent_loss,
                                model.gen_loss,
                                model.encoder.psi_mean,
                                model.encoder.psi_covariance,
                                model.train_op], feed)
                end = time.time()
                avg_loss += np.mean(loss)
                avg_evidence += np.mean(evidence)
                avg_latent += np.mean(latent)
                avg_generation += generation
                step = i * config.num_batches + b
                if step % config.print_step == 0:
                    print(
                        '{}/{} (epoch {}), evidence: {:.3f}, latent: {:.3f}, generation: {:.3f}, '
                        'loss: {:.3f}, mean: {:.3f}, covariance: {:.3f}, time: {:.3f}'
                        .format(step,
                                config.num_epochs * config.num_batches, i,
                                np.mean(evidence), np.mean(latent), generation,
                                np.mean(loss), np.mean(mean),
                                np.mean(covariance), end - start))
            checkpoint_dir = os.path.join(clargs.save,
                                          'model{}.ckpt'.format(i))
            saver.save(sess, checkpoint_dir)
            print(
                'Model checkpointed: {}. Average for epoch evidence: {:.3f}, latent: {:.3f}, '
                'generation: {:.3f}, loss: {:.3f}'.format(
                    checkpoint_dir, avg_evidence / config.num_batches,
                    avg_latent / config.num_batches,
                    avg_generation / config.num_batches,
                    avg_loss / config.num_batches))
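A hypothetical CLI wiring for train(), inferred from the three attributes it reads (clargs.config, clargs.continue_from, clargs.save); the flag names are assumptions:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default=None,
                        help='config JSON used when training from scratch')
    parser.add_argument('--continue_from', type=str, default=None,
                        help='directory of a previous run to resume from')
    parser.add_argument('--save', type=str, required=True,
                        help='directory for config.json, graph dumps and checkpoints')
    train(parser.parse_args())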
Example #5
def index(clargs):
    # set clargs.continue_from = True while testing to continue from the previously saved config
    clargs.continue_from = None

    model = bayou.models.low_level_evidences.infer.BayesianPredictor

    # load the saved config
    with open(os.path.join(clargs.save, 'config.json')) as f:
        config = read_config(json.load(f), chars_vocab=True)
    config.batch_size = 500

    reader = Reader(clargs, config, infer=True)

    # Placeholders for tf data
    nodes_placeholder = tf.placeholder(reader.nodes.dtype, reader.nodes.shape)
    edges_placeholder = tf.placeholder(reader.edges.dtype, reader.edges.shape)
    targets_placeholder = tf.placeholder(reader.targets.dtype,
                                         reader.targets.shape)
    evidence_placeholder = [
        tf.placeholder(inp.dtype, inp.shape) for inp in reader.inputs
    ]

    # reset batches
    feed_dict = {fp: f for fp, f in zip(evidence_placeholder, reader.inputs)}
    feed_dict.update({nodes_placeholder: reader.nodes})
    feed_dict.update({edges_placeholder: reader.edges})
    feed_dict.update({targets_placeholder: reader.targets})

    dataset = tf.data.Dataset.from_tensor_slices(
        (nodes_placeholder, edges_placeholder, targets_placeholder,
         *evidence_placeholder))
    batched_dataset = dataset.batch(config.batch_size)
    iterator = batched_dataset.make_initializable_iterator()
    jsp = reader.js_programs

    with tf.Session() as sess:
        predictor = model(clargs.save, sess, config,
                          iterator)  # goes to infer.BayesianPredictor
        sess.run(iterator.initializer, feed_dict=feed_dict)
        infer_vars = {}

        allEvSigmas = predictor.get_ev_sigma()
        print(allEvSigmas)

        programs = []
        k = 69  # running index used to name the output files
        for j in range(config.num_batches):
            prob_Y, a1, b1, a2, b2 = predictor.get_all_params_inago()
            for i in range(config.batch_size):
                infer_vars = jsp[i]
                prog_json = deepcopy(jsp[j * config.batch_size + i])
                prog_json['a2'] = "%.3f" % a2[i].item()
                prog_json['b2'] = ["%.3f" % val.item() for val in b2[i]]
                prog_json['ProbY'] = "%.3f" % prob_Y[i].item()
                programs.append(prog_json)

            if (j + 1) % 200 == 0 or (j + 1) == config.num_batches:
                fileName = "Program_output_" + str(k) + ".json"
                k += 1
                print('\nWriting to {}...'.format(fileName), end='')
                with open(fileName, 'w') as f:
                    json.dump({'programs': programs}, fp=f, indent=2)

                # drop the accumulated chunk and reclaim memory
                del programs
                gc.collect()
                programs = []

    print('Batch Processing Completed')

    return infer_vars, config
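A small sketch of re-assembling the chunked Program_output_<k>.json files this function writes:

import glob
import json

programs = []
for fileName in sorted(glob.glob('Program_output_*.json')):
    with open(fileName) as f:
        programs.extend(json.load(f)['programs'])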
Example #6
def reload(clargs):

    config_file = 'savedFocusModel/config.json'
    clargs.continue_from = True

    with open(config_file) as f:
        config = read_config(json.load(f), chars_vocab=True)

    reader = Reader(clargs, config, dataIsThere=True)

    # Placeholders for tf data
    prog_ids_placeholder = tf.placeholder(reader.prog_ids.dtype,
                                          reader.prog_ids.shape)
    js_prog_ids_placeholder = tf.placeholder(reader.js_prog_ids.dtype,
                                             reader.js_prog_ids.shape)
    nodes_placeholder = tf.placeholder(reader.nodes.dtype, reader.nodes.shape)
    edges_placeholder = tf.placeholder(reader.edges.dtype, reader.edges.shape)
    targets_placeholder = tf.placeholder(reader.targets.dtype,
                                         reader.targets.shape)
    evidence_placeholder = [
        tf.placeholder(inp.dtype, inp.shape) for inp in reader.inputs
    ]
    # reset batches

    feed_dict = {fp: f for fp, f in zip(evidence_placeholder, reader.inputs)}
    feed_dict.update({prog_ids_placeholder: reader.prog_ids})
    feed_dict.update({js_prog_ids_placeholder: reader.js_prog_ids})
    feed_dict.update({nodes_placeholder: reader.nodes})
    feed_dict.update({edges_placeholder: reader.edges})
    feed_dict.update({targets_placeholder: reader.targets})

    dataset = tf.data.Dataset.from_tensor_slices(
        (prog_ids_placeholder, js_prog_ids_placeholder, nodes_placeholder,
         edges_placeholder, targets_placeholder, *evidence_placeholder))
    batched_dataset = dataset.batch(config.batch_size)
    iterator = batched_dataset.make_initializable_iterator()

    model = Model(config, iterator, bayou_mode=False)
    i = 0
    with tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                          allow_soft_placement=True)) as sess:
        writer = tf.summary.FileWriter(clargs.save)
        writer.add_graph(sess.graph)
        tf.global_variables_initializer().run()

        tf.train.write_graph(sess.graph_def, clargs.save, 'model.pbtxt')
        tf.train.write_graph(sess.graph_def,
                             clargs.save,
                             'model.pb',
                             as_text=False)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)

        # restore model
        if clargs.continue_from is not None:
            bayou_vars = get_var_list()['bayou_vars']
            old_saver = tf.train.Saver(bayou_vars, max_to_keep=None)
            ckpt = tf.train.get_checkpoint_state('savedFocusModelReDone')
            old_saver.restore(sess, ckpt.model_checkpoint_path)

            reverse_encoder_vars = get_var_list()['rev_encoder_vars']
            old_saver = tf.train.Saver(reverse_encoder_vars, max_to_keep=None)
            ckpt = tf.train.get_checkpoint_state('savedFocusModel')
            old_saver.restore(sess, ckpt.model_checkpoint_path)

        checkpoint_dir = os.path.join(clargs.save,
                                      'model{}.ckpt'.format(i + 1))
        saver.save(sess, checkpoint_dir)
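get_var_list() is not shown here. A hypothetical sketch, assuming it partitions the global variables by the top-level scopes used in Example #3 ('Reverse_Encoder' versus everything else), so the two checkpoints can be restored independently:

import tensorflow as tf

def get_var_list():
    # assumed helper: split variables by top-level scope name
    all_vars = tf.global_variables()
    return {
        'bayou_vars': [v for v in all_vars
                       if not v.name.startswith('Reverse_Encoder')],
        'rev_encoder_vars': [v for v in all_vars
                             if v.name.startswith('Reverse_Encoder')],
    }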
Example #7
def forward_pass(clargs):
    # set clargs.continue_from = True while testing to continue from the previously saved config
    clargs.continue_from = True

    with open(os.path.join(clargs.save, 'config.json')) as f:
        model_type = json.load(f)['model']

    if model_type == 'lle':
        model = bayou.models.low_level_evidences.infer.BayesianPredictor
    else:
        raise ValueError('Invalid model type in config: ' + model_type)

    # load the saved config
    with open(os.path.join(clargs.save, 'config.json')) as f:
        config = read_config(json.load(f), chars_vocab=True)

    reader = Reader(clargs, config, infer=True)

    # Placeholders for tf data
    nodes_placeholder = tf.placeholder(reader.nodes.dtype, reader.nodes.shape)
    edges_placeholder = tf.placeholder(reader.edges.dtype, reader.edges.shape)
    targets_placeholder = tf.placeholder(reader.targets.dtype,
                                         reader.targets.shape)
    evidence_placeholder = [
        tf.placeholder(inp.dtype, inp.shape) for inp in reader.inputs
    ]

    # reset batches

    feed_dict = {fp: f for fp, f in zip(evidence_placeholder, reader.inputs)}
    feed_dict.update({nodes_placeholder: reader.nodes})
    feed_dict.update({edges_placeholder: reader.edges})
    feed_dict.update({targets_placeholder: reader.targets})

    dataset = tf.data.Dataset.from_tensor_slices(
        (nodes_placeholder, edges_placeholder, targets_placeholder,
         *evidence_placeholder))
    batched_dataset = dataset.batch(config.batch_size)
    iterator = batched_dataset.make_initializable_iterator()
    jsp = reader.js_programs
    with tf.Session() as sess:
        predictor = model(clargs.save, sess, config,
                          iterator)  # goes to infer.BayesianPredictor
        # testing
        sess.run(iterator.initializer, feed_dict=feed_dict)
        infer_vars = {}

        allEvSigmas = predictor.get_ev_sigma()
        print(allEvSigmas)

        for j in range(config.num_batches):
            prob_Y, a1, b1, a2, b2 = predictor.get_all_params_inago()
            for i in range(config.batch_size):
                prog_id = j * config.batch_size + i
                infer_vars[prog_id] = {}
                infer_vars[prog_id]['a1'] = a1[i].round(decimals=2)
                infer_vars[prog_id]['a2'] = a2[i].round(decimals=2)
                infer_vars[prog_id]['b1'] = b1[i].round(decimals=2)
                infer_vars[prog_id]['b2'] = b2[i].round(decimals=2)
                infer_vars[prog_id]['ProbY'] = prob_Y[i].round(decimals=2)
                infer_vars[prog_id]['count_prog_ids'] = 1
                infer_vars[prog_id]['JS'] = jsp[prog_id]

            if (j + 1) % 1000 == 0:
                print('Completed Processing {}/{} batches'.format(
                    j + 1, config.num_batches))

    return infer_vars, config
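A minimal driver sketch, assuming clargs is built the same way as in the other examples:

infer_vars, config = forward_pass(clargs)
print('Inferred parameters for {} programs across {} batches'.format(
    len(infer_vars), config.num_batches))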