def main():
    """Run one alarm-network query through every inference strategy.

    Mode codes passed to ``Inference``:
      0 - exact inference by enumeration (no sampling)
      1 - prior sampling
      2 - rejection sampling
      3 - likelihood weighting
    Each strategy prints the first element of its ``infer`` result.
    """
    print("Enter input query (of the form '[<E,t> <J,t>][M, A]' ): ")
    # Query is hard-coded for now; restore `input_query = input()` to read interactively.
    input_query = '[<M,t> <J,f>][B, E]'

    net = AlarmNetwork()
    # NOTE(review): "psuedorandom" is misspelled at its definition site; kept as-is
    # so the call still resolves. Renamed the local so it no longer shadows the
    # stdlib `random` module.
    rng = psuedorandom()

    # Exact inference: sample count is irrelevant, so 0 is passed.
    print('Inference by Enumeration: ')
    exact_infer = Inference(net, 0, 0, rng)
    print(exact_infer.infer(input_query)[0])

    print("Enter sample count: ")
    # Sample count is hard-coded for now; restore `s = input()` to read interactively.
    sample_count = int(10000)

    # The three sampling strategies differ only in their mode code and banner.
    for mode, banner in (
        (1, 'Inference by prior sampling: '),
        (2, 'Inference by Rejection sampling: '),
        (3, 'Inference by likelihood sampling: '),
    ):
        print(banner)
        sampler = Inference(net, mode, sample_count, rng)
        print(sampler.infer(input_query)[0])
# Пример (Example) #2
# 0
# Split a precomputed distance mask into per-band boolean masks.
# NOTE(review): assumes `mask` encodes 3 = near, 2 = mid, 1 = far — confirm
# against wherever `mask` is built (not visible in this chunk).
bool_near = (mask == 3)
bool_mid = (mask == 2)
bool_far = (mask == 1)

# Cumulative regions: each band also contains every band closer than it,
# so "far" covers the whole annotated area.
cumulative_near = bool_near
cumulative_near_len = cumulative_near.sum()  # pixel count of the near region

cumulative_mid = np.logical_or(bool_near, bool_mid)
cumulative_mid_len = cumulative_mid.sum()

cumulative_far = np.logical_or(cumulative_mid, bool_far)
cumulative_far_len = cumulative_far.sum()

# Main loop: segment each frame and publish, per cumulative band, the fraction
# of pixels whose predicted class is 0.
# NOTE(review): if any cumulative_*_len is 0 the divisions below fail — confirm
# the mask always marks at least one pixel per band.
while True:
    # NOTE(review): cv2.VideoCapture.read() returns (ret, frame); `cap` here is
    # presumably a wrapper that returns the frame directly — confirm.
    frame = cap.read()
    seg = model.infer(frame)
    # Add a trailing channel axis so `seg` broadcasts against the HxW masks
    # and satisfies the (seg > 0).all(axis=2) test below.
    seg = np.expand_dims(seg, axis=2)

    # Fraction of each cumulative region classified as class 0
    # (class 0 presumably means free/traversable space — confirm with the model).
    near = float(np.logical_and(
        (seg == 0), cumulative_near).sum()) / cumulative_near_len
    mid = float(np.logical_and(
        (seg == 0), cumulative_mid).sum()) / cumulative_mid_len
    far = float(np.logical_and(
        (seg == 0), cumulative_far).sum()) / cumulative_far_len

    # Publish the three occupancy ratios as a ROS Float32MultiArray.
    obsPub.publish(Float32MultiArray(data=[near, mid, far]))

    alpha = 0.4  # blend weight, presumably for the overlay visualization below

    # Visualization: paint non-background pixels green on a copy of the frame.
    # (Loop body continues beyond this chunk.)
    overlay = frame.copy()
    overlay[np.where((seg > 0).all(axis=2))] = green
# Пример (Example) #3
# 0
def _finished_val_models_path(train_dir):
    """Path of the JSON bookkeeping file listing already-evaluated checkpoints."""
    return os.path.join(train_dir, "finished_val_models.json")


def _save_finished_val_models(train_dir, finished):
    """Persist the val-mode bookkeeping dict atomically enough for this use.

    Replaces the three copy-pasted open/dump/close sequences; the `with`
    statement guarantees the handle is closed even if json.dump raises.
    """
    with open(_finished_val_models_path(train_dir), 'w', encoding='utf-8') as f:
        json.dump(finished, f)


def _load_finished_val_models(train_dir):
    """Load the val-mode bookkeeping file, creating an empty one if absent."""
    try:
        with open(_finished_val_models_path(train_dir), 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        finished = {"finished_val_models": []}
        _save_finished_val_models(train_dir, finished)
        return finished


def main(unused_argv):
    """Entry point: dispatch to train / val / test according to FLAGS.mode.

    - train: build a Batcher over the training set and run `train`.
    - val:   poll the train checkpoint dir, evaluating each new checkpoint
             exactly once (progress persisted to finished_val_models.json so
             a restarted val job does not repeat work).
    - test:  single-pass inference with the checkpoint in FLAGS.test_model_dir.

    Raises:
        Exception: on leftover command-line flags, or a missing log dir in a
            non-train mode, or a missing test_model_dir in test mode.
        ValueError: on an unrecognized mode.
    """
    global config

    if len(unused_argv) != 1:  # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)

    print('Starting %s in %s mode...' % (FLAGS.exp_name, FLAGS.mode))

    # Each experiment gets its own subdirectory; only train mode may create it.
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
    if not os.path.exists(FLAGS.log_root):
        if FLAGS.mode == "train":
            os.makedirs(FLAGS.log_root)
        else:
            raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))

    # Collect only the model-relevant flags into an immutable HParams namedtuple.
    # NOTE(review): on newer TF versions FLAGS.__flags values are Flag objects
    # needing `.value`; this code assumes the older dict-of-values layout — confirm
    # against the pinned TF version.
    hparam_list = ['mode', 'lr', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm','hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps','max_bac_enc_steps', 'max_con_enc_steps', 'max_span_len', 'multi_hop_span_pre_mode', 'matching_layer', 'matching_gate']
    hps_dict = {key: val for key, val in FLAGS.__flags.items() if key in hparam_list}
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)
    tf.set_random_seed(111)  # fixed seed for reproducibility

    if hps.mode == 'train':
        batcher = Batcher(FLAGS.train_path, vocab, hps, single_pass=False)
        print("creating model...")
        model = Model(hps, vocab)
        train(model, batcher)

    elif hps.mode == 'val':
        train_dir = os.path.join(FLAGS.log_root, "train")
        # Inference decodes one example / one step at a time.
        hps = hps._replace(batch_size=1)
        infer_model_hps = hps._replace(max_dec_steps=1)

        finished_val_models = _load_finished_val_models(train_dir)

        while True:
            ckpt = tf.train.get_checkpoint_state(train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                for ckpt_path in list(ckpt.all_model_checkpoint_paths):
                    if ckpt_path in finished_val_models["finished_val_models"]:
                        print("Val_mode: %s already has been evaluated, skip it" % ckpt_path)
                        continue
                    print("Val_mode: start new eval %s" % ckpt_path)
                    batcher = Batcher(FLAGS.dev_path, vocab, hps, single_pass=True)
                    model = Model(infer_model_hps, vocab)
                    val_infer = Inference(model, batcher, vocab, ckpt_path)
                    val_infer.infer()
                    # Each eval builds a fresh graph; drop it before the next one.
                    tf.reset_default_graph()
                    # Record progress immediately so a crash doesn't redo this eval.
                    finished_val_models["finished_val_models"].append(ckpt_path)
                    _save_finished_val_models(train_dir, finished_val_models)
                    print("Val_mode: finished one eval %s" % ckpt_path)
                print("Val_mode: current iterations of all_model_checkpoint_paths are completed...")
                print("Val_mode: finished %d modes" % len(finished_val_models["finished_val_models"]))
                # One checkpoint per epoch is assumed; stop once all are evaluated.
                if len(finished_val_models["finished_val_models"]) == FLAGS.epoch_num:
                    print("All val is ended")
                    break
            else:
                print('Val_mode: wait train finish the first epoch...')
            time.sleep(60)  # poll interval while waiting for new checkpoints

    elif hps.mode == 'test':
        hps = hps._replace(batch_size=1)
        batcher = Batcher(FLAGS.test_path, vocab, hps, single_pass=True)
        infer_model_hps = hps._replace(max_dec_steps=1)
        model = Model(infer_model_hps, vocab)
        if FLAGS.test_model_dir is None:
            raise Exception("should appoint the test_model_dir")
        test_infer = Inference(model, batcher, vocab, FLAGS.test_model_dir)
        test_infer.infer()

    else:
        raise ValueError("The 'mode' flag must be one of train/val/test")