Example #1
File: train.py Project: nayname/npi
def train_addition(epochs, verbose=0):
    """
    Instantiates an Addition Core, NPI, then loads and fits model to data.

    :param epochs: Number of epochs to train for.
    """
    # Load Data
    with open(DATA_PATH, 'rb') as f:
        data = pickle.load(f)

    # Initialize Addition Core
    print('Initializing Addition Core!')
    core = AdditionCore()

    # Initialize NPI Model
    print('Initializing NPI Model!')
    npi = NPI(core, CONFIG, LOG_PATH, verbose=verbose)

    # Initialize TF Saver
    saver = tf.train.Saver()

    # Initialize TF Session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Start Training
    for ep in range(1, epochs + 1):
        for i in range(len(data)):
            # Reset NPI States
            npi.reset_state()

            # Setup Environment
            in1, in2, steps = data[i]
            scratch = ScratchPad(in1, in2)
            x, y = steps[:-1], steps[1:]
            # Run through steps, and fit!
            step_def_loss, step_arg_loss, term_acc, prog_acc, = 0.0, 0.0, 0.0, 0.0
            arg0_acc, arg1_acc, arg2_acc, num_args = 0.0, 0.0, 0.0, 0
            for j in range(len(x)):
                (prog_name, prog_in_id), arg, term = x[j]
                (_, prog_out_id), arg_out, term_out = y[j]
                # print(x[j], y[j])
                # Update Environment if MOVE or WRITE
                if prog_in_id == MOVE_PID or prog_in_id == WRITE_PID:
                    scratch.execute(prog_in_id, arg)

                # Get Environment, Argument Vectors
                env_in = [scratch.get_env()]
                arg_in, arg_out = [get_args(arg, arg_in=True)
                                   ], get_args(arg_out, arg_in=False)
                prog_in, prog_out = [[prog_in_id]], [prog_out_id]
                term_out = [1] if term_out else [0]

                # Fit!
                if prog_out_id == MOVE_PID or prog_out_id == WRITE_PID:
                    loss, t_acc, p_acc, a_acc, _ = sess.run(
                        [
                            npi.arg_loss, npi.t_metric, npi.p_metric,
                            npi.a_metrics, npi.arg_train_op
                        ],
                        feed_dict={
                            npi.env_in: env_in,
                            npi.arg_in: arg_in,
                            npi.prg_in: prog_in,
                            npi.y_prog: prog_out,
                            npi.y_term: term_out,
                            npi.y_args[0]: [arg_out[0]],
                            npi.y_args[1]: [arg_out[1]],
                            npi.y_args[2]: [arg_out[2]]
                        })
                    # print({npi.prg_in: prog_in, npi.y_prog: prog_out, npi.y_term: term_out})
                    # print({npi.y_args[0]: [arg_out[0]], npi.y_args[1]: [arg_out[1]], npi.y_args[2]: [arg_out[2]]})
                    step_arg_loss += loss
                    term_acc += t_acc
                    prog_acc += p_acc
                    arg0_acc += a_acc[0]
                    arg1_acc += a_acc[1]
                    arg2_acc += a_acc[2]
                    num_args += 1
                else:
                    loss, t_acc, p_acc, _ = sess.run(
                        [
                            npi.default_loss, npi.t_metric, npi.p_metric,
                            npi.default_train_op
                        ],
                        feed_dict={
                            npi.env_in: env_in,
                            npi.arg_in: arg_in,
                            npi.prg_in: prog_in,
                            npi.y_prog: prog_out,
                            npi.y_term: term_out
                        })
                    step_def_loss += loss
                    term_acc += t_acc
                    prog_acc += p_acc

            print "Epoch {0:02d} Step {1:03d} Default Step Loss {2:05f}, " \
                  "Argument Step Loss {3:05f}, Term: {4:03f}, Prog: {5:03f}, A0: {6:03f}, " \
                  "A1: {7:03f}, A2: {8:03}"\
                .format(ep, i, step_def_loss / len(x), step_arg_loss / len(x), term_acc / len(x),
                        prog_acc / len(x), arg0_acc / num_args, arg1_acc / num_args,
                        arg2_acc / num_args)

        # Save Model
        saver.save(sess, 'tasks/addition/log/model.ckpt')
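The x, y = steps[:-1], steps[1:] pairing above shifts the recorded execution trace by one position, so each step is trained to predict the step that follows it. A minimal, self-contained sketch of that pairing (the trace entries here are made up for illustration, not real project data):

# Each input step is paired with the next step in the trace as its target.
steps = ['ADD', 'ADD1', 'WRITE', 'CARRY', 'LSHIFT']
x, y = steps[:-1], steps[1:]
for inp, target in zip(x, y):
    print(inp, '->', target)   # ADD -> ADD1, ADD1 -> WRITE, ...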
Example #2
def train_vqa(epochs, verbose=0):
    """
    Instantiates a VQA Core, NPI, then loads and fits model to data.

    :param epochs: Number of epochs to train for.
    """
    # Load Data
    with open(DATA_PATH, 'rb') as f:
        dataT = pickle.load(f)
        data = dataT[:80]
        test = dataT[80:100]
    with open(DATA_PATH2, 'rb') as f:
        dataT2 = pickle.load(f)
        test_out = dataT2[:20]
    with open(DATA_PATH3, 'rb') as f:
        dataT3 = pickle.load(f)
        test_out2 = dataT3[:20]

    # Initialize VQA Core
    print('Initializing VQA Core!')
    core = VQAcore()

    # Initialize NPI Model
    print('Initializing NPI Model!')
    npi = NPI(core, CONFIG, LOG_PATH, verbose=verbose)

    # Initialize TF Saver
    saver = tf.train.Saver()

    # Initialize TF Session
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Start Training
        removed = {}
        errors = {}
        # for learning curve
        count = 0
        # tot_loss_def = []
        # tot_loss_arg = []
        # test_loss_def = []
        # test_loss_arg = []
        # test1_loss_def = []
        # test1_loss_arg = []
        # test2_loss_def = []
        # test2_loss_arg = []
        test_term_acct = []
        test_prog_acct = []
        test_arg_acct = []
        train_term_acct = []
        train_prog_acct = []
        train_arg_acct = []
        test1_term_acct = []
        test1_prog_acct = []
        test1_arg_acct = []
        test2_term_acct = []
        test2_prog_acct = []
        test2_arg_acct = []

        step = []
        for ep in range(1, epochs + 1):
            removed[ep] = 0
            for i in range(len(data)):
                # Reset NPI States
                npi.reset_state()

                # Setup Environment
                _, imgid, qid, qtype, steps = data[i]
                scene = Scene(imgid)
                x, y = steps[:-1], steps[1:]
                if len(x) == 0 or len(y) == 0:
                    removed[ep] += 1
                    continue
                count += 1

                # Run through steps, and fit!
                step_def_loss, step_arg_loss, term_acc, prog_acc, = 0.0, 0.0, 0.0, 0.0
                arg0_acc, arg1_acc, arg2_acc, num_args = 0.0, 0.0, 0.0, 0
                for j in range(len(x)):
                    (prog_name, prog_in_id), arg, term = x[j]
                    (_, prog_out_id), arg_out, term_out = y[j]

                    # Update Environment if the program is executable (EX_PROG_PID)
                    if prog_in_id in EX_PROG_PID:
                        scene.execute(prog_in_id, arg)

                    # Get Environment, Argument Vectors
                    env_in = [scene.get_env()]
                    # env_in = [np.asarray(list(env_in.values())).transpose().flatten()]
                    arg_in, arg_out = [get_args(arg, arg_in=True)
                                       ], get_args(arg_out, arg_in=False)
                    prog_in, prog_out = [[prog_in_id]], [prog_out_id]
                    term_out = [1] if term_out else [0]

                    # Fit!
                    if prog_out_id in PAR_PROG_PID:
                        loss, t_acc, p_acc, a_acc, _ = sess.run(
                            [
                                npi.arg_loss, npi.t_metric, npi.p_metric,
                                npi.a_metrics, npi.arg_train_op
                            ],
                            feed_dict={
                                npi.env_in: env_in,
                                npi.arg_in: arg_in,
                                npi.prg_in: prog_in,
                                npi.y_prog: prog_out,
                                npi.y_term: term_out,
                                npi.y_args[0]: [arg_out[0]],
                                npi.y_args[1]: [arg_out[1]],
                                npi.y_args[2]: [arg_out[2]]
                            })
                        step_arg_loss += loss
                        term_acc += t_acc
                        prog_acc += p_acc
                        arg0_acc += a_acc[0]
                        arg1_acc += a_acc[1]
                        arg2_acc += a_acc[2]
                        num_args += 1

                    else:
                        loss, t_acc, p_acc, _ = sess.run(
                            [
                                npi.default_loss, npi.t_metric, npi.p_metric,
                                npi.default_train_op
                            ],
                            feed_dict={
                                npi.env_in: env_in,
                                npi.arg_in: arg_in,
                                npi.prg_in: prog_in,
                                npi.y_prog: prog_out,
                                npi.y_term: term_out
                            })
                        step_def_loss += loss
                        term_acc += t_acc
                        prog_acc += p_acc

                try:
                    print ("Epoch {0:02d} Step {1:03d} Default Step Loss {2:05f}, " \
                    "Argument Step Loss {3:05f}, Term: {4:03f}, Prog: {5:03f}, A0: {6:03f}, " \
                    "A1: {7:03f}, A2: {8:03}".format(ep, i, step_def_loss / len(x), step_arg_loss / len(x), term_acc / len(x),
                            prog_acc / len(x), arg0_acc / num_args, arg1_acc / num_args,
                            arg2_acc / num_args))
                    if count % 10 == 0:
                        # Save Model
                        tmp = stat.mean([
                            arg0_acc / num_args, arg1_acc / num_args,
                            arg2_acc / num_args
                        ])
                        saver.save(sess, 'tasks/vqa/log/model.ckpt')
                        train_arg_acct.append(tmp / len(x))
                        train_prog_acct.append(prog_acc / len(x))
                        train_term_acct.append(term_acc / len(x))
                        step.append(count)
                        a, b, c = test_vqa(test, npi, core, sess)
                        test_arg_acct.append(c)
                        test_prog_acct.append(b)
                        test_term_acct.append(a)
                        a, b, c = test_vqa(test_out, npi, core, sess)
                        test1_arg_acct.append(c)
                        test1_prog_acct.append(b)
                        test1_term_acct.append(a)
                        a, b, c = test_vqa(test_out2, npi, core, sess)
                        test2_arg_acct.append(c)
                        test2_prog_acct.append(b)
                        test2_term_acct.append(a)
                except Exception:
                    print('main print failed')

            # Save Model
            saver.save(sess, 'tasks/vqa/log/model.ckpt')
        # print learning curve
        print('train term,prog,arg: ', train_term_acct[-1], train_prog_acct[-1],
              train_arg_acct[-1])
        print('test_inside term,prog,arg: ', test_term_acct[-1],
              test_prog_acct[-1], test_arg_acct[-1])
        print('test_out term,prog,arg: ', test1_term_acct[-1],
              test1_prog_acct[-1], test1_arg_acct[-1])
        print('test_out2 term,prog,arg: ', test2_term_acct[-1],
              test2_prog_acct[-1], test2_arg_acct[-1])

        plt.figure(figsize=(20, 5))
        plt.plot(step, train_term_acct, 'b', label='train_query_term')
        plt.plot(step, test_term_acct, 'm', label='test_query_term')
        plt.plot(step, test1_term_acct, 'c', label='test_count_term')
        plt.plot(step, test2_term_acct, 'k', label='test_exist_term')
        plt.legend()
        plt.xticks(step)
        plt.xlabel('step')
        plt.ylabel('acc')
        plt.title('learning curve for termination')
        plt.savefig(SAVE_PATH + 'acc_query_term')
        plt.close()
        plt.figure(figsize=(20, 5))
        plt.plot(step, train_prog_acct, 'b', label='train_query_prog')
        plt.plot(step, test_prog_acct, 'm', label='test_query_prog')
        plt.plot(step, test1_prog_acct, 'c', label='test_count_prog')
        plt.plot(step, test2_prog_acct, 'k', label='test_exist_prog')
        plt.legend()
        plt.xticks(step)
        plt.xlabel('step')
        plt.ylabel('acc')
        plt.title('learning curve for program')
        plt.savefig(SAVE_PATH + 'acc_query_prog')
        plt.close()
        plt.figure(figsize=(20, 5))
        plt.plot(step, train_arg_acct, 'b', label='train_query_arg')
        plt.plot(step, test_arg_acct, 'm', label='test_query_arg')
        plt.plot(step, test1_arg_acct, 'c', label='test_count_arg')
        plt.plot(step, test2_arg_acct, 'k', label='test_exist_arg')
        plt.legend()
        plt.xticks(step)
        plt.xlabel('step')
        plt.ylabel('acc')
        plt.title('learning curve for arguments')
        plt.savefig(SAVE_PATH + 'acc_query_arg')
        plt.close()
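The stat.mean(...) call above averages the three per-slot argument accuracies into a single argument score; stat appears to be Python's statistics module imported under that alias (an assumption based on usage). With illustrative numbers:

import statistics as stat

arg0_acc, arg1_acc, arg2_acc, num_args = 3.0, 2.0, 4.0, 4
arg_score = stat.mean([arg0_acc / num_args, arg1_acc / num_args, arg2_acc / num_args])
print(arg_score)  # 0.75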
Example #3
def train_addition(epochs, verbose=0):
    """
    Instantiates an Addition Core, NPI, then loads and fits model to data.

    :param epochs: Number of epochs to train for.
    """
    # Load Data
    with open(DATA_PATH_TRAIN, 'rb') as f:
        data = pickle.load(f)

    # Initialize Addition Core
    print ('Initializing Addition Core!')
    core = AdditionCore()

    # Initialize NPI Model
    print ('Initializing NPI Model!')
    npi = NPI(core, CONFIG, LOG_PATH, verbose=verbose)

    # Initialize TF Saver
    saver = tf.train.Saver()

    # Initialize TF Session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Start Training
    for ep in range(1, epochs + 1):
        for i in range(len(data)):
            # Reset NPI States
            npi.reset_state()

            # Setup Environment
            steps = data[i]
            # print(data[i])

            x, y = steps[:-1], steps[1:]
            # Run through steps, and fit!
            step_def_loss, step_arg_loss, term_acc, prog_acc, = 0.0, 0.0, 0.0, 0.0
            arg0_acc, arg1_acc, arg2_acc, num_args = 0.0, 0.0, 0.0, 0

            # dsl = DSL([], [])

            for j in range(len(x)):
                # {'program': {'program': 'check'}, 'environment': {'terminate': False, 'answer': 1, 'is_redirect': 2},'args': {'id': 0}}
                # print(y[j])
                prog_name, prog_in_id = x[j]["program"]["program"], x[j]["program"]["id"]
                arg, term = x[j]["args"]["id"], x[j]["environment"]["terminate"]
                prog_name_out, prog_out_id = y[j]["program"]["program"], y[j]["program"]["id"]
                arg_out, term_out = y[j]["args"]["id"], y[j]["environment"]["terminate"]
                # Get Environment, Argument Vectors
                env_in = [get_env(x[j]["environment"])]

                arg_in, arg_out = [get_args(arg, arg_in=True)], get_args(arg_out, arg_in=False)
                term_out = [1] if term_out else [0]

                # if prog_name_out=="WRITE":
                #     prog_out_id = dsl.get_code(y[j]["prog"]["arg"][1])
                #     os._exit()

                prog_in, prog_out = [[prog_in_id]], [prog_out_id]

                # Fit!
                if True:
                    t_acc, p_acc, _, loss = sess.run(
                        [npi.t_metric, npi.p_metric, npi.default_train_op, npi.default_loss],
                        feed_dict={npi.env_in: env_in, npi.arg_in: arg_in, npi.prg_in: prog_in,
                                   npi.y_prog: prog_out, npi.y_term: term_out})
                    # print({npi.env_in: env_in, npi.arg_in: arg_in, npi.prg_in: prog_in, npi.y_prog: prog_out, npi.y_term: term_out})
                    # print({npi.y_args[0]: [arg_out[0]], npi.y_args[1]: [arg_out[1]], npi.y_args[2]: [arg_out[2]]})
                    # step_arg_loss += loss
                    term_acc += t_acc
                    prog_acc += p_acc
                    step_def_loss += loss
                    # arg0_acc += a_acc[0]
                    # arg1_acc += a_acc[1]
                    # arg2_acc += a_acc[2]
                    # num_args += 1
                    # else:
                    #     loss, t_acc, p_acc, _ = sess.run(
                    #         [npi.default_loss, npi.t_metric, npi.p_metric, npi.default_train_op],
                    #         feed_dict={npi.env_in: env_in, npi.arg_in: arg_in, npi.prg_in: prog_in,
                    #                    npi.y_prog: prog_out, npi.y_term: term_out})
                    #     step_def_loss += loss
                    #     term_acc += t_acc
                    #     prog_acc += p_acc

            print ("Epoch {0:02d} Step {1:03d} Loss: {2:03f} Term: {3:03f}, Prog: {4:03f}" \
                    .format(ep, i, step_def_loss / len(x), term_acc / len(x), prog_acc / len(x)))

        # Save Model
        saver.save(sess, CKPT_PATH)
        # !!!!
        tf.train.write_graph(sess.graph_def, '/tmp/tf/log', 'graph.pb', as_text=False)
        # tf.train.write_graph(my_graph, path_to_model_pb, 'saved_model.pb', as_text=False)
        # !!!!
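Unlike Examples #1 and #2, this variant stores each trace step as nested dictionaries rather than tuples. A small sketch of that record layout, using the field names from the inline comment in the loop (the concrete values, including the 'id' entries, are illustrative):

step = {'program': {'program': 'check', 'id': 3},
        'args': {'id': 0},
        'environment': {'terminate': False, 'answer': 1, 'is_redirect': 2}}
prog_name = step['program']['program']    # symbolic program name
prog_in_id = step['program']['id']        # program id fed to the NPI
arg = step['args']['id']                  # argument id
term = step['environment']['terminate']   # termination flag
print(prog_name, prog_in_id, arg, term)   # check 3 0 False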
Example #4
def train_addition(epochs, start_epoch, start_step, sub, verbose=0):
    """
    Instantiates an Addition Core, NPI, then loads and fits model to data.

    :param epochs: Number of epochs to train for.
    """
    if not os.path.exists("log/" + sub + "/models/"):
        os.mkdir("log/" + sub + "/models/")
    # Load Data
    with open(DATA_PATH_TRAIN, 'rb') as f:
        data = pickle.load(f)

    # f = open('log/log_train.txt', 'r+')
    # f.truncate()

    # Initialize Addition Core
    print('Initializing Addition Core!')
    core = AdditionCore()

    # Initialize NPI Model
    print('Initializing NPI Model!')
    npi = NPI(core, CONFIG, LOG_PATH, verbose=verbose)

    # Initialize TF Saver
    saver = tf.train.Saver()

    # Initialize TF Session
    sess = tf.Session()

    if start_epoch > 0 or start_step > 0:
        saver = tf.train.Saver()
        if start_step > 0:
            saver.restore(
                sess,
                "log/" + sub + "/models/model-{0:04d}-{1:06d}.ckpt".format(
                    start_epoch + 1, start_step))
            print("log/" + sub + "/models/model-{0:04d}-{1:06d}.ckpt".format(
                start_epoch + 1, start_step))
        else:
            saver.restore(
                sess, "log/" + sub +
                "/models/model-{0:04d}.ckpt".format(start_epoch))
            print("log/" + sub +
                  "/models/model-{0:04d}.ckpt".format(start_epoch))
    else:
        sess.run(tf.global_variables_initializer())

    # Reset NPI States
    npi.reset_state()
    tf.get_default_graph().finalize()

    # Start Training
    for ep in range(start_epoch + 1, epochs + 1):
        prog_acc_sum = 0
        for i in range(len(data)):
            if i > start_step:
                # Setup Environment
                steps = data[i]
                # print(data[i])

                x, y = steps[:-1], steps[1:]
                # Run through steps, and fit!
                step_def_loss, step_arg_loss, term_acc, prog_acc, = 0.0, 0.0, 0.0, 0.0
                arg0_acc, arg1_acc, arg2_acc, num_args = 0.0, 0.0, 0.0, 0

                # dsl = DSL([], [])

                if len(x) > 0:
                    for j in range(len(x)):
                        # {'program': {'program': 'check'}, 'environment': {'terminate': False, 'answer': 1, 'is_redirect': 2},'args': {'id': 0}}
                        #print(x[j]['addinfo'])
                        prog_name, prog_in_id, arg, term = x[j]["program"][
                            "program"], x[j]["program"]["id"], x[j]["args"][
                                "id"], x[j]["environment"]["terminate"]
                        prog_name_out, prog_out_id, arg_out, term_out = y[j][
                            "program"]["program"], y[j]["program"]["id"], y[j][
                                "args"]["id"], y[j]["environment"]["terminate"]
                        # Get Environment, Argument Vectors
                        env_in = [get_env(x[j]["environment"])]

                        arg_in, arg_out = [get_args(arg, arg_in=True)
                                           ], get_args(arg_out, arg_in=False)
                        term_out = [1] if term_out else [0]

                        # if prog_name_out=="WRITE":
                        #     prog_out_id = dsl.get_code(y[j]["prog"]["arg"][1])
                        #     os._exit()

                        prog_in, prog_out = [[prog_in_id]], [prog_out_id]

                        # Fit!
                        if True:
                            t_acc, p_acc, _, loss = sess.run(
                                [
                                    npi.t_metric, npi.p_metric,
                                    npi.default_train_op, npi.default_loss
                                ],
                                feed_dict={
                                    npi.env_in: env_in,
                                    npi.arg_in: arg_in,
                                    npi.prg_in: prog_in,
                                    npi.y_prog: prog_out,
                                    npi.y_term: term_out
                                })
                            # print({npi.env_in: env_in, npi.arg_in: arg_in, npi.prg_in: prog_in, npi.y_prog: prog_out, npi.y_term: term_out})
                            # print({npi.y_args[0]: [arg_out[0]], npi.y_args[1]: [arg_out[1]], npi.y_args[2]: [arg_out[2]]})
                            # step_arg_loss += loss
                            term_acc += t_acc
                            prog_acc += p_acc
                            step_def_loss += loss
                            # arg0_acc += a_acc[0]
                            # arg1_acc += a_acc[1]
                            # arg2_acc += a_acc[2]
                            # num_args += 1
                            # else:
                            #     loss, t_acc, p_acc, _ = sess.run(
                            #         [npi.default_loss, npi.t_metric, npi.p_metric, npi.default_train_op],
                            #         feed_dict={npi.env_in: env_in, npi.arg_in: arg_in, npi.prg_in: prog_in,
                            #                    npi.y_prog: prog_out, npi.y_term: term_out})
                            #     step_def_loss += loss
                            #     term_acc += t_acc
                            #     prog_acc += p_acc
                    prog_acc_sum += prog_acc / len(x)
                    #with open('log/log_train.txt', "a") as myfile:
                    if i % 1000 == 0:
                        message = "Epoch "+sub+" {0:02d} Step {1:03d} Loss: {2:03f} Term: {3:03f}, Prog: {4:03f} AVG: {5:03f}" \
                            .format(ep, i, step_def_loss / len(x), term_acc / len(x), prog_acc / len(x), sum / (i - start_step))
                        print(message)
                        with open("log/" + sub + "/info", "a") as myfile:
                            myfile.write(message + "\n")
                    if i % chunk == 0:
                        saver.save(
                            sess, "log/" + sub +
                            "/models/model-{0:04d}-{1:06d}.ckpt".format(ep, i))
                        #if os.path.exists("log/model-{0:04d}-{1:06d}.ckpt.meta".format(ep, i-chunk)):
                        #    os.remove("log/model-{0:04d}-{1:06d}.ckpt.meta".format(ep, i-chunk))
                        #    os.remove("log/model-{0:04d}-{1:06d}.ckpt.index".format(ep, i-chunk))
                        #    os.remove("log/model-{0:04d}-{1:06d}.ckpt.data-00000-of-00001".format(ep, i-chunk))
        print ("Epoch {0:02d} Step {1:03d}  AVG: {2:03f}" \
                        .format(ep, len(data), sum / len(data)))
        # Save Model
        saver.save(sess,
                   "log/" + sub + "/models/model-{0:04d}.ckpt".format(ep))
        # !!!!
        tf.train.write_graph(sess.graph_def,
                             '/tmp/tf/log',
                             'graph.pb',
                             as_text=False)
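The resume branch above rebuilds checkpoint file names from the epoch and step counters. A hypothetical helper (not part of the project) showing the two naming patterns it uses:

def ckpt_path(sub, epoch, step=None):
    # Intra-epoch checkpoints carry the step counter; per-epoch checkpoints do not.
    if step is not None:
        return 'log/' + sub + '/models/model-{0:04d}-{1:06d}.ckpt'.format(epoch, step)
    return 'log/' + sub + '/models/model-{0:04d}.ckpt'.format(epoch)

print(ckpt_path('run1', 3))        # log/run1/models/model-0003.ckpt
print(ckpt_path('run1', 3, 2500))  # log/run1/models/model-0003-002500.ckpt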
Example #5
def test_vqa(verbose=0):
    """
    Instantiates a VQA Core, NPI, then loads and fits model to data.

    :param epochs: Number of epochs to train for.
    """

    # Load Data
    with open(DATA_PATH, 'rb') as f:
        data = pickle.load(f)
        data = data[80:300]  # the saved default model was trained on the same data, but only on [:80]
    # Initialize VQA Core
    print ('Initializing VQA Core!')
    core = VQAcore()

    # Initialize NPI Model
    print ('Initializing NPI Model!')
    npi = NPI(core, CONFIG, LOG_PATH, verbose=verbose)

    with tf.Session() as sess:
        # Restore from Checkpoint
        saver = tf.train.Saver()
        saver.restore(sess, CKPT_PATH)

        term_acct = []
        prog_acct = []
        arg_acct = []
        step = []
        count = 0
        # Start Testing
        for i in range(len(data)):
            # Reset NPI States
            npi.reset_state()

            # Setup Environment
            _, imgid, qid, qtype, steps = data[i]
            scene = Scene(imgid)
            x, y = steps[:-1], steps[1:]
            if len(x) == 0 or len(y) == 0:
                continue
            count += 1

            # Run through steps and evaluate
            step_def_loss, step_arg_loss, term_acc, prog_acc, = 0.0, 0.0, 0.0, 0.0
            arg0_acc, arg1_acc, arg2_acc, num_args = 0.0, 0.0, 0.0, 0
            for j in range(len(x)):
                if random.uniform(0,1) > ABAL_THR:
                    (prog_name, prog_in_id), arg, term = y[j]
                    (_, prog_out_id), arg_out, term_out = x[j]
                else:
                    (prog_name, prog_in_id), arg, term = x[j]
                    (_, prog_out_id), arg_out, term_out = y[j]

                # Update Environment if the program is executable (EX_PROG_PID)
                if prog_in_id in EX_PROG_PID:
                    scene.execute(prog_in_id, arg)

                # Get Environment, Argument Vectors
                env_in = [scene.get_env()]
                arg_in, arg_out = [get_args(arg, arg_in=True)], get_args(arg_out, arg_in=False)
                prog_in, prog_out = [[prog_in_id]], [prog_out_id]
                term_out = [1] if term_out else [0]

                # Evaluate!
                if prog_out_id in PAR_PROG_PID:

                    loss, t_acc, p_acc, a_acc= sess.run(
                        [npi.arg_loss, npi.t_metric, npi.p_metric, npi.a_metrics],
                        feed_dict={npi.env_in: env_in, npi.arg_in: arg_in, npi.prg_in: prog_in,
                                npi.y_prog: prog_out, npi.y_term: term_out,
                                npi.y_args[0]: [arg_out[0]], npi.y_args[1]: [arg_out[1]],
                                npi.y_args[2]: [arg_out[2]]})
                    step_arg_loss += loss
                    term_acc += t_acc
                    prog_acc += p_acc
                    arg0_acc += a_acc[0]
                    arg1_acc += a_acc[1]
                    arg2_acc += a_acc[2]
                    num_args += 1

                else:

                    loss, t_acc, p_acc= sess.run(
                        [npi.default_loss, npi.t_metric, npi.p_metric],
                        feed_dict={npi.env_in: env_in, npi.arg_in: arg_in, npi.prg_in: prog_in,
                                npi.y_prog: prog_out, npi.y_term: term_out})
                    step_def_loss += loss
                    term_acc += t_acc
                    prog_acc += p_acc



            try:
                print ("Step {} Default Step Loss {}, " \
                "Argument Step Loss {}, Term: {}, Prog: {}, A0: {}, " \
                "A1: {}, A2: {}".format(i, step_def_loss / len(x), step_arg_loss / len(x), term_acc / len(x),
                        prog_acc / len(x), arg0_acc / num_args, arg1_acc / num_args,
                        arg2_acc / num_args))
                tmp = stat.mean([arg0_acc / num_args, arg1_acc / num_args, arg2_acc / num_args])
                term_acct.append(term_acc / len(x))
                prog_acct.append(prog_acc / len(x))
                arg_acct.append(tmp)
                step.append(count)
            except Exception:
                print('main print failed')

            plt.figure(figsize=(20, 5))
            plt.plot(step, term_acct, 'b', label='term')
            plt.plot(step, prog_acct, 'm', label='prog')
            plt.plot(step, arg_acct, 'c', label='arg')
            plt.legend()
            plt.xticks(step)
            plt.xlabel('step')
            plt.ylabel('acc')
            plt.title('Ablation Study')
            plt.savefig(SAVE_PATH + 'acc_ablation')
            plt.close()
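The random swap at the top of the step loop is the ablation itself: with probability 1 - ABAL_THR the input and target steps trade places, so the reported accuracies measure how much correct step ordering matters. A self-contained sketch (the threshold value is illustrative, not the project's setting):

import random

ABAL_THR = 0.9                      # illustrative threshold
x_step, y_step = 'MOVE', 'WRITE'    # one (input, target) pair from a trace
if random.uniform(0, 1) > ABAL_THR:
    inp, target = y_step, x_step    # swapped pair (ablation case)
else:
    inp, target = x_step, y_step    # normal ordering
print(inp, '->', target)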
Example #6
def train_card_pattern_matching(epochs, verbose=0):
    # Load Data
    with open(DATA_PATH, 'rb') as f:
        data = pickle.load(f)

    # Initialize Card Pattern Matching Core
    print('Initializing Card Pattern Matching Core!')
    core = CardPatternMatchingCore()

    # Initialize NPI Model
    print('Initializing NPI Model!')
    npi = NPI(core, CONFIG, LOG_PATH, verbose=verbose)

    # Initialize TF Saver
    saver = tf.train.Saver()

    # Initialize TF Session
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Start Training
        for ep in range(1, epochs + 1):
            for i in range(len(data)):
                # Reset NPI States
                npi.reset_state()

                # Setup Environment
                card1, card2, steps = data[i]
                scratch = ScratchPad(card1, card2)
                x, y = steps[:-1], steps[1:]

                # Run through steps, and fit!
                step_def_loss, step_arg_loss, term_acc, prog_acc, = 0.0, 0.0, 0.0, 0.0
                arg0_acc, arg1_acc, arg2_acc, num_args = 0.0, 0.0, 0.0, 0
                for j in range(len(x)):
                    (prog_name, prog_in_id), arg, term = x[j]
                    (_, prog_out_id), arg_out, term_out = y[j]

                    # Update Environment if MOVE or WRITE
                    if prog_in_id == MOVE_PID or prog_in_id == WRITE_PID:
                        scratch.execute(prog_in_id, arg)

                    # Get Environment, Argument Vectors
                    env_in = [scratch.get_env()]
                    arg_in, arg_out = [get_args(arg, arg_in=True)
                                       ], get_args(arg_out, arg_in=False)
                    prog_in, prog_out = [[prog_in_id]], [prog_out_id]
                    term_out = [1] if term_out else [0]

                    # Fit!
                    if prog_out_id == MOVE_PID or prog_out_id == WRITE_PID:
                        loss, t_acc, p_acc, a_acc, _ = sess.run(
                            [
                                npi.arg_loss, npi.t_metric, npi.p_metric,
                                npi.a_metrics, npi.arg_train_op
                            ],
                            feed_dict={
                                npi.env_in: env_in,
                                npi.arg_in: arg_in,
                                npi.prg_in: prog_in,
                                npi.y_prog: prog_out,
                                npi.y_term: term_out,
                                npi.y_args[0]: [arg_out[0]],
                                npi.y_args[1]: [arg_out[1]],
                                npi.y_args[2]: [arg_out[2]]
                            })
                        step_arg_loss += loss
                        term_acc += t_acc
                        prog_acc += p_acc
                        arg0_acc += a_acc[0]
                        arg1_acc += a_acc[1]
                        arg2_acc += a_acc[2]
                        num_args += 1
                    else:
                        loss, t_acc, p_acc, _ = sess.run(
                            [
                                npi.default_loss, npi.t_metric, npi.p_metric,
                                npi.default_train_op
                            ],
                            feed_dict={
                                npi.env_in: env_in,
                                npi.arg_in: arg_in,
                                npi.prg_in: prog_in,
                                npi.y_prog: prog_out,
                                npi.y_term: term_out
                            })
                        step_def_loss += loss
                        term_acc += t_acc
                        prog_acc += p_acc

                print ("Epoch {0:02d} Step {1:03d} Default Step Loss {2:05f}, " \
                    "Argument Step Loss {3:05f}, Term: {4:03f}, Prog: {5:03f}, A0: {6:03f}, " \
                    "A1: {7:03f}, A2: {8:03}".format(ep, i, step_def_loss / len(x), step_arg_loss / len(x), term_acc / len(x),
                            prog_acc / len(x), arg0_acc / num_args, arg1_acc / num_args,
                            arg2_acc / num_args))

            # Save Model
            saver.save(
                sess,
                'tasks/card_pattern_matching/log/card_pattern_matching_model.ckpt'
            )

    print('Model generation complete!')
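Examples #1 and #6 route each target step to one of two training ops: targets that are argument-bearing programs (MOVE/WRITE) go through arg_train_op with the three argument labels, and everything else goes through default_train_op. A toy sketch of that dispatch with made-up program ids (not the project's real values):

MOVE_PID, WRITE_PID = 0, 1   # illustrative ids

def pick_train_op(prog_out_id):
    # Argument-bearing targets use the argument loss; the rest use the default loss.
    if prog_out_id in (MOVE_PID, WRITE_PID):
        return 'arg_train_op'
    return 'default_train_op'

print(pick_train_op(1), pick_train_op(5))  # arg_train_op default_train_op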