Example #1
def build_inferred_model(mod, tutor, after_k_iterations):
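    # copy mod's concepts into a fresh Domain, add a predecessor edge for every True
    # entry in tutor.filterlist, then render the inferred prerequisite structure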
    tutor.filterlist = clean_filterlist(tutor.filterlist)
    dummod = Domain()
    dummod.concepts = [Concept(i) for i, _ in enumerate(mod.concepts)]
    for k,v in tutor.filterlist.items():
        print(k, state_as_str(v))
        con = dummod.concepts[k]
        pixs = [ix for ix,bl in enumerate(v) if bl ] #get ids where state entry is True
        for i in pixs:
            con.predecessors.append(dummod.concepts[i])            
    gvrender(dummod, "inferred" + str(after_k_iterations))
    return dummod
Example #2
                          (c.id, c.predecessors[0].id, c.id))
                else:
                    print("\\node[state] (%d) [below right of=%d] {%d};" %
                          (c.id, c.predecessors[0].id, c.id))
                    active_nodes.pop(0)  #discard the second predecessor
            else:
                c1_id = c_list[0].id
                c2_id = c_list[1].id
                print("\\node[state] (%d) [below of=%d] {%d};" %
                      (c1_id, f.id, c1_id))
                print("\\node[state] (%d) [below right of=%d] {%d};" %
                      (c2_id, f.id, c2_id))
            active_nodes += c_list  #extend the active nodes list with the children


#             print([a.id for a in active_nodes])

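    # emit a single TikZ \path command with one edge from each concept to each of its children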
    s = "\\path "
    for c in mod.concepts:
        c_list = child_lookup[c]
        if c_list:
            for cc in c_list:
                s += "(%d) edge node {} (%d)\n" % (c.id, cc.id)
    s += ";"
    print(s)

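# quick demo: build a ConNTree with branch factor 3, populate it with 10 concepts, and render it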
if __name__ == "__main__":
    d = ConNTree(branch_factor=3)
    d.regenerate(10)
    gvrender(d, fname="c")
Example #3
#                 print("".join(['X' if student.knows(n) else "-" for n in model.concepts]))
#                 print([(c.id,[p.id for p in c.predecessors]) for c in model.concepts])

if __name__ == '__main__':
    #create some chain of Concepts

    #     model = FreeDomain()
    #     model.regenerate(N)
    k_steps = 5
    master_log = []

    #create a single model upon which all our efforts will focus
    model = BranchMergeNetwork(4)
    model.regenerate(N)
    gvrender(model)

    for batch in batches:
        if not batch["run"]:
            continue

        tutor = SarsaTutor(N) if batch["tutor"] == "tabular" else RandomTutor(N)

        batch_msgs = []
        batch_x = []
        batch_y = []
        batch_score = []
        name = batch["batch_name"]

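        # repeat the run trials_per_model times for this batch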
        for trial in range(trials_per_model):
Example #4
        'batch_name': '8000 - Tabular',
        'num_missions': 16000
    },
]

if __name__ == '__main__':
    k_steps = 100
    master_log = []

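    # build num_models BranchMergeNetwork domains with N concepts each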
    models = []
    N = 100  # number of nodes
    for m in range(num_models):
        mod = BranchMergeNetwork(branch_factor)
        mod.regenerate(N)
        models.append(mod)
    gvrender(models[0], "real")

    if LOAD_FROM_FILE:
        saved_struct = _load_raw_from_file("test100.dat")
        models[0].concepts = saved_struct
        gvrender(models[0], "real")

    tutor = SarsaTutor(N, 0.5, 5)  # if batch["tutor"] == "tabular" else RandomTutor(N)
    student = IdealStudent()

    last_num = 0
    main_log = []
    for batch in batches:
        num_missions = batch['num_missions']
        more_missions = num_missions - last_num
Example #5
        r = tp / float(tp+fn)
    except ZeroDivisionError:
        r = 1.0
#   r = tp/ float(arcs)
#     print(p,r)

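    # F1 score: harmonic mean of precision p and recall r, guarding against p + r == 0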
    F = 0.0 if (p+r==0) else (2.0*p*r / (p+r))
    return p,r,F

    

if __name__ == '__main__':
    
    fname = "itec2011"
    model = load_domain_model_from_file(fname+".dat")    
    gvrender(model, fname)
    train_logs = []
    batch_names = []

    num_nodes = len(model.concepts)
    tutor = RandomTutor(num_nodes=num_nodes)
#     tutor = Qutor(num_nodes, 0.5, 100, 1.0, "Qutor") #0.1 7000
#     tutor = Qutor(num_nodes, 0.1, 7000, 1.0, "Qutor") #0.1 7000
#     tutor = SarsaL2(num_nodes, 0.5, 5000, 1.0, "SarsaL2")
#     tutor = DynaQutor(num_nodes, 0.5, 1000, 1.0, "DynaQ")
    
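    # run 100 evaluation episodes against a fresh IdealStudent, resetting the tutor before each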
    for _ in range(100):
        tutor.reset()
        p = IdealStudent()
        tutor.run_episode(model, p, -1, True)
    
Example #6
def main():

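    # assumes os and collections.defaultdict are imported at module level, along with the model, tutor, and student classes used below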
    max_steps=25000
    n = 100
#     branchfs = [2,3,4,5]
    branchfs = [2]
    
#     log_dir = "..\\..\\compare_tree_model_logs\\"
#     log_dir ="..\\..\\forgetting_logs\\"
#     log_dir="..\\..\\bmc_only\\"
    log_dir ="..\\..\\dynaqutor_logs\\"
    
    write = True

#     model_types=[BranchMergeNetwork]
#     model_types=[ ChainDomain, DivNTree,ConNTree,BranchMergeNetwork]
    model_types = [ChainDomain]
    
    models =[]
    for m in model_types:
        for b in branchfs:
            mod = m(branch_factor=b)
            mod.regenerate(n)
            models.append(mod)
            print("created "+str(mod))
            gvrender(mod, os.path.join(log_dir, str(type(mod)).split(".")[-1][:-2]+str(b) ))
    
#     tut = RandomTutor(name="RandomTutor")
    tut = DynaQutor(100, 0.5, 1000, 1.0, "Dynaqutor")
    N=1
   
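    # train the tutor on each model, recording mastery against cumulative step count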
    for m in models:
        if write:
            logfile = open(
                os.path.join(
                    log_dir,
                    tut.name.split(" ")[0] + str(type(m)).split(".")[-1][:-2] +
                    str(m.branch_factor) + ".log"), "w")
        
        m_scores = defaultdict(int)
        m_scores[0] = 0
        for i in range(N):
            step_cnt=0
            p = RelearningStudent()
#             p = ForgettingStudent()
#             p= IdealStudent()
#             m.regenerate(n)
            max_ep_len=100
            while step_cnt < max_steps:        
                ep_len = tut.run_episode(m, p, max_steps=max_ep_len, update_qvals=True, reset_student=False)        
                tut.transition_trace.clear() # clear this trace
                
                mastery = p.get_mastery_score()/len(m.concepts)

                step_cnt += ep_len
                m_scores[step_cnt] += mastery / N
                if ep_len == 0:
                    break

            pc = 100.0 * (i + 1) / N
            if pc == int(pc):
                print("{}%".format(pc))

        for step_cnt in sorted(m_scores.keys()):            
            mastery = m_scores[step_cnt]
            if write:
                #print("writing log file")
                logfile.write("{},{}\n".format(step_cnt, mastery))

        if write: logfile.close()
Example #7
def main():
    #     epss=[2,100]
    #     alphas=[0.1,0.9]
    epss = [1000]
    #     alphas=[0.1, 0.5, 1.0]
    alphas = [0.5]
    gammas = [1.0]  #discount factors
    lambdas = [0.7]
    #lambdas=[0.0, 0.3, 0.7, 0.99]
    max_steps = 100

    log_dir = "..\\..\\compare_logs\\"

    load = False
    load_file = "itec2011.dat"
    DEBUGG = False
    write = True

    #LOAD The model
    if load:
        mod = load_domain_model_from_file(load_file)
    else:
        mod = BranchMergeNetwork(4)
        mod.regenerate(100)
    gvrender(mod, "real")
    #     save_domain_to_file(mod, "test10.dat")
    models = [mod]
    num_nodes = len(mod.concepts)

    #     intervals=[x for x in range(1,500,10)]+[x for x in range(1,1001,100)]
    #     intervals=[x for x in range(1,max_episodes+1, plotting_interval)]

    tutorclasses = ['RandomTutor', 'Qutor', 'SarsaL2', 'DynaQutor']

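    # build one tutor instance for every combination of the hyperparameter lists and tutor class names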
    tutors = []
    for eps in epss:
        for alpha in alphas:
            for gamma in gammas:
                for lambduh in lambdas:
                    for classname in tutorclasses:
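                        # resolve the tutor class object from its name (assumes these classes are in scope)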
                        klass = eval(classname)
                        tutor = klass(num_nodes, alpha, eps, gamma, classname)
                        #                         if hasattr(tutor, "lambda_val"): #i.e. if this tutor uses an eligibility trace, it needs a decay value

                        try:
                            tutor.lambda_val = lambduh
                            tutor.name += (" L" + str(lambduh))
                        except AttributeError as aerr:
                            print(repr(aerr))
                        tutors.append(tutor)


#     fig, ax1 = pyplot.subplots()

#     ax2 = ax1.twinx()
#     ax1.set_xlabel('# of episodes')
#     ax1.set_ylabel('Avg lessons to complete {}-node course, BF {}'.format( len(mod.concepts), mod.branch_factor ))
#     ax2.set_ylabel('Error in inferred domain model')

    pyplot.xlabel("# episodes")
    pyplot.ylabel("Avg lessons to complete course")

    #     snapshot1 = tracemalloc.take_snapshot()
    for tut in tutors:
        tut.DEBUG = DEBUGG
        if write:
            logfile = codecs.open(
                os.path.join(log_dir,
                             tut.name.split(" ")[0] + ".log"), "w")

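        # run episodes with fresh ForgettingStudents until max_steps total steps have elapsed, logging each episode's transition trace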
        step_cnt = 0
        while step_cnt < max_steps:
            p = ForgettingStudent()
            ep_len = tut.run_episode(models[0],
                                     p,
                                     max_steps=-1,
                                     update_qvals=True)
            print(tut, ep_len)

            if write:
                logfile.write("e\n")
                ep_t_log = tut.transition_trace.pop()
                for s in ep_t_log:
                    logfile.write(str(s) + "\n")
            step_cnt += ep_len

        if write: logfile.close()
        tut = None
Example #8
def main():

    n = 20
    #     branchfs = [2,3,4,5]
    branchfs = [2]

    #     log_dir = "..\\..\\compare_tree_model_logs\\"
    #     log_dir ="..\\..\\forgetting_logs\\"
    #     log_dir="..\\..\\bmc_only\\"
    log_dir = "..\\..\\dynaqutor_logs\\"

    write = True

    model_types = [BranchMergeNetwork]
    #     model_types=[ ChainDomain, DivNTree,ConNTree,BranchMergeNetwork]
    #     model_types = [ChainDomain]

    models = []
    for m in model_types:
        for b in branchfs:
            mod = m(branch_factor=b)
            mod.regenerate(n)
            models.append(mod)
            print("created " + str(mod))
            gvrender(
                mod,
                os.path.join(log_dir,
                             str(type(mod)).split(".")[-1][:-2] + str(b)))

#     tut = RandomTutor(name="RandomTutor")
    tut = DynaQutor2(n, 0.55, 250, 1.0, "DynaQutor2")
    tut.MASTERY_THRESHOLD = 0.95
    tut.modelling_intensity = -1
    num_training_sessions = 800

    scores = []
    steps_to_teach = []
    for m in models:
        if write:
            logfile = open(
                os.path.join(
                    log_dir,
                    tut.name.split(" ")[0] + str(type(m)).split(".")[-1][:-2] +
                    str(m.branch_factor) + ".log"), "w")

        #         m_scores = {}
        #         m_scores[0] = 0
        tut.possible_actions = m.concepts
        s = tut.sRep.reset_state()
        tut.extend_Q(s, tut.possible_actions)

        step_cnt = 0
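        # training phase: teach num_training_sessions fresh RelearningStudents up to the mastery threshold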
        for i in range(num_training_sessions):
            p = RelearningStudent()
            #             p = ForgettingStudent()
            #             p= IdealStudent()
            #             m.regenerate(n)
            tut.sRep.reset_state()
            mastery = 0.0
            ep_cnt = 0
            while mastery < tut.MASTERY_THRESHOLD:
                #                 tut.test_student(p)
                lesson, ex = tut.get_next_lesson()
                k = p.knows(lesson)
                success = p.try_learn(lesson)
                print(i, "/", step_cnt, "/", ep_cnt, str(tut.sRep),
                      ": tried to teach p", lesson.id, "with success=",
                      success)
                mastery = p.get_mastery_score() / len(m.concepts)

                tut.record_lesson_results(lesson, success, k, mastery, ex)

                step_cnt += 1
                #                 m_scores[step_cnt]=mastery #/num_training_sessions
                scores.append(mastery)
                print("m=", mastery)
                ep_cnt += 1
            steps_to_teach.append([step_cnt, ep_cnt])
            pc = 100.0 * (i + 1) / num_training_sessions
            if (pc == int(pc)):
                print("{}%".format(pc))


#         input("hit return")
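        # evaluation phase: teach one more fresh student with the trained tutor, continuing to record mastery per step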
        q = RelearningStudent()
        #         q = IdealStudent()
        mastery = 0.0
        tut.sRep.reset_state()
        ep_cnt = 0
        while mastery < tut.MASTERY_THRESHOLD:
            #             tut.test_student(q)
            lesson, ex = tut.get_next_lesson()
            k = q.knows(lesson)
            success = q.try_learn(lesson)
            print(step_cnt, tut.sRep, ": tried to teach q", lesson.id,
                  "with success=", success)
            mastery = q.get_mastery_score() / len(m.concepts)
            tut.record_lesson_results(lesson, success, k, mastery, ex)
            step_cnt += 1
            ep_cnt += 1
            scores.append(mastery)
        steps_to_teach.append([step_cnt, ep_cnt])
        #             m_scores[step_cnt]=mastery
        #             print("m=",m_scores)

        print("len scores", len(scores))
        #         for step_cnt in sorted(m_scores.keys()):
        #             mastery = m_scores[step_cnt]
        for step_cnt, mastery in enumerate(scores):
            if write:
                #print("writing log file")
                logfile.write("{},{}\n".format(step_cnt, mastery))

        if write: logfile.close()

        print(steps_to_teach)