Example #1
def histo(fname, sizes, taskcount):

    # load the oracle estimators and index them by size and repeat
    oracles = util.loadfile(fname + "_models")
    odict = lc._get_odict(oracles, sizes)

    # the sampled (generated) graphs
    sampled = lc.readresults(fname, taskcount)
    # the training tasks (they hold the seed graphs)
    tasks = util.loadfile(fname)

    # hash a task so its seed (training) graphs can be looked up for each sampled result
    hs = lambda s: s.samplerid + s.size + s.repeat * 10000
    taskindex = {hs(t): t for t in tasks}

    for sampld in sampled:
        print "size: %d   repeat: %d" % (sampld.size, sampld.repeat)
        seed = util.graphs_to_scores(taskindex[hs(sampld)].pos,
                                     odict[sampld.size][sampld.repeat])
        gen = util.graphs_to_scores(sampld.graphs,
                                    odict[sampld.size][sampld.repeat])

        plt.figure(figsize=(12, 6))
        plt.hist((seed, gen),
                 20,
                 normed=1,
                 alpha=.8,
                 histtype='step',
                 stacked=False,
                 fill=True,
                 label=['seed', 'generated'])
        plt.legend()
        plt.show()
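A minimal usage sketch; the file name, sizes and task count below are placeholders, and it assumes that fname, fname + "_models" and the per-task result files read by lc.readresults already exist on disk:

# hypothetical call with made-up arguments
histo("graphtask", sizes=[50, 100, 200], taskcount=30)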
Example #2
def draw(tasknum):
    plt.figure(figsize=(15, 5))

    # load the per-task (size, accuracy) results for the first curve
    res = [util.loadfile("ASD/d_stack_taskABC_%d" % i) for i in range(tasknum)]

    def res_to_xy(res):
        res = toolz.groupby(lambda x: x[0], res)
        keys = res.keys()
        keys.sort()
        print res[keys[0]]

        def avg(r):
            r = [e[1] for e in r]
            return sum(r) / float(len(r))

        y = [avg(res[k]) for k in keys]
        return keys, y

    # results of the second run, plotted in red below
    res2 = [util.loadfile("ASD/stack_taskABC_%d" % i) for i in range(tasknum)]

    x, y = res_to_xy(res)
    plt.plot(x, y, color='blue')

    x, y = res_to_xy(res2)
    plt.plot(x, y, color='red')

    plt.show()
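A usage sketch, assuming the result files ASD/d_stack_taskABC_0 ... and ASD/stack_taskABC_0 ... written by the other tasks already exist:

# hypothetical call: plot the averaged curves of ten finished tasks
draw(10)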
Example #3
def loadgraphs(fname):
    tasks = util.loadfile(fname)
    # we now have a bunch of tasks
    #task = namedtuple("task",'samplerid size repeat sampler neg pos')
    res = []
    for i, task in enumerate(tasks):
        if task.samplerid == 1:  # see util if this is still correct :)
            sampl = util.loadfile("res_%s_%d" % (fname, i))
            # sampled = namedtuple("sampled",'samplerid,size,repeat,time, graphs')
            res.append([task.pos, sampl.graphs, task.size, task.repeat])

    res.sort(key=lambda x: x[2])
    return res
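Each entry of the returned list is [seed graphs, generated graphs, size, repeat], sorted by size, so it can be consumed like this (the file name is a placeholder):

# hypothetical use of the returned list
for seed, generated, size, repeat in loadgraphs("graphtask"):
    print "size %d, repeat %d: %d seed / %d generated graphs" % (size, repeat, len(seed), len(generated))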
Example #4
def init_secret():
    global D_4SQ_SECRET, D_4SQ_ID
    j = u.loadfile('secret.json')
    D_4SQ_SECRET = "client_secret=" + j['4sq_secret']
    D_4SQ_ID = "client_id=" + j['4sq_id']
    logging.info(D_4SQ_SECRET)
    logging.info(D_4SQ_ID)
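A sketch of how the expected secret.json could be bootstrapped, assuming u.loadfile returns the parsed JSON as a dict; the key names come from the snippet above, the values are placeholders:

import json

# write a secret.json with the two keys init_secret() reads
with open('secret.json', 'w') as fh:
    json.dump({'4sq_secret': 'YOUR_CLIENT_SECRET', '4sq_id': 'YOUR_CLIENT_ID'}, fh)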
Example #5
def run(fname, idd):
    task = util.loadfile(fname)[idd]
    #draw.graphlearn(decomposers[:5],size=10)
    esti = util.graphs_to_linmodel(task.pos, task.neg)
    X, y = util.graphs_to_Xy(task.postest, task.negtest)
    ypred = esti.predict(X)
    acc = sklearn.metrics.accuracy_score(y, ypred)
    util.dumpfile((task.size, acc), "ASD/d_%s_%d" % (fname, idd))
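The dump path matches what draw() in Example #2 reads: calling this for every task id with fname = 'stack_taskABC' produces the ASD/d_stack_taskABC_* files plotted there. A sketch of such a driver loop, assuming the task file exists:

# hypothetical driver: evaluate every task in the file
for i in range(len(util.loadfile("stack_taskABC"))):
    run("stack_taskABC", i)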
Example #6
def readresults(filename, taskcount):
    #return [util.loadfile( "res_%s_%d"  %(filename,i)) for i in range(taskcount) ]
    data = []
    for e in range(taskcount):
        path = getresfilename(filename, e)
        if os.path.exists(path):
            data.append(util.loadfile(path))
    return data
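Missing result files (e.g. from crashed tasks) are skipped rather than raising. A usage sketch with placeholder arguments:

# hypothetical call: gather whatever results of 30 tasks were written
sampled = readresults("graphtask", 30)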
Example #7
def write_XMLresults(atype, results):
    '''write XML JUnit results'''
    from pymavlink.generator import mavtemplate
    t = mavtemplate.MAVTemplate()
    for x in glob.glob(util.reltopdir('Tools/autotest/junit.xml')):
        junit_xml = util.loadfile(x)
        f = open(util.reltopdir("../buildlogs/%s-%s" % (atype, os.path.basename(x))), mode='w')
        t.write(f, junit_xml, results)
        f.close()
Example #8
def write_webresults(results):
    '''write webpage results'''
    from pymavlink.generator import mavtemplate
    t = mavtemplate.MAVTemplate()
    for h in glob.glob(util.reltopdir('Tools/autotest/web/*.html')):
        html = util.loadfile(h)
        f = open("buildlogs/%s" % os.path.basename(h), mode='w')
        t.write(f, html, results)
        f.close()
    for f in glob.glob(util.reltopdir('Tools/autotest/web/*.png')):
        shutil.copy(f, 'buildlogs/%s' % os.path.basename(f))
Example #9
def run(filename, taskid):
    tasks = util.loadfile(filename)
    try:
        result = util.sample(tasks[taskid])
    except Exception as exc:
        print "molelearnedlayer except"
        #print tasks[taskid]
        #import traceback
        #print traceback.format_exc(20)
        return None
    util.dumpfile(result, getresfilename(filename, taskid))
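The result is written to getresfilename(filename, taskid), which is exactly the path that readresults() in Example #6 probes, so a driver could look like this (file name and task count are placeholders):

# hypothetical: sample every task, then collect the runs that succeeded
for i in range(30):
    run("graphtask", i)
sampled = readresults("graphtask", 30)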
Example #10
def write_webresults(results):
    '''write webpage results'''
    from pymavlink.generator import mavtemplate
    t = mavtemplate.MAVTemplate()
    for h in glob.glob(util.reltopdir('Tools/autotest/web/*.html')):
        html = util.loadfile(h)
        f = open(util.reltopdir("../buildlogs/%s" % os.path.basename(h)), mode='w')
        t.write(f, html, results)
        f.close()
    for f in glob.glob(util.reltopdir('Tools/autotest/web/*.png')):
        shutil.copy(f, util.reltopdir('../buildlogs/%s' % os.path.basename(f)))
Example #11
def write_webresults(results):
    '''write webpage results'''
    sys.path.insert(0, os.path.join(util.reltopdir("../mavlink/pymavlink/generator")))
    import mavtemplate
    t = mavtemplate.MAVTemplate()
    for h in glob.glob(util.reltopdir('Tools/autotest/web/*.html')):
        html = util.loadfile(h)
        f = open(util.reltopdir("../buildlogs/%s" % os.path.basename(h)), mode='w')
        t.write(f, html, results)
        f.close()
    for f in glob.glob(util.reltopdir('Tools/autotest/web/*.png')):
        shutil.copy(f, util.reltopdir('../buildlogs/%s' % os.path.basename(f)))
Example #12
def eva(aid, fname, tasknum):
    '''
    size and repeat identify a task, which provides the test cases and the training graphs;
    the corresponding result files provide the generated graphs.
    '''

    tasks = util.loadfile(fname)
    tasks = toolz.groupby(lambda x: x.size, tasks)
    respos = toolz.groupby(
        lambda x: x.size,
        [util.loadfile("res_%s_%d" % (fname, i)) for i in range(tasknum)])
    resneg = toolz.groupby(lambda x: x.size, [
        util.loadfile("res_%s_%d" % (fname, i))
        for i in range(tasknum, tasknum * 2)
    ])

    RESULT = []
    for k in tasks:  # the same k is also in respos and resneg

        tasks_byrepeat = toolz.groupby(lambda x: x.repeat, tasks[k])
        respos_byrepeat = toolz.groupby(lambda x: x.repeat, respos[k])
        resneg_byrepeat = toolz.groupby(lambda x: x.repeat, resneg[k])

        means = []
        for j in tasks_byrepeat:
            task = tasks_byrepeat[j][0]
            pos = respos_byrepeat[j][0]
            neg = resneg_byrepeat[j][0]
            # get an estimator
            model = util.graphs_to_linmodel(task.pos + pos.graphs,
                                            task.neg + neg.graphs)

            X, y = util.graphs_to_Xy(task.postest, task.negtest)

            # assuming +/-1 labels, model.predict(X) * y is 1 exactly for correct
            # predictions, so the share of 1s below is the test accuracy
            asd = Counter(model.predict(X) * y)
            means.append(float(asd[1]) / sum(asd.values()))

        RESULT.append((k, np.array(means).mean()))
    RESULT.sort()
    return RESULT
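eva() returns a list of (size, mean test accuracy) pairs sorted by size. A usage sketch with placeholder arguments; note that aid is not used in the body shown above:

# hypothetical call
for size, accuracy in eva(0, "graphtask", tasknum=30):
    print size, accuracy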
Example #13
def evalandshow(fname, tasknum, sizes, show=False):
    #oracle = util.aid_to_linmodel(aid)

    res = readresults(fname, tasknum)
    processed = eval(res, util.loadfile(fname + "_models"), sizes)

    util.dumpfile((processed, fname, show), "ASASD")
    draw2(processed, fname + "score.png", show=show)
    draw(processed,
         fname + "time.png",
         get_mean=lambda x: x.time_mean,
         get_var=lambda x: x.time_var,
         show=show)
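A usage sketch, assuming the task file, its _models companion and the per-task result files already exist; all arguments are placeholders:

# hypothetical call: score the generated graphs and write the score and time plots
evalandshow("graphtask", tasknum=30, sizes=[50, 100, 200], show=True)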
Example #14
def run(filename, taskid):
    tasks = util.loadfile("natlearn/" + filename)
    task = gettask(tasks, taskid)
    try:
        result = util.sample(task)
    except Exception as exc:
        print "naturallearning is showing the task object:"
        print task
        import traceback
        print traceback.format_exc(20)
        return None

    util.dumpfile(result, "natlearn/res_%s_%d" % (filename, taskid))
Example #15
def run(fname, idd):
    def getacc(esti, a, b):
        X, y = util.graphs_to_Xy(a, b)
        ypred = esti.predict(X)
        return sklearn.metrics.accuracy_score(y, ypred)

    task = util.loadfile(fname)[idd]

    # estimator trained on the fully stacked graphs (intermediary layers kept)
    full_stacked = [task.sampler.decomposer.make_new_decomposer(data).pre_vectorizer_graph()
                    for data in task.sampler.graph_transformer.fit_transform(
                        task.pos, task.neg, remove_intermediary_layers=False)]
    esti = util.graphs_to_linmodel(full_stacked[:task.size], full_stacked[task.size:])

    # fully stacked test instances
    testgraphs = [task.sampler.decomposer.make_new_decomposer(data).pre_vectorizer_graph()
                  for data in task.sampler.graph_transformer.transform(
                      task.postest + task.negtest, remove_intermediary_layers=False)]
    acc = getacc(esti, testgraphs[:len(task.postest)], testgraphs[len(task.postest):])
    util.dumpfile((task.size, acc), "stacked/%s_%d" % (fname, idd))

    # repeat the experiment with the intermediary layers removed
    task = util.loadfile(fname)[idd]
    full_stacked = [task.sampler.decomposer.make_new_decomposer(data).pre_vectorizer_graph()
                    for data in task.sampler.graph_transformer.fit_transform(
                        task.pos, task.neg, remove_intermediary_layers=True)]
    esti = util.graphs_to_linmodel(full_stacked[:task.size], full_stacked[task.size:])

    # test instances with the intermediary layers removed
    testgraphs = [task.sampler.decomposer.make_new_decomposer(data).pre_vectorizer_graph()
                  for data in task.sampler.graph_transformer.transform(
                      task.postest + task.negtest, remove_intermediary_layers=True)]
    acc = getacc(esti, testgraphs[:len(task.postest)], testgraphs[len(task.postest):])
    util.dumpfile((task.size, acc), "stacked/2_%s_%d" % (fname, idd))
Example #16
    def plod(prefix='stacked/', col='red', label='nolabel'):
        # taskfilename and numtasks come from the enclosing scope
        res = [util.loadfile("%s%s_%d" % (prefix, taskfilename, e)) for e in range(numtasks)]
        rez = toolz.groupby(lambda x: x[0], res)
        keys = rez.keys()
        keys.sort()
        y_values = []
        y_variances = []
        for key in keys:
            values = np.array([val for (_, val) in rez[key]])
            #print values
            y_values.append(values.mean())
            y_variances.append(values.var())
        y_values = np.array(y_values)
        y_variances = np.array(y_variances)
        #print y_values, y_variances
        # shade mean +/- variance around the curve, then draw the mean in plt.plot below
        plt.fill_between(keys, y_values + y_variances, y_values - y_variances,
                         facecolor=col, alpha=0.15)
        plt.plot(keys,y_values,color=col,label=label)
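plod() relies on taskfilename and numtasks from its enclosing scope. A sketch of how it could be called there to compare the two result sets written by Example #15 above; colors and labels are placeholders:

# hypothetical calls: the prefixes match the dump paths "stacked/%s_%d" and "stacked/2_%s_%d"
plod(prefix='stacked/', col='red', label='with intermediary layers')
plod(prefix='stacked/2_', col='blue', label='intermediary layers removed')
plt.legend()
plt.show()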
Example #17
def showtask(filename, taskid):
    tasks = util.loadfile(filename)
    print tasks[taskid]
Example #18
def __init__(self):
    self.date = time.asctime()
    self.githash = util.loadfile(util.reltopdir('.git/refs/heads/master'))
    self.tests = []
    self.files = []