Example #1
def evaluate_student_number(phrasedir, summarydir, output):
    body = []
    
    lectures = annotation.Lectures
    for i, lec in enumerate(lectures):
        
        for prompt in ['q1', 'q2']:
            key_prefix = os.path.join(phrasedir, str(lec), '%s.%s'%(prompt, method))
            sum_prefix = os.path.join(summarydir, str(lec), '%s'%(prompt))
            
            evaluator = EvalStudent(key_prefix, sum_prefix, len(annotation.anotators))

            scores = evaluator.score_no()
            
            row = [lec] + scores
            
            body.append(row)
    
    head = ['week', 'precision', 'recall', 'f-measure']
    row = ['ave']
    for i in range(1, len(head)):
        scores = [float(xx[i]) for xx in body]
        row.append(numpy.mean(scores))
    body.append(row)
    
    fio.WriteMatrix(output, body, head)
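Note: every example in this collection writes its result table through `fio.WriteMatrix` and reads tables back with `fio.ReadMatrix`. The `fio` module itself is not shown; the sketch below captures only the behavior the call sites imply (one row per line, tab-separated cells, optional header row) and is an assumption, not the actual implementation.

# Minimal sketch of the two fio helpers assumed throughout these examples.
def WriteMatrix(filename, body, header=None):
    with open(filename, 'w') as fout:
        if header is not None:
            fout.write('\t'.join(map(str, header)) + '\n')
        for row in body:
            fout.write('\t'.join(map(str, row)) + '\n')

def ReadMatrix(filename, hasHead=False):
    with open(filename) as fin:
        rows = [line.rstrip('\n').split('\t') for line in fin]
    if hasHead:
        return rows[0], rows[1:]
    return None, rows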
Example #2
def getStudentResponses4Annotation(excelfile, cid, maxWeek, datadir):
    sheets = range(1, maxWeek + 1)

    for sheet in sheets:
        week = sheet

        for type in ['q1', 'q2', 'q3', 'q4']:
            head = ['student_id', 'sentence_id', 'responses']
            body = []

            student_summaryList = getStudentResponseList(
                excelfile, cid, week, type, True)

            if len(student_summaryList) == 0: continue

            filename = datadir + "response." + str(week) + "." + type + ".txt"

            old = ""
            i = 1
            for summary, id in student_summaryList:
                row = []
                summary = summary.replace('"', '\'')
                if len(summary.strip()) == 0: continue

                if id == old:
                    row.append(" ")
                else:
                    row.append(id)
                row.append(i)
                row.append(summary)
                body.append(row)
                i = i + 1
                old = id

            fio.WriteMatrix(filename, body, head)
Example #3
def gather_performance(output):
    sim_extractor = Similarity()
    allfeatures = sorted(sim_extractor.features.keys())

    allbody = []
    for k in range(len(allfeatures) + 1):
        #features = allfeatures#['WordEmbedding']

        if k == len(allfeatures):  #use all features
            features = allfeatures
        else:
            features = [allfeatures[k]]
            #features = allfeatures[0:k] + allfeatures[k+1:]

        name = '_'.join(features)

        resultfile = '../data/%s/simlearning.cv.svm.%s.txt' % (course, name)

        head, body = fio.ReadMatrix(resultfile, hasHead=True)

        #get the average
        allhead = ['name'] + head[2:]
        average = [name]
        for i in range(2, len(head)):  #start from the third one
            values = [float(row[i]) for row in body]
            average.append(np.mean(values))

        allbody.append(average)

    fio.WriteMatrix(output, allbody, allhead)
Example #4
def PrintClusterRankSummary(datadir):
    sheets = range(0,maxWeek)
    
    lectures = fio.LoadDictJson('../data/CourseMIRROR/lectures.json')
    
    head = ['week', 'data', 'Point of Interest', "Muddiest Point"]
    body = []
    
    for i, sheet in enumerate(sheets):        
        row = []
        week = i + 1
        
        row.append(week)
        row.append(getDate(lectures, course, week))
        
        for type in ['q1', 'q2', 'q3', 'q4']:
            path = datadir + str(i+1)+ '/'
            summaryfile = path + type + '.summary'
            if not fio.IsExist(summaryfile): continue
            
            summaries = [line.strip() for line in fio.ReadFile(summaryfile)]
            
            sourcefile = path + type + '.summary.source'
            sources = [line.split(',') for line in fio.ReadFile(sourcefile)]
            
            combinedSummary = []
            for j, (summary, source) in enumerate(zip(summaries, sources)):
                summary = summary.replace('"', '\'')
                combinedSummary.append(str(j+1) + ") " + summary + " [" + str(len(source)) + "]")
            
            row.append('"' + chr(10).join(combinedSummary)+ '"') 
        
        body.append(row)
    fio.WriteMatrix(datadir + "summary.txt", body, head)
Example #5
def getStudentResponses4Quality(excelfile, cid, maxWeek, datadir):
    sheets = range(1, maxWeek + 1)

    for sheet in sheets:
        week = sheet

        for type in ['q1', 'q2', 'q3', 'q4']:
            head = ['student_id', 'responses']
            body = []

            student_summaries = getStudentResponse(excelfile, cid, week, type)
            if len(student_summaries) == 0: continue

            for id, summaryList in student_summaries.items():
                summary = ' '.join(summaryList)

                row = []
                summary = summary.replace('"', '\'')
                if len(summary.strip()) == 0: continue

                row.append(id)
                row.append(summary)
                body.append(row)

            filename = datadir + "response." + str(week) + "." + type + ".txt"
            fio.WriteMatrix(filename, body, head)
Example #6
def getPhraseClusterAll(sennafile,
                        weightfile,
                        output,
                        ratio=None,
                        MalformedFilter=False,
                        source=None,
                        np=None):
    NPCandidates, sources = getNPs(sennafile,
                                   MalformedFilter,
                                   source=source,
                                   np=np)

    if len(NPCandidates) == 0: return

    NPs, matrix = fio.ReadMatrix(weightfile, hasHead=True)

    #change the similarity to distance
    matrix = Similarity2Distance(matrix)

    index = {}
    for i, NP in enumerate(NPs):
        index[NP] = i

    newMatrix = []

    for NP1 in NPCandidates:
        assert (NP1 in index)
        i = index[NP1]

        row = []
        for NP2 in NPCandidates:
            if NP2 not in index:
                # diagnostic only: the lookup on the next line will still fail
                print NP2, weightfile, np
            j = index[NP2]
            row.append(matrix[i][j])

        newMatrix.append(row)

    V = len(NPCandidates)
    if ratio == "sqrt":
        K = int(math.sqrt(V))
    elif float(ratio) > 1:
        K = int(ratio)
    else:
        K = int(ratio * V)

    if K < 1: K = 1

    clusterid = ClusterWrapper.KMedoidCluster(newMatrix, K)

    body = []
    for NP, id in zip(NPCandidates, clusterid):
        row = []
        row.append(NP)
        row.append(id)
        body.append(row)

    fio.WriteMatrix(output, body, header=None)
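Examples #6 and #8 convert the similarity matrix to a distance matrix with `Similarity2Distance` before K-medoid clustering. Its body is not shown, but Example #10 performs the same conversion inline; a sketch consistent with that code (distance = 1 - similarity, with "NaN" entries mapped to 0):

# Sketch of Similarity2Distance, mirroring the inline loop in Example #10.
def Similarity2Distance(matrix):
    return [[1 - float(x) if x != "NaN" else 0 for x in row]
            for row in matrix]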
Example #7
def train_leave_one_lecture_out(model_dir, name='simlearn_cv'):
    #     model_dir = '../data/IE256/%s/model/%s/'%(system, name)
    #     fio.NewPath(model_dir)
    #
    #     outputdir = '../data/IE256/%s/extraction/%s_output/'%(system, name)
    #     fio.NewPath(outputdir)

    sim_extractor = Similarity()
    allfeatures = sorted(sim_extractor.features.keys())

    if True:
        k = len(allfeatures)
        #for k in range(len(allfeatures)+1):
        #features = allfeatures#['WordEmbedding']

        if k == len(allfeatures):  #use all features
            features = allfeatures
        else:
            features = [allfeatures[k]]

        name = '_'.join(features)

        lectures = annotation.Lectures

        dict = defaultdict(int)

        MSE = []
        for i, lec in enumerate(lectures):
            train = [x for x in lectures if x != lec]
            test = [lec]

            print train
            print test

            model_file = os.path.join(model_dir, '%d_%s.model' % (lec, name))

            if fio.IsExist(model_file):
                with open(model_file, 'rb') as handle:
                    clf = pickle.load(handle)
            else:
                train_X, train_Y = combine_files(train, features)
                clf = svm.SVR()
                clf.fit(train_X, train_Y)

                with open(model_file, 'wb') as handle:
                    pickle.dump(clf, handle)

            for q in ['q1', 'q2']:
                test_X, test_Y = combine_files(test, features, prompts=[q])
                predict_Y = clf.predict(test_X)

                mse = mean_squared_error(test_Y, predict_Y)

                MSE.append([lec, q, mse])

        output = '../data/%s/simlearning.cv.%s.txt' % (course, name)

        fio.WriteMatrix(output, MSE, header=['lec', 'prompt', 'MSE'])
Example #8
def getPhraseClusterPhrase(phrasefile,
                           weightfile,
                           output,
                           ratio=None,
                           method=None):
    NPCandidates = fio.ReadFile(phrasefile)
    if len(NPCandidates) == 0: return

    NPs, matrix = fio.ReadMatrix(weightfile, hasHead=True)

    #change the similarity to distance
    matrix = Similarity2Distance(matrix)

    index = {}
    for i, NP in enumerate(NPs):
        index[NP] = i

    # keep only candidates present in the weight matrix, so the rows of
    # newMatrix stay aligned with NPCandidates in the final zip() with clusterid
    NPCandidates = [NP for NP in NPCandidates if NP in index]

    newMatrix = []

    for NP1 in NPCandidates:
        i = index[NP1]

        row = []
        for NP2 in NPCandidates:
            if NP2 not in index:
                print NP2, weightfile, method
                continue

            j = index[NP2]
            row.append(matrix[i][j])

        newMatrix.append(row)

    V = len(NPCandidates)
    if ratio == "sqrt":
        K = int(math.sqrt(V))
    elif float(ratio) >= 1:
        K = int(ratio)
    else:
        K = int(ratio * V)

    if K < 1: K = 1

    K = min(K, V)

    clusterid = ClusterWrapper.KMedoidCluster(newMatrix, K)

    body = []
    for NP, id in zip(NPCandidates, clusterid):
        row = []
        row.append(NP)
        row.append(id)
        body.append(row)

    fio.WriteMatrix(output, body, header=None)
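The cluster-count rule is duplicated across Examples #6, #8, and #10: take sqrt(V) when the ratio is "sqrt" (or missing), treat a ratio >= 1 as an absolute K, otherwise use a fraction of the vocabulary size V, clamped to [1, V]. A hypothetical helper factoring out that shared logic:

import math

# Hypothetical refactoring of the shared K-selection rule; this helper
# does not exist in the original codebase.
def choose_K(ratio, V):
    if ratio is None or ratio == "sqrt":
        K = int(math.sqrt(V))
    elif float(ratio) >= 1:
        K = int(ratio)
    else:
        K = int(float(ratio) * V)
    return max(1, min(K, V))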
Example #9
def correlation_analysis(course):
    phrasedir1 = '../data/%s/oracle_annotator_1/phrase/' % course
    phrasedir2 = '../data/%s/oracle_annotator_2/phrase/' % course

    outdir = '../data/%s/simlearning/' % course
    fio.NewPath(outdir)

    sim_extractor = Similarity()

    features = sorted(sim_extractor.features.keys())
    head = features + ['score', 'predict']
    body = []
    lectures = annotation.Lectures
    name = '_'.join(features)

    for i, lec in enumerate(lectures):

        model_file = os.path.join(model_dir, '%d_%s.model' % (lec, name))

        with open(model_file, 'rb') as handle:
            clf = pickle.load(handle)

        for q in ['q1', 'q2']:

            outfile = os.path.join(outdir, str(lec), '%s%s' % (q, sim_exe))

            for phrasedir in [phrasedir1, phrasedir2]:
                path = phrasedir + str(lec) + '/'

                filename = os.path.join(path, q + sim_exe)

                data = fio.LoadDictJson(filename)

                for fdict, score, _ in data:
                    row = []

                    for fname in features:
                        x = fdict[fname]

                        if str(x) == 'nan':
                            x = 0.0

                        row.append(x)

                    predict_score = clf.predict([row])

                    row.append(score)

                    row.append(predict_score[0])

                    body.append(row)

    out_correlation = os.path.join(outdir, 'data.txt')

    print out_correlation
    fio.WriteMatrix(out_correlation, body, head)
Example #10
def getPhraseCluster(phrasedir, method='lexicalOverlapComparer', ratio=None):
    sheets = range(0, 12)

    for sheet in sheets:
        week = sheet + 1
        for type in ['POI', 'MP', 'LP']:
            weightfilename = phrasedir + str(week) + '/' + type + '.' + method
            print weightfilename

            NPs, matrix = fio.ReadMatrix(weightfilename, hasHead=True)

            #change the similarity to distance
            for i, row in enumerate(matrix):
                for j, col in enumerate(row):
                    matrix[i][j] = 1 - float(
                        matrix[i][j]) if matrix[i][j] != "NaN" else 0

            V = len(NPs)
            if ratio == None:
                K = int(math.sqrt(V))
            else:
                K = int(ratio * V)

            K = 10  # NOTE: hard-coded override of the ratio-based K computed above
            clusterid = ClusterWrapper.KMedoidCluster(matrix, K)

            #             sorted_lists = sorted(zip(NPs, clusterid), key=lambda x: x[1])
            #             NPs, clusterid = [[x[i] for x in sorted_lists] for i in range(2)]

            dict = defaultdict(int)
            for id in clusterid:
                dict[id] = dict[id] + 1

            body = []
            for NP, id in zip(NPs, clusterid):
                row = []
                row.append(NP)
                row.append(id)
                #row.append(dict[id])

                body.append(row)

            if ratio == None:
                file = phrasedir + '/' + str(
                    week
                ) + '/' + type + ".cluster.kmedoids." + "sqrt" + "." + method
            else:
                file = phrasedir + '/' + str(
                    week) + '/' + type + ".cluster.kmedoids." + str(
                        ratio) + "." + method
            fio.WriteMatrix(file, body, header=None)
Example #11
def gather_rouge(output):
    datadir = '../data/%s/' % course

    #output = '../data/IE256/result.rouge.txt'

    models = [
        'QPS_NP',
        #'QPS_A1_N', 'QPS_A2_N', 'QPS_union', 'QPS_intersect',
        'QPS_combine'
    ]
    methods = [
        'rouge_crf_optimumComparerLSATasa',
        'rouge_crf_ct.svm.default',
        #'rouge_crf_svm',
        #'rouge_crf_svr',
        #'rouge_crf_ct.svr.default',
    ]

    Header = [
        'method',
        'model',
        'R1-R',
        'R1-P',
        'R1-F',
        'R2-R',
        'R2-P',
        'R2-F',
        'RSU4-R',
        'RSU4-P',
        'RSU4-F',
    ]

    xbody = []
    for method in methods:
        for model in models:

            filename = os.path.join(datadir, model, "%s.txt" % method)

            if not fio.IsExist(filename): continue

            head, body = fio.ReadMatrix(filename, hasHead=True)

            row = [method, model]
            row += body[-1][1:]

            xbody.append(row)

    fio.WriteMatrix(output, xbody, Header)
Example #12
def writegraph_leave_one_lecture_out_lsa(model_dir,
                                         phrasedir,
                                         modelname='lsa'):
    lectures = annotation.Lectures

    for i, lec in enumerate(lectures):
        test = [lec]

        path = os.path.join(phrasedir, str(lec))

        for q in ['q1', 'q2']:
            #write the output
            phrasefile = os.path.join(path, "%s.%s.key" % (q, method))
            phrases = fio.LoadList(phrasefile)

            if modelname == 'lsa':
                similarities_results = os.path.join(
                    path, "%s.%s.optimumComparerLSATasa" % (q, method))
            elif modelname == 'svm':
                similarities_results = os.path.join(path,
                                                    "%s.%s.svm" % (q, method))
            else:
                raise ValueError('unsupported modelname: %s' % modelname)

            simhead, simbody = fio.ReadMatrix(similarities_results,
                                              hasHead=True)

            assert (len(simhead) == len(phrases))

            body = []
            for i, p1 in enumerate(phrases):
                for j, p2 in enumerate(phrases):
                    if j <= i:
                        continue  # undirected graph

                    score = simbody[i][j]

                    score = float(score) if score != 'NaN' else 0.0

                    #if score == 0.0: score = 0.000001
                    #if score < 0.5: continue
                    if score == 0.0: continue

                    #row = [i, j, '%f'%score]
                    row = [i, j]

                    body.append(row)

            output = os.path.join(
                path, "%s.%s.%s%s" % (q, method, modelname, net_exe))
            fio.WriteMatrix(output, body)
Example #13
def predict_IE256(train_course, model_dir, phrasedir, modelname='svm'):
    sim_extractor = Similarity()
    allfeatures = sorted(sim_extractor.features.keys())

    features = allfeatures

    name = '_'.join(features)

    lectures = annotation.Lectures

    for i, lec in enumerate(lectures):
        test = [lec]

        print test
        model_file = os.path.join(model_dir,
                                  '%s_%s.model' % (train_course, name))

        with open(model_file, 'rb') as handle:
            clf = pickle.load(handle)

        path = os.path.join(phrasedir, str(lec))

        for q in ['q1', 'q2']:
            test_X, test_Y = combine_files_test(phrasedir,
                                                test,
                                                features,
                                                prompts=[q])
            predict_Y = clf.predict(test_X)

            #write the output
            phrasefile = os.path.join(path, "%s.%s.key" % (q, method))
            phrases = fio.LoadList(phrasefile)

            assert (len(predict_Y) == len(phrases) * len(phrases))

            k = 0
            body = []
            for p1 in phrases:
                row = []
                for p2 in phrases:
                    row.append(predict_Y[k])
                    k += 1
                body.append(row)

            output = os.path.join(path, "%s.%s.%s" % (q, method, modelname))
            fio.WriteMatrix(output, body, phrases)
Example #14
def correlation_analysis_noduplicate():
    phrasedir1 = '../data/%s/oracle_annotator_1/phrase/' % course
    phrasedir2 = '../data/%s/oracle_annotator_2/phrase/' % course

    outdir = '../data/%s/simlearning/' % course
    fio.NewPath(outdir)

    sim_extractor = Similarity()

    features = sorted(sim_extractor.features.keys())
    head = features + ['score']
    body = []
    lectures = annotation.Lectures

    for i, lec in enumerate(lectures):
        for q in ['q1', 'q2']:

            outfile = os.path.join(outdir, str(lec), '%s%s' % (q, sim_exe))

            for phrasedir in [phrasedir1, phrasedir2]:
                path = phrasedir + str(lec) + '/'

                filename = os.path.join(path, q + sim_exe)

                data = fio.LoadDictJson(filename)

                for fdict, score, pd in data:
                    if pd['p1'] == pd['p2']:
                        print pd['p1']
                        continue

                    row = []

                    for name in features:
                        x = fdict[name]

                        if str(x) == 'nan':
                            x = 0.0

                        row.append(x)
                    row.append(score)

                    body.append(row)

    out_correlation = os.path.join(outdir, 'data.txt')
    fio.WriteMatrix(out_correlation, body, head)
Example #15
def split_rouge(filename, prefix, N=2):
    head, body = fio.ReadMatrix(filename, hasHead=True)

    newbodies = [[] for i in range(N)]

    for i, row in enumerate(body[:-1]):
        newbodies[i % N].append(row)

    #compute the new average
    for k in range(len(newbodies)):
        row = ['ave']
        for i in range(1, len(head)):
            scores = [float(xx[i]) for xx in newbodies[k]]
            row.append(numpy.mean(scores))
        newbodies[k].append(row)

    for i, newbody in enumerate(newbodies):
        fio.WriteMatrix('%s_q%d.txt' % (prefix, i + 1), newbody, head)
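`split_rouge` assumes the input rows alternate between prompts (the per-week `getRouge` writers in Examples #24 and #25 append one q1 row, then one q2 row, per week), so splitting by `i % N` with N=2 separates the prompts; the `_q1`/`_q2` output names reflect that. A hypothetical call with an illustrative path:

# Hypothetical usage: split an alternating q1/q2 result file into
# prefix_q1.txt and prefix_q2.txt (the path is illustrative only).
split_rouge('../data/IE256/rouge.txt', '../data/IE256/rouge', N=2)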
Example #16
def write_communite_to_clusters(communities, phrases, output):
    body = []

    dict = {}
    for i, community in enumerate(communities):
        for node in community:
            row = [phrases[node], i + 1]
            body.append(row)
            dict[node] = 1


#     k = len(communities)
#     #write phrases that are not in any communities
#     for i in range(len(phrases)):
#         if i in dict: continue
#
#         row = [phrases[i], k]
#         k += 1
#         body.append(row)

    fio.WriteMatrix(output, body, None)
Example #17
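# NOTE: this example is a fragment -- the body of a per-system evaluation
# loop; head, body, system, and output are defined by the enclosing code.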
        crf_sub_output = '../data/%s/%s/extraction/all_output/' % (course,
                                                                   system)

        evaluator = CRFEval(class_index_dict_file, crf_sub_output)

        evaluator.get_label_accuracy()
        evaluator.get_mention_precision()
        evaluator.get_mention_recall()
        evaluator.get_mention_F_measure()

        for test in sorted(evaluator.dict):
            if test.startswith('overall'): continue

            #row = [system, evaluator.dict['overall_accuracy']['value'], evaluator.dict['overall_mention_precision']['value'], evaluator.dict['overall_mention_recall']['value'], evaluator.dict['overall_mention_F_measure']]
            row = [
                system, test,
                evaluator.dict[test]['mention_precision']['value'],
                evaluator.dict[test]['mention_recall']['value'],
                evaluator.dict[test]['mention_F_measure']
            ]
            body.append(row)

        evaluator.get_sentence_label_accuracy()
        print 'accuracy:%.4f (%d/%d)' % (
            evaluator.dict['sentence_accuracy']['value'],
            evaluator.dict['sentence_accuracy']['correct'],
            evaluator.dict['sentence_accuracy']['total'])

        #print 'accuracy:%.4f\tprecision:%.4f\trecall:%.4f\tF-measure:%.4f' % (evaluator.dict['overall_accuracy']['value'], evaluator.dict['overall_mention_precision']['value'], evaluator.dict['overall_mention_recall']['value'], evaluator.dict['overall_mention_F_measure'])

    fio.WriteMatrix(output, body, head)
Example #18
def TestRouge():
    #ref = ["police killed the gunman"]
    #S1 = ["police kill the gunman"]
    #S2 = ["the gunman kill police"]
    
    S1_1 = ['Unable to finish the problem set before the quiz',
'Unable to understand why R is useful',
'Some questions were not answered',
'The problem set was confusing',
'Did not understand plots in R, or mean and median'
]
    
    S1_3=['There were two fundamental issues with the class. One, the students felt rushed and were unable to complete the required exercises, and two, the teaching assistant was seen as disorganized and rushed.']
    
    S1_4 = ['The recitation was a good time to learn about the many useful features of R, and how to use them, e.g., plotting data sets and finding the mean and median. The questions in the problem set were well-organized to this end. The application to stock prices was a helpful demonstration.']
    
    S1_2 = ['Learned some useful built-in functions of R',
'Practiced programming in R',
'The question types in the problem set were good',
'Application of R to stock values was helpful',
'The organization of the recitation was good']

    S2_1=['Unfinished In-Class Questions',
'Knowing when to use R',
'Calculating Means/Medians',
'Using / Creating a Histogram',
'Using Rstudio']

    S2_2 = ['Enjoyed Practice Session / In Class Questions',
'Liked learning about capabilities of rstudio',
'Liked Graphical Depictions / Diagrams',
'Liked Learning about Practical Applications of R',
'Enjoyed Calculating R']
    
    S2_3=['Students had trouble knowing when to use R, and would have liked more examples. Some students had trouble using Rstudio. They were also concerned about the questions in the practice session, as they did not get to go over them.']
    
    S2_4 = ['Students seemed to like using rstudio to calculate R, and creating graphics and seeing applications (such as stock values) for R. They liked the practice problems, and would have liked more of them.']
    
    S1_5=['we cannot solve the last two questions of ps if we can solve them before quiz it will be very good ',
'Still not very clear how to use R/what data we were using at all times today when we were discussing R',
'Ps assistant couldn\'t manage her time properly.',
'question 3 in ps 1 was confusing',
'we can plot normal dist when the values are not continous but for it to work we need a lot independent variables , but like how much ?in the question it was 100',
]

    S1_6=['things we can do with r',
    'Question types',
    'I did not think that these questions can be created in lectures.',
    'Interesting to combine the R with statistics on stock values',
    'In the first part of class, we practised R programming, whuch was easy to understand.',
    ]
    
    S2_5=['The r experience was a bit Fast and hard to follow and the ps TA made simple questions look like a bit more complicated',
    'in r function there can be more examples',
    'Still not very clear how to use R/what data we were using at all times today when we were discussing R',
    'inferring informations from histograph is a little confusing since it is new for me',
    'Rstudio ps was not good I couldn\'t concentrate much',
    ]
    
    S2_6=['In the second part, we solved questions, which was helpful for us to remember our probability knowledge.',
    'Most interesting thing was to learn that R is capable of many things like reading Excel or txt files.',
    'Make a result of collection of data, look like normal distribution',
    'the graphics in R and importing data to R',
    'learning how to calculate in R',
    ]

    body = []
    #getRouge(ref, S2)
    r1 = getRouge(S1_1, S2_1)
    r2 = getRouge(S1_2, S2_2)
    r3 = getRouge(S1_3, S2_3)
    r4 = getRouge(S1_4, S2_4)
    r5 = getRouge(S1_5, S2_5)
    r6 = getRouge(S1_6, S2_6)
    
    body.append(r1)
    body.append(r2)
    body.append(r3)
    body.append(r4)
    body.append(r5)
    body.append(r6)
    print body
    
    fio.WriteMatrix('log.txt', body)
Example #19
def getOracleRougeSplit(oracledir, np, L, metric, outputdir):
    #sheets = range(0,1)
    sheets = range(0,12)
    
    body = []
    
    for i, sheet in enumerate(sheets):
        week = i + 1
            
        #Add a cache to make it faster
        Cache = {}
        cachefile = oracledir + str(week) + '/' + 'cache.json'
        print cachefile
        if fio.IsExist(cachefile):
            with open(cachefile, 'r') as fin:
                Cache = json.load(fin)
        
        row = []
        for type in ['POI', 'MP', 'LP']:
            row.append(week)
        
            #read the TA's summary
            reffile = oracledir + str(week) + '/' + type + '.ref.summary'
            lines = fio.ReadFile(reffile)
            ref = [line.strip() for line in lines]
            
            Round = 1
            while True:
                sumfile = oracledir + str(week) + '/' + type + '.' + str(np) + '.L' + str(L) + "." + str(metric) + '.R' + str(Round) +'.summary'
                if not fio.IsExist(sumfile): break
                Round = Round + 1
            
            Round = Round - 1
            sumfile = oracledir + str(week) + '/' + type + '.' + str(np) + '.L' + str(L) + "." + str(metric) + '.R' + str(Round) +'.summary'
            
            if fio.IsExist(sumfile):
                import os
                ssfile = oracledir + str(week) + '/' + type + '.' + str(np) + '.L' + str(L) + ".summary"
                cmd = 'cp ' + sumfile + ' ' + ssfile
                print cmd
                
                os.system(cmd)
                lines = fio.ReadFile(sumfile)
                TmpSum = [line.strip() for line in lines]
                
                cacheKey = getKey(ref, TmpSum)
                if cacheKey in Cache:
                    scores = Cache[cacheKey]
                    print "Hit"
                else:
                    print "Miss", cacheKey
                    print sumfile
                    scores = getRouge(ref, TmpSum)
                    Cache[cacheKey] = scores
                    #exit()
                
                row = row + scores
            else:
                row = row + [0]*len(RougeHeader)
            
        body.append(row)
    
    print body
    print "RougeHeader", len(RougeHeader)
    header = (['week'] + RougeHeader) * 3  # one 'week' + ROUGE block per type, matching the row layout
    row = []
    row.append("average")
    print len(header)
    for i in range(1, len(header)):
        scores = [float(xx[i]) for xx in body]
        row.append(numpy.mean(scores))
    body.append(row)
    
    fio.WriteMatrix(outputdir + "rouge." + str(np) + '.L' + str(L) + "." + str(metric) + ".txt", body, header)
Example #20
def writegraph_leave_one_lecture_out(model_dir,
                                     phrasedir,
                                     modelname='svr',
                                     traincourse=None):
    from sklearn import svm
    from sklearn.metrics import mean_squared_error, precision_recall_fscore_support, accuracy_score
    import QPS_simlearning

    sim_extractor = Similarity()
    allfeatures = sorted(sim_extractor.features.keys())

    features = allfeatures

    name = '_'.join(features)

    lectures = annotation.Lectures

    for i, lec in enumerate(lectures):
        test = [lec]

        print test
        model_file = os.path.join(model_dir, '%d_%s.model' % (lec, name))
        #model_file = os.path.join(model_dir, '%s_%s.model'%('IE256_2016', name))

        with open(model_file, 'rb') as handle:
            clf = pickle.load(handle)

        path = os.path.join(phrasedir, str(lec))

        for q in ['q1', 'q2']:
            test_X, test_Y = QPS_simlearning.combine_files_test(phrasedir,
                                                                test,
                                                                features,
                                                                prompts=[q])
            predict_Y = clf.predict(test_X)

            #write the output
            phrasefile = os.path.join(path, "%s.%s.key" % (q, method))
            phrases = fio.LoadList(phrasefile)

            assert (len(predict_Y) == len(phrases) * len(phrases))

            k = 0
            body = []
            for i, p1 in enumerate(phrases):
                for j, p2 in enumerate(phrases):
                    if j <= i:
                        k += 1
                        continue  # undirected graph

                    if modelname == 'svm':
                        if predict_Y[k] == 1.0:
                            #row = [i,j, '%.1f'%predict_Y[k]]
                            row = [i, j]
                            body.append(row)
                    else:
                        row = [i, j, '%.2f' % predict_Y[k]]
                        body.append(row)

                    k += 1

            output = os.path.join(
                path, "%s.%s.%s%s" % (q, method, modelname, net_exe))
            fio.WriteMatrix(output, body)
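The `*.net` files written by Examples #12 and #20 are plain edge lists: two node indices per row, plus a weight in the SVR case. A hedged example of loading one into networkx for the community detection that Example #16 consumes; the filename is illustrative only:

import networkx as nx

# Load an edge-list file written by the writegraph_* functions
# (assumption: tab-separated "i  j  [weight]" rows, as WriteMatrix emits).
G = nx.Graph()
for line in open('q1.sum.svm.net'):  # hypothetical filename
    parts = line.split()
    if len(parts) == 3:
        G.add_edge(int(parts[0]), int(parts[1]), weight=float(parts[2]))
    elif len(parts) == 2:
        G.add_edge(int(parts[0]), int(parts[1]))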
Example #21
def train_leave_one_lecture_out_svm(model_dir, name='simlearn_cv'):
    #     model_dir = '../data/IE256/%s/model/%s/'%(system, name)
    #     fio.NewPath(model_dir)
    #
    #     outputdir = '../data/IE256/%s/extraction/%s_output/'%(system, name)
    #     fio.NewPath(outputdir)

    sim_extractor = Similarity()
    allfeatures = sorted(sim_extractor.features.keys())

    #for k in range(len(allfeatures)+1):
    k = len(allfeatures)
    if True:

        #for k in range(len(allfeatures)):
        #if allfeatures[k] != 'optimumComparerLSATasa': continue

        if k == len(allfeatures):  #use all features
            features = allfeatures
        else:
            features = [allfeatures[k]]
            #features = allfeatures[0:k] + allfeatures[k+1:]

        name = '_'.join(features)

        lectures = annotation.Lectures

        dict = defaultdict(int)

        MSE = []
        for i, lec in enumerate(lectures):
            train = [x for x in lectures if x != lec]
            test = [lec]

            print train
            print test

            model_file = os.path.join(model_dir, '%d_%s.model' % (lec, name))

            if fio.IsExist(model_file):
                with open(model_file, 'rb') as handle:
                    clf = pickle.load(handle)
            else:
                train_X, train_Y = combine_files(train, features)
                clf = svm.SVC()
                clf.fit(train_X, train_Y)

                with open(model_file, 'wb') as handle:
                    pickle.dump(clf, handle)

            for q in ['q1', 'q2']:
                test_X, test_Y = combine_files(test, features, prompts=[q])
                predict_Y = clf.predict(test_X)

                prf = precision_recall_fscore_support(test_Y,
                                                      predict_Y,
                                                      average='weighted')

                accuracy = accuracy_score(test_Y, predict_Y)

                MSE.append([lec, q, accuracy] + [prf[0], prf[1], prf[2]])

        output = '../data/%s/simlearning.cv.svm.%s.txt' % (course, name)

        fio.WriteMatrix(output,
                        MSE,
                        header=[
                            'lec', 'prompt', 'accuracy', 'precision', 'recall',
                            'f-score'
                        ])
Example #22
def extractPhraseFromAnnotation(phrasedir, annotator, summarydir=None):
    for doc, lec, annotator in annotation.generate_all_files(
            annotation.datadir + 'json/',
            '.json',
            anotators=annotator,
            lectures=annotation.Lectures):
        print doc

        #load task
        task = annotation.Task()
        task.loadjson(doc)

        path = phrasedir + str(lec) + '/'
        fio.NewPath(path)

        #Add a cache to make it faster
        Cache = {}
        cachefile = phrasedir + str(lec) + '/' + 'cache.json'
        if fio.IsExist(cachefile):
            with open(cachefile, 'r') as fin:
                Cache = json.load(fin)

        for prompt in ['q1', 'q2']:
            filename = path + prompt + '.' + method + '.key'
            cluster_output = path + prompt + '.cluster.kmedoids.sqrt.oracle.%s' % method

            if summarydir:
                fio.NewPath(os.path.join(summarydir, str(lec)))
                summary_file = os.path.join(summarydir, str(lec),
                                            '%s.summary' % prompt)

            body = []

            if summarydir:
                summaries = []

            phrase_summary_dict = task.get_phrase_summary_textdict(prompt)
            extracted_phrases = []
            phrase_annotation = task.get_phrase_annotation(prompt)
            for rank in sorted(phrase_annotation):
                rank_phrases = []
                phrases = phrase_annotation[rank]
                for phrasedict in phrases:
                    phrase = phrasedict['phrase'].lower()
                    extracted_phrases.append(phrase)
                    rank_phrases.append(phrase)
                    row = [phrase, rank]
                    body.append(row)

                if summarydir:
                    rank_summary = phrase_summary_dict[rank]
                    max_summary = get_max_phrase_by_ROUGE(
                        rank_summary, rank_phrases, Cache)
                    print max_summary

                    summaries.append(max_summary)

            fio.SaveList(extracted_phrases, filename)

            fio.WriteMatrix(cluster_output, body, header=None)

            if summarydir:
                fio.SaveList(summaries, summary_file)

            with open(cachefile, 'w') as outfile:
                json.dump(Cache, outfile, indent=2)
Example #23
def extractStatistics(annotator, output):

    students = set()

    body = []
    for doc, lec, annotator in annotation.generate_all_files(
            annotation.datadir + 'json/',
            '.json',
            anotators=annotator,
            lectures=annotation.Lectures):
        print doc

        #load task
        task = annotation.Task()
        task.loadjson(doc)

        for prompt in ['q1', 'q2']:
            #for each lecture, prompt
            row = [lec, prompt]

            stu = set()

            #number of students
            wc = 0.0
            dict = {}
            raw_responses = task.get_raw_response(prompt)
            for response_row in raw_responses[1:]:
                student_id, response = response_row[
                    'student_id'], response_row['response']
                student_id = student_id.lower()

                if student_id not in dict:
                    dict[student_id] = []
                dict[student_id].append(response)
                students.add(student_id)
                stu.add(student_id)

                wc += len(response.split())

            response_number = len(dict)
            row.append(response_number)  #number of responses
            row.append(wc)  #word count
            row.append(wc /
                       response_number)  #averaged number of words per response

            phrase_summary_dict = task.get_phrase_summary_textdict(prompt)
            extracted_phrases = []
            phrase_annotation = task.get_phrase_annotation(prompt)

            stu_h = set()
            ph_c = 0
            for rank in sorted(phrase_annotation):
                phrases = phrase_annotation[rank]
                ph_c += len(phrases)
                for phrasedict in phrases:
                    phrase = phrasedict['phrase'].lower()  #phrase
                    extracted_phrases.append(phrase)

                    student_id = phrasedict['student_id'].lower().strip()
                    stu_h.add(student_id)

            row.append(ph_c)  #phrase count
            coverage = stu.intersection(stu_h)
            coverage_ratio = len(coverage) * 1.0 / len(stu)
            row.append(coverage_ratio)

            body.append(row)

    #add average
    head = [
        'lec', 'prompt', 'Response', 'Word', 'Word/Response', 'Highlights',
        'Coverage'
    ]

    row = ['', 'ave']
    for i in range(2, len(head)):
        scores = [float(xx[i]) for xx in body]
        row.append(np.mean(scores))
    body.append(row)

    #add std
    row = ['', 'std']
    for i in range(2, len(head)):
        scores = [float(xx[i]) for xx in body]
        row.append(np.std(scores))
    body.append(row)

    fio.WriteMatrix(output, body, head)

    print(len(students))
Example #24
def getRouge(datadir, maxWeek, output):
    sheets = range(0, maxWeek)

    body = []
    allbody = []

    #Krange = range(1, 25)
    Krange = [gK]

    for sheet in sheets:
        week = sheet + 1
        dir = datadir + str(week) + '/'

        for type in ['q1', 'q2']:

            maxS = 0
            maxK = -1
            maxScore = []

            Cache = {}
            cachefile = os.path.join(datadir, str(week), 'cache.json')
            print cachefile
            if fio.IsExist(cachefile):
                with open(cachefile, 'r') as fin:
                    Cache = json.load(fin)

            allrow = [week]

            #Krange = [np.random.randint(1, 25)]

            for K in Krange:

                summary_file = dir + type + '.%d.summary' % K

                print summary_file

                if not fio.IsExist(summary_file):
                    print summary_file
                    continue

                #read the TA's summary
                refs = []
                for i in range(2):
                    reffile = os.path.join(datadir, str(week),
                                           type + '.ref.%d' % i)
                    if not fio.IsExist(reffile):
                        print reffile
                        continue

                    lines = fio.ReadFile(reffile)
                    ref = [line.strip() for line in lines]
                    refs.append(ref)

                if len(refs) == 0: continue

                # combine all available reference sets (guards against a missing ref file)
                lstref = [line for ref in refs for line in ref]

                lines = fio.ReadFile(summary_file)
                TmpSum = [line.strip() for line in lines]

                cacheKey = OracleExperiment.getKey(lstref, TmpSum)
                if cacheKey in Cache:
                    scores = Cache[cacheKey]
                    print "Hit"
                else:
                    print "Miss"
                    print summary_file
                    scores = OracleExperiment.getRouge_IE256(refs, TmpSum)
                    Cache[cacheKey] = scores

                s = float(scores[RIndex])

                allrow.append(s)

                if s >= maxS:
                    maxS = s
                    maxScore = scores
                    maxK = K

            if maxK == -1: continue

            row = [week]
            row = row + maxScore + [maxK]

            body.append(row)

            allrow.append(maxK)

            allbody.append(allrow)

            try:
                fio.SaveDict2Json(Cache, cachefile)
            except Exception:
                #fio.SaveDict(Cache, cachefile + '.dict')
                pass

    header = ['week'] + RougeHeader
    row = ['ave']
    for i in range(1, len(header)):
        scores = [float(xx[i]) for xx in body]
        row.append(numpy.mean(scores))
    body.append(row)

    fio.WriteMatrix(output, body, header)

    fio.WriteMatrix(output + '.all', allbody, ['week'] + Krange + ['maxK'])
Example #25
def getRouge(datadir, maxWeek, output):
    print datadir

    sheets = range(0, maxWeek)

    body = []

    for sheet in sheets:
        week = sheet + 1
        dir = datadir + str(week) + '/'

        for type in ['q1', 'q2']:
            summary_file = dir + type + "." + 'summary'
            print summary_file

            if not fio.IsExist(summary_file):
                print summary_file
                continue

            Cache = {}
            cachefile = os.path.join(datadir, str(week), 'cache.json')
            print cachefile
            if fio.IsExist(cachefile):
                with open(cachefile, 'r') as fin:
                    Cache = json.load(fin)

            #read the TA's summary
            refs = []
            for i in range(2):
                reffile = os.path.join(datadir, str(week),
                                       type + '.ref.%d' % i)
                if not fio.IsExist(reffile):
                    print reffile
                    continue

                lines = fio.ReadFile(reffile)
                ref = [line.strip() for line in lines]
                refs.append(ref)

            if len(refs) == 0: continue

            # combine all available reference sets (guards against a missing ref file)
            lstref = [line for ref in refs for line in ref]

            lines = fio.ReadFile(summary_file)
            TmpSum = [line.strip() for line in lines]

            cacheKey = OracleExperiment.getKey(lstref, TmpSum)
            if cacheKey in Cache:
                scores = Cache[cacheKey]
                print "Hit"
            else:
                print "Miss"
                print summary_file
                scores = OracleExperiment.getRouge_IE256(refs, TmpSum)
                Cache[cacheKey] = scores

            row = [week]
            row = row + scores

            body.append(row)

            try:
                fio.SaveDict2Json(Cache, cachefile)
            except Exception as e:
                #fio.SaveDict(Cache, cachefile + '.dict')
                print e

    header = ['week'] + RougeHeader
    row = ['ave']
    for i in range(1, len(header)):
        scores = [float(xx[i]) for xx in body]
        row.append(numpy.mean(scores))
    body.append(row)

    fio.WriteMatrix(output, body, header)
Example #26
def gather_rouge(output):

    courses = ['IE256', 'IE256_2016', 'CS0445']

    rouges = [
        ('LexRank', 'QPS_NP', 'rouge_LexRank'),
        ('PhraseSum', 'QPS_NP', 'rouge_crf_optimumComparerLSATasa'),
        ('SequenceSum', 'QPS_combine_coling',
         'rouge_crf_optimumComparerLSATasa'),
        ('SimSum', 'QPS_combine_coling', 'rouge_crf_svm'),
        ('CDSum', 'QPS_combine_coling', 'rouge_crf_ct.svm.default'),
    ]

    baseline1 = ('PhraseSum', 'QPS_NP', 'rouge_crf_optimumComparerLSATasa')
    baseline2 = ('SequenceSum', 'QPS_combine_coling',
                 'rouge_crf_optimumComparerLSATasa')

    Header = [
        'course',
        'name',
        'R1-R',
        'R1-P',
        'R1-F',
        'R2-R',
        'R2-P',
        'R2-F',
        'RSU4-R',
        'RSU4-P',
        'RSU4-F',
    ]

    ROUGE_Head = [
        'id', 'R1-R', 'R1-P', 'R1-F', 'R2-R', 'R2-P', 'R2-F', 'RSU4-R',
        'RSU4-P', 'RSU4-F'
    ]

    ROUGE_index = [
        ROUGE_Head.index(name) for name in ROUGE_Head if name != 'id'
    ]

    xbody = []
    for course in courses:
        for name, model, method in rouges:
            datadir = '../data/%s/' % course

            filename = os.path.join(datadir, model, "%s.txt" % method)
            if not fio.IsExist(filename): continue

            baseline1_name = os.path.join(datadir, baseline1[1],
                                          "%s.txt" % baseline1[2])
            baseline2_name = os.path.join(datadir, baseline2[1],
                                          "%s.txt" % baseline2[2])

            if name in ['LexRank', 'SequenceSum', 'SimSum', 'CDSum']:
                pvalues1 = get_pvalues(filename, baseline1_name, ROUGE_index)
            else:
                pvalues1 = [1] * len(ROUGE_index)

            if name in ['SimSum', 'CDSum']:
                pvalues2 = get_pvalues(filename, baseline2_name, ROUGE_index)
            else:
                pvalues2 = [1] * len(ROUGE_index)

            head, body = fio.ReadMatrix(filename, hasHead=True)

            row = [course, name]
            row += [
                '%.3f%s%s' % (float(x), '*' if pvalues1[i] < 0.05 else '',
                              '+' if pvalues2[i] < 0.05 else '')
                for i, x in enumerate(body[-1][1:])
            ]

            xbody.append(row)

    fio.WriteMatrix(output, xbody, Header)
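`get_pvalues` is not among these examples. Given that each result file carries one row per (week, prompt) plus a trailing 'ave' row, one plausible reading is a paired significance test over the per-row scores of the system and baseline files; a sketch under that assumption:

from scipy import stats

# Plausible sketch of get_pvalues (assumption: paired t-test over the
# per-row scores, excluding each file's trailing 'ave' row), built on
# the fio.ReadMatrix helper used throughout this collection.
def get_pvalues(filename, baseline_name, indices):
    _, body1 = fio.ReadMatrix(filename, hasHead=True)
    _, body2 = fio.ReadMatrix(baseline_name, hasHead=True)
    pvalues = []
    for i in indices:
        a = [float(row[i]) for row in body1[:-1]]
        b = [float(row[i]) for row in body2[:-1]]
        pvalues.append(stats.ttest_rel(a, b)[1])  # two-sided p-value
    return pvalues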