Example #1
def ComputePrecisionK(modelfile, testfile, K_list):

    maxParagraphLength = 10
    maxParagraphs = 4
    #nlabels=1001
    #vocabularySize=76391
    labels = 8
    vocabularySize = 244
    model = Model(maxParagraphLength, maxParagraphs, labels, vocabularySize)

    testing = DataParser(maxParagraphLength, maxParagraphs, labels,
                         vocabularySize)
    print(testfile)
    testing.getDataFromfile(testfile)
    print("data loading done")
    print("no of test examples: " + str(testing.totalPages))

    model.load(modelfile)

    print("model loading done")

    batchSize = 1

    testing.restore()
    truePre = []
    pred = []
    for itr in range(testing.totalPages):
        data = testing.nextBatch(1)
        truePre.append(data[0])
        pre = model.predict(data)
        pred.append(pre[0])

    precAtK = {}
    for itr in K_list:
        precAtK[itr] = 0

    for i, v in enumerate(pred):
        temp = [(labId, labProb) for labId, labProb in enumerate(v)]
        #     print(temp)
        temp = sorted(temp, key=lambda x: x[1], reverse=True)
        for ele in K_list:
            pBag = 0
            for itr in range(ele):
                if truePre[i][0][temp[itr][0]] == 1:
                    pBag += 1
        #         print(float(pBag)/float(ele))
            precAtK[ele] += float(pBag) / float(ele)

    f = open("results/precAtK_model3_n", "w")
    for key in sorted(precAtK.keys()):
        #     print(key, precAtK[key]/len(pred))
        print(precAtK[key] / len(pred))
        f.write(str(key) + "\t" + str(precAtK[key] / len(pred)) + "\n")
    f.close()
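Precision@K, as computed above, sorts the per-label probabilities and counts how many of the top K labels appear in the ground truth. Below is a minimal, self-contained sketch of the same idea on toy data; the helper name precision_at_k and the toy arrays are illustrative only, not part of the example's codebase.

# Minimal sketch of the precision@K computation above, on made-up data.
import numpy as np

def precision_at_k(true_labels, scores, k):
    # Indices of the k highest-scoring labels.
    top_k = np.argsort(scores)[::-1][:k]
    # Fraction of those k labels that are actually relevant.
    return sum(true_labels[j] for j in top_k) / float(k)

toy_true = [0, 1, 0, 1, 0, 0, 1, 0]                      # multi-hot ground truth
toy_scores = [0.1, 0.8, 0.05, 0.6, 0.2, 0.0, 0.3, 0.1]   # predicted probabilities
for k in (1, 3, 5):
    print("P@%d = %.3f" % (k, precision_at_k(toy_true, toy_scores, k)))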

Example #2

def genAnalysis(modelfile, testfile, outputfile):
    maxParagraphLength = 20
    maxParagraphs = 5
    filterSizes = [1]
    num_filters = 64
    wordEmbeddingDimension = 30
    lrate = float(1e-3)
    labels = 30938
    vocabularySize = 101939

    model = Model(maxParagraphs, maxParagraphLength, labels, vocabularySize,
                  filterSizes, num_filters, wordEmbeddingDimension, lrate)

    testing = DataParser(maxParagraphs, maxParagraphLength, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")
    print("no of test examples: " + str(testing.totalPages))

    batchSize = 1
    testing.restore()
    truePre=[]
    pred=[]
    for itr in range(testing.totalPages):
        data=testing.nextBatch(1)
        truePre.append(data[0])
        pre=model.predict(data)
        pred.append(pre[0])

    labelIDName = open("../labelId-labelName-full.txt").read().split("\n")
    labelIDName = [  [ int(x.split("\t")[0]) , x.split("\t")[1].rstrip() ] for x in labelIDName]
    # print(labelIDName)    

    #making it a dictionary
    labelName = dict(labelIDName)
    # print(labelName[9026])

    f = open(outputfile,"w")
    for i,v in enumerate(pred):
        temp = [(labId,labProb) for labId,labProb in enumerate(v) ]
        temp = sorted(temp,key=lambda x:x[1],reverse=True)  #sorting based on label probability to get top k
        predLabel = [0]*len(temp)

        output = ""
        for itr in range(11):  # look at the top 11 predicted labels
            predLabel[temp[itr][0]] = 1
            if truePre[i][0][temp[itr][0]] == 1:
                output = output + "," + labelName[temp[itr][0]]
        f.write(str(i) + ","  + output + "\n")
    f.close()
Example #3
def ComputePrecisionK(modelfile, testfile, K_list):

    CURRENT_DIR = os.path.dirname(os.path.abspath("./WikiCategoryLabelling/"))
    sys.path.append(os.path.dirname(CURRENT_DIR + "/WikiCategoryLabelling/"))

    maxParagraphLength = 250
    maxParagraphs = 10
    labels = 1001
    vocabularySize = 76390
    model = Model(maxParagraphLength, maxParagraphs, labels, vocabularySize)

    testing = DataParser(maxParagraphLength, maxParagraphs, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)
    print("data loading done")
    print("no of test examples: " + str(testing.totalPages))

    model.load(modelfile)

    print("model loading done")

    batchSize = 10

    testing.restore()
    truePre = []
    pred = []
    for i in range(math.ceil(testing.totalPages / batchSize)):
        if i < testing.totalPages // batchSize:
            data = testing.nextBatch(batchSize)
        else:
            # last, smaller batch when totalPages is not a multiple of batchSize
            data = testing.nextBatch(testing.totalPages % batchSize)
        truePre.extend(data[0])
        pre = model.predict(data)
        pred.extend(pre[0].tolist())

    avgPrecK = [0] * len(K_list)
    for i, p in enumerate(pred):
        sortedL = sorted(range(len(p)), key=p.__getitem__, reverse=True)
        for k, K in enumerate(K_list):
            labelK = sortedL[:K]
            precK = 0
            for l in labelK:
                if truePre[i][l] == 1:
                    precK += 1
            avgPrecK[k] += precK / float(K)
    avgPrecK = [float(a) / len(pred) for a in avgPrecK]

    for p in avgPrecK:
        print(str(p))
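This example walks the test set in fixed-size batches and finishes with a smaller batch for the remainder. A standalone sketch of that splitting pattern, with made-up sizes, is:

# Sketch of the batch-splitting pattern above: full batches of `batchSize`,
# plus one smaller final batch for the remainder (toy values).
import math

totalPages = 25
batchSize = 10
sizes = []
for i in range(math.ceil(totalPages / batchSize)):
    if i < totalPages // batchSize:
        sizes.append(batchSize)                # full batch
    else:
        sizes.append(totalPages % batchSize)   # final partial batch
print(sizes)  # [10, 10, 5]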
Example #4

paragraphLength = int(sys.argv[1])
maxParagraphs = int(sys.argv[2])
filterSizes = [int(i) for i in sys.argv[3].split("-")]
print(filterSizes)
num_filters = int(sys.argv[4])
wordEmbeddingDimension = int(sys.argv[5])
batchSize = int(sys.argv[6])
epochEnd = int(sys.argv[7])
folder_name = sys.argv[8]
nlabels = 8
vocabularySize = 244

training = DataParser(maxParagraphs,paragraphLength,nlabels,vocabularySize)
training.getDataFromfile("../../Reuter_dataset/reuters_sparse_training.txt")
model = Model(maxParagraphs,paragraphLength,nlabels,vocabularySize,filterSizes,num_filters,wordEmbeddingDimension)

costfile = open("results/costfile.txt","a")
output = folder_name

epoch=0
# epochEnd=400
costepochs = []

for e in range(epoch,epochEnd):
    
    cost=0

    for itr in range(int(training.totalPages/batchSize)):
        cost += model.train(training.nextBatch(batchSize))
Example #5
filterSizes = [int(i) for i in sys.argv[3].split("-")]
print(filterSizes)
num_filters = int(sys.argv[4])
wordEmbeddingDimension = int(sys.argv[5])
batchSize = int(sys.argv[6])
epochEnd = int(sys.argv[7])
folder_name = sys.argv[8]
lrate = float(sys.argv[9])
poolLength = int(sys.argv[10])
keep_prob = float(sys.argv[11])

nlabels = 30938
vocabularySize = 101939

training = DataParser(maxParagraphs, paragraphLength, nlabels, vocabularySize)
training.getDataFromfile(
    "/home/khushboo/wiki10/dataset/original_split/wiki10_miml_train.txt")
#training.getDataFromfile("minus_top5_dataset/wiki10_miml_minusTop5labels_train_dl.txt")
model = Model(maxParagraphs, paragraphLength, nlabels, vocabularySize,
              filterSizes, num_filters, wordEmbeddingDimension, lrate,
              poolLength, keep_prob)

costfile = open("results/costfile.txt", "a")
output = folder_name

epoch = 0
# epochEnd=400
costepochs = []

for e in range(epoch, epochEnd):

    cost = 0

Example #6

def ComputeFscore(modelfile, testfile, outputfile):
    maxParagraphLength = int(sys.argv[1])
    maxParagraphs = int(sys.argv[2])
    filterSizes = [int(i) for i in sys.argv[3].split("-")]
    num_filters = int(sys.argv[4])
    wordEmbeddingDimension = int(sys.argv[5])
    # batchSize= int(sys.argv[6])
    # epochs= int(sys.argv[7])
    # folder_name = sys.argv[8]

    labels = 8
    vocabularySize = 244

    model = Model(maxParagraphs, maxParagraphLength, labels, vocabularySize,
                  filterSizes, num_filters, wordEmbeddingDimension)

    testing = DataParser(maxParagraphs, maxParagraphLength, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")

    testing.restore()
    truePre = []
    pred = []
    for itr in range(testing.totalPages):
        data = testing.nextBatch(1)
        truePre.append(data[0])
        pre = model.predict(data)
        pred.append(pre[0])

    thres = 0.5
    # Use the first 50% of the data for threshold tuning (test and CV files were merged).
    valid = int(len(truePre) * 0.5)
    labelsCount = {}
    ConfusionMa = {}
    fScr = {}
    thresLab = {}
    for la in range(labels):
        if la % 25 == 0:
            print("Current label", la)
        t = []
        p = []
        for i in range(valid):
            t.append(truePre[i][0][la])
            p.append(pred[i][la])
        bestF, bestThre = thresholdTuning(t, p)

        t = []
        p = []
        for i in range(valid, len(truePre)):
            t.append(truePre[i][0][la])
            p.append(pred[i][la])

        p = np.array(p)
        fScr[la] = f1_score(t, p >= bestThre)
        ConfusionMa[la] = confusion_matrix(t, p > bestThre)
        thresLab[la] = bestThre

    f = open(outputfile, "a")
    output = sys.argv[9]

    sum_fscore = 0.0
    for i in range(labels):
        sum_fscore = sum_fscore + fScr[i]
        output = output + " , " + str(fScr[i])
    output += " , " + str(sum_fscore / float(labels - 1))
    print("Fscore at " + sys.argv[7] + " epochs: " +
          str(sum_fscore / float(labels - 1)))
    f.write(output + "\n")
    f.close()
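The per-label threshold search here relies on a project helper, thresholdTuning, whose implementation is not shown in this listing. A plausible minimal version, sketched with scikit-learn and written here purely as an assumption about what such a helper might do, could be:

# Hypothetical sketch of a thresholdTuning-style helper: scan candidate
# thresholds and keep the one with the best F1 (not the project's actual code).
import numpy as np
from sklearn.metrics import f1_score

def threshold_tuning_sketch(y_true, y_prob, candidates=None):
    y_true = np.asarray(y_true)
    y_prob = np.asarray(y_prob)
    if candidates is None:
        candidates = np.linspace(0.05, 0.95, 19)
    best_f, best_t = 0.0, 0.5
    for t in candidates:
        f = f1_score(y_true, y_prob >= t)
        if f > best_f:
            best_f, best_t = f, t
    return best_f, best_t

print(threshold_tuning_sketch([0, 1, 1, 0, 1], [0.2, 0.7, 0.4, 0.1, 0.9]))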
Example #7
def ComputePrecisionK(modelfile, testfile, outputfile):
    maxParagraphLength = int(sys.argv[1])
    maxParagraphs = int(sys.argv[2])
    filterSizes = [int(i) for i in sys.argv[3].split("-")]
    num_filters = int(sys.argv[4])
    wordEmbeddingDimension = int(sys.argv[5])
    lrate = float(sys.argv[10])
    poolLength = int(sys.argv[11])

    labels = 30938
    vocabularySize = 101939

    model = Model(maxParagraphs, maxParagraphLength, labels, vocabularySize,
                  filterSizes, num_filters, wordEmbeddingDimension, lrate,
                  poolLength)

    testing = DataParser(maxParagraphs, maxParagraphLength, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")
    print("no of test examples: " + str(testing.totalPages))

    print("Computing Prec@k")

    #check if batchsize needs to be taken by parameter

    batchSize = 1
    testing.restore()
    truePre = []
    pred = []
    for itr in range(testing.totalPages):
        data = testing.nextBatch(1)
        truePre.append(data[0])
        pre = model.predict(data)
        pred.append(pre[0])

    def dcg_score(y_true, y_score, k=5):
        """Discounted cumulative gain (DCG) at rank K.

        Parameters
        ----------
        y_true : array, shape = [n_samples]
            Ground truth (true relevance labels).
        y_score : array, shape = [n_samples, n_classes]
            Predicted scores.
        k : int
            Rank.

        Returns
        -------
        score : float
        """
        order = np.argsort(y_score)[::-1]
        y_true = np.take(y_true, order[:k])

        gain = 2**y_true - 1

        discounts = np.log2(np.arange(len(y_true)) + 2)
        return np.sum(gain / discounts)

    def ndcg_score(ground_truth, predictions, k=5):
        """Normalized discounted cumulative gain (NDCG) at rank K.

        Normalized Discounted Cumulative Gain (NDCG) measures the performance of a
        recommendation system based on the graded relevance of the recommended
        entities. It varies from 0.0 to 1.0, with 1.0 representing the ideal
        ranking of the entities.

        Parameters
        ----------
        ground_truth : array, shape = [n_samples]
            Ground truth (true labels represented as integers).
        predictions : array, shape = [n_samples, n_classes]
            Predicted probabilities.
        k : int
            Rank.

        Returns
        -------
        score : float

        Example
        -------
        >>> ground_truth = [1, 0, 2]
        >>> predictions = [[0.15, 0.55, 0.2], [0.7, 0.2, 0.1], [0.06, 0.04, 0.9]]
        >>> ndcg_score(ground_truth, predictions, k=2)
        1.0
        >>> predictions = [[0.9, 0.5, 0.8], [0.7, 0.2, 0.1], [0.06, 0.04, 0.9]]
        >>> ndcg_score(ground_truth, predictions, k=2)
        0.6666666666
        """
        lb = LabelBinarizer()
        lb.fit(range(len(predictions) + 1))
        T = lb.transform(ground_truth)

        scores = []

        # Iterate over each y_true and compute the DCG score
        for y_true, y_score in zip(T, predictions):
            actual = dcg_score(y_true, y_score, k)
            best = dcg_score(y_true, y_true, k)
            score = float(actual) / float(best)
            scores.append(score)

        return np.mean(scores)

    # The helper functions above must be defined before they are called, so the
    # NDCG evaluation runs here, after the definitions.
    for k in [1, 3, 5]:
        print(ndcg_score(truePre, pred, k))
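To make the arithmetic inside dcg_score concrete, the snippet below works out DCG@2 for a single toy ranking using the same gain (2**rel - 1) and log2 discounts; the toy vectors are illustrative only.

# Worked DCG@2 example using the same formula as dcg_score above (toy data).
import numpy as np

y_true = np.array([0, 1, 0, 0])              # relevance of each label
y_score = np.array([0.15, 0.55, 0.2, 0.1])   # predicted scores

order = np.argsort(y_score)[::-1]        # ranking by score: [1, 2, 0, 3]
top = np.take(y_true, order[:2])         # relevance of the top 2: [1, 0]
gain = 2 ** top - 1                      # [1, 0]
discounts = np.log2(np.arange(2) + 2)    # [1.0, 1.585]
print(np.sum(gain / discounts))          # 1.0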
Example #8

'''
maxParagraphLength = 10
maxParagraphs = 8
#nlabels=1001
#vocabularySize=76391
nlabels = 8
vocabularySize = 244
training = DataParser(maxParagraphLength, maxParagraphs, nlabels,
                      vocabularySize)
#training.getDataFromfile("data/wiki_fea_76390_Label_1000_train")
training.getDataFromfile(
    "/home/khushboo/Desktop/Reuter_dataset/reuters_sparse_training.txt")

model = Model(maxParagraphLength, maxParagraphs, nlabels, vocabularySize)

batchSize = 1

epoch = 0
epochEnd = 100
for e in range(epoch, epochEnd):
    print('Epoch: ' + str(e + 1))
    cost = 0
    for itr in range(int(training.totalPages / batchSize)):
        cost += model.train(training.nextBatch(batchSize))
    print(str(cost / training.totalPages))

    if (e + 1) % 10 == 0:
Example #9
def ComputeFscore(modelfile, testfile, outputfile):
    labels = 8
    vocabularySize = 244
    regLambda = float(sys.argv[1])

    model = Model(labels, vocabularySize, regLambda)

    testing = DataParser(labels, vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")

    testing.restore()
    truePre = []
    pred = []
    for itr in range(testing.totalPages):
        data = testing.nextBatch(1)
        truePre.append(data[0])
        pre = model.predict(data)
        pred.append(pre[0])

    thres = 0.5
    # Use the first 50% of the data for threshold tuning (test and CV files were merged).
    valid = int(len(truePre) * 0.5)
    labelsCount = {}
    ConfusionMa = {}
    fScr = {}
    thresLab = {}
    for la in range(labels):
        if la % 25 == 0:
            print("Current label", la)
        t = []
        p = []
        for i in range(valid):
            t.append(truePre[i][0][la])
            p.append(pred[i][la])
        bestF, bestThre = thresholdTuning(t, p)

        t = []
        p = []
        for i in range(valid, len(truePre)):
            t.append(truePre[i][0][la])
            p.append(pred[i][la])

        p = np.array(p)
        fScr[la] = f1_score(t, p >= bestThre)
        ConfusionMa[la] = confusion_matrix(t, p > bestThre)
        thresLab[la] = bestThre

    f = open(outputfile, "a")
    output = sys.argv[5]

    sum_fscore = 0.0
    for i in range(labels):
        sum_fscore = sum_fscore + fScr[i]
        output = output + "," + str(fScr[i])
    output += "," + str(sum_fscore / float(labels - 1))
    print("Fscore at " + sys.argv[3] + " epochs: " +
          str(sum_fscore / float(labels - 1)))
    f.write(output + "\n")
    f.close()
Example #10
def ComputePrecisionK(modelfile, testfile, outputfile):
    maxParagraphLength = int(sys.argv[1])
    maxParagraphs = int(sys.argv[2])
    filterSizes = [int(i) for i in sys.argv[3].split("-")]
    num_filters = int(sys.argv[4])
    wordEmbeddingDimension = int(sys.argv[5])
    lrate = float(sys.argv[10])

    keep_prob = 1.0

    labels = 30938
    vocabularySize = 101939

    model = Model(maxParagraphs, maxParagraphLength, labels, vocabularySize,
                  filterSizes, num_filters, wordEmbeddingDimension, lrate,
                  keep_prob)

    testing = DataParser(maxParagraphs, maxParagraphLength, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")
    print("no of test examples: " + str(testing.totalPages))

    print("Computing Prec@k")
    
    #check if batchsize needs to be taken by parameter

    batchSize = 1
    testing.restore()
    truePre=[]
    pred=[]
    for itr in range(testing.totalPages):
        data=testing.nextBatch(1)
        truePre.append(data[0])
        pre=model.predict(data)
        pred.append(pre[0])

    K_list = [1, 3, 5]  # Prec@1 ... Prec@5
    precAtK = [0.0] * 6

    # # To get Prec only on the last 50% of test data (first 50% is for cross validation):
    # valid = int(len(truePre) * 0.5)
    # pred = pred[valid:]
    # truePre = truePre[valid:]

    for i, v in enumerate(pred):
        temp = [(labId, labProb) for labId, labProb in enumerate(v)]
        # sort by label probability to get the top k
        temp = sorted(temp, key=lambda x: x[1], reverse=True)
        for ele in K_list:
            for itr in range(ele):  # top `ele` predictions
                if truePre[i][0][temp[itr][0]] == 1:
                    precAtK[ele] += 1

    f = open(outputfile, "a")
    output = sys.argv[9]

    for k in K_list:
        precAtK[k] /= (k * len(pred))
        print("Prec@" + str(k) + " = " + str(precAtK[k]))
        output = output + "," + "Prec@" + str(k) + "=," + str(precAtK[k])
    f.write(output + "\n")
    f.close()
Example #11
filterSizes = [int(i) for i in sys.argv[3].split("-")]
print(filterSizes)
num_filters = int(sys.argv[4])
wordEmbeddingDimension = int(sys.argv[5])
batchSize = int(sys.argv[6])
epochEnd = int(sys.argv[7])
folder_name = sys.argv[8]
lrate = float(sys.argv[9])
poolLength = int(sys.argv[10])
keep_prob = float(sys.argv[11])

nlabels = 30938
vocabularySize = 101939

training = DataParser(maxParagraphs, paragraphLength, nlabels, vocabularySize)
training.getDataFromfile("dataset/wiki10_miml_train_dl.txt")
model = Model(maxParagraphs, paragraphLength, nlabels, vocabularySize,
              filterSizes, num_filters, wordEmbeddingDimension, lrate,
              poolLength, keep_prob)

costfile = open("results/costfile.txt", "a")
output = folder_name

epoch = 0
# epochEnd=400
costepochs = []

for e in range(epoch, epochEnd):

    cost = 0

    for itr in range(int(training.totalPages / batchSize)):
Example #12
filterSizes = [int(i) for i in sys.argv[3].split("-")]
print(filterSizes)
num_filters = int(sys.argv[4])
wordEmbeddingDimension = int(sys.argv[5])
batchSize = int(sys.argv[6])
epochEnd = int(sys.argv[7])
folder_name = sys.argv[8]
lrate = float(sys.argv[9])
poolLength = int(sys.argv[10])
keep_prob = float(sys.argv[11])

nlabels = 30938
vocabularySize = 101939

training = DataParser(maxParagraphs, paragraphLength, nlabels, vocabularySize)
training.getDataFromfile(
    "minus_top5_dataset/wiki10_miml_minusTop5labels_train_dl.txt")
model = Model(maxParagraphs, paragraphLength, nlabels, vocabularySize,
              filterSizes, num_filters, wordEmbeddingDimension, lrate,
              poolLength, keep_prob)

costfile = open("results/costfile.txt", "a")
output = folder_name

epoch = 0
# epochEnd=400
costepochs = []

for e in range(epoch, epochEnd):

    cost = 0

    for itr in range(int(training.totalPages / batchSize)):

Example #13

def ComputePrecisionK(modelfile, testfile):
    maxParagraphLength = 50
    maxParagraphs = 10
    filterSizes = [2, 3, 4]
    num_filters = 16
    wordEmbeddingDimension = 50
    lrate = float(0.001)
    poolLength = 2
    labels = 30938
    vocabularySize = 101939

    keep_prob = 1.0

    model = Model(maxParagraphs,maxParagraphLength,labels,vocabularySize,\
                    filterSizes,num_filters,wordEmbeddingDimension,lrate,poolLength, keep_prob)

    testing = DataParser(maxParagraphs, maxParagraphLength, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")
    print("no of test examples: " + str(testing.totalPages))

    batchSize = 1
    testing.restore()
    truePre = []
    pred = []
    for itr in range(testing.totalPages):
        data = testing.nextBatch(1)
        truePre.append(data[0])
        pre = model.predict(data)
        pred.append(pre[0])

    labelids = open("sorted_labelid_dl_clustering.txt",
                    "r").read().strip().split("\n")
    labelids = [int(x) for x in labelids]

    no_of_partition = 10
    partition_size = labels // no_of_partition  # integer bucket size
    prec1 = [0] * no_of_partition
    prec3 = [0] * no_of_partition
    prec5 = [0] * no_of_partition

    for i, v in enumerate(pred):
        temp = [(labId, labProb) for labId, labProb in enumerate(v)]
        temp = sorted(
            temp, key=lambda x: x[1],
            reverse=True)  #sorting based on label probability to get top k
        #finding how many of these were true

        if truePre[i][0][temp[0][0]] == 1:
            prec1[labelids.index(temp[0][0]) // partition_size] += 1
            prec3[labelids.index(temp[0][0]) // partition_size] += 1
            prec5[labelids.index(temp[0][0]) // partition_size] += 1

        if truePre[i][0][temp[1][0]] == 1:
            prec3[labelids.index(temp[1][0]) // partition_size] += 1
            prec5[labelids.index(temp[1][0]) // partition_size] += 1

        if truePre[i][0][temp[2][0]] == 1:
            prec3[labelids.index(temp[2][0]) // partition_size] += 1
            prec5[labelids.index(temp[2][0]) // partition_size] += 1

        if truePre[i][0][temp[3][0]] == 1:
            prec5[labelids.index(temp[3][0]) // partition_size] += 1

        if truePre[i][0][temp[4][0]] == 1:
            prec5[labelids.index(temp[4][0]) // partition_size] += 1

    print(prec1)
    print(prec3)
    print(prec5)

    prec1 = [(float(x) / testing.totalPages) * 100 for x in prec1]
    prec3 = [(float(x) / (3 * testing.totalPages)) * 100 for x in prec3]
    prec5 = [(float(x) / (5 * testing.totalPages)) * 100 for x in prec5]

    print(prec1)
    print(prec3)
    print(prec5)
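The bucket index above comes from integer division of a label's position in the sorted-frequency list by the partition size, which is why // is needed under Python 3 (a float index would raise a TypeError). A tiny sketch of that mapping with round toy numbers:

# Sketch of the rank-to-bucket mapping used above (toy numbers for illustration).
n_labels = 100
no_of_partition = 10
partition_size = n_labels // no_of_partition   # 10

for rank in (0, 9, 10, 55, 99):
    print("rank", rank, "-> bucket", rank // partition_size)
# ranks 0 and 9 fall in bucket 0, rank 10 in bucket 1, 55 in bucket 5, 99 in bucket 9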
Example #14
def genAnalysis(modelfile, testfile, confusionFile):
    maxParagraphLength = 20
    maxParagraphs = 5
    filterSizes = [1]
    num_filters = 64
    wordEmbeddingDimension = 30
    lrate = float(1e-3)
    labels = 30938
    vocabularySize = 101939

    model = Model(maxParagraphs, maxParagraphLength, labels, vocabularySize,
                  filterSizes, num_filters, wordEmbeddingDimension, lrate)

    testing = DataParser(maxParagraphs, maxParagraphLength, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")

    testing.restore()
    truePre = []
    pred = []
    for itr in range(testing.totalPages):
        data = testing.nextBatch(1)
        truePre.append(data[0])
        pre = model.predict(data)
        pred.append(pre[0])

    # Use the first 50% of the data for threshold tuning (test and CV files were merged).
    valid = int(len(truePre) * 0.5)
    thresLab = {}
    for la in range(labels):
        t = []
        p = []
        for i in range(valid):
            t.append(truePre[i][0][la])
            p.append(pred[i][la])
        bestF, bestThre = thresholdTuning(t, p)
        thresLab[la] = bestThre

    print(thresLab)

    labelIDName = open("../labelId-labelName-full.txt").read().split("\n")
    labelIDName = [[int(x.split("\t")[0]),
                    x.split("\t")[1].rstrip()] for x in labelIDName]
    # print(labelIDName)

    #making it a dictionary
    labelname = dict(labelIDName)
    # print(labelName[9026])

    f = open(confusionFile, "w")
    for itr in range(valid, testing.totalPages):  # run the analysis on the remaining 50%
        predLabel = [pred[itr][i] > thresLab[i] for i in range(labels)]
        output = ""
        for i in range(labels):
            if predLabel[i] == 1:
                output = output + "," + labelname[i]

        tn, fp, fn, tp = confusion_matrix(truePre[itr][0], predLabel).ravel()
        f.write(
            str(itr) + "," + str(tn) + "," + str(fp) + "," + str(fn) + "," +
            str(tp) + "," + output + "\n")
    f.close()
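The per-document line written above depends on scikit-learn's confusion_matrix returning a 2x2 matrix for binary inputs, whose ravel() unpacks as (tn, fp, fn, tp). A small standalone check with toy multi-hot vectors:

# Standalone check of the confusion_matrix(...).ravel() unpacking used above
# (toy multi-hot vectors for a single document).
from sklearn.metrics import confusion_matrix

true_vec = [1, 0, 1, 0, 0, 1]
pred_vec = [1, 0, 0, 1, 0, 1]
tn, fp, fn, tp = confusion_matrix(true_vec, pred_vec).ravel()
print(tn, fp, fn, tp)  # 2 1 1 2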
Example #15
paragraphLength = int(sys.argv[1])
maxParagraphs = int(sys.argv[2])
filterSizes = [int(i) for i in sys.argv[3].split("-")]
print(filterSizes)
num_filters = int(sys.argv[4])
wordEmbeddingDimension = int(sys.argv[5])
batchSize = int(sys.argv[6])
epochEnd = int(sys.argv[7])
folder_name = sys.argv[8]
lrate = float(sys.argv[9])
nlabels = 10
vocabularySize = 101940

training = DataParser(maxParagraphs, paragraphLength, nlabels, vocabularySize)
training.getDataFromfile(
    "../wiki10_miml_dataset/preprocessed_data/toplabels_split/wiki10-top10labels_train.txt"
)
model = Model(maxParagraphs, paragraphLength, nlabels, vocabularySize,
              filterSizes, num_filters, wordEmbeddingDimension, lrate)

costfile = open("results/costfile.txt", "a")
output = folder_name

epoch = 0
# epochEnd=400
costepochs = []

for e in range(epoch, epochEnd):

    cost = 0

Example #16

def ComputePrecisionK(modelfile, testfile):
    maxParagraphLength = 15
    maxParagraphs = 10
    filterSizes = [2,3,4]
    num_filters = 16
    wordEmbeddingDimension = 50
    lrate = float(0.001)
    poolLength = 2
    labels = 30938
    vocabularySize = 101939

    keep_prob = 1.0

    noOfLabelsPerClusters = open("cluster_metainfo_wiki10.txt").read().strip().split("\n")
    noOfLabelsPerClusters = [int(x) for x in noOfLabelsPerClusters]
    TotalNoOfInstances = 107016
    noOfInstancePerClusters = open("trn_cluster_noofInstances.txt").read().strip().split("\n")
    noOfInstancePerClusters = [ float(x) / TotalNoOfInstances  for x in noOfInstancePerClusters]

    numberOfClusters = 32
    weightsForCluster = []
    for x in range(numberOfClusters):
        temp = [ noOfInstancePerClusters[x] ] * noOfLabelsPerClusters[x]
        weightsForCluster.extend(temp)


    model = Model(maxParagraphs, maxParagraphLength, labels, vocabularySize,
                  filterSizes, num_filters, wordEmbeddingDimension, lrate,
                  poolLength, keep_prob)

    testing = DataParser(maxParagraphs, maxParagraphLength, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")
    print("no of test examples: " + str(testing.totalPages))

    batchSize = 1
    testing.restore()
    truePre=[]
    pred=[]
    for itr in range(testing.totalPages):
        data=testing.nextBatch(1)
        truePre.append(data[0])
        pre=model.predict(data)
        pred.append(list(np.multiply(pre[0], weightsForCluster)))

    # labelids = open("sorted_labelid_dl_clustering.txt","r").read().strip().split("\n")
    # labelids = [ int(x) for x in labelids ]
    
    noOfLabelsPerClusters = open("cluster_metainfo_cumulative.txt").read().strip().split("\n")
    noOfLabelsPerClusters = [int(x) for x in noOfLabelsPerClusters]

    no_of_partition = 32
    partition_size = labels / no_of_partition
    prec1 = [0]*no_of_partition
    prec3 = [0]*no_of_partition
    prec5 = [0]*no_of_partition

    pred1 = [0]*no_of_partition
    pred3 = [0]*no_of_partition
    pred5 = [0]*no_of_partition

    # For each test instance, map each of the top-5 predicted labels to its
    # cluster bucket; predN counts predictions, precN counts true positives.
    def find_bucket(label_id):
        # Cluster bucket for a label id, based on cumulative label counts.
        for idx in range(len(noOfLabelsPerClusters)):
            if label_id < noOfLabelsPerClusters[idx]:
                return idx
        return 31  # fall back to the last bucket

    for i, v in enumerate(pred):
        temp = [(labId, labProb) for labId, labProb in enumerate(v)]
        # sort by label probability to get the top k
        temp = sorted(temp, key=lambda x: x[1], reverse=True)

        # rank 1 counts towards P@1, P@3 and P@5
        bucket = find_bucket(temp[0][0])
        pred1[bucket] += 1
        pred3[bucket] += 1
        pred5[bucket] += 1
        if truePre[i][0][temp[0][0]] == 1:
            prec1[bucket] += 1
            prec3[bucket] += 1
            prec5[bucket] += 1

        # ranks 2 and 3 count towards P@3 and P@5
        for r in (1, 2):
            bucket = find_bucket(temp[r][0])
            pred3[bucket] += 1
            pred5[bucket] += 1
            if truePre[i][0][temp[r][0]] == 1:
                prec3[bucket] += 1
                prec5[bucket] += 1

        # ranks 4 and 5 count towards P@5 only
        for r in (3, 4):
            bucket = find_bucket(temp[r][0])
            pred5[bucket] += 1
            if truePre[i][0][temp[r][0]] == 1:
                prec5[bucket] += 1

    print(prec1)
    print(prec3)
    print(prec5)

    prec1 = [(float(x) / testing.totalPages) * 100 for x in prec1]
    prec3 = [(float(x) / (3 * testing.totalPages)) * 100 for x in prec3]
    prec5 = [(float(x) / (5 * testing.totalPages)) * 100 for x in prec5]

    print(prec1)
    print(prec3)
    print(prec5)

    print(pred1)
    print(pred3)
    print(pred5)

    pred1 = [(float(x) / testing.totalPages) * 100 for x in pred1]
    pred3 = [(float(x) / (3 * testing.totalPages)) * 100 for x in pred3]
    pred5 = [(float(x) / (5 * testing.totalPages)) * 100 for x in pred5]

    print(pred1)
    print(pred3)
    print(pred5)
Example #17
def analyse(modelfile, testfile, outputfile):
    maxParagraphLength = 20
    maxParagraphs = 10
    filterSizes = [2, 3, 4]
    num_filters = 64
    wordEmbeddingDimension = 100
    lrate = float(0.001)
    poolLength = 2
    labels = 30938
    vocabularySize = 101939

    model = Model(maxParagraphs, maxParagraphLength, labels, vocabularySize,
                  filterSizes, num_filters, wordEmbeddingDimension, lrate,
                  poolLength)

    testing = DataParser(maxParagraphs, maxParagraphLength, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")
    print("no of test examples: " + str(testing.totalPages))

    batchSize = 1
    testing.restore()
    truePre = []
    pred = []
    for itr in range(testing.totalPages):
        data = testing.nextBatch(1)
        truePre.append(data[0])
        pre = model.predict(data)
        pred.append(pre[0])

    labelids = open("../../dataset/labelid_labelcount_sans5toplabels.txt",
                    "r").read().strip().split("\n")
    labelcounts = [int((x.split("\t"))[1]) for x in labelids]
    print(labelcounts)

    totalNoofDocuments = 19406  # 6137 (test instances ) +
    rangelist = [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
    ]

    no_of_partition = len(rangelist)
    rank1 = [0] * no_of_partition
    rank3 = [0] * no_of_partition
    rank5 = [0] * no_of_partition

    for i, v in enumerate(pred):
        temp = [(labId, labProb) for labId, labProb in enumerate(v)]
        temp = sorted(
            temp, key=lambda x: x[1],
            reverse=True)  #sorting based on label probability to get top k

        for ind, count in enumerate(rangelist):
            if labelcounts[temp[0][0]] <= (count * totalNoofDocuments) / 100:
                rank1[ind] += 1
                rank3[ind] += 1
                rank5[ind] += 1

            if labelcounts[temp[1][0]] <= (count * totalNoofDocuments) / 100:
                rank3[ind] += 1
                rank5[ind] += 1

            if labelcounts[temp[2][0]] <= (count * totalNoofDocuments) / 100:
                rank3[ind] += 1
                rank5[ind] += 1

            if labelcounts[temp[3][0]] <= (count * totalNoofDocuments) / 100:
                rank5[ind] += 1

            if labelcounts[temp[4][0]] <= (count * totalNoofDocuments) / 100:
                rank5[ind] += 1

    rank1 = [(float(x) / testing.totalPages) * 100 for x in rank1]
    rank3 = [(float(x) / (3 * testing.totalPages)) * 100 for x in rank3]
    rank5 = [(float(x) / (5 * testing.totalPages)) * 100 for x in rank5]

    print(rank1)
    print(rank3)
    print(rank5)

    filePtr = open(outputfile, "w")
    for i in rank1:
        filePtr.write(str(i) + ",")
    filePtr.write("\n")

    for i in rank3:
        filePtr.write(str(i) + ",")
    filePtr.write("\n")

    for i in rank5:
        filePtr.write(str(i) + ",")
    filePtr.close()
Example #18
def ComputeFscore(modelfile, testfile, outputfile):
    maxParagraphLength = 10
    maxParagraphs = 8
    #nlabels=1001
    #vocabularySize=76391
    labels = 8
    vocabularySize = 244
    model = Model(maxParagraphLength, maxParagraphs, labels, vocabularySize)

    testing = DataParser(maxParagraphLength, maxParagraphs, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")

    testing.restore()
    truePre = []
    pred = []
    for itr in range(testing.totalPages):
        data = testing.nextBatch(1)
        truePre.append(data[0])
        pre = model.predict(data)
        pred.append(pre[0])

    thres = 0.5
    valid = int(len(truePre) * 0.35)  # first 35% of the data for threshold tuning
    labelsCount = {}
    ConfusionMa = {}
    fScr = {}
    thresLab = {}
    for la in range(labels):
        if la % 25 == 0:
            print("Current label", la)
        t = []
        p = []
        for i in range(valid):
            t.append(truePre[i][0][la])
            p.append(pred[i][la])
        bestF, bestThre = thresholdTuning(t, p)

        t = []
        p = []
        for i in range(valid, len(truePre)):
            t.append(truePre[i][0][la])
            p.append(pred[i][la])

        p = np.array(p)
        fScr[la] = f1_score(t, p >= bestThre)
        ConfusionMa[la] = confusion_matrix(t, p > bestThre)
        thresLab[la] = bestThre
    
    f = open(outputfile, "w")
    sum_fscore = 0.0
    for i in range(labels):
        sum_fscore = sum_fscore + fScr[i]
        inp = str(i) + "," + str(thresLab[i]) + "," + str(fScr[i]) + "\n"
        f.write(inp)
    f.write(str(sum_fscore / float(labels - 1)))

    print(sum_fscore)
    print(sum_fscore / float(labels - 1))
    f.close()
Example #19
def analyse(modelfile,testfile,outputfile):
    maxParagraphLength = 20
    maxParagraphs = 10
    filterSizes = [2,3,4]
    num_filters = 64
    wordEmbeddingDimension = 100
    lrate = float(0.001)
    poolLength = 2
    labels = 30938
    vocabularySize = 101939

    model = Model(maxParagraphs, maxParagraphLength, labels, vocabularySize,
                  filterSizes, num_filters, wordEmbeddingDimension, lrate,
                  poolLength)

    testing = DataParser(maxParagraphs, maxParagraphLength, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")
    print("no of test examples: " + str(testing.totalPages))

    batchSize = 1
    testing.restore()
    truePre=[]
    pred=[]
    for itr in range(testing.totalPages):
        data=testing.nextBatch(1)
        truePre.append(data[0])
        pre=model.predict(data)
        pred.append(pre[0])

    labelids = open("../../dataset/sorted_labelid_sans5toplabels.txt","r").read().strip().split("\n")
    labelids = [ int(x) for x in labelids ]

    no_of_partition = 10
    partition_size = labels / no_of_partition
    rank1 = [0]*no_of_partition
    rank3 = [0]*no_of_partition
    rank5 = [0]*no_of_partition

    for i,v in enumerate(pred):
        temp = [(labId,labProb) for labId,labProb in enumerate(v) ]
        temp = sorted(temp,key=lambda x:x[1],reverse=True)  #sorting based on label probability to get top k
        rank1[ labelids.index( temp[0][0] ) / partition_size ] += 1
        rank3[ labelids.index( temp[0][0] ) / partition_size ] += 1
        rank5[ labelids.index( temp[0][0] ) / partition_size ] += 1

        rank3[ labelids.index( temp[1][0] ) / partition_size ] += 1
        rank5[ labelids.index( temp[1][0] ) / partition_size ] += 1
        rank3[ labelids.index( temp[2][0] ) / partition_size ] += 1
        rank5[ labelids.index( temp[2][0] ) / partition_size ] += 1


        rank5[ labelids.index( temp[3][0] ) / partition_size ] += 1
        rank5[ labelids.index( temp[4][0] ) / partition_size ] += 1

    rank1 = [(float(x) / testing.totalPages) * 100 for x in rank1]
    rank3 = [(float(x) / (3 * testing.totalPages)) * 100 for x in rank3]
    rank5 = [(float(x) / (5 * testing.totalPages)) * 100 for x in rank5]

    print(rank1)
    print(rank3)
    print(rank5)

    filePtr = open(outputfile, "w")
    for i in rank1:
        filePtr.write(str(i) + ",")
    filePtr.write("\n")

    for i in rank3:
        filePtr.write(str(i) + ",")
    filePtr.write("\n")

    for i in rank5:
        filePtr.write(str(i) + ",")
    filePtr.close()
Example #20
from DataParser import DataParser
from model2 import Model2 as Model


maxParagraphLength = 250
maxParagraphs = 10
labels = 1000
vocabularySize = 15000
model = Model(maxParagraphLength, maxParagraphs, labels, vocabularySize)
training = DataParser(maxParagraphLength, maxParagraphs, labels,
                      vocabularySize)
training.getDataFromfile("data/vocab_3L_l1000_sampled_10000_red_train.txt")

batchSize = 50

epoch = 0
epochEnd = 10
for e in range(epoch, epochEnd):
    print('Epoch: ' + str(e))
    cost = 0
    for itr in range(int(training.totalPages / batchSize)):
        cost += model.train(training.nextBatch(batchSize))
        #break
    print(str(cost))

    if e % 10 == 0:
        model.save("model2_l1000_" + str(e))
Example #21
#from DataParser_siml import DataParser_siml as DataParser
#from model2_siml import Model2_siml as Model
from DataParser import DataParser as DataParser
from model3 import Model3 as Model

maxParagraphLength = 100
maxParagraphs = 1
#nlabels=1001
#vocabularySize=76391
nlabels = 8
vocabularySize = 244
training = DataParser(maxParagraphLength, maxParagraphs, nlabels,
                      vocabularySize)
#training.getDataFromfile("data/wiki_fea_76390_Label_1000_train")
training.getDataFromfile(
    "C:/gitrepo/Wiki-Text-Categorization/Distant Supervision/Reuter_dataset/reuters_sparse_training.txt"
)

model = Model(maxParagraphLength, maxParagraphs, nlabels, vocabularySize)

batchSize = 64

epoch = 0
epochEnd = 105
for e in range(epoch, epochEnd):
    print('Epoch: ' + str(e + 1))
    cost = 0
    for itr in range(int(training.totalPages / batchSize)):
        cost += model.train(training.nextBatch(batchSize))
    print(str(cost / training.totalPages))
Example #22
def ComputeFscore(modelfile, testfile, outputfile):

    CURRENT_DIR = os.path.dirname(os.path.abspath("./WikiCategoryLabelling/"))
    sys.path.append(os.path.dirname(CURRENT_DIR + "/WikiCategoryLabelling/"))

    maxParagraphLength = 250
    maxParagraphs = 10
    labels = 1000
    vocabularySize = 150000
    model = Model(maxParagraphLength, maxParagraphs, labels, vocabularySize)

    testing = DataParser(maxParagraphLength, maxParagraphs, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")

    testing.restore()
    truePre = []
    pred = []
    for itr in range(testing.totalPages):
        data = testing.nextBatch()
        truePre.append(data[0])
        pre = model.predict(data)
        pred.append(pre[0])

    labelsCount = {}
    ConfusionMa = {}
    fScr = {}

    thres = 0.5
    valid = int(len(truePre) * 0.35)
    labelsCount = {}
    ConfusionMa = {}
    fScr = {}
    thresLab = {}
    for la in range(1000):
        if la % 25 == 0:
            print("Currnet label", la)
        t = []
        p = []
        for i in range(valid):
            t.append(truePre[i][la])
            p.append(pred[i][la])
        bestF, bestThre = thresholdTuning(t, p)

        t = []
        p = []
        for i in range(valid, len(truePre)):
            t.append(truePre[i][la])
            p.append(pred[i][la])

        p = np.array(p)
        fScr[la] = f1_score(t, p >= bestThre)
        ConfusionMa[la] = confusion_matrix(t, p > bestThre)
        thresLab[la] = bestThre

    f = open(outputfile, "w")
    for i in range(1000):
        inp = str(i) + "," + str(thresLab[i]) + "," + str(fScr[i]) + "\n"
        f.write(inp)
    f.close()