Example #1
def make_executable_Java ():
    if not util.file_exists("solution.java"):
        raise Exception("solution.java does not exist")
    util.del_file("Main.java")
    util.system("javac solution.java")
    if not util.file_exists("Main.class"):
        raise Exception("error in Java compilation")
Example #2
def make_corrects_Java ():
    for f in glob.glob("*.cor"):
        util.del_file(f)
    inps = sorted(glob.glob("*.inp"))
    for inp in inps:
        tst = os.path.splitext(inp)[0]
        util.system("java Main <%s.inp >%s.cor" % (tst, tst))
Example #3
def make_corrects_Python3 ():
    for f in glob.glob("*.cor"):
        util.del_file(f)
    inps = sorted(glob.glob("*.inp"))
    for inp in inps:
        tst = os.path.splitext(inp)[0]
        util.system("python3 solution.py <%s.inp >%s.cor" % (tst, tst))
Example #4
def performance(origin_labels, predict_labels, deci_value, output=None, title=None, roc=False, bi_or_multi=0):
    """evaluations used to evaluate the performance of the model.
    :param origin_labels: true values of the dataset.
    :param predict_labels: predicted values of the dataset.
    :param deci_value: decision values used for ROC and AUC.
    :param output: the output file name of the ROC curve.
    :param title: the title of the ROC curve.
    :param roc: indicate whether to draw the ROC curve or not.
    """
    if len(origin_labels) != len(predict_labels):
        raise ValueError("The number of the original labels must equal to that of the predicted labels.")
    if bi_or_multi == 0:
        TP = 0.0
        TN = 0.0
        FP = 0.0
        FN = 0.0
        for i in range(len(origin_labels)):
            if origin_labels[i] == 1.0 and predict_labels[i] == 1.0:
                TP += 1.0
            elif origin_labels[i] == 1.0 and predict_labels[i] == -1.0:
                FN += 1.0
            elif origin_labels[i] == -1.0 and predict_labels[i] == 1.0:
                FP += 1.0
            elif origin_labels[i] == -1.0 and predict_labels[i] == -1.0:
                TN += 1.0
        try:
            SN = TP / (TP + FN)
        except ZeroDivisionError:
            SN = 0.0
        try:
            SP = TN / (FP + TN)
        except ZeroDivisionError:
            SP = 0.0
        try:
            ACC = (TP + TN) / (TP + TN + FP + FN)
        except ZeroDivisionError:
            ACC = 0.0
        try:
            MCC = (TP * TN - FP * FN) / math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
        except ZeroDivisionError:
            MCC = 0.0
        current_path = os.path.dirname(os.path.realpath(__file__))
        roc_data_file = current_path + const.GEN_FILE_PATH + 'roc_data'
        try:
            AUC = plot_roc(deci_value, origin_labels, output, title, roc, roc_data_file)
        except ZeroDivisionError:
            AUC = 0.0
        del_file(roc_data_file)

        return ACC, MCC, AUC, SN, SP
    elif bi_or_multi == 1:
        correct_labels = 0.0
        for elem in zip(origin_labels, predict_labels):
            if elem[0] == elem[1]:
                correct_labels += 1.0
        ACC = correct_labels / len(origin_labels)
        return ACC
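As a sanity check, the binary branch can be exercised on a toy label set; this hypothetical snippet recomputes SN, SP, ACC and MCC by hand (the AUC path needs the project's plot_roc, so it is skipped here):

# Hypothetical worked example for the binary metrics above.
# With origin = [1, 1, 1, -1, -1, -1] and predict = [1, 1, -1, -1, -1, 1]:
#   TP = 2, FN = 1, TN = 2, FP = 1
#   SN  = TP / (TP + FN)     = 2 / 3 ~= 0.6667
#   SP  = TN / (FP + TN)     = 2 / 3 ~= 0.6667
#   ACC = (TP + TN) / total  = 4 / 6 ~= 0.6667
#   MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN))
#       = 3 / sqrt(81) = 3 / 9 ~= 0.3333
import math

TP, FN, TN, FP = 2.0, 1.0, 2.0, 1.0
assert abs(TP / (TP + FN) - 2.0 / 3.0) < 1e-9
assert abs((TP * TN - FP * FN)
           / math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
           - 1.0 / 3.0) < 1e-9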
Example #5
def make_corrects_RunPython ():
    for f in glob.glob("*.cor"):
        util.del_file(f)
    inps = sorted(glob.glob("*.inp"))
    for inp in inps:
        tst = os.path.splitext(inp)[0]
        os.system("cat solution.py %s.inp > work.py" % tst)
        util.system("python3 work.py >%s.cor" % (tst, ))

        # additionally, create doctest-like session
        if tst == 'sample':
            python_doctest(tst)
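The concatenation means each .inp file holds Python statements that run after the solution's definitions. A self-contained illustration with invented file contents:

# Illustration of the RunPython concatenation (toy contents, hypothetical).
import pathlib
import subprocess

pathlib.Path("solution.py").write_text("def double(x):\n    return 2 * x\n")
pathlib.Path("sample.inp").write_text("print(double(3))\nprint(double(10))\n")
# equivalent of: cat solution.py sample.inp > work.py
merged = pathlib.Path("solution.py").read_text() + pathlib.Path("sample.inp").read_text()
pathlib.Path("work.py").write_text(merged)
out = subprocess.run(["python3", "work.py"], capture_output=True, text=True, check=True)
assert out.stdout == "6\n20\n"  # this is what would land in sample.cor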
Example #6
def make_executable_C ():

    handler = util.read_yml("handler.yml")

    if handler["handler"] != "std":
        raise Exception("unknown handler")

    if not util.file_exists("solution.c"):
        raise Exception("solution.c does not exist")

    util.del_file("solution.exe")
    util.system("%s %s solution.c -o solution.exe" % (cc, ccflags))
    if not util.file_exists("solution.exe"):
        raise Exception("error in C compilation")
Example #7
def make_executable_GHC ():

    handler = util.read_yml("handler.yml")

    if handler["handler"] != "std":
        raise Exception("unknown handler")

    if not util.file_exists("solution.hs"):
        raise Exception("solution.hs does not exist")

    util.del_file("solution.exe")
    util.system("ghc solution.hs -o solution.exe")
    if not util.file_exists("solution.exe"):
        raise Exception("error in GHC compilation")
Example #8
def make_executable_PRO2 ():
    util.del_file("solution.exe")
    util.del_dir('compilation')
    os.mkdir('compilation')
    if util.file_exists("solution.cc"):
        util.system('cp solution.cc compilation/program.cc')
    elif util.file_exists("solution.hh"):
        util.system('cp solution.hh compilation/program.hh')
    else:
        print "There is no solution.cc nor solution.hh"
    util.system('cp public/* compilation')
    util.system('cp private/* compilation')
    os.chdir('compilation')
    util.system("%s %s *.cc -o ../solution.exe" % (cxx, cxxflags))
    os.chdir('..')
    util.del_dir('compilation')
    if not util.file_exists("solution.exe"):
        raise Exception("solution.exe not created")
    util.system("(cd public && tar cf ../public.tar *)")
    util.system("(cd private && tar cf ../private.tar *)")
Example #9
def make_corrects_RunHaskell ():
    for f in glob.glob("*.cor"):
        util.del_file(f)
    inps = sorted(glob.glob("*.inp"))
    for inp in inps:
        tst = os.path.splitext(inp)[0]
        util.copy_file("solution.hs", "work.hs")
        if util.file_exists("judge.hs"):
            os.system("cat judge.hs >> work.hs")
        f = open("work.hs", "a")
        print >>f, "main = do"
        for line in open(tst+".inp").readlines():
            line = line.rstrip()
            if line.startswith("let "):
                print >>f, "    %s" % line
#            elif line.startswith("deb "):
#                print >>f, '    hPutStrLn stderr "%s"' % line
            else:
                print >>f, "    print (%s)" % line
        f.close()
        util.system("runhaskell work.hs >%s.cor" % (tst, ))
Example #10
def verify_program (program):

    """Verify that program compiles and gets AC for each test."""

    # This implementation is still limited, but it handles the basic cases.

    # compile
    program = os.path.splitext(program)[0]
    if not util.file_exists(program+".cc"):
        raise Exception("%s.cc does not exist" % program)
    if not util.file_exists("handler.yml"):
        raise Exception("handler.yml does not exist")
    handler = util.read_yml("handler.yml")
    if handler["handler"] != "std":
        raise Exception("unknown handler")
    util.del_file(program+".exe")
    if util.file_exists("main.cc"):
        raise Exception("not implemented yet")
        if handler["source_modifier"] == "structs":
            util.system("cat solution.cc main.cc > temporal.cc ; %s %s temporal.cc -o solution.exe ; rm temporal.cc" % (cxx, cxxflags))
        else:
            util.system("%s %s solution.cc main.cc -o solution.exe" % (cxx, cxxflags))
    else:
        if util.file_exists("solution.fallback"):
            raise Exception("not implemented yet")
            util.system("%s %s solution.cc -o solution.exe" % (cxx, cxxflags_fallback))
        else:
            util.system("%s %s %s.cc -o %s.exe" % (cxx, cxxflags, program, program))
    if not util.file_exists(program+".exe"):
        raise Exception(program+".exe not created")

    # execute on tests
    tests = sorted(glob.glob("*.inp"))
    for test in tests:
        test = os.path.splitext(test)[0]
        os.system("./%s.exe < %s.inp > %s.out" % (program, test, test))
        r = subprocess.call(["cmp", test + ".out", test + ".cor"])
        msg = "WA" if r else "OK"
        print("%s:\t\t%s" % (test, msg))
Example #11
def make_executable_MakePRO2 ():
    if not util.file_exists("solution"):
        raise Exception("There is no solution directory")
    if not util.file_exists("public"):
        raise Exception("There is no public directory")
    if not util.file_exists("private"):
        raise Exception("There is no private directory")

    util.del_file("solution.exe")
    util.del_dir('compilation')
    os.mkdir('compilation')
    util.system('cp solution/*  public/* private/* compilation')
    os.chdir('compilation')
    util.system("make")
    util.system('cp program.exe ../solution.exe')
    os.chdir('..')
    util.del_dir('compilation')
    if not util.file_exists("solution.exe"):
        raise Exception("solution.exe not created")
    util.system("(cd public && tar cf ../public.tar *)")
    util.system("(cd private && tar cf ../private.tar *)")
    util.system("(cd solution && tar cf ../solution.tar *)")
Example #12
def make_executable_CPP ():

    handler = util.read_yml("handler.yml")

    if handler["handler"] != "std":
        raise Exception("unknown handler")

    if not util.file_exists("solution.cc"):
        raise Exception("solution.cc does not exist")

    util.del_file("solution.exe")
    if util.file_exists("main.cc"):
        if handler["source_modifier"] == "structs":
            util.system("cat solution.cc main.cc > temporal.cc ; %s %s temporal.cc -o solution.exe ; rm temporal.cc" % (cxx, cxxflags))
        else:
            util.system("%s %s solution.cc main.cc -o solution.exe" % (cxx, cxxflags))
    else:
        if util.file_exists("solution.fallback"):
            util.system("%s %s solution.cc -o solution.exe" % (cxx, cxxflags_fallback))
        else:
            util.system("%s %s solution.cc -o solution.exe" % (cxx, cxxflags))
    if not util.file_exists("solution.exe"):
        raise Exception("error in C++ compilation")
Example #13
def make_corrects ():

    """Makes all correct files in the cwd."""

    make_executable()

    handler = util.read_yml("handler.yml")
    if handler.get('compilers', '') == 'RunHaskell':
        make_corrects_RunHaskell()
    elif handler.get('compilers', '') == 'RunPython':
        make_corrects_RunPython()
    elif handler.get('solution', '') == 'Python3':
        make_corrects_Python3()
    elif handler.get('solution', '') == 'Java':
        make_corrects_Java()
    else:
        if not util.file_exists("solution.exe"):
            raise Exception("solution.exe does not exist")
        for f in glob.glob("*.cor"):
            util.del_file(f)
        inps = sorted(glob.glob("*.inp"))
        for inp in inps:
            tst = os.path.splitext(inp)[0]
            util.system("./solution.exe < %s.inp > %s.cor" % (tst, tst))

Example #14
if util.file_exists('enunciat.tex'):
    util.move_file('enunciat.tex', 'problem.es.tex')

if util.file_exists('entrades'):
    util.move_file('entrades', 'inputs')

if util.file_exists('entrada'):
    util.move_file('entrada', 'input')

if util.file_exists('puntets.txt'):
    util.move_file('puntets.txt', 'scores.yml')
    print "  !!! AFEGEIX ELS sample-*/CANVIA ELS prova-* AL FITXER scores.yml !!!"

util.del_file('a.out')
for f in glob.glob('*.exe'):
    util.del_file(f)
for f in glob.glob('*.ps'):
    util.del_file(f)
for f in glob.glob('*.pdf'):
    util.del_file(f)



util.move_file('problem.es.tex', 'problem.es.tex.latin1')
os.system('iconv -f latin1 -t utf8 < problem.es.tex.latin1 > problem.es.tex')
util.del_file('problem.es.tex.latin1')

handler = 'std'
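The iconv round-trip can also be done in pure Python, which drops the external dependency; a sketch using the same file names:

# Hedged alternative to iconv: re-encode latin1 -> utf8 in Python.
util.move_file('problem.es.tex', 'problem.es.tex.latin1')
with open('problem.es.tex.latin1', encoding='latin1') as src, \
        open('problem.es.tex', 'w', encoding='utf8') as dst:
    dst.write(src.read())
util.del_file('problem.es.tex.latin1')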
Example #15
def make_executable_Haskell ():
    if not util.file_exists("solution.hs"):
        raise Exception("solution.hs does not exist")

    util.del_file("work")
    util.del_file("work.hi")
    util.del_file("work.o")
    util.copy_file("solution.hs", "work.hs")
    f = open("work.hs", "a")
    print >>f, """main = do print "OK" """
    f.close()

    util.system("ghc -O3 work.hs")
    if not util.file_exists("work"):
        raise Exception("error in haskell compilation")
    util.del_file("work")
    util.del_file("work.hi")
    util.del_file("work.o")
Example #16
def cross_validation(label_list, vector_list, fold, svm_params, predict_params, bi_or_multi):
    """Do cross validation.
    :param label_list: list of labels.
    :param vector_list: list of vectors.
    :param fold: the fold of cross validation.
    """
    datasetsize = len(label_list)
    result = dataset_split_cv(label_list, vector_list, fold)
    if result is False:
        return False
    else:
        split_vector_list, split_label_list = result
    len_vector = len(split_vector_list)
    len_label = len(split_label_list)
    if len_vector != len_label:
        print('Error: The length of the labels is not equal to that of the vectors.')
        return False
    deci = []
    original_labels = []
    acc_list = []
    mcc_list = []
    auc_list = []
    sn_list = []
    sp_list = []
    if bi_or_multi == 0:
        if fold != datasetsize:
            for i in range(len_vector):
                train_vector_list = []
                train_label_list = []
                #test_vector_list = []
                #test_label_list = []
                test_vector_list = split_vector_list[i]
                test_label_list = split_label_list[i]
                original_labels.extend(test_label_list)
                for j in range(len_vector):
                    if j != i:
                        train_vector_list.extend(split_vector_list[j])
                        train_label_list.extend(split_label_list[j])
                m = svm_train(train_label_list, train_vector_list, svm_params)
                p_label, p_acc, p_val = svm_predict(test_label_list, test_vector_list, m, predict_params)
                labels = m.get_labels()
                subdeci = [labels[0]*val[0] for val in p_val]
                deci += subdeci
                evals = performance(test_label_list, p_label, subdeci, bi_or_multi=bi_or_multi)
                acc_list.append(evals[0])
                mcc_list.append(evals[1])
                auc_list.append(evals[2])
                sn_list.append(evals[3])
                sp_list.append(evals[4])
            acc_average = sum(acc_list) / len(acc_list)
            mcc_average = sum(mcc_list) / len(mcc_list)
            auc_average = sum(auc_list) / len(auc_list)
            sn_average = sum(sn_list) / len(sn_list)
            sp_average = sum(sp_list) / len(sp_list)

            label_all = []
            for i in split_label_list:
                label_all.extend(i)
            check_gnuplot_exe()
            roc_output = 'cross_validation.png'
            title = 'cross validation'
            current_path = os.path.dirname(os.path.realpath(__file__))
            roc_data_file = current_path + const.GEN_FILE_PATH + 'roc_data'
            plot_roc(deci, label_all, roc_output, title, True, roc_data_file)
            del_file(roc_data_file)
            dest_file = current_path + const.FINAL_RESULTS_PATH + roc_output
            copy_file(roc_output, dest_file)
        elif fold == datasetsize:
            predicted_labels = []
            #deci_list = []
            for i in range(len_vector):
                train_vector_list = []
                train_label_list = []
                #test_vector_list = []
                #test_label_list = []
                test_vector_list = split_vector_list[i]
                test_label_list = split_label_list[i]
                original_labels.extend(test_label_list)
                for j in range(len_vector):
                    if j != i:
                        train_vector_list.extend(split_vector_list[j])
                        train_label_list.extend(split_label_list[j])
                m = svm_train(train_label_list, train_vector_list, svm_params)
                p_label, p_acc, p_val = svm_predict(test_label_list, test_vector_list, m, predict_params)
                labels = m.get_labels()
                subdeci = [labels[0]*val[0] for val in p_val]
                deci += subdeci
                predicted_labels.extend(p_label)
            evals = performance(original_labels, predicted_labels, deci, bi_or_multi=bi_or_multi)
            acc_average = evals[0]
            mcc_average = evals[1]
            auc_average = evals[2]
            sn_average = evals[3]
            sp_average = evals[4]

            label_all = []
            for i in split_label_list:
                label_all.extend(i)
            check_gnuplot_exe()
            roc_output = 'cross_validation.png'
            title = 'cross validation'
            current_path = os.path.dirname(os.path.realpath(__file__))
            roc_data_file = current_path + const.GEN_FILE_PATH + 'roc_data'
            plot_roc(deci, label_all, roc_output, title, True, roc_data_file)
            del_file(roc_data_file)
            dest_file = current_path + const.FINAL_RESULTS_PATH + roc_output
            copy_file(roc_output, dest_file)

        # print(acc_list)
        acc_re = 'ACC = %.4f' % acc_average
        mcc_re = 'MCC = %.4f' % mcc_average
        auc_re = 'AUC = %.4f' % auc_average
        sn_re = 'Sn  = %.4f' % sn_average
        sp_re = 'Sp  = %.4f\n' % sp_average
        eval_re = [acc_re, mcc_re, auc_re, sn_re, sp_re]
        print('The cross validation results are as follows:')
        print(acc_re)
        print(mcc_re)
        print(auc_re)
        print(sn_re)
        print(sp_re)
        print("The ROC curve has been saved. You can check it here:")
        if sys.platform.startswith('win'):
            print(dest_file.replace('/', '\\'), '\n')
        else:
            print(dest_file.replace('\\', '/'), '\n')
        result_file = current_path + const.FINAL_RESULTS_PATH + "cv_eval_results.txt"
        with open(result_file, 'w') as f:
            f.write('The cross validation results are as follows:\n')
            for i in eval_re:
                f.write(i)
                f.write("\n")
        prob_file = current_path + const.FINAL_RESULTS_PATH + "probability_values.txt"
        with open(prob_file, 'w') as f:
            for i, j in zip(original_labels, deci):
                f.write(str(i))
                f.write('\t')
                f.write(str(j))
                f.write("\n")

    elif bi_or_multi == 1:
        if fold != datasetsize:
            for i in range(len_vector):
                train_vector_list = []
                train_label_list = []
                #test_vector_list = []
                #test_label_list = []
                test_vector_list = split_vector_list[i]
                test_label_list = split_label_list[i]
                for j in range(len_vector):
                    if j != i:
                        train_vector_list.extend(split_vector_list[j])
                        train_label_list.extend(split_label_list[j])
                m = svm_train(train_label_list, train_vector_list, svm_params)
                p_label, p_acc, p_val = svm_predict(test_label_list, test_vector_list, m, predict_params)
                labels = m.get_labels()
                subdeci = [labels[0]*val[0] for val in p_val]
                deci += subdeci
                evals = performance(test_label_list, p_label, subdeci, bi_or_multi=bi_or_multi)
                acc_list.append(evals)
            acc_average = sum(acc_list) / len(acc_list)
        elif fold == datasetsize:
            predicted_labels = []
            original_labels = []
            for i in range(len_vector):
                train_vector_list = []
                train_label_list = []
                #test_vector_list = []
                #test_label_list = []
                test_vector_list = split_vector_list[i]
                test_label_list = split_label_list[i]
                original_labels.extend(test_label_list)
                for j in range(len_vector):
                    if j != i:
                        train_vector_list.extend(split_vector_list[j])
                        train_label_list.extend(split_label_list[j])
                m = svm_train(train_label_list, train_vector_list, svm_params)
                p_label, p_acc, p_val = svm_predict(test_label_list, test_vector_list, m, predict_params)
                labels = m.get_labels()
                subdeci = [labels[0]*val[0] for val in p_val]
                deci += subdeci
                predicted_labels.extend(p_label)
            evals = performance(original_labels, predicted_labels, deci, bi_or_multi=bi_or_multi)
            acc_average = evals
        print('The cross validation results are as follows:')
        print('ACC = %.4f' % acc_average)


#def cv_jackknife(label_list, vector_list, fold, svm_params, predict_params, bi_or_multi):
#    """Do jackknife cross validation."""
Example #17
        print("parse  " + k)


# Convert the given Excel file to JSON text
def excel2json(xlsx_path):
    # load excel data
    xlsx_data = open_excel(xlsx_path)

    # get all sheets
    nsheets = xlsx_data.nsheets
    for idx in range(nsheets):
        sheet = xlsx_data.sheets()[idx]
        if sheet.nrows > 0 or sheet.ncols > 0:
            sheet2json(sheet)


# delete the original file
util.del_file(LOC_CFG_PATH)

files = os.listdir(XLSX_PATH)
for f in files:
    name, ext = os.path.splitext(f)
    if ext in ('.xls', '.xlsx'):
        if "~" in name:
            print("ignore file ", f)
            continue
        print("export  " + f)
        excel2json(XLSX_PATH + "/" + f)

print("localization Done")
Example #18
def cross_validation(label_list, vector_list, fold, svm_params, predict_params,
                     bi_or_multi):
    """Do cross validation.
    :param label_list: list of labels.
    :param vector_list: list of vectors.
    :param fold: the fold of cross validation.
    """
    result = dataset_split_cv(label_list, vector_list, fold)
    if result is False:
        return False
    else:
        split_vector_list, split_label_list = result
    len_vector = len(split_vector_list)
    len_label = len(split_label_list)
    if len_vector != len_label:
        print('Error: The length of the labels is not equal to that of the vectors.')
        return False
    deci = []
    acc_list = []
    mcc_list = []
    auc_list = []
    sn_list = []
    sp_list = []
    if bi_or_multi == 0:
        for i in range(len_vector):
            train_vector_list = []
            train_label_list = []
            #test_vector_list = []
            #test_label_list = []
            test_vector_list = split_vector_list[i]
            test_label_list = split_label_list[i]
            for j in range(len_vector):
                if j != i:
                    train_vector_list.extend(split_vector_list[j])
                    train_label_list.extend(split_label_list[j])
            m = svm_train(train_label_list, train_vector_list, svm_params)
            p_label, p_acc, p_val = svm_predict(test_label_list,
                                                test_vector_list, m,
                                                predict_params)
            labels = m.get_labels()
            subdeci = [labels[0] * val[0] for val in p_val]
            deci += subdeci
            evals = performance(test_label_list,
                                p_label,
                                subdeci,
                                bi_or_multi=bi_or_multi)
            acc_list.append(evals[0])
            mcc_list.append(evals[1])
            auc_list.append(evals[2])
            sn_list.append(evals[3])
            sp_list.append(evals[4])
        acc_average = sum(acc_list) / len(acc_list)
        mcc_average = sum(mcc_list) / len(mcc_list)
        auc_average = sum(auc_list) / len(auc_list)
        sn_average = sum(sn_list) / len(sn_list)
        sp_average = sum(sp_list) / len(sp_list)

        label_all = []
        for i in split_label_list:
            label_all.extend(i)
        check_gnuplot_exe()
        roc_output = 'cross_validation.png'
        title = 'cross validation'
        current_path = os.path.dirname(os.path.realpath(__file__))
        roc_data_file = current_path + const.GEN_FILE_PATH + 'roc_data'
        plot_roc(deci, label_all, roc_output, title, True, roc_data_file)
        del_file(roc_data_file)
        dest_file = current_path + const.FINAL_RESULTS_PATH + roc_output
        copy_file(roc_output, dest_file)
        print('The cross validation results are as follows:')
        print('ACC = %.4f' % acc_average)
        print('MCC = %.4f' % mcc_average)
        print('AUC = %.4f' % auc_average)
        print('Sn  = %.4f' % sn_average)
        print('Sp  = %.4f\n' % sp_average)
        print("The ROC curve has been saved. You can check it here:")
        if sys.platform.startswith('win'):
            print(dest_file.replace('/', '\\'), '\n')
        else:
            print(dest_file.replace('\\', '/'), '\n')
    elif bi_or_multi == 1:
        for i in range(len_vector):
            train_vector_list = []
            train_label_list = []
            #test_vector_list = []
            #test_label_list = []
            test_vector_list = split_vector_list[i]
            test_label_list = split_label_list[i]
            for j in range(len_vector):
                if j != i:
                    train_vector_list.extend(split_vector_list[j])
                    train_label_list.extend(split_label_list[j])
            m = svm_train(train_label_list, train_vector_list, svm_params)
            p_label, p_acc, p_val = svm_predict(test_label_list,
                                                test_vector_list, m,
                                                predict_params)
            labels = m.get_labels()
            subdeci = [labels[0] * val[0] for val in p_val]
            deci += subdeci
            evals = performance(test_label_list,
                                p_label,
                                subdeci,
                                bi_or_multi=bi_or_multi)
            acc_list.append(evals)
        acc_average = sum(acc_list) / len(acc_list)
        print('The cross validation results are as follows:')
        print('ACC = %.4f' % acc_average)
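Both cross_validation variants sit on top of the libsvm Python bindings (svm_train, svm_predict and model.get_labels() match the svmutil interface). A hedged usage sketch with toy data; the parameter strings are assumptions, not the project's real settings:

# Hypothetical invocation, assuming the libsvm python bindings (svmutil).
from svmutil import svm_train, svm_predict  # provided by libsvm

labels = [1, 1, 1, -1, -1, -1]
vectors = [{1: 0.9}, {1: 0.8}, {1: 0.7}, {1: 0.1}, {1: 0.2}, {1: 0.3}]

# 3-fold binary cross validation; '-t 0' = linear kernel, '-q' = quiet.
cross_validation(labels, vectors, 3, '-t 0 -c 1 -q', '-q', bi_or_multi=0)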