# Shared imports for the helpers below; CH, HT, the plotly py/go aliases,
# bs (BeautifulSoup) and the html tag builders (html, head, body, div, table,
# pre, link, title, h1, p, td, b, HTML) come from project-local / third-party
# modules not shown here.
import os
import re
import time

import numpy as np
import pandas as pd


def report(rname, qdir):
    """Render the QUAST plain-text report as a simple HTML table."""
    reportfile = qdir + '/report.txt'
    with open(rname, 'w') as f:  # text mode: we write str, not bytes
        f.write('<!DOCTYPE html>\n')
        f.write('<html>\n')
        f.write('<body>\n')
        f.write(h1('Assembly Statistics'))
        f.write('<table>\n')
        with open(reportfile) as src:  # the Python 2 file() builtin is gone
            for line in src:
                line = line.strip()
                if re.match('All statistics are based on contigs', line):
                    f.write(p(line) + '\n')
                elif len(line) > 0:
                    # Columns in report.txt are separated by runs of spaces.
                    f.write('<tr>\n')
                    for el in re.split(r'\s{2,}', line):
                        f.write(td(el))
                    f.write('</tr>\n')
        f.write('</table>')
        f.write(p(b('Please download the whole quast report data for details')) + '\n')
        f.write('</body>')
        f.write('</html>')
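# Usage sketch for report(). The h1/p/td/b helpers are assumed to be simple
# string tag formatters (e.g. h1('x') -> '<h1>x</h1>'), and the paths here
# are hypothetical:
#
#   report('assembly_stats.html', '/path/to/quast_output')
#
# where '/path/to/quast_output' is a QUAST output directory containing
# report.txt.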
def eval_clf_perfs_bag_of_words():
    """Train binary classifiers per attribute type and publish ROC curves."""
    minSplit = 0.2
    maxSplit = 0.6
    data = CH.getMasterData("../FinalResults/ImgShrRnkListWithTags.csv")

    for attribsType in ['non_sparse', 'non_zero', 'abv_mean']:
        print("Classifier training started for %s" % attribsType)
        allAttribs = CH.genAllAttribs(
            "../FinalResults/ImgShrRnkListWithTags.csv",
            attribsType,
            "../data/infoGainsExpt2.csv")

        codes = []
        classifiers = []
        for i in np.arange(minSplit, maxSplit, 0.1):  # i is the test percent
            clfForRoc = []
            for method in methods:
                clfObj = CH.buildBinClassifier(data, allAttribs, 1 - i, 80,
                                               method, kwargsDict[method])
                clfObj.runClf()
                classifiers.append(clfObj)
                clfForRoc.append(clfObj)

            # Build one ROC trace per classifier for this split.
            traces = []
            for clfObj in clfForRoc:
                fpr, tpr, _ = clfObj.roccurve
                traces.append(go.Scatter(x=fpr, y=tpr, name=clfObj.methodName))

            layout = dict(xaxis=dict(title='False Positive Rate'),
                          yaxis=dict(title='True Positive Rate'),
                          title='Train-Test Split Ratio: %f' % i)
            fig = dict(data=traces, layout=layout)
            codes.append(py.iplot(fig, filename="ROC_curve_%s_%i"
                                  % (attribsType, int(i * 100))).embed_code)

        flNm = "../ClassifierResults/ROC_curve_%s.html" % attribsType
        with open(flNm, "w") as perf:
            perf.write(HT.h1("ROC curves of Classifiers with %s Attributes."
                             % attribsType))
            for row in codes:
                perf.write(HT.HTML(row))

        gen_graphics_clf_perf(classifiers, attribsType)
        print("Classifier training complete for %s" % attribsType)
        print()
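# Minimal sketch of the module-level configuration assumed by
# eval_clf_perfs_bag_of_words() and gen_graphics_clf_perf(); the method names
# and keyword arguments below are illustrative assumptions, not the project's
# actual settings.
minSplit, maxSplit = 0.2, 0.6  # split range, also read by gen_graphics_clf_perf()
methods = ['logistic', 'svm', 'dtree']
kwargsDict = {
    'logistic': {'penalty': 'l2'},
    'svm': {'kernel': 'rbf'},
    'dtree': {'max_depth': 5},
}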
def getHtmlPageFromCsvFile(filePath, css, title):
    """Prepare an HTML page embedding the CSV file as a table."""
    myHtml = html(
        head(
            link(' ', rel='stylesheet', type='text/css', href=css),
            title=filePath
        )
    )
    # Assumes the tag builder's append() returns the updated document.
    myHtml = myHtml.append(
        body(
            h1(title),
            getHtmlTableFromCsvFile(filePath),
        )
    )
    return myHtml
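# Usage sketch (hypothetical file names; assumes the html/head/body/link/h1
# tag builders and getHtmlTableFromCsvFile() are available in this module):
#
#   page = getHtmlPageFromCsvFile('results.csv', 'style.css', 'Run results')
#   with open('results.html', 'w') as out:
#       out.write(str(page))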
def gen_graphics_clf_perf(classifiers, suffix):
    """Export per-classifier metrics to CSV and grouped bar charts (plotly)."""
    printableClfs = []
    for clf in classifiers:
        printableClfs.append(dict(literal_eval(clf.__str__())))

    df = pd.DataFrame(printableClfs)
    df = df[['methodName', 'splitPercent', 'accScore', 'precision', 'recall',
             'f1Score', 'auc', 'sqerr']]
    df.columns = ['Classifier', 'Train-Test Split', 'Accuracy', 'Precision',
                  'Recall', 'F1 score', 'AUC', 'Squared Error']
    df.to_csv("../ClassifierResults/extrmClfMetrics_%s.csv" % suffix,
              index=False)

    iFrameBlock = []
    # minSplit/maxSplit must exist at module scope here; the locals of the
    # same name in eval_clf_perfs_bag_of_words() are not visible from this
    # function (see the module-level sketch above).
    for i in np.arange(minSplit, maxSplit, 0.1):
        # copy() so the index/column edits below do not trigger
        # SettingWithCopyWarning on a view of df.
        df1 = df[df['Train-Test Split'] == 1 - i].copy()
        df1.index = df1['Classifier']
        df1 = df1[['Accuracy', 'Precision', 'Recall', 'F1 score', 'AUC',
                   'Squared Error']].transpose()
        fig = df1.iplot(kind='bar',
                        filename='Train-Test_Split_Ratio %s %f' % (suffix, 1 - i),
                        title='Train-Test Split Ratio: %f' % i)
        iFrameBlock.append(fig.embed_code)

    flNm = "../ClassifierResults/performanceComparison_%s.html" % suffix
    with open(flNm, "w") as perf:
        perf.write(HT.h1("Performance Comparisons of Classifiers with %s Attributes."
                         % suffix))
        for row in iFrameBlock:
            perf.write(HT.HTML(row))
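# gen_graphics_clf_perf() relies on each classifier's __str__() returning a
# literal_eval-able dict. A sketch of the contract it assumes (field names
# match the column selection above; the values are made up):
#
#   str(clf) -> "{'methodName': 'svm', 'splitPercent': 0.8, 'accScore': 0.91,
#                 'precision': 0.90, 'recall': 0.88, 'f1Score': 0.89,
#                 'auc': 0.93, 'sqerr': 0.07}"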
def to_html(results):
    """Render collected plugin test results as a single HTML results table."""
    PREAMBLE_FILE = os.getenv('LSM_PREAMBLE_FILE', "")
    preamble = ""

    # One table row per API method; order matches the test harness.
    methods = [
        'capabilities', 'systems', 'plugin_info', 'pools', 'job_status',
        'job_free', 'iscsi_chap_auth', 'volumes', 'volume_create',
        'volume_delete', 'volume_resize', 'volume_replicate',
        'volume_replicate_range_block_size', 'volume_replicate_range',
        'volume_enable', 'volume_disable', 'disks', 'target_ports',
        'volume_mask', 'volume_unmask', 'volume_child_dependency',
        'volume_child_dependency_rm', 'access_groups',
        'access_groups_granted_to_volume', 'access_group_create',
        'access_group_delete', 'volumes_accessible_by_access_group',
        'access_group_initiator_add', 'access_group_initiator_delete',
        'fs', 'fs_create', 'fs_delete', 'fs_resize', 'fs_clone',
        'fs_file_clone', 'fs_snapshots', 'fs_snapshot_create',
        'fs_snapshot_delete', 'fs_snapshot_restore', 'fs_child_dependency',
        'fs_child_dependency_rm', 'export_auth', 'exports', 'export_fs',
        'export_remove'
    ]

    ch = []
    row_data = []

    if os.path.isfile(PREAMBLE_FILE):
        with open(PREAMBLE_FILE, 'r') as pm:
            preamble = pm.read()

    # Build column header: one column per system under test.
    for r in results:
        ch.append(r['SYSTEM']['ID'])

    # Add overall pass/fail for unit tests.
    pass_fail = ['Overall Pass/Fail result']
    for r in results:
        if r['META']['ec'] == '0':
            pass_fail.append('P')
        else:
            pass_fail.append('F')
    row_data.append(pass_fail)

    # Append a link to each system's error log.
    error_log = ['Error log (click +)']
    for r in results:
        error_log.append('<a href="%s">+</a>'
                         % ('./' + os.path.basename(r['META']['error_file'])))
    row_data.append(error_log)

    for m in methods:
        row = [m]
        for r in results:
            row.append(get_result(r, m))
        row_data.append(row)

    # Build HTML.
    text = '<!DOCTYPE html>'
    text += str(html(
        head(link(rel="stylesheet", type="text/css", href="../../test.css"),
             title("libStorageMgmt test results"),
             ),
        body(
            # Format before wrapping in h1 so the tag wraps the final text.
            HTML(h1("%s Results generated @ %s"
                    % (preamble, time.strftime("%c")))),
            div(table(_table_header(ch), _table_body(row_data)),
                _class="angled_table"),
            div(pre(
                " Legend\n"
                " P = Pass (Method called and returned without error)\n"
                " F = Fail (Method call returned an error)\n"
                " U = Unsupported or unable to test due to other errors\n"
                " * = Unable to connect to array or provider totally unsupported\n"
                " + = hyper link to error log\n\n\n",
                HTML(' Source code for plug-in for this test run '
                     '<a href=./smis.py.html>is here. </a>'))))
    ))
    return bs(text).prettify()
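# Sketch of the per-system result shape to_html() consumes; the keys mirror
# the lookups above, the values are illustrative assumptions:
#
#   results = [{
#       'SYSTEM': {'ID': 'sim-array-1'},
#       'META': {'ec': '0', 'error_file': '/tmp/sim-array-1.err'},
#       # ...plus whatever get_result(r, method) reads for each method row...
#   }]
#   print(to_html(results))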