Example #1
def get_book_list(self):
    try:
        resp = requests.get(
            'https://myschool.ru.is/myschool/?Page=Exe&ID=1.13',
            auth=(USERNAME, self._pwd))
        soup = BeautifulSoup(resp.text, 'html.parser')
        table_soup = soup('center')
        table = ''
        for t in table_soup:
            thead = '<thead>'
            tbody = '<tbody>'
            for i, tr in enumerate(t.find_all('tr')):
                if i == 0:
                    # The first row holds the column headers.
                    thead = '{0}{1}'.format(thead, tr)
                else:
                    td_temp = ''
                    for td in tr.find_all('td')[1:-1]:
                        if td.div is not None:
                            td_temp = '{0}<td>{1}</td>'.format(
                                td_temp, td.div.get_text())
                        elif td.p is not None:
                            td_temp = '{0}<td>{1}</td>'.format(
                                td_temp, td.p.get_text())
                        else:
                            td_temp = '{0}{1}'.format(td_temp, td)
                    tbody = '{0}<tr>{1}</tr>'.format(tbody, td_temp)
            table = '{0}<table>{1}</thead>{2}</tbody></table>'.format(
                table, thead, tbody)
        return prettytable.from_html(table)
    except Exception:
        traceback.print_exc()
        sys.stderr.write('Could not get book list\n')
Example #2
def get_timetable(self):
    try:
        resp = requests.get(
            'https://myschool.ru.is/myschool/?Page=Exe&ID=3.2',
            auth=(USERNAME, self._pwd))
        soup = BeautifulSoup(resp.text, 'html.parser')
        tr_soup = soup('center')[0].table.tbody('tr')[1:-1]
        table = '<table><thead>'
        for i, tr in enumerate(tr_soup):
            if i < 2:
                # Keep whichever header row matches the configured layout.
                if i == 0 and USE_DAY_OF_THE_WEEK:
                    table = '{0}{1}</thead><tbody>'.format(table, tr)
                elif i == 1 and not USE_DAY_OF_THE_WEEK:
                    table = '{0}{1}</thead><tbody>'.format(table, tr)
            else:
                td_temp = ''
                for td in tr.find_all('td'):
                    if td.span is not None:
                        td_temp = '{0}<td>{1}</td>'.format(
                            td_temp,
                            self._strip_html_markup(
                                str(td.span.a.small).replace('<br>', '\n')))
                    else:
                        td_temp = '{0}{1}'.format(td_temp, td)
                table = '{0}<tr>{1}</tr>'.format(table, td_temp)
        return prettytable.from_html('{0}</tbody></table>'.format(table))
    except Exception:
        traceback.print_exc()
        sys.stderr.write('Could not get timetable\n')
Example #3
def main(argv):
    del argv
    if FLAGS.report is None:
        logging.info("need report dir")
        return
    logging.info("add report")
    user = '******'
    passwd = 'Light1019'
    auth = (user, passwd)
    pageid = '18645695'
    # Fetch the existing report table from the wiki page and parse it.
    table_html = get_page_table(auth=auth, pageid=pageid)
    table = from_html(table_html)
    report_xml = listxmlfile_dir(FLAGS.report)
    # Build one summary row from the JUnit-style XML attributes.
    new_row = []
    #runtime = FLAGS.report.split('-')[-1]
    new_row.append(runtime(FLAGS.report.split('-')[-1]))
    new_row.append(report_xml.get("root").get("tests"))
    new_row.append(
        int(report_xml.get("root").get("tests")) -
        int(report_xml.get("root").get("failures")) -
        int(report_xml.get("root").get("errors")))
    new_row.append(report_xml.get("root").get("failures"))
    new_row.append(report_xml.get("root").get("errors"))
    new_row.append(report_xml.get("root").get("time"))
    new_row.append(
        "http://jenkins.tsingj.local/job/QA/job/smoketest/%s/testReport/" %
        (str(FLAGS.buildid)))
    new_row.append("10.18.0.18::gfs/qa/smoketest/" + FLAGS.report)
    table[0].add_row(new_row)
    write_table(auth=auth,
                pageid=pageid,
                html=table[0].get_html_string(),
                title='报告汇总')  # "Report summary"
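The helpers get_page_table, write_table, listxmlfile_dir and runtime are defined elsewhere in that project and are not shown here. Assuming the wiki identified by pageid is Confluence, a minimal sketch of what get_page_table and write_table might look like against the Confluence REST API (the base URL below is hypothetical) could be:

import requests

CONFLUENCE_BASE = 'http://wiki.example.local'  # hypothetical base URL

def get_page_table(auth, pageid):
    # Fetch the page body in storage format; from_html() will pick out
    # every <table> element in the returned HTML.
    resp = requests.get(
        '%s/rest/api/content/%s?expand=body.storage' % (CONFLUENCE_BASE, pageid),
        auth=auth)
    resp.raise_for_status()
    return resp.json()['body']['storage']['value']

def write_table(auth, pageid, html, title):
    # Updating a Confluence page requires bumping its version number.
    current = requests.get(
        '%s/rest/api/content/%s?expand=version' % (CONFLUENCE_BASE, pageid),
        auth=auth).json()
    payload = {
        'id': pageid,
        'type': 'page',
        'title': title,
        'version': {'number': current['version']['number'] + 1},
        'body': {'storage': {'value': html, 'representation': 'storage'}},
    }
    resp = requests.put(
        '%s/rest/api/content/%s' % (CONFLUENCE_BASE, pageid),
        json=payload, auth=auth)
    resp.raise_for_status()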
Example #4
def _get_table(self, link, c_index, t_index):
    try:
        resp = requests.get(link, auth=(USERNAME, self._pwd))
        soup = BeautifulSoup(resp.text, 'html.parser')
        tr_soup = soup('center')[c_index].table.tbody('tr')[t_index:-1]
        table = '<table><thead>'
        for i, tr in enumerate(tr_soup):
            if i == 0:
                # Header row: keep only the link text of each <th>.
                th_temp = ''
                for th in tr.find_all('th'):
                    if th.a is not None:
                        th_temp = '{0}<th>{1}</th>'.format(
                            th_temp, th.a.get_text())
                    else:
                        th_temp = '{0}{1}'.format(th_temp, th)
                table = '{0}<tr>{1}</tr></thead><tbody>'.format(
                    table, th_temp)
            else:
                td_temp = ''
                for td in tr.find_all('td'):
                    if td.a is not None:
                        td_temp = '{0}<td>{1}</td>'.format(
                            td_temp, td.a.get_text())
                    else:
                        td_temp = '{0}{1}'.format(td_temp, td)
                table = '{0}<tr>{1}</tr>'.format(table, td_temp)
        return prettytable.from_html('{0}</tbody></table>'.format(table))
    except Exception:
        traceback.print_exc()
        sys.stderr.write('Could not get table\n')
Example #5
def display_testresults():
    from prettytable import from_html
    pretty_str = ''
    for pretty_table in from_html(premailer()):
        pretty_str += str(pretty_table)
        pretty_str += "\n"
        pretty_str += '=' * 40
        pretty_str += "\n"
    return pretty_str
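Note that from_html() parses every <table> element in its input and returns a list of PrettyTable objects, which is why this example iterates over the result while several others index [0]. A minimal, self-contained illustration (the HTML below is made up):

from prettytable import from_html

html = ('<table><tr><th>name</th></tr><tr><td>alpha</td></tr></table>'
        '<table><tr><th>name</th></tr><tr><td>beta</td></tr></table>')
tables = from_html(html)
print(len(tables))  # 2: one PrettyTable per <table> element
for t in tables:
    print(t)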
Example #6
def save_as_pdf(tid, tables_string, compname, filename, revision):
    pts = from_html(tables_string)
    dirpath = os.path.join(__conf__['outputPath'], 'tmp_report', 'report' + tid)
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)

    print(revision, compname, tables_string)
    result_info = '<div STYLE="word-wrap: break-word">Subversion Revision Number: ' \
        + revision + '<br/><br/>' + compname + '<br/><br/></div>' + tables_string
    pisa.CreatePDF(result_info, open(os.path.join(dirpath, filename), 'wb'))
Example #7
def submit_to_web(string: str):
    # Headless Chrome configuration
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--no-sandbox')
    # Open the page
    url = 'https://www.tgbus.com/gametools/DWSYH_DTC/'
    browser = webdriver.Chrome(executable_path="./chromedriver", options=chrome_options)
    browser.get(url)
    # Locate the input field and the confirm button
    input_field = browser.find_element_by_xpath('//input[@name="inlineInput"]')
    button = browser.find_element_by_xpath('//input[@onclick="onInlinePredictionButtonClick()"]')
    # Enter the data and submit
    input_field.send_keys(string)
    button.click()
    # Grab the result page
    soup = BeautifulSoup(browser.page_source, "html.parser")
    browser.quit()
    raw_body = soup.find('tbody', id='predictionTableBody')
    html_table = raw_body.parent
    raw_field = html_table.find('thead')
    # Remove the extra header rows
    field_list = raw_field.find_all('tr')
    field_list[0].extract()
    field_list[2].extract()
    # Add the missing header fields
    tag = soup.new_tag('th')
    tag.string = '类型1'  # "Type 1"
    field_list[1].insert(0, tag)
    tag = soup.new_tag('th')
    tag.string = '类型2'  # "Type 2"
    field_list[1].insert(1, tag)
    tag = soup.new_tag('th')
    tag.string = '购入价格'  # "Purchase price"
    field_list[1].insert(2, tag)
    # Fix up the data rows
    for row in raw_body.find_all('tr'):
        # Convert th cells to td
        error_elem_list = row.find_all('th')
        for elem in error_elem_list:
            elem.name = 'td'
        # The steadily decreasing pattern has no type 2, so pad with an empty column
        if len(error_elem_list) == 1:
            tag = soup.new_tag('td')
            tag.string = '-'
            row.insert(1, tag)
    # Build the PrettyTable
    table = prettytable.from_html(str(html_table))
    return table
Example #8
def printCourseTable(course):
    if course == '':
        return
    utils = Utils()
    url = utils.getUrl(course)
    if url == '':
        return
    if url.find('ocw.mit.edu') != -1:
        r = requests.get(url + '/calendar/')
        if r.status_code == 404:
            print('page not found')
            return
        #table = prettytable.from_html(r.text)[2]
        for table in prettytable.from_html(r.text):
            if table.field_names[0] == 'Field 1':
                continue
            table.align['TOPICS'] = 'l'
            print(table)
    elif url.find('itunes.apple.com') != -1:
        r = requests.get(url)
        soup = BeautifulSoup(r.text)
        table = soup.find(
            'table',
            class_=
            'tracklist-table content sortable total-flexible-columns-2 total-columns-6'
        )
        parser = TableHandler()
        parser.setMaxCellWidth(cell_width)
        parser.feed(table.prettify().replace('Video', ''))
        parser.tables[0].align["Name"] = "l"
        parser.tables[0].align["Description"] = "l"
        #print parser.tables[0].field_names
        #parser.tables[0].sortby = parser.tables[0].field_names[3]
        #parser.tables[0].reversesort = True
        print(parser.tables[0])
    else:
        print('not supported ' + course)
        return
Example #9
def get_assignments(self, filt):
    try:
        resp = requests.get(
            'https://myschool.ru.is/myschool/?Page=Exe&ID=1.12',
            auth=(USERNAME, self._pwd))
        soup = BeautifulSoup(resp.text, 'html.parser')
        tr_soup = soup('center')[0].table.tbody('tr')[:-1]
        table = '<table><thead>{0}</thead><tbody>'.format(tr_soup[0])
        for i in range(1, len(tr_soup)):
            # Only keep rows that match the filter string.
            if filt.lower() in tr_soup[i].get_text().lower():
                td_temp = ''
                for td in tr_soup[i].find_all('td'):
                    if td.a is not None:
                        td_temp = '{0}<td>{1}</td>'.format(
                            td_temp, td.a.get_text())
                    else:
                        td_temp = '{0}{1}'.format(td_temp, td)
                table = '{0}<tr>{1}</tr>'.format(table, td_temp)
        table = '{0}</tbody></table>'.format(table)
        return prettytable.from_html(table)
    except Exception:
        traceback.print_exc()
        sys.stderr.write('Could not get assignments\n')
Example #10
def testHtmlAndBack(self):
    html_string = self.x.get_html_string()
    new_table = from_html(html_string)[0]
    assert new_table.get_string() == self.x.get_string()
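The fixture self.x is created in the test class's setUp and is not shown here. A minimal sketch of how such a round-trip test could be wired up (the table contents are made up):

import unittest
from prettytable import PrettyTable, from_html

class HtmlRoundTripTest(unittest.TestCase):
    def setUp(self):
        # Hypothetical fixture standing in for the self.x used above.
        self.x = PrettyTable(['City', 'Area', 'Population'])
        self.x.add_row(['Adelaide', 1295, 1158259])
        self.x.add_row(['Brisbane', 5905, 1857594])

    def testHtmlAndBack(self):
        # Render to HTML, parse it back, and compare the ASCII renderings.
        html_string = self.x.get_html_string()
        new_table = from_html(html_string)[0]
        assert new_table.get_string() == self.x.get_string()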
Example #11
# table.add_row(['8','server08','服务器08','172.16.0.8'])
# table.add_row(['9','server09','服务器09','172.16.0.9'])
# print(table)

from prettytable import from_html

html_string = '''<table>
<tr>
<th>code</th>
<th>uuid</th>
<th>name</th>
<th>IP</th>
</tr>
<tr>
<td>1</td>
<td>server01</td>
<td>server-01</td>
<td>192.168.100.1</td>
</tr>
<tr>
<td>2</td>
<td>server02</td>
<td>server-02</td>
<td>192.168.100.2</td>
</tr>
</table>'''

table = from_html(html_string)

print(table[0])