Example #1
# Scrape the 7-day weather forecast
html_dizhi = "http://www.weather.com.cn/weather/" + test_id[1] + ".shtml"
html = urlopen(html_dizhi)
obj = bf(html.read(), 'html.parser')
mes_links = obj.find_all("li", {"class": re.compile(r'sky skyid lv\d')})
for mes in mes_links:
    date.append(mes.h1.get_text())
    wter.append(mes.p.get_text())
    wd_g.append(mes.span.get_text())
for i in range(7):
    wd_d.append(obj.select('.tem i')[i].get_text())
    wind.append(obj.select('.win i')[i].get_text())

# Output section
tb_7 = pt.PrettyTable()
tb_7.field_names = ['日期', '天气', '最高温度', '最低温度', '风力']


def seven_d():
    for i in range(1, len(date)):
        tb_7.add_row([date[i], wter[i], wd_g[i], wd_d[i], wind[i]])
    print('******************************')
    print(city, '近七日天气', sep='')
    print(tb_7)


def one_d():
    print('------------------------------')
    print(city, '当日天气', sep='')
    print('更新时间:', time, sep='')
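
A minimal setup sketch for the names the snippet above assumes; the imports follow the aliases used (bf, pt), and the concrete values are hypothetical:

import re
from urllib.request import urlopen

import prettytable as pt
from bs4 import BeautifulSoup as bf

test_id = ['北京', '101010100']  # [city name, weather.com.cn city code] -- hypothetical values
city = test_id[0]
time = ''  # update time, scraped elsewhere
date, wter, wd_g, wd_d, wind = [], [], [], [], []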
Example #2
def main():
    '''
    TBD
    '''
    _options = cli_postional_argument_parser()

    _test_results_table = prettytable.PrettyTable(["Cloud Model", \
                                                   "Cloud Attach", \
                                                   "VM Attach", \
                                                   " VM Attach ", \
                                                   "  VM Attach  ", \
                                                   "VM Capture", \
                                                   "VM Attach  ", \
                                                   "IMAGE Delete", \
                                                   "   VM Attach   ", \
                                                   "    VM Attach    "])

    _second_header = [
        '', '', "no pubkey injection", "pubkey injection", "pubkey injection",
        '', "pubkey injection", '', "pubkey injection", "pubkey injection"
    ]
    _test_results_table.add_row(_second_header)
    _third_header = [
        strftime("%Y-%m-%d"),
        strftime("%H:%M:%S"), "pre-existing image", "pre-existing image",
        "pre-existing image", '', "newly captured image", '',
        "non-existent image", "pre-existing image"
    ]
    _test_results_table.add_row(_third_header)
    _fourth_header = [
        '', '', "no volume", "no volume", "volume", '', "no volume", '',
        "no volume", "no volume"
    ]
    _test_results_table.add_row(_fourth_header)
    _fifth_header = [
        '', '', "no failure", "no failure", "no failure", '', "no failure", '',
        "failure", "forced failure"
    ]
    _test_results_table.add_row(_fifth_header)

    for _cloud_model in _options.cloud_models:
        _start = int(time())
        print ''
        _command = _cb_cli_path + " --hard_reset --config " + _options.cloud_config_dir + '/' + _cloud_model + ".txt exit"
        print "Attaching Cloud Model \"" + _cloud_model + "\" by running the command \"" + _command + "\"..."
        _proc_h = subprocess.Popen(_command,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        _result = _proc_h.communicate()

        _status = _proc_h.returncode
        if _status:
            print "ERROR while attempting to attach Cloud Model \"" + _cloud_model + "\""
            exit(_status)

        api_file_name = "/tmp/cb_api_" + username
        if os.access(api_file_name, os.F_OK):
            try:
                _fd = open(api_file_name, 'r')
                _api_conn_info = _fd.read()
                _fd.close()
            except IOError:
                _msg = "Unable to open file containing API connection information "
                _msg += "(" + api_file_name + ")."
                print _msg
                exit(4)
        else:
            _msg = "Unable to locate file containing API connection information "
            _msg += "(" + api_file_name + ")."
            print _msg
            exit(4)

        _msg = "Connecting to API daemon (" + _api_conn_info + ")..."
        print _msg
        api = APIClient(_api_conn_info)

        _results_row = []
        _results_row.append(_cloud_model)

        if _options.pause:
            raw_input("Press Enter to continue...")

        _cloud_result, _cloud_name = check_cloud_attach(api, _cloud_model)
        _results_row.append(_cloud_result)

        _test_cases = ["NA", "NA", "NA", "NA", "NA", "NA", "NA", "NA"]
        if _options.test_instances:
            _test_cases[0] = "no pubkey injection, no volume"

        if _options.test_ssh:
            _test_cases[1] = "pubkey injection, no volume"

        if _options.test_volumes:
            _test_cases[2] = "pubkey injection, volume"

        if _options.test_capture:
            _test_cases[3] = "vm capture"
            _test_cases[4] = "newly captured image, no volume"
            _test_cases[5] = "image delete"
            _test_cases[6] = "non-existent image failure"
            _test_cases[7] = "pubkey injection, force failure"

        if _cloud_model == "kub":
            _test_cases[2] = "NA"
            _test_cases[3] = "NA"
            _test_cases[4] = "NA"
            _test_cases[5] = "NA"

        for _test_case in _test_cases:
            if _test_case.count("vm capture"):
                _results_row.append(
                    check_vm_capture(api, _cloud_model, _cloud_name, _options))
            elif _test_case.count("image delete"):
                _results_row.append(
                    check_img_delete(api, _cloud_model, _cloud_name, _options))
            elif _test_case == "NA":
                _results_row.append("NA")
            else:
                _results_row.append(
                    check_vm_attach(api, _cloud_model, _cloud_name, _test_case,
                                    _options))

        _results_row[0] = _results_row[0] + " (" + str(int(time()) -
                                                       _start) + "s)"

        _test_results_table.add_row(_results_row)

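        # Move the rendered separator line below the three extra header rows so
        # they display as part of the header block.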
        _x_test_results_table = _test_results_table.get_string().split('\n')
        _aux = _x_test_results_table[2]
        _x_test_results_table[2] = _x_test_results_table[3]
        _x_test_results_table[3] = _x_test_results_table[4]
        _x_test_results_table[4] = _x_test_results_table[5]
        _x_test_results_table[5] = _x_test_results_table[6]
        _x_test_results_table[6] = _aux
        _x_test_results_table = '\n'.join(_x_test_results_table)

        _fn = "/tmp/real_multicloud_regression_test.txt"
        _fh = open(_fn, "w")
        _fh.write(str(_x_test_results_table))
        _fh.close()

        print _x_test_results_table

        _error = False

        if _error:
            exit(1)

    exit(0)
Example #3
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".format(table.get(
        "title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".format(
        table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [
            "Test case",
            "{0} Throughput [Mpps]".format(table["reference"]["title"]),
            "{0} stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Throughput [Mpps]".format(table["compare"]["title"]),
            "{0} stdev [Mpps]".format(table["compare"]["title"]), "Change [%]"
        ]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error(
            "The model is invalid, missing parameter: {0}".format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(
                        tst_data["parent"].split("-")[0],
                        "-".join(tst_data["name"].split("-")[1:]))
                    tbl_dict[tst_name] = {
                        "name": name,
                        "ref-data": list(),
                        "cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [
            tbl_dict[tst_name]["name"],
        ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
        if len(item) == 6:
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = [
        "{0}-1t1c-full{1}".format(table["output-file"],
                                  table["output-file-ext"]),
        "{0}-2t2c-full{1}".format(table["output-file"],
                                  table["output-file-ext"]),
        "{0}-4t4c-full{1}".format(table["output-file"],
                                  table["output-file-ext"])
    ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if file_name.split("-")[-2] in test[0]:  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item)
                                                 for item in test]) + "\n")

    # All tests in txt:
    tbl_names_txt = [
        "{0}-1t1c-full.txt".format(table["output-file"]),
        "{0}-2t2c-full.txt".format(table["output-file"]),
        "{0}-4t4c-full.txt".format(table["output-file"])
    ]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
Example #4
            features = [v for v in directory if v[1] == ind]
            dist = distance(float(features[0][4]), float(features[0][5]),
                            float(directory[(num - 1)][4]),
                            float(directory[(num - 1)][5]))
            x.add_row([
                features[0][0], features[0][3], features[0][2], features[0][6],
                features[0][7], dist, value[1]
            ])

    x.float_format = "0.3"  # three decimal places
    print x


os.system('cls' if os.name == 'nt' else 'clear')
print "\n\n"
z = pt.PrettyTable(
    ["WELCOME TO THE BERKELEY RESTAURANT RECOMMENDATION SYSTEM"])
print z
print "\n\nHow would you like to explore restaurants?"
print "\n\n** Based on User Reviews: **\n"
print "1. Find restaurants with similar user ratings"
print "2. Build a customized recommendation just for you (Coolest option!)"
print "\n** Based on Restaurant Features: **\n"
print "3. Find restaurants with similar characteristics \n\n"
print "** Explore statistical accuracy **"
print "4. Explore statistical error rate in user approach"
print "5. Explore statistical error rate in feature approach\n\n"

choice = int(raw_input("What would you like to do?   "))

if (choice == 1):
    directory = load_directory()
Example #5
    info_of_root_partition = psutil.disk_usage("/")
    percent_of_root_partition_usage = "%.2f%%" % (
        float(info_of_root_partition.used) * 100 /
        float(info_of_root_partition.total))
    total_size_of_root_partition = "%.2f" % (
        float(psutil.disk_usage("/").total / 1024) / 1024 / 1024)
    memory_info = getMemory()
    memory_usage = "%.2f%%" % (float(memory_info['used']) * 100 /
                               float(memory_info['total']))
    swap_info = getVirtualMemory()
    swap_usage = "%.2f%%" % (float(swap_info['used']) * 100 /
                             float(swap_info['total']))
    local_ip_address = getLocalIP()

    table = prettytable.PrettyTable(border=False,
                                    header=False,
                                    left_padding_width=2)
    table.field_names = ["key1", "value1", "key2", "value2"]
    table.add_row([
        "System load:", system_load, "Processes:",
        len(list(psutil.process_iter()))
    ])
    table.add_row([
        "Usage of /:",
        "%s of %sGB" %
        (percent_of_root_partition_usage, total_size_of_root_partition),
        "Users logged in:", user_logged_in
    ])
    table.add_row([
        "Memory usage:", memory_usage,
        "IP address for %s:" % local_ip_address[0], local_ip_address[1]
    ])

Example #6
headers = {
    'Upgrade-Insecure-Requests': '1',
    'User-Agent':
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
    'Accept':
    'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'DNT': '1',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh-TW;q=0.4'
}

resource = requests.get(url_to_check, headers=headers)
Soup = BeautifulSoup(resource.text, 'lxml')
download_list = Soup.find('div', class_="downloadbutton").find_all('a')

table = prettytable.PrettyTable(border=True,
                                header=True,
                                left_padding_width=2,
                                padding_width=1)
table.field_names = ["Name", "Filename", "Version", "Download URL"]

for item in download_list:
    title = item.get_text().replace(
        u'\xa0', ' ')  # strip non-breaking spaces; prettytable only supports ASCII
    href = item['href']
    filename = os.path.basename(href)
    version = filename.split('-')[-1].split('.')[0]

    pattern = re.compile(r'-(\d+)\.iso')
    match = pattern.search(filename)
    if match:
        version = match.group(1)
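    # Hedged sketch of the likely ending (the snippet is truncated here):
    # add the assembled row, then print the table after the loop.
    table.add_row([title, filename, version, href])

print(table)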
Example #7
def display_messages(msgs, prettify=False, ignore_fields='', max_len=1000):
    """Returns a string describing the set of messages provided
    If prettify is true, candidates are displayed using prettytable.
    ignore_fields provides a list of fields in the msgs which should not be displayed.
    """
    lines = []
    episode_done = False
    ignore_fields = ignore_fields.split(',')
    for index, msg in enumerate(msgs):
        if msg is None or (index == 1 and 'agent_reply' in ignore_fields):
            # We only display the first agent (typically the teacher) if we
            # are ignoring the agent reply.
            continue
        if msg.get('episode_done'):
            episode_done = True
        # Possibly indent the text (for the second speaker, if two).
        space = ''
        if len(msgs) == 2 and index == 1:
            space = '   '
        # Only display rewards !=0 as they are confusing in non-RL tasks.
        if msg.get('reward', 0) != 0:
            lines.append(space + '[reward: {r}]'.format(r=msg['reward']))
        for key in msg:
            if key not in ['episode_done', 'id', 'image', 'text', 'labels', 'eval_labels', 'label_candidates', 'text_candidates', 'reward'] and key not in ignore_fields:
                line = '[' + key + ']: ' + clip_text(str(msg.get(key)), max_len)
                lines.append(space + line)
        if type(msg.get('image')) == str:
            lines.append(msg['image'])
        if msg.get('text', ''):
            text = clip_text(msg['text'], max_len)
            ID = '[' + msg['id'] + ']: ' if 'id' in msg else ''
            lines.append(space + ID + text)
        if msg.get('labels') and 'labels' not in ignore_fields:
            lines.append(space + ('[labels: {}]'.format(
                        '|'.join(msg['labels']))))
        if msg.get('eval_labels') and 'eval_labels' not in ignore_fields:
            lines.append(space + ('[eval_labels: {}]'.format(
                        '|'.join(msg['eval_labels']))))

        if msg.get('label_candidates') and 'label_candidates' not in ignore_fields:
            cand_len = len(msg['label_candidates'])
            if cand_len <= 10:
                lines.append(space + ('[label_candidates: {}]'.format(
                        '|'.join(msg['label_candidates']))))
            else:
                # select five label_candidates from the candidate set;
                # can't slice it directly because it's a set
                cand_iter = iter(msg['label_candidates'])
                display_cands = (next(cand_iter) for _ in range(5))
                # print those cands plus how many cands remain
                lines.append(space + ('[label_candidates: {}{}]'.format(
                        '|'.join(display_cands),
                        '| ...and {} more'.format(cand_len - 5)
                        )))
        if msg.get('text_candidates') and 'text_candidates' not in ignore_fields:
            if prettify:
                cand_len = len(msg['text_candidates'])
                cands = [c for c in msg['text_candidates'] if c is not None]
                try:
                    import prettytable
                except ImportError:
                    raise ImportError('Please install prettytable to \
                    display text candidates: `pip install prettytable`')
                scores = None
                if msg.get('candidate_scores') is not None:
                    table = prettytable.PrettyTable(['Score', 'Text'])
                    scores = msg.get('candidate_scores')
                else:
                    table = prettytable.PrettyTable(['Text'])
                table.align = 'l'
                table.hrules = 1
                display_cands = []
                num_cands = 0
                for cand in cands:
                    cand_max_length = 250 if scores is None else 100
                    if len(cand) > cand_max_length:
                        # Show beginning and end
                        split = [cand[:cand_max_length], cand[cand_max_length:]]
                        cand = split[0] + '\n\n. . .\n\n' + split[1][-(min(50, len(split[1]))):]
                    if scores is not None:
                        table.add_row([scores[num_cands], cand])
                    else:
                        table.add_row([cand])
                    num_cands += 1
                    if num_cands > 5:
                        break

                lines.append(space + table.get_string())
            else:
                cand_len = len(msg['text_candidates'])
                if cand_len <= 10:
                    lines.append(space + ('[text_candidates: {}]'.format(
                            '|'.join(msg['text_candidates']))))
                else:
                    # select five text_candidates from the candidate set;
                    # can't slice it directly because it's a set
                    cand_iter = iter(msg['text_candidates'])
                    display_cands = (next(cand_iter) for _ in range(5))
                    # print those cands plus how many cands remain
                    lines.append(space + ('[text_candidates: {}{}]'.format(
                            '|'.join(display_cands),
                            '| ...and {} more'.format(cand_len - 5)
                            )))
    if episode_done:
        lines.append('- - - - - - - - - - - - - - - - - - - - -')
    return '\n'.join(lines)
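
A usage sketch, assuming clip_text is available from the surrounding module (the message contents are hypothetical):

msgs = [
    {'id': 'teacher', 'text': 'What is the capital of France?', 'labels': ['Paris'], 'episode_done': True},
    {'id': 'student', 'text': 'Paris'},
]
print(display_messages(msgs))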
Example #8
def print_dict(d):
    pt = prettytable.PrettyTable(['Property', 'Value'], caching=False)
    pt.align = 'l'
    for r in d.iteritems():
        pt.add_row(list(r))
    print pt.get_string(sortby='Property')
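
A usage sketch (Python 2, matching the snippet's iteritems and print statement); the dict values are hypothetical:

print_dict({'name': 'vm-1', 'status': 'ACTIVE', 'flavor': 'm1.small'})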
Example #9
def get_param_from_txt(txt_path, print_param=True):
    """
    Args:
        txt_path: path of the txt file that holds all parameters for the run;
            each line of the txt follows the format: Param_name@Param_value.
            The Param_names are:

            - 01) init_lr: initial learning rate
            - 02) optm_sw_iter: iteration at which the optimizer is switched
            - 03) model_sv_step: every model_sv_step iterations, save the model as the
                                "newest" checkpoint, so that if the experiment is
                                interrupted, training can resume from the most recent
                                weights (this save mode works like "save", not "save as")
            - 04) model_sv_step_4_vis: every model_sv_step_4_vis iterations, save a
                                model for visualization (this save mode works like
                                "save as", not "save")
            - 05) os_stage: platform the code runs on; 'W' for Windows, 'L' for Linux
            - 06) batch_size
            - 07) max_iter: maximum number of training iterations; training stops beyond it
            - 08) min_verify_iter: minimum iteration before validation/testing starts,
                                since main trains and tests at the same time
            - 09) ver_step: once past min_verify_iter, validate and test every
                                ver_step iterations
            - 10) data_input_shape: shape of the input samples
            - 11) label_index: key in the h5 file that holds the sample labels
            - 12) label_shape: shape of the label, i.e. its number of elements;
                                e.g. 3 for a one-hot 3-class label

            - 13) aug_subject_path: path of the augmented samples (must be set to
                                None when using on-the-fly augmentation)
            - 14) or_subject_path: folder holding the original, non-augmented samples
            - 15) folder_path: folder holding the cross-validation fold files
                                (not needed outside cross-validation experiments)
            - 16) result_save_path: path where logs and models are saved
            - 17) fold_name: which fold this experiment corresponds to
                                (not needed outside cross-validation experiments)
            - 18) task_name: experiment name
            - 19) GPU_index: index of the GPU to use

    Return: a dict holding all parameters

    txt Example:
# numeric parameters
init_lr@1e-6
optm_sw_iter@2000
model_sv_step@500
model_sv_step_4_vis@5000
batch_size@6
max_iter@20000
min_verify_iter@50
ver_step@50

# list parameters
data_input_shape@[280,280,16]
label_shape@[2]

# string parameters
label_index@label3
aug_subject_path@/data/@data_pnens_zhuanyi_dl/data_aug/v
or_subject_path@/data/@data_pnens_zhuanyi_dl/data/v
folder_path@/data/@data_pnens_zhuanyi_dl/CVfold5_newnew
result_save_path@/data/XS_Aug_model_result/model_templete/recurrent/pnens_zhuanyi_resnet_v_new(fuxk)/fold6
fold_name@1
task_name@recurrent fold5
os_stage@L
    """

    # First, read the txt file into a list of lines
    # (extension-less non-txt files also work)
    str_list = []
    f = open(txt_path, "r")
    contents = f.readlines()
    for item in contents:
        content = item.strip()
        str_list.append(content)
    f.close()

    # Parse each string in the list into a parameter and store it in a dict.
    # Note: file paths or task names may contain '@', so split only at the first '@'.
    dic = {}
    tb = pt.PrettyTable()
    tb.field_names = ["param_name", "param_value", "value_type"]
    for tmp_str in str_list:
        param_name = tmp_str.split('@')[0]  # substring method 1
        param_value = tmp_str[tmp_str.find('@') + 1:]  # substring method 2

        # Parameters that must be converted to int
        if param_name in ('optm_sw_iter', 'model_sv_step', 'model_sv_step_4_vis',
                          'min_verify_iter', 'ver_step', 'batch_size', 'max_iter'):
            param_value = int(param_value)
            dic[param_name] = param_value
            tb.add_row([param_name, param_value, typeof(param_value)])
        # Parameters that must be converted to a list
        if param_name in ('data_input_shape', 'label_shape'):
            param_value = str2list_w(param_value)
            dic[param_name] = param_value
            tb.add_row([param_name, param_value, typeof(param_value)])

        # String parameters that can be stored as-is
        if param_name in ('label_index', 'aug_subject_path', 'or_subject_path',
                          'folder_path', 'result_save_path', 'fold_name',
                          'task_name', 'os_stage', 'GPU_index',
                          'aug_subject_path_othermode', 'or_subject_path_othermode'):
            dic[param_name] = param_value
            tb.add_row([param_name, param_value, typeof(param_value)])

        # The only float parameter
        if param_name == 'init_lr':
            param_value = float(param_value)
            dic[param_name] = param_value
            tb.add_row([param_name, param_value, typeof(param_value)])


    if print_param:
        tb.align["param_value"] = "l"
        tb.align["param_name"] = "r"
        # tb.set_style(pt.MSWORD_FRIENDLY)
        print(tb)

    return dic
Example #10
def defaultprettytable(cols):
    p = prettytable.PrettyTable(cols)
    p.align = 'l'
    p.sortby = None
    return p
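
A usage sketch; the column names and row are hypothetical:

p = defaultprettytable(["Name", "Size"])
p.add_row(["a.txt", 12])
print(p)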
Example #11
city_list = city_data['state_and_code'].tolist()
n=25
final = [city_list[i * n:(i + 1) * n] for i in range((len(city_list) + n - 1) // n )]  




json_data = {}
cnt = 0
for x in final:
    print(cnt)
    headers = {'Content-type': 'application/json'}
    data = json.dumps({"seriesid": final[cnt],"startyear":"2014", "endyear":"2018"})
    p = requests.post('https://api.bls.gov/publicAPI/v1/timeseries/data/', data=data, headers=headers)
    json_data[cnt] = json.loads(p.text)
    for series in json_data[cnt]['Results']['series']:
        x=prettytable.PrettyTable(["series id","year","period","value","footnotes"])
        seriesId = series['seriesID']
        for item in series['data']:
            year = item['year']
            period = item['period']
            value = item['value']
            footnotes=""
            for footnote in item['footnotes']:
                if footnote:
                    footnotes = footnotes + footnote['text'] + ','
            x.add_row([seriesId, year, period, value, footnotes[:-1]])  # add the assembled row
    cnt += 1

test_df = json_normalize(json_data[0]['Results']['series'])

test = pd.concat(
                        [(json_normalize(v[keyparent],sep='_')) 
Example #12
x, y = printTable('testResult.txt', 'pDFT')
x, y = printTable('testResult1.txt', 'pDCT')

ax2.plot(x, y)
ax2.set_xlabel('pDCT')
ax2.set_ylabel('Результат')
x, y = printTable('testResultS.txt', 's')
ax3.plot(x, y)
ax3.set_xlabel('S')
ax3.set_ylabel('Результат')
x, y = printTable('testResultW.txt', 'w')
ax4.plot(x, y)
ax4.set_xlabel('W')
ax4.set_ylabel('Результат')

table = PT.PrettyTable()
table.field_names = ['Количество тестовых изображений', 'm', 'n', 'pDFT', 'pDCT', 'BIN', 'W', 'S', 'Результат']
data_file = open('testResultCount.txt', 'r')
rows = json.loads(data_file.read())
x = []
y = []
for row in rows:
    tmp = []
    for field in row:
        if field == 'c':
            tmp.append((row['c'] - 1)*40)
            x.append((row['c'] - 1)*40)
        elif field == 'res':
            y.append(row['res'])
            tmp.append(row[field])
        else:
            tmp.append(row[field])
    table.add_row(tmp)

Example #13
def get_participant(code, **defineDate):
    url = 'https://sc.hkexnews.hk/TuniS/www.hkexnews.hk/sdw/search/searchsdw_c.aspx'
    today = time.strftime('%Y-%m-%d', time.localtime())
    today = datetime.datetime.strptime(today, "%Y-%m-%d")
    if defineDate:
        yesterday = defineDate['date']
    else:
        yesterday = str(
            (today + datetime.timedelta(days=-1)).strftime("%Y-%m-%d"))
    print(today, yesterday)

    tb = pt.PrettyTable()
    tb.align = 'c'  # alignment: 'c' center, 'l' left, 'r' right
    page = Page()
    c = Line()
    data = {
        'today': today,
        '__EVENTTARGET': 'btnSearch',
        '__EVENTARGUMENT': '',
        'txtShareholdingDate': yesterday,
        'txtStockCode': code,
        'txtStockName': '',
        'txtParticipantID': '',
        'txtParticipantName': ''
    }
    session = req.session()
    response = session.post(url=url, data=data)
    tree = etree.HTML(response.text)
    txtStockName = tree.xpath('//input[@name="txtStockName"]/@value')[0]

    head = tree.xpath(
        '//div[@id="pnlResultNormal"]/div[@class="search-details-table-container table-mobile-list-container"]//table/thead/tr'
    )
    header = []
    for line in head:
        participantid = line.xpath(
            './th[@data-column-class="col-participant-id"]/text()')[0]  # participant ID
        participantname = line.xpath(
            './th[@data-column-class="col-participant-name"]/text()')[0]  # participant name
        address = line.xpath('./th[@data-column-class="col-address"]/text()')[
            0]  # participant address
        shareholding = line.xpath(
            './th[@data-column-class="col-shareholding"]/text()')[0]  # number of shares held
        shareholding_percent = str(
            line.xpath(
                './th[@data-column-class="col-shareholding-percent"]/text()')
            [0]).split('/')[-1][2:].strip()  # shareholding percentage
    header.append(participantid)
    header.append(participantname)
    # header.append(address)
    header.append(shareholding)
    header.append(shareholding_percent)
    print(code, txtStockName)
    data = []
    tempdata = tree.xpath(
        '//div[@id="pnlResultNormal"]/div[@class="search-details-table-container table-mobile-list-container"]//table/tbody//tr'
    )
    i = 0
    for line in tempdata:
        i += 1
        if i == 10:
            break
        else:
            participantid = line.xpath(
                './td[@class="col-participant-id"]/div[2]/text()')[0]
            participantname = line.xpath(
                './td[@class="col-participant-name"]/div[2]/text()')[0]
            # address=line.xpath('./td[@class="col-address"]/div[2]/text()')[0]
            shareholding = line.xpath(
                './td[@class="col-shareholding text-right"]/div[2]/text()')[0]
            shareholding_percent = line.xpath(
                './td[@class="col-shareholding-percent text-right"]/div[2]/text()'
            )[0]
        record = {
            '机构编号': participantid,
            '机构名称': participantname,
            '持股数量': shareholding,
            '持股百分比': shareholding_percent
        }
        tb.add_row([
            participantid, participantname, shareholding, shareholding_percent
        ])
        data.append(record)
    pdf = pds.DataFrame(data)
    tb.field_names = header  # set the table header
    print(tb.get_string())
    #print(pdf)
    return pdf
Example #14
                all_slots[pred_chunk[-1]]['FP'] += 1
        for label_chunk in label_chunks:
            if label_chunk[-1] not in all_slots:
                all_slots[label_chunk[-1]] = {'TP':0.0, 'FP':0.0, 'FN':0.0, 'TN':0.0}
            if label_chunk not in pred_chunks:
                FN += 1
                all_slots[label_chunk[-1]]['FN'] += 1
        correct_sentence_slots += int(label_chunks == pred_chunks)
        if intent_correct and label_chunks == pred_chunks:
            correct_sentence += 1
        if label_chunks != pred_chunks and opt.print_log:
            print(' '.join([word if label == 'O' else word+':'+label for word, label in zip(words, labels)]))
            print(' '.join([word if pred == 'O' else word+':'+pred for word, pred in zip(words, preds)]))
            print('-'*20)

    table = prettytable.PrettyTable(["Metric", "TP", "FN", "FP", "Prec.", "Recall", "F1-score", "Snt. Acc"])
    ## Customize the table output style
    ### left-aligned
    table.align = 'l'
    ### numeric display format
    table.float_format = "2.2"

    if TP == 0:
        table.add_row(('all slots', int(TP), int(FN), int(FP), 0, 0, 0, 100*correct_sentence_slots/sentence_number))
    else:
        table.add_row(('all slots', int(TP), int(FN), int(FP), 100*TP/(TP+FP), 100*TP/(TP+FN), 100*2*TP/(2*TP+FN+FP), 100*correct_sentence_slots/sentence_number))
    if TP2 != 0:
        table.add_row(('all intents', int(TP2), int(FN2), int(FP2), 100*TP2/(TP2+FP2), 100*TP2/(TP2+FN2), 100*2*TP2/(2*TP2+FN2+FP2), 100*correct_sentence_intents/sentence_number))
        table.add_row(('all slots+intents', '-', '-', '-', '-', '-', '-', 100*correct_sentence/sentence_number))
    table.add_row(('-', '-', '-', '-', '-', '-', '-', '-'))
    all_F1 = []
Example #15
def test_output_summary(region, stack, layer):
    table = prettytable.PrettyTable()
    table.field_names = ["Region", "StackID", "LayerID"]
    table.add_row([str(region), str(stack), str(layer)])
    print(table.get_string(title="Test Input Summary"))
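
A usage sketch; the argument values below are hypothetical:

test_output_summary("us-east-1", "stack-42", "layer-7")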
Example #16
def _generate_all_charts(spec, input_data):
    """Generate all charts specified in the specification file.

    :param spec: Specification.
    :param input_data: Full data set.
    :type spec: Specification
    :type input_data: InputData
    """

    def _generate_chart(_, data_q, graph):
        """Generates the chart.
        """

        logs = list()

        logging.info("  Generating the chart '{0}' ...".
                     format(graph.get("title", "")))
        logs.append(("INFO", "  Generating the chart '{0}' ...".
                     format(graph.get("title", ""))))

        job_name = graph["data"].keys()[0]

        csv_tbl = list()
        res = list()

        # Transform the data
        logs.append(("INFO", "    Creating the data set for the {0} '{1}'.".
                     format(graph.get("type", ""), graph.get("title", ""))))
        data = input_data.filter_data(graph, continue_on_error=True)
        if data is None:
            logging.error("No data.")
            return

        chart_data = dict()
        for job, job_data in data.iteritems():
            if job != job_name:
                continue
            for index, bld in job_data.items():
                for test_name, test in bld.items():
                    if chart_data.get(test_name, None) is None:
                        chart_data[test_name] = OrderedDict()
                    try:
                        chart_data[test_name][int(index)] = \
                            test["result"]["receive-rate"]
                    except (KeyError, TypeError):
                        pass

        # Add items to the csv table:
        for tst_name, tst_data in chart_data.items():
            tst_lst = list()
            for bld in builds_dict[job_name]:
                itm = tst_data.get(int(bld), '')
                if not isinstance(itm, str):
                    itm = itm.avg
                tst_lst.append(str(itm))
            csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
        # Generate traces:
        traces = list()
        index = 0
        for test_name, test_data in chart_data.items():
            if not test_data:
                logs.append(("WARNING", "No data for the test '{0}'".
                             format(test_name)))
                continue
            message = "index: {index}, test: {test}".format(
                index=index, test=test_name)
            test_name = test_name.split('.')[-1]
            try:
                trace, rslt = _generate_trending_traces(
                    test_data,
                    job_name=job_name,
                    build_info=build_info,
                    name='-'.join(test_name.split('-')[2:-1]),
                    color=COLORS[index])
            except IndexError:
                message = "Out of colors: {}".format(message)
                logs.append(("ERROR", message))
                logging.error(message)
                index += 1
                continue
            traces.extend(trace)
            res.append(rslt)
            index += 1

        if traces:
            # Generate the chart:
            graph["layout"]["title"] = \
                "<b>{title}</b>".format(title=graph.get("title", ""))
            name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
                                            graph["output-file-name"],
                                            spec.cpta["output-file-type"])

            logs.append(("INFO", "    Writing the file '{0}' ...".
                         format(name_file)))
            plpl = plgo.Figure(data=traces, layout=graph["layout"])
            try:
                ploff.plot(plpl, show_link=False, auto_open=False,
                           filename=name_file)
            except plerr.PlotlyEmptyDataError:
                logs.append(("WARNING", "No data for the plot. Skipped."))

        data_out = {
            "job_name": job_name,
            "csv_table": csv_tbl,
            "results": res,
            "logs": logs
        }
        data_q.put(data_out)

    builds_dict = dict()
    for job in spec.input["builds"].keys():
        if builds_dict.get(job, None) is None:
            builds_dict[job] = list()
        for build in spec.input["builds"][job]:
            status = build["status"]
            if status != "failed" and status != "not found" and \
                status != "removed":
                builds_dict[job].append(str(build["build"]))

    # Create "build ID": "date" dict:
    build_info = dict()
    tb_tbl = spec.environment.get("testbeds", None)
    for job_name, job_data in builds_dict.items():
        if build_info.get(job_name, None) is None:
            build_info[job_name] = OrderedDict()
        for build in job_data:
            testbed = ""
            tb_ip = input_data.metadata(job_name, build).get("testbed", "")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, "")
            build_info[job_name][build] = (
                input_data.metadata(job_name, build).get("generated", ""),
                input_data.metadata(job_name, build).get("version", ""),
                testbed
            )

    work_queue = multiprocessing.JoinableQueue()
    manager = multiprocessing.Manager()
    data_queue = manager.Queue()
    cpus = multiprocessing.cpu_count()

    workers = list()
    for cpu in range(cpus):
        worker = Worker(work_queue,
                        data_queue,
                        _generate_chart)
        worker.daemon = True
        worker.start()
        workers.append(worker)
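        # Pin each worker process to a dedicated CPU core via taskset (output discarded).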
        os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
                  format(cpu, worker.pid))

    for chart in spec.cpta["plots"]:
        work_queue.put((chart, ))
    work_queue.join()

    anomaly_classifications = list()

    # Create the header:
    csv_tables = dict()
    for job_name in builds_dict.keys():
        if csv_tables.get(job_name, None) is None:
            csv_tables[job_name] = list()
        header = "Build Number:," + ",".join(builds_dict[job_name]) + '\n'
        csv_tables[job_name].append(header)
        build_dates = [x[0] for x in build_info[job_name].values()]
        header = "Build Date:," + ",".join(build_dates) + '\n'
        csv_tables[job_name].append(header)
        versions = [x[1] for x in build_info[job_name].values()]
        header = "Version:," + ",".join(versions) + '\n'
        csv_tables[job_name].append(header)

    while not data_queue.empty():
        result = data_queue.get()

        anomaly_classifications.extend(result["results"])
        csv_tables[result["job_name"]].extend(result["csv_table"])

        for item in result["logs"]:
            if item[0] == "INFO":
                logging.info(item[1])
            elif item[0] == "ERROR":
                logging.error(item[1])
            elif item[0] == "DEBUG":
                logging.debug(item[1])
            elif item[0] == "CRITICAL":
                logging.critical(item[1])
            elif item[0] == "WARNING":
                logging.warning(item[1])

    del data_queue

    # Terminate all workers
    for worker in workers:
        worker.terminate()
        worker.join()

    # Write the tables:
    for job_name, csv_table in csv_tables.items():
        file_name = spec.cpta["output-file"] + "-" + job_name + "-trending"
        with open("{0}.csv".format(file_name), 'w') as file_handler:
            file_handler.writelines(csv_table)

        txt_table = None
        with open("{0}.csv".format(file_name), 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            line_nr = 0
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    if line_nr > 1:
                        for idx, item in enumerate(row):
                            try:
                                row[idx] = str(round(float(item) / 1000000, 2))
                            except ValueError:
                                pass
                    try:
                        txt_table.add_row(row)
                    except Exception as err:
                        logging.warning("Error occurred while generating TXT "
                                        "table:\n{0}".format(err))
                line_nr += 1
            txt_table.align["Build Number:"] = "l"
        with open("{0}.txt".format(file_name), "w") as txt_file:
            txt_file.write(str(txt_table))

    # Evaluate result:
    if anomaly_classifications:
        result = "PASS"
        for classification in anomaly_classifications:
            if classification == "regression" or classification == "outlier":
                result = "FAIL"
                break
    else:
        result = "FAIL"

    logging.info("Partial results: {0}".format(anomaly_classifications))
    logging.info("Result: {0}".format(result))

    return result
Example #17
                array[x] = array[x + 1]
                array[x + 1] = j


def Sort_2(array_2):  # selection sort
    for i in range(len(array_2)):
        min_idx = i
        for j in range(i + 1, len(array_2)):
            if array_2[j] < array_2[min_idx]:
                min_idx = j
        array_2[min_idx], array_2[i] = array_2[i], array_2[min_idx]


table = prettytable.PrettyTable(
    ["Размер списка", "Время пузырька", "Время select"])
x = []
y1 = []
y2 = []

for i in range(1000, 5000, 1000):
    x.append(i)
    low = 1
    high = i
    array = []  # build a fresh random list for each size
    for j in range(i):
        array.append(random.randint(low, high))
    array_2 = array.copy()
    t1 = datetime.datetime.now()
    Sort(array)
    t2 = datetime.datetime.now()
    print(array)
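    # Hedged sketch of the likely continuation (the original is truncated here):
    # record the bubble-sort time, time Sort_2 the same way, and fill the table.
    y1.append((t2 - t1).total_seconds())
    t1 = datetime.datetime.now()
    Sort_2(array_2)
    t2 = datetime.datetime.now()
    y2.append((t2 - t1).total_seconds())
    table.add_row([i, y1[-1], y2[-1]])

print(table)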
Example #18
def print_list(objs, fields, exclude_unavailable=False, formatters=None,
               sortby_index=0):
    '''Prints a list of objects.

    @param objs: Objects to print
    @param fields: Fields on each object to be printed
    @param exclude_unavailable: Boolean to decide if unavailable fields are
                                removed
    @param formatters: Custom field formatters
    @param sortby_index: Results sorted against the key in the fields list at
                         this index; if None then the object order is not
                         altered
    '''
    formatters = formatters or {}
    mixed_case_fields = ['serverId']
    removed_fields = []
    rows = []

    for o in objs:
        row = []
        for field in fields:
            if field in removed_fields:
                continue
            if field in formatters:
                row.append(formatters[field](o))
            else:
                if field in mixed_case_fields:
                    field_name = field.replace(' ', '_')
                else:
                    field_name = field.lower().replace(' ', '_')
                if isinstance(o, dict) and field in o:
                    data = o[field]
                else:
                    if not hasattr(o, field_name) and exclude_unavailable:
                        removed_fields.append(field)
                        continue
                    else:
                        data = getattr(o, field_name, '')
                if data is None:
                    data = '-'
                if isinstance(data, six.string_types) and "\r" in data:
                    data = data.replace("\r", " ")
                row.append(data)
        rows.append(row)

    for f in removed_fields:
        fields.remove(f)

    pt = prettytable.PrettyTable((f for f in fields), caching=False)
    pt.aligns = ['l' for f in fields]
    for row in rows:
        count = 0
        # Converts unicode values in dictionary to string
        for part in row:
            count = count + 1
            if isinstance(part, dict):
                part = unicode_key_value_to_string(part)
                row[count - 1] = part
        pt.add_row(row)

    if sortby_index is None:
        order_by = None
    else:
        order_by = fields[sortby_index]
    _print(pt, order_by)
Example #19
    def pretty_print(entry, probs, attention, pred_reuse_distances):
        """Returns a human-readable string of the entry, probs, and attention.

    Args:
      entry (EvictionEntry): entry to print
      probs (torch.FloatTensor): probs of evicting entry's eviction candidates.
      attention (list[torch.FloatTensor, CacheAccess]): pairs of attention
        weight on past EvictionEntry sorted from most distant to most recent.
      pred_reuse_distances (torch.FloatTensor): the predicted reuse distance of
        each cache line in the entry of shape (num_cache_lines).

    Returns:
      string: error analysis string for probs and attention on the entry.
    """
        cache_access = entry.cache_access
        eviction_decision = entry.eviction_decision

        s = [
            "PC: {}\nAddress: {}\nEvict: {}\nCache lines:\n".format(
                hex(cache_access.pc), hex(cache_access.address),
                eviction_decision.evict)
        ]

        _, true_ranks = probs.sort(descending=True)
        true_rank_to_pred_rank = dict(
            zip(true_ranks.cpu().data.numpy(), range(len(true_ranks))))
        headers = [
            "true rank", "pc", "address", "pred rank", "prob", "oracle score",
            "pred reuse distance", "rank correct?", "in history?"
        ]
        cache_lines_table = prettytable.PrettyTable(headers)
        for i, (line, prob, pred_reuse) in enumerate(
                zip(cache_access.cache_lines, probs, pred_reuse_distances)):
            cand, pc = line
            pred_rank = true_rank_to_pred_rank[i]
            success = "SUCCESS" if pred_rank == i else "FAILURE"

            present_in_history = any(cand == prev_access.address
                                     for _, prev_access in attention)
            present = "PRESENT" if present_in_history else "ABSENT"
            cache_lines_table.add_row([
                i,
                hex(pc),
                hex(cand), pred_rank, "{:.2f}".format(prob),
                "{:.2f}".format(eviction_decision.cache_line_scores[cand]),
                "{:.2f}".format(pred_reuse.item()), success, present
            ])
        s.append(str(cache_lines_table))
        s.append("\n")

        s.append("Attention:\n")
        num_cache_lines = len(cache_access.cache_lines)
        headers = (["timestep", "pc", "address"] +
                   ["line {}".format(i) for i in range(num_cache_lines)])
        attention_table = prettytable.PrettyTable(headers)
        for i, (attention_weights, access) in enumerate(reversed(attention)):
            # Truncate padded attention to num_cache_lines
            attention_entries = [
                "{:.2f}".format(weight)
                for weight in attention_weights[:num_cache_lines]
            ]
            row = (["t - {}".format(i),
                    hex(access.pc),
                    hex(access.address)] + attention_entries)
            attention_table.add_row(row)
        s.append(str(attention_table))
        s.append("\n")
        return "".join(s)
Example #20
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".format(table.get(
        "title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".format(
        table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [
            "Test case",
        ]

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend([
                    "{0} Throughput [Mpps]".format(item["title"]),
                    "{0} Stdev [Mpps]".format(item["title"])
                ])
        header.extend([
            "{0} Throughput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Throughput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]), "Change [%]"
        ])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error(
            "The model is invalid, missing parameter: {0}".format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(
                        tst_data["parent"].split("-")[0],
                        "-".join(tst_data["name"].split("-")[1:]))
                    tbl_dict[tst_name] = {
                        "name": name,
                        "ref-data": list(),
                        "cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(
                            build)].iteritems():
                        if tbl_dict.get(tst_name, None) is None:
                            continue
                        if tbl_dict[tst_name].get("history", None) is None:
                            tbl_dict[tst_name]["history"] = OrderedDict()
                        if tbl_dict[tst_name]["history"].get(
                                item["title"], None) is None:
                            tbl_dict[tst_name]["history"][item["title"]] = \
                                list()
                        try:
                            tbl_dict[tst_name]["history"][item["title"]].\
                                append(tst_data["throughput"]["value"])
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [
            tbl_dict[tst_name]["name"],
        ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        data_t = remove_outliers(
                            hist_data, outlier_const=table["outlier-const"])
                        if data_t:
                            item.append(round(mean(data_t) / 1000000, 2))
                            item.append(round(stdev(data_t) / 1000000, 2))
                        else:
                            item.extend([None, None])
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = [
        "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                      table["output-file-ext"]),
        "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                      table["output-file-ext"]),
        "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                      table["output-file-ext"]),
        "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                      table["output-file-ext"]),
        "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                      table["output-file-ext"]),
        "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                      table["output-file-ext"])
    ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item)
                                                 for item in test]) + "\n")

    # All tests in txt:
    tbl_names_txt = [
        "{0}-ndr-1t1c-full.txt".format(table["output-file"]),
        "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
        "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
        "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
        "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
        "{0}-pdr-4t4c-full.txt".format(table["output-file"])
    ]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))

    # Selected tests in csv:
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)
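The four top/bottom writers above repeat one pattern: write the common CSV header, then the first (or last) nr-of-tests-shown data lines. A small helper could factor this out; the following is a sketch, not part of the original module:

def write_selected(table, header_str, suffix, lines, reverse=False):
    """Write the first (or, with reverse=True, the last)
    nr-of-tests-shown data lines under the common CSV header."""
    file_name = "{0}-{1}{2}".format(table["output-file"], suffix,
                                    table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(file_name))
    # lines[0] is the header already present in the full file; skip it
    selected = lines[-1:0:-1] if reverse else lines[1:]
    with open(file_name, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(selected):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

# e.g. write_selected(table, header_str, "ndr-1t1c-top", lines)
#      write_selected(table, header_str, "ndr-1t1c-bottom", lines, reverse=True)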
Example #21
# Train the classification model
model_tree = tree.DecisionTreeClassifier(random_state=0)  # build the decision tree model object
model_tree.fit(X_train, y_train.astype('int'))  # fit the decision tree
pre_y = model_tree.predict(X_test)  # predict on the test set to evaluate the model

# Model overview
n_samples, n_features = X.shape  # total number of samples and of features
"""Sample output:
samples:21927	 feature:4
----------------------------------------------------------------------------------------------------
"""

# Confusion matrix
confusion_m = confusion_matrix(y_test, pre_y)  # compute the confusion matrix
confusion_matrix_table = prettytable.PrettyTable()  # create a table instance
confusion_matrix_table.add_row(confusion_m[0, :])  # add the first row
confusion_matrix_table.add_row(confusion_m[1, :])  # add the second row
print('confusion matrix')
print(confusion_matrix_table)  # print the confusion matrix

# Core metric evaluation
y_score = model_tree.predict_proba(X_test)  # predicted class probabilities
fpr, tpr, thresholds = roc_curve(y_test, y_score[:, 1])  # ROC curve
auc_s = auc(fpr, tpr)  # AUC
accuracy_s = accuracy_score(y_test, pre_y)  # accuracy
precision_s = precision_score(y_test, pre_y)  # precision
recall_s = recall_score(y_test, pre_y)  # recall
f1_s = f1_score(y_test, pre_y)  # F1 score
core_metrics = prettytable.PrettyTable()
core_metrics.field_names = ['auc', 'accuracy', 'precision', 'recall', 'f1']
core_metrics.add_row([auc_s, accuracy_s, precision_s, recall_s, f1_s])
print('core metrics')
print(core_metrics)
Example #22
 def output_service(self, rows):
     tb = pt.PrettyTable()
     tb.field_names = ["service", "username", "password"]
     for row in rows:
         tb.add_row([row[0], row[1], row[2]])
     print(tb)
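A quick way to exercise this method (a sketch: the enclosing class is not shown in the snippet, so 'store' below stands in for an instance of it):

rows = [("github", "alice", "s3cr3t"),
        ("mail", "bob", "hunter2")]
store.output_service(rows)  # prints a three-column service/username/password table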
Example #23
File: lab2.py  Project: Yarik353/MND
a11, a22 = (x1[0] * averages_y[0] + x1[1] * averages_y[1] +
            x1[2] * averages_y[2]) / 3, (x2[0] * averages_y[0] +
                                         x2[1] * averages_y[1] +
                                         x2[2] * averages_y[2]) / 3

deter = det([[1, mx1, mx2], [mx1, a1, a2], [mx2, a2, a3]])
b0 = det([[my, mx1, mx2], [a11, a1, a2], [a22, a2, a3]]) / deter
b1 = det([[1, my, mx2], [mx1, a11, a2], [mx2, a22, a3]]) / deter
b2 = det([[1, mx1, my], [mx1, a1, a11], [mx2, a2, a22]]) / deter

delta_x1, delta_x2, x10, x20 = abs(x1_max - x1_min) / 2, abs(
    x2_max - x2_min) / 2, (x1_max + x1_min) / 2, (x2_max + x2_min) / 2
a0 = ((b0 - b1 * x10 / delta_x1) - (b2 * x20 / delta_x2))
a1 = (b1 / delta_x1)
a2 = (b2 / delta_x2)

table = prettytable.PrettyTable()
table.field_names = ["№", "X1", "X2", *[f"Y{i+1}" for i in range(m)]]

table.add_row([1, x1[0], x2[0], *y_1])
table.add_row([2, x1[1], x2[1], *y_2])
table.add_row([3, x1[2], x2[2], *y_3])

print("Критерій Романовського: " + str(romanovsky))
print("Головне відхилення: " + str(offset))
print(table)

table2 = prettytable.PrettyTable()
table2.field_names = ["№", "Average Y", "Dispersion Y", "Fuv", "σuv", "Ruv"]
for i in range(3):
    table2.add_row([
        i + 1,
Example #24
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=.3, random_state=0)  # split into train/test sets; a fixed random_state keeps the split reproducible across runs

# Train the XGBoost classification model
# Binary logistic objective, 10 estimators, 0.8 row subsample per tree, max depth 10, all CPU cores
param_dist = {'objective': 'binary:logistic', 'n_estimators': 10,
              'subsample': 0.8, 'max_depth': 10, 'n_jobs': -1}
model_xgb = xgb.XGBClassifier(**param_dist)
model_xgb.fit(X_train, y_train)
print(model_xgb)
pre_y = model_xgb.predict(X_test)

# Confusion matrix
# A basic evaluation tool for classifiers: it compares the predictions against the true labels
tn, fp, fn, tp = confusion_matrix(y_test, pre_y).ravel()  # unpack the confusion matrix
confusion_matrix_table = prettytable.PrettyTable(['', 'prediction-0', 'prediction-1'])  # create a table instance
confusion_matrix_table.add_row(['actual-0', tn, fp])  # first row: true negatives, false positives
confusion_matrix_table.add_row(['actual-1', fn, tp])  # second row: false negatives, true positives
print('confusion matrix \n', confusion_matrix_table)

# Core evaluation metrics
y_score = model_xgb.predict_proba(X_test)  # predicted class probabilities
fpr, tpr, _ = roc_curve(y_test, y_score[:, 1])  # ROC curve
auc_s = auc(fpr, tpr)  # AUC, the area under the ROC curve
scores = [round(i(y_test, pre_y), 3) for i in (accuracy_score, precision_score, recall_score, f1_score)]  # accuracy, precision, recall and F1 via a list comprehension
scores.insert(0, auc_s)
core_metrics = prettytable.PrettyTable()  # create a table instance
core_metrics.field_names = ['auc', 'accuracy', 'precision', 'recall', 'f1']  # define the column names
core_metrics.add_row(scores)  # add the metric values
print('core metrics\n', core_metrics)
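For a per-class breakdown of the same predictions, sklearn's built-in report is a compact alternative (an optional extra, not part of the original snippet):

from sklearn.metrics import classification_report
print(classification_report(y_test, pre_y, digits=3))  # precision/recall/F1 per class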
Example #25
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('instance', help='Name of database instance')
    parser.add_argument('database', help='name of database')
    parser.add_argument('--dc', required=False, help='The region to '
                        'build the database in', choices=['DFW', 'ORD', 'LON'])
    parser.add_argument('--user', required=False, help='The user to create '
                        'for database access. Defaults to current user')
    parser.add_argument('--password', required=False,
                        help='Password for DB user')

    args = parser.parse_args()

    if not args.password:
        chars = string.ascii_letters + string.digits
        random.seed(os.urandom(1024))
        password = ''.join(random.choice(chars) for i in xrange(12))
    else:
        password = args.password
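    # An alternative sketch (not in the original): random.SystemRandom draws
    # from the OS entropy pool directly, avoiding manual seeding altogether:
    #   password = ''.join(random.SystemRandom().choice(chars)
    #                      for _ in xrange(12))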

    if not args.user:
        user = os.getlogin()
    else:
        user = args.user

    rax = raxcloudrest.RaxCloudREST()
    rax.set_from_file()
    rax.authenticate()
    endpoints = rax.db_svc_cat()
    dc = args.dc if args.dc else endpoints.keys()[0]
    endpoint = endpoints.get(dc)

    print 'Creating DB instance in %s' % dc

    data = {
        'instance': {
            'databases': [
                {
                    'name': args.database
                }
            ],
            'users': [
                {
                    'databases': [
                        {
                            'name': args.database
                        }
                    ],
                    'name': user,
                    'password': password
                }
            ],
            'flavorRef': '%s/flavors/1' % endpoint,
            'name': args.instance,
            'volume': {
                'size': 1
            }
        }
    }

    headers, instance = rax.post('%s/instances' % endpoint, data)

    t = prettytable.PrettyTable(['ID', 'Name', 'Hostname', 'Database', 'User',
                                 'Password'])
    t.add_row([instance['instance']['id'], instance['instance']['name'],
               instance['instance']['hostname'], args.database, user,
               password])

    print
    print t
Example #26
def print_model(rho, data, show_omitted_variables=False):

    variable_names = data['variable_names']

    rho_values = np.copy(rho)
    rho_names = list(variable_names)

    if '(Intercept)' in rho_names:
        intercept_ind = variable_names.index('(Intercept)')
        intercept_val = int(rho[intercept_ind])
        rho_values = np.delete(rho_values, intercept_ind)
        rho_names.remove('(Intercept)')
    else:
        intercept_val = 0

    if 'outcome_name' not in data:
        predict_string = "Pr(Y = +1) = 1/(1 + exp(%d - score))" % intercept_val
    else:
        predict_string = "Pr(%s = +1) = 1/(1 + exp(%d - score))" % (
            data['outcome_name'].upper(), intercept_val)

    if not show_omitted_variables:
        selected_ind = np.flatnonzero(rho_values)
        rho_values = rho_values[selected_ind]
        rho_names = [rho_names[i] for i in selected_ind]
        rho_binary = [
            np.all((data['X'][:, j] == 0) | (data['X'][:, j] == 1))
            for j in selected_ind
        ]

        # sort coefficients from most positive to most negative
        sort_ind = np.argsort(-np.array(rho_values))
        rho_values = [rho_values[j] for j in sort_ind]
        rho_names = [rho_names[j] for j in sort_ind]
        rho_binary = [rho_binary[j] for j in sort_ind]
        rho_values = np.array(rho_values)

    rho_values_string = [str(int(i)) + " points" for i in rho_values]
    n_variable_rows = len(rho_values)
    total_string = "ADD POINTS FROM ROWS %d to %d" % (1, n_variable_rows)

    max_name_col_length = max(len(predict_string), len(total_string),
                              max([len(s) for s in rho_names])) + 2
    max_value_col_length = max(
        7,
        max([len(s) for s in rho_values_string]) + len("points")) + 2

    m = pt.PrettyTable()
    m.field_names = ["Variable", "Points", "Tally"]

    m.add_row([predict_string, "", ""])
    m.add_row(
        ['=' * max_name_col_length, "=" * max_value_col_length, "========="])

    for name, value_string in zip(rho_names, rho_values_string):
        m.add_row([name, value_string, "+ ....."])

    m.add_row(
        ['=' * max_name_col_length, "=" * max_value_col_length, "========="])
    m.add_row([total_string, "SCORE", "= ....."])
    m.header = False
    m.align["Variable"] = "l"
    m.align["Points"] = "r"
    m.align["Tally"] = "r"

    print(m)
    return m
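A minimal, self-contained call (a sketch with made-up data; the variable names and values below are illustrative, not from the original project):

import numpy as np
import prettytable as pt

data = {
    'variable_names': ['(Intercept)', 'age>=50', 'bmi>=30'],
    'X': np.array([[1, 0, 1], [1, 1, 0], [1, 1, 1]]),
    'outcome_name': 'diabetes',
}
rho = np.array([-2.0, 3.0, 1.0])  # intercept plus two scored features
print_model(rho, data)  # prints the scoring-sheet table and returns it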
Example #27
 def __init__(self, to_file=None):
     super(DisplayCallback, self).__init__(name='display')
     self.cur_epoch = 0
     self.to_file = to_file
     self.table = prettytable.PrettyTable()
Example #28
'''104 job listings'''
from bs4 import BeautifulSoup
import requests
import prettytable
keyword = input("Enter a job keyword: ")
t = prettytable.PrettyTable(["Company", "Job Title"], encoding="utf8")
for page in range(1, 3, 1):
    r1 = requests.get(
        "https://www.104.com.tw/jobs/search/",
        headers={
            "User-Agent":
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0",
            "Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        },
        params={
            "ro": "0",
            "keyword": keyword,
            "page": "page",
            "mode": "s",
            "jobsource": "2018indexpoc"
        })
    b1 = BeautifulSoup(r1.text, "html.parser")
    a1 = b1.find_all("article", {"class": "job-list-item"})
    for a2 in a1:
        t.add_row([a2.attrs["data-cust-name"], a2.attrs["data-job-name"]])
print(t)
Example #29
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".format(table.get(
        "title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".format(
        table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        "Test Case", "Trend [Mpps]", "Short-Term Change [%]",
        "Long-Term Change [%]", "Regressions [#]", "Progressions [#]"
    ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(
                        tst_data["parent"].split("-")[0],
                        "-".join(tst_data["name"].split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name, "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] =  \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        if len(tbl_dict[tst_name]["data"]) < 2:
            continue

        data_t = pd.Series(tbl_dict[tst_name]["data"])

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(data_t.size, table["window"])
        long_win_size = min(data_t.size, table["long-trend-window"])
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size] if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append([
                tbl_dict[tst_name]["name"],
                '-' if isnan(last_avg) else round(last_avg / 1000000, 2),
                '-' if isnan(rel_change_last) else rel_change_last,
                '-' if isnan(rel_change_long) else rel_change_long,
                classification_lst[-win_size:].count("regression"),
                classification_lst[-win_size:].count("progression")
            ])

    tbl_lst.sort(key=lambda rel: rel[0])

    tbl_sorted = list()
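    # Bucket sort: order by number of regressions descending, then number of
    # progressions descending; within each bucket, rows are sorted by
    # short-term change ascending.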
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    txt_table = None
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
            else:
                txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
Example #30
def MakeTableCols(table_layout,
                  slice,
                  Obs_scatter,
                  subtests,
                  analytic_soln,
                  analytic,
                  master_column=None):
    #  API:  Missing
    #
    #  Table Layout:
    #     D column headings
    #     D "master" column to use for data collection
    #     D keys for data access (subtest,slice)
    #     - Flag to include errors?
    #  D Format string for latex
    #  D Formatting for Scientific Notation
    #
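    #  A hypothetical table_layout entry consistent with the lookups below
    #  (illustrative only; real keys come from the test configuration):
    #
    #  table_layout['surface'] = {
    #      'header':   ['distance', 'Amanzi', 'Analytic'],
    #      'filename': 'table_surface.txt',
    #      'tabular':  '|C|C|C|',
    #      'distance': {'datasrc': 'Amanzi', 'subtest': 'obs-1', 'variable': 'distance'},
    #      'Amanzi':   {'datasrc': 'Amanzi', 'subtest': 'obs-1', 'variable': 'conc'},
    #      'Analytic': {'datasrc': 'Analytic', 'idepvar': 'distance', 'variable': 'conc'},
    #  }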

    #  Create the table
    t = prettytable.PrettyTable()

    #  Local copy of header keys/text
    headers = table_layout[slice]['header']

    #  First (Master) Column
    if master_column is None:
        master_key = headers[0]
        if (table_layout[slice][master_key]['datasrc'] == 'Amanzi'):
            st = table_layout[slice][master_key]['subtest']
            var = table_layout[slice][master_key]['variable']
            # Make a copy so we can sort it without messing up relationships to data
            master_data = list(Obs_scatter[st][slice][var])
        master_data.sort()
        t.add_column(master_key, master_data)
        t.float_format[master_key] = "4.2f"
    else:
        # assumed to be a (name, values) pair supplied by the caller
        master_key, master_data = master_column
        t.add_column(master_key, master_data)

    # Second -- Last Columns
    del headers[0]
    for col_key in headers:
        if (table_layout[slice][col_key]['datasrc'] == 'Amanzi'):
            st = table_layout[slice][col_key]['subtest']
            var = table_layout[slice][col_key]['variable']
            amanzi_sol = Obs_scatter[st][slice]
            amanzi_data = []
            for d in master_data:
                if d in amanzi_sol['distance']:
                    i = amanzi_sol['distance'].index(d)
                    amanzi_data.append(amanzi_sol[var][i])
                else:
                    amanzi_data.append("Unavailable")
            t.add_column(col_key, amanzi_data)
            t.float_format[col_key] = ".5e"

        elif (table_layout[slice][col_key]['datasrc'] == 'Analytic'):
            solution = analytic_soln[slice]
            idepvar = table_layout[slice][col_key]['idepvar']
            var = table_layout[slice][col_key]['variable']
            analytic_data = []

            vmin_max = 0.0
            for d in master_data:
                vmin = 1e+99
                for v in solution[idepvar]:
                    if (abs(d - v) < vmin):
                        vmin = abs(d - v)
                        i = solution[idepvar].index(v)
                analytic_data.append(solution[var][i])
                vmin_max = max(vmin_max, vmin)

            t.add_column(col_key, analytic_data)
            t.float_format[col_key] = ".5e"
            print("Maximal deviation at point distance was ", vmin_max)

    # We could insert columns for particular error / differences here.

    # Set formatting options
    t.padding_width = 5
    t.hrules = prettytable.ALL
    t.horizontal_char = "-"
    t.horizontal_header_char = "="
    t.header = True

    # Write the table to a file
    with open(table_layout[slice]['filename'], "w+") as table_file:
        table_file.write('.. tabularcolumns:: ' +
                         table_layout[slice]['tabular'] + "\n\n")
        table_file.write(t.get_string())
        table_file.write("\n")

    return