コード例 #1
0
def make_ts_area_plot(vecs, cap, wt, rb):
    """Plot a stacked-area time series of vehicle occupancy and save it.

    Builds an area chart from the metrics DataFrame for the given fleet
    configuration and writes it under figs/.

    Args:
        vecs: number of vehicles in the simulation (used in the filename).
        cap: vehicle capacity; one stacked band per passenger count 1..cap.
        wt: maximum waiting time (shown as "M.W.T" in the title).
        rb: rebalancing setting, forwarded to get_metrics().
    """
    plt.figure()
    df = get_metrics(vecs, cap, wt, rb)
    # Empty-vehicle states come first so they stack at the bottom.
    subfields = [
        "empty_waiting", "empty_rebalancing", "empty_moving_to_pickup"
    ]
    # NOTE(review): xrange is Python 2 only, yet other snippets in this
    # file use f-strings (Python 3) -- confirm the target interpreter.
    subfields.extend(["time_pass_%d" % i for i in xrange(1, cap + 1)])
    df_small = df[subfields].copy()
    ax = df_small.plot(kind="area", colormap="rainbow", figsize=(4, 2.2))
    labels = ["Waiting", "Rebalancing", "Picking Up"]
    labels.extend(["N. Pass: %d" % n for n in xrange(1, cap + 1)])
    handles, _ = ax.get_legend_handles_labels()
    handles = fix_area_handles(handles)
    # Reverse handles/labels so the legend matches the visual stack order.
    lgd = ax.legend(reversed(handles),
                    reversed(labels),
                    loc='center left',
                    bbox_to_anchor=(1.0, 0.5),
                    borderaxespad=0,
                    handletextpad=0)
    # Fix: rb was previously passed as an extra format() argument with no
    # matching placeholder; it was silently ignored, so dropping it is
    # behavior-preserving.
    d_str = "N. Vecs: {}, Cap: {}, M.W.T: {}".format(vecs, cap, wt)
    ax.set_title("Vehicle Occupancy Over Time \nw/ " + d_str)
    max_x_ticks = ax.get_xticks()[-1]
    ax.set_xticks(np.linspace(0, max_x_ticks - 4720, 8))
    # Label ticks with weekday abbreviations starting 2013-05-05.
    dr = pd.date_range(start="05-05-13", periods=len(ax.get_xticks()))
    dr = dr.map(lambda t: t.strftime("%a"))
    ax.set_xticklabels(dr)
    vals = ax.get_yticks()
    # Rescale raw tick values to percentages (x / 10); presumably this
    # assumes a 1000-vehicle fleet -- TODO confirm the data scale.
    ax.set_yticklabels(['{:3.0f}%'.format(x / 10) for x in vals])
    # ax.set_xlabel("Time")
    plt.savefig("figs/ts-area-v{}-c{}-w{}.png".format(vecs, cap, wt),
                bbox_extra_artists=(lgd, ),
                bbox_inches='tight')
    plt.close()
 def create_and_evaluate_RF(dic):
     # Train a random forest with the supplied hyper-parameters and report
     # its F1 score on the held-out test split (globals supply the data).
     model = RandomForestClassifier(**dic, n_jobs=n_jobs)
     model.fit(train_samples, train_labels)
     preds = model.predict(test_samples)
     f1 = get_metrics(test_labels, preds, get_cm=False)[3]
     print(f'Random Forrest number {i} : f1 - {f1}')
     return f1
コード例 #3
0
ファイル: plot_metrics.py プロジェクト: wallarelvo/mod
def make_ts_area_plot(vecs, cap, wt, rb):
    """Plot a stacked-area time series of vehicle occupancy and save it.

    Args:
        vecs: number of vehicles in the simulation (used in the filename).
        cap: vehicle capacity; one stacked band per passenger count 1..cap.
        wt: maximum waiting time (shown as "M.W.T" in the title).
        rb: rebalancing setting, forwarded to get_metrics().
    """
    plt.figure()
    df = get_metrics(vecs, cap, wt, rb)
    # Empty-vehicle states come first so they stack at the bottom.
    subfields = ["empty_waiting", "empty_rebalancing",
                 "empty_moving_to_pickup"]
    subfields.extend(["time_pass_%d" % i for i in xrange(1, cap + 1)])
    df_small = df[subfields].copy()
    ax = df_small.plot(kind="area", colormap="rainbow",
                       figsize=(4, 2.2))
    labels = ["Waiting", "Rebalancing", "Picking Up"]
    labels.extend(["N. Pass: %d" % n for n in xrange(1, cap + 1)])
    handles, _ = ax.get_legend_handles_labels()
    handles = fix_area_handles(handles)
    # Reverse handles/labels so the legend matches the visual stack order.
    lgd = ax.legend(reversed(handles),
                    reversed(labels),
                    loc='center left',
                    bbox_to_anchor=(1.0, 0.5),
                    borderaxespad=0,
                    handletextpad=0)
    # NOTE(review): rb is passed as an extra format() argument but the
    # string has only three placeholders, so it is silently ignored.
    d_str = "N. Vecs: {}, Cap: {}, M.W.T: {}".format(vecs, cap, wt, rb)
    ax.set_title("Vehicle Occupancy Over Time \nw/ " + d_str)
    max_x_ticks = ax.get_xticks()[-1]
    ax.set_xticks(np.linspace(0, max_x_ticks - 4720, 8))
    # Label ticks with weekday abbreviations starting 2013-05-05.
    dr = pd.date_range(start="05-05-13", periods=len(ax.get_xticks()))
    dr = dr.map(lambda t: t.strftime("%a"))
    ax.set_xticklabels(dr)
    vals = ax.get_yticks()
    # Rescale raw tick values to percentages (x / 10); presumably this
    # assumes a 1000-vehicle fleet -- TODO confirm the data scale.
    ax.set_yticklabels(['{:3.0f}%'.format(x / 10) for x in vals])
    # ax.set_xlabel("Time")
    plt.savefig("figs/ts-area-v{}-c{}-w{}.png".format(vecs, cap, wt),
                bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close()
コード例 #4
0
def make_ts_plot(vecs, wt, rb, field):
    """Plot `field` over time, one series per capacity in `caps`, and save.

    Args:
        vecs: number of vehicles (used in the filename).
        wt: maximum waiting time (used in the filename).
        rb: rebalancing setting; NOTE(review): unused -- get_metrics is
            always called with 0 here.
        field: metric column to plot; "%" fields get a percent y-axis.
    """
    # fmt = DateFormatter("%a")
    # matplotlib.rc("font", weight="bold")
    # matplotlib.rc("axes", labelweight="bold")
    # matplotlib.rc("figure", titleweight="bold")
    fig, ax = plt.subplots()
    fig.set_size_inches(4, 2.46)
    # One series per capacity, colour from the parallel clrs list.
    for cap, clr in zip(caps, clrs):
        df = get_metrics(vecs, cap, wt, 0)
        locs, labels = plt.xticks()  # NOTE(review): unused
        _, dts = plot_ts(df,
                         field,
                         "o",
                         color=clr,
                         alpha=1,
                         label=str(cap),
                         markersize=4)
    # dts comes from the last loop iteration; raises NameError if caps is
    # empty -- TODO confirm caps is always non-empty.
    ticks = [min(dts)] + list(ax.get_xticks()) + [max(dts)]
    # Insert a midpoint between each pair of ticks so the day labels can
    # sit between the day-boundary separators ("|").
    new_ticks = list()
    for i in xrange(len(ticks) - 1):
        new_ticks.append(ticks[i])
        new_ticks.append(0.5 * (ticks[i] + ticks[i + 1]))
    new_ticks.append(ticks[-1])
    ax.set_xticks(new_ticks)
    ticklabels = "| Su | Mo | Tu | We | Th | Fr | Sa |".split(" ")
    ax.set_xticklabels(ticklabels)
    lgd = plt.legend(loc="center left",
                     fancybox=True,
                     shadow=True,
                     bbox_to_anchor=(1, 0.5),
                     title="Capacity")
    lgd.get_title().set_fontsize(15)
    set_legend_marker_size(lgd, 15)
    plt.ylabel(prettify(field))
    # Fractional metrics get a fixed 0-100% axis.
    if "%" in prettify(field):
        ax.set_ylim([0, 1])
        vals = ax.get_yticks()
        ax.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
    # fig.autofmt_xdate()
    # plt.title("N. Vecs: {}, M.W.T: {}".format(vecs, wt))
    plt.savefig("figs/ts-{}-v{}-w{}.png".format(field, vecs, wt),
                bbox_extra_artists=(lgd, ),
                bbox_inches='tight')
    plt.close()
コード例 #5
0
def get_avg_dataframe():
    """Build a DataFrame of per-configuration metric averages.

    One row per (prediction, vehicles, capacity, waiting_time) combination
    for which a metrics file exists; each metric field is averaged over
    time and a shared-rides-per-pickup ratio is appended. Missing metric
    files (IOError) are skipped silently.
    """
    columns = (["predictions", "vehicles", "waiting_time", "capacity"]
               + fields + ["n_shared_per_passenger"])
    data = pd.DataFrame(columns=columns)
    row = 0
    for p, v, cap, wt in product(predictions, vehicles, caps, waiting_times):
        try:
            df = get_metrics(v, cap, wt, p)
            averages = [np.mean(df[field]) for field in fields]
            data.loc[row] = ([p, int(v), int(wt), int(cap)] + averages
                             + [df["n_shared"].sum() / df["n_pickups"].sum()])
            row += 1
        except IOError:
            # No metrics file for this configuration -- skip it.
            pass
    return data
コード例 #6
0
ファイル: plot_metrics.py プロジェクト: wallarelvo/mod
def get_avg_dataframe():
    """Build a DataFrame of per-configuration metric averages.

    One row per (prediction, vehicles, capacity, waiting_time) combination
    for which a metrics file exists; each metric field is averaged over
    time and a shared-rides-per-pickup ratio is appended.
    """
    cols = ["predictions", "vehicles", "waiting_time", "capacity"] + fields \
        + ["n_shared_per_passenger"]
    data = pd.DataFrame(columns=cols)
    counter = 0
    gen = product(predictions, vehicles, caps, waiting_times)
    for (p, v, cap, wt) in gen:
        f_vals = list()
        try:
            df = get_metrics(v, cap, wt, p)
            for field in fields:
                f_vals.append(np.mean(df[field]))
            data.loc[counter] = [p, int(v), int(wt), int(cap)] \
                + f_vals \
                + [df["n_shared"].sum() / df["n_pickups"].sum()]
            counter += 1
        except IOError:
            # No metrics file for this configuration -- skip it.
            pass
    return data
コード例 #7
0
def make_ts_area_plot_single(vecs, cap, wt, rb, weekday):
    """Plot one weekday's stacked-area vehicle-occupancy chart and save it.

    Args:
        vecs: number of vehicles; also scales the percentage y-axis.
        cap: vehicle capacity; one stacked band per passenger count 1..cap.
        wt: maximum waiting time (shown as "M.W.T" in the title).
        rb: rebalancing setting, forwarded to get_metrics().
        weekday: index into the global `days` selecting which day to plot.
    """
    # sns.set_context("poster", font_scale=2)
    plt.figure()
    df = get_metrics(vecs, cap, wt, rb)
    # Empty-vehicle states come first so they stack at the bottom.
    subfields = [
        "empty_waiting", "empty_rebalancing", "empty_moving_to_pickup"
    ]
    subfields.extend(["time_pass_%d" % i for i in xrange(1, cap + 1)])
    df_small = df[subfields].copy()
    # Select the chosen weekday's rows; presumably 2878 samples per day --
    # TODO confirm the sampling rate.
    q_str = "{0} * 2878 <= index <= ({0} + 1) * 2878".format(weekday)
    df_small = df_small.query(q_str)
    df_small.index = range(2879)
    ax = df_small.plot(kind="area", colormap="rainbow", figsize=(4, 2.2))
    labels = ["Waiting", "Rebalancing", "Picking Up"]
    labels.extend(["N. Pass: %d" % n for n in xrange(1, cap + 1)])
    handles, _ = ax.get_legend_handles_labels()
    handles = fix_area_handles(handles)
    # Reverse handles/labels so the legend matches the visual stack order.
    lgd = ax.legend(reversed(handles),
                    reversed(labels),
                    loc='center left',
                    bbox_to_anchor=(1.0, 0.5),
                    borderaxespad=0,
                    handletextpad=0)
    d_str = "N. Vecs: {}, Cap: {}, M.W.T: {}".format(vecs, cap, wt)
    t_str = "Vehicle Occupancy Over Time On {} \n w/ ".format(days[weekday])
    ax.set_title(t_str + d_str)
    max_x_ticks = ax.get_xticks()[-1]
    # Six ticks across the day, labelled with the hour (4-hour spacing).
    ax.set_xticks(np.arange(0, max_x_ticks, max_x_ticks / 6))
    dr = pd.date_range(start="05-05-16",
                       periods=len(ax.get_xticks()),
                       freq="4H")
    dr = dr.map(lambda t: t.strftime("%H"))
    ax.set_xticklabels(dr)
    vals = ax.get_yticks()
    # Rescale tick values to a percentage of the fleet size.
    ax.set_yticklabels(
        ['{:3.0f}%'.format((x / 10) / (vecs / 1000)) for x in vals])
    ax.set_xlabel("Hour")
    plt.savefig("figs/ts-area-v{}-c{}-w{}-{}.png".format(
        vecs, cap, wt, days[weekday]),
                bbox_extra_artists=(lgd, ),
                bbox_inches='tight')
    plt.close()
コード例 #8
0
ファイル: plot_metrics.py プロジェクト: wallarelvo/mod
def make_ts_area_plot_single(vecs, cap, wt, rb, weekday):
    """Plot one weekday's stacked-area vehicle-occupancy chart and save it.

    Args:
        vecs: number of vehicles; also scales the percentage y-axis.
        cap: vehicle capacity; one stacked band per passenger count 1..cap.
        wt: maximum waiting time (shown as "M.W.T" in the title).
        rb: rebalancing setting, forwarded to get_metrics().
        weekday: index into the global `days` selecting which day to plot.
    """
    # sns.set_context("poster", font_scale=2)
    plt.figure()
    df = get_metrics(vecs, cap, wt, rb)
    subfields = ["empty_waiting", "empty_rebalancing",
                 "empty_moving_to_pickup"]
    subfields.extend(["time_pass_%d" % i for i in xrange(1, cap + 1)])
    df_small = df[subfields].copy()
    # Select the chosen weekday's rows; presumably 2878 samples per day --
    # TODO confirm the sampling rate.
    q_str = "{0} * 2878 <= index <= ({0} + 1) * 2878".format(weekday)
    df_small = df_small.query(q_str)
    df_small.index = range(2879)
    ax = df_small.plot(kind="area", colormap="rainbow",
                       figsize=(4, 2.2))
    labels = ["Waiting", "Rebalancing", "Picking Up"]
    labels.extend(["N. Pass: %d" % n for n in xrange(1, cap + 1)])
    handles, _ = ax.get_legend_handles_labels()
    handles = fix_area_handles(handles)
    # Reverse handles/labels so the legend matches the visual stack order.
    lgd = ax.legend(reversed(handles),
                    reversed(labels),
                    loc='center left',
                    bbox_to_anchor=(1.0, 0.5),
                    borderaxespad=0,
                    handletextpad=0)
    d_str = "N. Vecs: {}, Cap: {}, M.W.T: {}".format(vecs, cap, wt)
    t_str = "Vehicle Occupancy Over Time On {} \n w/ ".format(days[weekday])
    ax.set_title(t_str + d_str)
    max_x_ticks = ax.get_xticks()[-1]
    # Six ticks across the day, labelled with the hour (4-hour spacing).
    ax.set_xticks(np.arange(0, max_x_ticks, max_x_ticks / 6))
    dr = pd.date_range(start="05-05-16", periods=len(ax.get_xticks()),
                       freq="4H")
    dr = dr.map(lambda t: t.strftime("%H"))
    ax.set_xticklabels(dr)
    vals = ax.get_yticks()
    # Rescale tick values to a percentage of the fleet size.
    ax.set_yticklabels(['{:3.0f}%'.format((x / 10) / (vecs / 1000))
                        for x in vals])
    ax.set_xlabel("Hour")
    plt.savefig(
        "figs/ts-area-v{}-c{}-w{}-{}.png".format(vecs, cap, wt, days[weekday]),
        bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close()
コード例 #9
0
ファイル: plot_metrics.py プロジェクト: wallarelvo/mod
def make_ts_plot(vecs, wt, rb, field):
    """Plot `field` over time, one series per capacity in `caps`, and save.

    Args:
        vecs: number of vehicles (used in the filename).
        wt: maximum waiting time (used in the filename).
        rb: rebalancing setting; NOTE(review): unused -- get_metrics is
            always called with 0 here.
        field: metric column to plot; "%" fields get a percent y-axis.
    """
    # fmt = DateFormatter("%a")
    # matplotlib.rc("font", weight="bold")
    # matplotlib.rc("axes", labelweight="bold")
    # matplotlib.rc("figure", titleweight="bold")
    fig, ax = plt.subplots()
    fig.set_size_inches(4, 2.46)
    # One series per capacity, colour from the parallel clrs list.
    for cap, clr in zip(caps, clrs):
        df = get_metrics(vecs, cap, wt, 0)
        locs, labels = plt.xticks()  # NOTE(review): unused
        _, dts = plot_ts(df, field, "o", color=clr, alpha=1,
                         label=str(cap), markersize=4)
    # dts comes from the last loop iteration; raises NameError if caps is
    # empty -- TODO confirm caps is always non-empty.
    ticks = [min(dts)] + list(ax.get_xticks()) + [max(dts)]
    # Insert a midpoint between each pair of ticks so the day labels can
    # sit between the day-boundary separators ("|").
    new_ticks = list()
    for i in xrange(len(ticks) - 1):
        new_ticks.append(ticks[i])
        new_ticks.append(0.5 * (ticks[i] + ticks[i + 1]))
    new_ticks.append(ticks[-1])
    ax.set_xticks(new_ticks)
    ticklabels = "| Su | Mo | Tu | We | Th | Fr | Sa |".split(" ")
    ax.set_xticklabels(ticklabels)
    lgd = plt.legend(loc="center left", fancybox=True,
                     shadow=True, bbox_to_anchor=(1, 0.5),
                     title="Capacity")
    lgd.get_title().set_fontsize(15)
    set_legend_marker_size(lgd, 15)
    plt.ylabel(prettify(field))
    # Fractional metrics get a fixed 0-100% axis.
    if "%" in prettify(field):
        ax.set_ylim([0, 1])
        vals = ax.get_yticks()
        ax.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
    # fig.autofmt_xdate()
    # plt.title("N. Vecs: {}, M.W.T: {}".format(vecs, wt))
    plt.savefig("figs/ts-{}-v{}-w{}.png".format(field, vecs, wt),
                bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close()
コード例 #10
0
import json
import sys
from typing import Any, List, Mapping, Tuple
from pydantic.tools import parse_obj_as
from common import MethodAbs, get_metrics

# Load a JSON dump of per-method abstractions (path given as argv[1]) and
# pretty-print each method's abstractions.
with open(sys.argv[1], "r") as f:
    dump: Mapping[str, List[Tuple[str, Any]]] = json.load(f)

for method, abs_json in dump.items():
    # Validate/convert the raw JSON into a MethodAbs model.
    abstractions = parse_obj_as(MethodAbs, abs_json)
    metrics = get_metrics(abstractions)  # NOTE(review): computed but unused

    print("=====" * 20)
    print("===== Method: " + method)
    print(abstractions.body)
    for stmt, details in abstractions.abstractions:
        print(" > " + stmt)
        print("    - Local:")
        for k, v in details.localNodeMap.items():
            print(f"      - {k}: {v}")
        print("    - Global:")
        for k, v in details.globalNodeMap.items():
            print(f"      - {k}: {v}")
        print("    - Node handlers:")
        for nodeID, event, methods in details.nodeHandlerMap:
            print(f"      - {nodeID}, {event}: {methods}")
        print("    - Dialog handlers:")
        for nodeID, event, methods in details.dialogHandlerMap:
            print(f"      - {nodeID}, {event}: {methods}")
        print()
コード例 #11
0
import joblib
import numpy as np
import matplotlib.pyplot as plt
from common import load_firstlayer_data, get_metrics
from Graphics import make_confusion_matrix


# Directory holding the first-layer CSVs of the API-Intent-Traffic dataset.
Istlayer_dataset_path = "../../Datasets/API-Intent-Traffic/Other_CSVs/1st_Layer/"

test_samples, test_labels=load_firstlayer_data(Istlayer_dataset_path, data='Testing')

# Restore the previously trained random-forest model from disk.
rf = joblib.load("./Results/Models/RFOptiStochastic3.joblib")

predictions= rf.predict(test_samples)

# Full evaluation: confusion matrix plus the four scalar metrics.
cm, accuracy, precision, recall, f1 = get_metrics(test_labels, predictions)

categories=['Benign', 'Malware']
labels = ['True Neg','False Pos','False Neg','True Pos']

# Render the confusion matrix with per-cell labels.
make_confusion_matrix(cm, 
                      group_names=labels,
                      categories=categories, 
                      cmap='binary',
                      sklearn_matrix=True)

def draw_importance(rf, test_samples):
        # Rank the features of a fitted random forest by importance.
        # NOTE(review): the body appears truncated in this snippet --
        # `indices` and `std` are computed but never used and nothing is
        # actually drawn; `test_samples` is also unused. Confirm against
        # the original source.
        importances = rf.feature_importances_
        # Spread of each feature's importance across the ensemble's trees.
        std = np.std([tree.feature_importances_ for tree in rf.estimators_],
             axis=0)
        indices = np.argsort(importances)[::-1]
コード例 #12
0
ファイル: application.py プロジェクト: testalauda/jenkins
    def create_k8s_host_app__k8s_applications(self):
        """End-to-end check of a host-network k8s application.

        Creates an env-var file and a glusterfs volume, deploys an app
        whose YAML references both, verifies env vars, volume type,
        metrics and logs, then deletes the app. Skipped unless the
        region supports glusterfs volumes.
        """
        if self.app_region is not None and 'glusterfs' in (
            (self.region_data)['data']['features']['volume']['features']):
            # Overall pass/fail flag for the non-blocking checks below.
            test_flag = True
            ret_env = create_envs(self.app_k8s_env_file)
            self.assert_successful(ret_env)

            ret_volume = create_volume(self.app_k8s_gfs_volume, 'glusterfs',
                                       None)
            self.assert_successful(ret_volume)
            volume_id = ret_volume['volume_id']

            if settings.ENV == 'private':
                # NOTE(review): this YAML looks malformed -- "number :1"
                # (space before the colon) and "volumes" missing its colon;
                # compare with the public-env template below and confirm.
                yaml_content = "{}:\n alauda_lb: ALB\n net: host\n image: {}/{}:{}\n" \
                               "size: XXS\n ports:\n - '{}:80:80/http'\n number :1\n"\
                               "env_file: {}\n volumes\n - {}:/var/\n" \
                    .format(self.app_host_service_name, get_registry_info().get('registry_url', None),
                            settings.PRIVATE_REPO, settings.IMAGE_TAG,
                            self.haproxy, self.app_k8s_env_file, self.app_k8s_gfs_volume)
            else:
                # App YAML for a host-mode service with an env-var file and
                # the gfs volume attached.
                yaml_content = "{}:\n alauda_lb: ALB\n ports:\n - '{}:81:81/http'\n" \
                               " image: {}/{}/hello-world:latest\n size: XXS\n net: host\n number: 1\n" \
                               " env_file: {}\n volumes:\n - {}:/var/\n" \
                    .format(self.app_host_service_name, self.haproxy, settings.REGISTRY_URL,
                            settings.CLAAS_NAMESPACE, self.app_k8s_env_file, self.app_k8s_gfs_volume)

            print('yaml_content:{}'.format(yaml_content))
            # self.create_app_action(self.app_k8s_name, yaml_content, self.app_region, 'ALB', self.haproxy, False)
            # Create the application; waits for its final state and returns
            # immediately if it is not Running.
            ret1 = create_application(self.app_k8s_host_name,
                                      files_data=yaml_content,
                                      region=self.app_region)
            self.assert_successful(ret1)

            # Verify the env-var file was applied; testing continues on
            # failure.
            ret4 = exec_feature(self.app_host_service_name,
                                self.namespace,
                                command="env",
                                commands_string="key=value",
                                app_name=self.app_k8s_host_name)
            if not ret4['success']:
                test_flag = False

            # Check the volume type; does not block later tests.
            ret6 = verify_volumes(self.app_host_service_name, volume_id,
                                  self.app_k8s_host_name)
            if not ret6['success']:
                test_flag = False

            # Check that metrics exist; does not block later tests.
            ret7 = get_metrics(self.app_host_service_name,
                               self.app_k8s_host_name)
            if not ret7['success']:
                test_flag = False

            # Check the service logs.
            ret14 = get_logs(self.app_host_service_name,
                             app_name=self.app_k8s_host_name)
            if not ret14['success']:
                test_flag = False
            # Delete the application.
            ret_delete = delete_application(self.app_k8s_host_name,
                                            self.app_region)
            self.assert_successful(ret_delete)
            result = {
                'success': test_flag,
                "create envfile": ret_env,
                "create gfs volume": ret_volume,
                "create application": ret1,
                "get service env": ret4,
                "get service volume type": ret6,
                "get service metrics": ret7,
                "get service log": ret14,
                "delete service": ret_delete
            }
            self.assert_successful(result)
            return {"success": True, "total": "All success"}

        else:
            return {
                "success": True,
                "total": "there is no alb region, skip the case"
            }
コード例 #13
0
from pydantic.tools import parse_obj_as
from common import MethodAbs, get_metrics

# Compare the metrics of two abstraction dumps (argv[1], argv[2]) and print
# every metric that differs for methods present in both dumps.
# NOTE(review): json/sys/typing names are used but not imported in this
# snippet -- presumably imported earlier in the original file; confirm.
with open(sys.argv[1], "r") as f:
    dump1: Mapping[str, List[Tuple[str, Any]]] = json.load(f)

with open(sys.argv[2], "r") as f:
    dump2: Mapping[str, List[Tuple[str, Any]]] = json.load(f)

# Only methods appearing in both dumps can be compared.
for m in set(dump1.keys()).intersection(set(dump2.keys())):
    m_printed = False  # NOTE(review): never read
    abs1 = dump1[m]
    abs2 = dump2[m]
    parsed1 = parse_obj_as(MethodAbs, abs1)
    parsed2 = parse_obj_as(MethodAbs, abs2)
    metrics1 = get_metrics(parsed1)
    metrics2 = get_metrics(parsed2)
    # assert len(metrics1) == len(metrics2)
    if len(metrics1) != len(metrics2):
        continue
    if metrics1 != metrics2:
        for s1, s2 in zip(metrics1, metrics2):
            unit = s1["unit"]  # NOTE(review): never read
            # assert s1["unit"] == s2["unit"]
            if s1["unit"] != s2["unit"]:
                continue
            # Report each metric key whose value differs between dumps.
            for k in s1["metrics"].keys():
                m1 = s1["metrics"][k]
                m2 = s2["metrics"][k]
                if m1 != m2:
                    print("- %s: %s != %s" % (k, m1, m2))
 def create_and_evaluate_RF(dic):
     # Hyperopt objective: train a random forest with the sampled
     # hyper-parameters and minimise 1 - F1 on the held-out test split.
     model = RandomForestClassifier(**dic, n_jobs=n_jobs)
     model.fit(train_samples, train_labels)
     preds = model.predict(test_samples)
     f1 = get_metrics(test_labels, preds, get_cm=False)[3]
     return {'loss': 1 - f1, 'status': STATUS_OK}
コード例 #15
0
ファイル: application.py プロジェクト: testalauda/jenkins
    def create_k8s_flannel_app__k8s_applications(self):
        ''' Create an app exercising most alauda YAML features: environment|links|net|number|size|
        volumes|alauda_lb|mount_points|labels -- then verify, update and delete it.'''
        if self.app_region is not None:
            # Overall pass/fail flag for the non-blocking checks below.
            test_flag = True
            ret1 = create_configuration(self.k8s_app_config, "value",
                                        self.k8s_app_config_key)
            self.assert_successful(ret1)

            if settings.ENV == 'private':
                yaml_content = "{}:\n alauda_lb: ALB\n net: flannel\n image: {}/{}:{}\n number: 1\n" \
                               "size: XXS\n ports:\n - '{}:80:80/http'\n" \
                               " environment:\n - k8s_key=k8s-value\n - __ALAUDA_FILE_LOG_PATH__=/home/*.txt\n" \
                               " volumes:\n - /home/:/var/\n mount_points:\n - path: /home/abc\n   config: {}/{}\n" \
                               " labels:\n - 'constraint:node==ip:{}'" \
                    .format(self.app_flannel_service_name, get_registry_info().get('registry_url', None),
                            settings.PRIVATE_REPO, settings.IMAGE_TAG, self.haproxy,
                            self.k8s_app_config, self.k8s_app_config_key, self.k8s_ip_tag
                            )
            else:
                # App YAML for a flannel-mode service with env vars, a
                # config file, a local volume, a log-file path, pinned to a
                # specific node.
                yaml_content = "{}:\n alauda_lb: ALB\n ports:\n - '{}:80:80/http'\n" \
                               " image: {}/{}/hello-world:latest\n size: XXS\n net: flannel\n number: 1\n" \
                               " environment:\n - k8s_key=k8s-value\n - __ALAUDA_FILE_LOG_PATH__=/home/*.txt\n"\
                               " volumes:\n - /home/:/var/\n mount_points:\n - path: /home/abc\n   config: {}/{}\n"\
                               " labels:\n - 'constraint:node==ip:{}'"\
                    .format(self.app_flannel_service_name, self.haproxy, settings.REGISTRY_URL,
                            settings.CLAAS_NAMESPACE, self.k8s_app_config, self.k8s_app_config_key, self.k8s_ip_tag)

            print('yaml_content:{}'.format(yaml_content))
            # self.create_app_action(self.app_k8s_name, yaml_content, self.app_region, 'ALB', self.haproxy, False)
            # Create the application; waits for its final state and returns
            # immediately if it is not Running.
            # NOTE(review): ret1 is reused here, discarding the
            # configuration-creation result assigned above, yet the result
            # dict labels it "create application".
            ret1 = create_application(self.app_k8s_name,
                                      files_data=yaml_content,
                                      region=self.app_region)
            self.assert_successful(ret1)

            # Verify the service inside the app is reachable; failure does
            # not stop later steps.
            ret_access_service = access_service(self.app_flannel_service_name,
                                                self.haproxy,
                                                self.app_k8s_name)
            if not ret_access_service['success']:
                test_flag = False

            # Check the app YAML after creation; abort on failure because
            # the update step below depends on it.
            ret2 = get_yaml(self.app_k8s_name)
            if ret2['status'] != 200:
                test_flag = False
                ret2 = {
                    "success":
                    False,
                    "message":
                    "get application yaml failed, jakiro api error code {}, error:{}"
                    .format(ret2["status"], ret2["text"])
                }
            elif self.app_flannel_service_name not in ret2['text']:
                test_flag = False
                ret2 = {
                    "success":
                    False,
                    "message":
                    "service_name is {},not in yaml:{}".format(
                        self.app_flannel_service_name, ret2['text'])
                }
            else:
                # Prepare the YAML used later to test updating (XXS -> XS).
                update_yaml = ret2['text'].replace("XXS", "XS")
                ret2 = {"success": True, "total": ret2["total"]}
            self.assert_successful(ret2)
            # Check the app's compose YAML; failure does not stop later
            # steps.
            ret3 = get_compose_yaml(self.app_k8s_name)
            if ret3['status'] != 200:
                test_flag = False
                ret3 = {
                    "success":
                    False,
                    "message":
                    "get application compose yaml failed, jakiro api error code {},error:{}"
                    .format(ret3['status'], ret3['text'])
                }
            elif self.app_flannel_service_name not in ret3['text']:
                test_flag = False
                ret3 = {
                    "success":
                    False,
                    "message":
                    "service_name is {},not in yaml:{}".format(
                        self.app_flannel_service_name, ret3['text'])
                }
            else:
                ret3 = {"success": True, "total": ret3["total"]}

            # Verify the env vars were applied; testing continues on
            # failure.
            ret4 = exec_feature(self.app_flannel_service_name,
                                self.namespace,
                                command="env",
                                commands_string="k8s_key=k8s-value",
                                app_name=self.app_k8s_name)
            if not ret4['success']:
                test_flag = False
            # Verify the config file was mounted; testing continues on
            # failure.
            ret5 = exec_feature(self.app_flannel_service_name,
                                self.namespace,
                                command="'cat /home/abc'",
                                commands_string="value",
                                app_name=self.app_k8s_name)
            if not ret5['success']:
                test_flag = False

            # Check the volume type; does not block later tests.
            ret6 = verify_volumes(self.app_flannel_service_name, "host_path",
                                  self.app_k8s_name)
            if not ret6['success']:
                test_flag = False

            # Check that metrics exist; does not block later tests.
            ret7 = get_metrics(self.app_flannel_service_name,
                               self.app_k8s_name)
            if not ret7['success']:
                test_flag = False

            ret_stop = stop_application(self.app_k8s_name, self.app_region)
            self.assert_successful(ret_stop)

            ret_start = start_application(self.app_k8s_name, self.app_region)
            self.assert_successful(ret_start)
            # Verify the update operation; abort on failure.
            ret_update = update_application(self.app_k8s_name,
                                            files_data=update_yaml,
                                            region=self.app_region)
            self.assert_successful(ret_update)

            # Check the service logs.
            ret14 = get_logs(self.app_flannel_service_name,
                             app_name=self.app_k8s_name)
            if not ret14['success']:
                test_flag = False
            # Check the log file.
            ret15 = get_logfile(self.app_flannel_service_name,
                                self.app_k8s_name)
            if not ret15['success']:
                test_flag = False
            # Delete the application.
            ret_delete = delete_application(self.app_k8s_name, self.app_region)
            self.assert_successful(ret_delete)
            result = {
                'success': test_flag,
                "create application": ret1,
                "access sercie ": ret_access_service,
                "get yaml": ret2,
                "get compose yaml": ret3,
                "get service env": ret4,
                "get service config": ret5,
                "get service volume type": ret6,
                "get service metrics": ret7,
                "stop application": ret_stop,
                "start application": ret_start,
                "update application": ret_update,
                "get service log": ret14,
                "get service logfile": ret15,
                "delete service": ret_delete
            }
            self.assert_successful(result)
            return {"success": True, "total": "All success"}

        else:
            return {
                "success": True,
                "total": "there is no alb region, skip the case"
            }
コード例 #16
0
    def k8s_host__k8s_service(self):
        """End-to-end check of a host-network k8s service.

        Creates an env-var file and a glusterfs volume, deploys a service
        that mounts both, verifies access/env/volume/metrics, cycles it
        (stop/start/scale), checks logs, then deletes service and volume.
        Only runs when self.haproxy is set; otherwise returns None.
        """
        if self.haproxy:
            # Overall pass/fail flag for the non-blocking checks below.
            test_flag = True
            # Create the env-var file the service needs; abort on failure.
            ret1 = create_envs(self.k8s_envfile)
            self.assert_successful(ret1)
            # Create the glusterfs volume the service needs; abort on
            # failure.
            ret2 = create_volume(self.gluster_name, 'glusterfs', None)
            self.assert_successful(ret2)
            # Fetch the haproxy info of the current cluster; abort on
            # failure.
            ret3 = get_haproxy(self.haproxy)

            self.assert_successful(ret3)
            # Assemble the data needed to create the service.
            k8sFlannelData = data.ServiceData(self.k8s_host_service,
                                              self.namespace,
                                              settings.SERVICE_CLAAS_REGION[0],
                                              lb_type='haproxy',
                                              alb_name=self.haproxy,
                                              lb_id=ret3['haproxy_id'],
                                              volume_id=ret2['volume_id'],
                                              volume_name=self.gluster_name,
                                              envfile=self.k8s_envfile)
            # Create the host-mode service mounting the env-var file and
            # the glusterfs volume.
            ret4 = create_service(self.k8s_host_service,
                                  k8sFlannelData.k8s_host_service(),
                                  settings.SERVICE_CLAAS_REGION[0])
            if not ret4["success"]:
                # Clean up the volume if service creation failed.
                delete_volume(ret2['volume_id'], self.gluster_name)
            self.assert_successful(ret4)
            # Access the service; testing continues even on failure.
            ret5 = access_service(self.k8s_host_service, self.haproxy)
            if not ret5['success']:
                test_flag = False
            # Verify the env vars were applied; testing continues on
            # failure.
            ret6 = exec_feature(self.k8s_host_service,
                                self.namespace,
                                command="env",
                                commands_string="key=value")
            if not ret6['success']:
                test_flag = False
            # Verify the mounted volume id matches the one created above.
            ret7 = verify_volumes(self.k8s_host_service, ret2['volume_id'])
            if not ret7['success']:
                test_flag = False

            # Check that metrics exist; does not block later tests.
            ret8 = get_metrics(self.k8s_host_service)
            if not ret8['success']:
                test_flag = False
            # Stop the service; failure blocks the remaining steps.
            ret9 = stop_app(self.k8s_host_service)
            self.assert_successful(ret9)
            # Start the service again; failure blocks the remaining steps.
            ret10 = start_app(self.k8s_host_service, num=1)
            self.assert_successful(ret10)
            # Scale up: bump instance count and size; failure blocks the
            # remaining steps.
            ret11 = update_service(self.k8s_host_service, num=2, size="XS")
            self.assert_successful(ret11)
            # Scale down: restore instance count and size; failure blocks
            # the remaining steps.
            ret12 = update_service(self.k8s_host_service, num=1, size="XXS")
            self.assert_successful(ret12)

            # Check the service logs.
            ret13 = get_logs(self.k8s_host_service)
            if not ret13['success']:
                test_flag = False

            ret14 = delete_app(self.k8s_host_service,
                               settings.SERVICE_CLAAS_REGION[0])
            if not ret14['success']:
                test_flag = False
            # Delete the volume (after a grace period for the service
            # teardown).
            sleep(30)
            delete_volume(ret2['volume_id'], self.gluster_name)

            result = {
                'success': test_flag,
                "create envfile ": ret1,
                "create glustfs volume": ret2,
                "get haproxy id ": ret3,
                "create service": ret4,
                "access service": ret5,
                "check envfile": ret6,
                "check volume": ret7,
                "check metrics": ret8,
                "stop service": ret9,
                "start service": ret10,
                "scale up": ret11,
                "scale down": ret12,
                "check log": ret13,
                "delete service": ret14
            }
            self.assert_successful(result)
            return result
def max_features_optimisation(
    train_samples,
    train_labels,
    test_samples,
    test_labels,
    values=None,
    verbose=False,
    graph=True,
    save=True,
    metric='f1',
    max_depth=1000,
    min_samples_split=2,
    max_leaf_nodes=None,
    min_samples_leaf=1,
    n_estimators=100,
):
    """Grid-search the ``max_features`` hyper-parameter of a random forest.

    Trains one ``RandomForestClassifier`` per candidate value, scores it on
    the test split with ``get_metrics`` and returns the value that maximises
    *metric*.

    Parameters
    ----------
    train_samples, train_labels : training split; ``train_samples`` is a
        DataFrame (its column count bounds the default candidate values).
    test_samples, test_labels : evaluation split.
    values : iterable of candidate ``max_features`` values; defaults to
        ``[2, 100, 250, ..., None]`` spanning the feature count.
    verbose : print per-candidate progress and the final optimum.
    graph : when True, plot metric vs. max_features; when False, return
        ``(title, [X, Y], best)`` instead of just ``best``.
    save : write the per-candidate metrics to
        ./Results/Optimisation/max_feature_optimisation.csv.
    metric : one of 'accuracy', 'precision', 'recall', 'f1' — the column
        of the ``get_metrics`` output to maximise.
    max_depth, min_samples_split, max_leaf_nodes, min_samples_leaf,
    n_estimators : fixed forest hyper-parameters passed through unchanged.

    Returns
    -------
    The best ``max_features`` value, or ``(title, [X, Y], best)`` when
    ``graph`` is False.
    """
    # BUGFIX: use an identity check — `values == None` is evaluated
    # element-wise on numpy arrays and is ambiguous in a boolean context.
    if values is None:
        values = np.asarray(
            [2, *range(100, len(train_samples.columns), 150), None])
    metrics = pd.DataFrame(columns=['accuracy', 'precision', 'recall', 'f1'])
    index = values
    for max_feature in values:
        rf_max_features = RandomForestClassifier(
            min_samples_split=min_samples_split,
            max_depth=max_depth,
            min_samples_leaf=min_samples_leaf,
            max_leaf_nodes=max_leaf_nodes,
            n_estimators=n_estimators,
            max_features=max_feature)
        if verbose:
            print(
                'Optimisation du max_features : Entrainement pour max_features = ',
                max_feature)
        rf_max_features.fit(train_samples, train_labels)
        predictions_max_features = rf_max_features.predict(test_samples)
        arr = np.asarray(
            get_metrics(test_labels, predictions_max_features, get_cm=False))
        if np.isnan(arr).any():
            # A NaN metric means the run failed for this candidate; drop it
            # from the index so `metrics` and `index` stay aligned.
            if verbose:
                print('Il y a eu une erreur pour cette valeur de max_features')
            index = np.delete(index, np.where(index == max_feature))
        else:
            # BUGFIX: DataFrame.append was deprecated in pandas 1.4 and
            # removed in 2.0 — build the row and concatenate instead.
            row = pd.DataFrame(arr.reshape(1, -1), columns=list(metrics))
            metrics = pd.concat([metrics, row], ignore_index=True)
            if verbose:
                print(f"max_features = {max_feature} ; " + metric +
                      f" = {metrics.at[len(metrics)-1, metric]}")
    metrics = metrics.set_index(index)
    if save:
        metrics.to_csv('./Results/Optimisation/max_feature_optimisation.csv',
                       index=True)
        if verbose:
            print("Résultats intermédiaires de l'optimisation sauvegardés")
    X = np.asarray(metrics.index)
    Y = metrics[metric].to_numpy()
    i = np.argmax(Y)
    if verbose:
        print(
            f'Le maximum pour {metric} est de {Y[i]} et est atteint pour max_features={X[i]}'
        )
    if graph:
        fig, ax = plt.subplots(1)
        ax.plot(X, Y, 'r--')
        ax.set_ylabel(metric)
        ax.set_xlabel('max_features')
        ax.set_title(metric + ' in fonction of max_features')
        # Point the annotation arrow from a neighbouring sample toward the
        # maximum; pick the neighbour on whichever side exists.
        if i < len(X) - 1:
            ax.annotate(
                'local max',
                xy=(X[i], Y[i]),
                xytext=(X[i + 1], Y[i + 1]),
                arrowprops=dict(facecolor='black', shrink=0.05),
            )
        else:
            ax.annotate(
                'local max',
                xy=(X[i], Y[i]),
                xytext=(X[i - 1], Y[i - 1]),
                arrowprops=dict(facecolor='black', shrink=0.05),
            )
        # BUGFIX: `fig.show` was a bare attribute access (a no-op) — call it.
        fig.show()
    else:
        title = metric + ' in fonction of max_features'
        return title, [X, Y], X[i]
    return X[i]
コード例 #18
0
    def inner_elb_service_test__k8s_service(self):
        """End-to-end lifecycle test of a k8s service exposed via an internal ELB.

        Creates an internal ELB, deploys a service behind it, exercises
        access/exec/metrics and stop/start/scale operations, then deletes
        both the ELB and the service.  Steps wrapped in
        ``assert_successful`` block the remainder of the test on failure;
        the other checks only clear ``test_flag`` so the test keeps going.
        """
        test_flag = True
        # Create an internal ELB; failure aborts the test.
        ret1 = create_alb(self.k8s_internal_elb,
                          create_type='manual',
                          type='elb',
                          alb_region=settings.SERVICE_CLAAS_REGION[0],
                          address_type="internal")
        self.assert_successful(ret1)

        elb_data = data.ServiceData(self.k8s_innerelb_service,
                                    settings.CLAAS_NAMESPACE,
                                    settings.SERVICE_CLAAS_REGION[0],
                                    lb_type='elb',
                                    alb_name=self.k8s_internal_elb,
                                    lb_id=ret1['id'])
        # Create a service using the ELB network mode; failure aborts.
        ret2 = create_service(self.k8s_innerelb_service,
                              elb_data.k8s_innerelb_service(),
                              settings.SERVICE_CLAAS_REGION[0])
        self.assert_successful(ret2)
        # Verify the service is reachable; keep testing on failure.
        ret3 = access_service(self.k8s_innerelb_service, self.k8s_internal_elb)
        if not ret3['success']:
            test_flag = False

        # Verify exec works inside the container; keep testing on failure.
        ret4 = exec_feature(self.k8s_innerelb_service,
                            self.namespace,
                            command="'/bin/ls /'")
        if not ret4['success']:
            test_flag = False
        # Check that metrics are reported; does not block later steps.
        ret5 = get_metrics(self.k8s_innerelb_service)
        if not ret5['success']:
            test_flag = False
        # Stop the service; failure blocks later steps.
        ret6 = stop_app(self.k8s_innerelb_service)
        self.assert_successful(ret6)
        # Start the service; failure blocks later steps.
        ret7 = start_app(self.k8s_innerelb_service, num=1)
        self.assert_successful(ret7)
        # Scale up: raise the instance count and size; failure blocks.
        ret8 = update_service(self.k8s_innerelb_service, num=2, size="XS")
        self.assert_successful(ret8)
        # Scale down: lower the instance count and size; failure blocks.
        ret9 = update_service(self.k8s_innerelb_service, num=1, size="XXS")
        self.assert_successful(ret9)

        # Check the service logs.
        # ret10 = get_logs(self.k8s_innerelb_service)
        # if not ret10['success']:
        #     test_flag = False

        # BUGFIX: delete the ELB created at the top of this test (named
        # self.k8s_internal_elb); the original passed the service name,
        # which would target the wrong (non-existent) ALB.
        ret11 = delete_alb(self.k8s_internal_elb)
        self.assert_successful(ret11)

        ret12 = delete_app(self.k8s_innerelb_service)
        self.assert_successful(ret12)
        result = {
            'success': test_flag,
            "create elb": ret1,
            "create elb service": ret2,
            "access service": ret3,
            "service exec ": ret4,
            "get metrics": ret5,
            "stop service": ret6,
            "start service": ret7,
            "scale up service": ret8,
            "scale down service": ret9,
            # "get logs": ret10,
            "delete elb": ret11,
            "delete service": ret12
        }
        self.assert_successful(result)
        return result
コード例 #19
0
    def k8s_flannel__k8s_service(self):
        """End-to-end test of a flannel-network k8s service.

        The service is created with environment variables, a config file, a
        local volume, a log file, and node-pinned deployment, then taken
        through access/exec/metrics/stop/start/scale/log checks and deleted.
        Runs only when ``self.haproxy`` is set (returns None otherwise).
        Steps wrapped in ``assert_successful`` block the rest of the test;
        the other checks only clear ``test_flag``.
        """
        if self.haproxy:
            test_flag = True
            ret1 = get_haproxy(self.haproxy)

            self.assert_successful(ret1)

            k8sFlannelData = data.ServiceData(
                self.k8s_flannel_service,
                self.namespace,
                settings.SERVICE_CLAAS_REGION[0],
                lb_type='haproxy',
                alb_name=self.haproxy,
                lb_id=ret1['haproxy_id'],
                node_tag=self.node_tag.split(":")[1],
                mipn_enabled=True)

            # Create the service (env vars, local volume, config file, log
            # file, pinned to tagged nodes); failure blocks later steps.
            ret2 = create_service(self.k8s_flannel_service,
                                  k8sFlannelData.k8s_flannel_service(),
                                  settings.SERVICE_CLAAS_REGION[0])
            self.assert_successful(ret2)
            # Verify the service is reachable; keep testing on failure.
            ret3 = access_service(self.k8s_flannel_service, self.haproxy)
            if not ret3['success']:
                test_flag = False
            # Verify the environment variable was injected; keep testing.
            ret4 = exec_feature(self.k8s_flannel_service,
                                self.namespace,
                                command="env",
                                commands_string="k8s_key=k8s_value")
            if not ret4['success']:
                test_flag = False
            # Verify the config file was mounted; keep testing on failure.
            ret5 = exec_feature(self.k8s_flannel_service,
                                self.namespace,
                                command="'cat /home/abc'",
                                commands_string="config")
            if not ret5['success']:
                test_flag = False
            # Check the volume type; does not block later steps.
            ret6 = verify_volumes(self.k8s_flannel_service, "host_path")
            if not ret6['success']:
                test_flag = False
            # Check that metrics are reported; does not block later steps.
            ret7 = get_metrics(self.k8s_flannel_service)
            if not ret7['success']:
                test_flag = False
            # Stop the service; failure blocks later steps.
            ret8 = stop_app(self.k8s_flannel_service)
            self.assert_successful(ret8)
            # Start the service; failure blocks later steps.
            ret9 = start_app(self.k8s_flannel_service, num=1)
            self.assert_successful(ret9)
            # Scale up: raise the instance count and size; failure blocks.
            ret10 = update_service(self.k8s_flannel_service, num=2, size="XS")
            self.assert_successful(ret10)
            # Check all containers landed on the tagged node; non-blocking.
            ret11 = check_node(self.k8s_flannel_service, self.node_tag)
            if not ret11['success']:
                test_flag = False
            # Scale down: lower the instance count and size; failure blocks.
            ret12 = update_service(self.k8s_flannel_service, num=1, size="XXS")
            self.assert_successful(ret12)
            # Re-check node placement after scaling down; non-blocking.
            ret13 = check_node(self.k8s_flannel_service, self.node_tag)
            if not ret13['success']:
                test_flag = False
            # Check the service logs.
            ret14 = get_logs(self.k8s_flannel_service)
            if not ret14['success']:
                test_flag = False
            # Check the log file.
            ret15 = get_logfile(self.k8s_flannel_service)
            if not ret15['success']:
                test_flag = False
            # Delete the service.
            ret16 = delete_app(self.k8s_flannel_service,
                               settings.SERVICE_CLAAS_REGION[0])
            if not ret16['success']:
                test_flag = False

            result = {
                'success': test_flag,
                "get haproxy id ": ret1,
                "create k8s haproxy service": ret2,
                "access sercie ": ret3,
                "get service env": ret4,
                "get servie config": ret5,
                "get service volume type": ret6,
                "get service metrics": ret7,
                "stop service": ret8,
                "start service": ret9,
                "scale up service": ret10,
                # BUGFIX: the two node checks previously shared the key
                # "check instance in node", so ret11 was silently dropped
                # from the report — distinguish the keys.
                "check instance in node after scale up": ret11,
                "scale down service": ret12,
                "check instance in node after scale down": ret13,
                "get service log": ret14,
                "get service logfile": ret15,
                "delete service": ret16
            }
            self.assert_successful(result)
            return result
コード例 #20
0
# Plot training vs. validation loss for the trained model.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()

test_samples, test_labels = get_test_data(
    test, static_df, dic, Snd_layer_data_path)  # Build the test data

# NOTE(review): index 70 looks like the fixed position of the static model's
# prediction inside each sample vector — confirm against get_test_data's layout.
static_prediction = [u[0][70] for u in test_samples
                     ]  # Extract the first static prediction

static_prediction = np.round(
    np.asarray(static_prediction))  # Round into a predicted label

hybrid_prediction = model.predict(test_samples)  # Hybrid model prediction

hybrid_prediction = Binary_Classification(
    hybrid_prediction
)  # Collapse the multiclass prediction to a binary prediction

test_labels = Binary_Classification(
    test_labels)  # Collapse the multiclass labels to binary labels

print(get_metrics(
    test_labels, static_prediction,
    get_cm=True))  # Confusion matrix of the first, static model
print(get_metrics(test_labels, hybrid_prediction,
                  get_cm=True))  # Confusion matrix of the hybrid model