def test_registry_add(self):
        HEADING()

        Benchmark.Start()

        title = spec["info"]["title"]
        url = spec["servers"][0]["url"]

        print(f"add {title} -> {url}")
        registry = Registry()

        before = len(registry.list(name=title))

        pid = 1

        entry = registry.add(name=title, url=url, pid=pid)
        pprint(entry)

        after = len(registry.list(name=title))

        assert after == before + 1

        Benchmark.Stop()
Example #2
 def test_provider_volume_create(self):
     HEADING()
     os.system(f"cms volume list --cloud={cloud}")
     name_generator.incr()
     Benchmark.Start()
     params = {
         "NAME": name,
         'size': None,
         'volume_type': None,
         'description': None,
         'region': None,
         'path': None
     }
     data = provider.create(**params)
     Benchmark.Stop()
     status = None
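     # each cloud reports the volume's state under a different key, so normalize it before the assert below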
     if cloud == "openstack" or cloud == "google":
         for v in data:
             status = v['status']
     elif cloud == "oracle":
         for v in data:
             status = v['lifecycle_state']
     elif cloud == "aws" or cloud == "multipass":
         start_timeout = 360
         time = 0
         while time <= start_timeout:
             sleep(15)
             time += 15
             status = provider.status(name=name)[0]['State']
             if status == "available":
                 break
     elif cloud == "azure":
         status = provider.status(name=name)[0]['disk_state']
     assert status in [
         'available', 'AVAILABLE', 'PROVISIONING', 'READY', 'Unattached'
     ]
Example #3
    def test_generate(self):
        """
        test whether the yaml file can successfully be generated from the python file
        :return:
        """
        HEADING()

        Benchmark.Start()

        print("Generating yaml")

        try:
            Shell.run(
                f"cms openapi generate --all_functions --filename={path}/image.py"
            )
            print("Successfully generated image.yaml")
        except Exception as e:
            print(e)
            assert False, "Could not generate"

        file_list = os.listdir(path)
        assert 'image.yaml' in file_list

        Benchmark.Stop()
Example #4
 def test_ls_la_wrapper_multi_options(self):
     HEADING()
     Benchmark.Start()
     r = shell.ls("-a", "-l")
     Benchmark.Stop()
     print(r)
Example #5
 def test_ls_la_string(self):
     HEADING()
     Benchmark.Start()
     r = shell.execute('ls', "-l -a")
     Benchmark.Stop()
     print(r)
Example #6
 def test_pwd(self):
     HEADING()
     Benchmark.Start()
     r = shell.execute('pwd')
     Benchmark.Stop()
     print(r)
Example #7
 def test_list_secgroups(self):
     HEADING()
     Benchmark.Start()
     groups = provider.list_secgroups()
     Benchmark.Stop()
     provider.Print(groups, output='json', kind="secgroup")
Example #8
 def test_clear_local_database(self):
     HEADING()
     Benchmark.Start()
     cm.clear(collection=f"local-key")
     Benchmark.Stop()
     assert True
Example #9
 def test_key_delete(self):
     HEADING()
     Benchmark.Start()
     self.test_clear_cloud_database()
     Benchmark.Stop()
Example #10
 def test_key(self):
     HEADING()
     Benchmark.Start()
     location = Location()
     assert location.key == "CLOUDMESH_CONFIG_DIR"
     Benchmark.Stop()
Example #11
 def test_equal(self):
     HEADING()
     Benchmark.Start()
     location = Location()
     Benchmark.Stop()
     assert location == "a"
Example #12
def main(argv):
    Benchmark.Start()
    home = os.environ['HOME']
    script_output_dir = f"{home}/.cloudmesh/eigenfaces-svm/vm_script_output_multi/"
    benchmark_output_dir = f"{home}/.cloudmesh/eigenfaces-svm/benchmark_output_multi/"

    if not os.path.exists(f"{home}/.cloudmesh/eigenfaces-svm"):
        os.mkdir(f"{home}/.cloudmesh/eigenfaces-svm")
    if not os.path.exists(script_output_dir):
        os.mkdir(script_output_dir)
    if not os.path.exists(benchmark_output_dir):
        os.mkdir(benchmark_output_dir)

    # Run the script that launches VMs and benchmarks the OpenAPI service when the command line arg "run" is passed
    if len(argv) > 1 and argv[1] == "run":
        clouds = ['aws', 'google', 'azure']
        runtimes_dic = {'google': [], 'aws': [], 'azure': []}
        num_trials = 1
        vms = []
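        # collect (ip, cloud, trial) tuples so the workflow tests can later be run against each VM in parallel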
        print(f"Running {num_trials} trials for each cloud in {clouds}")
        for cloud in clouds:
            Shell.run(f"cms set cloud={cloud}")
            for i in range(num_trials):
                vm_name = f"{cloud}-{i}"
                print(f"Creating and running test on VM {vm_name}")
                start = default_timer()
                result = Shell.run(
                    f"{home}/cm/cloudmesh-openapi/tests/generator-eigenfaces-svm/eigenfaces-svm-full-multi-script {vm_name} > {script_output_dir}{cloud}-{i}"
                )
                end = default_timer()
                print(f"Script on {vm_name} finished in {end - start} seconds")
                runtimes_dic[cloud].append(end - start)
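                # grab the VM's IP by grepping the ssh command output for the first IPv4-looking token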
                ip = Shell.run(
                    f'cms vm ssh {vm_name} --command="which" |  grep -E -o -m 1 "([0-9]{{1,3}}[\.]){{3}}[0-9]{{1,3}}"'
                ).split()[0]
                vms.append((ip, cloud, i))
            runtimes = np.asarray(runtimes_dic[cloud])
            print(f"\n{cloud} script run time mean: {runtimes.mean()}")
            print(f"{cloud} script run time min: {runtimes.min()}")
            print(f"{cloud} script run time max: {runtimes.max()}")
            print(f"{cloud} script run time std: {runtimes.std()}\n")

        # Run tests
        with Pool(3) as p:
            p.map(test_ai_workflow, vms)

        # Delete VMs
        for cloud in clouds:
            Shell.run(f"cms set cloud={cloud}")
            for i in range(num_trials):
                Shell.run(f'cms vm terminate {cloud}-{i}')

    # Scrape benchmark output from the script outputs
    print(f'Scraping benchmarks from script output at {script_output_dir}')
    script_outputs = os.listdir(script_output_dir)
    for file in script_outputs:
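        # write a companion -benchmark csv per script output, keeping only the cloudmesh Benchmark csv lines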
        with open(f"{script_output_dir}{file}", 'r') as f:
            b = open(f"{benchmark_output_dir}{file}-benchmark", "w")
            b.write(
                "csv,timer,status,time,sum,start,tag,uname.node,user,uname.system,platform.version\n"
            )
            found_benchmark = False
            for line in f.readlines():
                if line.startswith(
                        "# csv,benchmark-eigenfaces-multi-cloud/test"
                ):  # some shells emit the csv info twice, once as error "info" and once as normal output
                    found_benchmark = True
                    b.write(line[2:])  # keep csv,...
            b.close()
            if not found_benchmark:
                print(f"Error on script {script_output_dir}{file}")
                if os.path.exists(f"{benchmark_output_dir}{file}-benchmark"):
                    os.remove(f"{benchmark_output_dir}{file}-benchmark")

    # Read benchmark output and compute statistics
    print(
        f'Reading benchmarks from benchmark output at {benchmark_output_dir}')
    columns = [
        "csv", "timer", "status", "time", "sum", "start", "tag", "uname.node",
        "user", "uname.system", "platform.version, cloud"
    ]
    benchmark_df = pd.DataFrame(columns=columns)
    benchmark_outputs = os.listdir(benchmark_output_dir)
    for file in benchmark_outputs:
        cloud = file.split("-")[0]
        df = pd.read_csv(f"{benchmark_output_dir}{file}")
        df['cloud'] = cloud
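        # AWS VMs report hostnames starting with "ip", so rewrite them to aws-<trial> to keep the samples distinguishable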
        if cloud == 'aws':
            df.loc[df['uname.node'].str.startswith("ip"),
                   ["uname.node"]] = 'aws' + "-" + file.split("-")[1]
        benchmark_df = pd.concat([benchmark_df, df])

    benchmark_df['test_type'] = 'local'
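    # rows recorded on this machine are remote-client calls to the VM services; rows from the VMs themselves stay 'local'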
    benchmark_df.loc[benchmark_df['uname.node'] == gethostname(),
                     ['test_type']] = 'remote'

    print("Printing trial statistics:")
    result = ""
    stats_df = pd.DataFrame(
        columns=['test', 'type', 'cloud', 'mean', 'min', 'max', 'std'])
    for cloud in benchmark_df['cloud'].unique():
        result += f"{cloud} has {len(benchmark_df.loc[benchmark_df['cloud'] == cloud]['uname.node'].unique())} VM samples.\n"
        for timer in benchmark_df['timer'].unique():
            for test_type in benchmark_df['test_type'].unique():
                df = benchmark_df.loc[(benchmark_df['cloud'] == cloud) &
                                      (benchmark_df['timer'] == timer) &
                                      (benchmark_df['test_type'] == test_type),
                                      ['time']]
                if len(df.values) > 0:
                    mean = df.values.mean()
                    min = df.values.min()
                    max = df.values.max()
                    std = df.values.std()
                    result += f"{cloud} {timer} {test_type} samples: {len(df.values)}\n"
                    result += f"{cloud} {timer} {test_type} mean: {mean}\n"
                    result += f"{cloud} {timer} {test_type} min: {min}\n"
                    result += f"{cloud} {timer} {test_type} max: {max}\n"
                    result += f"{cloud} {timer} {test_type} std: {std}\n\n"
                    to_append = [timer, test_type, cloud, mean, min, max, std]
                    stats_series = pd.Series(to_append, index=stats_df.columns)
                    stats_df = stats_df.append(stats_series, ignore_index=True)

    print(result)

    stats_df = stats_df.round(decimals=2)
    stats_df['test'] = stats_df['test'].str.replace(
        "benchmark-eigenfaces-multi-cloud/", "")
    # print(stats_df_print.sort_values(by=['test', 'type', 'cloud']).to_markdown(index=False))
    print(
        stats_df.sort_values(by=['test', 'type', 'cloud']).to_latex(
            index=False))

    # graph 1: stacked bar graph of all tests
    download_df = stats_df.loc[(stats_df['test'] == 'test_download_data')]
    download_means = download_df["mean"]

    download_mins = download_df["min"]
    download_stds = download_df["std"]
    download_labels = download_df["cloud"]

    train_df = stats_df.loc[(stats_df['test'] == 'test_train')]
    train_means = train_df["mean"]
    train_mins = train_df["min"]
    train_stds = train_df["std"]

    upload_df = stats_df.loc[(stats_df['test'] == 'test_upload')]
    upload_means = upload_df["mean"]
    upload_mins = upload_df["min"]
    upload_stds = upload_df["std"]

    predict_df = stats_df.loc[(stats_df['test'] == 'test_predict')]
    predict_means = predict_df["mean"]
    predict_mins = predict_df["min"]
    predict_stds = predict_df["std"]

    plt.style.use('seaborn-whitegrid')
    n = 3
    ind = np.arange(n)
    width = 0.35
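    # stack the four workflow phases per cloud; each segment's bottom is the sum of the previous phase means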
    p1 = plt.bar(ind,
                 download_means,
                 width,
                 yerr=download_stds,
                 color='orange',
                 capsize=3)
    p2 = plt.bar(ind,
                 train_means,
                 width,
                 bottom=download_means,
                 yerr=train_stds,
                 color='green',
                 capsize=3)
    p3 = plt.bar(ind,
                 upload_means,
                 width,
                 bottom=download_means.values + train_means.values,
                 yerr=upload_stds,
                 color='yellow',
                 capsize=3)
    p4 = plt.bar(ind,
                 predict_means,
                 width,
                 bottom=download_means.values + train_means.values +
                 upload_means.values,
                 yerr=predict_stds,
                 color='blue',
                 capsize=3,
                 ecolor="gray")
    plt.ylabel('Time (s)')
    plt.title('AI Service Workflow Runtime')
    plt.xticks(ind, download_labels)
    plt.legend((p1[0], p2[0], p3[0], p4[0]),
               ('Download Data', 'Train', 'Upload', 'Predict'),
               bbox_to_anchor=(0, 0),
               loc='lower left',
               ncol=4,
               frameon=True)
    plt.savefig('ai_service_workflow_runtime.png')
    plt.savefig('ai_service_workflow_runtime.pdf')
    plt.savefig('ai_service_workflow_runtime.svg')
    plt.show()
Example #13
def test_train(ip):
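    # call the train endpoint of the OpenAPI service listening on port 8080 of the target VM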
    Benchmark.Start()
    r = requests.get(f"http://{ip}:8080/cloudmesh/EigenfacesSVM/train")
    Benchmark.Stop()
    assert r.status_code == 200
Example #14
def test_download_data(ip):
    Benchmark.Start()
    r = requests.get(f"http://{ip}:8080/cloudmesh/EigenfacesSVM/download_data")
    Benchmark.Stop()
    assert r.status_code == 200
def main(argv):
    Benchmark.Start()
    home = os.environ['HOME']
    script_output_dir = f"{home}/.cloudmesh/eigenfaces-svm/vm_script_output/"
    benchmark_output_dir = f"{home}/.cloudmesh/eigenfaces-svm/benchmark_output/"

    if not os.path.exists(f"{home}/.cloudmesh/eigenfaces-svm"):
        os.mkdir(f"{home}/.cloudmesh/eigenfaces-svm")
    if not os.path.exists(script_output_dir):
        os.mkdir(script_output_dir)
    if not os.path.exists(benchmark_output_dir):
        os.mkdir(benchmark_output_dir)

    # Run the script that launches VMs and benchmarks the OpenAPI service when the command line arg "run" is passed
    if len(argv) > 1 and argv[1] == "run":
        clouds = ['aws', 'google', 'azure']
        runtimes_dic = {'google': [], 'aws': [], 'azure': []}
        num_trials = 3
        print(f"Running {num_trials} trials for each cloud in {clouds}")
        for cloud in clouds:
            Shell.run(f"cms set cloud={cloud}")
            for i in range(num_trials):
                vm_name = f"{cloud}-{i}"
                print(f"Creating and running test on VM {vm_name}")
                start = default_timer()
                result = Shell.run(
                    f"{home}/cm/cloudmesh-openapi/tests/generator-eigenfaces-svm/eigenfaces-svm-full-script {vm_name} > {script_output_dir}{cloud}-{i}"
                )
                end = default_timer()
                print(f"Script on {vm_name} finished in {end - start} seconds")
                runtimes_dic[cloud].append(end - start)
            runtimes = np.asarray(runtimes_dic[cloud])
            print(f"\n{cloud} script run time mean: {runtimes.mean()}")
            print(f"{cloud} script run time min: {runtimes.min()}")
            print(f"{cloud} script run time max: {runtimes.max()}")
            print(f"{cloud} script run time std: {runtimes.std()}\n")

    # Scrape benchmark output from the script outputs
    print(f'Scraping benchmarks from script output at {script_output_dir}')
    script_outputs = os.listdir(script_output_dir)
    for file in script_outputs:
        with open(f"{script_output_dir}{file}", 'r') as f:
            b = open(f"{benchmark_output_dir}{file}-benchmark", "w")
            b.write(
                "csv,timer,status,time,sum,start,tag,uname.node,user,uname.system,platform.version\n"
            )
            found_benchmark = False
            for line in f.readlines():
                if line.startswith("# csv,test"):  # some shells emit the csv info twice, once as error "info" and once as normal output
                    found_benchmark = True
                    b.write(line[2:])  # keep the csv,... part
            b.close()
            if not found_benchmark:
                print(f"Error on script {script_output_dir}{file}")
                if os.path.exists(f"{benchmark_output_dir}{file}-benchmark"):
                    os.remove(f"{benchmark_output_dir}{file}-benchmark")

    # Read benchmark output and compute statistics
    print(
        f'Reading benchmarks from benchmark output at {benchmark_output_dir}')
    columns = [
        "csv", "timer", "status", "time", "sum", "start", "tag", "uname.node",
        "user", "uname.system", "platform.version, cloud"
    ]
    benchmark_df = pd.DataFrame(columns=columns)
    benchmark_outputs = os.listdir(benchmark_output_dir)
    for file in benchmark_outputs:
        cloud = file.split("-")[0]
        df = pd.read_csv(f"{benchmark_output_dir}{file}")
        df['cloud'] = cloud
        if cloud == 'aws':
            df.loc[df['uname.node'].str.startswith("ip"),
                   ["uname.node"]] = 'aws' + "-" + file.split("-")[1]
        benchmark_df = pd.concat([benchmark_df, df])

    benchmark_df['test_type'] = 'local'
    benchmark_df.loc[benchmark_df['uname.node'] == gethostname(),
                     ['test_type']] = 'remote'

    print("Printing trial statistics:")
    result = ""
    stats_df = pd.DataFrame(
        columns=['test', 'type', 'cloud', 'mean', 'min', 'max', 'std'])
    for cloud in benchmark_df['cloud'].unique():
        result += f"{cloud} has {len(benchmark_df.loc[benchmark_df['cloud']==cloud]['uname.node'].unique())-1} VM samples.\n"
        for timer in benchmark_df['timer'].unique():
            for test_type in benchmark_df['test_type'].unique():
                df = benchmark_df.loc[(benchmark_df['cloud'] == cloud) &
                                      (benchmark_df['timer'] == timer) &
                                      (benchmark_df['test_type'] == test_type),
                                      ['time']]
                if len(df.values) > 0:
                    mean = df.values.mean()
                    min = df.values.min()
                    max = df.values.max()
                    std = df.values.std()
                    result += f"{cloud} {timer} {test_type} samples: {len(df.values)}\n"
                    result += f"{cloud} {timer} {test_type} mean: {mean}\n"
                    result += f"{cloud} {timer} {test_type} min: {min}\n"
                    result += f"{cloud} {timer} {test_type} max: {max}\n"
                    result += f"{cloud} {timer} {test_type} std: {std}\n\n"
                    to_append = [timer, test_type, cloud, mean, min, max, std]
                    stats_series = pd.Series(to_append, index=stats_df.columns)
                    stats_df = stats_df.append(stats_series, ignore_index=True)

    print(result)
    stats_df = stats_df.round(decimals=2)
    stats_df['test'] = stats_df['test'].str.replace(
        "test_030_generator_eigenfaces_svm/test_", "")
    #print(stats_df_print.sort_values(by=['test', 'type', 'cloud']).to_markdown(index=False))
    print(
        stats_df.sort_values(by=['test', 'type', 'cloud']).to_latex(
            index=False))
    #pi_series = pd.Series(["test_download_data", "local", "pi", 135.5, 135.5, 135.5, 0.0], index=stats_df.columns)
    #stats_df = stats_df.append(pi_series, ignore_index=True)
    #pi_series = pd.Series(["test_scikitlearn_train", "local", "pi", 232.0, 232.0, 232.0, 0.0], index=stats_df.columns)
    #stats_df = stats_df.append(pi_series, ignore_index=True)
    #pi_series = pd.Series(["test_train", "local", "pi", 231.0, 231.0, 231.0, 0.0], index=stats_df.columns)
    #stats_df = stats_df.append(pi_series, ignore_index=True)
    #pi_series = pd.Series(["test_upload", "local", "pi", 0.05, 0.05, 0.05, 0.0], index=stats_df.columns)
    #stats_df = stats_df.append(pi_series, ignore_index=True)
    #pi_series = pd.Series(["test_predict", "local", "pi", 0.4, 0.4, 0.4, 0.0], index=stats_df.columns)
    #stats_df = stats_df.append(pi_series, ignore_index=True)

    sorter = ['aws', 'azure', 'google', 'mac book', 'docker', 'pi 4', 'pi 3b+']
    stats_df.cloud = stats_df.cloud.astype("category")
    stats_df.cloud.cat.set_categories(sorter, inplace=True)
    stats_df = stats_df.sort_values(["cloud"])

    if "pi 3b+" in stats_df['cloud'].unique():
        cost_df = stats_df[['test', 'type', 'cloud', 'mean']].copy()
        #cost_df['cost/s'] = 0
        #cost_df['cost'] = 0
        cost_df.loc[cost_df['cloud'] == 'aws', ['cost/s']] = 0.1 / 60.0 / 60.0
        cost_df.loc[cost_df['cloud'] == 'azure',
                    ['cost/s']] = 0.096 / 60.0 / 60.0
        cost_df.loc[cost_df['cloud'] == 'google',
                    ['cost/s']] = 0.0949995 / 60.0 / 60.0
        cost_df.loc[cost_df['cloud'] == 'pi 3b+',
                    ['cost/s']] = 0.006546804 / 60.0 / 60.0
        cost_df.loc[cost_df['cloud'] == 'pi 4',
                    ['cost/s']] = 0.013324201 / 60.0 / 60.0
        cost_df['cost'] = cost_df['mean'].values * cost_df['cost/s'].values

        for test in cost_df['test'].unique():
            for type in cost_df['type'].unique():
                if type == 'remote':
                    continue
                sub_df = cost_df.loc[(cost_df['test'] == test)
                                     & (cost_df['type'] == type)]
                pi_cost = sub_df.loc[cost_df['cloud'] == 'pi 3b+',
                                     'cost'].values
                pi_mean = sub_df.loc[cost_df['cloud'] == 'pi 3b+',
                                     'mean'].values
                cost_inc = (sub_df['cost'].values - pi_cost) / pi_cost * 100
                mean_dec = (sub_df['mean'].values * -1 +
                            pi_mean) / pi_mean * 100
                cost_df.loc[(cost_df['test'] == test) &
                            (cost_df['type'] == type),
                            ["% runtime decrease"]] = mean_dec
                cost_df.loc[(cost_df['test'] == test) &
                            (cost_df['type'] == type),
                            ["% cost increase"]] = cost_inc

        cost_df["% cost increase"] = cost_df["% cost increase"].round(2)
        cost_df["% runtime decrease"] = cost_df["% runtime decrease"].round(2)
        cost_df = cost_df.drop(columns='cost/s')
        #pd.set_option('display.float_format', '{:.2E}'.format)
        print(
            cost_df.sort_values(by=['test', 'type', 'cloud']).to_latex(
                index=False, formatters={'cost': '{:,.2e}'.format}))

    suffix = ""
    if "pi 3b+" in stats_df['cloud'].unique():
        suffix = "_pi"

    # graph 1: download_data_local
    download_df = stats_df.loc[(stats_df['test'] == 'download_data')]
    download_means = download_df["mean"]
    download_mins = download_df["min"]
    download_stds = download_df["std"]
    download_labels = download_df["cloud"]

    #plt.style.use('ggplot')
    plt.style.use('seaborn-whitegrid')
    x = download_labels
    x_pos = [i for i, _ in enumerate(x)]
    #plt.bar(x_pos, download_means, yerr=download_stds,capsize=3, color=["green",'orange','blue', 'red'])
    plt.bar(x_pos, download_means, yerr=download_stds, capsize=3)
    plt.xlabel("Cloud")
    plt.ylabel("Seconds")
    plt.title("Time to Download and Extract Data")
    plt.xticks(x_pos, x)
    plt.savefig(f'sample_graph_1{suffix}.png')
    plt.savefig(f'sample_graph_1{suffix}.pdf')
    plt.savefig(f'sample_graph_1{suffix}.svg')
    plt.show()

    # graph 2: openapi train vs scikitlearn_train
    openapi_df = stats_df.loc[(stats_df['test'] == 'train')]
    openapi_means = openapi_df['mean']
    openapi_mins = openapi_df['min']
    openapi_stds = openapi_df['std']
    openapi_labels = openapi_df['cloud']

    scikitlearn_df = stats_df.loc[(stats_df['test'] == 'scikitlearn_train')]
    scikit_means = scikitlearn_df['mean']
    scikit_mins = scikitlearn_df['min']
    scikit_stds = scikitlearn_df['std']
    scikit_labels = scikitlearn_df['cloud']

    x = openapi_labels
    ind = np.arange(len(openapi_labels))
    width = 0.35
    #openapi_handles = plt.bar(ind, openapi_means, width, yerr=openapi_stds, capsize=3, color=["green", 'orange', 'blue', 'red'])
    #scikit_handles = plt.bar(ind + width, scikit_means, width, yerr=scikit_stds, capsize=3, color=["springgreen", 'bisque', 'skyblue', 'lightcoral'])
    openapi_handles = plt.bar(ind,
                              openapi_means,
                              width,
                              yerr=openapi_stds,
                              capsize=3)
    scikit_handles = plt.bar(ind + width,
                             scikit_means,
                             width,
                             yerr=scikit_stds,
                             capsize=3)
    plt.xlabel("Cloud")
    plt.ylabel("Seconds")
    plt.title("Model Training Time")
    plt.xticks(ind + width / 2, scikit_labels)
    #plt.legend([tuple(openapi_handles), tuple(scikit_handles)], ['OpenAPI service', 'Scikit-learn example'], numpoints=1,
    #           handler_map={tuple: HandlerTuple(ndivide=None)},frameon=True)
    plt.legend(['train', 'scikitlearn train'], frameon=True)
    plt.savefig(f'sample_graph_2{suffix}.png')
    plt.savefig(f'sample_graph_2{suffix}.pdf')
    plt.savefig(f'sample_graph_2{suffix}.svg')
    plt.show()

    # graph 3: upload_local vs upload_remote
    local_df = stats_df.loc[(stats_df['test'] == 'upload')
                            & (stats_df['type'] == 'local')]
    local_means = local_df['mean']
    local_mins = local_df['min']
    local_stds = local_df['std']
    local_labels = local_df['cloud']

    remote_df = stats_df.loc[(stats_df['test'] == 'upload')
                             & (stats_df['type'] == 'remote')]
    remote_means = remote_df['mean']
    remote_mins = remote_df['min']
    remote_stds = remote_df['std']
    remote_labels = remote_df['cloud']

    x = local_labels
    ind = np.arange(len(local_labels))
    width = 0.35
    #local_handels = plt.bar(ind, local_means, width, yerr=local_stds, capsize=3, color=["green", 'orange', 'blue', 'red'])
    local_handles = plt.bar(ind,
                            local_means,
                            width,
                            yerr=local_stds,
                            capsize=3)
    ind = np.arange(len(remote_labels))
    #remote_handles = plt.bar(ind + width, remote_means, width, yerr=remote_stds,capsize=3,
    #        color=["springgreen", 'bisque', 'skyblue'])
    remote_handles = plt.bar(ind + width,
                             remote_means,
                             width,
                             yerr=remote_stds,
                             capsize=3)
    ind = np.arange(len(local_labels))
    plt.xlabel("Cloud")
    plt.ylabel("Seconds")
    plt.title("Upload Function Runtime")
    plt.xticks(ind + width / 2, local_labels)
    #plt.legend([tuple(local_handles), tuple(remote_handles)], ['OpenAPI server', 'Remote client'], numpoints=1,
    #           handler_map={tuple: HandlerTuple(ndivide=None)}, frameon=True)
    plt.legend(['local', 'remote'], frameon=True)
    plt.savefig(f'sample_graph_3{suffix}.png')
    plt.savefig(f'sample_graph_3{suffix}.pdf')
    plt.savefig(f'sample_graph_3{suffix}.svg')
    plt.show()

    # graph 4  predict_local vs predict_remote
    local_df = stats_df.loc[(stats_df['test'] == 'predict')
                            & (stats_df['type'] == 'local')]
    local_means = local_df['mean']
    local_mins = local_df['min']
    local_stds = local_df['std']
    local_labels = local_df['cloud']

    remote_df = stats_df.loc[(stats_df['test'] == 'predict')
                             & (stats_df['type'] == 'remote')]
    remote_means = remote_df['mean']
    remote_mins = remote_df['min']
    remote_stds = remote_df['std']
    remote_labels = remote_df['cloud']

    x = local_labels
    ind = np.arange(len(local_labels))
    width = 0.35
    #local_handels = plt.bar(ind, local_means, width, yerr=local_stds, capsize=3, color=["green", 'orange', 'blue', 'red'])
    local_handles = plt.bar(ind,
                            local_means,
                            width,
                            yerr=local_stds,
                            capsize=3)
    ind = np.arange(len(remote_labels))
    #remote_handles = plt.bar(ind + width, remote_means, width, yerr=remote_stds, capsize=3,
    #        color=["springgreen", 'bisque', 'skyblue'])
    remote_handles = plt.bar(ind + width,
                             remote_means,
                             width,
                             yerr=remote_stds,
                             capsize=3)
    ind = np.arange(len(local_labels))
    plt.xlabel("Cloud")
    plt.ylabel("Seconds")
    plt.title("Predict Function Runtime")
    plt.xticks(ind + width / 2, local_labels)
    #plt.legend([tuple(local_handles), tuple(remote_handles)], ['OpenAPI server', 'Remote client'], numpoints=1,
    #           handler_map={tuple: HandlerTuple(ndivide=None)}, frameon=True)
    plt.legend(['local', 'remote'], frameon=True)
    plt.savefig(f'sample_graph_4{suffix}.png')
    plt.savefig(f'sample_graph_4{suffix}.pdf')
    plt.savefig(f'sample_graph_4{suffix}.svg')
    plt.show()

    Benchmark.Stop()
    Benchmark.print()
    return
Example #16
 def test_cms_flavor_refresh(self):
     HEADING()
     Benchmark.Start()
     os.system(
         f"cms flavor list --cloud={cloud} --refresh > flavor-{cloud}.log")
     Benchmark.Stop()
Example #17
 def test_cms_flavor(self):
     HEADING()
     Benchmark.Start()
     os.system(f"cms flavor list > flavor-local.log")
     Benchmark.Stop()
 def test_rename(self):
     HEADING()
     Benchmark.Start()
     provider.rename(source=self.name, destination=self.new_name)
     Benchmark.Stop()
Example #19
 def test_cms_local(self):
     HEADING()
     Benchmark.Start()
     os.system("cms key add")
     os.system("cms key list > key-local.log")
     Benchmark.Stop()
 def test_sec_group_list_cloud(self):
     HEADING()
     Benchmark.Start()
     result = run(f"cms sec group list --cloud={cloud}")
     Benchmark.Stop()
     g = groups.list()
Example #21
 def test_clear_cloud_database(self):
     HEADING()
     Benchmark.Start()
     cm.clear(collection=f"{cloud}-key")
     Benchmark.Stop()
     assert True
 def test_rule_load_to_cloud(self):
     HEADING()
     Benchmark.Start()
     result = run(f"cms sec group load deleteme --cloud={cloud}")
     Benchmark.Stop()
Example #23
 def test_list_secgroups_rules(self):
     HEADING()
     Benchmark.Start()
     rule_groups = provider.list_secgroup_rules()
     Benchmark.Stop()
     provider.Print(output='json', kind="secgroup", data=rule_groups)
Example #24
 def test_list(self):
     HEADING()
     Benchmark.Start()
     r = g.list(name='test')
     Benchmark.Stop()
Example #25
 def test_ls_la_list(self):
     HEADING()
     Benchmark.Start()
     r = shell.execute('ls', ["-l", "-a"])
     Benchmark.Stop()
     print(r)
Example #26
 def test_benchmark_stopwatch_1(self):
     HEADING()
     Benchmark.Start()
     time.sleep(0.1)
     Benchmark.Stop()
     assert True
Example #27
 def test_ls_la_wrapper(self):
     HEADING()
     Benchmark.Start()
     r = shell.ls("-la")
     Benchmark.Stop()
     print(r)
Example #28
 def test_empty_database(self):
     HEADING()
     Benchmark.Start()
     cm.clear(collection=f"{cloud}-flavor")
     Benchmark.Stop()
Example #29
 def test_cms_init(self):
     HEADING()
     Benchmark.Start()
     result = os.system(f"cms init")
     Benchmark.Stop()
Example #30
 def test_provider_flavor(self):
     HEADING()
     Benchmark.Start()
     r = provider.flavors()
     Benchmark.Stop()