Example #1
from pathlib import Path
from tempfile import TemporaryDirectory

# save_config and read_config are assumed to be importable from the module under test.


def test_save_config():
    with TemporaryDirectory() as d:
        file = Path(d) / "settings.json"
        values = {"settings_slic3r_exe": "now"}
        save_config(values=values, path=file)
        r = read_config(file)
    assert r.slic3r_exe == "now"
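The round-trip test above pins down the expected interface: save_config(values=..., path=...) writes a file and read_config(path) returns an object whose attributes drop the "settings_" key prefix. Below is a minimal sketch that would satisfy it, assuming a plain JSON layout and a SimpleNamespace return type; both are assumptions inferred from the test, not the project's actual implementation.

import json
from pathlib import Path
from types import SimpleNamespace


def save_config(values: dict, path: Path) -> None:
    # Persist the raw settings dictionary as JSON (assumed file format).
    path.write_text(json.dumps(values))


def read_config(path: Path) -> SimpleNamespace:
    # Load the JSON and expose each "settings_*" key as an attribute,
    # e.g. "settings_slic3r_exe" -> cfg.slic3r_exe (assumed naming convention).
    raw = json.loads(path.read_text())
    return SimpleNamespace(**{k.removeprefix("settings_"): v for k, v in raw.items()})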
Example #2
def option_save(**kwargs):
    """
    The code run when the save argument is given. This will save the given configuration to a file with the
    name given with this argument.
    :param kwargs: A dictionary of arguments. This expects 'config' as a valid configuration, 'opts' as
                   a list of options created by getopt, and 'iterator' as the current iterator being used.
    """
    config = kwargs["config"]
    opts = kwargs["opts"]
    iterator = kwargs["iterator"]
    config_name = opts[iterator.current][1]
    old_config_name = config.name
    config.name = config_name
    try:
        configuration.save_config(config, config_name)
    except FileNotFoundError:
        print("\nERROR: The name \"" + config_name +
              "\" is not a valid configuration name.")
        config.name = old_config_name
def menu_option_save(config):
    """
    The code that is run when the menu option for saving the current configuration is selected.
    This will prompt the user for a name, then save the configuration to a file.
    :param config: The current backup configuration.
    """
    # Input: Give a name to this configuration
    config_name = input("Enter a name for this configuration: ")
    # Pass the config and the name to the save function (ask to overwrite if the name exists)
    overwrite = "y"
    if configuration.config_exists(config_name):
        overwrite = input(
            "Would you like to overwrite this existing configuration? (y/n): ")
    if overwrite.lower() == "y":
        old_config_name = config.name
        config.name = config_name
        try:
            configuration.save_config(config, config_name)
        except FileNotFoundError:
            print("\nERROR: The name \"" + config_name +
                  "\" is not a valid configuration name.")
            config.name = old_config_name
def menu_option_backup(config):
    """
    The code that is run when the menu option for backing up the selected files is selected.
    This will ask for confirmation before beginning, and then run the backup process.
    :param config: The current backup configuration.
    """
    # Do not continue if one of the paths in the configuration no longer exists
    if not config.all_paths_are_valid():
        print(
            "At least one of the input or output paths in this configuration is no longer valid."
        )
        print(
            "Please ensure all relevant drives are plugged in, or edit any invalid paths."
        )
    else:
        # If this configuration is new or was modified, ask to save it
        if config.name is None:
            save_input = input(
                "Your configuration has not been saved yet. Would you like to save it? (y/n): "
            )
            if save_input.lower() == "y":
                menu_option_save(config)
        elif configuration.config_was_modified(config):
            save_input = input(
                "This configuration has changed since it was last saved. " +
                "Would you like to update it? (y/n): ")
            if save_input.lower() == "y":
                configuration.save_config(config, config.name)

        # Ask to confirm if this is ok to backup
        backup_confirmation = input(
            "Would you like to start the backup with this configuration? (y/n): "
        )
        # If yes, run the backup
        if backup_confirmation.lower() == "y":
            backup.run_backup(config)
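A hedged sketch of how option_save might be wired into a getopt loop. The "--save" flag, the SimpleNamespace stand-ins for the config and iterator objects, and the dispatch loop are illustrative assumptions; the real objects come from the project's configuration handling.

import getopt
import sys
from types import SimpleNamespace

# Hypothetical wiring: parse a "--save NAME" option and dispatch it to option_save.
opts, _remaining = getopt.getopt(sys.argv[1:], "", ["save="])
config = SimpleNamespace(name=None)      # stand-in exposing only the attribute option_save touches
iterator = SimpleNamespace(current=0)    # stand-in exposing only the attribute option_save reads
for index, (flag, _value) in enumerate(opts):
    if flag == "--save":
        iterator.current = index
        option_save(config=config, opts=opts, iterator=iterator)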
Example #5
                                    dont_set_use=False)
    updates = []
    for app in apps:
        if args.all or args[app]:
            common.msg('Status', '{0} updated'.format(app))
            apps[app] = proxy
            config_changed = True

        if args.remove == app or args.remove is None:
            common.msg('Status', '{0} proxy is off'.format(app))
            apps[app]['use_proxy'] = False
            config_changed = True

    if config_changed:
        common.msg('Status', 'Saving')
        configuration.save_config()

    if args.config:
        for app in apps:
            common.print_config(app)

    if not is_docker:
        # can't configure docker from inside a docker container
        if args.all or args.docker:
            app_docker.configure()

    if args.all or args.git:
        app_git.configure()
    if args.all or args.npm:
        app_npm.configure()
    if args.all or args.yarn:
Example #6
import re
import traceback
from random import randint

import colorama
from termcolor import colored

# colored is assumed to be termcolor's; the project helpers (load_config, save_config,
# replace_entry, backup) are assumed to be importable from the application's own modules.


def master_test(gui):
    """
        Run automated tests for unrestricted plotting/statistics, restricted plotting/statistics,
        unsupervised learning and supervised learning.
    """

    def compare_stats(key, ref_path, test_path):
        """
            Compare two statistics files line by line and store discrepancies.

            Args:
                key (int): random number identifier for this test run
                ref_path (pathlib.Path): Path to reference statistics file
                test_path (pathlib.Path): Path to test statistics file
        """

        mismatches = dict()

        # Extract the important lines from the files provided

        with open(ref_path, "r") as ref_file, open(test_path, "r") as test_file:
            ref_lines = ref_file.readlines()
            labels = ref_lines[1].strip().split(",")
            ref_vals = ref_lines[12].strip().split(",")
            test_vals = test_file.read().split("\n")[12].strip().split(",")

        # Compare values
        for i, label in enumerate(labels):
            if ref_vals[i].strip() != test_vals[i].strip():
                try:
                    if float(ref_vals[i]) != float(test_vals[i]):
                        mismatches[label] = (ref_vals[i], test_vals[i])
                except ValueError:
                    mismatches[label] = (ref_vals[i], "None")

        return mismatches

    def compare_configs(ref_path, test_path):
        """
            Compare two configuration files line by line and store discrepancies
            (a standalone illustration of the "name = value" parsing follows this example).

            Args:
                ref_path (pathlib.Path): Path to reference configuration file
                test_path (pathlib.Path): Path to test configuration file
        """

        with open(ref_path, "r") as ref_file, open(test_path, "r") as test_file:
            ref_lines = ref_file.readlines()
            test_lines = test_file.readlines()
        mismatches = dict()
        for ref_line, test_line in zip(ref_lines[2:], test_lines[2:]):
            if test_line.strip() != ref_line.strip():
                try:
                    # Get the line label (text before "=")
                    label = re.search(r"[^=]*", ref_line).group(0)
                    # Get the reference and test values (text after "=")
                    ref_val = re.search(r"=(.*)", ref_line).group(1).strip()
                    test_val = re.search(r"=(.*)", test_line).group(1).strip()
                    # Try converting and comparing as floats
                    try:
                        if float(ref_val) != float(test_val):
                            mismatches[label] = (ref_val, test_val)
                    except ValueError:
                        # Non-numeric values: fall back to a plain string comparison
                        if ref_val != test_val:
                            mismatches[label] = (ref_val, test_val)
                except AttributeError:
                    # The test line has no "=" separated value to compare against
                    mismatches[label] = (ref_val, "None")

        return mismatches

    # Initialization
    test_dir_path = gui.master_dir_path / "testing"
    test_out_dir = test_dir_path / "temp_output"

    in_file_path = test_dir_path / "input" / "test_input_long.csv"

    # Load config file
    ref_config_path = test_dir_path / "config" / "test_config.ini"
    load_config(gui, config_file_=ref_config_path)

    # Load testing input file
    replace_entry(gui.input_file_E, in_file_path)

    # Set up output
    rand_key = str(randint(10**6, 10**7))

    # ---------------------------------Statistics----------------------------------------
    # Declare paths
    unres_ref_stats_path = test_dir_path / "stats" / "ref_stats_unrestricted_long.csv"
    res_ref_stats_path = test_dir_path / "stats" / "ref_stats_restricted_long.csv"

    # Set up text coloring
    colorama.init()

    print(f"Key = {rand_key}")

    for test_type in ("unrestricted", "restricted"):
        # Run the statistics test for this search type
        print(f"\n\nTesting statistics ({test_type})")
        if test_type == "restricted":
            gui.restrict_search_CB.select()
        else:
            gui.restrict_search_CB.deselect()

        ref_path = res_ref_stats_path if test_type == "restricted" else unres_ref_stats_path

        # Set up output file names
        test_stats_path = test_out_dir / f"{rand_key}_{test_type}.csv"
        test_plot_path = test_out_dir / f"{rand_key}_{test_type}.html"
        replace_entry(gui.stats_file_E, test_stats_path)
        replace_entry(gui.plot_file_E, test_plot_path)

        # Run statistical analysis
        gui.trigger_run()

        # Look for discrepancies in the output files
        mismatches = compare_stats(rand_key, ref_path, test_stats_path)

        # Notify user of mismatched values if any
        if not mismatches:
            print(colored(f"{test_type.upper()} STATS PASSED".center(100, "-"), "green"))
        else:
            print(colored(f"{test_type.upper()} STATS FAILED".center(100, "-"), "red"))
            for key, values in mismatches.items():
                print(
                    colored(key, "yellow")
                    + ": test value of "
                    + colored(str(values[1]), "yellow")
                    + " did not match reference "
                    + colored(str(values[0]), "yellow")
                )

    # ---------------------------------Unsupervised learning--------------------------------------
    print(f"\n\nTesting unsupervised learning")

    load_config(gui, config_file_=ref_config_path)
    gui.unsupervised_learning()
    unsup_test_path = test_out_dir / f"{rand_key}_unsup_test_config.ini"
    unsup_ref_path = test_dir_path / "config" / "unsup_ref_config.ini"
    save_config(gui, out_file=str(unsup_test_path))

    # Search for config discrepancies
    mismatches = compare_configs(unsup_ref_path, unsup_test_path)

    if not mismatches:
        print(colored("UNSUP PASSED".center(100, "-"), "green"))
    else:
        print(colored("UNSUP FAILED".center(100, "-"), "red"))
        for key, values in mismatches.items():
            print(
                colored(key, "yellow")
                + ": test value of "
                + colored(str(values[1]), "yellow")
                + " did not match reference "
                + colored(str(values[0]), "yellow")
            )

    # ---------------------------------Supervised learning----------------------------------------
    print(f"\n\nTesting supervised learning")

    vertex_file_path = test_dir_path / "plots" / "vertex_selection.html"

    # Attempt to make vertex selection plot
    try:
        gui.select_vertices()
    except Exception:
        print(colored("VERTEX SELECTION PLOT FAILED".center(100, "-"), "red"))
        traceback.print_exc()

    replace_entry(gui.vertex_file_E, vertex_file_path)

    load_config(gui, config_file_=ref_config_path)
    gui.supervised_learning()
    sup_test_path = test_out_dir / f"{rand_key}_sup_test_config.ini"
    sup_ref_path = test_dir_path / "config" / "sup_ref_config.ini"
    save_config(gui, out_file=str(sup_test_path))

    # Search for config discrepancies
    mismatches = compare_configs(sup_ref_path, sup_test_path)

    if not mismatches:
        print(colored("SUP PASSED".center(100, "-"), "green"))
    else:
        print(colored("SUP FAILED".center(100, "-"), "red"))
        for key, values in mismatches.items():
            print(
                colored(key, "yellow")
                + ": test value of "
                + colored(str(values[1]), "yellow")
                + " did not match reference "
                + colored(str(values[0]), "yellow")
            )

    # ---------------------------------Plot Editing----------------------------------------
    print(f"\n\nTesting plot editing")

    # Establish configuration
    ref_config_path = test_dir_path / "config" / "test_config.ini"
    load_config(gui, config_file_=ref_config_path)

    # Load testing input file and run in edit mode
    in_file_path = test_dir_path / "input" / "test_input_long.csv"
    replace_entry(gui.input_file_E, in_file_path)
    gui.edit_mode_CB.select()
    gui.trigger_run()

    # Declare file paths
    mod_input_path = test_dir_path / "input" / "ref_mod_plot.html"
    mod_ref_path = test_dir_path / "stats" / "ref_mod_stats.csv"

    # Fill entry boxes
    replace_entry(gui.input_file_E, mod_input_path)

    # Set up output file names
    test_mod_stats_path = test_out_dir / f"{rand_key}_modified.csv"
    test_mod_plot_path = test_out_dir / f"{rand_key}_modified.html"
    replace_entry(gui.stats_file_E, test_mod_stats_path)
    replace_entry(gui.plot_file_E, test_mod_plot_path)

    # Rerun with the modified vertices
    gui.edit_mode_CB.deselect()
    gui.trigger_run()

    # Look for discrepancies in the output files
    mismatches = compare_stats(rand_key, mod_ref_path, test_mod_stats_path)

    # Notify user of mismatched values if any
    if not mismatches:
        print(colored("PLOT EDITING PASSED".center(100, "-"), "green"))
    else:
        print(colored("PLOT EDITING FAILED".center(100, "-"), "red"))
        for key, values in mismatches.items():
            print(
                colored(key, "yellow")
                + ": test value of "
                + colored(str(values[1]), "yellow")
                + " did not match reference "
                + colored(str(values[0]), "yellow")
            )

    print(colored("TESTING COMPLETED".center(100, "-"), "blue"))
Example #7
def save_config(self, config_path):
    configuration.save_config(config_path, self.config)
Example #8
def handle_save_options(queue, config):
    configuration.save_config(config)
    queue.put(
        views.show_message("Saved", "The configuration has been saved.",
                           views.MessageLevel.INFO))
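A hedged usage sketch for handle_save_options: the message that views.show_message builds ends up on the queue for the UI side to consume. The queue.Queue here is standard library; the config object and the configuration/views modules are assumed to come from the project.

import queue

ui_queue = queue.Queue()
handle_save_options(ui_queue, config)    # config is assumed to be the project's current configuration
saved_notice = ui_queue.get_nowait()     # the "Saved" info message built by views.show_message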