Example #1
def test_sftp_find(config):
    """Predicated on the result of test_sftp_put, this test ensures that the
    list of returned items is as expected from the sftp share.

    :param config:
    :return:
    """
    errors = []
    ns = tapestry.Namespace()
    ns.currentOS = platform.system()
    ns.addrNet, ns.portNet = config["sftp_id"].split(":")
    ns.nameNet = config["sftp_uid"]
    ns.network_credential_value = config["sftp_credential"]
    ns.network_credential_type = "passphrase"
    ns.network_credential_pass = False  # We're just testing passwords here

    connection, failure = tapestry.sftp_connect(ns)

    if not connection:
        errors.append(
            "[ERROR] Connection attempt failed - did the previous test succeed?"
        )
        return errors

    found = tapestry.sftp_find(connection, config["sftp_rootpath"])

    if len(found) == 0:
        errors.append("[ERROR] No Files Found Remotely!")
    else:
        if "control-file.tap" not in found:  # TODO add to docs
            errors.append("[ERROR] The find operation returned the following "
                          "list of items, but not the target item:")
            errors.append(str(found))

    return errors
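
tapestry.sftp_find itself is not shown in these examples. Assuming the connection object is a pysftp.Connection (as Example #2 suggests), a helper with the behaviour this test expects - a list of remote file names, or an empty list on failure - might look roughly like the sketch below; the name sftp_find_sketch and the error handling are assumptions, not the project's actual code.

# Hypothetical sketch only - not the actual tapestry.sftp_find implementation.
def sftp_find_sketch(connection, rootpath):
    """Return the file names under rootpath, or an empty list on any error."""
    try:
        with connection.cd(rootpath):  # pysftp's context manager for a remote chdir
            return connection.listdir()
    except IOError:
        return []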
Example #2
def test_sftp_connect(
        config):  # TODO: Test Not Reflective of Reality (change args only)
    """A very simplistic test that validates a known-good set of SFTP
    information can be used to connect to a given SFTP endpoint and return a
    valid connection object. The errors returned by sftp_connect are added to
    the logger output, as is an error if the returned object is not an
    instance of the expected class.

    :param config:
    :return:
    """
    errors = []
    ns = tapestry.Namespace()
    ns.currentOS = platform.system()
    ns.addrNet, ns.portNet = config["sftp_id"].split(":")
    ns.nameNet = config["sftp_uid"]
    ns.network_credential_value = config["sftp_credential"]
    ns.network_credential_type = "passphrase"
    ns.network_credential_pass = False  # We're just testing passwords here

    connection, resp_errors = tapestry.sftp_connect(ns)

    if not connection:
        errors.append("[ERROR] Raised: '%s'" % resp_errors)

    if connection:
        if isinstance(connection, pysftp.Connection):
            pass
        else:
            errors.append("[ERROR] sftp_connect returned a connection that is"
                          " not an instance of the SFTPConnection class")

    return errors
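
The tests above only rely on tapestry.sftp_connect returning a (connection, errors) pair, where the connection is a pysftp.Connection on success and None on failure. A minimal sketch of a helper honouring that contract is shown below; it is an illustration built on pysftp, not the project's actual implementation, and the broad exception handling is an assumption.

import pysftp

# Hypothetical sketch of a (connection, error) helper like the one these tests
# rely on; not the actual tapestry.sftp_connect implementation.
def sftp_connect_sketch(ns):
    try:
        connection = pysftp.Connection(ns.addrNet,
                                       port=int(ns.portNet),
                                       username=ns.nameNet,
                                       password=ns.network_credential_value)
        return connection, None
    except Exception as err:  # pysftp/paramiko raise several types on failure
        return None, str(err)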
Example #3
def test_sftp_place(config):
    """A quick test that attempts to place a copy of the test article
    "control-config.cfg" onto the SFTP server. Can fail in two ways: a failure
    of the dependent sftp_connect call, or actual failure to place the file.

    :param config:
    :return:
    """
    errors = []
    ns = tapestry.Namespace()
    ns.currentOS = platform.system()
    ns.addrNet, ns.portNet = config["sftp_id"].split(":")
    ns.nameNet = config["sftp_uid"]
    ns.network_credential_value = config["sftp_credential"]
    ns.network_credential_type = "passphrase"
    ns.network_credential_pass = False  # We're just testing passwords here
    tgt_file = os.path.join(
        config["path_config"],
        os.path.join("test articles", "control-config.cfg"))

    connection, failure = tapestry.sftp_connect(ns)

    if not connection:
        errors.append(
            "[ERROR] Connection attempt failed - did the previous test succeed?"
        )
        return errors

    placed, raised = tapestry.sftp_place(connection, tgt_file,
                                         config["sftp_rootpath"])

    if not placed:
        errors.append("[ERROR] Raised: %s" % raised)

    return errors
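
For reference, tapestry.sftp_place is expected to return a (placed, raised) pair. A minimal sketch of such a helper, again assuming a pysftp.Connection, might look like this; the function name and exception handling are illustrative assumptions.

# Hypothetical sketch matching the (placed, raised) return pair used above;
# not the actual tapestry.sftp_place implementation.
def sftp_place_sketch(connection, local_file, remote_dir):
    try:
        with connection.cd(remote_dir):
            connection.put(local_file)  # pysftp uploads into the current remote dir
        return True, None
    except (IOError, OSError) as err:
        return False, str(err)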
Example #4
def test_sftp_find(config):
    """This test checks if the sftp_find function correctly handles a case
    where the rootpath does not exist.

    :param config:
    :return:
    """
    errors = []

    ns = tapestry.Namespace()
    ns.currentOS = platform.system()
    ns.addrNet, ns.portNet = config["sftp_id"].split(":")
    ns.nameNet = config["sftp_uid"]
    ns.network_credential_value = config["sftp_credential"]
    ns.network_credential_type = "passphrase"
    ns.network_credential_pass = False  # We're just testing passwords here

    connection, failure = tapestry.sftp_connect(ns)

    if not connection:
        errors.append(
            "[ERROR] Connection attempt failed - did the previous test succeed?"
        )
        return errors

    found = tapestry.sftp_find(connection, "unwriteable")

    if len(found) > 0:
        errors.append(
            "[ERROR]Files were returned when they should not have been.")

    return errors
Example #5
def test_sftp_connect_down(config):
    """A very simplistic test that validates the response of SFTP_connect in
    the event that the target sftp server is non-responsive, by pointing to a
    server that does not exist.

    :param config:
    :return:
    """
    errors = []
    ns = tapestry.Namespace()
    ns.currentOS = platform.system()
    ns.addrNet = "8.8.8.8"
    ns.portNet = 22
    ns.nameNet = config["sftp_uid"]
    ns.network_credential_value = config["sftp_credential"]
    ns.network_credential_type = "passphrase"
    ns.network_credential_pass = False  # We're just testing passwords here
    connection, resp_errors = tapestry.sftp_connect(ns)

    if connection:
        if isinstance(connection, tapestry.SFTPConnection):
            errors.append("[ERROR] sftp_connect returned a connection object, "
                          "which should not be the case.")
        else:
            errors.append("[ERROR] sftp_connect returned a connection that is"
                          " not an instance of the SFTPConnection class")

    return errors
Example #6
def test_sftp_connect_invalid(config):
    """A very simplistic test that validates a known-good set of SFTP
    information can be used to connect to a given SFTP endpoint and return a
    valid connection object. The errors returned by sftp_connect are added to
    the logger output, as is an error if the returned object is not an
    instance of the expected class. An improper trust value is supplied in
    order to ensure untrusted servers are connected.

    :param config:
    :return:
    """
    errors = []

    ns = tapestry.Namespace()
    ns.currentOS = platform.system()
    ns.addrNet = "downloads.skillport.com"
    ns.portNet = 22
    ns.nameNet = config["sftp_uid"]
    ns.network_credential_value = config["sftp_credential"]
    ns.network_credential_type = "passphrase"
    ns.network_credential_pass = False  # We're just testing passwords here

    connection, resp_errors = tapestry.sftp_connect(ns)

    if connection:
        if isinstance(connection, tapestry.SFTPConnection):
            errors.append("[ERROR] sftp_connect returned a connection object, "
                          "which should not be the case.")
        else:
            errors.append("[ERROR] sftp_connect returned a connection that is"
                          " not an instance of the SFTPConnection class")

    return errors
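
All of the connection tests read the same handful of keys from the shared config mapping. The snippet below shows how one of them might be invoked standalone; the host, user and credential values are placeholders, and running a test this way (outside the project's test_case harness) is only for illustration.

# Illustrative only: placeholder values for the config keys these tests read.
config = {
    "sftp_id": "sftp.example.org:22",
    "sftp_uid": "testuser",
    "sftp_credential": "correct-horse-battery-staple",
}

for failure in test_sftp_connect(config):
    print(failure)  # an empty errors list means the test passed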
Example #7
def test_verify_invalid_block(config):
    """A highly simplified test for the verify_blocks functionality that relies
        on the existance of a badly-signed file to check. Due to the nature of the
        verify_blocks function this requires human intervention - future work will
        be to include a bypass method to facilitate this test.

        :param config:
        :return:
        """

    errors = []
    ns = tapestry.Namespace()
    ns.workDir = os.path.join(config["path_config"], "test articles")

    results = tapestry.verify_blocks(ns,
                                     gpg_agent=gnupg.GPG(verbose=True),
                                     testing=True)

    if len(results) == 2:
        errors.append(
            "[ERROR] The verify_blocks function accepted this invalid signature."
        )
    elif len(results) == 1:
        pass
    else:
        errors.append(
            "[ERROR] verify_blocks returned an unexpected number of items. See response."
        )
        errors.append("Response: %s" % results)

    return errors
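
verify_blocks itself is not shown here; the test only cares about how many block paths come back as valid. Under the assumption that each .tap block is checked against a detached GPG signature, a per-block check with python-gnupg could look like the sketch below - the function name, file layout and signature style are assumptions, not the project's actual code.

import gnupg

# Hypothetical sketch of a per-block signature check; not the actual
# tapestry.verify_blocks implementation.
def is_block_valid(path_signature, path_block, gpg_agent=None):
    gpg_agent = gpg_agent or gnupg.GPG()
    with open(path_signature, "rb") as sig:
        # verify_file checks a detached signature against the named data file
        verified = gpg_agent.verify_file(sig, data_filename=path_block)
    return bool(verified)  # falsy when the signature is bad or cannot be checked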
Example #8
def test_sftp_fetch(config):
    """A simple test to retreive a test file known to exist on the SFTP server,
     and place it into path_temp. Tests for success by checking that the file
    was actually placed.

    :param config:
    :return:
    """
    errors = []
    ns = tapestry.Namespace()
    ns.currentOS = platform.system()
    ns.addrNet, ns.portNet = config["sftp_id"].split(":")
    ns.nameNet = config["sftp_uid"]
    ns.network_credential_value = config["sftp_credential"]
    ns.network_credential_type = "passphrase"
    ns.network_credential_pass = False  # We're just testing passwords here

    connection, failure = tapestry.sftp_connect(ns)

    if not connection:
        errors.append(
            "[ERROR] Connection attempt failed - did the previous test succeed?"
        )
        return errors

    raised = tapestry.sftp_fetch(connection, config["sftp_rootpath"],
                                 "control-file.tap", config["path_temp"])

    if raised:
        errors.append("[ERROR] Raised: %s" % raised)
    else:
        for root, dirs, found in os.walk(config["path_temp"]):
            if "control-file.tap" not in found:
                errors.append(
                    "[ERROR] The find operation returned the following "
                    "list of items, but not the target item:")
                errors.append(str(found))

    return errors
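
tapestry.sftp_fetch apparently returns None on success and an error string (beginning "Couldn't retrieve", per Example #9) on failure. A minimal sketch with that shape, assuming a pysftp.Connection and that os is imported as in the tests above, might be:

# Hypothetical sketch matching the single "raised" return value used above;
# not the actual tapestry.sftp_fetch implementation.
def sftp_fetch_sketch(connection, remote_dir, filename, local_dir):
    try:
        with connection.cd(remote_dir):
            connection.get(filename, os.path.join(local_dir, filename))
        return None
    except IOError as err:
        return "Couldn't retrieve %s: %s" % (filename, err)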
Example #9
def test_sftp_fetch(config):
    """A simple test to retreive a test file known to exist on the SFTP server,
     and place it into path_temp. Tests for success by checking that the file
    was actually placed.

    :param config:
    :return:
    """
    errors = []

    ns = tapestry.Namespace()
    ns.currentOS = platform.system()
    ns.addrNet, ns.portNet = config["sftp_id"].split(":")
    ns.nameNet = config["sftp_uid"]
    ns.network_credential_value = config["sftp_credential"]
    ns.network_credential_type = "passphrase"
    ns.network_credential_pass = False  # We're just testing passwords here

    connection, failure = tapestry.sftp_connect(ns)

    if not connection:
        errors.append(
            "[ERROR] Connection attempt failed - did the previous test succeed?"
        )
        return errors

    raised = tapestry.sftp_fetch(connection, config["sftp_rootpath"],
                                 "not_real_file.txt", config["path_temp"])

    if raised is not None:
        if not raised.startswith("Couldn't retrieve"):
            errors.append("[ERROR] Raised: %s" % raised)
    else:
        for root, dirs, found in os.walk(config["path_temp"]):
            if "not_real_file.txt" in found:
                errors.append("[ERROR] The find operation somehow returned a "
                              "file. How could this happen?")

    return errors
Example #10
def test_sftp_place(config):
    """A quick test that attempts to place a copy of the test article
    "control-config.cfg" onto the SFTP server, in a location without write
    permissions.

    :param config:
    :return:
    """
    errors = []
    tgt_file = os.path.join(
        config["path_config"],
        os.path.join("test articles", "control-config.cfg"))

    ns = tapestry.Namespace()
    ns.currentOS = platform.system()
    ns.addrNet, ns.portNet = config["sftp_id"].split(":")
    ns.nameNet = config["sftp_uid"]
    ns.network_credential_value = config["sftp_credential"]
    ns.network_credential_type = "passphrase"
    ns.network_credential_pass = False  # We're just testing passwords here

    connection, failure = tapestry.sftp_connect(ns)

    if not connection:
        errors.append(
            "[ERROR] Connection attempt failed - did the previous test succeed?"
        )
        return errors

    placed, raised = tapestry.sftp_place(connection, tgt_file, "unwriteable")

    if placed:
        errors.append(
            "[ERROR] The file was placed when it should not have been. What went wrong?"
        )

    return errors
Example #11
def test_verify_blocks(config):
    """A highly simplified test for the verify_blocks functionality that relies
    on the same files as test_media_retrieve_files. Due to the nature of the
    verify_blocks function this requires human intervention - future work will
    be to include a bypass method to facilitate this test.

    :param config:
    :return:
    """

    errors = []
    ns = tapestry.Namespace()
    ns.workDir = os.path.join(config["path_config"], "test articles")

    results = tapestry.verify_blocks(ns,
                                     gpg_agent=gnupg.GPG(verbose=True),
                                     testing=True)

    if len(results) == 1:
        if results[0] == os.path.join(ns.workDir, "testtap.tap"):
            pass  # doing this here leaves len(errors)=0, which is the test_case pass condition.
        else:
            errors.append(
                "[ERROR] An unexpected value was returned for the validity list: %s"
                % results[0])
    elif len(results) == 0:
        errors.append(
            "[ERROR] The verify_blocks function refused to validate the sample file."
            "Verify that the blocks are validly signed and try again.")
    else:
        errors.append(
            "[ERROR] verify_blocks returned an unexpected number of items. See response."
        )
        errors.append("Response: %s" % results)

    return errors
Example #12
def test_parse_config(ns):
    """Loads an expected control config file, running it through (parse_config),
    then performs validation against the resulting NS object.

    :param ns: the config argument for test_case
    :return:
    """
    errors = []
    arg_ns = tapestry.Namespace()
    arg_ns.config_path = os.path.join(
        ns["path_config"], os.path.join("test articles", "control-config.cfg"))
    parsed_conf = tapestry.parse_config(arg_ns)

    # We know the state of the control config, so we can use a static dict to validate.
    dict_control = {
        "activeFP": "AAAA-AAAA-AAAA-AAAA-AAAA",
        "fp": "AAAA-AAAA-AAAA-AAAA-AAAA",
        "signing": True,
        "sigFP": "CCCC-CCCC-CCCC-CCCC-CCCC",
        "keysize": 2048,
        "compress": True,
        "compressLevel": 9,
        "step": "none",
        "sumJobs": 0,
        "jobsDone": 0,
        "modeNetwork": "sftp",
        "addrNet": "240.0.0.0",
        "portNet": 22,
        "nameNet": "amartian",
        "dirNet": "olympus mons/the face",
        "retainLocal": True,
        "block_size_raw": int(64 * 2**20),
        "compid": "HAL 9000",
        "recovery_path": "The Obelisk",
        "uid": "anothermartian",
        "drop": "area51",
        "numConsumers": os.cpu_count(),
        "currentOS": platform.system(),
        "network_credential_type": "SFTP",
        "network_credential_value": "/",
        "network_credential_pass": False,
        "do_validation": True
    }

    # There are, however, dynamic constraints we have to test for
    if dict_control["currentOS"] == "Linux":
        catpaths = {"a": "nix_a", "b": "nix_b"}
        os_args = {
            "workDir": "/tmp/Tapestry/",
            "desktop": "/home/anothermartian/Desktop",
            "gpgDir": "/home/anothermartian/.gnupg",
            "categories_default": ["a"],
            "categories_inclusive": ["b"],
            "category_paths": catpaths
        }
        dict_control.update(os_args)
    elif dict_control["currentOS"] == "Windows":
        catpaths = {"a": "win_a", "b": "win_b"}
        os_args = {
            "workDir":
            "C:\\users\\anothermartian\\appdata\\local\\temp\\tapestry",
            "desktop": "C:\\Users\\anothermartian\\Desktop",
            "gpgDir": "C:\\Users\\anothermartian\\appdata\\roaming\\gnupg",
            "categories_default": ["a"],
            "categories_inclusive": ["b"],
            "category_paths": catpaths
        }
        dict_control.update(os_args)
    else:
        errors.append("[ERROR] Received unexpected value for for currentOS - "
                      "Are you on a supported platform?")
        return errors

    # Now, let's do this iteratively to make things simpler.
    dict_failures = {}
    for key in dict_control:
        try:
            result = parsed_conf.__getattribute__(key)
            if result != dict_control[key]:
                dict_failures.update({key: "did not have the expected value."})
        except AttributeError:
            dict_failures.update({key: "was not assigned."})

    # Finally, print the failures or passage
    if len(dict_failures) == 0:
        pass  # doing this here leaves len(errors)=0, which is the test_case pass condition.
    else:
        errors.append(
            "[FAIL] The following errors were detected in the return:")
        for key in dict_failures:
            errors.append("[ERROR] %s %s" % (key, dict_failures[key]))

    return errors
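
parse_config evidently maps options from control-config.cfg onto attributes of a Namespace object, which is what the comparison loop above exercises. The sketch below shows the general configparser pattern such a function might use; the section and option names ("Network", "port", "Environment Variables", "signing") are hypothetical, since the real config layout is not visible in these examples.

import configparser

# Illustrative only: the real section/option names in control-config.cfg are
# not shown in these tests, so the ones used here are hypothetical.
def parse_config_sketch(arg_ns):
    parser = configparser.ConfigParser()
    parser.read(arg_ns.config_path)
    arg_ns.portNet = parser.getint("Network", "port", fallback=22)
    arg_ns.signing = parser.getboolean("Environment Variables", "signing",
                                       fallback=False)
    return arg_ns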
Example #13
def test_build_ops_list(config):
    """Bundled set of 5 tests which confirm operation of
    tapestry.build_ops_list(). Relies on just the config and log shared by all
    tests. Validates inclusive/exclusive behaviour, index completion, and both
    the set of keys used in each item's record in the recovery index as well
    as the validity of those values. Has an overall pass/fail flag as well.

    :param config:
    :return:
    """
    namespace = tapestry.Namespace()
    # We need a dummy namespace object. Not the whole thing, just enough.
    namespace.categories_default = ["a"]
    namespace.categories_inclusive = ["b"]
    namespace.inc = False
    namespace.category_paths = {
        "a": config["path_temp"],
        "b": config["path_config"]
    }
    namespace.block_size_raw = 30000000  # Don't care at all.
    errors = []
    # This test is a special case where someone linked multiple tests into a
    # single test object. Therefore, rather than relying on test_case's
    # traditional reporting structure, we're flinging all logs into errors.

    # Argue to build ops list
    test_ops_reg = tapestry.build_ops_list(namespace)
    # and again with Inc.
    namespace.inc = True
    test_ops_inc = tapestry.build_ops_list(namespace)

    # validate the ops lists generated.
    # Assume invalid by default
    validity = {
        "count_short": False,
        "count_long": False,
        "all_keys": False,
        "test_hash": False,
        "test_size": False
    }
    # get a count of all items in directory "a"
    count_short = 0
    foo, bar, file = [None, None, None]  # satisfy the linter.
    for foo, bar, files in os.walk(namespace.category_paths["a"]):
        for file in files:
            count_short += 1
    del foo, bar, file
    # get count of items in "b"
    count_long = 0
    for foo, bar, files in os.walk(namespace.category_paths["b"]):
        for file in files:
            count_long += 1
    del foo, bar, file
    # is len test_ops_reg = len A?
    if len(test_ops_reg) == count_short:
        errors.append(
            "[PASS] The overall count of a non-inclusive run matched the expected value."
        )
        validity["count_short"] = True
    else:
        errors.append(
            "[FAIL] The overall count of a non-inclusive run did not match what was on disk."
        )
    # is len test_ops_inc = len A+B?
    if len(test_ops_inc) == (count_short + count_long):
        errors.append(
            "[PASS] The overall count of an inclusive run matched the expected value."
        )
        validity["count_long"] = True
    else:
        errors.append(
            "[FAIL] The overall count of an inclusive run did not match the expected value."
            "This likely indicates a failure to add the inclusive directories to the "
            "main run list.")

    del test_ops_inc  # We don't need this anymore and they can be weighty.
    # get first record.
    try:
        sample_item = test_ops_reg.popitem()[
            1]  # Get just the record dictionary from one (path, record) pair
    except KeyError:
        errors.append("Couldn't get a sample item - the ops list is empty!")
        return errors  # we can jump out of the function here, nothing else will pass.
    # These are all the keys expected in this index:
    expected = ["fname", "sha256", "category", "fpath", "fsize"]
    failed_keys = False  # For now.
    for key in expected:
        if key not in sample_item.keys():
            errors.append(
                "[FAIL] Key `%s` is missing from the sample item. This won't likely recover."
                % str(key))
            failed_keys = True

    if not failed_keys:
        validity["all_keys"] = True
        errors.append(
            "[PASS] All keys were found in the sample item as expected. This would recover."
        )

    # figure out where it is in reality.
    if not failed_keys:  # We need to have all the keys for this test.
        test_cat = sample_item["category"]
        path_origin = os.path.join(namespace.category_paths[test_cat],
                                   sample_item["fpath"])
        test_size = os.path.getsize(path_origin)
        test_hash = hashlib.sha256()
        with open(path_origin, "rb") as f:
            test_hash.update(f.read())
        if test_hash.hexdigest() == sample_item["sha256"]:
            errors.append(
                "[PASS] The item referred to as a sample has the expected SHA256 Hash."
            )
            validity["test_hash"] = True
        else:
            errors.append(
                "[FAIL] The item referred to has an unexpected SHA256 hash. Bad pathing?"
            )
            errors.append("Actual Value: %s" % test_hash.hexdigest())
            errors.append("Expected Value: %s" % sample_item["sha256"])
        if test_size == sample_item["fsize"]:
            errors.append(
                "[PASS] The item referred to as a sample has the expected overall size on disk."
            )
            validity["test_size"] = True
        else:
            errors.append(
                "[FAIL] The item referred to has a sample has an unexpected size. Bad pathing?"
            )

    # Finally, did everything pass?
    count_failed = 0
    for each in validity:
        if not validity[each]:
            count_failed += 1
    errors.append("\n")
    if count_failed <= 0:
        errors.append(
            "[OVERALL PASS] All tests that are part of this set passed.")
    else:
        errors.append(
            "[OVERALL FAIL] %s tests failed, therefore this set is considered failed."
            % count_failed)

    return errors
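
The test above checks that every record in the ops list carries the keys fname, sha256, category, fpath and fsize, and that the hash and size match the file on disk. A minimal sketch of how one such record could be assembled is shown below; the function name and the exact key-to-value mapping (beyond what the test asserts) are assumptions rather than the real tapestry.build_ops_list.

import hashlib
import os

# Hypothetical sketch of assembling one ops-list record with the keys and
# values this test checks; not the actual tapestry.build_ops_list code.
def make_record(category, category_root, relative_path):
    absolute = os.path.join(category_root, relative_path)
    digest = hashlib.sha256()
    with open(absolute, "rb") as contents:
        digest.update(contents.read())
    return {
        "fname": os.path.basename(relative_path),
        "sha256": digest.hexdigest(),
        "category": category,
        "fpath": relative_path,
        "fsize": os.path.getsize(absolute),
    }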