def test_image_update_broken_kernel(
    self,
    bitbake_variables,
    connection,
    latest_mender_image,
    http_server,
    board_type,
    use_s3,
    s3_address,
):
    """Test that an update with a broken kernel rolls back correctly.

    This is distinct from the test_broken_image_update test, which
    corrupts the filesystem. When grub.d integration is enabled, these
    two scenarios trigger very different code paths.
    """
    # NOTE: the original computed file_flag and MENDER_DEVICE_TYPE here but
    # never used either; the dead locals have been removed.
    (active_before, passive_before) = determine_active_passive_part(
        bitbake_variables, connection)

    temp_artifact = "temporary_artifact.mender"
    try:
        shutil.copyfile(latest_mender_image, temp_artifact)
        # Assume that artifact has the same kernel names as the currently
        # running image.
        kernels = connection.run(
            "find /boot/ -maxdepth 1 -name '*linu[xz]*' -o -name '*Image'"
        ).stdout.split()
        for kernel in kernels:
            # Inefficient, but there shouldn't be too many kernels.
            subprocess.check_call(
                ["mender-artifact", "rm", f"{temp_artifact}:{kernel}"])

        Helpers.install_update(
            temp_artifact,
            connection,
            http_server,
            board_type,
            use_s3,
            s3_address,
        )

        reboot(connection)

        # Now qemu is auto-rebooted twice; once to boot the dummy image,
        # where it fails, and the boot loader auto-reboots a second time
        # into the original partition.
        output = run_after_connect("mount", connection)

        # The update should have reverted to the original active partition,
        # since the kernel was missing.
        assert output.find(active_before) >= 0
        assert output.find(passive_before) < 0
    finally:
        os.remove(temp_artifact)
def testAwsAccountId(self, patched_boto):
    """Tests the output of Helpers.aws_account_id.

    `patched_boto` is a mock standing in for the boto client factory
    (presumably boto3.client — it is asserted to be called with 'sts');
    get_caller_identity is stubbed so no real AWS call is made.
    """
    patched_boto.return_value.get_caller_identity.return_value = {
        'Arn': 'arn:aws:iam::123456654321:user/dliggat'
    }
    # Query for the account id; first to generate, second for a cache hit.
    self.assertEqual(Helpers.aws_account_id(), '123456654321')
    self.assertEqual(Helpers.aws_account_id(), '123456654321')

    # We should only ever call the boto code once; the account value should
    # be memoized in the class after the initial invocation.
    patched_boto.assert_called_once_with('sts')
    patched_boto.return_value.get_caller_identity.assert_called_once_with()
def test_broken_image_update(self, bitbake_variables, connection):
    """Install a deliberately bogus (all-zero) rootfs artifact and verify
    the device rolls back to the original active partition after reboot."""
    file_flag = Helpers.get_file_flag(bitbake_variables)
    install_flag = Helpers.get_install_flag(connection)
    (active_before, passive_before) = determine_active_passive_part(
        bitbake_variables, connection)
    image_type = bitbake_variables["MENDER_DEVICE_TYPE"]

    try:
        # Make a dummy/broken update: a sparse 16MiB file of zeroes.
        retcode = subprocess.call(
            "dd if=/dev/zero of=image.dat bs=1M count=0 seek=16", shell=True)
        if retcode != 0:
            raise Exception("error creating dummy image")
        # Build the command string once so the error message below cannot
        # drift from what was actually executed (it was duplicated before).
        artifact_cmd = (
            "mender-artifact write rootfs-image -t %s -n test-update %s image.dat -o image.mender"
            % (image_type, file_flag))
        retcode = subprocess.call(artifact_cmd, shell=True)
        if retcode != 0:
            raise Exception(
                "error writing mender artifact using command: %s" % artifact_cmd)

        put_no_sftp("image.mender", connection, remote="/var/tmp/image.mender")
        connection.run("mender %s /var/tmp/image.mender" % install_flag)
        reboot(connection)

        # Now qemu is auto-rebooted twice; once to boot the dummy image,
        # where it fails, and uboot auto-reboots a second time into the
        # original partition.
        output = run_after_connect("mount", connection)

        # The update should have reverted to the original active partition,
        # since the image was bogus.
        assert output.find(active_before) >= 0
        assert output.find(passive_before) < 0
    finally:
        # Cleanup.
        if os.path.exists("image.mender"):
            os.remove("image.mender")
        if os.path.exists("image.dat"):
            os.remove("image.dat")
def testAwsAccountId(self, patched_boto):
    """Tests the output of Helpers.aws_account_id."""
    patched_boto.return_value.describe_security_groups.return_value = {
        'SecurityGroups': [{
            'OwnerId': '123456654321'
        }]
    }
    # Query for the account id; first to generate, second for a cache hit.
    # NOTE(review): the stubbed OwnerId is the *string* '123456654321' but
    # these assertions compare against an int — confirm that
    # Helpers.aws_account_id() casts the value to int, otherwise the
    # assertions cannot pass (the STS variant of this test compares strings).
    self.assertEqual(Helpers.aws_account_id(), 123456654321)
    self.assertEqual(Helpers.aws_account_id(), 123456654321)

    # We should only ever call the boto code once; the account value should
    # be memoized in the class after the initial invocation.
    patched_boto.assert_called_once_with('ec2')
    patched_boto.return_value.describe_security_groups.assert_called_once_with(
        GroupNames=['default'])
def expand(self):
    """Expand this MCTS node: convert every simulated action into a child
    MCTNode (keyed by the child's id) whose prior probability is derived
    from the action's distance score relative to the total score of all
    simulated actions."""
    actions_total_score = Helpers.actions_score(self.simulations)
    for action in self.simulations:
        # Apply the action to obtain the successor board states.
        action_player_state, action_opponent_state = action.build_states(
            self.player_state, self.opponent_state)
        child = MCTNode(
            action_player_state,
            action_opponent_state,
            action.piece_id,
            action.action_queue,
            not self.opponent_playing,  # the turn passes to the other player
            self,
            # NOTE(review): this passes `self.opponent_playing` while
            # initial_expansion() passes `not self.opponent_playing` for the
            # same argument — confirm which perspective is intended here.
            Helpers.calculate_probability(
                action.distance_score,
                actions_total_score,
                self.opponent_playing
            )
        )
        self.children[child.id] = child
def initial_expansion(self):
    """Perform the first expansion from the root node: simulate the
    possible actions of every piece in the player's id table, then build
    one child MCTNode per simulated action with its prior probability."""
    print("------------------ INITIAL EXPANSION ------------------")
    # Seed self.simulations: for each piece id, explore moves starting
    # from its current position (original position == start position).
    for key, value in self.player_id_table.items():
        self.simulate(key, value, value)
    actions_total_score = Helpers.actions_score(self.simulations)
    print(f'Node probable actions score: {actions_total_score}')
    print(f'{len(self.simulations)} actions simulated, building nodes...')
    for action in self.simulations:
        action_player_state, action_opponent_state = action.build_states(
            self.player_state, self.opponent_state)
        child = MCTNode(
            action_player_state,
            action_opponent_state,
            action.piece_id,
            action.action_queue,
            not self.opponent_playing,
            self,
            Helpers.calculate_probability(
                action.distance_score,
                actions_total_score,
                not self.opponent_playing
            )
        )
        self.children[child.id] = child
    print("------------------ EXPANSION COMPLETE ------------------")
def test_too_big_image_update(self, bitbake_variables, connection):
    """Attempt to install an artifact far larger than the partition and
    verify the client reports an error instead of succeeding."""
    file_flag = Helpers.get_file_flag(bitbake_variables)
    install_flag = Helpers.get_install_flag(connection)
    image_type = bitbake_variables["MENDER_DEVICE_TYPE"]

    try:
        # Make a too big update: a sparse 4GiB file of zeroes. Use
        # check_call so a tooling failure aborts the test immediately
        # (the original silently ignored these exit codes).
        subprocess.check_call(
            "dd if=/dev/zero of=image.dat bs=1M count=0 seek=4096", shell=True)
        subprocess.check_call(
            "mender-artifact write rootfs-image -t %s -n test-update-too-big %s image.dat -o image-too-big.mender"
            % (image_type, file_flag),
            shell=True,
        )
        put_no_sftp(
            "image-too-big.mender",
            connection,
            remote="/var/tmp/image-too-big.mender",
        )
        # BUGFIX: the exit-code echo used single quotes ('ret_code=$?'),
        # so the remote shell printed the literal string "ret_code=$?" and
        # the "ret_code=0" assertion below was vacuously true. Double
        # quotes let $? expand to the actual mender exit code.
        output = connection.run(
            'mender %s /var/tmp/image-too-big.mender ; echo "ret_code=$?"'
            % install_flag)
        assert any([
            "no space left on device" in out
            for out in [output.stderr, output.stdout]
        ]), output
        assert "ret_code=0" not in output.stdout, output
    finally:
        # Cleanup.
        if os.path.exists("image-too-big.mender"):
            os.remove("image-too-big.mender")
        if os.path.exists("image.dat"):
            os.remove("image.dat")
def do_install_mender_binary_delta(
    self,
    request,
    prepared_test_build,
    bitbake_variables,
    bitbake_image,
    connection,
    http_server,
    board_type,
    use_s3,
    s3_address,
):
    """Build an image with the mender-binary-delta package enabled, deploy
    it to the device, reboot into it and commit the update.

    Returns the path of the artifact that was installed, so callers can
    later generate a delta against it.
    """
    commercial_layer = [
        'BBLAYERS_append = " %s/../meta-mender-commercial"'
        % bitbake_variables["LAYERDIR_MENDER"]
    ]
    build_image(
        prepared_test_build["build_dir"],
        prepared_test_build["bitbake_corebase"],
        bitbake_image,
        ['IMAGE_INSTALL_append = " mender-binary-delta"'],
        commercial_layer,
    )

    artifact = latest_build_artifact(
        request, prepared_test_build["build_dir"], "core-image*.mender")

    Helpers.install_update(
        artifact, connection, http_server, board_type, use_s3, s3_address)

    # Boot into the freshly installed partition and make it permanent.
    reboot(connection)
    run_after_connect("true", connection)
    connection.run("mender -commit")

    return artifact
def handler(event, context):
    """Entry point for the Lambda function."""
    logger = setup_logging(context.aws_request_id)
    config = configuration()

    # Used to differentiate local vs Lambda.
    is_local = os.getenv('IS_LOCAL')
    if is_local:
        logger.debug('$IS_LOCAL set; likely running in development')
    else:
        logger.debug('No $IS_LOCAL set; likely running in Lambda')

    logger.info('This is being invoked from AWS account: {0}'.format(
        Helpers.aws_account_id()))
    return {'Success': True}
def test_perform_update(
    self,
    request,
    setup_board,
    prepared_test_build,
    bitbake_variables,
    bitbake_image,
    connection,
    http_server,
    board_type,
    use_s3,
    s3_address,
):
    """Perform a delta update.

    Builds a base image with mender-binary-delta, builds a second image
    with extra software, generates a delta artifact between the two with
    mender-binary-delta-generator, installs it, and verifies the device
    swapped active/passive partitions before committing.
    """
    # Delta updates require a read-only root filesystem.
    if ("read-only-rootfs"
            not in bitbake_variables["IMAGE_FEATURES"].strip().split()):
        pytest.skip("Only works when using read-only-rootfs IMAGE_FEATURE")

    if distutils.spawn.find_executable(
            "mender-binary-delta-generator") is None:
        pytest.fail("mender-binary-delta-generator not found in PATH")

    # Install the baseline image; its artifact becomes the delta "from".
    built_artifact = self.do_install_mender_binary_delta(
        request,
        prepared_test_build,
        bitbake_variables,
        bitbake_image,
        connection,
        http_server,
        board_type,
        use_s3,
        s3_address,
    )

    with make_tempdir() as tmpdir:
        # Copy previous build
        artifact_from = os.path.join(tmpdir, "artifact_from.mender")
        shutil.copyfile(built_artifact, artifact_from)

        # Create new image installing some extra software
        build_image(
            prepared_test_build["build_dir"],
            prepared_test_build["bitbake_corebase"],
            bitbake_image,
            ['IMAGE_INSTALL_append = " nano"'],
        )
        built_artifact = latest_build_artifact(
            request, prepared_test_build["build_dir"], "core-image*.mender")
        artifact_to = os.path.join(tmpdir, "artifact_to.mender")
        shutil.copyfile(built_artifact, artifact_to)

        # Create delta Artifact using mender-binary-delta-generator
        artifact_delta = os.path.join(tmpdir, "artifact_delta.mender")
        subprocess.check_call(
            f"mender-binary-delta-generator -n v2.0-deltafrom-v1.0 {artifact_from} {artifact_to} -o {artifact_delta}",
            shell=True,
        )

        # Verbose provides/depends of the different Artifacts and the client (when supported)
        connection.run("mender show-provides", warn=True)
        subprocess.check_call(
            "mender-artifact read %s" % artifact_from,
            shell=True,
        )
        subprocess.check_call(
            "mender-artifact read %s" % artifact_to,
            shell=True,
        )
        subprocess.check_call(
            "mender-artifact read %s" % artifact_delta,
            shell=True,
        )

        # Install Artifact, verify partitions and commit
        (active, passive) = determine_active_passive_part(bitbake_variables,
                                                          connection)
        Helpers.install_update(artifact_delta, connection, http_server,
                               board_type, use_s3, s3_address)
        reboot(connection)
        run_after_connect("true", connection)
        (new_active, new_passive) = determine_active_passive_part(
            bitbake_variables, connection)
        # A successful delta install boots into the formerly-passive part.
        assert new_active == passive
        assert new_passive == active
        connection.run("mender -commit")
def __init__(self, base_url=None):
    """Retrieve the VPN logical-server list (via self.get_server_list())
    and validate it against the JSON schema in
    schemas/logical_servers.json.

    :param base_url: optional API base URL override; None keeps the default.
    """
    self.base_url = base_url
    # Endpoint path for the logical-servers listing.
    self.logicals_endpoint = VpnEndpoints().VPN_LOGICALS
    self.logical_servers = self.get_server_list()
    Helpers().evaluate_schema(self.logical_servers,
                              'schemas/logical_servers.json')
def invoked_function_arn(self):
    """Simulate the Lambda ARN that comes into the context object. """
    account = Helpers.aws_account_id()
    return 'arn:aws:lambda:us-east-1:{0}:function:func-name'.format(account)
def test_uboot_mender_saveenv_canary(self, bitbake_variables, connection):
    """Tests that the mender_saveenv_canary works correctly, which tests
    that Mender will not proceed unless the U-Boot boot loader has saved
    the environment."""

    file_flag = Helpers.get_file_flag(bitbake_variables)
    image_type = bitbake_variables["MACHINE"]

    try:
        # Make a dummy/broken update
        subprocess.call(
            "dd if=/dev/zero of=image.dat bs=1M count=0 seek=16", shell=True)
        subprocess.call(
            "mender-artifact write rootfs-image -t %s -n test-update %s image.dat -o image.mender"
            % (image_type, file_flag),
            shell=True,
        )
        put_no_sftp("image.mender", connection, remote="/var/tmp/image.mender")

        # /etc/fw_env.config lists the redundant U-Boot environment copies
        # as "<device> <offset> <size>" entries, one per line; exactly two
        # copies are expected here.
        env_conf = connection.run("cat /etc/fw_env.config").stdout
        env_conf_lines = env_conf.rstrip("\n\r").split("\n")
        assert len(env_conf_lines) == 2
        # Back up both raw environment copies so they can be restored in
        # the inner finally block below.
        for i in [0, 1]:
            entry = env_conf_lines[i].split()
            connection.run(
                "dd if=%s skip=%d bs=%d count=1 iflag=skip_bytes > /data/old_env%d"
                % (entry[0], int(entry[1], 0), int(entry[2], 0), i))

        try:
            bootenv_print, bootenv_set = bootenv_tools(connection)

            # Try to manually remove the canary first.
            connection.run(f"{bootenv_set} mender_saveenv_canary")
            result = connection.run("mender install /var/tmp/image.mender",
                                    warn=True)
            assert (result.return_code !=
                    0), "Update succeeded when canary was not present!"
            output = connection.run(
                f"{bootenv_print} upgrade_available").stdout.rstrip("\n")
            # Upgrade should not have been triggered.
            assert output == "upgrade_available=0"

            # Then zero the environment, causing the libubootenv to fail
            # completely.
            for i in [0, 1]:
                entry = env_conf_lines[i].split()
                connection.run(
                    "dd if=/dev/zero of=%s seek=%d bs=%d count=1 oflag=seek_bytes"
                    % (entry[0], int(entry[1], 0), int(entry[2], 0)))
            result = connection.run("mender install /var/tmp/image.mender",
                                    warn=True)
            assert (result.return_code !=
                    0), "Update succeeded when canary was not present!"
        finally:
            # Restore environment to what it was.
            for i in [0, 1]:
                entry = env_conf_lines[i].split()
                connection.run(
                    "dd of=%s seek=%d bs=%d count=1 oflag=seek_bytes < /data/old_env%d"
                    % (entry[0], int(entry[1], 0), int(entry[2], 0), i))
                connection.run("rm -f /data/old_env%d" % i)
    finally:
        # Cleanup.
        os.remove("image.mender")
        os.remove("image.dat")
def test_signed_updates(self, sig_case, bitbake_variables, connection):
    """Test various combinations of signed and unsigned, present and non-
    present verification keys.

    `sig_case` parameterizes the scenario (signature present/valid, key
    present, checksum/header-checksum valid, expected success). Instead of
    rebooting, the test writes known content to the passive partition and
    checks whether the update payload ends up there.
    """

    file_flag = Helpers.get_file_flag(bitbake_variables)

    # mmc mount points are named: /dev/mmcblk0p1
    # ubi volumes are named: ubi0_1
    (active, passive) = determine_active_passive_part(bitbake_variables,
                                                      connection)
    if passive.startswith("ubi"):
        passive = "/dev/" + passive

    # Generate "update" appropriate for this test case.
    # Cheat a little. Instead of spending a lot of time on a lot of reboots,
    # just verify that the contents of the update are correct.
    new_content = sig_case.label
    with open("image.dat", "w") as fd:
        fd.write(new_content)
        # Write some extra data just to make sure the update is big enough
        # to be written even if the checksum is wrong. If it's too small it
        # may fail before it has a chance to be written.
        fd.write("\x00" * (1048576 * 8))

    artifact_args = ""

    # Generate artifact with or without signature.
    if sig_case.signature:
        artifact_args += " -k %s" % signing_key(sig_case.key_type).private

    # Generate artifact with specific version. None means default.
    if sig_case.artifact_version is not None:
        artifact_args += " -v %d" % sig_case.artifact_version

    if sig_case.key_type:
        sig_key = signing_key(sig_case.key_type)
    else:
        sig_key = None

    image_type = bitbake_variables["MENDER_DEVICE_TYPE"]

    subprocess.check_call(
        "mender-artifact write rootfs-image %s -t %s -n test-update %s image.dat -o image.mender"
        % (artifact_args, image_type, file_flag),
        shell=True,
    )

    # If instructed to, corrupt the signature and/or checksum.
    if ((sig_case.signature and not sig_case.signature_ok)
            or not sig_case.checksum_ok or not sig_case.header_checksum_ok):
        # Record the artifact's member order so it can be repacked
        # identically after tampering.
        tar = subprocess.check_output(["tar", "tf", "image.mender"])
        tar_list = tar.split()
        tmpdir = tempfile.mkdtemp()
        try:
            shutil.copy("image.mender", os.path.join(tmpdir, "image.mender"))
            # Remember the current directory via an fd so we can fchdir back
            # even if the tempdir path handling goes wrong.
            cwd = os.open(".", os.O_RDONLY)
            os.chdir(tmpdir)
            try:
                tar = subprocess.check_output(["tar", "xf", "image.mender"])
                if not sig_case.signature_ok:
                    # Corrupt signature.
                    with open("manifest.sig", "r+") as fd:
                        Helpers.corrupt_middle_byte(fd)
                if not sig_case.checksum_ok:
                    os.chdir("data")
                    try:
                        data_list = subprocess.check_output(
                            ["tar", "tzf", "0000.tar.gz"])
                        data_list = data_list.split()
                        subprocess.check_call(["tar", "xzf", "0000.tar.gz"])
                        # Corrupt checksum by changing file slightly.
                        with open("image.dat", "r+") as fd:
                            Helpers.corrupt_middle_byte(fd)
                        # Pack it up again in same order.
                        os.remove("0000.tar.gz")
                        subprocess.check_call(["tar", "czf", "0000.tar.gz"] +
                                              data_list)
                        for data_file in data_list:
                            os.remove(data_file)
                    finally:
                        os.chdir("..")

                if not sig_case.header_checksum_ok:
                    data_list = subprocess.check_output(
                        ["tar", "tzf", "header.tar.gz"])
                    data_list = data_list.split()
                    subprocess.check_call(["tar", "xzf", "header.tar.gz"])
                    # Corrupt checksum by changing file slightly.
                    with open("headers/0000/files", "a") as fd:
                        # Some extra data to corrupt the header checksum,
                        # but still valid JSON.
                        fd.write(" ")
                    # Pack it up again in same order.
                    os.remove("header.tar.gz")
                    subprocess.check_call(["tar", "czf", "header.tar.gz"] +
                                          data_list)
                    for data_file in data_list:
                        os.remove(data_file)

                # Make sure we put it back in the same order.
                os.remove("image.mender")
                subprocess.check_call(["tar", "cf", "image.mender"] + tar_list)
            finally:
                os.fchdir(cwd)
                os.close(cwd)

            shutil.move(os.path.join(tmpdir, "image.mender"), "image.mender")
        finally:
            shutil.rmtree(tmpdir, ignore_errors=True)

    put_no_sftp("image.mender", connection, remote="/data/image.mender")

    # mender-convert'ed images don't have transient mender.conf
    device_has_mender_conf = (connection.run(
        "test -f /etc/mender/mender.conf", warn=True).return_code == 0)
    # mender-convert'ed images don't have this directory, but the test uses
    # it to save certificates
    connection.run("mkdir -p /data/etc/mender")

    try:
        # Get configuration from device or create an empty one
        if device_has_mender_conf:
            connection.run(
                "cp /etc/mender/mender.conf /data/etc/mender/mender.conf.bak")
            get_no_sftp("/etc/mender/mender.conf", connection)
        else:
            with open("mender.conf", "w") as fd:
                json.dump({}, fd)

        # Update key in configuration.
        with open("mender.conf") as fd:
            config = json.load(fd)
        if sig_case.key:
            config[
                "ArtifactVerifyKey"] = "/data/etc/mender/%s" % os.path.basename(
                    sig_key.public)
            put_no_sftp(
                sig_key.public,
                connection,
                remote="/data/etc/mender/%s" % os.path.basename(sig_key.public),
            )
        else:
            if config.get("ArtifactVerifyKey"):
                del config["ArtifactVerifyKey"]

        # Send new configuration to device
        with open("mender.conf", "w") as fd:
            json.dump(config, fd)
        put_no_sftp("mender.conf", connection, remote="/etc/mender/mender.conf")
        os.remove("mender.conf")

        # Start by writing known "old" content in the partition.
        old_content = "Preexisting partition content"
        if "ubi" in passive:
            # ubi volumes cannot be directly written to, we have to use
            # ubiupdatevol
            connection.run('echo "%s" | dd of=/tmp/update.tmp && '
                           "ubiupdatevol %s /tmp/update.tmp; "
                           "rm -f /tmp/update.tmp" % (old_content, passive))
        else:
            connection.run('echo "%s" | dd of=%s' % (old_content, passive))

        result = connection.run("mender install /data/image.mender", warn=True)

        if sig_case.success:
            if result.return_code != 0:
                pytest.fail(
                    "Update failed when it should have succeeded: %s, Output: %s"
                    % (sig_case.label, result))
        else:
            if result.return_code == 0:
                pytest.fail(
                    "Update succeeded when it should not have: %s, Output: %s"
                    % (sig_case.label, result))

        if sig_case.update_written:
            expected_content = new_content
        else:
            expected_content = old_content

        try:
            content = connection.run(
                "dd if=%s bs=%d count=1"
                % (passive, len(expected_content))).stdout
            assert content == expected_content, "Case: %s" % sig_case.label

        # In Fabric context, SystemExit means CalledProcessError. We should
        # not catch all exceptions, because we want to leave assertions
        # alone.
        # In Fabric2 there might be different exception thrown in that case
        # which is UnexpectedExit.
        except (SystemExit, UnexpectedExit):
            if ("mender-ubi" in bitbake_variables.get("MENDER_FEATURES",
                                                      "").split()
                    or "mender-ubi" in bitbake_variables.get(
                        "DISTRO_FEATURES", "").split()):
                # For UBI volumes specifically: The UBI_IOCVOLUP call which
                # Mender uses prior to writing the data, takes a size
                # argument, and if you don't write that amount of bytes, the
                # volume is marked corrupted as a security measure. This
                # sometimes triggers in our checksum mismatch tests, so
                # accept the volume being unreadable in that case.
                pass
            else:
                raise

    finally:
        # Reset environment to what it was.
        _, bootenv_set = bootenv_tools(connection)
        connection.run(f"{bootenv_set} mender_boot_part %s" % active[-1:])
        connection.run(f"{bootenv_set} mender_boot_part_hex %x" %
                       int(active[-1:]))
        connection.run(f"{bootenv_set} upgrade_available 0")
        if device_has_mender_conf:
            connection.run(
                "cp -L /data/etc/mender/mender.conf.bak $(realpath /etc/mender/mender.conf)"
            )
        if sig_key:
            # NOTE(review): the key was uploaded to /data/etc/mender/ above
            # but is removed from /etc/mender/ here — confirm whether this
            # is intentional (e.g. an overlay/bind mount) or a stale path.
            connection.run("rm -f /etc/mender/%s" %
                           os.path.basename(sig_key.public))
def test_network_based_image_update(
    self,
    successful_image_update_mender,
    bitbake_variables,
    connection,
    http_server,
    board_type,
    use_s3,
    s3_address,
):
    """Install a valid artifact over the network and walk the full U-Boot
    state machine: check bootcount/upgrade_available/mender_boot_part
    before reboot, after reboot, after commit, and after a final reboot."""

    (active_before, passive_before) = determine_active_passive_part(
        bitbake_variables, connection)

    Helpers.install_update(
        successful_image_update_mender,
        connection,
        http_server,
        board_type,
        use_s3,
        s3_address,
    )

    bootenv_print, _ = bootenv_tools(connection)

    # Before reboot, the bootloader must be armed to try the passive part.
    output = connection.run(f"{bootenv_print} bootcount").stdout
    assert output.rstrip("\n") == "bootcount=0"

    output = connection.run(f"{bootenv_print} upgrade_available").stdout
    assert output.rstrip("\n") == "upgrade_available=1"

    output = connection.run(f"{bootenv_print} mender_boot_part").stdout
    assert output.rstrip("\n") == "mender_boot_part=" + passive_before[-1:]

    # Delete kernel and associated files from currently running partition,
    # so that the boot will fail if U-Boot for any reason tries to grab the
    # kernel from the wrong place.
    connection.run("rm -f /boot/* || true")

    reboot(connection)

    run_after_connect("true", connection)
    (active_after, passive_after) = determine_active_passive_part(
        bitbake_variables, connection)

    # The OS should have moved to a new partition, since the image was fine.
    assert active_after == passive_before
    assert passive_after == active_before

    # Not yet committed: the bootloader has counted one boot attempt and
    # the update is still tentative.
    output = connection.run(f"{bootenv_print} bootcount").stdout
    assert output.rstrip("\n") == "bootcount=1"

    output = connection.run(f"{bootenv_print} upgrade_available").stdout
    assert output.rstrip("\n") == "upgrade_available=1"

    output = connection.run(f"{bootenv_print} mender_boot_part").stdout
    assert output.rstrip("\n") == "mender_boot_part=" + active_after[-1:]

    connection.run("mender commit")

    # After commit the update is permanent.
    output = connection.run(f"{bootenv_print} upgrade_available").stdout
    assert output.rstrip("\n") == "upgrade_available=0"

    output = connection.run(f"{bootenv_print} mender_boot_part").stdout
    assert output.rstrip("\n") == "mender_boot_part=" + active_after[-1:]

    active_before = active_after
    passive_before = passive_after

    reboot(connection)

    run_after_connect("true", connection)
    (active_after, passive_after) = determine_active_passive_part(
        bitbake_variables, connection)

    # The OS should have stayed on the same partition, since we committed.
    assert active_after == active_before
    assert passive_after == passive_before
def simulate(self, key, original_position, start_position, action_queue=None):
    """Recursively enumerate the legal actions of piece `key` starting from
    `start_position`, appending each non-repeating action to
    self.simulations as an ActionQueue.

    Rules of engagement (see diagram below):

    1. Check if a movement is inside the matrix bounds
    2. Check if an adjacent space on the allowed configurations is free:
       a. If the space is free, a jump can be made
       b. If not, check if the adjacent space parallel to the direction of
          the space we were originally checking is free and in bounds,
          if so, a jump can be made (and jumps may be chained)

    Allowed configurations:

            0                      0
       **   |                 **   |
         1  1                   0  0
       **   |   **                 |   **
    0 -- 1 -- 1 -- 1 -- 0  AND  0 -- 1 -- 0
       |   **   |                 **   |
         1  1                   0  0
       |   **                      |   **
            0                      0

    :param key: piece id being simulated.
    :param original_position: cell the piece occupied before any action.
    :param start_position: cell the current (possibly chained) action
        starts from.
    :param action_queue: cells visited so far in a chained jump; None
        starts a fresh queue. (BUGFIX: previously a mutable default
        argument `list()`.)
    """
    if action_queue is None:
        action_queue = []

    # Pick the boards from the perspective of whoever is to move.
    if not self.opponent_playing:
        current_player_state = self.player_state
        current_opponent_state = self.opponent_state
    else:
        current_player_state = self.opponent_state
        current_opponent_state = self.player_state

    row, col = start_position
    rows, cols = current_player_state.shape

    for move_key in Configuration.VALID_MOVES:
        move = Configuration.VALID_MOVES[move_key]
        short_cell, long_cell = Helpers.get_cells(row, col, rows, cols, move)

        # Occupancy of each candidate cell: None = out of bounds,
        # 0 = free, non-zero = id of the piece occupying it.
        short_value = None
        long_value = None
        if short_cell is not None:
            if current_player_state[short_cell] != 0:
                short_value = current_player_state[short_cell]
            elif current_opponent_state[short_cell] != 0:
                # BUGFIX: the original assigned the whole opponent state
                # matrix here (missing the [short_cell] subscript).
                short_value = current_opponent_state[short_cell]
            else:
                short_value = 0
        if long_cell is not None:
            if current_player_state[long_cell] != 0:
                long_value = current_player_state[long_cell]
            elif current_opponent_state[long_cell] != 0:
                long_value = current_opponent_state[long_cell]
            else:
                long_value = 0

        if short_value is not None and short_value == 0:
            # Rule 2a: step into a free adjacent cell.
            # BUGFIX: the original condition was
            # `short_value and short_value == 0`, which can never be true
            # (0 is falsy), so plain moves were never generated.
            aux_action_queue = action_queue.copy()
            aux_action_queue.append(short_cell)
            action_queue_object = ActionQueue(aux_action_queue,
                                              self.opponent_playing, key,
                                              original_position)
            if not action_queue_object.repeating:  # Pruning
                self.simulations.append(action_queue_object)
        elif short_value is not None and short_value != 0 and long_value == 0:
            # Rule 2b: jump over an occupied adjacent cell into the free
            # cell beyond it.
            aux_action_queue = action_queue.copy()
            aux_action_queue.append(long_cell)
            action_queue_object = ActionQueue(aux_action_queue,
                                              self.opponent_playing, key,
                                              original_position)
            if not action_queue_object.repeating:  # Pruning
                self.simulations.append(action_queue_object)
                # Jumps can be chained: keep exploring from the landing
                # cell with the extended queue.
                self.simulate(key, original_position, long_cell,
                              aux_action_queue)