def get_git_hash():
    """Determine git hash of project (if applicable)"""
    # Probe with "git status" first: this verifies both that git is installed
    # (FileNotFoundError) and that we are inside a git working tree
    # (CalledProcessError). Either failure means "no hash available".
    probe_command = "git status"
    try:
        check_output(probe_command.split(), universal_newlines=True,
                     stderr=STDOUT).split('\n')
    except FileNotFoundError:
        LOGGER.debug(
            "Skipping git hash lookup since git was not found on the system")
        return "none"
    except CalledProcessError as error:
        LOGGER.warning(
            "Skipping git hash lookup since command [%s] returned with error: %s",
            probe_command, error.output)
        return "none"

    # This is a git repository, so we'll look up the hash value for the HEAD commit
    command = "git rev-parse HEAD"
    try:
        lines = check_output(command.split(), universal_newlines=True,
                             stderr=STDOUT).split('\n')
    except CalledProcessError as error:
        LOGGER.warning(
            "Skipping git hash lookup since command [%s] returned with error: %s",
            command, error.output)
        return "none"

    git_hash = lines[0]
    LOGGER.debug("Git hash: %s", git_hash)
    return git_hash
def is_image_ready(self, image_name):
    """Checks if the given image is ready."""
    def _poll_image_status():
        """Returns True once the image reaches READY; raises on FAILED."""
        # pylint: disable=no-member
        result = self.gce_service.images().get(
            project=self.gce_project_id, image=image_name).execute()
        if not result or result['status'] == 'FAILED':
            raise RuntimeError(
                "Creation of image [{}] failed!".format(image_name))
        return result['status'] == 'READY'

    retrier = Retrier(_poll_image_status)
    retrier.tries = int(
        get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_COUNT'))
    retrier.delay = int(
        get_config_value('GCE_IMAGE_CREATE_COMPLETED_RETRY_DELAY'))
    LOGGER.info("Waiting for image [%s] to be ready.", image_name)
    try:
        if not retrier.execute():
            # Retries exhausted without the image ever reaching READY.
            LOGGER.warning(
                "Image [%s] was still not ready after checking [%d] times!",
                image_name, retrier.tries)
            return False
        LOGGER.info("Image [%s] is ready.", image_name)
        self.metadata.set(self.__class__.__name__, 'image_id', image_name)
        return True
    except (HttpError, RuntimeError) as exc:
        # Both API errors and FAILED-state errors are logged, not propagated.
        LOGGER.exception(exc)
        return False
def _command_key_values_to_dict(command, regex):
    """Runs a command in a subprocess, searches the output of the command for
    key/value pairs using the specified regex, and returns a dictionary
    containing those pairs"""
    pairs = {}
    LOGGER.debug("Searching for version information using command: %s", command)
    try:
        output_lines = check_output(command.split(), universal_newlines=True,
                                    stderr=STDOUT).split('\n')
    except FileNotFoundError:
        LOGGER.warning(
            "Command [%s] not found on system. Unable to check version!",
            command)
        return pairs
    except CalledProcessError as error:
        LOGGER.warning(
            "Skipping version information since command [%s] returned with error: %s",
            command, error.output)
        return pairs

    # Scan every output line; regex group(1) is the key, group(2) the value.
    for output_line in output_lines:
        LOGGER.trace("Regex search string: %s", regex)
        LOGGER.trace("Regex search line: %s", output_line)
        match = re.search(regex, output_line)
        if match:
            LOGGER.trace("Regex succeeded")
            pairs[match.group(1)] = match.group(2)
        else:
            LOGGER.trace("Regex failed")
    LOGGER.trace("Completed dictionary: %s", pairs)
    return pairs
def is_share_image_succeeded(self, share_account_ids):
    """Helper utility for share_image() that goes through the list of
    share_account_ids and confirms that the image was shared with all
    accounts. The function logs any error during its execution without
    propagating it up.

    Returns True when every account in share_account_ids currently holds
    share permission on self.image_id, False otherwise.

    Raises RuntimeError (chained to the original ServerException) when the
    image id does not exist; re-raises any other ServerException.
    """
    response_json = None
    try:
        LOGGER.info("Checking which accounts were added for sharing this image")
        response_json = self.client.describe_image_share_permission(self.image_id)
    except ServerException as exc:
        LOGGER.exception(exc)
        if exc.get_error_code() == 'InvalidImageId.NotFound' and \
           exc.get_error_msg().startswith('The specified ImageId does not exist'):
            # Chain the original exception so the root cause is preserved.
            raise RuntimeError(
                'InvalidImageId.NotFound: Check if the Image ID exists') from exc
        raise

    # Collect the account ids that currently hold share permission.
    # Aliyun returns them as strings; normalize to int for comparison.
    shared_accounts = [
        int(account['AliyunId'])
        for account in response_json['Accounts']['Account']
    ]

    counter = 0
    for an_account in share_account_ids:
        if an_account in shared_accounts:
            LOGGER.info("The image was successfully shared with account: %s",
                        an_account)
            counter += 1
        else:
            LOGGER.warning("The image was not shared with account: %s",
                           an_account)

    # Confirm that the number of accounts in share_account_ids and image's
    # 'LaunchPermissions' are matching.
    return counter == len(share_account_ids)
def is_share_image_succeeded(self, share_account_ids):
    """Helper utility for share_image() that goes through the list of
    share_account_ids and confirms that the image was shared with all
    accounts. The function logs any error during its execution without
    propagating it up."""
    try:
        LOGGER.info("Checking which accounts were added for sharing this AMI")
        image_launch_perms = self.ec2_client.describe_image_attribute(
            ImageId=self.image_id,
            Attribute='launchPermission',
            DryRun=False
        )
        LOGGER.trace("image.describe_attribute() response => %s",
                     image_launch_perms)
    except ClientError as client_error:
        # Simply log the exception without propagating it.
        LOGGER.exception(client_error)
        return False

    # Accounts that already have launch permission on this AMI.
    launch_permission_accounts = [
        perm['UserId'] for perm in image_launch_perms['LaunchPermissions']
    ]

    matched = 0
    # Check which accounts were added for sharing this AMI
    for account_id in share_account_ids:
        if str(account_id) in launch_permission_accounts:
            LOGGER.info("The AMI was successfully shared with account: %s",
                        account_id)
            matched += 1
        else:
            LOGGER.warning("The AMI was not shared with account: %s",
                           account_id)

    # Confirm that the number of accounts in share_account_ids and image's
    # 'LaunchPermissions' are matching.
    return matched == len(share_account_ids)
def set_number_of_threads():
    """number of threads should not be higher than oss2.defaults.connection_pool_size

    Returns the configured ALIBABA_THREAD_COUNT, capped at the oss2
    connection pool size (exceeding it would just queue uploads on the pool).
    """
    # Fetch each value once instead of re-reading config on every branch.
    configured_threads = int(get_config_value('ALIBABA_THREAD_COUNT'))
    pool_size = int(oss2.defaults.connection_pool_size)
    if configured_threads > pool_size:
        LOGGER.warning(
            'Will use only %s threads for the image upload, '
            'the limit is imposed by oss2.defaults.connection_pool_size',
            oss2.defaults.connection_pool_size)
        return pool_size
    return configured_threads
def main():
    """main command handler"""
    parser = argparse.ArgumentParser(description='Prepare a cloud image from a virtual disk')
    parser.add_argument('-a', '--artifacts-dir', required=True,
                        help='Absolute path to the artifacts directory')
    parser.add_argument('-c', '--check-name', action="store_true",
                        help='Check cloud image name')
    parser.add_argument('-i', '--input', required=True,
                        help='Absolute path to the input virtual disk')
    parser.add_argument('-p', '--platform', required=True,
                        help='The cloud type (i.e. aws, gce, azure, alibaba)')
    parser.add_argument('-s', '--seed-image-name', default='',
                        help='Use supplied autogenerated seed cloud image name')
    parser.add_argument('-u', '--user-image-name', default='',
                        help='Use user-supplied cloud image name')
    args = parser.parse_args()

    # Check either seed or user cloud image name was provided
    if (args.seed_image_name == '' and args.user_image_name == '') or \
       (args.seed_image_name != '' and args.user_image_name != ''):
        raise Exception('You must provide either --seed-image-name or --user-image-name')

    # create log handler for the global LOGGER
    create_log_handler()

    if args.check_name:
        # Name-check mode: validate the user-supplied name and exit normally.
        if args.user_image_name == '':
            raise Exception('--check-name can only be used with --user-image-name')
        ImageController.check_valid_name(args.platform, args.user_image_name)
    else:
        result = False
        # BUG FIX: initialize before the try block. Previously, if the
        # ImageController constructor raised, the finally clause referenced
        # an unbound name and the resulting NameError masked the real error.
        image_controller = None
        try:
            # Prepare image
            image_controller = ImageController(args.artifacts_dir, args.platform,
                                               args.input)
            image_controller.prepare(args.seed_image_name, args.user_image_name)
            # If execution came so far, all is well.
            result = True
        except RuntimeError as runtime_exce:
            LOGGER.exception(runtime_exce)
        finally:
            # Clean-up image controller and other internal constructs it created.
            if image_controller is not None:
                image_controller.clean_up()

        if result is True:
            LOGGER.info("SUCCESS: Image generation completed.")
        else:
            LOGGER.warning("FAILURE: Check the log file '%s' and fix the problem "
                           "before re-running.", get_config_value('LOG_FILE'))
            sys.exit(1)
    sys.exit(0)
def __init__(self, artifacts_dir, cloud_type, image_disk_path, should_clean=True):
    """Validate inputs, create a working directory, and construct the
    cloud-specific image object for the given cloud_type.

    Raises ValueError for a missing artifacts dir / unsupported cloud /
    missing disk file; SystemExit for the unimplemented 'alibaba' cloud.
    """
    self.start_time = time.time()
    self.artifacts_dir = artifacts_dir
    self.cloud_type = cloud_type
    self.image_name = None
    self.image_disk_path = image_disk_path
    self.metadata = None
    self.status = 'failure'
    self.transformed_image_name = None
    self.working_dir = None
    self.should_clean = should_clean
    self.cloud_image = None

    if not os.path.isdir(artifacts_dir):
        raise ValueError(
            "Missing or invalid artifacts directory '{}'.".format(
                artifacts_dir))
    if not is_supported_cloud(cloud_type):
        raise ValueError("Unexpected cloud '{}'.".format(cloud_type))
    if not os.path.isfile(image_disk_path):
        raise ValueError(
            "Missing image disk '{}'.".format(image_disk_path))

    # Create a working directory under the artifacts dir to temporarily store
    # various build constructs and files.
    self.create_working_dir(artifacts_dir)

    try:
        # Factory (could be a separate object)
        if cloud_type == 'alibaba':
            LOGGER.warning("Unimplemented cloud '%s'.", cloud_type)
            raise SystemExit(-1)
        if cloud_type == 'aws':
            self.cloud_image = AWSImage(self.working_dir, self.image_disk_path)
        elif cloud_type == 'azure':
            self.cloud_image = AzureImage(self.working_dir, self.image_disk_path)
        elif cloud_type == 'gce':
            self.cloud_image = GoogleImage(self.working_dir, self.image_disk_path)
        else:
            raise ValueError('Unexpected cloud type')
        self.cloud_image_name = self.image_name_factory(cloud_type)
    except BaseException as base_exception:
        LOGGER.exception(base_exception)
        # BUG FIX: bare `raise` preserves the original traceback;
        # `raise base_exception` restarted the traceback at this line.
        raise
def image_name_factory(cloud_type):
    """Factory pattern for ImageName"""
    # 'alibaba' is recognized but not yet implemented.
    if cloud_type == 'alibaba':
        LOGGER.warning("Unimplemented cloud '%s'.", cloud_type)
        raise SystemExit(-1)
    # Dispatch table mapping cloud type to its ImageName class.
    name_classes = {
        'aws': AWSImageName,
        'azure': AzureImageName,
        'gce': GoogleImageName,
    }
    if cloud_type in name_classes:
        return name_classes[cloud_type]()
    raise ValueError('Unexpected cloud type')
def is_snapshot_ready(self, import_task_id):
    """Checks if a snapshot with the given import_task_id exists and its
    status is 'completed'.

    On success, stores the resulting snapshot id in self.snapshot_id and
    returns True. Returns False when the retries are exhausted. Re-raises
    RuntimeError for unrecoverable task states or API failures.
    """
    def _is_snapshot_ready():
        """Awaits the import operation represented by the import_task_id to
        reach 'completed' status."""
        try:
            LOGGER.trace("Querying the status of import-task [%s].", import_task_id)
            response = \
                self.ec2_client.describe_import_snapshot_tasks(
                    ImportTaskIds=[import_task_id])
            if not response:
                raise RuntimeError("describe_import_snapshot_tasks() returned none response!")
            LOGGER.trace("Response from describe_import_snapshot_tasks => '%s'",
                         response)
            task_status = response['ImportSnapshotTasks'][0]['SnapshotTaskDetail']['Status']
            if task_status == 'error':
                # Print the response before raising an exception.
                LOGGER.debug("describe_import_snapshot_tasks() response for [%s] => [%s]",
                             import_task_id, response)
                raise RuntimeError("import-snapshot task [{}] in unrecoverable 'error' state.".
                                   format(import_task_id))
            return task_status == 'completed'
        except ClientError as client_error:
            LOGGER.exception(client_error)
            # BUG FIX: chain the ClientError so the root cause is preserved
            # in the RuntimeError's __cause__ instead of being discarded.
            raise RuntimeError("describe_import_snapshot_tasks() failed for [{}]!".
                               format(import_task_id)) from client_error

    retrier = Retrier(_is_snapshot_ready)
    retrier.tries = int(get_config_value('AWS_IMPORT_SNAPSHOT_TASK_RETRY_COUNT'))
    retrier.delay = int(get_config_value('AWS_IMPORT_SNAPSHOT_TASK_RETRY_DELAY'))
    LOGGER.info("Waiting for the import snapshot task [%s] to complete.", import_task_id)
    try:
        if retrier.execute():
            LOGGER.info("import_snapshot_task [%s] is completed.", import_task_id)
            # Call it one last time to get the snapshot_id.
            response = \
                self.ec2_client.describe_import_snapshot_tasks(
                    ImportTaskIds=[import_task_id])
            self.snapshot_id = \
                response['ImportSnapshotTasks'][0]['SnapshotTaskDetail']['SnapshotId']
            LOGGER.info("SnapshotID = [%s].", self.snapshot_id)
            return True
        LOGGER.warning("import_snapshot_task [%s] didn't complete after checking [%d] times!",
                       import_task_id, retrier.tries)
        return False
    except RuntimeError as runtime_exception:
        LOGGER.exception(runtime_exception)
        raise
def __send_request(self, request):
    """ Send a request to Alibaba cloud services

    Forces HTTPS/JSON, sends the request through an ACS client, and maps
    well-known SDK exceptions to RuntimeErrors that point at the likely
    misconfigured ALIBABA_* configuration variable. Returns the response
    parsed from JSON into a dict.
    """
    # Force HTTPS both as the SDK default and on this specific request.
    aliyunsdkcore.request.set_default_protocol_type('https')
    request.set_protocol_type('https')
    request.set_accept_format('json')
    client = self.__get_acs_client()
    try:
        response_str = client.do_action_with_exception(request)
    except ClientException as exc:
        # Client-side failure (e.g. could not reach the endpoint) —
        # most commonly a bad region setting.
        LOGGER.exception(exc)
        raise RuntimeError(
            'Check correctness of ALIBABA_REGION configuration variable')
    except ServerException as exc:
        LOGGER.exception(exc)
        # Translate known server-side auth errors into actionable messages
        # naming the configuration variable to check.
        if exc.get_error_code() == 'InvalidAccessKeyId.NotFound' and \
           exc.get_error_msg() == 'Specified access key is not found.':
            raise RuntimeError(
                'InvalidAccessKeyId.NotFound: Check correctness of ' +
                'ALIBABA_ACCESS_KEY_ID configuration variable')
        if exc.get_error_code() == 'IncompleteSignature' and \
           exc.get_error_msg().startswith('The request signature does not conform to ' +
                                          'Aliyun standards'):
            raise RuntimeError(
                'IncompleteSignature: Check correctness of ' +
                'ALIBABA_ACCESS_KEY_SECRET configuration variable')
        if exc.get_error_code() == 'InvalidAccessKeySecret' and \
           exc.get_error_msg() == 'The AccessKeySecret is incorrect. Please check ' + \
           'your AccessKeyId and AccessKeySecret.':
            raise RuntimeError(
                'InvalidAccessKeySecret: Check correctness of ' +
                'ALIBABA_ACCESS_KEY_ID and ALIBABA_ACCESS_KEY_SECRET ' +
                'configuration variables')
        # Anything else is unexpected; propagate as-is.
        raise exc
    response = json.loads(response_str)
    # A 'Code' attribute in the response body signals an API-level warning
    # or error even though the HTTP call itself succeeded — log it verbatim.
    if 'Code' in response.keys():
        LOGGER.warning(
            'Request to Alibaba has \'Code\' attribute. Full Alibaba response:'
        )
        LOGGER.warning(
            json.dumps(response, sort_keys=True, indent=4,
                       separators=(',', ': ')))
    return response
def is_image_deleted(self, image_name):
    """Waits for the image to be deleted."""
    # Deletion is complete once the image no longer exists.
    retrier = Retrier(lambda name: not self.image_exists(name), image_name)
    retrier.tries = int(
        get_config_value('GCE_IMAGE_DELETE_COMPLETED_RETRY_COUNT'))
    retrier.delay = int(
        get_config_value('GCE_IMAGE_DELETE_COMPLETED_RETRY_DELAY'))
    LOGGER.info('Waiting for image [%s] to be deleted.', image_name)
    try:
        if not retrier.execute():
            LOGGER.warning(
                "Image [%s] was still not deleted after checking [%d] times!",
                image_name, retrier.tries)
            return False
        LOGGER.info("Image [%s] was deleted.", image_name)
        return True
    except HttpError as exp:
        # API errors are logged and treated as "not deleted".
        LOGGER.exception(exp)
        return False
def call_subprocess(command, input_data=None, timeout_millis=-1, check_return_code=True,
                    input_encoding="utf-8", output_encoding="utf-8"):
    """Calls a subprocess, records progress to console, performs error handling,
    and returns the output.
    ----
    command: The command and arguments to execute. This must either be a string or a list.
    String formatting is more convenient for simple cases while list formatting provides more
    control over escaped characters and whitespace within arguments. If list formatting is
    used then the first item in the list will be executed as a subprocess and the remaining
    commands will be treated as arguments to that subprocess.
    ----
    input_data: The data to send to the subprocess' STDIN. This is used for processes which
    ordinarily read data from pipes instead of arguments. This may either be a
    bytes-like-object or a string.
    ----
    timeout_millis: The number of milliseconds to wait for the subprocess to return before
    killing it. A negative number means that no timeout will occur.
    ----
    check_return_code: Raises a ReturnCodeError if the subprocess returns a non-zero exit
    status.
    ----
    input_encoding: Encoding type to use when passing data to STDIN as a string. This is
    ignored for bytes-like-objects.
    ----
    output_encoding: Encoding type to use when decoding output from the subprocess. Set this
    to None to receive raw binary output.
    """
    if isinstance(command, str):
        # Popen will only accept a list
        command = command.split()
    if input_data:
        if isinstance(input_data, str):
            # Popen.communicate will only accept a bytes-like-object
            input_data = input_data.encode(input_encoding)
        elif not isinstance(input_data, bytes):
            message = "input_data was not a string or bytes-like-object! " \
                      "Unable to send to command [{}]!".format(" ".join(command))
            LOGGER.error(message)
            raise TypeError(message)
    # How often to poll the child, and how often to print a progress dot.
    poll_millis = int(config.get_config_value("SUBPROCESS_POLL_MILLIS"))
    progress_update_delay_millis = \
        int(config.get_config_value("CONSOLE_PROGRESS_BAR_UPDATE_DELAY")) * 1000
    start_time_millis = time.time() * 1000
    next_progress_update_millis = start_time_millis + progress_update_delay_millis
    # We create the output buffer as a list so that we can pass it by reference to the
    # communications thread. Once that thread has joined we'll be able to safely unwrap the
    # output string from position 0 of this list.
    output = []
    LOGGER.info("Calling: %s", " ".join(command))
    child = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    # If the child process produces enough output to fill the STDOUT buffer then it will block
    # until another process frees up space in the buffer by reading from it. Unfortunately,
    # Python's subprocess.read function is a blocking call which will only return once the
    # child process exits and the pipe is closed. Since we need the main thread to make polling
    # and progress calls we're not able to call the blocking read function until after the
    # child process has already terminated. This leads to a deadlock where nothing is reading
    # from the STDOUT buffer because the child process hasn't terminated yet and the child
    # process hasn't terminated yet because it's waiting for the STDOUT buffer to be freed up
    # by a read call.
    # A popular solution here is to to use subprocess.readline instead, as this will only block
    # until a newline character is detected in the output. However, this is still unreliable
    # since not all data sent to STDOUT is guaranteed to terminate in a newline.
    # A better solution is to start a separate communications thread where we can begin reading
    # without blocking the main thread from polling the child process. Since writing is
    # affected by a similar issue (STDIN can fill up and cause the main thread to block) we use
    # the Popen.communicate method to perform both reading and writing asynchronously on the
    # communications thread.
    comm = threading.Thread(target=lambda p, i, o: o.append(p.communicate(i)[0]),
                            args=(child, input_data, output))
    comm.start()
    wrote_progress = False
    # Poll until the child exits, printing a progress dot at each update
    # interval and enforcing the optional timeout.
    while child.poll() is None:
        current_time_millis = time.time() * 1000
        if current_time_millis > next_progress_update_millis:
            sys.stdout.write('.')
            sys.stdout.flush()
            wrote_progress = True
            next_progress_update_millis = current_time_millis + progress_update_delay_millis
        if timeout_millis > -1 and current_time_millis >= start_time_millis + timeout_millis:
            message = "Command [{}] has timed out!".format(" ".join(command))
            LOGGER.warning(message)
            child.kill()
            # Join the comm thread so the partial output is safe to read.
            comm.join()
            if output_encoding == "utf-8":
                LOGGER.warning("Command output was: %s",
                               output[0].decode(output_encoding).rstrip())
            raise TimeoutError(message)
        time.sleep(poll_millis / 1000)
    # Child has exited; wait for the comm thread to finish collecting output.
    comm.join()
    if wrote_progress:
        # Terminate the progress-dot line.
        sys.stdout.write('\n')
        sys.stdout.flush()
    if check_return_code and child.returncode != 0:
        message = "Command [{}] returned with error code [{}]!".format(" ".join(command),
                                                                       child.returncode)
        LOGGER.warning(message)
        if output_encoding == "utf-8":
            LOGGER.warning("Command output was: %s",
                           output[0].decode(output_encoding).rstrip())
        raise exceptions.ReturnCodeError(child.returncode, message)
    if output_encoding:
        return output[0].decode(output_encoding).rstrip()
    return output[0]