def upload(path, imagestore_string='fabric:ImageStore', show_progress=False):  # pylint: disable=too-many-locals,missing-docstring
    from sfctl.config import (client_endpoint, no_verify_setting, ca_cert_info, cert_info)
    import requests

    full_path = validate_app_path(path)
    package_name = os.path.basename(full_path)
    cluster_endpoint = client_endpoint()
    client_cert = cert_info()

    # A CA bundle and "skip TLS verification" are mutually exclusive settings.
    if no_verify_setting() and ca_cert_info():
        raise CLIError('Cannot specify both CA cert info and no verify')

    # Resolve what requests should use for server-certificate verification:
    # False (skip), a CA bundle path, or True (default verification).
    if no_verify_setting():
        verify = False
    elif ca_cert_info():
        verify = ca_cert_info()
    else:
        verify = True

    # Upload either to a file share, or to the native image store.
    if 'file:' in imagestore_string:
        share_path = path_from_imagestore_string(imagestore_string)
        upload_to_fileshare(full_path, os.path.join(share_path, package_name), show_progress)
    elif imagestore_string == 'fabric:ImageStore':
        with requests.Session() as http_session:
            http_session.verify = verify
            http_session.cert = client_cert
            upload_to_native_imagestore(http_session, cluster_endpoint, full_path,
                                        package_name, show_progress)
    else:
        raise CLIError('Unsupported image store connection string')
def create(_):
    """Create a client for Service Fabric APIs."""
    endpoint = client_endpoint()
    if not endpoint:
        raise CLIError(
            'Connection endpoint not found. '
            'Before running sfctl commands, connect to a cluster using '
            'the "sfctl cluster select" command. '
            'If you are seeing this message on Linux after already selecting a cluster, '
            'you may need to run the command with sudo.')

    skip_verify = no_verify_setting()
    if security_type() == 'aad':
        # Azure Active Directory token authentication.
        credentials = AdalAuthentication(skip_verify)
    else:
        # Client-certificate (or unsecured) authentication.
        credentials = ClientCertAuthentication(cert_info(), ca_cert_info(), skip_verify)

    api_client = ServiceFabricClientAPIs(credentials, base_url=endpoint)

    # Turn msrest's automatic retries off entirely.
    # api_client.config.retry_policy has type msrest.pipeline.ClientRetryPolicy;
    # its __init__ sets a status_forcelist value that is handed to
    # urllib3.util.retry.Retry, so that is cleared as well.
    retry = api_client.config.retry_policy
    retry.total = False
    retry.policy.total = False
    retry.policy.status_forcelist = None
    return api_client
def show_connection():
    """Show which Service Fabric cluster this sfctl instance is connected to."""
    endpoint = client_endpoint()
    # Normalize any falsy endpoint (e.g. empty string) to an explicit None.
    return endpoint if endpoint else None
def upload(path, imagestore_string='fabric:ImageStore', show_progress=False, timeout=300):  # pylint: disable=too-many-locals,missing-docstring
    from sfctl.config import (client_endpoint, no_verify_setting, ca_cert_info, cert_info)
    import requests

    full_path = validate_app_path(path)
    package_name = os.path.basename(full_path)
    cluster_endpoint = client_endpoint()
    client_cert = cert_info()

    # A CA bundle and "skip TLS verification" are mutually exclusive settings.
    if no_verify_setting() and ca_cert_info():
        raise CLIError('Cannot specify both CA cert info and no verify')

    # Resolve what requests should use for server-certificate verification:
    # False (skip), a CA bundle path, or True (default verification).
    if no_verify_setting():
        verify = False
    elif ca_cert_info():
        verify = ca_cert_info()
    else:
        verify = True

    # NOTE: pressing ctrl + C during upload does not end the current upload in
    # progress; it only stops the next one from starting. To be fixed in the future.

    # Upload either to a file share, or to the native image store.
    if 'file:' in imagestore_string:
        share_path = path_from_imagestore_string(imagestore_string)
        # Run the file-share copy in a child process so it can be abandoned
        # if it exceeds the timeout.
        worker = Process(target=upload_to_fileshare,
                         args=(full_path, os.path.join(share_path, package_name), show_progress))
        worker.start()
        worker.join(timeout)  # A timeout of None waits indefinitely.
        if worker.is_alive():
            worker.terminate()  # Any children of the worker are left orphaned.
            raise SFCTLInternalException('Upload has timed out. Consider passing a longer '
                                         'timeout duration.')
    elif imagestore_string == 'fabric:ImageStore':
        with requests.Session() as http_session:
            http_session.verify = verify
            http_session.cert = client_cert
            # No separate worker process here: the timeout is passed straight
            # through to upload_to_native_imagestore.
            upload_to_native_imagestore(http_session, cluster_endpoint, full_path,
                                        package_name, show_progress, timeout)
    else:
        raise CLIError('Unsupported image store connection string')
def create(_):
    """Create a client for Service Fabric APIs.

    :param _: CLI context placeholder; unused.
    :returns: A configured ServiceFabricClientAPIs instance.
    :raises CLIError: If no cluster connection endpoint has been selected.
    """
    endpoint = client_endpoint()
    if not endpoint:
        # Consistent with the other client factories in this file: tell the
        # user how to resolve the problem instead of only reporting it.
        raise CLIError(
            "Connection endpoint not found. "
            "Before running sfctl commands, connect to a cluster using "
            "the 'sfctl cluster select' command.")
    no_verify = no_verify_setting()
    if security_type() == 'aad':
        # Azure Active Directory token authentication.
        auth = AdalAuthentication(no_verify)
    else:
        # Client-certificate (or unsecured) authentication.
        cert = cert_info()
        ca_cert = ca_cert_info()
        auth = ClientCertAuthentication(cert, ca_cert, no_verify)
    return ServiceFabricClientAPIs(auth, base_url=endpoint)
def create(_):
    """Create a client for Service Fabric APIs."""
    endpoint = client_endpoint()
    if not endpoint:
        raise CLIError(
            "Connection endpoint not found. "
            "Before running sfctl commands, connect to a cluster using "
            "the 'sfctl cluster select' command.")

    skip_verify = no_verify_setting()
    if security_type() == 'aad':
        # Azure Active Directory token authentication.
        credentials = AdalAuthentication(skip_verify)
    else:
        # Client-certificate (or unsecured) authentication.
        credentials = ClientCertAuthentication(cert_info(), ca_cert_info(), skip_verify)
    return ServiceFabricClientAPIs(credentials, base_url=endpoint)
def upload(path,  # pylint: disable=too-many-locals,missing-docstring,too-many-arguments,too-many-branches,too-many-statements
           imagestore_string='fabric:ImageStore', show_progress=False, timeout=300,
           compress=False, keep_compressed=False, compressed_location=None):
    # Upload a Service Fabric application package to the image store.
    #
    # path: local path of the application package.
    # imagestore_string: 'fabric:ImageStore' (native) or a 'file:' share path.
    # show_progress: print progress during compression/upload.
    # timeout: seconds before a file-share upload is aborted; None means no timeout.
    # compress: compress the package before uploading.
    # keep_compressed / compressed_location: modifiers of compress (see checks below).
    # Raises CLIError on invalid option combinations or an unsupported image
    # store string, and SFCTLInternalException on a file-share upload timeout.
    from sfctl.config import (client_endpoint, no_verify_setting, ca_cert_info, cert_info)
    import requests

    path = _normalize_path(path)
    if compressed_location is not None:
        compressed_location = _normalize_path(compressed_location)

    abspath = validate_app_path(path)
    basename = os.path.basename(abspath)

    endpoint = client_endpoint()
    cert = cert_info()

    # Resolve server-certificate verification: False (skip), a CA bundle
    # path, or True (default verification).
    ca_cert = True
    if no_verify_setting():
        ca_cert = False
    elif ca_cert_info():
        ca_cert = ca_cert_info()

    # A CA bundle and "skip verification" are mutually exclusive settings.
    if all([no_verify_setting(), ca_cert_info()]):
        raise CLIError('Cannot specify both CA cert info and no verify')

    # The compression-related options are modifiers of --compress and make no
    # sense on their own.
    if not compress and (keep_compressed or compressed_location is not None):
        raise CLIError('--keep-compressed and --compressed-location options are only applicable '
                       'if the --compress option is set')

    compressed_pkg_location = None
    created_dir_path = None

    if compress:
        parent_folder = os.path.dirname(path)
        file_or_folder_name = os.path.basename(path)

        # Default compression target is a temp folder next to the package,
        # unless the caller chose an explicit location.
        compressed_pkg_location = os.path.join(parent_folder, 'sfctl_compressed_temp')
        if compressed_location is not None:
            compressed_pkg_location = compressed_location

        # Check if a zip file has already been created
        created_dir_path = os.path.join(compressed_pkg_location, file_or_folder_name)

        if os.path.exists(created_dir_path):
            if get_user_confirmation(str.format('Deleting previously generated compressed files at '
                                                '{0}. If this folder has anything else, those will be '
                                                'deleted as well. Allow? ["y", "n"]: ',
                                                created_dir_path)):
                shutil.rmtree(created_dir_path)
            else:
                # We can consider adding an option to number the packages in the future.
                print('Stopping upload operation. Cannot compress to the following location '
                      'because the path already exists: ' + created_dir_path)
                return

        # Let users know where to find the compressed app package before starting the
        # copy / compression, in case the process crashes in the middle, so users
        # will know where to clean up items from, or where to upload already compressed
        # app packages from
        if show_progress:
            print('Starting package compression into location: ' + compressed_pkg_location)
            print()  # New line for formatting purposes

        compress_package(path, compressed_pkg_location)

        # Change the path to the path with the compressed package
        compressed_path = os.path.join(compressed_pkg_location, file_or_folder_name)

        # re-do validation and reset the variables
        abspath = validate_app_path(compressed_path)
        basename = os.path.basename(abspath)

    # Note: pressing ctrl + C during upload does not end the current upload in progress, but only
    # stops the next one from occurring. This will be fixed in the future.

    # Upload to either to a folder, or native image store only
    if 'file:' in imagestore_string:
        dest_path = path_from_imagestore_string(imagestore_string)

        # Run the file-share copy in a child process so it can be abandoned on timeout.
        process = Process(target=upload_to_fileshare,
                          args=(abspath, os.path.join(dest_path, basename), show_progress))
        process.start()
        process.join(timeout)  # If timeout is None then there is no timeout.

        if process.is_alive():
            process.terminate()  # This will leave any children of process orphaned.
            raise SFCTLInternalException('Upload has timed out. Consider passing a longer '
                                         'timeout duration.')

    elif imagestore_string == 'fabric:ImageStore':
        with requests.Session() as sesh:
            sesh.verify = ca_cert
            sesh.cert = cert
            # There is no need for a new process here since the timeout is
            # passed straight through to upload_to_native_imagestore.
            upload_to_native_imagestore(sesh, endpoint, abspath, basename, show_progress, timeout)

    else:
        raise CLIError('Unsupported image store connection string. Value should be either '
                       '"fabric:ImageStore", or start with "file:"')

    # If code has reached here, it means that the upload was successful.
    # To reach here, the user must have agreed to clear this folder, or exited early,
    # so we can safely delete the contents.
    # The user is expected to not create a folder by the same name during the upload duration.
    # If needed, we can consider adding our content under a GUID in the future.
    if compress and not keep_compressed:
        # Remove the generated files
        if show_progress:
            print('Removing generated folder ' + created_dir_path)
        shutil.rmtree(created_dir_path)
def check_cluster_version(on_failure_or_connection, dummy_cluster_version=None):
    """ Check that the cluster version of sfctl is compatible with that of the cluster.

    Failures in making the API call (to check the cluster version) will be ignored and the
    time tracker will be reset to the current time. This is because we have no way of knowing
    if the API call failed because it doesn't exist on the cluster, or because of some other
    reason. We set the time tracker to the current time to avoid calling the API continuously
    for clusters without this API.

    Rather than each individual component deciding when to call this function, this should be
    called any time this might need to be triggered, and logic within this function will judge
    if a call to the cluster is required.

    :param on_failure_or_connection: True if this function is called due to an API call
        failure, or because it was called on connection to a new cluster endpoint.
        False otherwise.
    :type on_failure_or_connection: bool

    :param dummy_cluster_version: Used for testing purposes only. This is passed in to replace
        a call to the service fabric cluster to get the cluster version, in order to keep
        tests local. By default this value is None. If you would like to simulate the cluster
        call returning None, then enter 'NoResult' as a string.
    :type dummy_cluster_version: str

    :returns: True if versions match, or if the check is not performed. False otherwise.
    """

    from sfctl.state import get_cluster_version_check_time, set_cluster_version_check_time
    from warnings import warn

    # Before doing anything, see if a check needs to be triggered.
    # Always trigger version check if on failure or connection.
    if not on_failure_or_connection:
        # Check if sufficient time has passed since last check
        last_check_time = get_cluster_version_check_time()
        if last_check_time is not None:
            # If we've already checked the cluster version before, see how long ago it has been
            time_since_last_check = datetime.utcnow() - last_check_time
            allowable_time = timedelta(hours=SF_CLI_VERSION_CHECK_INTERVAL)
            if allowable_time > time_since_last_check:
                # Not enough time has elapsed since the last check:
                # don't perform any checks.
                return True
        else:
            # If last_check_time is None, this means that we've not yet set a time, so it's
            # never been checked. Set the initial value.
            set_cluster_version_check_time()

    # Build a client from the stored cluster auth details to query the cluster.
    cluster_auth = get_cluster_auth()
    auth = _get_client_cert_auth(cluster_auth['pem'], cluster_auth['cert'], cluster_auth['key'],
                                 cluster_auth['ca'], cluster_auth['no_verify'])
    client = ServiceFabricClientAPIs(auth, base_url=client_endpoint())

    sfctl_version = get_sfctl_version()

    # Update the timestamp of the last cluster version check
    set_cluster_version_check_time()

    if dummy_cluster_version is None:
        # This command may fail for various reasons. Most common reason as of writing this
        # comment is that the corresponding get_cluster_version API on the cluster doesn't
        # exist.
        try:
            logger.info('Performing cluster version check')
            cluster_version = client.get_cluster_version().version
        except:  # pylint: disable=bare-except
            # Deliberately swallow every error: see the docstring for why
            # failures here are ignored rather than surfaced.
            ex = exc_info()[0]
            logger.info('Check cluster version failed due to error: %s', str(ex))
            return True
    else:
        if dummy_cluster_version == 'NoResult':
            cluster_version = None
        else:
            cluster_version = dummy_cluster_version

    if cluster_version is None:
        # Do no checks if the get cluster version API fails, since most likely it failed
        # because the API doesn't exist.
        return True

    if not sfctl_cluster_version_matches(cluster_version, sfctl_version):
        warn(str.format(
            'sfctl has version "{0}" which does not match the cluster version "{1}". '
            'See https://docs.microsoft.com/azure/service-fabric/service-fabric-cli#service-fabric-target-runtime '  # pylint: disable=line-too-long
            'for version compatibility. Upgrade to a compatible version for the best experience.',
            sfctl_version, cluster_version))
        return False

    return True