def run(self, tmp=None, task_vars=None):
    '''Handler for fetch operations.

    Copies a file from the remote host to the controller.  The remote
    checksum is compared against the local file's checksum so the transfer
    is skipped when the destination is already up to date.

    :param tmp: remote temporary directory created by the executor.
    :param task_vars: task variables (unused here; kept for API
        compatibility with other action plugins).
    :returns: a result dict carrying changed/failed/msg plus
        checksum/md5sum details.
    '''
    # BUGFIX: the signature previously used the mutable default
    # ``task_vars=dict()``, which is shared across every call; use the
    # None-sentinel idiom instead (backward compatible for all callers).
    if task_vars is None:
        task_vars = dict()

    # FIXME: is this even required anymore?
    #if self.runner.noop_on_check(inject):
    #    return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module'))

    source = self._task.args.get('src', None)
    dest = self._task.args.get('dest', None)
    flat = boolean(self._task.args.get('flat'))
    fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
    # validate_md5 is the legacy spelling of validate_checksum
    validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5')))
    if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
        return dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")

    if source is None or dest is None:
        return dict(failed=True, msg="src and dest are required")

    source = self._shell.join_path(source)
    source = self._remote_expand_user(source, tmp)

    # calculate checksum for the remote file
    remote_checksum = self._remote_checksum(tmp, source)

    # use slurp if sudo and permissions are lacking
    remote_data = None
    if remote_checksum in ('1', '2') or self._connection_info.become:
        slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), tmp=tmp)
        if slurpres.get('rc') == 0:
            if slurpres['encoding'] == 'base64':
                remote_data = base64.b64decode(slurpres['content'])
            if remote_data is not None:
                remote_checksum = checksum_s(remote_data)
            # the source path may have been expanded on the
            # target system, so we compare it here and use the
            # expanded version if it's different
            remote_source = slurpres.get('source')
            if remote_source and remote_source != source:
                source = remote_source
        else:
            # FIXME: should raise an error here? the old code did nothing
            pass

    # calculate the destination name
    if os.path.sep not in self._shell.join_path('a', ''):
        # no '/' in joined paths — presumably a Windows-style remote shell;
        # normalize to forward slashes for the local destination
        source_local = source.replace('\\', '/')
    else:
        source_local = source

    dest = os.path.expanduser(dest)
    if flat:
        if dest.endswith(os.sep):
            # if the path ends with "/", we'll use the source filename as the
            # destination filename
            base = os.path.basename(source_local)
            dest = os.path.join(dest, base)
        if not dest.startswith("/"):
            # if dest does not start with "/", we'll assume a relative path
            dest = self._loader.path_dwim(dest)
    else:
        # files are saved in dest dir, with a subdir for each host, then the filename
        dest = "%s/%s/%s" % (self._loader.path_dwim(dest), self._connection_info.remote_addr, source_local)
    dest = dest.replace("//", "/")

    if remote_checksum in ('0', '1', '2', '3', '4'):
        # these don't fail because you may want to transfer a log file that possibly MAY exist
        # but keep going to fetch other log files
        if remote_checksum == '0':
            result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False)
        elif remote_checksum == '1':
            if fail_on_missing:
                result = dict(failed=True, msg="the remote file does not exist", file=source)
            else:
                result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
        elif remote_checksum == '2':
            result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False)
        elif remote_checksum == '3':
            result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False)
        elif remote_checksum == '4':
            result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False)
        return result

    # calculate checksum for the local file
    local_checksum = checksum(dest)

    if remote_checksum != local_checksum:
        # create the containing directories, if needed.
        # BUGFIX: the old isdir()/makedirs() pair raced with concurrent
        # fetches into the same directory; tolerate an already-existing dir.
        dest_dir = os.path.dirname(dest)
        if dest_dir:
            try:
                os.makedirs(dest_dir)
            except OSError:
                if not os.path.isdir(dest_dir):
                    raise

        # fetch the file and check for changes
        if remote_data is None:
            self._connection.fetch_file(source, dest)
        else:
            # BUGFIX: remote_data is raw bytes (base64.b64decode); the old
            # text-mode open(dest, 'w') corrupts binary data and fails on
            # bytes under Python 3.  Use binary mode and close on error too.
            with open(dest, 'wb') as f:
                f.write(remote_data)
        new_checksum = secure_hash(dest)
        # For backwards compatibility. We'll return None on FIPS enabled
        # systems
        try:
            new_md5 = md5(dest)
        except ValueError:
            new_md5 = None

        if validate_checksum and new_checksum != remote_checksum:
            return dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest,
                        remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
        return dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None,
                    checksum=new_checksum, remote_checksum=remote_checksum)
    else:
        # For backwards compatibility. We'll return None on FIPS enabled
        # systems
        try:
            local_md5 = md5(dest)
        except ValueError:
            local_md5 = None

        return dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)
def run(self, tmp=None, task_vars=None):
    ''' handler for fetch operations

    Copies a file from the remote host to the controller, skipping the
    transfer when remote and local checksums already match.  Returns the
    base ActionModule result dict extended with changed/failed/msg and
    checksum details.
    '''
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)

    # fetch has no check-mode implementation: report skipped rather than guess
    if self._play_context.check_mode:
        result['skipped'] = True
        result['msg'] = 'check mode not (yet) supported for this module'
        return result

    source = self._task.args.get('src', None)
    dest = self._task.args.get('dest', None)
    flat = boolean(self._task.args.get('flat'))
    fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
    # validate_md5 is the deprecated spelling of validate_checksum
    validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5')))

    if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
        result['failed'] = True
        result['msg'] = "validate_checksum and validate_md5 cannot both be specified"
        return result

    if source is None or dest is None:
        result['failed'] = True
        result['msg'] = "src and dest are required"
        return result

    source = self._connection._shell.join_path(source)
    source = self._remote_expand_user(source)

    remote_checksum = None
    if not self._play_context.become:
        # calculate checksum for the remote file, don't bother if using become as slurp will be used
        remote_checksum = self._remote_checksum(source, all_vars=task_vars)

    # use slurp if permissions are lacking or privilege escalation is needed
    remote_data = None
    if remote_checksum in ('1', '2', None):
        slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
        if slurpres.get('failed'):
            # checksum '1' marks a missing remote file — honour fail_on_missing
            if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
                result['msg'] = "the remote file does not exist, not transferring, ignored"
                result['file'] = source
                result['changed'] = False
            else:
                result.update(slurpres)
            return result
        else:
            if slurpres['encoding'] == 'base64':
                remote_data = base64.b64decode(slurpres['content'])
            if remote_data is not None:
                remote_checksum = checksum_s(remote_data)
            # the source path may have been expanded on the
            # target system, so we compare it here and use the
            # expanded version if it's different
            remote_source = slurpres.get('source')
            if remote_source and remote_source != source:
                source = remote_source

    # calculate the destination name
    if os.path.sep not in self._connection._shell.join_path('a', ''):
        # joined paths carry no local separator — presumably a Windows-style
        # remote shell; unquote and normalize to forward slashes
        source = self._connection._shell._unquote(source)
        source_local = source.replace('\\', '/')
    else:
        source_local = source

    dest = os.path.expanduser(dest)
    if flat:
        if dest.endswith(os.sep):
            # if the path ends with "/", we'll use the source filename as the
            # destination filename
            base = os.path.basename(source_local)
            dest = os.path.join(dest, base)
        if not dest.startswith("/"):
            # if dest does not start with "/", we'll assume a relative path
            dest = self._loader.path_dwim(dest)
    else:
        # files are saved in dest dir, with a subdir for each host, then the filename
        if 'inventory_hostname' in task_vars:
            target_name = task_vars['inventory_hostname']
        else:
            target_name = self._play_context.remote_addr
        dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

    dest = dest.replace("//","/")

    if remote_checksum in ('0', '1', '2', '3', '4'):
        # these don't fail because you may want to transfer a log file that
        # possibly MAY exist but keep going to fetch other log files
        if remote_checksum == '0':
            result['msg'] = "unable to calculate the checksum of the remote file"
            result['file'] = source
            result['changed'] = False
        elif remote_checksum == '1':
            if fail_on_missing:
                result['failed'] = True
                result['msg'] = "the remote file does not exist"
                result['file'] = source
            else:
                result['msg'] = "the remote file does not exist, not transferring, ignored"
                result['file'] = source
                result['changed'] = False
        elif remote_checksum == '2':
            result['msg'] = "no read permission on remote file, not transferring, ignored"
            result['file'] = source
            result['changed'] = False
        elif remote_checksum == '3':
            result['msg'] = "remote file is a directory, fetch cannot work on directories"
            result['file'] = source
            result['changed'] = False
        elif remote_checksum == '4':
            result['msg'] = "python isn't present on the system. Unable to compute checksum"
            result['file'] = source
            result['changed'] = False
        return result

    # calculate checksum for the local file
    local_checksum = checksum(dest)

    if remote_checksum != local_checksum:
        # create the containing directories, if needed
        makedirs_safe(os.path.dirname(dest))

        # fetch the file and check for changes
        if remote_data is None:
            self._connection.fetch_file(source, dest)
        else:
            try:
                f = open(to_bytes(dest, errors='strict'), 'w')
                f.write(remote_data)
                f.close()
            except (IOError, OSError) as e:
                raise AnsibleError("Failed to fetch the file: %s" % e)
        new_checksum = secure_hash(dest)
        # For backwards compatibility. We'll return None on FIPS enabled systems
        try:
            new_md5 = md5(dest)
        except ValueError:
            new_md5 = None

        if validate_checksum and new_checksum != remote_checksum:
            result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest,
                               remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
        else:
            result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None,
                               checksum=new_checksum, remote_checksum=remote_checksum))
    else:
        # For backwards compatibility. We'll return None on FIPS enabled systems
        try:
            local_md5 = md5(dest)
        except ValueError:
            local_md5 = None
        result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))

    return result
def run(self, tmp=None, task_vars=None):
    ''' handler for fetch operations

    Copies a file from the remote host to the controller.  A remote stat
    (following symlinks) supplies the checksum; slurp is used as the
    fallback transport when the checksum is unavailable.  Cleans up the
    connection's temporary directory in all cases.
    '''
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    try:
        if self._play_context.check_mode:
            raise AnsibleActionSkip('check mode not (yet) supported for this module')

        source = self._task.args.get('src', None)
        # original_dest is kept to detect directory traversal after expansion
        original_dest = dest = self._task.args.get('dest', None)
        flat = boolean(self._task.args.get('flat'), strict=False)
        fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
        validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False)

        msg = ''
        # validate source and dest are strings FIXME: use basic.py and module specs
        if not isinstance(source, string_types):
            msg = "Invalid type supplied for source option, it must be a string"

        if not isinstance(dest, string_types):
            msg = "Invalid type supplied for dest option, it must be a string"

        if source is None or dest is None:
            msg = "src and dest are required"

        if msg:
            raise AnsibleActionFail(msg)

        source = self._connection._shell.join_path(source)
        source = self._remote_expand_user(source)

        remote_stat = {}
        remote_checksum = None
        # NOTE(review): `if True:` looks like a remnant of a removed
        # become-only condition (see comment below) — confirm before tidying
        if True:
            # Get checksum for the remote file even using become. Mitogen doesn't need slurp.
            # Follow symlinks because fetch always follows symlinks
            try:
                remote_stat = self._execute_remote_stat(source, all_vars=task_vars, follow=True)
            except AnsibleError as ae:
                result['changed'] = False
                result['file'] = source
                if fail_on_missing:
                    result['failed'] = True
                    result['msg'] = to_text(ae)
                else:
                    result['msg'] = "%s, ignored" % to_text(ae, errors='surrogate_or_replace')

                return result

            remote_checksum = remote_stat.get('checksum')
            if remote_stat.get('exists'):
                if remote_stat.get('isdir'):
                    result['failed'] = True
                    result['changed'] = False
                    result['msg'] = "remote file is a directory, fetch cannot work on directories"

                    # Historically, these don't fail because you may want to transfer
                    # a log file that possibly MAY exist but keep going to fetch other
                    # log files. Today, this is better achieved by adding
                    # ignore_errors or failed_when to the task. Control the behaviour
                    # via fail_when_missing
                    if not fail_on_missing:
                        result['msg'] += ", not transferring, ignored"
                        del result['changed']
                        del result['failed']

                    return result

        # use slurp if permissions are lacking or privilege escalation is needed
        remote_data = None
        if remote_checksum in (None, '1', ''):
            slurpres = self._execute_module(module_name='ansible.legacy.slurp', module_args=dict(src=source), task_vars=task_vars)
            if slurpres.get('failed'):
                if not fail_on_missing:
                    result['file'] = source
                    result['changed'] = False
                else:
                    result.update(slurpres)

                    if 'not found' in slurpres.get('msg', ''):
                        result['msg'] = "the remote file does not exist, not transferring, ignored"
                    elif slurpres.get('msg', '').startswith('source is a directory'):
                        result['msg'] = "remote file is a directory, fetch cannot work on directories"

                return result
            else:
                if slurpres['encoding'] == 'base64':
                    remote_data = base64.b64decode(slurpres['content'])
                if remote_data is not None:
                    remote_checksum = checksum_s(remote_data)

        # calculate the destination name
        if os.path.sep not in self._connection._shell.join_path('a', ''):
            # joined paths carry no local separator — presumably a
            # Windows-style remote shell; unquote and normalize slashes
            source = self._connection._shell._unquote(source)
            source_local = source.replace('\\', '/')
        else:
            source_local = source

        # ensure we only use file name, avoid relative paths
        if not is_subpath(dest, original_dest):
            # TODO: ? dest = os.path.expanduser(dest.replace(('../','')))
            raise AnsibleActionFail("Detected directory traversal, expected to be contained in '%s' but got '%s'" % (original_dest, dest))

        if flat:
            if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
                raise AnsibleActionFail("dest is an existing directory, use a trailing slash if you want to fetch src into that directory")
            if dest.endswith(os.sep):
                # if the path ends with "/", we'll use the source filename as the
                # destination filename
                base = os.path.basename(source_local)
                dest = os.path.join(dest, base)
            if not dest.startswith("/"):
                # if dest does not start with "/", we'll assume a relative path
                dest = self._loader.path_dwim(dest)
        else:
            # files are saved in dest dir, with a subdir for each host, then the filename
            if 'inventory_hostname' in task_vars:
                target_name = task_vars['inventory_hostname']
            else:
                target_name = self._play_context.remote_addr
            dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

        dest = os.path.normpath(dest)

        # calculate checksum for the local file
        local_checksum = checksum(dest)

        if remote_checksum != local_checksum:
            # create the containing directories, if needed
            makedirs_safe(os.path.dirname(dest))

            # fetch the file and check for changes
            if remote_data is None:
                self._connection.fetch_file(source, dest)
            else:
                try:
                    f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb')
                    f.write(remote_data)
                    f.close()
                except (IOError, OSError) as e:
                    raise AnsibleActionFail("Failed to fetch the file: %s" % e)
            new_checksum = secure_hash(dest)
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                new_md5 = md5(dest)
            except ValueError:
                new_md5 = None

            if validate_checksum and new_checksum != remote_checksum:
                result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source,
                                   dest=dest, remote_md5sum=None, checksum=new_checksum,
                                   remote_checksum=remote_checksum))
            else:
                result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
                               'remote_md5sum': None, 'checksum': new_checksum,
                               'remote_checksum': remote_checksum})
        else:
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                local_md5 = md5(dest)
            except ValueError:
                local_md5 = None
            result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))

    finally:
        # always clean up the remote temporary directory for this connection
        self._remove_tmp_path(self._connection._shell.tmpdir)

    return result
def run(self, tmp=None, task_vars=None):
    ''' handler for fetch operations

    IBM i variant: saves the requested library/objects into a SAVF on the
    remote system (unless the object already is a SAVF), stages it into an
    IFS directory, then fetches the save file to the controller using the
    same checksum/slurp machinery as the core fetch action.  Temporary
    SAVF and IFS copies are cleaned up in the ``finally`` block.
    '''
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect
    try:
        # pre-populate every key so partial failures still return a full shape
        result = dict(msg="", stderr="", stdout="", file="", md5sum="", dest="",
                      remote_md5sum="", remote_checksum="", checksum="", delta="", failed=False)
        savf_name = ''
        created = False      # a SAVF was created by this run (needs cleanup unless kept)
        is_savf = False      # the requested object already was a SAVF
        savf = ''
        ifs_created = False  # a staging copy was placed in the IFS dir (needs cleanup)
        backup = False
        is_lib = False
        force_save = False
        flat = False

        if self._play_context.check_mode:
            result['skipped'] = True
            result['msg'] = 'check mode not (yet) supported for this module'
            return result

        object_names = self._task.args.get('object_names', '*ALL')
        lib_name = self._task.args.get('lib_name', None)
        object_types = self._task.args.get('object_types', '*ALL')
        is_lib = boolean(self._task.args.get('is_lib', False), strict=True)
        savefile_name = self._task.args.get('savefile_name', None)
        force_save = boolean(self._task.args.get('force_save', False), strict=True)
        backup = boolean(self._task.args.get('backup', False), strict=True)
        # NOTE: 'format' shadows the builtin; kept as-is (task option name)
        format = self._task.args.get('format', '*SAVF')
        target_release = self._task.args.get('target_release', '*CURRENT')
        dest = self._task.args.get('dest', None)
        flat = boolean(self._task.args.get('flat', False), strict=True)
        fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=True)
        validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=True)

        # validate dest are strings FIXME: use basic.py and module specs
        if not isinstance(dest, string_types):
            result['msg'] = "Invalid type supplied for dest option, it must be a string. "

        if lib_name is None or dest is None:
            result['msg'] = "lib_name and dest are required. "

        # IBM i object names are case-insensitive; normalize to upper case
        object_names = object_names.upper()
        object_types = object_types.upper()
        format = format.upper()
        target_release = target_release.upper()
        if lib_name is not None:
            lib_name = lib_name.upper()
        if savefile_name is not None:
            savefile_name = savefile_name.upper()

        if lib_name == 'QSYS' and (is_lib is True or (object_names == '*ALL' and object_types == '*ALL')):
            result['msg'] = "QSYS can't be saved."

        if format != "*SAVF":
            result['msg'] = "format can only be *SAVF."

        if result.get('msg'):
            result['failed'] = True
            return result

        startd = datetime.datetime.now()
        # single non-library object: it may already be a SAVF we can fetch directly
        if len(object_names.split()) == 1 and is_lib is not True:
            if object_types == '*ALL' or object_types == '*FILE':
                if (object_names.split())[0][-1] == '*':
                    # trailing '*' wildcard: translate to the regex form used by ibmi_object_find
                    module_args = {'object_name': object_names[0:-1] + '+', 'lib_name': lib_name, 'use_regex': True}
                    module_output = self._execute_module(module_name='ibmi_object_find', module_args=module_args)
                    save_result = module_output
                    if len(save_result['object_list']) == 1 and save_result['object_list'][0]['OBJTYPE'] == '*FILE' and \
                            save_result['object_list'][0]['OBJATTRIBUTE'] == 'SAVF':
                        result['msg'] += "Object is a save file, fetch it directly."
                        savf_path = self._calculate_savf_path(save_result['object_list'][0]['OBJNAME'], lib_name)
                        savf_name = save_result['object_list'][0]['OBJNAME']
                        is_savf = True
                else:
                    module_args = {'object_name': object_names, 'lib_name': lib_name}
                    module_output = self._execute_module(module_name='ibmi_object_find', module_args=module_args)
                    save_result = module_output
                    if len(save_result['object_list']) == 1 and save_result['object_list'][0]['OBJTYPE'] == '*FILE' and \
                            save_result['object_list'][0]['OBJATTRIBUTE'] == 'SAVF':
                        result['msg'] += "Object is a save file, fetch it directly."
                        savf_path = self._calculate_savf_path(object_names, lib_name)
                        savf_name = object_names
                        is_savf = True

        if is_savf is False:
            # not already a SAVF: create one with ibmi_lib_save / ibmi_object_save,
            # omitting the SAVF itself so the save doesn't include its own target
            savf_name, savf_path = self._calculate_savf_name(object_names, lib_name, is_lib,
                                                             savefile_name, task_vars, result)
            if is_lib is True:
                omitfile = 'OMITOBJ((%s/%s *FILE))' % (lib_name, savf_name)
                module_args = {'lib_name': lib_name, 'savefile_name': savf_name, 'savefile_lib': lib_name,
                               'target_release': target_release, 'force_save': force_save,
                               'joblog': True, 'parameters': omitfile}
                module_output = self._execute_module(module_name='ibmi_lib_save', module_args=module_args)
            else:
                omitfile = 'OMITOBJ((%s/%s *FILE))' % (lib_name, savf_name)
                module_args = {'object_names': object_names, 'object_lib': lib_name,
                               'object_types': object_types, 'savefile_name': savf_name,
                               'savefile_lib': lib_name, 'target_release': target_release,
                               'force_save': force_save, 'joblog': False, 'parameters': omitfile}
                module_output = self._execute_module(module_name='ibmi_object_save', module_args=module_args)
            save_result = module_output
            rc = save_result['rc']
            # CPC3708 in stdout indicates an incomplete save even with rc == 0
            if rc != 0 or ('CPC3708' in save_result['stdout']):
                result['msg'] = 'Create SAVF failed. See stderr or stdout for more information.'
                result['failed'] = True
                result['stderr'] = save_result['stderr_lines']
                result['stdout'] = save_result['stdout_lines']
                return result
            created = True

        display.debug("savf_name = %s, savf_path = %s, force_save=%s" % (savf_name, savf_path, force_save))
        source = savf_path
        # stage the SAVF into the IFS directory so it is fetchable as a stream file
        # NOTE(review): ifs_dir is not defined in this method — presumably a
        # module-level constant; verify at module scope
        commandmk = 'mkdir %s' % ifs_dir
        command = 'cp %s %s' % (savf_path, ifs_dir)
        try:
            module_output = self._execute_module(module_name='command', module_args={'_raw_params': commandmk})
            save_result = module_output
            rc = save_result['rc']
            display.debug("save_result['stderr_lines'] = %s" % (save_result['stderr_lines']))
            # 'exists' in stderr means the directory was already there — not an error
            if rc != 0 and ('exists' not in save_result['stderr']):
                result['msg'] = save_result['msg']
                result['failed'] = True
                result['stderr'] = save_result['stderr_lines']
                return result
            module_output = self._execute_module(module_name='command', module_args={'_raw_params': command})
            save_result = module_output
            rc = save_result['rc']
            if rc != 0:
                result['msg'] = save_result['msg']
                result['failed'] = True
                result['stderr'] = save_result['stderr_lines']
                result['stdout'] = save_result['stdout_lines']
                return result
            ifs_created = True
        except Exception as e:
            result['msg'] = to_text(e)
            result['failed'] = True
            return result
        source = '%s/%s' % (ifs_dir, os.path.basename(savf_path))

        if not isinstance(source, string_types):
            result['msg'] = "Invalid type supplied for source option, it must be a string"
            result['failed'] = True
            return result

        source = self._connection._shell.join_path(source)
        source = self._remote_expand_user(source)
        remote_checksum = None
        if not self._connection.become:
            # calculate checksum for the remote file, don't bother if using become as slurp will be used
            # Force remote_checksum to follow symlinks because fetch always follows symlinks
            remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)

        # use slurp if permissions are lacking or privilege escalation is needed
        remote_data = None
        if remote_checksum in ('1', '2', None):
            slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars)
            if slurpres.get('failed'):
                if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
                    result['msg'] = "the remote file does not exist, not transferring, ignored"
                    result['file'] = source
                    result['changed'] = False
                else:
                    result.update(slurpres)
                return result
            else:
                if slurpres['encoding'] == 'base64':
                    remote_data = base64.b64decode(slurpres['content'])
                if remote_data is not None:
                    remote_checksum = checksum_s(remote_data)
                # the source path may have been expanded on the
                # target system, so we compare it here and use the
                # expanded version if it's different
                remote_source = slurpres.get('source')
                if remote_source and remote_source != source:
                    source = remote_source

        # calculate the destination name
        if os.path.sep not in self._connection._shell.join_path('a', ''):
            # joined paths carry no local separator — presumably a
            # Windows-style shell; unquote and normalize slashes
            source = self._connection._shell._unquote(source)
            qsys_source = self._connection._shell._unquote(savf_path)
            source_local = qsys_source.replace('\\', '/')
        else:
            source_local = savf_path

        dest = os.path.expanduser(dest)
        if flat:
            if not dest.startswith("/"):
                # if dest does not start with "/", we'll assume a relative path
                dest = self._loader.path_dwim(dest)
            base = os.path.basename(source_local)
            dest = os.path.join(dest, base)
        else:
            # files are saved in dest dir, with a subdir for each host, then the filename
            if 'inventory_hostname' in task_vars:
                target_name = task_vars['inventory_hostname']
            else:
                target_name = self._play_context.remote_addr
            dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

        dest = dest.replace("//", "/")

        if remote_checksum in ('0', '1', '2', '3', '4', '5'):
            result['changed'] = False
            result['file'] = source
            if remote_checksum == '0':
                result['msg'] = "unable to calculate the checksum of the remote file"
            elif remote_checksum == '1':
                result['msg'] = "the remote file does not exist"
            elif remote_checksum == '2':
                result['msg'] = "no read permission on remote file"
            elif remote_checksum == '3':
                result['msg'] = "remote file is a directory, fetch cannot work on directories"
            elif remote_checksum == '4':
                result['msg'] = "python isn't present on the system. Unable to compute checksum"
            elif remote_checksum == '5':
                result['msg'] = "stdlib json was not found on the remote machine. Only the raw module can work without those installed"
            # Historically, these don't fail because you may want to transfer
            # a log file that possibly MAY exist but keep going to fetch other
            # log files. Today, this is better achieved by adding
            # ignore_errors or failed_when to the task. Control the behaviour
            # via fail_when_missing
            if fail_on_missing:
                result['failed'] = True
                del result['changed']
            else:
                result['msg'] += ", not transferring, ignored"
            return result

        # calculate checksum for the local file
        local_checksum = checksum(dest)

        if remote_checksum != local_checksum:
            # create the containing directories, if needed
            makedirs_safe(os.path.dirname(dest))
            # fetch the file and check for changes
            if remote_data is None:
                self._connection.fetch_file(source, dest)
            else:
                try:
                    f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb')
                    f.write(remote_data)
                    f.close()
                except (IOError, OSError) as e:
                    raise AnsibleError("Failed to fetch the file: %s" % e)
            new_checksum = secure_hash(dest)
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                new_md5 = md5(dest)
            except ValueError:
                new_md5 = None

            if validate_checksum and new_checksum != remote_checksum:
                result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=savf, dest=dest,
                                   remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
            else:
                endd = datetime.datetime.now()
                delta = endd - startd
                # report the SAVF path when it is kept on the remote side
                if (created is True and backup is True) or is_savf is True:
                    savf = savf_path
                result['msg'] += " File is renewed on local."
                result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
                               'remote_md5sum': None, 'checksum': new_checksum,
                               'remote_checksum': remote_checksum, 'delta': str(delta), 'file': savf})
        else:
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                local_md5 = md5(dest)
            except ValueError:
                local_md5 = None
            endd = datetime.datetime.now()
            delta = endd - startd
            if (created is True and backup is True) or is_savf is True:
                savf = savf_path
            result.update(dict(changed=False, md5sum=local_md5, file=savf, delta=str(delta),
                               dest=dest, checksum=local_checksum))
    except Exception as e:
        result['msg'] += "%s" % to_text(e)
        result['failed'] = True
        return result
    finally:
        # delete the SAVF we created unless the user asked to keep it (backup)
        # or the object already was a SAVF; CPF2105 = object not found, benign
        if ((backup is False and is_savf is False) or result['failed'] is True) and created is True:
            cmd = 'DLTOBJ OBJ(%s/%s) OBJTYPE(*FILE)' % (lib_name, savf_name)
            module_output = self._execute_module(module_name='ibmi_cl_command', module_args={'cmd': cmd})
            save_result = module_output
            rc = save_result['rc']
            if rc != 0 and ('CPF2105' not in save_result['stderr']):
                result['msg'] += "Failed to delete SAVF on remote"
        # remove the IFS staging copy if we made one
        if ifs_created is True:
            cmd = 'rm %s/%s' % (ifs_dir, os.path.basename(savf_path))
            try:
                module_output = self._execute_module(module_name='command', module_args={'_raw_params': cmd})
                save_result = module_output
                rc = save_result['rc']
                if rc != 0:
                    result['msg'] += "Failed to delete IFS on remote"
            except Exception as e:
                result['msg'] += "exception happens when delete IFS file. error: %s" % to_text(e)
        self._remove_tmp_path(self._connection._shell.tmpdir)
    return result
def run(self, tmp=None, task_vars=None):
    """Handler for fetch operations.

    Copies a file from the remote host to the controller, comparing remote
    and local checksums so the transfer is skipped when the destination is
    already current.

    :param tmp: remote temporary directory created by the executor.
    :param task_vars: task variables; used for the per-host destination
        subdirectory (``inventory_hostname``) and passed to slurp.
    :returns: a result dict carrying changed/failed/msg plus
        checksum/md5sum details.
    """
    # BUGFIX: the signature previously used the mutable default
    # ``task_vars=dict()``, shared across every call; use the None-sentinel
    # idiom instead (backward compatible for all callers).
    if task_vars is None:
        task_vars = dict()

    if self._connection_info.check_mode:
        return dict(skipped=True, msg="check mode not (yet) supported for this module")

    source = self._task.args.get("src", None)
    dest = self._task.args.get("dest", None)
    flat = boolean(self._task.args.get("flat"))
    fail_on_missing = boolean(self._task.args.get("fail_on_missing"))
    # validate_md5 is the legacy spelling of validate_checksum
    validate_checksum = boolean(self._task.args.get("validate_checksum", self._task.args.get("validate_md5")))

    if "validate_md5" in self._task.args and "validate_checksum" in self._task.args:
        return dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")

    if source is None or dest is None:
        return dict(failed=True, msg="src and dest are required")

    source = self._connection._shell.join_path(source)
    source = self._remote_expand_user(source, tmp)

    # calculate checksum for the remote file
    remote_checksum = self._remote_checksum(tmp, source)

    # use slurp if sudo and permissions are lacking
    remote_data = None
    if remote_checksum in ("1", "2") or self._connection_info.become:
        slurpres = self._execute_module(
            module_name="slurp", module_args=dict(src=source), task_vars=task_vars, tmp=tmp
        )
        if slurpres.get("rc") == 0:
            if slurpres["encoding"] == "base64":
                remote_data = base64.b64decode(slurpres["content"])
            if remote_data is not None:
                remote_checksum = checksum_s(remote_data)
            # the source path may have been expanded on the
            # target system, so we compare it here and use the
            # expanded version if it's different
            remote_source = slurpres.get("source")
            if remote_source and remote_source != source:
                source = remote_source
        else:
            # FIXME: should raise an error here? the old code did nothing
            pass

    # calculate the destination name
    if os.path.sep not in self._connection._shell.join_path("a", ""):
        # no '/' in joined paths — presumably a Windows-style remote shell;
        # normalize to forward slashes for the local destination
        source_local = source.replace("\\", "/")
    else:
        source_local = source

    dest = os.path.expanduser(dest)
    if flat:
        if dest.endswith(os.sep):
            # if the path ends with "/", we'll use the source filename as the
            # destination filename
            base = os.path.basename(source_local)
            dest = os.path.join(dest, base)
        if not dest.startswith("/"):
            # if dest does not start with "/", we'll assume a relative path
            dest = self._loader.path_dwim(dest)
    else:
        # files are saved in dest dir, with a subdir for each host, then the filename
        if "inventory_hostname" in task_vars:
            target_name = task_vars["inventory_hostname"]
        else:
            target_name = self._connection_info.remote_addr
        dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

    dest = dest.replace("//", "/")

    if remote_checksum in ("0", "1", "2", "3", "4"):
        # these don't fail because you may want to transfer a log file that possibly MAY exist
        # but keep going to fetch other log files
        if remote_checksum == "0":
            result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False)
        elif remote_checksum == "1":
            if fail_on_missing:
                result = dict(failed=True, msg="the remote file does not exist", file=source)
            else:
                result = dict(
                    msg="the remote file does not exist, not transferring, ignored", file=source, changed=False
                )
        elif remote_checksum == "2":
            result = dict(
                msg="no read permission on remote file, not transferring, ignored", file=source, changed=False
            )
        elif remote_checksum == "3":
            result = dict(
                msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False
            )
        elif remote_checksum == "4":
            result = dict(
                msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False
            )
        return result

    # calculate checksum for the local file
    local_checksum = checksum(dest)

    if remote_checksum != local_checksum:
        # create the containing directories, if needed
        makedirs_safe(os.path.dirname(dest))

        # fetch the file and check for changes
        if remote_data is None:
            self._connection.fetch_file(source, dest)
        else:
            # BUGFIX: remote_data is raw bytes (base64.b64decode); the old
            # text-mode open(dest, "w") corrupts binary data and fails on
            # bytes under Python 3.  Use binary mode and close on error too.
            with open(dest, "wb") as f:
                f.write(remote_data)
        new_checksum = secure_hash(dest)
        # For backwards compatibility. We'll return None on FIPS enabled
        # systems
        try:
            new_md5 = md5(dest)
        except ValueError:
            new_md5 = None

        if validate_checksum and new_checksum != remote_checksum:
            return dict(
                failed=True,
                md5sum=new_md5,
                msg="checksum mismatch",
                file=source,
                dest=dest,
                remote_md5sum=None,
                checksum=new_checksum,
                remote_checksum=remote_checksum,
            )
        return dict(
            changed=True,
            md5sum=new_md5,
            dest=dest,
            remote_md5sum=None,
            checksum=new_checksum,
            remote_checksum=remote_checksum,
        )
    else:
        # For backwards compatibility. We'll return None on FIPS enabled
        # systems
        try:
            local_md5 = md5(dest)
        except ValueError:
            local_md5 = None

        return dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)
def run(self, tmp=None, task_vars=None):
    '''
    Handler for fetch operations: copy a file FROM the remote host TO the
    controller.

    Reads ``src``, ``dest``, ``flat``, ``fail_on_missing`` and
    ``validate_checksum`` (plus the deprecated ``validate_md5``) from the
    task args, compares remote and local checksums, and transfers the file
    only when they differ.  Falls back to the ``slurp`` module when the
    plain checksum probe reports a missing/unreadable file or when
    privilege escalation (become) is in effect.

    :param tmp: ignored; kept only for call compatibility (deleted below).
    :param task_vars: task variables; ``inventory_hostname`` (when present)
        names the per-host subdirectory under ``dest`` in non-flat mode.
    :returns: result dict (changed/failed/msg, checksum fields, and
        md5sum — None on FIPS-enabled systems).
    '''
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    # The finally clause below must always clean the remote tmpdir, so the
    # whole body runs inside one try block.
    try:
        if self._play_context.check_mode:
            result['skipped'] = True
            result['msg'] = 'check mode not (yet) supported for this module'
            return result

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        # strict=False: accept any truthy/falsy value rather than raising
        flat = boolean(self._task.args.get('flat'), strict=False)
        fail_on_missing = boolean(self._task.args.get('fail_on_missing'), strict=False)
        validate_checksum = boolean(self._task.args.get('validate_checksum',
                                                        self._task.args.get('validate_md5', True)),
                                    strict=False)

        # validate source and dest are strings FIXME: use basic.py and module specs
        if not isinstance(source, string_types):
            result['msg'] = "Invalid type supplied for source option, it must be a string"

        if not isinstance(dest, string_types):
            result['msg'] = "Invalid type supplied for dest option, it must be a string"

        # validate_md5 is the deprecated way to specify validate_checksum
        if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
            result['msg'] = "validate_checksum and validate_md5 cannot both be specified"

        if 'validate_md5' in self._task.args:
            display.deprecated('Use validate_checksum instead of validate_md5', version='2.8')

        if source is None or dest is None:
            result['msg'] = "src and dest are required"

        # Any of the validations above leaves an error in result['msg'];
        # a single exit point turns that into a failure.
        if result.get('msg'):
            result['failed'] = True
            return result

        source = self._connection._shell.join_path(source)
        source = self._remote_expand_user(source)

        remote_checksum = None
        if not self._play_context.become:
            # calculate checksum for the remote file, don't bother if using become as slurp will be used
            # Force remote_checksum to follow symlinks because fetch always follows symlinks
            remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)

        # use slurp if permissions are lacking or privilege escalation is needed
        # ('1' = missing file, '2' = unreadable, None = become path above)
        remote_data = None
        if remote_checksum in ('1', '2', None):
            slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars)
            if slurpres.get('failed'):
                if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
                    result['msg'] = "the remote file does not exist, not transferring, ignored"
                    result['file'] = source
                    result['changed'] = False
                else:
                    result.update(slurpres)
                return result
            else:
                if slurpres['encoding'] == 'base64':
                    remote_data = base64.b64decode(slurpres['content'])
                if remote_data is not None:
                    remote_checksum = checksum_s(remote_data)
                # the source path may have been expanded on the
                # target system, so we compare it here and use the
                # expanded version if it's different
                remote_source = slurpres.get('source')
                if remote_source and remote_source != source:
                    source = remote_source

        # calculate the destination name
        # (no os.path.sep in the joined probe => remote shell is Windows-style;
        # normalize backslashes for the local path)
        if os.path.sep not in self._connection._shell.join_path('a', ''):
            source = self._connection._shell._unquote(source)
            source_local = source.replace('\\', '/')
        else:
            source_local = source

        dest = os.path.expanduser(dest)
        if flat:
            if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
                result['msg'] = "dest is an existing directory, use a trailing slash if you want to fetch src into that directory"
                result['file'] = dest
                result['failed'] = True
                return result
            if dest.endswith(os.sep):
                # if the path ends with "/", we'll use the source filename as the
                # destination filename
                base = os.path.basename(source_local)
                dest = os.path.join(dest, base)
            if not dest.startswith("/"):
                # if dest does not start with "/", we'll assume a relative path
                dest = self._loader.path_dwim(dest)
        else:
            # files are saved in dest dir, with a subdir for each host, then the filename
            if 'inventory_hostname' in task_vars:
                target_name = task_vars['inventory_hostname']
            else:
                target_name = self._play_context.remote_addr
            dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

        dest = dest.replace("//", "/")

        # remote_checksum still holds an error code => report and stop here
        if remote_checksum in ('0', '1', '2', '3', '4', '5'):
            result['changed'] = False
            result['file'] = source
            if remote_checksum == '0':
                result['msg'] = "unable to calculate the checksum of the remote file"
            elif remote_checksum == '1':
                result['msg'] = "the remote file does not exist"
            elif remote_checksum == '2':
                result['msg'] = "no read permission on remote file"
            elif remote_checksum == '3':
                result['msg'] = "remote file is a directory, fetch cannot work on directories"
            elif remote_checksum == '4':
                result['msg'] = "python isn't present on the system. Unable to compute checksum"
            elif remote_checksum == '5':
                result['msg'] = "stdlib json or simplejson was not found on the remote machine. Only the raw module can work without those installed"
            # Historically, these don't fail because you may want to transfer
            # a log file that possibly MAY exist but keep going to fetch other
            # log files. Today, this is better achieved by adding
            # ignore_errors or failed_when to the task. Control the behaviour
            # via fail_on_missing
            if fail_on_missing:
                result['failed'] = True
                del result['changed']
            else:
                result['msg'] += ", not transferring, ignored"
            return result

        # calculate checksum for the local file
        local_checksum = checksum(dest)

        if remote_checksum != local_checksum:
            # create the containing directories, if needed
            makedirs_safe(os.path.dirname(dest))

            # fetch the file and check for changes
            if remote_data is None:
                self._connection.fetch_file(source, dest)
            else:
                # slurp already delivered the bytes; write them locally
                try:
                    f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb')
                    f.write(remote_data)
                    f.close()
                except (IOError, OSError) as e:
                    raise AnsibleError("Failed to fetch the file: %s" % e)
            new_checksum = secure_hash(dest)
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                new_md5 = md5(dest)
            except ValueError:
                new_md5 = None

            if validate_checksum and new_checksum != remote_checksum:
                result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch",
                                   file=source, dest=dest, remote_md5sum=None,
                                   checksum=new_checksum, remote_checksum=remote_checksum))
            else:
                result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
                               'remote_md5sum': None, 'checksum': new_checksum,
                               'remote_checksum': remote_checksum})
        else:
            # Checksums match: nothing transferred, report unchanged.
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                local_md5 = md5(dest)
            except ValueError:
                local_md5 = None
            result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))

    finally:
        self._remove_tmp_path(self._connection._shell.tmpdir)

    return result
def run(self, tmp=None, task_vars=None):
    '''
    Handler for fetch operations: copy a file FROM the remote host TO the
    controller.

    Reads ``src``, ``dest``, ``flat``, ``fail_on_missing`` (default True)
    and ``validate_checksum`` (default True) from the task args, compares
    remote and local checksums, and transfers only when they differ.
    Checksum probe error codes are mapped to messages through the
    module-level ``REMOTE_CHECKSUM_ERRORS`` table.

    :param tmp: passed through to the base class run().
    :param task_vars: task variables; ``inventory_hostname`` (when present)
        names the per-host subdirectory under ``dest`` in non-flat mode.
    :returns: result dict (changed/failed/msg, checksum fields, and
        md5sum — None on FIPS-enabled systems).
    '''
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)

    # The finally clause below must always clean the remote tmpdir, so the
    # whole body runs inside one try block.
    try:
        if self._play_context.check_mode:
            result['skipped'] = True
            result['msg'] = 'check mode not (yet) supported for this module'
            return result

        # strict=False: accept any truthy/falsy value rather than raising
        flat = boolean(self._task.args.get('flat'), strict=False)
        fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
        validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False)

        # validate source and dest are strings FIXME: use basic.py and module specs
        source = self._task.args.get('src')
        if not isinstance(source, string_types):
            result['msg'] = "Invalid type supplied for source option, it must be a string"

        dest = self._task.args.get('dest')
        if not isinstance(dest, string_types):
            result['msg'] = "Invalid type supplied for dest option, it must be a string"

        # Any validation error above leaves a message in result['msg'];
        # a single exit point turns that into a failure.
        if result.get('msg'):
            result['failed'] = True
            return result

        source = self._connection._shell.join_path(source)
        source = self._remote_expand_user(source)

        # calculate checksum for the remote file;
        # force remote_checksum to follow symlinks because fetch always
        # follows symlinks
        remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)

        # calculate the destination name
        # (no os.path.sep in the joined probe => remote shell is Windows-style;
        # normalize backslashes for the local path)
        if os.path.sep not in self._connection._shell.join_path('a', ''):
            source = self._connection._shell._unquote(source)
            source_local = source.replace('\\', '/')
        else:
            source_local = source

        dest = os.path.expanduser(dest)
        if flat:
            if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
                result['msg'] = "dest is an existing directory, use a trailing slash if you want to fetch src into that directory"
                result['file'] = dest
                result['failed'] = True
                return result
            if dest.endswith(os.sep):
                # if the path ends with "/", we'll use the source filename as the
                # destination filename
                base = os.path.basename(source_local)
                dest = os.path.join(dest, base)
            if not dest.startswith("/"):
                # if dest does not start with "/", we'll assume a relative path
                dest = self._loader.path_dwim(dest)
        else:
            # files are saved in dest dir, with a subdir for each host, then the filename
            if 'inventory_hostname' in task_vars:
                target_name = task_vars['inventory_hostname']
            else:
                target_name = self._play_context.remote_addr
            dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

        dest = dest.replace("//", "/")

        # The checksum probe returned an error code => report and stop here
        if remote_checksum in REMOTE_CHECKSUM_ERRORS:
            result['changed'] = False
            result['file'] = source
            result['msg'] = REMOTE_CHECKSUM_ERRORS[remote_checksum]
            # Historically, these don't fail because you may want to transfer
            # a log file that possibly MAY exist but keep going to fetch other
            # log files. Today, this is better achieved by adding
            # ignore_errors or failed_when to the task. Control the behaviour
            # via fail_on_missing
            if fail_on_missing:
                result['failed'] = True
                del result['changed']
            else:
                result['msg'] += ", not transferring, ignored"
            return result

        # calculate checksum for the local file
        local_checksum = checksum(dest)

        if remote_checksum != local_checksum:
            # create the containing directories, if needed
            makedirs_safe(os.path.dirname(dest))

            # fetch the file and check for changes
            self._connection.fetch_file(source, dest)
            new_checksum = secure_hash(dest)
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                new_md5 = md5(dest)
            except ValueError:
                new_md5 = None

            if validate_checksum and new_checksum != remote_checksum:
                result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch",
                                   file=source, dest=dest, remote_md5sum=None,
                                   checksum=new_checksum, remote_checksum=remote_checksum))
            else:
                result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
                               'remote_md5sum': None, 'checksum': new_checksum,
                               'remote_checksum': remote_checksum})
        else:
            # Checksums match: nothing transferred, report unchanged.
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                local_md5 = md5(dest)
            except ValueError:
                local_md5 = None
            result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))

    finally:
        self._remove_tmp_path(self._connection._shell.tmpdir)

    return result
def run(self, tmp=None, task_vars=None):
    '''
    Handler for fetch operations: copy a file FROM the remote host TO the
    controller.

    Reads ``src``, ``dest``, ``flat``, ``fail_on_missing`` (default True)
    and ``validate_checksum`` (plus the deprecated ``validate_md5``) from
    the task args, compares remote and local checksums, and transfers the
    file only when they differ.  Falls back to the ``slurp`` module when
    the plain checksum probe reports a missing/unreadable file or when
    privilege escalation (become) is in effect.

    :param tmp: ignored; kept only for call compatibility (deleted below).
    :param task_vars: task variables; ``inventory_hostname`` (when present)
        names the per-host subdirectory under ``dest`` in non-flat mode.
    :returns: result dict (changed/failed/msg, checksum fields, and
        md5sum — None on FIPS-enabled systems).
    '''
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    # The finally clause below must always clean the remote tmpdir, so the
    # whole body runs inside one try block.
    try:
        if self._play_context.check_mode:
            result['skipped'] = True
            result['msg'] = 'check mode not (yet) supported for this module'
            return result

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        # strict=False: accept any truthy/falsy value rather than raising
        flat = boolean(self._task.args.get('flat'), strict=False)
        fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
        validate_checksum = boolean(self._task.args.get('validate_checksum',
                                                        self._task.args.get('validate_md5', True)),
                                    strict=False)

        # validate source and dest are strings FIXME: use basic.py and module specs
        if not isinstance(source, string_types):
            result['msg'] = "Invalid type supplied for source option, it must be a string"

        if not isinstance(dest, string_types):
            result['msg'] = "Invalid type supplied for dest option, it must be a string"

        # validate_md5 is the deprecated way to specify validate_checksum
        if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
            result['msg'] = "validate_checksum and validate_md5 cannot both be specified"

        if 'validate_md5' in self._task.args:
            display.deprecated('Use validate_checksum instead of validate_md5', version='2.8')

        if source is None or dest is None:
            result['msg'] = "src and dest are required"

        # Any validation error above leaves a message in result['msg'];
        # a single exit point turns that into a failure.
        if result.get('msg'):
            result['failed'] = True
            return result

        source = self._connection._shell.join_path(source)
        source = self._remote_expand_user(source)

        remote_checksum = None
        if not self._play_context.become:
            # calculate checksum for the remote file, don't bother if using become as slurp will be used
            # Force remote_checksum to follow symlinks because fetch always follows symlinks
            remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)

        # use slurp if permissions are lacking or privilege escalation is needed
        # ('1' = missing file, '2' = unreadable, None = become path above)
        remote_data = None
        if remote_checksum in ('1', '2', None):
            slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars)
            if slurpres.get('failed'):
                if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
                    result['msg'] = "the remote file does not exist, not transferring, ignored"
                    result['file'] = source
                    result['changed'] = False
                else:
                    result.update(slurpres)
                return result
            else:
                if slurpres['encoding'] == 'base64':
                    remote_data = base64.b64decode(slurpres['content'])
                if remote_data is not None:
                    remote_checksum = checksum_s(remote_data)
                # the source path may have been expanded on the
                # target system, so we compare it here and use the
                # expanded version if it's different
                remote_source = slurpres.get('source')
                if remote_source and remote_source != source:
                    source = remote_source

        # calculate the destination name
        # (no os.path.sep in the joined probe => remote shell is Windows-style;
        # normalize backslashes for the local path)
        if os.path.sep not in self._connection._shell.join_path('a', ''):
            source = self._connection._shell._unquote(source)
            source_local = source.replace('\\', '/')
        else:
            source_local = source

        dest = os.path.expanduser(dest)
        if flat:
            if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
                result['msg'] = "dest is an existing directory, use a trailing slash if you want to fetch src into that directory"
                result['file'] = dest
                result['failed'] = True
                return result
            if dest.endswith(os.sep):
                # if the path ends with "/", we'll use the source filename as the
                # destination filename
                base = os.path.basename(source_local)
                dest = os.path.join(dest, base)
            if not dest.startswith("/"):
                # if dest does not start with "/", we'll assume a relative path
                dest = self._loader.path_dwim(dest)
        else:
            # files are saved in dest dir, with a subdir for each host, then the filename
            if 'inventory_hostname' in task_vars:
                target_name = task_vars['inventory_hostname']
            else:
                target_name = self._play_context.remote_addr
            dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

        dest = dest.replace("//", "/")

        # remote_checksum still holds an error code => report and stop here
        if remote_checksum in ('0', '1', '2', '3', '4', '5'):
            result['changed'] = False
            result['file'] = source
            if remote_checksum == '0':
                result['msg'] = "unable to calculate the checksum of the remote file"
            elif remote_checksum == '1':
                result['msg'] = "the remote file does not exist"
            elif remote_checksum == '2':
                result['msg'] = "no read permission on remote file"
            elif remote_checksum == '3':
                result['msg'] = "remote file is a directory, fetch cannot work on directories"
            elif remote_checksum == '4':
                result['msg'] = "python isn't present on the system. Unable to compute checksum"
            elif remote_checksum == '5':
                result['msg'] = "stdlib json or simplejson was not found on the remote machine. Only the raw module can work without those installed"
            # Historically, these don't fail because you may want to transfer
            # a log file that possibly MAY exist but keep going to fetch other
            # log files. Today, this is better achieved by adding
            # ignore_errors or failed_when to the task. Control the behaviour
            # via fail_on_missing
            if fail_on_missing:
                result['failed'] = True
                del result['changed']
            else:
                result['msg'] += ", not transferring, ignored"
            return result

        # calculate checksum for the local file
        local_checksum = checksum(dest)

        if remote_checksum != local_checksum:
            # create the containing directories, if needed
            makedirs_safe(os.path.dirname(dest))

            # fetch the file and check for changes
            if remote_data is None:
                self._connection.fetch_file(source, dest)
            else:
                # slurp already delivered the bytes; write them locally
                try:
                    f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb')
                    f.write(remote_data)
                    f.close()
                except (IOError, OSError) as e:
                    raise AnsibleError("Failed to fetch the file: %s" % e)
            new_checksum = secure_hash(dest)
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                new_md5 = md5(dest)
            except ValueError:
                new_md5 = None

            if validate_checksum and new_checksum != remote_checksum:
                result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch",
                                   file=source, dest=dest, remote_md5sum=None,
                                   checksum=new_checksum, remote_checksum=remote_checksum))
            else:
                result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
                               'remote_md5sum': None, 'checksum': new_checksum,
                               'remote_checksum': remote_checksum})
        else:
            # Checksums match: nothing transferred, report unchanged.
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                local_md5 = md5(dest)
            except ValueError:
                local_md5 = None
            result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))

    finally:
        self._remove_tmp_path(self._connection._shell.tmpdir)

    return result
def run(self, tmp=None, task_vars=None):
    '''
    Handler for fetch operations: copy a file FROM the remote host TO the
    controller.

    Reads ``src``, ``dest``, ``flat``, ``fail_on_missing`` and
    ``validate_checksum`` (plus the deprecated ``validate_md5``) from the
    task args, compares remote and local checksums, and transfers the
    file only when they differ.  Falls back to the ``slurp`` module when
    the checksum probe reports a missing/unreadable file or when sudo is
    in effect.

    :param tmp: remote temporary path used for module execution.
    :param task_vars: task variables dict (defaults to empty).
    :returns: result dict (changed/failed/msg, checksum fields, and
        md5sum — None on FIPS-enabled systems).
    '''
    # BUGFIX: task_vars previously defaulted to a mutable dict() shared
    # across every call; use None + per-call initialization instead.
    if task_vars is None:
        task_vars = dict()

    # FIXME: is this even required anymore?
    #if self.runner.noop_on_check(inject):
    #    return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module'))

    source = self._task.args.get('src', None)
    dest = self._task.args.get('dest', None)
    flat = boolean(self._task.args.get('flat'))
    fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
    validate_checksum = boolean(self._task.args.get('validate_checksum',
                                                    self._task.args.get('validate_md5')))

    # validate_md5 is the deprecated spelling of validate_checksum; both
    # at once is ambiguous, so refuse it.
    if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
        return dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")

    if source is None or dest is None:
        return dict(failed=True, msg="src and dest are required")

    source = self._shell.join_path(source)
    source = self._remote_expand_user(source, tmp)

    # calculate checksum for the remote file
    remote_checksum = self._remote_checksum(tmp, source)

    # use slurp if sudo and permissions are lacking
    # ('1' = missing file, '2' = unreadable)
    remote_data = None
    if remote_checksum in ('1', '2') or self._connection_info.sudo:
        slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), tmp=tmp)
        if slurpres.get('rc') == 0:
            if slurpres['encoding'] == 'base64':
                remote_data = base64.b64decode(slurpres['content'])
            if remote_data is not None:
                remote_checksum = checksum_s(remote_data)
            # the source path may have been expanded on the
            # target system, so we compare it here and use the
            # expanded version if it's different
            remote_source = slurpres.get('source')
            if remote_source and remote_source != source:
                source = remote_source
        else:
            # FIXME: should raise an error here? the old code did nothing
            pass

    # calculate the destination name
    # (no os.path.sep in the joined probe => remote shell is Windows-style;
    # normalize backslashes for the local path)
    if os.path.sep not in self._shell.join_path('a', ''):
        source_local = source.replace('\\', '/')
    else:
        source_local = source

    dest = os.path.expanduser(dest)
    if flat:
        if dest.endswith("/"):
            # if the path ends with "/", we'll use the source filename as the
            # destination filename
            base = os.path.basename(source_local)
            dest = os.path.join(dest, base)
        if not dest.startswith("/"):
            # if dest does not start with "/", we'll assume a relative path
            dest = self._loader.path_dwim(dest)
    else:
        # files are saved in dest dir, with a subdir for each host, then the filename
        dest = "%s/%s/%s" % (self._loader.path_dwim(dest), self._connection_info.remote_addr, source_local)

    dest = dest.replace("//", "/")

    if remote_checksum in ('0', '1', '2', '3', '4'):
        # these don't fail because you may want to transfer a log file that possibly MAY exist
        # but keep going to fetch other log files
        if remote_checksum == '0':
            result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False)
        elif remote_checksum == '1':
            if fail_on_missing:
                result = dict(failed=True, msg="the remote file does not exist", file=source)
            else:
                result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
        elif remote_checksum == '2':
            result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False)
        elif remote_checksum == '3':
            result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False)
        elif remote_checksum == '4':
            result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False)
        return result

    # calculate checksum for the local file
    local_checksum = checksum(dest)

    if remote_checksum != local_checksum:
        # create the containing directories, if needed.
        # BUGFIX: tolerate the directory appearing between the isdir test
        # and makedirs (e.g. two fetches racing into the same dest tree).
        dest_dir = os.path.dirname(dest)
        if not os.path.isdir(dest_dir):
            try:
                os.makedirs(dest_dir)
            except OSError:
                if not os.path.isdir(dest_dir):
                    raise

        # fetch the file and check for changes
        if remote_data is None:
            self._connection.fetch_file(source, dest)
        else:
            # BUGFIX: remote_data is the raw bytes from base64.b64decode;
            # write in binary mode (text mode breaks on Python 3 and can
            # mangle newlines), and use a context manager so the handle
            # is closed even if the write fails.
            with open(dest, 'wb') as f:
                f.write(remote_data)
        new_checksum = secure_hash(dest)
        # For backwards compatibility.  We'll return None on FIPS enabled
        # systems
        try:
            new_md5 = md5(dest)
        except ValueError:
            new_md5 = None

        if validate_checksum and new_checksum != remote_checksum:
            return dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source,
                        dest=dest, remote_md5sum=None, checksum=new_checksum,
                        remote_checksum=remote_checksum)
        return dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None,
                    checksum=new_checksum, remote_checksum=remote_checksum)
    else:
        # Checksums match: nothing transferred, report unchanged.
        # For backwards compatibility.  We'll return None on FIPS enabled
        # systems
        try:
            local_md5 = md5(dest)
        except ValueError:
            local_md5 = None
        return dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)