def move(self):
    try:
        log.debug("Moving '%s' to '%s'", self.config['move_from_remote'], self.config['move_to_remote'])
        # build cmd
        cmd = "%s %s %s %s --config=%s" % (
            cmd_quote(self.rclone_binary_path),
            'move',
            cmd_quote(self.config['move_from_remote']),
            cmd_quote(self.config['move_to_remote']),
            cmd_quote(self.rclone_config_path))
        extras = self.__extras2string()
        if len(extras) > 2:
            cmd += ' %s' % extras
        excludes = self.__excludes2string()
        if len(excludes) > 2:
            cmd += ' %s' % excludes
        if self.plex.get('enabled'):
            r = re.compile(r"https?://(www\.)?")
            rc_url = r.sub('', self.plex['rclone']['url']).strip().strip('/')
            cmd += ' --rc --rc-addr=%s' % cmd_quote(rc_url)
        if self.dry_run:
            cmd += ' --dry-run'
        # exec
        log.debug("Using: %s", cmd)
        process.execute(cmd, logs=True)
        return True
    except Exception:
        log.exception("Exception occurred while moving '%s' to '%s':",
                      self.config['move_from_remote'], self.config['move_to_remote'])
        return False

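# Every snippet in this section assumes a `cmd_quote` alias for POSIX shell
# quoting. A minimal compatibility import, the same pattern the pcluster
# `command` helper further below uses inline:
try:
    from shlex import quote as cmd_quote  # Python 3.3+
except ImportError:
    from pipes import quote as cmd_quote  # Python 2 fallback
# cmd_quote leaves safe tokens untouched and single-quotes anything containing
# shell metacharacters, e.g. cmd_quote("My Movies (2019)") -> "'My Movies (2019)'"
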
def virt_costumize_script(image, script_file, args_str='', log_file=None, verbose=False):
    # NOTE: cannot specify a constant destination file :(
    # --copy-in {script_file}:/root/customize.sh did not work
    # LIBGUESTFS_BACKEND=direct, file permissions
    base_name = os.path.basename(script_file)
    cmd = '/root/' + base_name + ' ' + args_str
    verb = '--verbose' if verbose else ''
    (r, log) = localsh.run_log(("LIBGUESTFS_BACKEND=direct "
                                "virt-customize {verbose} --add {image} "
                                "--memsize 1024 "
                                "--copy-in {script_file}:/root/ "
                                "--chmod 755:/root/{base_name} "
                                "--run-command {cmd} "
                                "--selinux-relabel ").format(
        image=cmd_quote(image),
        script_file=cmd_quote(script_file),
        base_name=cmd_quote(base_name),
        cmd=cmd_quote(cmd),
        verbose=verb))
    print(log)
    if log_file:
        with open(log_file, "w") as f:
            f.write(log)
    if r:
        raise Exception("virt_costumize failed")

def sync(self, **kwargs):
    if not self.instance_id:
        log.error("Sync was called, but no instance_id was found, aborting...")
        return False, None, None
    kwargs.update(self.kwargs)
    # create RcloneSyncer object
    rclone = RcloneSyncer(self.sync_from_config, self.sync_to_config, **kwargs)
    # start sync
    log.info("Starting sync for instance: %r", self.instance_id)
    resp, delayed_check, delayed_trigger = rclone.sync(self._wrap_command)
    log.info("Finished syncing for instance: %r", self.instance_id)
    # copy rclone.conf back from instance (in case refresh tokens were used) (cp seems not to be working atm)
    # cmd = "%s --region=%s cp %s:/root/.config/rclone/rclone.conf %s" % (
    #     cmd_quote(self.tool_path), cmd_quote(self.region), cmd_quote(self.instance_id),
    #     cmd_quote(os.path.dirname(kwargs['rclone_config'])))
    # Use exec cat > rclone config until cp is resolved
    cmd = "%s --region=%s exec %s cat /root/.config/rclone/rclone.conf > %s" % (
        cmd_quote(self.tool_path), cmd_quote(self.region), cmd_quote(self.instance_id),
        cmd_quote(kwargs['rclone_config']))
    log.debug("Using: %s", cmd)
    log.debug("Copying rclone config from instance %r to: %r", self.instance_id, kwargs['rclone_config'])
    config_resp = process.popen(cmd, shell=True)
    if config_resp is None or len(config_resp) >= 2:
        log.error("Unexpected response while copying rclone config from instance: %s", config_resp)
    else:
        log.info("Copied rclone.conf from instance")
    return resp, delayed_check, delayed_trigger

def upload(self, callback):
    try:
        log.debug("Uploading '%s' to '%s'", self.config['upload_folder'], self.config['upload_remote'])
        # build cmd
        cmd = "rclone move %s %s" % (
            cmd_quote(self.config['upload_folder']),
            cmd_quote(self.config['upload_remote']))
        extras = self.__extras2string()
        if len(extras) > 2:
            cmd += ' %s' % extras
        excludes = self.__excludes2string()
        if len(excludes) > 2:
            cmd += ' %s' % excludes
        if self.dry_run:
            cmd += ' --dry-run'
        # exec
        log.debug("Using: %s", cmd)
        process.execute(cmd, callback)
        return True
    except Exception:
        log.exception("Exception occurred while uploading '%s' to remote: %s",
                      self.config['upload_folder'], self.name)
        return False

def get_reproduction_cmd(self, verb, jid):
    """Get a command line to reproduce this stage with the proper environment."""
    # Define the base env command
    get_env_cmd = 'catkin {} --get-env {}'.format(verb, jid)
    # Add additional env args
    env_overrides_formatted = ' '.join([
        '{}={}'.format(k, cmd_quote(v))
        for k, v in self.env_overrides.items()
    ])
    # Define the actual command to reproduce
    cmd_str = ' '.join([cmd_quote(t) for t in self.async_execute_process_kwargs['cmd']])
    # Define the command to run the subcommand
    env_cmd = 'catkin env -si {} {}'.format(env_overrides_formatted, cmd_str)
    # Return the full command
    return 'cd {}; {} | {}; cd -'.format(
        self.async_execute_process_kwargs['cwd'], get_env_cmd, env_cmd)

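# A hedged illustration of the string get_reproduction_cmd() returns; the verb,
# job id, cwd, override, and cmd values here are hypothetical:
#   cd /ws/build/foo; catkin build --get-env foo | catkin env -si CC=clang make -j4; cd -
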
def show_sections(config):
    if os.name == "nt":
        final_cmd = '""%s" --list"' % config["PLEX_SCANNER"]
    else:
        cmd = "export LD_LIBRARY_PATH=" + config["PLEX_LD_LIBRARY_PATH"] + ";"
        if not config["USE_DOCKER"]:
            cmd += "export PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR=" + config["PLEX_SUPPORT_DIR"] + ";"
        cmd += config["PLEX_SCANNER"] + " --list"
        if config["USE_DOCKER"]:
            final_cmd = "docker exec -u %s -it %s bash -c %s" % (
                cmd_quote(config["PLEX_USER"]),
                cmd_quote(config["DOCKER_NAME"]),
                cmd_quote(cmd),
            )
        elif config["USE_SUDO"]:
            final_cmd = 'sudo -u %s bash -c "%s"' % (config["PLEX_USER"], cmd)
        else:
            final_cmd = cmd
    logger.info("Using Plex Scanner")
    print("\n")
    print("Plex Sections:")
    print("==============")
    logger.debug(final_cmd)
    os.system(final_cmd)

def _delete(self, filename):
    commandline = "lftp -c \"source %s; cd %s; rm %s\"" % (
        cmd_quote(self.tempname),
        cmd_quote(self.remote_path),
        cmd_quote(filename))
    log.Debug("CMD: %s" % commandline)
    _, l, e = self.subprocess_popen(commandline)
    log.Debug("STDERR:\n"
              "%s" % (e))
    log.Debug("STDOUT:\n"
              "%s" % (l))

def move(self):
    try:
        log.debug("Moving '%s' to '%s'", self.config['move_from_remote'], self.config['move_to_remote'])
        # build cmd
        cmd = "%s %s %s %s --config=%s" % (
            cmd_quote(self.rclone_binary_path),
            'move',
            cmd_quote(self.config['move_from_remote']),
            cmd_quote(self.config['move_to_remote']),
            cmd_quote(self.rclone_config_path))
        extras = self.__extras2string()
        if len(extras) > 2:
            cmd += ' %s' % extras
        excludes = self.__excludes2string()
        if len(excludes) > 2:
            cmd += ' %s' % excludes
        if self.dry_run:
            cmd += ' --dry-run'
        if self.use_rc:
            cmd += ' --rc --rc-addr=%s' % cmd_quote('localhost:7949')
        # exec
        log.debug("Using: %s", cmd)
        process.execute(cmd, logs=True)
        return True
    except Exception:
        log.exception("Exception occurred while moving '%s' to '%s':",
                      self.config['move_from_remote'], self.config['move_to_remote'])
        return False

def build_cmd(config, section, scan_path, scan_op):
    # build plex scanner command
    logger.info("Building Plex Scan Command")
    if os.name == 'nt':
        final_cmd = '""%s" --scan --refresh --section %s --directory "%s""' \
                    % (config['PLEX_SCANNER'], str(section), scan_path)
    else:
        cmd = 'export LD_LIBRARY_PATH=' + config['PLEX_LD_LIBRARY_PATH'] + ';'
        if not config['USE_DOCKER']:
            cmd += 'export PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR=' + config['PLEX_SUPPORT_DIR'] + ';'
        if scan_op == 'scan':
            cmd += config['PLEX_SCANNER'] + ' --scan --refresh --section ' + str(section) + \
                   ' --directory ' + cmd_quote(scan_path)
        elif scan_op == 'analyze':
            media_id = get_media_id(config, scan_path)
            cmd += config['PLEX_SCANNER'] + ' --analyze -o ' + str(media_id)
        elif scan_op == 'deep':
            media_id = get_media_id(config, scan_path)
            cmd += config['PLEX_SCANNER'] + ' --analyze-deeply -o ' + str(media_id)
        if config['USE_DOCKER']:
            final_cmd = 'docker exec -i %s bash -c %s' % (
                cmd_quote(config['DOCKER_NAME']), cmd_quote(cmd))
        elif config['USE_SUDO']:
            final_cmd = 'sudo -u %s bash -c %s' % (config['PLEX_USER'], cmd_quote(cmd))
        else:
            final_cmd = cmd
    return final_cmd

def _get(self, remote_filename, local_path):
    commandline = "lftp -c \"source %s; get %s -o %s\"" % (
        cmd_quote(self.tempname),
        cmd_quote(self.remote_path) + remote_filename,
        cmd_quote(local_path.name))
    log.Debug("CMD: %s" % commandline)
    _, l, e = self.subprocess_popen(commandline)
    log.Debug("STDERR:\n"
              "%s" % (e))
    log.Debug("STDOUT:\n"
              "%s" % (l))

def setup(self, **kwargs):
    if not self.instance_id:
        log.error("Setup was called, but no instance_id was found, aborting...")
        return False
    if 'rclone_config' not in kwargs:
        log.error("Setup was called, but no rclone_config was found, aborting...")
        self.destroy()
        return False
    # install unzip
    cmd_exec = "apt-get -qq update && apt-get -y -qq install unzip && which unzip"
    cmd = "%s --region=%s exec %s %s" % (
        cmd_quote(self.tool_path), cmd_quote(self.region), cmd_quote(self.instance_id),
        cmd_quote(cmd_exec))
    log.debug("Using: %s", cmd)
    log.debug("Installing unzip to instance: %r", self.instance_id)
    resp = process.popen(cmd)
    if not resp or '/usr/bin/unzip' not in resp.lower():
        log.error("Unexpected response while installing unzip: %s", resp)
        self.destroy()
        return False
    log.info("Installed unzip")
    # install rclone to instance
    cmd_exec = "cd ~ && curl -sO https://downloads.rclone.org/rclone-current-linux-amd64.zip && " \
               "unzip -oq rclone-current-linux-amd64.zip && cd rclone-*-linux-amd64 && " \
               "cp -rf rclone /usr/bin/ && cd ~ && rm -rf rclone-* && chown root:root /usr/bin/rclone && " \
               "chmod 755 /usr/bin/rclone && mkdir -p /root/.config/rclone && which rclone"
    cmd = "%s --region=%s exec %s %s" % (
        cmd_quote(self.tool_path), cmd_quote(self.region), cmd_quote(self.instance_id),
        cmd_quote(cmd_exec))
    log.debug("Using: %s", cmd)
    log.debug("Installing rclone to instance: %r", self.instance_id)
    resp = process.popen(cmd)
    if not resp or '/usr/bin/rclone' not in resp.lower():
        log.error("Unexpected response while installing rclone: %s", resp)
        self.destroy()
        return False
    log.info("Installed rclone")
    # copy rclone.conf to instance
    cmd = "%s --region=%s cp %s %s:/root/.config/rclone/" % (
        cmd_quote(self.tool_path), cmd_quote(self.region),
        cmd_quote(kwargs['rclone_config']), cmd_quote(self.instance_id))
    log.debug("Using: %s", cmd)
    log.debug("Copying rclone config %r to instance: %r", kwargs['rclone_config'], self.instance_id)
    resp = process.popen(cmd)
    if resp is None or len(resp) >= 2:
        log.error("Unexpected response while copying rclone config: %s", resp)
        self.destroy()
        return False
    log.info("Copied across rclone.conf")
    log.info("Successfully setup instance: %r", self.instance_id)
    return True

def _put(self, source_path, remote_filename):
    commandline = "lftp -c \"source %s; mkdir -p %s; put %s -o %s\"" % (
        self.tempname,
        cmd_quote(self.remote_path),
        cmd_quote(source_path.name),
        cmd_quote(self.remote_path) + remote_filename)
    log.Debug("CMD: %s" % commandline)
    s, l, e = self.subprocess_popen(commandline)
    log.Debug("STATUS: %s" % s)
    log.Debug("STDERR:\n"
              "%s" % (e))
    log.Debug("STDOUT:\n"
              "%s" % (l))

def _get(self, remote_filename, local_path):
    if isinstance(remote_filename, b"".__class__):
        remote_filename = util.fsdecode(remote_filename)
    commandline = u"lftp -c \"source %s; get %s -o %s\"" % (
        cmd_quote(self.tempname),
        cmd_quote(self.remote_path) + remote_filename,
        cmd_quote(local_path.uc_name))
    log.Debug(u"CMD: %s" % commandline)
    _, l, e = self.subprocess_popen(commandline)
    log.Debug(u"STDERR:\n"
              u"%s" % (e))
    log.Debug(u"STDOUT:\n"
              u"%s" % (l))

def get_jobs(user="******", queue="*"):
    """Get list of dicts of job info"""
    user = cmd_quote(user)
    queue = cmd_quote(queue)
    if FAKE:
        jobs_text = open_file("test/jobs.txt")
    else:
        jobs_text = run_command(["qstat", "-u", user, "-q", queue])
    jobs = process_jobs_xml(jobs_text)
    return jobs

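# Minimal usage sketch for get_jobs(); 'alice' and 'batch' are hypothetical,
# and the keys of each job dict depend on process_jobs_xml():
#   for job in get_jobs(user='alice', queue='batch'):
#       print(job)
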
def upload_files(self, file_defs):
    print "Uploading files ... "
    for curr_file_def in file_defs:
        full_local_filepath, dest_filename, full_remote_dir_path, num_files, overwrite = curr_file_def
        # Find all the files that match our full_local_filepath (which may contain a pattern)
        local_files = sorted(glob.glob(full_local_filepath))
        try:
            if len(local_files) > 0:
                # Ensure the remote dir exists
                # TODO: How expensive are calls to mkdir if the dir already exists,
                # better to check dir exists first?
                subprocess.check_call("ssh " + remote_account + " 'mkdir -p " + full_remote_dir_path + "'",
                                      shell=True)
                curr_file_num = 1
                for curr_file in local_files:
                    if (num_files != 0) and (curr_file_num > num_files):
                        break
                    # Deal with the case where we want to alter the destination filename
                    curr_src_full_filename = os.path.basename(curr_file)
                    if dest_filename == "":
                        full_remote_filepath = cmd_quote(
                            os.path.join(full_remote_dir_path, curr_src_full_filename))
                    else:
                        filename, extension = os.path.splitext(curr_src_full_filename)
                        full_remote_filepath = cmd_quote(
                            os.path.join(full_remote_dir_path, dest_filename + extension))
                    # Deal with the case where we do not want to overwrite the dest file
                    file_num = 2
                    if not overwrite:
                        full_remote_filename = os.path.basename(full_remote_filepath)
                        while subprocess.call(['ssh', remote_account, 'test -e ' + full_remote_filepath]) == 0:
                            filename_no_ext, filename_ext = os.path.splitext(full_remote_filename)
                            full_remote_filepath = cmd_quote(
                                os.path.join(full_remote_dir_path,
                                             filename_no_ext + "_" + str(file_num) + filename_ext))
                            file_num += 1
                    subprocess.check_call(
                        "scp " + curr_file + " " + remote_account + ":" + full_remote_filepath,
                        shell=True)
                    curr_file_num += 1
        except subprocess.CalledProcessError as e:
            print "Error uploading files: ", e.returncode
            raise
    print "... upload finished."

def _put(self, source_path, remote_filename):
    if isinstance(remote_filename, b"".__class__):
        remote_filename = util.fsdecode(remote_filename)
    commandline = u"lftp -c \"source %s; mkdir -p %s; put %s -o %s\"" % (
        self.tempname,
        cmd_quote(self.remote_path),
        cmd_quote(source_path.uc_name),
        cmd_quote(self.remote_path) + util.fsdecode(remote_filename))
    log.Debug(u"CMD: %s" % commandline)
    s, l, e = self.subprocess_popen(commandline)
    log.Debug(u"STATUS: %s" % s)
    log.Debug(u"STDERR:\n"
              u"%s" % (e))
    log.Debug(u"STDOUT:\n"
              u"%s" % (l))

def __init__(self, instance, plugin):
    super(DaemonDevice, self).__init__(instance, plugin)
    self.namePattern = k_psSearch_daemon(
        processname=self.props['processName'],
        args=self.props['startArgs'])
    self.onCmd = k_daemonStartCmd(
        processname=cmd_quote(self.props['processName']),
        apppath=cmd_quote(self.props['applicationPath']),
        args=cmd_quote(self.props['startArgs']))
    self.defaultOffCmd = k_daemonStopCmd(
        processname=cmd_quote(self.props['processName']))

def analyze_item(config, scan_path):
    if not os.path.exists(config['PLEX_DATABASE_PATH']):
        logger.warning("Could not analyze '%s' because the Plex database could not be found.", scan_path)
        return
    # get files metadata_item_id
    metadata_item_ids = get_file_metadata_ids(config, scan_path)
    if metadata_item_ids is None or not len(metadata_item_ids):
        logger.warning("Aborting analysis of '%s' because no 'metadata_item_id' could be found for it.",
                       scan_path)
        return
    metadata_item_id = ','.join(str(x) for x in metadata_item_ids)
    # build Plex analyze command
    analyze_type = 'analyze-deeply' if config['PLEX_ANALYZE_TYPE'].lower() == 'deep' else 'analyze'
    if os.name == 'nt':
        final_cmd = '"%s" --%s --item %s' % (config['PLEX_SCANNER'], analyze_type, metadata_item_id)
    else:
        cmd = 'export LD_LIBRARY_PATH=' + config['PLEX_LD_LIBRARY_PATH'] + ';'
        if not config['USE_DOCKER']:
            cmd += 'export PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR=' + config['PLEX_SUPPORT_DIR'] + ';'
        cmd += config['PLEX_SCANNER'] + ' --' + analyze_type + ' --item ' + metadata_item_id
        if config['USE_DOCKER']:
            final_cmd = 'docker exec -u %s -i %s bash -c %s' % \
                        (cmd_quote(config['PLEX_USER']), cmd_quote(config['DOCKER_NAME']), cmd_quote(cmd))
        elif config['USE_SUDO']:
            final_cmd = 'sudo -u %s bash -c %s' % (config['PLEX_USER'], cmd_quote(cmd))
        else:
            final_cmd = cmd
    # begin analysis
    logger.debug("Starting %s analysis of 'metadata_item': %s",
                 'deep' if config['PLEX_ANALYZE_TYPE'].lower() == 'deep' else 'basic', metadata_item_id)
    logger.debug(final_cmd)
    if os.name == 'nt':
        utils.run_command(final_cmd)
    else:
        utils.run_command(final_cmd.encode("utf-8"))
    logger.info("Finished %s analysis of 'metadata_item': %s",
                'deep' if config['PLEX_ANALYZE_TYPE'].lower() == 'deep' else 'basic', metadata_item_id)

def analyze_item(config, scan_path):
    if not os.path.exists(config["PLEX_DATABASE_PATH"]):
        logger.warning("Could not analyze '%s' because the Plex database could not be found.", scan_path)
        return
    # get files metadata_item_id
    metadata_item_ids = get_file_metadata_ids(config, scan_path)
    if metadata_item_ids is None or not metadata_item_ids:
        logger.warning(
            "Aborting analysis of '%s' because no 'metadata_item_id' could be found for it.",
            scan_path,
        )
        return
    metadata_item_id = ",".join(str(x) for x in metadata_item_ids)
    # build Plex analyze command
    analyze_type = "analyze-deeply" if config["PLEX_ANALYZE_TYPE"].lower() == "deep" else "analyze"
    if os.name == "nt":
        final_cmd = '"%s" --%s --item %s' % (config["PLEX_SCANNER"], analyze_type, metadata_item_id)
    else:
        cmd = "export LD_LIBRARY_PATH=" + config["PLEX_LD_LIBRARY_PATH"] + ";"
        if not config["USE_DOCKER"]:
            cmd += "export PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR=" + config["PLEX_SUPPORT_DIR"] + ";"
        cmd += config["PLEX_SCANNER"] + " --" + analyze_type + " --item " + metadata_item_id
        if config["USE_DOCKER"]:
            final_cmd = "docker exec -u %s -i %s bash -c %s" % (
                cmd_quote(config["PLEX_USER"]),
                cmd_quote(config["DOCKER_NAME"]),
                cmd_quote(cmd),
            )
        elif config["USE_SUDO"]:
            final_cmd = "sudo -u %s bash -c %s" % (config["PLEX_USER"], cmd_quote(cmd))
        else:
            final_cmd = cmd
    # begin analysis
    logger.debug(
        "Starting %s analysis of 'metadata_item': %s",
        "deep" if config["PLEX_ANALYZE_TYPE"].lower() == "deep" else "basic",
        metadata_item_id,
    )
    logger.debug(final_cmd)
    utils.run_command(final_cmd.encode("utf-8"))
    logger.info(
        "Finished %s analysis of 'metadata_item': %s",
        "deep" if config["PLEX_ANALYZE_TYPE"].lower() == "deep" else "basic",
        metadata_item_id,
    )

def __excludes2string(self):
    return ' '.join(
        "--exclude=%s" % (
            cmd_quote(glob.escape(value) if value.startswith(os.path.sep) else value)
            if isinstance(value, str) else value)
        for value in self.config['rclone_excludes']).replace('=None', '').strip()

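# Illustration (hypothetical excludes list) of what __excludes2string() renders:
#   ['**partial~', '*.unionfs/**'] -> "--exclude='**partial~' --exclude='*.unionfs/**'"
# Values starting with os.path.sep are additionally glob.escape()d before
# quoting, so literal paths cannot be misread as glob patterns.
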
def bindings_sanity_check(bindings, data_dir, check_mode):
    """Performs sanity-checking on the bindings list and related arguments.

    Return: <bindings> <error>
    """
    # bindings is a list of the following:
    # {
    #   device: [REQUIRED]
    #   encryption_password: a password
    #   encryption_key: /data_dir/filename
    #   encryption_key_src: /path/to/file/on/controlnode
    #   slot: 1 (default)
    #   state: present (default) | absent
    #   password_temporary: no (default)
    #   threshold: 1 (default)
    #   servers: [] (default)
    # }
    if not bindings:
        return None, {"msg": "No devices set"}
    for idx, binding in enumerate(bindings):
        # Set up the state.
        if "state" not in binding:
            bindings[idx]["state"] = "present"
        else:
            if binding["state"] not in ["present", "absent"]:
                errmsg = "state must be present or absent"
                return None, {"msg": errmsg}
        # Make sure we have the required information for the binding.
        if "device" not in binding:
            errmsg = "Each binding must have a device set"
            return None, {"msg": errmsg}
        # When running under check mode, encryption_key is not used, which means we
        # also do not need to have data_dir defined.
        if ("encryption_key" in binding or "encryption_key_src" in binding) and not check_mode:
            if not data_dir:
                return None, {"msg": "data_dir needs to be defined"}
            if "encryption_key" in binding:
                basefile = os.path.basename(binding["encryption_key"])
            else:
                basefile = os.path.basename(binding["encryption_key_src"])
            keyfile = os.path.join(data_dir, basefile)
            bindings[idx]["encryption_key"] = cmd_quote(keyfile)
        # The defaults for the remaining binding attributes.
        binding_defaults = {
            "slot": 1,
            "threshold": 1,
            "password_temporary": False,
            "servers": [],
        }
        for attr in binding_defaults:
            if attr not in bindings[idx]:
                bindings[idx][attr] = binding_defaults[attr]
    return bindings, None

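# A hedged example of the bindings structure this validates, mirroring the
# docstring above; the device, password, and server values are hypothetical:
example_bindings = [{
    "device": "/dev/sda1",
    "encryption_password": "secret",
    "slot": 2,
    "state": "present",
    "servers": ["http://tang.example.com"],
}]
# bindings_sanity_check(example_bindings, "/var/tmp/data", check_mode=False)
# would fill in the threshold=1 and password_temporary=False defaults and
# return the normalized list with no error.
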
def execute_command(cmd, cwd=None, capture=False, env=None):
    # type: (t.List[str], t.Optional[str], bool, t.Optional[t.Dict[str, str]]) -> None
    """Execute the specified command."""
    log('Execute command: %s' % ' '.join(cmd_quote(c) for c in cmd), verbosity=1)
    cmd_bytes = [to_bytes(c) for c in cmd]
    if capture:
        stdout = subprocess.PIPE
        stderr = subprocess.PIPE
    else:
        stdout = None
        stderr = None
    cwd_bytes = to_optional_bytes(cwd)
    process = subprocess.Popen(cmd_bytes, cwd=cwd_bytes, stdin=devnull(),
                               stdout=stdout, stderr=stderr, env=env)  # pylint: disable=consider-using-with
    stdout_bytes, stderr_bytes = process.communicate()
    stdout_text = to_optional_text(stdout_bytes) or u''
    stderr_text = to_optional_text(stderr_bytes) or u''
    if process.returncode != 0:
        raise SubprocessError(cmd, process.returncode, stdout_text, stderr_text)

def _list(self):
    # Do a long listing to avoid connection reset
    # remote_dir = urllib.unquote(self.parsed_url.path.lstrip('/')).rstrip()
    remote_dir = urllib.parse.unquote(self.parsed_url.path)
    # print remote_dir
    quoted_path = cmd_quote(self.remote_path)
    # failing to cd into the folder might be because it was not created already
    commandline = u"lftp -c \"source %s; ( cd %s && ls ) || ( mkdir -p %s && cd %s && ls )\"" % (
        cmd_quote(self.tempname), quoted_path, quoted_path, quoted_path)
    log.Debug(u"CMD: %s" % commandline)
    _, l, e = self.subprocess_popen(commandline)
    log.Debug(u"STDERR:\n"
              u"%s" % (e))
    log.Debug(u"STDOUT:\n"
              u"%s" % (l))
    # Look for our files as the last element of a long list line
    return [x.split()[-1] for x in l.split(b'\n') if x]

def ffmpeg_pipe_stream(stream_url):
    global PLEX_FFMPEG_PATH, PLEX_BUFFER_SIZE
    pipe_cmd = "%s -re -i %s " \
               "-codec copy " \
               "-nostats " \
               "-loglevel 0 " \
               "-bsf:v h264_mp4toannexb " \
               "-f mpegts " \
               "-tune zerolatency " \
               "pipe:1" % (PLEX_FFMPEG_PATH, cmd_quote(stream_url))
    p = subprocess.Popen(shlex.split(pipe_cmd), stdout=subprocess.PIPE, bufsize=-1)
    try:
        pipes = [p.stdout]
        while pipes:
            ready, _, _ = select(pipes, [], [])
            for pipe in ready:
                data = pipe.read(PLEX_BUFFER_SIZE << 10)
                if data:
                    yield data
                else:
                    pipes.remove(pipe)
    except Exception:
        pass
    except GeneratorExit:
        pass
    try:
        p.terminate()
    except Exception:
        pass
    return

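# A minimal consumer sketch for the generator above; the function name and
# arguments are hypothetical:
def save_pipe_to_file(stream_url, out_path):
    with open(out_path, 'wb') as out:
        for chunk in ffmpeg_pipe_stream(stream_url):
            out.write(chunk)
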
def do_rabbit_addusers(cname):
    self = facility.get_component(cname)
    pwd = cmd_quote(util.get_keymgr()(self.name, 'openstack'))
    localsh.run("""rabbitmqctl add_user openstack {passwd} ||
rabbitmqctl change_password openstack {passwd} &&
rabbitmqctl set_permissions -p / openstack ".*" ".*" ".*" """.format(passwd=pwd))

def __init__(self, cmd, status=0, stdout=None, stderr=None, runtime=None):
    """
    :type cmd: list[str]
    :type status: int
    :type stdout: str | None
    :type stderr: str | None
    :type runtime: float | None
    """
    message = 'Command "%s" returned exit status %s.\n' % (
        ' '.join(cmd_quote(c) for c in cmd), status)
    if stderr:
        message += '>>> Standard Error\n'
        message += '%s%s\n' % (stderr.strip(), Display.clear)
    if stdout:
        message += '>>> Standard Output\n'
        message += '%s%s\n' % (stdout.strip(), Display.clear)
    message = message.strip()
    super(SubprocessError, self).__init__(message)
    self.cmd = cmd
    self.message = message
    self.status = status
    self.stdout = stdout
    self.stderr = stderr
    self.runtime = runtime

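# Hedged usage sketch for the exception above (the command and values are
# hypothetical); the caller is expected to have captured stdout/stderr itself:
#   raise SubprocessError(['git', 'status'], status=128,
#                         stderr='fatal: not a git repository')
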
def delete_key(self):
    """Delete the server's public key from remote host.

    For example::

        wrapper = RemoteWrapper(hostname, username)
        with wrapper.connect(password, public_key_filename):
            wrapper.delete_key()
    """
    if self._public_key_filename is None:
        self._public_key_filename = os.path.expanduser('~/.ssh/id_rsa.pub')
    if self._client is None:
        raise ValueError('Wrapper must be connected before delete_key is called')
    with open(self._public_key_filename, 'rt') as f:
        key = f.read().strip()
    self.chdir('/tmp')
    filename = 'django-remote-submission-{}'.format(uuid.uuid4())
    with self.open(filename, 'wt') as f:
        program = textwrap.dedent('''\
        sed -i.bak -e /{key}/d $HOME/.ssh/authorized_keys
        '''.format(key=cmd_quote(key.replace('/', r'\/'))))
        f.write(program)
    args = [
        'bash',
        '/tmp/' + filename,
    ]
    self.exec_command(args, '/')

def get_size(path, excludes=None):
    try:
        cmd = "du -s --block-size=1G"
        if excludes:
            for item in excludes:
                cmd += ' --exclude=%s' % cmd_quote(item)
        cmd += ' %s | cut -f1' % cmd_quote(path)
        log.debug("Using: %s", cmd)
        # get size
        proc = os.popen(cmd)
        data = proc.read().strip("\n")
        proc.close()
        return int(data) if data.isdigit() else 0
    except Exception:
        log.exception("Exception getting size of %r: ", path)
        return 0

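# Usage sketch (the path and exclude are hypothetical):
#   get_size('/mnt/unionfs/Media', excludes=['downloads'])
# Because du runs with --block-size=1G and the output is piped through
# `cut -f1`, the return value is a whole number of GiB, rounded up.
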
def format_env_dict(environ):
    """Format an environment dict for printing to console similarly to `typeset` builtin."""
    return '\n'.join([
        'typeset -x {}={}'.format(k, cmd_quote(v))
        for k, v in environ.items()
    ])

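# Illustration of the output format (hypothetical environment):
#   format_env_dict({'PATH': '/usr/bin', 'MSG': 'hello world'})
# produces:
#   typeset -x PATH=/usr/bin
#   typeset -x MSG='hello world'
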
def __extras2string(self):
    return ' '.join(
        "%s=%s" % (key, cmd_quote(value) if isinstance(value, str) else value)
        for (key, value) in self.rclone_extras.items()).replace('=None', '').strip()

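# Illustration (hypothetical extras dict) of the rendered option string:
#   {'--transfers': 8, '--bwlimit': '10M', '--verbose': None}
#   -> "--transfers=8 --bwlimit=10M --verbose"
# The trailing .replace('=None', '') is what turns value-less entries into
# bare flags.
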
def format_usage(program_name, description, commands=None, options=()):
    """
    Construct the usage text.

    Parameters
    ----------
    program_name : str
        Usually the name of the python file that contains the experiment.
    description : str
        Description of this experiment (usually the docstring).
    commands : dict[str, func]
        Dictionary of supported commands. Each entry should be a tuple of (name, function).
    options : list[sacred.commandline_options.CommandLineOption]
        A list of all supported commandline options.

    Returns
    -------
    str
        The complete formatted usage text for this experiment.
        It adheres to the structure required by ``docopt``.
    """
    usage = USAGE_TEMPLATE.format(
        program_name=cmd_quote(program_name),
        description=description.strip() if description else '',
        options=_format_options_usage(options),
        arguments=_format_arguments_usage(options),
        commands=_format_command_usage(commands)
    )
    return usage

def __parse_argument(self, argument):
    long_option_match = self.long_option_pattern.match(argument)
    short_option_match = self.short_option_pattern.match(argument)
    if long_option_match:
        groupdict = long_option_match.groupdict()
        return '--{option}={value}'.format(
            option=groupdict['option'],
            value=cmd_quote(groupdict['value']),
        )
    elif short_option_match:
        groupdict = short_option_match.groupdict()
        return '-{option}={value}'.format(
            option=groupdict['option'],
            value=cmd_quote(groupdict['value']),
        )
    else:
        return cmd_quote(argument)

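# Example inputs/outputs for __parse_argument(), assuming the two class
# patterns match "--option=value" and "-o=value" forms:
#   '--name=hello world' -> "--name='hello world'"
#   '-n=hello world'     -> "-n='hello world'"
#   'plain text'         -> "'plain text'"
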
def print_creds(environment_overrides):
    exports = []
    for var, value in environment_overrides.items():
        if value is not None:
            exports.append("export {}={}".format(var, cmd_quote(value)))
        else:
            exports.append("unset {}".format(var))
    print("\n".join(exports))

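# Usage sketch (variable names hypothetical); the output is meant to be
# eval'd by a POSIX shell:
#   print_creds({'AWS_PROFILE': 'dev', 'AWS_SESSION_TOKEN': None})
# prints:
#   export AWS_PROFILE=dev
#   unset AWS_SESSION_TOKEN
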
def _list(self):
    # Do a long listing to avoid connection reset
    # remote_dir = urllib.unquote(self.parsed_url.path.lstrip('/')).rstrip()
    remote_dir = urllib.unquote(self.parsed_url.path)
    # print remote_dir
    quoted_path = cmd_quote(self.remote_path)
    # failing to cd into the folder might be because it was not created already
    commandline = "lftp -c \"source %s; ( cd %s && ls ) || ( mkdir -p %s && cd %s && ls )\"" % (
        cmd_quote(self.tempname), quoted_path, quoted_path, quoted_path)
    log.Debug("CMD: %s" % commandline)
    _, l, e = self.subprocess_popen(commandline)
    log.Debug("STDERR:\n"
              "%s" % (e))
    log.Debug("STDOUT:\n"
              "%s" % (l))
    # Look for our files as the last element of a long list line
    return [x.split()[-1] for x in l.split('\n') if x]

def convert(fname, pages=None):
    try:
        # attempt to find a pdftotext binary
        cmd = "where" if platform.system() == "Windows" else "which"
        cmd = subprocess.check_output(cmd + ' pdftotext', shell=True)
        cmd = cmd.decode('utf8').strip()
        if not cmd:
            raise EnvironmentError("pdftotext not found")
        return subprocess.check_output(' '.join([cmd, cmd_quote(fname), '-']), shell=True)
    except (EnvironmentError, subprocess.CalledProcessError):
        # logging.warning("pdftotext not found, defaulting to pdfminer.")
        return convert_miner(fname, pages=pages)

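# Usage sketch (filename hypothetical); the quoted path keeps spaces safe
# under shell=True, and the trailing '-' asks pdftotext to write to stdout:
#   text = convert('annual report.pdf')
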
def expand_one_verb_alias(sysargs, verb_aliases, used_aliases):
    """Iterate through sysargs looking for expandable verb aliases.

    When a verb alias is found, sysargs is modified to effectively expand the alias.
    The alias is removed from verb_aliases and added to used_aliases.
    After finding and expanding an alias, this function returns True.
    If no alias is found to be expanded, this function returns False.
    """
    cmd = os.path.basename(sys.argv[0])
    for index, arg in enumerate(sysargs):
        if arg.startswith('-'):
            # Not a verb, continue through the arguments
            continue
        if arg in used_aliases:
            print(fmt(
                "@!@{gf}==>@| Expanding alias '@!@{yf}" + arg +
                "@|' was previously expanded, ignoring this time to prevent infinite recursion."
            ))
        if arg in verb_aliases:
            before = [] if index == 0 else sysargs[:index - 1]
            after = [] if index == len(sysargs) else sysargs[index + 1:]
            sysargs[:] = before + verb_aliases[arg] + after
            print(fmt(
                "@!@{gf}==>@| Expanding alias "
                "'@!@{yf}{alias}@|' "
                "from '@{yf}{before} @!{alias}@{boldoff}{after}@|' "
                "to '@{yf}{before} @!{expansion}@{boldoff}{after}@|'"
            ).format(
                alias=arg,
                expansion=' '.join([cmd_quote(aarg) for aarg in verb_aliases[arg]]),
                before=' '.join([cmd] + before),
                after=(' '.join([''] + after) if after else '')
            ))
            # Prevent the alias from being used again, to prevent infinite recursion
            used_aliases.append(arg)
            del verb_aliases[arg]
            # Return True since one has been found
            return True
    # Return False since no verb alias was found
    return False

def command(args, extra_args):  # noqa: C901 FIXME!!!
    stack = "parallelcluster-" + args.cluster_name
    config = cfnconfig.ParallelClusterConfig(args)
    if args.command in config.aliases:
        config_command = config.aliases[args.command]
    else:
        config_command = "ssh {CFN_USER}@{MASTER_IP} {ARGS}"
    cfn = boto3.client(
        "cloudformation",
        region_name=config.region,
        aws_access_key_id=config.aws_access_key_id,
        aws_secret_access_key=config.aws_secret_access_key,
    )
    try:
        stack_result = cfn.describe_stacks(StackName=stack).get("Stacks")[0]
        status = stack_result.get("StackStatus")
        valid_status = ["CREATE_COMPLETE", "UPDATE_COMPLETE", "UPDATE_ROLLBACK_COMPLETE"]
        invalid_status = ["DELETE_COMPLETE", "DELETE_IN_PROGRESS"]
        if status in invalid_status:
            LOGGER.info("Stack status: %s. Cannot SSH while in %s", status, " or ".join(invalid_status))
            sys.exit(1)
        elif status in valid_status:
            outputs = stack_result.get("Outputs")
            username = _get_output_value(outputs, "ClusterUser")
            ip = (
                _get_output_value(outputs, "MasterPublicIP")
                if _get_output_value(outputs, "MasterPublicIP")
                else _get_output_value(outputs, "MasterPrivateIP")
            )
            if not username:
                LOGGER.info("Failed to get cluster %s username.", args.cluster_name)
                sys.exit(1)
            if not ip:
                LOGGER.info("Failed to get cluster %s ip.", args.cluster_name)
                sys.exit(1)
        else:
            # Stack is in CREATING, CREATED_FAILED, or ROLLBACK_COMPLETE but MasterServer is running
            ip = _get_master_server_ip(stack, config)
            template = cfn.get_template(StackName=stack)
            mappings = template.get("TemplateBody").get("Mappings").get("OSFeatures")
            base_os = _get_param_value(stack_result.get("Parameters"), "BaseOS")
            username = mappings.get(base_os).get("User")
        try:
            from shlex import quote as cmd_quote
        except ImportError:
            from pipes import quote as cmd_quote
        # build command
        cmd = config_command.format(
            CFN_USER=username, MASTER_IP=ip, ARGS=" ".join(cmd_quote(str(e)) for e in extra_args)
        )
        # run command
        if not args.dryrun:
            os.system(cmd)
        else:
            LOGGER.info(cmd)
    except ClientError as e:
        LOGGER.critical(e.response.get("Error").get("Message"))
        sys.stdout.flush()
        sys.exit(1)
    except KeyboardInterrupt:
        LOGGER.info("\nExiting...")
        sys.exit(0)

def get_resultspace_environment(result_space_path, base_env=None, quiet=False, cached=True, strict=True):
    """Get the environment variables which result from sourcing another catkin
    workspace's setup files as the string output of `cmake -E environment`.
    This cmake command is used to be as portable as possible.

    :param result_space_path: path to a Catkin result-space whose environment should be loaded, ``str``
    :type result_space_path: str
    :param quiet: don't throw exceptions, ``bool``
    :type quiet: bool
    :param cached: use the cached environment
    :type cached: bool
    :param strict: require the ``.catkin`` file exists in the resultspace
    :type strict: bool

    :returns: a dictionary of environment variables and their values
    """
    # Set base environment to the current environment
    if base_env is None:
        base_env = dict(os.environ)
    # Get the MD5 checksums for the current env hooks
    # TODO: the env hooks path should be defined somewhere
    env_hooks_path = os.path.join(result_space_path, 'etc', 'catkin', 'profile.d')
    if os.path.exists(env_hooks_path):
        env_hooks = [
            md5(open(os.path.join(env_hooks_path, path)).read().encode('utf-8')).hexdigest()
            for path in os.listdir(env_hooks_path)]
    else:
        env_hooks = []
    # Check the cache first, if desired
    if cached and result_space_path in _resultspace_env_cache:
        (cached_base_env, cached_env_hooks, result_env) = _resultspace_env_cache.get(result_space_path)
        if env_hooks == cached_env_hooks and cached_base_env == base_env:
            return dict(result_env)
    # Check to make sure result_space_path is a valid directory
    if not os.path.isdir(result_space_path):
        if quiet:
            return dict()
        raise IOError(
            "Cannot load environment from resultspace \"%s\" because it does not "
            "exist." % result_space_path
        )
    # Check to make sure result_space_path contains a `.catkin` file
    # TODO: `.catkin` should be defined somewhere as an atom in catkin_pkg
    if strict and not os.path.exists(os.path.join(result_space_path, '.catkin')):
        if quiet:
            return dict()
        raise IOError(
            "Cannot load environment from resultspace \"%s\" because it does not "
            "appear to be a catkin-generated resultspace (missing .catkin marker "
            "file)." % result_space_path
        )
    # Determine the shell to use to source the setup file
    shell_path = os.environ.get('SHELL', None)
    if shell_path is None:
        shell_path = DEFAULT_SHELL
        if not os.path.isfile(shell_path):
            raise RuntimeError(
                "Cannot determine shell executable. "
                "The 'SHELL' environment variable is not set and "
                "the default '{0}' does not exist.".format(shell_path)
            )
    (_, shell_name) = os.path.split(shell_path)
    # Use fallback shell if using a non-standard shell
    if shell_name not in ['bash', 'zsh']:
        shell_name = 'bash'
    # Check to make sure result_space_path contains the appropriate setup file
    setup_file_path = os.path.join(result_space_path, 'env.sh')
    if not os.path.exists(setup_file_path):
        if quiet:
            return dict()
        raise IOError(
            "Cannot load environment from resultspace \"%s\" because the "
            "required setup file \"%s\" does not exist." % (result_space_path, setup_file_path)
        )
    # Construct a command list which sources the setup file and prints the env to stdout
    norc_flags = {
        'bash': '--norc',
        'zsh': '-f'
    }
    command = ' '.join([
        cmd_quote(setup_file_path),
        shell_path,
        norc_flags[shell_name],
        '-c',
        '"typeset -px"'
    ])
    # Define some "blacklisted" environment variables which shouldn't be copied
    blacklisted_keys = ('_', 'PWD')
    env_dict = {}
    try:
        # Run the command synchronously to get the resultspace environment
        if 0:
            # NOTE: This sometimes fails to get all output (returns prematurely)
            lines = ''
            for ret in execute_process(command, cwd=os.getcwd(), env=base_env, emulate_tty=False, shell=True):
                if type(ret) is bytes:
                    ret = ret.decode()
                if isinstance(ret, string_type):
                    lines += ret
        else:
            p = subprocess.Popen(command, cwd=os.getcwd(), env=base_env, shell=True, stdout=subprocess.PIPE)
            lines, _ = p.communicate()
        # Extract the environment variables
        env_dict = {
            k: v
            for k, v in parse_env_str(lines).items()
            if k not in blacklisted_keys
        }
        # Check to make sure we got some kind of environment
        if len(env_dict) > 0:
            # Cache the result
            _resultspace_env_cache[result_space_path] = (base_env, env_hooks, env_dict)
        else:
            print("WARNING: Sourced environment from `{}` has no environment variables. Something is wrong.".format(
                setup_file_path))
    except IOError as err:
        print("WARNING: Failed to extract environment from resultspace: {}: {}".format(
            result_space_path, str(err)), file=sys.stderr)
    return dict(env_dict)

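# Usage sketch (workspace path and variable are hypothetical):
#   env = get_resultspace_environment('/ws/devel', quiet=True)
#   print(env.get('CMAKE_PREFIX_PATH'))
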
def catkin_main(sysargs):
    # Initialize config
    try:
        initialize_config()
    except RuntimeError as exc:
        sys.exit("Failed to initialize config: {0}".format(exc))
    # Create a top level parser
    parser = argparse.ArgumentParser(
        description="catkin command",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    add = parser.add_argument
    add('-a', '--list-aliases', action="store_true", default=False,
        help="Lists the current verb aliases and then quits, all other arguments are ignored")
    add('--test-colors', action='store_true', default=False,
        help="Prints a color test pattern to the screen and then quits, all other arguments are ignored")
    add('--version', action='store_true', default=False,
        help="Prints the catkin_tools version.")
    color_control_group = parser.add_mutually_exclusive_group()
    add = color_control_group.add_argument
    add('--force-color', action='store_true', default=False,
        help='Forces catkin to output in color, even when the terminal does not appear to support it.')
    add('--no-color', action='store_true', default=False,
        help='Forces catkin to not use color in the output, regardless of the detected terminal type.')
    # Deprecated, moved to `catkin locate --shell-verbs`
    add('--locate-extra-shell-verbs', action='store_true', help=argparse.SUPPRESS)
    # Generate a list of verbs available
    verbs = list_verbs()
    # Create the subparsers for each verb and collect the argument preprocessors
    argument_preprocessors = create_subparsers(parser, verbs)
    # Get verb aliases
    verb_aliases = get_verb_aliases()
    # Setup sysargs
    sysargs = sys.argv[1:] if sysargs is None else sysargs
    # Get colors config
    no_color = False
    force_color = os.environ.get('CATKIN_TOOLS_FORCE_COLOR', False)
    for arg in sysargs:
        if arg == '--no-color':
            no_color = True
        if arg == '--force-color':
            force_color = True
    if no_color or not force_color and not is_tty(sys.stdout):
        set_color(False)
    # Check for version
    if '--version' in sysargs:
        print('catkin_tools {} (C) 2014-{} Open Source Robotics Foundation'.format(
            pkg_resources.get_distribution('catkin_tools').version,
            date.today().year)
        )
        print('catkin_tools is released under the Apache License,'
              ' Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)')
        print('---')
        print('Using Python {}'.format(''.join(sys.version.split('\n'))))
        sys.exit(0)
    # Deprecated option
    if '--locate-extra-shell-verbs' in sysargs:
        print('Please use `catkin locate --shell-verbs` instead of `catkin --locate-extra-shell-verbs`',
              file=sys.stderr)
        sys.exit(0)
    # Check for --test-colors
    for arg in sysargs:
        if arg == '--test-colors':
            test_colors()
            sys.exit(0)
        if not arg.startswith('-'):
            break
    # Check for --list-aliases
    for arg in sysargs:
        if arg == '--list-aliases' or arg == '-a':
            for alias in sorted(list(verb_aliases.keys())):
                print("{0}: {1}".format(alias, ' '.join([cmd_quote(aarg) for aarg in verb_aliases[alias]])))
            sys.exit(0)
        if not arg.startswith('-'):
            break
    # Do verb alias expansion
    sysargs = expand_verb_aliases(sysargs, verb_aliases)
    # Determine the verb, splitting arguments into pre and post verb
    verb = None
    pre_verb_args = []
    post_verb_args = []
    for index, arg in enumerate(sysargs):
        # If the arg does not start with a `-` then it is a positional argument.
        # The first positional argument must be the verb.
        if not arg.startswith('-'):
            verb = arg
            post_verb_args = sysargs[index + 1:]
            break
        # If the `-h` or `--help` option comes before the verb, parse_args
        if arg in ['-h', '--help']:
            parser.parse_args(sysargs)
        # Otherwise it is a pre-verb option
        pre_verb_args.append(arg)
    # Error on no verb provided
    if verb is None:
        print(parser.format_usage())
        sys.exit("Error: No verb provided.")
    # Error on unknown verb provided
    if verb not in verbs:
        print(parser.format_usage())
        sys.exit("Error: Unknown verb '{0}' provided.".format(verb))
    # First allow the verb's argument preprocessor to strip any args
    # and return any "extra" information it wants as a dict
    processed_post_verb_args, extras = argument_preprocessors[verb](post_verb_args)
    # Then allow argparse to process the left over post-verb arguments along
    # with the pre-verb arguments and the verb itself
    args = parser.parse_args(pre_verb_args + [verb] + processed_post_verb_args)
    # Extend the argparse result with the extras from the preprocessor
    for key, value in extras.items():
        setattr(args, key, value)
    # Finally call the subparser's main function with the processed args
    # and the extras which the preprocessor may have returned
    sys.exit(args.main(args) or 0)

def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
    """
    Prepares a persistent docker container for a specific function.
    :param runtime: Lambda runtime environment. python2.7, nodejs6.10, etc.
    :param func_arn: The ARN of the lambda function.
    :param env_vars: The environment variables for the lambda.
    :param lambda_cwd: The local directory containing the code for the lambda function.
    :return: ContainerInfo class containing the container name and default entry point.
    """
    with self.docker_container_lock:
        # Get the container name and id.
        container_name = self.get_container_name(func_arn)
        LOG.debug('Priming docker container: %s' % container_name)
        status = self.get_docker_container_status(func_arn)
        # Container is not running or doesn't exist.
        if status < 1:
            # Make sure the container does not exist in any form/state.
            self.destroy_docker_container(func_arn)
            env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
            # Create and start the container
            LOG.debug('Creating container: %s' % container_name)
            cmd = (
                'docker create'
                ' --name "%s"'
                ' --entrypoint /bin/bash'  # Load bash when it starts.
                ' --interactive'  # Keeps the container running bash.
                ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                ' -e HOSTNAME="$HOSTNAME"'
                ' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
                ' %s'  # env_vars
                ' lambci/lambda:%s'
            ) % (container_name, env_vars_str, runtime)
            LOG.debug(cmd)
            run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
            LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
            cmd = (
                'docker cp'
                ' "%s/." "%s:/var/task"'
            ) % (lambda_cwd, container_name)
            LOG.debug(cmd)
            run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
            LOG.debug('Starting container: %s' % container_name)
            cmd = 'docker start %s' % (container_name)
            LOG.debug(cmd)
            run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
            # give the container some time to start up
            time.sleep(1)
        # Get the entry point for the image.
        LOG.debug('Getting the entrypoint for image: lambci/lambda:%s' % runtime)
        cmd = (
            'docker image inspect'
            ' --format="{{ .ContainerConfig.Entrypoint }}"'
            ' lambci/lambda:%s'
        ) % (runtime)
        LOG.debug(cmd)
        run_result = run(cmd, async=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
        entry_point = run_result.strip('[]\n\r ')
        LOG.debug('Using entrypoint "%s" for container "%s".' % (entry_point, container_name))
        return ContainerInfo(container_name, entry_point)

def __init__(self, parsed_url):
    duplicity.backend.Backend.__init__(self, parsed_url)
    # we expect an output
    try:
        p = os.popen("lftp --version")
        fout = p.read()
        ret = p.close()
    except Exception:
        pass
    # there is no output if lftp not found
    if not fout:
        log.FatalError("LFTP not found: Please install LFTP.",
                       log.ErrorCode.ftps_lftp_missing)
    # version is the second word of the second part of the first line
    version = fout.split('\n')[0].split(' | ')[1].split()[1]
    log.Notice("LFTP version is %s" % version)
    self.parsed_url = parsed_url
    self.scheme = duplicity.backend.strip_prefix(parsed_url.scheme, 'lftp').lower()
    self.scheme = re.sub('^webdav', 'http', self.scheme)
    self.url_string = self.scheme + '://' + parsed_url.hostname
    if parsed_url.port:
        self.url_string += ":%s" % parsed_url.port
    self.remote_path = re.sub('^/', '', parsed_url.path)
    # Fix up an empty remote path
    if len(self.remote_path) == 0:
        self.remote_path = '/'
    # Use an explicit directory name.
    if self.remote_path[-1] != '/':
        self.remote_path += '/'
    self.authflag = ''
    if self.parsed_url.username:
        self.username = self.parsed_url.username
        self.password = self.get_password()
        self.authflag = "-u '%s,%s'" % (self.username, self.password)
    if globals.ftp_connection == 'regular':
        self.conn_opt = 'off'
    else:
        self.conn_opt = 'on'
    # check for cacert file if https
    self.cacert_file = globals.ssl_cacert_file
    if self.scheme == 'https' and not globals.ssl_no_check_certificate:
        cacert_candidates = ["~/.duplicity/cacert.pem",
                             "~/duplicity_cacert.pem",
                             "/etc/duplicity/cacert.pem"]
        # look for a default cacert file
        if not self.cacert_file:
            for path in cacert_candidates:
                path = os.path.expanduser(path)
                if (os.path.isfile(path)):
                    self.cacert_file = path
                    break
    # save config into a reusable temp file
    self.tempfile, self.tempname = tempdir.default().mkstemp()
    os.write(self.tempfile, "set ssl:verify-certificate " +
             ("false" if globals.ssl_no_check_certificate else "true") + "\n")
    if self.cacert_file:
        os.write(self.tempfile, "set ssl:ca-file " + cmd_quote(self.cacert_file) + "\n")
    if globals.ssl_cacert_path:
        os.write(self.tempfile, "set ssl:ca-path " + cmd_quote(globals.ssl_cacert_path) + "\n")
    if self.parsed_url.scheme == 'ftps':
        os.write(self.tempfile, "set ftp:ssl-allow true\n")
        os.write(self.tempfile, "set ftp:ssl-protect-data true\n")
        os.write(self.tempfile, "set ftp:ssl-protect-list true\n")
    elif self.parsed_url.scheme == 'ftpes':
        os.write(self.tempfile, "set ftp:ssl-force on\n")
        os.write(self.tempfile, "set ftp:ssl-protect-data on\n")
        os.write(self.tempfile, "set ftp:ssl-protect-list on\n")
    else:
        os.write(self.tempfile, "set ftp:ssl-allow false\n")
    os.write(self.tempfile, "set http:use-propfind true\n")
    os.write(self.tempfile, "set net:timeout %s\n" % globals.timeout)
    os.write(self.tempfile, "set net:max-retries %s\n" % globals.num_retries)
    os.write(self.tempfile, "set ftp:passive-mode %s\n" % self.conn_opt)
    if log.getverbosity() >= log.DEBUG:
        os.write(self.tempfile, "debug\n")
    if self.parsed_url.scheme == 'ftpes':
        os.write(self.tempfile, "open %s %s\n" % (self.authflag, self.url_string.replace('ftpes', 'ftp')))
    else:
        os.write(self.tempfile, "open %s %s\n" % (self.authflag, self.url_string))
    os.close(self.tempfile)
    # print settings in debug mode
    if log.getverbosity() >= log.DEBUG:
        f = open(self.tempname, 'r')
        log.Debug("SETTINGS: \n"
                  "%s" % f.read())