def deploy(c):
    c = Connection(os.environ.get("SSH_ADDRESS"),
                   connect_kwargs={"passphrase": os.environ.get("PASSPHRASE")})
    c.run(f"mkdir -p {SITE_FOLDER}")
    with c.cd(f"{SITE_FOLDER}"):
        _get_latest_sources(c)
        _update_pipenv(c)
        _update_staticfiles(c)
        _update_migrations(c)
def ssh_run(servername, cmd, sudo):
    try:
        user = os.environ['SSH_USER']
        passw = os.environ['SSH_PASS']
        sudotf = sudo
        command = cmd
        sname = servername
        print('user: %s passwd: %s servername: %s' % (user, passw, sname))
        config = Config(overrides={'sudo': {'password': passw}})
        c = Connection(host=sname, user=user, port=22, config=config,
                       connect_kwargs={"password": passw})
        if sudotf:
            result = c.sudo(command, pty=True, hide='stderr')
            if result.exited == 0:
                print("In SUDO")
                print(result.ok)
                print(result.stdout.strip())
                return result.exited
        else:
            result = c.run(command, pty=True, hide='stderr')
            if result.exited == 0:
                print("In NOSUDO")
                print(result.ok)
                print(result.stdout.strip())
                return result.exited
    except Exception as e:
        print("ssh_run Exception Caught: %s" % e)
        return "Exception Caught Clearing Certificate"
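# A minimal usage sketch for ssh_run above. The hostname and credentials are
# placeholders, not values from the original function; note that ssh_run returns an
# int exit code on success but a string when it catches an exception:
import os

os.environ.setdefault('SSH_USER', 'deploy')    # placeholder credentials
os.environ.setdefault('SSH_PASS', 'secret')

exit_code = ssh_run('server.example.com', 'uptime', sudo=False)
if exit_code == 0:
    print('command succeeded')
else:
    print('command failed or raised: %s' % exit_code)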
def rsyn_file():
    passwd_file = "/home/tomcat/shiwm-service/py-scripts/get_sftp_list/scripts/rsync.pwd"
    rsync_bin = "/usr/bin/rsync"
    source_dir = "/home/tomcat/shiwm-service/sftp-duizhang"
    c = Connection("127.0.0.1")
    res = c.run("%s -azvP %s --password-file=%s [email protected]::sftp-duizhang"
                % (rsync_bin, source_dir, passwd_file))
    logging.info("Rsync message: %s" % res.stdout)
def deploy():
    print("========================================")
    print("deploying to server")  # + os.environ.get("ftp_host"))
    print("========================================")
    c = Connection(host='www.jonaso.de', port=21)
    result = c.run('uname -s')
    print(result.stdout.strip())
    print(result.exited)
    print(result.ok)
    print(result.command)
    print(result.connection)
    print(result.connection.host)
    sys.exit()
    try:
        # cleanup
        # local_cleanup()
        # compress the folder
        # local("tar -zcvf %s %s" % (tarFilename, localdir))
        pass
        # upload the tar file to the remote host
        # put(tarFilename, join(remotedir, tarFilename), use_sudo=True, mirror_local_mode=True)
        # with cd(remotedir):
        #     untar the folder
        #     sudo("tar -xvf " + tarFilename)
        #     modify perms
        #     TODO: check if this is necessary
        #     sudo("chmod 755 " + remotedir)
        #     drop the database
        #     sudo("mysqladmin -f -u%s -p\"%s\" drop %s" % (dbUsername, dbPassword, dbName))
        #     sudo("cp -r wordpress/dist ./")
        #     sudo("rm -rf ./wordpress/dist")
        #     sudo("cp -r wordpress/static ./")
        #     sudo("rm -rf ./wordpress/static")
        #     sudo("cp -r wordpress/favicon.* ./")
        #     # sudo("rm -f ./favicon.*")
        #     sudo("cp -r wordpress/.htaccess ./.htaccess")
        #     sudo("rm -f ./wordpress/.htaccess")
    finally:
        # cleanup
        # local_cleanup()
        # remote cleanup
        # remove the tar file and sql file
        # sudo("rm -f " + join(remotedir, localdir))
        pass
class FabricWrapper:
    def __init__(
            self,
            host,
            user=None,
            password=None,
            key_filename=None,
            key_passphrase=None,
            port=22,
            sudo=False
    ):
        self.sudo = sudo
        if not user and not password and not key_filename:
            # Get details from ~/.ssh/config
            self.connection = Connection(host)
        elif key_filename and not password:
            self.connection = Connection(
                host=host,
                user=user,
                port=port,
                connect_kwargs={
                    'key_filename': key_filename,
                    'passphrase': key_passphrase
                }
            )
        elif not key_filename and password:
            self.connection = Connection(
                host=host,
                user=user,
                port=port,
                connect_kwargs={
                    'password': password
                }
            )
        elif key_filename and password:
            self.connection = Connection(
                host=host,
                user=user,
                port=port,
                connect_kwargs={
                    'key_filename': key_filename,
                    'passphrase': key_passphrase if key_passphrase else password
                }
            )
        else:
            logging.error(
                crayons.red(f'You need to provide either a private key_filename or password '
                            f'to connect to {host} with user: {user}')
            )
            self.connection = None

    def execute(self, command, **kwargs):
        if not self.connection:
            logging.error(crayons.red('No connection object instantiated.'))
            return None
        return self.connection.sudo(command, **kwargs) if self.sudo else self.connection.run(command, **kwargs)
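# A minimal sketch of how FabricWrapper might be used; the host, user, and key path
# below are illustrative placeholders, not values from the class itself:
import os

shell = FabricWrapper(
    host='203.0.113.10',                                # placeholder host
    user='ubuntu',                                      # placeholder user
    key_filename=os.path.expanduser('~/.ssh/id_rsa'),   # placeholder key path
)
result = shell.execute('uname -a', hide=True)
if result is not None and result.ok:
    print(result.stdout.strip())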
def deploy(c, git=True, restart=False, sync=False, target_machine="trotsky"):
    "push code to a server (configure your own here)"
    # desired_services = ["pyborg_discord", "pyborg_http", "pyborg_twitter", "pyborg_mastodon"]
    desired_services = ["pyborg_discord", "pyborg_http"]
    if git:
        c.run("git push --all")
    conn = Connection(target_machine)
    with conn.cd("src/pyborg-1up"):
        conn.run("git fetch")
        conn.run("git stash")
        conn.run("git pull")
        conn.run("git stash pop")
        if sync:
            conn.run("~/.poetry/bin/poetry install -v")  # poetry manages this application
        if restart:
            units = " ".join(desired_services)
            conn.run("sudo systemctl restart {}".format(units), pty=True)
            print("Restarted services.")
    print("Deploy Completed.")
def deploy(c, restart=False, sync=False):
    "push code to a server (configure your own here)"
    c.run("git push --all")
    conn = Connection("trotsky")
    with conn.cd("src/pyborg-1up"):
        conn.run("git fetch")
        conn.run("git stash")
        conn.run("git pull")
        conn.run("git stash pop")
        if sync:
            conn.run("~/.local/bin/pipenv sync")  # they all use the same pipenv managed virtualenv
        if restart:
            units = " ".join(["pyborg_discord", "pyborg_http", "pyborg_twitter", "pyborg_mastodon"])
            conn.run("sudo systemctl restart {}".format(units), pty=True)
            print("Restarted services.")
    print("Deploy Completed.")
def _inner():
    logger.warning(
        f'Uploading local project folder {python_proj_dir} to {remote_dir} on remote host {host}.')
    t_start = time.perf_counter()
    uploader = ParamikoFolderUploader(
        host, port, user, password, python_proj_dir, remote_dir,
        path_pattern_exluded_tuple, file_suffix_tuple_exluded,
        only_upload_within_the_last_modify_time, file_volume_limit, sftp_log_level)
    uploader.upload()
    logger.info(
        f'Uploading local project folder {python_proj_dir} to {remote_dir} on {host} '
        f'took {round(time.perf_counter() - t_start, 3)} seconds.'
    )
    # conn.run(f'''export PYTHONPATH={remote_dir}:$PYTHONPATH''')

    func_name = task_fun.__name__
    queue_name = task_fun.consumer.queue_name
    process_mark = f'fsdf_fabric_mark__{queue_name}__{func_name}'

    conn = Connection(
        host,
        port=port,
        user=user,
        connect_kwargs={"password": password},
    )
    kill_shell = f'''ps -aux|grep {process_mark}|grep -v grep|awk '{{print $2}}' |xargs kill -9'''
    logger.warning(f'Killing processes marked {process_mark} with: {kill_shell}')
    uploader.ssh.exec_command(kill_shell)
    # conn.run(kill_shell, encoding='utf-8', warn=True)
    # Run through paramiko's ssh.exec_command above instead of conn.run, so no warning
    # is printed that the user might mistake for an error.

    python_exec_str = (f'''export is_fsdf_remote_run=1;export PYTHONPATH={remote_dir}:$PYTHONPATH ;'''
                       f'''python3 -c "from {relative_module} import {func_name};'''
                       f'''{func_name}.multi_process_consume({process_num})" -fsdfmark {process_mark} ''')
    shell_str = f'''cd {remote_dir}; {python_exec_str}'''
    extra_shell_str2 = extra_shell_str  # an inner function cannot rebind the enclosing variable directly
    if not extra_shell_str2.endswith(';') and extra_shell_str != '':
        extra_shell_str2 += ';'

    shell_str = extra_shell_str2 + shell_str
    logger.warning(f'Starting task consumption on remote host {host} with: {shell_str}')
    conn.run(shell_str, encoding='utf-8', **invoke_runner_kwargs)
def deploy2(context, host_name):
    """Execute deploy task

    Linux:   fab2 -i ~/.ssh/id_rsa --ssh-config=~/.ssh/config -H mango deploy2 dev0
    Windows: fab2 -i C:/Users/pcdinh/.ssh/id_rsa --ssh-config=C:/Users/pcdinh/.ssh/config -H mango deploy2 dev0

    ~/.ssh/config

    Host dev0
        HostName 192.168.1.113
        Port 22
        User pcdinh
        IdentityFile ~/.ssh/mykey.pem

    :param context:
    :param host_name:
    :return:
    """
    try:
        print(f"Connecting to {host_name}")
        conn = Connection(host_name)
    except KeyError:
        print(f"Error: Undefined host name: {host_name}")
        return
    conn.run("uname -s")
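# deploy2 above resolves "dev0" through the SSH config passed on the fab2 command line.
# When calling Connection directly, the same file can be loaded explicitly; a hedged
# sketch assuming Fabric 2.x's Config(runtime_ssh_path=...) and that ~/.ssh/config
# contains the Host dev0 entry shown in the docstring:
import os
from fabric2 import Config, Connection

cfg = Config(runtime_ssh_path=os.path.expanduser('~/.ssh/config'))
conn = Connection('dev0', config=cfg)
conn.run('uname -s')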
def deploy(c, restart=False, sync=False):
    "push code to a server (configure your own here)"
    c.run("git push --all")
    conn = Connection("trotsky")
    with conn.cd("src/pyborg-1up"):
        conn.run("git fetch")
        conn.run("git stash")
        conn.run("git pull")
        conn.run("git stash pop")
        if sync:
            conn.run("~/.local/bin/pipenv sync")  # they all use the same pipenv managed virtualenv
        if restart:
            for unit in ["pyborg_discord", "pyborg_http", "pyborg_twitter", "pyborg_mastodon"]:
                conn.sudo("systemctl restart {}".format(unit), pty=True)
    print("Deploy Completed.")
def del_file(file_path):
    c = Connection('localhost')
    with c.cd("/home/tomcat/shiwm-service/"):
        res = c.run('[ -d %s ] && echo 00 || echo 11' % file_path, hide=True).stdout
        logging.info("The res is %s" % res)
        if res.startswith("00"):
            c.run("rm -rf ./sftp-duizhang/")
            logging.info("The dir exists; deleting it now.")
            c.run("mkdir -p sftp-duizhang")
            logging.info("Recreating the dir now.")
        else:
            c.run("mkdir -p sftp-duizhang")
            logging.info("The dir doesn't exist; creating it now.")
def main():
    fab = Connection(host='www.xachf.com', user='******', port=22, connect_timeout=20,
                     connect_kwargs={"password": "******"})
    # print(fab)
    res = fab.run('uname -s', hide=True)
    # 'command', 'connection', 'encoding', 'env', 'exited', 'failed', 'hide', 'ok', 'pty',
    # 'return_code', 'shell', 'stderr', 'stdout', 'tail'
    # print(dir(res))
    # print(res.stdout.strip())
    # print(res.command, res.connection.host, res.connection.user)
    if res.ok:
        print('{}>> user {} ran [{}] successfully'.format(
            res.connection.host, res.connection.user, res.command))
    else:
        print('{}>> user {} failed to run [{}]'.format(
            res.connection.host, res.connection.user, res.command))
def start_deploy():
    """
    Pack and deploy to the remote machine
    :return:
    """
    try:
        conn = Connection(host=HOST, port=PORT, user=USER, connect_kwargs={'password': PASSWORD},
                          connect_timeout=CONNECT_TIMEOUT)
        # shutil.make_archive(base_name, format, base_dir)
        """
        base_name ---> target file name to create, including the path, minus any format-specific extension
        format    ---> archive format suffix: zip, tar, bztar, gztar, xztar
        base_dir  ---> the directory to start archiving from
        """
        folder = '{}/{}'.format(SERVER_PATH.rstrip('/'), PACK_FILENAME)
        if COMPRESS_TYPE.lower() == 'gztar':
            shutil.make_archive(PACK_FILENAME, 'gztar', LOCAL_PATH)
        elif COMPRESS_TYPE.lower() == 'bztar':
            shutil.make_archive(PACK_FILENAME, 'bztar', LOCAL_PATH)
        elif COMPRESS_TYPE.lower() == 'xztar':
            shutil.make_archive(PACK_FILENAME, 'xztar', LOCAL_PATH)
        elif COMPRESS_TYPE.lower() == 'tar':
            shutil.make_archive(PACK_FILENAME, 'tar', LOCAL_PATH)
        else:  # COMPRESS_TYPE.lower() == 'zip':
            shutil.make_archive(PACK_FILENAME, 'zip', LOCAL_PATH)
        # A bare conn.run('cd ...') would not persist into later run() calls,
        # so enter the root directory via the cd() context manager instead
        with conn.cd(SERVER_PATH.rstrip('/')):
            # Remove the old folder, files, and archives first
            conn.run('rm -rf {} {}'.format(PACK_FILENAME, PACK_FULL_FILENAME))
            # Create the folder
            conn.run('mkdir {}'.format(folder))
            # Upload the archive
            conn.put(PACK_FULL_FILENAME, SERVER_PATH)
            # Extract into the target directory
            conn.run('tar -C {} -zxvf {}/{}'.format(folder, SERVER_PATH.rstrip('/'), PACK_FULL_FILENAME))
            # Grant permissions
            # conn.sudo('chmod -R 666 {}'.format(folder))
            # Restart the web server
            conn.run('systemctl restart nginx')
            # Restart the gateway service
            conn.run('supervisorctl start uwsgi')
    except Exception as ex:
        print('{}'.format(ex))
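# Worth noting for start_deploy-style scripts: every conn.run() starts a fresh shell,
# so a bare conn.run('cd /some/dir') has no effect on later calls. Fabric's cd()
# context manager prefixes each command instead; a small sketch with a placeholder host:
from fabric2 import Connection

conn = Connection('example-host')   # placeholder host
conn.run('cd /tmp')                 # forgotten by the next call
with conn.cd('/tmp'):
    conn.run('pwd')                 # effectively runs: cd /tmp && pwd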
if masterIp == "": host = '127.0.0.1' else: host = masterIp tdLog.info("Procedures for tdengine deployed in %s" % (host)) if windows: tdCases.logSql(logSql) tdLog.info("Procedures for testing self-deployment") td_clinet = TDSimClient("C:\\TDengine") td_clinet.deploy() remote_conn = Connection("root@%s" % host) with remote_conn.cd( '/var/lib/jenkins/workspace/TDinternal/community/tests/pytest' ): remote_conn.run("python3 ./test.py") conn = taos.connect(host="%s" % (host), config=td_clinet.cfgDir) tdCases.runOneWindows(conn, fileName) else: tdDnodes.init(deployPath) tdDnodes.setTestCluster(testCluster) tdDnodes.setValgrind(valgrind) tdDnodes.stopAll() is_test_framework = 0 key_word = 'tdCases.addLinux' try: if key_word in open(fileName).read(): is_test_framework = 1 except: pass if is_test_framework:
## EXECUTE REMOTE COMMAND IN HOST USING FABRIC LIBRARY (BASED ON INVOKE AND PARAMIKO)
# DOC --> http://docs.fabfile.org/en/2.4/
# sudo pip3 install fabric2

from fabric2 import Connection

# HOST
host = 'HOST_IP'
username = '******'
password = '******'  # NOT RECOMMENDED IN PRODUCTION, LOTS OF BETTER WAYS AT DOC
cmd = 'whoami'

c = Connection(
    host=host,
    user=username,
    connect_kwargs={
        "password": password
    }
)

try:
    result = c.run(cmd).stdout
except Exception as e:
    print(e)
class ClusterShell:
    """ClusterShell lets you run commands on multiple EC2 instances.

    ClusterShell takes in information about a set of EC2 instances that exist and allows you to run commands on
    some or all of the nodes. It also has convenience methods for copying files between the local filesystem and
    the cluster.
    """

    def __init__(self,
                 username,
                 master_ip,
                 worker_ips,
                 ssh_key_path,
                 use_bastion=False,
                 wait_for_ssh=True,
                 wait_for_ssh_timeout=120):
        """
        Args:
            username: The username used to ssh to the instance. Often 'ubuntu' or 'ec2-user'
            master_ip: A single IP for the master node. Typically should be the public IP if the location this
                       code is running is outside of the VPC and the private IP if running from another EC2 node
                       in the same VPC. In many cases, the distinction between master and workers is arbitrary.
                       If use_bastion is True, the master node will be the bastion host.
            worker_ips: A possibly empty list of ips for the worker nodes. If there is only a single worker, a
                        string can be passed in instead of a list.
            ssh_key_path: The path to the SSH key required to SSH into the EC2 instances. Often
                          ~/.ssh/something.pem
            use_bastion (bool): Whether or not to use the master node as the bastion host for SSHing to worker
                                nodes.
            wait_for_ssh (bool): If true, block until commands can be run on all instances. This can be useful
                                 when you are launching EC2 instances, because the instances may be in the
                                 RUNNING state but the SSH daemon may not yet be running.
            wait_for_ssh_timeout: Number of seconds to spend trying to run commands on the instances before
                                  failing. This is NOT the SSH timeout, this upper bounds the amount of time
                                  spent retrying failed SSH connections. Only used if wait_for_ssh=True.
        """
        if not isinstance(worker_ips, list):
            worker_ips = [worker_ips]

        self._username = username
        self._master_ip = master_ip
        self._worker_ips = worker_ips
        self._all_ips = [self._master_ip] + self._worker_ips
        self.use_bastion = use_bastion

        connect_kwargs = {
            "key_filename": [os.path.expanduser(ssh_key_path)],
            "banner_timeout": 30  # NOTE 1 above
        }

        self._master_conn = Connection(user=self._username,
                                       host=self._master_ip,
                                       forward_agent=True,
                                       connect_kwargs=connect_kwargs)

        worker_conns = []
        for worker_ip in self._worker_ips:
            if self.use_bastion:
                c = Connection(user=self._username,
                               host=worker_ip,
                               connect_kwargs=connect_kwargs,
                               gateway=Connection(user=self._username,
                                                  host=master_ip,
                                                  forward_agent=True,
                                                  connect_kwargs=connect_kwargs))
            else:
                c = Connection(user=self._username, host=worker_ip, connect_kwargs=connect_kwargs)
            worker_conns.append(c)

        self._individual_worker_conns = worker_conns
        self._worker_conns = ThreadingGroup.from_connections(worker_conns)
        self._all_conns = ThreadingGroup.from_connections([self._master_conn] + worker_conns)

        if wait_for_ssh:
            self.wait_for_ssh_ready(wait_timeout=wait_for_ssh_timeout)

    def wait_for_ssh_ready(self, wait_timeout=120):
        """Repeatedly try to run commands on all instances until successful or until timeout is reached."""
        start_time = time.time()
        exceptions = []
        while True:
            try:
                self.run_on_all("hostname", hide=True)
                break
            except fabric2.exceptions.GroupException as e:
                exceptions.append(e)
                elapsed_time = time.time() - start_time
                if elapsed_time > wait_timeout:
                    exceptions_str = "\n".join([str(e) for e in exceptions])
                    raise RuntimeError(
                            f"[ClusterShell.wait_for_ssh_ready] Unable to establish an SSH connection after "
                            f"{wait_timeout} seconds. On EC2 this is often due to a problem with the security "
                            f"group, although there are many potential causes."
                            f"\nExceptions encountered:\n{exceptions_str}")

                secs_to_timeout = int(wait_timeout - elapsed_time)
                print(f"[ClusterShell.wait_for_ssh_ready] Exception when SSHing to instances. Retrying until "
                      f"timeout in {secs_to_timeout} seconds")
                time.sleep(1)

    def run_local(self, cmd):
        """Run a shell command on the local machine.

        Will wait for the command to finish and raise an exception if the return code is non-zero.

        Args:
            cmd: The shell command to run

        Returns:
            The stdout of the command as a byte string.
        """
        return subprocess.check_output(shlex.split(cmd))

    def run_on_master(self, cmd, **kwargs):
        """Run a shell command on the master node.

        Args:
            cmd: The shell command to run
            kwargs: http://docs.fabfile.org/en/2.4/api/connection.html#fabric.connection.Connection.run

        Returns:
            Result: An invoke Result object.
                    `http://docs.pyinvoke.org/en/latest/api/runners.html#invoke.runners.Result`
        """
        return self._master_conn.run(cmd, **kwargs)

    def run_on_all(self, cmd, **run_kwargs):
        """Run a shell command on every node.

        Args:
            cmd: The shell command to run
            run_kwargs: Keyword args to pass to fabric.run(). Fabric passes them through to Invoke, which are
                        documented here:
                        http://docs.pyinvoke.org/en/latest/api/runners.html#invoke.runners.Runner.run.
                        Potentially useful args:
                            hide=True will prevent run output from being output locally

        Returns:
            List of invoke.Result objects. Order is not guaranteed.
            http://docs.pyinvoke.org/en/latest/api/runners.html#invoke.runners.Result
        """
        if self.use_bastion:
            if len(self._worker_ips) >= (MAX_CONNS_PER_GROUP - 1):
                results = self._run_on_all_workaround(cmd, MAX_CONNS_PER_GROUP, **run_kwargs)
                return list(results)
        results = self._all_conns.run(cmd, **run_kwargs)
        return list(results.values())

    # TODO: Confirm this is required with (10+ nodes)
    def _run_on_all_workaround(self, cmd, group_size, **run_kwargs):
        total_conns = len(self._worker_conns) + 1
        print(f'{total_conns} Nodes')
        groups = []
        group_conns = []
        for i, worker_conn in enumerate(self._individual_worker_conns):
            if i % group_size == 0 and i != 0:
                groups.append(ThreadingGroup.from_connections(group_conns))
                group_conns = []
            group_conns.append(worker_conn)

        flattened_results = []
        # Either add the master to one of the groups or create a group for it (if groups are all full or
        # there are no workers)
        if len(group_conns) != 0 and len(group_conns) != group_size:
            group_conns.append(self._master_conn)
            groups.append(ThreadingGroup.from_connections(group_conns))
        else:
            if len(group_conns) != 0:
                groups.append(ThreadingGroup.from_connections(group_conns))
            master_result = self.run_on_master(cmd, **run_kwargs)
            flattened_results.append(master_result)

        for i, worker_conn_group in enumerate(groups):
            group_results = worker_conn_group.run(cmd, **run_kwargs)
            flattened_results.extend(group_results.values())
        return flattened_results

    def copy_from_master_to_local(self, remote_path, local_path):
        """Copy a file from the master node to the local node.

        Args:
            remote_path: The path of the file on the master node. If not an absolute path, will be relative to
                         the working directory, typically the home directory. Will not expand tilde (~).
            local_path: The path to save the file to on the local file system.
        """
        local_abs_path = Path(local_path).absolute()
        return self._master_conn.get(remote_path, local_abs_path)

    def copy_from_all_to_local(self, remote_abs_path, local_path):
        """Copy files from all nodes to the local filesystem.

        There will be one directory per node containing the file.

        Args:
            remote_abs_path: The absolute path of the file to download.
                             Can be a directory or a cp/scp string including wildcards
            local_path: The absolute path of a directory on the local filesystem to download the files into.
                        The path must not point to a file.
        """
        if self.use_bastion:
            raise NotImplementedError("Copying has not yet been implemented for bastion mode. Please open a "
                                      "ticket at https://github.com/armandmcqueen/ec2-cluster if you would "
                                      "like to see this feature implemented")
        local_abs_path = Path(local_path).absolute()
        if not local_abs_path.exists():
            local_abs_path.mkdir(parents=True)
        else:
            if local_abs_path.is_file():
                raise RuntimeError(f'[ClusterShell.copy_from_all_to_local] local_path points to a file: '
                                   f'{local_abs_path}')

        master_dir = local_abs_path / "0"
        master_dir.mkdir()
        master_ip_path = master_dir / "ip.txt"
        with open(master_ip_path, 'w') as f:
            f.write(self.master_ip)
        self.run_local(f'scp '
                       f'-o StrictHostKeyChecking=no '
                       f'-o "UserKnownHostsFile /dev/null" '
                       f'-o "LogLevel QUIET" '
                       f'-r '
                       f'{self._username}@{self.master_ip}:{remote_abs_path} {master_dir}/')

        # Create and populate staging folder for each worker's data
        for ind, worker_ip in enumerate(self._worker_ips):
            worker_id = ind + 1
            worker_node_dir = local_abs_path / str(worker_id)
            worker_node_dir.mkdir()
            worker_ip_path = worker_node_dir / "ip.txt"
            with open(worker_ip_path, 'w') as f:
                f.write(worker_ip)
            self.run_local(f'scp '
                           f'-o StrictHostKeyChecking=no '
                           f'-o "UserKnownHostsFile /dev/null" '
                           f'-o "LogLevel QUIET" '
                           f'-r '
                           f'{self._username}@{worker_ip}:{remote_abs_path} {worker_node_dir}/')

    def copy_from_local_to_master(self, local_path, remote_path):
        """Copy a file from the local filesystem to the master node.

        Args:
            local_path: The path of the file to send to the master node
            remote_path: The path where the file will be saved on the master node. Does not expand tilde (~),
                         but if not an absolute path, will usually interpret the path as relative to the home
                         directory.
        """
        local_abs_path = Path(local_path).absolute()
        return self._master_conn.put(local_abs_path, remote_path)

    def copy_from_local_to_all(self, local_path, remote_path):
        """Copy a file from the local filesystem to every node in the cluster.

        Args:
            local_path: The path of the file to send to the master and worker nodes
            remote_path: The path where the file will be saved on the master and worker nodes. Does not expand
                         tilde (~), but if not an absolute path, will usually interpret the path as relative to
                         the home directory.
        """
        if self.use_bastion:
            raise NotImplementedError("Copying has not yet been implemented for bastion mode. Please open a "
                                      "ticket at https://github.com/armandmcqueen/ec2-cluster if you would "
                                      "like to see this feature implemented")
        local_abs_path = Path(local_path).absolute()
        self.copy_from_local_to_master(local_abs_path, remote_path)
        for worker_conn in self._individual_worker_conns:
            worker_conn.put(local_abs_path, remote_path)

    @property
    def username(self):
        """The username used to instantiate the ClusterShell"""
        return self._username

    @property
    def master_ip(self):
        """The master IP used to instantiate the ClusterShell"""
        return self._master_ip

    @property
    def non_master_ips(self):
        """All IPs other than the master node. May be an empty list"""
        return self._worker_ips

    @property
    def all_ips(self):
        """A list of master and worker IPs"""
        return self._all_ips
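# A minimal sketch of driving ClusterShell; the username, IPs, and key path below are
# placeholders, not values from the class itself:
shell = ClusterShell(username='ubuntu',
                     master_ip='203.0.113.1',
                     worker_ips=['203.0.113.2', '203.0.113.3'],
                     ssh_key_path='~/.ssh/cluster.pem')
for r in shell.run_on_all('hostname', hide=True):
    print(r.stdout.strip())
shell.copy_from_local_to_all('setup.sh', 'setup.sh')   # hypothetical local file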
class Remote:
    def __init__(self, host, user, password=None, ssh_private_key=None):
        """
        Args:
            host (str): server host
            user (str): user name
            password (str): password, default None; if password is None, use ssh_private_key.
            ssh_private_key (Path, str): ssh private key path, default None; if it is None, use password.
        """
        if password is None and ssh_private_key is None:
            logger.warning(
                "ssh_private_key and password are both None, ssh_private_key will use `~/.ssh/id_rsa`!"
            )
            ssh_private_key = str(Path("~/.ssh/id_rsa").expanduser().resolve())

        if ssh_private_key is not None:
            ssh_key = str(Path(ssh_private_key).expanduser().resolve())
            connect_kwargs = {"key_filename": ssh_key}
            self.conn = Connection(host=host, user=user, connect_kwargs=connect_kwargs)
        else:
            config = Config(overrides={"sudo": {"password": password}})
            connect_kwargs = {"password": password}
            self.conn = Connection(host=host, user=user, connect_kwargs=connect_kwargs, config=config)
        self._home_dir = None

    @property
    def home_dir(self):
        if self._home_dir is None:
            self._home_dir = self.run("pwd", hide="stdout").stdout.strip()
        return self._home_dir

    def _execute_remote_shell(self, files, remote_dir=None):
        if remote_dir is None:
            remote_dir = self.home_dir
        self.run(f"if [ ! -d {remote_dir} ]; then sudo mkdir -p {remote_dir}; fi")
        if not isinstance(files, Iterable):
            files = [files]
        for file_path in map(lambda x: Path(x), files):
            self.conn.put(str(file_path), remote_dir)
            # NOTE: This place may require a password
            # Consider using `self.conn.sudo(f"/bin/bash /home/{user}/{shell_path.name}")` instead
            self.run(f"/bin/bash {remote_dir}/{file_path.name}")

    def _dump_remote_yaml(self, obj, remote_file_path="tmp.yaml"):
        r_dir, r_name = os.path.split(remote_file_path)
        if r_dir.startswith("~"):
            r_dir = f"{self.home_dir}{r_dir[1:]}"
        # create tmp file in local
        _temp_dir = CUR_DIR.joinpath("tmp")
        _temp_dir.mkdir(parents=True, exist_ok=True)
        temp_client_path = _temp_dir.joinpath(r_name)
        with temp_client_path.open("w") as fp:
            yaml.dump(obj, fp)
        # create remote dir
        self.run(f"if [ ! -d {r_dir} ]; then sudo mkdir -p {r_dir}; fi")
        self.conn.put(str(temp_client_path), r_dir)
        # delete tmp file
        temp_client_path.unlink()

    def deploy_qlib_client(self, client_config=None):
        """deploy qlib client

        Args:
            client_config (dict): qlib client config
        """
        if client_config is None:
            raise ValueError("client_config cannot be None")
        shell_path = CUR_DIR.joinpath("install_qlib_client.sh")
        self._execute_remote_shell(shell_path)
        self._dump_remote_yaml(client_config, "~/qlib_client_config.yaml")

    def deploy_qlib_server(self, client_config=None):
        """deploy qlib server

        Args:
            client_config (dict): qlib client config
        """
        shell_path_client = CUR_DIR.joinpath("install_qlib_client.sh")
        shell_path_server = CUR_DIR.joinpath("install_qlib_server.sh")
        self._execute_remote_shell((shell_path_client, shell_path_server))
        # Download default China Stock data
        self.run_python(
            f"{self.home_dir}/code/qlib/scripts/get_data.py qlib_data_cn --target_dir {QLIB_DATA_DIR}",
            sudo=True)
        if client_config is not None:
            client_config = copy.deepcopy(client_config)
            client_config["provider_uri"] = client_config["mount_path"]
            client_config["auto_mount"] = False
            self._dump_remote_yaml(client_config, "~/qlib_client_config.yaml")

    def run(self, command, hide=True):
        """run command in remote server

        Parameters
        ----------
        command : str
            command
        hide : bool, str
            hide shell stdout or stderr, value from stdout/stderr/True, default True
        """
        return self.conn.run(command, hide=hide)

    def run_python(self, command, hide=True, sudo=False):
        """run python command

        Parameters
        ----------
        command : str
            python command
        hide : bool, str
            hide shell stdout or stderr, value from stdout/stderr/True, default True
        sudo : bool
            sudo command
        """
        # FIXME: hard code, fabric cannot use `~/.bashrc` PATH
        python_path = self.run(
            f"source {self.home_dir}/miniconda3/etc/profile.d/conda.sh; conda activate base; which python",
            hide=True).stdout.strip()
        command = f"{python_path} {command}"
        if sudo:
            command = f"sudo {command}"
        return self.run(command, hide=hide)
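# A hedged usage sketch for the Remote class; the host, user, and the client config
# keys below are illustrative placeholders inferred from deploy_qlib_server, not
# documented values:
remote = Remote(host='203.0.113.10', user='qlib')
print(remote.home_dir)
remote.deploy_qlib_client({'provider_uri': '127.0.0.1:9710',      # placeholder server
                           'mount_path': '~/.qlib/qlib_data/cn_data'})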
class ExtScripts:
    def __init__(self,
                 config_file=None,
                 commands_file=None,
                 script_location=None,
                 script_type="win"):
        """
        :param commands_file:
        :param script_location:
        :param script_type:
        """
        '''SET ALL THE NEEDED PARAMS'''
        '''!!!LOG STUFF!!! AS IT HAPPENS'''
        self.logger = logging.getLogger(__name__)

        '''CONFIG ASSUMING ENV VARS'''
        self.commands_file = commands_file
        self.script_location = script_location
        self.script_type = script_type

        '''
        CONFIG FILE IS AN ABSTRACTION HERE SINCE THIS CLASS IS JUST REUSED ALMOST AS IS.
        FOR SIMPLICITY THE GetBuildTest class, which reads the actual ini, serves the role of config class
        for actual test suites. But the basic idea is the same setup of params used at the moment.
        '''
        self.conf = config_file

        '''SETUP SSH'''
        self.ssh_target_ip = None
        self.ssh_target_port = None
        self.ssh_target_user = None
        self.ssh_target_pswd = None
        self.ssh_client = None

        '''SETUP SSH COPY'''
        self.ssh_scp_content_location = None
        self.ssh_target_dir = None

        '''SETUP FABRIC CONNECTION'''
        self.fabric_connection = None

        '''SETUP commands to be executed remotely'''
        self.ssh_commands_to_exec = {}

        '''
        SETUP a possibility to check command execution on the remote host via a dedicated script.
        The script command should be part of the above dictionary and its key
        should be made known in advance.
        '''
        self.ssh_ded_test_key = None

        '''SETUP PARAMS when CONFIG is available'''
        if self.conf == None:
            '''SETUP VIRT_BOX exec commands (VBoxManage based) from conf file PREFS'''
            self.vm_start_cmnd = None
            self.vm_shutdown_cmnd = None
            self.vm_resnap_cmnd = None
            self.vm_makesnap_cmnd = None
        else:
            '''SETUP VIRT_BOX exec commands (VBoxManage based) from conf file PREFS.
            These do things like restore to the proper snap, stop, start the VBox image currently used etc.
            '''
            self.set_vbox_manage()

        '''SETUP SSH Connection PARAMS'''
        '''These are general prefs for any ssh connection establishment and will be used to connect
        to build/test/sut machines in this BOOTSTRAP SCENARIO'''
        self.ssh_target_ip = None
        self.ssh_target_port = None
        self.ssh_target_user = None
        self.ssh_target_pswd = None

        '''SETUP SSH COPY, same as above. Whether it is get or put, they are here to HELP.'''
        self.ssh_source_dir = None
        self.ssh_target_dir = None

        '''------------------------------------------------------------------------------------------------------'''
        '''STUFF SPECIFIC FOR BOOTSTRAP'''
        '''File locations and ssh connections. These could probably be deleted and managed without;
        they mostly illustrate the temptation to prepare everything "in advance".'''
        self.ssh_scp_content_location_build = None
        self.ssh_scp_content_location_test = None
        self.ssh_scp_content_location_sut = None

        '''Connection preferences'''
        self.ssh_target_build_ip = None
        self.ssh_target_build_port = None
        self.ssh_target_build_user = None
        self.ssh_target_build_pswd = None

        self.ssh_target_tests_ip = None
        self.ssh_target_tests_port = None
        self.ssh_target_tests_user = None
        self.ssh_target_tests_pswd = None

        self.ssh_target_sut_ip = None
        self.ssh_target_sut_port = None
        self.ssh_target_sut_user = None
        self.ssh_target_sut_pswd = None

        '''scp preferences'''
        self.ssh_scp_content_location_build = None
        self.ssh_scp_content_location_test = None
        self.ssh_scp_content_location_sut = None
        # self.set_all_ssh_connections()

        '''------------------------------------------------------------------------------------------------------'''
        '''SETUP A PLACEHOLDER FOR ANY EXTERNAL SCRIPT EXEC PARAMS'''
        self.script_exec = None
        self.script_dir = None
        return

    def run_script(self):
        self.logger.debug("<== RUNNING EXTERNAL SCRIPT ==>")
        self.logger.debug(self.script_exec)
        p = Popen(self.script_exec,
                  shell=False,
                  cwd=self.script_dir)
        stdout, stderr = p.communicate()
        p.terminate()
        p.kill()
        self.logger.debug("<== STOP RUNNING EXTERNAL SCRIPT -|DONE|- ==>")
        return

    '''--------------------------------------------------------------------------------------------------------------'''
    '''Functions for remote shell scripts execution'''

    def install_build(self):
        self.set_fabric_connection()
        self.fabric_run_commands()
        return

    def start_sut(self):
        self.set_fabric_connection()
        self.fabric_run_commands()
        time.sleep(40)
        return

    def stop_sut(self):
        self.set_fabric_connection()
        self.fabric_run_commands()
        return

    def restart_sut(self):
        self.start_sut()
        self.stop_sut()
        return

    '''------------------------------------------------------------------------------------------------------------------'''
    '''Functions for VM execution'''
    '''------------------------------------------------------------------------------------------------------------------'''

    def vm_start(self):
        self.logger.debug("<== Start VM ==>")
        self.logger.debug(self.vm_start_cmnd)
        if self.conf.syst == 'Windows':
            p = Popen(self.vm_start_cmnd,
                      shell=False,
                      cwd=self.script_location)
            stdout, stderr = p.communicate()
        elif self.conf.syst == 'Linux':
            # p = Popen(self.vm_shutdown_cmnd,
            p = Popen(shlex.split(self.vm_start_cmnd),
                      shell=False,
                      cwd=self.script_location)
            stdout, stderr = p.communicate()
        '''It takes some time for VirtualBox to start, so here we wait until an ssh session can be
        opened into the box as a test. It is a good enough indication that other activities can be
        performed on the REMOTE box.'''
        test = 1
        while test == 1:
            try:
                time.sleep(60)
                self.logger.debug("<== SSH client attempts to connect to VM ==>")
                self.create_Paramiko_SSHClient()
                ssh_trans = self.ssh_client.get_transport()
                ssh_conn_state = ssh_trans.is_active()
                if ssh_conn_state == True:
                    test = 2
                else:
                    raise Exception('Something UnPredictable is happening with SSH_Client')
                self.logger.debug("<== SSH client HAS CONNECTED to VM -|DONE|- ==>")
            except:
                try:
                    raise
                except TimeoutError:
                    self.ssh_client.close()
                    continue
                except Exception as e:
                    self.ssh_client.close()
                    continue
        p.terminate()
        p.kill()
        return

    def vm_shutdown(self):
        self.logger.debug("<== SHUTTING DOWN NEEDED image IF IT IS UP ==>")
        self.logger.debug(self.vm_shutdown_cmnd)
        if self.conf.syst == 'Windows':
            p = Popen(self.vm_shutdown_cmnd,
                      # p = Popen(shlex.split(self.vm_shutdown_cmnd),
                      shell=False,
                      cwd=self.script_location)
            stdout, stderr = p.communicate()
            p.terminate()
            p.kill()
            self.logger.debug("<== SHUTTING DOWN NEEDED image -|DONE|- ==>")
            return
        elif self.conf.syst == 'Linux':
            # p = Popen(self.vm_shutdown_cmnd,
            p = Popen(shlex.split(self.vm_shutdown_cmnd),
                      shell=False,
                      cwd=self.script_location)
            stdout, stderr = p.communicate()
            p.terminate()
            p.kill()
            self.logger.debug("<== SHUTTING DOWN NEEDED image -|DONE|- ==>")
            return

    def vm_restore_snap(self):
        self.logger.debug("<== RESTORING NEEDED SNAP ==>")
        self.logger.debug(self.vm_resnap_cmnd)
        p = Popen(self.vm_resnap_cmnd,
                  shell=False,
                  cwd=self.script_location)
        stdout, stderr = p.communicate()
        p.terminate()
        p.kill()
        self.logger.debug("<== RESTORING NEEDED SNAP -|DONE|- ==>")
        return

    '''------------------------------------------------------------------------------------------------------------------'''
    '''Functions for LOGGING server control'''
    '''------------------------------------------------------------------------------------------------------------------'''

    def start_log_server(self):
        self.logger.debug("<== STARTING Log Server FOR BL to WRITE OUT its STATUS ==>")
        self._p = Popen(self.vm_log_srv_exec,
                        shell=False,
                        cwd=self.vm_log_srv_exec_dir)
        stdout, stderr = self._p.communicate()
        log_srv_pid = self._p.pid
        self._p.terminate()
        self._p.kill()
        log_srv_pid = self._p.pid
        return

    '''------------------------------------------------------------------------------------------------------------------'''
    '''Functions for retrieving builds and tests'''
    '''------------------------------------------------------------------------------------------------------------------'''

    def test_build_to_test(self,
                           map_to_test=None,
                           key_to_test=None,
                           sub_key_name_builds=None,
                           sub_key_name_tdir=None):
        found = False
        if map_to_test != None:
            for key in map_to_test:
                res = map_to_test.get(key)
                builds = res[sub_key_name_builds]
                if int(key_to_test) in builds:
                    res02 = res[sub_key_name_tdir]
                    found = True
                    return res02
                    # break
        if found != True:
            raise Exception("BUILD ID IS NOT IN CONF FILE")
        return

    def get_build(self):
        self.ssh_set_connection_params()
        self.set_fabric_connection()
        self.scp_files()
        return

    def get_tests(self):
        return

    '''------------------------------------------------------------------------------------------------------------------'''
    '''Common tools'''
    '''------------------------------------------------------------------------------------------------------------------'''

    '''Splitting on a delimiter and getting an element by index'''
    def str_split_get_pop_elem(self,
                               str_in=None,
                               delim_in=None,
                               which_elem=None):
        if str_in is None or delim_in is None:
            raise Exception("Missing some params")
        if (not (str(which_elem) == "FIRST")) and (not (str(which_elem) == "LAST")) and (
                not isinstance(which_elem, int)):
            raise Exception("INDEX PASSED is NOT a VALID 'FIRST' or 'LAST' string OR NOT a NUMBER: "
                            + str(which_elem))
        else:
            res_arr = str_in.split(delim_in)
            if which_elem == 'LAST':
                res = res_arr.pop()
                return res
            if which_elem == 'FIRST':
                res = res_arr.pop(0)
                return res
            else:
                # if isinstance(which_elem, int):
                res = res_arr.pop(which_elem)
                return res
                # else:
                #     raise Exception("INDEX PASSED is NOT a NUMBER")
        return

    '''ZIPPING 2 maps together'''
    def zip_to_map(self,
                   to_zip01=None,
                   to_zip02=None):
        vals_combined = zip(to_zip01, to_zip02)
        final_var = {}
        '''Final ARRAY of commands for execution over SSH'''
        for key, val in vals_combined:
            final_var[key] = val
        return final_var

    '''Extracting some conf params to DICT'''
    def conf_key_to_arr(self,
                        conf_parser=None,
                        section_key=None,
                        switch=None):  # possible values: keys, values
        final_arr = []
        if switch == 'keys':
            final_arr = [key for key in conf_parser[section_key]]
        elif switch == 'values':
            final_arr = [key for key in conf_parser[section_key].values()]
        return final_arr

    '''ENV VARS'''
    def load_env_vars(self):
        # def set_env_vars(self):
        """
        Run the cmd file to set env variables
        :param default_level:
        :param env_key:
        :return:
        """
        '''Run the cmd file to set env variables'''
        if self.script_type == "win":
            p = Popen(self.commands_file,
                      shell=False,
                      cwd=self.script_location)
            stdout, stderr = p.communicate()
            p.terminate()
            p.kill()
            return
        if self.script_type == "sh":
            return

    '''------------------------------------------------------------------------------------------------------------------'''
    '''Functions for SCP and remote command exec over SSH'''
    '''------------------------------------------------------------------------------------------------------------------'''

    def scp_put_files(self):
        """"""
        '''SET CONFIG SCP PARAMS'''
        if self.conf != None:
            '''SET CONNECTION PREFS'''
            # self.set_connection_params()
            '''SET SSH CLIENT AND TRANSPORT'''
            self.create_Paramiko_SSHClient()
            scp = SCPClient(self.ssh_client.get_transport())
            '''SET PREFS AND COPY ALL CONFIGS AND SCRIPTS to DUT'''
            # self.set_scp_details()
            scp.put(self.ssh_scp_content_location, remote_path=self.ssh_target_dir)
            return
        else:
            raise ValueError("Configuration file is not PRESENT in the class")

    def scp_get_files(self):
        """"""
        if self.conf != None:
            '''SET CONNECTION PREFS'''
            # self.set_connection_params()
            '''SET SSH CLIENT AND TRANSPORT'''
            self.create_Paramiko_SSHClient()
            scp = SCPClient(self.ssh_client.get_transport())
            self.logger.info("<== SCP CLASS STARTED GETTING files ==> ")
            scp.get(remote_path=self.ssh_scp_content_location, local_path=self.ssh_target_dir)
            scp.close()
            self.logger.info("<== SCP CLASS HAS GOT ==> ")
            '''Check if it is actually here'''
            f_name = self.str_split_get_pop_elem(str_in=self.ssh_scp_content_location,
                                                 delim_in='/',
                                                 which_elem='LAST')
            if self.conf.syst == 'Windows':
                copied_file = Path(self.ssh_target_dir + '\\' + f_name)
            elif self.conf.syst == 'Linux':
                copied_file = Path(self.ssh_target_dir + '/' + f_name)
            if copied_file.is_file():
                self.logger.debug("<== File with name ==> ")
                self.logger.debug("<==" + str(copied_file) + "==> ")
                self.logger.debug("<== seems to be in place. Now the question is whether it is the right one ---> ")
        else:
            raise ValueError("Configuration file is not PRESENT in the CLASS!!!")
        return

    def set_all_ssh_connections(self):
        """
        VAR SSH points connections
        :return:
        """
        self.ssh_target_build_ip = self.conf.ssh_build_ip
        self.ssh_target_build_port = self.conf.ssh_build_port
        self.ssh_target_build_user = self.conf.ssh_build_user
        self.ssh_target_build_pswd = self.conf.ssh_build_pswd

        self.ssh_target_tests_ip = self.conf.ssh_tests_ip
        self.ssh_target_tests_port = self.conf.ssh_tests_port
        self.ssh_target_tests_user = self.conf.ssh_tests_user
        self.ssh_target_tests_pswd = self.conf.ssh_tests_pswd

        self.ssh_target_sut_ip = self.conf.ssh_sut_ip
        self.ssh_target_sut_port = self.conf.ssh_sut_port
        self.ssh_target_sut_user = self.conf.ssh_sut_user
        self.ssh_target_sut_pswd = self.conf.ssh_sut_pswd

        '''SETUP SSH COPY'''
        self.ssh_scp_content_location_build = self.conf.ssh_scp_content_location_build
        self.ssh_scp_content_location_test = self.conf.ssh_scp_content_location_test
        self.ssh_scp_content_location_sut = self.conf.ssh_scp_content_location_sut

    def set_connection_params(self):
        """
        SET CONNECTION PREFS
        """
        if self.conf != None:
            self.ssh_target_ip = self.conf.ssh_host
            self.ssh_target_port = self.conf.ssh_port
            self.ssh_target_user = self.conf.ssh_user
            self.ssh_target_pswd = self.conf.ssh_pwd
        else:
            raise Exception('Configuration class is NOT loaded BUT needed. Too early to set that!')
        return

    '''------------------------------------------------------------------------------------------------------------------'''
    '''SETTERS for VARIOUS PARAMS'''
    '''------------------------------------------------------------------------------------------------------------------'''

    def create_Paramiko_SSHClient(self):
        """
        Setup pure paramiko SSH client
        :return:
        """
        if self.conf == None:
            self.set_connection_params()
        if self.ssh_client != None:
            self.ssh_client.close()
        self.ssh_client = paramiko.SSHClient()
        self.ssh_client.load_system_host_keys()
        self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.logger.debug(" <== CONNECTION PARAMS: " + self.ssh_target_ip + ": " + self.ssh_target_port + " ==>")
        self.ssh_client.connect(hostname=self.ssh_target_ip,
                                port=self.ssh_target_port,
                                username=self.ssh_target_user,
                                password=self.ssh_target_pswd)
        return

    def set_fabric_connection(self):
        """
        SET CONNECTION PREFS
        """
        self.fabric_connection = Connection(host=self.ssh_target_ip,
                                            port=self.ssh_target_port,
                                            user=self.ssh_target_user)
        self.fabric_connection.connect_kwargs.password = self.ssh_target_pswd
        return

    def set_scp_details(self):
        """
        SET ALL THE PARAMS RELATED TO COPYING
        :return:
        """
        self.ssh_scp_content_location = self.conf.ssh_content_to_copy
        self.ssh_target_dir = self.conf.ssh_target_dir
        return

    def set_vbox_manage(self):
        """
        SETUP VIRT_BOX exec commands (VBoxManage based) from conf file PREFS
        """
        if self.conf != None:
            self.vm_start_cmnd = self.conf.vm_vbox_manage + ' startvm ' + self.conf.vm_alt_img + ' --type headless'
            self.vm_shutdown_cmnd = self.conf.vm_vbox_manage + ' controlvm ' + self.conf.vm_alt_img + ' poweroff'
            self.vm_resnap_cmnd = (self.conf.vm_vbox_manage + ' snapshot ' + self.conf.vm_alt_img +
                                   ' restore ' + self.conf.vm_alt_img_snapshot)
            self.vm_makeclone_cmnd = self.conf.vm_vbox_manage + ' clonevm ' + self.conf.vm_alt_img
        else:
            raise Exception("Test Suite`s conf file is not Present")
        return

    '''------------------------------------------------------------------------------------------------------------------'''
    '''UTILITY FUNCTIONS'''
    '''------------------------------------------------------------------------------------------------------------------'''

    def fabric_run_commands(self):
        """
        TODO: There must be a possibility of a dedicated test on the remote host with a shell script
        which may be needed only in some cases. The script should be uploaded to the target machine
        in the same archive as config files and start scripts. For that, configure a special key on
        the class level and, after checking whether it exists, pop a command for the script execution
        from the common dict (may not be an entirely good idea but will work).
        """
        ded_test_command = None
        if self.ssh_ded_test_key != None:
            ded_test_command = self.ssh_commands_to_exec.pop(self.ssh_ded_test_key)
        else:
            raise Exception("TESTING SCRIPT IS NOT PRESENT")
        for key, command in self.ssh_commands_to_exec.items():
            if key == self.ssh_ded_test_key:
                # ded_test_command = self.ssh_commands_to_exec.pop(self.ssh_ded_test_key)
                continue
            else:
                self.logger.debug("<== THIS COMMAND is Going To be Executed REMOTELY ==>")
                self.logger.debug(str(command))
                self.logger.debug("<===================================================>")
                result = self.fabric_connection.run(command)
                if ded_test_command != None:
                    result_dedicated_test = self.fabric_connection.run(ded_test_command)
                else:
                    result_dedicated_test = None
                '''
                self.vm_ssh_cmds_exec_banner(command=command,
                                             ending=' for the key ' + str(key) + ' has been executed ',
                                             exec_res01='result.ok: ' + str(result.ok),
                                             exec_res02='result.return_code: ' + str(result.return_code),
                                             exec_res03='result_dedicated_test.return_code: ' + str(
                                                 result_dedicated_test.return_code),
                                             logging_level='DEBUG',
                                             logger=self.logger
                                             )
                '''
                self.logger.debug("<===================================================>")
                self.logger.debug("<== WITH THE FOLLOWING RESULT ==>")
                self.logger.debug(str(result.ok) + ' ' + str(result.return_code) + ' ' + str(
                    result_dedicated_test.return_code if result_dedicated_test != None else None))
                self.logger.debug("<===================================================>")
                if result.ok == True and result.return_code == 0 and (
                        result_dedicated_test is None or result_dedicated_test.return_code == 0):
                    pass
                else:
                    raise Exception('Last command did not go through. Execution interrupted')
        return

    def set_test_env(self):
        """
        Function:
        1) starts VM
        2) copies current archive of configs and scripts to SUT Host
        3) sets predefined start SUT remote commands as default
        4) executes them in the start_sut function
        :return:
        """
        '''make it available to the ScriptsControlling class'''
        # es.conf = sonata_conf
        self.vm_shutdown()
        self.vm_start()
        '''test whether files can be copied via scp'''
        self.scp_files()
        '''Do the test for actions on the remote host'''
        self.ssh_ded_test_key = 'test_exec'
        '''Start the SUT on the remote box'''
        self.ssh_commands_to_exec = self.sut_start_commands
        self.start_sut()
        return

    def tear_down__test_env(self):
        """
        Function:
        1) Stops SUT
        2) Stops VM
        3) Restores clean snap
        :return:
        """
        self.ssh_commands_to_exec.clear()
        self.ssh_commands_to_exec = self.sut_stop_commands
        self.stop_sut()
        self.vm_shutdown()
        self.vm_restore_snap()
        return
from fabric2 import Config           # SSH configuration
from fabric2 import Connection       # connection
from fabric2 import ThreadingGroup   # multi-threaded execution
from fabric2 import SerialGroup      # single-threaded execution
from invoke import task              # @task annotation
from invoke import Responder

# con = Connection('[email protected]', connect_kwargs={'key_filename': 'id_rsa'})

# Specify the SSH config file
# Config.ssh_config_path = 'ssh_config'

hosts = ('192.168.19.128', '192.168.19.129')

# SingleThread Run
print('--- SingleThread Run ---')
for host in hosts:
    con = Connection(host)
    print(con.host)
    con.run('hostname')

# SingleThread Group Run
print('--- SingleThread Group Run ---')
result = SerialGroup('192.168.19.128', '192.168.19.129').run('hostname')

# MultiThread Group Run
print('--- MultiThread Group Run ---')
result = ThreadingGroup('192.168.19.128', '192.168.19.129').run('hostname')
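# Both group runs above return a GroupResult, a dict-like mapping of each Connection
# to its Result, so the per-host output can be inspected after the call:
for connection, run_result in result.items():
    print(connection.host, run_result.stdout.strip())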
from __future__ import print_function
class gce_api:
    URI = 'https://www.googleapis.com'
    CommonCalls = {
        'machineTypeList': 'https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/machineTypes',
        'imagesList': 'https://www.googleapis.com/compute/v1/projects/{project}/global/images',
        'projectInfo': 'https://www.googleapis.com/compute/v1/projects/{project}',
        'firewallList': 'https://www.googleapis.com/compute/v1/projects/{project}/global/firewalls',
        'firewallResource': 'https://www.googleapis.com/compute/v1/projects/{project}/global/firewalls/{firewallName}',
        'instances': 'https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances',
        'serialPort': 'https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{instanceName}/serialPort',
        'instanceInfo': 'https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{instanceName}'
    }

    def __init__(self, json_key, properties, storage_key):
        self.properties = properties
        self.properties['keyFile'] = F'{os.path.join(self.properties["keyDir"], self.properties["instanceName"])}'
        self.properties['pubKeyFile'] = F'{self.properties["keyFile"] + ".pub"}'
        self.credentials = service_account.Credentials.from_service_account_file(json_key)
        self.credentials_storage = service_account.Credentials.from_service_account_file(storage_key)
        self.scoped_credentials = self.credentials.with_scopes(
            ['https://www.googleapis.com/auth/cloud-platform'])
        self.storage_credentials = self.credentials_storage.with_scopes(
            ['https://www.googleapis.com/auth/devstorage.full_control'])
        self.authed_session = AuthorizedSession(self.scoped_credentials)
        self.storage_session = AuthorizedSession(self.storage_credentials)
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = storage_key
        # GOOGLE_APPLICATION_CREDENTIALS should have been set as an environment variable. This is ugly,
        # but storage_client here can't seem to accept the path to the json file
        self.storage_client = storage.Client()

    def waitUntilDone(func):
        def wrapper(self, *args, **kwargs):
            response = func(self, *args, **kwargs)
            if response is not None and 'status' in response.keys():
                while True:  # response['status'] != "DONE":
                    display(response)
                    time.sleep(0.5)
                    response = func(self, *args, **kwargs)
                    # display(response)
            else:
                response = None
            return response
        return wrapper

    def get(self, *args, **kwargs):
        self.method = "get"
        return self.selectRunType(*args, **kwargs)

    def post(self, *args, **kwargs):
        self.method = "post"
        return self.selectRunType(*args, **kwargs)

    def delete(self, *args, **kwargs):
        self.method = "delete"
        return self.selectRunType(*args, **kwargs)

    def selectRunType(self, *args, **kwargs):
        wait = kwargs.get('wait', False)
        kwargs.pop('wait', None)
        if not wait:
            result = self.runRequest(*args, **kwargs)
        else:
            result = self.persistent(*args, **kwargs)
        return result

    def runRequest(self, *args, **kwargs):
        properties = kwargs.get('properties', None)
        if properties != None:
            self.properties = properties
        kwargs.pop('properties', None)
        call = gce_api.CommonCalls[args[0]].format(**self.properties)
        # display(kwargs)
        response = getattr(self.authed_session, self.method)(call, **kwargs)
        # display(call)
        if response.status_code == 200:
            return json.loads(response.text)
        else:
            display("Response code was {}. It might not have worked".format(response.status_code))
            return None

    def request_storage(self, url, payload='None', method='get'):
        if payload == 'None':
            return getattr(self.storage_session, method)(url)
        else:
            return getattr(self.storage_session, method)(url, json=payload)

    @waitUntilDone
    def persistent(self, *args, **kwargs):
        return self.runRequest(*args, **kwargs)

    def create_bucket(self, name):
        return self.storage_client.create_bucket(name)

    def generateSSHKey(self):
        display('Generating ssh key...')
        c = Connection('localhost')
        c.local('rm -f "{keyFile}.*"'.format(**self.properties))
        c.local("echo 'yes' | ssh-keygen -t rsa -f {keyFile} -C {username} -N '' ".format(**self.properties),
                hide='out')
        c.close()
        # p = Popen("echo 'yes' | ssh-keygen -t rsa -f {keyFile} -C {username} -N '' ".format(**self.properties),
        #           stdout=PIPE,
        #           shell=True,
        #           stderr=PIPE
        #           )
        # print(p.communicate())
        with open(self.properties['pubKeyFile'], 'r') as f:
            display('Opening {}'.format(self.properties['pubKeyFile']))
            self.pub = f.read().strip()

    def setConnection(self):
        self.connection = Connection(host=self.properties['ip'],
                                     user=self.properties['username'],
                                     connect_kwargs={"key_filename": self.properties['keyFile']})
        # self.connection.open()

    def setSSHPort(self, ip='', inOffice='True'):
        # display(cloudInfo)
        ipList = ["151.157.0.0/16", ]
        if not inOffice:
            ipList.append(ip)
        info = self.get('firewallList')
        firewalls = [i['name'] for i in info['items']]
        ssh = {
            "name": "ssh",
            "allowed": [
                {
                    "IPProtocol": "tcp",
                    "ports": [
                        "22",
                    ]
                }
            ],
            "sourceRanges": ipList,
            "targetTags": [
                "ssh"
            ]
        }
        if 'ssh' in firewalls:
            self.properties['firewallName'] = 'ssh'
            info = self.delete('firewallResource')
            display(info['operationType'], info['targetLink'])
            # Waiting until the firewall has been deleted
            info = self.get('firewallList')
            firewalls = [i['name'] for i in info['items']]
            while 'ssh' in firewalls:
                time.sleep(0.5)
                info = self.get('firewallList')
                firewalls = [i['name'] for i in info['items']]
        # Actually creating the firewall
        info = self.post('firewallList', json=ssh)
        display(info['operationType'], info['targetLink'])

    def runScript(self, file, getResults=False, out='results.txt'):
        self.connection.put(file)
        name = os.path.basename(file)
        self.connection.run('chmod +x {}'.format(name))
        self.connection.run('./{}'.format(name))
        if getResults:
            self.connection.get("results.txt", out)
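# A hedged sketch of the flow gce_api appears to expect; the key-file names and the
# properties keys below are inferred from the class body, not from any documentation:
props = {
    'project': 'my-project',        # placeholder GCP project
    'zone': 'us-central1-a',
    'instanceName': 'worker-1',
    'keyDir': '/tmp/keys',
    'username': 'dev',
    'ip': '203.0.113.20',           # filled in once the instance has an address
}
api = gce_api('compute_sa.json', props, 'storage_sa.json')   # placeholder key files
api.generateSSHKey()
api.setSSHPort()
api.setConnection()
api.runScript('run_job.sh', getResults=True)                 # placeholder script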