def shell(native=False, tmux=True):
    """ Open common ssh shell """
    if native or eval(str(native)):
        action('Opening native fabric shell')
        open_shell(config['ssh_startup'] if eval(str(tmux)) else '')
    else:
        action('Opening ssh shell')
        key = current()['ssh_cert']
        password = SERVERS.by_attr(
            'ip', env['host_string'].split('@')[1].split(':')[0])[0]['ssh_password']
        if key:
            key = '-i ' + os.path.join(home_folder, '.temp/', key)
        # ssh = "sshpass -p '%s' ssh %s -p %s %s %s" % (password,
        ssh = "ssh %s -p %s %s %s" % (
            env['host_string'].split(':')[0],
            env['host_string'].split(':')[1],
            key,
            "-t '%s'" % config['ssh_startup'] if eval(str(tmux)) else '')
        if platform.system() == 'Linux':
            ssh = "sshpass -p '%s' %s" % (password, ssh)
        else:
            print password
        os.system(ssh)
        print ssh
        clean()
def deploy_node():
    # Note that open_shell was not working in the example we ran through
    subnet_id = create_network()
    node_ip = create_server(subnet_id)
    print node_ip
    env["hosts"] = [node_ip]
    open_shell()
def reload_server(user, password, shell_after=True):
    env.user = user
    env.password = password
    checkout()
    with cd("~/%s" % git_repo_name):
        run("pm2 reload app.json")
    if shell_after:
        open_shell()
def deploy(user, password, shell_before=False, shell_after=True):
    env.user = user
    env.password = password
    if shell_before:
        open_shell()
    checkout()
    start_server()
    if shell_after:
        open_shell()
def console(name=None, **kwargs):
    """
    Connects to Container console

    :param name: Name of container
    """
    if not name:
        raise StandardError('You must specify a name')
    open_shell('sudo lxc-console -e b -n {0} ; exit'.format(name))
def fixture():
    """
    Upload merge.sql to the remote project directory and, if the
    transfer succeeds, open an interactive shell to apply it.
    """
    dp = put('merge.sql', '/home/django/django_project/merge.sql')
    if dp.succeeded:
        open_shell()
def open_shell():
    """
    Opens a shell sequentially on all servers passed into the command.
    """
    print(colors.green("Opening shell on server {host}".format(host=fab.env.host)))
    fab.open_shell()
    print(colors.green("Finished shell session on server {host}".format(host=fab.env.host)))
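Since this task shadows Fabric's own open_shell and delegates through the fab alias, it composes naturally with execute(). A minimal sketch of driving it over several hosts, assuming Fabric 1.x; the host strings are hypothetical:

from fabric import api as fab

fab.env.hosts = ['deploy@web1.example.com', 'deploy@web2.example.com']

# execute() runs the task once per host in fab.env.hosts; each pass opens
# an interactive shell and moves on when the user exits it
fab.execute(open_shell)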
def set_hostname(hostname):
    ctx.logger.info("Change Cisco CSR1000v's hostname")
    ctx.logger.info(str(env))
    command = "enable \n \
        conf t \n \
        hostname {0} \n \
        end \n \
        write memory \n \
        exit \n".format(hostname)
    open_shell(command)
    return None
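set_hostname() works because open_shell() types its command argument into the interactive session, so any IOS-style sequence can be sent as one newline-joined script. A minimal sketch under that assumption; the helper name and example commands are hypothetical:

from fabric.api import open_shell

def run_ios_commands(commands):
    # join the commands into one script; the trailing "exit" closes the
    # device CLI so the session ends instead of waiting for more input
    script = "\n".join(list(commands) + ["exit"]) + "\n"
    open_shell(script)

# e.g. run_ios_commands(["enable", "conf t", "hostname edge-1", "end",
#                        "write memory"])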
def debug():
    """
    Change to the remote project directory and open an interactive
    shell there for debugging.
    """
    server = run("cd {}".format(remote_dir))
    if server.succeeded:
        with cd(remote_dir):
            open_shell()
def shell(buildout_dir=None):
    """ Open a shell in the current buildout directory on the server """
    appenv_info = env.deploy_info[env.appenv]
    if not buildout_dir:
        current_link = appenv_info.get('current_link')
        if current_link:
            buildout_dir = run("readlink {}".format(current_link), warn_only=True)
    if not buildout_dir:
        buildout_dir = appenv_info.get('buildout') or 'buildout'
    open_shell("cd {}".format(buildout_dir))
def reload_server(user, password, shell_after=True):
    """
    Restarts the server. If it's in cluster mode, it will reload with
    zero downtime.
    """
    env.user = user
    env.password = password
    checkout()
    with cd("~/%s" % git_repo_name):
        run("pm2 reload app.json")
    if shell_after:
        open_shell()
def update_frontend(user, password, shell_after=True):
    """
    Just rebuilds the application frontend files. The server will then
    be able to pull from the new files and display the frontend updates.

    If there are updates for the server itself that serves the frontend
    content, use `reload_server(...)`
    """
    env.user = user
    env.password = password
    checkout()
    with cd("~/%s" % git_repo_name):
        run("grunt build:dist --force")
    if shell_after:
        open_shell()
def setup_python_virtualenv(self):
    # download dependencies
    run('sudo -H pip3 install --upgrade pip')
    run('sudo pip3 install setuptools')
    run('sudo -H pip3 install virtualenv virtualenvwrapper')
    # configure virtualenvwrapper to load on terminal open
    run('echo "export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3" >> ~/.bashrc')
    run('echo "export WORKON_HOME=/home/{}/venv" >> ~/.bashrc'.format(self.USER_ID))
    run('echo "source /usr/local/bin/virtualenvwrapper.sh" >> ~/.bashrc')
    # final step of this is to reboot server
    # run('source ~/.bashrc')  # does not work, run manually
    open_shell()
    # 1. source ~/.bashrc
    # 2. mkvirtualenv woobak
    return True
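The commented-out run('source ~/.bashrc') fails because every run() gets a fresh, non-interactive shell, which is why the snippet falls back to open_shell() for the manual steps. A minimal sketch of the usual non-interactive workaround, assuming virtualenvwrapper was installed as above (the virtualenv name 'woobak' comes from the comments):

from fabric.api import run, prefix

def make_virtualenv(name='woobak'):
    # prefix() prepends its command to every run() in the block, so the
    # wrapper script is sourced in the same shell that runs mkvirtualenv
    with prefix('export WORKON_HOME=$HOME/venv && '
                'source /usr/local/bin/virtualenvwrapper.sh'):
        run('mkvirtualenv {0}'.format(name))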
def hatop(buildout_dir=None):
    appenv_info = env.deploy_info[env.appenv]
    if not env.is_master:
        print env.host_string, 'not master'
        return
    if 'haproxy' not in appenv_info:
        print 'haproxy not in', appenv_info.keys()
        return
    print 'yes'
    if not buildout_dir:
        current_link = appenv_info.get('current_link')
        if current_link:
            buildout_dir = run("readlink {}".format(current_link), warn_only=True)
    if not buildout_dir:
        buildout_dir = appenv_info.get('buildout') or 'buildout'
    open_shell('cd {} && exec /usr/bin/python ./hatop -s var/run/haproxy-socket'.format(buildout_dir))
def execute():
    key_filename = os.path.expanduser(profile.ssh_key)
    with fab.settings(host_string=host_string,
                      key_filename=key_filename,
                      port=port,
                      warn_only=True):
        if use_sudo:
            output = fab.sudo(command)
        elif open_shell:
            fab.open_shell(command)
            return None
        else:
            output = fab.run(command)
        if output.failed:
            raise CloudifyCliError('Failed to execute: {0} ({1})'.format(
                output.real_command, output.stderr))
        return output
def execute():
    key_filename = os.path.expanduser(utils.get_management_key())
    with fab.settings(
            host_string=host_string,
            key_filename=key_filename,
            warn_only=True):
        if use_sudo:
            output = fab.sudo(command)
        elif open_shell:
            fab.open_shell(command)
            return None
        else:
            output = fab.run(command)
        if output.failed:
            raise CloudifyCliError(
                'Failed to execute: {0} ({1})'.format(
                    output.real_command, output.stderr))
        return output
def stop_server():
    "runs shutdown.sh -force and kills tomcat"
    with settings(warn_only=True):
        rrun('source ~/.bashrc && {CSPACE_JEESERVER_HOME}/bin/shutdown.sh -force', pty=False)
    pid = get_pid()
    if not pid:
        print(red("no java process found"))
        return
    pids = pid.split('\r\n')
    if len(pids) > 1:
        puts(magenta("multiple pids found: " + ','.join(pids)))
        puts(magenta("kill them yourself (exit when done)"))
        open_shell()
    else:
        puts(yellow("stopping pid " + pid))
        with settings(warn_only=True):
            rrun('kill -9 ' + pid)
        rm_pid()
def execute():
    with fab.settings(
            host_string=host_string,
            key_filename=key_filename,
            port=port,
            warn_only=True):
        if use_sudo:
            output = fab.sudo(command)
        elif open_shell:
            fab.open_shell(command)
            return None
        else:
            output = fab.run(command)
        if output.failed:
            raise CloudifyCliError(
                'Failed to execute: {0} ({1})'.format(
                    output.real_command, output.stderr))
        return output
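The three execute() closures above share one dispatch pattern: sudo() for privileged commands, open_shell() when an interactive session is requested, run() otherwise. A minimal self-contained sketch of that pattern, with the closure variables turned into parameters and CloudifyCliError swapped for RuntimeError to keep it independent:

import os
from fabric import api as fab

def remote_exec(host_string, command, key_filename=None, port=22,
                use_sudo=False, interactive=False):
    if key_filename:
        key_filename = os.path.expanduser(key_filename)
    with fab.settings(host_string=host_string, key_filename=key_filename,
                      port=port, warn_only=True):
        if interactive:
            fab.open_shell(command)  # hands the TTY over; nothing to return
            return None
        output = fab.sudo(command) if use_sudo else fab.run(command)
        if output.failed:
            raise RuntimeError('Failed to execute: {0} ({1})'.format(
                output.real_command, output.stderr))
        return output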
def showVersion():
    """
    Display the device's software version.
    """
    with open("/tmp/debug", "w") as ofile:
        ofile.write(cyan("Test session %s @ %s" % (env.user, env.host)))
    try:
        # open_shell("terminal length 0")
        # open_shell("show version")
        open_shell("enable\n\nterminal length 0\nshow version\n \nexit\n")
        # open_shell("exit\n")
        # open_shell("enable\nP1ngazz0\nterminal length 0\nshow version\n exit\n")
        # open_shell("show version", shell=False)
    except Exception as e:
        print(red("%s exception: %s" % (env.host, e)))
    return
def ssh(hosts, *cmd):
    if cmd:
        # join the individual commands into a single shell pipeline
        fullcmd = " | ".join(cmd)
    if isinstance(hosts, (list, tuple)):
        for host in hosts:
            fab.env.host_string = host
            if cmd:
                fab.run(fullcmd)
            else:
                fab.open_shell()
    elif isinstance(hosts, str):
        fab.env.host_string = hosts
        if cmd:
            fab.run(fullcmd)
        else:
            fab.open_shell()
    else:
        print "Error: hosts must be string, list or tuple."
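A brief usage sketch for ssh() above; the host strings are hypothetical:

def check_uptimes():
    # extra positional arguments are joined into one pipeline, so this
    # runs "uptime | wc -l" on each host; with no extra arguments ssh()
    # would open an interactive shell instead
    ssh(['deploy@web1', 'deploy@web2'], 'uptime', 'wc -l')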
def config():
    """
    Enters global configuration mode.

    We'll ask the user if they want to manually enter commands from here
    out. If so, we'll open a shell for them, otherwise we'll continue on
    and wait for commands.
    """
    confirmation = "Entering configuration mode, "
    confirmation += "would you like to proceed with manual entry?"
    if confirm(cyan(confirmation)):
        try:
            run("configure terminal", shell=False)
            print(cyan("Opening a shell for you, you may now enter commands:"))
            open_shell()
        except Exception as e:
            print e
    else:
        try:
            print(cyan("Entering configuration mode for automatic command entry..."))
            run("configure terminal", shell=False)
        except Exception as e:
            print e
def start_django_test_server_as_daemon(self):
    # first your virtualenv on the server needs to install all python dependencies
    # since accessing virtualenv with Fabric is difficult use 'open_shell'
    # 1. workon {projectname}
    # 2. pip install -r requirements.txt
    # 3. fab migrate
    if not exists('/home/{0}/venv/{1}/django-admin.py'.format(
            self.USER_ID, self.PROJECT_NAME)):
        with cd('/home/{0}/{1}'.format(self.USER_ID, self.PROJECT_NAME)):
            run('echo "install all Python dependencies and migrate"')
            open_shell()
    # install Supervisor to daemonize uWSGI Django application
    run('sudo apt-get install supervisor')
    # move the Supervisor uwsgi.conf file to server
    put('./config/supervisor/uwsgi.conf', '/etc/supervisor/conf.d/uwsgi.conf')
    run('sudo supervisorctl reread')
    run('sudo supervisorctl update')
    run('sudo service supervisor start')
    # open port 8080
    run('sudo ufw allow 8080')
    return True
def ssh():
    fab.puts("Opening shell to host {0}".format(fab.env.host_string))
    fab.open_shell()
def home_shell():
    """Open a shell in home directory"""
    open_shell("cd " + settings.HOME)
def shell():
    """Open a remote shell"""
    open_shell("cd " + settings.GIT_DIR)
def shell():
    open_shell()
def master(args):
    """
    Main entry point for the master utility.
    """
    start = time.time()

    with shell.Step(1):
        print "Cloud setup validation:"

        # Load master configuration
        print "* Parsing the master XML definition file"
        config = etree.parse(args.setup)

        # Validate the configuration file
        print "* Validating the master XML definition against the XML schema"
        conf.schema('master-image').assertValid(config)

        master = config.getroot().attrib
        manifests = config.xpath('/master/apply/@manifest')
        uploads = config.xpath('/master/upload/@asset')

    # Instantiate connections
    with shell.Step(2):
        print "Instantiation of the cloud manager connection:"

        print "* Choosing region"
        try:
            region = master['region']
        except KeyError:
            region = DEFAULT_REGION
        print " └ Selected region '{0}'".format(region)

        print "* Connecting to the EC2 manager"
        c = ec2.connect_to_region(
            aws_access_key_id=args.access_key_id,
            aws_secret_access_key=args.secret_key,
            region_name=region,
        )

    with shell.Step(3):
        print "Virtual setup initialization:"

        print "* Checking for duplicate image names"
        try:
            image = c.get_all_images(filters={'name': master['name']})[0]
        except IndexError:
            print " └ Name '{0}' not used yet".format(master['name'])
        else:
            print " └ Name '{0}' already used".format(master['name'])
            print " └ Checking for different user"
            iam_c = iam.IAMConnection(
                aws_access_key_id=args.access_key_id,
                aws_secret_access_key=args.secret_key,
            )
            uid = iam_c.get_user()['GetUserResponse']['GetUserResult']['User']['UserId']
            if image.ownerId == uid:
                if not args.force:
                    raise ValueError("The name '{0}' is already taken by the " \
                            "image '{1}'.".format(master['name'], image.id))
                else:
                    print " └ Same user but --force flag set, deregistering image"
                    image.deregister()
                    print
                    print shell.hilite("Note that only the AMI was deregistered, " \
                            "the relative snapshot was left in place. Remove it " \
                            "manually if desired.", shell.MAGENTA)
                    print

        print "* Creating temporary security group"
        group = c.create_security_group(
            'pop-' + random_string(16),
            'Temporary security group for POP master image creation'
        )
        print " └ New security group named '{0.name}'".format(group)
        print " └ Authorizing external SSH access"
        group.authorize('tcp', 22, 22, "0.0.0.0/0")
        #group.authorize('tcp', 80, 80, "0.0.0.0/0")

        print "* Creating key pair"
        key = c.create_key_pair('pop-' + random_string(16))
        print " └ New key pair named '{0.name}'".format(key)
        key_filename = 'pop-master-pk-' + random_string(8) + '.pem'
        with open(key_filename, 'w') as fh:
            fh.write(key.material)
        os.chmod(key_filename, stat.S_IRUSR | stat.S_IWUSR)
        print " └ Private key written to '{0}'".format(key_filename)

        print "* Getting base image"
        image = c.get_image(master['base'])

        print "* Launching new instance"
        res = image.run(
            key_name=key.name,
            security_groups=[group],
            instance_type=TYPES[image.architecture],
        )
        print " └ New reservation with ID '{0}'".format(res.id)
        instance = res.instances[0]

        print "* Waiting for machine to boot",
        instance = shell.wait(instance, 'running', interval=.5)
        address = instance.dns_name
        print shell.nowrap(" └ Public address is '{0}'".format(address))

        print "* Waiting for instance to come online",
        shell.wait(ConnectionAttempt(address, 22), 'connected', interval=.8)

        print
        print "Instance online; you can manually connect using this command:\n"
        print shell.nowrap(shell.hilite(
            "ssh -i {0} {1}@{2}".format(key_filename, USER, address),
            shell.MAGENTA
        ))
        if args.clean:
            print
            print "Note that the machine will be available only until the master " \
                    "image creation process successfully completes. If an error " \
                    "happens before completion, the availability of the instance " \
                    "will depend on the stage at which the error happened."
            print "If you want to access the machine after the image creation " \
                    "process completes, use the --no-clean flag."
            print
        else:
            print
            print "The --no-clean flag is set, the instance will remain " \
                    "available after the image creation process completes."
            print "Remember to terminate it manually once done with it."
            print

    with shell.Step(4):
        print "Instance customization:"
        with remote_machine(USER, address, key_filename, args.debug):
            print "* Configuring sources for VPC deployment"
            fab.use_vpc_sources()

            print "* Installing puppet"
            sudo('apt-get -y install puppet')
            sudo('update-rc.d -f puppet remove')

            base = os.path.dirname(os.path.realpath(args.setup.name))
            upload_files(base, uploads, '/var/uploads')
            upload_files(base, manifests, '/var/manifests')

            print "* Applying manifests"
            for manifest in manifests:
                print " └ Applying '{0}'".format(manifest),
                shell.wait(
                    Manifest(os.path.join('/var/manifests', manifest), args.debug),
                    'applied',
                    interval=.8
                )

            if args.manual:
                print
                print "Base setup done, manual setup requested."
                op = confirm('Open an SSH connection now?', default=True)
                if op:
                    stdout = sys.stdout
                    while hasattr(sys.stdout, 'stdout'):
                        sys.stdout = sys.stdout.stdout
                    print
                    print "-" * shell.size()[0]
                    open_shell()
                    print "-" * shell.size()[0]
                    sys.stdout = stdout
                    print
                    sys.stdout.write("Connection closed, press the return key once done.")
                    sys.stdout.flush()
                    raw_input()
                    print
                else:
                    print "Please manually setup the image and press the return" \
                            " key once done."
                    raw_input()
                    print

            print "* Cleaning up"
            sudo('rm -rf /var/manifests /var/uploads')

    with shell.Step(5):
        print "Image creation:"
        print "* Creating image from running instance"
        ami = c.create_image(instance.id, master['name'], master['description'])
        while True:
            try:
                image = c.get_image(ami)
                print " └ New AMI created with ID '{0}'".format(ami)
                break
            except Exception:
                print " └ AMI not found, trying again"

        print "* Waiting for image creation to complete",
        shell.wait(image, 'available', interval=.5)
        if 'public' in master:
            print "* Making image public"
            image.set_launch_permissions(group_names=['all'])

    with shell.Step(6):
        print "Resources cleanup:"
        if args.clean:
            print "* Terminating instance"
            instance.terminate()
            print "* Deleting key pair"
            c.delete_key_pair(key.name)
            os.remove(key_filename)
            print "* Deleting security group"
            group.delete()
        else:
            print "* The --no-clean flag is set, skipping cleanup"
            raise shell.Step.Skipped()

    duration = int(time.time() - start)
    duration = '{0:.0f}m {1:.0f}s'.format(duration // 60, duration % 60)
    with shell.Wrapper(72):
        print
        print "Master image creation completed in {0}; you can launch new " \
                "instances of the just created image by specifying the " \
                "following AMI ID:\n".format(duration)
        print shell.hilite(" {0}".format(ami), shell.MAGENTA)
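One detail in the manual-setup branch is worth isolating: before handing the terminal to open_shell(), the code unwinds the wrappers that the (assumed) shell module stacks on sys.stdout, each of which keeps a .stdout reference to the stream it wraps. A minimal sketch of just that unwrapping:

import sys

def real_stdout():
    # walk the .stdout chain of wrapper objects down to the underlying
    # terminal stream, so interactive output bypasses the wrappers
    stream = sys.stdout
    while hasattr(stream, 'stdout'):
        stream = stream.stdout
    return stream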
def task5():
    open_shell('cat show_cpuinfo.py')
def shell():
    "open a shell on the remote host"
    open_shell("source ~/.virtualenvs/{app}/bin/activate".format(app=env.user))
def logs():
    "watch logs on remote server"
    open_shell("tail -f /var/log/{app}/*log /var/log/nginx/{app}*log && exit".format(app=env.user))
def ipython():
    "open ipython environment on remote host"
    open_shell("~/.virtualenvs/{app}/bin/shell.py && exit".format(app=env.user))
def CommandEditDnsCryptPiHoleSetupConfig(self):
    editor = FabricCommandClass.CommandEditDnsCryptPiHoleSetupConfig.editor
    open_shell(command='sudo ' + editor + ' ' + defaultLocation + ' ; exit')
def smart_shell(command=''):
    env_commands = "cd '%s'; %s" % (env.cwd, " && ".join(env.command_prefixes))
    open_shell('%s; %s' % (env_commands, command))
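smart_shell() exists because open_shell() does not apply the state that cd() and prefix() accumulate for run()/sudo(), so it replays env.cwd and env.command_prefixes by hand. A minimal sketch of how that state gets populated before the call, assuming Fabric 1.x; the paths and virtualenv are hypothetical:

from fabric.api import cd, prefix

def shell_in_project():
    # inside these managers Fabric sets env.cwd to '/srv/app' and appends
    # the activation line to env.command_prefixes, which is exactly what
    # smart_shell() replays into the interactive session
    with cd('/srv/app'), prefix('source /srv/app/venv/bin/activate'):
        smart_shell('python manage.py shell')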
def main(self, aopt, args):
    ins = start.main(self, aopt, args)
    aopt['ssh_settings']['host_string'] = ins.public_dns_name
    with fab.settings(**aopt['ssh_settings']):
        fab.open_shell()
    self.out("OK")
def run(self):
    open_shell()
def login():
    open_shell(command="cd %s" % env.home_folder)
def shell():
    open_shell(". .profile && cd '%s' && . bin/activate" % server_prefix)
def open_shell(cls, path='/tmp'):
    """
    Open a shell on the remote instance.
    """
    # Fabric's cd() context manager only prefixes commands issued through
    # run()/sudo(), not interactive sessions, so the directory change is
    # passed to open_shell() directly (as the other examples here do).
    open_shell('cd {0}'.format(path))
def shell_connect(self):
    """Open shell to remote server"""
    fabric.open_shell("export TERM=xterm")
def shell():
    api.open_shell()
def CommandEditDnsCryptProxyConfig(self):
    editor = FabricCommandClass.CommandEditDnsCryptProxyConfig.editor
    open_shell(command=editor + ' /etc/dnscrypt-proxy/dnscrypt-proxy.toml; exit')