def _close_fabric_connections(self):
    '''Closes all fabric connections to avoid "inactive" ssh connection errors.'''
    for key in connections.keys():
        connections[key].close()
        del connections[key]
def push_docs():
    """A script (push_docs) points to this. It pushes the current copy
    of the docs up to the development doc area on openmdao.org.
    """
    startdir = os.getcwd()
    branchdir = dirname(dirname(dirname(sys.executable)))
    docdir = join(branchdir, 'docs')
    idxpath = join(docdir, '_build', 'html', 'index.html')
    if not os.path.isfile(idxpath):
        build_docs()
    try:
        # tar up the docs so we can upload them to the server
        os.chdir(join(docdir, '_build'))
        try:
            if exists('docs.tar.gz'):
                os.remove('docs.tar.gz')
            archive = tarfile.open('docs.tar.gz', 'w:gz')
            archive.add('html')
            archive.close()
        finally:
            os.chdir(startdir)
        with settings(host_string='*****@*****.**'):
            # put the docs on the server and untar them
            put(join(docdir, '_build', 'docs.tar.gz'), 'downloads/docs.tar.gz')
            with cd('downloads'):
                run('tar xzf docs.tar.gz')
                run('rm -rf dev_docs')
                run('mv html dev_docs')
                run('rm -f docs.tar.gz')
    finally:
        for key in connections.keys():
            connections[key].close()
            del connections[key]
def handle(self, *args, **options):
    deployment = Deployment(options['staging'])
    deployment.deploy(options['initial'], options['staging'])
    for key in connections.keys():
        connections[key].close()
        del connections[key]
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]

    parser = OptionParser()
    parser.add_option("-l", action="store_true", dest='local',
                      help="test a local release")
    parser.add_option("-v", action="store", type='string', dest='version',
                      help="version to test", default='latest')
    parser.add_option("--host", action='append', dest='hosts', default=[],
                      metavar='HOST',
                      help="add a host url to test the release on")
    (options, args) = parser.parse_args(argv)

    if options.local:
        site_url = 'http://torpedo.grc.nasa.gov:31004'
    else:
        site_url = 'http://openmdao.org'

    if options.hosts:
        hosts = options.hosts
    else:
        hosts = ['storm.grc.nasa.gov', 'torpedo.grc.nasa.gov',
                 'viper.grc.nasa.gov']

    print 'testing on hosts: %s' % hosts

    # ensure that all network connections are closed
    # TODO: once we move to Fabric 0.9.4, just use disconnect_all() function
    try:
        for host in hosts:
            _testrelease(site_url, options.version, host)
    finally:
        for key in connections.keys():
            connections[key].close()
            del connections[key]
def fabric_cleanup(debug=False):
    """close all active fabric connections"""
    for key in connections.keys():
        if debug:
            print 'closing connection %s' % key
        connections[key].close()
        del connections[key]
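# A minimal sketch of wiring a cleanup helper like fabric_cleanup() above into
# interpreter shutdown, so cached connections get closed even when callers
# forget. The atexit registration is an illustrative assumption, not part of
# the original snippet.
import atexit

atexit.register(fabric_cleanup)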
def cleanup(self):
    # Cleanup Fabric connections
    for key in connections.keys():
        connections[key].close()
        del connections[key]
    # Close API connection
    self.aws_conn.close()
    self.elb_conn.close()
def fabric_cleanup(debug=False):
    """Close all active fabric connections."""
    for key in connections.keys():
        try:
            if debug:
                print "closing connection %s" % key
            connections[key].close()
            del connections[key]
        except Exception as err:
            print str(err)
def ssh(settings):
    with settings:
        try:
            yield
        finally:
            from fabric.state import connections
            for key in connections.keys():
                connections[key].close()
                del connections[key]
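# Hypothetical usage of the ssh() helper above, assuming it carries a
# @contextlib.contextmanager decorator in its original module (the decorator
# is not visible in this snippet); the host string is an example value.
from fabric.api import run, settings

with ssh(settings(host_string='deploy@example.com')):
    run('uptime')
# Leaving the block runs the finally clause, which closes and removes every
# cached fabric connection even if the command raised.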
def fabric_cleanup(debug=False):
    """Close all active fabric connections."""
    for key in connections.keys():
        try:
            if debug:
                print 'closing connection %s' % key
            connections[key].close()
            del connections[key]
        except Exception as err:
            print str(err)
def release(version=None):
    if sys.platform != 'win32':
        raise RuntimeError("OpenMDAO releases should be built on Windows so Windows binary distributions can be built")
    try:
        for host in hosts:
            _release(version, is_local=False, home='~')
    finally:
        for key in connections.keys():
            connections[key].close()
            del connections[key]
def clear_fabric_cache():
    """
    Fabric caches its connections so it won't have to re-connect every time
    you use it. But when working with VMs whose connections are getting
    reset, we can't use a cache. Use this function to reset fabric's cache.
    """
    connection_keys = connections.keys()
    for host_string in connection_keys:
        connections[host_string].close()
        del connections[host_string]
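# Since Fabric 0.9.4 the same cache flush is available as a library call; a
# hedged one-liner that clear_fabric_cache() above predates:
from fabric.network import disconnect_all

disconnect_all()  # closes and forgets every cached connection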
def supervise(self, pargs):
    '''supervise ACTION APP [ HOST [ HOST ... ] ]

    Supervises an application on a list of hosts. If hosts are omitted,
    attempts to determine the host list automatically.

    ACTION may be one of the following:

        restart - Restarts the application
    '''
    action, app = pargs[0:2]

    # Translate action into supervisorctl command
    if action in SUPCTL_CMD_MAP:
        cmd = SUPCTL_CMD_MAP[action] % app
    else:
        raise HatException("Invalid action: " + action)

    # Determine receiving hosts
    hostnames = pargs[2:]
    idb = self._get_infradb()
    hosts = None
    if hostnames:
        hosts = idb.hosts(hostnames)
    else:
        try:
            service = idb.service(app)
            hosts = service.hosts()
        except ubik.infra.db.InfraDBException:
            pass
    if not hosts:
        raise HatException("Could not determine hosts for " + app)
    log.debug("hosts to supervise: %s", hosts)

    # TODO: Determine actual user via InfraDB
    deploy_user = self.config.get('deploy', 'user')

    print >>self.output, 'Running "%s" on the following hosts:' % cmd
    for host in hosts:
        print >>self.output, "\t%s@%s" % (deploy_user, host)
    yesno = prompt("Proceed?", default='No')
    if yesno.strip()[0].upper() != 'Y':
        return

    try:
        for host in hosts:
            with settings(host_string=str(host), user=deploy_user):
                fab_output = run(cmd, shell=False)
    finally:
        # TODO: replace with disconnect_all() w/ fabric 0.9.4+
        for key in connections.keys():
            connections[key].close()
            del connections[key]
def run(self, host, username=None, ssh_id=None, user=None, identity=None):
    username = username or os.environ['USER']
    ssh_id = ssh_id or os.path.expanduser('~/.ssh/id_rsa.pub')
    user = user or 'ubuntu'
    identity = identity or os.environ['EC2_KEYPAIR_PATH']
    try:
        with settings(host_string=host):
            addAdmin(username, ssh_id, user, identity)
    finally:
        for key in connections.keys():
            connections[key].close()
            del connections[key]
def disconnect(self, log=False):
    for key in connections.keys():
        if output.status:
            connections[key].close()
        del connections[key]
        if output.status:
            if log:
                self._logger.info('Disconnected from {0}'.format(
                    denormalize(key)))
        else:
            self._logger.warning(
                'Failed to disconnect from {0}'.format(
                    denormalize(key)))
def main():
    """ Main entry point for the `ec2hashcat` command """
    from fabric.state import connections

    from ec2hashcat.commands.base import Handler

    try:
        handler = Handler()
        handler.dispatch()
    finally:
        for key in connections.keys():
            connections[key].close()
            del connections[key]
def local(command, capture=True):
    """
    Run a command locally.

    :param command: The command to run
    :param capture: Return the command's output?
    """
    out = fab_local(command, capture)
    for key in connections.keys():
        connections[key].close()
        del connections[key]
    return out
def disconnect_all():
    """
    Disconnect from all currently connected servers.

    Used at the end of ``fab``'s main loop, and also intended for use by
    library users.
    """
    from fabric.state import connections, output
    # Explicitly disconnect from all servers
    for key in connections.keys():
        if output.status:
            print "Disconnecting from %s..." % denormalize(key),
        connections[key].close()
        if output.status:
            print "done."
def disconnect_all():
    """ disconnect from all hosts """
    try:
        fabric.network.disconnect_all()
    except Exception, e:
        print >> sys.stderr, type(e), e
        print >> sys.stderr, "disconnect_all() didn't work (perhaps fabric < 0.9.4, http://docs.fabfile.org/0.9.3/api/core/network.html#fabric.network.disconnect_all)"
        print >> sys.stderr, "running old-skool disconnect_all hack: http://docs.fabfile.org/0.9.3/usage/library.html?highlight=wait#disconnecting"
        from fabric.state import connections
        for key in connections.keys():
            connections[key].close()
            del connections[key]
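# An alternative sketch of the same version gate using feature detection
# instead of try/except; it assumes only that fabric.network may or may not
# expose disconnect_all() (added in fabric 0.9.4).
import fabric.network
from fabric.state import connections

if hasattr(fabric.network, 'disconnect_all'):
    fabric.network.disconnect_all()
else:
    # old-skool hack: close and drop each cached connection by hand
    for key in connections.keys():
        connections[key].close()
        del connections[key]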
def localrelease(version=None):
    if sys.platform != 'win32':
        raise RuntimeError("OpenMDAO releases should be built on Windows so Windows binary distributions can be built")
    try:
        for host in hosts:
            # first, make sure we're in sync with the webfaction server - don't need to do any more probably
            #print 'syncing downloads dir...'
            #run('rsync -arvzt --delete [email protected]:downloads /OpenMDAO/release_test')
            #print 'syncing dists dir...'
            #run('rsync -arvzt --delete [email protected]:dists /OpenMDAO/release_test')
            print 'creating release...'
            # REAL ONE
            _release(version, is_local=True, home='/OpenMDAO/release_test', url=TEST_URL)
    finally:
        for key in connections.keys():
            connections[key].close()
            del connections[key]
def disconnect_all():
    """
    Disconnect from all currently connected servers.

    Used at the end of ``fab``'s main loop, and also intended for use by
    library users.
    """
    from fabric.state import connections, output
    # Explicitly disconnect from all servers
    for key in connections.keys():
        if output.status:
            # Here we can't use the py3k print(x, end=" ")
            # because of 2.5 backwards compatibility
            sys.stdout.write("Disconnecting from %s... " % denormalize(key))
        connections[key].close()
        del connections[key]
        if output.status:
            print "done."
def main(argv=None):
    #if sys.platform != 'win32':
    #    raise RuntimeError("OpenMDAO releases should be built on Windows so Windows binary distributions can be built")
    hosts = ["torpedo.grc.nasa.gov"]
    ###NEED TO CHANGE OpenMDAO/release_test TO SOMETHING ELSE FOR TESTING PURPOSES - MAYBE ADD A TEST SWITCH!!!!!
    try:
        for host in hosts:
            # first, make sure we're in sync with the webfaction server - don't need to do any more probably
            #print 'syncing downloads dir...'
            #run('rsync -arvzt --delete [email protected]:downloads /OpenMDAO/release_test')
            #print 'syncing dists dir...'
            #run('rsync -arvzt --delete [email protected]:dists /OpenMDAO/release_test')
            print 'creating release...'
            # REAL ONE
            #_release(version=None, is_local=True, home='/OpenMDAO/dev/ckrenek/scripts2', url=TEST_URL)
            _release(hosts, version=None, is_local=True, home='/OpenMDAO/dev/ckrenek/scripts2', url=TEST_URL)
    finally:
        for key in connections.keys():
            connections[key].close()
            del connections[key]
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]

    # Figure out what branch we're in
    startdir = os.getcwd()
    branchdir = subprocess.Popen(['git rev-parse --show-toplevel'],
                                 stdout=subprocess.PIPE,
                                 shell=True).communicate()[0]
    print("Testing repo %s" % branchdir)

    # parse through any command line options
    parser = OptionParser()
    parser.add_option("-l", "--runlocal", action="store_true",
                      dest="runlocal", default=False,
                      help="force tests to run also on current platform")
    parser.add_option("-p", "--platform", action="append",
                      dest="runplatforms",
                      help="add a host url to run the tests on",
                      default=["torpedo.grc.nasa.gov", "viper.grc.nasa.gov",
                               "storm.grc.nasa.gov"])
    (options, args) = parser.parse_args(argv)
    runlocal = options.runlocal
    currenthost = gethostname()
    print("Testing on hosts: %s" % options.runplatforms)

    # ensure that all network connections are closed
    # TODO: once we move to Fabric 0.9.4, just use disconnect_all() function
    tarfilename = "testbranch.tar.gz"
    try:
        _make_archive(tarfilename)
        for hostname in options.runplatforms:
            if not runlocal and (currenthost == hostname):
                # skip local platform unless runlocal is true
                print("skipping tests on %s" % currenthost)
            else:
                _testbranch(hostname)
        os.remove(tarfilename)
    finally:
        for key in connections.keys():
            connections[key].close()
            del connections[key]
def _disconnect_fabric():
    for key in connections.keys():
        connections[key].close()
        del connections[key]
def disconnect():
    for key in connections.keys():
        connections[key].close()
        del connections[key]
def disconnect_all2():
    from fabric.state import connections
    for key in connections.keys():
        connections[key].close()
        del connections[key]
def run_command(machine_id, host, ssh_user, private_key, command):
    """Runs a command over Fabric.

    Fabric does not support passing the private key as a string, but only
    as a file. To solve this, a temporary file with the private key is
    created and its path is returned. In ec2 we always favor the provided
    dns_name and set the user name to the default ec2-user. IP or dns_name
    come from the js machine model.

    A few useful parameters for fabric configuration that are not currently
    used::

        * env.connection_attempts, defaults to 1
        * env.timeout - e.g. 20 in secs defaults to 10
        * env.always_use_pty = False to avoid running commands like htop.
          However this might cause problems. Check fabric's docs.

    .. warning:: EC2 machines have default usernames other than root. However
        when attempting to connect with root@... it doesn't return an error
        but a message (e.g. Please login as the ec2-user rather than the user
        root). This misleads fabric to believe that everything went fine. To
        deal with this we check if the returned output contains a fragment
        of this message.

    """
    if not host:
        log.error("Host not provided, exiting.")
        return Response("Host not set", 400)
    if not command:
        log.warn("No command was passed, returning empty.")
        return Response("Command not set", 400)
    if not private_key:
        log.warn("No private key provided, returning empty")
        return Response("Key not set", 400)

    (tmp_key, tmp_path) = tempfile.mkstemp()
    key_fd = os.fdopen(tmp_key, "w+b")
    key_fd.write(private_key)
    key_fd.close()

    env.key_filename = [tmp_path]
    if ssh_user:
        env.user = ssh_user
    else:
        env.user = "******"
    env.abort_on_prompts = True
    env.no_keys = True
    env.no_agent = True
    env.host_string = host
    env.warn_only = True
    env.combine_stderr = True
    env.keepalive = 15

    try:
        cmd_output = run(command, timeout=COMMAND_TIMEOUT)
    except Exception as e:
        if "SSH session not active" in str(e):
            from fabric.state import connections
            conn_keys = [k for k in connections.keys() if host in k]
            for key in conn_keys:
                del connections[key]
            try:
                cmd_output = run(command, timeout=COMMAND_TIMEOUT)
                log.warn("Recovered!")
            except Exception as e:
                log.error("Failed to recover :(")
                log.error("Exception while executing command: %s" % e)
                os.remove(tmp_path)
                return Response("Exception while executing command: %s" % e, 503)
        else:
            log.error("Exception while executing command: %s" % e)
            os.remove(tmp_path)
            return Response("Exception while executing command: %s" % e, 503)
    except SystemExit as e:
        log.warn("Got SystemExit: %s" % e)
        os.remove(tmp_path)
        return Response("SystemExit: %s" % e, 401)

    os.remove(tmp_path)
    return Response(cmd_output, 200)
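# A standalone sketch of the temporary-key-file workaround described in the
# docstring above: Fabric only accepts key *files*, so key material held as a
# string is spilled to a mkstemp() file and pointed at via env.key_filename.
# The helper name is hypothetical; only fabric.api.env and the standard
# library are assumed.
import os
import tempfile

from fabric.api import env


def register_key_material(private_key):
    """Write private_key to a temp file and hand it to fabric."""
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as key_file:
        key_file.write(private_key)
    env.key_filename = [path]
    return path  # caller is responsible for os.remove(path) afterwards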
def run_task(name, args):
    execute(name, args)
    for key in connections.keys():
        connections[key].close()
        del connections[key]
def do_action(project, actionargs, deploypath, global_config, extra_env={}):
    target = actionargs[0]
    branch = 'release' if len(actionargs) < 2 else actionargs[1]
    deploypath = join(deploypath, target)
    dependspath = join(deploypath, 'depends')
    codepath = join(deploypath, 'code')
    staticpath = join(deploypath, 'static')
    htmlpath = join(deploypath, 'html')
    reader = JWLReader(project)

    # SWITCH TO RELEASE BRANCH
    try:
        if branch != 'CURRENT':
            sys_call('git checkout ' + branch, reader.path)
        reader.compile_coffee()

        # SETUP DEPLOY CONFIG
        envkey = target + '_'
        dplines = ['from jwl import deployconfig']
        config_data = {}
        for section in reader._config.sections():
            for key, value in reader._config.items(section):
                if section.startswith(envkey) or section.find('_') == -1:
                    if section.startswith(envkey):
                        sectiontitle = 'env.' + section[len(envkey):]
                    else:
                        sectiontitle = section
                    rvalue = repr(value)
                    dplines.append("deployconfig.set2('%(sectiontitle)s.%(key)s', %(rvalue)s)" % locals())
                    dplines.append("print '%(sectiontitle)s.%(key)s', '=', %(rvalue)s" % locals())
                    config_data[sectiontitle + '.' + key] = value
                    print sectiontitle + '.' + key, '=', value
        config_data['env'] = target
        for key, value in extra_env.iteritems():
            rvalue = repr(value)
            dplines.append("deployconfig.set2('%(key)s', %(rvalue)s)" % locals())
            dplines.append("print '%(key)s', '=', %(rvalue)s" % locals())
            config_data[key] = value

        # server_side paths
        server_deploypath = config_data['env.basic.deploypath']
        server_dependspath = server_deploypath + '/depends'
        server_codepath = server_deploypath + '/code'
        server_staticpath = server_deploypath + '/static'
        server_htmlpath = server_deploypath + '/html'
        rserver_dependspath = repr(server_dependspath)
        rserver_codepath = repr(server_codepath)
        rserver_staticpath = repr(server_staticpath)
        rserver_htmlpath = repr(server_htmlpath)
        deployrepo = config_data['env.basic.deployrepo']

        if not exists(deploypath):
            makedirs(deploypath)
            sys_call('git clone ' + deployrepo + ' ' + deploypath)
            sys_call('git checkout uploaded', deploypath)
        else:
            # clean but don't remove the .git directory
            sys_call('git pull origin uploaded', deploypath)
            for p in (dependspath, codepath, htmlpath, staticpath):
                if exists(p):
                    rmtree(p, onerror=onerror)
        for p in (dependspath, codepath, htmlpath, staticpath):
            if not exists(p):
                makedirs(p)

        print 'fetching dependencies'
        # fetch the dependencies
        depends = reader.config_items('depends')
        for name, url in depends:
            dpath = join(dependspath, name)
            if url.startswith('local:'):
                url = url[6:]
                ls = url.split(';')
                i = 0
                try:
                    while not exists(ls[i]):
                        i += 1
                except:
                    raise Exception('could not find path ' + url)
                url = ls[i]
                if exists(dpath):
                    rmtree(dpath)
                shutil.copytree(url, dpath, ignore=shutil.ignore_patterns('*.git', '*.svn'))
            else:
                if not exists(dpath):
                    try:
                        makedirs(dpath)
                        sys_call('git init', dpath)
                        #sys_call('git remote add origin ' + url, dpath)
                    except:
                        rmtree(dpath)
                        raise
                sys_call('git pull ' + url + ' master', dpath)

        # run any custom compilation code
        if exists(join(reader.path, 'compile.py')):
            old_path = list(sys.path)
            sys.path.append(reader.path)
            sys.path.append(dependspath)
            import compile
            compile.run(reader)
            del sys.path[:]
            sys.path.extend(old_path)

        gen(join(codepath, 'deployconfig_init.py'), '\n'.join(dplines))

        # legacy...
        # config_data['facebook_app_id'] = config_data['env.facebook.facebook_app_id']

        urlhandlers = []

        # create the html pages
        for sourcefile in reader.get_html(config_data):
            stripped_name = basename(sourcefile.path).rsplit('.', 1)[0]
            gen(join(htmlpath, stripped_name), merge_source_file(sourcefile))
            urlhandlers.append('urlhandlers.append((r"/(%(stripped_name)s)", NoCacheStaticHandler, {"path": %(rserver_htmlpath)s}))' % locals())
        urlhandlers.append('urlhandlers.append((r"/()", NoCacheStaticHandler, {"path": %(rserver_htmlpath)s, "default_filename": "index"}))' % locals())

        # copy over resources
        if exists(staticpath):
            rmtree(staticpath)
        for sourcefile in reader.get_resources(config_data):
            relative_path = relpath(sourcefile.path, reader.resources)
            if not exists(join(staticpath, dirname(relative_path))):
                makedirs(join(staticpath, dirname(relative_path)))
            if sourcefile.binary:
                shutil.copy(sourcefile.path, join(staticpath, relative_path))
            else:
                gen(join(staticpath, relative_path), merge_source_file(sourcefile))
        rprefix = reader.resource_prefix
        urlhandlers.append('urlhandlers.append((r"/%(rprefix)s/(.*)", NoCacheStaticHandler, {"path": %(rserver_staticpath)s}))' % locals())

        # copy over any raw python files
        for file in reader.list_python():
            shutil.copy(file, join(codepath, basename(file)))

        cookie_secret = repr(reader.config('basic', 'cookie_secret'))
        # deal with in a non-legacy way at some point?
        # google_consumer_key = reader.config('google', 'consumer_key')
        # google_consumer_secret = reader.config('google', 'consumer_secret')

        # build server_interface.js
        server_interface_path = resolve_import('jwl_make/server_interface.js', None)
        del sys.path[:]
        sys.path.extend(clean_path)
        sys.path.append(dependspath)
        sys.path.append(codepath)
        import index
        from jwl.remote_method import make_dummy_handler
        with open(join(htmlpath, 'server_interface.js'), 'w') as f:
            f.write('var server = "%s";' % reader.server_prefix)
            with open(server_interface_path, 'r') as f2:
                f.write(f2.read())
            f.write('\n')
            f.write(make_dummy_handler(index.main).write_js_interface())
        urlhandlers.append('urlhandlers.append((r"/(server_interface.js)", NoCacheStaticHandler, {"path": %(rserver_htmlpath)s}))' % locals())

        urlhandlercode = '\n'.join(urlhandlers)
        readerserverprefix = reader.server_prefix
        is_debug = config_data['env.basic.debug']
        server_port = config_data['env.basic.port']

        # build the execution file
        launch_server = r"""
import sys
sys.path.append(%(rserver_dependspath)s)
sys.path.append(%(rserver_codepath)s)
import deployconfig_init
from jwl import deployconfig
deployconfig.set(debug=%(is_debug)s)
import index
import tornado
from jwl.tornado_launch import launch
from jwl.remote_method import make_dummy_handler, NoCacheStaticHandler

urlhandlers = []
%(urlhandlercode)s

#GOOGLE LOGIN
# from jwl.googleauth import LoginController
# urlhandlers.append((r"/auth/(.*)", LoginController))

if __name__ == '__main__':
    urlhandlers.append((r"/%(readerserverprefix)s.*", index.main))
    print 'about to run startup code'
    index.do_startup(urlhandlers)
    application = tornado.web.Application(urlhandlers, cookie_secret=%(cookie_secret)s, gzip=True)#, google_consumer_key=google_consumer_key, google_consumer_secret=google_consumer_secret)
    index.main._my_application = application
    print 'startup code complete, starting server...'
    launch(application, %(server_port)s)
""" % locals()
        gen(join(codepath, 'launch_server.py'), launch_server)

        print 'about to upload...'

        # check in the local code to git
        sys_call('git add --all', deploypath, failokay=True)
        sys_call('git commit -a -m "automated..."', deploypath, failokay=True)
        sys_call('git push origin uploaded', deploypath)

        # Upload to server
        host_string = config_data['env.basic.host']
        if host_string == 'localhost':
            execute = sys_call
        else:
            import fabric.api as fab

            def execute(args, cwd, fail_okay):
                with fab.settings(host_string=host_string, key_filename=keyfile, disable_known_hosts=True):
                    with fab.cd(cwd):
                        with fab.settings(warn_only=fail_okay):
                            fab.run(args)

            keyfile = global_config.get('keys', config_data['env.basic.sshkey'])
        try:
            execute('git add --all', server_deploypath, True)
            execute('git commit -a -m "saving any changes such as .pyc etc"', server_deploypath, True)
            execute('git merge uploaded', server_deploypath, False)
            execute(config_data['env.basic.startcommand'], server_deploypath, False)
        finally:
            if host_string != 'localhost':
                from fabric.state import connections
                for key in connections.keys():
                    connections[key].close()
                    del connections[key]
    finally:
        # SWITCH BACK TO MASTER BRANCH
        if branch != 'CURRENT':
            sys_call('git checkout master', reader.path)
def run_command(machine_id, host, ssh_user, private_key, command):
    """Runs a command over Fabric.

    Fabric does not support passing the private key as a string, but only
    as a file. To solve this, a temporary file with the private key is
    created and its path is returned. In ec2 we always favor the provided
    dns_name and set the user name to the default ec2-user. IP or dns_name
    come from the js machine model.

    A few useful parameters for fabric configuration that are not currently
    used::

        * env.connection_attempts, defaults to 1
        * env.timeout - e.g. 20 in secs defaults to 10
        * env.always_use_pty = False to avoid running commands like htop.
          However this might cause problems. Check fabric's docs.

    .. warning:: EC2 machines have default usernames other than root. However
        when attempting to connect with root@... it doesn't return an error
        but a message (e.g. Please login as the ec2-user rather than the user
        root). This misleads fabric to believe that everything went fine. To
        deal with this we check if the returned output contains a fragment
        of this message.

    """
    #~ log.error("runcommand(%s,%s,%s,%s,%s)" % (machine_id, host, ssh_user, private_key, command))
    if not host:
        log.error('Host not provided, exiting.')
        return Response('Host not set', 400)
    if not command:
        log.warn('No command was passed, returning empty.')
        return Response('Command not set', 400)
    if not private_key:
        log.warn('No private key provided, returning empty')
        return Response('Key not set', 400)

    (tmp_key, tmp_path) = tempfile.mkstemp()
    key_fd = os.fdopen(tmp_key, 'w+b')
    key_fd.write(private_key)
    key_fd.close()

    env.key_filename = [tmp_path]
    if ssh_user:
        env.user = ssh_user
    else:
        env.user = '******'
    env.abort_on_prompts = True
    env.no_keys = True
    env.no_agent = True
    env.host_string = host
    env.warn_only = True
    env.combine_stderr = True
    env.keepalive = 15

    try:
        cmd_output = run(command, timeout=COMMAND_TIMEOUT)
    except Exception as e:
        if 'SSH session not active' in str(e):
            from fabric.state import connections
            conn_keys = [k for k in connections.keys() if host in k]
            for key in conn_keys:
                del connections[key]
            try:
                cmd_output = run(command, timeout=COMMAND_TIMEOUT)
                log.warn("Recovered!")
            except Exception as e:
                log.error("Failed to recover :(")
                log.error('Exception while executing command: %s' % e)
                os.remove(tmp_path)
                return Response('Exception while executing command: %s' % e, 503)
        else:
            log.error('Exception while executing command: %s' % e)
            os.remove(tmp_path)
            return Response('Exception while executing command: %s' % e, 503)
    except SystemExit as e:
        log.warn('Got SystemExit: %s' % e)
        os.remove(tmp_path)
        return Response('SystemExit: %s' % e, 401)

    os.remove(tmp_path)
    return Response(cmd_output, 200)
def close_connection(self):
    for key in connections.keys():
        if self.hostname in key:
            connections[key].close()
            del connections[key]
            break
        })
        broker_ip = workers[0].private_ip

        # 4) Work - install packages, deploy celery, execute client scripts
        execute(deploy, git_host_keys, git_url, git_folder)
        with fabric_settings(warn_only=False):
            # abort main.py if broker fails to start
            execute(start_broker, rabbitmq_username, rabbitmq_password)
            execute(start_workers, git_folder, broker_ip, rabbitmq_username, rabbitmq_password)
            execute(start_client, git_folder, broker_ip, rabbitmq_username, rabbitmq_password)
    finally:
        print "\nCleaning up..."

        # terminate any fabric SSH sessions
        from fabric.state import connections
        for key in connections.keys():
            print "Disconnecting from ssh://%s..." % key
            connections[key].close()

        # terminate any VMs that were created
        print "Terminating cluster instances..."
        for worker in workers:
            worker.instance.delete()

        # remove keypair from nova
        if novakey:
            print "Removing nova keypair %s..." % novakey.fingerprint
            novakey.delete()

        print "Session complete. All workers have been purged.\n"
def deploy(self):
    '''deploy APP VERSION [ HOST [ HOST ... ] ]

    Deploys an application to a list of hosts. If hosts are omitted,
    attempts to determine the host list automatically.
    '''
    name, version = self.args[0:2]

    # Determine receiving hosts
    hostnames = self.args[2:]
    idb = self._get_infradb()
    hosts = None
    if hostnames:
        hosts = idb.hosts(hostnames)
    else:
        try:
            service = idb.service(name)
            hosts = service.hosts()
        except ubik.infra.db.InfraDBException:
            pass
    if not hosts:
        raise HatException("Could not determine hosts for deploy")
    log.debug("hosts to deploy: %s", hosts)

    # Determine the package types we need
    pkgpath = dict.fromkeys([h.pkgtype() for h in hosts])
    log.debug("pkg types to deploy: %s", pkgpath.keys())
    cache = self._get_package_cache()
    self._add_package_config(name)
    for pkgtype in pkgpath:
        filename = cache.get(name=name, version=version, type=pkgtype)
        # 'name' could be the package config name rather than the actual
        # package name, so we should try dereferencing using build config
        if not filename:
            try:
                pkgname = self.config.get('package', 'name')
                filename = cache.get(name=pkgname, version=version, type=pkgtype)
            except:
                pass
        if filename:
            pkgpath[pkgtype] = filename
        else:
            raise HatException("Package of type '%s' is needed for this "
                               "deploy, but it doesn't exist. " % pkgtype)

    print >>self.output, "About to deploy the following packages:"
    for pkgname in pkgpath.values():
        print >>self.output, "\t%s" % pkgname
    print >>self.output, "To the following hosts:"
    for host in hosts:
        print >>self.output, "\t%s" % host
    yesno = prompt("Proceed?", default='No')
    if yesno.strip()[0].upper() != 'Y':
        return

    deploy_user = self.config.get('deploy', 'user')
    try:
        for host in hosts:
            host_pkgtype = host.pkgtype()
            host_pkgpath = pkgpath[host_pkgtype]
            host_pkgfilename = os.path.basename(host_pkgpath)
            with settings(host_string=str(host), user=deploy_user):
                fab_output = run("mkdir -p pkgs/", shell=False)
                if fab_output:
                    print >>self.output, fab_output
                put(host_pkgpath, "pkgs/")
                pkg_delete = True
                if host_pkgtype in PKG_INSTALL_CMD.keys():
                    fab_output = run(PKG_INSTALL_CMD[host_pkgtype] +
                                     " pkgs/%s" % host_pkgfilename, shell=False)
                else:
                    fab_output = ("Unable to determine install command for "
                                  "package type %s. Leaving package "
                                  "~%s/pkgs/%s for manual install." %
                                  (host_pkgtype, deploy_user, host_pkgfilename))
                    pkg_delete = False
                if fab_output:
                    print >>self.output, fab_output
                if pkg_delete:
                    fab_output = run("rm pkgs/" + host_pkgfilename, shell=False)
                    if fab_output:
                        print >>self.output, fab_output
                if self.config.get('deploy', 'restart') == 'supervisor':
                    try:
                        service = self.config.get('supervisor', 'service')
                        fab_output = run("sup restart " + service, shell=False)
                        if fab_output:
                            print >>self.output, fab_output
                    except ubik.config.NoOptionError:
                        log.error("supervisor restart specified by config "
                                  "but supervisor.service option missing.")
    finally:
        # TODO: replace with disconnect_all() w/ fabric 0.9.4+
        for key in connections.keys():
            connections[key].close()
            del connections[key]
def main():
    """
    Main command-line execution loop.
    """
    try:
        # Parse command line options
        parser, options, arguments = parse_options()

        # Handle regular args vs -- args
        arguments = parser.largs
        remainder_arguments = parser.rargs

        # Update env with any overridden option values
        # NOTE: This needs to remain the first thing that occurs
        # post-parsing, since so many things hinge on the values in env.
        for option in env_options:
            state.env[option.dest] = getattr(options, option.dest)

        # Handle --hosts, --roles (comma separated string => list)
        for key in ['hosts', 'roles']:
            if key in state.env and isinstance(state.env[key], str):
                state.env[key] = state.env[key].split(',')

        # Handle output control level show/hide
        update_output_levels(show=options.show, hide=options.hide)

        # Handle version number option
        if options.show_version:
            print("Fabric %s" % state.env.version)
            sys.exit(0)

        # Load settings from user settings file, into shared env dict.
        state.env.update(load_settings(state.env.rcfile))

        # Find local fabfile path or abort
        fabfile = find_fabfile()
        if not fabfile:
            abort("Couldn't find any fabfiles!")

        # Store absolute path to fabfile in case anyone needs it
        state.env.real_fabfile = fabfile

        # Load fabfile (which calls its module-level code, including
        # tweaks to env values) and put its commands in the shared commands
        # dict
        commands.update(load_fabfile(fabfile))

        # Abort if no commands found
        if not commands and not remainder_arguments:
            abort("Fabfile didn't contain any commands!")

        # Now that we're settled on a fabfile, inform user.
        if state.output.debug:
            print("Using fabfile '%s'" % fabfile)

        # Handle list-commands option (now that commands are loaded)
        if options.list_commands:
            list_commands()

        # Handle show (command-specific help) option
        if options.display:
            display_command(options.display)

        # If user didn't specify any commands to run, show help
        if not (arguments or remainder_arguments):
            parser.print_help()
            sys.exit(0)  # Or should it exit with error (1)?

        # Parse arguments into commands to run (plus args/kwargs/hosts)
        commands_to_run = parse_arguments(arguments)

        # Parse remainders into a faux "command" to execute
        remainder_command = parse_remainder(remainder_arguments)

        # Figure out if any specified task names are invalid
        unknown_commands = []
        for tup in commands_to_run:
            if tup[0] not in commands:
                unknown_commands.append(tup[0])

        # Abort if any unknown commands were specified
        if unknown_commands:
            abort("Command(s) not found:\n%s" % indent(unknown_commands))

        # Generate remainder command and insert into commands, commands_to_run
        if remainder_command:
            r = '<remainder>'
            commands[r] = lambda: api.run(remainder_command)
            commands_to_run.append((r, [], {}, [], []))

        # At this point all commands must exist, so execute them in order.
        for name, args, kwargs, cli_hosts, cli_roles in commands_to_run:
            # Get callable by itself
            command = commands[name]
            # Set current command name (used for some error messages)
            state.env.command = name
            # Set host list (also copy to env)
            state.env.all_hosts = hosts = get_hosts(command, cli_hosts, cli_roles)
            # If hosts found, execute the function on each host in turn
            for host in hosts:
                username, hostname, port = normalize(host)
                state.env.host_string = host
                state.env.host = hostname
                # Preserve user
                prev_user = state.env.user
                state.env.user = username
                state.env.port = port
                # Log to stdout
                if state.output.running:
                    print("[%s] Executing task '%s'" % (host, name))
                # Actually run command
                commands[name](*args, **kwargs)
                # Put old user back
                state.env.user = prev_user
            # If no hosts found, assume local-only and run once
            if not hosts:
                commands[name](*args, **kwargs)
        # If we got here, no errors occurred, so print a final note.
        if state.output.status:
            print("\nDone.")
    except SystemExit:
        # a number of internal functions might raise this one.
        raise
    except KeyboardInterrupt:
        if state.output.status:
            print >> sys.stderr, "\nStopped."
        sys.exit(1)
    except:
        sys.excepthook(*sys.exc_info())
        # we might leave stale threads if we don't explicitly exit()
        sys.exit(1)
    finally:
        # Explicitly disconnect from all servers
        for key in connections.keys():
            if state.output.status:
                print "Disconnecting from %s..." % denormalize(key),
            connections[key].close()
            if state.output.status:
                print "done."
    sys.exit(0)
def main():
    """
    First function called upon command line invocation. Builds the command
    line parser, parses the arguments, configures logging and invokes the
    command.
    """
    # Configure logging
    file_formatter = logging.Formatter("%(asctime)s - %(levelname)10s - "
                                       "%(message)s (%(pathname)s:%(lineno)d)")
    console_formatter = logging.Formatter("%(levelname)10s: %(message)s")

    # All console output not explicitly directed to the user should be a log
    # message instead
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(console_formatter)
    console_handler.setLevel(20)  # Don't show debug log messages until the
                                  # verbosity is set

    # Buffer the logging until no errors happen
    buffered_handler = logging.handlers.MemoryHandler(9999, logging.CRITICAL)

    # Capture all logging output and write it to the specified log file
    file_handler = logging.FileHandler('pas.log', 'w', delay=True)
    file_handler.setFormatter(file_formatter)
    file_handler.setLevel(40)

    logger = logging.getLogger()
    logger.setLevel(1)
    logger.addHandler(console_handler)
    logger.addHandler(buffered_handler)

    # Build base parser
    parser = commands.build_mainparser()
    arguments = itertools.takewhile(lambda x: x.startswith('-'), sys.argv[1:])
    arguments = (arg for arg in arguments if arg not in ('-h', '--help'))
    command_line = sys.argv[:1] + list(arguments)

    # Parse the base arguments (verbosity and settings)
    args, remaining = parser.parse_known_args(command_line)
    buffered_handler.setTarget(file_handler)

    # Get the verbosity level
    verbosity = max(1, VERBOSITY - 10 * (len(args.verbose) - len(args.quiet)))
    console_handler.setLevel(verbosity)
    file_handler.setLevel(1)
    paramiko_logger = logging.getLogger('paramiko.transport')
    paramiko_logger.setLevel(verbosity + 10)

    # Load settings
    try:
        settings.loadfrompath(path=args.settings)
        nosettings = False
    except ImportError:
        from ..conf import basesettings
        settings.load(basesettings)
        nosettings = True

    # Build complete parser
    parser = commands.build_subparsers(parser)

    # Parse arguments
    command = args = parser.parse_args()
    res = 0

    # Check that settings were loaded if needed
    if not getattr(command.execute, 'nosettings', False) and nosettings:
        logger.critical("This command requires the settings module to be "
                        "present on path or defined using the "
                        "PAS_SETTINGS_MODULE environment variable.")
        res = 1

    # Execute command
    if not res:
        res = command.execute(args)

    # Cleanup fabric connections if needed
    for key in connections.keys():
        connections[key].close()
        del connections[key]

    # Check execution result
    if res:
        # ...an error occurred, write the logfile
        buffered_handler.flush()
        print
        print "pas exited with a non-zero exit status (%d). A complete log " \
              "was stored in the %s file." % (res, LOGFILE)
        print
    else:
        # ...no errors occurred, avoid flushing the buffer
        buffered_handler.setTarget(None)

    # Need to close the buffered handler before sysexit is called or it will
    # result in an exception
    buffered_handler.close()
    return res