def _syncFile(path, contents):
    """
    Write contents to path only when the file is missing or its current
    contents differ.  Handles are closed via context managers (the previous
    code leaked file objects through bare open(...).read()/write()).
    """
    current = None
    if os.path.exists(path):
        with open(path) as fin:
            current = fin.read()
    if current != contents:
        with open(path, 'w') as fout:
            fout.write(contents)


def instantiateCredential(conf, cred):
    """
    Takes a credential and instantiates it.  It returns a Record that has all
    of the information users of that instantiated credential will need

    Params:
    - conf: config object; reloaded from cred's conf_file (falling back to
      DEFAULT_CONFIG_FILE) when 'config_loaded' is not set
    - cred: credential providing .name, .cert, .pkey and .metadata

    NOTE(review): this is a generator that calls defer.returnValue, so it is
    presumably wrapped by defer.inlineCallbacks at the decoration site (not
    visible in this chunk) -- confirm before calling it directly.
    """
    if not conf('config_loaded', default=False):
        conf = config.configFromConfig(
            conf,
            base=config.configFromStream(
                open(conf('conf_file', default=DEFAULT_CONFIG_FILE)),
                base=conf))

    certFile = os.path.join(conf('general.secure_tmp'), cred.name + '_cert.pem')
    keyFile = os.path.join(conf('general.secure_tmp'), cred.name + '_key.pem')

    # Refresh the on-disk cert/key copies only when missing or stale
    _syncFile(certFile, cred.cert)
    _syncFile(keyFile, cred.pkey)

    newCred = functional.Record(name=cred.name,
                                conf=conf,
                                cert=certFile,
                                pkey=keyFile,
                                ec2URL=None,
                                env={})

    # Propagate a custom EC2 endpoint into the credential's environment
    if 'ec2_url' in cred.metadata:
        newCred = newCred.update(
            env=functional.updateDict(newCred.env,
                                      dict(EC2_URL=cred.metadata['ec2_url'])))

    yield _createGroups(newCred)
    yield _createKeypair(newCred)
    defer.returnValue(newCred)
def runPipelineConfig(taskName, name, pipeline, conf, queue=None):
    """
    Takes a config object representing a pipeline options, validates those
    options in pipeline.OPTIONS and passes the results onto
    runPipelineWithConfig

    Params:
    - taskName: name of the task tracking this run
    - name: name of the pipeline instance
    - pipeline: pipeline object; its OPTIONS list drives validation
    - conf: base config, overlaid with /tmp/machine.conf and the environment
    - queue: optional queue, passed straight through to runPipelineWithConfig
    """
    ##
    # Mocheezmo way to have it load a conf file.  This will be removed in the future
    tmpConfigName = os.path.join('/tmp', str(time.time()) + '.config')
    options = list(pipeline.OPTIONS)
    options.append(('conf',
                    '',
                    '--conf',
                    'Conf file (DO NOT SPECIFY, FOR INTERNAL USE)',
                    const('/tmp/machine.conf')))
    options.append(('CONFIG_FILE',
                    '-c',
                    '--CONFIG_FILE',
                    'Config file for the pipeline. Specify this if you do not want to specify options on the command line',
                    const(tmpConfigName)))

    ##
    # Load up machine.conf and apply it to our current config
    conf = config.configFromConfig(conf,
                                   config.configFromStream(open('/tmp/machine.conf'),
                                                           config.configFromEnv()),
                                   lazy=True)
    vals = {}
    for o in options:
        vals[o[0]] = cli.applyOption(conf(o[0], default=None), o, conf)

    conf = config.configFromMap(vals, conf)

    ##
    # For some ergatis trickery we then need to output this config to a temp
    # file so ergatis can pull variables from it
    confDict = config.configToDict(conf)
    confVals = {}
    # Split dotted keys into (section, key, value) triples
    cv = [('.'.join(k.split('.')[:-1]), k.split('.')[-1], v)
          for k, v in confDict.iteritems()]
    for s, k, v in cv:
        confVals.setdefault(s, {})[k] = v

    # 'with' guarantees the temp config file is flushed and closed even if a
    # write raises (the previous code could leak the handle on error).
    # Sections '' and 'env' are deliberately omitted from the output.
    with open(tmpConfigName, 'w') as fout:
        for s, d in confVals.iteritems():
            if s not in ['', 'env']:
                fout.write('[' + s + ']\n')
                for k, v in d.iteritems():
                    fout.write('%s=%s\n' % (k, str(v)))

    return runPipelineWithConfig(taskName, name, pipeline, conf, queue)
def runPipelineConfig(taskName, name, pipeline, conf, queue=None):
    """
    Takes a config object representing a pipeline options, validates those
    options in pipeline.OPTIONS and passes the results onto
    runPipelineWithConfig

    Params:
    - taskName: name of the task tracking this run
    - name: name of the pipeline instance
    - pipeline: pipeline object; its OPTIONS list drives validation
    - conf: base config, overlaid with /tmp/machine.conf and the environment
    - queue: optional queue, passed straight through to runPipelineWithConfig
    """
    ##
    # Mocheezmo way to have it load a conf file.  This will be removed in the future
    tmpConfigName = os.path.join("/tmp", str(time.time()) + ".config")
    options = list(pipeline.OPTIONS)
    options.append(("conf",
                    "",
                    "--conf",
                    "Conf file (DO NOT SPECIFY, FOR INTERNAL USE)",
                    const("/tmp/machine.conf")))
    options.append(("CONFIG_FILE",
                    "-c",
                    "--CONFIG_FILE",
                    "Config file for the pipeline. Specify this if you do not want to specify options on the command line",
                    const(tmpConfigName)))

    ##
    # Load up machine.conf and apply it to our current config
    conf = config.configFromConfig(
        conf,
        config.configFromStream(open("/tmp/machine.conf"), config.configFromEnv()),
        lazy=True)
    vals = {}
    for o in options:
        vals[o[0]] = cli.applyOption(conf(o[0], default=None), o, conf)

    conf = config.configFromMap(vals, conf)

    ##
    # For some ergatis trickery we then need to output this config to a temp
    # file so ergatis can pull variables from it
    confDict = config.configToDict(conf)
    confVals = {}
    # Split dotted keys into (section, key, value) triples
    cv = [(".".join(k.split(".")[:-1]), k.split(".")[-1], v)
          for k, v in confDict.iteritems()]
    for s, k, v in cv:
        confVals.setdefault(s, {})[k] = v

    # 'with' guarantees the temp config file is flushed and closed even if a
    # write raises (the previous code could leak the handle on error).
    # Sections "" and "env" are deliberately omitted from the output.
    with open(tmpConfigName, "w") as fout:
        for s, d in confVals.iteritems():
            if s not in ["", "env"]:
                fout.write("[" + s + "]\n")
                for k, v in d.iteritems():
                    fout.write("%s=%s\n" % (k, str(v)))

    return runPipelineWithConfig(taskName, name, pipeline, conf, queue)
def instantiateCredential(conf, cred):
    """
    Takes a credential and instantiates it.  It returns a Record that has all
    of the information users of that instantiated credential will need

    Params:
    - conf: config object; reloaded from cred's conf_file (falling back to
      DEFAULT_CONFIG_FILE) when 'config_loaded' is not set
    - cred: credential providing .name, .cert, .pkey and .metadata

    NOTE(review): this is a generator that calls defer.returnValue, so it is
    presumably wrapped by defer.inlineCallbacks at the decoration site (not
    visible in this chunk) -- confirm before calling it directly.
    """
    if not conf('config_loaded', default=False):
        conf = config.configFromConfig(
            conf,
            base=config.configFromStream(
                open(conf('conf_file', default=DEFAULT_CONFIG_FILE)),
                base=conf))

    certFile = os.path.join(conf('general.secure_tmp'), cred.name + '_cert.pem')
    keyFile = os.path.join(conf('general.secure_tmp'), cred.name + '_key.pem')

    # Refresh the on-disk cert copy only when missing or stale; 'with'
    # closes handles deterministically (the previous code leaked file
    # objects through bare open(...).read()/write()).
    writeCert = True
    if os.path.exists(certFile):
        with open(certFile) as fin:
            writeCert = fin.read() != cred.cert
    if writeCert:
        with open(certFile, 'w') as fout:
            fout.write(cred.cert)

    # Same staleness check for the private key
    writeKey = True
    if os.path.exists(keyFile):
        with open(keyFile) as fin:
            writeKey = fin.read() != cred.pkey
    if writeKey:
        with open(keyFile, 'w') as fout:
            fout.write(cred.pkey)

    newCred = functional.Record(name=cred.name,
                                conf=conf,
                                cert=certFile,
                                pkey=keyFile,
                                ec2URL=None,
                                env={})

    # Propagate a custom EC2 endpoint into the credential's environment
    if 'ec2_url' in cred.metadata:
        newCred = newCred.update(
            env=functional.updateDict(newCred.env,
                                      dict(EC2_URL=cred.metadata['ec2_url'])))

    yield _createGroups(newCred)
    yield _createKeypair(newCred)
    defer.returnValue(newCred)
def startMaster(state, credClient, taskName, cluster):
    """
    Boot the master node for a cluster and wait until it is fully up.

    Params:
    - state: holds persistManager used to load/save cluster records
    - credClient: credential client for instance operations
    - taskName: task to post progress updates to
    - cluster: cluster record to start the master for

    Raises Error when no master instance reaches a working state.

    NOTE(review): generator using yield + defer.returnValue -- presumably
    decorated with defer.inlineCallbacks where it is defined (decorator not
    visible in this chunk); confirm before calling directly.
    """
    @defer.inlineCallbacks
    def _saveCluster(instances):
        # Refresh instance info and persist it as the cluster's master.
        # Returns a Record shaped like a waitForInstances step result.
        instances = yield credClient.updateInstances(instances)
        cl = yield state.persistManager.loadCluster(cluster.clusterName, cluster.userName)
        cl = cl.setMaster(instances[0])
        yield state.persistManager.saveCluster(cl)
        defer.returnValue(func.Record(succeeded=instances, failed=[]))

    # Overlay the credential's config under the cluster's own config
    credConfigMap = yield credClient.credentialConfig()
    credConfig = config.configFromMap(credConfigMap)
    baseConf = config.configFromConfig(cluster.config, base=credConfig)
    clusterConf = config.configFromMap({'general.ctype': credConfig('general.ctype'),
                                        'cluster.cluster_public_key': '/mnt/keys/devel1.pem.pub'},
                                       base=baseConf)
    cl = cluster.update(config=clusterConf)

    # Build the data files handed to the new master instance at boot
    mode = [vappio_config.MASTER_NODE]
    masterConfFilename = '/tmp/machine.' + global_state.make_ref() + '.conf'
    masterConf = vappio_config.createDataFile(cl.config, mode, outFile=masterConfFilename)
    dataFile = vappio_config.createMasterDataFile(cl, masterConf)

    groups = [g.strip() for g in cl.config('cluster.master_groups').split(',')]

    # Launch exactly one master instance (min 1, max 1)
    masterInstanceList = yield runInstances(credClient,
                                            cl.config('cluster.ami'),
                                            cl.config('cluster.key'),
                                            cl.config('cluster.master_type'),
                                            groups,
                                            cl.config('cluster.availability_zone', default=None),
                                            cl.config('cluster.master_bid_price', default=None),
                                            1,
                                            1,
                                            open(dataFile).read())

    cl = cl.setMaster(masterInstanceList[0])
    yield state.persistManager.saveCluster(cl)

    # Boot data files are no longer needed once the instance is launched
    os.remove(masterConf)
    os.remove(dataFile)

    # Ordered readiness pipeline: each step filters instances and persists
    # progress; failures drop out into instances.failed
    instances = yield waitForInstances(masterInstanceList,
                                       [updateTask(taskName, 'Waiting for master'),
                                        waitForState(credClient, 'running', WAIT_FOR_STATE_TRIES),
                                        _saveCluster,
                                        updateTask(taskName, 'Master in running state'),
                                        waitForSSH(cl.config('ssh.user'), cl.config('ssh.options'), WAIT_FOR_SSH_TRIES),
                                        _saveCluster,
                                        updateTask(taskName, 'SSH up'),
                                        waitForBoot('/tmp/startup_complete', cl.config('ssh.user'), cl.config('ssh.options'), WAIT_FOR_BOOT_TRIES),
                                        _saveCluster,
                                        updateTask(taskName, 'Booted'),
                                        waitForClusterInfo('local', 'guest', WAIT_FOR_SERVICES_TRIES),
                                        _saveCluster,
                                        updateTask(taskName, 'Cluster info responded')])

    # Clean up anything that failed along the way before checking success
    yield credClient.terminateInstances(instances.failed)

    if not instances.succeeded:
        raise Error('Master failed to start')

    cl = yield state.persistManager.loadCluster(cl.clusterName, cl.userName)
    cl = cl.setState(cl.RUNNING)
    yield state.persistManager.saveCluster(cl)
    defer.returnValue(cl)
def instantiateCredential(conf, cred):
    """
    Instantiate a Nimbus-style credential: convert the cert/key into EC2
    format (once, if the converted files are not already cached on disk),
    register the cluster keypair, and return a Deferred firing with a
    Record describing the instantiated credential.

    Params:
    - conf: config object; reloaded from cred's conf_file when
      'config_loaded' is not set
    - cred: credential providing .name, .cert, .pkey and .metadata

    Returns a Deferred -> func.Record(name, conf, cert, pkey, ec2Path, env).
    """
    if not conf('config_loaded', default=False):
        conf = config.configFromConfig(conf,
                                       base=config.configFromStream(open(conf('conf_file')),
                                                                    base=conf))
    certFile = os.path.join(conf('general.secure_tmp'), cred.name + '_cert.pem')
    keyFile = os.path.join(conf('general.secure_tmp'), cred.name + '_key.pem')

    mainDeferred = defer.succeed(None)

    # Only run the cert conversion when neither converted file exists yet
    if not os.path.exists(certFile) and not os.path.exists(keyFile):
        tmpCertFile = os.path.join(conf('general.secure_tmp'), cred.name + '_cert-tmp.pem')
        tmpKeyFile = os.path.join(conf('general.secure_tmp'), cred.name + '_key-tmp.pem')
        if 'ec2_url' not in cred.metadata:
            return defer.fail(Exception('You must have an ec2_url'))
        parsedUrl = urlparse.urlparse(cred.metadata['ec2_url'])
        if ':' not in parsedUrl.netloc:
            return defer.fail(Exception('Your URL must contain a port'))
        host, port = parsedUrl.netloc.split(':')

        # Stage the raw cert/key in temp files for the converter script
        fout = open(tmpCertFile, 'w')
        fout.write(cred.cert)
        fout.close()

        fout = open(tmpKeyFile, 'w')
        fout.write(cred.pkey)
        fout.close()

        # Convert to EC2 format and install the java trust store in /tmp
        d = commands.runProcess(['nimbusCerts2EC2.py',
                                 '--in-cert=' + tmpCertFile,
                                 '--out-cert=' + certFile,
                                 '--in-key=' + tmpKeyFile,
                                 '--out-key=' + keyFile,
                                 '--java-cert-dir=/tmp',
                                 '--java-cert-host=' + host,
                                 '--java-cert-port=' + port],
                                stdoutf=None,
                                stderrf=None,
                                log=True)

        def _chmod(_exitCode):
            # Converted key must be readable by other local users
            return commands.runProcess(['chmod', '+r', keyFile], stdoutf=None, stderrf=None)

        d.addCallback(_chmod)

        def _unlink(v):
            # Remove the staged temp files on success or failure; returning
            # v propagates the result/failure down the chain unchanged
            os.unlink(tmpCertFile)
            os.unlink(tmpKeyFile)
            return v

        d.addCallback(_unlink)
        d.addErrback(_unlink)

        # Chain the conversion onto the main deferred
        mainDeferred.addCallback(lambda _ : d)

    ec2Home = cred.metadata.get('ec2_api_tools', '/opt/ec2-api-tools-1.3-57419')
    newCred = func.Record(name=cred.name,
                          conf=conf,
                          cert=certFile,
                          pkey=keyFile,
                          ec2Path=os.path.join(ec2Home, 'bin'),
                          env=dict(EC2_JVM_ARGS='-Djavax.net.ssl.trustStore=/tmp/jssecacerts',
                                   EC2_HOME=ec2Home,
                                   EC2_URL=cred.metadata['ec2_url']))

    # If a public key exists for the cluster private key, register it as a
    # keypair, retrying up to 10 times with 30s sleeps between attempts.
    # NOTE(review): block structure reconstructed from collapsed source --
    # _addKeypair is taken to be inside this 'if'; confirm against upstream.
    if os.path.exists(conf('cluster.cluster_private_key') + '.pub'):
        pubKey = open(conf('cluster.cluster_private_key') + '.pub').read().rstrip()

        def _addKeypair():
            keyPairDefer = ec2.addKeypair(newCred, conf('cluster.key') + '||' + pubKey)

            def _printError(f):
                # Log and re-raise so tryUntil sees the failure and retries
                log.msg('Adding keypair failed, retrying')
                log.err(f)
                return f

            keyPairDefer.addErrback(_printError)
            return keyPairDefer

        mainDeferred.addCallback(lambda _ : defer_utils.tryUntil(10,
                                                                 _addKeypair,
                                                                 onFailure=defer_utils.sleep(30)))

    mainDeferred.addCallback(lambda _ : newCred)
    return mainDeferred
def instantiateCredential(conf, cred):
    """
    Instantiate a Nimbus-style credential: convert the cert/key into EC2
    format (once, if the converted files are not already cached on disk),
    register the cluster keypair, and return a Deferred firing with a
    Record describing the instantiated credential.

    Params:
    - conf: config object; reloaded from cred's conf_file when
      'config_loaded' is not set
    - cred: credential providing .name, .cert, .pkey and .metadata

    Returns a Deferred -> func.Record(name, conf, cert, pkey, ec2Path, env).
    """
    if not conf('config_loaded', default=False):
        conf = config.configFromConfig(conf,
                                       base=config.configFromStream(open(
                                           conf('conf_file')), base=conf))
    certFile = os.path.join(conf('general.secure_tmp'),
                            cred.name + '_cert.pem')
    keyFile = os.path.join(conf('general.secure_tmp'), cred.name + '_key.pem')

    mainDeferred = defer.succeed(None)

    # Only run the cert conversion when neither converted file exists yet
    if not os.path.exists(certFile) and not os.path.exists(keyFile):
        tmpCertFile = os.path.join(conf('general.secure_tmp'),
                                   cred.name + '_cert-tmp.pem')
        tmpKeyFile = os.path.join(conf('general.secure_tmp'),
                                  cred.name + '_key-tmp.pem')
        if 'ec2_url' not in cred.metadata:
            return defer.fail(Exception('You must have an ec2_url'))

        parsedUrl = urlparse.urlparse(cred.metadata['ec2_url'])
        if ':' not in parsedUrl.netloc:
            return defer.fail(Exception('Your URL must contain a port'))
        host, port = parsedUrl.netloc.split(':')

        # Stage the raw cert/key in temp files for the converter script
        fout = open(tmpCertFile, 'w')
        fout.write(cred.cert)
        fout.close()
        fout = open(tmpKeyFile, 'w')
        fout.write(cred.pkey)
        fout.close()

        # Convert to EC2 format and install the java trust store in /tmp
        d = commands.runProcess([
            'nimbusCerts2EC2.py', '--in-cert=' + tmpCertFile,
            '--out-cert=' + certFile, '--in-key=' + tmpKeyFile,
            '--out-key=' + keyFile, '--java-cert-dir=/tmp',
            '--java-cert-host=' + host, '--java-cert-port=' + port
        ],
                                stdoutf=None,
                                stderrf=None,
                                log=True)

        def _chmod(_exitCode):
            # Converted key must be readable by other local users
            return commands.runProcess(['chmod', '+r', keyFile],
                                       stdoutf=None,
                                       stderrf=None)

        d.addCallback(_chmod)

        def _unlink(v):
            # Remove the staged temp files on success or failure; returning
            # v propagates the result/failure down the chain unchanged
            os.unlink(tmpCertFile)
            os.unlink(tmpKeyFile)
            return v

        d.addCallback(_unlink)
        d.addErrback(_unlink)

        # Chain the conversion onto the main deferred
        mainDeferred.addCallback(lambda _: d)

    ec2Home = cred.metadata.get('ec2_api_tools', '/opt/ec2-api-tools-1.3-57419')
    newCred = func.Record(
        name=cred.name,
        conf=conf,
        cert=certFile,
        pkey=keyFile,
        ec2Path=os.path.join(ec2Home, 'bin'),
        env=dict(EC2_JVM_ARGS='-Djavax.net.ssl.trustStore=/tmp/jssecacerts',
                 EC2_HOME=ec2Home,
                 EC2_URL=cred.metadata['ec2_url']))

    # If a public key exists for the cluster private key, register it as a
    # keypair, retrying up to 10 times with 30s sleeps between attempts.
    # NOTE(review): block structure reconstructed from collapsed source --
    # _addKeypair is taken to be inside this 'if'; confirm against upstream.
    if os.path.exists(conf('cluster.cluster_private_key') + '.pub'):
        pubKey = open(conf('cluster.cluster_private_key')
                      + '.pub').read().rstrip()

        def _addKeypair():
            keyPairDefer = ec2.addKeypair(newCred,
                                          conf('cluster.key') + '||' + pubKey)

            def _printError(f):
                # Log and re-raise so tryUntil sees the failure and retries
                log.msg('Adding keypair failed, retrying')
                log.err(f)
                return f

            keyPairDefer.addErrback(_printError)
            return keyPairDefer

        mainDeferred.addCallback(lambda _: defer_utils.tryUntil(
            10, _addKeypair, onFailure=defer_utils.sleep(30)))

    mainDeferred.addCallback(lambda _: newCred)
    return mainDeferred
def startMaster(state, credClient, taskName, cluster):
    """
    Boot the master node for a cluster and wait until it is fully up.

    Params:
    - state: holds persistManager used to load/save cluster records
    - credClient: credential client for instance operations
    - taskName: task to post progress updates to
    - cluster: cluster record to start the master for

    Raises Error when no master instance reaches a working state.

    NOTE(review): generator using yield + defer.returnValue -- presumably
    decorated with defer.inlineCallbacks where it is defined (decorator not
    visible in this chunk); confirm before calling directly.
    """
    @defer.inlineCallbacks
    def _saveCluster(instances):
        # Refresh instance info and persist it as the cluster's master.
        # Returns a Record shaped like a waitForInstances step result.
        instances = yield credClient.updateInstances(instances)
        cl = yield state.persistManager.loadCluster(cluster.clusterName,
                                                    cluster.userName)
        cl = cl.setMaster(instances[0])
        yield state.persistManager.saveCluster(cl)
        defer.returnValue(func.Record(succeeded=instances, failed=[]))

    # Overlay the credential's config under the cluster's own config
    credConfigMap = yield credClient.credentialConfig()
    credConfig = config.configFromMap(credConfigMap)
    baseConf = config.configFromConfig(cluster.config, base=credConfig)
    clusterConf = config.configFromMap(
        {'general.ctype': credConfig('general.ctype'),
         'cluster.cluster_public_key': '/mnt/keys/devel1.pem.pub'},
        base=baseConf)
    cl = cluster.update(config=clusterConf)

    # Build the data files handed to the new master instance at boot
    mode = [vappio_config.MASTER_NODE]
    masterConfFilename = '/tmp/machine.' + global_state.make_ref() + '.conf'
    masterConf = vappio_config.createDataFile(cl.config,
                                              mode,
                                              outFile=masterConfFilename)
    dataFile = vappio_config.createMasterDataFile(cl, masterConf)

    groups = [g.strip() for g in cl.config('cluster.master_groups').split(',')]

    # Launch exactly one master instance (min 1, max 1)
    masterInstanceList = yield runInstances(
        credClient,
        cl.config('cluster.ami'),
        cl.config('cluster.key'),
        cl.config('cluster.master_type'),
        groups,
        cl.config('cluster.availability_zone', default=None),
        cl.config('cluster.master_bid_price', default=None),
        1,
        1,
        open(dataFile).read())

    cl = cl.setMaster(masterInstanceList[0])
    yield state.persistManager.saveCluster(cl)

    # Boot data files are no longer needed once the instance is launched
    os.remove(masterConf)
    os.remove(dataFile)

    # Ordered readiness pipeline: each step filters instances and persists
    # progress; failures drop out into instances.failed
    instances = yield waitForInstances(
        masterInstanceList,
        [updateTask(taskName, 'Waiting for master'),
         waitForState(credClient, 'running', WAIT_FOR_STATE_TRIES),
         _saveCluster,
         updateTask(taskName, 'Master in running state'),
         waitForSSH(cl.config('ssh.user'), cl.config('ssh.options'), WAIT_FOR_SSH_TRIES),
         _saveCluster,
         updateTask(taskName, 'SSH up'),
         waitForBoot('/tmp/startup_complete', cl.config('ssh.user'), cl.config('ssh.options'), WAIT_FOR_BOOT_TRIES),
         _saveCluster,
         updateTask(taskName, 'Booted'),
         waitForClusterInfo('local', 'guest', WAIT_FOR_SERVICES_TRIES),
         _saveCluster,
         updateTask(taskName, 'Cluster info responded')])

    # Clean up anything that failed along the way before checking success
    yield credClient.terminateInstances(instances.failed)

    if not instances.succeeded:
        raise Error('Master failed to start')

    cl = yield state.persistManager.loadCluster(cl.clusterName, cl.userName)
    cl = cl.setState(cl.RUNNING)
    yield state.persistManager.saveCluster(cl)
    defer.returnValue(cl)