def confIfPipelineConfigSet(conf, options):
    """
    Takes a conf, checks to see if a pipeline conf file is specified, if so
    it loads it up and applies it OVER any options specified on the command
    line.  This may seem counter intuitive but it makes other things easier,
    for example a pipeline redefining anything in the machines.conf since
    that is also in this conf.  It then applies the functions in the OPTIONS
    variable in the values in the config file

    Fix: the config file handles are now closed explicitly instead of being
    leaked.
    """
    if conf('CONFIG_FILE', default=None) is not None:
        fin = open(conf('CONFIG_FILE'))
        try:
            fconf = config.configFromStream(fin)
        finally:
            fin.close()
        keys = fconf.keys()
        m = {}
        for o in options:
            ##
            # Get the name of the option, it's the first element of the
            # tuple; the transform function is the fifth
            name = o[0]
            f = o[4]
            if name in keys:
                m[name] = applyIfCallable(f(fconf(name)), conf)
        ##
        # NOTE(review): the original comment claimed lazy=True was used "for
        # safety" but the code passes lazy=False; lazy=False is kept here to
        # preserve behavior -- confirm which was intended.
        fin = open(conf('CONFIG_FILE'))
        try:
            return config.configFromMap(
                m, config.configFromStream(fin, conf, lazy=False))
        finally:
            fin.close()
    else:
        return conf
def pipelineFromDict(d):
    """Rebuild a pipeline record from its serialized dictionary form."""
    pipelineType = reflect.namedAny('vappio.pipelines.' + d['ptype'])
    pipelineConf = config.configFromMap(dict(d['config']))
    return createPipeline(taskName=d['taskName'],
                          name=d['name'],
                          pid=d['pid'],
                          ptype=pipelineType,
                          config=pipelineConf)
def tagFromDict(d):
    """Build a Tag from its dictionary form; the phantom config is optional."""
    phantomConf = None
    if d['phantom']:
        phantomConf = config.configFromMap(d['phantom'])
    return Tag(tagName=d['tag_name'],
               files=d['files'],
               metadata=dict(d['metadata']),
               phantom=phantomConf,
               taskName=d['metadata'].get('task_name'))
def instantiateCredential(conf, cred):
    """
    Takes a credential and instanitates it.  It returns a Record that has
    all of the information users of that instantiated credential will need

    Fix: all file handles are now closed explicitly (the original relied on
    CPython refcounting to flush/close the cert and key files).
    """
    if not conf('config_loaded', default=False):
        fin = open(conf('general.conf_file', default=DEFAULT_CONFIG_FILE))
        try:
            conf = config.configFromMap({'config_loaded': True},
                                        base=config.configFromStream(fin, base=conf))
        finally:
            fin.close()

    certFile = os.path.join(conf('general.secure_tmp'), cred.name + '_cert.pem')
    keyFile = os.path.join(conf('general.secure_tmp'), cred.name + '_key.pem')

    def _syncFile(path, contents):
        # Rewrite path only when it is missing or its contents are stale
        needsWrite = True
        if os.path.exists(path):
            fin = open(path)
            try:
                needsWrite = fin.read() != contents
            finally:
                fin.close()
        if needsWrite:
            fout = open(path, 'w')
            try:
                fout.write(contents)
            finally:
                fout.close()

    _syncFile(certFile, cred.cert)
    _syncFile(keyFile, cred.pkey)

    newCred = functional.Record(cert=certFile, pkey=keyFile, ec2URL=None, env={})
    if 'ec2_url' in cred.metadata:
        return (conf, newCred.update(env=functional.updateDict(
            newCred.env, dict(EC2_URL=cred.metadata['ec2_url']))))
    else:
        return (conf, newCred)
def realizePhantom(ctype, baseDir, tagfile):
    """
    This takes a phantom tag and turns it into a real tag
    (creating/downloading the files)

    ctype - The current cluster type, this says which cluster.$ctype to look at
    baseDir - The base directory to download the files to
    tagfile - The tag file.  In the tag, ctype and baseDir can be referenced
              through ${ctype} and ${base_dir}
    """
    tagConf = configFromMap({'ctype': ctype, 'base_dir': baseDir}, tagfile)
    # Pick the download source: per-cluster url first, then the per-cluster
    # command, then the catch-all command.  Anything that does not start
    # with a protocol we understand (<protocol>://) is treated as a command.
    source = tagConf('phantom.cluster.%s.url' % ctype,
                     default=tagConf('phantom.cluster.%s.command' % ctype,
                                     default=tagConf('phantom.cluster.ALL.command')))
    if source.startswith('http://'):
        # downloadHttp(ctype, baseDir, source, tagfile)
        pass
    elif source.startswith('s3://'):
        # We might need to modify realizePhantom to take a conf that will
        # have our s3 credentials in it
        # downloadS3(ctype, baseDir, source, tagfile)
        pass
    else:
        # It's a command:
        runCommand(ctype, baseDir, source, tagfile)
def main(options, _args): runSystemEx('svn copy https://clovr.svn.sourceforge.net/svnroot/clovr/trunk https://clovr.svn.sourceforge.net/svnroot/clovr/tags/%s -m "Cutting release %s"' % (options('general.version'), options('general.version')), log=True) runSystemEx('svn copy https://vappio.svn.sourceforge.net/svnroot/vappio/trunk https://vappio.svn.sourceforge.net/svnroot/vappio/tags/%s -m "Cutting release %s"' % (options('general.version'), options('general.version')), log=True) runSystemEx('scp %s:/export/%s .' % (options('general.remote_name'), options('general.image')), log=True) runSystemEx('cp %s /usr/local/projects/clovr/images' % options('general.image'), log=True) runSystemEx('cp %s VMware_conversion/shared/convert_img.img' % options('general.image'), log=True) convertChannel = threads.runThreadWithChannel(convertImage).channel.sendWithChannel(options) waitForPasswordChange() bundleChannel = threads.runThreadWithChannel(bundleAMI).channel.sendWithChannel(options) try: convertChannel.receive() vmWareDir = 'clovr-vmware.%s' % options('general.version') runSystemEx('mkdir -p ' + vmWareDir, log=True) runSystemEx('mv VMware_conversion/shared/converted_img.vmdk %s' % os.path.join(vmWareDir, 'clovr.9-04.x86-64.%s.vmdk' % options('general.version'))) runSystemEx('mkdir -p %s %s' % (os.path.join(vmWareDir, 'keys'), os.path.join(vmWareDir, 'user_data')), log=True) runSystemEx('cp -rv /usr/local/projects/clovr/shared ' + vmWareDir, log=True) fout = open(os.path.join(vmWareDir, 'start_clovr.vmx'), 'w') clovrConf = config.configFromMap(dict(version=options('general.version'))) for line in open('/usr/local/projects/clovr/start_clovr.vmx'): fout.write(config.replaceStr(line, clovrConf)) except Exception, err: errorPrint('Converting image failed. Error message:') errorPrint(str(err))
def realizePhantom(ctype, baseDir, tagfile):
    """
    Turn a phantom tag into a real tag (creating/downloading the files).

    ctype - The current cluster type; selects which cluster.$ctype to look at
    baseDir - The base directory to download the files to
    tagfile - The tag file.  In the tag, ctype and baseDir can be referenced
              through ${ctype} and ${base_dir}
    """
    resolved = configFromMap({'ctype': ctype, 'base_dir': baseDir}, tagfile)
    # First try the per-cluster url, then the per-cluster command, finally
    # the ALL command.  A value that does not start with a protocol we
    # understand (<protocol>://) is a command and gets executed.
    download = resolved('phantom.cluster.%s.url' % ctype,
                        default=resolved('phantom.cluster.%s.command' % ctype,
                                         default=resolved('phantom.cluster.ALL.command')))
    if download.startswith('http://'):
        # downloadHttp(ctype, baseDir, download, tagfile)
        pass
    elif download.startswith('s3://'):
        # We might need to modify realizePhantom to take a conf that will
        # have our s3 credentials in it
        # downloadS3(ctype, baseDir, download, tagfile)
        pass
    else:
        # It's a command:
        runCommand(ctype, baseDir, download, tagfile)
def _credential():
    """
    Build the 'local' credential dictionary.  If /tmp/cred-info exists its
    tab-separated fields supply the cert/key/ctype/metadata; otherwise an
    empty local credential is returned.
    """
    if not os.path.exists('/tmp/cred-info'):
        return {'name': 'local',
                'desc': 'Local credential',
                'ctype': 'local',
                'cert': None,
                'pkey': None,
                'metadata': {},
                'conf': config.configFromMap({})}

    cert, pkey, ctype, metadata = open('/tmp/cred-info').read().split('\t')
    # metadata is a comma-separated list of key=value pairs (may be empty)
    if metadata:
        metadataDict = dict([pair.split('=', 1) for pair in metadata.split(',')])
    else:
        metadataDict = {}
    return {'name': 'local',
            'desc': 'Local credential',
            'ctype': ctype,
            'cert': open(cert).read(),
            'pkey': open(pkey).read(),
            'metadata': metadataDict,
            'conf': config.configFromStream(open('/tmp/machine.conf'), lazy=True)}
def confIfPipelineConfigSet(conf, options):
    """
    Takes a conf, checks to see if a pipeline conf file is specified, if so
    it loads it up and applies it OVER any options specified on the command
    line.  This may seem counter intuitive but it makes other things easier,
    for example a pipeline redefining anything in the machines.conf since
    that is also in this conf.  It then applies the functions in the OPTIONS
    variable in the values in the config file

    Fix: the config file handles are now closed explicitly instead of being
    leaked.
    """
    if conf("CONFIG_FILE", default=None) is not None:
        fin = open(conf("CONFIG_FILE"))
        try:
            fconf = config.configFromStream(fin)
        finally:
            fin.close()
        keys = fconf.keys()
        m = {}
        for o in options:
            ##
            # The option name is the first element of the tuple; the
            # transform function is the fifth
            name = o[0]
            f = o[4]
            if name in keys:
                m[name] = applyIfCallable(f(fconf(name)), conf)
        ##
        # NOTE(review): the original comment claimed lazy=True was used "for
        # safety" but the code passes lazy=False; lazy=False is kept here to
        # preserve behavior -- confirm which was intended.
        fin = open(conf("CONFIG_FILE"))
        try:
            return config.configFromMap(
                m, config.configFromStream(fin, conf, lazy=False))
        finally:
            fin.close()
    else:
        return conf
def instantiateCredential(conf, cred):
    """Instantiate a credential, ensuring the conf carries a conf_file value."""
    if 'conf_file' not in conf or not conf('conf_file'):
        conf = config.configFromMap({'conf_file': DEFAULT_CONFIG_FILE}, base=conf)
    return defer.succeed(func.Record(name=cred.name, conf=conf))
def instantiateCredential(conf, cred):
    """
    Fill in diagnostic defaults (ec2_url, conf_file) and hand the credential
    off to the nimbus implementation.
    """
    cred.metadata.setdefault('ec2_url', DIAG_EC2_URL)
    if 'conf_file' not in conf or not conf('conf_file'):
        conf = config.configFromMap({'conf_file': DEFAULT_CONFIG_FILE}, base=conf)
    return nimbus.instantiateCredential(conf, cred)
def instantiateCredential(conf, cred):
    """
    Load the general conf file on first use (guarded by config_loaded) and
    return the augmented conf; this ctype produces no credential record.
    """
    if conf('config_loaded', default=False):
        return (conf, None)
    loadedConf = config.configFromStream(
        open(conf('general.conf_file', default=DEFAULT_CONFIG_FILE)), base=conf)
    return (config.configFromMap({'config_loaded': True}, base=loadedConf), None)
def instantiateCredential(conf, cred):
    """Instantiates a credential based off the configuration provided."""
    if 'conf_file' not in conf or not conf('conf_file'):
        conf = config.configFromMap({'conf_file': DEFAULT_CONFIG_FILE}, base=conf)
    instantiated = func.Record(name=cred.name, conf=conf)
    return defer.succeed(instantiated)
def pipelineFromDict(d):
    """Inverse of pipeline serialization: rebuild a pipeline from a dict."""
    ptypeObj = reflect.namedAny("vappio.pipelines." + d["ptype"])
    return createPipeline(taskName=d["taskName"],
                          name=d["name"],
                          pid=d["pid"],
                          ptype=ptypeObj,
                          config=config.configFromMap(dict(d["config"])))
def tagFromDict(d):
    """Reconstruct a Tag from a dict; 'phantom' may be absent/falsy."""
    if d["phantom"]:
        phantomConf = config.configFromMap(d["phantom"])
    else:
        phantomConf = None
    meta = dict(d["metadata"])
    return Tag(tagName=d["tag_name"],
               files=d["files"],
               metadata=meta,
               phantom=phantomConf,
               taskName=d["metadata"].get("task_name"))
def run(state, pipeline):
    """
    Write the pipeline's config to disk in ini form, instantiate the
    protocol's pipeline template, and launch run_pipeline.pl; returns (via
    the deferred) the pipeline updated with the id reported on stdout.

    Raises commands.ProgramRunError when run_pipeline.pl emits no id.
    """
    # Ensure the directory holding generated pipeline configs exists
    if not os.path.exists(state.conf('config.pipeline_configs')):
        os.mkdir(state.conf('config.pipeline_configs'))
    # Timestamp-named config file, recorded back into the pipeline config
    # under CONFIG_FILE so downstream tools can find it
    tmpConfigName = os.path.join(state.conf('config.pipeline_configs'),
                                 str(time.time()) + '.config')
    pipeline.config = config.configFromMap({'CONFIG_FILE': tmpConfigName},
                                           base=pipeline.config)
    fout = open(tmpConfigName, 'w')
    # We want to produce an ini like file with [section]'s
    sections = {}
    for k in pipeline.config.keys():
        # Group keys by their dotted prefix; the last component is the key
        sections.setdefault('.'.join(k.split('.')[:-1]), []).append(k)
    for s, ks in sections.iteritems():
        # The '' and 'env' sections are deliberately not written out
        if s not in ['', 'env']:
            fout.write('[' + s + ']\n')
            for k in ks:
                shortK = k.split('.')[-1]
                fout.write('%s=%s\n' % (shortK, str(pipeline.config(k))))
    fout.close()
    # Locate the protocol's template config/layout
    templateDir = os.path.join(state.machineconf('dirs.clovr_pipelines_template_dir'),
                               pipeline.protocol)
    templateConfig = os.path.join(templateDir, 'pipeline_tmpl.config')
    templateLayout = os.path.join(templateDir, 'pipeline.layout')
    tmpPipelineConfig = os.path.join(state.conf('config.pipeline_configs'),
                                     str(time.time()) + '.pipeline.config')
    # Expand includes and ${...} references against the pipeline config
    fout = open(tmpPipelineConfig, 'w')
    for line in handleIncludes(open(templateConfig)):
        fout.write(config.replaceStr(line, pipeline.config) + '\n')
    fout.close()
    cmd = ['run_pipeline.pl',
           '--config=' + tmpPipelineConfig,
           '--templatelayout=' + templateLayout,
           '--taskname=' + pipeline.taskName]
    if pipeline.queue:
        cmd.append('--queue=' + pipeline.queue)
    stdout = StringIO.StringIO()
    stderr = StringIO.StringIO()
    yield commands.runProcess(cmd, stdoutf=stdout.write, stderrf=stderr.write)
    # run_pipeline.pl prints the new pipeline id on stdout; empty means failure
    pipelineId = stdout.getvalue().strip()
    if not pipelineId:
        raise commands.ProgramRunError(cmd, stderr.getvalue())
    defer.returnValue(pipeline.update(pipelineId=pipelineId))
def handleWWWListAddCredentials(request):
    """
    Dual-purpose handler: when 'credential_name' (plus the other required
    keys) is present a new credential is created and instantiated in the
    background; when 'credential_name' is absent the known credentials are
    listed back on the return queue.  Any other shape is an error.
    """
    if 'credential_name' in request.body and core.keysInDict(
            ['credential_name', 'description', 'ctype', 'metadata'],
            request.body):
        # Users can provide a file name or the actual contents of the
        # certificate.
        if 'cert_file' in request.body:
            cert = open(request.body['cert_file']).read()
        else:
            cert = request.body['cert']
        if 'pkey_file' in request.body:
            pkey = open(request.body['pkey_file']).read()
        else:
            pkey = request.body['pkey']
        # Request-supplied conf values win over the process environment
        conf = config.configFromMap(request.body.get('conf', {}),
                                    base=config.configFromEnv())
        cred = persist.createCredential(name=request.body['credential_name'],
                                        desc=request.body['description'],
                                        ctype=request.body['ctype'],
                                        cert=cert,
                                        pkey=pkey,
                                        active=True,
                                        metadata=request.body['metadata'],
                                        conf=conf)
        # Track the instantiation as a task so the caller can poll it
        taskName = yield tasks_tx.createTaskAndSave('addCredential', 1)
        instantiateAndSaveCredential(taskName, cred,
                                     request.state.credentialPersist)
        queue.returnQueueSuccess(request.mq,
                                 request.body['return_queue'],
                                 taskName)
        defer.returnValue(request)
    elif 'credential_name' not in request.body:
        # List mode, optionally filtered by 'credential_names'
        credentials = request.state.credentialsCache.getAllCredentials()
        credentialsDicts = [{'name': name,
                             'description': c['cred_instance'].credential.desc,
                             'num_instances': len(c['instances']),
                             'ctype': c['cred_instance'].credential.getCType()}
                            for name, c in credentials.iteritems()
                            if ('credential_names' in request.body and
                                name in request.body['credential_names']) or
                            'credential_names' not in request.body]
        queue.returnQueueSuccess(request.mq,
                                 request.body['return_queue'],
                                 credentialsDicts)
        defer.returnValue(request)
    else:
        # credential_name was present but required keys were missing
        queue.returnQueueError(request.mq,
                               request.body['return_queue'],
                               'Unknown credential query')
        raise UnknownRequestError(str(request.body))
def pipelineSSFromDict(d):
    """Rehydrate a PipelineSnapshot from its dict representation."""
    snapshotType = reflect.namedAny('vappio.pipelines.' + d['ptype'])
    snapshotConf = config.configFromMap(d['config'])
    return PipelineSnapshot(name=d['name'],
                            taskName=d['taskName'],
                            pid=d['pid'],
                            ptype=snapshotType,
                            config=snapshotConf,
                            complete=d['complete'],
                            total=d['total'],
                            state=d['state'])
def pipelineFromDict(d):
    """Build a Pipeline record from a persisted dict; the config stays lazy."""
    lazyConf = config_.configFromMap(d['config'], lazy=True)
    return Pipeline(pipelineId=d['pipeline_id'],
                    pipelineName=d['pipeline_name'],
                    userName=d['user_name'],
                    protocol=d['protocol'],
                    checksum=d['checksum'],
                    taskName=d['task_name'],
                    queue=d['queue'],
                    children=d['children'],
                    config=lazyConf)
def clusterFromDocument(self, d):
    """Convert a persisted document back into a Cluster instance."""
    cluster = Cluster(clusterName=d['cluster_name'],
                      userName=d['user_name'],
                      credName=d['cred_name'],
                      config=config.configFromMap(json.loads(d['config'])))
    cluster = cluster.setMaster(d['master'])
    cluster = cluster.setState(d['state'])
    cluster = cluster.addExecNodes(d['exec_nodes'])
    cluster = cluster.addDataNodes(d['data_nodes'])
    return cluster.update(startTask=d['start_task'])
def createCluster(request):
    """
    Create and persist a Cluster from the request body, layering the
    provided conf over /tmp/machine.conf.
    """
    baseConf = config.configFromStream(open('/tmp/machine.conf'))
    clusterConf = config.configFromMap(request.body['conf'], base=baseConf)
    newCluster = persist.Cluster(request.body['cluster_name'],
                                 request.body['user_name'],
                                 request.body['cred_name'],
                                 clusterConf)
    yield request.state.persistManager.saveCluster(newCluster)
    defer.returnValue(request)
def pipelineSSFromDict(d):
    """Deserialize a PipelineSnapshot from its dictionary form."""
    loadedType = reflect.namedAny("vappio.pipelines." + d["ptype"])
    return PipelineSnapshot(name=d["name"],
                            taskName=d["taskName"],
                            pid=d["pid"],
                            ptype=loadedType,
                            config=config.configFromMap(d["config"]),
                            complete=d["complete"],
                            total=d["total"],
                            state=d["state"])
def createExecDataFile(conf, master, masterMachineConf):
    """
    Creates a exec data file as the perl start_cluster works

    This is very similar to createMasterDataFile, should be refactored a bit

    conf - cluster conf (template paths, keys, secure_tmp, general.ctype)
    master - dict providing the master's 'public_dns'/'private_dns'
    masterMachineConf - path to the master's machine.conf

    Returns the path of the generated exec_user_data.sh.
    """
    # Scratch file for the modified machine.conf
    outName = os.path.join('/tmp', str(time.time()))
    ##
    # Going to load the master machine.conf and modify node type
    masterConf = config.configFromStream(open(masterMachineConf), lazy=True)
    masterConf = config.configFromMap({'NODE_TYPE': EXEC_NODE},
                                      masterConf,
                                      lazy=True)
    fout = open(outName, 'w')
    fout.write('\n'.join([
        k + '=' + str(v)
        for k, v in config.configToDict(masterConf).iteritems()
    ]))
    fout.close()
    template = open(conf('cluster.exec_user_data_tmpl')).read()
    clusterPrivateKey = open(conf('cluster.cluster_private_key')).read()
    outf = []
    # Derive the cluster public key from the private key
    runSingleProgramEx('ssh-keygen -y -f ' + conf('cluster.cluster_private_key'),
                       outf.append,
                       None,
                       log=True)
    # On EC2 exec nodes reach the master through its private address
    if conf('general.ctype') == 'ec2':
        template = template.replace('<TMPL_VAR NAME=MASTER_DNS>',
                                    master['private_dns'])
    else:
        template = template.replace('<TMPL_VAR NAME=MASTER_DNS>',
                                    master['public_dns'])
    clusterPublicKey = ''.join(outf)
    template = template.replace('<TMPL_VAR NAME=CLUSTER_PRIVATE_KEY>',
                                clusterPrivateKey)
    template = template.replace('<TMPL_VAR NAME=CLUSTER_PUBLIC_KEY>',
                                clusterPublicKey)
    # ${ is escaped -- presumably so the embedded machine.conf is not
    # interpolated when the user-data script runs; TODO confirm
    template = template.replace('<TMPL_VAR NAME=MACHINE_CONF>',
                                open(outName).read().replace('${', '\\${'))
    os.remove(outName)
    # outf is reused here as the output path for the finished script
    outf = os.path.join(conf('general.secure_tmp'), 'exec_user_data.sh')
    open(outf, 'w').write(template)
    return outf
def createCluster(request):
    """Persist a new Cluster assembled from the request, over /tmp/machine.conf."""
    persistManager = request.state.persistManager
    machineConf = config.configFromStream(open('/tmp/machine.conf'))
    cluster = persist.Cluster(request.body['cluster_name'],
                              request.body['user_name'],
                              request.body['cred_name'],
                              config.configFromMap(request.body['conf'],
                                                   base=machineConf))
    yield persistManager.saveCluster(cluster)
    defer.returnValue(request)
def __init__(self, pipelineId, pipelineName, userName, protocol, checksum,
             taskName, queue, children, config):
    """Record for a pipeline; config is normalized to a lazy config object."""
    lazyConfig = config_.configFromMap(config, lazy=True)
    func.Record.__init__(self,
                         pipelineId=pipelineId,
                         pipelineName=pipelineName,
                         userName=userName,
                         protocol=protocol,
                         checksum=checksum,
                         taskName=taskName,
                         queue=queue,
                         children=children,
                         config=lazyConfig)
def instantiateCredential(conf, cred):
    """
    Instantiate a Nimbus credential: converts the Nimbus cert/key into
    EC2-style ones on first use and returns (conf, Record) carrying the
    paths and the environment needed by the EC2 API tools.

    Raises Exception when the credential metadata lacks an ec2_url or the
    url has no port.
    """
    if not conf('config_loaded', default=False):
        conf = config.configFromMap({'config_loaded': True},
                                    base=config.configFromStream(open(
                                        conf('general.conf_file')),
                                                                 base=conf))
    certFile = os.path.join(conf('general.secure_tmp'),
                            cred.name + '_cert.pem')
    keyFile = os.path.join(conf('general.secure_tmp'),
                           cred.name + '_key.pem')
    # Only run the conversion when neither converted file exists yet
    if not os.path.exists(certFile) and not os.path.exists(keyFile):
        tmpCertFile = os.path.join(conf('general.secure_tmp'),
                                   cred.name + '_cert-tmp.pem')
        tmpKeyFile = os.path.join(conf('general.secure_tmp'),
                                  cred.name + '_key-tmp.pem')
        if 'ec2_url' not in cred.metadata:
            raise Exception('You must have an ec2_url')
        # The host:port of the ec2_url is also used for the java trust store
        parsedUrl = urlparse.urlparse(cred.metadata['ec2_url'])
        if ':' not in parsedUrl.netloc:
            raise Exception('Your URL must contain a port')
        host, port = parsedUrl.netloc.split(':')
        fout = open(tmpCertFile, 'w')
        fout.write(cred.cert)
        fout.close()
        fout = open(tmpKeyFile, 'w')
        fout.write(cred.pkey)
        fout.close()
        try:
            # Convert the cert/key; output is discarded, temp files are
            # always removed afterwards
            commands.runSystemEx(' '.join([
                'nimbusCerts2EC2.py',
                '--in-cert=' + tmpCertFile,
                '--out-cert=' + certFile,
                '--in-key=' + tmpKeyFile,
                '--out-key=' + keyFile,
                '--java-cert-dir=/tmp',
                '--java-cert-host=' + host,
                '--java-cert-port=' + port
            ]) + ' > /dev/null 2>&1',
                                 log=True)
            commands.runSystemEx('chmod +r ' + keyFile)
        finally:
            os.unlink(tmpCertFile)
            os.unlink(tmpKeyFile)
    ec2Home = '/opt/ec2-api-tools-1.3-42584'
    newCred = func.Record(
        cert=certFile,
        pkey=keyFile,
        ec2Path=os.path.join(ec2Home, 'bin'),
        env=dict(EC2_JVM_ARGS='-Djavax.net.ssl.trustStore=/tmp/jssecacerts',
                 EC2_HOME=ec2Home,
                 EC2_URL=cred.metadata['ec2_url']))
    # If the cluster public key exists, register it remotely as keyname||pubkey
    if os.path.exists(conf('cluster.cluster_private_key') + '.pub'):
        pubKey = open(conf('cluster.cluster_private_key') + '.pub').read().rstrip()
        ec2_control.addKeypair(newCred,
                               '"' + conf('cluster.key') + '||' + pubKey + '"')
    return (conf, newCred)
def credentialFromDict(d):
    """
    Main difference is d['cert'] is a string for the module/class to use for
    this credential and this loads that module/class
    """
    credConf = config.configFromMap(d['conf'])
    return createCredential(d['name'], d['desc'], d['ctype'], d['cert'],
                            d['pkey'], d['active'], d['metadata'], credConf)
def loadTagFile(fname): """ Loads a tagfile, returns a config object of attributes Also considering a .phantom type which would represent files that don't really exist. I think this makes sense as you should be able to tarnsfer .phantom files around but .metadata's should be generated when you make a tag Will explain more abou this in a wiki page somewhere... """ ## # Phantom filse are in a format that configFromStream can read. This is because phantom files # are expected to be written and modified by humans. .metadata files on the other hand # are just expected to be the produce of a machine storing information so uses json if os.path.exists(fname + '.phantom'): ## # Put everythin under phantom # We want to do it lazily too since we will be adding # data it can access later phantom = configFromMap( { 'phantom_tag': True, 'phantom': configToDict( configFromStream(open(fname + '.phantom'), lazy=True)) }, lazy=True) else: phantom = configFromMap({}) ## # If the fname actually exists, open its meta data + files # if the fname does not exist but the phantom does, return the phantom # otherwise, throw an exception about missing the tagfile if os.path.exists(fname): if os.path.exists(fname + '.metadata'): metadata = configFromMap( {'metadata': json.loads(open(fname + '.metadata').read())}, phantom, lazy=True) else: metadata = configFromMap({}, phantom) return configFromMap( {'files': [f.strip() for f in open(fname) if f.strip()]}, metadata, lazy=True) elif not os.path.exists(fname) and os.path.exists(fname + '.phantom'): if os.path.exists(fname + '.metadata'): metadata = configFromMap( {'metadata': json.loads(open(fname + '.metadata').read())}, phantom, lazy=True) return metadata else: return phantom else: raise MissingTagFileError(fname)
def runPipelineConfig(taskName, name, pipeline, conf, queue=None): """ Takes a config object representing a pipeline options, validates those options in pipeline.OPTIONS and passes the results onto runPipelineWithConfig """ ## # Mocheezmo way to have it load a conf file. This will be removed in the future tmpConfigName = os.path.join("/tmp", str(time.time()) + ".config") options = list(pipeline.OPTIONS) options.append(("conf", "", "--conf", "Conf file (DO NOT SPECIFY, FOR INTERNAL USE)", const("/tmp/machine.conf"))) options.append( ( "CONFIG_FILE", "-c", "--CONFIG_FILE", "Config file for the pipeline. Specify this if you do not want to specify options on the comamnd line", const(tmpConfigName), ) ) ## # Load up machine.conf and apply it to our current config conf = config.configFromConfig( conf, config.configFromStream(open("/tmp/machine.conf"), config.configFromEnv()), lazy=True ) vals = {} for o in options: vals[o[0]] = cli.applyOption(conf(o[0], default=None), o, conf) conf = config.configFromMap(vals, conf) ## # For some ergatis trickery we then need to output this config to a temp file so ergatis can pull variables from it confDict = config.configToDict(conf) confVals = {} cv = [(".".join(k.split(".")[:-1]), k.split(".")[-1], v) for k, v in confDict.iteritems()] for s, k, v in cv: confVals.setdefault(s, {})[k] = v fout = open(tmpConfigName, "w") for s, d in confVals.iteritems(): if s not in ["", "env"]: fout.write("[" + s + "]\n") for k, v in d.iteritems(): fout.write("%s=%s\n" % (k, str(v))) fout.close() return runPipelineWithConfig(taskName, name, pipeline, conf, queue)
def runPipelineConfig(taskName, name, pipeline, conf, queue=None): """ Takes a config object representing a pipeline options, validates those options in pipeline.OPTIONS and passes the results onto runPipelineWithConfig """ ## # Mocheezmo way to have it load a conf file. This will be removed in the future tmpConfigName = os.path.join('/tmp', str(time.time()) + '.config') options = list(pipeline.OPTIONS) options.append( ('conf', '', '--conf', 'Conf file (DO NOT SPECIFY, FOR INTERNAL USE)', const('/tmp/machine.conf'))) options.append(( 'CONFIG_FILE', '-c', '--CONFIG_FILE', 'Config file for the pipeline. Specify this if you do not want to specify options on the comamnd line', const(tmpConfigName))) ## # Load up machine.conf and apply it to our current config conf = config.configFromConfig(conf, config.configFromStream( open('/tmp/machine.conf'), config.configFromEnv()), lazy=True) vals = {} for o in options: vals[o[0]] = cli.applyOption(conf(o[0], default=None), o, conf) conf = config.configFromMap(vals, conf) ## # For some ergatis trickery we then need to output this config to a temp file so ergatis can pull variables from it confDict = config.configToDict(conf) confVals = {} cv = [('.'.join(k.split('.')[:-1]), k.split('.')[-1], v) for k, v in confDict.iteritems()] for s, k, v in cv: confVals.setdefault(s, {})[k] = v fout = open(tmpConfigName, 'w') for s, d in confVals.iteritems(): if s not in ['', 'env']: fout.write('[' + s + ']\n') for k, v in d.iteritems(): fout.write('%s=%s\n' % (k, str(v))) fout.close() return runPipelineWithConfig(taskName, name, pipeline, conf, queue)
def main(options, _args): runSystemEx( 'svn copy https://clovr.svn.sourceforge.net/svnroot/clovr/trunk https://clovr.svn.sourceforge.net/svnroot/clovr/tags/%s -m "Cutting release %s"' % (options('general.version'), options('general.version')), log=True) runSystemEx( 'svn copy https://vappio.svn.sourceforge.net/svnroot/vappio/trunk https://vappio.svn.sourceforge.net/svnroot/vappio/tags/%s -m "Cutting release %s"' % (options('general.version'), options('general.version')), log=True) runSystemEx('scp %s:/export/%s .' % (options('general.remote_name'), options('general.image')), log=True) runSystemEx('cp %s /usr/local/projects/clovr/images' % options('general.image'), log=True) runSystemEx('cp %s VMware_conversion/shared/convert_img.img' % options('general.image'), log=True) convertChannel = threads.runThreadWithChannel( convertImage).channel.sendWithChannel(options) waitForPasswordChange() bundleChannel = threads.runThreadWithChannel( bundleAMI).channel.sendWithChannel(options) try: convertChannel.receive() vmWareDir = 'clovr-vmware.%s' % options('general.version') runSystemEx('mkdir -p ' + vmWareDir, log=True) runSystemEx( 'mv VMware_conversion/shared/converted_img.vmdk %s' % os.path.join( vmWareDir, 'clovr.9-04.x86-64.%s.vmdk' % options('general.version'))) runSystemEx('mkdir -p %s %s' % (os.path.join( vmWareDir, 'keys'), os.path.join(vmWareDir, 'user_data')), log=True) runSystemEx('cp -rv /usr/local/projects/clovr/shared ' + vmWareDir, log=True) fout = open(os.path.join(vmWareDir, 'start_clovr.vmx'), 'w') clovrConf = config.configFromMap( dict(version=options('general.version'))) for line in open('/usr/local/projects/clovr/start_clovr.vmx'): fout.write(config.replaceStr(line, clovrConf)) except Exception, err: errorPrint('Converting image failed. Error message:') errorPrint(str(err))
def createCluster(request):
    """Instantiates a skeleton of the cluster that will be imported.

    This cluster will later have the proper attributes and values
    populated from the source cluster.
    """
    baseConf = config.configFromStream(open('/tmp/machine.conf'))
    overlayConf = config.configFromMap(
        {'cluster.cluster_public_key': '/mnt/keys/devel1.pem.pub'},
        base=baseConf)
    skeleton = persist.Cluster(request.body['dst_cluster'],
                               request.body['user_name'],
                               request.body['cred_name'],
                               overlayConf)
    yield request.state.persistManager.saveCluster(skeleton)
    defer.returnValue(request)
def createDataFile(conf, mode, outFile='/tmp/machine.conf'):
    """
    Dump the full config (everything from the clovr config, for now) plus
    the instance config file to outFile in key=value form, appending the
    NODE_TYPE and general.ctype at the end.
    """
    conf = config.configFromMap({'config_loaded': True},
                                base=conf,
                                lazy=conf.lazy)
    fout = open(outFile, 'w')
    fout.write('[]\n')
    fout.writelines(['%s=%s\n' % (k, str(conf(k))) for k in conf.keys()])
    fout.write(open(conf('instance.config_file')).read())
    trailer = ['[]',
               'NODE_TYPE=' + ','.join(mode),
               'general.ctype=' + conf('general.ctype', default='UNKNOWN')]
    fout.write('\n'.join(trailer) + '\n')
    fout.close()
    return outFile
def createExecDataFile(conf, master, masterMachineConf):
    """
    Creates a exec data file as the perl start_cluster works

    This is very similar to createMasterDataFile, should be refactored a bit

    conf - cluster conf (template paths, keys, secure_tmp, general.ctype)
    master - dict providing the master's 'public_dns'/'private_dns'
    masterMachineConf - path to the master's machine.conf

    Returns the path of the generated exec_user_data.sh.
    """
    # Scratch file for the modified machine.conf
    outName = os.path.join('/tmp', str(time.time()))
    ##
    # Going to load the master machine.conf and modify node type
    masterConf = config.configFromStream(open(masterMachineConf), lazy=True)
    masterConf = config.configFromMap({'NODE_TYPE': EXEC_NODE},
                                      masterConf, lazy=True)
    fout = open(outName, 'w')
    fout.write('\n'.join([k + '=' + str(v)
                          for k, v in config.configToDict(masterConf).iteritems()]))
    fout.close()
    template = open(conf('cluster.exec_user_data_tmpl')).read()
    clusterPrivateKey = open(conf('cluster.cluster_private_key')).read()
    outf = []
    # Derive the cluster public key from the private key
    runSingleProgramEx('ssh-keygen -y -f ' + conf('cluster.cluster_private_key'),
                       outf.append,
                       None,
                       log=True)
    # On EC2 exec nodes reach the master through its private address
    if conf('general.ctype') == 'ec2':
        template = template.replace('<TMPL_VAR NAME=MASTER_DNS>', master['private_dns'])
    else:
        template = template.replace('<TMPL_VAR NAME=MASTER_DNS>', master['public_dns'])
    clusterPublicKey = ''.join(outf)
    template = template.replace('<TMPL_VAR NAME=CLUSTER_PRIVATE_KEY>', clusterPrivateKey)
    template = template.replace('<TMPL_VAR NAME=CLUSTER_PUBLIC_KEY>', clusterPublicKey)
    # ${ is escaped -- presumably so the embedded machine.conf is not
    # interpolated when the user-data script runs; TODO confirm
    template = template.replace('<TMPL_VAR NAME=MACHINE_CONF>',
                                open(outName).read().replace('${', '\\${'))
    os.remove(outName)
    # outf is reused here as the output path for the finished script
    outf = os.path.join(conf('general.secure_tmp'), 'exec_user_data.sh')
    open(outf, 'w').write(template)
    return outf
def createCluster(request):
    """Instantiates a skeleton of the cluster that will be imported.

    This cluster will later have the proper attributes and values
    populated from the source cluster.
    """
    persistManager = request.state.persistManager
    machineConf = config.configFromStream(open('/tmp/machine.conf'))
    cluster = persist.Cluster(
        request.body['dst_cluster'],
        request.body['user_name'],
        request.body['cred_name'],
        config.configFromMap(
            {'cluster.cluster_public_key': '/mnt/keys/devel1.pem.pub'},
            base=machineConf))
    yield persistManager.saveCluster(cluster)
    defer.returnValue(request)
def instantiateCredential(conf, cred):
    """
    Instantiate a Nimbus credential: converts the Nimbus cert/key into
    EC2-style ones on first use and returns (conf, Record) carrying the
    paths and the environment needed by the EC2 API tools.

    Raises Exception when the credential metadata lacks an ec2_url or the
    url has no port.
    """
    if not conf('config_loaded', default=False):
        conf = config.configFromMap({'config_loaded': True},
                                    base=config.configFromStream(open(conf('general.conf_file')),
                                                                 base=conf))
    certFile = os.path.join(conf('general.secure_tmp'), cred.name + '_cert.pem')
    keyFile = os.path.join(conf('general.secure_tmp'), cred.name + '_key.pem')
    # Only run the conversion when neither converted file exists yet
    if not os.path.exists(certFile) and not os.path.exists(keyFile):
        tmpCertFile = os.path.join(conf('general.secure_tmp'), cred.name + '_cert-tmp.pem')
        tmpKeyFile = os.path.join(conf('general.secure_tmp'), cred.name + '_key-tmp.pem')
        if 'ec2_url' not in cred.metadata:
            raise Exception('You must have an ec2_url')
        # The host:port of the ec2_url is also used for the java trust store
        parsedUrl = urlparse.urlparse(cred.metadata['ec2_url'])
        if ':' not in parsedUrl.netloc:
            raise Exception('Your URL must contain a port')
        host, port = parsedUrl.netloc.split(':')
        fout = open(tmpCertFile, 'w')
        fout.write(cred.cert)
        fout.close()
        fout = open(tmpKeyFile, 'w')
        fout.write(cred.pkey)
        fout.close()
        try:
            # Convert the cert/key; output is discarded, temp files are
            # always removed afterwards
            commands.runSystemEx(' '.join(['nimbusCerts2EC2.py',
                                           '--in-cert=' + tmpCertFile,
                                           '--out-cert=' + certFile,
                                           '--in-key=' + tmpKeyFile,
                                           '--out-key=' + keyFile,
                                           '--java-cert-dir=/tmp',
                                           '--java-cert-host=' + host,
                                           '--java-cert-port=' + port]) + ' > /dev/null 2>&1',
                                 log=True)
            commands.runSystemEx('chmod +r ' + keyFile)
        finally:
            os.unlink(tmpCertFile)
            os.unlink(tmpKeyFile)
    ec2Home = '/opt/ec2-api-tools-1.3-42584'
    newCred = func.Record(cert=certFile,
                          pkey=keyFile,
                          ec2Path=os.path.join(ec2Home, 'bin'),
                          env=dict(EC2_JVM_ARGS='-Djavax.net.ssl.trustStore=/tmp/jssecacerts',
                                   EC2_HOME=ec2Home,
                                   EC2_URL=cred.metadata['ec2_url']))
    # If the cluster public key exists, register it remotely as keyname||pubkey
    if os.path.exists(conf('cluster.cluster_private_key') + '.pub'):
        pubKey = open(conf('cluster.cluster_private_key') + '.pub').read().rstrip()
        ec2_control.addKeypair(newCred, '"' + conf('cluster.key') + '||' + pubKey + '"')
    return (conf, newCred)
def _credential():
    """
    Return the 'local' credential dict, sourced from /tmp/cred-info when it
    exists (tab-separated cert/key/ctype/metadata) or an empty placeholder
    otherwise.
    """
    if not os.path.exists('/tmp/cred-info'):
        return {'name': 'local',
                'desc': 'Local credential',
                'ctype': 'local',
                'cert': None,
                'pkey': None,
                'metadata': {},
                'conf': config.configFromMap({})}

    cert, pkey, ctype, metadata = open('/tmp/cred-info').read().split('\t')
    # metadata is a comma-separated key=value list (may be empty)
    metadataDict = metadata and dict([v.split('=', 1)
                                      for v in metadata.split(',')]) or {}
    return {'name': 'local',
            'desc': 'Local credential',
            'ctype': ctype,
            'cert': open(cert).read(),
            'pkey': open(pkey).read(),
            'metadata': metadataDict,
            'conf': config.configFromStream(open('/tmp/machine.conf'),
                                            lazy=True)}
def instantiateCredential(conf, cred):
    """
    Takes a credential and instanitates it.  It returns a Record that has
    all of the information users of that instantiated credential will need

    Fix: all file handles are now closed explicitly (the original relied on
    CPython refcounting to flush/close the cert and key files).
    """
    if not conf("config_loaded", default=False):
        fin = open(conf("general.conf_file", default=DEFAULT_CONFIG_FILE))
        try:
            conf = config.configFromMap(
                {"config_loaded": True},
                base=config.configFromStream(fin, base=conf),
            )
        finally:
            fin.close()

    certFile = os.path.join(conf("general.secure_tmp"), cred.name + "_cert.pem")
    keyFile = os.path.join(conf("general.secure_tmp"), cred.name + "_key.pem")

    def _syncFile(path, contents):
        # Rewrite path only when it is missing or its contents are stale
        needsWrite = True
        if os.path.exists(path):
            fin = open(path)
            try:
                needsWrite = fin.read() != contents
            finally:
                fin.close()
        if needsWrite:
            fout = open(path, "w")
            try:
                fout.write(contents)
            finally:
                fout.close()

    _syncFile(certFile, cred.cert)
    _syncFile(keyFile, cred.pkey)

    newCred = functional.Record(cert=certFile, pkey=keyFile, ec2URL=None, env={})
    if "ec2_url" in cred.metadata:
        return (conf, newCred.update(env=functional.updateDict(
            newCred.env, dict(EC2_URL=cred.metadata["ec2_url"]))))
    else:
        return (conf, newCred)
def handleWWWUpdatePipelineConfig(request):
    """
    Sets the config section of a pipeline exactly to what is given

    Input:
    { cluster: string
      user_name: string
      criteria: { key/value }
      config: { key/value }
    }

    Output:
    None

    Raises Error unless exactly one pipeline matches the criteria.
    """
    pipelines = yield request.state.pipelinePersist.loadAllPipelinesBy(request.body['criteria'],
                                                                       request.body['user_name'])
    if len(pipelines) == 1:
        # Replace the pipeline's config wholesale with the supplied mapping
        p = pipelines[0].update(config=config.configFromMap(request.body['config']))
        yield request.state.pipelinePersist.savePipeline(p)
    elif not pipelines:
        # Previously the zero-match case fell into the "more than one" error
        # below, producing a misleading message
        raise Error('No pipeline matches provided criteria: ' + repr(request.body['criteria']))
    else:
        raise Error('More than one pipelines matches provided criteria: ' + repr(request.body['criteria']))

    defer.returnValue(request.update(response=None))
def _realizePhantom(ctype, baseDir, phantom):
    """
    Realize a phantom description for the given cluster type.

    Looks up cluster.<ctype>.url, falling back to cluster.<ctype>.command and
    then cluster.ALL.command.  HTTP and S3 downloads are not implemented yet;
    anything else is treated as a command and executed.
    """
    merged = func.updateDict(phantom, {'ctype': ctype, 'base_dir': baseDir})
    phantomConfig = config.configFromMap(merged, lazy=True)
    download = str(phantomConfig('cluster.%s.url' % ctype,
                                 default=phantomConfig('cluster.%s.command' % ctype,
                                                       default=phantomConfig('cluster.ALL.command'))))
    if download.startswith('http://'):
        # HTTP download not implemented yet
        #_downloadHttp(ctype, baseDir, download, phantomConfig)
        return None
    if download.startswith('s3://'):
        ##
        # We might need to modify realizePhantom to take a conf that will have
        # our s3 credentials in it
        #_downloadS3(ctype, baseDir, download, phantomConfig)
        return None
    ##
    # It's a command:
    return _runCommand(ctype, baseDir, download, phantomConfig)
def handleWWWUpdatePipelineConfig(request):
    """
    Sets the config section of a pipeline exactly to what is given

    Input:
    { cluster: string
      user_name: string
      criteria: { key/value }
      config: { key/value }
    }

    Output:
    None

    Raises Error unless exactly one pipeline matches the criteria.
    """
    pipelines = yield request.state.pipelinePersist.loadAllPipelinesBy(
        request.body['criteria'], request.body['user_name'])

    if len(pipelines) == 1:
        # Replace the pipeline's config wholesale with the supplied mapping
        p = pipelines[0].update(
            config=config.configFromMap(request.body['config']))
        yield request.state.pipelinePersist.savePipeline(p)
    elif not pipelines:
        # Previously the zero-match case fell into the "more than one" error
        # below, producing a misleading message
        raise Error('No pipeline matches provided criteria: ' +
                    repr(request.body['criteria']))
    else:
        raise Error('More than one pipelines matches provided criteria: ' +
                    repr(request.body['criteria']))

    defer.returnValue(request.update(response=None))
def loadTagFile(fname):
    """
    Loads a tagfile, returns a config object of attributes.

    A companion .phantom file represents files that don't really exist; it is
    human-editable, so it is parsed with configFromStream.  A companion
    .metadata file is machine-written, so it is JSON.

    Resolution order:
      - tag file exists: return a config with 'files' layered over metadata
        (and phantom beneath that)
      - only the .phantom exists: return metadata over phantom, or the bare
        phantom when there is no .metadata
      - neither exists: raise MissingTagFileError
    """
    phantomFname = fname + '.phantom'
    metadataFname = fname + '.metadata'

    if os.path.exists(phantomFname):
        ##
        # Put everything under 'phantom', lazily, since data it can access is
        # added later
        phantom = configFromMap({'phantom_tag': True,
                                 'phantom': configToDict(configFromStream(open(phantomFname), lazy=True))},
                                lazy=True)
    else:
        phantom = configFromMap({})

    def _metadataConfig():
        # Layer the machine-written JSON metadata over the phantom config;
        # both branches below built this identical expression before
        return configFromMap({'metadata': json.loads(open(metadataFname).read())},
                             phantom,
                             lazy=True)

    if os.path.exists(fname):
        if os.path.exists(metadataFname):
            metadata = _metadataConfig()
        else:
            metadata = configFromMap({}, phantom)
        # Non-blank lines of the tag file are the member file names
        return configFromMap({'files': [f.strip() for f in open(fname) if f.strip()]},
                             metadata,
                             lazy=True)
    elif os.path.exists(phantomFname):
        # The original also retested `not os.path.exists(fname)` here, which is
        # always true on this branch
        if os.path.exists(metadataFname):
            return _metadataConfig()
        return phantom
    else:
        raise MissingTagFileError(fname)
# Manual test harness: subscribes a consumer to /queue/inbox and defines a
# looping sender over the same in-process stomp client setup.
from twisted.application import service
from twisted.internet import task
from igs.utils import config
from vappio_tx.mq import client


def printIt(mq, m):
    # Print and then acknowledge each message received on the queue
    print 'ACK', m.headers['message-id']
    print m.body
    mq.ack(m.headers['message-id'], headers={})

# Broker connection settings for a local default stomp broker
conf = config.configFromMap({'username': '',
                             'password': '',
                             'host': 'localhost',
                             'port': 61613})

application = service.Application('test')

# Consumer service
s2 = client.makeService(conf)
s2.setServiceParent(application)
s2.mqFactory.subscribe(lambda m : printIt(s2.mqFactory, m), '/queue/inbox', {})

# Producer service
s1 = client.makeService(conf)
s1.setServiceParent(application)


def loopingCall():
    # NOTE(review): this builds the LoopingCall but never calls lc.start(),
    # and loopingCall itself is never invoked/scheduled here -- the sibling
    # variant of this script calls lc.start(0).  Confirm whether this chunk
    # is truncated or the start was dropped by mistake.
    lc = task.LoopingCall(lambda : s1.mqFactory.send('/queue/inbox', {'ack-timeout': 60}, 'foo'))
def handleWWWListAddCredentials(request):
    """
    Adds a credential or lists existing credentials, depending on the request.

    With 'credential_name' plus description/ctype/metadata in the body, a new
    credential is created (cert/pkey given inline or as file paths) and an
    'addCredential' task name is returned on the reply queue.  Without
    'credential_name', a summary of all (optionally filtered) credentials is
    returned.  Anything else is an error reply plus UnknownRequestError.
    """
    if 'credential_name' in request.body and core.keysInDict(['credential_name', 'description', 'ctype', 'metadata'],
                                                             request.body):
        # Users can provide a file name or the actual contents of the
        # certificate.  Use context managers so the handles are closed
        # promptly (the original leaked them to the GC).
        if 'cert_file' in request.body:
            with open(request.body['cert_file']) as fin:
                cert = fin.read()
        else:
            cert = request.body['cert']

        if 'pkey_file' in request.body:
            with open(request.body['pkey_file']) as fin:
                pkey = fin.read()
        else:
            pkey = request.body['pkey']

        conf = config.configFromMap(request.body.get('conf', {}),
                                    base=config.configFromEnv())
        cred = persist.createCredential(name=request.body['credential_name'],
                                        desc=request.body['description'],
                                        ctype=request.body['ctype'],
                                        cert=cert,
                                        pkey=pkey,
                                        active=True,
                                        metadata=request.body['metadata'],
                                        conf=conf)
        taskName = yield tasks_tx.createTaskAndSave('addCredential', 1)
        instantiateAndSaveCredential(taskName, cred, request.state.credentialPersist)
        queue.returnQueueSuccess(request.mq, request.body['return_queue'], taskName)
        defer.returnValue(request)
    elif 'credential_name' not in request.body:
        credentials = request.state.credentialsCache.getAllCredentials()
        # Summarize every cached credential, filtered by 'credential_names'
        # when the caller provided one
        credentialsDicts = [{'name': name,
                             'description': c['cred_instance'].credential.desc,
                             'num_instances': len(c['instances']),
                             'ctype': c['cred_instance'].credential.getCType()}
                            for name, c in credentials.iteritems()
                            if ('credential_names' in request.body and name in request.body['credential_names']) or
                               'credential_names' not in request.body]
        queue.returnQueueSuccess(request.mq, request.body['return_queue'], credentialsDicts)
        defer.returnValue(request)
    else:
        queue.returnQueueError(request.mq, request.body['return_queue'], 'Unknown credential query')
        raise UnknownRequestError(str(request.body))
# Manual test harness: after a one-second delay, floods /queue/inbox with
# 'foo' messages as fast as the reactor allows.
from twisted.internet import reactor
from twisted.application import service
from twisted.internet import task
from igs.utils import config
from vappio_tx.mq import client

# Broker connection settings for a local default stomp broker
conf = config.configFromMap({'mq.username': '',
                             'mq.password': '',
                             'mq.host': 'localhost',
                             'mq.port': 61613})

application = service.Application('test')

s1 = client.makeService(conf)
s1.setServiceParent(application)


def loopingCall():
    # interval 0 == send continuously
    sender = task.LoopingCall(lambda : s1.mqFactory.send('/queue/inbox', 'foo', {'ack-timeout': 60}))
    sender.start(0)

# Give the service a second to connect before sending
reactor.callLater(1, loopingCall)
vals = {} ## # The order of the options below # var name, short option, long option, help message, function to apply, binary option for o in _iterBool(options): n, _s, l, _h, f, _b = o try: vals[n] = applyOption(getattr(ops, n), o, baseConf) except Exception, err: raise CLIError(l, err) if putInGeneral: vals = {'general': vals} return configFromMap(vals, baseConf), args ## # These are various functions to make building and verifying data easier def notNone(v): """ Throws MissingOptionError if v is None, otherwise returns v """ if v is None: raise MissingOptionError('Must provide a value for option') return v def defaultIfNone(d):
vals = {} ## # The order of the options below # var name, short option, long option, help message, function to apply, binary option for o in _iterBool(options): n, _s, l, _h, f, _b = o try: vals[n] = applyOption(getattr(ops, n), o, baseConf) except Exception, err: raise CLIError(l, err) if putInGeneral: vals = {'general': vals} return configFromMap(vals, baseConf), args ## # These are various functions to make building and verifying data easier def notNone(v): """ Throws MissingOptionError if v is None, otherwise returns v """ if v is None: raise MissingOptionError('Must provide a value for option') return v
from twisted.application import service from twisted.internet import task from igs.utils import config from vappio_tx.mq import client def printIt(mq, m): print "ACK", m.headers["message-id"] print m.body mq.ack(m.headers["message-id"], headers={}) conf = config.configFromMap({"username": "", "password": "", "host": "localhost", "port": 61613}) application = service.Application("test") s2 = client.makeService(conf) s2.setServiceParent(application) s2.mqFactory.subscribe(lambda m: printIt(s2.mqFactory, m), "/queue/inbox", {}) s1 = client.makeService(conf) s1.setServiceParent(application) def loopingCall(): lc = task.LoopingCall(lambda: s1.mqFactory.send("/queue/inbox", {"ack-timeout": 60}, "foo")) lc.start(0)
def loadLocalCluster(mq, state):
    """
    If local cluster is not present, load it

    Loads the persisted 'local' cluster, refreshing its master entry when
    MASTER_IP no longer matches; if no cluster is persisted, bootstraps one:
    saves a local credential, waits for it, builds a cluster record around
    MASTER_IP and persists it.  Returns the cluster via defer.returnValue.
    """
    def _credential():
        # /tmp/cred-info holds tab-separated cert path, pkey path, ctype and a
        # comma-separated k=v metadata string; absent means bare local cred
        if os.path.exists('/tmp/cred-info'):
            cert, pkey, ctype, metadata = open('/tmp/cred-info').read().split('\t')
            return {'name': 'local',
                    'desc': 'Local credential',
                    'ctype': ctype,
                    'cert': open(cert).read(),
                    'pkey': open(pkey).read(),
                    'metadata': metadata and dict([v.split('=', 1) for v in metadata.split(',')]) or {},
                    'conf': config.configFromStream(open('/tmp/machine.conf'), lazy=True)}
        else:
            return {'name': 'local',
                    'desc': 'Local credential',
                    'ctype': 'local',
                    'cert': None,
                    'pkey': None,
                    'metadata': {},
                    'conf': config.configFromMap({})}

    try:
        cluster = yield state.persistManager.loadCluster('local', None)

        # Rebuild the conf from the machine file over the environment
        baseConf = config.configFromStream(open('/tmp/machine.conf'),
                                           base=config.configFromEnv())
        conf = config.configFromMap({'config_loaded': True,
                                     'cluster.cluster_public_key': '/mnt/keys/devel1.pem.pub'},
                                    base=baseConf)

        # If the persisted master no longer matches MASTER_IP, synthesize a
        # fresh local master entry and update the cluster's config
        if (cluster.credName == 'local' and
            conf('MASTER_IP') not in [cluster.master['public_dns'],
                                      cluster.master['private_dns']]):
            master = dict(instance_id='local',
                          ami_id=None,
                          public_dns=conf('MASTER_IP'),
                          private_dns=conf('MASTER_IP'),
                          state='running',
                          key=None,
                          index=None,
                          instance_type=None,
                          launch=None,
                          availability_zone=None,
                          monitor=None,
                          spot_request_id=None,
                          bid_price=None)
            cluster = cluster.setMaster(master).update(config=conf)

        yield state.persistManager.saveCluster(cluster)
        defer.returnValue(cluster)
    except persist.ClusterNotFoundError:
        # No persisted local cluster: bootstrap credential + cluster
        credential = _credential()
        credTaskName = yield cred_client.saveCredential(credential['name'],
                                                        credential['desc'],
                                                        credential['ctype'],
                                                        credential['cert'],
                                                        credential['pkey'],
                                                        credential['metadata'],
                                                        credential['conf'])

        ## Wait for credential to be added.
        ## TODO: Should handle failure here
        yield tasks_tx.blockOnTask('localhost',
                                   'local',
                                   credTaskName)

        credClient = cred_client.CredentialClient('local',
                                                  mq,
                                                  state.conf)

        ## If it isn't a local ctype then we need to wait for
        ## the credential to come alive
        if credential['ctype'] != 'local':
            instances = yield credClient.listInstances()
        else:
            instances = []

        baseConf = config.configFromStream(open('/tmp/machine.conf'),
                                           base=config.configFromEnv())
        conf = config.configFromMap({'config_loaded': True,
                                     'cluster.cluster_public_key': '/mnt/keys/devel1.pem.pub'},
                                    base=baseConf)
        cluster = persist.Cluster('local',
                                  None,
                                  'local',
                                  conf)

        # Record an already-completed start task for the bootstrap cluster
        startTaskName = yield tasks_tx.createTaskAndSave('startCluster', 1)
        yield tasks_tx.updateTask(startTaskName,
                                  lambda t : t.setState(tasks_tx.task.TASK_COMPLETED).progress())

        cluster = cluster.update(startTask=startTaskName)

        # Prefer a live instance matching MASTER_IP; otherwise synthesize a
        # local master entry
        masterIp = cluster.config('MASTER_IP')
        masterIdx = func.find(lambda i : masterIp in [i['public_dns'], i['private_dns']],
                              instances)

        if masterIdx is not None:
            master = instances[masterIdx]
        else:
            master = dict(instance_id='local',
                          ami_id=None,
                          public_dns=masterIp,
                          private_dns=masterIp,
                          state='running',
                          key=None,
                          index=None,
                          instance_type=None,
                          launch=None,
                          availability_zone=None,
                          monitor=None,
                          spot_request_id=None,
                          bid_price=None)

        cluster = cluster.setMaster(master)
        cluster = cluster.setState(cluster.RUNNING)
        yield state.persistManager.saveCluster(cluster)
        defer.returnValue(cluster)
# Manual test harness: after a one-second delay, floods /queue/inbox with
# 'foo' messages as fast as the reactor allows.
from twisted.internet import reactor
from twisted.application import service
from twisted.internet import task
from igs.utils import config
from vappio_tx.mq import client

# Broker connection settings for a local default stomp broker
brokerSettings = {'mq.username': '',
                  'mq.password': '',
                  'mq.host': 'localhost',
                  'mq.port': 61613}
conf = config.configFromMap(brokerSettings)

application = service.Application('test')

s1 = client.makeService(conf)
s1.setServiceParent(application)


def loopingCall():
    # interval 0 == send continuously
    sender = task.LoopingCall(lambda: s1.mqFactory.send('/queue/inbox', 'foo', {'ack-timeout': 60}))
    sender.start(0)

# Give the service a second to connect before sending
reactor.callLater(1, loopingCall)