def _run_chef_recipe(self, recipe, role='pm1'):
    """Run the proper recipe from the autooam cookbook."""
    # this is a little ugly.  Currently we use two base boxes that came from
    # Vagrant whereas the rest are built by veewee.  Most of the veewee boxes
    # put ruby in one place whereas the Vagrant boxes put it somewhere else.
    # The centos58 veewee box is unique too.
    # TODO: this doesn't work with our new boxes.  They don't like/need the
    # ruby part out in front of chef-solo.  When chef is pruned, this should
    # also be pruned.
    if (vagboxes.get_os_family(self._config['boxtype']) == 'ubuntu' and
            float(vagboxes.get_os_version(self._config['boxtype'])) < 14.04):
        ruby_path = '/opt/vagrant_ruby/bin'
    elif self._config['boxtype'] == 'cal-centos58':
        ruby_path = '/usr/local/bin'
    else:
        ruby_path = '/usr/bin'
    cmd_ = 'sudo %s/ruby %s/chef-solo -c /tmp/vagrant-chef-*/solo.rb -j /tmp/vagrant-chef-*/dna.json -o %s' % \
        (ruby_path, ruby_path, recipe)
    return self.shell_command(role, cmd_)
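# For illustration only (not captured from a real run): on an ubuntu 12.04 box
# the method above would issue roughly
#
#   sudo /opt/vagrant_ruby/bin/ruby /opt/vagrant_ruby/bin/chef-solo \
#       -c /tmp/vagrant-chef-*/solo.rb -j /tmp/vagrant-chef-*/dna.json \
#       -o autooam::package_install
#
# via shell_command() on the pm1 role.  The recipe name is whatever the caller
# passes in; autooam::package_install is just an example value.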
def write_config(self, dir_='.', prop='hadoop.properties', nodes='nodes-byon.yaml'):
    '''Writes the two configuration files to the specified directory.'''
    pf = open('%s/%s' % (dir_, prop), 'w')
    pf.write('whirr.cluster-name=%s\n' % self._cluster.name())
    pf.write('whirr.cluster-user=vagrant\n')
    pf.write('whirr.instance-templates=%s\n' % self._cluster.config()['hadoop']['instance-templates'])
    if self._cluster.config()['hadoop'].has_key('templates-namenode'):
        pf.write('whirr.templates.hadoop-namenode+hadoop-jobtracker.byon-instance-ids=%s\n' %
                 self._cluster.config()['hadoop']['templates-namenode'])
    if self._cluster.config()['hadoop'].has_key('templates-datanode'):
        pf.write('whirr.templates.hadoop-datanode+hadoop-tasktracker.byon-instance-ids=%s\n' %
                 self._cluster.config()['hadoop']['templates-datanode'])
    pf.write('whirr.service-name=byon\n')
    pf.write('whirr.provider=byon\n')
    pf.write('jclouds.byon.endpoint=file://%s/%s\n' % (dir_, nodes))
    pf.write('whirr.java.install-function=install_oracle_jdk6\n')
    if self._cluster.config()['hadoop'].has_key('version'):
        pf.write('whirr.hadoop.version=%s\n' % self._cluster.config()['hadoop']['version'])
        pf.write('whirr.hadoop.tarball.url=http://archive.apache.org/dist/hadoop/core/hadoop-${whirr.hadoop.version}/hadoop-${whirr.hadoop.version}.tar.gz\n')
    else:
        pf.write('whirr.hadoop.install-function=install_cdh_hadoop\n')
        pf.write('whirr.hadoop.configure-function=configure_cdh_hadoop\n')
    pf.close()

    nf = open('%s/%s' % (dir_, nodes), 'w')
    nf.write('nodes:\n')
    for m in sorted(self._cluster.machines()):
        if not m == 'em1':  # exclude dedicated EM server
            mach = self._cluster.machine(m)
            nf.write('    - id: %s\n' % m)
            nf.write('      hostname: %s\n' % mach['ip'])
            nf.write('      os_arch: x86_64\n')
            boxtype = self._cluster.config()['boxtype']
            nf.write('      os_family: %s\n' % vagboxes.get_os_family(boxtype))
            nf.write('      os_description: %s\n' % vagboxes.get_description(boxtype))
            nf.write('      os_version: %s\n' % vagboxes.get_os_version(boxtype))
            nf.write('      group: vagrant\n')
            nf.write('      username: vagrant\n')
            nf.write('      credential_url: file://%s/insecure_private_key\n' % common.props['vmi.vagrantvmi.vagrantroot'])
            nf.write('      sudo_password:\n')
    nf.close()
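# For reference, a sketch of the hadoop.properties file the method above emits
# for a CDH install.  The cluster name and instance-templates value below are
# illustrative assumptions, not taken from a real cluster config:
#
#   whirr.cluster-name=mycluster
#   whirr.cluster-user=vagrant
#   whirr.instance-templates=1 hadoop-namenode+hadoop-jobtracker,1 hadoop-datanode+hadoop-tasktracker
#   whirr.service-name=byon
#   whirr.provider=byon
#   jclouds.byon.endpoint=file://./nodes-byon.yaml
#   whirr.java.install-function=install_oracle_jdk6
#   whirr.hadoop.install-function=install_cdh_hadoop
#   whirr.hadoop.configure-function=configure_cdh_hadoop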
def testBoxes(self):
    self.assertEqual(vagboxes.get_default_pkgtype('cal-precise64'), 'deb')
    self.assertEqual(vagboxes.get_os_family('cal-precise64'), 'ubuntu')
    self.assertEqual(vagboxes.get_os_version('cal-precise64'), '12.04')
    self.assertEqual(vagboxes.get_description('cal-precise64'), 'Ubuntu 12.04.1 LTS (Precise Pangolin)')
def run_install_recipe(self, cb=None):
    """Run the proper install recipe from the autooam cookbook."""
    ret = 0
    # skip whirr hadoop install if we are running in unittest mode
    if not common.props['vmi.vagrantvmi.unit-test']:
        if self._config['hadoop']:
            if cb:
                cb('Whirr Hadoop install Step')
            Log.info('Executing whirr launch-cluster for Hadoop')
            w = WhirrConfigWriter(self)
            w.write_config(self._vmi._rundir)
            owd = os.getcwd()
            os.chdir(self._vmi._rundir)
            cmd = '%s/whirr launch-cluster --config hadoop.properties --private-key-file %s/insecure_private_key' % \
                (common.props['cluster.cluster.whirrdir'], common.props['vmi.vagrantvmi.vagrantroot'])
            ret = utils.syscall_log(cmd, self._vmi._outfile)[0]
            os.chdir(owd)

            # we also need to install libhdfs before moving on to the InfiniDB install
            if ret == 0:
                if cb:
                    cb('Chef autooam::hadoop_postinstall Step')
                # must be done on every InfiniDB node!
                # TODO: consider moving this to an ansible playbook
                for m in self._machines.keys():
                    if not m == "em1":
                        if (vagboxes.get_os_family(self._config['boxtype']) == 'ubuntu' or
                                vagboxes.get_os_family(self._config['boxtype']) == 'debian'):
                            cmd = 'sudo apt-get -y install libhdfs0'
                        else:
                            cmd = 'sudo yum -y install hadoop-libhdfs'
                        ret = self.shell_command(m, cmd)
                        if ret != 0:
                            break

            if ret != 0:
                Log.error('There were errors during Hadoop installation, did not attempt InfiniDB install')
                return ret

    if cb:
        cb('InfiniDB install Step')
    Log.info('Performing InfiniDB install')
    recipe = 'autooam::binary_install' if self._config['binary'] else 'autooam::package_install'

    # Use EM to install InfiniDB
    if self._emapi and common.props['cluster.cluster.use_em_for_dbinstall']:
        # Install EM (if applicable)
        ret = self._em_install(cb)
        if ret != 0:
            return ret
        # install db step
        ret = self._em_installdb(cb)
    # Otherwise install InfiniDB by running a chef recipe or an ansible playbook
    else:
        if self._chefmode:
            ret = self._run_chef_recipe(recipe)
        else:
            ret = self._run_ansible_playbook(recipe)

    if ret != 0:
        Log.error('There were errors installing InfiniDB')
        return ret

    if self._emapi:
        # Install and attach to EM (if applicable)
        ret = self._em_install(cb)
        if ret != 0:
            return ret
        ret = self._em_attach(cb=cb)

    return ret