Example #1
0
 def __init__(self):
     '''
     Constructor.
     
     Ensure that we have a package directory present, then create the
     cache directory if it doesn't already exist.
     '''
     self._basedir = common.props['vmi.versionmgr.basedir']       
     if not os.path.exists(self._basedir):
         raise Exception("Package reference directory %s does not exist!" % self._basedir) 
     self._cachedir = common.props['vmi.versionmgr.packagedir']
     if not os.path.exists(self._cachedir):
         utils.mkdir_p(self._cachedir)
     
     self._filepatt = {
         'binary' : re.compile('(calpont-|)infinidb-ent-([0-9\-\.]*).x86_64.bin.tar.gz'),
         'deb' : re.compile('(calpont-|)infinidb-ent-([0-9\-\.]*).amd64.deb.tar.gz'),
         'rpm' : re.compile('(calpont-|)infinidb-ent-([0-9\-\.]*).x86_64.rpm.tar.gz'),
         'binary-datdup' : re.compile('calpont-infinidb-datdup-(.*).x86_64.bin.tar.gz'),
         # debian support is via the binary package for now
         'deb-datdup' : re.compile('calpont-infinidb-datdup-(.*).x86_64.bin.tar.gz'),
         'rpm-datdup' : re.compile('calpont-datdup-(.*).x86_64.rpm'),
         'binary-std' : re.compile('infinidb-([0-9\-\.]*).x86_64.bin.tar.gz'),
         'deb-std' : re.compile('infinidb-([0-9\-\.]*).amd64.deb.tar.gz'),
         'rpm-std' : re.compile('infinidb-([0-9\-\.]*).x86_64.rpm.tar.gz'),
     }
     self._verspatt = re.compile('(calpont-|)infinidb-(ent-|)(.*).(x86_64.bin|amd64.deb|x86_64.rpm).tar.gz')
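As a rough illustration of how these patterns are meant to be used, the sketch below (the filename is invented for the sketch) matches a tarball name against the enterprise rpm pattern and pulls out the version fragment:

import re

# hypothetical enterprise rpm tarball name, used only for this sketch
fname = 'calpont-infinidb-ent-4.0.0-0.x86_64.rpm.tar.gz'
patt = re.compile('(calpont-|)infinidb-ent-([0-9\-\.]*).x86_64.rpm.tar.gz')
m = patt.match(fname)
if m:
    # group(1) is the optional 'calpont-' prefix, group(2) the version, here '4.0.0-0'
    print 'version fragment: %s' % m.group(2)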
Example #2
0
 def _get_autooam_utils(self, version):
     '''
     Retrieves required autooam utilities that are not packaged in the
     standard edition.  At present this is only the healthcheck utility,
     but in the future it may expand to include others.
     
     @param version - version to retrieve.  Must be the name of a directory
                      in the package structure (ex. a directory name under
                      //calweb/shared/Iterations)
     @return        - Boolean value indicating success(True)/failure(False)
                      On failure, an error message will be logged.
     '''
     if version != 'Latest' and version[0] < '4':
         # for versions prior to 4.0 this doesn't make sense - we only
         # test enterprise and it is always in the package
         return True
     binent = self._haspkg(self._basedir, version, 'binary', False, True)
     if not binent:
         Log.error("Unable to extract Calpont/bin/healthcheck for version %s, no binary enterprise package found!" % version)
         return False
     cwd = os.getcwd()
     cachepath = '%s/%s' % (self._cachedir,binent[1]) 
     utils.mkdir_p(cachepath)
     os.chdir(cachepath)
     ret = [ os.system('tar xvzf %s Calpont/bin/healthcheck > /dev/null 2>&1' % binent[0]) >> 8 ]
     if ret[0] != 0:
         Log.error('tar xvzf %s Calpont/bin/healthcheck failed: %s' % (binent[0],ret[0]))
         return False
     
     os.chdir(cwd)
     if not os.path.exists('%s/Calpont/bin/healthcheck' % cachepath):
         Log.error("Unable to extract healthcheck to %s/Calpont/bin/healthcheck" % cachepath)
         return False
     
     return True
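A hedged sketch of how this helper could be driven, assuming an already constructed VersionManager instance and the module-level Log used above (the version string is one that appears in the unit test later in this section):

vm = VersionManager()
if not vm._get_autooam_utils('4.5.0-1'):
    Log.error('healthcheck could not be staged into the package cache')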
Example #3
0
    def retrieve(self, version, ptype):
        '''Locates the specified version and package type and caches it in the local package directory.
        
        @param version - version to retrieve.  Must be the name of a directory
                         in the package structure (ex. a directory name under
                         //calweb/shared/Iterations)
        @param ptype   - package type.  One of 'bin', 'deb', or 'rpm'

        @return        - a (build version, package file path) tuple.  The package
                         file is located under props['cluster.emvmgr.packagedir'].
                         For an explicitly requested version that cannot be found,
                         (None, None) is returned.

        May raise exceptions on other errors (e.g. an unsupported package type).
        '''        
        # unit test bailout
        if common.props['vmi.vagrantvmi.unit-test']:
            # this value is purely made up; in unit-test mode nothing currently uses this return
            return( 'build-99999', os.path.join( common.props['cluster.emvmgr.packagedir'], 'build-99999', 'infinidb-entmgr-1.0-1.el6.x86_64.rpm'))
        
        if version == 'Latest':
            buildver = self._get_current_build_version()
            fname, fsize, fdate = self._get_package_details(ptype)
            Log.info('Current build version is %s' % buildver)
            pkgpath = os.path.join( self.__packagedir, buildver )
            pkgfile = os.path.join( pkgpath, fname )
            if not os.path.isdir( pkgpath ):
                # the directory doesn't exist yet, so create it and download the package
                utils.mkdir_p( pkgpath )
            else:
                # the directory already exists - let's see if the file is already current
                if os.path.exists(pkgfile) and eval(fsize) == os.path.getsize(pkgfile):
                    Log.info('Package %s is already up-to-date' % pkgfile)
                    return (buildver, pkgfile)
            
            Log.info('Fetching %s into %s' % (fname, pkgpath))
            
            cmd = 'wget -O %s --http-user=%s --http-password=%s https://infinidb.atlassian.net/builds/artifact/EM-EM/shared/build-latest/%s/%s?os_authType=basic' %\
                ( pkgfile, self.__httpuser, self.__httppassword, self.__pkgmap[ptype], fname )
            
            rc, out, err = utils.syscall_log(cmd)

            return (buildver, pkgfile)
        else:
            # in this case we aren't going to contact the bamboo server, but 
            # rather use a previously downloaded version
            pkgpath = os.path.join( self.__packagedir, version )
            if not os.path.isdir( pkgpath ):
                Log.error('Error retrieving EM version %s, %s path does not exist' % (version, pkgpath))
                return (None, None)
            
            fileext = ptype if ptype != 'bin' else '.tar.gz'
            pkgs = glob.glob('%s/*%s' % (pkgpath, fileext))
            if len(pkgs) == 0:
                Log.error('No packages of type %s found in %s' % (ptype, pkgpath))
                return (None, None)
            else:
                if len(pkgs) > 1:
                    Log.warn('Multiple packages of type %s found in %s, using first : %s' % (ptype, pkgpath, pkgs))
                return (version, pkgs[0])
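A usage sketch of the two code paths above; the owning class is not shown in this snippet, so the EmVersionManager name and the explicit build directory are assumptions:

mgr = EmVersionManager()   # hypothetical owner; constructed as in the next example

# 'Latest' consults the build server and downloads only when the cached copy is stale
buildver, pkgfile = mgr.retrieve('Latest', 'rpm')

# an explicit version re-uses a previously downloaded directory; (None, None) signals failure
ver, pkg = mgr.retrieve('build-1234', 'rpm')   # 'build-1234' is a made-up directory name
if pkg is None:
    Log.error('no cached EM package for build-1234')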
Example #4
0
    def __init__(self):
        '''
        Constructor
        '''
        self.__packagedir = common.props['cluster.emvmgr.packagedir']
        self.__httpuser = common.props['cluster.emvmgr.httpuser']
        self.__httppassword = common.props['cluster.emvmgr.httppassword']

        self.__pkgmap = {
            'rpm' : 'infinidb-em.rpm',
            'deb' : 'infinidb-em.deb',
            'bin' : 'infinidb-em.tar.gz'
        }
        
        if not os.path.isdir(self.__packagedir):
            utils.mkdir_p(self.__packagedir)
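The constructor assumes these properties have already been loaded into common.props; a minimal, made-up configuration for illustration (only the keys come from the code above):

common.props['cluster.emvmgr.packagedir']   = '/var/cache/em-packages'
common.props['cluster.emvmgr.httpuser']     = 'build-reader'
common.props['cluster.emvmgr.httppassword'] = 'not-a-real-password'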
Example #5
0
    def _update_cache(self, refloc, path, pfile):
        '''
        Private method that updates the cached copy of the specified file.
        
        @param refloc   - fully specified path to package.
        @param path     - a relative path to where the package will be cached.
        @param pfile    - name of the package file
        
        Effectively, the operation is:
            cp -p <refloc> <cache-base>/<path>/.

        @return - relative path of the package file in self._cachedir
        
        @raises - Exception if failed to update cache
        '''
        Log.info('Updating local package cache for %s/%s' % (path,pfile))
        cachepath = self._cachedir + '/' + path
        utils.mkdir_p(cachepath)
        cachefile = cachepath + '/' + pfile
        cmd = 'cp -p %s %s' % (refloc, cachefile)
        ret = os.system(cmd) >> 8
        if ret != 0:
            raise Exception('Failed to copy %s to %s' % (refloc, cachefile))
        return '%s/%s' % (path, pfile)
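A hedged call sketch, invoked from inside the owning class; the reference location is made up, but the relative path/file layout matches the unit test later in this section:

relpath = self._update_cache(
    '/mnt/ref/4.0.0-0/packages/infinidb-ent-4.0.0-0.x86_64.bin.tar.gz',  # refloc (hypothetical)
    '4.0.0-0/packages',                                                  # path under the cache
    'infinidb-ent-4.0.0-0.x86_64.bin.tar.gz')                            # pfile
# relpath is now '4.0.0-0/packages/infinidb-ent-4.0.0-0.x86_64.bin.tar.gz'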
Example #6
0
    def _alloc_construct(self, cluster):
        '''create a new vmi instance.'''
        if not cluster.config().has_key('boxtype'):
            raise Exception("Vagrant cluster creation requires a boxtype in the ConfigSpec")

        # this hadoop validation check was formerly in configspec, but
        # moved to here to remove autooam/vagboxes dependency from
        # emtools/configspec
        if cluster.config().has_key('hadoop') and cluster.config()['hadoop']:
            if not vagboxes.hadoop_support(cluster.config()['boxtype']):
                raise Exception("Hadoop not supported on boxtype %s" % cluster.config()['boxtype'])

        self._subnet = self._salloc.alloc(cluster)
        
        # ensure the vagrant root directory exists, then create a unique
        # run directory for this cluster (it must not already exist)
        root = common.props['vmi.vagrantvmi.vagrantdir']
        utils.mkdir_p(root)        
        self._rundir = '%s/%s_%s' % (root, cluster.name(), str(cluster.id()))
        os.makedirs(self._rundir)

        # this is where we will write stdout and stderr for any calls
        # executed against this VMI
        self._outfile = "%s/%s.out" % (self._rundir, cluster.name())
        
        self._defmemsize = common.props['vmi.vagrantvmi.defmemsize']
        self._defcpus = common.props['vmi.vagrantvmi.defcpus']
        
        # do a sanity check to make sure we don't ask for a non-existent package
        # we only support enterprise=False for versions 4.0 and later
        entpkg = cluster.config()['enterprise']
        if ConfigSpec._version_greaterthan('4.0.0-0',cluster.config()['idbversion']):
            Log.info('resetting enterprise to True for version %s ' % cluster.config()['idbversion'])
            entpkg = True
            
        # make sure that our package exists
        vm = VersionManager()
        if cluster.config()['idbuser'] != 'root' or cluster.config()['binary']:
            ptype = 'binary'
            # set this to True in case it is not already set, so that the vagrant
            # file writer can depend on it being accurate
            cluster.config()['binary'] = True
        else:
            ptype = vagboxes.get_default_pkgtype(cluster.config()['boxtype'])
        self._pkgfile = vm.retrieve(cluster.config()['idbversion'], ptype, enterprise=entpkg)
        
        # handle the upgrade version if the user specified it
        upgfile = None
        upgvers = None
        if cluster.config()['upgrade']:
            upgfile = vm.retrieve(cluster.config()['upgrade'], ptype, enterprise=entpkg)
            upgvers = vm.get_pkg_version(upgfile)
        self._upgfile = upgfile
            
        # handle datdup package if the user requested it - note that the datdup
        # package is only relevant prior to version 4.0
        datduppkgfile = None
        if cluster.config()['datdup'] and not ConfigSpec._version_greaterthan(cluster.config()['idbversion'],'4.0.0-0'):
            datduppkgfile = vm.retrieve(cluster.config()['idbversion'], ptype, datdup=True)
        
        self._alloc_machines()
        
        h = PostConfigureHelper()
        self._pfile  = '%s/postconfigure.in' % self._rundir
        h.write_input(self._pfile, cluster, ptype)
 
        # @bug 5990: don't need to copy public key.  vagrant
        # public access should already be setup when cluster
        # was instantiated.
        # copy public key to shared directory so that vagrant can access
        #utils.mkdir_p("%s/.ssh" % self._rundir)
        #shutil.copy( '%s.pub' % common.props['emtools.test.sshkeyfile'],
        #    '%s/.ssh/public_key' % self._rundir)

        self._vfile = self._rundir + '/Vagrantfile'
        vfile = VagrantFileWriter(
                    cluster, 
                    self._pkgfile,
                    vm.get_pkg_version(self._pkgfile),
                    datduppkgfile, 
                    self._upgfile,
                    upgvers,
                    self._subnet,
                    self._rundir)
        vfile.writeVagrantFile( self._vfile )
        cluster.vmi(self)

        # For external DBRoot storage: delete/recreate dataN directories
        # locally, to be NFS mounted for use on each PM
        if cluster.config()['storage'] == 'external':
            rootCount = cluster.config().total_dbroot_count()
            for i in range( rootCount ):
                dbRootDir = '%s/data%d' % (self._rundir, i+1)
                if os.path.exists( dbRootDir ):
                    shutil.rmtree( dbRootDir )
                os.mkdir( dbRootDir )
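For reference, a minimal sketch of the cluster.config() keys this method reads; the key names come from the code above, all values are invented:

cfg = {
    'boxtype'    : 'cal-centos6',   # hypothetical box name; must be known to vagboxes
    'hadoop'     : False,
    'enterprise' : True,
    'idbversion' : '4.0.0-0',
    'idbuser'    : 'root',
    'binary'     : False,           # forced to True when idbuser is not root
    'upgrade'    : None,            # optional upgrade version
    'datdup'     : False,           # only meaningful before 4.0
    'storage'    : 'internal',      # 'external' triggers the dataN directory setup
}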
Example #7
0
def main(argv):
    """
    main function
    """

    try:
        opts, args = getopt.getopt(argv, "hvi", ["json="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    # defaults
    use_stdin = False
    json_file = ""

    # parse command line arguments
    for o, a in opts:
        if o == "-h":
            usage()
            sys.exit(2)
        elif o == "-v":
            print "installdatabase.py Version: %s" % version
            sys.exit(1)
        elif o == "-i":
            use_stdin = True
        elif o == "--json":
            json_file = a
        else:
            print "unsupported option: %s" % o
            usage()
            sys.exit(2)

    if (use_stdin and json_file) or (not use_stdin and not json_file):
        print "ERROR: Must specify exactly one of -i or --json"
        usage()
        sys.exit(2)

    try:
        # load input into json string
        jsonstr = None
        if use_stdin:
            lines = sys.stdin.readlines()
            jsonstr = "".join(lines)
        elif json_file:
            f = open(json_file)
            lines = f.readlines()
            jsonstr = "".join(lines)

        Log = logutils.getLogger("installdatabase")
        req = installreq.InstallReq(jsonstr)
        Log.info("request: %s" % req.json_dumps())

        # construct configspec
        cfgspecbld = ConfigSpecBuilder(req)
        cfgspec, machines = cfgspecbld.run()

        # determine the appropriate package file to be installed
        emVM = EmVersionManager()
        pkgfile = emVM.retrieve(cfgspec["idbversion"], "binary")
        Log.info("pkgfile: %s" % pkgfile)

        # create runtime directory
        root = common.props["emtools.playbookmgr.cluster_base"]
        rundir = "%s/%s" % (root, cfgspec["name"])
        if not os.path.exists(rundir):
            mkdir_p(rundir)

        # create the cluster
        emCluster = EmCluster(cfgspec["name"], cfgspec, rundir, pkgfile, machines)

        # create the postconfig response file
        h = PostConfigureHelper()
        pfile = "%s/postconfigure.in" % rundir
        h.write_input(pfile, emCluster, "binary")

        # perform the db install
        rc, results, out, err = emCluster.run_install_recipe()

        reply_dict = {
            "cluster_name": cfgspec["name"],
            "playbook_info": {
                "name": emCluster.get_playbook_filename(),
                "hostspec": emCluster.get_inventory_filename(),
                "extravars": emCluster.get_extra_vars(),
            },
            "rc": rc,
            "stdout": out,
            "stderr": err,
            "recap_info": results,
        }

        # test stub output
        # reply_dict = {
        #    'cluster_name' : cfgspec['name'],
        #    'playbook_info': {
        #        'name'     : 'test_name',
        #        'hostspec' : 'test_hostspec',
        #        'extravars': 'test_extravars'
        #    },
        #    'rc'           : 0,
        #    'stdout'       : 'test_stdout',
        #    'stderr'       : 'test_stderr',
        # }
        preply = playbookreply.PlaybookReply_from_dict(reply_dict)

        Log.info("reply: %s" % preply.json_dumps())
        print preply

        return 0

    except:
        import traceback

        print errormsg.ErrorMsg_from_parms(msg=json.dumps(traceback.format_exc()))
        sys.exit(1)
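Given the option handling above, the script is driven in one of two mutually exclusive ways (the request file path is made up):

python installdatabase.py --json /tmp/install_request.json     # read the request from a file
cat /tmp/install_request.json | python installdatabase.py -i    # or pipe the request on stdin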
Example #8
0
    def testBasic(self):        
        
        # standard list_available with no type spec
        v = VersionManager()
        self.assertEqual( sorted(v.list_available()), [ '2.2.11-1', '3.5.1-5', '4.0.0-0' ] )
        
        # standard retrieve from reference with no cache
        pfile = v.retrieve('3.5.1-5','deb')
        self.assertEqual( pfile, '3.5.1-5/packages/calpont-infinidb-ent-3.5.1-5.amd64.deb.tar.gz')
        self.assertEqual( os.path.getmtime('%s/%s' % (self._cachepath, pfile)), 
                          os.path.getmtime('%s/%s' % (self._pkgpath, pfile)))

        # retrieve again - this time should come from cache
        pfile = v.retrieve('3.5.1-5','deb')
        self.assertEqual( pfile, '3.5.1-5/packages/calpont-infinidb-ent-3.5.1-5.amd64.deb.tar.gz')
        self.assertEqual( v.get_pkg_version(pfile), '3.5.1-5' )
        
        # the 'Latest' directory in reference has a bin file
        self.assertEqual( sorted(v.list_available('binary')), [ '2.2.11-1', '3.5.1-5', '4.0', '4.0.0-0', '4.0.0-1', '4.5.0-1', 'Latest' ] )

        with self.assertRaisesRegexp(Exception,"Unsupported package type notype"):
            v.list_available('notype')

        with self.assertRaisesRegexp(Exception,"Unsupported package type newtype"):
            v.retrieve('3.5.1-5','newtype')
            
        # now create something that will be in cache only
        newpath = '%s/Unit-test/packages' % self._cachepath
        utils.mkdir_p(newpath) 
        os.system('touch %s/calpont-infinidb-ent-19.0.1-1.x86_64.rpm.tar.gz' % newpath)
        self.assertEqual( sorted(v.list_available('rpm')), [ '2.2.11-1', '3.5.1-5', '4.0.0-0', 'Unit-test' ] )

        pfile = v.retrieve('Unit-test','rpm')
        self.assertEqual( pfile, '19.0.1-1/packages/calpont-infinidb-ent-19.0.1-1.x86_64.rpm.tar.gz')
        self.assertEqual( v.get_pkg_version(pfile), '19.0.1-1' )
           
        # now try to retrieve some files that don't exist
        with self.assertRaisesRegexp(Exception,"Unable to locate enterprise package for version Unit-test"):
            v.retrieve('Unit-test','deb')

        with self.assertRaisesRegexp(Exception,"Unable to locate enterprise package for version non-existent"):
            v.retrieve('non-existent','deb')

        with self.assertRaisesRegexp(Exception,"Unable to locate enterprise package for version Latest"):
            v.retrieve('Latest','rpm')

        pfile = v.retrieve('Latest','binary')
        self.assertEqual( pfile, '9.9-9/packages/infinidb-ent-9.9-9.x86_64.bin.tar.gz')
        self.assertEqual( v.get_pkg_version(pfile), '9.9-9' )
        refpath = 'Latest/packages/infinidb-ent-9.9-9.x86_64.bin.tar.gz'
        self.assertEqual( os.path.getmtime('%s/%s' % (self._cachepath, pfile)), 
                          os.path.getmtime('%s/%s' % (self._pkgpath, refpath)))
        
        # now we want to force a situation where cached and local copies appear out of sync
        os.system('touch %s/%s' % (self._cachepath, pfile))
        self.assertNotEqual( os.path.getmtime('%s/%s' % (self._cachepath, pfile)), 
                          os.path.getmtime('%s/%s' % (self._pkgpath, refpath)))
        pfile = v.retrieve('Latest','binary')
        self.assertEqual( pfile, '9.9-9/packages/infinidb-ent-9.9-9.x86_64.bin.tar.gz')
        self.assertEqual( os.path.getmtime('%s/%s' % (self._cachepath, pfile)), 
                          os.path.getmtime('%s/%s' % (self._pkgpath, refpath)))

        pfile = v.retrieve('4.0.0-0','binary')
        self.assertEqual( pfile, '4.0.0-0/packages/infinidb-ent-4.0.0-0.x86_64.bin.tar.gz')
        self.assertEqual( os.path.getmtime('%s/%s' % (self._cachepath, pfile)), 
                          os.path.getmtime('%s/%s' % (self._pkgpath, pfile)))
        self.assertEqual( v.get_pkg_version(pfile), '4.0.0-0' )
Example #9
0
def main(argv):
    '''
    main function
    '''

    try:
        opts, args = getopt.getopt(argv, "hvi", ['json='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    # defaults
    use_stdin = False
    json_file = ''

    # parse command line arguments
    for o, a in opts:
        if o == '-h':
            usage()
            sys.exit(2)
        elif o == '-v':
            print 'installdatabase.py Version: %s' % version
            sys.exit(1)
        elif o == '-i':
            use_stdin = True
        elif o == '--json':
            json_file = a
        else:
            print 'unsupported option: %s' % o
            usage()
            sys.exit(2)

    if (use_stdin and json_file) or (not use_stdin and not json_file):
        print 'ERROR: Must specify exactly one of -i or --json'
        usage()
        sys.exit(2)

    try:
        # load input into json string
        jsonstr = None
        if use_stdin:
            lines = sys.stdin.readlines()
            jsonstr = ''.join(lines)
        elif json_file:
            f = open(json_file)
            lines = f.readlines()
            jsonstr = ''.join(lines)

        Log = logutils.getLogger('installdatabase')
        req = installreq.InstallReq(jsonstr)
        Log.info('request: %s' % req.json_dumps())

        # construct configspec
        cfgspecbld = ConfigSpecBuilder(req)
        cfgspec, machines = cfgspecbld.run()

        # determine the appropriate package file to be installed
        emVM = EmVersionManager()
        pkgfile = emVM.retrieve(cfgspec['idbversion'], 'binary')
        Log.info('pkgfile: %s' % pkgfile)

        # create runtime directory
        root = common.props['emtools.playbookmgr.cluster_base']
        rundir = '%s/%s' % (root, cfgspec['name'])
        if not os.path.exists(rundir):
            mkdir_p(rundir)

        # create the cluster
        emCluster = EmCluster(cfgspec['name'], cfgspec, rundir, pkgfile,
                              machines)

        # create the postconfig response file
        h = PostConfigureHelper()
        pfile = '%s/postconfigure.in' % rundir
        h.write_input(pfile, emCluster, 'binary')

        # perform the db install
        rc, results, out, err = emCluster.run_install_recipe()

        reply_dict = {
            'cluster_name': cfgspec['name'],
            'playbook_info': {
                'name': emCluster.get_playbook_filename(),
                'hostspec': emCluster.get_inventory_filename(),
                'extravars': emCluster.get_extra_vars()
            },
            'rc': rc,
            'stdout': out,
            'stderr': err,
            'recap_info': results
        }

        # test stub output
        #reply_dict = {
        #    'cluster_name' : cfgspec['name'],
        #    'playbook_info': {
        #        'name'     : 'test_name',
        #        'hostspec' : 'test_hostspec',
        #        'extravars': 'test_extravars'
        #    },
        #    'rc'           : 0,
        #    'stdout'       : 'test_stdout',
        #    'stderr'       : 'test_stderr',
        #}
        preply = playbookreply.PlaybookReply_from_dict(reply_dict)

        Log.info('reply: %s' % preply.json_dumps())
        print preply

        return 0

    except:
        import traceback
        print errormsg.ErrorMsg_from_parms(
            msg=json.dumps(traceback.format_exc()))
        sys.exit(1)