Example 1
def initiateSetup():
  verbose = fabric_config['fabric::verbose']
  ### Print stdout and error messages only on verbose 
  if verbose == False:
    output.update({'running': False, 'stdout': False, 'stderr': False})

  log("Copying the files to lb node")
  dir =  fabric_config['fabric::directory']
  base_dir=os.path.basename(dir)
  tarfile='%s-%s.tar.gz' % (base_dir,int(time.time()))
  os.system('cd %s; tar zcf %s %s' % (os.path.dirname(dir),tarfile,base_dir))
  ## Copy files to lb
  execute(putFiles,hosts=env.hosts,files = {os.path.dirname(dir) + '/' + tarfile: '/tmp'})

  ### Untar and copy fab file and ssh private key and puppet code to lb
  run('cd /tmp; tar  -zxf %s; cp -r /tmp/%s/jiocloud_puppet_builder/resource_spawner/{fabfile.py,fabric.yaml} ~/; cp -f /tmp/%s/id_rsa ~/.ssh' % (tarfile,base_dir,base_dir))
  sudo ('cp -r /tmp/%s/jiocloud_puppet_builder /var/puppet' % base_dir) 

  log("Setting up the system on %s" % env.host)
  log("Run userdata.sh on lb node")
  sudo("bash /tmp/%s/jiocloud_puppet_builder/resource_spawner/userdata.sh  -r lb" % base_dir)

  log("Run fab from lb1 to setup the cloud: %s" % fabric_config['project'])

  ## Enable output - it is required to print inner fab messages
  output.update({'running': True, 'stdout': True, 'stderr': True})

  run('fab -f ~/fabfile  -i ~ubuntu/.ssh/id_rsa fabConfig:/tmp/%s/jiocloud_puppet_builder/resource_spawner/fabric.yaml,user_yaml=/tmp/%s/%s setup'  % (base_dir,base_dir,fabric_config['fabric::user_yaml']))
  os.system('rm -fr %s' % dir)
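
The putFiles helper used via execute(putFiles, hosts=..., files=...) is not part of this listing. A minimal sketch of what it presumably does, built on Fabric's put (name and behaviour assumed):

from fabric.api import put, task

# Hypothetical helper (the real one is defined elsewhere in this fabfile):
# copy each local path in `files` to its remote destination on the current host.
@task
def putFiles(files):
    for local_path, remote_path in files.items():
        put(local_path, remote_path)
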
Example 2
 def teardown(self):
     env.clear()  # In case tests set env vars that didn't exist previously
     env.update(self.previous_env)
     output.update(self.previous_output)
     shutil.rmtree(self.tmpdir)
     # Clear Fudge mock expectations...again
     clear_expectations()
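
These teardown methods restore Fabric state captured before each test. A minimal sketch of the matching setup, assuming the attribute names used above and a hypothetical test base class:

import copy
import tempfile

from fabric.state import env, output

class FabricTest(object):  # hypothetical test base class
    def setup(self):
        # Snapshot Fabric's global env and output dicts so teardown() can
        # restore them, and create the tmpdir that teardown() removes.
        self.previous_env = copy.deepcopy(dict(env))
        self.previous_output = dict(output)
        self.tmpdir = tempfile.mkdtemp()
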
Example 3
def initiateSetup(dir,verbose='no'):
  ### Print stdout and error messages only on verbose 
  if verbose.upper() == 'NO':
    output.update({'running': False, 'stdout': False, 'stderr': False})

  log("Copying the files to lb node")
  ## Make a tar of the directory and send to remote
  base_dir=os.path.basename(dir)
  os.system('cd %s; tar zcf  %s.tar.gz %s' % (os.path.dirname(dir),base_dir,base_dir))

  ## Copy files to lb
  execute(putFiles,hosts=env.host,files = {dir + ".tar.gz": '/tmp'})

  ### Untar and copy fab file and ssh private key and puppet code to lb
  run('cd /tmp; tar  -zxf %s.tar.gz; cp -r /tmp/%s/jiocloud_puppet_builder/resource_spawner/fabfile.py ~/; cp -f /tmp/%s/id_rsa ~/.ssh' % (base_dir,base_dir,base_dir))
  sudo ('cp -r /tmp/%s/jiocloud_puppet_builder /var/puppet' % base_dir) 

  log("Setting up the system on %s" % env.host)

  log("Run userdata.sh on lb node")
  sudo("bash /tmp/%s/jiocloud_puppet_builder/resource_spawner/userdata.sh  -r lb" % base_dir)

  log("Run fab from lb1 to setup the cloud: %s" % env.project)

  ## Enable output - it is required to print inner fab messages
  output.update({'running': True, 'stdout': True, 'stderr': True})

  run('fab -f ~/fabfile  -i ~ubuntu/.ssh/id_rsa --set cpservers=%s,ocservers=%s,stservers=%s,dbservers=%s,lbservers=%s setup:/tmp/%s,verbose:%s,verify=%s'  % (env.cpservers,env.ocservers,env.stservers,env.dbservers,env.lbservers, base_dir,verbose,False))
Example 4
def _set_output(groups, which):
    """
    Refactored subroutine used by ``hide`` and ``show``.
    """
    # Preserve original values, pull in new given value to use
    previous = {}
    for group in output.expand_aliases(groups):
        previous[group] = output[group]
        output[group] = which
    # Yield control
    yield
    # Restore original values
    output.update(previous)
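
Fabric's hide and show context managers are thin wrappers around a generator like _set_output. A minimal sketch of how they can be derived from it and used (the shipped implementation differs slightly):

from contextlib import contextmanager

# hide() switches the named output groups off for the duration of a with-block,
# show() switches them on; both restore the previous values on exit.
hide = contextmanager(lambda *groups: _set_output(groups, False))
show = contextmanager(lambda *groups: _set_output(groups, True))

# Usage: suppress the "run:" line and remote stdout for a single command.
# with hide('running', 'stdout'):
#     run('uname -a')
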
Example 5
def checkAll(verbose='no'):
    """check all components and return on success, optional argument, 'verbose' for verbose output"""

    if verbose.upper() == 'NO':
        output.update({'running': False, 'stdout': False, 'stderr': False})


#  rv = {'checkCephOsd': 1, 'checkCephStatus': 1, 'checkNova': 1, 'checkCinder': 1, 'getOSControllerProcs': 1 }
    rv = {
        'checkCephOsd': 1,
        'checkCephStatus': 1,
        'checkNova': 1,
        'checkCinder': 1,
    }
    status = 1
    timeout = 5
    initial_prun = 1
    maxAttempts = 40
    attempt = 1
    log("Start checking the cluster")
    log("Checking the services")
    success = 0
    while (attempt < maxAttempts):
        attempt += 1
        time.sleep(timeout)
        if status != 0:
            log("System is not up... checking....")
            status = 0
            for key, val in rv.items():
                if val != 0:
                    log("Executing %s" % key)
                    rv[key] = execute(key).values()[0]
                    if rv[key] != 0:
                        status = 1
        else:
            attempt = maxAttempts
            success = 1

    if success == 1:
        log("The openstack cloud is up and running")
        return 0
    else:
        log("Something failed, exiting")
        return 100
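
The execute(key).values()[0] idiom works because Fabric's execute returns a dict mapping each host the task ran on to its return value; with a single-host check task, the only value is that host's status. For illustration (the host address is hypothetical):

results = execute('checkNova')            # e.g. {'10.1.0.11': 0}
nova_ok = list(results.values())[0] == 0
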
Example 6
def checkAll(verbose='no'):
  """check all components and return on success, optional argument, 'verbose' for verbose output""" 

  if verbose.upper() == 'NO':
    output.update({'running': False, 'stdout': False, 'stderr': False})

#  rv = {'checkCephOsd': 1, 'checkCephStatus': 1, 'checkNova': 1, 'checkCinder': 1, 'getOSControllerProcs': 1 }
  rv = {'checkCephOsd': 1, 'checkCephStatus': 1, 'checkNova': 1, 'checkCinder': 1, }
  status = 1
  timeout = 5
  initial_prun = 1
  maxAttempts = 40
  attempt = 1
  log("Start checking the cluster")
  log("Checking the services")
  success = 0
  while ( attempt < maxAttempts ):
    attempt += 1
    time.sleep(timeout)
    if status != 0:
      log("System is not up... checking....")
      status = 0 
      for key,val in rv.items():
        if val != 0:
          log( "Executing %s" % key)
          rv[key] = execute(key).values()[0]
          if rv[key] != 0:
            status = 1
    else:
      attempt = maxAttempts        
      success = 1

  if success == 1:
    log("The openstack cloud is up and running")
    return 0
  else:
    log("Something failed, exiting")
    return 100
Example 7
def initiateSetup(dir,verbose='quiet'):
### Harish: For some reason python paramiko is not working from the machine I run this from; I will debug it later
### For now, not checking whether the server is up
#  log("Verifying the server is up")
#  while not verifySshd([env.host],'ubuntu','~/ubuntu-key'):
#    sleep(5)
#    continue

  ### Print stdout and error messages only on verbose 
  if verbose.upper() == 'QUIET':
    output.update({'running': False, 'stdout': False, 'stderr': False})

  log("Copying the files to lb node")
  put(dir, '~/')
  log("Setting up the system on %s" % env.host)
  base_dir = os.path.basename(dir)
#  print("%s----------" % env.key_filename)
  log("Run userdata.sh on lb node")
  sudo("bash ~/%s/userdata.sh  -r lb" % base_dir)
  log("Run fab from lb1 to setup the cloud: %s" % env.cloudname)
 
  ## Enable output - it is required to print inner fab messages
  output.update({'running': True, 'stdout': True, 'stderr': True})
  run('fab -f ~ubuntu/%s/fabfile  -i ~ubuntu/.ssh/id_rsa --set cpservers=10.1.0.253:10.1.0.252,ocservers=10.1.0.11:10.1.0.12,stservers=10.1.0.51:10.1.0.52:10.1.0.53,dbservers=10.1.0.10,lbservers=10.1.0.5 setup:~ubuntu/%s/userdata.sh'  % (base_dir,base_dir))
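
Each --set key=value pair becomes an attribute on env inside the inner fab run, so env.cpservers arrives as the colon-separated string '10.1.0.253:10.1.0.252'. A hedged sketch of how the inner fabfile presumably turns those strings back into role definitions:

# Hypothetical: the inner setup task is assumed to rebuild env.roledefs from
# the --set values; the real inner fabfile is not shown in this listing.
env.roledefs = {
    role: env.get(role + 'servers', '').split(':')
    for role in ('cp', 'oc', 'st', 'db', 'lb')
}
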
Example 8
def setup(verify=True):
    """Setup the entire cloud system"""
    timeout = 5
    maxAttempts = 40
    verbose = fabric_config['fabric::verbose']
    ### Print stdout and error messages only on verbose
    if verbose == False:
        output.update({'running': False, 'stdout': False, 'stderr': False})
    log("Waiting till all servers are sshable")
    while not verifySshd(env.roledefs['all'], 'ubuntu'):
        sleep(5)
        continue
    log("all nodes are sshable now")
    log("Copy data to All except lb servers")
    nodes_to_run_userdata = env.roledefs['oc'] + env.roledefs[
        'cp'] + env.roledefs['cp'] + env.roledefs['st'] + env.roledefs['db']
    dir = '/tmp/' + fabric_config['project']
    ## Make tar
    log("Making tar")
    os.system('cd %s/..; tar zcf  %s.tar.gz %s' %
              (dir, dir, os.path.basename(dir)))
    ## Copy the tar
    log("Copy tar files")
    execute(putFiles,
            hosts=nodes_to_run_userdata,
            files={dir + ".tar.gz": '/tmp'})

    log("Running userdata script on all servers")
    ## Untar, copy puppet files, run userdata
    command_to_run = 'tar -C %s -zxf %s.tar.gz; cp -r %s/jiocloud_puppet_builder  /var/puppet/; bash %s/jiocloud_puppet_builder/resource_spawner/userdata.sh -r non-lb -p http://%s:3128' % (
        os.path.dirname(dir), dir, dir, dir, env.roledefs['lb'][0])

    execute(runCommand,
            hosts=nodes_to_run_userdata,
            cmd=command_to_run,
            run_type="sudo")

    status = 1
    attempt = 1
    ## Configure contrail vm - this is only applicable for
    ## vm spawned from contrail golden image
    log("Configuring contrail node")
    execute(configContrail, hosts='10.1.0.245')
    ## Run puppet - first run on all servers
    log("Initial execution of puppet run on storage")
    with hide('warnings'), settings(warn_only=True):
        execute(runPapply, hosts=env.roledefs['all'])

        ## Reduce the wait time for cloud-init-nonet on cp nodes, as they have vhost0, which will not be up by then.
        execute(reduceCloudinitWaitTime, hosts=env.roledefs['cp'])

## Run papply on LB and DB nodes
    log("Running papply on LB and db nodes")
    with hide('warnings'), settings(warn_only=True):
        execute(runPapply,
                hosts=[
                    '10.1.0.5', '10.1.0.10', '10.1.0.53', '10.1.0.52',
                    '10.1.0.51'
                ])

        ## Sync the time initially so ntp does not take as long to sync the clocks.
        log("Executing ntpdate for initial time sync")
        try:
            execute(runNtpdate, hosts='10.1.0.5')
        except Exception:
            log('Failed time sync on lb, retrying')
            execute(runNtpdate, hosts='10.1.0.5')
        try:
            execute(runNtpdate, hosts=env.roledefs['all'])
        except Exception:
            log('Failed time sync on lb, retrying')
            execute(runNtpdate, hosts=env.roledefs['all'])
    ##
    log("Checking ceph mon status")
    with hide('warnings'), settings(host_string='10.1.0.53', warn_only=True):
        st_ceph_mon = sudo("ceph mon stat | grep st1,st2,st3")
        if st_ceph_mon.return_code != 0:
            log("Ceph Mons are not up, fixing")
            while st_ceph_mon.return_code != 0:
                execute(runPapply,
                        hosts=['10.1.0.53', '10.1.0.52', '10.1.0.51'])
                with settings(host_string='10.1.0.53', warn_only=True):
                    execute(waitForSSH)
                    try:
                        st_ceph_mon = sudo("ceph mon stat | grep st1,st2,st3")
                    except Exception:
                        log("Failed checking mon stat, retrying")
                        st_ceph_mon = sudo("ceph mon stat | grep st1,st2,st3")

    log("ceph mon are up, running papply on db, cp and oc nodes")
    nodes = ['10.1.0.10'
             ] + env.roledefs['cp'] + env.roledefs['oc'] + env.roledefs['st']
    execute(runPapply, hosts=nodes)

    log("Restarting ntp on all servers")
    execute(restartNtp, hosts=env.roledefs['all'])

    log("Configuring All CP nodes")
    execute(configCP, hosts=env.roledefs['cp'])
    execute(runPapply, hosts=nodes)
    if verify == True:
        execute(checkAll, verbose=verbose)
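
verifySshd gates the run until every node accepts SSH, but its definition is not included in this listing. A minimal sketch of such a check using a plain TCP probe of port 22 (the real helper presumably authenticates as the given user, e.g. via paramiko):

import socket

def verifySshd(hosts, user, key_filename=None, timeout=3):
    """Hypothetical helper: True once every host accepts connections on port 22."""
    # user/key_filename are unused in this sketch; the real check likely logs in.
    for host in hosts:
        try:
            sock = socket.create_connection((host, 22), timeout)
            sock.close()
        except (socket.error, socket.timeout):
            return False
    return True
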
Example 9
def setup(script,verbose='quiet'):
  """Setup the entire cloud system""" 
  if verbose.upper() == 'QUIET':
    output.update({'running': False, 'stdout': False, 'stderr': False})
  log("Waiting till all servers are sshable")
  while not verifySshd(env.roledefs['all'],'ubuntu'):
      sleep(5)
      continue
  log("all nodes are sshable now")
  log("Copy data to All except lb servers")
  nodes_to_run_userdata =  env.roledefs['oc'] + env.roledefs['cp'] + env.roledefs['cp'] + env.roledefs['st'] +env.roledefs['db']

  execute(putFiles,hosts=nodes_to_run_userdata,files= {script: '/tmp'})
  
  fileToRun = os.path.basename(script)

  execute(runCommand,hosts=nodes_to_run_userdata,cmd="bash /tmp/%s -r oc -p http://10.1.0.5:3128" % fileToRun,run_type="sudo")
  status = 1
  timeout = 5
  initial_prun = 1
  maxAttempts = 40
  attempt = 1
  ## Configure contrail vm - this is only applicable for 
  ## vm spawned from contrail golden image
  log("Configuring contrail node")
  execute(configContrail,hosts='10.1.0.245')
  ## Run puppet - first run on all servers
  log("Initial execution of puppet run on storage")
  with hide('warnings'), settings(warn_only = True):
    execute(runPapply,hosts=env.roledefs['all'])
  
  ## Reduce the wait time for cloud-init-nonet on cp nodes, as they have vhost0, which will not be up by then.
    execute(reduceCloudinitWaitTime,hosts=env.roledefs['cp'])
  
## Run papply on LB and DB nodes
  log("Running papply on LB and db nodes")
  with hide('warnings'), settings(warn_only = True):
    execute(runPapply,hosts=['10.1.0.5','10.1.0.10','10.1.0.53','10.1.0.52','10.1.0.51'] ) 
#    execute(runPapply,hosts=['10.1.0.5','10.1.0.10']) 

  ## Sync the time initially so ntp does not take as long to sync the clocks.
    log("Executing ntpdate for initial time sync")
    try: 
      execute(runNtpdate,hosts='10.1.0.5')
    except Exception:
      log('Failed time sync on lb, retrying')
      execute(runNtpdate,hosts='10.1.0.5')
    try:
      execute(runNtpdate,hosts=env.roledefs['all'])
    except Exception:
      log('Failed time sync on lb, retrying')
      execute(runNtpdate,hosts=env.roledefs['all'])
  ##
  log("Checking ceph mon status")
  with hide('warnings'), settings(host_string = '10.1.0.53', warn_only = True):
    st_ceph_mon = sudo ("ceph mon stat | grep st1,st2,st3")
    if st_ceph_mon.return_code != 0:
      log("Ceph Mons are not up, fixing")
      while st_ceph_mon.return_code != 0:
        execute(runPapply,hosts=['10.1.0.53','10.1.0.52','10.1.0.51'])
        with  settings(host_string = '10.1.0.53', warn_only = True):
          execute(waitForSSH)
          try:
            st_ceph_mon = sudo ("ceph mon stat | grep st1,st2,st3")
          except Exception:
            log("Failed checking mon stat, retrying")
            st_ceph_mon = sudo ("ceph mon stat | grep st1,st2,st3")

  log("ceph mon are up, running papply on db, cp and oc nodes")
  nodes = ['10.1.0.10'] + env.roledefs['cp'] + env.roledefs['oc'] + env.roledefs['st']
  execute(runPapply,hosts=nodes)

  log("Restarting ntp on all servers")
  execute(restartNtp,hosts=env.roledefs['all'])  

  log("Configuring All CP nodes")
  execute(configCP,hosts=env.roledefs['cp'])
  execute(runPapply,hosts=nodes)
Example 10
 def teardown(self):
     env.update(self.previous_env)
     output.update(self.previous_output)
     shutil.rmtree(self.tmpdir)
     # Clear Fudge mock expectations...again
     clear_expectations()
Example 11
 def teardown(self):
     env.update(self.previous_env)
     output.update(self.previous_output)
     shutil.rmtree(self.tmpdir)
Example 12
def setup(dir,verbose='no',verify=False):
  """Setup the entire cloud system""" 
  timeout = 5
  maxAttempts = 40
  if verbose.upper() == 'NO':
    output.update({'running': False, 'stdout': False, 'stderr': False})
  log("Waiting till all servers are sshable")
  while not verifySshd(env.roledefs['all'],'ubuntu'):
      sleep(5)
      continue
  log("all nodes are sshable now")
  log("Copy data to All except lb servers")
  nodes_to_run_userdata =  env.roledefs['oc'] + env.roledefs['cp'] + env.roledefs['cp'] + env.roledefs['st'] + env.roledefs['db']

  ## Make tar
  log("Making tar")
  os.system('cd %s/..; tar zcf  %s.tar.gz %s' % (dir,dir,os.path.basename(dir)))
  ## Copy the tar
  log("Copy tar files")
  execute(putFiles,hosts=nodes_to_run_userdata,files = {dir + ".tar.gz": '/tmp'})

  log("Running userdata script on all servers")
  ## Untar, copy puppet files, run userdata
  command_to_run='tar -C %s -zxf %s.tar.gz; cp -r %s/jiocloud_puppet_builder  /var/puppet/; bash %s/jiocloud_puppet_builder/resource_spawner/userdata.sh -r non-lb -p http://%s:3128' % (os.path.dirname(dir),dir,dir,dir,env.roledefs['lb'][0])

  execute(runCommand,hosts=nodes_to_run_userdata,cmd=command_to_run,run_type="sudo")

  status = 1
  attempt = 1
  ## Configure contrail vm - this is only applicable for 
  ## vm spawned from contrail golden image
  log("Configuring contrail node")
  execute(configContrail,hosts='10.1.0.245')
  ## Run puppet - first run on all servers
  log("Initial execution of puppet run on storage")
  with hide('warnings'), settings(warn_only = True):
    execute(runPapply,hosts=env.roledefs['all'])
  
  ## Reduce the wait time for cloud-init-nonet on cp nodes, as they have vhost0, which will not be up by then.
    execute(reduceCloudinitWaitTime,hosts=env.roledefs['cp'])
  
## Run papply on LB and DB nodes
  log("Running papply on LB and db nodes")
  with hide('warnings'), settings(warn_only = True):
    execute(runPapply,hosts=['10.1.0.5','10.1.0.10','10.1.0.53','10.1.0.52','10.1.0.51'] ) 

  ## Sync the time initially so ntp does not take as long to sync the clocks.
    log("Executing ntpdate for initial time sync")
    try: 
      execute(runNtpdate,hosts='10.1.0.5')
    except Exception:
      log('Failed time sync on lb, retrying')
      execute(runNtpdate,hosts='10.1.0.5')
    try:
      execute(runNtpdate,hosts=env.roledefs['all'])
    except Exception:
      log('Failed time sync on lb, retrying')
      execute(runNtpdate,hosts=env.roledefs['all'])
  ##
  log("Checking ceph mon status")
  with hide('warnings'), settings(host_string = '10.1.0.53', warn_only = True):
    st_ceph_mon = sudo ("ceph mon stat | grep st1,st2,st3")
    if st_ceph_mon.return_code != 0:
      log("Ceph Mons are not up, fixing")
      while st_ceph_mon.return_code != 0:
        execute(runPapply,hosts=['10.1.0.53','10.1.0.52','10.1.0.51'])
        with  settings(host_string = '10.1.0.53', warn_only = True):
          execute(waitForSSH)
          try:
            st_ceph_mon = sudo ("ceph mon stat | grep st1,st2,st3")
          except Exception:
            log("Failed checking mon stat, retrying")
            st_ceph_mon = sudo ("ceph mon stat | grep st1,st2,st3")

  log("ceph mon are up, running papply on db, cp and oc nodes")
  nodes = ['10.1.0.10'] + env.roledefs['cp'] + env.roledefs['oc'] + env.roledefs['st']
  execute(runPapply,hosts=nodes)

  log("Restarting ntp on all servers")
  execute(restartNtp,hosts=env.roledefs['all'])  

  log("Configuring All CP nodes")
  execute(configCP,hosts=env.roledefs['cp'])
  execute(runPapply,hosts=nodes)
  if verify == True:
    execute(checkAll,verbose=verbose)
Example 13
def checkAll(verbose='quiet'):
  """check all components and return on success, optional argument, 'verbose' for verbose output""" 

  if verbose.upper() == 'QUIET':
    output.update({'running': False, 'stdout': False, 'stderr': False})

#  rv = {'checkCephOsd': 1, 'checkCephStatus': 1, 'checkNova': 1, 'checkCinder': 1, 'getOSControllerProcs': 1 }
  rv = {'checkCephOsd': 1, 'checkCephStatus': 1, 'checkNova': 1, 'checkCinder': 1, }
  status = 1
  timeout = 5
  initial_prun = 1
  maxAttempts = 40
  attempt = 1
  log("Start checking the cluster")
  log("Waiting till all servers are sshable")
  while not verify_sshd(env.roledefs['all'],'ubuntu'):
      sleep(5)
      continue
  log("all nodes are sshable now")
  ## Run puppet - first run on all servers
  log("Initial execution of puppet run on storage")
  with hide('warnings'), settings(warn_only = True):
    execute(runPapply,hosts=env.roledefs['all'])
  
  ## Reduce the wait time for cloud-init-nonet on cp nodes, as they have vhost0, which will not be up by then.
    execute(reduceCloudinitWaitTime,hosts=env.roledefs['cp'])
  
## Run papply on LB and DB nodes
  log("Running papply on LB and db nodes")
  with hide('warnings'), settings(warn_only = True):
    execute(runPapply,hosts=['10.1.0.5','10.1.0.10','10.1.0.53','10.1.0.52','10.1.0.51'] ) 
#    execute(runPapply,hosts=['10.1.0.5','10.1.0.10']) 

  ## Sync the time initially so ntp does not take as long to sync the clocks.
    log("Executing ntpdate for initial time sync")
    try: 
      execute(runNtpdate,hosts='10.1.0.5')
    except Exception:
      log('Failed time sync on lb, retrying')
      execute(runNtpdate,hosts='10.1.0.5')
    try:
      execute(runNtpdate,hosts=env.roledefs['all'])
    except Exception:
      log('Failed time sync on lb, retrying')
      execute(runNtpdate,hosts=env.roledefs['all'])
  ##
  log("Checking ceph mon status")
  with hide('warnings'), settings(host_string = '10.1.0.53', warn_only = True):
    st_ceph_mon = sudo ("ceph mon stat | grep st1,st2,st3")
    if st_ceph_mon.return_code != 0:
      log("Ceph Mons are not up, fixing")
      while st_ceph_mon.return_code != 0:
        execute(runPapply,hosts=['10.1.0.53','10.1.0.52','10.1.0.51'])
        with  settings(host_string = '10.1.0.53', warn_only = True):
          execute(waitForSSH)
          try:
            st_ceph_mon = sudo ("ceph mon stat | grep st1,st2,st3")
          except Exception:
            log("Failed checking mon stat, retrying")
            st_ceph_mon = sudo ("ceph mon stat | grep st1,st2,st3")

  log("ceph mon are up, running papply on db, cp and oc nodes")
  nodes = ['10.1.0.10'] + env.roledefs['cp'] + env.roledefs['oc'] + env.roledefs['st']
  execute(runPapply,hosts=nodes)

  log("Restarting ntp on all servers")
  execute(restartNtp,hosts=env.roledefs['all'])  

  log("Configuring All CP nodes")
  execute(configCP,hosts=env.roledefs['cp'])

  log("Configuring contrail node")
  execute(configContrail,hosts='10.1.0.245')
  execute(runPapply,hosts=nodes)
  
  log("Checking the services")
  success = 0
  while ( attempt < maxAttempts ):
    attempt += 1
    time.sleep(timeout)
    if status != 0:
      log("System is not up... checking....")
      status = 0 
      for key,val in rv.items():
        if val != 0:
          log( "Executing %s" % key)
          rv[key] = execute(key).values()[0]
          if rv[key] != 0:
            status = 1
    else:
      attempt = maxAttempts        
      success = 1

  if success == 1:
    log("The openstack cloud is up and running")
    return 0
  else:
    log("Something failed, exiting")
    return 100
Example 14
from shutil import copytree, rmtree

# Initialize the app_config variable
load_config("conf/app.yaml", os.environ['ROOT'])
from apputils.config import app_config

logger = logging.getLogger('app')

# Hosts configurations
# env.roledefs = app_config["machines"]
# env.hosts = [ip for ips in env.roledefs.values() for ip in ips]

# Extra default configuration
env.warn_only = True
output.update({
    "warnings": False,
    "running": False
})

# Global variable to know if the execution is done locally or remotely
local_exec = False


# @task
# def install():
#     """Install the pipeline on the specified cluster
#     """
#     logger.debug("Installing pipeline...")
#
#     local_root = os.environ['ROOT']
#     remote_root = app_config['root']
#
Example 15
 def teardown(self):
     env.update(self.previous_env)
     output.update(self.previous_output)
Example 16
from fabric.state import output
from shutil import copytree, rmtree

# Initialize the app_config variable
load_config("conf/app.yaml", os.environ['ROOT'])
from apputils.config import app_config

logger = logging.getLogger('app')

# Hosts configurations
# env.roledefs = app_config["machines"]
# env.hosts = [ip for ips in env.roledefs.values() for ip in ips]

# Extra default configuration
env.warn_only = True
output.update({"warnings": False, "running": False})

# Global variable to know if the execution is done locally or remotely
local_exec = False

# @task
# def install():
#     """Install the pipeline on the specified cluster
#     """
#     logger.debug("Installing pipeline...")
#
#     local_root = os.environ['ROOT']
#     remote_root = app_config['root']
#
#     if local_exec:
#         if abspath(local_root) == abspath(remote_root):
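
The module-level output.update(...) above silences those groups for every task in the session; when the quieting should only apply to one block, Fabric's hide context manager is the scoped alternative (a minimal sketch):

from fabric.api import hide, run

def disk_usage():
    # Warnings and the "run:" line stay visible elsewhere; only this block is quiet.
    with hide('warnings', 'running'):
        return run('df -h')
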
Example 17
from fabric.api import env, sudo, settings, task
from fabric.state import output

from django_fabfile.utils import config


master = config.get("RDBMS", "MASTER")
backup = config.get("RDBMS", "BACKUP")
username = config.get("DEFAULT", "USERNAME")
pcp_password = config.get("RDBMS", "PCP_PASSWORD")

env.update({"disable_known_hosts": True, "user": username, "warn_only": True})
output.update({"running": False})


def return_(master, backup, node_id):
    with settings(host_string=master):
        sudo(
            'su postgres -c "pcp_attach_node 60 127.0.0.1 9898 postgres'
            ' {pcp_password} {node_id}"'.format(node_id=node_id, pcp_password=pcp_password)
        )
    with settings(host_string=backup):
        sudo(
            'su postgres -c "pcp_attach_node 60 127.0.0.1 9898 postgres'
            ' {pcp_password} {node_id}"'.format(node_id=node_id, pcp_password=pcp_password)
        )


def failover(new_primary_host, old_primary_host, failed_node_id, master_node_id):
    trigger = "/var/log/pgpool/trigger/trigger_file1"
    with settings(host_string=new_primary_host):
Example 18
'''

import logging
from collections import namedtuple
from fabric.api import env, local, quiet, settings, task
from fabric.state import output as fabric_output

from website.settings import DATABASES, PROJECT_ROOT

LOG = logging.getLogger(__name__)


# Fabric environment settings
env.hosts = ['localhost']
fabric_output.update({
    'running': False,
    'stdout': True,
})

Status = namedtuple('Status', ['RUNNING', 'STOPPED'])
STATUS = Status(0, 1)


# Setup and base commands
RABBITMQ_CMD = 'sudo rabbitmqctl {action}'.format


@task
def start_rabbitmq(detached=True):
    detached = parse_bool(detached)
    cmd = 'sudo rabbitmq-server' + (' -detached' if detached else '')
    local(cmd)
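
parse_bool is not shown here; since Fabric passes command-line task arguments as strings (e.g. fab start_rabbitmq:detached=false), it presumably normalises both real booleans and their string forms, roughly:

def parse_bool(value):
    # Hypothetical helper: accept real booleans as well as the string forms
    # Fabric's CLI hands to task arguments ("True", "false", "1", "no", ...).
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('true', 'yes', '1', 'on')
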
Example 19
Admin tasks

@author: dvanaken
'''

import os.path
from collections import namedtuple
from fabric.api import env, execute, local, quiet, settings, sudo, task
from fabric.state import output as fabric_output

from website.settings import PRELOAD_DIR, PROJECT_ROOT

# Fabric environment settings
env.hosts = ['localhost']
fabric_output.update({
    'running': False,
    'stdout': True,
})

Status = namedtuple('Status', ['RUNNING', 'STOPPED'])
STATUS = Status(0, 1)

if local('hostname', capture=True).strip() == 'ottertune':
    PREFIX = 'sudo -u celery '
    SUPERVISOR_CONFIG = '-c config/prod_supervisord.conf'
else:
    PREFIX = ''
    SUPERVISOR_CONFIG = '-c config/supervisord.conf'

# Setup and base commands
SUPERVISOR_CMD = (PREFIX + 'supervisorctl ' + SUPERVISOR_CONFIG +
                  ' {action} celeryd').format
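
SUPERVISOR_CMD (like RABBITMQ_CMD in the earlier example) is a bound str.format, so tasks build the final shell command by calling it with an action. A usage sketch (the task name is assumed):

@task
def start_celery():
    # Expands to e.g. "supervisorctl -c config/supervisord.conf start celeryd",
    # with the "sudo -u celery " prefix on the production host.
    local(SUPERVISOR_CMD(action='start'))
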
Example 20
from fabric.api import env, sudo, settings, task
from fabric.state import output

from django_fabfile.utils import config

master = config.get('RDBMS', 'MASTER')
backup = config.get('RDBMS', 'BACKUP')
username = config.get('DEFAULT', 'USERNAME')
pcp_password = config.get('RDBMS', 'PCP_PASSWORD')

env.update({'disable_known_hosts': True, 'user': username, 'warn_only': True})
output.update({'running': False})


def return_(master, backup, node_id):
    with settings(host_string=master):
        sudo('su postgres -c "pcp_attach_node 60 127.0.0.1 9898 postgres'
             ' {pcp_password} {node_id}"'.format(node_id=node_id,
                                                 pcp_password=pcp_password))
    with settings(host_string=backup):
        sudo('su postgres -c "pcp_attach_node 60 127.0.0.1 9898 postgres'
             ' {pcp_password} {node_id}"'.format(node_id=node_id,
                                                 pcp_password=pcp_password))


def failover(new_primary_host, old_primary_host, failed_node_id,
             master_node_id):
    trigger = '/var/log/pgpool/trigger/trigger_file1'
    with settings(host_string=new_primary_host):
        sudo('su postgres -c "touch {trigger}"'.format(trigger=trigger))
        sudo('su postgres -c "/usr/local/etc/dnsmadeeasy-update.sh'