def install_nvm(options):
  print '---------- Installing NVM ---------'
  check_run_quick('sudo chmod 775 /usr/local')
  check_run_quick('sudo mkdir -m 777 -p /usr/local/node /usr/local/nvm')

  result = check_fetch(
    'https://raw.githubusercontent.com/creationix/nvm/{nvm_version}/install.sh'
    .format(nvm_version=NVM_VERSION))

  fd, temp = tempfile.mkstemp()
  os.write(fd, result.content)
  os.close(fd)

  try:
    run_and_monitor(
        'bash -c "NVM_DIR=/usr/local/nvm source {temp}"'.format(temp=temp))
  finally:
    os.remove(temp)

#  curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.26.0/install.sh | NVM_DIR=/usr/local/nvm bash


  check_run_and_monitor('sudo bash -c "cat > /etc/profile.d/nvm.sh"',
                        input=__NVM_SCRIPT)

  print '---------- Installing Node {version} ---------'.format(
    version=NODE_VERSION)

  run_and_monitor('bash -c "source /etc/profile.d/nvm.sh'
                  '; nvm install {version}'
                  '; nvm alias default {version}"'
                  .format(version=NODE_VERSION))
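These examples exercise shell helpers from Spinnaker's dev tooling (run_quick, run_and_monitor, and their check_ variants). The real implementations are not shown in this listing; the sketch below is only a minimal, hypothetical stand-in illustrating the calling convention the examples suggest: a command string, an echo flag, optional input, and a result object exposing returncode, stdout, and stderr.

# Minimal sketch, not the actual spinnaker.run implementation: a shell
# wrapper returning an object with returncode/stdout/stderr, plus a
# "check_" variant that raises when the command fails.
import collections
import subprocess
import sys

RunResult = collections.namedtuple(
    'RunResult', ['returncode', 'stdout', 'stderr'])


def run_and_monitor_sketch(command, echo=True, input=None):
  """Hypothetical stand-in for run_and_monitor."""
  if echo:
    sys.stdout.write(command + '\n')
  process = subprocess.Popen(
      command, shell=True,
      stdin=subprocess.PIPE if input is not None else None,
      stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  stdout, stderr = process.communicate(input)
  return RunResult(process.returncode, stdout, stderr)


def check_run_and_monitor_sketch(command, **kwargs):
  """Hypothetical stand-in for check_run_and_monitor: raise on failure."""
  result = run_and_monitor_sketch(command, **kwargs)
  if result.returncode != 0:
    raise RuntimeError('"{0}" failed with exit code {1}'.format(
        command, result.returncode))
  return result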
Example #2
  def run_test_profile_helper(self, test_name, spec):
    """Helper function for running an individual test.

    The caller wraps this to trap and handle exceptions.

    Args:
      test_name: The test being run.
      spec: The test specification profile.
            This argument will be pruned as values are consumed from it.
    """
    quota = spec.pop('quota', {})
    command = self.make_test_command_or_none(test_name, spec)
    if command is None:
      return
    capture = CommandOutputMediator(test_name)

    logging.info('Acquiring quota for test "%s"...', test_name)
    quota_tracker = self.__quota_tracker
    acquired_quota = quota_tracker.acquire_all_safe(test_name, quota)
    if acquired_quota:
      logging.info('"%s" acquired quota %s', test_name, acquired_quota)

    execute_time = None
    start_time = time.time()
    try:
      logging.info('Scheduling "%s"...', test_name)

      # This will block. Note that we already acquired quota, thus
      # we are blocking while holding onto that quota. However, since we
      # are blocked awaiting a thread, nobody else can execute either,
      # so it doesn't matter that we might be starving them of quota.
      self.__semaphore.acquire(True)
      execute_time = time.time()
      wait_time = int(execute_time - start_time + 0.5)
      if wait_time > 1:
        logging.info('"%s" had a semaphore contention for %d secs.',
                     test_name, wait_time)
      logging.info('Executing "%s"...', test_name)
      logging.debug('Running %s', ' '.join(command))
      result = run_and_monitor(' '.join(command),
                               echo=False,
                               observe_stdout=capture.capture_stdout,
                               observe_stderr=capture.capture_stderr)
    finally:
      logging.info('Finished executing "%s"...', test_name)
      self.__semaphore.release()
      if acquired_quota:
        quota_tracker.release_all_safe(test_name, acquired_quota)

    capture.flush()
    end_time = time.time()
    delta_time = int(end_time - execute_time + 0.5)

    with self.__lock:
      if result.returncode == 0:
        logging.info('%s PASSED after %d secs', test_name, delta_time)
        self.__passed.append((test_name, result.stdout))
      else:
        logging.info('FAILED %s after %d secs', test_name, delta_time)
        self.__failed.append((test_name, result.stderr))
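The CommandOutputMediator used above is not shown in this listing. As a rough illustration only (an assumption about its role, not the real class), it could buffer the observe_stdout/observe_stderr callbacks so a test's output is later flushed as one contiguous block:

# Hypothetical sketch of the mediator behavior the example assumes.
import sys
import threading

class CommandOutputMediatorSketch(object):
  """Buffers per-test stdout/stderr callbacks and flushes them together."""

  def __init__(self, test_name):
    self.__test_name = test_name
    self.__lock = threading.Lock()
    self.__stdout_lines = []
    self.__stderr_lines = []

  def capture_stdout(self, line):
    with self.__lock:
      self.__stdout_lines.append(line)

  def capture_stderr(self, line):
    with self.__lock:
      self.__stderr_lines.append(line)

  def flush(self):
    with self.__lock:
      if self.__stdout_lines:
        sys.stdout.write('[{0}]\n{1}\n'.format(
            self.__test_name, '\n'.join(self.__stdout_lines)))
      if self.__stderr_lines:
        sys.stderr.write('[{0}]\n{1}\n'.format(
            self.__test_name, '\n'.join(self.__stderr_lines)))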
Example #3
  def validate_options_helper(cls, options):
    """Adds custom configuration parameters to argument parser.

    This is a helper function for make_deployer().
    """
    if not options.deploy_aws_name:
      return
    if not options.deploy_aws_pem_path:
      raise ValueError('--deploy_aws_pem_path not specified.')
    if not os.path.exists(options.deploy_aws_pem_path):
      raise ValueError('File "{path}" does not exist.'
                       .format(path=options.deploy_aws_pem_path))
    if not options.deploy_aws_security_group:
      raise ValueError('--deploy_aws_security_group not specified.')

    if options.deploy_deploy:
      response = run_and_monitor(
          'aws ec2 describe-instances'
          ' --profile {region}'
          ' --filters "Name=tag:Name,Values={name}'
          ',Name=instance-state-name,Values=running"'
          .format(region=options.deploy_aws_region,
                  name=options.deploy_aws_name),
          echo=False)
      if response.returncode != 0:
        raise ValueError('Could not probe AWS: {0}'.format(response))
      exists = json.JSONDecoder().decode(response.stdout).get('Reservations')
      if exists:
        raise ValueError(
            'Running "{name}" already exists: {info}'
            .format(name=options.deploy_aws_name, info=exists[0]))
Example #4
  def do_undeploy(self):
    """Implements the BaseBomValidateDeployer interface."""
    options = self.options
    if options.deploy_spinnaker_type == 'distributed':
      run_and_monitor(
          'ssh'
          ' -i {ssh_key}'
          ' -o StrictHostKeyChecking=no'
          ' -o UserKnownHostsFile=/dev/null'
          ' {ip} sudo hal deploy clean'
          .format(ip=self.instance_ip, ssh_key=self.ssh_key_path))
    check_run_and_monitor(
        'az vm delete -y'
        ' --name {name}'
        ' --resource-group {rg}'
        .format(name=options.deploy_azure_name,
                rg=options.deploy_azure_resource_group))
Example #5
def _install_spinnaker_packages_helper(options, bucket):
  """Install the spinnaker packages from the specified path.

  Args:
    bucket [string]: The path to install from, or a storage service URI.
  """
  if not options.spinnaker:
    return

  print 'Installing Spinnaker components from {0}.'.format(bucket)

  install_config_dir = get_config_install_dir(options)
  spinnaker_dir = get_spinnaker_dir(options)

  with open(os.path.join(spinnaker_dir, 'release_config.cfg'), 'r') as f:
    content = f.read()
    package_list = (re.search('\nPACKAGE_LIST="(.*?)"', content)
                    .group(1).split())


  ###########################
  # Copy Subsystem Packages
  ###########################
  print 'Downloading spinnaker release packages...'
  package_dir = os.path.join(spinnaker_dir, 'install')
  safe_mkdir(package_dir)
  jobs = []
  for pkg in package_list:
    jobs.append(start_copy_file(options,
                                os.path.join(bucket, pkg), package_dir))

  check_wait_for_copy_complete(jobs)

  for pkg in package_list:
    print 'Installing {0}.'.format(pkg)

    # Let this fail because it may have dependencies
    # that we'll pick up below.
    run_and_monitor('sudo dpkg -i ' + os.path.join(package_dir, pkg))
    check_run_and_monitor('sudo apt-get install -f -y')
    # Convert package name to install directory name.
    inject_spring_config_location(options, pkg[0:pkg.find('_')])

  # Install package dependencies
  check_run_and_monitor('sudo apt-get install -f -y')
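The PACKAGE_LIST parsing above can be illustrated with a hypothetical release_config.cfg fragment (the package names below are made up for the example):

import re

# Hypothetical release_config.cfg fragment; real package names will differ.
sample_content = '\nPACKAGE_LIST="clouddriver_1.0_all.deb gate_1.0_all.deb"\n'
package_list = (re.search('\nPACKAGE_LIST="(.*?)"', sample_content)
                .group(1).split())
assert package_list == ['clouddriver_1.0_all.deb', 'gate_1.0_all.deb']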
Example #6
  def do_undeploy(self):
    """Implements the BaseBomValidateDeployer interface."""
    options = self.options
    if options.deploy_spinnaker_type == 'distributed':
      run_and_monitor(
          'ssh'
          ' -i {ssh_key}'
          ' -o StrictHostKeyChecking=no'
          ' -o UserKnownHostsFile=/dev/null'
          ' {ip} sudo hal deploy clean'
          .format(ip=self.instance_ip, ssh_key=self.ssh_key_path))

    check_run_and_monitor(
        'gcloud -q compute instances delete'
        ' --account {gcloud_account}'
        ' --project {project} --zone {zone} {instance}'
        .format(gcloud_account=options.deploy_hal_google_service_account,
                project=options.deploy_google_project,
                zone=options.deploy_google_zone,
                instance=options.deploy_google_instance))
Example #7
  def git_clone(self, repository, owner=None):
      """Clone the specified repository

      Args:
        repository [string]: The name of the github repository (without owner).
        owner [string]: An explicit repository owner.
               If not provided use the configured options.
      """
      name = repository.name
      repository_dir = get_repository_dir(name)
      upstream_user = repository.owner
      branch = self.pull_branch or 'master'
      origin_url = self.get_github_repository_url(repository, owner=owner)
      upstream_url = 'https://github.com/{upstream_user}/{name}.git'.format(
              upstream_user=upstream_user, name=name)

      # Don't echo because we're going to hide some failures.
      print 'Cloning {name} from {origin_url} -b {branch}.'.format(
          name=name, origin_url=origin_url, branch=branch)
      shell_result = run_and_monitor(
          'git clone {url} -b {branch}'.format(url=origin_url, branch=branch),
          echo=False)
      if not shell_result.returncode:
          if shell_result.stdout:
              print shell_result.stdout
      else:
          if repository in self.__extra_repositories:
             sys.stderr.write('WARNING: Missing optional repository {name}.\n'
                                  .format(name=name))
             sys.stderr.write('         Continue on without it.\n')
             return
          sys.stderr.write(shell_result.stderr or shell_result.stdout)
          sys.stderr.write(
              'FATAL: Cannot continue without required repository {name}.\n'
              '       Consider using github to fork one from {upstream}.\n'.
              format(name=name, upstream=upstream_url))
          raise SystemExit('Repository {url} not found.'.format(url=origin_url))

      if self.__options.add_upstream and origin_url != upstream_url:
          print '  Adding upstream repository {upstream}.'.format(
              upstream=upstream_url)
          check_run_quick('git -C "{dir}" remote add upstream {url}'
                              .format(dir=repository_dir, url=upstream_url),
                          echo=False)

      if self.__options.disable_upstream_push:
          which = 'upstream' if origin_url != upstream_url else 'origin'
          print '  Disabling git pushes to {which} {upstream}'.format(
              which=which, upstream=upstream_url)
          check_run_quick(
              'git -C "{dir}" remote set-url --push {which} disabled'
                  .format(dir=repository_dir, which=which),
              echo=False)
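Note that the repository argument is an object rather than a plain string: the code reads repository.name and repository.owner. A minimal hypothetical stand-in for experimentation (the real type in the tooling may differ) could be:

import collections

# Hypothetical stand-in; the real repository type in the tooling may differ.
Repository = collections.namedtuple('Repository', ['name', 'owner'])

# e.g., on a hypothetical refresher instance:
#   refresher.git_clone(Repository(name='clouddriver', owner='spinnaker'))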
Example #9
    def do_undeploy(self):
        """Implements the BaseBomValidateDeployer interface."""
        options = self.options
        if options.deploy_spinnaker_type == 'distributed':
            run_and_monitor('ssh'
                            ' -i {ssh_key}'
                            ' -o StrictHostKeyChecking=no'
                            ' -o UserKnownHostsFile=/dev/null'
                            ' {user}@{ip} sudo hal deploy clean'.format(
                                user=self.hal_user,
                                ip=self.instance_ip,
                                ssh_key=self.ssh_key_path))

        check_run_and_monitor(
            'gcloud -q compute instances delete'
            ' --account {gcloud_account}'
            ' --project {project} --zone {zone} {instance}'.format(
                gcloud_account=options.deploy_hal_google_service_account,
                project=options.deploy_google_project,
                zone=options.deploy_google_zone,
                instance=options.deploy_google_instance))
Example #10
  def do_determine_instance_ip(self):
    """Implements GenericVmValidateBomDeployer interface."""
    options = self.options
    response = run_and_monitor(
        'az vm list-ip-addresses --name {name} --resource-group {group}'.format(
            name=options.deploy_azure_name,
            group=options.deploy_azure_resource_group),
        echo=False)
    if response.returncode != 0:
      raise ValueError('Could not determine public IP: {0}'.format(response))
    found = json.JSONDecoder().decode(response.stdout)[0].get('virtualMachine')
    if not found:
      raise RuntimeError(
          '"{0}" is not running'.format(options.deploy_azure_name))
    return found['network']['publicIpAddresses'][0]['ipAddress']
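The chained indexing above assumes an 'az vm list-ip-addresses' payload shaped roughly as follows; the values are hypothetical, trimmed to the fields the code touches:

# Hypothetical, trimmed response for illustration only.
sample_response = [
    {
        'virtualMachine': {
            'name': 'my-spinnaker-vm',             # hypothetical name
            'network': {
                'publicIpAddresses': [
                    {'ipAddress': '203.0.113.10'}  # documentation-range IP
                ]
            }
        }
    }
]
found = sample_response[0].get('virtualMachine')
ip_address = found['network']['publicIpAddresses'][0]['ipAddress']
assert ip_address == '203.0.113.10'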
Example #12
    def do_determine_instance_ip(self):
        """Implements GenericVmValidateBomDeployer interface."""
        options = self.options
        response = run_and_monitor(
            'aws ec2 describe-instances'
            ' --output json'
            ' --filters "Name=tag:Name,Values={name}'
            ',Name=instance-state-name,Values=running"'.format(
                name=options.deploy_aws_name),
            echo=False)
        if response.returncode != 0:
            raise ValueError(
                'Could not determine public IP: {0}'.format(response))
        found = json.JSONDecoder().decode(response.stdout).get('Reservations')
        if not found:
            raise RuntimeError('"{0}" is not running'.format(
                options.deploy_aws_name))

        return found[0]['Instances'][0]['PublicIpAddress']
Example #13
    def do_undeploy(self):
        """Implements the BaseBomValidateDeployer interface."""
        options = self.options
        logging.info('Terminating "%s"', options.deploy_aws_name)

        if self.__instance_id:
            all_ids = [self.__instance_id]
        else:
            lookup_response = run_and_monitor(
                'aws ec2 describe-instances'
                ' --profile {region}'
                ' --filters "Name=tag:Name,Values={name}'
                ',Name=instance-state-name,Values=running"'.format(
                    region=options.deploy_aws_region,
                    name=options.deploy_aws_name),
                echo=False)
            if lookup_response.returncode != 0:
                raise ValueError('Could not lookup instance id: {0}'.format(
                    lookup_response))
            exists = json.JSONDecoder().decode(
                lookup_response.stdout).get('Reservations')
            if not exists:
                logging.warning('"%s" is not running', options.deploy_aws_name)
                return
            all_ids = []
            for reservation in exists:
                all_ids.extend([
                    instance['InstanceId']
                    for instance in reservation['Instances']
                ])

        for instance_id in all_ids:
            logging.info('Terminating "%s" instanceId=%s',
                         options.deploy_aws_name, instance_id)
            response = run_quick('aws ec2 terminate-instances'
                                 '  --profile {region}'
                                 '  --instance-ids {id}'.format(
                                     region=options.deploy_aws_region,
                                     id=instance_id))
            if response.returncode != 0:
                logging.warning('Failed to delete "%s" instanceId=%s',
                                options.deploy_aws_name, instance_id)
Example #14
  def do_determine_instance_ip(self):
    """Implements GenericVmValidateBomDeployer interface."""
    options = self.options
    response = run_and_monitor(
        'aws ec2 describe-instances'
        ' --profile {region}'
        ' --output json'
        ' --filters "Name=tag:Name,Values={name}'
        ',Name=instance-state-name,Values=running"'
        .format(region=options.deploy_aws_region,
                name=options.deploy_aws_name),
        echo=False)
    if response.returncode != 0:
      raise ValueError('Could not determine public IP: {0}'.format(response))
    found = json.JSONDecoder().decode(response.stdout).get('Reservations')
    if not found:
      raise RuntimeError(
          '"{0}" is not running'.format(options.deploy_aws_name))

    return found[0]['Instances'][0]['PublicIpAddress']
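The extraction above assumes an 'aws ec2 describe-instances' response shaped roughly like this; the values are hypothetical, trimmed to the fields the code touches:

# Hypothetical, trimmed response for illustration only.
sample_response = {
    'Reservations': [
        {
            'Instances': [
                {
                    'InstanceId': 'i-0123456789abcdef0',  # hypothetical id
                    'PublicIpAddress': '203.0.113.20'     # documentation-range IP
                }
            ]
        }
    ]
}
found = sample_response.get('Reservations')
public_ip = found[0]['Instances'][0]['PublicIpAddress']
assert public_ip == '203.0.113.20'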
Example #15
  def do_undeploy(self):
    """Implements the BaseBomValidateDeployer interface."""
    options = self.options
    logging.info('Terminating "%s"', options.deploy_aws_name)

    if self.__instance_id:
      all_ids = [self.__instance_id]
    else:
      lookup_response = run_and_monitor(
          'aws ec2 describe-instances'
          ' --profile {region}'
          ' --filters "Name=tag:Name,Values={name}'
          ',Name=instance-state-name,Values=running"'
          .format(region=options.deploy_aws_region,
                  name=options.deploy_aws_name),
          echo=False)
      if lookup_response.returncode != 0:
        raise ValueError(
            'Could not lookup instance id: {0}'.format(lookup_response))
      exists = json.JSONDecoder().decode(
          lookup_response.stdout).get('Reservations')
      if not exists:
        logging.warning('"%s" is not running', options.deploy_aws_name)
        return
      all_ids = []
      for reservation in exists:
        all_ids.extend([instance['InstanceId']
                        for instance in reservation['Instances']])

    for instance_id in all_ids:
      logging.info('Terminating "%s" instanceId=%s',
                   options.deploy_aws_name, instance_id)
      response = run_quick(
          'aws ec2 terminate-instances'
          '  --profile {region}'
          '  --instance-ids {id}'
          .format(region=options.deploy_aws_region, id=instance_id))
      if response.returncode != 0:
        logging.warning('Failed to delete "%s" instanceId=%s',
                        options.deploy_aws_name, instance_id)