Example #1
    def update_sources(self, source_dir, projects, revision, svn='svn'):
        self.report_build_step('update-sources')
        self.halt_on_failure()

        # TODO: This needs to be updated to use the monorepo.
        # Where to check the project out relative to an LLVM checkout.
        checkout_locations = {
            'llvm': '',
            'clang': 'tools/clang',
            'lld': 'tools/lld',
            'compiler-rt': 'projects/compiler-rt',
            'debuginfo-tests': 'projects/debuginfo-tests',
            }
        # If the project is named differently in svn, put it here.
        svn_locations = { 'clang': 'cfe' }
        svn_uri_pattern = 'https://llvm.org/svn/llvm-project/%s/trunk'

        for project in projects:
            # TODO: Fail the build and report an error if we don't know the
            # checkout location.
            path = checkout_locations[project]
            if not path:
                path = source_dir
            elif not os.path.isabs(path):
                path = pjoin(source_dir, path)
            uri = svn_uri_pattern % (svn_locations.get(project, project),)
            util.report("Updating %s to %s at %s from %s" %
                        (project, revision, util.shquote(path), uri))
            if os.path.exists(pjoin(path, '.svn')):
                cmd = [svn, 'up', '-r', revision]
            else:
                util.mkdirp(path)
                cmd = [svn, 'co', '-r', revision, uri, '.']
            util.report_run_cmd(cmd, cwd=path)
Example #2
def run():
  # Setup the gantry arguments
  parser = argparse.ArgumentParser(description='gantry continuous deployment system')
  parser.add_argument('config_file', help = 'The configuration file')
  parser.add_argument('action', help = 'The action to perform', choices = ACTIONS.keys())
  parser.add_argument('component_name', help = 'The name of the component to manage')
  parser.add_argument('-m', dest='monitor', action='store_true', help = 'If specified and the action is "start" or "update", gantry will remain running to monitor components, auto restarting them as necessary')
  
  args = parser.parse_args()
  component_name = args.component_name
  action = args.action
  should_monitor = args.monitor
  config_file = args.config_file
  
  # Load the config.
  config = loadConfig(config_file)
  if not config:
    return
  
  # Create the manager.
  manager = RuntimeManager(config)
  
  # Find the component
  component = manager.getComponent(component_name)
  if not component:
    raise Exception('Unknown component: ' + component_name)

  # Run the action with the component and config.
  result = ACTIONS[action](component)
  if result and should_monitor:
    report('Starting monitoring of component: ' + component_name)
    monitor(component)
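ACTIONS itself is not shown in these examples; judging from the call ACTIONS[action](component) and from choices=ACTIONS.keys(), it maps action names to callables that take a component and return a truthy value on success. A minimal hypothetical sketch, built only from the Component methods shown in Examples #5 and #33 (the real gantry table may differ):

# Hypothetical ACTIONS table; each callable takes a component and returns a
# truthy value on success, which is what run() checks before monitoring.
ACTIONS = {
    'start': lambda component: component.update(),
    'update': lambda component: component.update(),
    'stop': lambda component: component.stop(kill=False) or True,
    'kill': lambda component: component.stop(kill=True) or True,
}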
Example #3
def update_k2_version(svn_revision):
	"""
	将文件版本信息和产品版本信息写入k2_version.h,后续构建会将此信息编入pe文件版本信息内
	:param svn_revision:
	:return:
	"""
	def write_k2_version(k2_version_file, version_info):
		with open(k2_version_file, "w") as f:
			f.write(version_info)

	version_info = "#ifndef K2_VERSION_H_\n"
	version_info += "#define K2_VERSION_H_\n"

	file_version=get_file_version(svn_revision)
	str_file_version = file_version.replace(",", ".")
	str_product_version = util.get_new_version()
	util.report("New Version Is:%s" % str_product_version)
	product_version = str_product_version.replace(".", ",")

	version_info += '#define FILE_VERSION  {file_version}\n'.format(file_version=file_version)
	version_info += '#define STR_FILE_VERSION  "{str_file_version}"\n'.format(str_file_version=str_file_version)
	version_info += '#define PRODUCT_VERSION  {product_version}\n'.format(product_version=product_version)
	version_info += '#define STR_PRODUCT_VERSION  "{str_product_version}"\n'.format(str_product_version=str_product_version)

	version_info += '#define COMPANY_NAME  "{company_name}"\n'.format(company_name="S2 Games")
	version_info += '#define LEGAL_COPY_RIGTE  "{legal_copy_write}"\n'.format(
		legal_copy_write="Copyright (C) 2013 S2 Games")
	version_info += '#define PRODUCT_NAME  "{product_name}"\n'.format(product_name="Strife")
	version_info += "#endif  //! #ifndef K2_VERSION_H_\n"

	k2_version_file = os.path.join(config.SRC_DIR, "k2", "k2_version.h")
	write_k2_version(k2_version_file, version_info)
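For illustration only: assuming get_file_version(svn_revision) returned the hypothetical value 1,0,0,4321 and util.get_new_version() returned 1.0.0.4321, the k2_version.h written by this function would read:

#ifndef K2_VERSION_H_
#define K2_VERSION_H_
#define FILE_VERSION  1,0,0,4321
#define STR_FILE_VERSION  "1.0.0.4321"
#define PRODUCT_VERSION  1,0,0,4321
#define STR_PRODUCT_VERSION  "1.0.0.4321"
#define COMPANY_NAME  "S2 Games"
#define LEGAL_COPY_RIGTE  "Copyright (C) 2013 S2 Games"
#define PRODUCT_NAME  "Strife"
#endif  //! #ifndef K2_VERSION_H_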
Example #4
def giving_up(removed_releases, fileid):
	if not removed_releases:
		util.report("No possible releases left")
		return
	util.report("Possible releases:")
	for releaseid in removed_releases:
		# If this release only has one track that we found,
		# and we have other possible releases, ignore this one.
		#
		# TODO: Actually, we should just display the top 2
		#  releases, by number of tracks found on it.
		if len(removed_releases[releaseid]) < 2 and len(removed_releases) > 1:
			continue
		release = lookups.get_release_by_releaseid(releaseid)
		util.report("%s - %s (%s.html)" % (
			release.artist.name,
			release.title,
			releaseid))
		for trackind in range(len(release.tracks)):
			if (trackind+1) in removed_releases[releaseid]:
				continue
			util.report(" #%02d %s.html %s" % (
				trackind+1,
				release.tracks[trackind].id,
				release.tracks[trackind].title))
		util.report(" %s" % (
			util.output_list(removed_releases[releaseid].keys())
			))
Example #5
  def update(self):
    """ Updates a running instance of the component. Returns True on success and False
        otherwise.
    """
    self.logger.debug('Updating component %s', self.getName())
    client = getDockerClient()

    # Get the list of currently running container(s).
    existing_containers = self.getAllContainers(client)
    existing_primary = self.getPrimaryContainer()

    # Start the new instance.
    container = self.start()
    if not container:
      return False

    # Mark all the existing containers as draining.
    for existing in existing_containers:
      setContainerStatus(existing, 'draining')

    # Update the port proxy to redirect the external ports to the new
    # container.
    report('Redirecting traffic to new container', component=self)
    self.manager.adjustForUpdatingComponent(self, container)

    # Signal the existing primary container to terminate
    if existing_primary is not None:
      self.manager.terminateContainer(existing_primary, self)

    return True
Example #6
    def set_environment(self, env=None, vs_tools=None, arch=None):
        self.report_build_step('set-environment')
        try:
            new_env = {
                'TERM': 'dumb',
            }
            if os.name == 'nt':
                if vs_tools is None:
                    vs_tools = os.path.expandvars('%VS140COMNTOOLS%')
                if arch is None:
                    arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()
                else:
                    arch = arch.lower()
                vcvars_path = pjoin(
                    vs_tools, '..', '..', 'VC', 'vcvarsall.bat')
                cmd = util.shquote_cmd([vcvars_path, arch]) + ' && set'
                output = subprocess.check_output(cmd, shell=True)
                for line in output.splitlines():
                    var, val = line.split('=', 1)
                    new_env[var] = val

            if env is not None:
                new_env.update(env)

            for (var, val) in new_env.items():
                os.environ[var] = val

            for var in sorted(os.environ.keys()):
                util.report('%s=%s' % (var, os.environ[var]))
        except:
            self.report_step_exception()
            raise
Example #7
def exploit(request):
    if not request.url.params:
        return
    for param in request.url.params:  # GET parameters only
        for poc in SQL_POCS:
            injectable = False
            req_tmp = request.copy()
            req_tmp.__class__ = GetRequest
            req_tmp.setParam(param, req_tmp.getParam(param) + poc)
            for (dbms, regex) in ((dbms, regex) for dbms in SQLI_ERROR_SINGS for regex in SQLI_ERROR_SINGS[dbms]):
                if re.search(regex, req_tmp.fetch(), re.I):
                    # print "%s" % req_tmp
                    util.report({"type":"sqli", "content":util.json_encode({"sqli_type":"%s Error Based" % dbms, "param":param, "detail":"%s" % req_tmp})})
                    return
        for prefix, suffix in itertools.product(SQL_PREFIXES, SQL_SUFFIXES):
            poc1 = "%s AND 1=1 %s" % (prefix, suffix)
            poc2 = "%s AND 1=2 %s" % (prefix, suffix)
            req_tmp1 = request.copy()
            req_tmp1.__class__ = GetRequest
            req_tmp1.setParam(param, req_tmp1.getParam(param) + poc1)
            req_tmp2 = request.copy()
            req_tmp2.__class__ = GetRequest
            req_tmp2.setParam(param, req_tmp2.getParam(param) + poc2)
            if len(req_tmp1.fetch()) != len(req_tmp2.fetch()):
                util.report({"type":"sqli", "content":util.json_encode({"sqli_type":"UNION query", "param":param, "detail":"%s" % req_tmp2})})
                # print "UNION SQLI:param %s %s" % (param, req_tmp2)
                return
Example #8
def run_many(tests):
    global NWORKERS, pool
    start = time.time()
    total = 0
    failed = {}

    tests = list(tests)
    NWORKERS = min(len(tests), NWORKERS)
    pool = Pool(NWORKERS)
    util.BUFFER_OUTPUT = NWORKERS > 1

    def run_one(name, cmd, **kwargs):
        result = util.run(cmd, **kwargs)
        if result:
            # the tests containing AssertionError might have failed because
            # we spawned more workers than CPUs
            # we therefore will retry them sequentially
            failed[name] = [cmd, kwargs, 'AssertionError' in (result.output or '')]

    if NWORKERS > 1:
        gevent.spawn(info)

    try:
        try:
            for name, cmd, options in tests:
                total += 1
                spawn(run_one, name, cmd, **options).name = ' '.join(cmd)
            gevent.run()
        except KeyboardInterrupt:
            try:
                if pool:
                    util.log('Waiting for currently running to finish...')
                    pool.join()
            except KeyboardInterrupt:
                util.report(total, failed, exit=False, took=time.time() - start)
                util.log('(partial results)\n')
                raise
    except:
        pool.kill()  # this needed to kill the processes
        raise

    toretry = [key for (key, (cmd, kwargs, can_retry)) in failed.items() if can_retry]
    failed_then_succeeded = []

    if NWORKERS > 1 and toretry:
        util.log('\nWill re-try %s failed tests without concurrency:\n- %s\n', len(toretry), '\n- '.join(toretry))
        for name, (cmd, kwargs, _ignore) in failed.items():
            if not util.run(cmd, buffer_output=False, **kwargs):
                failed.pop(name)
                failed_then_succeeded.append(name)

    util.report(total, failed, took=time.time() - start)

    if failed_then_succeeded:
        util.log('\n%s tests failed during concurrent run but succeeded when ran sequentially:', len(failed_then_succeeded))
        util.log('- ' + '\n- '.join(failed_then_succeeded))
    assert not pool, pool

    os.system('rm -f */@test*_tmp')
Example #9
def sign_setup():
	util.report("Begin Signature Setup.....")
	setup_name = "Strife-{version}-setup.exe".format(version=util.get_new_version())
	unsign_names = [setup_name]
	sign_remote_dir = util.get_new_version() + '_setup_final'
	sign_src_dir = config.PACKAGE_OUT
	waitsign.Sign(sign_src_dir, sign_remote_dir, unsign_names)
	util.report("End Signature Setup.....")
Example #10
def update_on_image(component):
  running_image = component.getPrimaryContainerImageId()
  latest_image = component.getImageId()

  if running_image != latest_image:
    report('Newer Image found for component ' + component.getName())
    return ACTIONS['update'](component)
  return True
Example #11
def make_installer(version):
    set_nis_info(config.NSI_SCRIPT, version)
    cmd = '{nsi_tool} /X"SetCompressor  /FINAL lzma" {nsi_script}'.format(
        nsi_tool=config.NSI_TOOL, nsi_script=config.NSI_SCRIPT
    )

    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while p.poll() is None:
        util.report(p.stdout.readline())
Example #12
 def setRetarded(self, imaginaryOffset):
     assert self.partitionFunction is not None and self.lehmannNominators is not None and self.lehmannDenominators is not None, 'Partition Function and Lehmann terms have to be set in advance.'
     report('Calculating one-particle Green\'s function(retarded)...', self.verbose)
     t0 = time()
     if self.species == 'fermionic':
         self.retardedData.update(lehmannSumDynamic(self.lehmannNominators, self.lehmannDenominators, self.partitionFunction, self.mesh, self.zeroFrequencyTerms, imaginaryOffset, [+1, +1]))
     elif self.species == 'bosonic':
         self.retardedData.update(lehmannSumDynamic(self.lehmannNominators, self.lehmannDenominators, self.partitionFunction, self.mesh, self.zeroFrequencyTerms, imaginaryOffset, [+1, -1]))
     report('took '+str(time()-t0)[:4]+' seconds', self.verbose)
Example #13
 def setMatsubara(self):
     assert self.partitionFunction is not None and self.lehmannNominators is not None and self.lehmannDenominators is not None, 'Partition Function and Lehmann terms have to be set in advance.'
     report('Calculating one-particle Green\'s function(Matsubara)...', self.verbose)
     t0 = time()
     if self.species == 'fermionic':
         self.matsubaraData.update(lehmannSumDynamic(self.lehmannNominators, self.lehmannDenominators, self.partitionFunction, self.matsubaraMesh, self.zeroFrequencyTerms, 0, [+1, +1]))
     if self.species == 'bosonic':
         self.matsubaraData.update(lehmannSumDynamic(self.lehmannNominators, self.lehmannDenominators, self.partitionFunction, self.matsubaraMesh, self.zeroFrequencyTerms, 0, [+1, -1]))
     report('took '+str(time()-t0)[:4]+' seconds', self.verbose)
Example #14
def run_many(tests, expected=None, failfast=False):
    global NWORKERS, pool
    start = time()
    total = 0
    failed = {}

    NWORKERS = min(len(tests), NWORKERS)
    pool = Pool(NWORKERS)
    util.BUFFER_OUTPUT = NWORKERS > 1

    def run_one(cmd, **kwargs):
        result = util.run(cmd, **kwargs)
        if result:
            if failfast:
                sys.exit(1)
            # the tests containing AssertionError might have failed because
            # we spawned more workers than CPUs
            # we therefore will retry them sequentially
            failed[result.name] = [cmd, kwargs, 'AssertionError' in (result.output or '')]

    try:
        try:
            for cmd, options in tests:
                total += 1
                spawn(run_one, cmd, **(options or {}))
            gevent.wait()
        except KeyboardInterrupt:
            try:
                if pool:
                    util.log('Waiting for currently running to finish...')
                    pool.join()
            except KeyboardInterrupt:
                util.report(total, failed, exit=False, took=time() - start, expected=expected)
                util.log('(partial results)\n')
                raise
    except:
        traceback.print_exc()
        pool.kill()  # this needed to kill the processes
        raise

    toretry = [key for (key, (cmd, kwargs, can_retry)) in failed.items() if can_retry]
    failed_then_succeeded = []

    if NWORKERS > 1 and toretry:
        util.log('\nWill retry %s failed tests without concurrency:\n- %s\n', len(toretry), '\n- '.join(toretry))
        for name, (cmd, kwargs, _ignore) in failed.items():
            if not util.run(cmd, buffer_output=False, **kwargs):
                failed.pop(name)
                failed_then_succeeded.append(name)

    if failed_then_succeeded:
        util.log('\n%s tests failed during concurrent run but succeeded when ran sequentially:', len(failed_then_succeeded))
        util.log('- ' + '\n- '.join(failed_then_succeeded))

    util.log('gevent version %s from %s', gevent.__version__, gevent.__file__)
    util.report(total, failed, took=time() - start, expected=expected)
    assert not pool, pool
Example #15
  def killComponents(self, componentNames):
    """ Tells all the given components on all systems to die. """
    self.initialize(componentNames)

    report('Marking components as killed', project = self.project_name)
    for component in self.components:
      report('Marking component as killed', project = self.project_name, component = component,
        level = ReportLevels.EXTRA)
      state = ComponentState(self.project_name, component, self.etcd_client)
      state.setStatus(KILLED_STATUS)
Example #16
 def fillingFunction(muTrial):
     self.hamiltonian.matrix = self.hamiltonian.matrix - muTrial * nMatrix
     self.calcEigensystem()
     self.calcPartitionFunction()
     self.calcOccupation()
     fillingTrial = self.getTotalOccupation()
     self.hamiltonian.matrix = self.hamiltonian.matrix + muTrial * nMatrix
     report('Filling(mu='+str(muTrial)+') = '+str(fillingTrial), self.verbose)
     self.filling = fillingTrial
     return fillingTrial - filling
Example #17
def commit_resource():
	old_dir = os.getcwd()
	resource_dir = os.path.abspath(os.path.join(config.ROOT_DIR, "StrifeServer"))
	os.chdir(resource_dir)
	print(os.getcwd())
	cmd = '''svn commit -m "server resource"'''
	p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
	while p.poll() is None:
		util.report(p.stdout.readline())
	os.chdir(old_dir)
Example #18
  def handleKilled(self, was_initial_check):
    """ Handles when the component has been marked to be killed. """
    self.monitor_event.clear()

    if was_initial_check:
      report('Component %s is marked as killed' % self.component.getName(),
             project=self.project_name, component=self.component)

    self.is_running = False
    self.component.stop(kill=True)
    return CHECK_SLEEP_TIME
Example #19
 def setMu(self, mu):
     c = AnnihilationOperator(self.singleParticleBasis)
     nMatrix = nsum([c[orb].H.dot(c[orb]) for orb in self.orderedSingleParticleStates], axis = 0)
     self.hamiltonian.matrix = self.hamiltonian.matrix + self.mu * nMatrix
     self.mu = mu
     self.hamiltonian.matrix = self.hamiltonian.matrix - mu * nMatrix
     self.energyEigenvalues = None
     self.energyEigenstates = None
     self.partitionFunction = None
     self.occupation = dict()
     report('Chemical potential set to '+str(mu), self.verbose)
Example #20
  def markUpdated(self, componentNames):
    """ Tells all the given components to update themselves. """
    self.initialize(componentNames)

    report('Updating the image IDs on components', project = self.project_name)
    for component in self.components:
      image_id = component.getImageId()
      state = ComponentState(self.project_name, component, self.etcd_client)

      report('Component ' + component.getName() + ' -> ' + image_id[0:12], project = self.project_name,
        component = component)
      state.setReadyStatus(image_id)      
Example #21
def run():
  #setup logging
  logging.basicConfig(level=logging.DEBUG)
  
  # Setup the gantry arguments
  parser = argparse.ArgumentParser(description='gantry continuous deployment system')
  parser.add_argument('config_file', help='The configuration file')
  parser.add_argument('action', help='The action to perform', choices=ACTIONS.keys())
  parser.add_argument('component_name', help='The name of the component to manage')
  parser.add_argument('-m', dest='monitor', action='store_true', help='If specified and the action is "start" or "update", gantry will remain running to monitor components, auto restarting them as necessary')
  parser.add_argument('--setconfig', dest='config_overrides', action='append', help='Configuration overrides for the component')

  args = parser.parse_args()
  component_name = args.component_name
  action = args.action
  should_monitor = args.monitor
  config_file = args.config_file
  config_overrides = args.config_overrides

  # Load the config.
  config = loadConfig(config_file)
  if not config:
    return

  # Create the manager.
  manager = RuntimeManager(config)

  # Find the component
  component = manager.getComponent(component_name)
  if not component:
    raise Exception('Unknown component: ' + component_name)
    
  # Apply the config overrides (if any).
  if config_overrides:
    component.applyConfigOverrides(config_overrides)

  # Run the action with the component and config.
  result = ACTIONS[action](component)
  if result and should_monitor:
    try:
      report('Starting monitoring of component: ' + component_name)
      monitor(component)
    except KeyboardInterrupt:
      report('Terminating monitoring of component: ' + component_name)

  def cleanup_monitor(signum, frame):
    manager.join()

  # Set the signal handler and a 5-second alarm
  signal.signal(signal.SIGINT, cleanup_monitor)

  # We may have to call cleanup manually if we weren't asked to monitor
  cleanup_monitor(None, None)
Example #22
 def calcOccupation(self, singleParticleState = None):
     c = AnnihilationOperator(self.singleParticleBasis)
     if singleParticleState is None:
         states = self.orderedSingleParticleStates
     else:
         states = [singleParticleState]
     report('Calculating Occupation...', self.verbose)
     t0 = time()
     for state in states:
         n_state = c[state].H.dot(c[state])
         self.occupation.addOperator(state, n_state)
     self.setLehmannSumStatic(self.occupation)
     report('took '+str(time()-t0)[:4]+' seconds', self.verbose)
Example #23
 def run(self, container, report):
   container_port = self.config.getExtraField('port')
   local_port = self.getLocalPort(container, container_port)
   
   report('Checking TCP port in container ' + container['Id'][0:12] + ': ' + str(local_port),
     level = ReportLevels.EXTRA)
   try:
     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     s.connect(('127.0.0.1', local_port))
     s.close()
   except:
     return False
     
   return True
Example #24
 def calcG1(self, singleParticleStatePairs = None):
     c = AnnihilationOperator(self.singleParticleBasis)
     if singleParticleStatePairs is None:
         statePairs = [(state1, state2) for state1, state2 in product(self.orderedSingleParticleStates, self.orderedSingleParticleStates)]
     else:
         statePairs = singleParticleStatePairs
     report('Calculating one-particle Green\'s function transition elements and energies...', self.verbose)
     t0 = time()
     for statePair in statePairs:
         c_state = c[statePair[0]]
         c_state_dag = c[statePair[1]].H
         self.g1.addOperatorPair(statePair, (c_state, c_state_dag))
     self.setLehmannTermsDynamic(self.g1)
     report('took '+str(time()-t0)[:4]+' seconds', self.verbose)
Example #25
    def handleStopped(self, was_initial_check):
        """ Handles when the component has been marked to be stopped. """
        self.monitor_event.clear()

        if was_initial_check:
            report(
                "Component " + self.component.getName() + " is marked as stopped",
                project=self.project_name,
                component=self.component,
            )

        self.is_running = False
        self.component.stop(kill=False)
        return CHECK_SLEEP_TIME
Example #26
    def isHealthy(self):
        """ Runs the health checks on this component's container, ensuring that it is healthy.
        Returns True if healthy and False otherwise.
    """
        self.logger.debug("Checking if component %s is healthy...", self.getName())
        container = self.getPrimaryContainer()
        if not container:
            self.logger.debug("No container running for component %s", self.getName())
            return False

        checks = []
        for check in self.config.health_checks:
            checks.append((check, buildHealthCheck(check)))

        for (config, check) in checks:
            report("Config: ")
            report(config)
            report("Running health check: " + config.getTitle(), component=self)
            result = check.run(container, report)
            if not result:
                report("Health check failed", component=self)
                return False

        self.logger.debug("Component %s is healthy", self.getName())
        return True
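buildHealthCheck and the individual check classes are not shown in these examples; from the TCP check in Example #23 and the call check.run(container, report), each check apparently exposes a run(container, report) method returning a boolean and receives its configuration via self.config. A minimal hypothetical check following that shape (the real gantry check classes may differ):

# Hypothetical check that follows the run(container, report) -> bool interface
# used by isHealthy() above.
class AlwaysHealthyCheck(object):
  def __init__(self, config):
    self.config = config

  def run(self, container, report):
    report('Skipping real checks for container ' + container['Id'][0:12])
    return True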
Example #27
  def updateProxy(self):
    """ Updates the proxy used for port mapping to conform to the current running container
        list.
    """
    client = getDockerClient()
    
    # Clear all routes in the proxy.
    # TODO: When this is in daemon mode, we don't need to do this. We could
    # selectively edit it instead.
    self.proxy.clear_routes()
    
    # Add routes for the non-draining containers and collect the draining containers to
    # watch.
    report('Finding running containers...', level = ReportLevels.EXTRA)
    draining_containers = []
    starting_containers = []
    
    for component in self.components.values():
      for container in component.getAllContainers(client):
        if getContainerStatus(container) != 'draining':
          starting_containers.append(container)
          for mapping in component.config.ports:
            local_port = containerutil.getLocalPort(client, container, mapping.container)
            route = Route(mapping.kind == 'http', mapping.external, 'localhost', local_port)
            self.proxy.add_route(route)
        else:
          draining_containers.append(container)

    # Commit the changes to the proxy.
    if draining_containers or starting_containers:
      report('Updating proxy...', level = ReportLevels.EXTRA)
      self.proxy.commit()
    else:
      report('Shutting down proxy...', level = ReportLevels.EXTRA)
      self.proxy.shutdown()
    
    # Mark the starting containers as running.
    for container in starting_containers:
      setContainerStatus(container, 'running')

    if draining_containers:
      report('Starting monitoring...', level = ReportLevels.EXTRA)
    
    # If there are any draining containers, add them to the watcher thread.
    with self.watcher_lock:
      self.containers_watched.extend(draining_containers)
    
    # Call the event to wakeup the watcher thread.
    if draining_containers:
      self.watcher_event.set()    
    
    # If in local mode, then wait until all the containers have drained. This
    # prevents the python debugger from shutting down, since the other threads
    # are all daemon threads.
    if not self.daemon_mode and draining_containers:
      while True:
        self.watcher_thread.join(10)
        if not self.watcher_thread.isAlive():
          break
Example #28
def make_uninstaller(version):
	set_nis_info(config.NSI_SCRIPT_UNINSTALL, version)
	cmd = '{nsi_tool} /X"SetCompressor  /FINAL lzma" {nsi_script}'.format(nsi_tool=config.NSI_TOOL, nsi_script=config.NSI_SCRIPT_UNINSTALL)

	p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
	while p.poll() is None:
		util.report(p.stdout.readline())

	make_uninstaller = os.path.join(config.PACKAGE_OUT, "make_uninstaller.exe")
	p = subprocess.Popen(make_uninstaller, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
	p.wait()

	src = os.path.join(config.PACKAGE_OUT, config.UNINSTALL_NAME)
	dst_dir = os.path.join(config.PACKAGE_OUT, "bin")
	shutil.copy(src, dst_dir)
Example #29
def generate_from_metadata(file):
	"""Return track ids by looking up the name on MusicBrainz

	Args:
		file: The file containing the track in question.
	
	Yields:
		A set of track_id, by querying based on metadata tags
	"""
	trackid=file.getMDTrackID()
	if trackid:
		try:
			yield lookups.get_track_by_id(trackid)
		except Exception as e:
			util.report("WARNING: Unexpected exception when looking up mbtrackid %s: %s" % (trackid, e))
Example #30
 def __init__(self, t, u, siteSpaceTransformation = None, transformationLabels = None, verbose = False):
     self.verbose = verbose
     self.t = array(t)
     self.u = u
     self.spins = ['up', 'dn']
     self.sites = range(len(t))
     self.siteSpaceTransformation = siteSpaceTransformation
     report('Setting up the Hubbard Hamiltonian...', self.verbose)
     t0 = time()
     hubbardMatrix = setHubbardMatrix(self.t, self.u, self.spins, self.sites, siteSpaceTransformation)
     report('took '+str(time()-t0)[:4]+' seconds', self.verbose)
     if transformationLabels is None:
         orbitals = self.sites
     else:
         orbitals = transformationLabels
     Hamiltonian.__init__(self, [self.spins, orbitals], hubbardMatrix, self.verbose)
Example #31
def check_src(language):
    '''
    Check external source files referenced in title attributes of code blocks.
    '''
    prefix_len = len(SOURCE_DIR + '/')

    def _unprefix(filename):
        return filename[prefix_len:]

    content = get_all_docs(language, remove_code_blocks=False)
    referenced = match_body(content, r'{:\s+title="([^"]+)\s*"[^}]*}') | \
        match_body(content, r'<!--\s+used="([^"]+)"\s+-->')
    actual = {
        _unprefix(filename)
        for filename in glob.iglob('{}/**/*.*'.format(SOURCE_DIR),
                                   recursive=True)
        if not _ignore_file(filename)
    }
    report('Source Files', 'unused', actual - referenced)
    report('Source Files', 'missing', referenced - actual)
Example #32
    def set_environment(self, env=None, vs_tools=None, arch=None):
        self.report_build_step('set-environment')
        try:
            new_env = {
                'TERM': 'dumb',
            }
            if os.name == 'nt':
                new_env.update(get_vcvars(vs_tools, arch))

            if env is not None:
                new_env.update(env)

            for (var, val) in new_env.items():
                os.environ[var] = val

            for var in sorted(os.environ.keys()):
                util.report('%s=%s' % (var, os.environ[var]))
        except Exception as e:
            self.report_step_exception(e)
            raise
Example #33
  def stop(self, kill=False):
    """ Stops all containers for this component. """
    if not self.isRunning():
      return

    self.logger.debug('Stopping component %s', self.getName())
    client = getDockerClient()

    # Mark all the containers as draining.
    report('Draining all containers...', component=self)
    self.elbManager.deregisterAllContainers()
    for container in self.getAllContainers(client):
      setContainerStatus(container, 'draining')
      self.manager.terminateContainer(container, self)

    # Kill any associated containers if asked.
    if kill:
      for container in self.getAllContainers(client):
        report('Killing container ' + container['Id'][:12], component=self)
        client.kill(container)
        removeContainerMetadata(container)
Example #34
def exploit(request):
    if not request.url.params and not request.fields:
        return
    if isinstance(request, GetRequest):
        for param, poc in itertools.product(request.url.params, XSS_POCS):
            req_tmp = request.copy()
            req_tmp.__class__ = GetRequest
            req_tmp.setParam(param, req_tmp.getParam(param)+poc)
            if poc in req_tmp.fetch():
                #print  "xss vulnerability param:%s info:%s" % (param, req_tmp)
                util.report({"type":"xss","content":util.json_encode({"xss_type":"GET","param":param,"detail":"%s" % req_tmp})})
                break
    else:
        for field, poc in itertools.product(request.fields, XSS_POCS):
            req_tmp = request.copy()
            req_tmp.__class__ = PostRequest
            req_tmp.setField(field, req_tmp.getField(field)+poc)
            if poc in req_tmp.fetch():
                #print  "xss vulnerability param:%s info:%s" % (field, req_tmp)
                util.report({"type":"xss","content":util.json_encode({"xss_type":"POST","field":field,"detail":"%s" % req_tmp})})
                break
Example #35
def check_figures(language):
    '''
    Check included figures.
    '''
    def _ignore(filename):
        return filename.startswith('.') or \
            filename.endswith('.odg') or \
            filename.endswith('.pdf') or \
            filename.endswith('.xml')

    def _redundant(filename, defined):
        return filename.endswith('.png') and \
            filename.replace('.png', '.svg') in defined

    content = get_all_docs(language)
    used = _match_lines(
        content, r'{%\s+include\s+figure.html[^%]+src=".+/figures/([^"]+)"')
    defined = {f for f in os.listdir(FIGURE_DIR) if not _ignore(f)}
    defined -= {f for f in defined if _redundant(f, defined)}
    report('Figures', 'unused', defined - used)
    report('Figures', 'missing', used - defined)
Example #36
  def start(self):
    """ Starts a new instance of the component. Note that this does *not* update the proxy. """
    client = getDockerClient()
    self.logger.debug('Starting container for component %s', self.getName())

    # Ensure that we have the image. If not, we try to download it.
    self.ensureImage(client)

    # Start the instance with the proper image ID.
    container = self.createContainer(client)
    report('Starting container ' + container['Id'][:12], component=self)

    if self.config.privileged:
      report('Container will be run in privileged mode', component=self)

    client.start(container=container.get('Id'))

    # Health check until the instance is ready.
    report('Waiting for health checks...', component=self)

    # Start a health check thread to determine when the component is ready.
    timeout = self.config.getReadyCheckTimeout()
    readycheck_thread = Thread(target=self.readyCheck, args=[container, timeout])
    readycheck_thread.daemon = True
    readycheck_thread.start()

    # Wait for the health thread to finish.
    readycheck_thread.join(self.config.getReadyCheckTimeout())

    # If the thread is still alive, then our join timed out.
    if readycheck_thread.isAlive():
      report('Timed out waiting for health checks. Stopping container...', component=self)
      client.stop(container)
      report('Container stopped', component=self)
      return None

    # Otherwise, the container is ready. Set it as starting.
    setContainerComponent(container, self.getName())
    setContainerStatus(container, 'starting')
    return container
Example #37
def scanner():
  info = {}
  info['app'] = []
  info['web'] = []

  for port in g.O['web-ports']:
    data = scan_web_server(g.O['target'], int(port))
    if data is not None:
      util.report({"type":"sys_info", "content":util.json_encode(data)})
      info['web'].append(data)

  for port in g.O['app-ports']:
    # print port
    data = scan_app_service(g.O['target'], int(port))
    # print type(data)
    if data is not None:
      # print encode_dict(data)
      util.report({"type":"sys_info", "content":util.json_encode(data)})
      info['app'].append(data)

  return info
Example #38
  def isHealthy(self):
    """ Runs the health checks on this component's container, ensuring that it is healthy.
        Returns True if healthy and False otherwise.
    """
    self.logger.debug('Checking if component %s is healthy...', self.getName())
    container = self.getPrimaryContainer()
    if not container:
      self.logger.debug('No container running for component %s', self.getName())
      return False

    checks = []
    for check in self.config.health_checks:
      checks.append((check, buildHealthCheck(check)))

    for (config, check) in checks:
      report('Running health check: ' + config.getTitle(), component=self)
      result = check.run(container, report)
      if not result:
        report('Health check failed', component=self)
        return False

    self.logger.debug('Component %s is healthy', self.getName())
    return True
Example #39
def commit_files():
    old_dir = os.getcwd()
    resource_dir = config.SYNC_SVN_DIR
    os.chdir(resource_dir)
    add_cmd = "svn add * --force"

    p = subprocess.Popen(add_cmd,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    while p.poll() is None:
        util.report(p.stdout.readline())

    commit_cmd = '''svn commit -m "strife update files sync"'''

    p = subprocess.Popen(commit_cmd,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    print(commit_cmd)
    while p.poll() is None:
        util.report(p.stdout.readline())
    os.chdir(old_dir)
Example #40
    def update_sources(self, source_dir, projects, revision, svn='svn'):
        self.report_build_step('update-sources')
        self.halt_on_failure()

        # TODO: This needs to be updated to use the monorepo.
        # Where to check the project out relative to an LLVM checkout.
        checkout_locations = {
            'llvm': '',
            'clang': 'tools/clang',
            'lld': 'tools/lld',
            'compiler-rt': 'projects/compiler-rt',
            'debuginfo-tests': 'projects/debuginfo-tests',
        }
        # If the project is named differently in svn, put it here.
        svn_locations = {'clang': 'cfe'}
        svn_uri_pattern = 'https://llvm.org/svn/llvm-project/%s/trunk'

        for project in projects:
            # TODO: Fail the build and report an error if we don't know the
            # checkout location.
            path = checkout_locations[project]
            if not path:
                path = source_dir
            elif not os.path.isabs(path):
                path = pjoin(source_dir, path)
            uri = svn_uri_pattern % (svn_locations.get(project, project), )
            util.report("Updating %s to %s at %s from %s" %
                        (project, revision, util.shquote(path), uri))
            if os.path.exists(pjoin(path, '.svn')):
                util.report("Cleaning up in case of svn errors...")
                util.report_run_cmd([svn, 'cleanup'], cwd=path)
                cmd = [svn, 'up', '-r', revision]
            else:
                util.mkdirp(path)
                cmd = [svn, 'co', '-r', revision, uri, '.']
            util.report_run_cmd(cmd, cwd=path)
Example #41
    def report_step_exception(self, exn=None):
        # Don't print a stack trace if a command ('ninja check') exited with a
        # non-zero exit code. That is non-exceptional expected behavior, so just
        # print the return code and fail the step.
        if exn and isinstance(exn, subprocess.CalledProcessError):
            cmd = ""
            try:
                cmd = repr(exn.cmd[0])
            except:
                pass
            util.report("Command " + cmd + " failed with return code " +
                        str(exn.returncode))
            util.report('@@@STEP_FAILURE@@@')
            return

        if exn:
            util.report(str(exn))
        util.report('@@@STEP_EXCEPTION@@@')
Example #42
def get_vcvars(vs_tools, arch):
    """Get the VC tools environment using vswhere.exe or buildtools docker

    This is intended to work either when VS is in its standard installation
    location, or when the docker instructions have been followed, and we can
    find Visual C++ in C:/BuildTools.

    Visual Studio provides a docker image with instructions here:
    https://docs.microsoft.com/en-us/visualstudio/install/build-tools-container?view=vs-2019

    This vswhere code is following the guidelines from strategy 1 in this blog
    post:
        https://blogs.msdn.microsoft.com/vcblog/2017/03/06/finding-the-visual-c-compiler-tools-in-visual-studio-2017/

    It doesn't work when VS is not installed at the default location.
    """
    if not arch:
        # First check the wow64 processor architecture, since python is probably
        # 32-bit, then fall back to PROCESSOR_ARCHITECTURE.
        arch = os.environ.get('PROCESSOR_ARCHITEW6432', '').lower()
        if not arch:
            arch = os.environ.get('PROCESSOR_ARCHITECTURE', '').lower()
    else:
        arch = arch.lower()

    # Use vswhere.exe if it exists.
    if os.path.exists(VSWHERE_PATH):
        cmd = [VSWHERE_PATH, "-latest", "-property", "installationPath"]
        vs_path = subprocess.check_output(cmd).decode(sys.stdout.encoding)
        vs_path = vs_path.strip()
        util.report("Running vswhere to find VS: " + repr(cmd))
        util.report("vswhere output: " + vs_path)
        if not os.path.isdir(vs_path):
            raise ValueError("VS install path does not exist: " + vs_path)
        vcvars_path = pjoin(vs_path, 'VC', 'Auxiliary', 'Build',
                            'vcvarsall.bat')
    elif os.path.exists(BUILDTOOLS_VSDEVCMD):
        vcvars_path = BUILDTOOLS_VSDEVCMD
    else:
        if vs_tools is None:
            vs_tools = os.path.expandvars('%VS140COMNTOOLS%')
        vcvars_path = pjoin(vs_tools, '..', '..', 'VC', 'vcvarsall.bat')

    # Newer vcvarsall.bat scripts aren't quiet, so direct them to NUL, aka
    # Windows /dev/null.
    cmd = util.shquote_cmd([vcvars_path, arch]) + ' > NUL && set'
    util.report("Running vcvars: " + cmd)
    output = \
        subprocess.check_output(cmd, shell=True).decode(sys.stdout.encoding)
    new_env = {}
    for line in output.splitlines():
        var, val = line.split('=', 1)
        new_env[var] = val
    return new_env
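The dict returned by get_vcvars is meant to be merged into the process environment before running build commands, which is what set_environment in Example #32 does. A hedged usage sketch; 'amd64' is just an illustrative architecture argument accepted by vcvarsall.bat:

# Illustrative only; mirrors how set_environment() applies the result.
if os.name == 'nt':
    for var, val in get_vcvars(vs_tools=None, arch='amd64').items():
        os.environ[var] = val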
Example #43
def check_gloss(language):
    '''
    Check for unused and undefined glossary entries and alphabetical order.
    '''
    content = get_all_docs(language)

    used = match_body(content, r'\[.+?\]\(#(g:.+?)\)')
    defined = _match_lines(content, r'\*\*.+?\*\*{:#(g:.+?)}')
    report('Glossary Entries', 'unused', defined - used)
    report('Glossary Entries', 'missing', used - defined)

    keys = _get_lines(content, r'\*\*(.+?)\*\*{:#g:.+?}')
    report('Glossary Entries', 'out of order', _out_of_order(keys))
Example #44
def check_cites(language):
    '''
    Check for unused and undefined citations and for bibliography order.
    '''
    key_pat = r'{:#b:([^}]+)}'
    content = get_all_docs(language)

    used = _match_lines(content, r'\[([^\]]+)\]\(#BIB\)', splitter=',')
    defined = _match_lines(content, key_pat)
    report('Citations', 'unused', defined - used)
    report('Citations', 'undefined', used - defined)

    keys = _get_lines(content, key_pat)
    report('Citations', 'out of order', _out_of_order(keys))
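_out_of_order is referenced here but not defined in these examples; given how its result is reported, it presumably returns the keys that break the expected alphabetical ordering. A hypothetical sketch:

# Hypothetical helper; the real _out_of_order may differ. Flags every key that
# sorts before the key immediately preceding it.
def _out_of_order(keys):
    return {key for prev, key in zip(keys, keys[1:]) if key < prev}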
Example #45
def monitor(component):
    while True:
        # Sleep for 30 seconds.
        time.sleep(30)

        # Conduct the checks.
        report('Checking in on component ' + component.getName())
        if not component.isHealthy():
            report('Component ' + component.getName() +
                   ' is not healthy. Killing and restarting')
            component.stop(kill=True)
            if not component.update():
                report('Could not restart component ' + component.getName())
                return
Example #46
def check_links(language):
    '''
    Check that external links are defined and used.
    '''
    content = get_all_docs(language)
    used = match_body(content, r'\[.+?\]\[(.+?)\]')
    with open(LINK_FILE, 'r') as reader:
        body = reader.read()
    matches = re.findall(r'^\[(.+?)\]', body, flags=re.DOTALL + re.MULTILINE)
    links = Counter(matches)
    duplicate = {key for key in links if links[key] > 1}
    defined = set(links.keys())
    report('External Links', 'unused', defined - used)
    report('External Links', 'undefined', used - defined)
    report('External Links', 'duplicated', duplicate)
Example #47
  def updateProxy(self):
    """ Updates the proxy used for port mapping to conform to the current running container
        list.
    """
    client = getDockerClient()

    # Clear all routes in the proxy.
    # TODO: When this is in daemon mode, we don't need to do this. We could
    # selectively edit it instead.
    self.proxy.clear_routes()

    # Add routes for the non-draining containers and collect the draining containers to
    # watch.
    report('Finding running containers...', level=ReportLevels.EXTRA)
    draining_containers = []
    starting_containers = []

    for component in self.components.values():
      for container in component.getAllContainers(client):
        if getContainerStatus(container) != 'draining':
          container_ip = containerutil.getContainerIPAddress(client, container)
          starting_containers.append(container)

          # Add the normal exposed ports.
          for mapping in component.config.ports:
            route = Route(mapping.kind == 'http', mapping.external, container_ip,
                          mapping.container)
            self.proxy.add_route(route)

          # Add the container link ports.
          for link in component.config.defined_component_links:
            route = Route(link.kind == 'http', link.getHostPort(), container_ip, link.port)
            self.proxy.add_route(route)
        else:
          draining_containers.append(container)

    # Commit the changes to the proxy.
    if draining_containers or starting_containers:
      report('Updating proxy...', level=ReportLevels.EXTRA)
      self.proxy.commit()
    else:
      report('Shutting down proxy...', level=ReportLevels.EXTRA)
      self.proxy.shutdown()

    # Mark the starting containers as running.
    for container in starting_containers:
      setContainerStatus(container, 'running')
Example #48
    def monitorComponent(self):
        """ Monitors a component by pinging it every MONITOR_SLEEP_TIME seconds or so. If a component
        fails, then the system will try to restart it. If that fails, the component is marked
        as dead.
    """
        while True:
            # Wait for the component to be running.
            self.monitor_event.wait()

            # Sleep MONITOR_SLEEP_TIME seconds.
            time.sleep(MONITOR_SLEEP_TIME)

            # Check the component.
            report('Checking in on component',
                   project=self.project_name,
                   component=self.component,
                   level=ReportLevels.BACKGROUND)

            if not self.component.isHealthy():
                self.logger.debug('Component %s is not healthy',
                                  self.component.getName())
                with self.update_lock:
                    # Just to be sure...
                    if not self.is_running:
                        continue

                    # Ensure that the component is still ready.
                    state = self.state.getState()
                    current_status = ComponentState.getStatusOf(state)
                    if current_status == READY_STATUS:
                        report('Component ' + self.component.getName() +
                               ' is not healthy. Restarting...',
                               project=self.project_name,
                               component=self.component)

                        if not self.component.update():
                            report('Could not restart component ' +
                                   self.component.getName(),
                                   project=self.project_name,
                                   component=self.component,
                                   level=ReportLevels.IMPORTANT)
                            self.monitor_event.clear()
                            continue
Example #49
    def readyCheck(self, container, timeout):
        """ Method which performs ready health check(s) on a container, returning whether
        they succeeded or not.

        container: The container running the component that will be checked.
        timeout: The amount of time after which the checks have timed out.
    """
        self.logger.debug('Checking if component %s is ready...',
                          self.getName())
        checks = []
        for check in self.config.ready_checks:
            checks.append((check, buildHealthCheck(check)))

        start = time.time()
        while True:
            now = time.time()
            if now - start > timeout:
                # Timed out completely.
                self.logger.debug('Component %s ready checks have timed out', self.getName())
                return False

            # Try each check. If any fail, we'll sleep and try again.
            check_failed = None
            for (config, check) in checks:
                report('Running health check: ' + config.getTitle(),
                       component=self)
                result = check.run(container, report)
                if not result:
                    report('Health check failed', component=self)
                    check_failed = config
                    break

            if check_failed:
                report('Sleeping ' + str(check_failed.timeout) +
                       ' second(s)...',
                       component=self)
                time.sleep(check_failed.timeout)
            else:
                break

        return True
Example #50
def get_vcvars(vs_tools, arch):
    """Get the VC tools environment using vswhere.exe from VS 2017

    This code is following the guidelines from strategy 1 in this blog post:
        https://blogs.msdn.microsoft.com/vcblog/2017/03/06/finding-the-visual-c-compiler-tools-in-visual-studio-2017/

    It doesn't work when VS is not installed at the default location.
    """
    if not arch:
        # First check the wow64 processor architecture, since python is probably
        # 32-bit, then fall back to PROCESSOR_ARCHITECTURE.
        arch = os.environ.get('PROCESSOR_ARCHITEW6432', '').lower()
        if not arch:
            arch = os.environ.get('PROCESSOR_ARCHITECTURE', '').lower()
    else:
        arch = arch.lower()

    # Use vswhere.exe if it exists.
    if os.path.exists(VSWHERE_PATH):
        cmd = [VSWHERE_PATH, "-latest", "-property", "installationPath"]
        vs_path = subprocess.check_output(cmd).strip()
        util.report("Running vswhere to find VS: " + repr(cmd))
        util.report("vswhere output: " + vs_path)
        if not os.path.isdir(vs_path):
            raise ValueError("VS install path does not exist: " + vs_path)
        vcvars_path = pjoin(vs_path, 'VC', 'Auxiliary', 'Build',
                            'vcvarsall.bat')
    else:
        if vs_tools is None:
            vs_tools = os.path.expandvars('%VS140COMNTOOLS%')
        vcvars_path = pjoin(vs_tools, '..', '..', 'VC', 'vcvarsall.bat')

    # Newer vcvarsall.bat scripts aren't quiet, so direct them to NUL, aka
    # Windows /dev/null.
    cmd = util.shquote_cmd([vcvars_path, arch]) + ' > NUL && set'
    util.report("Running vcvars: " + cmd)
    output = subprocess.check_output(cmd, shell=True)
    new_env = {}
    for line in output.splitlines():
        var, val = line.split('=', 1)
        new_env[var] = val
    return new_env
Example #51
def run_many(tests, expected=None, failfast=False):
    global NWORKERS, pool
    start = time()
    total = 0
    failed = {}

    NWORKERS = min(len(tests), NWORKERS)
    pool = Pool(NWORKERS)
    util.BUFFER_OUTPUT = NWORKERS > 1

    def run_one(cmd, **kwargs):
        result = util.run(cmd, **kwargs)
        if result:
            if failfast:
                sys.exit(1)
            # the tests containing AssertionError might have failed because
            # we spawned more workers than CPUs
            # we therefore will retry them sequentially
            failed[result.name] = [
                cmd, kwargs, 'AssertionError' in (result.output or '')
            ]

    try:
        try:
            for cmd, options in tests:
                total += 1
                spawn(run_one, cmd, **(options or {}))
            gevent.wait()
        except KeyboardInterrupt:
            try:
                if pool:
                    util.log('Waiting for currently running to finish...')
                    pool.join()
            except KeyboardInterrupt:
                util.report(total,
                            failed,
                            exit=False,
                            took=time() - start,
                            expected=expected)
                util.log('(partial results)\n')
                raise
    except:
        traceback.print_exc()
        pool.kill()  # this needed to kill the processes
        raise

    toretry = [
        key for (key, (cmd, kwargs, can_retry)) in failed.items() if can_retry
    ]
    failed_then_succeeded = []

    if NWORKERS > 1 and toretry:
        util.log('\nWill retry %s failed tests sequentially:\n- %s\n',
                 len(toretry), '\n- '.join(toretry))
        for name, (cmd, kwargs, _ignore) in failed.items():
            if not util.run(cmd, buffer_output=False, **kwargs):
                failed.pop(name)
                failed_then_succeeded.append(name)

    if failed_then_succeeded:
        util.log(
            '\n%s tests failed during concurrent run but succeeded when ran sequentially:',
            len(failed_then_succeeded))
        util.log('- ' + '\n- '.join(failed_then_succeeded))

    util.log('gevent version %s from %s', gevent.__version__, gevent.__file__)
    util.report(total, failed, took=time() - start, expected=expected)
    assert not pool, pool
Example #52
def main(title, filenames):
    text = [open(f, 'r').read() for f in filenames]
    headings = set([x for sublist in text for x in HEADING.findall(sublist)])
    figures = set(['fig:{}'.format(x) for sublist in text for x in FIGURE.findall(sublist)])
    refs = set([r for sublist in text for r in REF.findall(sublist)])
    report('{}: used but not defined'.format(title), refs - (headings | figures))
Example #53
def sign_files():
    util.report("Begin Signature Files.....")
    uninstaller.make_uninstaller(util.get_new_version())
    do_digital_signature()
    post_signature()
    util.report("End Signature Files.....")
Example #54
def main():
    start = time.time()
    total = 0
    failed = {}

    tests = sys.argv[1:]
    if not tests:
        tests = set(glob.glob('test_*.py')) - set(['test_support.py'])
        tests = sorted(tests)

    def run_one(name, cmd, **kwargs):
        result = util.run(cmd, **kwargs)
        if result:
            # the tests containing AssertionError might have failed because
            # we spawned more workers than CPUs
            # we therefore will retry them sequentially
            failed[name] = [
                cmd, kwargs, 'AssertionError' in (result.output or '')
            ]

    if NWORKERS:
        gevent.spawn(info)

    try:
        try:
            for filename in tests:
                total += 1
                if 'TESTRUNNER' in open(filename).read():
                    module = __import__(filename.rsplit('.', 1)[0])
                    for name, cmd, options in module.TESTRUNNER():
                        total += 1
                        name = filename + ' ' + name
                        spawn(run_one, name, cmd,
                              **options).name = ' '.join(cmd)
                else:
                    cmd = [sys.executable, '-u', filename]
                    spawn(run_one, filename, cmd,
                          timeout=TIMEOUT).name = ' '.join(cmd)
            gevent.run()
        except KeyboardInterrupt:
            try:
                if pool:
                    util.log('Waiting for currently running to finish...')
                    pool.join()
            except KeyboardInterrupt:
                util.report(total,
                            failed,
                            exit=False,
                            took=time.time() - start)
                util.log('(partial results)\n')
                raise
    except:
        pool.kill()  # this needed to kill the processes
        raise

    toretry = [
        key for (key, (cmd, kwargs, can_retry)) in failed.items() if can_retry
    ]
    failed_then_succeeded = []

    if NWORKERS > 1 and toretry:
        util.log('\nWill re-try %s failed tests without concurrency:\n- %s\n',
                 len(toretry), '\n- '.join(toretry))
        for name, (cmd, kwargs, _ignore) in failed.items():
            if not util.run(cmd, buffer_output=False, **kwargs):
                failed.pop(name)
                failed_then_succeeded.append(name)

    util.report(total, failed, took=time.time() - start)

    if failed_then_succeeded:
        util.log(
            '\n%s tests failed during concurrent run but succeeded when ran sequentially:',
            len(failed_then_succeeded))
        util.log('- ' + '\n- '.join(failed_then_succeeded))
    assert not pool, pool
Example #55
    def handleReady(self, state, was_initial_check):
        """ Handles when the component has been marked as ready. """

        # If the status is ready, we update the component if:
        #   - The ID of the component's image does not match that found in the status.
        #   - The process is not running.
        imageid = ComponentState.getImageIdOf(state)
        imageid_different = imageid != self.component.getImageId()
        should_update = not self.is_running or imageid_different

        if should_update:
            self.is_running = False
            self.monitor_event.clear()

            # We need to update this machine's copy. First, do a test-and-set to ensure
            # that we are the only machine allowed to update. If the test-and-set fails,
            # we'll try again in CHECK_SHORT_SLEEP_TIME seconds.
            if imageid_different:
                report('Detected pushed update for component ' +
                       self.component.getName(),
                       project=self.project_name,
                       component=self.component)
            else:
                report('Component %s is not running; starting' %
                       self.component.getName(),
                       project=self.project_name,
                       component=self.component)

            result = self.state.setUpdatingStatus('updating', self.machine_id,
                                                  state)
            if not result:
                # The exchange failed. Sleep CHECK_SHORT_SLEEP_TIME seconds and try again.
                report(
                    'Could not grab update lock. Will try again in %s seconds'
                    % CHECK_SHORT_SLEEP_TIME,
                    project=self.project_name,
                    component=self.component)
                return CHECK_SHORT_SLEEP_TIME

            # Start the update by pulling the repo for the component.
            if imageid_different:
                report('Pulling the image for component ' +
                       self.component.getName())
                if not self.component.pullRepo():
                    # The pull failed.
                    report('Pull failed of image %s for component %s' %
                           (imageid[0:12], self.component.getName()),
                           project=self.project_name,
                           component=self.component,
                           level=ReportLevels.IMPORTANT)
                    self.state.setUpdatingStatus('pullfail', self.machine_id,
                                                 result)
                    return CHECK_SLEEP_TIME

            # Run the update on the component and wait for it to finish.
            if imageid_different:
                report('Starting update for component ' +
                       self.component.getName(),
                       project=self.project_name,
                       component=self.component)

            if not self.component.update():
                # The update failed.
                self.state.setUpdatingStatus('updatefail', self.machine_id,
                                             result)
                return CHECK_SLEEP_TIME

            # Otherwise, the update has succeeded. Mark the component as ready, so another
            # gantryd can start its update.
            if imageid_different:
                report('Update completed for component ' +
                       self.component.getName(),
                       project=self.project_name,
                       component=self.component)
            else:
                report('Component ' + self.component.getName() +
                       ' is now running',
                       project=self.project_name,
                       component=self.component)

            self.state.setReadyStatus(self.component.getImageId())
            self.is_running = True
            self.monitor_event.set()

        return CHECK_SLEEP_TIME
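
handleReady() returns the number of seconds the caller should wait before checking again: CHECK_SHORT_SLEEP_TIME when the update lock could not be taken, CHECK_SLEEP_TIME otherwise. A minimal sketch of the polling loop that return contract implies; the loop itself is an assumption, not gantryd's actual driver code:

import time

def poll_component(checker, get_state):
    # Hypothetical driver: fetch the component's published state, let the
    # handler act on it, then sleep for however long it asked.
    was_initial_check = True
    while True:
        sleep_time = checker.handleReady(get_state(), was_initial_check)
        was_initial_check = False
        time.sleep(sleep_time)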
Example #56
0
    def halt_on_failure(self):
        # Tell the buildbot annotator to halt the build if a later step fails.
        util.report('@@@HALT_ON_FAILURE@@@')
Example #57
0
    def run_steps(self,
                  stages=1,
                  check_targets=None,
                  check_stages=None,
                  extra_cmake_args=None,
                  stage1_extra_cmake_args=None,
                  revision=None,
                  compiler='clang',
                  linker='ld.lld',
                  env=None,
                  jobs=None):
        """
        stages: number of stages to run (default: 1)
        check_targets: targets to run during the check phase (default: ['check-all'])
        check_stages: stages for which to run the check phase
            (array of bool, default: all True)
        extra_cmake_args: extra arguments to pass to cmake (default: [])
        stage1_extra_cmake_args: extra arguments to pass to cmake for stage 1
            (default: use extra_cmake_args)
        revision: revision to check out (default: os.environ['BUILDBOT_REVISION'],
            or, if that is unset, the latest revision)
        compiler: compiler to use after stage 1
            ('clang' or 'clang-cl'; default 'clang')
        linker: linker to use after stage 1
            (None to let cmake choose; default 'ld.lld')
        env: environment overrides (map; default is no overrides)
        jobs: number of jobs to run concurrently (default: determine automatically)
        """

        # Set defaults.
        if check_targets is None:
            check_targets = ['check-all']
        if check_stages is None:
            check_stages = [True] * stages
        if extra_cmake_args is None:
            extra_cmake_args = []
        if not revision:
            revision = os.environ.get('BUILDBOT_REVISION')
        if stage1_extra_cmake_args is None:
            stage1_extra_cmake_args = extra_cmake_args

        c_compiler, cxx_compiler = self.compiler_binaries(compiler)

        self.set_environment(env)

        # On Windows, if we're building clang-cl, make sure stage1 is built with
        # MSVC (cl.exe), and not gcc from mingw. CMake will prefer gcc if it is
        # available.
        if c_compiler == 'clang-cl':
            stage1_extra_cmake_args += [
                '-DCMAKE_C_COMPILER=cl', '-DCMAKE_CXX_COMPILER=cl'
            ]

        if not revision:
            cmd = ['svn', 'info', 'https://llvm.org/svn/llvm-project/']
            try:
                # Decode so the regex search below works under Python 3 as well.
                svninfo = subprocess.check_output(cmd).decode('utf-8')
            except subprocess.CalledProcessError as e:
                util.report("Failed to get most recent SVN rev: " + str(e))
                return 1
            m = re.search('Revision: ([0-9]+)', svninfo)
            if m:
                revision = m.group(1)
            else:
                util.report(
                    "Failed to find svn revision in svn info output:\n" +
                    svninfo)
                return 1
        if not revision.isdigit():
            util.report("SVN revision %s is not a positive integer" %
                        (revision, ))
            return 1

        # Update sources.
        cwd = os.getcwd()
        source_dir = pjoin(cwd, 'llvm.src')
        build_dir = pjoin(cwd, 'build')
        svn_uri_pattern = 'https://llvm.org/svn/llvm-project/%s/trunk'
        projects = []
        projects.append(('llvm', None, svn_uri_pattern % ('llvm', )))
        projects.append(('clang', pjoin('tools',
                                        'clang'), svn_uri_pattern % ('cfe', )))
        cmake_args = ['-GNinja']
        for p in ['lld']:
            projects.append((p, pjoin('tools', p), svn_uri_pattern % (p, )))

        self.update_sources(source_dir, projects, revision)

        # Build and check stages.
        self.build_and_check_stages(stages, build_dir, source_dir, cmake_args,
                                    extra_cmake_args, c_compiler, cxx_compiler,
                                    linker, check_stages, check_targets,
                                    stage1_extra_cmake_args, jobs)

        return 0
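
A hedged usage sketch of run_steps() based only on the docstring above; AnnotatedBuilder stands in for whatever class defines the method, and the argument values are illustrative rather than a real bot configuration:

import sys

def main():
    builder = AnnotatedBuilder()                        # assumed class name
    rc = builder.run_steps(
        stages=2,                                       # two-stage bootstrap
        check_targets=['check-llvm', 'check-clang'],
        check_stages=[False, True],                     # only test the final stage
        extra_cmake_args=['-DLLVM_ENABLE_ASSERTIONS=ON'],
        compiler='clang-cl',                            # clang-cl bootstrap on Windows
        jobs=8)
    sys.exit(rc)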
Example #58
0
    def report_step_exception(self, exn=None):
        # Log the exception (if given), then mark the current annotated step
        # as an exception for the buildbot annotator.
        if exn:
            util.report(str(exn))
        util.report('@@@STEP_EXCEPTION@@@')
Example #59
0
    def report_build_step(self, step):
        # Start a new named step in the buildbot annotator's output.
        util.report('@@@BUILD_STEP %s@@@' % (step, ))
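
report_build_step(), halt_on_failure() and report_step_exception() emit buildbot-style annotator markers (@@@BUILD_STEP name@@@, @@@HALT_ON_FAILURE@@@, @@@STEP_EXCEPTION@@@) on stdout, which the annotated-command consumer turns into named build steps. A sketch of how the three helpers are typically combined around one unit of work; the wrapper function, step name and body are placeholders:

def checked_step(builder, name, work):
    # Open a named step, ask the annotator to halt the build on failure,
    # and report an exception marker if the step body raises.
    builder.report_build_step(name)
    builder.halt_on_failure()
    try:
        work()                            # placeholder for the real step body
    except Exception as e:
        builder.report_step_exception(e)  # logs the error, then @@@STEP_EXCEPTION@@@
        raise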
Example #60
0
def run():
    # Setup the gantry arguments
    parser = argparse.ArgumentParser(
        description='gantry continuous deployment system')
    parser.add_argument('config_file', help='The configuration file')
    parser.add_argument('action',
                        help='The action to perform',
                        choices=ACTIONS.keys())
    parser.add_argument('component_name',
                        help='The name of the component to manage')
    parser.add_argument(
        '-m',
        dest='monitor',
        action='store_true',
        help=
        'If specified and the action is "start" or "update", gantry will remain running to monitor components, auto restarting them as necessary'
    )
    parser.add_argument('--setconfig',
                        dest='config_overrides',
                        action='append',
                        help='Configuration overrides for the component')

    args = parser.parse_args()
    component_name = args.component_name
    action = args.action
    should_monitor = args.monitor
    config_file = args.config_file
    config_overrides = args.config_overrides

    # Load the config.
    config = loadConfig(config_file)
    if not config:
        return

    # Create the manager.
    manager = RuntimeManager(config)

    # Find the component
    component = manager.getComponent(component_name)
    if not component:
        raise Exception('Unknown component: ' + component_name)

    # Apply the config overrides (if any).
    if config_overrides:
        component.applyConfigOverrides(config_overrides)

    # Run the action with the component and config.
    result = ACTIONS[action](component)
    if result and should_monitor:
        try:
            report('Starting monitoring of component: ' + component_name)
            monitor(component)
        except KeyboardInterrupt:
            report('Terminating monitoring of component: ' + component_name)

    def cleanup_monitor(signum, frame):
        manager.join()

    # Install a SIGINT handler so an interrupt triggers cleanup.
    signal.signal(signal.SIGINT, cleanup_monitor)

    # We may have to call cleanup manually if we weren't asked to monitor
    cleanup_monitor(None, None)
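
ACTIONS is referenced but never defined in this example; from its use it is a dict mapping action names to callables that take the component and return a truthy value when monitoring should continue. A hypothetical sketch of its shape; the handler bodies are guesses (only update() is known to exist on the component from the examples above):

def start_action(component):
    return component.update()     # start or refresh the component; truthy => monitor

def stop_action(component):
    component.stop()              # hypothetical stop call
    return False                  # nothing left to monitor

ACTIONS = {
    'start': start_action,
    'update': start_action,
    'stop': stop_action,
}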