Esempio n. 1
0
    def launch_workers(self):
        """Spawn SPGRunningAtom worker threads until the queue's max_jobs target is met.

        Reads ``max_jobs`` for the "default" queue from the master db, refreshes
        the list of active ensemble dbs, and starts one worker thread per job
        slot still free. Does nothing when there are no active dbs.
        """
        target_jobs, = self.master_db.query_master_fetchone('SELECT max_jobs FROM queues WHERE name = "default"')

        self.master_db.update_list_ensemble_dbs()
        if len(self.master_db.active_dbs) == 0:
            utils.inline_msg("MSG", "No active dbs... sleeping ")
            return

        current_count = self.active_threads()
        to_launch = target_jobs - current_count
        if to_launch >= 0:
             utils.newline_msg("STATUS", "[n_jobs=%d] run=%d ::: new=%d" % (target_jobs,current_count,to_launch ) )
        else:
             utils.newline_msg("STATUS", "[n_jobs=%d] run=%d :!: exceed" % (target_jobs,current_count))

        for i_t in range(to_launch):
            # try/finally guarantees the lock is released on every exit path;
            # the original returned while still holding it when the picked
            # ensemble turned out to be finished, leaving the lock locked forever.
            self.lock.acquire()
            try:
                pick = self.master_db.pick_ensemble()
                status = pick.get_updated_status()
                if status['process_not_run'] == 0:
                    # Ensemble exhausted: flag it done ("D") and stop launching.
                    print("+D+ ----- %s " % (pick.full_name))
                    self.master_db.query_master_db('UPDATE dbs SET status= ? WHERE full_name = ?', "D", pick.full_name)
                    return
            finally:
                self.lock.release()

            nt = SPGRunningAtom(pick, self.lock)
            # nt = SPGRunningAtom(pick, lock=self.get_lock( pick ) )

            nt.start()
Esempio n. 2
0
    def launch_workers(self):
        """Spawn SPGRunningAtom worker threads, sharing job slots by ensemble weight.

        Reads the "default" queue's ``max_jobs`` from the master db, refreshes
        the active ensemble dbs, computes a per-ensemble quota proportional to
        each ensemble's weight, and starts one worker thread per missing slot.
        Does nothing when there are no active dbs.
        """
        target_jobs, = self.master_db.query_master_fetchone('SELECT max_jobs FROM queues WHERE name = "default"')

        self.master_db.update_list_ensemble_dbs()
        if len(self.master_db.active_dbs) == 0:
            utils.inline_msg("MSG", "No active dbs... sleeping ")
            return

        current_count = self.active_threads()
        vec_to_launch = []

        launch = defaultdict(lambda: 0)
        running = {}

        # Weighted fair-share: each active ensemble is entitled to a slice of
        # target_jobs proportional to its weight (rounded to nearest integer);
        # subtracting what it already runs gives how many more to launch.
        for ae in self.master_db.active_dbs:
            ens = self.master_db.result_dbs[ae]
            running[ ens['id'] ] = self.active_processes[ ae ]
            qty_to_launch = int( m.floor(0.5 + target_jobs*ens['weight']/self.master_db.normalising) - self.active_processes[ ae ] )

            # A negative quantity multiplies into an empty list: launch none.
            vec_to_launch += qty_to_launch * [ae]
            launch[ ens['id'] ] += qty_to_launch

        to_launch = len(vec_to_launch)
        # NOTE(review): len() is never negative, so the @yellow branch below is
        # unreachable as written — kept to preserve the original behavior.
        if to_launch >= 0:
             utils.newline_msg("STATUS", utils.str_color( "@green[n_jobs=%d] run=%d %s ::: new=%d" % (target_jobs,current_count, dict(running),to_launch) ) )
        else:
             utils.newline_msg("STATUS", utils.str_color( "@yellow[n_jobs=%d] run=%d :!: exceeded number" % (target_jobs,current_count)) )

        for ae in vec_to_launch:
            pick = self.master_db.EnsembleConstructor(ae, init_db=True)
            # try/finally guarantees the lock is released on every exit path;
            # the original returned while still holding it when an ensemble
            # turned out to be finished, leaving the lock locked forever.
            self.lock.acquire()
            try:
                pick.test_run = self.test_run

                status = pick.get_updated_status()
                if status['process_not_run'] == 0:
                    # Ensemble exhausted: flag it done ("D") and stop launching.
                    print("+D+ ----- %s " % (pick.full_name))
                    self.master_db.query_master_db('UPDATE dbs SET status= ? WHERE full_name = ?', "D", pick.full_name)
                    return
            finally:
                self.lock.release()

            nt = SPGRunningAtom(pick, self.lock, self.active_processes)
            # nt = SPGRunningAtom(pick, lock=self.get_lock( pick ) )

            nt.start()
Esempio n. 3
0
      # Run all pending parameter sets of the current db_name.
      # NOTE(review): fragment of a larger per-database loop — the `continue`
      # below targets that enclosing loop, which is not visible here.
      utils.newline_msg("MSG", "running simulation")
 #     if options.dummy:
 #         executor = ParameterEnsembleInputFilesGenerator( db_name )
 #     else:
      executor = ParameterEnsembleExecutor( db_name )

      executor.init_db()
      if options.test_run:
          # Test mode: run a single parameter set, keep its input files for
          # inspection, and move on to the next database.
          executor.next()
          executor.launch_process( remove_files=False)
          continue

      for values in executor:
          if options.verbose:
              utils.inline_msg("RUN", "[%s] %s" % (executor.current_spg_uid, executor.variable_values()))
          # executor.launch_process()
          try:
             executor.launch_process()
             executor.dump_result()
          except (KeyboardInterrupt,):
              # Ctrl-C: mark the current run as not-run ("N") so it can be
              # picked up again on the next pass, then exit.
              print >> sys.stderr
              utils.newline_msg("SYS", "keyboard interrupted, exiting")
              executor.query_set_run_status("N")
              sys.exit(1)

 #     if options.tree:
 #         os.chdir(path)
      
#      if options.dummy:
#          executor.reset()
Esempio n. 4
0
        #         executor = ParameterEnsembleInputFilesGenerator( db_name )
        #     else:
        # Run all pending parameter sets of the current db_name.
        # NOTE(review): fragment of a larger per-database loop — the `continue`
        # below targets that enclosing loop, which is not visible here.
        executor = ParameterEnsembleExecutor(db_name)

        executor.init_db()
        if options.test_run:
            # Test mode: run a single parameter set, keep its input files for
            # inspection, record the result, and move on to the next database.
            next(executor)
            executor.launch_process(remove_files=False)

            executor.dump_result()
            continue

        for values in executor:
            if options.verbose:
                utils.inline_msg(
                    "RUN", "[%s] %s" %
                    (executor.current_spg_uid, executor.variable_values()))
            # executor.launch_process()
            try:
                executor.launch_process()
                executor.dump_result()
            except (KeyboardInterrupt, ):
                # Ctrl-C: mark the current run as not-run ("N") so it can be
                # picked up again on the next pass, then exit.
                print(file=sys.stderr)
                utils.newline_msg("SYS", "keyboard interrupted, exiting")
                executor.query_set_run_status("N")
                sys.exit(1)

#     if options.tree:
#         os.chdir(path)

#      if options.dummy:
Esempio n. 5
0
                # First-time setup for queue `name`: log to console and file_log.
                # NOTE(review): fragment — the matching `if` of the `else:` below
                # is outside this view (presumably "queue not yet known").
                newline_msg("INF", "initialising queue: '%s' [max_jobs: %s]"%(name, max_jobs),indent = 2)
                newline_msg("INF", "initialising queue: '%s' [max_jobs: %s]"%(name, max_jobs), stream = file_log)
                
                if options.queue == "torque":
                    all_queues[name] = TorqueQueue(name, max_jobs, workers_sleep = options.workers_sleep)
                else:
                    all_queues[name] = Queue(name, max_jobs, workers_sleep = options.workers_sleep)
            else:
                # Queue already known: just refresh its job target.
                all_queues[name].jobs = max_jobs

            all_queues[name].update_worker_info()
            worker_diff  = all_queues[name].normalise_workers()
            if worker_diff > 0 :
                newline_msg("INF",  "queue: %s seeded-killed = %d]"%(name, worker_diff), stream = file_log)
                
            inline_msg("INF", "populate data for '%s'."%name,indent = 2)
            if not options.skip_init:
                pex.seed_atoms( name )
                seeded_atoms_ac.append(pex.seeded_atoms )
         
        if not options.skip_harvest:
            inline_msg("INF", "harvest data..................",indent = 2)
            pex.harvest_atoms()
    
        newline_msg("INF", "syncing..................(s:%s - h:%d)"%(seeded_atoms_ac, pex.harvested_atoms), indent = 2)
        newline_msg("INF", "syncing (s:%s - h:%d)"%(seeded_atoms_ac, pex.harvested_atoms), stream = file_log)
        
        # Track consecutive empty harvests (used elsewhere, e.g. for back-off).
        if pex.harvested_atoms == 0:
            harvests_without_results += 1
        else:
            harvests_without_results = 0
Esempio n. 6
0
                # First-time setup for queue `name`: log to console and file_log.
                # NOTE(review): fragment — the matching `if` of the `else:` below
                # is outside this view (presumably "queue not yet known").
                newline_msg("INF", "initialising queue '%s'"%name,indent = 2)
                print >> file_log,  "initialising queue: %s [max_jobs: %s]"%(name, max_jobs)
                
                if options.queue == "torque":
                    all_queues[name] = TorqueQueue(name, max_jobs)
                else:
                    all_queues[name] = Queue(name, max_jobs)
            else:
                # Queue already known: just refresh its job target.
                all_queues[name].jobs = max_jobs

            all_queues[name].update_worker_info()
            worker_diff  = all_queues[name].normalise_workers()
            if worker_diff > 0 :
                print >> file_log,  "queue: %s seeded-killed = %d]"%(name, worker_diff)
                
            inline_msg("INF", "populate/harvest data.",indent = 2)
            if not options.skip_init:
                pex.seed_atoms( name )
                seeded_atoms_ac.append(pex.seeded_atoms ) 
        if not options.skip_harvest:
            pex.harvest_atoms()
    
        inline_msg("INF", "syncing..................(s:%s - h:%d)"%(seeded_atoms_ac, pex.harvested_atoms), indent = 2)
        print >> file_log, "atoms: seeded= %s - harvested= %d"%(seeded_atoms_ac, pex.harvested_atoms)
        # Flush so the daemon's log is readable while it sleeps.
        file_log.flush()
        
        if not options.skip_sync:
            pex.synchronise_master()
      
        newline_msg("INF", "sleep %s"%options.sleep,indent = 2)
        # A negative sleep is the "run once" convention: exit instead of looping.
        if options.sleep < 0:  sys.exit(0)
Esempio n. 7
0
                # First-time setup for queue `name`: log to console and file_log.
                # NOTE(review): fragment — the matching `if` of the `else:` below
                # is outside this view (presumably "queue not yet known").
                newline_msg("INF", "initialising queue '%s'[max_jobs: %s]"%(name, max_jobs),indent = 2)
                newline_msg("INF", "initialising queue: '%s' [max_jobs: %s]"%(name, max_jobs), stream = file_log)
                
                if options.queue == "torque":
                    all_queues[name] = TorqueQueue(name, max_jobs)
                else:
                    all_queues[name] = Queue(name, max_jobs)
            else:
                # Queue already known: just refresh its job target.
                all_queues[name].jobs = max_jobs

            all_queues[name].update_worker_info()
            worker_diff  = all_queues[name].normalise_workers()
            if worker_diff > 0 :
                print >> file_log,  "queue: %s seeded-killed = %d]"%(name, worker_diff)
                
            inline_msg("INF", "populate data for '%s'."%name,indent = 2)
            if not options.skip_init:
                pex.seed_atoms( name )
                seeded_atoms_ac.append(pex.seeded_atoms )
         
        if not options.skip_harvest:
            inline_msg("INF", "harvest data..................",indent = 2)
            pex.harvest_atoms()
    
        inline_msg("INF", "syncing..................(s:%s - h:%d)"%(seeded_atoms_ac, pex.harvested_atoms), indent = 2)
        newline_msg("INF", "syncing (s:%s - h:%d)"%(seeded_atoms_ac, pex.harvested_atoms), stream = file_log)
        
        if not options.skip_sync:
            pex.synchronise_master()
      
        newline_msg("INF", "sleep %s"%options.sleep,indent = 2)
Esempio n. 8
0
    def launch_workers(self):
        """Spawn SPGRunningAtom worker threads, sharing job slots by ensemble weight.

        Reads the "default" queue's ``max_jobs`` from the master db, refreshes
        the active ensemble dbs, computes a per-ensemble quota proportional to
        each ensemble's weight, and starts one worker thread per missing slot.
        Does nothing when there are no active dbs.
        """
        target_jobs, = self.master_db.query_master_fetchone(
            'SELECT max_jobs FROM queues WHERE name = "default"')

        self.master_db.update_list_ensemble_dbs()
        if len(self.master_db.active_dbs) == 0:
            utils.inline_msg("MSG", "No active dbs... sleeping ")
            return

        current_count = self.active_threads()
        vec_to_launch = []

        launch = defaultdict(lambda: 0)
        running = {}

        # Weighted fair-share: each active ensemble is entitled to a slice of
        # target_jobs proportional to its weight (rounded to nearest integer);
        # subtracting what it already runs gives how many more to launch.
        for ae in self.master_db.active_dbs:
            ens = self.master_db.result_dbs[ae]
            running[ens['id']] = self.active_processes[ae]
            qty_to_launch = int(
                m.floor(0.5 + target_jobs * ens['weight'] /
                        self.master_db.normalising) -
                self.active_processes[ae])

            # A negative quantity multiplies into an empty list: launch none.
            vec_to_launch += qty_to_launch * [ae]
            launch[ens['id']] += qty_to_launch

        to_launch = len(vec_to_launch)
        # NOTE(review): len() is never negative, so the @yellow branch below is
        # unreachable as written — kept to preserve the original behavior.
        if to_launch >= 0:
            utils.newline_msg(
                "STATUS",
                utils.str_color(
                    "@green[n_jobs=%d] run=%d %s ::: new=%d" %
                    (target_jobs, current_count, dict(running), to_launch)))
        else:
            utils.newline_msg(
                "STATUS",
                utils.str_color(
                    "@yellow[n_jobs=%d] run=%d :!: exceeded number" %
                    (target_jobs, current_count)))

        for ae in vec_to_launch:
            pick = self.master_db.EnsembleConstructor(ae, init_db=True)
            # try/finally guarantees the lock is released on every exit path;
            # the original returned while still holding it when an ensemble
            # turned out to be finished, leaving the lock locked forever.
            self.lock.acquire()
            try:
                pick.test_run = self.test_run

                status = pick.get_updated_status()
                if status['process_not_run'] == 0:
                    # Ensemble exhausted: flag it done ("D") and stop launching.
                    print("+D+ ----- %s " % (pick.full_name))
                    self.master_db.query_master_db(
                        'UPDATE dbs SET status= ? WHERE full_name = ?', "D",
                        pick.full_name)
                    return
            finally:
                self.lock.release()

            nt = SPGRunningAtom(pick, self.lock, self.active_processes)
            # nt = SPGRunningAtom(pick, lock=self.get_lock( pick ) )

            nt.start()