def cache_expires_in(self, timedelta=timedelta(0)):
    """Set the caching headers of the response.

    Writes both ``Expires`` (absolute UTC time) and
    ``Cache-Control: max-age`` (relative seconds) so the response may be
    cached for the given period.
    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9

    Args:
        timedelta - the time this resource can be cached

    NOTE(review): the parameter name shadows the ``timedelta`` class; it
    is kept unchanged so callers passing it by keyword keep working.
    """
    self.response.headers["Expires"] = utils.format_http_time(
        datetime.utcnow() + timedelta)
    self.response.headers["Cache-Control"] = (
        "max-age=%d" % utils.total_seconds(timedelta))
    # Lazy %-style logging args: the message is only formatted when the
    # INFO level is actually enabled.
    logging.info("Setting caches to expire after %s", timedelta)
def get_current_user(self):
    """Tornado standard method--implemented our way."""
    # The auth timeout is stored as a shorthand string (e.g. "14d");
    # get_secure_cookie() wants it as a (possibly fractional) day count.
    timeout_setting = self.settings.get("auth_timeout", "14d")
    timeout_days = (
        float(total_seconds(convert_to_timedelta(timeout_setting)))
        / float(86400))
    cookie = self.get_secure_cookie("gateone_user", max_age_days=timeout_days)
    if not cookie:
        return None
    # Decode the cookie payload and attach the client's IP address.
    user = tornado.escape.json_decode(cookie)
    user["ip_address"] = self.request.remote_ip
    return user
def get_current_user(self):
    """Tornado standard method--implemented our way.

    Returns the user dict decoded from the secure "gateone_user" cookie,
    with the client's IP address added, or None when the cookie is
    missing or older than the configured auth timeout.
    """
    expiration = self.settings.get('auth_timeout', "14d")
    # Need the expiration in days (which is a bit silly but whatever):
    expiration = (float(total_seconds(convert_to_timedelta(expiration))) / float(86400))
    # max_age_days makes Tornado reject cookies older than the timeout.
    user_json = self.get_secure_cookie("gateone_user", max_age_days=expiration)
    if not user_json:
        return None
    user = tornado.escape.json_decode(user_json)
    # Add the IP attribute
    user['ip_address'] = self.request.remote_ip
    return user
def match_proposal(self, propID):
    """
    This operation try to match the specified proposal with each request of the DB

    @pre : offermanager_port has been initialized and is the port of the
           OfferManager module
           propId is the id of a proposal in the database
    @post : DB has not been modified.
            for each request matching the specified proposal, a message is sent
            to OfferManager through its port:
            ('buildoffer',requestID,proposalID)
            with requestID, the database ID of the matching request
    """
    infos = Proposal.objects.get(id=propID)
    # Pending ('P') requests that fit within the proposal's seat count.
    requests = Request.objects.filter(nb_requested_seats__lte=infos.number_of_seats,
                                      status='P')
    for request in requests:
        # Skip requests that already have a Ride on one of their offers.
        found = False
        for offer in Offer.objects.filter(request=request):
            if Ride.objects.filter(offer=offer):
                found = True
                break
        if not found:
            # Route points of the proposal, in travel order.
            route_points = RoutePoints.objects.filter(proposal=infos).order_by('order')
            # Collect (departure_idx, arrival_idx) pairs where both points
            # lie within the request's departure/arrival distance ranges.
            valid_pair = list()
            for i in xrange(len(route_points)-1):
                if get_distance((request.departure_point_lat, request.departure_point_long),
                                (route_points[i].latitude, route_points[i].longitude)) < request.departure_range:
                    for j in range(i+1, len(route_points)):
                        if get_distance((request.arrival_point_lat, request.arrival_point_long),
                                        (route_points[j].latitude, route_points[j].longitude)) < request.arrival_range:
                            valid_pair.append((i, j))
            for (i, j) in valid_pair:
                # delete all not in time arrival: only keep pairs whose
                # estimated arrival time differs from the requested arrival
                # time by less than the request's maximum accepted delay.
                if total_seconds(abs(get_time_at_point([(r.latitude, r.longitude) for r in route_points], j, infos.departure_time, infos.arrival_time) - request.arrival_time)) < request.max_delay:
                    self.send_to(self.offermanager_port,
                                 ('buildoffer', request.id, infos.id,
                                  (route_points[i].latitude,
                                   route_points[i].longitude,
                                   get_time_at_point([(r.latitude, r.longitude) for r in route_points], i, infos.departure_time, infos.arrival_time),
                                   route_points[i].id),
                                  (route_points[j].latitude,
                                   route_points[j].longitude,
                                   get_time_at_point([(r.latitude, r.longitude) for r in route_points], j, infos.departure_time, infos.arrival_time),
                                   route_points[j].id)))
def _process_rx_pkt(self, rx_pkt, pkt_time_stamp):
    """Process a pkt from the instrument.

    State machine:
      state 1 - open a new hourly data file, write the header row and
                the first packet, then move to state 2.
      state 2 - append packets; roll over to a new file (state 1) at the
                end of each hour or when an excessive inter-packet gap
                is detected (the gap packet itself is dropped).
    """
    if self._data_file_state == 1:
        # Open new data file and write a pkt to it
        self._data_file_path = "".join(
            (fg_mgr_config.temp_dir,
             fg_mgr_config.proc_mnemonic,
             '_',
             utils.time_stamp_str(pkt_time_stamp),
             '.dat.csv'))
        if not self._open_data_file():
            return
        self._write_to_data_file(self._data_file_hdr_row)
        self._format_and_write_pkt(rx_pkt, pkt_time_stamp)
        self._file_pkt_cnt = 1
        self._data_file_state = 2
        self._prev_pkt_time_stamp = pkt_time_stamp
        return
    # BUG FIX: was "is 2" -- identity comparison with an int literal only
    # worked via CPython's small-int caching; use equality instead.
    if self._data_file_state == 2:
        # If there is a significant gap between instrument
        # data pkts, don't store the current packet and
        # start a new data file
        time_between_pkts = utils.total_seconds(pkt_time_stamp
                                                - self._prev_pkt_time_stamp)
        self._prev_pkt_time_stamp = pkt_time_stamp
        #self._log.debug('time between pkts: %.3f' % time_between_pkts)
        data_gap = False
        if time_between_pkts > fg_mgr_config.max_data_pkt_gap:
            self._log.error(
                'Excessive time gap between fluxgate data packets')
            data_gap = True
        if not data_gap:
            self._format_and_write_pkt(rx_pkt, pkt_time_stamp)
            self._file_pkt_cnt += 1
        end_of_hour = (pkt_time_stamp.minute == 59) and (pkt_time_stamp.second == 59)
        if data_gap or end_of_hour:
            self._data_file.close()
            # Spin off a thread to execute the XMLRPC command.
            # If it's a big file, it will take a while for the USB mgr
            # to copy the file to temp storage.
            compress = True
            save_file_thread = SaveFileThread(self._data_file_path,
                                              compress, self._log)
            # save_file_thread deletes data file after storage
            self._data_file_path = None
            self._data_file_state = 1
        return
    self._log.error('DataThread._process_rx_pkt: unknown state value')
def start_new():
    # Launch the next solver configuration (if any remain) in its own
    # worker thread, giving it whatever wall-clock budget is left.
    if not configs:
        return  # Could launch Mistral with different seeds if we run out of provided configs
    config = configs.pop()
    # Remaining time budget, minus a safety buffer for solver shutdown.
    remaining_time = int(timeout - total_seconds(datetime.datetime.now() - start_time)
                         - solver_buffer_time)
    if config['solver'] == "Mistral":
        # Mistral's timing seems to consistently be longer than the specified timeout.
        remaining_time = max(remaining_time - 1, 1)
    # Default value for every command-line flag; entries from 'config'
    # override matching keys.
    defaults = {'njfilename': njfilename, 'threads': 1,
                'tcutoff': remaining_time, 'var': 'DomainOverWDegree',
                'val': 'Lex', 'verbose': 0, 'restart': GEOMETRIC,
                'base': 256, 'factor': 1.3, 'lcLevel': 4, 'lds': 0,
                'dee': 1, 'btd': 0, 'rds': 0, 'dichotomic': 0,
                'dichtcutoff': 10, 'varElimOrder': 0}
    d = dict(defaults.items() + config.items())
    cmd = (
        "python %(njfilename)s -solver %(solver)s -tcutoff %(tcutoff)d "
        "-threads %(threads)d -var %(var)s -val %(val)s "
        "-restart %(restart)d -base %(base)d -factor %(factor).1f "
        "-verbose %(verbose)d -lds %(lds)d -btd %(btd)d -rds %(rds)d "
        "-dee %(dee)d -lcLevel %(lcLevel)d -varElimOrder %(varElimOrder)d "
        "-dichotomic %(dichotomic)d -dichtcutoff %(dichtcutoff)d" % d)
    # run_cmd reports its pid via pid_queue and its outcome via result_queue.
    args = (str(config), datetime.datetime.now(), pid_queue, result_queue,
            cmd, int(memlimit / cores))
    thread = threading.Thread(target=run_cmd, args=args)
    threads.append(thread)
    thread.start()
    print "% Launching:", cmd
def match_request(requestID):
    """
    Try to match the specified request with each proposal of the DB.

    @pre : DB has been initialized and is the SQL database
           offermanager_port has been initialized and is the port of the
           OfferManager module
           requestId is the id of a request in the database
    @post : DB has not been modified.
            for each proposal matching the specified request, build_offer()
            is called with requestID, proposalID and the matching
            departure/arrival route points, with proposalID, the database
            ID of the matching proposal
    """
    request = Request.objects.get(id=requestID)
    # Proposals offering at least as many seats as requested.
    proposals = Proposal.objects.filter(number_of_seats__gte=request.nb_requested_seats)
    for infos in proposals:
        # Route points of the proposal, in travel order.
        route_points = RoutePoints.objects.filter(proposal=infos).order_by('order')
        valid_pair = list()
        # BUG FIX: the departure index must run up to the second-to-last
        # route point, i.e. xrange(len-1) as in match_proposal(); the
        # previous xrange(len(route_points)-2) bound wrongly skipped the
        # final (len-2, len-1) departure/arrival pair.
        for i in xrange(len(route_points)-1):
            if get_distance((request.departure_point.latitude, request.departure_point.longitude),
                            (route_points[i].latitude, route_points[i].longitude)) < request.departure_range:
                for j in range(i+1, len(route_points)):
                    if get_distance((request.arrival_point.latitude, request.arrival_point.longitude),
                                    (route_points[j].latitude, route_points[j].longitude)) < request.arrival_range:
                        valid_pair.append((i, j))
        for (i, j) in valid_pair:
            # delete all not in time arrival: only keep pairs whose
            # estimated arrival time is within the accepted delay.
            if total_seconds(abs(get_time_at_point([(r.latitude, r.longitude) for r in route_points], j, infos.departure_time, infos.arrival_time) - request.arrival_time)) < request.max_delay:
                build_offer(requestID, infos.id,
                            (route_points[i].latitude,
                             route_points[i].longitude,
                             get_time_at_point([(r.latitude, r.longitude) for r in route_points], i, infos.departure_time, infos.arrival_time),
                             route_points[i].id),
                            (route_points[j].latitude,
                             route_points[j].longitude,
                             get_time_at_point([(r.latitude, r.longitude) for r in route_points], j, infos.departure_time, infos.arrival_time),
                             route_points[j].id))
def start_new():
    # Launch the next solver configuration (if any remain) in its own
    # worker thread, giving it whatever wall-clock budget is left.
    if not configs:
        return  # Could launch Mistral with different seeds if we run out of provided configs
    config = configs.pop()
    # Remaining time budget, minus a safety buffer for solver shutdown.
    remaining_time = int(timeout - total_seconds(datetime.datetime.now() - start_time)
                         - solver_buffer_time)
    if config['solver'] == "Mistral":
        # Mistral's timing seems to consistently be longer than the specified timeout.
        remaining_time = max(remaining_time - 1, 1)
    # Default value for every command-line flag; entries from 'config'
    # override matching keys.
    defaults = {'njfilename': njfilename, 'threads': 1,
                'tcutoff': remaining_time, 'var': 'DomainOverWDegree',
                'val': 'Lex', 'verbose': 0, 'restart': GEOMETRIC,
                'base': 256, 'factor': 1.3, 'lcLevel': 4, 'lds': 0,
                'dee': 1, 'btd': 0, 'rds': 0, 'dichotomic': 0,
                'dichtcutoff': 10}
    d = dict(defaults.items() + config.items())
    cmd = ("python %(njfilename)s -solver %(solver)s -tcutoff %(tcutoff)d "
           "-threads %(threads)d -var %(var)s -val %(val)s "
           "-restart %(restart)d -base %(base)d -factor %(factor).1f "
           "-verbose %(verbose)d -lds %(lds)d -btd %(btd)d -rds %(rds)d "
           "-dee %(dee)d -lcLevel %(lcLevel)d "
           "-dichotomic %(dichotomic)d -dichtcutoff %(dichtcutoff)d" % d)
    # run_cmd reports its pid via pid_queue and its outcome via result_queue.
    args = (str(config), datetime.datetime.now(), pid_queue, result_queue,
            cmd, int(memlimit / cores))
    thread = threading.Thread(target=run_cmd, args=args)
    threads.append(thread)
    thread.start()
    print "% Launching:", cmd
def qualifying_results(round_n=None, season=None):
    """
    Get relevant information for a Formula 1 qualifying session.

    If no round number is passed, the most-recent qualifying data is
    provided; if no season is passed, the current season is used.

    :param round_n: Round number for the qualifying session.
    :param season: The year of the qualifying session.
    :return driver_dict: A dict containing driver results and round info,
        or {} when the request fails, the session has not happened yet,
        or the response body is not valid JSON.
    """
    # Build the Ergast API URL from the round/season combination.
    if round_n is None:
        if season is None:
            # Most-recent qualifying, current season.
            url = 'https://ergast.com/api/f1/current/last/qualifying.json'
        else:
            # Most-recent qualifying, specific season.
            url = 'https://ergast.com/api/f1/{}/last/qualifying.json'.format(
                season)
    else:
        if season is None:
            # Specific qualifying, current season.
            url = 'https://ergast.com/api/f1/current/{}/qualifying.json'.format(
                round_n)
        else:
            # Specific qualifying, specific season.
            url = 'https://ergast.com/api/f1/{}/{}/qualifying.json'.format(
                season, round_n)
    response = requests.get(url)
    if not response.ok:
        return {}
    try:
        data = json.loads(response.text)
        # If the race has not happened yet.
        if not data['MRData']['RaceTable']['Races']:
            return {}
        # Hoist the deeply-nested structures once instead of re-indexing
        # the full JSON path for every field.
        race_data = data['MRData']['RaceTable']['Races'][0]
        qual_results = race_data['QualifyingResults']
        driver_list = []
        # Index-based loop: the Q3 delta references the previous driver's
        # time via qual_results[i - 1].
        for i in range(len(qual_results)):
            result = qual_results[i]
            fn = result['Driver']['givenName']
            ln = result['Driver']['familyName']
            driver_url = result['Driver']['url']
            pos = result['position']
            try:
                # Drivers may not partake in Q1.
                q1_text = result['Q1']
                q1_secs = total_seconds(q1_text)
            except KeyError:
                q1_text = '-'
                q1_secs = '-'
            try:
                # Drivers eliminated in Q1 won't be in Q2.
                q2_text = result['Q2']
                q2_secs = total_seconds(q2_text)
            except KeyError:
                q2_text = '-'
                q2_secs = '-'
            try:
                # Drivers eliminated in Q2 won't be in Q3.
                q3_text = result['Q3']
                q3_secs = total_seconds(q3_text)
                # Check the driver position. Cannot reference the previous
                # driver if they are position 1.
                if int(result['position']) > 1:
                    # Subtract the faster driver's time from the current
                    # driver to find the delta.
                    q3_delta = round(
                        q3_secs - total_seconds(qual_results[i - 1]['Q3']),
                        4)
                else:
                    q3_delta = ''
            except KeyError:
                q3_text = '-'
                q3_secs = '-'
                q3_delta = ''
            driver_qualifying = {
                'fn': fn,
                'ln': ln,
                'url': driver_url,
                'pos': pos,
                'q1': {
                    'text': q1_text,
                    'seconds': q1_secs
                },
                'q2': {
                    'text': q2_text,
                    'seconds': q2_secs
                },
                'q3': {
                    'text': q3_text,
                    'seconds': q3_secs,
                    'delta': q3_delta
                }
            }
            driver_list.append(driver_qualifying)
        driver_dict = {'Driver': driver_list}
        driver_dict['RoundInfo'] = {
            'race': race_data['raceName'],
            'circuit': race_data['Circuit']['circuitName'],
            'url': race_data['Circuit']['url'],
            'round': race_data['round'],
            'season': race_data['season'],
            'date': date_format(race_data['date'])  # Format date.
        }
        return driver_dict
    except ValueError:
        return {}
def duration(self):
    """Duration of the journey in minutes.

    NOTE(review): total_seconds() yields seconds and the result is
    divided by 60.0, so despite the original "[s]" label this returns
    minutes -- confirm intent with callers before changing the math.
    """
    return total_seconds(self.end_time - self.start_time) / 60.0
def period(self):
    """Time since the previous point [s]."""
    # No previous point recorded: fall back to a nominal 1-second period.
    if not self.previous:
        return 1
    return total_seconds(self.time - self.previous.time)
def njportfolio(njfilename, cores, timeout, memlimit):
    """Run a portfolio of solver configurations in parallel subprocesses.

    Launches up to 'cores' solver configurations at once against the
    Numberjack model file 'njfilename', restarting failed runs with the
    next configuration, and prints the best solution found within
    'timeout' seconds.  'memlimit' is split evenly across the cores.
    """
    from Numberjack import available_solvers
    from multiprocessing import Queue, cpu_count
    from Queue import Empty
    start_time = datetime.datetime.now()
    result_queue = Queue()   # worker threads post (success, exitcode, ...) tuples here
    pid_queue = Queue()      # worker threads post subprocess pids here, for tidy_up()
    available = available_solvers()
    threads = []
    # Solver configurations, in launch-priority order (reversed below so
    # that pop() yields them highest-priority first).
    configs = []
    configs.append({
        'solver': 'Mistral',
        'var': 'DomainOverWDegree',
        'val': 'Lex',
        'restart': GEOMETRIC,
        'base': 256,
        'factor': 1.3
    })
    if 'CPLEX' in available:
        configs.append({'solver': 'CPLEX'})
    elif 'Gurobi' in available:
        configs.append({'solver': 'Gurobi'})
    if 'Toulbar2' in available:
        configs.append({'solver': 'Toulbar2', 'lds': 1})
        # configs.append({'solver': 'Toulbar2', 'btd': 3, 'lcLevel': 1, 'rds': 1})
        # configs.append({'solver': 'Toulbar2', 'btd': 1, 'varElimOrder': 3})  # requires libboost-graph-dev installed and recompile Toulbar2 with flag BOOST active in setup.py
    configs.append({
        'solver': 'Mistral',
        'var': 'Impact',
        'val': 'Impact',
        'restart': LUBY,
        'base': 10000
    })
    # NOTE(review): 'base' appears twice in this literal; Python keeps the
    # later value (256), so the earlier 10 is dead -- confirm which was meant.
    configs.append({
        'solver': 'Mistral',
        'dichotomic': 1,
        'dichtcutoff': 10,
        'base': 10,
        'restart': GEOMETRIC,
        'base': 256,
        'factor': 1.3
    })
    configs.append({'solver': 'MiniSat'})
    configs.append({
        'solver': 'Mistral',
        'var': 'DomainOverWDegree',
        'val': 'Lex',
        'restart': GEOMETRIC,
        'base': 10,
        'factor': 1.3
    })
    configs.append({
        'solver': 'Mistral',
        'var': 'Impact',
        'val': 'Impact',
        'restart': GEOMETRIC,
        'base': 256,
        'factor': 1.5
    })
    if 'SCIP' in available:
        configs.append({'solver': 'SCIP'})
    configs.append({
        'solver': 'Mistral',
        'var': 'Impact',
        'val': 'Impact',
        'restart': GEOMETRIC,
        'base': 512,
        'factor': 2
    })
    configs.append({
        'solver': 'Mistral',
        'var': 'Impact',
        'val': 'Impact',
        'restart': LUBY,
        'base': 5000
    })
    configs.append({
        'solver': 'Mistral',
        'var': 'Impact',
        'val': 'Impact',
        'restart': GEOMETRIC,
        'base': 512,
        'factor': 1.3
    })
    configs.append({
        'solver': 'Mistral',
        'var': 'Impact',
        'val': 'Impact',
        'restart': LUBY,
        'base': 1000
    })
    configs.append({
        'solver': 'Mistral',
        'var': 'DomainOverWDegree',
        'val': 'Lex',
        'restart': GEOMETRIC,
        'base': 256,
        'factor': 1.5
    })
    configs.append({
        'solver': 'Mistral',
        'var': 'DomainOverWLDegree',
        'val': 'Lex',
        'restart': GEOMETRIC,
        'base': 256,
        'factor': 1.3
    })
    configs.reverse()  # Reverse the list so we can just pop().
    if cores <= 0 or cores > cpu_count():
        cores = cpu_count()

    def start_new():
        # Launch the next solver configuration (if any remain) in its own
        # worker thread, giving it whatever wall-clock budget is left.
        if not configs:
            return  # Could launch Mistral with different seeds if we run out of provided configs
        config = configs.pop()
        remaining_time = int(timeout - total_seconds(datetime.datetime.now() - start_time)
                             - solver_buffer_time)
        if config['solver'] == "Mistral":
            # Mistral's timing seems to consistently be longer than the specified timeout.
            remaining_time = max(remaining_time - 1, 1)
        # Default value for every flag; entries from 'config' override them.
        defaults = {
            'njfilename': njfilename,
            'threads': 1,
            'tcutoff': remaining_time,
            'var': 'DomainOverWDegree',
            'val': 'Lex',
            'verbose': 0,
            'restart': GEOMETRIC,
            'base': 256,
            'factor': 1.3,
            'lcLevel': 4,
            'lds': 0,
            'dee': 1,
            'btd': 0,
            'rds': 0,
            'dichotomic': 0,
            'dichtcutoff': 10,
            'varElimOrder': 0
        }
        d = dict(defaults.items() + config.items())
        cmd = (
            "python %(njfilename)s -solver %(solver)s -tcutoff %(tcutoff)d "
            "-threads %(threads)d -var %(var)s -val %(val)s "
            "-restart %(restart)d -base %(base)d -factor %(factor).1f "
            "-verbose %(verbose)d -lds %(lds)d -btd %(btd)d -rds %(rds)d "
            "-dee %(dee)d -lcLevel %(lcLevel)d -varElimOrder %(varElimOrder)d "
            "-dichotomic %(dichotomic)d -dichtcutoff %(dichtcutoff)d" % d)
        args = (str(config), datetime.datetime.now(), pid_queue, result_queue,
                cmd, int(memlimit / cores))
        thread = threading.Thread(target=run_cmd, args=args)
        threads.append(thread)
        thread.start()
        print "% Launching:", cmd

    def tidy_up(*args):
        # Kill every launched subprocess group; also installed as the
        # SIGTERM/SIGINT handler, hence the *args signature.
        num_pids_seen = 0
        if pid_queue.empty():
            return
        while num_pids_seen < len(threads):
            try:
                pid = pid_queue.get()
                num_pids_seen += 1
                os.killpg(pid, signal.SIGKILL)
            except Empty:
                pass
            except OSError:
                pass  # Process already finished.
            except IOError:
                break  # If manager process for pid_queue has been killed
            if pid_queue.empty():
                break

    # Set handlers for term and interupt signals
    signal.signal(signal.SIGTERM, tidy_up)
    signal.signal(signal.SIGINT, tidy_up)
    # Initially start 'cores' number of subprocesses.
    for i in xrange(cores):
        start_new()
    objective_type = check_optimization(njfilename)
    num_finished = 0
    finished_names = []
    results = []
    found_sol = False
    should_continue = True
    while should_continue:
        # Stop looping once fewer than two poll intervals remain.
        if total_seconds(datetime.datetime.now() - start_time) + 2 * result_poll_timeout >= timeout:
            should_continue = False
        try:
            success, exitcode, process_name, solversstartt, stdout, stderr = \
                result_queue.get(True, result_poll_timeout)
            num_finished += 1
            finished_names.append(process_name)
            if success:
                started_after = total_seconds(solversstartt - start_time)
                timetaken = total_seconds(datetime.datetime.now() - solversstartt)
                res = SolverResult(stdout, objective_type)
                found_sol = True
                print "%% Solver %s started after %.1f, finished %.1f. objective: %d" \
                    % (process_name, started_after, timetaken, res.objective * objective_type)
                if not objective_type:
                    # Satisfaction problem: the first solution is final.
                    print stdout
                    break
                else:
                    results.append(res)
                    if res.opt:
                        break
                    # If not optimal, wait for further result to come in until timeout almost exceeded.
            else:
                print "%% Failed: %s exitcode: %d" % (process_name, exitcode)
                print_commented_fzn(stdout)
                print_commented_fzn(stderr)
                # Replace the failed run with the next configuration.
                start_new()
            if num_finished == len(threads):
                break
        except Empty:
            pass  # Nothing new posted to the result_queue yet.
        except EOFError:
            break
        except IOError:
            break  # Can happen if sent term signal.
        except KeyboardInterrupt:
            break
    if results:
        print min(results).stdout  # Print the best solution
    if not found_sol:
        print "=====UNKNOWN====="
    tidy_up()
    print "%% Total time in njportfolio: %.1f" % total_seconds(
        datetime.datetime.now() - start_time)
    # Join each thread, otherwise one could try queue.put() after we exit
    for t in threads:
        t.join()
def njportfolio(njfilename, cores, timeout, memlimit):
    """Run a portfolio of solver configurations in parallel subprocesses.

    Launches up to 'cores' solver configurations at once against the
    Numberjack model file 'njfilename', restarting failed runs with the
    next configuration, and prints the best solution found within
    'timeout' seconds.  'memlimit' is split evenly across the cores.
    """
    from Numberjack import available_solvers
    from multiprocessing import Queue, cpu_count
    from Queue import Empty
    start_time = datetime.datetime.now()
    result_queue = Queue()   # worker threads post (success, exitcode, ...) tuples here
    pid_queue = Queue()      # worker threads post subprocess pids here, for tidy_up()
    available = available_solvers()
    threads = []
    # Solver configurations, in launch-priority order (reversed below so
    # that pop() yields them highest-priority first).
    configs = []
    configs.append({'solver': 'Mistral', 'var': 'DomainOverWDegree',
                    'val': 'Lex', 'restart': GEOMETRIC, 'base': 256,
                    'factor': 1.3})
    if 'CPLEX' in available:
        configs.append({'solver': 'CPLEX'})
    elif 'Gurobi' in available:
        configs.append({'solver': 'Gurobi'})
    if 'Toulbar2' in available:
        configs.append({'solver': 'Toulbar2', 'lds': 1})
    # NOTE(review): the original formatting was collapsed; this append is
    # assumed unconditional (as in the sibling version of this function) --
    # confirm it was not intended to be inside the Toulbar2 branch.
    configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact',
                    'restart': LUBY, 'base': 10000})
    # configs.append({'solver': 'Toulbar2', 'btd': 3, 'lcLevel': 1, 'rds': 1})
    # NOTE(review): 'base' appears twice in this literal; Python keeps the
    # later value (256), so the earlier 10 is dead -- confirm which was meant.
    configs.append({'solver': 'Mistral', 'dichotomic': 1, 'dichtcutoff': 10,
                    'base': 10, 'restart': GEOMETRIC, 'base': 256,
                    'factor': 1.3})
    configs.append({'solver': 'MiniSat'})
    configs.append({'solver': 'Mistral', 'var': 'DomainOverWDegree',
                    'val': 'Lex', 'restart': GEOMETRIC, 'base': 10,
                    'factor': 1.3})
    configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact',
                    'restart': GEOMETRIC, 'base': 256, 'factor': 1.5})
    if 'SCIP' in available:
        configs.append({'solver': 'SCIP'})
    configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact',
                    'restart': GEOMETRIC, 'base': 512, 'factor': 2})
    configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact',
                    'restart': LUBY, 'base': 5000})
    configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact',
                    'restart': GEOMETRIC, 'base': 512, 'factor': 1.3})
    configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact',
                    'restart': LUBY, 'base': 1000})
    configs.append({'solver': 'Mistral', 'var': 'DomainOverWDegree',
                    'val': 'Lex', 'restart': GEOMETRIC, 'base': 256,
                    'factor': 1.5})
    configs.append({'solver': 'Mistral', 'var': 'DomainOverWLDegree',
                    'val': 'Lex', 'restart': GEOMETRIC, 'base': 256,
                    'factor': 1.3})
    configs.reverse()  # Reverse the list so we can just pop().
    if cores <= 0 or cores > cpu_count():
        cores = cpu_count()

    def start_new():
        # Launch the next solver configuration (if any remain) in its own
        # worker thread, giving it whatever wall-clock budget is left.
        if not configs:
            return  # Could launch Mistral with different seeds if we run out of provided configs
        config = configs.pop()
        remaining_time = int(timeout - total_seconds(datetime.datetime.now() - start_time)
                             - solver_buffer_time)
        if config['solver'] == "Mistral":
            # Mistral's timing seems to consistently be longer than the specified timeout.
            remaining_time = max(remaining_time - 1, 1)
        # Default value for every flag; entries from 'config' override them.
        defaults = {'njfilename': njfilename, 'threads': 1,
                    'tcutoff': remaining_time, 'var': 'DomainOverWDegree',
                    'val': 'Lex', 'verbose': 0, 'restart': GEOMETRIC,
                    'base': 256, 'factor': 1.3, 'lcLevel': 4, 'lds': 0,
                    'dee': 1, 'btd': 0, 'rds': 0, 'dichotomic': 0,
                    'dichtcutoff': 10}
        d = dict(defaults.items() + config.items())
        cmd = ("python %(njfilename)s -solver %(solver)s -tcutoff %(tcutoff)d "
               "-threads %(threads)d -var %(var)s -val %(val)s "
               "-restart %(restart)d -base %(base)d -factor %(factor).1f "
               "-verbose %(verbose)d -lds %(lds)d -btd %(btd)d -rds %(rds)d "
               "-dee %(dee)d -lcLevel %(lcLevel)d "
               "-dichotomic %(dichotomic)d -dichtcutoff %(dichtcutoff)d" % d)
        args = (str(config), datetime.datetime.now(), pid_queue, result_queue,
                cmd, int(memlimit / cores))
        thread = threading.Thread(target=run_cmd, args=args)
        threads.append(thread)
        thread.start()
        print "% Launching:", cmd

    def tidy_up(*args):
        # Kill every launched subprocess group; also installed as the
        # SIGTERM/SIGINT handler, hence the *args signature.
        num_pids_seen = 0
        if pid_queue.empty():
            return
        while num_pids_seen < len(threads):
            try:
                pid = pid_queue.get()
                num_pids_seen += 1
                os.killpg(pid, signal.SIGKILL)
            except Empty:
                pass
            except OSError:
                pass  # Process already finished.
            except IOError:
                break  # If manager process for pid_queue has been killed
            if pid_queue.empty():
                break

    # Set handlers for term and interupt signals
    signal.signal(signal.SIGTERM, tidy_up)
    signal.signal(signal.SIGINT, tidy_up)
    # Initially start 'cores' number of subprocesses.
    for i in xrange(cores):
        start_new()
    objective_type = check_optimization(njfilename)
    num_finished = 0
    finished_names = []
    results = []
    found_sol = False
    should_continue = True
    while should_continue:
        # Stop looping once fewer than two poll intervals remain.
        if total_seconds(datetime.datetime.now() - start_time) + 2 * result_poll_timeout >= timeout:
            should_continue = False
        try:
            success, exitcode, process_name, solversstartt, stdout, stderr = \
                result_queue.get(True, result_poll_timeout)
            num_finished += 1
            finished_names.append(process_name)
            if success:
                started_after = total_seconds(solversstartt - start_time)
                timetaken = total_seconds(datetime.datetime.now() - solversstartt)
                res = SolverResult(stdout, objective_type)
                found_sol = True
                print "%% Solver %s started after %.1f, finished %.1f. objective: %d" \
                    % (process_name, started_after, timetaken, res.objective * objective_type)
                if not objective_type:
                    # Satisfaction problem: the first solution is final.
                    print stdout
                    break
                else:
                    results.append(res)
                    if res.opt:
                        break
                    # If not optimal, wait for further result to come in until timeout almost exceeded.
            else:
                print "%% Failed: %s exitcode: %d" % (process_name, exitcode)
                print_commented_fzn(stdout)
                print_commented_fzn(stderr)
                # Replace the failed run with the next configuration.
                start_new()
            if num_finished == len(threads):
                break
        except Empty:
            pass  # Nothing new posted to the result_queue yet.
        except EOFError:
            break
        except IOError:
            break  # Can happen if sent term signal.
        except KeyboardInterrupt:
            break
    if results:
        print min(results).stdout  # Print the best solution
    if not found_sol:
        print "=====UNKNOWN====="
    tidy_up()
    print "%% Total time in njportfolio: %.1f" % total_seconds(datetime.datetime.now() - start_time)
    # Join each thread, otherwise one could try queue.put() after we exit
    for t in threads:
        t.join()