def __init__(self, problemName, model=None, resultDataModel=None, credentials=None, *attachments):
    '''
    Constructs an Optimizer instance.

    The instance requires an optimization model as a parameter. You can also
    provide one or more data files as attachments, either in OPL .dat or in
    JSON format. This data does not change from solve to solve. If you have
    input data that does change, you can provide it to the solve method as an
    OPLCollector object.

    :param problemName: name of this optimization problem instance
    :type problemName: String
    :param model: an optimization model written in OPL
    :type model: Model.Source object or String
    :param resultDataModel: the application data model for the results of the optimization
    :type resultDataModel: dict<String, StructType>
    :param credentials: DOcplexcloud url and api key
    :type credentials: {"url": String, "key": String}
    :param attachments: URLs for files representing the data that does not vary from solve to solve
    :type attachments: list<URL>
    '''
    self.name = problemName
    self.model = model
    self.resultDataModel = resultDataModel
    self.attachData(attachments)
    self.streamsRegistry = []
    self.history = []
    self.credentials = credentials
    self.jobclient = JobClient(credentials["url"], credentials["key"])
    self.solveStatus = JobSolveStatus.UNKNOWN
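# A hedged usage sketch of the constructor above. The model, data model,
# credentials, and attachment URL are placeholders, and solve()/OPLCollector
# are assumed from the class description rather than shown here:
#
# opt = Optimizer("truck-routing",
#                 truck_model,                     # Model.Source or OPL text
#                 truck_result_data_model,         # dict<String, StructType>
#                 {"url": "https://...", "key": "api_..."},
#                 "file:///data/static_data.dat")  # varargs: static attachments
# solution = opt.solve(varying_inputs)             # per-solve data as an OPLCollector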
from docloud.job import JobClient


def run_model():
    url = 'https://api-oaas.docloud.ibmcloud.com/job_manager/rest/v1/'
    key = 'api_fc2d8028-3ebc-4a3a-8664-b4018b1c05a8'
    client = JobClient(url=url, api_key=key)

    resp = client.execute(input=['workshop_model.py',
                                 '../input/best_submission.csv',
                                 '../input/family_data.csv'],
                          output='solution.json',
                          load_solution=True,
                          log='logs.txt')
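# With load_solution=True the returned JobResponse carries the raw solution
# bytes (resp.solution) and job metadata (resp.job_info), as other snippets in
# this file use them. A minimal, hedged helper for inspecting the outcome; the
# JSON keys follow the CPLEXSolution format seen below:
import json

def print_objective(resp):
    # Guard against infeasible runs before touching the solution payload.
    if resp.job_info.get("solveStatus") == "INFEASIBLE_SOLUTION":
        print("No feasible solution")
        return
    solution = json.loads(resp.solution.decode("utf-8"))
    print(solution["CPLEXSolution"]["header"]["objectiveValue"])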
import json
from concurrent.futures import ThreadPoolExecutor

from docloud.job import JobClient
from trucking.model import ProblemEncoder


class ControllerMultiJob:
    """This controller submits single jobs asynchronously."""

    def __init__(self, url, api_key, concurrent_jobs):
        """Creates a new controller to submit jobs asynchronously.

        Args:
            url: The DOcloud url.
            api_key: The DOcloud api key.
            concurrent_jobs: The number of concurrently submitted jobs.
        """
        self.nb_threads = concurrent_jobs
        self.client = JobClient(url, api_key)
        # This is the opl model file
        self.mod_file = "models/truck.mod"
        # The executor
        self.executor = ThreadPoolExecutor(self.nb_threads)

    def shutdown(self):
        self.executor.shutdown()

    def submitJob(self, pb, responses, latch):
        """Asynchronously submits a single job.

        Once the job has finished running, the latch is decreased.

        Args:
            pb: The ``Problem`` to submit and run.
            responses: A list of responses where the response for this ``pb`` is added.
            latch: The latch to decrease once the ``Problem`` has been solved.
        """
        def submitAndCountdown(pb, responses, latch):
            # Encodes the problem using the specified encoder (which extends
            # json.JSONEncoder)
            data = json.dumps(pb, cls=ProblemEncoder).encode('utf-8')
            print("Running %s" % pb.problem_id)
            resp = self.client.execute(
                input=[{'name': "truck.mod", 'filename': self.mod_file},
                       {'name': "truck.json", 'data': data}],
                gzip=True,
                load_solution=True,
                delete_on_completion=True,
                parameters={'oaas.client.problem.id': pb.problem_id})
            responses.append(resp)
            latch.count_down()

        self.executor.submit(submitAndCountdown, pb, responses, latch)
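# submitJob() assumes a latch object exposing count_down(); neither the
# standard library nor docloud ships one, so the caller presumably defines
# something like this minimal CountDownLatch (an assumption, not part of the
# original sample):
import threading

class CountDownLatch:
    def __init__(self, count):
        self.count = count
        self.condition = threading.Condition()

    def count_down(self):
        # Decrement the counter and wake waiters once it reaches zero.
        with self.condition:
            self.count -= 1
            if self.count <= 0:
                self.condition.notify_all()

    def await_(self):
        # Block until count_down() has been called `count` times.
        with self.condition:
            while self.count > 0:
                self.condition.wait()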
import datetime
import itertools
import json
import multiprocessing
import os
import subprocess
import time

import xmltodict
from docloud.job import JobClient

# Helper functions (read_json_file, generating_delay_matrix, generating_req_adj,
# generating_nf_mapping_matrix, generating_AA, generating_OL, is_OL, get_OLs)
# are defined elsewhere in this module.


def solving_placement_problem_from_file(topology_graph, request_graph, test_num,
                                        CPLEX_PATH, cplex_models_path,
                                        results_path, locally):

    # Reading networkx file
    G_topology = read_json_file(topology_graph)
    G_request = read_json_file(request_graph)

    set_PM = list(G_topology.nodes)
    set_state_or_nf = list(G_request.nodes)
    set_state, set_nf, set_replica = [], [], []
    for i in set_state_or_nf:
        if "function" in i:
            set_nf.append(i)
        elif "state" in i:
            set_state.append(i)
        elif "replica" in i:
            set_replica.append(i)

    if not os.path.isfile("{}/p5_cplex_model_{}_2.lp".format(cplex_models_path, test_num)):
        cplex_f = open('{}/p5_cplex_model_{}_2.lp'.format(cplex_models_path, test_num), mode='a')

        # TODO: Validating request graph
        for i in set_state:
            try:
                G_request.nodes[i]['size']
            except KeyError:
                raise RuntimeError(
                    "The given request graph is incorrect: State {} has no 'size' value".format(i))

        s = {i: G_request.nodes[i]['size'] for i in set_state + set_replica}
        c = {i: G_topology.nodes[i]['capacity'] for i in set_PM}

        print("Generating delay matrix...")
        d = generating_delay_matrix(G_topology)
        print("Generating state-function adjacency matrix...")
        e_r = generating_req_adj(set_state, set_nf + set_replica, G_request)
        print("Generating Function mapping matrix...")
        M = generating_nf_mapping_matrix(G_topology)
        print("Generating Anti-Affinity set")
        AA = generating_AA(set_state, G_request)
        print("Generating OR-Link set")
        OL = generating_OL(set_state, set_nf, set_replica, G_request)

        # ## Into File #########################################################
        cplex_f = open('{}/p5_cplex_model_{}_2.lp'.format(cplex_models_path, test_num), mode='a')
        cplex_f.write("Minimize\n obj: [ ")
        servers = [i for i in set_PM if "server" in i]
        server_permutations = list(itertools.permutations(servers, 2))
        first = True
        for i, j in server_permutations:
            element_pairs = list(itertools.permutations(set_state + set_replica + set_nf, 2))
            for u, v in element_pairs:
                if e_r[u, v] * d[i, j] * 2 > 0:
                    if first:
                        cplex_f.write(" {} y_({},{})_({},{})*z_({},{})\n".format(
                            e_r[u, v] * d[i, j] * 2, i, u, j, v, u, v))
                        first = False
                    else:
                        cplex_f.write(" + {} y_({},{})_({},{})*z_({},{})\n".format(
                            e_r[u, v] * d[i, j] * 2, i, u, j, v, u, v))
        cplex_f.close()

        # Strip the trailing newline, then close the quadratic bracket.
        cplex_f = open('{}/p5_cplex_model_{}_2.lp'.format(cplex_models_path, test_num), mode='rb+')
        cplex_f.seek(-1, os.SEEK_END)
        cplex_f.truncate()
        cplex_f = open('{}/p5_cplex_model_{}_2.lp'.format(cplex_models_path, test_num), mode='a')
        cplex_f.write("]/2 \n")

        # constraint 1 --------------------------------------------------------
        print("Generating mapping constraints")
        cplex_f.write("\nSubject To \n")
        for u in set_state + set_replica:
            c_name = "c1_{}".format(u)
            cplex_f.write(" {}: ".format(c_name))
            for i in set_PM:
                cplex_f.write(" x_({},{}) +".format(i, u))
            # Drop the dangling " +" and terminate the row.
            cplex_f.close()
            cplex_f = open('{}/p5_cplex_model_{}_2.lp'.format(cplex_models_path, test_num), mode='rb+')
            cplex_f.seek(-2, os.SEEK_END)
            cplex_f.truncate()
            cplex_f = open('{}/p5_cplex_model_{}_2.lp'.format(cplex_models_path, test_num), mode='a')
            cplex_f.write(" = 1\n")

        # constraint 2 --------------------------------------------------------
        print("Generating capacity constraints")
        for i in set_PM:
            c_name = "c2_{}".format(i)
            cplex_f.write(" {}: ".format(c_name))
            for u in set_state + set_replica:
                cplex_f.write("{} x_({},{}) +".format(s[u], i, u))
            cplex_f.close()
            cplex_f = open('{}/p5_cplex_model_{}_2.lp'.format(cplex_models_path, test_num), mode='rb+')
            cplex_f.seek(-2, os.SEEK_END)
            cplex_f.truncate()
            cplex_f = open('{}/p5_cplex_model_{}_2.lp'.format(cplex_models_path, test_num), mode='a')
            cplex_f.write(" <= {}\n".format(c[i]))

        # constraint 3 --------------------------------------------------------
        print("Generating AA constraints")
        for i in set_PM:
            if "server" in i:
                for u, v in AA:
                    c_name = "c3_{}_{}_in_{}".format(u, v, i)
                    cplex_f.write(" {}: ".format(c_name))
                    cplex_f.write(" x_({},{}) + x_({}, {}) <= 1\n".format(i, u, i, v))

        # constraint 4 --------------------------------------------------------
        print("Generating NF mapping constraints")
        for function in set_nf:
            for server in set_PM:
                c_name = "c4_{}_in_{}".format(function, server)
                try:
                    if M[function] == server:
                        cplex_f.write(" {}: ".format(c_name))
                        cplex_f.write(" x_({},{}) = 1\n".format(server, function))
                    else:
                        cplex_f.write(" {}: ".format(c_name))
                        cplex_f.write(" x_({},{}) = 0\n".format(server, function))
                except KeyError:
                    # NF not present in the mapping matrix: pin it to zero.
                    cplex_f.write(" {}: ".format(c_name))
                    cplex_f.write(" x_({},{}) = 0\n".format(server, function))

        # constraint 5 --------------------------------------------------------
        def for_multiprocessing(pairs, from_, to_, test_num, process_id, cplex_models_path):
            print("Starting process {}, from: {}, to: {}".format(process_id, from_, to_))
            c5_f = open('{}/c5_testnum{}_tmp{}_2.txt'.format(cplex_models_path, test_num, process_id),
                        mode='a')
            start = from_
            for i, j in pairs[from_:to_]:
                if (start % 10000) == 0:
                    print("{}: {}".format(process_id, start))
                c_name = "c5_({},{})_({},{})_0".format(i[0], i[1], j[0], j[1])
                c5_f.write(" {}: ".format(c_name))
                c5_f.write(" y_({},{})_({},{}) >= 0 \n".format(i[0], i[1], j[0], j[1]))
                c_name = "c5_({},{})_({},{})_1".format(i[0], i[1], j[0], j[1])
                c5_f.write(" {}: ".format(c_name))
                c5_f.write(" y_({},{})_({},{}) - x_({},{}) - x_({},{}) >= -1 \n".format(
                    i[0], i[1], j[0], j[1], i[0], i[1], j[0], j[1]))
                start += 1
            c5_f.close()
            print("Ending process {}".format(process_id))

        print(datetime.datetime.now())
        print("Generating QP -> ILP transformation constraints")
        index_set = set()
        for i in set_PM:
            for u in set_state + set_replica + set_nf:
                index_set.add((i, u))
        index_combinations = list(itertools.permutations(index_set, 2))
        index_combinations_size = len(index_combinations)
        print("Size of this constraint set: {}".format(index_combinations_size))

        from_to_list = []
        core_num = 10
        # Integer division: the slice bounds below must be ints.
        core_job_count = len(index_combinations) // core_num
        for i in range(core_num):
            from_ = i * core_job_count
            to_ = (i + 1) * core_job_count
            if i == core_num - 1:
                to_ = index_combinations_size
            from_to_list.append((from_, to_))

        processes = []
        for i in range(0, core_num):
            p = multiprocessing.Process(
                target=for_multiprocessing,
                args=(index_combinations, from_to_list[i][0], from_to_list[i][1],
                      test_num, i, cplex_models_path))
            processes.append(p)
            p.start()
        for process in processes:
            process.join()

        tempfiles = ["{}/c5_testnum{}_tmp{}_2.txt".format(cplex_models_path, test_num, i)
                     for i in range(core_num)]
        for tempfile in tempfiles:
            # print("Adding file {}".format(tempfile))
            with open(tempfile, "r") as tmp_f:
                cplex_f.write(tmp_f.read())
        for tempfile in tempfiles:
            os.remove(tempfile)

        # constraint 6 --------------------------------------------------------
        print("Generating 'Does it matter?' constraints")
        for u in (set_state + set_nf + set_replica):
            for v in (set_state + set_nf + set_replica):
                if u != v:
                    if is_OL(u, v, OL):
                        c_name = "c6_({},{})_0".format(u, v)
                        cplex_f.write("\n {}: ".format(c_name))
                        first = True
                        for i, j in get_OLs(u, v, OL):
                            if first:
                                cplex_f.write(" z_({},{})".format(i, j))
                                first = False
                            else:
                                cplex_f.write(" + z_({},{})".format(i, j))
                        cplex_f.write(" = 1 \n")
                    else:
                        c_name = "c6_({},{})_1".format(u, v)
                        if "function" in u and "replica" in v:
                            cplex_f.write("\n {}: ".format(c_name))
                            cplex_f.write(" z_({},{}) = 0".format(u, v))
                        elif "replica" in u and "state" in v:
                            cplex_f.write("\n {}: ".format(c_name))
                            cplex_f.write(" z_({},{}) = 0".format(u, v))
                        else:
                            cplex_f.write("\n {}: ".format(c_name))
                            cplex_f.write(" z_({},{}) = 1".format(u, v))

        # Bounds ---------------------------------------------------------------
        cplex_f.write("\nBounds\n")
        for i in set_PM:
            for u in set_state + set_replica + set_nf:
                cplex_f.write("0 <= x_({},{}) <= 1\n".format(i, u))
        index_set = set()
        for i in set_PM:
            for u in set_state + set_replica + set_nf:
                index_set.add((i, u))
        index_permutations = list(itertools.permutations(index_set, 2))
        for i, j in index_permutations:
            cplex_f.write("0 <= y_({},{})_({},{}) <= 1\n".format(i[0], i[1], j[0], j[1]))
        index_permutations = list(itertools.permutations((set_state + set_nf + set_replica), 2))
        for u, v in index_permutations:
            cplex_f.write("0 <= z_({},{}) <= 1\n".format(u, v))

        # Binaries ---------------------------------------------------------------
        cplex_f.write("\nBinaries\n")
        for i in set_PM:
            for u in set_state + set_replica + set_nf:
                cplex_f.write(" x_({},{})\n".format(i, u))
        index_set = set()
        for i in set_PM:
            for u in set_state + set_replica + set_nf:
                index_set.add((i, u))
        index_permutations = list(itertools.permutations(index_set, 2))
        for i, j in index_permutations:
            cplex_f.write(" y_({},{})_({},{})\n".format(i[0], i[1], j[0], j[1]))
        index_permutations = list(itertools.permutations((set_state + set_nf + set_replica), 2))
        for u, v in index_permutations:
            cplex_f.write(" z_({},{})\n".format(u, v))

        time.sleep(2)
        cplex_f.write("End\n")
        cplex_f.close()

    ############################################################################
    if not os.path.isfile(CPLEX_PATH):
        raise RuntimeError('CPLEX does not exist ({})'.format(CPLEX_PATH))

    if not os.path.isfile("{}/p5_cplex_model_{}_2.sav".format(cplex_models_path, test_num)):
        subprocess.call(
            "{} -c 'read {}/p5_cplex_model_{}_2.lp' 'write {}/p5_cplex_model_{}_2.sav sav'".format(
                CPLEX_PATH, cplex_models_path, test_num, cplex_models_path, test_num),
            shell=True)

    t1 = datetime.datetime.now()
    cost = 0
    mapping_result = {i: "" for i in set_state + set_nf + set_replica}

    if locally:
        # solving the problem locally
        print("\n\nSolving the problem locally - 2")
        # Read the .sav generated above.
        subprocess.call(
            "{} -c 'read {}/p5_cplex_model_{}_2.sav' 'optimize' 'write {}/p5_cplex_result_{}_2 sol'".format(
                CPLEX_PATH, cplex_models_path, test_num, results_path, test_num),
            shell=True)
    else:
        print("\n\nSolving the problem remotely in the IBM cloud - 2")
        if not os.path.isfile("{}/p5_cplex_result_{}_2".format(results_path, test_num)):
            client = JobClient(
                "https://api-oaas.docloud.ibmcloud.com/job_manager/rest/v1/",
                "api_e7f3ec88-92fd-4432-84d7-f708c4a33132")
            print("You can check the status of the problem processing here: "
                  "https://dropsolve-oaas.docloud.ibmcloud.com/dropsolve")
            resp = client.execute(
                input=["{}/p5_cplex_model_{}_2.sav".format(cplex_models_path, test_num)],
                output="{}/p5_cplex_result_{}_2".format(results_path, test_num))
            if resp.job_info["solveStatus"] == "INFEASIBLE_SOLUTION":
                print("There is no valid mapping!")
                return 0

    def is_json_file(report_input_file):
        # CPLEX may emit the solution as JSON or as XML; XML starts with '<'.
        with open(report_input_file) as unknown_file:
            c = unknown_file.read(1)
        return c != '<'

    t2 = datetime.datetime.now()

    if is_json_file("{}/p5_cplex_result_{}_2".format(results_path, test_num)):
        with open("{}/p5_cplex_result_{}_2".format(results_path, test_num)) as f:
            result = json.load(f)
        for i in result["CPLEXSolution"]["variables"]:
            if ("x_" in i["name"]) and i["value"] == str(1):
                # print("{} = 1".format(i["name"]))
                server = i["name"].split(',')[0][3:]
                ve = i["name"].split(',')[1][:-1]
                mapping_result[ve] = server
        print("*** Delay cost: {} ***".format(
            result["CPLEXSolution"]["header"]["objectiveValue"]))
        cost = result["CPLEXSolution"]["header"]["objectiveValue"]
    else:
        with open("{}/p5_cplex_result_{}_2".format(results_path, test_num), 'r') as file:
            xml_result = file.read().replace('\n', '')
        result = xmltodict.parse(xml_result)
        print("*** Delay cost: {} ***".format(
            result["CPLEXSolution"]["header"]["@objectiveValue"]))
        cost = result["CPLEXSolution"]["header"]["@objectiveValue"]

    running_time = t2 - t1
    print("RUNNING TIME: {}".format(running_time))
    return cost, mapping_result, running_time
solver_manager = SolverManagerFactory('neos')
opt = SolverFactory('cplex', solver_io='lp')
results = solver_manager.solve(model, opt=opt)
model.solutions.store_to(results)
print(results)

# >>>> 3. docplexcloud
# key = #DOCPLEX API KEY (get from docloud website)
# base_url = #DOCPLEX URL (get from docloud website)
# Dependencies: docloud (pip install docloud)
import json
import glob

import pandas
from docloud.job import JobClient

client = JobClient(base_url, key)
model.write("temp.lp", io_options={"symbolic_solver_labels": True})
# with open("temp.lp") as lpfile:
#     resp = client.execute(input=lpfile, output=None, load_solution=True)
file = glob.glob("temp.lp")
resp = client.execute(input=file, output=None, load_solution=True)
solution = json.loads(resp.solution.decode("utf-8"))
# os.remove(file)

for i, k in solution['CPLEXSolution']['header'].items():
    print(i, ':', k)

results = pandas.DataFrame(solution['CPLEXSolution']['variables']).filter(
    items=['index', 'name', 'value', 'status'])
print(results)

# >>>> 4. GUROBI
from docloud.job import JobClient

if __name__ == '__main__':
    url = "Paste your base URL"
    api_key = "Paste your api key"
    client = JobClient(url, api_key)

    resp = client.execute(input=["models/truck.dat", "models/truck.mod"],
                          output="results.json")
def submit_model_data(self, attachments=None, gzip=False,
                      info_callback=None, info_to_monitor=None):
    """Submits a job to the cloud service.

    Args:
        attachments: A list of attachments. Each attachment is a dict with
            the following keys:

            - 'name' : the name of the attachment
            - 'data' : the data for the attachment
        gzip: If ``True``, data is gzipped before being sent over the network.
        info_callback: A callback to be called when some info is available.
            That callback takes one parameter: a dict containing the info as
            it becomes available.
        info_to_monitor: A set of information to monitor with info_callback.
            Currently, this can be ``jobid`` and ``progress``.
    """
    self.__vars = None
    self.timed_out = False
    self.results.clear()
    if not info_to_monitor:
        info_to_monitor = {}
    # check that url is valid
    parts = urlparse(self.docloud_context.url)
    if not parts.scheme:
        raise DOcloudConnectorException(
            "Malformed URL: '%s': No schema supplied." % self.docloud_context.url)
    proxies = self.docloud_context.proxies
    try:
        client = JobClient(self.docloud_context.url,
                           self.docloud_context.key,
                           proxies=proxies)
    except TypeError:
        # docloud clients <= 1.0.172 do not have the proxies parameter in init()
        warnings.warn("Using a docloud client that does not support proxies in init()",
                      UserWarning)
        client = JobClient(self.docloud_context.url, self.docloud_context.key)
    self.log("client created")
    if proxies:
        self.log("proxies = %s" % proxies)
    # prepare client
    if self.docloud_context.log_requests:
        client.rest_callback = \
            lambda m, u, *a, **kw: self._rest_callback(m, u, *a, **kw)
    client.verify = self.docloud_context.verify
    client.timeout = self.docloud_context.get('timeout', None)
    try:
        try:
            # Extract the list of attachment names
            att_names = [a['name'] for a in attachments]
            # create job
            jobid = client.create_job(
                attachments=att_names,
                parameters=self.docloud_context.job_parameters)
            self.log("job creation submitted, id is: {0!s}".format(jobid))
            if info_callback and 'jobid' in info_to_monitor:
                info_callback({'jobid': jobid})
        except ConnectionError as c_e:
            raise DOcloudConnectorException(
                "Cannot connect to {0}, error: {1}".format(self.docloud_context.url, str(c_e)))
        try:
            # now upload data
            for a in attachments:
                pos = 0
                if 'data' in a:
                    att_data = {'data': a['data']}
                elif 'file' in a:
                    att_data = {'file': a['file']}
                    pos = a['file'].tell()
                elif 'filename' in a:
                    att_data = {'filename': a['filename']}
                client.upload_job_attachment(jobid, attid=a['name'], **att_data)
                self.log("Attachment: %s has been uploaded" % a['name'])
                if self.docloud_context.debug_dump_dir:
                    target_dir = self.docloud_context.debug_dump_dir
                    if not os.path.exists(target_dir):
                        os.makedirs(target_dir)
                    self.log("Dumping input attachment %s to dir %s" % (a['name'], target_dir))
                    with open(os.path.join(target_dir, a['name']), "wb") as f:
                        if 'data' in a:
                            if isinstance(a['data'], bytes):
                                f.write(a['data'])
                            else:
                                f.write(a['data'].encode('utf-8'))
                        else:
                            a['file'].seek(pos)
                            f.write(a['file'].read())
            # execute job
            client.execute_job(jobid)
            self.log("DOcplexcloud execute has been started")
            # get job execution status until it's processed or failed
            timedout = False
            try:
                self._executionStatus = self.wait_for_completion(
                    client, jobid,
                    info_callback=info_callback,
                    info_to_monitor=info_to_monitor)
            except DOcloudInterruptedException:
                timedout = True
            self.log("docloud execution has finished")
            # get job status. Do this before any time out handling
            self.jobInfo = client.get_job(jobid)
            if self.docloud_context.fire_last_progress and info_callback:
                progress_data = self.map_job_info_to_progress_data(self.jobInfo)
                info_callback({'progress': progress_data})
            if timedout:
                self.timed_out = True
                self.log("Solve timed out after {waittime} sec".format(
                    waittime=self.docloud_context.waittime))
                return
            # get solution => download all attachments
            try:
                for a in client.get_job_attachments(jobid):
                    if a['type'] == 'OUTPUT_ATTACHMENT':
                        name = a['name']
                        self.log("Downloading attachment '%s'" % name)
                        attachment_as_string = self._as_string(
                            client.download_job_attachment(jobid, attid=name))
                        self.results[name] = attachment_as_string
                        if self.docloud_context.debug_dump_dir:
                            target_dir = self.docloud_context.debug_dump_dir
                            if not os.path.exists(target_dir):
                                os.makedirs(target_dir)
                            self.log("Dumping attachment %s to dir %s" % (name, target_dir))
                            with open(os.path.join(target_dir, name), "wb") as f:
                                f.write(attachment_as_string.encode('utf-8'))
            except DOcloudNotFoundError:
                self.log("no solution in attachment")
            self.log("docloud results have been received")
            # on_solve_finished_cb
            if self.docloud_context.on_solve_finished_cb:
                self.docloud_context.on_solve_finished_cb(jobid=jobid,
                                                          client=client,
                                                          connector=self)
            return
        finally:
            if self.docloud_context.delete_job:
                deleted = client.delete_job(jobid)
                self.log("delete status for job: {0!s} = {1!s}".format(jobid, deleted))
    finally:
        client.close()
from docloud.job import JobClient

# CPLEX_BASE_URL and CPLEX_API_KEY are module-level constants defined elsewhere.


def run(choice, threshold, confidence_interval, facility_number, min_threshold):
    """
    This submits the corresponding optimization problem to the CPLEX cloud.

    It takes 5 parameters:

    i)   choice : an integer (1-5) which represents the optimization model
         choice of the user
    ii)  threshold (m) : an integer which represents the maximum possible
         distance, in meters, between two districts that are counted as
         appropriate pairs
    iii) confidence_interval : an integer (0-100) which represents the
         confidence level of the stochastic model
    iv)  facility_number : an integer (0 - NUMBER_OF_DISTRICT) which
         represents the maximum number of fire stations for the maximum
         coverage models
    v)   min_threshold (min) : an integer which represents the maximum
         traveling time, in minutes, between two districts

    It creates a .txt file that contains the solution of the corresponding
    optimization problem and returns that file's path.
    """
    # Create a client
    client = JobClient(CPLEX_BASE_URL, CPLEX_API_KEY)

    # Choice == 1 , base model
    if choice == 1:
        resp = client.execute(
            input=["Mod_Files/BaseModel.mod",
                   "Mod_Files/BaseModel_" + str(threshold) + ".dat"],
            output="Solutions/BaseModel_Sol_" + str(threshold) + ".txt")
        return "Solutions/BaseModel_Sol_" + str(threshold) + ".txt"

    # Choice == 2 , multi coverage model
    if choice == 2:
        resp = client.execute(
            input=["Mod_Files/MultiCoverage.mod",
                   "Mod_Files/MultiCoverage_" + str(threshold) + ".dat"],
            output="Solutions/MultiCoverage_Sol_" + str(threshold) + ".txt")
        return "Solutions/MultiCoverage_Sol_" + str(threshold) + ".txt"

    # Choice == 3 , max coverage
    if choice == 3:
        resp = client.execute(
            input=["Mod_Files/MaxCoverage.mod",
                   "Mod_Files/MaxCoverage_" + str(threshold) + "_"
                   + str(facility_number) + ".dat"],
            output="Solutions/MaxCoverage_Sol_" + str(threshold) + "_"
                   + str(facility_number) + ".txt")
        return ("Solutions/MaxCoverage_Sol_" + str(threshold) + "_"
                + str(facility_number) + ".txt")

    # Choice == 4 , stochastic coverage
    if choice == 4:
        resp = client.execute(
            input=["Mod_Files/StochasticCoverage.mod",
                   "Mod_Files/Stochastic_Coverage_" + str(min_threshold) + "_"
                   + str(confidence_interval) + ".dat"],
            output="Solutions/Stochastic_Coverage_Sol_" + str(min_threshold) + "_"
                   + str(confidence_interval) + ".txt")
        return ("Solutions/Stochastic_Coverage_Sol_" + str(min_threshold) + "_"
                + str(confidence_interval) + ".txt")

    # Choice == 5 , stochastic max coverage
    if choice == 5:
        resp = client.execute(
            input=["Mod_Files/MaxCoverage.mod",
                   "Mod_Files/Stochastic_MaxCoverage_" + str(min_threshold) + "_"
                   + str(facility_number) + "_" + str(confidence_interval) + ".dat"],
            output="Solutions/Stochastic_MaxCoverage_Sol_" + str(min_threshold) + "_"
                   + str(facility_number) + "_" + str(confidence_interval) + ".txt")
        return ("Solutions/Stochastic_MaxCoverage_Sol_" + str(min_threshold) + "_"
                + str(facility_number) + "_" + str(confidence_interval) + ".txt")
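# A hedged usage sketch of run() above; the parameter values are illustrative
# placeholders, and the corresponding .mod/.dat files must already exist under
# Mod_Files/ for the submission to succeed:
#
# sol_path = run(choice=1, threshold=5000, confidence_interval=0,
#                facility_number=0, min_threshold=0)
# print("Solution written to", sol_path)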
 * cplex_config.py
 * cplex_config_<hostname>.py
 * docloud_config.py (must only contain context.solver.docloud configuration)

These files contain the credentials and other properties. For example,
something similar to::

    context.solver.docloud.url = 'https://docloud.service.com/job_manager/rest/v1'
    context.solver.docloud.key = 'example api_key'
'''
url = 'https://api-oaas.docloud.ibmcloud.com/job_manager/rest/v1'
key = 'example api_key'
# if url is None or key is None:
#     # create a default context and use credentials defined in there.
#     context = Context.make_default_context()
#     url = context.solver.docloud.url
#     key = context.solver.docloud.key
client = JobClient(url=url, api_key=key)

resp = client.execute(input=['NASL_MP.py',
                             'data/teams.csv',
                             'data/constraint_detail.csv',
                             'data/dates.csv',
                             'data/distances.json'],
                      output='solution.json',
                      waittime=60,
                      load_solution=True,
                      log='logs.txt')
the following files:

 * cplex_config.py
 * cplex_config_<hostname>.py
 * docloud_config.py (must only contain context.solver.docloud configuration)

These files contain the credentials and other properties. For example,
something similar to::

    context.solver.docloud.url = 'https://docloud.service.com/job_manager/rest/v1'
    context.solver.docloud.key = 'example api_key'
'''
url = None
key = None
if url is None or key is None:
    # create a default context and use credentials defined in there.
    context = Context.make_default_context()
    url = context.solver.docloud.url
    key = context.solver.docloud.key
client = JobClient(url=url, api_key=key)

resp = client.execute(input=['diet_pandas.py',
                             'diet_food.csv',
                             'diet_nutrients.csv',
                             'diet_food_nutrients.csv'],
                      output='solution.json',
                      load_solution=True,
                      log='logs.txt')
from docloud.job import JobClient

if __name__ == '__main__':
    url = "Paste your base URL"
    api_key = "Paste your api key"
    client = JobClient(url, api_key)

    with open("models/truck.mod", "rb") as modFile:
        resp = client.execute(input=[{"name": "truck.mod", "file": modFile},
                                     "models/truck.dat"],
                              output="results.json",
                              log="solver.log",
                              gzip=True,
                              waittime=300,
                              delete_on_completion=True)
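# execute() returns once the job completes or the given waittime (in seconds)
# elapses. A hedged follow-up check; the executionStatus field and its
# PROCESSED value follow the DOcloud job model and are assumptions here:
#
#     status = resp.job_info.get('executionStatus')
#     if status != 'PROCESSED':
#         print("Job did not finish cleanly: %s" % status)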
# Stdlib imports used by this client; the GAMS gmo/gev bindings
# (new_gevHandle_tp, gevCreate, gmo*, GMS_* constants), solverMap, and the
# Kestrel exception classes come from the GAMS Python API and the surrounding
# module, and are not shown here.
import base64
import gzip
import io
import json
import multiprocessing
import os
import re
import socket
import subprocess
import sys
import time
import xml.dom.minidom
import xmlrpc.client

try:
    from docloud.job import JobClient
    import_error = None
except ImportError as e:
    import_error = str(e)


class KestrelGamsClient:

    def __init__(self, argv):
        self.time1 = time.time()
        self.argv = argv
        self.serverProtocol = "https"
        self.serverHost = "neos-server.org"
        self.doCloudUrl = None
        self.doCloudKey = None
        self.doCloudClient = None
        self.doCloudPrmFile = None
        self.serverPort = 3333
        self.solverName = None
        self.jobNumber = None
        self.password = None
        self.priority = "long"
        self.socket_timeout = 0
        self.authUsername = None
        self.authUserPassword = None
        self.gmo = None
        self.gev = None

        # action-parameter is outdated
        '''
        if len(self.argv) >= 3:
            self.cntrfile = self.argv[2]
            self.action = self.argv[1].lower()
            if self.action not in ['kill', 'retrieve', 'submit', 'solve']:
                self.Usage()
        else:
            self.Usage()
        '''
        if len(self.argv) >= 2:
            self.cntrfile = self.argv[1]
            self.action = 'solve'
        else:
            self.Usage()

    def Usage(self):
        sys.stderr.write("\n--- Kestrel fatal error: usage\n")
        sys.stderr.write("    gamske_ux.out <cntrfile>\n")
        sys.exit(1)

    def Fatal(self, str):
        sys.stderr.write("\n--- Kestrel fatal error: %s\n\n" % str)
        sys.exit(1)

    def Error(self, str):
        if self.logopt in [1, 3, 4]:
            # Write the message to standard output
            sys.stdout.write("\n--- Kestrel error: %s\n\n" % str)
        if self.logopt in [2, 4]:
            # Append the error message to the logfile indicated
            try:
                f = open(self.logfilename, 'a')
                f.write("\n--- Kestrel error: %s\n\n" % str)
                f.close()
            except IOError as e:
                self.Fatal("Could not append to log file %s" % self.logfilename)
        try:
            f = open(self.statfilename, 'a')
            f.write("=1\n\n--- Kestrel error: %s\n\n=2\n" % str)
            f.close()
        except IOError as e:
            self.Fatal("Could not append to status file %s\n" % self.statfilename)
        sys.exit(0)

    def getDefaultEmail(self):
        if 'NEOS_EMAIL' in os.environ:
            return os.environ['NEOS_EMAIL']
        return None

    def parseControlFile(self):
        """
        This function does the following with the cntr file:

        line 13: extract isAscii, useOptions
        line 18: matrix file; save and change to gamsmatr.scr
        line 19: instruction file; save and change to gamsinst.scr
        line 20: set options file to 'kestrel.opt'
        line 21: status file; save and change to gamsstat.scr
        line 22: solution file; save and change to gamssolu.scr
        line 23: log file; save and remove absolute path
        line 24: dictionary file; save and change to gamsdict.scr
        line 25: set to '2' to write to log file
        line 28-30: set working, system, scratch directories to '.'
        line 33,34,35: remove license
        line 37: set parameter file
        line 38: read #models #solvers
                 ignore next #models + 2*#solvers with (SOLVER # # 0 ..)
                 + 3*#solvers with (SOLVER # # 1 ...) lines
        next two lines are more license (remove them)
        set directories of remaining paths to current directory (.scr, .so, sbbinfo.)
        change the scratch file extension to 'scr'
        """
        try:
            f = open(self.cntrfile, 'r')
            lines = f.readlines()
            f.close()
        except IOError as e:
            self.Fatal("Could not open control file %s" % self.cntrfile)

        # extract control version number
        self.cntver = 0
        m = re.match(r'(\d+)', lines[0])
        if m and m.groups():
            self.cntver = int(m.groups()[0])
        self.modeltype = int(lines[1].split()[0])

        # if self.cntver != 41 and self.cntver != 42:
        #     self.Fatal("GAMS 22.x required")
        if self.cntver not in [41, 42, 44, 46, 47, 48, 49, 50]:
            self.Fatal("GAMS cntr-file version 41, 42, 44, 46, 47, 48, 49, 50 required")

        # extract isAscii, useOptions
        m = re.match(r'(\d+)\s+(\d+)', lines[12])
        if m and m.groups():
            self.isAscii = m.groups()[0]
            self.useOptions = int(m.groups()[1])
        else:
            self.Fatal("Line 13 of the control file is incorrect")

        # is this an MPSGE model?
        self.isMPSGE = int(lines[15].split()[0])

        # get the matrix and instruction scratch files and patch
        self.matrfilename = lines[17].strip()
        lines[17] = "gamsmatr.scr\n"
        self.instfilename = lines[18].strip()
        lines[18] = "gamsinst.scr\n"

        # patch option file name; always use kestrel.opt
        self.optfilename = ""
        m = re.match(r'(.*)kestrel.*\.(.*)', lines[19])
        if m and m.groups():
            self.optfilename = m.groups()[0] + "kestrel." + m.groups()[1]
        lines[19] = "kestrel.opt\n"

        # get the status and solution scratch files and patch
        self.statfilename = lines[20].strip()
        lines[20] = "gamsstat.scr\n"
        self.solufilename = lines[21].strip()
        lines[21] = "gamssolu.scr\n"

        # get the log filename and patch
        self.logfilename = lines[22].strip()
        lines[22] = "gamslog.scr\n"

        # get the dictionary filename and patch
        self.dictfilename = lines[23].strip()
        lines[23] = "gamsdict.scr\n"

        # get the logfile option, then make output written to logfile
        m = re.match(r'(\d+)', lines[24])
        if m and m.groups():
            self.logopt = int(m.groups()[0])
        lines[24] = "2\n"

        # set working, system, and scratch directories
        self.scrdir = lines[29].strip()
        lines[27] = lines[28] = lines[29] = '.\n'

        # remove first part of license
        lines[32] = lines[33] = lines[34] = "\n"

        # patch parameter file
        lines[36] = "gmsprmun.scr"

        # downgrade the cntr-file version 50 to 49 // No change required since
        # this was in the license section which does not get copied
        if self.cntver == 50:  # 50 -> 49
            lines[0] = "49\n"
            self.cntver = 49

        # downgrade the cntr-file version 49 to 48 // No change required since
        # this was in the license section which does not get copied
        if self.cntver == 49:  # 49 -> 48
            lines[0] = "48\n"
            self.cntver = 48

        # downgrade the cntr-file version 48 to 47
        if self.cntver == 48:  # 48 -> 47
            lines[0] = "47\n"
            self.cntver = 47
            # remove last two numbers of this line
            lines[13] = lines[13].rpartition(' ')[0] + "\n"
            lines[13] = lines[13].rpartition(' ')[0] + "\n"

        # downgrade the cntr-file version 47 to 46
        if self.cntver == 47:  # 47 -> 46
            lines[0] = "46\n"
            self.cntver = 46
            # remove line with file name
            lines = lines[:-2]
            lines.append("")

        # downgrade the cntr-file version 46 to 42
        # no support for threads, external funclib and scensolver
        if self.cntver == 46:  # 46 -> 42
            lines[0] = "42\n"
            # remove last number of this line
            lines[2] = lines[2].rpartition(' ')[0] + "\n"
            # remove threads-option
            lines[13] = lines[13].rpartition(' ')[0] + "\n"
            # remove last two lines
            lines = lines[:-2]
            # treat the cntr-file now like a version 42 one
            self.cntver = 42
        # downgrade the cntr-file version 44 to 42
        elif self.cntver == 44:  # 44 -> 42
            lines[0] = "42\n"
            # remove threads-option
            lines[13] = lines[13].rpartition(' ')[0] + "\n"
            # remove last line
            lines = lines[:-1]
            # treat the cntr-file now like a version 42 one
            self.cntver = 42

        # ignore solver section and patch rest based on version number
        if self.cntver == 41:
            # remove second part of license
            lines[-11] = lines[-10] = "\n"
            # make everything in local directory
            lines[-9] = 'model.scr\n'
            lines[-4] = 'model.so\n'
            lines[-3] = 'sbbinfo.scr\n'
            lines[-2] = 'gamscntr.scr\n'
            lines[-1] = './\n'
            # set scratch file extension
            self.scrext = "scr"
            # get the entire control file
            self.cntr = "".join(lines[:37]) + "".join(lines[-11:])
        elif self.cntver == 42:
            # remove second part of license
            lines[-13] = lines[-12] = "\n"
            # make everything in local directory
            lines[-11] = 'model.scr\n'
            lines[-6] = 'model.so\n'
            lines[-5] = 'sbbinfo.scr\n'
            lines[-4] = 'gamscntr.scr\n'
            lines[-3] = './\n'
            # patch scratch file extension
            self.scrext = lines[-2].strip()
            lines[-2] = 'scr\n'
            # get the entire control file
            self.cntr = "".join(lines[:37]) + "".join(lines[-13:])

    def writeErrorOutputFiles(self):
        """
        This writes solution and status files returned when an error occurs.
        """
        try:
            f = open(self.statfilename, "w")
            f.write("""=0 Kestrel\n""")
            f.close()
        except IOError as e:
            self.Error("Could not initialize status file %s\n" % self.statfilename)
        try:
            f = open(self.solufilename, "w")
            f.write(""" 1 6.0000000000000000E+00
 2 1.3000000000000000E+01
 3 0.0000000000000000E+00
 4 0.0
 5 0.0000000000000000E+00
 6 0.0
 7 0.0
 8 0.0
 0 0.0\n""")
            f.close()
        except IOError as e:
            self.Error("Could not open solution file %s\n" % self.solufilename)

    def writeLog(self, text):
        if self.logopt in [1, 3, 4]:
            sys.stdout.write(text)
        if self.logopt in [2, 4]:
            try:
                f = open(self.logfilename, 'a')
                f.write(text)
                f.close()
            except IOError as e:
                self.Fatal("Could not append to log file %s" % self.logfilename)

    def parseOptionsFile(self):
        if (self.useOptions == 0):
            # raise KestrelSolverException("No options file indicated\n", self.kestrelGamsSolvers)
            self.solverName = solverMap[self.modeltype]
        elif os.access(self.optfilename, os.R_OK):
            optfile = open(self.optfilename, 'r')
            self.writeLog("Reading parameter(s) from \"" + self.optfilename + "\"\n")
            for line in optfile:
                m = re.match(r'neos_user_password[\s=]+(\S+)', line)
                if m:
                    self.writeLog(">> neos_user_password ******")
                else:
                    self.writeLog(">> " + line)
                m = re.match(r'kestrel_priority[\s=]+(\S+)', line)
                if m:
                    value = m.groups()[0]
                    if value.lower() == "short":
                        self.priority = "short"
                m = re.match(r'kestrel_solver[\s=]+(\S+)', line)
                if m:
                    self.solverName = m.groups()[0]
                m = re.match(r'neos_username[\s=]+(\S+)', line)
                if m:
                    self.authUsername = m.groups()[0]
                m = re.match(r'neos_user_password[\s=]+(\S+)', line)
                if m:
                    self.authUserPassword = m.groups()[0]
                m = re.match(r'neos_server[\s=]+(\S+)://(\S+):(\d+)', line)
                if m:
                    self.serverProtocol = m.groups()[0]
                    self.serverHost = m.groups()[1]
                    self.serverPort = m.groups()[2]
                elif re.match(r'neos_server[\s=]+(\S+)://(\S+)', line):
                    m = re.match(r'neos_server[\s=]+(\S+)://(\S+)', line)
                    self.serverProtocol = m.groups()[0]
                    self.serverHost = m.groups()[1]
                elif re.match(r'neos_server[\s=]+(\S+):(\d+)', line):
                    m = re.match(r'neos_server[\s=]+(\S+):(\d+)', line)
                    self.serverHost = m.groups()[0]
                    self.serverPort = m.groups()[1]
                else:
                    m = re.match(r'neos_server[\s=]+(\S+)', line)
                    if m:
                        self.serverHost = m.groups()[0]
                m = re.match(r'kestrel_(job|jobnumber|jobNumber)[\s=]+(\d+)', line)
                if m:
                    self.jobNumber = int(m.groups()[1])
                m = re.match(r'kestrel_(pass|password)[\s=]+(\S+)', line)
                if m:
                    self.password = m.groups()[1]
                m = re.match(r'socket_timeout[\s=]+(\d+)', line)
                if m:
                    self.socket_timeout = m.groups()[0]
                    socket.setdefaulttimeout(float(self.socket_timeout))
                # options for doCloud
                m = re.match(r'docloud_url[\s=]+(\S+)', line)
                if m:
                    self.doCloudUrl = m.groups()[0]
                m = re.match(r'docloud_key[\s=]+(\S+)', line)
                if m:
                    self.doCloudKey = m.groups()[0]
                m = re.match(r'docloud_prmfile[\s=]+(\S+)', line)
                if m:
                    self.doCloudPrmFile = m.groups()[0]
            optfile.close()
            self.writeLog("\nFinished reading from \"" + self.optfilename + "\"\n")
        else:
            raise KestrelSolverException("Could not read options file %s\n"
                                         % self.optfilename, self.kestrelGamsSolvers)

    def connectServer(self):
        # doCloud
        if self.doCloudUrl is not None:
            if import_error:
                self.Error(import_error)
            if sys.platform == "win32":
                os.environ['REQUESTS_CA_BUNDLE'] = os.path.join(
                    os.path.dirname(sys.executable), "GMSPython", "Lib",
                    "site-packages", "certifi", "cacert.pem")
            self.doCloudClient = JobClient(self.doCloudUrl, self.doCloudKey)
        # neos
        else:
            if self.logopt in [1, 3, 4]:
                sys.stdout.write("Connecting to: %s://%s:%s\n"
                                 % (self.serverProtocol, self.serverHost, self.serverPort))
            if self.logopt in [2, 4]:
                # Append the message to the logfile indicated
                try:
                    f = open(self.logfilename, 'a')
                    f.write("Connecting to: %s://%s:%s\n"
                            % (self.serverProtocol, self.serverHost, self.serverPort))
                    f.close()
                except IOError as e:
                    self.Fatal("Could not append to log file %s" % self.logfilename)
            self.neos = xmlrpc.client.Server(
                "%s://%s:%s" % (self.serverProtocol, self.serverHost, self.serverPort))
            reply = self.neos.ping()
            if reply.find('alive') < 0:
                raise KestrelException("Unable to contact NEOS at %s://%s:%s"
                                       % (self.serverProtocol, self.serverHost, self.serverPort))

    def obtainSolvers(self):
        # doCloud -> skip
        if self.doCloudUrl is not None:
            return
        # Form a list of all kestrel-gams solvers available on NEOS
        allKestrelSolvers = self.neos.listSolversInCategory("kestrel")
        self.kestrelGamsSolvers = []
        for s in allKestrelSolvers:
            i = s.find(':GAMS')
            if i > 0:
                self.kestrelGamsSolvers.append(s[0:i])

    def checkOptionsFile(self):
        if self.solverName and (self.solverName.lower()
                                not in [s.lower() for s in self.kestrelGamsSolvers]):
            errmsg = "Solver '%s' not available on NEOS.\n" % self.solverName
            raise KestrelSolverException(errmsg, self.kestrelGamsSolvers)

    def formSubmission(self):
        if not self.solverName:
            raise KestrelSolverException(
                "No 'kestrel_solver' option found in option file\n",
                self.kestrelGamsSolvers)
        # Get the matrix, dictionary and instruction files
        gamsFiles = {}
        gamsFiles['cntr'] = io.BytesIO(self.cntr.encode())
        if os.access(self.matrfilename, os.R_OK):
            gamsFiles['matr'] = io.BytesIO()
            f = open(self.matrfilename, "rb")
            zipper = gzip.GzipFile(mode='wb', fileobj=gamsFiles['matr'])
            zipper.write(f.read())
            zipper.close()
            f.close()
        if os.access(self.instfilename, os.R_OK):
            gamsFiles['inst'] = io.BytesIO()
            f = open(self.instfilename, "rb")
            zipper = gzip.GzipFile(mode='wb', fileobj=gamsFiles['inst'])
            zipper.write(f.read())
            zipper.close()
            f.close()
        if os.access(self.dictfilename, os.R_OK):
            gamsFiles['dict'] = io.BytesIO()
            f = open(self.dictfilename, "rb")
            zipper = gzip.GzipFile(mode='wb', fileobj=gamsFiles['dict'])
            zipper.write(f.read())
            zipper.close()
            f.close()
        if (self.isMPSGE != 0 and self.modeltype == 5
                and os.access(os.path.join(self.scrdir, 'gedata.' + self.scrext), os.R_OK)):
            # MCP might be an MPSGE model
            gamsFiles['cge'] = io.BytesIO()
            f = open(os.path.join(self.scrdir, 'gedata.' + self.scrext), "rb")
            zipper = gzip.GzipFile(mode='wb', fileobj=gamsFiles['cge'])
            s = f.read()
            end = s.find(b"gamsdict.")
            if end != -1:
                start = end
                # indexing bytes yields ints in Python 3
                while s[end] != 32:  # whitespace
                    end = end + 1
                while s[start] != 0:
                    start = start - 1
                orgStr = s[start + 1:end]
                replStr = b"./gamsdict.scr" + b" " * (len(orgStr) - len("./gamsdict.scr"))
                s = s.replace(orgStr, replStr)
            zipper.write(s)
            zipper.close()
            f.close()

        self.xml = """
<document>
<category>kestrel</category>
<solver>%s</solver>
<inputType>GAMS</inputType>
<priority>%s</priority>
""" % (self.solverName, self.priority)
        for key in list(gamsFiles.keys()):
            self.xml += "<%s><base64>%s</base64></%s>\n" % (
                key, base64.b64encode(gamsFiles[key].getvalue()).decode(), key)
            gamsFiles[key].close()
        # Remove 'kestrel', 'neos' and 'socket_timeout' options from the
        # options file; they are not needed
        email = None
        xpressemail = None
        runningtime = None
        self.xml += "<options><![CDATA["
        if self.useOptions:
            with open(self.optfilename) as fp:
                for line in fp.readlines():
                    if not re.match(r'kestrel|neos_server|neos_username|neos_user_password|email|xpressemail|runtime|socket_timeout', line):
                        self.xml += line
                    elif re.match(r'email', line):
                        email = line.rsplit()[1]
                    elif re.match(r'xpressemail', line):
                        xpressemail = line.rsplit()[1]
                    elif re.match(r'runtime', line):
                        runningtime = line.rsplit()[1]
        self.xml += "]]></options>\n"
        if not email:
            email = self.getDefaultEmail()
        if not email:
            self.Error("No email address provided. Either specify it in an "
                       "option file or set environment variable NEOS_EMAIL "
                       "(e.g. via gamsconfig.yaml).")
        self.xml += "<email>"
        self.xml += email
        self.xml += "</email>\n"
        if xpressemail:
            self.xml += "<xpressemail>"
            self.xml += xpressemail
            self.xml += "</xpressemail>\n"
        if runningtime:
            self.xml += "<priority>"
            self.xml += runningtime
            self.xml += "</priority>"
        self.xml += "</document>"

    def submit(self):
        # identify the submitter to NEOS
        user = "%s on %s" % (os.getenv('LOGNAME'), socket.getfqdn(socket.gethostname()))
        if self.authUsername is None or self.authUserPassword is None:
            if self.authUsername:
                self.writeLog("\nWarning: 'neos_username' was specified, but not 'neos_user_password'")
            if self.authUserPassword:
                self.writeLog("\nWarning: 'neos_user_password' was specified, but not 'neos_username'")
            (self.jobNumber, self.password) = \
                self.neos.submitJob(self.xml, user, "kestrel")
        else:
            (self.jobNumber, self.password) = \
                self.neos.authenticatedSubmitJob(self.xml, self.authUsername,
                                                 self.authUserPassword, "kestrel")
        if self.jobNumber == 0:
            raise KestrelException(self.password)
        if self.logopt in [1, 3, 4]:
            # Send the output to the screen
            sys.stdout.write("\nNEOS job#=%d, pass=%s\n\n" % (self.jobNumber, self.password))
            sys.stdout.write("Check the following URL for progress report:\n")
            # sys.stdout.write("http://www-neos.mcs.anl.gov/cgi-bin/nph-neos-solver.cgi?admin=results&jobnumber=%d&pass=%s\n\n" % (self.jobNumber,self.password))
            sys.stdout.write("%s://%s/neos/cgi-bin/nph-neos-solver.cgi?admin=results&jobnumber=%d&pass=%s\n\n"
                             % (self.serverProtocol, self.serverHost, self.jobNumber, self.password))
        if self.logopt in [2, 4]:
            # Append the message to the logfile indicated
            try:
                f = open(self.logfilename, 'a')
                f.write("\nNEOS job#=%d, pass=%s\n\n" % (self.jobNumber, self.password))
                f.write("Check the following URL for progress report:\n")
                f.write("%s://%s/neos/cgi-bin/nph-neos-solver.cgi?admin=results&jobnumber=%d&pass=%s\n\n"
                        % (self.serverProtocol, self.serverHost, self.jobNumber, self.password))
                f.close()
            except IOError as e:
                self.Error("Could not append to log file %s" % self.logfilename)
        try:
            f = open(self.statfilename, 'a')
            f.write("=1\n\n")
            f.write("\nNEOS job#=%d, pass=%s\n\n" % (self.jobNumber, self.password))
            f.write("Check the following URL for progress report:\n")
            f.write("%s://%s/neos/cgi-bin/nph-neos-solver.cgi?admin=results&jobnumber=%d&pass=%s\n\n"
                    % (self.serverProtocol, self.serverHost, self.jobNumber, self.password))
            f.write("=2\n")
            f.close()
        except IOError as e:
            self.Error("Could not append to status file %s\n" % self.statfilename)

    def generateMPS(self):
        # patch cntr file in order to use option file
        try:
            f = open(self.cntrfile, 'r')
            lines = f.readlines()
            f.close()
        except IOError as e:
            self.Fatal("Could not open control file %s" % self.cntrfile)
        lines[12] = "101010 1\n"
        lines[19] = os.path.join(self.scrdir, "convert.opt\n")
        fname = os.path.join(self.scrdir, "gamscntr2.dat")
        f = open(fname, "w")
        f.writelines(["%s" % i for i in lines])
        f.close()
        fname = os.path.join(self.scrdir, "convert.opt")
        f = open(fname, "w")
        f.write("CplexMPS")
        f.close()
        # win
        if sys.platform == "win32":
            si = subprocess.STARTUPINFO()
            try:
                si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                si.wShowWindow = subprocess.SW_HIDE
            except AttributeError:
                si.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
                si.wShowWindow = subprocess._subprocess.SW_HIDE
            cmdLine = "gmsgennx.exe " + os.path.join(self.scrdir, "gamscntr2.dat") + " convert"
            self._p = subprocess.Popen(cmdLine, startupinfo=si, cwd=self.scrdir,
                                       creationflags=subprocess.CREATE_NEW_CONSOLE)
            exitcode = self._p.wait()
        # unix/linux
        else:
            self._p = subprocess.Popen(
                ["gmsgennx.exe", os.path.join(self.scrdir, "gamscntr2.dat"), "convert"],
                cwd=self.scrdir)

        self.gev = new_gevHandle_tp()
        ret = gevCreate(self.gev, GMS_SSSIZE)
        if not ret[0]:
            raise Exception(ret[1])
        if gevInitEnvironmentLegacy(self.gev, os.path.join(self.scrdir, "gamscntr.dat")) != 0:
            raise Exception("Error calling gevInitEnvironmentLegacy")
        self.gmo = new_gmoHandle_tp()
        ret = gmoCreate(self.gmo, GMS_SSSIZE)
        if not ret[0]:
            raise Exception(ret[1])
        gmoRegisterEnvironment(self.gmo, gevHandleToPtr(self.gev))
        ret = gmoLoadDataLegacy(self.gmo)
        if ret[0] != 0:
            raise gams.workspace.GamsException(ret[1])
        if gmoModelType(self.gmo) not in [gmoProc_lp, gmoProc_mip, gmoProc_rmip,
                                          gmoProc_qcp, gmoProc_miqcp, gmoProc_rmiqcp]:
            gmoSolveStatSet(self.gmo, gmoSolveStat_Capability)
            gmoModelStatSet(self.gmo, gmoModelStat_NoSolutionReturned)
            gmoCompleteSolution(self.gmo)
            if gmoUnloadSolutionLegacy(self.gmo) != 0:
                gevLogStat(self.gev, '*** Could not write solution')
            self.Error("Wrong model type. Supported model types: LP, MIP, RMIP, QCP, MIQCP, RMIQCP\n")

        input = [os.path.join(self.scrdir, "cplex.mps"),
                 os.path.join(self.scrdir, "cplex.prm")]

        # create cplex param file
        f = open(os.path.join(self.scrdir, "cplex.prm"), "w")
        f.write("CPLEX Parameter File Version 12.6.3.0\n")
        reslim = str(gevGetIntOpt(self.gev, gevIterLim))
        if reslim != GMS_SV_NA:
            f.write("CPX_PARAM_ITLIM " + str(reslim) + "\n")
        optca = gevGetDblOpt(self.gev, gevOptCA)
        if optca != GMS_SV_NA:
            f.write("CPX_PARAM_EPAGAP " + str(optca) + "\n")
        optcr = gevGetDblOpt(self.gev, gevOptCR)
        if optcr != GMS_SV_NA:
            f.write("CPX_PARAM_EPGAP " + str(optcr) + "\n")
        cutoff = gevGetDblOpt(self.gev, gevCutOff)
        if cutoff != GMS_SV_NA and gevGetIntOpt(self.gev, gevUseCutOff):
            f.write("CPX_PARAM_CUTLO " + str(cutoff) + "\n")
            f.write("CPX_PARAM_CUTUP " + str(cutoff) + "\n")
        nodlim = gevGetIntOpt(self.gev, gevNodeLim)
        if nodlim != GMS_SV_NA:
            f.write("CPX_PARAM_NODELIM " + str(nodlim) + "\n")
        cheat = gevGetDblOpt(self.gev, gevCheat)
        if cheat != GMS_SV_NA and gevGetIntOpt(self.gev, gevUseCheat):
            f.write("CPX_PARAM_OBJDIF " + str(cheat) + "\n")
        threads = gevGetIntOpt(self.gev, gevThreadsRaw)
        if threads != GMS_SV_NA:
            if threads >= 0:
                f.write("CPX_PARAM_THREADS " + str(threads) + "\n")
            else:
                threads = max(multiprocessing.cpu_count() + threads, 1)
                f.write("CPX_PARAM_THREADS " + str(threads) + "\n")
        f.write("CPX_PARAM_MIPDISPLAY 4\n")
        if self.doCloudPrmFile:
            if os.path.isabs(self.doCloudPrmFile):
                prmFile = self.doCloudPrmFile
            else:
                prmFile = os.path.join(os.path.dirname(self.optfilename), self.doCloudPrmFile)
            f2 = open(prmFile, "r")
            lines = f2.readlines()[1:]
            for l in lines:
                f.write(l)
            f2.close()
        f.close()

        logFile = os.path.join(self.scrdir, "doLog.dat")
        self.resp = self.doCloudClient.execute(
            input=input,
            output=os.path.join(self.scrdir, "results.json"),
            load_solution=True,
            log=logFile)
        f = open(logFile, "r")
        lines = f.readlines()
        for l in lines:
            gevLogPChar(self.gev, l)

    def getResultsDoCloud(self):
        sol = json.loads(self.resp.solution.decode("utf-8"))
        if sol["CPLEXSolution"]["header"]["solutionTypeString"] == "basic":
            gotBasis = True
        else:
            gotBasis = False
        if sol["CPLEXSolution"]["header"]["solutionTypeString"] == "primal":
            isPrimal = True
        else:
            isPrimal = False
        mtol = ltol = 0
        etypemap = ['E', 'G', 'L', 'X', 'X', 'X']

        # Do the header info
        gmoSetHeadnTail(self.gmo, gmoHobjval,
                        float(sol["CPLEXSolution"]["header"]["objectiveValue"]))
        gmoSolveStatSet(self.gmo, 1)
        gmoModelStatSet(self.gmo, 1)
        if isPrimal:
            gmoSetHeadnTail(self.gmo, gmoHmarginals, 0)

        # Do the rows
        for equ in sol["CPLEXSolution"]["linearConstraints"]:
            idx = int(equ["index"])
            rhsvalue = gmoGetRhsOne(self.gmo, idx)
            level = rhsvalue - float(equ["slack"])
            if isPrimal:
                dual = GMS_SV_NA
            else:
                dual = float(equ["dual"])
            rowsign = etypemap[gmoGetEquTypeOne(self.gmo, idx)]
            if not gotBasis:
                if abs(dual) < mtol:
                    rowindic = gmoBstat_Basic
                    dual = 0.0
                elif ((gmoSense(self.gmo) == gmoObj_Min and dual > 0)
                      or (gmoSense(self.gmo) == gmoObj_Max and dual < 0)) \
                        and (abs(float(equ["slack"])) < ltol):
                    rowindic = gmoBstat_Lower
                    level = rhsvalue
                elif ((gmoSense(self.gmo) == gmoObj_Min and dual < 0)
                      or (gmoSense(self.gmo) == gmoObj_Max and dual > 0)) \
                        and (abs(float(equ["slack"])) < ltol):
                    rowindic = gmoBstat_Upper
                    level = rhsvalue
                else:
                    rowindic = gmoBstat_Super
            else:
                if "LL" == equ["status"]:
                    if rowsign == 'G':
                        rowindic = gmoBstat_Lower
                    else:
                        rowindic = gmoBstat_Upper
                elif "BS" == equ["status"]:
                    rowindic = gmoBstat_Basic
                else:
                    print("Illegal value in rowstat")
                    rowindic = gmoBstat_Super
            gmoSetSolutionEquRec(self.gmo, idx, level, dual, rowindic, gmoCstat_OK)

        # Do the columns
        for var in sol["CPLEXSolution"]["variables"]:
            idx = int(var["index"])
            primal = float(var["value"])
            if isPrimal:
                dual = GMS_SV_NA
            else:
                dual = float(var["reducedCost"])
            xctype = gmoGetVarTypeOne(self.gmo, idx)
            if not gotBasis:
                lb = gmoGetVarLowerOne(self.gmo, idx)
                ub = gmoGetVarUpperOne(self.gmo, idx)
                if abs(dual) < mtol:
                    colindic = gmoBstat_Basic
                    dual = 0.0
                elif ((gmoSense(self.gmo) == gmoObj_Min and dual > 0)
                      or (gmoSense(self.gmo) == gmoObj_Max and dual < 0)) \
                        and (abs(primal - lb) < ltol):
                    colindic = gmoBstat_Lower
                    primal = lb
                elif ((gmoSense(self.gmo) == gmoObj_Min and dual < 0)
                      or (gmoSense(self.gmo) == gmoObj_Max and dual > 0)) \
                        and (abs(ub - primal) < ltol):
                    colindic = gmoBstat_Upper
                    primal = ub
                else:
                    colindic = gmoBstat_Super
            elif var["status"] == "LL":
                colindic = gmoBstat_Lower
            elif var["status"] == "BS":
                colindic = gmoBstat_Basic
            elif var["status"] == "UL":
                colindic = gmoBstat_Upper
            elif var["status"] == "SB":
                colindic = gmoBstat_Super
            else:
                print("Illegal value in colstat.")
                colindic = gmoBstat_Super
            if gmoModelType(self.gmo) == gmoProc_mip and xctype != gmovar_X:
                # For integer or SOS variables always report super basic.
                colindic = gmoBstat_Super
            gmoSetSolutionVarRec(self.gmo, idx, primal, dual, colindic, gmoCstat_OK)

        gmoSetHeadnTail(self.gmo, gmoHresused, time.time() - self.time1)
        gmoCompleteSolution(self.gmo)
        if gmoUnloadSolutionLegacy(self.gmo) != 0:
            gevLogStat(self.gev, '*** Could not write solution')

    def getText(self, node):
        """
        Returns the text from the node of an xml document.
        """
        s = ""
        if isinstance(node, str):
            return node
        if isinstance(node.nodeValue, str):
            return node.data
        elif node.hasChildNodes():
            for n in node.childNodes:
                s += self.getText(n)
        return s

    def parseSolution(self, xmlstring):
        doc = xml.dom.minidom.parseString(xmlstring)
        node = doc.getElementsByTagName('solu')
        if node and len(node):
            try:
                f = open(self.solufilename, 'w')
                f.write(self.getText(node[0]))
                f.close()
            except IOError as e:
                self.Error("Could not write solution file %s\n" % self.solufilename)
        node = doc.getElementsByTagName('stat')
        if node and len(node):
            try:
                f = open(self.statfilename, 'w')
                f.write(self.getText(node[0]))
                f.close()
            except IOError as e:
                self.Error("Could not write status file %s\n" % self.statfilename)
        node = doc.getElementsByTagName('log')
        if node and len(node):
            if self.logopt in [1, 3, 4]:
                # Send the output to the screen
                sys.stdout.write(self.getText(node[0]))
            if self.logopt in [2, 4]:
                # Append the message to the logfile indicated
                try:
                    f = open(self.logfilename, 'a')
                    f.write(self.getText(node[0]))
                    f.close()
                except IOError as e:
                    self.Error("Could not append log file %s\n" % self.logfilename)
        doc.unlink()

    def getResults(self):
        offset = 0
        status = self.neos.getJobStatus(self.jobNumber, self.password)
        try:
            while (status == "Waiting" or status == "Running"):
                (results, offset) = self.neos.getIntermediateResults(
                    self.jobNumber, self.password, offset)
                if isinstance(results, xmlrpc.client.Binary):
                    results = results.data.decode()
                if results and len(results):
                    if self.logopt in [1, 3, 4]:
                        # Send the output to the screen
                        sys.stdout.write(results)
                    if self.logopt in [2, 4]:
                        # Append the message to the logfile indicated
                        try:
                            f = open(self.logfilename, 'a')
                            f.write(results)
                            f.close()
                        except IOError as e:
                            self.Error("Could not append to log file %s" % self.logfilename)
                    try:
                        f = open(self.statfilename, 'a')
                        f.write("=1\n\n")
                        f.write(results)
                        f.write("=2\n")
                        f.close()
                    except IOError as e:
                        self.Error("Could not append to status file %s\n" % self.statfilename)
                status = self.neos.getJobStatus(self.jobNumber, self.password)
                time.sleep(5)
        except KeyboardInterrupt as e:
            msg = '''Keyboard Interrupt\n\
Job is still running on remote machine\n\
To retrieve results, run GAMS using solver 'kestrel' with option file:\n\
kestrel_job %d\n\
kestrel_pass %s\n\n\
To stop job, run GAMS using solver 'kestrelkil' with above option file\n\
''' % (self.jobNumber, self.password)
            self.Error(msg)
        resultsXML = self.neos.getFinalResults(self.jobNumber, self.password)
        if isinstance(resultsXML, xmlrpc.client.Binary):
            resultsXML = resultsXML.data
        self.parseSolution(resultsXML)
import json

from docloud.job import JobClient
from trucking.model import ProblemEncoder, solution_decoder
from trucking.factory import ProblemFactory

"""This sample demonstrates how you can encode your object model as JSON
and how to decode DOcloud JSON output as objects.
"""

if __name__ == '__main__':
    url = "Paste your base URL"
    api_key = "Paste your api key"
    client = JobClient(url, api_key)

    factory = ProblemFactory()
    pb = factory.createSampleProblem()
    # encode the problem using the specified encoder (which extends json.JSONEncoder)
    data = json.dumps(pb, cls=ProblemEncoder).encode('utf-8')

    # submit the model truck.mod for execution,
    # using the Problem encoded as JSON for data,
    # then download the result once the execution is done
    # and finally delete the job
    resp = client.execute(input=["models/truck.mod",
                                 {'name': "truck.json", 'data': data}],
                          output="results.json",
                          delete_on_completion=True)
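# The sample imports solution_decoder but the snippet above stops before the
# decoding step. A hedged sketch of the decode side, assuming solution_decoder
# is an object_hook-style function (an assumption based on the import above):
#
#     with open("results.json") as f:
#         solution = json.load(f, object_hook=solution_decoder)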
import datetime
import itertools
import json
import os
import subprocess

import docplex.mp.model as cpx
from docloud.job import JobClient

# read_json_file and the generating_* / is_OL / get_OLs helpers are defined
# elsewhere in this module.


def solving_placement_problem_from_file(topology_graph, request_graph, test_num):
    if not os.path.isfile("./cplex_models/p5_cplex_model_{}.lp".format(test_num)):
        # just make sure the .lp file exists (the original never closed it)
        cplex_f = open('./cplex_models/p5_cplex_model_{}.lp'.format(test_num), mode='a')
        cplex_f.close()

    # Reading the networkx files
    G_topology = read_json_file(topology_graph)
    G_request = read_json_file(request_graph)

    set_PM = list(G_topology.nodes)
    set_state_or_nf = list(G_request.nodes)
    set_state, set_nf, set_replica = [], [], []
    for i in set_state_or_nf:
        if "function" in i:
            set_nf.append(i)
        elif "state" in i:
            set_state.append(i)
        elif "replica" in i:
            set_replica.append(i)

    # TODO: Validating request graph
    for i in set_state:
        try:
            G_request.nodes[i]['size']
        except KeyError:
            # the original built the RuntimeError without raising it
            raise RuntimeError(
                "The given request graph is incorrect: State {} has no 'size' value".format(i))

    s = {i: G_request.nodes[i]['size'] for i in set_state + set_replica}
    c = {i: G_topology.nodes[i]['capacity'] for i in set_PM}

    print("Generating delay matrix...")
    d = generating_delay_matrix(G_topology)
    print("Generating state-function adjacency matrix...")
    e_r = generating_req_adj(set_state, set_nf + set_replica, G_request)
    print("Generating function mapping matrix...")
    M = generating_nf_mapping_matrix(G_topology)
    print("Generating anti-affinity set...")
    AA = generating_AA(set_state, G_request)
    print("Generating OR-link set...")
    OL = generating_OL(set_state, set_nf, set_replica, G_request)

    opt_model = cpx.Model(name="P5")

    # Binary variables
    print("Creating variables 1...")
    x_vars = {(i, u): opt_model.binary_var(name="x_({0},{1})".format(i, u))
              for i in set_PM for u in set_state + set_replica + set_nf}

    print("Creating variables 2...")
    index_set = set()
    for i in set_PM:
        for u in set_state + set_replica + set_nf:
            index_set.add((i, u))
    index_permutations = list(itertools.permutations(index_set, 2))
    y_vars = {(i[0], i[1], j[0], j[1]): opt_model.binary_var(
                  name="y_({},{})_({},{})".format(i[0], i[1], j[0], j[1]))
              for i, j in index_permutations}

    print("Creating variables 3...")
    index_permutations = list(itertools.permutations(set_state + set_nf + set_replica, 2))
    z_vars = {(u, v): opt_model.binary_var(name="z_({},{})".format(u, v))
              for u, v in index_permutations}

    # == constraints 1 - a virtual element can be mapped onto only one server
    print("Creating constraints 1 - a virtual element can be mapped onto only one server")
    for u in set_state + set_replica:
        c_name = "c1_{}".format(u)
        opt_model.add_constraint(ct=opt_model.sum(x_vars[i, u] for i in set_PM) == 1,
                                 ctname=c_name)

    # <= constraints 2 - server capacity constraint
    print("Creating constraints 2 - server capacity constraint")
    for i in set_PM:
        c_name = "c2_{}".format(i)
        opt_model.add_constraint(
            ct=opt_model.sum(s[u] * x_vars[i, u] for u in set_state + set_replica) <= c[i],
            ctname=c_name)

    # <= constraints 3 - anti-affinity rules
    print("Creating constraints 3 - anti-affinity rules")
    for i in set_PM:
        if "server" in i:
            for u, v in AA:
                c_name = "c3_{}_{}_in_{}".format(u, v, i)
                opt_model.add_constraint(ct=(x_vars[i, u] + x_vars[i, v]) <= 1,
                                         ctname=c_name)

    # == constraints 4 - where the NFs run
    print("Creating constraints 4 - where the NFs run")
    for function in set_nf:
        for server in set_PM:
            c_name = "c4_{}_in_{}".format(function, server)
            try:
                if M[function] == server:
                    opt_model.add_constraint(ct=x_vars[server, function] == 1,
                                             ctname=c_name)
                else:
                    opt_model.add_constraint(ct=x_vars[server, function] == 0,
                                             ctname=c_name)
            except KeyError:
                # the function is not pinned in the mapping matrix
                opt_model.add_constraint(ct=x_vars[server, function] == 0,
                                         ctname=c_name)

    # >= constraints 5 - QP -> ILP transformation constraints
    print("Creating constraints 5 - QP -> ILP transformation constraints")
    index_set = set()
    for i in set_PM:
        for u in set_state + set_replica + set_nf:
            index_set.add((i, u))
    index_combinations = list(itertools.permutations(index_set, 2))
    for i, j in index_combinations:
        c_name = "c5_({},{})_({},{})_0".format(i[0], i[1], j[0], j[1])
        opt_model.add_constraint(ct=y_vars[i[0], i[1], j[0], j[1]] >= 0, ctname=c_name)
        c_name = "c5_({},{})_({},{})_1".format(i[0], i[1], j[0], j[1])
        opt_model.add_constraint(
            ct=y_vars[i[0], i[1], j[0], j[1]] >= (x_vars[i[0], i[1]] + x_vars[j[0], j[1]] - 1),
            ctname=c_name)

    print("Creating constraints 6 - z variable rules")
    for u in set_state + set_nf + set_replica:
        for v in set_state + set_nf + set_replica:
            if u != v:
                if is_OL(u, v, OL):
                    c_name = "c6_({},{})_0".format(u, v)
                    opt_model.add_constraint(
                        ct=opt_model.sum(z_vars[(i, j)] for i, j in get_OLs(u, v, OL)) == 1,
                        ctname=c_name)
                else:
                    c_name = "c6_({},{})_1".format(u, v)
                    if "function" in u and "replica" in v:
                        opt_model.add_constraint(ct=z_vars[(u, v)] == 0, ctname=c_name)
                    elif "replica" in u and "state" in v:
                        opt_model.add_constraint(ct=z_vars[(u, v)] == 0, ctname=c_name)
                    else:
                        opt_model.add_constraint(ct=z_vars[(u, v)] == 1, ctname=c_name)

    print("Creating objective function...")
    servers = [i for i in set_PM if "server" in i]
    server_permutations = list(itertools.permutations(servers, 2))
    objective = opt_model.sum(
        y_vars[i, u, j, v] * e_r[u, v] * d[i, j] * z_vars[u, v]
        for i, j in server_permutations
        for u, v in itertools.permutations(set_state + set_replica + set_nf, 2))

    # for minimization
    opt_model.minimize(objective)

    print("Exporting the problem")
    opt_model.export_as_lp(basename="p5_cplex_model_{}".format(test_num),
                           path="./cplex_models")
    subprocess.call(
        "/home/epmetra/projects/cplex/cplex/bin/x86-64_linux/cplex "
        "-c 'read /home/epmetra/projects/LO/cplex_models/p5_cplex_model_{}.lp' "
        "'write /home/epmetra/projects/LO/cplex_models/p5_cplex_model_{}.mps mps'"
        .format(test_num, test_num),
        shell=True)

    # solving with local cplex
    # print("Solving the problem locally")
    # print(datetime.datetime.now())
    # asd = opt_model.solve()

    # solving in the DOcplexcloud
    print("Solving the problem in the cloud - 1")
    print(datetime.datetime.now())
    if not os.path.isfile("optimization_results/p5_cplex_result_{}.json".format(test_num)):
        client = JobClient("https://api-oaas.docloud.ibmcloud.com/job_manager/rest/v1/",
                           "api_e7f3ec88-92fd-4432-84d7-f708c4a33132")
        print("You can check the status of the problem processing here: "
              "https://dropsolve-oaas.docloud.ibmcloud.com/dropsolve")
        resp = client.execute(
            input=["./cplex_models/p5_cplex_model_{}.mps".format(test_num)],
            output="optimization_results/p5_cplex_result_{}.json".format(test_num))

        mapping_result = {i: "" for i in set_state + set_nf + set_replica}
        if resp.job_info["solveStatus"] == "INFEASIBLE_SOLUTION":
            print("There is no valid mapping!")
            return 0
        else:
            with open("./optimization_results/p5_cplex_result_{}.json".format(test_num)) as f:
                result = json.load(f)
            for i in result["CPLEXSolution"]["variables"]:
                if ("x_" in i["name"]) and i["value"] == str(1):
                    print("{} = 1".format(i["name"]))
                    server = i["name"].split(',')[0][3:]
                    ve = i["name"].split(',')[1][:-1]
                    mapping_result[ve] = server
            print("*** Delay cost: {} ***".format(
                result["CPLEXSolution"]["header"]["objectiveValue"]))
            return result["CPLEXSolution"]["header"]["objectiveValue"], mapping_result
    else:
        with open("./optimization_results/p5_cplex_result_{}.json".format(test_num)) as f:
            result = json.load(f)
        for i in result["CPLEXSolution"]["variables"]:
            if ("x_" in i["name"]) and i["value"] == str(1):
                print("{} = 1".format(i["name"]))
        print("*** Delay cost: {} ***".format(
            result["CPLEXSolution"]["header"]["objectiveValue"]))
        return result["CPLEXSolution"]["header"]["objectiveValue"]
from docloud.job import JobClient

if __name__ == '__main__':
    url = "Paste your base URL"
    api_key = "Paste your api key"

    client = JobClient(url, api_key)

    with open("models/truck.mod", "rb") as modFile:
        resp = client.execute(input=[{"name": "truck.mod",
                                      "file": modFile},
                                     "models/truck.dat"],
                              output="results.json",
                              log="solver.log",
                              gzip=True,
                              waittime=300,
                              delete_on_completion=True)
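    # A small post-solve check, following the resp.job_info["solveStatus"]
    # pattern used elsewhere in this collection. The job itself has already
    # been deleted (delete_on_completion=True), so only the in-memory response
    # is consulted.
    solve_status = resp.job_info.get("solveStatus")
    if solve_status == "INFEASIBLE_SOLUTION":
        print("The truck model is infeasible")
    else:
        print("Solved with status %s; results written to results.json"
              % solve_status)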
def run_command(prog, argv, url=None, key=None):
    # Module-level imports assumed by this function: argparse, datetime, json,
    # os, shlex, sys, os.path.basename, plus the docplex CLI helpers
    # (ProgramResults, Context, JobClient, ls_jobs, rm_job, execute_job) and
    # `ip` (the interactive shell handle, None when run as a plain script).
    description = '''Command line client for DOcplexcloud.'''
    epilog = '''Command details:
  info      Get and display information for the jobs whose ids are specified as ARG.
  download  Download the job attachments to the current directory.
  rm        Delete the jobs whose ids are specified as ARG.
  rm all    Delete all jobs.
  logs      Download and display the logs for the jobs whose ids are specified.
  ls        List the jobs.
'''
    epilog_cli = '''  execute   Submit a job and wait for the end of execution. Each ARG that
            is a file is uploaded as the job input.
            Example:
               python run.py execute model.py model.data -v
            executes a job whose input files are model.py and model.data,
            in verbose mode.
'''
    filter_help = '''
Within filters, the following variables are defined:
   now: current date and time as a timestamp in milliseconds
   minute: 60 seconds in milliseconds
   hour: 60 minutes in milliseconds
   day: 24 hours in milliseconds
   job: the current job being filtered

Example filter usage:
   Delete all jobs older than 3 hours:
   python -m docplex.cli --filter "now-job['startedAt'] > 3*hour" rm
'''
    if ip is None:
        epilog += epilog_cli
    epilog += filter_help
    parser = argparse.ArgumentParser(prog=prog, description=description,
                                     epilog=epilog,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('command', metavar='COMMAND',
                        help='DOcplexcloud command')
    parser.add_argument('arguments', metavar='ARG', nargs='*',
                        help='Arguments for the command')
    parser.add_argument('--no-delete', action='store_true', default=False,
                        dest='nodelete',
                        help='If specified, jobs are not deleted after execution')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Verbose mode')
    parser.add_argument('--as', nargs=1, metavar='HOST', dest='host_config',
                        default=None,
                        help="'as host' - use the cplex_config_<HOST>.py configuration file found in PYTHONPATH")
    parser.add_argument('--url', nargs=1, metavar='URL', dest='url', default=None,
                        help='The DOcplexcloud connection URL. If not specified, will use those found in docplex config files')
    parser.add_argument('--key', nargs=1, metavar='API_KEY', dest='key', default=None,
                        help='The DOcplexcloud connection key. If not specified, will use those found in docplex config files')
    parser.add_argument('--details', action='store_true', default=False,
                        help='Display solve details as they are available')
    parser.add_argument('--filter', metavar='FILTER', default=None,
                        help='Filter on jobs. Example: --filter "now-job[\'startedAt\'] > 3*hour"')
    parser.add_argument('--quiet', '-q', action='store_true', default=False,
                        help='Only show numeric IDs as output')
    args = parser.parse_args(argv)

    program_result = ProgramResults()

    # Get the context here so that we have some credentials at hand
    context = Context.make_default_context()

    if args.host_config is not None:
        config_name = "cplex_config_%s.py" % args.host_config[0]
        config_file = list(filter(os.path.isfile,
                                  [os.path.join(x, config_name) for x in sys.path]))
        if len(config_file) == 0:
            print("Could not find config file for host: %s" % args.host_config[0])
            program_result.return_code = -1
            return program_result
        if args.verbose:
            print("Overriding host config with: %s" % config_file[0])
        context.read_settings(config_file[0])

    # Use the credentials in the context unless they are given to this function...
    client_url = context.solver.docloud.url if url is None else url
    client_key = context.solver.docloud.key if key is None else key
    # ...but if there are credentials in the arguments (--url, --key), use them
    if args.url:
        client_url = args.url
    if args.key:
        client_key = args.key
    if args.verbose:
        print('**** Connecting to %s with key %s' % (client_url, client_key))
        print('Will send command %s' % args.command)
        print('Arguments:')
        for i in args.arguments:
            print('  -> %s' % i)
        print('verbose = %s' % args.verbose)
    client = JobClient(client_url, client_key)

    target_jobs = []
    if args.filter:
        jobs = client.get_all_jobs()
        now = (datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000.0
        minute = 60 * 1000
        hour = 60 * minute
        day = 24 * hour
        context = {'now': now,
                   'minute': minute,
                   'hour': hour,
                   'day': day,
                   }
        for j in jobs:
            context['job'] = j
            keep = False
            try:
                keep = eval(args.filter, globals(), context)
            except KeyError:
                # if a key was not found, just assume the expression is false
                keep = False
            if keep:
                target_jobs.append(j)
        if target_jobs:
            for i in target_jobs:
                print('applying to %s' % i['_id'])

    if args.command == 'ls':
        ls_jobs(client, program_result, quiet=args.quiet, selected_jobs=target_jobs)
    elif args.command == 'info':
        if target_jobs:
            args.arguments = [x["_id"] for x in target_jobs]
        elif len(args.arguments) == 1 and args.arguments[0] == 'all':
            args.arguments = [x["_id"] for x in client.get_all_jobs()]
        for jid in args.arguments:
            info_text = "NOT FOUND"
            try:
                job = client.get_job(jid)
                info_text = json.dumps(job, indent=3)
            except Exception:
                pass
            print("%s:\n%s" % (jid, info_text))
    elif args.command == 'rm':
        if target_jobs:
            joblist = [x["_id"] for x in target_jobs]
        elif args.arguments:
            joblist = args.arguments
        else:
            joblist = shlex.split(sys.stdin.read())
        rm_job(client, joblist, verbose=args.verbose)
    elif args.command == 'logs':
        if target_jobs:
            if len(target_jobs) != 1:
                print('Logs can only be retrieved when the filter selects one job (actual selection count = %s)' % len(target_jobs))
                program_result.return_code = -1
                return program_result
            args.arguments = [x["_id"] for x in target_jobs]
        if not args.arguments:
            print('Please specify a job list in the arguments or using a filter.')
            program_result.return_code = -1
            return program_result
        for jid in args.arguments:
            log_items = client.get_log_items(jid)
            for log in log_items:
                for record in log["records"]:
                    print(record["message"])
    elif args.command == 'download':
        if target_jobs:
            if len(target_jobs) != 1:
                print('Jobs can only be downloaded when the filter selects one job (actual selection count = %s)' % len(target_jobs))
                program_result.return_code = -1
                return program_result
            args.arguments = [x["_id"] for x in target_jobs]
        for jid in args.arguments:
            job = client.get_job(jid)
            for attachment in job['attachments']:
                print('downloading %s' % attachment['name'])
                with open(attachment['name'], 'wb') as f:
                    # the original passed the builtin `id` here; the job id
                    # for this iteration is `jid`
                    f.write(client.download_job_attachment(jid, attachment['name']))
    elif args.command == 'execute':
        if target_jobs:
            print('The execute command does not support job filtering')
            program_result.return_code = -1
            return program_result
        inputs = [{'name': basename(a), 'filename': a} for a in args.arguments]
        if args.verbose:
            for i in inputs:
                print("Uploading %s as attachment name %s" % (i['filename'], i['name']))
        execute_job(client, inputs, args.verbose, args.details, args.nodelete)
    else:
        print("Unknown command: %s" % args.command)
        program_result.return_code = -1
        return program_result

    return program_result
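# A minimal entry-point sketch for the CLI above. It assumes ProgramResults
# initializes return_code to 0 on success, which matches how the function
# only sets it to -1 on errors.
if __name__ == '__main__':
    program_result = run_command(sys.argv[0], sys.argv[1:])
    sys.exit(program_result.return_code)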
# Imports assumed for this class (the cStringIO/urllib usage implies Python 2):
import os
import urllib
import cStringIO
from urlparse import urlparse

from docloud.job import JobClient
from docloud.status import JobExecutionStatus, JobSolveStatus

# ModelSource and OPLCollector are application classes defined elsewhere in
# this code base.


class Optimizer(object):
    '''
    Handles the actual optimization task. Creates and executes a job builder
    for an optimization problem instance. Encapsulates the DOcloud API.

    This class is designed to facilitate multiple calls to the optimizer, such
    as would occur in a decomposition algorithm, although it transparently
    supports single use as well. In particular, the data can be factored into
    a constant data set that does not vary from run to run (represented by a
    JSON or .dat file) and a variable piece that does vary (represented by a
    Collector object).

    The optimization model can also be factored into two pieces, a best
    practice for large models and multi-models: a data model that defines the
    tuples and tuple sets that will contain the input and output data, and an
    optimization model that defines the decision variables, decision
    expressions, objective function, constraints, and pre- and post-processing
    data transformations. Factoring either the data or the optimization model
    in this fashion is optional.

    The problem instance is specified by the OPL model and input data received
    from the invoking (e.g. ColumnGeneration) instance. Input and output data
    are realized as instances of OPLCollector, which in turn are specified by
    their respective schemas. This class is completely independent of the
    specific optimization problem to be solved.
    '''

    def __init__(self, problemName, model=None, resultDataModel=None,
                 credentials=None, *attachments):
        '''
        Constructs an Optimizer instance. The instance requires an optimization
        model as a parameter. You can also provide one or more data files as
        attachments, either in OPL .dat or in JSON format. This data does not
        change from solve to solve. If you have input data that does change,
        you can provide it to the solve method as an OPLCollector object.

        :param problemName: name of this optimization problem instance
        :type problemName: String
        :param model: an optimization model written in OPL
        :type model: Model.Source object or String
        :param resultDataModel: the application data model for the results of the optimization
        :type resultDataModel: dict<String, StructType>
        :param credentials: DOcplexcloud url and api key
        :type credentials: {"url": String, "key": String}
        :param attachments: URLs for files representing the data that does not vary from solve to solve
        :type attachments: list<URL>
        '''
        self.name = problemName
        self.model = model
        self.resultDataModel = resultDataModel
        self.attachData(attachments)
        self.streamsRegistry = []
        self.history = []
        self.credentials = credentials
        self.jobclient = JobClient(credentials["url"], credentials["key"])
        self.solveStatus = JobSolveStatus.UNKNOWN

    def getName(self):
        """ Returns the name of this problem """
        return self.name

    def setOPLModel(self, name, dotMods=None, modelText=None):
        '''
        Sets the OPL model. This method can take any number of dotMod
        arguments, but there are two common use cases:

        First, the optimization model can be composed of two pieces: a data
        model that defines the tuples and tuple sets that will contain the
        input and output data, and an optimization model that defines the
        decision variables, decision expressions, objective function,
        constraints, and pre- and post-processing data transformations. The
        two are concatenated, so they must be presented in that order. If such
        a composite model is used, you do not need to import the data model
        into the optimization model using an OPL include statement.

        Second, you do not have to use a separate data model, in which case a
        single dotMod must be provided which encompasses both the data model
        and the optimization model.

        @param name: the name assigned to this OPL model (should have the format of a file name with a .mod extension)
        @type name: String
        @param dotMods: URLs pointing to OPL .mod files, which will be concatenated in the order given
        @type dotMods: List<URL>
        @param modelText: the text of the OPL model, which will be concatenated in the order given
        @type modelText: List<String>
        @return: this optimizer
        @raise ValueError: if a model has already been defined or if dotMods or modelText is empty
        '''
        if self.model is not None:
            raise ValueError("model has already been set")
        self.model = ModelSource(name=name, dotMods=dotMods, modelText=modelText)
        return self

    def setResultDataModel(self, resultDataModel):
        '''
        Sets the application data model for the results of the optimization.

        @param resultDataModel: the application data model for the results of the optimization
        @type resultDataModel: dict<String, StructType>
        @return: this optimizer
        @raise ValueError: if a results data model has already been defined
        '''
        if self.resultDataModel is not None:
            raise ValueError("results data model has already been defined")
        self.resultDataModel = resultDataModel
        return self

    def attachData(self, attachments):
        '''
        Attaches one or more data files, either in OPL .dat or in JSON format.
        This data does not change from solve to solve. If you have input data
        that does change, you can provide it as a Collector object.

        @param attachments: files representing the data that does not vary from solve to solve
        @type attachments: list<URL>
        @return: this optimizer
        @raise ValueError: if an item of the same name has already been attached
        '''
        self.attachments = {}
        if attachments is not None:
            for f in attachments:
                # urlparse(f).path extracts the path component of the URL (the
                # original passed the ParseResult object itself to basename)
                fileName = os.path.splitext(os.path.basename(urlparse(f).path))[0]
                if fileName in self.attachments:
                    raise ValueError(fileName + " already attached")
                self.attachments[fileName] = f
        return self

    def solve(self, inputData=None, solutionId=""):
        '''
        Solves an optimization problem instance by calling the DOcloud solve
        service (OaaS). Creates a new job request, incorporating any changes
        to the variable input data, for a problem instance to be processed by
        the solve service. Once the problem is solved, the results are mapped
        to an instance of an OPLCollector.

        Note: this method will set a new destination for the JSON
        serialization of the input data.

        @param inputData: the variable, solve-specific input data
        @type inputData: OPLCollector
        @param solutionId: an identifier for the solution, used in iterative algorithms (set to an empty string if not needed)
        @type solutionId: String
        @return: a solution collector, or None if the job did not complete
        '''
        inputs = []
        if self.model is None:
            raise ValueError("A model attachment must be provided to the optimizer")
        stream = self.model.toStream()
        inputs.append({"name": self.model.getName(), "file": stream})
        self.streamsRegistry.append(stream)
        if self.attachments:  # is not empty
            for f in self.attachments:
                # urllib.urlopen returns a file-like stream for the URL (the
                # original constructed a FancyURLopener instance, which is not
                # itself a stream)
                stream = urllib.urlopen(self.attachments[f])
                inputs.append({"name": f, "file": stream})
                self.streamsRegistry.append(stream)
        if inputData is not None:
            outStream = cStringIO.StringIO()
            inputData.setJsonDestination(outStream).toJSON()
            inStream = cStringIO.StringIO(outStream.getvalue())
            inputs.append({"name": inputData.getName() + ".json", "file": inStream})
            self.streamsRegistry.extend([outStream, inStream])

        response = self.jobclient.execute(input=inputs,
                                          output="results.json",
                                          load_solution=True,
                                          log="solver.log",
                                          gzip=True,
                                          waittime=300,  # seconds
                                          delete_on_completion=False)
        self.jobid = response.jobid

        solution = None  # stays None unless the job was processed successfully
        status = self.jobclient.get_execution_status(self.jobid)
        if status == JobExecutionStatus.PROCESSED:
            results = cStringIO.StringIO(response.solution)
            self.streamsRegistry.append(results)
            # INFEASIBLE_SOLUTION, UNBOUNDED_SOLUTION, OPTIMAL_SOLUTION, ...
            self.solveStatus = response.job_info.get('solveStatus')
            solution = (OPLCollector(self.getName() + "Result" + solutionId,
                                     self.resultDataModel)
                        ).setJsonSource(results).fromJSON()
            self.history.append(solution)
        elif status == JobExecutionStatus.FAILED:
            # Report the failure message if one is defined. The original used
            # Java-style accessors (response.getJob().getFailureInfo()); the
            # job_info dict shape assumed here follows the DOcloud REST job
            # resource.
            message = response.job_info.get("failureInfo", {}).get("message", "")
            print("Failed " + message)
        else:
            print("Job Status: %s" % status)
        for s in self.streamsRegistry:
            s.close()
        self.jobclient.delete_job(self.jobid)
        return solution

    def getSolveStatus(self):
        """
        Returns the solve status as a string. Possible values:
          UNKNOWN: The algorithm has no information about the solution.
          FEASIBLE_SOLUTION: The algorithm found a feasible solution.
          OPTIMAL_SOLUTION: The algorithm found an optimal solution.
          INFEASIBLE_SOLUTION: The algorithm proved that the model is infeasible.
          UNBOUNDED_SOLUTION: The algorithm proved the model unbounded.
          INFEASIBLE_OR_UNBOUNDED_SOLUTION: The model is infeasible or unbounded.
        """
        return self.solveStatus
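# A minimal usage sketch for the Optimizer class. The credentials, model text,
# and result data model below are placeholders: a real resultDataModel is a
# dict<String, StructType> defined by the application, as the docstrings above
# describe.
if __name__ == '__main__':
    credentials = {"url": "Paste your base URL", "key": "Paste your api key"}
    optimizer = Optimizer("sampleProblem", credentials=credentials)
    optimizer.setOPLModel("sample.mod",
                          modelText=["/* data model + optimization model */"])
    optimizer.setResultDataModel({})  # placeholder schema
    solution = optimizer.solve()
    print(optimizer.getSolveStatus())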